language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/mapping/lazytoone/Airport.java
|
{
"start": 342,
"end": 664
}
|
class ____ {
@Id
private Integer id;
private String code;
public Airport() {
}
public Airport(Integer id, String code) {
this.id = id;
this.code = code;
}
public Integer getId() {
return id;
}
public String getCode() {
return code;
}
public void setCode(String code) {
this.code = code;
}
}
|
Airport
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TrafficControlBandwidthHandlerImpl.java
|
{
"start": 1865,
"end": 11459
}
|
class ____
implements OutboundBandwidthResourceHandler {
private static final Logger LOG =
LoggerFactory.getLogger(TrafficControlBandwidthHandlerImpl.class);
//In the absence of 'scheduling' support, we'll 'infer' the guaranteed
//outbound bandwidth for each container based on this number. This will
//likely go away once we add support on the RM for this resource type.
private static final int MAX_CONTAINER_COUNT = 50;
private final PrivilegedOperationExecutor privilegedOperationExecutor;
private final CGroupsHandler cGroupsHandler;
private final TrafficController trafficController;
private final ConcurrentHashMap<ContainerId, Integer> containerIdClassIdMap;
private Configuration conf;
private String device;
private boolean strictMode;
private int containerBandwidthMbit;
private int rootBandwidthMbit;
private int yarnBandwidthMbit;
public TrafficControlBandwidthHandlerImpl(PrivilegedOperationExecutor
privilegedOperationExecutor, CGroupsHandler cGroupsHandler,
TrafficController trafficController) {
this.privilegedOperationExecutor = privilegedOperationExecutor;
this.cGroupsHandler = cGroupsHandler;
this.trafficController = trafficController;
this.containerIdClassIdMap = new ConcurrentHashMap<>();
}
/**
* Bootstrapping 'outbound-bandwidth' resource handler - mounts net_cls
* controller and bootstraps a traffic control bandwidth shaping hierarchy
* @param configuration yarn configuration in use
* @return (potentially empty) list of privileged operations to execute.
* @throws ResourceHandlerException
*/
@Override
public List<PrivilegedOperation> bootstrap(Configuration configuration)
throws ResourceHandlerException {
conf = configuration;
//We'll do this inline for the time being - since this is a one time
//operation. At some point, LCE code can be refactored to batch mount
//operations across multiple controllers - cpu, net_cls, blkio etc
cGroupsHandler
.initializeCGroupController(CGroupsHandler.CGroupController.NET_CLS);
device = conf.get(YarnConfiguration.NM_NETWORK_RESOURCE_INTERFACE,
YarnConfiguration.DEFAULT_NM_NETWORK_RESOURCE_INTERFACE);
strictMode = configuration.getBoolean(YarnConfiguration
.NM_LINUX_CONTAINER_CGROUPS_STRICT_RESOURCE_USAGE, YarnConfiguration
.DEFAULT_NM_LINUX_CONTAINER_CGROUPS_STRICT_RESOURCE_USAGE);
rootBandwidthMbit = conf.getInt(YarnConfiguration
.NM_NETWORK_RESOURCE_OUTBOUND_BANDWIDTH_MBIT, YarnConfiguration
.DEFAULT_NM_NETWORK_RESOURCE_OUTBOUND_BANDWIDTH_MBIT);
yarnBandwidthMbit = conf.getInt(YarnConfiguration
.NM_NETWORK_RESOURCE_OUTBOUND_BANDWIDTH_YARN_MBIT, rootBandwidthMbit);
containerBandwidthMbit = (int) Math.ceil((double) yarnBandwidthMbit /
MAX_CONTAINER_COUNT);
StringBuilder logLine = new StringBuilder("strict mode is set to :")
.append(strictMode).append(System.lineSeparator());
if (strictMode) {
logLine.append("container bandwidth will be capped to soft limit.")
.append(System.lineSeparator());
} else {
logLine.append(
"containers will be allowed to use spare YARN bandwidth.")
.append(System.lineSeparator());
}
logLine
.append("containerBandwidthMbit soft limit (in mbit/sec) is set to : ")
.append(containerBandwidthMbit);
LOG.info(logLine.toString());
trafficController.bootstrap(device, rootBandwidthMbit, yarnBandwidthMbit);
return null;
}
/**
* Pre-start hook for 'outbound-bandwidth' resource. A cgroup is created
* and a net_cls classid is generated and written to a cgroup file. A
* traffic control shaping rule is created in order to limit outbound
* bandwidth utilization.
* @param container Container being launched
* @return privileged operations for some cgroups/tc operations.
* @throws ResourceHandlerException
*/
@Override
public List<PrivilegedOperation> preStart(Container container)
throws ResourceHandlerException {
String containerIdStr = container.getContainerId().toString();
int classId = trafficController.getNextClassId();
String classIdStr = trafficController.getStringForNetClsClassId(classId);
cGroupsHandler.createCGroup(CGroupsHandler.CGroupController
.NET_CLS,
containerIdStr);
cGroupsHandler.updateCGroupParam(CGroupsHandler.CGroupController.NET_CLS,
containerIdStr, CGroupsHandler.CGROUP_PARAM_CLASSID, classIdStr);
containerIdClassIdMap.put(container.getContainerId(), classId);
//Now create a privileged operation in order to update the tasks file with
//the pid of the running container process (root of process tree). This can
//only be done at the time of launching the container, in a privileged
//executable.
String tasksFile = cGroupsHandler.getPathForCGroupTasks(
CGroupsHandler.CGroupController.NET_CLS, containerIdStr);
String opArg = new StringBuilder(PrivilegedOperation.CGROUP_ARG_PREFIX)
.append(tasksFile).toString();
List<PrivilegedOperation> ops = new ArrayList<>();
ops.add(new PrivilegedOperation(
PrivilegedOperation.OperationType.ADD_PID_TO_CGROUP, opArg));
//Create a privileged operation to create a tc rule for this container
//We'll return this to the calling (Linux) Container Executor
//implementation for batching optimizations so that we don't fork/exec
//additional times during container launch.
TrafficController.BatchBuilder builder = trafficController.new
BatchBuilder(PrivilegedOperation.OperationType.TC_MODIFY_STATE);
builder.addContainerClass(classId, containerBandwidthMbit, strictMode);
ops.add(builder.commitBatchToTempFile());
return ops;
}
/**
* Reacquires state for a container - reads the classid from the cgroup
* being used for the container being reacquired
* @param containerId if of the container being reacquired.
* @return (potentially empty) list of privileged operations
* @throws ResourceHandlerException
*/
@Override
public List<PrivilegedOperation> reacquireContainer(ContainerId containerId)
throws ResourceHandlerException {
String containerIdStr = containerId.toString();
LOG.debug("Attempting to reacquire classId for container: {}",
containerIdStr);
String classIdStrFromFile = cGroupsHandler.getCGroupParam(
CGroupsHandler.CGroupController.NET_CLS, containerIdStr,
CGroupsHandler.CGROUP_PARAM_CLASSID);
int classId = trafficController
.getClassIdFromFileContents(classIdStrFromFile);
LOG.info("Reacquired containerId -> classId mapping: " + containerIdStr
+ " -> " + classId);
containerIdClassIdMap.put(containerId, classId);
return null;
}
@Override
public List<PrivilegedOperation> updateContainer(Container container)
throws ResourceHandlerException {
return null;
}
/**
* Returns total bytes sent per container to be used for metrics tracking
* purposes.
* @return a map of containerId to bytes sent
* @throws ResourceHandlerException
*/
public Map<ContainerId, Integer> getBytesSentPerContainer()
throws ResourceHandlerException {
Map<Integer, Integer> classIdStats = trafficController.readStats();
Map<ContainerId, Integer> containerIdStats = new HashMap<>();
for (Map.Entry<ContainerId, Integer> entry : containerIdClassIdMap
.entrySet()) {
ContainerId containerId = entry.getKey();
Integer classId = entry.getValue();
Integer bytesSent = classIdStats.get(classId);
if (bytesSent == null) {
LOG.warn("No bytes sent metric found for container: " + containerId +
" with classId: " + classId);
continue;
}
containerIdStats.put(containerId, bytesSent);
}
return containerIdStats;
}
/**
* Cleanup operations once container is completed - deletes cgroup and
* removes traffic shaping rule(s).
* @param containerId of the container that was completed.
* @return null
* @throws ResourceHandlerException
*/
@Override
public List<PrivilegedOperation> postComplete(ContainerId containerId)
throws ResourceHandlerException {
LOG.info("postComplete for container: " + containerId.toString());
cGroupsHandler.deleteCGroup(CGroupsHandler.CGroupController.NET_CLS,
containerId.toString());
Integer classId = containerIdClassIdMap.get(containerId);
if (classId != null) {
PrivilegedOperation op = trafficController.new
BatchBuilder(PrivilegedOperation.OperationType.TC_MODIFY_STATE)
.deleteContainerClass(classId).commitBatchToTempFile();
try {
privilegedOperationExecutor.executePrivilegedOperation(op, false);
trafficController.releaseClassId(classId);
} catch (PrivilegedOperationException e) {
LOG.warn("Failed to delete tc rule for classId: " + classId);
throw new ResourceHandlerException(
"Failed to delete tc rule for classId:" + classId);
}
} else {
LOG.warn("Not cleaning up tc rules. classId unknown for container: " +
containerId.toString());
}
return null;
}
@Override
public List<PrivilegedOperation> teardown()
throws ResourceHandlerException {
LOG.debug("teardown(): Nothing to do");
return null;
}
@Override
public String toString() {
return TrafficControlBandwidthHandlerImpl.class.getName();
}
}
|
TrafficControlBandwidthHandlerImpl
|
java
|
hibernate__hibernate-orm
|
hibernate-testing/src/main/java/org/hibernate/testing/orm/domain/gambit/SimpleEntity.java
|
{
"start": 497,
"end": 2385
}
|
class ____ {
private Integer id;
// NOTE : alphabetical
private Date someDate;
private Instant someInstant;
private Integer someInteger;
private Long someLong;
private String someString;
public SimpleEntity() {
}
public SimpleEntity(
Integer id,
String someString) {
this.id = id;
this.someString = someString;
}
public SimpleEntity(
Integer id,
String someString,
Long someLong) {
this.id = id;
this.someString = someString;
this.someLong = someLong;
}
public SimpleEntity(
Integer id,
String someString,
Long someLong,
Integer someInteger) {
this.id = id;
this.someString = someString;
this.someLong = someLong;
this.someInteger = someInteger;
}
public SimpleEntity(
Integer id,
Date someDate,
Instant someInstant,
Integer someInteger,
Long someLong,
String someString) {
this.id = id;
this.someDate = someDate;
this.someInstant = someInstant;
this.someInteger = someInteger;
this.someLong = someLong;
this.someString = someString;
}
@Id
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public String getSomeString() {
return someString;
}
public void setSomeString(String someString) {
this.someString = someString;
}
@NaturalId
public Integer getSomeInteger() {
return someInteger;
}
public void setSomeInteger(Integer someInteger) {
this.someInteger = someInteger;
}
public Long getSomeLong() {
return someLong;
}
public void setSomeLong(Long someLong) {
this.someLong = someLong;
}
@Temporal( TemporalType.TIMESTAMP )
public Date getSomeDate() {
return someDate;
}
public void setSomeDate(Date someDate) {
this.someDate = someDate;
}
public Instant getSomeInstant() {
return someInstant;
}
public void setSomeInstant(Instant someInstant) {
this.someInstant = someInstant;
}
}
|
SimpleEntity
|
java
|
apache__camel
|
core/camel-support/src/main/java/org/apache/camel/support/PollingConsumerSupport.java
|
{
"start": 1106,
"end": 1179
}
|
class ____ implementations of {@link PollingConsumer}
*/
public abstract
|
for
|
java
|
google__guava
|
android/guava/src/com/google/common/util/concurrent/ExecutionSequencer.java
|
{
"start": 5764,
"end": 13335
}
|
class ____ {
/**
* This field is only used for identity comparisons with the current thread. Field assignments
* are atomic, but do not provide happens-before ordering; however:
*
* <ul>
* <li>If this field's value == currentThread, we know that it's up to date, because write
* operations in a thread always happen-before subsequent read operations in the same
* thread
* <li>If this field's value == null because of unsafe publication, we know that it isn't the
* object associated with our thread, because if it was the publication wouldn't have been
* unsafe and we'd have seen our thread as the value. This state is also why a new
* ThreadConfinedTaskQueue object must be created for each inline execution, because
* observing a null thread does not mean the object is safe to reuse.
* <li>If this field's value is some other thread object, we know that it's not our thread.
* <li>If this field's value == null because it originally belonged to another thread and that
* thread cleared it, we still know that it's not associated with our thread
* <li>If this field's value == null because it was associated with our thread and was
* cleared, we know that we're not executing inline any more
* </ul>
*
* All the states where thread != currentThread are identical for our purposes, and so even
* though it's racy, we don't care which of those values we get, so no need to synchronize.
*/
@LazyInit @Nullable Thread thread;
/** Only used by the thread associated with this object */
@Nullable Runnable nextTask;
/** Only used by the thread associated with this object */
@Nullable Executor nextExecutor;
}
/**
* Enqueues a task to run when the previous task (if any) completes.
*
* <p>Cancellation does not propagate from the output future to a callable that has begun to
* execute, but if the output future is cancelled before {@link Callable#call()} is invoked,
* {@link Callable#call()} will not be invoked.
*/
public <T extends @Nullable Object> ListenableFuture<T> submit(
Callable<T> callable, Executor executor) {
checkNotNull(callable);
checkNotNull(executor);
return submitAsync(
new AsyncCallable<T>() {
@Override
public ListenableFuture<T> call() throws Exception {
return immediateFuture(callable.call());
}
@Override
public String toString() {
return callable.toString();
}
},
executor);
}
/**
* Enqueues a task to run when the previous task (if any) completes.
*
* <p>Cancellation does not propagate from the output future to the future returned from {@code
* callable} or a callable that has begun to execute, but if the output future is cancelled before
* {@link AsyncCallable#call()} is invoked, {@link AsyncCallable#call()} will not be invoked.
*/
public <T extends @Nullable Object> ListenableFuture<T> submitAsync(
AsyncCallable<T> callable, Executor executor) {
checkNotNull(callable);
checkNotNull(executor);
TaskNonReentrantExecutor taskExecutor = new TaskNonReentrantExecutor(executor, this);
AsyncCallable<T> task =
new AsyncCallable<T>() {
@Override
public ListenableFuture<T> call() throws Exception {
if (!taskExecutor.trySetStarted()) {
return immediateCancelledFuture();
}
return callable.call();
}
@Override
public String toString() {
return callable.toString();
}
};
/*
* Four futures are at play here:
* taskFuture is the future tracking the result of the callable.
* newFuture is a future that completes after this and all prior tasks are done.
* oldFuture is the previous task's newFuture.
* outputFuture is the future we return to the caller, a nonCancellationPropagating taskFuture.
*
* newFuture is guaranteed to only complete once all tasks previously submitted to this instance
* have completed - namely after oldFuture is done, and taskFuture has either completed or been
* cancelled before the callable started execution.
*/
SettableFuture<@Nullable Void> newFuture = SettableFuture.create();
ListenableFuture<@Nullable Void> oldFuture = ref.getAndSet(newFuture);
// Invoke our task once the previous future completes.
TrustedListenableFutureTask<T> taskFuture = TrustedListenableFutureTask.create(task);
oldFuture.addListener(taskFuture, taskExecutor);
ListenableFuture<T> outputFuture = Futures.nonCancellationPropagating(taskFuture);
// newFuture's lifetime is determined by taskFuture, which can't complete before oldFuture
// unless taskFuture is cancelled, in which case it falls back to oldFuture. This ensures that
// if the future we return is cancelled, we don't begin execution of the next task until after
// oldFuture completes.
Runnable listener =
() -> {
if (taskFuture.isDone()) {
// Since the value of oldFuture can only ever be immediateVoidFuture() or setFuture of a
// future that eventually came from immediateVoidFuture(), this doesn't leak throwables
// or completion values.
newFuture.setFuture(oldFuture);
} else if (outputFuture.isCancelled() && taskExecutor.trySetCancelled()) {
// If this CAS succeeds, we know that the provided callable will never be invoked,
// so when oldFuture completes it is safe to allow the next submitted task to
// proceed. Doing this immediately here lets the next task run without waiting for
// the cancelled task's executor to run the noop AsyncCallable.
//
// ---
//
// If the CAS fails, the provided callable already started running (or it is about
// to). Our contract promises:
//
// 1. not to execute a new callable until the old one has returned
//
// If we were to cancel taskFuture, that would let the next task start while the old
// one is still running.
//
// Now, maybe we could tweak our implementation to not start the next task until the
// callable actually completes. (We could detect completion in our wrapper
// `AsyncCallable task`.) However, our contract also promises:
//
// 2. not to cancel any Future the user returned from an AsyncCallable
//
// We promise this because, once we cancel that Future, we would no longer be able to
// tell when any underlying work it is doing is done. Thus, we might start a new task
// while that underlying work is still running.
//
// So that is why we cancel only in the case of CAS success.
taskFuture.cancel(false);
}
};
// Adding the listener to both futures guarantees that newFuture will always be set. Adding to
// taskFuture guarantees completion if the callable is invoked, and adding to outputFuture
// propagates cancellation if the callable has not yet been invoked.
outputFuture.addListener(listener, directExecutor());
taskFuture.addListener(listener, directExecutor());
return outputFuture;
}
|
ThreadConfinedTaskQueue
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/PerJobCheckpointRecoveryFactory.java
|
{
"start": 1542,
"end": 4305
}
|
class ____<T extends CompletedCheckpointStore>
implements CheckpointRecoveryFactory {
@VisibleForTesting
public static <T extends CompletedCheckpointStore>
CheckpointRecoveryFactory withoutCheckpointStoreRecovery(IntFunction<T> storeFn) {
return new PerJobCheckpointRecoveryFactory<>(
(maxCheckpoints, previous, sharedStateRegistry, ioExecutor, recoveryClaimMode) -> {
if (previous != null) {
throw new UnsupportedOperationException(
"Checkpoint store recovery is not supported.");
}
return storeFn.apply(maxCheckpoints);
});
}
private final CheckpointStoreRecoveryHelper<T> checkpointStoreRecoveryHelper;
private final Supplier<CheckpointIDCounter> checkpointIDCounterPerJobFactory;
private final ConcurrentMap<JobID, T> store;
private final ConcurrentMap<JobID, CheckpointIDCounter> counter;
public PerJobCheckpointRecoveryFactory(
CheckpointStoreRecoveryHelper<T> checkpointStoreRecoveryHelper) {
this(checkpointStoreRecoveryHelper, StandaloneCheckpointIDCounter::new);
}
public PerJobCheckpointRecoveryFactory(
CheckpointStoreRecoveryHelper<T> checkpointStoreRecoveryHelper,
Supplier<CheckpointIDCounter> checkpointIDCounterPerJobFactory) {
this.checkpointIDCounterPerJobFactory = checkpointIDCounterPerJobFactory;
this.store = new ConcurrentHashMap<>();
this.counter = new ConcurrentHashMap<>();
this.checkpointStoreRecoveryHelper = checkpointStoreRecoveryHelper;
}
@Override
public CompletedCheckpointStore createRecoveredCompletedCheckpointStore(
JobID jobId,
int maxNumberOfCheckpointsToRetain,
SharedStateRegistryFactory sharedStateRegistryFactory,
Executor ioExecutor,
RecoveryClaimMode recoveryClaimMode) {
return store.compute(
jobId,
(key, previous) ->
checkpointStoreRecoveryHelper.recoverCheckpointStore(
maxNumberOfCheckpointsToRetain,
previous,
sharedStateRegistryFactory,
ioExecutor,
recoveryClaimMode));
}
@Override
public CheckpointIDCounter createCheckpointIDCounter(JobID jobId) {
return counter.computeIfAbsent(jobId, jId -> checkpointIDCounterPerJobFactory.get());
}
/** Restores or creates a {@link CompletedCheckpointStore}, optionally using an existing one. */
@Internal
public
|
PerJobCheckpointRecoveryFactory
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/search/sort/NestedSortBuilder.java
|
{
"start": 1120,
"end": 6926
}
|
class ____ implements Writeable, ToXContentObject {
public static final ParseField NESTED_FIELD = new ParseField("nested");
public static final ParseField PATH_FIELD = new ParseField("path");
public static final ParseField FILTER_FIELD = new ParseField("filter");
public static final ParseField MAX_CHILDREN_FIELD = new ParseField("max_children");
private final String path;
private QueryBuilder filter;
private int maxChildren = Integer.MAX_VALUE;
private NestedSortBuilder nestedSort;
public NestedSortBuilder(String path) {
this.path = path;
}
public NestedSortBuilder(StreamInput in) throws IOException {
path = in.readOptionalString();
filter = in.readOptionalNamedWriteable(QueryBuilder.class);
nestedSort = in.readOptionalWriteable(NestedSortBuilder::new);
maxChildren = in.readVInt();
}
public String getPath() {
return path;
}
public QueryBuilder getFilter() {
return filter;
}
public int getMaxChildren() {
return maxChildren;
}
public NestedSortBuilder setFilter(final QueryBuilder filter) {
this.filter = filter;
return this;
}
public NestedSortBuilder setMaxChildren(final int maxChildren) {
this.maxChildren = maxChildren;
return this;
}
public NestedSortBuilder getNestedSort() {
return nestedSort;
}
public NestedSortBuilder setNestedSort(final NestedSortBuilder nestedSortBuilder) {
this.nestedSort = nestedSortBuilder;
return this;
}
/**
* Write this object's fields to a {@linkplain StreamOutput}.
*/
@Override
public void writeTo(final StreamOutput out) throws IOException {
out.writeOptionalString(path);
out.writeOptionalNamedWriteable(filter);
out.writeOptionalWriteable(nestedSort);
out.writeVInt(maxChildren);
}
@Override
public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException {
builder.startObject();
if (path != null) {
builder.field(PATH_FIELD.getPreferredName(), path);
}
if (filter != null) {
builder.field(FILTER_FIELD.getPreferredName(), filter);
}
if (maxChildren != Integer.MAX_VALUE) {
builder.field(MAX_CHILDREN_FIELD.getPreferredName(), maxChildren);
}
if (nestedSort != null) {
builder.field(NESTED_FIELD.getPreferredName(), nestedSort);
}
builder.endObject();
return builder;
}
public static NestedSortBuilder fromXContent(XContentParser parser) throws IOException {
String path = null;
QueryBuilder filter = null;
int maxChildren = Integer.MAX_VALUE;
NestedSortBuilder nestedSort = null;
XContentParser.Token token = parser.currentToken();
if (token == XContentParser.Token.START_OBJECT) {
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
String currentName = parser.currentName();
parser.nextToken();
if (currentName.equals(PATH_FIELD.getPreferredName())) {
path = parser.text();
} else if (currentName.equals(FILTER_FIELD.getPreferredName())) {
filter = parseNestedFilter(parser);
} else if (currentName.equals(MAX_CHILDREN_FIELD.getPreferredName())) {
maxChildren = parser.intValue();
} else if (currentName.equals(NESTED_FIELD.getPreferredName())) {
nestedSort = NestedSortBuilder.fromXContent(parser);
} else {
throw new IllegalArgumentException("malformed nested sort format, unknown field name [" + currentName + "]");
}
} else {
throw new IllegalArgumentException("malformed nested sort format, only field names are allowed");
}
}
} else {
throw new IllegalArgumentException("malformed nested sort format, must start with an object");
}
return new NestedSortBuilder(path).setFilter(filter).setMaxChildren(maxChildren).setNestedSort(nestedSort);
}
@Override
public boolean equals(final Object obj) {
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
NestedSortBuilder that = (NestedSortBuilder) obj;
return Objects.equals(path, that.path)
&& Objects.equals(filter, that.filter)
&& Objects.equals(maxChildren, that.maxChildren)
&& Objects.equals(nestedSort, that.nestedSort);
}
@Override
public int hashCode() {
return Objects.hash(path, filter, nestedSort, maxChildren);
}
public NestedSortBuilder rewrite(QueryRewriteContext ctx) throws IOException {
if (filter == null && nestedSort == null) {
return this;
}
QueryBuilder rewriteFilter = this.filter;
NestedSortBuilder rewriteNested = this.nestedSort;
if (filter != null) {
rewriteFilter = filter.rewrite(ctx);
}
if (nestedSort != null) {
rewriteNested = nestedSort.rewrite(ctx);
}
if (rewriteFilter != this.filter || rewriteNested != this.nestedSort) {
return new NestedSortBuilder(this.path).setFilter(rewriteFilter).setMaxChildren(this.maxChildren).setNestedSort(rewriteNested);
} else {
return this;
}
}
}
|
NestedSortBuilder
|
java
|
spring-projects__spring-framework
|
spring-webflux/src/main/java/org/springframework/web/reactive/result/method/AbstractHandlerMethodMapping.java
|
{
"start": 21573,
"end": 22080
}
|
class ____ {
private final T mapping;
private final MappingRegistration<T> registration;
public Match(T mapping, MappingRegistration<T> registration) {
this.mapping = mapping;
this.registration = registration;
}
public HandlerMethod getHandlerMethod() {
return this.registration.getHandlerMethod();
}
public boolean hasCorsConfig() {
return this.registration.hasCorsConfig();
}
@Override
public String toString() {
return this.mapping.toString();
}
}
private
|
Match
|
java
|
spring-cloud__spring-cloud-gateway
|
spring-cloud-gateway-server-webflux/src/main/java/org/springframework/cloud/gateway/config/HttpClientProperties.java
|
{
"start": 13316,
"end": 14078
}
|
class ____ {
/** Max frame payload length. */
private Integer maxFramePayloadLength;
/** Proxy ping frames to downstream services, defaults to true. */
private boolean proxyPing = true;
public Integer getMaxFramePayloadLength() {
return this.maxFramePayloadLength;
}
public void setMaxFramePayloadLength(Integer maxFramePayloadLength) {
this.maxFramePayloadLength = maxFramePayloadLength;
}
public boolean isProxyPing() {
return proxyPing;
}
public void setProxyPing(boolean proxyPing) {
this.proxyPing = proxyPing;
}
@Override
public String toString() {
return new ToStringCreator(this).append("maxFramePayloadLength", maxFramePayloadLength)
.append("proxyPing", proxyPing)
.toString();
}
}
}
|
Websocket
|
java
|
spring-projects__spring-boot
|
core/spring-boot-test/src/test/java/org/springframework/boot/test/context/filter/annotation/TypeExcludeFiltersContextCustomizerFactoryTests.java
|
{
"start": 5001,
"end": 5474
}
|
class ____ extends TypeExcludeFilter {
@Override
public boolean match(MetadataReader metadataReader, MetadataReaderFactory metadataReaderFactory) {
return metadataReader.getClassMetadata().getClassName().equals(getClass().getName());
}
@Override
public boolean equals(@Nullable Object obj) {
return obj != null && obj.getClass() == getClass();
}
@Override
public int hashCode() {
return SimpleExclude.class.hashCode();
}
}
static
|
SimpleExclude
|
java
|
quarkusio__quarkus
|
extensions/grpc/deployment/src/test/java/io/quarkus/grpc/server/blocking/MultiThreadedBlockingImplTest.java
|
{
"start": 937,
"end": 2531
}
|
class ____ {
private static final Logger logger = Logger.getLogger(GrpcServices.class);
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.setFlatClassPath(true)
.setArchiveProducer(
() -> ShrinkWrap.create(JavaArchive.class)
.addPackage(StandardBlocking.class.getPackage())
.addPackage(
StandardBlockingGrpcServiceGrpc.StandardBlockingGrpcServiceImplBase.class.getPackage()));
@GrpcClient
StandardBlockingGrpcServiceGrpc.StandardBlockingGrpcServiceBlockingStub client;
static ExecutorService executor = Executors.newCachedThreadPool();
@AfterAll
static void cleanup() {
executor.shutdown();
}
@Test
void testTheBlockingCallsCanBeDispatchedOnMultipleThreads() throws InterruptedException {
int count = 100;
ConcurrentHashSet<String> threads = new ConcurrentHashSet<>();
CountDownLatch latch = new CountDownLatch(count);
for (int i = 0; i < count; i++) {
int id = i;
executor.submit(() -> {
threads.add(invokeService(id));
latch.countDown();
});
}
Assertions.assertTrue(latch.await(10, TimeUnit.SECONDS));
Assertions.assertTrue(threads.size() > 1);
}
String invokeService(int id) {
return client.invoke(Request.newBuilder().setId(id).build()).getThread();
}
@GrpcService
@Blocking
static
|
MultiThreadedBlockingImplTest
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/inlineme/InlinerTest.java
|
{
"start": 35676,
"end": 36160
}
|
class ____ {
public void doTest() {
Client client = new Client();
int x = (1 + 2) * 3;
int y = 1 + 2 + 3;
}
}
""")
.doTest();
}
@Test
public void orderOfOperationsWhenInliningCasts() {
refactoringTestHelper
.addInputLines(
"Client.java",
"""
import com.google.errorprone.annotations.InlineMe;
public final
|
Caller
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/schemavalidation/BooleanAsTinyintValidationTests.java
|
{
"start": 5207,
"end": 5457
}
|
class ____ {
@Id
private Integer id;
private String name;
private boolean active;
public Client() {
}
public Client(Integer id, String name, boolean active) {
this.id = id;
this.name = name;
this.active = active;
}
}
}
|
Client
|
java
|
alibaba__nacos
|
api/src/test/java/com/alibaba/nacos/api/ai/model/mcp/registry/ServerResponseTest.java
|
{
"start": 1060,
"end": 6274
}
|
class ____ extends BasicRequestTest {
@Test
void testSerializeServerResponseBasic() throws JsonProcessingException {
ServerResponse response = new ServerResponse();
McpRegistryServerDetail server = new McpRegistryServerDetail();
server.setName("TestServer");
server.setDescription("Test Server Description");
server.setVersion("1.0.0");
response.setServer(server);
OfficialMeta official = new OfficialMeta();
official.setPublishedAt("2025-01-01T00:00:00Z");
official.setUpdatedAt("2025-01-15T00:00:00Z");
official.setIsLatest(true);
official.setStatus("active");
ServerResponse.Meta meta = new ServerResponse.Meta();
meta.setOfficial(official);
response.setMeta(meta);
String json = mapper.writeValueAsString(response);
assertNotNull(json);
assertTrue(json.contains("\"server\":{"));
assertTrue(json.contains("\"name\":\"TestServer\""));
assertTrue(json.contains("\"_meta\":{"));
assertTrue(json.contains("\"io.modelcontextprotocol.registry/official\":{"));
assertTrue(json.contains("\"publishedAt\":\"2025-01-01T00:00:00Z\""));
assertTrue(json.contains("\"status\":\"active\""));
}
@Test
void testDeserializeServerResponseBasic() throws JsonProcessingException {
String json = "{\"server\":{\"name\":\"TestServer\",\"version\":\"1.0.0\"},"
+ "\"_meta\":{\"io.modelcontextprotocol.registry/official\":"
+ "{\"publishedAt\":\"2025-01-01T00:00:00Z\",\"isLatest\":true}}}";
ServerResponse response = mapper.readValue(json, ServerResponse.class);
assertNotNull(response);
assertNotNull(response.getServer());
assertEquals("TestServer", response.getServer().getName());
assertEquals("1.0.0", response.getServer().getVersion());
assertNotNull(response.getMeta());
assertNotNull(response.getMeta().getOfficial());
assertEquals("2025-01-01T00:00:00Z", response.getMeta().getOfficial().getPublishedAt());
assertEquals(true, response.getMeta().getOfficial().getIsLatest());
}
@Test
void testServerResponseWithMetadataExtensions() throws JsonProcessingException {
Map<String, Object> extensionData = new HashMap<>();
extensionData.put("customField", "customValue");
extensionData.put("metadata", new HashMap<String, Object>() {
{
put("key1", "value1");
}
});
String json = "{\"server\":{\"name\":\"ExtendedServer\"},"
+ "\"_meta\":{"
+ "\"io.modelcontextprotocol.registry/official\":{\"publishedAt\":\"2025-01-01T00:00:00Z\"},"
+ "\"customExtension\":\"extensionValue\"}}";
ServerResponse response = mapper.readValue(json, ServerResponse.class);
assertNotNull(response);
assertNotNull(response.getServer());
assertEquals("ExtendedServer", response.getServer().getName());
assertNotNull(response.getMeta());
}
@Test
void testServerResponseMinimal() throws JsonProcessingException {
ServerResponse response = new ServerResponse();
McpRegistryServerDetail server = new McpRegistryServerDetail();
server.setName("MinimalServer");
response.setServer(server);
String json = mapper.writeValueAsString(response);
assertTrue(json.contains("\"server\":{"));
assertTrue(json.contains("\"name\":\"MinimalServer\""));
}
@Test
void testServerResponseMetaNullSafe() throws JsonProcessingException {
ServerResponse response = new ServerResponse();
McpRegistryServerDetail server = new McpRegistryServerDetail();
server.setName("TestServer");
response.setServer(server);
// Meta is not set - should handle null gracefully
String json = mapper.writeValueAsString(response);
assertNotNull(json);
assertTrue(json.contains("\"server\":{"));
}
@Test
void testServerResponseMetaOfficialNested() throws JsonProcessingException {
String json = "{"
+ "\"server\":{\"name\":\"NestedServer\"},"
+ "\"_meta\":{"
+ "\"io.modelcontextprotocol.registry/official\":{"
+ "\"publishedAt\":\"2025-01-01T00:00:00Z\","
+ "\"updatedAt\":\"2025-01-15T00:00:00Z\","
+ "\"isLatest\":true,"
+ "\"status\":\"active\"}}}";
ServerResponse response = mapper.readValue(json, ServerResponse.class);
assertEquals("NestedServer", response.getServer().getName());
assertEquals("2025-01-01T00:00:00Z", response.getMeta().getOfficial().getPublishedAt());
assertEquals("2025-01-15T00:00:00Z", response.getMeta().getOfficial().getUpdatedAt());
assertEquals(true, response.getMeta().getOfficial().getIsLatest());
assertEquals("active", response.getMeta().getOfficial().getStatus());
}
}
|
ServerResponseTest
|
java
|
apache__flink
|
flink-table/flink-table-runtime/src/test/java/org/apache/flink/table/runtime/operators/join/stream/multijoin/StreamingFourWayMixedInnerJoinOperatorTest.java
|
{
"start": 4559,
"end": 20220
}
|
class ____ for index > 0)
// Input 3 (Shipments): Must be withoutUniqueKey, explicitly set as per user edit.
this.inputSpecs.set(3, JoinInputSideSpec.withoutUniqueKey());
}
/**
* Test simple append-only data for the 4-way inner join.
*
* <p>SQL Equivalent:
*
* <pre>
* SELECT u.*, o.*, p.*, s.*
* FROM Users u
* INNER JOIN Orders o ON u.user_id_0 = o.user_id_1
* INNER JOIN Payments p ON u.user_id_0 = p.user_id_2
* INNER JOIN Shipments s ON u.user_id_0 = s.user_id_3
* </pre>
*/
@TestTemplate
void testFourWayMixedInnerJoin() throws Exception {
// Schema:
// Users(user_id_0, u_id_0, u_details_0)
// Orders(user_id_1, o_id_1, o_details_1)
// Payments(user_id_2, pay_id_2, pay_details_2)
// Shipments(user_id_3, s_id_3, s_details_3)
/* -------- INITIAL INSERTS - NO EMIT UNTIL ALL FOUR JOIN PARTNERS ARRIVE ----------- */
insertUser("uid1", "u1", "User 1");
emitsNothing();
insertOrder("uid1", "o1", "Order 1");
emitsNothing();
insertPayment("uid1", "pay1", "Payment 1");
emitsNothing();
insertShipment("uid1", "s1", "Shipment 1");
emits(
INSERT,
"uid1",
"u1",
"User 1",
"uid1",
"o1",
"Order 1",
"uid1",
"pay1",
"Payment 1",
"uid1",
"s1",
"Shipment 1");
/* -------- SECOND SET OF DATA ----------- */
insertUser("uid2", "u2", "User 2");
insertOrder("uid2", "o2", "Order 2");
insertPayment("uid2", "pay2", "Payment 2");
emitsNothing();
insertShipment("uid2", "s2", "Shipment 2");
emits(
INSERT,
"uid2",
"u2",
"User 2",
"uid2",
"o2",
"Order 2",
"uid2",
"pay2",
"Payment 2",
"uid2",
"s2",
"Shipment 2");
/* -------- ADDITIONAL DATA FOR EXISTING JOIN KEYS ----------- */
// Insert a second order for uid1, should join with existing U1, P1, S1
insertOrder("uid1", "o1_extra", "Order 1 Extra");
emits(
INSERT,
"uid1",
"u1",
"User 1",
"uid1",
"o1_extra",
"Order 1 Extra",
"uid1",
"pay1",
"Payment 1",
"uid1",
"s1",
"Shipment 1");
// Insert a second shipment for uid1 (no unique key on shipment)
// This should join with U1, O1, P1 and also with U1, O1_extra, P1
insertShipment("uid1", "s1_another", "Shipment 1 Another");
emits(
INSERT,
r(
"uid1",
"u1",
"User 1",
"uid1",
"o1",
"Order 1",
"uid1",
"pay1",
"Payment 1",
"uid1",
"s1_another",
"Shipment 1 Another"),
INSERT,
r(
"uid1",
"u1",
"User 1",
"uid1",
"o1_extra",
"Order 1 Extra",
"uid1",
"pay1",
"Payment 1",
"uid1",
"s1_another",
"Shipment 1 Another"));
}
/**
* Test changelog operations (updates, deletes) for the 4-way inner join. Despite the method
* name containing "LeftJoin", this test currently uses INNER joins as defined in the class
* constructor. It demonstrates various data transitions.
*
* <p>SQL Equivalent: (Same as inner join test)
*
* <pre>
* SELECT u.*, o.*, p.*, s.*
* FROM Users u
* INNER JOIN Orders o ON u.user_id_0 = o.user_id_1
* INNER JOIN Payments p ON u.user_id_0 = p.user_id_2
* INNER JOIN Shipments s ON u.user_id_0 = s.user_id_3
* </pre>
*/
@TestTemplate
void testFourWayMixedInnerJoinUpdating() throws Exception {
/* -------- SETUP INITIAL JOIN ----------- */
insertUser("uid1", "u1", "User 1");
insertOrder("uid1", "o1", "Order 1");
insertPayment("uid1", "pay1", "Payment 1");
insertShipment("uid1", "s1", "Shipment 1");
emits(
INSERT,
"uid1",
"u1",
"User 1",
"uid1",
"o1",
"Order 1",
"uid1",
"pay1",
"Payment 1",
"uid1",
"s1",
"Shipment 1");
/* -------- 1. USER (INPUT 0) DELETION AND RE-INSERTION ----------- */
deleteUser("uid1", "u1", "User 1");
emits(
DELETE,
"uid1",
"u1",
"User 1",
"uid1",
"o1",
"Order 1",
"uid1",
"pay1",
"Payment 1",
"uid1",
"s1",
"Shipment 1");
insertUser("uid1", "u1_new", "User 1 New");
emits(
INSERT,
"uid1",
"u1_new",
"User 1 New",
"uid1",
"o1",
"Order 1",
"uid1",
"pay1",
"Payment 1",
"uid1",
"s1",
"Shipment 1");
/* -------- 2. ORDER (INPUT 1) DELETION AND RE-INSERTION ----------- */
deleteOrder("uid1", "o1", "Order 1");
emits(
DELETE,
"uid1",
"u1_new",
"User 1 New",
"uid1",
"o1",
"Order 1",
"uid1",
"pay1",
"Payment 1",
"uid1",
"s1",
"Shipment 1");
insertOrder("uid1", "o1_new", "Order 1 New");
emits(
INSERT,
"uid1",
"u1_new",
"User 1 New",
"uid1",
"o1_new",
"Order 1 New",
"uid1",
"pay1",
"Payment 1",
"uid1",
"s1",
"Shipment 1");
/* -------- 3. PAYMENT (INPUT 2) DELETION AND RE-INSERTION ----------- */
deletePayment("uid1", "pay1", "Payment 1");
emits(
DELETE,
"uid1",
"u1_new",
"User 1 New",
"uid1",
"o1_new",
"Order 1 New",
"uid1",
"pay1",
"Payment 1",
"uid1",
"s1",
"Shipment 1");
insertPayment("uid1", "pay1_new", "Payment 1 New");
emits(
INSERT,
"uid1",
"u1_new",
"User 1 New",
"uid1",
"o1_new",
"Order 1 New",
"uid1",
"pay1_new",
"Payment 1 New",
"uid1",
"s1",
"Shipment 1");
/* -------- 4. SHIPMENT (INPUT 3 - NO UNIQUE KEY) DELETION AND RE-INSERTION ----------- */
deleteShipment("uid1", "s1", "Shipment 1"); // Simulates UB for no-PK
emits(
DELETE,
"uid1",
"u1_new",
"User 1 New",
"uid1",
"o1_new",
"Order 1 New",
"uid1",
"pay1_new",
"Payment 1 New",
"uid1",
"s1",
"Shipment 1");
insertShipment("uid1", "s1_new", "Shipment 1 New"); // Simulates UA for no-PK
emits(
INSERT,
"uid1",
"u1_new",
"User 1 New",
"uid1",
"o1_new",
"Order 1 New",
"uid1",
"pay1_new",
"Payment 1 New",
"uid1",
"s1_new",
"Shipment 1 New");
/* -------- 5. UPDATES TO EACH INPUT ----------- */
updateBeforeUser("uid1", "u1_new", "User 1 New");
emits(
UPDATE_BEFORE,
"uid1",
"u1_new",
"User 1 New", // Old User
"uid1",
"o1_new",
"Order 1 New",
"uid1",
"pay1_new",
"Payment 1 New",
"uid1",
"s1_new",
"Shipment 1 New");
updateAfterUser("uid1", "u1_updated", "User 1 Updated");
emits(
UPDATE_AFTER,
"uid1",
"u1_updated",
"User 1 Updated", // New User
"uid1",
"o1_new",
"Order 1 New",
"uid1",
"pay1_new",
"Payment 1 New",
"uid1",
"s1_new",
"Shipment 1 New");
updateBeforeOrder("uid1", "o1_new", "Order 1 New");
emits(
UPDATE_BEFORE,
"uid1",
"u1_updated",
"User 1 Updated",
"uid1",
"o1_new",
"Order 1 New", // Old Order
"uid1",
"pay1_new",
"Payment 1 New",
"uid1",
"s1_new",
"Shipment 1 New");
updateAfterOrder("uid1", "o1_updated", "Order 1 Updated");
emits(
UPDATE_AFTER,
"uid1",
"u1_updated",
"User 1 Updated",
"uid1",
"o1_updated",
"Order 1 Updated", // New Order
"uid1",
"pay1_new",
"Payment 1 New",
"uid1",
"s1_new",
"Shipment 1 New");
updateBeforePayment("uid1", "pay1_new", "Payment 1 New");
emits(
UPDATE_BEFORE,
"uid1",
"u1_updated",
"User 1 Updated",
"uid1",
"o1_updated",
"Order 1 Updated",
"uid1",
"pay1_new",
"Payment 1 New", // Old Payment
"uid1",
"s1_new",
"Shipment 1 New");
updateAfterPayment("uid1", "pay1_updated", "Payment 1 Updated");
emits(
UPDATE_AFTER,
"uid1",
"u1_updated",
"User 1 Updated",
"uid1",
"o1_updated",
"Order 1 Updated",
"uid1",
"pay1_updated",
"Payment 1 Updated", // New Payment
"uid1",
"s1_new",
"Shipment 1 New");
// Update for Shipment (Input 3 - no unique key)
// Treated as Delete + Insert
deleteShipment("uid1", "s1_new", "Shipment 1 New");
emits(
DELETE,
"uid1",
"u1_updated",
"User 1 Updated",
"uid1",
"o1_updated",
"Order 1 Updated",
"uid1",
"pay1_updated",
"Payment 1 Updated",
"uid1",
"s1_new",
"Shipment 1 New"); // Old Shipment (deleted)
insertShipment("uid1", "s1_updated", "Shipment 1 Updated");
emits(
INSERT,
"uid1",
"u1_updated",
"User 1 Updated",
"uid1",
"o1_updated",
"Order 1 Updated",
"uid1",
"pay1_updated",
"Payment 1 Updated",
"uid1",
"s1_updated",
"Shipment 1 Updated"); // New Shipment (inserted)
/* -------- 6. MULTIPLE RECORDS FOR NON-UNIQUE KEY INPUT (SHIPMENTS) ----------- */
insertShipment("uid1", "s2_another", "Shipment 2 Another");
emits(
INSERT,
"uid1",
"u1_updated",
"User 1 Updated",
"uid1",
"o1_updated",
"Order 1 Updated",
"uid1",
"pay1_updated",
"Payment 1 Updated",
"uid1",
"s2_another",
"Shipment 2 Another");
// Now we have U1-O1-P1-S_updated and U1-O1-P1-S2_another
// Delete Payment (Input 2)
deletePayment("uid1", "pay1_updated", "Payment 1 Updated");
emits(
DELETE,
r(
"uid1",
"u1_updated",
"User 1 Updated",
"uid1",
"o1_updated",
"Order 1 Updated",
"uid1",
"pay1_updated",
"Payment 1 Updated",
"uid1",
"s1_updated",
"Shipment 1 Updated"),
DELETE,
r(
"uid1",
"u1_updated",
"User 1 Updated",
"uid1",
"o1_updated",
"Order 1 Updated",
"uid1",
"pay1_updated",
"Payment 1 Updated",
"uid1",
"s2_another",
"Shipment 2 Another"));
// Re-insert Payment, should join with both shipments again
insertPayment("uid1", "pay1_final", "Payment 1 Final");
emits(
INSERT,
r(
"uid1",
"u1_updated",
"User 1 Updated",
"uid1",
"o1_updated",
"Order 1 Updated",
"uid1",
"pay1_final",
"Payment 1 Final",
"uid1",
"s1_updated",
"Shipment 1 Updated"),
INSERT,
r(
"uid1",
"u1_updated",
"User 1 Updated",
"uid1",
"o1_updated",
"Order 1 Updated",
"uid1",
"pay1_final",
"Payment 1 Final",
"uid1",
"s2_another",
"Shipment 2 Another"));
}
}
|
logic
|
java
|
processing__processing4
|
app/src/processing/app/Language.java
|
{
"start": 8751,
"end": 8910
}
|
class ____ consistent encoding.
* http://stackoverflow.com/questions/4659929/how-to-use-utf-8-in-resource-properties-with-resourcebundle
*/
/*
static
|
for
|
java
|
assertj__assertj-core
|
assertj-core/src/test/java/org/assertj/core/api/floatarray/FloatArrayAssert_containsAnyOf_Test.java
|
{
"start": 863,
"end": 1227
}
|
class ____ extends FloatArrayAssertBaseTest {
@Override
protected FloatArrayAssert invoke_api_method() {
return assertions.containsAnyOf(1.0f, 2.0f);
}
@Override
protected void verify_internal_effects() {
verify(arrays).assertContainsAnyOf(getInfo(assertions), getActual(assertions), arrayOf(1.0f, 2.0f));
}
}
|
FloatArrayAssert_containsAnyOf_Test
|
java
|
quarkusio__quarkus
|
extensions/resteasy-reactive/rest-client/deployment/src/test/java/io/quarkus/rest/client/reactive/beanparam/BeanParamTest.java
|
{
"start": 4166,
"end": 5714
}
|
class ____ {
@RestPath
private String restPathDefault = "restPathDefault";
@RestPath("restPath_Overridden")
private String restPathOverridden = "restPathOverridden";
@PathParam("pathParam")
private String pathParam = "pathParam";
@RestHeader
private String restHeaderDefault = "restHeaderDefault";
@RestHeader("restHeader_Overridden")
private String restHeaderOverridden = "restHeaderOverridden";
@HeaderParam("headerParam")
private String headerParam = "headerParam";
@RestCookie
private String restCookieDefault = "restCookieDefault";
@RestCookie("restCookie_Overridden")
private String restCookieOverridden = "restCookieOverridden";
@CookieParam("cookieParam")
private String cookieParam = "cookieParam";
@RestForm
private String restFormDefault = "restFormDefault";
@RestForm
private SomeEnum someEnum = SomeEnum.TEST;
@RestForm("restForm_Overridden")
private String restFormOverridden = "restFormOverridden";
@FormParam("formParam")
private String formParam = "formParam";
@RestQuery
private String restQueryDefault = "restQueryDefault";
@RestQuery("restQuery_Overridden")
private String restQueryOverridden = "restQueryOverridden";
@QueryParam("queryParam")
private String queryParam = "queryParam";
// FIXME: Matrix not supported
}
public static
|
MyBeanParamWithFields
|
java
|
spring-projects__spring-framework
|
spring-web/src/main/java/org/springframework/http/server/reactive/AbstractListenerReadPublisher.java
|
{
"start": 9599,
"end": 15616
}
|
enum ____ {
UNSUBSCRIBED {
@Override
<T> void subscribe(AbstractListenerReadPublisher<T> publisher, Subscriber<? super T> subscriber) {
Assert.notNull(publisher, "Publisher must not be null");
Assert.notNull(subscriber, "Subscriber must not be null");
if (publisher.changeState(this, SUBSCRIBING)) {
Subscription subscription = publisher.createSubscription();
publisher.subscriber = subscriber;
subscriber.onSubscribe(subscription);
publisher.changeState(SUBSCRIBING, NO_DEMAND);
publisher.handlePendingCompletionOrError();
}
else {
throw new IllegalStateException("Failed to transition to SUBSCRIBING, " +
"subscriber: " + subscriber);
}
}
@Override
<T> void onAllDataRead(AbstractListenerReadPublisher<T> publisher) {
publisher.completionPending = true;
publisher.handlePendingCompletionOrError();
}
@Override
<T> void onError(AbstractListenerReadPublisher<T> publisher, Throwable ex) {
publisher.errorPending = ex;
publisher.handlePendingCompletionOrError();
}
},
/**
* Very brief state where we know we have a Subscriber but must not
* send onComplete and onError until we after onSubscribe.
*/
SUBSCRIBING {
@Override
<T> void request(AbstractListenerReadPublisher<T> publisher, long n) {
if (Operators.validate(n)) {
Operators.addCap(DEMAND_FIELD_UPDATER, publisher, n);
publisher.changeToDemandState(this);
}
}
@Override
<T> void onAllDataRead(AbstractListenerReadPublisher<T> publisher) {
publisher.completionPending = true;
publisher.handlePendingCompletionOrError();
}
@Override
<T> void onError(AbstractListenerReadPublisher<T> publisher, Throwable ex) {
publisher.errorPending = ex;
publisher.handlePendingCompletionOrError();
}
@Override
<T> void cancel(AbstractListenerReadPublisher<T> publisher) {
publisher.completionPending = true;
publisher.handlePendingCompletionOrError();
}
},
NO_DEMAND {
@Override
<T> void request(AbstractListenerReadPublisher<T> publisher, long n) {
if (Operators.validate(n)) {
Operators.addCap(DEMAND_FIELD_UPDATER, publisher, n);
publisher.changeToDemandState(this);
}
}
},
DEMAND {
@Override
<T> void request(AbstractListenerReadPublisher<T> publisher, long n) {
if (Operators.validate(n)) {
Operators.addCap(DEMAND_FIELD_UPDATER, publisher, n);
// Did a concurrent read transition to NO_DEMAND just before us?
publisher.changeToDemandState(NO_DEMAND);
}
}
@Override
<T> void onDataAvailable(AbstractListenerReadPublisher<T> publisher) {
if (publisher.changeState(this, READING)) {
try {
boolean demandAvailable = publisher.readAndPublish();
if (demandAvailable) {
publisher.changeToDemandState(READING);
publisher.handlePendingCompletionOrError();
}
else {
publisher.readingPaused();
if (publisher.changeState(READING, NO_DEMAND)) {
if (!publisher.handlePendingCompletionOrError()) {
// Demand may have arrived since readAndPublish returned
long r = publisher.demand;
if (r > 0) {
publisher.changeToDemandState(NO_DEMAND);
}
}
}
}
}
catch (IOException ex) {
publisher.onError(ex);
}
}
// Else, either competing onDataAvailable (request vs container), or concurrent completion
}
},
READING {
@Override
<T> void request(AbstractListenerReadPublisher<T> publisher, long n) {
if (Operators.validate(n)) {
Operators.addCap(DEMAND_FIELD_UPDATER, publisher, n);
// Did a concurrent read transition to NO_DEMAND just before us?
publisher.changeToDemandState(NO_DEMAND);
}
}
@Override
<T> void onAllDataRead(AbstractListenerReadPublisher<T> publisher) {
publisher.completionPending = true;
publisher.handlePendingCompletionOrError();
}
@Override
<T> void onError(AbstractListenerReadPublisher<T> publisher, Throwable ex) {
publisher.errorPending = ex;
publisher.handlePendingCompletionOrError();
}
@Override
<T> void cancel(AbstractListenerReadPublisher<T> publisher) {
publisher.discardData();
publisher.completionPending = true;
publisher.handlePendingCompletionOrError();
}
},
COMPLETED {
@Override
<T> void request(AbstractListenerReadPublisher<T> publisher, long n) {
// ignore
}
@Override
<T> void cancel(AbstractListenerReadPublisher<T> publisher) {
// ignore
}
@Override
<T> void onAllDataRead(AbstractListenerReadPublisher<T> publisher) {
// ignore
}
@Override
<T> void onError(AbstractListenerReadPublisher<T> publisher, Throwable t) {
// ignore
}
};
<T> void subscribe(AbstractListenerReadPublisher<T> publisher, Subscriber<? super T> subscriber) {
throw new IllegalStateException(toString());
}
<T> void request(AbstractListenerReadPublisher<T> publisher, long n) {
throw new IllegalStateException(toString());
}
<T> void cancel(AbstractListenerReadPublisher<T> publisher) {
if (publisher.changeState(this, COMPLETED)) {
publisher.discardData();
}
else {
publisher.state.get().cancel(publisher);
}
}
<T> void onDataAvailable(AbstractListenerReadPublisher<T> publisher) {
// ignore
}
<T> void onAllDataRead(AbstractListenerReadPublisher<T> publisher) {
if (publisher.changeState(this, COMPLETED)) {
Subscriber<? super T> s = publisher.subscriber;
if (s != null) {
s.onComplete();
}
}
else {
publisher.state.get().onAllDataRead(publisher);
}
}
<T> void onError(AbstractListenerReadPublisher<T> publisher, Throwable t) {
if (publisher.changeState(this, COMPLETED)) {
publisher.discardData();
Subscriber<? super T> s = publisher.subscriber;
if (s != null) {
s.onError(t);
}
}
else {
publisher.state.get().onError(publisher, t);
}
}
}
}
|
State
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/solver/impl/BaseSolver.java
|
{
"start": 1794,
"end": 4344
}
|
class ____ {
/**
* Used to generate {@link ReservationId}.
*/
private static final Random RAND = new Random();
/**
* Translate the estimated {@link Resource} requirements of the pipeline to
* Hadoop's {@link ReservationSubmissionRequest}.
*
* @param containerSpec the {@link Resource} to be allocated to each
* container;
* @param containerRequests the predicted {@link Resource} to be allocated to
* the job in each discrete time intervals;
* @param config configuration file for BaseSolver.
* @return {@link ReservationSubmissionRequest} to be submitted to Hadoop to
* make recurring resource reservation for the pipeline.
*/
public final ReservationSubmissionRequest toRecurringRDL(
final Resource containerSpec,
final RLESparseResourceAllocation containerRequests,
final Configuration config) {
final int timeInterval =
config.getInt(ResourceEstimatorConfiguration.TIME_INTERVAL_KEY, 5);
long pipelineSubmissionTime = containerRequests.getEarliestStartTime();
long pipelineFinishTime = containerRequests.getLatestNonNullTime();
final long containerMemAlloc = containerSpec.getMemorySize();
final long jobLen =
(pipelineFinishTime - pipelineSubmissionTime) / timeInterval;
List<ReservationRequest> reservationRequestList = new ArrayList<>();
for (int i = 0; i < jobLen; i++) {
// container spec, # of containers, concurrency, duration
ReservationRequest reservationRequest = ReservationRequest
.newInstance(containerSpec, (int) (
containerRequests.getCapacityAtTime(i * timeInterval)
.getMemorySize() / containerMemAlloc), 1, timeInterval);
reservationRequestList.add(reservationRequest);
}
ReservationRequests reservationRequests = ReservationRequests
.newInstance(reservationRequestList,
ReservationRequestInterpreter.R_ALL);
ReservationDefinition reservationDefinition = ReservationDefinition
.newInstance(pipelineSubmissionTime, pipelineFinishTime,
reservationRequests, "LpSolver#toRecurringRDL");
ReservationId reservationId =
ReservationId.newInstance(RAND.nextLong(), RAND.nextLong());
ReservationSubmissionRequest reservationSubmissionRequest =
ReservationSubmissionRequest
.newInstance(reservationDefinition, "resourceestimator",
reservationId);
return reservationSubmissionRequest;
}
}
|
BaseSolver
|
java
|
apache__camel
|
components/camel-kafka/src/test/java/org/apache/camel/component/kafka/integration/KafkaProducerUseIteratorFalseIT.java
|
{
"start": 1329,
"end": 3278
}
|
class ____ extends BaseKafkaTestSupport {
private static final String TOPIC = "use-iterator-false";
private static final String FROM_URI = "kafka:" + TOPIC
+ "?groupId=KafkaProducerUseIteratorFalseIT&autoOffsetReset=earliest&keyDeserializer=org.apache.kafka.common.serialization.StringDeserializer&"
+ "valueDeserializer=org.apache.kafka.common.serialization.StringDeserializer"
+ "&autoCommitIntervalMs=1000&pollTimeoutMs=1000&autoCommitEnable=true&interceptorClasses=org.apache.camel.component.kafka.MockConsumerInterceptor";
@BeforeEach
public void init() {
MockConsumerInterceptor.recordsCaptured.clear();
}
@AfterEach
public void after() {
// clean all test topics
kafkaAdminClient.deleteTopics(Collections.singletonList(TOPIC));
}
@Test
public void testUseIteratorFalse() throws Exception {
List<String> body = new ArrayList<>();
body.add("first");
body.add("second");
MockEndpoint mock = contextExtension.getMockEndpoint("mock:result");
mock.expectedBodiesReceived(body.toString());
contextExtension.getProducerTemplate().sendBody("direct:start", body);
mock.assertIsSatisfied(5000);
assertEquals(1, MockConsumerInterceptor.recordsCaptured.stream()
.flatMap(i -> StreamSupport.stream(i.records(TOPIC).spliterator(), false)).count());
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start").to("kafka:" + TOPIC + "?groupId=KafkaProducerUseIteratorFalseIT&useIterator=false");
from(FROM_URI)
.to("mock:result");
}
};
}
}
|
KafkaProducerUseIteratorFalseIT
|
java
|
elastic__elasticsearch
|
x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ai21/Ai21Service.java
|
{
"start": 10730,
"end": 12241
}
|
class ____ {
public static InferenceServiceConfiguration get() {
return CONFIGURATION.getOrCompute();
}
private static final LazyInitializable<InferenceServiceConfiguration, RuntimeException> CONFIGURATION = new LazyInitializable<>(
() -> {
var configurationMap = new HashMap<String, SettingsConfiguration>();
configurationMap.put(
ServiceFields.MODEL_ID,
new SettingsConfiguration.Builder(SUPPORTED_TASK_TYPES).setDescription(
"Refer to the AI21 models documentation for the list of available inference models."
)
.setLabel("Model")
.setRequired(true)
.setSensitive(false)
.setUpdatable(false)
.setType(SettingsConfigurationFieldType.STRING)
.build()
);
configurationMap.putAll(DefaultSecretSettings.toSettingsConfiguration(SUPPORTED_TASK_TYPES));
configurationMap.putAll(RateLimitSettings.toSettingsConfiguration(SUPPORTED_TASK_TYPES));
return new InferenceServiceConfiguration.Builder().setService(NAME)
.setName(SERVICE_NAME)
.setTaskTypes(SUPPORTED_TASK_TYPES)
.setConfigurations(configurationMap)
.build();
}
);
}
}
|
Configuration
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/annotations/collectionelement/Widgets.java
|
{
"start": 914,
"end": 974
}
|
class ____ extends Widgets{
private String name2;
}
}
|
Widget2
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/Sqs2EndpointBuilderFactory.java
|
{
"start": 128124,
"end": 128431
}
|
class ____ extends AbstractEndpointBuilder implements Sqs2EndpointBuilder, AdvancedSqs2EndpointBuilder {
public Sqs2EndpointBuilderImpl(String path) {
super(componentName, path);
}
}
return new Sqs2EndpointBuilderImpl(path);
}
}
|
Sqs2EndpointBuilderImpl
|
java
|
apache__camel
|
components/camel-openstack/src/test/java/org/apache/camel/component/openstack/it/OpenstackKeystoneProjectTest.java
|
{
"start": 1425,
"end": 5516
}
|
class ____ extends OpenstackWiremockTestSupport {
private static final String URI_FORMAT
= "openstack-keystone://%s?username=user&password=secret&project=project&operation=%s&subsystem="
+ KeystoneConstants.PROJECTS;
private static final String PROJECT_NAME = "ProjectX";
private static final String PROJECT_ID = "3337151a1c38496c8bffcb280b19c346";
private static final String PROJECT_DOMAIN_ID = "7a71863c2d1d4444b3e6c2cd36955e1e";
private static final String PROJECT_DESCRIPTION = "Project used for CRUD tests";
private static final String PROJECT_DESCRIPTION_UPDATED = "An updated project used for CRUD tests";
private static final String PROJECT_EXTRA_KEY_1 = "extra_key1";
private static final String PROJECT_EXTRA_VALUE_1 = "value1";
private static final String PROJECT_EXTRA_KEY_2 = "extra_key2";
private static final String PROJECT_EXTRA_VALUE_2 = "value2";
private static final List<String> TAGS = Arrays.asList("one", "two", "three");
@Test
void createShouldSucceed() {
Project in = Builders.project().name(PROJECT_NAME).description(PROJECT_DESCRIPTION).domainId(PROJECT_DOMAIN_ID)
.setExtra(PROJECT_EXTRA_KEY_1, PROJECT_EXTRA_VALUE_1)
.enabled(true).setTags(TAGS).build();
String uri = String.format(URI_FORMAT, url(), OpenstackConstants.CREATE);
Project out = template.requestBody(uri, in, Project.class);
assertNotNull(out);
assertEquals(PROJECT_NAME, out.getName());
assertEquals(PROJECT_ID, out.getId());
assertEquals(PROJECT_DOMAIN_ID, out.getDomainId());
assertEquals(PROJECT_DESCRIPTION, out.getDescription());
assertEquals(PROJECT_EXTRA_VALUE_1, out.getExtra(PROJECT_EXTRA_KEY_1));
assertEquals(TAGS, out.getTags());
}
@Test
void getShouldSucceed() {
String uri = String.format(URI_FORMAT, url(), OpenstackConstants.GET);
Project out = template.requestBodyAndHeader(uri, null, OpenstackConstants.ID, PROJECT_ID, Project.class);
assertNotNull(out);
assertEquals(PROJECT_NAME, out.getName());
assertEquals(PROJECT_ID, out.getId());
assertEquals(PROJECT_DOMAIN_ID, out.getDomainId());
assertEquals(PROJECT_DESCRIPTION, out.getDescription());
}
@Test
void getAllShouldSucceed() {
String uri = String.format(URI_FORMAT, url(), OpenstackConstants.GET_ALL);
Project[] projects = template.requestBody(uri, null, Project[].class);
assertEquals(3, projects.length);
assertEquals("10b40033bbef48f89fe838fef62398f0", projects[0].getId());
assertEquals("600905d353a84b20b644d2fe55a21e8a", projects[1].getId());
assertEquals("8519dba9f4594f0f87071c87784a8d2c", projects[2].getId());
assertNotNull(projects[2].getOptions());
assertTrue(projects[2].getOptions().isEmpty());
assertNotNull(projects[2].getTags());
assertTrue(projects[2].getTags().isEmpty());
}
@Test
void updateShouldSucceed() {
Project in = Builders.project().id(PROJECT_ID).description(PROJECT_DESCRIPTION_UPDATED)
.setExtra(PROJECT_EXTRA_KEY_2, PROJECT_EXTRA_VALUE_2).build();
String uri = String.format(URI_FORMAT, url(), OpenstackConstants.UPDATE);
Project out = template.requestBody(uri, in, Project.class);
assertNotNull(out);
assertEquals(PROJECT_ID, out.getId());
assertEquals(PROJECT_NAME, out.getName());
assertEquals(PROJECT_DOMAIN_ID, out.getDomainId());
assertEquals(PROJECT_DESCRIPTION_UPDATED, out.getDescription());
assertEquals(PROJECT_EXTRA_VALUE_1, out.getExtra(PROJECT_EXTRA_KEY_1));
assertEquals(PROJECT_EXTRA_VALUE_2, out.getExtra(PROJECT_EXTRA_KEY_2));
}
@Test
void deleteShouldSucceed() {
String uri = String.format(URI_FORMAT, url(), OpenstackConstants.DELETE);
assertDoesNotThrow(() -> template.requestBodyAndHeader(uri, null, OpenstackConstants.ID, PROJECT_ID));
}
}
|
OpenstackKeystoneProjectTest
|
java
|
grpc__grpc-java
|
gcp-observability/src/test/java/io/grpc/gcp/observability/ObservabilityConfigImplTest.java
|
{
"start": 1485,
"end": 19093
}
|
class ____ {
private static final String LOG_FILTERS = "{\n"
+ " \"project_id\": \"grpc-testing\",\n"
+ " \"cloud_logging\": {\n"
+ " \"client_rpc_events\": [{\n"
+ " \"methods\": [\"*\"],\n"
+ " \"max_metadata_bytes\": 4096\n"
+ " }"
+ " ],\n"
+ " \"server_rpc_events\": [{\n"
+ " \"methods\": [\"*\"],\n"
+ " \"max_metadata_bytes\": 32,\n"
+ " \"max_message_bytes\": 64\n"
+ " }"
+ " ]\n"
+ " }\n"
+ "}";
private static final String CLIENT_LOG_FILTERS = "{\n"
+ " \"project_id\": \"grpc-testing\",\n"
+ " \"cloud_logging\": {\n"
+ " \"client_rpc_events\": [{\n"
+ " \"methods\": [\"*\"],\n"
+ " \"max_metadata_bytes\": 4096,\n"
+ " \"max_message_bytes\": 2048\n"
+ " },"
+ " {\n"
+ " \"methods\": [\"service1/Method2\", \"Service2/*\"],\n"
+ " \"exclude\": true\n"
+ " }"
+ " ]\n"
+ " }\n"
+ "}";
private static final String SERVER_LOG_FILTERS = "{\n"
+ " \"project_id\": \"grpc-testing\",\n"
+ " \"cloud_logging\": {\n"
+ " \"server_rpc_events\": [{\n"
+ " \"methods\": [\"service1/method4\", \"service2/method234\"],\n"
+ " \"max_metadata_bytes\": 32,\n"
+ " \"max_message_bytes\": 64\n"
+ " },"
+ " {\n"
+ " \"methods\": [\"service4/*\", \"Service2/*\"],\n"
+ " \"exclude\": true\n"
+ " }"
+ " ]\n"
+ " }\n"
+ "}";
private static final String VALID_LOG_FILTERS = "{\n"
+ " \"project_id\": \"grpc-testing\",\n"
+ " \"cloud_logging\": {\n"
+ " \"server_rpc_events\": [{\n"
+ " \"methods\": [\"service.Service1/*\", \"service2.Service4/method4\"],\n"
+ " \"max_metadata_bytes\": 16,\n"
+ " \"max_message_bytes\": 64\n"
+ " }"
+ " ]\n"
+ " }\n"
+ "}";
private static final String PROJECT_ID = "{\n"
+ " \"project_id\": \"grpc-testing\",\n"
+ " \"cloud_logging\": {}\n"
+ "}";
private static final String EMPTY_CONFIG = "{}";
private static final String ENABLE_CLOUD_MONITORING_AND_TRACING = "{\n"
+ " \"project_id\": \"grpc-testing\",\n"
+ " \"cloud_monitoring\": {},\n"
+ " \"cloud_trace\": {}\n"
+ "}";
private static final String ENABLE_CLOUD_MONITORING = "{\n"
+ " \"project_id\": \"grpc-testing\",\n"
+ " \"cloud_monitoring\": {}\n"
+ "}";
private static final String ENABLE_CLOUD_TRACE = "{\n"
+ " \"project_id\": \"grpc-testing\",\n"
+ " \"cloud_trace\": {}\n"
+ "}";
private static final String TRACING_ALWAYS_SAMPLER = "{\n"
+ " \"project_id\": \"grpc-testing\",\n"
+ " \"cloud_trace\": {\n"
+ " \"sampling_rate\": 1.00\n"
+ " }\n"
+ "}";
private static final String TRACING_NEVER_SAMPLER = "{\n"
+ " \"project_id\": \"grpc-testing\",\n"
+ " \"cloud_trace\": {\n"
+ " \"sampling_rate\": 0.00\n"
+ " }\n"
+ "}";
private static final String TRACING_PROBABILISTIC_SAMPLER = "{\n"
+ " \"project_id\": \"grpc-testing\",\n"
+ " \"cloud_trace\": {\n"
+ " \"sampling_rate\": 0.75\n"
+ " }\n"
+ "}";
private static final String TRACING_DEFAULT_SAMPLER = "{\n"
+ " \"project_id\": \"grpc-testing\",\n"
+ " \"cloud_trace\": {}\n"
+ "}";
private static final String GLOBAL_TRACING_BAD_PROBABILISTIC_SAMPLER = "{\n"
+ " \"project_id\": \"grpc-testing\",\n"
+ " \"cloud_trace\": {\n"
+ " \"sampling_rate\": -0.75\n"
+ " }\n"
+ "}";
private static final String CUSTOM_TAGS = "{\n"
+ " \"project_id\": \"grpc-testing\",\n"
+ " \"cloud_logging\": {},\n"
+ " \"labels\": {\n"
+ " \"SOURCE_VERSION\" : \"J2e1Cf\",\n"
+ " \"SERVICE_NAME\" : \"payment-service\",\n"
+ " \"ENTRYPOINT_SCRIPT\" : \"entrypoint.sh\"\n"
+ " }\n"
+ "}";
private static final String BAD_CUSTOM_TAGS =
"{\n"
+ " \"project_id\": \"grpc-testing\",\n"
+ " \"cloud_monitoring\": {},\n"
+ " \"labels\": {\n"
+ " \"SOURCE_VERSION\" : \"J2e1Cf\",\n"
+ " \"SERVICE_NAME\" : { \"SUB_SERVICE_NAME\" : \"payment-service\"},\n"
+ " \"ENTRYPOINT_SCRIPT\" : \"entrypoint.sh\"\n"
+ " }\n"
+ "}";
private static final String LOG_FILTER_GLOBAL_EXCLUDE =
"{\n"
+ " \"project_id\": \"grpc-testing\",\n"
+ " \"cloud_logging\": {\n"
+ " \"client_rpc_events\": [{\n"
+ " \"methods\": [\"service1/Method2\", \"*\"],\n"
+ " \"max_metadata_bytes\": 20,\n"
+ " \"max_message_bytes\": 15,\n"
+ " \"exclude\": true\n"
+ " }"
+ " ]\n"
+ " }\n"
+ "}";
private static final String LOG_FILTER_INVALID_METHOD =
"{\n"
+ " \"project_id\": \"grpc-testing\",\n"
+ " \"cloud_logging\": {\n"
+ " \"client_rpc_events\": [{\n"
+ " \"methods\": [\"s*&%ervice1/Method2\", \"*\"],\n"
+ " \"max_metadata_bytes\": 20\n"
+ " }"
+ " ]\n"
+ " }\n"
+ "}";
ObservabilityConfigImpl observabilityConfig = new ObservabilityConfigImpl();
@Rule public TemporaryFolder tempFolder = new TemporaryFolder();
@Test
public void nullConfig() throws IOException {
try {
observabilityConfig.parse(null);
fail("exception expected!");
} catch (IllegalArgumentException iae) {
assertThat(iae.getMessage()).isEqualTo("GRPC_GCP_OBSERVABILITY_CONFIG value is null!");
}
}
@Test
public void emptyConfig() throws IOException {
observabilityConfig.parse(EMPTY_CONFIG);
assertFalse(observabilityConfig.isEnableCloudLogging());
assertFalse(observabilityConfig.isEnableCloudMonitoring());
assertFalse(observabilityConfig.isEnableCloudTracing());
assertThat(observabilityConfig.getClientLogFilters()).isEmpty();
assertThat(observabilityConfig.getServerLogFilters()).isEmpty();
assertThat(observabilityConfig.getSampler()).isNull();
assertThat(observabilityConfig.getProjectId()).isNull();
assertThat(observabilityConfig.getCustomTags()).isEmpty();
}
@Test
public void emptyConfigFile() throws IOException {
File configFile = tempFolder.newFile();
try {
observabilityConfig.parseFile(configFile.getAbsolutePath());
fail("exception expected!");
} catch (IllegalArgumentException iae) {
assertThat(iae.getMessage()).isEqualTo(
"GRPC_GCP_OBSERVABILITY_CONFIG_FILE is empty!");
}
}
@Test
public void setProjectId() throws IOException {
observabilityConfig.parse(PROJECT_ID);
assertTrue(observabilityConfig.isEnableCloudLogging());
assertThat(observabilityConfig.getProjectId()).isEqualTo("grpc-testing");
}
@Test
public void logFilters() throws IOException {
observabilityConfig.parse(LOG_FILTERS);
assertTrue(observabilityConfig.isEnableCloudLogging());
assertThat(observabilityConfig.getProjectId()).isEqualTo("grpc-testing");
List<LogFilter> clientLogFilters = observabilityConfig.getClientLogFilters();
assertThat(clientLogFilters).hasSize(1);
assertThat(clientLogFilters.get(0).headerBytes).isEqualTo(4096);
assertThat(clientLogFilters.get(0).messageBytes).isEqualTo(0);
assertThat(clientLogFilters.get(0).excludePattern).isFalse();
assertThat(clientLogFilters.get(0).matchAll).isTrue();
assertThat(clientLogFilters.get(0).services).isEmpty();
assertThat(clientLogFilters.get(0).methods).isEmpty();
List<LogFilter> serverLogFilters = observabilityConfig.getServerLogFilters();
assertThat(serverLogFilters).hasSize(1);
assertThat(serverLogFilters.get(0).headerBytes).isEqualTo(32);
assertThat(serverLogFilters.get(0).messageBytes).isEqualTo(64);
assertThat(serverLogFilters.get(0).excludePattern).isFalse();
assertThat(serverLogFilters.get(0).matchAll).isTrue();
assertThat(serverLogFilters.get(0).services).isEmpty();
assertThat(serverLogFilters.get(0).methods).isEmpty();
}
@Test
public void setClientLogFilters() throws IOException {
observabilityConfig.parse(CLIENT_LOG_FILTERS);
assertTrue(observabilityConfig.isEnableCloudLogging());
assertThat(observabilityConfig.getProjectId()).isEqualTo("grpc-testing");
List<LogFilter> logFilterList = observabilityConfig.getClientLogFilters();
assertThat(logFilterList).hasSize(2);
assertThat(logFilterList.get(0).headerBytes).isEqualTo(4096);
assertThat(logFilterList.get(0).messageBytes).isEqualTo(2048);
assertThat(logFilterList.get(0).excludePattern).isFalse();
assertThat(logFilterList.get(0).matchAll).isTrue();
assertThat(logFilterList.get(0).services).isEmpty();
assertThat(logFilterList.get(0).methods).isEmpty();
assertThat(logFilterList.get(1).headerBytes).isEqualTo(0);
assertThat(logFilterList.get(1).messageBytes).isEqualTo(0);
assertThat(logFilterList.get(1).excludePattern).isTrue();
assertThat(logFilterList.get(1).matchAll).isFalse();
assertThat(logFilterList.get(1).services).isEqualTo(Collections.singleton("Service2"));
assertThat(logFilterList.get(1).methods)
.isEqualTo(Collections.singleton("service1/Method2"));
}
@Test
public void setServerLogFilters() throws IOException {
Set<String> expectedMethods = Stream.of("service1/method4", "service2/method234")
.collect(Collectors.toCollection(HashSet::new));
observabilityConfig.parse(SERVER_LOG_FILTERS);
assertTrue(observabilityConfig.isEnableCloudLogging());
List<LogFilter> logFilterList = observabilityConfig.getServerLogFilters();
assertThat(logFilterList).hasSize(2);
assertThat(logFilterList.get(0).headerBytes).isEqualTo(32);
assertThat(logFilterList.get(0).messageBytes).isEqualTo(64);
assertThat(logFilterList.get(0).excludePattern).isFalse();
assertThat(logFilterList.get(0).matchAll).isFalse();
assertThat(logFilterList.get(0).services).isEmpty();
assertThat(logFilterList.get(0).methods)
.isEqualTo(expectedMethods);
Set<String> expectedServices = Stream.of("service4", "Service2")
.collect(Collectors.toCollection(HashSet::new));
assertThat(logFilterList.get(1).headerBytes).isEqualTo(0);
assertThat(logFilterList.get(1).messageBytes).isEqualTo(0);
assertThat(logFilterList.get(1).excludePattern).isTrue();
assertThat(logFilterList.get(1).matchAll).isFalse();
assertThat(logFilterList.get(1).services).isEqualTo(expectedServices);
assertThat(logFilterList.get(1).methods).isEmpty();
}
@Test
public void enableCloudMonitoring() throws IOException {
observabilityConfig.parse(ENABLE_CLOUD_MONITORING);
assertTrue(observabilityConfig.isEnableCloudMonitoring());
}
@Test
public void enableCloudTracing() throws IOException {
observabilityConfig.parse(ENABLE_CLOUD_TRACE);
assertTrue(observabilityConfig.isEnableCloudTracing());
}
@Test
public void enableCloudMonitoringAndTracing() throws IOException {
observabilityConfig.parse(ENABLE_CLOUD_MONITORING_AND_TRACING);
assertFalse(observabilityConfig.isEnableCloudLogging());
assertTrue(observabilityConfig.isEnableCloudMonitoring());
assertTrue(observabilityConfig.isEnableCloudTracing());
}
@Test
public void alwaysSampler() throws IOException {
observabilityConfig.parse(TRACING_ALWAYS_SAMPLER);
assertTrue(observabilityConfig.isEnableCloudTracing());
Sampler sampler = observabilityConfig.getSampler();
assertThat(sampler).isNotNull();
assertThat(sampler).isEqualTo(Samplers.alwaysSample());
}
@Test
public void neverSampler() throws IOException {
observabilityConfig.parse(TRACING_NEVER_SAMPLER);
assertTrue(observabilityConfig.isEnableCloudTracing());
Sampler sampler = observabilityConfig.getSampler();
assertThat(sampler).isNotNull();
assertThat(sampler).isEqualTo(Samplers.probabilitySampler(0.0));
}
@Test
public void probabilisticSampler() throws IOException {
observabilityConfig.parse(TRACING_PROBABILISTIC_SAMPLER);
assertTrue(observabilityConfig.isEnableCloudTracing());
Sampler sampler = observabilityConfig.getSampler();
assertThat(sampler).isNotNull();
assertThat(sampler).isEqualTo(Samplers.probabilitySampler(0.75));
}
@Test
public void defaultSampler() throws IOException {
observabilityConfig.parse(TRACING_DEFAULT_SAMPLER);
assertTrue(observabilityConfig.isEnableCloudTracing());
Sampler sampler = observabilityConfig.getSampler();
assertThat(sampler).isNotNull();
assertThat(sampler).isEqualTo(Samplers.probabilitySampler(0.00));
}
@Test
public void badProbabilisticSampler_error() throws IOException {
try {
observabilityConfig.parse(GLOBAL_TRACING_BAD_PROBABILISTIC_SAMPLER);
fail("exception expected!");
} catch (IllegalArgumentException iae) {
assertThat(iae.getMessage()).isEqualTo(
"'sampling_rate' needs to be between [0.0, 1.0]");
}
}
@Test
public void configFileLogFilters() throws Exception {
File configFile = tempFolder.newFile();
Files.write(
Paths.get(configFile.getAbsolutePath()),
CLIENT_LOG_FILTERS.getBytes(StandardCharsets.US_ASCII));
observabilityConfig.parseFile(configFile.getAbsolutePath());
assertTrue(observabilityConfig.isEnableCloudLogging());
assertThat(observabilityConfig.getProjectId()).isEqualTo("grpc-testing");
List<LogFilter> logFilters = observabilityConfig.getClientLogFilters();
assertThat(logFilters).hasSize(2);
assertThat(logFilters.get(0).headerBytes).isEqualTo(4096);
assertThat(logFilters.get(0).messageBytes).isEqualTo(2048);
assertThat(logFilters.get(1).headerBytes).isEqualTo(0);
assertThat(logFilters.get(1).messageBytes).isEqualTo(0);
assertThat(logFilters).hasSize(2);
assertThat(logFilters.get(0).headerBytes).isEqualTo(4096);
assertThat(logFilters.get(0).messageBytes).isEqualTo(2048);
assertThat(logFilters.get(0).excludePattern).isFalse();
assertThat(logFilters.get(0).matchAll).isTrue();
assertThat(logFilters.get(0).services).isEmpty();
assertThat(logFilters.get(0).methods).isEmpty();
assertThat(logFilters.get(1).headerBytes).isEqualTo(0);
assertThat(logFilters.get(1).messageBytes).isEqualTo(0);
assertThat(logFilters.get(1).excludePattern).isTrue();
assertThat(logFilters.get(1).matchAll).isFalse();
assertThat(logFilters.get(1).services).isEqualTo(Collections.singleton("Service2"));
assertThat(logFilters.get(1).methods)
.isEqualTo(Collections.singleton("service1/Method2"));
}
@Test
public void customTags() throws IOException {
observabilityConfig.parse(CUSTOM_TAGS);
assertTrue(observabilityConfig.isEnableCloudLogging());
Map<String, String> customTags = observabilityConfig.getCustomTags();
assertThat(customTags).hasSize(3);
assertThat(customTags).containsEntry("SOURCE_VERSION", "J2e1Cf");
assertThat(customTags).containsEntry("SERVICE_NAME", "payment-service");
assertThat(customTags).containsEntry("ENTRYPOINT_SCRIPT", "entrypoint.sh");
}
@Test
public void badCustomTags() throws IOException {
try {
observabilityConfig.parse(BAD_CUSTOM_TAGS);
fail("exception expected!");
} catch (IllegalArgumentException iae) {
assertThat(iae.getMessage()).isEqualTo(
"'labels' needs to be a map of <string, string>");
}
}
@Test
public void globalLogFilterExclude() throws IOException {
try {
observabilityConfig.parse(LOG_FILTER_GLOBAL_EXCLUDE);
fail("exception expected!");
} catch (IllegalArgumentException iae) {
assertThat(iae.getMessage()).isEqualTo(
"cannot have 'exclude' and '*' wildcard in the same filter");
}
}
@Test
public void logFilterInvalidMethod() throws IOException {
try {
observabilityConfig.parse(LOG_FILTER_INVALID_METHOD);
fail("exception expected!");
} catch (IllegalArgumentException iae) {
assertThat(iae.getMessage()).contains(
"invalid service or method filter");
}
}
@Test
public void validLogFilter() throws Exception {
observabilityConfig.parse(VALID_LOG_FILTERS);
assertTrue(observabilityConfig.isEnableCloudLogging());
assertThat(observabilityConfig.getProjectId()).isEqualTo("grpc-testing");
List<LogFilter> logFilterList = observabilityConfig.getServerLogFilters();
assertThat(logFilterList).hasSize(1);
assertThat(logFilterList.get(0).headerBytes).isEqualTo(16);
assertThat(logFilterList.get(0).messageBytes).isEqualTo(64);
assertThat(logFilterList.get(0).excludePattern).isFalse();
assertThat(logFilterList.get(0).matchAll).isFalse();
assertThat(logFilterList.get(0).services).isEqualTo(Collections.singleton("service.Service1"));
assertThat(logFilterList.get(0).methods)
.isEqualTo(Collections.singleton("service2.Service4/method4"));
}
}
|
ObservabilityConfigImplTest
|
java
|
spring-projects__spring-framework
|
spring-webmvc/src/test/java/org/springframework/web/servlet/function/support/RouterFunctionMappingVersionTests.java
|
{
"start": 2144,
"end": 3894
}
|
class ____ {
private final MockServletContext servletContext = new MockServletContext();
private RouterFunctionMapping mapping;
@BeforeEach
void setUp() {
AnnotationConfigWebApplicationContext wac = new AnnotationConfigWebApplicationContext();
wac.setServletContext(this.servletContext);
wac.register(WebConfig.class);
wac.refresh();
this.mapping = wac.getBean(RouterFunctionMapping.class);
}
@Test
void mapVersion() throws Exception {
testGetHandler("1.0", "none");
testGetHandler("1.1", "none");
testGetHandler("1.2", "1.2");
testGetHandler("1.3", "1.2");
testGetHandler("1.5", "1.5");
}
private void testGetHandler(String version, String expectedBody) throws Exception {
MockHttpServletRequest request = new MockHttpServletRequest("GET", "/");
request.addHeader("API-Version", version);
HandlerFunction<?> handler = (HandlerFunction<?>) this.mapping.getHandler(request).getHandler();
assertThat(((TestHandler) handler).body()).isEqualTo(expectedBody);
}
@Test
void deprecation() throws Exception {
MockHttpServletRequest request = new MockHttpServletRequest("GET", "/");
request.addHeader("API-Version", "1");
HandlerExecutionChain chain = this.mapping.getHandler(request);
assertThat(chain).isNotNull();
MockHttpServletResponse response = new MockHttpServletResponse();
for (HandlerInterceptor interceptor : chain.getInterceptorList()) {
interceptor.preHandle(request, response, chain.getHandler());
}
assertThat(((TestHandler) chain.getHandler()).body()).isEqualTo("none");
assertThat(response.getHeader("Link"))
.isEqualTo("<https://example.org/deprecation>; rel=\"deprecation\"; type=\"text/html\"");
}
@EnableWebMvc
private static
|
RouterFunctionMappingVersionTests
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/merge/CompositeIdWithAssociationsAndGeneratedValuesMerge2Test.java
|
{
"start": 1079,
"end": 1551
}
|
class ____ {
@Test
public void testMerge(SessionFactoryScope scope) {
scope.inTransaction(
session -> {
Middle m1 = new Middle( "Middle" );
Bottom bottom = new Bottom( m1, 0, "Bottom" );
Middle merge = session.merge( m1 );
assertThat( merge.getId() ).isNotNull();
assertThat( m1.getId() ).isNull();
}
);
}
@Entity(name = "Middle")
@Table(name = "middle_table")
public static
|
CompositeIdWithAssociationsAndGeneratedValuesMerge2Test
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/bvt/filter/config/ConfigFilterTest.java
|
{
"start": 473,
"end": 4270
}
|
class ____ extends ConfigFileGenerator {
String encryptedString = "OJfUm6WCHi7EuXqE6aEc+Po2xFrAGBeSNy8O2jWhV2FTG8/5kbRRr2rjNKhptlevm/03Y0048P7h88gdUOXAYg==";
@Test
public void testInitRemoteConfigFile() throws SQLException {
DruidDataSource dataSource = new DruidDataSource();
dataSource.setFilters("config");
dataSource.setConnectionProperties("config.file=file://" + this.filePath);
try {
dataSource.init();
assertEquals("The username is " + dataSource.getUsername(), "test1", dataSource.getUsername());
} finally {
JdbcUtils.close(dataSource);
}
}
@Test
public void testInitRemoteConfigFileBySystemProperty() throws SQLException {
DruidDataSource dataSource = new DruidDataSource();
dataSource.setFilters("config");
System.setProperty(ConfigFilter.SYS_PROP_CONFIG_FILE, "file://" + this.filePath);
try {
dataSource.init();
assertEquals("The username is " + dataSource.getUsername(), "test1", dataSource.getUsername());
} finally {
System.clearProperty(ConfigFilter.SYS_PROP_CONFIG_FILE);
JdbcUtils.close(dataSource);
}
}
@Test
public void testInitInvalidRemoteConfigFile() throws SQLException {
DruidDataSource dataSource = new DruidDataSource();
dataSource.setFilters("config");
dataSource.setConnectionProperties("config.file=abcdef");
Exception error = null;
try {
dataSource.init();
} catch (IllegalArgumentException e) {
error = e;
} finally {
JdbcUtils.close(dataSource);
}
assertNotNull(error);
}
@Test
public void testInitDecrypt() throws SQLException {
DruidDataSource dataSource = new DruidDataSource();
dataSource.setDriver(MockDriver.instance);
dataSource.setUrl("");
dataSource.setUsername("test");
dataSource.setPassword(encryptedString);
dataSource.setFilters("config");
dataSource.setConnectionProperties("config.decrypt=true");
try {
dataSource.init();
assertEquals("The password is " + dataSource.getPassword() + ", is not xiaoyu", "xiaoyu",
dataSource.getPassword());
} finally {
JdbcUtils.close(dataSource);
}
}
@Test
public void testInitRemoteConfigAndDecrypt() throws SQLException {
DruidDataSource dataSource = new DruidDataSource();
dataSource.setFilters("config");
dataSource.setConnectionProperties("config.decrypt=true;config.file=" + "file://" + this.filePath);
try {
dataSource.init();
assertEquals("The password is " + dataSource.getPassword(), "xiaoyu", dataSource.getPassword());
} finally {
JdbcUtils.close(dataSource);
}
}
@Test
public void testNormalInit() throws SQLException {
DruidDataSource dataSource = new DruidDataSource();
dataSource.setUrl("jdbc:oracle:thin:@");
try {
dataSource.init();
} finally {
JdbcUtils.close(dataSource);
}
}
@Test
public void testInvalidInit() throws SQLException {
DruidDataSource dataSource = new DruidDataSource();
dataSource.setDriver(MockDriver.instance);
dataSource.setFilters("config");
dataSource.setConnectionProperties("config.file=abcdefeg");
Exception error = null;
try {
dataSource.init();
} catch (IllegalArgumentException e) {
error = e;
} finally {
JdbcUtils.close(dataSource);
}
assertNotNull(error);
}
}
|
ConfigFilterTest
|
java
|
apache__camel
|
components/camel-kubernetes/src/main/java/org/apache/camel/component/kubernetes/replication_controllers/KubernetesReplicationControllersConsumer.java
|
{
"start": 1814,
"end": 3281
}
|
class ____ extends DefaultConsumer {
private static final Logger LOG = LoggerFactory.getLogger(KubernetesReplicationControllersConsumer.class);
private final Processor processor;
private ExecutorService executor;
private ReplicationControllersConsumerTask rcWatcher;
public KubernetesReplicationControllersConsumer(AbstractKubernetesEndpoint endpoint, Processor processor) {
super(endpoint, processor);
this.processor = processor;
}
@Override
public AbstractKubernetesEndpoint getEndpoint() {
return (AbstractKubernetesEndpoint) super.getEndpoint();
}
@Override
protected void doStart() throws Exception {
super.doStart();
executor = getEndpoint().createExecutor(this);
rcWatcher = new ReplicationControllersConsumerTask();
executor.submit(rcWatcher);
}
@Override
protected void doStop() throws Exception {
super.doStop();
LOG.debug("Stopping Kubernetes Replication Controllers Consumer");
if (executor != null) {
KubernetesHelper.close(rcWatcher, rcWatcher::getWatch);
if (getEndpoint() != null && getEndpoint().getCamelContext() != null) {
getEndpoint().getCamelContext().getExecutorServiceManager().shutdownNow(executor);
} else {
executor.shutdownNow();
}
}
executor = null;
}
|
KubernetesReplicationControllersConsumer
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/OpensearchEndpointBuilderFactory.java
|
{
"start": 19298,
"end": 19869
}
|
class ____ use as HostnameVerifier. By default there is no
* HostnameVerifier.
*
* The option is a: <code>javax.net.ssl.HostnameVerifier</code> type.
*
* Group: advanced
*
* @param hostnameVerifier the value to set
* @return the dsl builder
*/
default AdvancedOpensearchEndpointBuilder hostnameVerifier(javax.net.ssl.HostnameVerifier hostnameVerifier) {
doSetProperty("hostnameVerifier", hostnameVerifier);
return this;
}
/**
* The
|
to
|
java
|
assertj__assertj-core
|
assertj-core/src/test/java/org/assertj/core/internal/float2darrays/Float2DArrays_assertNotEmpty_Test.java
|
{
"start": 1019,
"end": 1278
}
|
class ____ extends Float2DArraysBaseTest {
@Test
void should_delegate_to_Arrays2D() {
// WHEN
float2dArrays.assertNotEmpty(info, actual);
// THEN
verify(arrays2d).assertNotEmpty(info, failures, actual);
}
}
|
Float2DArrays_assertNotEmpty_Test
|
java
|
google__guice
|
core/src/com/google/inject/internal/util/DirectStackWalkerFinder.java
|
{
"start": 182,
"end": 664
}
|
class ____ implements CallerFinder {
private static final StackWalker WALKER =
StackWalker.getInstance(StackWalker.Option.RETAIN_CLASS_REFERENCE);
@Override
public StackTraceElement findCaller(Predicate<String> shouldBeSkipped) {
return WALKER
.walk(s -> s.skip(2).filter(f -> !shouldBeSkipped.test(f.getClassName())).findFirst())
.map(StackWalker.StackFrame::toStackTraceElement)
.orElseThrow(AssertionError::new);
}
}
|
DirectStackWalkerFinder
|
java
|
assertj__assertj-core
|
assertj-core/src/test/java/org/assertj/core/error/ShouldBePeriod_create_test.java
|
{
"start": 1086,
"end": 1843
}
|
class ____ {
@Test
void should_create_error_message_for_positive() {
// GIVEN
ErrorMessageFactory factory = shouldBePositive(Period.ofMonths(-1));
// WHEN
String message = factory.create(new TextDescription("Test"), STANDARD_REPRESENTATION);
// THEN
then(message).isEqualTo("[Test] %nExpecting Period:%n P-1M%nto be positive".formatted());
}
@Test
void should_create_error_message_for_negative() {
// GIVEN
ErrorMessageFactory factory = shouldBeNegative(Period.ofMonths(1));
// WHEN
String message = factory.create(new TextDescription("Test"), STANDARD_REPRESENTATION);
// THEN
then(message).isEqualTo("[Test] %nExpecting Period:%n P1M%nto be negative".formatted());
}
}
|
ShouldBePeriod_create_test
|
java
|
spring-projects__spring-framework
|
spring-context/src/test/java/org/springframework/scheduling/annotation/ScheduledAnnotationBeanPostProcessorTests.java
|
{
"start": 38347,
"end": 38425
}
|
class ____ implements FixedRatesDefaultMethod {
}
static
|
FixedRatesDefaultBean
|
java
|
google__dagger
|
javatests/dagger/internal/codegen/DependencyCycleValidationTest.java
|
{
"start": 27157,
"end": 27487
}
|
interface ____");
});
}
@Test
public void cycleFromMembersInjectionMethod_WithSameKeyAsMembersInjectionMethod() {
Source a =
CompilerTests.javaSource(
"test.A",
"package test;",
"",
"import javax.inject.Inject;",
"",
"
|
TestComponent
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/DataSetEndpointBuilderFactory.java
|
{
"start": 40076,
"end": 41446
}
|
interface ____
extends
AdvancedDataSetEndpointConsumerBuilder,
AdvancedDataSetEndpointProducerBuilder {
default DataSetEndpointBuilder basic() {
return (DataSetEndpointBuilder) this;
}
/**
* Maximum number of messages to keep in memory available for browsing.
* Use 0 for unlimited.
*
* The option is a: <code>int</code> type.
*
* Default: 100
* Group: advanced
*
* @param browseLimit the value to set
* @return the dsl builder
*/
default AdvancedDataSetEndpointBuilder browseLimit(int browseLimit) {
doSetProperty("browseLimit", browseLimit);
return this;
}
/**
* Maximum number of messages to keep in memory available for browsing.
* Use 0 for unlimited.
*
* The option will be converted to a <code>int</code> type.
*
* Default: 100
* Group: advanced
*
* @param browseLimit the value to set
* @return the dsl builder
*/
default AdvancedDataSetEndpointBuilder browseLimit(String browseLimit) {
doSetProperty("browseLimit", browseLimit);
return this;
}
}
public
|
AdvancedDataSetEndpointBuilder
|
java
|
apache__camel
|
components/camel-kamelet/src/main/java/org/apache/camel/component/kamelet/KameletReifier.java
|
{
"start": 1149,
"end": 2416
}
|
class ____ extends ProcessorReifier<KameletDefinition> {
public KameletReifier(Route route, KameletDefinition definition) {
super(route, definition);
}
@Override
public Processor createProcessor() throws Exception {
Processor processor = createChildProcessor(false);
if (processor == null) {
// use an empty noop processor, as there should be a single processor
processor = new NoopProcessor();
}
// wrap in uow
String outputId = definition.idOrCreate(camelContext.getCamelContextExtension().getContextPlugin(NodeIdFactory.class));
camelContext.getCamelContextExtension().createProcessor(outputId);
try {
Processor answer = new KameletProcessor(camelContext, parseString(definition.getName()), processor);
if (answer instanceof DisabledAware da) {
da.setDisabled(isDisabled(camelContext, definition));
}
answer = PluginHelper.getInternalProcessorFactory(camelContext)
.addUnitOfWorkProcessorAdvice(camelContext, answer, null);
return answer;
} finally {
camelContext.getCamelContextExtension().createProcessor(null);
}
}
}
|
KameletReifier
|
java
|
quarkusio__quarkus
|
extensions/qute/deployment/src/test/java/io/quarkus/qute/deployment/i18n/MessageBundleEnumExampleFileTest.java
|
{
"start": 680,
"end": 1884
}
|
class ____ {
@RegisterExtension
static final QuarkusProdModeTest config = new QuarkusProdModeTest()
.withApplicationRoot(root -> root
.addClasses(Messages.class, MyEnum.class)
.addAsResource(new StringAsset("""
myEnum_ON=On
myEnum_OFF=Off
myEnum_UNDEFINED=Undefined
"""),
"messages/enu.properties"));
@ProdBuildResults
ProdModeTestResults testResults;
@Test
public void testExampleProperties() throws FileNotFoundException, IOException {
Path path = testResults.getBuildDir().resolve("qute-i18n-examples").resolve("enu.properties");
assertTrue(path.toFile().canRead());
Properties props = new Properties();
props.load(new FileInputStream(path.toFile()));
assertEquals(3, props.size());
assertTrue(props.containsKey("myEnum_ON"));
assertTrue(props.containsKey("myEnum_OFF"));
assertTrue(props.containsKey("myEnum_UNDEFINED"));
}
@MessageBundle(value = "enu", locale = "en")
public
|
MessageBundleEnumExampleFileTest
|
java
|
apache__camel
|
components/camel-file-watch/src/main/java/org/apache/camel/component/file/watch/utils/PathUtils.java
|
{
"start": 898,
"end": 1162
}
|
class ____ {
private PathUtils() {
}
public static Path normalize(Path path) {
return path.normalize();
}
public static String normalizeToString(Path path) {
return normalize(path).toString().replace("\\", "/");
}
}
|
PathUtils
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ClassificationConfig.java
|
{
"start": 9639,
"end": 11489
}
|
class ____ {
private Integer numTopClasses;
private String topClassesResultsField;
private String resultsField;
private PredictionFieldType predictionFieldType;
private Integer numTopFeatureImportanceValues;
Builder() {}
Builder(ClassificationConfig config) {
this.numTopClasses = config.numTopClasses;
this.topClassesResultsField = config.topClassesResultsField;
this.resultsField = config.resultsField;
this.numTopFeatureImportanceValues = config.numTopFeatureImportanceValues;
this.predictionFieldType = config.predictionFieldType;
}
public Builder setNumTopClasses(Integer numTopClasses) {
this.numTopClasses = numTopClasses;
return this;
}
public Builder setTopClassesResultsField(String topClassesResultsField) {
this.topClassesResultsField = topClassesResultsField;
return this;
}
public Builder setResultsField(String resultsField) {
this.resultsField = resultsField;
return this;
}
public Builder setNumTopFeatureImportanceValues(Integer numTopFeatureImportanceValues) {
this.numTopFeatureImportanceValues = numTopFeatureImportanceValues;
return this;
}
public Builder setPredictionFieldType(PredictionFieldType predictionFieldType) {
this.predictionFieldType = predictionFieldType;
return this;
}
public ClassificationConfig build() {
return new ClassificationConfig(
numTopClasses,
resultsField,
topClassesResultsField,
numTopFeatureImportanceValues,
predictionFieldType
);
}
}
}
|
Builder
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientMetrics.java
|
{
"start": 1462,
"end": 1676
}
|
class ____ for maintaining the various Router Client activity statistics
* and publishing them through the metrics interfaces.
*/
@Metrics(name="RouterClientActivity", about="Router metrics", context="dfs")
public
|
is
|
java
|
FasterXML__jackson-databind
|
src/main/java/tools/jackson/databind/ext/javatime/ser/ZonedDateTimeSerializer.java
|
{
"start": 413,
"end": 4048
}
|
class ____ extends InstantSerializerBase<ZonedDateTime> {
public static final ZonedDateTimeSerializer INSTANCE = new ZonedDateTimeSerializer();
/**
* Flag for <code>JsonFormat.Feature.WRITE_DATES_WITH_ZONE_ID</code>
*/
protected final Boolean _writeZoneId;
protected ZonedDateTimeSerializer() {
// ISO_ZONED_DATE_TIME is an extended version of ISO compliant format
// ISO_OFFSET_DATE_TIME with additional information :Zone Id
// (This is not part of the ISO-8601 standard)
this(DateTimeFormatter.ISO_OFFSET_DATE_TIME);
}
public ZonedDateTimeSerializer(DateTimeFormatter formatter) {
super(ZonedDateTime.class, dt -> dt.toInstant().toEpochMilli(),
ZonedDateTime::toEpochSecond, ZonedDateTime::getNano,
formatter);
_writeZoneId = null;
}
protected ZonedDateTimeSerializer(ZonedDateTimeSerializer base,
DateTimeFormatter formatter,
Boolean useTimestamp, Boolean useNanoseconds,
Boolean writeZoneId,
JsonFormat.Shape shape)
{
super(base, formatter, useTimestamp, useNanoseconds, shape);
_writeZoneId = writeZoneId;
}
@Override
protected JSR310FormattedSerializerBase<?> withFormat(DateTimeFormatter formatter,
Boolean useTimestamp,
JsonFormat.Shape shape)
{
return new ZonedDateTimeSerializer(this, formatter,
useTimestamp, _useNanoseconds, _writeZoneId,
shape);
}
@Override
protected JSR310FormattedSerializerBase<?> withFeatures(Boolean writeZoneId,
Boolean useNanoseconds)
{
return new ZonedDateTimeSerializer(this, _formatter,
_useTimestamp, useNanoseconds, writeZoneId, _shape);
}
@Override
public void serialize(ZonedDateTime value, JsonGenerator g, SerializationContext ctxt)
throws JacksonException
{
if (!useTimestamp(ctxt)) {
// [modules-java8#333]: `@JsonFormat` with pattern should override
// `SerializationFeature.WRITE_DATES_WITH_ZONE_ID`
if ((_formatter != null) && (_shape == JsonFormat.Shape.STRING)) {
; // use default handling
} else if (shouldWriteWithZoneId(ctxt)) {
// write with zone
g.writeString(DateTimeFormatter.ISO_ZONED_DATE_TIME.format(value));
return;
}
}
super.serialize(value, g, ctxt);
}
@Override
protected String formatValue(ZonedDateTime value, SerializationContext ctxt) {
String formatted = super.formatValue(value, ctxt);
// [modules-java8#333]: `@JsonFormat` with pattern should override
// `SerializationFeature.WRITE_DATES_WITH_ZONE_ID`
if (_formatter != null && _shape == JsonFormat.Shape.STRING) {
// Why not `if (shouldWriteWithZoneId(provider))` ?
if (Boolean.TRUE.equals(_writeZoneId)) {
formatted += "[" + value.getZone().getId() + "]";
}
}
return formatted;
}
public boolean shouldWriteWithZoneId(SerializationContext ctxt) {
return (_writeZoneId != null)
? _writeZoneId
: ctxt.isEnabled(DateTimeFeature.WRITE_DATES_WITH_ZONE_ID);
}
@Override
protected JsonToken serializationShape(SerializationContext ctxt) {
if (!useTimestamp(ctxt) && shouldWriteWithZoneId(ctxt)) {
return JsonToken.VALUE_STRING;
}
return super.serializationShape(ctxt);
}
}
|
ZonedDateTimeSerializer
|
java
|
quarkusio__quarkus
|
independent-projects/bootstrap/app-model/src/test/java/io/quarkus/paths/SharedArchivePathTreeTest.java
|
{
"start": 542,
"end": 1467
}
|
class ____ {
private static final int WORKERS_COUNT = 128;
@Test
void nullPointerException() throws IOException, InterruptedException, ExecutionException {
/* Reproduce https://github.com/quarkusio/quarkus/issues/48220 */
stress((OpenPathTree opened) -> {
});
}
@Test
void closedFileSystemException() throws IOException, InterruptedException, ExecutionException {
/* Reproduce https://github.com/quarkusio/quarkus/issues/48220 */
stress((OpenPathTree opened) -> {
try {
Path p = opened.getPath("org/assertj/core/api/Assertions.class");
Files.readAllBytes(p);
} catch (IOException e) {
throw new RuntimeException(e);
}
});
}
static void stress(Consumer<OpenPathTree> consumer) throws IOException {
/* Find assertj-core jar in the
|
SharedArchivePathTreeTest
|
java
|
grpc__grpc-java
|
core/src/test/java/io/grpc/internal/SpiffeUtilTest.java
|
{
"start": 8350,
"end": 18296
}
|
class ____ {
private static final String SPIFFE_PEM_FILE = "spiffe_cert.pem";
private static final String MULTI_URI_SAN_PEM_FILE = "spiffe_multi_uri_san_cert.pem";
private static final String SERVER_0_PEM_FILE = "server0.pem";
private static final String TEST_DIRECTORY_PREFIX = "io/grpc/internal/";
private static final String SPIFFE_TRUST_BUNDLE = "spiffebundle.json";
private static final String SPIFFE_TRUST_BUNDLE_WITH_EC_KTY = "spiffebundle_ec.json";
private static final String SPIFFE_TRUST_BUNDLE_MALFORMED = "spiffebundle_malformed.json";
private static final String SPIFFE_TRUST_BUNDLE_CORRUPTED_CERT =
"spiffebundle_corrupted_cert.json";
private static final String SPIFFE_TRUST_BUNDLE_WRONG_KTY = "spiffebundle_wrong_kty.json";
private static final String SPIFFE_TRUST_BUNDLE_WRONG_KID = "spiffebundle_wrong_kid.json";
private static final String SPIFFE_TRUST_BUNDLE_WRONG_USE = "spiffebundle_wrong_use.json";
private static final String SPIFFE_TRUST_BUNDLE_WRONG_MULTI_CERTS =
"spiffebundle_wrong_multi_certs.json";
private static final String SPIFFE_TRUST_BUNDLE_DUPLICATES = "spiffebundle_duplicates.json";
private static final String SPIFFE_TRUST_BUNDLE_WRONG_ROOT = "spiffebundle_wrong_root.json";
private static final String SPIFFE_TRUST_BUNDLE_WRONG_SEQ = "spiffebundle_wrong_seq_type.json";
private static final String DOMAIN_ERROR_MESSAGE =
" Certificate loading for trust domain 'google.com' failed.";
@Rule public TemporaryFolder tempFolder = new TemporaryFolder();
private X509Certificate[] spiffeCert;
private X509Certificate[] multipleUriSanCert;
private X509Certificate[] serverCert0;
@Before
public void setUp() throws Exception {
spiffeCert = CertificateUtils.getX509Certificates(TlsTesting.loadCert(SPIFFE_PEM_FILE));
multipleUriSanCert = CertificateUtils.getX509Certificates(TlsTesting
.loadCert(MULTI_URI_SAN_PEM_FILE));
serverCert0 = CertificateUtils.getX509Certificates(TlsTesting.loadCert(SERVER_0_PEM_FILE));
}
private String copyFileToTmp(String fileName) throws Exception {
File tempFile = tempFolder.newFile(fileName);
try (InputStream resourceStream = SpiffeUtilTest.class.getClassLoader()
.getResourceAsStream(TEST_DIRECTORY_PREFIX + fileName);
OutputStream fileStream = new FileOutputStream(tempFile)) {
ByteStreams.copy(resourceStream, fileStream);
fileStream.flush();
}
return tempFile.toString();
}
@Test
public void extractSpiffeIdSuccessTest() throws Exception {
Optional<SpiffeId> spiffeId = SpiffeUtil.extractSpiffeId(spiffeCert);
assertTrue(spiffeId.isPresent());
assertEquals("foo.bar.com", spiffeId.get().getTrustDomain());
assertEquals("/client/workload/1", spiffeId.get().getPath());
}
@Test
public void extractSpiffeIdFailureTest() throws Exception {
Optional<SpiffeUtil.SpiffeId> spiffeId = SpiffeUtil.extractSpiffeId(serverCert0);
assertFalse(spiffeId.isPresent());
IllegalArgumentException iae = assertThrows(IllegalArgumentException.class, () -> SpiffeUtil
.extractSpiffeId(multipleUriSanCert));
assertEquals("Multiple URI SAN values found in the leaf cert.", iae.getMessage());
}
@Test
public void extractSpiffeIdFromChainTest() throws Exception {
// Check that the SPIFFE ID is extracted only from the leaf cert in the chain (spiffeCert
// contains it, but serverCert0 does not).
X509Certificate[] leafWithSpiffeChain = new X509Certificate[]{spiffeCert[0], serverCert0[0]};
assertTrue(SpiffeUtil.extractSpiffeId(leafWithSpiffeChain).isPresent());
X509Certificate[] leafWithoutSpiffeChain =
new X509Certificate[]{serverCert0[0], spiffeCert[0]};
assertFalse(SpiffeUtil.extractSpiffeId(leafWithoutSpiffeChain).isPresent());
}
@Test
public void extractSpiffeIdParameterValidityTest() {
NullPointerException npe = assertThrows(NullPointerException.class, () -> SpiffeUtil
.extractSpiffeId(null));
assertEquals("certChain", npe.getMessage());
IllegalArgumentException iae = assertThrows(IllegalArgumentException.class, () -> SpiffeUtil
.extractSpiffeId(new X509Certificate[]{}));
assertEquals("certChain can't be empty", iae.getMessage());
}
@Test
public void loadTrustBundleFromFileSuccessTest() throws Exception {
SpiffeBundle tb = SpiffeUtil.loadTrustBundleFromFile(copyFileToTmp(SPIFFE_TRUST_BUNDLE));
assertEquals(2, tb.getSequenceNumbers().size());
assertEquals(12035488L, (long) tb.getSequenceNumbers().get("example.com"));
assertEquals(-1L, (long) tb.getSequenceNumbers().get("test.example.com"));
assertEquals(3, tb.getBundleMap().size());
assertEquals(0, tb.getBundleMap().get("test.google.com.au").size());
assertEquals(1, tb.getBundleMap().get("example.com").size());
assertEquals(2, tb.getBundleMap().get("test.example.com").size());
Optional<SpiffeId> spiffeId = SpiffeUtil.extractSpiffeId(tb.getBundleMap().get("example.com")
.toArray(new X509Certificate[0]));
assertTrue(spiffeId.isPresent());
assertEquals("foo.bar.com", spiffeId.get().getTrustDomain());
SpiffeBundle tb_ec = SpiffeUtil.loadTrustBundleFromFile(
copyFileToTmp(SPIFFE_TRUST_BUNDLE_WITH_EC_KTY));
assertEquals(2, tb_ec.getSequenceNumbers().size());
assertEquals(12035488L, (long) tb_ec.getSequenceNumbers().get("example.com"));
assertEquals(-1L, (long) tb_ec.getSequenceNumbers().get("test.example.com"));
assertEquals(3, tb_ec.getBundleMap().size());
assertEquals(0, tb_ec.getBundleMap().get("test.google.com.au").size());
assertEquals(1, tb_ec.getBundleMap().get("example.com").size());
assertEquals(2, tb_ec.getBundleMap().get("test.example.com").size());
Optional<SpiffeId> spiffeId_ec =
SpiffeUtil.extractSpiffeId(tb_ec.getBundleMap().get("example.com")
.toArray(new X509Certificate[0]));
assertTrue(spiffeId_ec.isPresent());
assertEquals("foo.bar.com", spiffeId_ec.get().getTrustDomain());
}
@Test
public void loadTrustBundleFromFileFailureTest() {
// Check the exception if JSON root element is different from 'trust_domains'
NullPointerException npe = assertThrows(NullPointerException.class, () -> SpiffeUtil
.loadTrustBundleFromFile(copyFileToTmp(SPIFFE_TRUST_BUNDLE_WRONG_ROOT)));
assertEquals("Mandatory trust_domains element is missing", npe.getMessage());
// Check the exception if JSON root element is different from 'trust_domains'
ClassCastException cce = assertThrows(ClassCastException.class, () -> SpiffeUtil
.loadTrustBundleFromFile(copyFileToTmp(SPIFFE_TRUST_BUNDLE_WRONG_SEQ)));
assertTrue(cce.getMessage().contains("Number expected to be long"));
// Check the exception if JSON file doesn't contain an object
IllegalArgumentException iae = assertThrows(IllegalArgumentException.class, () -> SpiffeUtil
.loadTrustBundleFromFile(copyFileToTmp(SPIFFE_TRUST_BUNDLE_MALFORMED)));
assertTrue(iae.getMessage().contains("SPIFFE Trust Bundle should be a JSON object."));
// Check the exception if JSON contains duplicates
iae = assertThrows(IllegalArgumentException.class, () -> SpiffeUtil
.loadTrustBundleFromFile(copyFileToTmp(SPIFFE_TRUST_BUNDLE_DUPLICATES)));
assertEquals("Duplicate key found: google.com", iae.getMessage());
// Check the exception if 'x5c' value cannot be parsed
iae = assertThrows(IllegalArgumentException.class, () -> SpiffeUtil
.loadTrustBundleFromFile(copyFileToTmp(SPIFFE_TRUST_BUNDLE_CORRUPTED_CERT)));
assertEquals("Certificate can't be parsed." + DOMAIN_ERROR_MESSAGE, iae.getMessage());
// Check the exception if 'kty' value differs from 'RSA'
iae = assertThrows(IllegalArgumentException.class, () -> SpiffeUtil
.loadTrustBundleFromFile(copyFileToTmp(SPIFFE_TRUST_BUNDLE_WRONG_KTY)));
assertEquals(
"'kty' parameter must be one of [RSA, EC] but 'null' found." + DOMAIN_ERROR_MESSAGE,
iae.getMessage());
// Check the exception if 'kid' has a value
iae = assertThrows(IllegalArgumentException.class, () -> SpiffeUtil
.loadTrustBundleFromFile(copyFileToTmp(SPIFFE_TRUST_BUNDLE_WRONG_KID)));
assertEquals("'kid' parameter must not be set." + DOMAIN_ERROR_MESSAGE, iae.getMessage());
// Check the exception if 'use' value differs from 'x509-svid'
iae = assertThrows(IllegalArgumentException.class, () -> SpiffeUtil
.loadTrustBundleFromFile(copyFileToTmp(SPIFFE_TRUST_BUNDLE_WRONG_USE)));
assertEquals("'use' parameter must be 'x509-svid' but 'i_am_not_x509-svid' found."
+ DOMAIN_ERROR_MESSAGE, iae.getMessage());
// Check the exception if multiple certs are provided for 'x5c'
iae = assertThrows(IllegalArgumentException.class, () -> SpiffeUtil
.loadTrustBundleFromFile(copyFileToTmp(SPIFFE_TRUST_BUNDLE_WRONG_MULTI_CERTS)));
assertEquals("Exactly 1 certificate is expected, but 2 found." + DOMAIN_ERROR_MESSAGE,
iae.getMessage());
}
@Test
public void loadTrustBundleFromFileParameterValidityTest() {
NullPointerException npe = assertThrows(NullPointerException.class, () -> SpiffeUtil
.loadTrustBundleFromFile(null));
assertEquals("trustBundleFile", npe.getMessage());
FileNotFoundException nsfe = assertThrows(FileNotFoundException.class, () -> SpiffeUtil
.loadTrustBundleFromFile("i_do_not_exist"));
assertTrue(
"Did not contain expected substring: " + nsfe.getMessage(),
nsfe.getMessage().contains("i_do_not_exist"));
}
}
}
|
CertificateApiTest
|
java
|
alibaba__nacos
|
auth/src/main/java/com/alibaba/nacos/auth/config/NacosAuthConfigHolder.java
|
{
"start": 880,
"end": 2800
}
|
class ____ {
private static final NacosAuthConfigHolder INSTANCE = new NacosAuthConfigHolder();
private final Map<String, NacosAuthConfig> nacosAuthConfigMap;
NacosAuthConfigHolder() {
this.nacosAuthConfigMap = new HashMap<>();
for (NacosAuthConfig each : NacosServiceLoader.load(NacosAuthConfig.class)) {
nacosAuthConfigMap.put(each.getAuthScope(), each);
}
}
public static NacosAuthConfigHolder getInstance() {
return INSTANCE;
}
public NacosAuthConfig getNacosAuthConfigByScope(String scope) {
return nacosAuthConfigMap.get(scope);
}
public Collection<NacosAuthConfig> getAllNacosAuthConfig() {
return nacosAuthConfigMap.values();
}
public boolean isAnyAuthEnabled() {
return nacosAuthConfigMap.values().stream().anyMatch(NacosAuthConfig::isAuthEnabled);
}
/**
* Is any auth config by input scope is enabled.
*
* @param scope the scopes to check whether enabled
* @return {@code true} if any input scope auth is enabled, {@code false} all input scope auth is disabled.
*/
public boolean isAnyAuthEnabled(String... scope) {
for (String each : scope) {
NacosAuthConfig config = nacosAuthConfigMap.get(each);
if (null != config && config.isAuthEnabled()) {
return true;
}
}
return false;
}
/**
* Get nacos auth system type from the first {@link NacosAuthConfig}.
*
* <p>
* It should be same with for all {@link NacosAuthConfig}s in one nacos server.
* </p>
* @return nacos auth system type
*/
public String getNacosAuthSystemType() {
return nacosAuthConfigMap.values().stream().findFirst().map(NacosAuthConfig::getNacosAuthSystemType)
.orElse(null);
}
}
|
NacosAuthConfigHolder
|
java
|
mybatis__mybatis-3
|
src/test/java/org/apache/ibatis/submitted/association_nested/FolderMapper.java
|
{
"start": 837,
"end": 937
}
|
interface ____ {
List<FolderFlatTree> findWithSubFolders(@Param("name") String name);
}
|
FolderMapper
|
java
|
quarkusio__quarkus
|
extensions/agroal/deployment/src/test/java/io/quarkus/agroal/test/ConfigActiveFalseDefaultDatasourceDynamicInjectionTest.java
|
{
"start": 525,
"end": 2233
}
|
class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.overrideConfigKey("quarkus.datasource.active", "false");
@Inject
InjectableInstance<DataSource> dataSource;
@Inject
InjectableInstance<AgroalDataSource> agroalDataSource;
@Test
public void dataSource() {
doTest(dataSource);
}
@Test
public void agroalDataSource() {
doTest(agroalDataSource);
}
private void doTest(InjectableInstance<? extends DataSource> instance) {
// The bean is always available to be injected during static init
// since we don't know whether the datasource will be active at runtime.
// So the bean proxy cannot be null.
assertThat(instance.getHandle().getBean())
.isNotNull()
.returns(false, InjectableBean::isActive);
var ds = instance.get();
assertThat(ds).isNotNull();
// However, any attempt to use it at runtime will fail.
assertThatThrownBy(() -> ds.getConnection())
.isInstanceOf(InactiveBeanException.class)
.hasMessageContainingAll("Datasource '<default>' was deactivated through configuration properties.",
"To avoid this exception while keeping the bean inactive", // Message from Arc with generic hints
"To activate the datasource, set configuration property 'quarkus.datasource.active'"
+ " to 'true' and configure datasource '<default>'",
"Refer to https://quarkus.io/guides/datasource for guidance.");
}
}
|
ConfigActiveFalseDefaultDatasourceDynamicInjectionTest
|
java
|
mybatis__mybatis-3
|
src/test/java/org/apache/ibatis/submitted/ancestor_ref/AncestorRefTest.java
|
{
"start": 1097,
"end": 3056
}
|
class ____ {
private static SqlSessionFactory sqlSessionFactory;
@BeforeAll
static void setUp() throws Exception {
// create an SqlSessionFactory
try (Reader reader = Resources.getResourceAsReader("org/apache/ibatis/submitted/ancestor_ref/mybatis-config.xml")) {
sqlSessionFactory = new SqlSessionFactoryBuilder().build(reader);
}
// populate in-memory database
BaseDataTest.runScript(sqlSessionFactory.getConfiguration().getEnvironment().getDataSource(),
"org/apache/ibatis/submitted/ancestor_ref/CreateDB.sql");
}
@Test
void circularAssociation() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
Mapper mapper = sqlSession.getMapper(Mapper.class);
User user = mapper.getUserAssociation(1);
assertEquals("User2", user.getFriend().getName());
}
}
@Test
void circularCollection() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
Mapper mapper = sqlSession.getMapper(Mapper.class);
User user = mapper.getUserCollection(2);
assertEquals("User2", user.getFriends().get(0).getName());
assertEquals("User3", user.getFriends().get(1).getName());
}
}
@Test
void ancestorRef() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
Mapper mapper = sqlSession.getMapper(Mapper.class);
Blog blog = mapper.selectBlog(1);
assertEquals("Author1", blog.getAuthor().getName());
assertEquals("Author2", blog.getCoAuthor().getName());
// author and coauthor should have a ref to blog
assertEquals(blog, blog.getAuthor().getBlog());
assertEquals(blog, blog.getCoAuthor().getBlog());
// reputation should point to it author? or fail but do not point to a random one
assertEquals(blog.getAuthor(), blog.getAuthor().getReputation().getAuthor());
assertEquals(blog.getCoAuthor(), blog.getCoAuthor().getReputation().getAuthor());
}
}
}
|
AncestorRefTest
|
java
|
quarkusio__quarkus
|
extensions/redis-client/runtime/src/main/java/io/quarkus/redis/datasource/geo/GeoCommands.java
|
{
"start": 609,
"end": 14184
}
|
interface ____<K, V> extends RedisCommands {
/**
* Execute the command <a href="https://redis.io/commands/geoadd">GEOADD</a>.
* Summary: Add one geospatial item in the geospatial index represented using a sorted set
* Group: geo
* Requires Redis 3.2.0
*
* @param key the key
* @param longitude the longitude coordinate according to WGS84.
* @param latitude the latitude coordinate according to WGS84.
* @param member the member to add.
* @return {@code true} if the geospatial item was added, {@code false} otherwise
**/
boolean geoadd(K key, double longitude, double latitude, V member);
/**
* Execute the command <a href="https://redis.io/commands/geoadd">GEOADD</a>.
* Summary: Add one geospatial item in the geospatial index represented using a sorted set
* Group: geo
* Requires Redis 3.2.0
*
* @param key the key
* @param position the geo position
* @param member the member to add.
* @return {@code true} if the geospatial item was added, {@code false} otherwise
**/
boolean geoadd(K key, GeoPosition position, V member);
/**
* Execute the command <a href="https://redis.io/commands/geoadd">GEOADD</a>.
* Summary: Add one geospatial item in the geospatial index represented using a sorted set
* Group: geo
* Requires Redis 3.2.0
*
* @param key the key
* @param item the item to add
* @return {@code true} if the geospatial item was added, {@code false} otherwise
**/
boolean geoadd(K key, GeoItem<V> item);
/**
* Execute the command <a href="https://redis.io/commands/geoadd">GEOADD</a>.
* Summary: Add one or more geospatial items in the geospatial index represented using a sorted set
* Group: geo
* Requires Redis 3.2.0
*
* @param key the key
* @param items the geo-item triplets containing the longitude, latitude and name / value
* @return the number of elements added to the sorted set (excluding score updates).
**/
int geoadd(K key, GeoItem<V>... items);
/**
* Execute the command <a href="https://redis.io/commands/geoadd">GEOADD</a>.
* Summary: Add one geospatial item in the geospatial index represented using a sorted set
* Group: geo
* Requires Redis 3.2.0
*
* @param key the key
* @param longitude the longitude coordinate according to WGS84.
* @param latitude the latitude coordinate according to WGS84.
* @param member the member to add.
* @param args additional arguments.
* @return {@code true} if the geospatial item was added, {@code false} otherwise
**/
boolean geoadd(K key, double longitude, double latitude, V member, GeoAddArgs args);
/**
* Execute the command <a href="https://redis.io/commands/geoadd">GEOADD</a>.
* Summary: Add one geospatial item in the geospatial index represented using a sorted set
* Group: geo
* Requires Redis 3.2.0
*
* @param key the key
* @param item the item to add
* @param args additional arguments.
* @return {@code true} if the geospatial item was added, {@code false} otherwise
**/
boolean geoadd(K key, GeoItem<V> item, GeoAddArgs args);
/**
* Execute the command <a href="https://redis.io/commands/geoadd">GEOADD</a>.
* Summary: Add one or more geospatial items in the geospatial index represented using a sorted set
* Group: geo
* Requires Redis 3.2.0
*
* @param key the key
* @param args additional arguments.
* @param items the items containing the longitude, latitude and name / value
* @return the number of elements added to the sorted set (excluding score updates). If the {@code CH} option is
* specified, the number of elements that were changed (added or updated).
**/
int geoadd(K key, GeoAddArgs args, GeoItem<V>... items);
/**
* Execute the command <a href="https://redis.io/commands/geodist">GEODIST</a>.
* Summary: Returns the distance between two members of a geospatial index
* Group: geo
* Requires Redis 3.2.0
*
* @param key the key
* @param from from member
* @param to to member
* @param unit the unit
* @return The command returns the distance as a double in the specified unit, or {@code empty} if one or both the
* elements are missing.
**/
OptionalDouble geodist(K key, V from, V to, GeoUnit unit);
/**
* Execute the command <a href="https://redis.io/commands/geohash">GEOHASH</a>.
* Summary: Returns members of a geospatial index as standard geohash strings
* Group: geo
* Requires Redis 3.2.0
*
* @param key the key
* @param members the members
* @return The command returns an array where each element is the Geohash corresponding to each member name passed
* as argument to the command.
**/
List<String> geohash(K key, V... members);
/**
* Execute the command <a href="https://redis.io/commands/geopos">GEOPOS</a>.
* Summary: Returns longitude and latitude of members of a geospatial index
* Group: geo
* Requires Redis 3.2.0
*
* @param key the key
* @param members the items
* @return The command returns an array where each element is a{@link GeoPosition} representing longitude and
* latitude (x,y) of each member name passed as argument to the command. Non-existing elements are reported as
* {@code null} elements.
**/
List<GeoPosition> geopos(K key, V... members);
/**
* Execute the command <a href="https://redis.io/commands/georadius">GEORADIUS</a>.
* Summary: Query a sorted set representing a geospatial index to fetch members matching a given maximum distance from a
* point
* Group: geo
* Requires Redis 3.2.0
*
* @param key the key
* @param longitude the longitude
* @param latitude the latitude
* @param radius the radius
* @param unit the unit
* @return the list of values.
* @deprecated See https://redis.io/commands/georadius
**/
@Deprecated
Set<V> georadius(K key, double longitude, double latitude, double radius, GeoUnit unit);
/**
* Execute the command <a href="https://redis.io/commands/georadius">GEORADIUS</a>.
* Summary: Query a sorted set representing a geospatial index to fetch members matching a given maximum distance from a
* point
* Group: geo
* Requires Redis 3.2.0
*
* @param key the key
* @param position the position
* @param radius the radius
* @param unit the unit
* @return the list of values.
* @deprecated See https://redis.io/commands/georadius
**/
@Deprecated
Set<V> georadius(K key, GeoPosition position, double radius, GeoUnit unit);
/**
* Execute the command <a href="https://redis.io/commands/georadius">GEORADIUS</a>.
* Summary: Query a sorted set representing a geospatial index to fetch members matching a given maximum distance from a
* point
* Group: geo
* Requires Redis 3.2.0
*
* @param key the key
* @param longitude the longitude
* @param latitude the latitude
* @param radius the radius
* @param unit the unit
* @param geoArgs the extra arguments of the {@code GEORADIUS} command
* @return the list of {@link GeoValue}. Only the field requested using {@code geoArgs} are populated in the returned
* {@link GeoValue}.
* @deprecated See https://redis.io/commands/georadius
**/
@Deprecated
List<GeoValue<V>> georadius(K key, double longitude, double latitude, double radius, GeoUnit unit, GeoRadiusArgs geoArgs);
/**
* Execute the command <a href="https://redis.io/commands/georadius">GEORADIUS</a>.
* Summary: Query a sorted set representing a geospatial index to fetch members matching a given maximum distance from a
* point
* Group: geo
* Requires Redis 3.2.0
*
* @param key the key
* @param position the position
* @param radius the radius
* @param unit the unit
* @param geoArgs the extra arguments of the {@code GEORADIUS} command
* @return the list of {@link GeoValue}. Only the field requested using {@code geoArgs} are populated in the returned
* {@link GeoValue}.
* @deprecated See https://redis.io/commands/georadius
**/
@Deprecated
List<GeoValue<V>> georadius(K key, GeoPosition position, double radius, GeoUnit unit, GeoRadiusArgs geoArgs);
/**
* Execute the command <a href="https://redis.io/commands/georadius">GEORADIUS</a>.
* Summary: Query a sorted set representing a geospatial index to fetch members matching a given maximum distance from a
* point.
* It also stores the results in a sorted set.
* Group: geo
* Requires Redis 3.2.0
*
* @param key the key
* @param longitude the longitude
* @param latitude the latitude
* @param radius the radius
* @param unit the unit
* @param geoArgs the extra {@code STORE} arguments of the {@code GEORADIUS} command
* @return The number of items contained in the result written at the configured key.
* @deprecated See https://redis.io/commands/georadius
**/
@Deprecated
long georadius(K key, double longitude, double latitude, double radius, GeoUnit unit, GeoRadiusStoreArgs<K> geoArgs);
/**
* Execute the command <a href="https://redis.io/commands/georadius">GEORADIUS</a>.
* Summary: Query a sorted set representing a geospatial index to fetch members matching a given maximum distance from a
* point.
* It also stores the results in a sorted set.
* Group: geo
* Requires Redis 3.2.0
*
* @param key the key
* @param position the position
* @param radius the radius
* @param unit the unit
* @param geoArgs the extra {@code STORE} arguments of the {@code GEORADIUS} command
* @return The number of items contained in the result written at the configured key.
* @deprecated See https://redis.io/commands/georadius
**/
@Deprecated
long georadius(K key, GeoPosition position, double radius, GeoUnit unit, GeoRadiusStoreArgs<K> geoArgs);
/**
* Execute the command <a href="https://redis.io/commands/georadiusbymember">GEORADIUSBYMEMBER</a>.
* Summary: Query a sorted set representing a geospatial index to fetch members matching a given maximum distance from a
* member
* Group: geo
* Requires Redis 3.2.0
*
* @param key the key
* @param member the member
* @param distance the max distance
* @return the set of values
* @deprecated See https://redis.io/commands/georadiusbymember
**/
@Deprecated
Set<V> georadiusbymember(K key, V member, double distance, GeoUnit unit);
/**
* Execute the command <a href="https://redis.io/commands/georadiusbymember">GEORADIUSBYMEMBER</a>.
* Summary: Query a sorted set representing a geospatial index to fetch members matching a given maximum distance from a
* member
* Group: geo
* Requires Redis 3.2.0
*
* @param key the key
* @param member the member
* @param distance the max distance
* @param geoArgs the extra arguments of the {@code GEORADIUS} command
* @return the list of {@link GeoValue}. Only the field requested using {@code geoArgs} are populated in the
* returned {@link GeoValue values}.
* @deprecated See https://redis.io/commands/georadiusbymember
**/
@Deprecated
List<GeoValue<V>> georadiusbymember(K key, V member, double distance, GeoUnit unit, GeoRadiusArgs geoArgs);
/**
* Execute the command <a href="https://redis.io/commands/georadiusbymember">GEORADIUSBYMEMBER</a>.
* Summary: Query a sorted set representing a geospatial index to fetch members matching a given maximum distance from a
* member.
* It also stores the results in a sorted set.
* Group: geo
* Requires Redis 3.2.0
*
* @param key the key
* @param member the member
* @param distance the max distance
* @param geoArgs the extra arguments of the {@code GEORADIUS} command
* @return The number of items contained in the result written at the configured key.
* @deprecated See https://redis.io/commands/georadiusbymember
**/
@Deprecated
long georadiusbymember(K key, V member, double distance, GeoUnit unit, GeoRadiusStoreArgs<K> geoArgs);
/**
* Execute the command <a href="https://redis.io/commands/geosearch">GEOSEARCH</a>.
* Summary: Query a sorted set representing a geospatial index to fetch members inside an area of a box or a circle.
* Group: geo
* Requires Redis 6.2.0
*
* @return the list of {@code GeoValue<V>>}. The populated data depends on the parameters configured in {@code args}.
**/
List<GeoValue<V>> geosearch(K key, GeoSearchArgs<V> args);
/**
* Execute the command <a href="https://redis.io/commands/geosearchstore">GEOSEARCHSTORE</a>.
* Summary: Query a sorted set representing a geospatial index to fetch members inside an area of a box or a circle,
* and store the result in another key.
* Group: geo
* Requires Redis 6.2.0
*
* @return the number of elements in the resulting set.
**/
long geosearchstore(K destination, K key, GeoSearchStoreArgs<V> args, boolean storeDist);
}
|
GeoCommands
|
java
|
lettuce-io__lettuce-core
|
src/main/templates/io/lettuce/core/api/RedisStringCommands.java
|
{
"start": 1040,
"end": 19150
}
|
interface ____<K, V> {
/**
* Append a value to a key.
*
* @param key the key.
* @param value the value.
* @return Long integer-reply the length of the string after the append operation.
*/
Long append(K key, V value);
/**
* Count set bits in a string.
*
* @param key the key.
* @return Long integer-reply The number of bits set to 1.
*/
Long bitcount(K key);
/**
* Count set bits in a string.
*
* @param key the key.
* @param start the start.
* @param end the end.
* @return Long integer-reply The number of bits set to 1.
*/
Long bitcount(K key, long start, long end);
/**
* Execute {@code BITFIELD} with its subcommands.
*
* @param key the key.
* @param bitFieldArgs the args containing subcommands, must not be {@code null}.
* @return Long bulk-reply the results from the bitfield commands.
*/
List<Long> bitfield(K key, BitFieldArgs bitFieldArgs);
/**
* Find first bit set or clear in a string.
*
* @param key the key.
* @param state the state.
* @return Long integer-reply The command returns the position of the first bit set to 1 or 0 according to the request.
*
* If we look for set bits (the bit argument is 1) and the string is empty or composed of just zero bytes, -1 is
* returned.
*
* If we look for clear bits (the bit argument is 0) and the string only contains bit set to 1, the function returns
* the first bit not part of the string on the right. So if the string is three bytes set to the value 0xff the
* command {@code BITPOS key 0} will return 24, since up to bit 23 all the bits are 1.
*
* Basically the function consider the right of the string as padded with zeros if you look for clear bits and
* specify no range or the <em>start</em> argument <strong>only</strong>.
*/
Long bitpos(K key, boolean state);
/**
* Find first bit set or clear in a string.
*
* @param key the key.
* @param state the bit type: long.
* @param start the start type: long.
* @return Long integer-reply The command returns the position of the first bit set to 1 or 0 according to the request.
*
* If we look for set bits (the bit argument is 1) and the string is empty or composed of just zero bytes, -1 is
* returned.
*
* If we look for clear bits (the bit argument is 0) and the string only contains bit set to 1, the function returns
* the first bit not part of the string on the right. So if the string is three bytes set to the value 0xff the
* command {@code BITPOS key 0} will return 24, since up to bit 23 all the bits are 1.
*
* Basically the function consider the right of the string as padded with zeros if you look for clear bits and
* specify no range or the <em>start</em> argument <strong>only</strong>.
* @since 5.0.1
*/
Long bitpos(K key, boolean state, long start);
/**
* Find first bit set or clear in a string.
*
* @param key the key.
* @param state the bit type: long.
* @param start the start type: long.
* @param end the end type: long.
* @return Long integer-reply The command returns the position of the first bit set to 1 or 0 according to the request.
*
* If we look for set bits (the bit argument is 1) and the string is empty or composed of just zero bytes, -1 is
* returned.
*
* If we look for clear bits (the bit argument is 0) and the string only contains bit set to 1, the function returns
* the first bit not part of the string on the right. So if the string is three bytes set to the value 0xff the
* command {@code BITPOS key 0} will return 24, since up to bit 23 all the bits are 1.
*
* Basically the function consider the right of the string as padded with zeros if you look for clear bits and
* specify no range or the <em>start</em> argument <strong>only</strong>.
*
* However this behavior changes if you are looking for clear bits and specify a range with both
* <strong>start</strong> and <strong>end</strong>. If no clear bit is found in the specified range, the function
* returns -1 as the user specified a clear range and there are no 0 bits in that range.
*/
Long bitpos(K key, boolean state, long start, long end);
/**
* Perform bitwise AND between strings.
*
* @param destination result key of the operation.
* @param keys operation input key names.
* @return Long integer-reply The size of the string stored in the destination key, that is equal to the size of the longest
* input string.
*/
Long bitopAnd(K destination, K... keys);
/**
* Perform bitwise NOT between strings.
*
* @param destination result key of the operation.
* @param source operation input key names.
* @return Long integer-reply The size of the string stored in the destination key, that is equal to the size of the longest
* input string.
*/
Long bitopNot(K destination, K source);
/**
* Perform bitwise OR between strings.
*
* @param destination result key of the operation.
* @param keys operation input key names.
* @return Long integer-reply The size of the string stored in the destination key, that is equal to the size of the longest
* input string.
*/
Long bitopOr(K destination, K... keys);
/**
* Perform bitwise XOR between strings.
*
* @param destination result key of the operation.
* @param keys operation input key names.
* @return Long integer-reply The size of the string stored in the destination key, that is equal to the size of the longest
* input string.
*/
Long bitopXor(K destination, K... keys);
/**
* Perform bitwise DIFF between strings. Members of the source key that are not members of any of the other keys. Equivalent
* to: X ∧ ¬(Y1 ∨ Y2 ∨ …)
*
* @param destination result key of the operation.
* @param sourceKey the source key (X) for comparison.
* @param keys one or more additional keys (Y1, Y2, ...). At least one key is required.
* @return Long integer-reply The size of the string stored in the destination key, that is equal to the size of the longest
* input string.
*/
Long bitopDiff(K destination, K sourceKey, K... keys);
/**
* Perform bitwise DIFF1 between strings. Members of one or more of the keys that are not members of the source key.
* Equivalent to: ¬X ∧ (Y1 ∨ Y2 ∨ …)
*
* @param destination result key of the operation.
* @param sourceKey the source key (X) for comparison.
* @param keys one or more additional keys (Y1, Y2, ...). At least one key is required.
* @return Long integer-reply The size of the string stored in the destination key, that is equal to the size of the longest
* input string.
*/
Long bitopDiff1(K destination, K sourceKey, K... keys);
/**
* Perform bitwise ANDOR between strings. Members of the source key that are also members of one or more of the other keys.
* Equivalent to: X ∧ (Y1 ∨ Y2 ∨ …)
*
* @param destination result key of the operation.
* @param sourceKey the source key (X) for comparison.
* @param keys one or more additional keys (Y1, Y2, ...). At least one key is required.
* @return Long integer-reply The size of the string stored in the destination key, that is equal to the size of the longest
* input string.
*/
Long bitopAndor(K destination, K sourceKey, K... keys);
/**
* Perform bitwise ONE between strings. Members of exactly one of the given keys. For two keys this is equivalent to XOR.
* For more than two keys, returns members that appear in exactly one key.
*
* @param destination result key of the operation.
* @param keys operation input key names.
* @return Long integer-reply The size of the string stored in the destination key, that is equal to the size of the longest
* input string.
*/
Long bitopOne(K destination, K... keys);
/**
* Decrement the integer value of a key by one.
*
* @param key the key.
* @return Long integer-reply the value of {@code key} after the decrement.
*/
Long decr(K key);
/**
* Decrement the integer value of a key by the given number.
*
* @param key the key.
* @param amount the decrement type: long.
* @return Long integer-reply the value of {@code key} after the decrement.
*/
Long decrby(K key, long amount);
/**
* Get the value of a key.
*
* @param key the key.
* @return V bulk-string-reply the value of {@code key}, or {@code null} when {@code key} does not exist.
*/
V get(K key);
/**
* Returns the bit value at offset in the string value stored at key.
*
* @param key the key.
* @param offset the offset type: long.
* @return Long integer-reply the bit value stored at <em>offset</em>.
*/
Long getbit(K key, long offset);
/**
* Get the value of key and delete the key.
*
* @param key the key.
* @return V bulk-string-reply the value of {@code key}, or {@code null} when {@code key} does not exist.
* @since 6.1
*/
V getdel(K key);
/**
* Get the value of key and optionally set its expiration.
*
* @param key the key.
* @param args the arguments for {@code GETEX}.
* @return V bulk-string-reply the value of {@code key}, or {@code null} when {@code key} does not exist.
* @since 6.1
*/
V getex(K key, GetExArgs args);
/**
* Get a substring of the string stored at a key.
*
* @param key the key.
* @param start the start type: long.
* @param end the end type: long.
* @return V bulk-string-reply.
*/
V getrange(K key, long start, long end);
/**
* Set the string value of a key and return its old value.
*
* @param key the key.
* @param value the value.
* @return V bulk-string-reply the old value stored at {@code key}, or {@code null} when {@code key} did not exist.
*/
V getset(K key, V value);
/**
* Increment the integer value of a key by one.
*
* @param key the key.
* @return Long integer-reply the value of {@code key} after the increment.
*/
Long incr(K key);
/**
* Increment the integer value of a key by the given amount.
*
* @param key the key.
* @param amount the increment type: long.
* @return Long integer-reply the value of {@code key} after the increment.
*/
Long incrby(K key, long amount);
/**
* Increment the float value of a key by the given amount.
*
* @param key the key.
* @param amount the increment type: double.
* @return Double bulk-string-reply the value of {@code key} after the increment.
*/
Double incrbyfloat(K key, double amount);
/**
* Get the values of all the given keys.
*
* @param keys the key.
* @return List<V> array-reply list of values at the specified keys.
*/
List<KeyValue<K, V>> mget(K... keys);
/**
* Stream over the values of all the given keys.
*
* @param channel the channel.
* @param keys the keys.
* @return Long array-reply list of values at the specified keys.
*/
Long mget(KeyValueStreamingChannel<K, V> channel, K... keys);
/**
* Set multiple keys to multiple values.
*
* @param map the map containing key-value pairs.
* @return String simple-string-reply always {@code OK} since {@code MSET} can't fail.
*/
String mset(Map<K, V> map);
/**
* Set multiple keys to multiple values, only if none of the keys exist.
*
* @param map the map containing key-value pairs.
* @return Boolean integer-reply specifically:
*
* {@code 1} if the all the keys were set. {@code 0} if no key was set (at least one key already existed).
*/
Boolean msetnx(Map<K, V> map);
/**
* Set multiple keys to multiple values with optional conditions and expiration. Emits: numkeys, pairs, then [NX|XX] and one
* of [EX|PX|EXAT|PXAT|KEEPTTL].
*
* @param map the map of keys and values.
* @param args the {@link MSetExArgs} specifying NX/XX and expiration.
* @return Boolean from integer-reply: {@code 1} if all keys were set, {@code 0} otherwise.
* @since 7.1
*/
Boolean msetex(Map<K, V> map, MSetExArgs args);
/**
* Set the string value of a key.
*
* @param key the key.
* @param value the value.
* @return String simple-string-reply {@code OK} if {@code SET} was executed correctly.
*/
String set(K key, V value);
/**
* Set the string value of a key.
*
* @param key the key.
* @param value the value.
* @param setArgs the setArgs.
* @return String simple-string-reply {@code OK} if {@code SET} was executed correctly.
*/
String set(K key, V value, SetArgs setArgs);
/**
* Set the string value of a key and return its old value.
*
* @param key the key.
* @param value the value.
* @return V bulk-string-reply the old value stored at {@code key}, or {@code null} when {@code key} did not exist.
* @since 6.1
*/
V setGet(K key, V value);
/**
* Set the string value of a key and return its old value.
*
* @param key the key.
* @param value the value.
* @param setArgs the command arguments.
* @return V bulk-string-reply the old value stored at {@code key}, or {@code null} when {@code key} did not exist.
* @since 6.1
*/
V setGet(K key, V value, SetArgs setArgs);
/**
* Sets or clears the bit at offset in the string value stored at key.
*
* @param key the key.
* @param offset the offset type: long.
* @param value the value type: string.
* @return Long integer-reply the original bit value stored at <em>offset</em>.
*/
Long setbit(K key, long offset, int value);
/**
* Set the value and expiration of a key.
*
* @param key the key.
* @param seconds the seconds type: long.
* @param value the value.
* @return String simple-string-reply.
*/
String setex(K key, long seconds, V value);
/**
* Set the value and expiration in milliseconds of a key.
*
* @param key the key.
* @param milliseconds the milliseconds type: long.
* @param value the value.
* @return String simple-string-reply.
*/
String psetex(K key, long milliseconds, V value);
/**
* Set the value of a key, only if the key does not exist.
*
* @param key the key.
* @param value the value.
* @return Boolean integer-reply specifically:
*
* {@code 1} if the key was set {@code 0} if the key was not set.
*/
Boolean setnx(K key, V value);
/**
* Overwrite part of a string at key starting at the specified offset.
*
* @param key the key.
* @param offset the offset type: long.
* @param value the value.
* @return Long integer-reply the length of the string after it was modified by the command.
*/
Long setrange(K key, long offset, V value);
/**
* The STRALGO command implements complex algorithms that operate on strings. This method uses the LCS algorithm (longest
* common substring).
* <p>
* Command is no longer available in Redis server versions 7.0.x and later.
*
* <ul>
* <li>Without modifiers the string representing the longest common substring is returned.</li>
* <li>When {@link StrAlgoArgs#justLen() LEN} is given the command returns the length of the longest common substring.</li>
* <li>When {@link StrAlgoArgs#withIdx() IDX} is given the command returns an array with the LCS length and all the ranges
* in both the strings, start and end offset for each string, where there are matches. When
* {@link StrAlgoArgs#withMatchLen() WITHMATCHLEN} is given each array representing a match will also have the length of the
* match.</li>
* </ul>
*
* @param strAlgoArgs command arguments.
* @return StringMatchResult.
* @deprecated since 6.6 in favor of {@link #lcs(LcsArgs)}.
* @since 6.0
*/
@Deprecated
StringMatchResult stralgoLcs(StrAlgoArgs strAlgoArgs);
/**
* The LCS command implements the longest common subsequence algorithm.
*
* <ul>
* <li>Without modifiers, the string representing the longest common substring is returned.</li>
* <li>When {@link LcsArgs#justLen() LEN} is given the command returns the length of the longest common substring.</li>
* <li>When {@link LcsArgs#withIdx() IDX} is given the command returns an array with the LCS length and all the ranges in
* both the strings, start and end offset for each string, where there are matches. When {@link LcsArgs#withMatchLen()
* WITHMATCHLEN} is given each array representing a match will also have the length of the match.</li>
* </ul>
*
* @param lcsArgs command arguments supplied by the {@link LcsArgs}.
* @return StringMatchResult
* @see <a href="https://redis.io/commands/lcs">LCS command refference</a>
* @since 6.6
*/
StringMatchResult lcs(LcsArgs lcsArgs);
/**
* Get the length of the value stored in a key.
*
* @param key the key.
* @return Long integer-reply the length of the string at {@code key}, or {@code 0} when {@code key} does not exist.
*/
Long strlen(K key);
}
|
RedisStringCommands
|
java
|
apache__dubbo
|
dubbo-plugin/dubbo-filter-cache/src/test/java/org/apache/dubbo/cache/support/expiring/ExpiringCacheFactoryTest.java
|
{
"start": 1422,
"end": 4062
}
|
class ____ extends AbstractCacheFactoryTest {
private static final String EXPIRING_CACHE_URL =
"test://test:12/test?cache=expiring&cache.seconds=1&cache.interval=1";
@Test
void testExpiringCacheFactory() throws Exception {
Cache cache = super.constructCache();
assertThat(cache instanceof ExpiringCache, is(true));
}
@Test
void testExpiringCacheGetExpired() throws Exception {
URL url = URL.valueOf("test://test:12/test?cache=expiring&cache.seconds=1&cache.interval=1");
AbstractCacheFactory cacheFactory = getCacheFactory();
Invocation invocation = new RpcInvocation();
Cache cache = cacheFactory.getCache(url, invocation);
cache.put("testKey", "testValue");
Thread.sleep(2100);
assertNull(cache.get("testKey"));
}
@Test
void testExpiringCacheUnExpired() throws Exception {
URL url = URL.valueOf("test://test:12/test?cache=expiring&cache.seconds=0&cache.interval=1");
AbstractCacheFactory cacheFactory = getCacheFactory();
Invocation invocation = new RpcInvocation();
Cache cache = cacheFactory.getCache(url, invocation);
cache.put("testKey", "testValue");
Thread.sleep(1100);
assertNotNull(cache.get("testKey"));
}
@Test
void testExpiringCache() throws Exception {
Cache cache = constructCache();
assertThat(cache instanceof ExpiringCache, is(true));
// 500ms
TimeUnit.MILLISECONDS.sleep(500);
cache.put("testKey", "testValue");
// 800ms
TimeUnit.MILLISECONDS.sleep(300);
assertNotNull(cache.get("testKey"));
// 1300ms
TimeUnit.MILLISECONDS.sleep(500);
assertNotNull(cache.get("testKey"));
}
@Test
void testExpiringCacheExpired() throws Exception {
Cache cache = constructCache();
assertThat(cache instanceof ExpiringCache, is(true));
// 500ms
TimeUnit.MILLISECONDS.sleep(500);
cache.put("testKey", "testValue");
// 1000ms ExpireThread clear all expire cache
TimeUnit.MILLISECONDS.sleep(500);
// 1700ms get should be null
TimeUnit.MILLISECONDS.sleep(700);
assertNull(cache.get("testKey"));
}
@Override
protected Cache constructCache() {
URL url = URL.valueOf(EXPIRING_CACHE_URL);
Invocation invocation = new RpcInvocation();
return getCacheFactory().getCache(url, invocation);
}
@Override
protected AbstractCacheFactory getCacheFactory() {
return new ExpiringCacheFactory();
}
}
|
ExpiringCacheFactoryTest
|
java
|
micronaut-projects__micronaut-core
|
http/src/main/java/io/micronaut/http/body/ContextlessMessageBodyHandlerRegistry.java
|
{
"start": 1245,
"end": 6388
}
|
class ____ extends AbstractMessageBodyHandlerRegistry {
private final List<ReaderEntry> readerEntries = new ArrayList<>();
private final List<WriterEntry> writerEntries = new ArrayList<>();
private final List<TypedMessageBodyReader<?>> typedMessageBodyReaders;
private final List<TypedMessageBodyWriter<?>> typedMessageBodyWriters;
/**
* @param applicationConfiguration The configuration
* @param byteBufferFactory The buffer factory
* @param otherRawHandlers Raw handlers to add on top of the default ones
*/
public ContextlessMessageBodyHandlerRegistry(ApplicationConfiguration applicationConfiguration,
ByteBufferFactory<?, ?> byteBufferFactory,
TypedMessageBodyHandler<?>... otherRawHandlers) {
this.typedMessageBodyReaders = new ArrayList<>(3 + otherRawHandlers.length);
this.typedMessageBodyReaders.add(new StringBodyReader(applicationConfiguration));
this.typedMessageBodyReaders.add(new ByteArrayBodyHandler());
this.typedMessageBodyReaders.add(new ByteBufferBodyHandler(byteBufferFactory));
this.typedMessageBodyWriters = new ArrayList<>(3 + otherRawHandlers.length);
this.typedMessageBodyWriters.add(new CharSequenceBodyWriter(applicationConfiguration));
this.typedMessageBodyWriters.add(new ByteArrayBodyHandler());
this.typedMessageBodyWriters.add(new ByteBodyWriter());
this.typedMessageBodyWriters.add(new ByteBufferBodyHandler(byteBufferFactory));
for (TypedMessageBodyHandler<?> otherRawHandler : otherRawHandlers) {
this.typedMessageBodyReaders.add(otherRawHandler);
this.typedMessageBodyWriters.add(otherRawHandler);
}
add(MediaType.TEXT_PLAIN_TYPE, new TextPlainObjectBodyReader<>(applicationConfiguration, ConversionService.SHARED));
add(MediaType.TEXT_PLAIN_TYPE, new TextPlainObjectBodyWriter());
}
/**
* Add a {@link MessageBodyHandler} for the given media type.
*
* @param mediaType The media type the handler applies to
* @param handler The handler
*/
public void add(@NonNull MediaType mediaType, @NonNull MessageBodyHandler<?> handler) {
writerEntries.add(new WriterEntry(handler, mediaType));
readerEntries.add(new ReaderEntry(handler, mediaType));
}
/**
* Add a {@link MessageBodyWriter} for the given media type.
*
* @param mediaType The media type the handler applies to
* @param handler The handler
*/
public void add(@NonNull MediaType mediaType, @NonNull MessageBodyWriter<?> handler) {
writerEntries.add(new WriterEntry(handler, mediaType));
}
/**
* Add a {@link MessageBodyReader} for the given media type.
*
* @param mediaType The media type the handler applies to
* @param handler The handler
*/
public void add(@NonNull MediaType mediaType, @NonNull MessageBodyReader<?> handler) {
readerEntries.add(new ReaderEntry(handler, mediaType));
}
@Override
protected <T> MessageBodyReader<T> findReaderImpl(Argument<T> type, List<MediaType> mediaTypes) {
for (TypedMessageBodyReader<?> messageBodyReader : typedMessageBodyReaders) {
TypedMessageBodyReader<T> reader = (TypedMessageBodyReader<T>) messageBodyReader;
if (type.getType().isAssignableFrom(reader.getType().getType())
&& (mediaTypes.isEmpty() && reader.isReadable(type, null))
|| mediaTypes.stream().anyMatch(mt -> reader.isReadable(type, mt))) {
return reader;
}
}
for (MediaType mediaType : mediaTypes) {
for (ReaderEntry entry : readerEntries) {
if (mediaType.matches(entry.mediaType)) {
return (MessageBodyReader<T>) entry.handler;
}
}
}
return null;
}
@Override
protected <T> MessageBodyWriter<T> findWriterImpl(Argument<T> type, List<MediaType> mediaTypes) {
for (TypedMessageBodyWriter<?> messageBodyReader : typedMessageBodyWriters) {
TypedMessageBodyWriter<T> writer = (TypedMessageBodyWriter<T>) messageBodyReader;
if (messageBodyReader.getType().isAssignableFrom(type.getType())
&& (mediaTypes.isEmpty() && writer.isWriteable(type, null)
|| mediaTypes.stream().anyMatch(mt -> writer.isWriteable(type, mt)))) {
return (MessageBodyWriter<T>) messageBodyReader;
}
}
for (MediaType mediaType : mediaTypes) {
for (WriterEntry entry : writerEntries) {
if (mediaType.matches(entry.mediaType)) {
return (MessageBodyWriter<T>) entry.handler;
}
}
}
return null;
}
private record ReaderEntry(MessageBodyReader<?> handler, MediaType mediaType) {
}
private record WriterEntry(MessageBodyWriter<?> handler, MediaType mediaType) {
}
}
|
ContextlessMessageBodyHandlerRegistry
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/index/LogsIndexModeTests.java
|
{
"start": 855,
"end": 9056
}
|
class ____ extends ESTestCase {
public void testLogsIndexModeSetting() {
assertThat(IndexSettings.MODE.get(buildSettings()), equalTo(IndexMode.LOGSDB));
}
public void testDefaultHostNameSortField() {
final IndexMetadata metadata = IndexSettingsTests.newIndexMeta("test", buildSettings());
assertThat(metadata.getIndexMode(), equalTo(IndexMode.LOGSDB));
boolean sortOnHostName = randomBoolean();
final IndexSettings settings = new IndexSettings(
metadata,
Settings.builder().put(IndexSettings.LOGSDB_SORT_ON_HOST_NAME.getKey(), sortOnHostName).build()
);
assertThat(settings.getIndexSortConfig().hasPrimarySortOnField("host.name"), equalTo(sortOnHostName));
}
public void testDefaultHostNameSortFieldAndMapping() {
final IndexMetadata metadata = IndexSettingsTests.newIndexMeta("test", buildSettings());
assertThat(metadata.getIndexMode(), equalTo(IndexMode.LOGSDB));
final IndexSettings settings = new IndexSettings(
metadata,
Settings.builder()
.put(IndexSettings.LOGSDB_SORT_ON_HOST_NAME.getKey(), true)
.put(IndexSettings.LOGSDB_ADD_HOST_NAME_FIELD.getKey(), true)
.build()
);
assertThat(settings.getIndexSortConfig().hasPrimarySortOnField("host.name"), equalTo(true));
assertThat(IndexMode.LOGSDB.getDefaultMapping(settings).string(), containsString("host.name"));
}
public void testDefaultHostNameSortFieldBwc() {
final IndexMetadata metadata = IndexMetadata.builder("test")
.settings(
indexSettings(IndexVersionUtils.getPreviousVersion(IndexVersions.LOGSB_OPTIONAL_SORTING_ON_HOST_NAME), 1, 1).put(
buildSettings()
)
)
.build();
assertThat(metadata.getIndexMode(), equalTo(IndexMode.LOGSDB));
final IndexSettings settings = new IndexSettings(metadata, Settings.EMPTY);
assertThat(settings.getIndexSortConfig().hasPrimarySortOnField("host.name"), equalTo(true));
}
public void testDefaultHostNameSortWithOrder() {
final IndexMetadata metadata = IndexSettingsTests.newIndexMeta("test", buildSettings());
assertThat(metadata.getIndexMode(), equalTo(IndexMode.LOGSDB));
var exception = expectThrows(
IllegalArgumentException.class,
() -> new IndexSettings(
metadata,
Settings.builder()
.put(IndexSettings.LOGSDB_SORT_ON_HOST_NAME.getKey(), randomBoolean())
.put(IndexSortConfig.INDEX_SORT_ORDER_SETTING.getKey(), "desc")
.build()
)
);
assertEquals("setting [index.sort.order] requires [index.sort.field] to be configured", exception.getMessage());
}
public void testDefaultHostNameSortWithMode() {
final IndexMetadata metadata = IndexSettingsTests.newIndexMeta("test", buildSettings());
assertThat(metadata.getIndexMode(), equalTo(IndexMode.LOGSDB));
var exception = expectThrows(
IllegalArgumentException.class,
() -> new IndexSettings(
metadata,
Settings.builder()
.put(IndexSettings.LOGSDB_SORT_ON_HOST_NAME.getKey(), randomBoolean())
.put(IndexSortConfig.INDEX_SORT_MODE_SETTING.getKey(), "MAX")
.build()
)
);
assertEquals("setting [index.sort.mode] requires [index.sort.field] to be configured", exception.getMessage());
}
public void testDefaultHostNameSortWithMissing() {
final IndexMetadata metadata = IndexSettingsTests.newIndexMeta("test", buildSettings());
assertThat(metadata.getIndexMode(), equalTo(IndexMode.LOGSDB));
var exception = expectThrows(
IllegalArgumentException.class,
() -> new IndexSettings(
metadata,
Settings.builder()
.put(IndexSettings.LOGSDB_SORT_ON_HOST_NAME.getKey(), randomBoolean())
.put(IndexSortConfig.INDEX_SORT_MISSING_SETTING.getKey(), "_first")
.build()
)
);
assertEquals("setting [index.sort.missing] requires [index.sort.field] to be configured", exception.getMessage());
}
public void testCustomSortField() {
final Settings sortSettings = Settings.builder()
.put(buildSettings())
.put(IndexSortConfig.INDEX_SORT_FIELD_SETTING.getKey(), "agent_id")
.build();
final IndexMetadata metadata = IndexSettingsTests.newIndexMeta("test", sortSettings);
assertThat(metadata.getIndexMode(), equalTo(IndexMode.LOGSDB));
final IndexSettings settings = new IndexSettings(metadata, Settings.EMPTY);
assertThat(settings.getMode(), equalTo(IndexMode.LOGSDB));
assertThat(getIndexSetting(settings, IndexSortConfig.INDEX_SORT_FIELD_SETTING.getKey()), equalTo("agent_id"));
assertThat(settings.getIndexSortConfig().hasPrimarySortOnField("host.name"), equalTo(false));
assertThat(IndexMode.LOGSDB.getDefaultMapping(settings).string(), not(containsString("host")));
}
public void testSortMode() {
final Settings sortSettings = Settings.builder()
.put(buildSettings())
.put(IndexSortConfig.INDEX_SORT_FIELD_SETTING.getKey(), "agent_id")
.put(IndexSortConfig.INDEX_SORT_MODE_SETTING.getKey(), "max")
.build();
final IndexMetadata metadata = IndexSettingsTests.newIndexMeta("test", sortSettings);
assertThat(metadata.getIndexMode(), equalTo(IndexMode.LOGSDB));
final IndexSettings settings = new IndexSettings(metadata, Settings.EMPTY);
assertThat(settings.getMode(), equalTo(IndexMode.LOGSDB));
assertThat("agent_id", equalTo(getIndexSetting(settings, IndexSortConfig.INDEX_SORT_FIELD_SETTING.getKey())));
assertThat("max", equalTo(getIndexSetting(settings, IndexSortConfig.INDEX_SORT_MODE_SETTING.getKey())));
}
public void testSortOrder() {
final Settings sortSettings = Settings.builder()
.put(buildSettings())
.put(IndexSortConfig.INDEX_SORT_FIELD_SETTING.getKey(), "agent_id")
.put(IndexSortConfig.INDEX_SORT_ORDER_SETTING.getKey(), "desc")
.build();
final IndexMetadata metadata = IndexSettingsTests.newIndexMeta("test", sortSettings);
assertThat(metadata.getIndexMode(), equalTo(IndexMode.LOGSDB));
final IndexSettings settings = new IndexSettings(metadata, Settings.EMPTY);
assertThat(settings.getMode(), equalTo(IndexMode.LOGSDB));
assertThat("agent_id", equalTo(getIndexSetting(settings, IndexSortConfig.INDEX_SORT_FIELD_SETTING.getKey())));
assertThat("desc", equalTo(getIndexSetting(settings, IndexSortConfig.INDEX_SORT_ORDER_SETTING.getKey())));
}
public void testSortMissing() {
final Settings sortSettings = Settings.builder()
.put(buildSettings())
.put(IndexSortConfig.INDEX_SORT_FIELD_SETTING.getKey(), "agent_id")
.put(IndexSortConfig.INDEX_SORT_MISSING_SETTING.getKey(), "_last")
.build();
final IndexMetadata metadata = IndexSettingsTests.newIndexMeta("test", sortSettings);
assertThat(metadata.getIndexMode(), equalTo(IndexMode.LOGSDB));
final IndexSettings settings = new IndexSettings(metadata, Settings.EMPTY);
assertThat(settings.getMode(), equalTo(IndexMode.LOGSDB));
assertThat("agent_id", equalTo(getIndexSetting(settings, IndexSortConfig.INDEX_SORT_FIELD_SETTING.getKey())));
assertThat("_last", equalTo(getIndexSetting(settings, IndexSortConfig.INDEX_SORT_MISSING_SETTING.getKey())));
}
private Settings buildSettings() {
return Settings.builder().put(IndexSettings.MODE.getKey(), IndexMode.LOGSDB.getName()).build();
}
private String getIndexSetting(final IndexSettings settings, final String name) {
return settings.getIndexMetadata().getSettings().get(name);
}
}
|
LogsIndexModeTests
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/streaming/runtime/tasks/LifeCycleMonitor.java
|
{
"start": 1177,
"end": 1961
}
|
enum ____ {
OPEN,
INITIALIZE_STATE,
PROCESS_ELEMENT,
PREPARE_SNAPSHOT_PRE_BARRIER,
SNAPSHOT_STATE,
NOTIFY_CHECKPOINT_COMPLETE,
NOTIFY_CHECKPOINT_ABORT,
FINISH,
CLOSE
}
private final Map<LifeCyclePhase, Integer> callTimes = new HashMap<>();
public void incrementCallTime(LifeCyclePhase phase) {
callTimes.compute(phase, (k, v) -> v == null ? 1 : v + 1);
}
public void assertCallTimes(int expectedTimes, LifeCyclePhase... phases) {
for (LifeCyclePhase phase : phases) {
assertThat(callTimes.getOrDefault(phase, 0))
.as("The phase %s has unexpected call times", phase)
.isEqualTo(expectedTimes);
}
}
}
|
LifeCyclePhase
|
java
|
lettuce-io__lettuce-core
|
src/main/java/io/lettuce/core/dynamic/output/OutputType.java
|
{
"start": 862,
"end": 1713
}
|
class ____ {
private final Class<? extends CommandOutput> commandOutputClass;
private final TypeInformation<?> typeInformation;
private final boolean streaming;
/**
* Create a new {@link OutputType} given {@code primaryType}, the {@code commandOutputClass}, {@link TypeInformation} and
* whether the {@link OutputType} is for a {@link io.lettuce.core.output.StreamingOutput}.
*
* @param commandOutputClass must not be {@code null}.
* @param typeInformation must not be {@code null}.
* @param streaming {@code true} if the type descriptor concerns the {@link io.lettuce.core.output.StreamingOutput}
*/
OutputType(Class<? extends CommandOutput> commandOutputClass, TypeInformation<?> typeInformation, boolean streaming) {
LettuceAssert.notNull(commandOutputClass, "CommandOutput
|
OutputType
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/state/storage/ExternalizedSnapshotLocation.java
|
{
"start": 2305,
"end": 6514
}
|
class ____ {
@Nullable Path baseCheckpointPath;
@Nullable Path baseSavepointPath;
ReadableConfig config = new Configuration();
Builder withCheckpointPath(@Nullable Path baseCheckpointPath) {
this.baseCheckpointPath = baseCheckpointPath;
return this;
}
Builder withSavepointPath(@Nullable Path baseSavepointPath) {
this.baseSavepointPath = baseSavepointPath;
return this;
}
Builder withConfiguration(ReadableConfig config) {
this.config = config;
return this;
}
ExternalizedSnapshotLocation build() {
ExternalizedSnapshotLocation location = new ExternalizedSnapshotLocation();
location.baseCheckpointPath =
validatePath(
parameterOrConfigured(
baseCheckpointPath,
config,
CheckpointingOptions.CHECKPOINTS_DIRECTORY));
location.baseSavepointPath =
validatePath(
parameterOrConfigured(
baseSavepointPath,
config,
CheckpointingOptions.SAVEPOINT_DIRECTORY));
return location;
}
}
/**
* Checks the validity of the path's scheme and path.
*
* @param path The path to check.
* @return The URI as a Path.
* @throws IllegalArgumentException Thrown, if the URI misses scheme or path.
*/
private static Path validatePath(Path path) {
if (path == null) {
return null;
}
Optional.ofNullable(path.toUri().getScheme())
.orElseThrow(
() ->
new IllegalArgumentException(
"The scheme (hdfs://, file://, etc) is null. "
+ "Please specify the file system scheme explicitly in the URI."));
Optional.ofNullable(path.getPath())
.orElseThrow(
() ->
new IllegalArgumentException(
"The path to store the checkpoint data in is null. "
+ "Please specify a directory path for the checkpoint data."));
Optional.ofNullable(path.getParent())
.orElseThrow(
() ->
new IllegalArgumentException(
"Cannot use the root directory for checkpoints."));
return path;
}
@Nullable
private static Path parameterOrConfigured(
@Nullable Path path, ReadableConfig config, ConfigOption<String> option) {
return Optional.ofNullable(path)
.orElseGet(
() -> {
try {
return config.getOptional(option).map(Path::new).orElse(null);
} catch (IllegalArgumentException e) {
throw new IllegalConfigurationException(
"Cannot parse value for "
+ option.key()
+ " . Not a valid path.",
e);
}
});
}
@Override
public int hashCode() {
return Objects.hash(baseCheckpointPath, baseSavepointPath);
}
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
if (other == null || getClass() != other.getClass()) {
return false;
}
ExternalizedSnapshotLocation that = (ExternalizedSnapshotLocation) other;
return Objects.equals(baseCheckpointPath, that.baseCheckpointPath)
&& Objects.equals(baseSavepointPath, that.baseSavepointPath);
}
}
|
Builder
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/bytecode/enhancement/cache/ManyToOneTestReusedColumn.java
|
{
"start": 2060,
"end": 3812
}
|
class ____ {
@BeforeEach
public void setUp(SessionFactoryScope scope) {
scope.inTransaction(
session -> {
Fridge fridge = new Fridge();
FruitContainer fruitContainer = new FruitContainer();
CheeseContainer cheeseContainer = new CheeseContainer();
Fruit fruit = new Fruit();
Cheese cheese = new Cheese();
Fruit otherFruit = new Fruit();
Cheese otherCheese = new Cheese();
fruit.bestPairedWith = otherFruit;
cheese.bestPairedWith = otherCheese;
fruitContainer.fruit = fruit;
cheeseContainer.cheese = cheese;
fridge.addToFridge( fruitContainer );
fridge.addToFridge( cheeseContainer );
session.persist( fridge );
session.persist( otherFruit );
session.persist( otherCheese );
session.persist( fruit );
session.persist( cheese );
session.persist( fruitContainer );
session.persist( cheeseContainer );
}
);
}
@Test
public void testSelect(SessionFactoryScope scope) {
scope.inTransaction(
session -> {
Fridge fridge = session.getReference( Fridge.class, 1 );
for ( Container container : fridge.getContainers() ) {
if ( container instanceof FruitContainer ) {
Fruit f = ( (FruitContainer) container ).getFruit();
assertThat( f.toString() ).isNotNull();
assertThat( f.getBestPairedWith() ).isNotNull();
}
else if ( container instanceof CheeseContainer ) {
Cheese c = ( (CheeseContainer) container ).getCheese();
assertThat( c.toString() ).isNotNull();
assertThat( c.getBestPairedWith() ).isNotNull();
}
}
}
);
}
@Entity(name = "Fridge")
@Cacheable
@Cache(usage = CacheConcurrencyStrategy.READ_WRITE)
public static
|
ManyToOneTestReusedColumn
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/bvt/sql/mysql/select/MySqlSelectTest_165_json.java
|
{
"start": 315,
"end": 1232
}
|
class ____ extends MysqlTest {
public void test_0() throws Exception {
String sql = "select concat(l_shipdate,'10') from lineitem join orders on l_orderkey = o_orderkey where l_shipdate between '1997-01-27' and '1997-02-20' and json_extract(l_comment,'$.id') = json '1997-01-2810' limit 3";
//
List<SQLStatement> statementList = SQLUtils.parseStatements(sql, JdbcConstants.MYSQL);
SQLSelectStatement stmt = (SQLSelectStatement) statementList.get(0);
assertEquals(1, statementList.size());
assertEquals("SELECT concat(l_shipdate, '10')\n" +
"FROM lineitem\n" +
"\tJOIN orders ON l_orderkey = o_orderkey\n" +
"WHERE l_shipdate BETWEEN '1997-01-27' AND '1997-02-20'\n" +
"\tAND json_extract(l_comment, '$.id') = JSON '1997-01-2810'\n" +
"LIMIT 3", stmt.toString());
}
}
|
MySqlSelectTest_165_json
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/serialization/ProxySerializationNoSessionFactoryTest.java
|
{
"start": 3559,
"end": 3930
}
|
class ____ {
@Id
private Long id;
@ManyToOne(fetch = FetchType.LAZY)
@JoinColumn
private SimpleEntity parent;
public Long getId() {
return id;
}
public void setId(final Long id) {
this.id = id;
}
public SimpleEntity getParent() {
return parent;
}
public void setParent(SimpleEntity parent) {
this.parent = parent;
}
}
}
|
ChildEntity
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/entitygraph/EntityGraphNativeQueryTest.java
|
{
"start": 4118,
"end": 4292
}
|
class ____ {
@Id
@GeneratedValue
public Integer id;
@OneToMany(mappedBy = "bar")
public Set<Foo> foos = new HashSet<>();
}
@Entity(name = "Baz")
public static
|
Bar
|
java
|
apache__maven
|
its/core-it-suite/src/test/resources/mng-4331/maven-it-plugin-dependency-collection/src/main/java/org/apache/maven/plugin/coreit/AggregateTestMojo.java
|
{
"start": 2803,
"end": 3364
}
|
class ____ but as a minimum it must not cause an
// exception
project.getTestClasspathElements();
}
} catch (DependencyResolutionRequiredException e) {
throw new MojoExecutionException("Failed to resolve dependencies", e);
}
}
private String filter(String filename, MavenProject project) {
String result = filename;
if (filename != null) {
result = result.replaceAll("@artifactId@", project.getArtifactId());
}
return result;
}
}
|
path
|
java
|
lettuce-io__lettuce-core
|
src/main/java/io/lettuce/core/api/async/BaseRedisAsyncCommands.java
|
{
"start": 1246,
"end": 6321
}
|
interface ____<K, V> {
/**
* Post a message to a channel.
*
* @param channel the channel type: key.
* @param message the message type: value.
* @return Long integer-reply the number of clients that received the message.
*/
RedisFuture<Long> publish(K channel, V message);
/**
* Lists the currently *active channels*.
*
* @return List<K> array-reply a list of active channels, optionally matching the specified pattern.
*/
RedisFuture<List<K>> pubsubChannels();
/**
* Lists the currently *active channels*.
*
* @param channel the key.
* @return List<K> array-reply a list of active channels, optionally matching the specified pattern.
*/
RedisFuture<List<K>> pubsubChannels(K channel);
/**
* Returns the number of subscribers (not counting clients subscribed to patterns) for the specified channels.
*
* @param channels channel keys.
* @return array-reply a list of channels and number of subscribers for every channel.
*/
RedisFuture<Map<K, Long>> pubsubNumsub(K... channels);
/**
* Lists the currently *active shard channels*.
*
* @return List<K> array-reply a list of active channels.
*/
RedisFuture<List<K>> pubsubShardChannels();
/**
* Lists the currently *active shard channels*.
*
* @param pattern the pattern type: patternkey (pattern).
* @return List<K> array-reply a list of active channels, optionally matching the specified pattern.
*/
RedisFuture<List<K>> pubsubShardChannels(K pattern);
/**
* Returns the number of subscribers (not counting clients subscribed to patterns) for the specified shard channels.
*
* @param shardChannels channel keys.
* @return array-reply a list of channels and number of subscribers for every channel.
* @since 6.4
*/
RedisFuture<Map<K, Long>> pubsubShardNumsub(K... shardChannels);
/**
* Returns the number of subscriptions to patterns.
*
* @return Long integer-reply the number of patterns all the clients are subscribed to.
*/
RedisFuture<Long> pubsubNumpat();
/**
* Post a message to a shard channel.
*
* @param shardChannel the shard channel type: key.
* @param message the message type: value.
* @return Long integer-reply the number of clients that received the message.
* @since 6.4
*/
RedisFuture<Long> spublish(K shardChannel, V message);
/**
* Echo the given string.
*
* @param msg the message type: value.
* @return V bulk-string-reply.
*/
RedisFuture<V> echo(V msg);
/**
* Return the role of the instance in the context of replication.
*
* @return List<Object> array-reply where the first element is one of master, slave, sentinel and the additional
* elements are role-specific.
*/
RedisFuture<List<Object>> role();
/**
* Ping the server.
*
* @return String simple-string-reply.
*/
RedisFuture<String> ping();
/**
* Switch connection to Read-Only mode when connecting to a cluster.
*
* @return String simple-string-reply.
*/
RedisFuture<String> readOnly();
/**
* Switch connection to Read-Write mode (default) when connecting to a cluster.
*
* @return String simple-string-reply.
*/
RedisFuture<String> readWrite();
/**
* Instructs Redis to disconnect the connection. Note that if auto-reconnect is enabled then Lettuce will auto-reconnect if
* the connection was disconnected. Use {@link io.lettuce.core.api.StatefulConnection#close} to close connections and
* release resources.
*
* @return String simple-string-reply always OK.
*/
RedisFuture<String> quit();
/**
* Wait for replication.
*
* @param replicas minimum number of replicas.
* @param timeout timeout in milliseconds.
* @return number of replicas.
*/
RedisFuture<Long> waitForReplication(int replicas, long timeout);
/**
* Dispatch a command to the Redis Server. Please note the command output type must fit to the command response.
*
* @param type the command, must not be {@code null}.
* @param output the command output, must not be {@code null}.
* @param <T> response type.
* @return the command response.
*/
<T> RedisFuture<T> dispatch(ProtocolKeyword type, CommandOutput<K, V, T> output);
/**
* Dispatch a command to the Redis Server. Please note the command output type must fit to the command response.
*
* @param type the command, must not be {@code null}.
* @param output the command output, must not be {@code null}.
* @param args the command arguments, must not be {@code null}.
* @param <T> response type.
* @return the command response.
*/
<T> RedisFuture<T> dispatch(ProtocolKeyword type, CommandOutput<K, V, T> output, CommandArgs<K, V> args);
}
|
BaseRedisAsyncCommands
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java
|
{
"start": 3806,
"end": 3925
}
|
class ____.
*/
@InterfaceAudience.LimitedPrivate("management tools")
@InterfaceStability.Evolving
public abstract
|
directly
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/bvt/filter/wall/MinusTest.java
|
{
"start": 168,
"end": 633
}
|
class ____ extends TestCase {
public void test_false() throws Exception {
WallConfig config = new WallConfig();
config.setIntersectAllow(false);
assertFalse(WallUtils.isValidateOracle(//
"SELECT * FROM A Intersect SELECT * FROM B", config));
}
public void test_true() throws Exception {
assertTrue(WallUtils.isValidateOracle(//
"SELECT * FROM A Intersect SELECT * FROM B"));
}
}
|
MinusTest
|
java
|
google__error-prone
|
core/src/main/java/com/google/errorprone/bugpatterns/threadsafety/ImmutableRefactoring.java
|
{
"start": 4484,
"end": 4714
}
|
class ____ annotated with javax.annotation.concurrent.Immutable, but didn't seem"
+ " to be provably immutable."
+ "\n");
}
return describeMatch(immutableImport.get(), fixBuilder.build());
}
}
|
was
|
java
|
spring-projects__spring-boot
|
module/spring-boot-jetty/src/main/java/org/springframework/boot/jetty/metrics/JettyConnectionMetricsBinder.java
|
{
"start": 1045,
"end": 1615
}
|
class ____ extends AbstractJettyMetricsBinder {
private final MeterRegistry meterRegistry;
private final Iterable<Tag> tags;
public JettyConnectionMetricsBinder(MeterRegistry meterRegistry) {
this(meterRegistry, Collections.emptyList());
}
public JettyConnectionMetricsBinder(MeterRegistry meterRegistry, Iterable<Tag> tags) {
this.meterRegistry = meterRegistry;
this.tags = tags;
}
@Override
protected void bindMetrics(Server server) {
JettyConnectionMetrics.addToAllConnectors(server, this.meterRegistry, this.tags);
}
}
|
JettyConnectionMetricsBinder
|
java
|
spring-projects__spring-framework
|
spring-context/src/test/java/org/springframework/validation/beanvalidation/MethodValidationAdapterPropertyPathTests.java
|
{
"start": 6146,
"end": 8160
}
|
class ____ {
@Test
void fieldOfObjectPropertyOfBean() {
Method method = getMethod("getCourse");
Course course = new Course("CS 101", invalidPerson, Collections.emptyList());
MethodValidationResult result =
validationAdapter.validateReturnValue(new MyService(), method, null, course, HINTS);
assertThat(result.getAllErrors()).hasSize(1);
ParameterErrors errors = result.getBeanResults().get(0);
assertSingleFieldError(errors, 1, null, null, null, "professor.name", invalidPerson.name());
}
@Test
void fieldOfObjectPropertyOfListElement() {
Method method = getMethod("addCourseList");
List<Course> courses = List.of(new Course("CS 101", invalidPerson, Collections.emptyList()));
MethodValidationResult result = validationAdapter.validateArguments(
new MyService(), method, null, new Object[] {courses}, HINTS);
assertThat(result.getAllErrors()).hasSize(1);
ParameterErrors errors = result.getBeanResults().get(0);
assertSingleFieldError(errors, 1, courses, 0, null, "professor.name", invalidPerson.name());
}
}
private void assertSingleFieldError(
ParameterErrors errors, int errorCount,
@Nullable Object container, @Nullable Integer index, @Nullable Object key,
String field, Object rejectedValue) {
assertThat(errors.getErrorCount()).isEqualTo(errorCount);
assertThat(errors.getErrorCount()).isEqualTo(1);
assertThat(errors.getContainer()).isEqualTo(container);
assertThat(errors.getContainerIndex()).isEqualTo(index);
assertThat(errors.getContainerKey()).isEqualTo(key);
FieldError fieldError = errors.getFieldError();
assertThat(fieldError).isNotNull();
assertThat(fieldError.getField()).isEqualTo(field);
assertThat(fieldError.getRejectedValue()).isEqualTo(rejectedValue);
}
private static Method getMethod(String methodName) {
return ClassUtils.getMethod(MyService.class, methodName, (Class<?>[]) null);
}
@SuppressWarnings({"unused", "OptionalUsedAsFieldOrParameterType"})
private static
|
ReturnValueTests
|
java
|
spring-projects__spring-framework
|
spring-webflux/src/test/java/org/springframework/web/reactive/result/method/HandlerMethodMappingTests.java
|
{
"start": 8873,
"end": 9165
}
|
class ____ {
@RequestMapping
@SuppressWarnings("unused")
public void handlerMethod1() {
}
@RequestMapping
@SuppressWarnings("unused")
public void handlerMethod2() {
}
@RequestMapping
@CrossOrigin(originPatterns = "*")
public void corsHandlerMethod() {
}
}
}
|
MyHandler
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/jpa/criteria/SuperclassCollectionTest.java
|
{
"start": 4386,
"end": 4681
}
|
class ____ extends PersonBase {
@OneToMany(cascade = CascadeType.ALL)
@JoinTable(name = "person_localaddress")
List<Address> localAddresses = new ArrayList<>();
@Override
public List<Address> getLocalAddresses() {
return localAddresses;
}
}
@MappedSuperclass
public static
|
Person
|
java
|
junit-team__junit5
|
junit-platform-suite-api/src/main/java/org/junit/platform/suite/api/ConfigurationParameter.java
|
{
"start": 1391,
"end": 1708
}
|
interface ____ {
/**
* The configuration parameter key under which to add the {@link #value() value}
* to the discovery request; never {@code null} or blank.
*/
String key();
/**
* The value to add to the discovery request for the specified {@link #key() key}.
*/
String value();
}
|
ConfigurationParameter
|
java
|
spring-projects__spring-framework
|
spring-context/src/test/java/org/springframework/context/event/AbstractApplicationEventListenerTests.java
|
{
"start": 3042,
"end": 3255
}
|
class ____ implements ApplicationListener<GenericTestEvent<String>> {
@Override
public void onApplicationEvent(GenericTestEvent<String> event) {
}
}
@SuppressWarnings("rawtypes")
static
|
StringEventListener
|
java
|
alibaba__nacos
|
core/src/main/java/com/alibaba/nacos/core/remote/ClientConnectionEventListener.java
|
{
"start": 1010,
"end": 2087
}
|
class ____ {
/**
* listener name.
*/
private String name;
@Autowired
protected ClientConnectionEventListenerRegistry clientConnectionEventListenerRegistry;
@PostConstruct
public void init() {
clientConnectionEventListenerRegistry.registerClientConnectionEventListener(this);
}
/**
* Getter method for property <tt>name</tt>.
*
* @return property value of name
*/
public String getName() {
return name;
}
/**
* Setter method for property <tt>name</tt>.
*
* @param name value to be assigned to property name
*/
public void setName(String name) {
this.name = name;
}
/**
* notified when a client connected.
*
* @param connect connect.
*/
public abstract void clientConnected(Connection connect);
/**
* notified when a client disconnected.
*
* @param connect connect.
*/
public abstract void clientDisConnected(Connection connect);
}
|
ClientConnectionEventListener
|
java
|
spring-projects__spring-framework
|
spring-web/src/main/java/org/springframework/web/client/ResponseExtractor.java
|
{
"start": 941,
"end": 1430
}
|
interface ____ the actual work of extracting data
* from a {@link ClientHttpResponse}, but don't need to worry about exception
* handling or closing resources.
*
* <p>Used internally by the {@link RestTemplate}, but also useful for
* application code. There is one available factory method, see
* {@link RestTemplate#responseEntityExtractor(Type)}.
*
* @author Arjen Poutsma
* @since 3.0
* @param <T> the data type
* @see RestTemplate#execute
*/
@FunctionalInterface
public
|
perform
|
java
|
quarkusio__quarkus
|
integration-tests/kubernetes/quarkus-standard-way/src/test/java/io/quarkus/it/kubernetes/OpenshiftV4DeploymentConfigTest.java
|
{
"start": 567,
"end": 3707
}
|
class ____ {
@RegisterExtension
static final QuarkusProdModeTest config = new QuarkusProdModeTest()
.withApplicationRoot((jar) -> jar.addClasses(GreetingResource.class))
.setApplicationName("openshift-v4-deploymentconfig")
.setApplicationVersion("0.1-SNAPSHOT")
.withConfigurationResource("openshift-v4.properties")
.overrideConfigKey("quarkus.openshift.deployment-kind", "deployment-config");
@ProdBuildResults
private ProdModeTestResults prodModeTestResults;
@Test
public void assertGeneratedResources() throws IOException {
Path kubernetesDir = prodModeTestResults.getBuildDir().resolve("kubernetes");
assertThat(kubernetesDir)
.isDirectoryContaining(p -> p.getFileName().endsWith("openshift.json"))
.isDirectoryContaining(p -> p.getFileName().endsWith("openshift.yml"))
.satisfies(p -> assertThat(p.toFile().listFiles()).hasSize(2));
List<HasMetadata> openshiftList = DeserializationUtil
.deserializeAsList(kubernetesDir.resolve("openshift.yml"));
assertThat(openshiftList).filteredOn(h -> "DeploymentConfig".equals(h.getKind())).singleElement().satisfies(h -> {
assertThat(h.getMetadata()).satisfies(m -> {
assertThat(m.getName()).isEqualTo("openshift-v4-deploymentconfig");
assertThat(m.getLabels().get("app.openshift.io/runtime")).isEqualTo("test");
assertThat(m.getLabels().get("app.kubernetes.io/name")).isEqualTo("openshift-v4-deploymentconfig");
assertThat(m.getLabels().get("app")).isNull();
assertThat(m.getNamespace()).isNull();
assertThat(m.getAnnotations().get("app.quarkus.io/quarkus-version")).isNotBlank();
});
AbstractObjectAssert<?, ?> specAssert = assertThat(h).extracting("spec");
specAssert.extracting("selector").isInstanceOfSatisfying(Map.class, selectorsMap -> {
assertThat(selectorsMap).containsOnly(entry("app.kubernetes.io/name", "openshift-v4-deploymentconfig"),
entry("app.kubernetes.io/version", "0.1-SNAPSHOT"));
});
});
assertThat(openshiftList).filteredOn(h -> "Service".equals(h.getKind())).singleElement().satisfies(h -> {
assertThat(h).isInstanceOfSatisfying(Service.class, s -> {
assertThat(s.getMetadata()).satisfies(m -> {
assertThat(m.getNamespace()).isNull();
assertThat(m.getLabels().get("app.kubernetes.io/name")).isEqualTo("openshift-v4-deploymentconfig");
assertThat(m.getLabels().get("app")).isNull();
});
assertThat(s.getSpec()).satisfies(spec -> {
assertThat(spec.getSelector()).containsOnly(
entry("app.kubernetes.io/name", "openshift-v4-deploymentconfig"),
entry("app.kubernetes.io/version", "0.1-SNAPSHOT"));
});
});
});
}
}
|
OpenshiftV4DeploymentConfigTest
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/inheritance/discriminator/JoinedInheritanceForceDiscriminatorTest.java
|
{
"start": 1651,
"end": 4838
}
|
class ____ {
@BeforeEach
public void setUp(SessionFactoryScope scope) {
scope.inTransaction( session -> {
final ElementEntity element = new ElementEntity( 1L, "element_1" );
session.persist( element );
final AnotherEntity another = new AnotherEntity( 2L, "another_2" );
session.persist( another );
final ElementGroup elementGroup = new ElementGroup( 3L );
elementGroup.addElement( element );
session.persist( elementGroup );
} );
scope.inTransaction( session -> {
// Emulate association with AnotherEntity on the same element_table
session.createNativeMutationQuery( "update element_table set group_id = 3 where id = 2" ).executeUpdate();
} );
}
@AfterEach
public void tearDown(SessionFactoryScope scope) {
scope.dropData();
}
@Test
public void testFindAndLoad(SessionFactoryScope scope) {
final SQLStatementInspector inspector = scope.getCollectingStatementInspector();
scope.inTransaction( session -> {
final ElementGroup group = session.find( ElementGroup.class, 3L );
inspector.clear();
final List<ElementEntity> elements = group.getElements();
assertThat( Hibernate.isInitialized( elements ) ).isFalse();
assertThat( elements ).hasSize( 1 );
inspector.assertNumberOfOccurrenceInQueryNoSpace( 0, "disc_col", 1 );
final CommonBase commonBase = elements.get( 0 );
assertThat( commonBase.getId() ).isEqualTo( 1L );
assertThat( commonBase.getName() ).isEqualTo( "element_1" );
} );
}
@Test
public void testQueryAndLoad(SessionFactoryScope scope) {
final SQLStatementInspector inspector = scope.getCollectingStatementInspector();
scope.inTransaction( session -> {
final ElementGroup group = session.createQuery(
"from ElementGroup",
ElementGroup.class
).getSingleResult();
inspector.clear();
final List<ElementEntity> elements = group.getElements();
assertThat( Hibernate.isInitialized( elements ) ).isFalse();
assertThat( elements ).hasSize( 1 );
inspector.assertNumberOfOccurrenceInQueryNoSpace( 0, "disc_col", 1 );
final CommonBase commonBase = elements.get( 0 );
assertThat( commonBase.getId() ).isEqualTo( 1L );
assertThat( commonBase.getName() ).isEqualTo( "element_1" );
} );
}
@Test
public void testQueryAndJoinFetch(SessionFactoryScope scope) {
final SQLStatementInspector inspector = scope.getCollectingStatementInspector();
scope.inTransaction( session -> {
final ElementGroup group = session.createQuery(
"from ElementGroup g join fetch g.elements",
ElementGroup.class
).getSingleResult();
inspector.assertNumberOfOccurrenceInQueryNoSpace( 0, "disc_col", 1 );
final List<ElementEntity> elements = group.getElements();
assertThat( Hibernate.isInitialized( elements ) ).isTrue();
assertThat( elements ).hasSize( 1 );
final CommonBase commonBase = elements.get( 0 );
assertThat( commonBase.getId() ).isEqualTo( 1L );
assertThat( commonBase.getName() ).isEqualTo( "element_1" );
} );
}
@Inheritance( strategy = InheritanceType.JOINED )
@DiscriminatorColumn( name = "disc_col" )
@DiscriminatorOptions( force = true )
@Entity( name = "CommonBase" )
public static
|
JoinedInheritanceForceDiscriminatorTest
|
java
|
google__dagger
|
javatests/dagger/functional/builder/BuilderTest.java
|
{
"start": 6680,
"end": 6836
}
|
interface ____ {
String s();
int i();
long l();
float f();
double d();
abstract static
|
TestComponentWithGenericBuilderAbstractClass
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/stats/StatisticsWithNoCachingTest.java
|
{
"start": 954,
"end": 1339
}
|
class ____ implements SettingProvider.Provider<String> {
@Override
public String getSetting() {
return NoCachingRegionFactory.class.getName();
}
}
@Test
@JiraKey(value = "HHH-12508")
public void testUncachedRegion(SessionFactoryScope scope) {
scope.getSessionFactory().getStatistics().getCacheRegionStatistics( "hibernate.test.unknown" );
}
}
|
RegionFactorySettingProvider
|
java
|
micronaut-projects__micronaut-core
|
test-suite/src/test/java/io/micronaut/docs/config/env/EachPropertyTest.java
|
{
"start": 1158,
"end": 2901
}
|
class ____ {
@Test
void testEachProperty() throws URISyntaxException {
// tag::config[]
ApplicationContext applicationContext = ApplicationContext.run(PropertySource.of(
"test",
CollectionUtils.mapOf(
"test.datasource.one.url", "jdbc:mysql://localhost/one",
"test.datasource.two.url", "jdbc:mysql://localhost/two")
));
// end::config[]
// tag::beans[]
Collection<DataSourceConfiguration> beansOfType = applicationContext.getBeansOfType(DataSourceConfiguration.class);
assertEquals(2, beansOfType.size()); // <1>
DataSourceConfiguration firstConfig = applicationContext.getBean(
DataSourceConfiguration.class,
Qualifiers.byName("one") // <2>
);
assertEquals(new URI("jdbc:mysql://localhost/one"), firstConfig.getUrl());
// end::beans[]
applicationContext.close();
}
@Test
void testEachPropertyList() {
List<Map> limits = new ArrayList<>();
limits.add(CollectionUtils.mapOf("period", "10s", "limit", "1000"));
limits.add(CollectionUtils.mapOf("period", "1m", "limit", "5000"));
ApplicationContext applicationContext = ApplicationContext.run(Collections.singletonMap("ratelimits", limits));
List<RateLimitsConfiguration> beansOfType = applicationContext.streamOfType(RateLimitsConfiguration.class).collect(Collectors.toList());
assertEquals(2, beansOfType.size());
assertEquals(1000, beansOfType.get(0).getLimit().intValue());
assertEquals(5000, beansOfType.get(1).getLimit().intValue());
applicationContext.close();
}
}
|
EachPropertyTest
|
java
|
apache__camel
|
components/camel-kubernetes/src/main/java/org/apache/camel/component/kubernetes/customresources/KubernetesCustomResourcesProducer.java
|
{
"start": 2065,
"end": 12072
}
|
class ____ extends DefaultProducer {
private static final Logger LOG = LoggerFactory.getLogger(KubernetesCustomResourcesProducer.class);
public KubernetesCustomResourcesProducer(AbstractKubernetesEndpoint endpoint) {
super(endpoint);
}
@Override
public AbstractKubernetesEndpoint getEndpoint() {
return (AbstractKubernetesEndpoint) super.getEndpoint();
}
@Override
public void process(Exchange exchange) throws Exception {
String operation = KubernetesHelper.extractOperation(getEndpoint(), exchange);
String namespace;
if (ObjectHelper.isEmpty(getEndpoint().getKubernetesConfiguration().getNamespace())) {
namespace = exchange.getIn().getHeader(KubernetesConstants.KUBERNETES_NAMESPACE_NAME, String.class);
} else {
namespace = getEndpoint().getKubernetesConfiguration().getNamespace();
}
if (ObjectHelper.isEmpty(namespace)) {
throw new IllegalArgumentException("Custom Resource producer requires a namespace argument");
}
switch (operation) {
case KubernetesOperations.LIST_CUSTOMRESOURCES:
doList(exchange, namespace);
break;
case KubernetesOperations.LIST_CUSTOMRESOURCES_BY_LABELS_OPERATION:
doListByLabels(exchange, namespace);
break;
case KubernetesOperations.GET_CUSTOMRESOURCE:
doGet(exchange, namespace);
break;
case KubernetesOperations.DELETE_CUSTOMRESOURCE:
doDelete(exchange, namespace);
break;
case KubernetesOperations.CREATE_CUSTOMRESOURCE:
doCreate(exchange, namespace);
break;
case KubernetesOperations.UPDATE_CUSTOMRESOURCE:
doUpdate(exchange, namespace);
break;
default:
throw new IllegalArgumentException("Unsupported operation " + operation);
}
}
protected void doList(Exchange exchange, String namespaceName) {
CustomResourceDefinitionContext context = getCRDContext(exchange.getIn());
GenericKubernetesResourceList list = getEndpoint().getKubernetesClient()
.genericKubernetesResources(context)
.inNamespace(namespaceName)
.list();
if (LOG.isDebugEnabled()) {
LOG.debug(Serialization.asJson(list));
}
JsonArray customResourcesListItems;
if (list.getItems() != null) {
customResourcesListItems = new JsonArray(list.getItems());
} else {
customResourcesListItems = new JsonArray();
}
prepareOutboundMessage(exchange, customResourcesListItems);
}
protected void doListByLabels(Exchange exchange, String namespaceName) {
Map<String, String> labels = exchange.getIn().getHeader(KubernetesConstants.KUBERNETES_CRD_LABELS, Map.class);
GenericKubernetesResourceList list = getEndpoint().getKubernetesClient()
.genericKubernetesResources(getCRDContext(exchange.getIn()))
.inNamespace(namespaceName)
.withLabels(labels)
.list();
if (LOG.isDebugEnabled()) {
LOG.debug(Serialization.asJson(list));
}
JsonArray customResourcesListItems = new JsonArray(list.getItems());
prepareOutboundMessage(exchange, customResourcesListItems);
}
protected void doGet(Exchange exchange, String namespaceName) {
String customResourceName = exchange.getIn().getHeader(KubernetesConstants.KUBERNETES_CRD_INSTANCE_NAME, String.class);
if (ObjectHelper.isEmpty(customResourceName)) {
throw new IllegalArgumentException("Get a specific custom resource require specify a custom resource name");
}
JsonObject customResourceJSON = new JsonObject();
try {
customResourceJSON = new JsonObject(
getEndpoint().getKubernetesClient().genericKubernetesResources(getCRDContext(exchange.getIn()))
.inNamespace(namespaceName)
.withName(customResourceName)
.require()
.get());
} catch (KubernetesClientException e) {
if (e.getCode() == 404) {
LOG.info("Custom resource instance not found", e);
} else {
throw e;
}
}
prepareOutboundMessage(exchange, customResourceJSON);
}
protected void doDelete(Exchange exchange, String namespaceName) {
String customResourceName = exchange.getIn().getHeader(KubernetesConstants.KUBERNETES_CRD_INSTANCE_NAME, String.class);
if (ObjectHelper.isEmpty(customResourceName)) {
LOG.error("Deleting a specific custom resource require specify a custom resource name");
throw new IllegalArgumentException("Deleting a specific custom resource require specify a custom resource name");
}
try {
List<StatusDetails> statusDetails
= getEndpoint().getKubernetesClient().genericKubernetesResources(getCRDContext(exchange.getIn()))
.inNamespace(namespaceName).withName(customResourceName).delete();
boolean deleted = ObjectHelper.isNotEmpty(statusDetails);
exchange.getMessage().setHeader(KubernetesConstants.KUBERNETES_DELETE_RESULT, deleted);
} catch (KubernetesClientException e) {
if (e.getCode() == 404) {
LOG.info("Custom resource instance not found", e);
} else {
throw e;
}
}
}
protected void doUpdate(Exchange exchange, String namespaceName) {
doCreateOrUpdate(exchange, namespaceName, Resource::update);
}
protected void doCreate(Exchange exchange, String namespaceName) {
doCreateOrUpdate(exchange, namespaceName, Resource::create);
}
private void doCreateOrUpdate(
Exchange exchange, String namespaceName,
Function<Resource<GenericKubernetesResource>, GenericKubernetesResource> operation) {
String customResourceInstance = exchange.getIn().getHeader(KubernetesConstants.KUBERNETES_CRD_INSTANCE, String.class);
GenericKubernetesResource customResource = new GenericKubernetesResource();
try {
customResource = operation.apply(getEndpoint().getKubernetesClient()
.genericKubernetesResources(getCRDContext(exchange.getIn()))
.inNamespace(namespaceName)
.resource(Serialization.unmarshal(customResourceInstance, GenericKubernetesResource.class)));
} catch (KubernetesClientException e) {
if (e.getCode() == 409) {
LOG.info("Custom resource instance already exists", e);
} else {
throw e;
}
}
prepareOutboundMessage(exchange, customResource);
}
private CustomResourceDefinitionContext getCRDContext(Message message) {
String name;
String group;
String scope;
String version;
String plural;
if (ObjectHelper.isEmpty(getEndpoint().getKubernetesConfiguration().getCrdName())) {
name = message.getHeader(KubernetesConstants.KUBERNETES_CRD_NAME, String.class);
} else {
name = getEndpoint().getKubernetesConfiguration().getCrdName();
}
if (ObjectHelper.isEmpty(getEndpoint().getKubernetesConfiguration().getCrdGroup())) {
group = message.getHeader(KubernetesConstants.KUBERNETES_CRD_GROUP, String.class);
} else {
group = getEndpoint().getKubernetesConfiguration().getCrdGroup();
}
if (ObjectHelper.isEmpty(getEndpoint().getKubernetesConfiguration().getCrdScope())) {
scope = message.getHeader(KubernetesConstants.KUBERNETES_CRD_SCOPE, String.class);
} else {
scope = getEndpoint().getKubernetesConfiguration().getCrdScope();
}
if (ObjectHelper.isEmpty(getEndpoint().getKubernetesConfiguration().getCrdVersion())) {
version = message.getHeader(KubernetesConstants.KUBERNETES_CRD_VERSION, String.class);
} else {
version = getEndpoint().getKubernetesConfiguration().getCrdVersion();
}
if (ObjectHelper.isEmpty(getEndpoint().getKubernetesConfiguration().getCrdPlural())) {
plural = message.getHeader(KubernetesConstants.KUBERNETES_CRD_PLURAL, String.class);
} else {
plural = getEndpoint().getKubernetesConfiguration().getCrdPlural();
}
if (ObjectHelper.isEmpty(name) || ObjectHelper.isEmpty(group) || ObjectHelper.isEmpty(scope)
|| ObjectHelper.isEmpty(version) || ObjectHelper.isEmpty(plural)) {
LOG.error("one of more of the custom resource definition argument(s) are missing.");
throw new IllegalArgumentException("one of more of the custom resource definition argument(s) are missing.");
}
return new CustomResourceDefinitionContext.Builder()
.withName(message.getHeader(KubernetesConstants.KUBERNETES_CRD_NAME, String.class)) // example: "githubsources.sources.knative.dev"
.withGroup(message.getHeader(KubernetesConstants.KUBERNETES_CRD_GROUP, String.class)) // example: "sources.knative.dev"
.withScope(message.getHeader(KubernetesConstants.KUBERNETES_CRD_SCOPE, String.class)) // example: "Namespaced"
.withVersion(message.getHeader(KubernetesConstants.KUBERNETES_CRD_VERSION, String.class)) // example: "v1alpha1"
.withPlural(message.getHeader(KubernetesConstants.KUBERNETES_CRD_PLURAL, String.class)) // example: "githubsources"
.build();
}
}
|
KubernetesCustomResourcesProducer
|
java
|
grpc__grpc-java
|
alts/src/main/java/io/grpc/alts/internal/RpcProtocolVersionsUtil.java
|
{
"start": 843,
"end": 3545
}
|
class ____ {
private static final int MAX_RPC_VERSION_MAJOR = 2;
private static final int MAX_RPC_VERSION_MINOR = 1;
private static final int MIN_RPC_VERSION_MAJOR = 2;
private static final int MIN_RPC_VERSION_MINOR = 1;
private static final RpcProtocolVersions RPC_PROTOCOL_VERSIONS =
RpcProtocolVersions.newBuilder()
.setMaxRpcVersion(
RpcProtocolVersions.Version.newBuilder()
.setMajor(MAX_RPC_VERSION_MAJOR)
.setMinor(MAX_RPC_VERSION_MINOR)
.build())
.setMinRpcVersion(
RpcProtocolVersions.Version.newBuilder()
.setMajor(MIN_RPC_VERSION_MAJOR)
.setMinor(MIN_RPC_VERSION_MINOR)
.build())
.build();
/** Returns default Rpc Protocol Versions. */
public static RpcProtocolVersions getRpcProtocolVersions() {
return RPC_PROTOCOL_VERSIONS;
}
/**
* Returns true if first Rpc Protocol Version is greater than or equal to the second one. Returns
* false otherwise.
*/
@VisibleForTesting
static boolean isGreaterThanOrEqualTo(Version first, Version second) {
if ((first.getMajor() > second.getMajor())
|| (first.getMajor() == second.getMajor() && first.getMinor() >= second.getMinor())) {
return true;
}
return false;
}
/**
* Performs check between local and peer Rpc Protocol Versions. This function returns true and the
* highest common version if there exists a common Rpc Protocol Version to use, and returns false
* and null otherwise.
*/
static RpcVersionsCheckResult checkRpcProtocolVersions(
RpcProtocolVersions localVersions, RpcProtocolVersions peerVersions) {
Version maxCommonVersion;
Version minCommonVersion;
// maxCommonVersion is MIN(local.max, peer.max)
if (isGreaterThanOrEqualTo(localVersions.getMaxRpcVersion(), peerVersions.getMaxRpcVersion())) {
maxCommonVersion = peerVersions.getMaxRpcVersion();
} else {
maxCommonVersion = localVersions.getMaxRpcVersion();
}
// minCommonVersion is MAX(local.min, peer.min)
if (isGreaterThanOrEqualTo(localVersions.getMinRpcVersion(), peerVersions.getMinRpcVersion())) {
minCommonVersion = localVersions.getMinRpcVersion();
} else {
minCommonVersion = peerVersions.getMinRpcVersion();
}
if (isGreaterThanOrEqualTo(maxCommonVersion, minCommonVersion)) {
return new RpcVersionsCheckResult.Builder()
.setResult(true)
.setHighestCommonVersion(maxCommonVersion)
.build();
}
return new RpcVersionsCheckResult.Builder().setResult(false).build();
}
/** Wrapper
|
RpcProtocolVersionsUtil
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/component/cascading/toone/CascadeToComponentAssociationTest.java
|
{
"start": 573,
"end": 1959
}
|
class ____ {
@Test
public void testMerging(SessionFactoryScope scope) {
// step1, we create a document with owner
Document doc = new Document();
scope.inTransaction( session -> {
User user = new User();
doc.setOwner( user );
session.persist( doc );
}
);
// step2, we verify that the document has owner and that owner has no personal-info; then we detach
Document d = scope.fromTransaction( session -> {
Document document = session.find( Document.class, doc.getId() );
assertThat( document.getOwner() ).isNotNull();
assertThat( document.getOwner().getPersonalInfo() ).isNull();
return document;
}
);
// step3, try to specify the personal-info during detachment
Address addr = new Address();
addr.setStreet1( "123 6th St" );
addr.setCity( "Austin" );
addr.setState( "TX" );
d.getOwner().setPersonalInfo( new PersonalInfo( addr ) );
// step4 we merge the document
scope.inTransaction( session ->
session.merge( d )
);
// step5, final test
scope.inTransaction(
session -> {
Document document = session.find( Document.class, d.getId() );
assertThat( document.getOwner() ).isNotNull();
assertThat( document.getOwner().getPersonalInfo() ).isNotNull();
assertThat( document.getOwner().getPersonalInfo().getHomeAddress() ).isNotNull();
}
);
}
}
|
CascadeToComponentAssociationTest
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/common/ParsingException.java
|
{
"start": 1051,
"end": 3329
}
|
class ____ extends ElasticsearchException {
protected static final int UNKNOWN_POSITION = -1;
private final int lineNumber;
private final int columnNumber;
public ParsingException(XContentLocation contentLocation, String msg, Object... args) {
this(contentLocation, msg, null, args);
}
public ParsingException(XContentLocation contentLocation, String msg, Throwable cause, Object... args) {
super(msg, cause, args);
int lineNumber = UNKNOWN_POSITION;
int columnNumber = UNKNOWN_POSITION;
if (contentLocation != null) {
lineNumber = contentLocation.lineNumber();
columnNumber = contentLocation.columnNumber();
}
this.columnNumber = columnNumber;
this.lineNumber = lineNumber;
}
/**
* This constructor is provided for use in unit tests where a
* {@link XContentParser} may not be available
*/
public ParsingException(int line, int col, String msg, Throwable cause) {
super(msg, cause);
this.lineNumber = line;
this.columnNumber = col;
}
public ParsingException(StreamInput in) throws IOException {
super(in);
lineNumber = in.readInt();
columnNumber = in.readInt();
}
/**
* Line number of the location of the error
*
* @return the line number or -1 if unknown
*/
public int getLineNumber() {
return lineNumber;
}
/**
* Column number of the location of the error
*
* @return the column number or -1 if unknown
*/
public int getColumnNumber() {
return columnNumber;
}
@Override
public RestStatus status() {
return RestStatus.BAD_REQUEST;
}
@Override
protected void metadataToXContent(XContentBuilder builder, Params params) throws IOException {
if (lineNumber != UNKNOWN_POSITION) {
builder.field("line", lineNumber);
builder.field("col", columnNumber);
}
}
@Override
protected void writeTo(StreamOutput out, Writer<Throwable> nestedExceptionsWriter) throws IOException {
super.writeTo(out, nestedExceptionsWriter);
out.writeInt(lineNumber);
out.writeInt(columnNumber);
}
}
|
ParsingException
|
java
|
assertj__assertj-core
|
assertj-core/src/test/java/org/assertj/core/api/hashset/HashSetAssert_startsWith_Test.java
|
{
"start": 1255,
"end": 2914
}
|
class ____ extends HashSetAssertBaseTest {
@Override
protected HashSetAssert<Object> invoke_api_method() {
return assertions.startsWith(someValues);
}
@Override
protected void verify_internal_effects() {
verify(iterables).assertStartsWith(getInfo(assertions), getActual(assertions), someValues);
}
@HashSetTest
void should_pass(HashSetFactory hashSetFactory) {
// GIVEN
Date first = Date.from(EPOCH.plusSeconds(1));
Date second = Date.from(EPOCH.plusSeconds(2));
Date third = Date.from(EPOCH.plusSeconds(3));
HashSet<Date> dates = hashSetFactory.createWith(first, second, third);
// WHEN
Date[] exactElements = dates.toArray(new Date[0]);
// THEN
then(dates).startsWith(exactElements);
}
@Test
void should_fail_after_hashCode_changed() {
// GIVEN
Date first = Date.from(EPOCH.plusSeconds(1));
Date second = Date.from(EPOCH.plusSeconds(2));
Date third = Date.from(EPOCH.plusSeconds(3));
HashSet<Date> dates = newLinkedHashSet(first, second, third);
first.setTime(4_000);
second.setTime(5_000);
// WHEN
var assertionError = expectAssertionError(() -> assertThat(dates).startsWith(first, second));
// THEN
then(assertionError).hasMessageContainingAll(STANDARD_REPRESENTATION.toStringOf(first),
STANDARD_REPRESENTATION.toStringOf(second),
"(elements were checked as in HashSet, as soon as their hashCode change, the HashSet won't find them anymore - use skippingHashCodeComparison to get a collection like comparison)");
}
}
|
HashSetAssert_startsWith_Test
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ADataBlocks.java
|
{
"start": 6835,
"end": 7616
}
|
class ____ implements Closeable {
/**
* Store context; left as "owner" for historical reasons.
*/
private final StoreContext owner;
protected BlockFactory(StoreContext owner) {
this.owner = owner;
}
/**
* Create a block.
*
* @param index index of block
* @param limit limit of the block.
* @param statistics stats to work with
* @return a new block.
* @throws IOException any failure to create a block in the local FS.
* @throws OutOfMemoryError lack of space to create any memory buffer
*/
abstract DataBlock create(long index, long limit,
BlockOutputStreamStatistics statistics)
throws IOException;
/**
* Implement any close/cleanup operation.
* Base
|
BlockFactory
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/cdi/events/nocdi/ValidNoCdiSupportTest.java
|
{
"start": 2033,
"end": 2430
}
|
class ____ {
private Integer id;
private String name;
public AnotherEntity() {
}
public AnotherEntity(Integer id) {
this.id = id;
}
@Id
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
public static
|
AnotherEntity
|
java
|
playframework__playframework
|
dev-mode/play-run-support/src/main/java/play/runsupport/RunHookCompositeThrowable.java
|
{
"start": 306,
"end": 950
}
|
class ____ extends RuntimeException {
public RunHookCompositeThrowable(Collection<Throwable> throwables) {
super(
"Multiple exceptions thrown during RunHook run: "
+ throwables.stream()
.map(
t ->
"\n\n"
+ t
+ Arrays.stream(t.getStackTrace())
.limit(10)
.map(Objects::toString)
.collect(Collectors.joining("\n", "\n", "\n...")))
.collect(Collectors.joining()));
}
}
|
RunHookCompositeThrowable
|
java
|
apache__kafka
|
clients/src/main/java/org/apache/kafka/common/config/provider/FileConfigProvider.java
|
{
"start": 1507,
"end": 5170
}
|
class ____ implements ConfigProvider {
private static final Logger log = LoggerFactory.getLogger(FileConfigProvider.class);
public static final String ALLOWED_PATHS_CONFIG = "allowed.paths";
public static final String ALLOWED_PATHS_DOC = "A comma separated list of paths that this config provider is " +
"allowed to access. If not set, all paths are allowed.";
private volatile AllowedPaths allowedPaths;
public void configure(Map<String, ?> configs) {
allowedPaths = new AllowedPaths((String) configs.getOrDefault(ALLOWED_PATHS_CONFIG, null));
}
/**
* Retrieves the data at the given Properties file.
*
* @param path the file where the data resides
* @return the configuration data
*/
public ConfigData get(String path) {
if (allowedPaths == null) {
throw new IllegalStateException("The provider has not been configured yet.");
}
Map<String, String> data = new HashMap<>();
if (path == null || path.isEmpty()) {
return new ConfigData(data);
}
Path filePath = allowedPaths.parseUntrustedPath(path);
if (filePath == null) {
log.warn("The path {} is not allowed to be accessed", path);
return new ConfigData(data);
}
try (Reader reader = reader(filePath)) {
Properties properties = new Properties();
properties.load(reader);
Enumeration<Object> keys = properties.keys();
while (keys.hasMoreElements()) {
String key = keys.nextElement().toString();
String value = properties.getProperty(key);
if (value != null) {
data.put(key, value);
}
}
return new ConfigData(data);
} catch (IOException e) {
log.error("Could not read properties from file {}", path, e);
throw new ConfigException("Could not read properties from file " + path);
}
}
/**
* Retrieves the data with the given keys at the given Properties file.
*
* @param path the file where the data resides
* @param keys the keys whose values will be retrieved
* @return the configuration data
*/
public ConfigData get(String path, Set<String> keys) {
if (allowedPaths == null) {
throw new IllegalStateException("The provider has not been configured yet.");
}
Map<String, String> data = new HashMap<>();
if (path == null || path.isEmpty()) {
return new ConfigData(data);
}
Path filePath = allowedPaths.parseUntrustedPath(path);
if (filePath == null) {
log.warn("The path {} is not allowed to be accessed", path);
return new ConfigData(data);
}
try (Reader reader = reader(filePath)) {
Properties properties = new Properties();
properties.load(reader);
for (String key : keys) {
String value = properties.getProperty(key);
if (value != null) {
data.put(key, value);
}
}
return new ConfigData(data);
} catch (IOException e) {
log.error("Could not read properties from file {}", path, e);
throw new ConfigException("Could not read properties from file " + path);
}
}
// visible for testing
protected Reader reader(Path path) throws IOException {
return Files.newBufferedReader(path, StandardCharsets.UTF_8);
}
public void close() {
}
}
|
FileConfigProvider
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/bvt/pool/exception/OracleExceptionSorterTest_setSavepointWithName.java
|
{
"start": 529,
"end": 2487
}
|
class ____ extends TestCase {
private DruidDataSource dataSource;
protected void setUp() throws Exception {
assertEquals(0, JdbcStatManager.getInstance().getSqlList().size());
dataSource = new DruidDataSource();
dataSource.setExceptionSorter(new OracleExceptionSorter());
dataSource.setDriver(new OracleMockDriver());
dataSource.setUrl("jdbc:mock:xxx");
dataSource.setFilters("log4j");
dataSource.setPoolPreparedStatements(true);
dataSource.setMaxOpenPreparedStatements(100);
}
protected void tearDown() throws Exception {
JdbcUtils.close(dataSource);
}
public void test_connect() throws Exception {
String sql = "SELECT 1";
{
DruidPooledConnection conn = dataSource.getConnection();
PreparedStatement pstmt = conn.prepareStatement(sql);
pstmt.execute();
pstmt.close();
conn.close();
assertEquals(0, dataSource.getActiveCount());
assertEquals(1, dataSource.getPoolingCount());
assertEquals(1, dataSource.getCreateCount());
}
DruidPooledConnection conn = dataSource.getConnection();
MockConnection mockConn = conn.unwrap(MockConnection.class);
assertNotNull(mockConn);
SQLException exception = new SQLException("xx", "xxx", 28);
mockConn.setError(exception);
Exception setError = null;
try {
conn.setSavepoint("xxx");
} catch (Exception ex) {
setError = ex;
}
assertNotNull(setError);
conn.close();
{
Connection conn2 = dataSource.getConnection();
conn2.close();
}
assertEquals(0, dataSource.getActiveCount());
assertTrue(dataSource.getPoolingCount() >= 1);
assertTrue(dataSource.getCreateCount() >= 2);
}
}
|
OracleExceptionSorterTest_setSavepointWithName
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/bvt/sql/oracle/select/OracleSelectTest128.java
|
{
"start": 1002,
"end": 5530
}
|
class ____ extends TestCase {
public void test_0() throws Exception {
String sql = "SELECT id,\n" +
" MAX(myRank) area\n" +
"FROM\n" +
" (SELECT t2.id ,\n" +
" wmsys.wm_concat(t2.name) over (partition BY t2.id order by to_number(t2.cc) ASC ) myRank\n" +
" FROM\n" +
" (SELECT adr.name,\n" +
" t1.*\n" +
" FROM\n" +
" (SELECT t.id,\n" +
" t.addresscode,\n" +
" regexp_substr(t.addresscode, '[^_]+', 1, x.n) cc\n" +
" FROM\n" +
" ( SELECT id,addresscode FROM srm1.CONSIGNEE_ADDRESS ca\n" +
" ) t,\n" +
" (SELECT ROWNUM n FROM dual CONNECT BY ROWNUM <= 5\n" +
" ) x\n" +
" ORDER BY 1\n" +
" ) t1\n" +
" LEFT JOIN srm1.address adr\n" +
" ON adr.id = t1.cc\n" +
" WHERE t1.cc IS NOT NULL\n" +
" ) t2\n" +
" )\n" +
"GROUP BY id";
List<SQLStatement> statementList = SQLUtils.parseStatements(sql, JdbcConstants.ORACLE);
assertEquals(1, statementList.size());
SQLSelectStatement stmt = (SQLSelectStatement) statementList.get(0);
assertEquals("SELECT id, MAX(myRank) AS area\n" +
"FROM (\n" +
"\tSELECT t2.id, wmsys.wm_concat(t2.name) OVER (PARTITION BY t2.id ORDER BY to_number(t2.cc) ASC) AS myRank\n" +
"\tFROM (\n" +
"\t\tSELECT adr.name, t1.*\n" +
"\t\tFROM (\n" +
"\t\t\tSELECT t.id, t.addresscode\n" +
"\t\t\t\t, regexp_substr(t.addresscode, '[^_]+', 1, x.n) AS cc\n" +
"\t\t\tFROM (\n" +
"\t\t\t\tSELECT id, addresscode\n" +
"\t\t\t\tFROM srm1.CONSIGNEE_ADDRESS ca\n" +
"\t\t\t) t, (\n" +
"\t\t\t\tSELECT ROWNUM AS n\n" +
"\t\t\t\tFROM dual\n" +
"\t\t\t\tCONNECT BY ROWNUM <= 5\n" +
"\t\t\t) x\n" +
"\t\t\tORDER BY 1\n" +
"\t\t) t1\n" +
"\t\t\tLEFT JOIN srm1.address adr ON adr.id = t1.cc \n" +
"\t\tWHERE t1.cc IS NOT NULL\n" +
"\t) t2\n" +
")\n" +
"GROUP BY id", stmt.toString());
assertEquals("select id, max(myRank) as area\n" +
"from (\n" +
"\tselect t2.id, wmsys.wm_concat(t2.name) over (partition by t2.id order by to_number(t2.cc) asc) as myRank\n" +
"\tfrom (\n" +
"\t\tselect adr.name, t1.*\n" +
"\t\tfrom (\n" +
"\t\t\tselect t.id, t.addresscode\n" +
"\t\t\t\t, regexp_substr(t.addresscode, '[^_]+', 1, x.n) as cc\n" +
"\t\t\tfrom (\n" +
"\t\t\t\tselect id, addresscode\n" +
"\t\t\t\tfrom srm1.CONSIGNEE_ADDRESS ca\n" +
"\t\t\t) t, (\n" +
"\t\t\t\tselect ROWNUM as n\n" +
"\t\t\t\tfrom dual\n" +
"\t\t\t\tconnect by ROWNUM <= 5\n" +
"\t\t\t) x\n" +
"\t\t\torder by 1\n" +
"\t\t) t1\n" +
"\t\t\tleft join srm1.address adr on adr.id = t1.cc \n" +
"\t\twhere t1.cc is not null\n" +
"\t) t2\n" +
")\n" +
"group by id", stmt.toLowerCaseString());
OracleSchemaStatVisitor visitor = new OracleSchemaStatVisitor();
stmt.accept(visitor);
System.out.println("Tables : " + visitor.getTables());
System.out.println("fields : " + visitor.getColumns());
System.out.println("coditions : " + visitor.getConditions());
System.out.println("relationships : " + visitor.getRelationships());
System.out.println("orderBy : " + visitor.getOrderByColumns());
assertEquals(2, visitor.getTables().size());
assertEquals(4, visitor.getColumns().size());
assertEquals(2, visitor.getConditions().size());
assertEquals(0, visitor.getRelationships().size());
assertEquals(1, visitor.getOrderByColumns().size());
assertTrue(visitor.containsColumn("srm1.CONSIGNEE_ADDRESS", "id"));
}
}
|
OracleSelectTest128
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/deser/jdk/MapDeserializationTest.java
|
{
"start": 4451,
"end": 9488
}
|
class
____ JSON = "{ \"a\" : \"x\" }";
@SuppressWarnings("unchecked")
HashMap<String,Object> result = /*(HashMap<String,Object>)*/ MAPPER.readValue(JSON, HashMap.class);
assertNotNull(result);
assertTrue(result instanceof Map<?,?>);
assertEquals(1, result.size());
assertEquals("x", result.get("a"));
}
/**
* Unit test for [JACKSON-185]
*/
@Test
public void testUntypedMap3() throws Exception
{
String JSON = "{\"a\":[{\"a\":\"b\"},\"value\"]}";
Map<?,?> result = MAPPER.readValue(JSON, Map.class);
assertTrue(result instanceof Map<?,?>);
assertEquals(1, result.size());
Object ob = result.get("a");
assertNotNull(ob);
Collection<?> list = (Collection<?>)ob;
assertEquals(2, list.size());
JSON = "{ \"var1\":\"val1\", \"var2\":\"val2\", "
+"\"subvars\": ["
+" { \"subvar1\" : \"subvar2\", \"x\" : \"y\" }, "
+" { \"a\":1 } ]"
+" }"
;
result = MAPPER.readValue(JSON, Map.class);
assertTrue(result instanceof Map<?,?>);
assertEquals(3, result.size());
}
private static final String UNTYPED_MAP_JSON =
"{ \"double\":42.0, \"string\":\"string\","
+"\"boolean\":true, \"list\":[\"list0\"],"
+"\"null\":null }";
@Test
public void testSpecialMap() throws IOException
{
final ObjectWrapperMap map = MAPPER.readValue(UNTYPED_MAP_JSON, ObjectWrapperMap.class);
assertNotNull(map);
_doTestUntyped(map);
}
@Test
public void testGenericMap() throws IOException
{
final Map<String, ObjectWrapper> map = MAPPER.readValue
(UNTYPED_MAP_JSON,
new TypeReference<Map<String, ObjectWrapper>>() { });
_doTestUntyped(map);
}
private void _doTestUntyped(final Map<String, ObjectWrapper> map)
{
ObjectWrapper w = map.get("double");
assertNotNull(w);
assertEquals(Double.valueOf(42), w.getObject());
assertEquals("string", map.get("string").getObject());
assertEquals(Boolean.TRUE, map.get("boolean").getObject());
assertEquals(Collections.singletonList("list0"), map.get("list").getObject());
assertTrue(map.containsKey("null"));
assertNull(map.get("null"));
assertEquals(5, map.size());
}
@Test
public void testFromEmptyString() throws Exception
{
ObjectMapper m = jsonMapperBuilder()
.configure(DeserializationFeature.ACCEPT_EMPTY_STRING_AS_NULL_OBJECT, true)
.build();
Map<?,?> result = m.readValue(q(""), Map.class);
assertNull(result);
}
/*
/**********************************************************
/* Test methods, typed maps
/**********************************************************
*/
@Test
public void testExactStringIntMap() throws Exception
{
// to get typing, must use type reference
String JSON = "{ \"foo\" : 13, \"bar\" : -39, \n \"\" : 0 }";
Map<String,Integer> result = MAPPER.readValue
(JSON, new TypeReference<HashMap<String,Integer>>() { });
assertNotNull(result);
assertEquals(HashMap.class, result.getClass());
assertEquals(3, result.size());
assertEquals(Integer.valueOf(13), result.get("foo"));
assertEquals(Integer.valueOf(-39), result.get("bar"));
assertEquals(Integer.valueOf(0), result.get(""));
assertNull(result.get("foobar"));
assertNull(result.get(" "));
}
/**
* Let's also check that it is possible to do type conversions
* to allow use of non-String Map keys.
*/
@Test
public void testIntBooleanMap() throws Exception
{
// to get typing, must use type reference
String JSON = "{ \"1\" : true, \"-1\" : false }";
Map<?,?> result = MAPPER.readValue
(JSON, new TypeReference<HashMap<Integer,Boolean>>() { });
assertNotNull(result);
assertEquals(HashMap.class, result.getClass());
assertEquals(2, result.size());
assertEquals(Boolean.TRUE, result.get(Integer.valueOf(1)));
assertEquals(Boolean.FALSE, result.get(Integer.valueOf(-1)));
assertNull(result.get("foobar"));
assertNull(result.get(0));
}
@Test
public void testExactStringStringMap() throws Exception
{
// to get typing, must use type reference
String JSON = "{ \"a\" : \"b\" }";
Map<String,String> result = MAPPER.readValue
(JSON, new TypeReference<TreeMap<String,String>>() { });
assertNotNull(result);
assertEquals(TreeMap.class, result.getClass());
assertEquals(1, result.size());
assertEquals("b", result.get("a"));
assertNull(result.get("b"));
}
/**
* Unit test that verifies that it's ok to have incomplete
* information about Map
|
String
|
java
|
assertj__assertj-core
|
assertj-core/src/test/java/org/assertj/core/api/atomic/integerarray/AtomicIntegerArrayAssert_containsSubsequence_Test.java
|
{
"start": 886,
"end": 1260
}
|
class ____ extends AtomicIntegerArrayAssertBaseTest {
@Override
protected AtomicIntegerArrayAssert invoke_api_method() {
return assertions.containsSubsequence(6, 8);
}
@Override
protected void verify_internal_effects() {
verify(arrays).assertContainsSubsequence(info(), internalArray(), arrayOf(6, 8));
}
}
|
AtomicIntegerArrayAssert_containsSubsequence_Test
|
java
|
quarkusio__quarkus
|
extensions/virtual-threads/runtime/src/main/java/io/quarkus/virtual/threads/VirtualThreads.java
|
{
"start": 607,
"end": 821
}
|
class ____ extends AnnotationLiteral<VirtualThreads> implements VirtualThreads {
public static final Literal INSTANCE = new Literal();
private static final long serialVersionUID = 1L;
}
}
|
Literal
|
java
|
ReactiveX__RxJava
|
src/main/java/io/reactivex/rxjava3/internal/operators/flowable/FlowableMergeWithMaybe.java
|
{
"start": 9876,
"end": 10726
}
|
class ____<T> extends AtomicReference<Disposable>
implements MaybeObserver<T> {
private static final long serialVersionUID = -2935427570954647017L;
final MergeWithObserver<T> parent;
OtherObserver(MergeWithObserver<T> parent) {
this.parent = parent;
}
@Override
public void onSubscribe(Disposable d) {
DisposableHelper.setOnce(this, d);
}
@Override
public void onSuccess(T t) {
parent.otherSuccess(t);
}
@Override
public void onError(Throwable e) {
parent.otherError(e);
}
@Override
public void onComplete() {
parent.otherComplete();
}
}
}
}
|
OtherObserver
|
java
|
apache__avro
|
lang/java/avro/src/main/java/org/apache/avro/SchemaBuilder.java
|
{
"start": 71297,
"end": 71783
}
|
class ____<R> extends FieldDefault<R, LongDefault<R>> {
private LongDefault(FieldBuilder<R> field) {
super(field);
}
/** Completes this field with the default value provided **/
public final FieldAssembler<R> longDefault(long defaultVal) {
return super.usingDefault(defaultVal);
}
@Override
final LongDefault<R> self() {
return this;
}
}
/** Choose whether to use a default value for the field or not. **/
public static
|
LongDefault
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableDirectoryCreateCommand.java
|
{
"start": 1610,
"end": 3542
}
|
class ____ extends RetriableCommand {
private static final Logger LOG =
LoggerFactory.getLogger(RetriableDirectoryCreateCommand.class);
/**
* Constructor, taking a description of the action.
* @param description Verbose description of the copy operation.
*/
public RetriableDirectoryCreateCommand(String description) {
super(description);
}
/**
* Implementation of RetriableCommand::doExecute().
* This implements the actual mkdirs() functionality.
* @param arguments Argument-list to the command.
* @return Boolean. True, if the directory could be created successfully.
* @throws Exception IOException, on failure to create the directory.
*/
@Override
protected Object doExecute(Object... arguments) throws Exception {
assert arguments.length == 4 : "Unexpected argument list.";
Path target = (Path)arguments[0];
Mapper.Context context = (Mapper.Context)arguments[1];
FileStatus sourceStatus = (FileStatus)arguments[2];
FileSystem sourceFs = (FileSystem)arguments[3];
FileSystem targetFS = target.getFileSystem(context.getConfiguration());
if(!targetFS.mkdirs(target)) {
return false;
}
boolean preserveEC = getFileAttributeSettings(context)
.contains(DistCpOptions.FileAttribute.ERASURECODINGPOLICY);
if (preserveEC && sourceStatus.isErasureCoded()
&& checkFSSupportsEC(sourceFs, sourceStatus.getPath())
&& checkFSSupportsEC(targetFS, target)) {
ErasureCodingPolicy ecPolicy = SystemErasureCodingPolicies.getByName(
((WithErasureCoding) sourceFs).getErasureCodingPolicyName(
sourceStatus));
LOG.debug("EC Policy for source path is {}", ecPolicy);
WithErasureCoding ecFs = (WithErasureCoding) targetFS;
if (ecPolicy != null) {
ecFs.setErasureCodingPolicy(target, ecPolicy.getName());
}
}
return true;
}
}
|
RetriableDirectoryCreateCommand
|
java
|
hibernate__hibernate-orm
|
hibernate-envers/src/test/java/org/hibernate/orm/test/envers/integration/sameids/SameIds.java
|
{
"start": 830,
"end": 2659
}
|
class ____ {
@BeforeClassTemplate
public void initData(EntityManagerFactoryScope scope) {
scope.inTransaction( em -> {
SameIdTestEntity1 site1 = new SameIdTestEntity1( 1, "str1" );
SameIdTestEntity2 site2 = new SameIdTestEntity2( 1, "str1" );
em.persist( site1 );
em.persist( site2 );
} );
scope.inTransaction( em -> {
SameIdTestEntity1 site1 = em.find( SameIdTestEntity1.class, 1 );
SameIdTestEntity2 site2 = em.find( SameIdTestEntity2.class, 1 );
site1.setStr1( "str2" );
site2.setStr1( "str2" );
} );
}
@Test
public void testRevisionsCounts(EntityManagerFactoryScope scope) {
scope.inEntityManager( em -> {
final var auditReader = AuditReaderFactory.get( em );
assertEquals( Arrays.asList( 1, 2 ), auditReader.getRevisions( SameIdTestEntity1.class, 1 ) );
assertEquals( Arrays.asList( 1, 2 ), auditReader.getRevisions( SameIdTestEntity2.class, 1 ) );
} );
}
@Test
public void testHistoryOfSite1(EntityManagerFactoryScope scope) {
SameIdTestEntity1 ver1 = new SameIdTestEntity1( 1, "str1" );
SameIdTestEntity1 ver2 = new SameIdTestEntity1( 1, "str2" );
scope.inEntityManager( em -> {
final var auditReader = AuditReaderFactory.get( em );
assertEquals( ver1, auditReader.find( SameIdTestEntity1.class, 1, 1 ) );
assertEquals( ver2, auditReader.find( SameIdTestEntity1.class, 1, 2 ) );
} );
}
@Test
public void testHistoryOfSite2(EntityManagerFactoryScope scope) {
SameIdTestEntity2 ver1 = new SameIdTestEntity2( 1, "str1" );
SameIdTestEntity2 ver2 = new SameIdTestEntity2( 1, "str2" );
scope.inEntityManager( em -> {
final var auditReader = AuditReaderFactory.get( em );
assertEquals( ver1, auditReader.find( SameIdTestEntity2.class, 1, 1 ) );
assertEquals( ver2, auditReader.find( SameIdTestEntity2.class, 1, 2 ) );
} );
}
}
|
SameIds
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/index/codec/vectors/diskbbq/next/ESNextDiskBBQVectorsReader.java
|
{
"start": 22376,
"end": 32563
}
|
// Scores the quantized vectors of one posting list (one centroid's cluster)
// against a single query vector, reading doc ids and quantized vector data
// sequentially from {@code indexInput}. The on-disk layout this class depends
// on: per posting list a centroid (dims floats), centroid dot-product (float),
// vector count (vint), doc-id encoding byte, then the delta-encoded doc ids
// and quantized vectors with their per-vector corrections.
// NOTE(review): not thread-safe — all scratch buffers are shared mutable state;
// presumably one instance is used per search thread. Confirm against callers.
class ____ implements PostingVisitor {
// Total on-disk bytes per vector: packed quantized vector + 3 correction floats + 1 correction short.
final long quantizedByteLength;
final IndexInput indexInput;
// The (raw, float) query vector being scored against this posting list.
final float[] target;
final FieldEntry entry;
final FieldInfo fieldInfo;
// Live-doc / filter bits; may be null, meaning all docs are accepted.
final Bits acceptDocs;
private final ESNextOSQVectorsScorer osqVectorsScorer;
// Scratch buffers sized for one bulk of BULK_SIZE vectors.
final float[] scores = new float[BULK_SIZE];
final float[] correctionsLower = new float[BULK_SIZE];
final float[] correctionsUpper = new float[BULK_SIZE];
final int[] correctionsSum = new int[BULK_SIZE];
final float[] correctionsAdd = new float[BULK_SIZE];
// Decoded doc ids for the current bulk; entries are set to -1 when filtered out.
final int[] docIdsScratch = new int[BULK_SIZE];
// Per-posting-list doc-id encoding selector read from the index.
byte docEncoding;
// Running base for delta-decoding doc ids within the current posting list.
int docBase = 0;
// Number of vectors in the current posting list.
int vectors;
// Whether the query has been quantized yet (done lazily, once per posting list reset).
boolean quantized = false;
// Dot product of the current centroid with itself (read from the index).
float centroidDp;
final float[] centroid;
// File pointer just past the posting-list header, i.e. start of doc ids/vectors.
long slicePos;
// Corrections produced by quantizing the query; valid once {@code quantized} is true.
OptimizedScalarQuantizer.QuantizationResult queryCorrections;
final ESNextDiskBBQVectorsFormat.QuantEncoding quantEncoding;
// Scratch for the centroid-relative query during quantization.
final float[] scratch;
final int[] quantizationScratch;
// The query vector packed at query-bit precision, fed to the scorer.
final byte[] quantizedQueryScratch;
final OptimizedScalarQuantizer quantizer;
final DocIdsWriter idsWriter = new DocIdsWriter();
// Scratch for the 3 per-vector correction floats read in the tail (non-bulk) path.
final float[] correctiveValues = new float[3];
// Bytes of just the packed quantized vector (excluding corrections).
final long quantizedVectorByteSize;
MemorySegmentPostingsVisitor(
float[] target,
ESNextDiskBBQVectorsFormat.QuantEncoding quantEncoding,
IndexInput indexInput,
FieldEntry entry,
FieldInfo fieldInfo,
Bits acceptDocs
) throws IOException {
this.target = target;
this.quantEncoding = quantEncoding;
this.indexInput = indexInput;
this.entry = entry;
this.fieldInfo = fieldInfo;
this.acceptDocs = acceptDocs;
centroid = new float[fieldInfo.getVectorDimension()];
scratch = new float[target.length];
// Dimensions may be padded up by the encoding; scratch sizes follow the encoding, not the raw dims.
final int discretizedDimensions = quantEncoding.discretizedDimensions(fieldInfo.getVectorDimension());
quantizationScratch = new int[discretizedDimensions];
quantizedQueryScratch = new byte[quantEncoding.getQueryPackedLength(fieldInfo.getVectorDimension())];
quantizedVectorByteSize = quantEncoding.getDocPackedLength(fieldInfo.getVectorDimension());
// Per-vector record = packed vector + lower/upper/additional corrections (floats) + component sum (short).
quantizedByteLength = quantizedVectorByteSize + (Float.BYTES * 3) + Short.BYTES;
quantizer = new OptimizedScalarQuantizer(fieldInfo.getVectorSimilarityFunction(), DEFAULT_LAMBDA, 1);
osqVectorsScorer = ESVectorUtil.getESNextOSQVectorsScorer(
indexInput,
quantEncoding.queryBits(),
quantEncoding.bits(),
fieldInfo.getVectorDimension(),
(int) quantizedVectorByteSize
);
}
// Positions the reader at a posting list's header, reads its centroid,
// centroid dot-product, vector count and doc-id encoding, and remembers the
// data start. Returns the number of vectors in the list. Also invalidates
// the cached query quantization so it is redone relative to this centroid.
@Override
public int resetPostingsScorer(long offset) throws IOException {
quantized = false;
indexInput.seek(offset);
indexInput.readFloats(centroid, 0, centroid.length);
centroidDp = Float.intBitsToFloat(indexInput.readInt());
vectors = indexInput.readVInt();
docEncoding = indexInput.readByte();
docBase = 0;
slicePos = indexInput.getFilePointer();
return vectors;
}
// Scores one bulk of BULK_SIZE vectors one at a time, skipping entries whose
// doc id was filtered to -1. Used instead of the bulk scorer when most of the
// bulk is filtered out. The read order must match the on-disk layout: all
// packed vectors first, then the four correction arrays. Returns the max score.
private float scoreIndividually() throws IOException {
float maxScore = Float.NEGATIVE_INFINITY;
// score individually, first the quantized byte chunk
for (int j = 0; j < BULK_SIZE; j++) {
int doc = docIdsScratch[j];
if (doc != -1) {
float qcDist = osqVectorsScorer.quantizeScore(quantizedQueryScratch);
scores[j] = qcDist;
} else {
// Filtered doc: still must advance past its packed vector bytes.
indexInput.skipBytes(quantizedVectorByteSize);
}
}
// read in all corrections
indexInput.readFloats(correctionsLower, 0, BULK_SIZE);
indexInput.readFloats(correctionsUpper, 0, BULK_SIZE);
for (int j = 0; j < BULK_SIZE; j++) {
correctionsSum[j] = Short.toUnsignedInt(indexInput.readShort());
}
indexInput.readFloats(correctionsAdd, 0, BULK_SIZE);
// Now apply corrections
for (int j = 0; j < BULK_SIZE; j++) {
int doc = docIdsScratch[j];
if (doc != -1) {
scores[j] = osqVectorsScorer.score(
queryCorrections.lowerInterval(),
queryCorrections.upperInterval(),
queryCorrections.quantizedComponentSum(),
queryCorrections.additionalCorrection(),
fieldInfo.getVectorSimilarityFunction(),
centroidDp,
correctionsLower[j],
correctionsUpper[j],
correctionsSum[j],
correctionsAdd[j],
scores[j]
);
if (scores[j] > maxScore) {
maxScore = scores[j];
}
}
}
return maxScore;
}
// Applies the accept-docs filter to one bulk in place: rejected entries are
// replaced with -1. Returns how many docs remain to be scored.
private static int docToBulkScore(int[] docIds, Bits acceptDocs) {
assert acceptDocs != null : "acceptDocs must not be null";
int docToScore = BULK_SIZE;
for (int i = 0; i < BULK_SIZE; i++) {
if (acceptDocs.get(docIds[i]) == false) {
docIds[i] = -1;
docToScore--;
}
}
return docToScore;
}
// Pushes every non-filtered (doc != -1) score of the current bulk into the collector.
private void collectBulk(KnnCollector knnCollector, float[] scores) {
for (int i = 0; i < BULK_SIZE; i++) {
final int doc = docIdsScratch[i];
if (doc != -1) {
knnCollector.collect(doc, scores[i]);
}
}
}
// Reads {@code count} delta-encoded doc ids into docIdsScratch and rebuilds
// absolute ids by accumulating into docBase (which persists across bulks).
private void readDocIds(int count) throws IOException {
idsWriter.readInts(indexInput, count, docEncoding, docIdsScratch);
// reconstitute from the deltas
for (int j = 0; j < count; j++) {
docBase += docIdsScratch[j];
docIdsScratch[j] = docBase;
}
}
// Scores the whole posting list against the query: full bulks of BULK_SIZE
// first, then a scalar tail loop for the remainder. Returns the number of
// docs actually scored (filtered docs are skipped, not scored).
@Override
public int visit(KnnCollector knnCollector) throws IOException {
indexInput.seek(slicePos);
// block processing
int scoredDocs = 0;
int limit = vectors - BULK_SIZE + 1;
int i = 0;
// read Docs
for (; i < limit; i += BULK_SIZE) {
// read the doc ids
readDocIds(BULK_SIZE);
final int docsToBulkScore = acceptDocs == null ? BULK_SIZE : docToBulkScore(docIdsScratch, acceptDocs);
if (docsToBulkScore == 0) {
// Whole bulk filtered out: skip its vector+correction bytes wholesale.
indexInput.skipBytes(quantizedByteLength * BULK_SIZE);
continue;
}
quantizeQueryIfNecessary();
final float maxScore;
// Mostly-filtered bulks are cheaper to score one-by-one; dense bulks use the vectorized path.
if (docsToBulkScore < BULK_SIZE / 2) {
maxScore = scoreIndividually();
} else {
maxScore = osqVectorsScorer.scoreBulk(
quantizedQueryScratch,
queryCorrections.lowerInterval(),
queryCorrections.upperInterval(),
queryCorrections.quantizedComponentSum(),
queryCorrections.additionalCorrection(),
fieldInfo.getVectorSimilarityFunction(),
centroidDp,
scores
);
}
// Only hand scores to the collector if the best one can still compete.
if (knnCollector.minCompetitiveSimilarity() < maxScore) {
collectBulk(knnCollector, scores);
}
scoredDocs += docsToBulkScore;
}
// process tail
// read the doc ids
if (i < vectors) {
readDocIds(vectors - i);
}
int count = 0;
for (; i < vectors; i++) {
int doc = docIdsScratch[count++];
if (acceptDocs == null || acceptDocs.get(doc)) {
quantizeQueryIfNecessary();
float qcDist = osqVectorsScorer.quantizeScore(quantizedQueryScratch);
// Tail layout interleaves each vector with its own corrections (unlike the bulk layout).
indexInput.readFloats(correctiveValues, 0, 3);
final int quantizedComponentSum = Short.toUnsignedInt(indexInput.readShort());
float score = osqVectorsScorer.score(
queryCorrections.lowerInterval(),
queryCorrections.upperInterval(),
queryCorrections.quantizedComponentSum(),
queryCorrections.additionalCorrection(),
fieldInfo.getVectorSimilarityFunction(),
centroidDp,
correctiveValues[0],
correctiveValues[1],
quantizedComponentSum,
correctiveValues[2],
qcDist
);
scoredDocs++;
knnCollector.collect(doc, score);
} else {
indexInput.skipBytes(quantizedByteLength);
}
}
if (scoredDocs > 0) {
knnCollector.incVisitedCount(scoredDocs);
}
return scoredDocs;
}
// Lazily quantizes the query relative to the current centroid and packs it;
// done at most once per resetPostingsScorer() and skipped entirely when the
// whole posting list is filtered out.
private void quantizeQueryIfNecessary() {
if (quantized == false) {
assert fieldInfo.getVectorSimilarityFunction() != COSINE || VectorUtil.isUnitVector(target);
queryCorrections = quantizer.scalarQuantize(target, scratch, quantizationScratch, quantEncoding.queryBits(), centroid);
quantEncoding.packQuery(quantizationScratch, quantizedQueryScratch);
quantized = true;
}
}
}
}
|
MemorySegmentPostingsVisitor
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/jdk8/ZoneIdTest.java
|
{
"start": 164,
"end": 566
}
|
class ____ extends TestCase {
public void test_for_issue() throws Exception {
VO vo = new VO();
vo.setDate(ZoneId.of("Europe/Paris"));
String text = JSON.toJSONString(vo);
System.out.println(text);
VO vo1 = JSON.parseObject(text, VO.class);
Assert.assertEquals(vo.getDate(), vo1.getDate());
}
public static
|
ZoneIdTest
|
java
|
square__retrofit
|
retrofit/java-test/src/test/java/retrofit2/RequestFactoryTest.java
|
{
"start": 81855,
"end": 82302
}
|
// Retrofit service stub for a form-encoded POST: the repeated "foo" field is a
// List<Object> (so each element is emitted as its own foo= pair) alongside a
// single "kit" field. The body just returns null — the surrounding test only
// inspects the Request that Retrofit builds from these annotations.
class ____ {
@FormUrlEncoded //
@POST("/foo") //
Call<ResponseBody> method(@Field("foo") List<Object> fields, @Field("kit") String kit) {
return null;
}
}
List<Object> values = Arrays.asList("foo", "bar", null, 3);
Request request = buildRequest(Example.class, values, "kat");
assertBody(request.body(), "foo=foo&foo=bar&foo=3&kit=kat");
}
@Test
public void formEncodedFieldArray() {
|
Example
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.