language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | alibaba__nacos | auth/src/main/java/com/alibaba/nacos/auth/parser/grpc/AbstractGrpcResourceParser.java | {
"start": 946,
"end": 1285
} | class ____ extends AbstractResourceParser<Request> {
@Override
protected Properties getProperties(Request request) {
Properties properties = new Properties();
properties.setProperty(Constants.Resource.REQUEST_CLASS, request.getClass().getSimpleName());
return properties;
}
}
| AbstractGrpcResourceParser |
java | apache__kafka | server-common/src/main/java/org/apache/kafka/server/util/RequestAndCompletionHandler.java | {
"start": 1004,
"end": 1898
} | class ____ {
public final long creationTimeMs;
public final Node destination;
public final AbstractRequest.Builder<? extends AbstractRequest> request;
public final RequestCompletionHandler handler;
public RequestAndCompletionHandler(
long creationTimeMs,
Node destination,
AbstractRequest.Builder<? extends AbstractRequest> request,
RequestCompletionHandler handler
) {
this.creationTimeMs = creationTimeMs;
this.destination = destination;
this.request = request;
this.handler = handler;
}
@Override
public String toString() {
return "RequestAndCompletionHandler(" +
"creationTimeMs=" + creationTimeMs +
", destination=" + destination +
", request=" + request +
", handler=" + handler +
')';
}
}
| RequestAndCompletionHandler |
java | micronaut-projects__micronaut-core | http-netty/src/main/java/io/micronaut/http/netty/channel/converters/KQueueChannelOptionFactory.java | {
"start": 1601,
"end": 2746
} | class ____ the channelOption to work
KQueueChannelOption.SO_ACCEPTFILTER.name();
}
@Override
public ChannelOption<?> channelOption(String name) {
return DefaultChannelOptionFactory.channelOption(name, KQueueChannelOption.class, UnixChannelOption.class);
}
@Override
public Object convertValue(ChannelOption<?> option, Object value, Environment env) {
return DefaultChannelOptionFactory.convertValue(option, KQueueChannelOption.class, value, env);
}
@Override
public void register(MutableConversionService conversionService) {
conversionService.addConverter(
Map.class,
AcceptFilter.class,
(map, targetType, context) -> {
Object filterName = map.get("filterName");
Object filterArgs = map.get("filterArgs");
if (filterName != null && filterArgs != null) {
return Optional.of(new AcceptFilter(filterName.toString(), filterArgs.toString()));
}
return Optional.empty();
}
);
}
}
| for |
java | dropwizard__dropwizard | dropwizard-jersey/src/main/java/io/dropwizard/jersey/params/AbstractParamConverterProvider.java | {
"start": 1559,
"end": 1971
} | class ____ not have a (String, String) constructor. We return null,
// leaving Jersey to handle these parameters as it normally would.
return null;
}
final String defaultValue = DefaultValueUtils.getDefaultValue(annotations);
return new AbstractParamConverter<>(constructor, parameterName, defaultValue);
}
return null;
}
}
| did |
java | micronaut-projects__micronaut-core | http-client/src/test/groovy/io/micronaut/http/client/stream/User.java | {
"start": 675,
"end": 1058
} | class ____ {
private String userName;
private List<Movie> movies;
public String getUserName() {
return userName;
}
public void setUserName(String userName) {
this.userName = userName;
}
public List<Movie> getMovies() {
return movies;
}
public void setMovies(List<Movie> movies) {
this.movies = movies;
}
}
| User |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/state/filesystem/AbstractFsCheckpointStorageAccess.java | {
"start": 2938,
"end": 15957
} | class ____ implements CheckpointStorageAccess {
// ------------------------------------------------------------------------
// Constants
// ------------------------------------------------------------------------
/** The prefix of the directory containing the data exclusive to a checkpoint. */
public static final String CHECKPOINT_DIR_PREFIX = "chk-";
/** The name of the directory for shared checkpoint state. */
public static final String CHECKPOINT_SHARED_STATE_DIR = "shared";
/**
* The name of the directory for state not owned/released by the master, but by the
* TaskManagers.
*/
public static final String CHECKPOINT_TASK_OWNED_STATE_DIR = "taskowned";
/** The name of the metadata files in checkpoints / savepoints. */
public static final String METADATA_FILE_NAME = "_metadata";
/** The magic number that is put in front of any reference. */
private static final byte[] REFERENCE_MAGIC_NUMBER = new byte[] {0x05, 0x5F, 0x3F, 0x18};
// ------------------------------------------------------------------------
// Fields and properties
// ------------------------------------------------------------------------
/** The jobId, written into the generated savepoint directories. */
private final JobID jobId;
/** The default location for savepoints. Null, if none is configured. */
@Nullable private final Path defaultSavepointDirectory;
/**
* Creates a new checkpoint storage.
*
* @param jobId The ID of the job that writes the checkpoints.
* @param defaultSavepointDirectory The default location for savepoints, or null, if none is
* set.
*/
protected AbstractFsCheckpointStorageAccess(
JobID jobId, @Nullable Path defaultSavepointDirectory) {
this.jobId = checkNotNull(jobId);
this.defaultSavepointDirectory = defaultSavepointDirectory;
}
/**
* Gets the default directory for savepoints. Returns null, if no default savepoint directory is
* configured.
*/
@Nullable
public Path getDefaultSavepointDirectory() {
return defaultSavepointDirectory;
}
// ------------------------------------------------------------------------
// CheckpointStorage implementation
// ------------------------------------------------------------------------
@Override
public boolean hasDefaultSavepointLocation() {
return defaultSavepointDirectory != null;
}
@Override
public CompletedCheckpointStorageLocation resolveCheckpoint(String checkpointPointer)
throws IOException {
return resolveCheckpointPointer(checkpointPointer);
}
/**
* Creates a file system based storage location for a savepoint.
*
* <p>This methods implements the logic that decides which location to use (given optional
* parameters for a configured location and a location passed for this specific savepoint) and
* how to name and initialize the savepoint directory.
*
* @param externalLocationPointer The target location pointer for the savepoint. Must be a valid
* URI. Null, if not supplied.
* @param checkpointId The checkpoint ID of the savepoint.
* @return The checkpoint storage location for the savepoint.
* @throws IOException Thrown if the target directory could not be created.
*/
@Override
public CheckpointStorageLocation initializeLocationForSavepoint(
@SuppressWarnings("unused") long checkpointId, @Nullable String externalLocationPointer)
throws IOException {
// determine where to write the savepoint to
final Path savepointBasePath;
if (externalLocationPointer != null) {
savepointBasePath = new Path(externalLocationPointer);
} else if (defaultSavepointDirectory != null) {
savepointBasePath = defaultSavepointDirectory;
} else {
throw new IllegalArgumentException(
"No savepoint location given and no default location configured.");
}
// generate the savepoint directory
final FileSystem fs = savepointBasePath.getFileSystem();
final String prefix = "savepoint-" + jobId.toString().substring(0, 6) + '-';
Exception latestException = null;
for (int attempt = 0; attempt < 10; attempt++) {
final Path path = new Path(savepointBasePath, FileUtils.getRandomFilename(prefix));
try {
if (fs.mkdirs(path)) {
// we make the path qualified, to make it independent of default schemes and
// authorities
final Path qp = path.makeQualified(fs);
return createSavepointLocation(fs, qp);
}
} catch (Exception e) {
latestException = e;
}
}
throw new IOException(
"Failed to create savepoint directory at " + savepointBasePath, latestException);
}
protected abstract CheckpointStorageLocation createSavepointLocation(
FileSystem fs, Path location) throws IOException;
// ------------------------------------------------------------------------
// Creating and resolving paths
// ------------------------------------------------------------------------
/**
* Builds directory into which a specific job checkpoints, meaning the directory inside which it
* creates the checkpoint-specific subdirectories.
*
* <p>This method only succeeds if a base checkpoint directory has been set; otherwise the
* method fails with an exception.
*
* @param jobId The ID of the job
* @return The job's checkpoint directory, re
* @throws UnsupportedOperationException Thrown, if no base checkpoint directory has been set.
*/
protected static Path getCheckpointDirectoryForJob(Path baseCheckpointPath, JobID jobId) {
return new Path(baseCheckpointPath, jobId.toString());
}
/**
* Creates the directory path for the data exclusive to a specific checkpoint.
*
* @param baseDirectory The base directory into which the job checkpoints.
* @param checkpointId The ID (logical timestamp) of the checkpoint.
*/
protected static Path createCheckpointDirectory(Path baseDirectory, long checkpointId) {
return new Path(baseDirectory, CHECKPOINT_DIR_PREFIX + checkpointId);
}
/**
* Takes the given string (representing a pointer to a checkpoint) and resolves it to a file
* status for the checkpoint's metadata file.
*
* @param checkpointPointer The pointer to resolve.
* @return A state handle to checkpoint/savepoint's metadata.
* @throws IOException Thrown, if the pointer cannot be resolved, the file system not accessed,
* or the pointer points to a location that does not seem to be a checkpoint/savepoint.
*/
@Internal
public static FsCompletedCheckpointStorageLocation resolveCheckpointPointer(
String checkpointPointer) throws IOException {
checkNotNull(checkpointPointer, "checkpointPointer");
checkArgument(!checkpointPointer.isEmpty(), "empty checkpoint pointer");
// check if the pointer is in fact a valid file path
final Path path;
try {
path = new Path(checkpointPointer);
} catch (Exception e) {
throw new IOException(
"Checkpoint/savepoint path '"
+ checkpointPointer
+ "' is not a valid file URI. "
+ "Either the pointer path is invalid, or the checkpoint was created by a different state backend.");
}
// check if the file system can be accessed
final FileSystem fs;
try {
fs = path.getFileSystem();
} catch (IOException e) {
throw new IOException(
"Cannot access file system for checkpoint/savepoint path '"
+ checkpointPointer
+ "'.",
e);
}
final FileStatus status;
try {
status = fs.getFileStatus(path);
} catch (FileNotFoundException e) {
throw new FileNotFoundException(
"Cannot find checkpoint or savepoint "
+ "file/directory '"
+ checkpointPointer
+ "' on file system '"
+ fs.getUri().getScheme()
+ "'.");
}
// if we are here, the file / directory exists
final Path checkpointDir;
final FileStatus metadataFileStatus;
// If this is a directory, we need to find the meta data file
if (status.isDir()) {
checkpointDir = status.getPath();
final Path metadataFilePath = new Path(path, METADATA_FILE_NAME);
try {
metadataFileStatus = fs.getFileStatus(metadataFilePath);
} catch (FileNotFoundException e) {
throw new FileNotFoundException(
"Cannot find meta data file '"
+ METADATA_FILE_NAME
+ "' in directory '"
+ path
+ "'. Please try to load the checkpoint/savepoint "
+ "directly from the metadata file instead of the directory.");
}
} else {
// this points to a file and we either do no name validation, or
// the name is actually correct, so we can return the path
metadataFileStatus = status;
checkpointDir = status.getPath().getParent();
}
final FileStateHandle metaDataFileHandle =
new FileStateHandle(metadataFileStatus.getPath(), metadataFileStatus.getLen());
final String pointer = checkpointDir.makeQualified(fs).toString();
return new FsCompletedCheckpointStorageLocation(
fs, checkpointDir, metaDataFileHandle, pointer);
}
// ------------------------------------------------------------------------
// Encoding / Decoding of References
// ------------------------------------------------------------------------
/**
* Encodes the given path as a reference in bytes. The path is encoded as a UTF-8 string and
* prepended as a magic number.
*
* @param path The path to encode.
* @return The location reference.
*/
public static CheckpointStorageLocationReference encodePathAsReference(Path path) {
byte[] refBytes = path.toString().getBytes(StandardCharsets.UTF_8);
byte[] bytes = new byte[REFERENCE_MAGIC_NUMBER.length + refBytes.length];
System.arraycopy(REFERENCE_MAGIC_NUMBER, 0, bytes, 0, REFERENCE_MAGIC_NUMBER.length);
System.arraycopy(refBytes, 0, bytes, REFERENCE_MAGIC_NUMBER.length, refBytes.length);
return new CheckpointStorageLocationReference(bytes);
}
/**
* Decodes the given reference into a path. This method validates that the reference bytes start
* with the correct magic number (as written by {@link #encodePathAsReference(Path)}) and
* converts the remaining bytes back to a proper path.
*
* @param reference The bytes representing the reference.
* @return The path decoded from the reference.
* @throws IllegalArgumentException Thrown, if the bytes do not represent a proper reference.
*/
public static Path decodePathFromReference(CheckpointStorageLocationReference reference) {
if (reference.isDefaultReference()) {
throw new IllegalArgumentException("Cannot decode default reference");
}
final byte[] bytes = reference.getReferenceBytes();
final int headerLen = REFERENCE_MAGIC_NUMBER.length;
if (bytes.length > headerLen) {
// compare magic number
for (int i = 0; i < headerLen; i++) {
if (bytes[i] != REFERENCE_MAGIC_NUMBER[i]) {
throw new IllegalArgumentException(
"Reference starts with the wrong magic number");
}
}
// covert to string and path
try {
return new Path(
new String(
bytes,
headerLen,
bytes.length - headerLen,
StandardCharsets.UTF_8));
} catch (Exception e) {
throw new IllegalArgumentException("Reference cannot be decoded to a path", e);
}
} else {
throw new IllegalArgumentException("Reference too short.");
}
}
}
| AbstractFsCheckpointStorageAccess |
java | spring-projects__spring-framework | spring-test/src/main/java/org/springframework/test/context/support/DynamicValuesPropertySource.java | {
"start": 1411,
"end": 3181
} | class ____ extends MapPropertySource {
static final String PROPERTY_SOURCE_NAME = "Dynamic Test Properties";
final DynamicPropertyRegistry dynamicPropertyRegistry;
DynamicValuesPropertySource() {
this(Collections.synchronizedMap(new LinkedHashMap<>()));
}
DynamicValuesPropertySource(Map<String, Supplier<Object>> valueSuppliers) {
super(PROPERTY_SOURCE_NAME, Collections.unmodifiableMap(valueSuppliers));
this.dynamicPropertyRegistry = (name, valueSupplier) -> {
Assert.hasText(name, "'name' must not be null or blank");
Assert.notNull(valueSupplier, "'valueSupplier' must not be null");
valueSuppliers.put(name, valueSupplier);
};
}
@Override
public @Nullable Object getProperty(String name) {
return SupplierUtils.resolve(super.getProperty(name));
}
/**
* Get the {@code DynamicValuesPropertySource} registered in the environment
* or create and register a new {@code DynamicValuesPropertySource} in the
* environment.
*/
static DynamicValuesPropertySource getOrCreate(ConfigurableEnvironment environment) {
MutablePropertySources propertySources = environment.getPropertySources();
PropertySource<?> propertySource = propertySources.get(PROPERTY_SOURCE_NAME);
if (propertySource instanceof DynamicValuesPropertySource dynamicValuesPropertySource) {
return dynamicValuesPropertySource;
}
else if (propertySource == null) {
DynamicValuesPropertySource dynamicValuesPropertySource = new DynamicValuesPropertySource();
propertySources.addFirst(dynamicValuesPropertySource);
return dynamicValuesPropertySource;
}
else {
throw new IllegalStateException("PropertySource with name '%s' must be a DynamicValuesPropertySource"
.formatted(PROPERTY_SOURCE_NAME));
}
}
}
| DynamicValuesPropertySource |
java | quarkusio__quarkus | integration-tests/main/src/main/java/io/quarkus/it/arc/interceptor/TestSimpleBeanEndpoint.java | {
"start": 153,
"end": 317
} | class ____ {
@Inject
SimpleBean simpleBean;
@GET
public String manualValidation() {
return simpleBean.ping();
}
}
| TestSimpleBeanEndpoint |
java | apache__flink | flink-table/flink-table-planner/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java | {
"start": 278202,
"end": 283833
} | class ____ implements SqlVisitor<RelDataType> {
private final SqlValidatorScope scope;
DeriveTypeVisitor(SqlValidatorScope scope) {
this.scope = scope;
}
@Override
public RelDataType visit(SqlLiteral literal) {
return resolveLiteral(literal).createSqlType(typeFactory);
}
@Override
public RelDataType visit(SqlCall call) {
// ----- FLINK MODIFICATION BEGIN -----
FlinkSqlCallBinding flinkSqlCallBinding =
new FlinkSqlCallBinding(this.scope.getValidator(), scope, call);
final SqlOperator operator = call.getOperator();
return operator.deriveType(
SqlValidatorImpl.this, scope, flinkSqlCallBinding.permutedCall());
// ----- FLINK MODIFICATION END -----
}
@Override
public RelDataType visit(SqlNodeList nodeList) {
// Operand is of a type that we can't derive a type for. If the
// operand is of a peculiar type, such as a SqlNodeList, then you
// should override the operator's validateCall() method so that it
// doesn't try to validate that operand as an expression.
throw Util.needToImplement(nodeList);
}
@Override
public RelDataType visit(SqlIdentifier id) {
// First check for builtin functions which don't have parentheses,
// like "LOCALTIME".
final SqlCall call = makeNullaryCall(id);
if (call != null) {
return call.getOperator().validateOperands(SqlValidatorImpl.this, scope, call);
}
RelDataType type = null;
if (!(scope instanceof EmptyScope)) {
id = scope.fullyQualify(id).identifier;
}
// Resolve the longest prefix of id that we can
int i;
for (i = id.names.size() - 1; i > 0; i--) {
// REVIEW jvs 9-June-2005: The name resolution rules used
// here are supposed to match SQL:2003 Part 2 Section 6.6
// (identifier chain), but we don't currently have enough
// information to get everything right. In particular,
// routine parameters are currently looked up via resolve;
// we could do a better job if they were looked up via
// resolveColumn.
final SqlNameMatcher nameMatcher = catalogReader.nameMatcher();
final SqlValidatorScope.ResolvedImpl resolved =
new SqlValidatorScope.ResolvedImpl();
scope.resolve(id.names.subList(0, i), nameMatcher, false, resolved);
if (resolved.count() == 1) {
// There's a namespace with the name we seek.
final SqlValidatorScope.Resolve resolve = resolved.only();
type = resolve.rowType();
for (SqlValidatorScope.Step p : Util.skip(resolve.path.steps())) {
type = type.getFieldList().get(p.i).getType();
}
break;
}
}
// Give precedence to namespace found, unless there
// are no more identifier components.
if (type == null || id.names.size() == 1) {
// See if there's a column with the name we seek in
// precisely one of the namespaces in this scope.
RelDataType colType = scope.resolveColumn(id.names.get(0), id);
if (colType != null) {
type = colType;
}
++i;
}
if (type == null) {
final SqlIdentifier last = id.getComponent(i - 1, i);
throw newValidationError(last, RESOURCE.unknownIdentifier(last.toString()));
}
// Resolve rest of identifier
for (; i < id.names.size(); i++) {
String name = id.names.get(i);
final RelDataTypeField field;
if (name.equals("")) {
// The wildcard "*" is represented as an empty name. It never
// resolves to a field.
name = "*";
field = null;
} else {
final SqlNameMatcher nameMatcher = catalogReader.nameMatcher();
field = nameMatcher.field(type, name);
}
if (field == null) {
throw newValidationError(id.getComponent(i), RESOURCE.unknownField(name));
}
type = field.getType();
}
type = SqlTypeUtil.addCharsetAndCollation(type, getTypeFactory());
return type;
}
@Override
public RelDataType visit(SqlDataTypeSpec dataType) {
// Q. How can a data type have a type?
// A. When it appears in an expression. (Say as the 2nd arg to the
// CAST operator.)
validateDataType(dataType);
return dataType.deriveType(SqlValidatorImpl.this);
}
@Override
public RelDataType visit(SqlDynamicParam param) {
return unknownType;
}
@Override
public RelDataType visit(SqlIntervalQualifier intervalQualifier) {
return typeFactory.createSqlIntervalType(intervalQualifier);
}
}
/** Converts an expression into canonical form by fully-qualifying any identifiers. */
private static | DeriveTypeVisitor |
java | bumptech__glide | library/src/main/java/com/bumptech/glide/Glide.java | {
"start": 25834,
"end": 27643
} | class ____ used, this method will only attempt to
* discover support {@link Fragment}s. Any non-support {@link android.app.Fragment}s attached to
* the {@link FragmentActivity} will be ignored.
*
* @param view The view to search for a containing Fragment or Activity from.
* @return A RequestManager that can be used to start a load.
* @throws IllegalArgumentException if the activity associated with the view is destroyed.
*/
@NonNull
public static RequestManager with(@NonNull View view) {
return getRetriever(view.getContext()).get(view);
}
@NonNull
public Registry getRegistry() {
return glideContext.getRegistry();
}
boolean removeFromManagers(@NonNull Target<?> target) {
synchronized (managers) {
for (RequestManager requestManager : managers) {
if (requestManager.untrack(target)) {
return true;
}
}
}
return false;
}
void registerRequestManager(RequestManager requestManager) {
synchronized (managers) {
if (managers.contains(requestManager)) {
throw new IllegalStateException("Cannot register already registered manager");
}
managers.add(requestManager);
}
}
void unregisterRequestManager(RequestManager requestManager) {
synchronized (managers) {
if (!managers.contains(requestManager)) {
throw new IllegalStateException("Cannot unregister not yet registered manager");
}
managers.remove(requestManager);
}
}
@Override
public void onTrimMemory(int level) {
trimMemory(level);
}
@Override
public void onConfigurationChanged(Configuration newConfig) {
// Do nothing.
}
@Override
public void onLowMemory() {
clearMemory();
}
/** Creates a new instance of {@link RequestOptions}. */
public | is |
java | spring-projects__spring-framework | spring-test/src/main/java/org/springframework/test/web/servlet/assertj/AbstractHttpServletResponseAssert.java | {
"start": 1921,
"end": 8599
} | class ____<R extends HttpServletResponse, SELF extends AbstractHttpServletResponseAssert<R, SELF, ACTUAL>, ACTUAL>
extends AbstractObjectAssert<SELF, ACTUAL> {
private final Supplier<MediaTypeAssert> contentTypeAssertSupplier;
private final Supplier<HttpHeadersAssert> headersAssertSupplier;
private final Supplier<AbstractIntegerAssert<?>> statusAssert;
protected AbstractHttpServletResponseAssert(ACTUAL actual, Class<?> selfType) {
super(actual, selfType);
this.contentTypeAssertSupplier = SingletonSupplier.of(() -> new MediaTypeAssert(getResponse().getContentType()));
this.headersAssertSupplier = SingletonSupplier.of(() -> new HttpHeadersAssert(getHttpHeaders(getResponse())));
this.statusAssert = SingletonSupplier.of(() -> Assertions.assertThat(getResponse().getStatus()).as("HTTP status code"));
}
private static HttpHeaders getHttpHeaders(HttpServletResponse response) {
MultiValueMap<String, String> headers = new LinkedMultiValueMap<>();
response.getHeaderNames().forEach(name -> headers.put(name, new ArrayList<>(response.getHeaders(name))));
return new HttpHeaders(headers);
}
/**
* Provide the response to use if it is available.
* <p>Throws an {@link AssertionError} if the request has failed to process,
* and the response is not available.
* @return the response to use
*/
protected abstract R getResponse();
/**
* Return a new {@linkplain MediaTypeAssert assertion} object that uses the
* response's {@linkplain MediaType content type} as the object to test.
*/
public MediaTypeAssert contentType() {
return this.contentTypeAssertSupplier.get();
}
/**
* Return a new {@linkplain HttpHeadersAssert assertion} object that uses
* {@link HttpHeaders} as the object to test. The returned assertion
* object provides all the regular {@linkplain AbstractMapAssert map
* assertions}, with headers mapped by header name.
* Examples: <pre><code class="java">
* // Check for the presence of the Accept header:
* assertThat(response).headers().containsHeader(HttpHeaders.ACCEPT);
*
* // Check for the absence of the Content-Length header:
* assertThat(response).headers().doesNotContainsHeader(HttpHeaders.CONTENT_LENGTH);
* </code></pre>
*/
public HttpHeadersAssert headers() {
return this.headersAssertSupplier.get();
}
// Content-type shortcuts
/**
* Verify that the response's {@code Content-Type} is equal to the given value.
* @param contentType the expected content type
*/
public SELF hasContentType(MediaType contentType) {
contentType().isEqualTo(contentType);
return this.myself;
}
/**
* Verify that the response's {@code Content-Type} is equal to the given
* string representation.
* @param contentType the expected content type
*/
public SELF hasContentType(String contentType) {
contentType().isEqualTo(contentType);
return this.myself;
}
/**
* Verify that the response's {@code Content-Type} is
* {@linkplain MediaType#isCompatibleWith(MediaType) compatible} with the
* given value.
* @param contentType the expected compatible content type
*/
public SELF hasContentTypeCompatibleWith(MediaType contentType) {
contentType().isCompatibleWith(contentType);
return this.myself;
}
/**
* Verify that the response's {@code Content-Type} is
* {@linkplain MediaType#isCompatibleWith(MediaType) compatible} with the
* given string representation.
* @param contentType the expected compatible content type
*/
public SELF hasContentTypeCompatibleWith(String contentType) {
contentType().isCompatibleWith(contentType);
return this.myself;
}
// Headers shortcuts
/**
* Verify that the response contains a header with the given {@code name}.
* @param name the name of an expected HTTP header
*/
public SELF containsHeader(String name) {
headers().containsHeader(name);
return this.myself;
}
/**
* Verify that the response does not contain a header with the given {@code name}.
* @param name the name of an HTTP header that should not be present
*/
public SELF doesNotContainHeader(String name) {
headers().doesNotContainHeader(name);
return this.myself;
}
/**
* Verify that the response contains a header with the given {@code name}
* and primary {@code value}.
* @param name the name of an expected HTTP header
* @param value the expected value of the header
*/
public SELF hasHeader(String name, String value) {
headers().hasValue(name, value);
return this.myself;
}
// Status
/**
* Verify that the HTTP status is equal to the specified status code.
* @param status the expected HTTP status code
*/
public SELF hasStatus(int status) {
status().isEqualTo(status);
return this.myself;
}
/**
* Verify that the HTTP status is equal to the specified
* {@linkplain HttpStatus status}.
* @param status the expected HTTP status code
*/
public SELF hasStatus(HttpStatus status) {
return hasStatus(status.value());
}
/**
* Verify that the HTTP status is equal to {@link HttpStatus#OK}.
* @see #hasStatus(HttpStatus)
*/
public SELF hasStatusOk() {
return hasStatus(HttpStatus.OK);
}
/**
* Verify that the HTTP status code is in the 1xx range.
* @see <a href="https://datatracker.ietf.org/doc/html/rfc2616#section-10.1">RFC 2616</a>
*/
public SELF hasStatus1xxInformational() {
return hasStatusSeries(Series.INFORMATIONAL);
}
/**
* Verify that the HTTP status code is in the 2xx range.
* @see <a href="https://datatracker.ietf.org/doc/html/rfc2616#section-10.2">RFC 2616</a>
*/
public SELF hasStatus2xxSuccessful() {
return hasStatusSeries(Series.SUCCESSFUL);
}
/**
* Verify that the HTTP status code is in the 3xx range.
* @see <a href="https://datatracker.ietf.org/doc/html/rfc2616#section-10.3">RFC 2616</a>
*/
public SELF hasStatus3xxRedirection() {
return hasStatusSeries(Series.REDIRECTION);
}
/**
* Verify that the HTTP status code is in the 4xx range.
* @see <a href="https://datatracker.ietf.org/doc/html/rfc2616#section-10.4">RFC 2616</a>
*/
public SELF hasStatus4xxClientError() {
return hasStatusSeries(Series.CLIENT_ERROR);
}
/**
* Verify that the HTTP status code is in the 5xx range.
* @see <a href="https://datatracker.ietf.org/doc/html/rfc2616#section-10.5">RFC 2616</a>
*/
public SELF hasStatus5xxServerError() {
return hasStatusSeries(Series.SERVER_ERROR);
}
private SELF hasStatusSeries(Series series) {
Assertions.assertThat(Series.resolve(getResponse().getStatus())).as("HTTP status series").isEqualTo(series);
return this.myself;
}
private AbstractIntegerAssert<?> status() {
return this.statusAssert.get();
}
}
| AbstractHttpServletResponseAssert |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/bugs/_1881/VehicleDtoMapper.java | {
"start": 1164,
"end": 1578
} | class ____ {
private final String name;
private final int size;
private final String type;
public Vehicle(String name, int size, String type) {
this.name = name;
this.size = size;
this.type = type;
}
public String getName() {
return name;
}
public int getSize() {
return size;
}
public String getType() {
return type;
}
}
| Vehicle |
java | apache__camel | components/camel-wal/src/main/java/org/apache/camel/component/wal/EntryInfo.java | {
"start": 902,
"end": 1203
} | class ____ {
private final long position;
private EntryInfo(long position) {
this.position = position;
}
public long getPosition() {
return position;
}
/**
* Contains information about a log entry that is hot on the cache
*/
public static | EntryInfo |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/bulkid/AbstractMutationStrategyGeneratedIdentityTest.java | {
"start": 1236,
"end": 5412
} | class ____ {
@BeforeEach
public void setUp(SessionFactoryScope scope) {
scope.inTransaction( session -> {
Doctor doctor = new Doctor();
doctor.setName( "Doctor John" );
doctor.setEmployed( true );
session.persist( doctor );
} );
}
@AfterEach
public void tearDown(SessionFactoryScope scope) {
scope.getSessionFactory().getSchemaManager().truncateMappedObjects();
}
@Test
@SkipForDialect(dialectClass = MySQLDialect.class, matchSubTypes = true,
reason = "MySQL ignores a provided value for an auto_increment column if it's lower than the current sequence value")
@SkipForDialect(dialectClass = AbstractTransactSQLDialect.class, matchSubTypes = true,
reason = "T-SQL complains IDENTITY_INSERT is off when a value for an identity column is provided")
@SkipForDialect(dialectClass = InformixDialect.class, reason = "Informix counts from 1 like a normal person")
public void testInsertStatic(SessionFactoryScope scope) {
scope.inTransaction( session -> {
session.createMutationQuery(
"insert into Engineer(id, name, employed, fellow) values (0, :name, :employed, false)" )
.setParameter( "name", "John Doe" )
.setParameter( "employed", true )
.executeUpdate();
final Engineer engineer = session.find( Engineer.class, 0 );
assertThat( engineer.getName() ).isEqualTo( "John Doe" );
assertThat( engineer.isEmployed() ).isTrue();
assertThat( engineer.isFellow() ).isFalse();
} );
}
@Test
@SkipForDialect(dialectClass = OracleDialect.class,
reason = "Oracle doesn't support insert-select with a returning clause")
public void testInsertGenerated(SessionFactoryScope scope) {
scope.inTransaction( session -> {
session.createMutationQuery( "insert into Engineer(name, employed, fellow) values (:name, :employed, false)" )
.setParameter( "name", "John Doe" )
.setParameter( "employed", true )
.executeUpdate();
final Engineer engineer = session.createQuery( "from Engineer e where e.name = 'John Doe'", Engineer.class )
.getSingleResult();
assertThat( engineer.getName() ).isEqualTo( "John Doe" );
assertThat( engineer.isEmployed() ).isTrue();
assertThat( engineer.isFellow() ).isFalse();
} );
}
@Test
@SkipForDialect(dialectClass = MySQLDialect.class, matchSubTypes = true,
reason = "MySQL ignores a provided value for an auto_increment column if it's lower than the current sequence value")
@SkipForDialect(dialectClass = AbstractTransactSQLDialect.class, matchSubTypes = true,
reason = "T-SQL complains IDENTITY_INSERT is off when a value for an identity column is provided")
public void testInsertSelectStatic(SessionFactoryScope scope) {
scope.inTransaction( session -> {
final int insertCount = session.createMutationQuery( "insert into Engineer(id, name, employed, fellow) "
+ "select d.id + 1, 'John Doe', true, false from Doctor d" )
.executeUpdate();
final Engineer engineer = session.createQuery( "from Engineer e where e.name = 'John Doe'", Engineer.class )
.getSingleResult();
assertThat( insertCount ).isEqualTo( 1 );
assertThat( engineer.getName() ).isEqualTo( "John Doe" );
assertThat( engineer.isEmployed() ).isTrue();
assertThat( engineer.isFellow() ).isFalse();
} );
}
@Test
@SkipForDialect(dialectClass = OracleDialect.class,
reason = "Oracle doesn't support insert-select with a returning clause")
public void testInsertSelectGenerated(SessionFactoryScope scope) {
scope.inTransaction( session -> {
final int insertCount = session.createMutationQuery( "insert into Engineer(name, employed, fellow) "
+ "select 'John Doe', true, false from Doctor d" )
.executeUpdate();
final Engineer engineer = session.createQuery( "from Engineer e where e.name = 'John Doe'", Engineer.class )
.getSingleResult();
assertThat( insertCount ).isEqualTo( 1 );
assertThat( engineer.getName() ).isEqualTo( "John Doe" );
assertThat( engineer.isEmployed() ).isTrue();
assertThat( engineer.isFellow() ).isFalse();
} );
}
@Entity(name = "Person")
@Inheritance(strategy = InheritanceType.JOINED)
public static | AbstractMutationStrategyGeneratedIdentityTest |
java | quarkusio__quarkus | extensions/devui/deployment/src/main/java/io/quarkus/devui/deployment/extension/Extension.java | {
"start": 282,
"end": 7211
} | class ____ {
private String namespace;
private String artifact;
private String name;
private String shortName;
private String description;
private URL guide;
private List<String> keywords;
private String status;
private List<String> configFilter;
private List<String> categories;
private String unlisted;
private String builtWith;
private List<String> providesCapabilities;
private List<String> extensionDependencies;
private Codestart codestart;
private final List<Page> cardPages = new ArrayList<>();
private final List<Page> menuPages = new ArrayList<>();
private final List<Page> footerPages = new ArrayList<>();
private final List<Page> settingPages = new ArrayList<>();
private final List<Page> unlistedPages = new ArrayList<>();
private Card card = null; // Custom card
private List<LibraryLink> libraryLinks = null;
private String darkLogo = null;
private String lightLogo = null;
private String headlessComponent = null;
public Extension() {
}
public String getNamespace() {
return namespace;
}
public void setNamespace(String namespace) {
this.namespace = namespace;
}
public String getArtifact() {
return artifact;
}
public void setArtifact(String artifact) {
this.artifact = artifact;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getShortName() {
return shortName;
}
public boolean hasLibraryLinks() {
return libraryLinks != null && !libraryLinks.isEmpty();
}
public List<LibraryLink> getLibraryLinks() {
return libraryLinks;
}
public void addLibraryLink(LibraryLink libraryLink) {
if (this.libraryLinks == null)
this.libraryLinks = new LinkedList<>();
this.libraryLinks.add(libraryLink);
}
public String getDarkLogo() {
return this.darkLogo;
}
public String getLightLogo() {
return this.lightLogo;
}
public void setLogo(String darkLogo, String lightLogo) {
this.darkLogo = darkLogo;
this.lightLogo = lightLogo;
}
public void setShortName(String shortName) {
this.shortName = shortName;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
public URL getGuide() {
return guide;
}
public void setGuide(URL guide) {
this.guide = guide;
}
public List<String> getKeywords() {
return keywords;
}
public void setKeywords(List<String> keywords) {
this.keywords = keywords;
}
public String getStatus() {
return status;
}
public void setStatus(String status) {
this.status = status;
}
public List<String> getConfigFilter() {
return configFilter;
}
public void setConfigFilter(List<String> configFilter) {
this.configFilter = configFilter;
}
public List<String> getCategories() {
return categories;
}
public void setCategories(List<String> categories) {
this.categories = categories;
}
public String getUnlisted() {
return unlisted;
}
public void setUnlisted(String unlisted) {
this.unlisted = unlisted;
}
public String getBuiltWith() {
return builtWith;
}
public void setBuiltWith(String builtWith) {
this.builtWith = builtWith;
}
public List<String> getProvidesCapabilities() {
return providesCapabilities;
}
public void setProvidesCapabilities(List<String> providesCapabilities) {
this.providesCapabilities = providesCapabilities;
}
public List<String> getExtensionDependencies() {
return extensionDependencies;
}
public void setExtensionDependencies(List<String> extensionDependencies) {
this.extensionDependencies = extensionDependencies;
}
public Codestart getCodestart() {
return codestart;
}
public void setCodestart(Codestart codestart) {
this.codestart = codestart;
}
public void addCardPage(Page page) {
this.cardPages.add(page);
}
public void addCardPages(List<Page> pages) {
this.cardPages.addAll(pages);
}
public List<Page> getCardPages() {
return cardPages;
}
public void addMenuPage(Page page) {
this.menuPages.add(page);
}
public void addMenuPages(List<Page> pages) {
this.menuPages.addAll(pages);
}
public List<Page> getMenuPages() {
return menuPages;
}
public void addFooterPage(Page page) {
this.footerPages.add(page);
}
public void addFooterPages(List<Page> pages) {
this.footerPages.addAll(pages);
}
public List<Page> getFooterPages() {
return footerPages;
}
public void addSettingPage(Page page) {
this.settingPages.add(page);
}
public void addSettingPages(List<Page> pages) {
this.settingPages.addAll(pages);
}
public List<Page> getSettingPages() {
return settingPages;
}
public void addUnlistedPage(Page page) {
this.unlistedPages.add(page);
}
public void addUnlistedPages(List<Page> pages) {
this.unlistedPages.addAll(pages);
}
public List<Page> getUnlistedPages() {
return unlistedPages;
}
public void setCard(Card card) {
this.card = card;
}
public Card getCard() {
return this.card;
}
public boolean hasCard() {
return this.card != null;
}
public void setHeadlessComponent(String headlessComponent) {
this.headlessComponent = headlessComponent;
}
public String getHeadlessComponent() {
return this.headlessComponent;
}
public String getHeadlessComponentRef() {
if (headlessComponent != null) {
return DOT + SLASH + DOT + DOT + SLASH + this.namespace + SLASH + this.headlessComponent;
}
return null;
}
@Override
public String toString() {
return "Extension{" + "namespace=" + namespace + ", artifact=" + artifact + ", name=" + name + ", shortName="
+ shortName + ", description=" + description + ", guide=" + guide + ", keywords=" + keywords + ", status="
+ status + ", configFilter=" + configFilter + ", categories=" + categories + ", unlisted=" + unlisted
+ ", builtWith=" + builtWith + ", providesCapabilities=" + providesCapabilities + ", extensionDependencies="
+ extensionDependencies + ", codestart=" + codestart + '}';
}
private static final String SLASH = "/";
private static final String DOT = ".";
}
| Extension |
java | mybatis__mybatis-3 | src/main/java/org/apache/ibatis/session/TransactionIsolationLevel.java | {
"start": 755,
"end": 1438
} | enum ____ {
NONE(Connection.TRANSACTION_NONE),
READ_COMMITTED(Connection.TRANSACTION_READ_COMMITTED),
READ_UNCOMMITTED(Connection.TRANSACTION_READ_UNCOMMITTED),
REPEATABLE_READ(Connection.TRANSACTION_REPEATABLE_READ),
SERIALIZABLE(Connection.TRANSACTION_SERIALIZABLE),
/**
* A non-standard isolation level for Microsoft SQL Server. Defined in the SQL Server JDBC driver
* {@link com.microsoft.sqlserver.jdbc.ISQLServerConnection}
*
* @since 3.5.6
*/
SQL_SERVER_SNAPSHOT(0x1000);
private final int level;
TransactionIsolationLevel(int level) {
this.level = level;
}
public int getLevel() {
return level;
}
}
| TransactionIsolationLevel |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/index/query/MatchPhrasePrefixQueryBuilderTests.java | {
"start": 1240,
"end": 8180
} | class ____ extends AbstractQueryTestCase<MatchPhrasePrefixQueryBuilder> {
@Override
protected MatchPhrasePrefixQueryBuilder doCreateTestQueryBuilder() {
String fieldName = randomFrom(TEXT_FIELD_NAME, TEXT_ALIAS_FIELD_NAME);
Object value;
if (isTextField(fieldName)) {
int terms = randomIntBetween(0, 3);
StringBuilder builder = new StringBuilder();
for (int i = 0; i < terms; i++) {
builder.append(randomAlphaOfLengthBetween(1, 10)).append(" ");
}
value = builder.toString().trim();
} else {
value = getRandomValueForFieldName(fieldName);
}
MatchPhrasePrefixQueryBuilder matchQuery = new MatchPhrasePrefixQueryBuilder(fieldName, value);
if (randomBoolean() && isTextField(fieldName)) {
matchQuery.analyzer(randomFrom("simple", "keyword", "whitespace"));
}
if (randomBoolean()) {
matchQuery.slop(randomIntBetween(0, 10));
}
if (randomBoolean()) {
matchQuery.maxExpansions(randomIntBetween(1, 10000));
}
if (randomBoolean()) {
matchQuery.zeroTermsQuery(randomFrom(ZeroTermsQueryOption.ALL, ZeroTermsQueryOption.NONE));
}
return matchQuery;
}
@Override
protected Map<String, MatchPhrasePrefixQueryBuilder> getAlternateVersions() {
Map<String, MatchPhrasePrefixQueryBuilder> alternateVersions = new HashMap<>();
MatchPhrasePrefixQueryBuilder matchPhrasePrefixQuery = new MatchPhrasePrefixQueryBuilder(
randomAlphaOfLengthBetween(1, 10),
randomAlphaOfLengthBetween(1, 10)
);
String contentString = Strings.format("""
{
"match_phrase_prefix" : {
"%s" : "%s"
}
}""", matchPhrasePrefixQuery.fieldName(), matchPhrasePrefixQuery.value());
alternateVersions.put(contentString, matchPhrasePrefixQuery);
return alternateVersions;
}
@Override
protected void doAssertLuceneQuery(MatchPhrasePrefixQueryBuilder queryBuilder, Query query, SearchExecutionContext context)
throws IOException {
assertThat(query, notNullValue());
if (query instanceof MatchAllDocsQuery) {
assertThat(queryBuilder.zeroTermsQuery(), equalTo(ZeroTermsQueryOption.ALL));
return;
}
assertThat(
query,
either(instanceOf(MultiPhrasePrefixQuery.class)).or(instanceOf(SynonymQuery.class)).or(instanceOf(MatchNoDocsQuery.class))
);
}
public void testIllegalValues() {
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new MatchPhrasePrefixQueryBuilder(null, "value"));
assertEquals("[match_phrase_prefix] requires fieldName", e.getMessage());
e = expectThrows(IllegalArgumentException.class, () -> new MatchPhrasePrefixQueryBuilder("fieldName", null));
assertEquals("[match_phrase_prefix] requires query value", e.getMessage());
MatchPhrasePrefixQueryBuilder matchQuery = new MatchPhrasePrefixQueryBuilder("fieldName", "text");
e = expectThrows(IllegalArgumentException.class, () -> matchQuery.maxExpansions(-1));
}
public void testBadAnalyzer() throws IOException {
MatchPhrasePrefixQueryBuilder matchQuery = new MatchPhrasePrefixQueryBuilder("fieldName", "text");
matchQuery.analyzer("bogusAnalyzer");
QueryShardException e = expectThrows(QueryShardException.class, () -> matchQuery.toQuery(createSearchExecutionContext()));
assertThat(e.getMessage(), containsString("analyzer [bogusAnalyzer] not found"));
}
public void testPhraseOnFieldWithNoTerms() {
MatchPhrasePrefixQueryBuilder matchQuery = new MatchPhrasePrefixQueryBuilder(DATE_FIELD_NAME, "three term phrase");
matchQuery.analyzer("whitespace");
expectThrows(IllegalArgumentException.class, () -> matchQuery.doToQuery(createSearchExecutionContext()));
}
public void testPhrasePrefixZeroTermsQuery() throws IOException {
MatchPhrasePrefixQueryBuilder matchQuery = new MatchPhrasePrefixQueryBuilder(TEXT_FIELD_NAME, "");
matchQuery.zeroTermsQuery(ZeroTermsQueryOption.NONE);
assertEquals(new MatchNoDocsQuery(), matchQuery.doToQuery(createSearchExecutionContext()));
matchQuery = new MatchPhrasePrefixQueryBuilder(TEXT_FIELD_NAME, "");
matchQuery.zeroTermsQuery(ZeroTermsQueryOption.ALL);
assertEquals(new MatchAllDocsQuery(), matchQuery.doToQuery(createSearchExecutionContext()));
}
public void testPhrasePrefixMatchQuery() throws IOException {
String json1 = """
{
"match_phrase_prefix" : {
"message" : "this is a test"
}
}""";
String expected = """
{
"match_phrase_prefix" : {
"message" : {
"query" : "this is a test"
}
}
}""";
MatchPhrasePrefixQueryBuilder qb = (MatchPhrasePrefixQueryBuilder) parseQuery(json1);
checkGeneratedJson(expected, qb);
String json3 = """
{
"match_phrase_prefix" : {
"message" : {
"query" : "this is a test",
"max_expansions" : 10
}
}
}""";
expected = """
{
"match_phrase_prefix" : {
"message" : {
"query" : "this is a test",
"max_expansions" : 10
}
}
}""";
qb = (MatchPhrasePrefixQueryBuilder) parseQuery(json3);
checkGeneratedJson(expected, qb);
}
public void testParseFailsWithMultipleFields() throws IOException {
String json = """
{
"match_phrase_prefix" : {
"message1" : {
"query" : "this is a test"
},
"message2" : {
"query" : "this is a test"
}
}
}""";
ParsingException e = expectThrows(ParsingException.class, () -> parseQuery(json));
assertEquals("[match_phrase_prefix] query doesn't support multiple fields, found [message1] and [message2]", e.getMessage());
String shortJson = """
{
"match_phrase_prefix" : {
"message1" : "this is a test",
"message2" : "this is a test"
}
}""";
e = expectThrows(ParsingException.class, () -> parseQuery(shortJson));
assertEquals("[match_phrase_prefix] query doesn't support multiple fields, found [message1] and [message2]", e.getMessage());
}
}
| MatchPhrasePrefixQueryBuilderTests |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocket.java | {
"start": 1658,
"end": 16385
} | class ____ implements Closeable {
static {
if (SystemUtils.IS_OS_WINDOWS) {
loadingFailureReason = "UNIX Domain sockets are not available on Windows.";
} else if (!NativeCodeLoader.isNativeCodeLoaded()) {
loadingFailureReason = "libhadoop cannot be loaded.";
} else {
String problem;
try {
anchorNative();
problem = null;
} catch (Throwable t) {
problem = "DomainSocket#anchorNative got error: " + t.getMessage();
}
loadingFailureReason = problem;
}
}
static final Logger LOG = LoggerFactory.getLogger(DomainSocket.class);
/**
* True only if we should validate the paths used in
* {@link DomainSocket#bindAndListen(String)}
*/
private static boolean validateBindPaths = true;
/**
* The reason why DomainSocket is not available, or null if it is available.
*/
private final static String loadingFailureReason;
/**
* Initialize the native library code.
*/
private static native void anchorNative();
/**
* This function is designed to validate that the path chosen for a UNIX
* domain socket is secure. A socket path is secure if it doesn't allow
* unprivileged users to perform a man-in-the-middle attack against it.
* For example, one way to perform a man-in-the-middle attack would be for
* a malicious user to move the server socket out of the way and create his
* own socket in the same place. Not good.
*
* Note that we only check the path once. It's possible that the
* permissions on the path could change, perhaps to something more relaxed,
* immediately after the path passes our validation test-- hence creating a
* security hole. However, the purpose of this check is to spot common
* misconfigurations. System administrators do not commonly change
* permissions on these paths while the server is running.
*
* For more information on Security exceptions see this wiki page:
* https://wiki.apache.org/hadoop/SocketPathSecurity
*
* @param path the path to validate
* @param skipComponents the number of starting path components to skip
* validation for (used only for testing)
*/
@VisibleForTesting
native static void validateSocketPathSecurity0(String path,
int skipComponents) throws IOException;
/**
* Return true only if UNIX domain sockets are available.
*
* @return loadingFailureReason.
*/
public static String getLoadingFailureReason() {
return loadingFailureReason;
}
/**
* Disable validation of the server bind paths.
*/
@VisibleForTesting
public static void disableBindPathValidation() {
validateBindPaths = false;
}
/**
* Given a path and a port, compute the effective path by replacing
* occurrences of _PORT with the port. This is mainly to make it
* possible to run multiple DataNodes locally for testing purposes.
*
* @param path The source path
* @param port Port number to use
*
* @return The effective path
*/
public static String getEffectivePath(String path, int port) {
return path.replace("_PORT", String.valueOf(port));
}
/**
* The socket reference count and closed bit.
*/
final CloseableReferenceCount refCount;
/**
* The file descriptor associated with this UNIX domain socket.
*/
final int fd;
/**
* The path associated with this UNIX domain socket.
*/
private final String path;
/**
* The InputStream associated with this socket.
*/
private final DomainInputStream inputStream = new DomainInputStream();
/**
* The OutputStream associated with this socket.
*/
private final DomainOutputStream outputStream = new DomainOutputStream();
/**
* The Channel associated with this socket.
*/
private final DomainChannel channel = new DomainChannel();
private DomainSocket(String path, int fd) {
this.refCount = new CloseableReferenceCount();
this.fd = fd;
this.path = path;
}
private static native int bind0(String path) throws IOException;
private void unreference(boolean checkClosed) throws ClosedChannelException {
if (checkClosed) {
refCount.unreferenceCheckClosed();
} else {
refCount.unreference();
}
}
/**
* Create a new DomainSocket listening on the given path.
*
* @param path The path to bind and listen on.
* @return The new DomainSocket.
* @throws IOException raised on errors performing I/O.
*/
public static DomainSocket bindAndListen(String path) throws IOException {
if (loadingFailureReason != null) {
throw new UnsupportedOperationException(loadingFailureReason);
}
if (validateBindPaths) {
validateSocketPathSecurity0(path, 0);
}
int fd = bind0(path);
return new DomainSocket(path, fd);
}
/**
* Create a pair of UNIX domain sockets which are connected to each other
* by calling socketpair(2).
*
* @return An array of two UNIX domain sockets connected to
* each other.
* @throws IOException on error.
*/
public static DomainSocket[] socketpair() throws IOException {
int fds[] = socketpair0();
return new DomainSocket[] {
new DomainSocket("(anonymous0)", fds[0]),
new DomainSocket("(anonymous1)", fds[1])
};
}
private static native int[] socketpair0() throws IOException;
private static native int accept0(int fd) throws IOException;
/**
* Accept a new UNIX domain connection.
*
* This method can only be used on sockets that were bound with bind().
*
* @return The new connection.
* @throws IOException If there was an I/O error performing the accept--
* such as the socket being closed from under us.
* Particularly when the accept is timed out, it throws
* SocketTimeoutException.
*/
public DomainSocket accept() throws IOException {
refCount.reference();
boolean exc = true;
try {
DomainSocket ret = new DomainSocket(path, accept0(fd));
exc = false;
return ret;
} finally {
unreference(exc);
}
}
private static native int connect0(String path) throws IOException;
/**
* Create a new DomainSocket connected to the given path.
*
* @param path The path to connect to.
* @throws IOException If there was an I/O error performing the connect.
*
* @return The new DomainSocket.
*/
public static DomainSocket connect(String path) throws IOException {
if (loadingFailureReason != null) {
throw new UnsupportedOperationException(loadingFailureReason);
}
int fd = connect0(path);
return new DomainSocket(path, fd);
}
/**
* Return true if the file descriptor is currently open.
*
* @return True if the file descriptor is currently open.
*/
public boolean isOpen() {
return refCount.isOpen();
}
/**
* @return The socket path.
*/
public String getPath() {
return path;
}
/**
* @return The socket InputStream
*/
public DomainInputStream getInputStream() {
return inputStream;
}
/**
* @return The socket OutputStream
*/
public DomainOutputStream getOutputStream() {
return outputStream;
}
/**
* @return The socket Channel
*/
public DomainChannel getChannel() {
return channel;
}
public static final int SEND_BUFFER_SIZE = 1;
public static final int RECEIVE_BUFFER_SIZE = 2;
public static final int SEND_TIMEOUT = 3;
public static final int RECEIVE_TIMEOUT = 4;
private static native void setAttribute0(int fd, int type, int val)
throws IOException;
public void setAttribute(int type, int size) throws IOException {
refCount.reference();
boolean exc = true;
try {
setAttribute0(fd, type, size);
exc = false;
} finally {
unreference(exc);
}
}
private native int getAttribute0(int fd, int type) throws IOException;
public int getAttribute(int type) throws IOException {
refCount.reference();
int attribute;
boolean exc = true;
try {
attribute = getAttribute0(fd, type);
exc = false;
return attribute;
} finally {
unreference(exc);
}
}
private static native void close0(int fd) throws IOException;
private static native void closeFileDescriptor0(FileDescriptor fd)
throws IOException;
private static native void shutdown0(int fd) throws IOException;
/**
* Close the Server Socket without check refCount.
* When Server Socket is blocked on accept(), its refCount is 1.
* close() call on Server Socket will be stuck in the while loop count check.
* @param force if true, will not check refCount before close socket.
* @throws IOException raised on errors performing I/O.
*/
public void close(boolean force) throws IOException {
// Set the closed bit on this DomainSocket
int count;
try {
count = refCount.setClosed();
} catch (ClosedChannelException e) {
// Someone else already closed the DomainSocket.
return;
}
boolean interrupted = false;
if (force) {
try {
// Calling shutdown on the socket will interrupt blocking system
// calls like accept, write, and read that are going on in a
// different thread.
shutdown0(fd);
} catch (IOException e) {
LOG.error("shutdown error: ", e);
}
} else {
// Wait for all references to go away
boolean didShutdown = false;
while (count > 0) {
if (!didShutdown) {
try {
// Calling shutdown on the socket will interrupt blocking system
// calls like accept, write, and read that are going on in a
// different thread.
shutdown0(fd);
} catch (IOException e) {
LOG.error("shutdown error: ", e);
}
didShutdown = true;
}
try {
Thread.sleep(10);
} catch (InterruptedException e) {
interrupted = true;
}
count = refCount.getReferenceCount();
}
}
// At this point, nobody has a reference to the file descriptor,
// and nobody will be able to get one in the future either.
// We now call close(2) on the file descriptor.
// After this point, the file descriptor number will be reused by
// something else. Although this DomainSocket object continues to hold
// the old file descriptor number (it's a final field), we never use it
// again because this DomainSocket is closed.
close0(fd);
if (interrupted) {
Thread.currentThread().interrupt();
}
}
/**
* Close the Socket.
*/
@Override
public void close() throws IOException {
close(false);
}
/**
* Call shutdown(SHUT_RDWR) on the UNIX domain socket.
*
* @throws IOException raised on errors performing I/O.
*/
public void shutdown() throws IOException {
refCount.reference();
boolean exc = true;
try {
shutdown0(fd);
exc = false;
} finally {
unreference(exc);
}
}
private native static void sendFileDescriptors0(int fd,
FileDescriptor descriptors[],
byte jbuf[], int offset, int length) throws IOException;
/**
* Send some FileDescriptor objects to the process on the other side of this
* socket.
*
* @param descriptors The file descriptors to send.
* @param jbuf Some bytes to send. You must send at least
* one byte.
* @param offset The offset in the jbuf array to start at.
* @param length Length of the jbuf array to use.
* @throws IOException raised on errors performing I/O.
*/
public void sendFileDescriptors(FileDescriptor descriptors[],
byte jbuf[], int offset, int length) throws IOException {
refCount.reference();
boolean exc = true;
try {
sendFileDescriptors0(fd, descriptors, jbuf, offset, length);
exc = false;
} finally {
unreference(exc);
}
}
private static native int receiveFileDescriptors0(int fd,
FileDescriptor[] descriptors,
byte[] buf, int offset, int length) throws IOException;
/**
* Receive some FileDescriptor objects from the process on the other side of
* this socket, and wrap them in FileInputStream objects.
*
* @param streams input stream.
* @param buf input buf.
* @param offset input offset.
* @param length input length.
* @return wrap them in FileInputStream objects.
* @throws IOException raised on errors performing I/O.
*/
public int recvFileInputStreams(FileInputStream[] streams, byte buf[],
int offset, int length) throws IOException {
FileDescriptor descriptors[] = new FileDescriptor[streams.length];
boolean success = false;
for (int i = 0; i < streams.length; i++) {
streams[i] = null;
}
refCount.reference();
try {
int ret = receiveFileDescriptors0(fd, descriptors, buf, offset, length);
for (int i = 0, j = 0; i < descriptors.length; i++) {
if (descriptors[i] != null) {
streams[j++] = new FileInputStream(descriptors[i]);
descriptors[i] = null;
}
}
success = true;
return ret;
} finally {
if (!success) {
for (int i = 0; i < descriptors.length; i++) {
if (descriptors[i] != null) {
try {
closeFileDescriptor0(descriptors[i]);
} catch (Throwable t) {
LOG.warn(t.toString());
}
} else if (streams[i] != null) {
try {
streams[i].close();
} catch (Throwable t) {
LOG.warn(t.toString());
} finally {
streams[i] = null; }
}
}
}
unreference(!success);
}
}
private native static int readArray0(int fd, byte b[], int off, int len)
throws IOException;
private native static int available0(int fd) throws IOException;
private static native void write0(int fd, int b) throws IOException;
private static native void writeArray0(int fd, byte b[], int offset, int length)
throws IOException;
private native static int readByteBufferDirect0(int fd, ByteBuffer dst,
int position, int remaining) throws IOException;
/**
* Input stream for UNIX domain sockets.
*/
@InterfaceAudience.LimitedPrivate("HDFS")
public | DomainSocket |
java | apache__rocketmq | remoting/src/main/java/org/apache/rocketmq/remoting/netty/NettyRemotingServer.java | {
"start": 4248,
"end": 12298
} | class ____ extends NettyRemotingAbstract implements RemotingServer {
private static final Logger log = LoggerFactory.getLogger(LoggerName.ROCKETMQ_REMOTING_NAME);
private static final Logger TRAFFIC_LOGGER = LoggerFactory.getLogger(LoggerName.ROCKETMQ_TRAFFIC_NAME);
private final ServerBootstrap serverBootstrap;
protected final EventLoopGroup eventLoopGroupSelector;
protected final EventLoopGroup eventLoopGroupBoss;
protected final NettyServerConfig nettyServerConfig;
private final ExecutorService publicExecutor;
private final ScheduledExecutorService scheduledExecutorService;
private final ChannelEventListener channelEventListener;
private final HashedWheelTimer timer = new HashedWheelTimer(r -> new Thread(r, "ServerHouseKeepingService"));
private DefaultEventExecutorGroup defaultEventExecutorGroup;
/**
* NettyRemotingServer may hold multiple SubRemotingServer, each server will be stored in this container with a
* ListenPort key.
*/
private final ConcurrentMap<Integer/*Port*/, NettyRemotingAbstract> remotingServerTable = new ConcurrentHashMap<>();
public static final String HANDSHAKE_HANDLER_NAME = "handshakeHandler";
public static final String HA_PROXY_DECODER = "HAProxyDecoder";
public static final String HA_PROXY_HANDLER = "HAProxyHandler";
public static final String TLS_MODE_HANDLER = "TlsModeHandler";
public static final String TLS_HANDLER_NAME = "sslHandler";
public static final String FILE_REGION_ENCODER_NAME = "fileRegionEncoder";
// sharable handlers
protected final TlsModeHandler tlsModeHandler = new TlsModeHandler(TlsSystemConfig.tlsMode);
protected final NettyEncoder encoder = new NettyEncoder();
protected final NettyConnectManageHandler connectionManageHandler = new NettyConnectManageHandler();
protected final NettyServerHandler serverHandler = new NettyServerHandler();
protected final RemotingCodeDistributionHandler distributionHandler = new RemotingCodeDistributionHandler();
public NettyRemotingServer(final NettyServerConfig nettyServerConfig) {
this(nettyServerConfig, null);
}
public NettyRemotingServer(final NettyServerConfig nettyServerConfig,
final ChannelEventListener channelEventListener) {
super(nettyServerConfig.getServerOnewaySemaphoreValue(), nettyServerConfig.getServerAsyncSemaphoreValue());
this.serverBootstrap = new ServerBootstrap();
this.nettyServerConfig = nettyServerConfig;
this.channelEventListener = channelEventListener;
this.publicExecutor = buildPublicExecutor(nettyServerConfig);
this.scheduledExecutorService = buildScheduleExecutor();
this.eventLoopGroupBoss = buildEventLoopGroupBoss();
this.eventLoopGroupSelector = buildEventLoopGroupSelector();
loadSslContext();
}
protected EventLoopGroup buildEventLoopGroupSelector() {
if (useEpoll()) {
return new EpollEventLoopGroup(nettyServerConfig.getServerSelectorThreads(), new ThreadFactoryImpl("NettyServerEPOLLSelector_"));
} else {
return new NioEventLoopGroup(nettyServerConfig.getServerSelectorThreads(), new ThreadFactoryImpl("NettyServerNIOSelector_"));
}
}
protected EventLoopGroup buildEventLoopGroupBoss() {
if (useEpoll()) {
return new EpollEventLoopGroup(1, new ThreadFactoryImpl("NettyEPOLLBoss_"));
} else {
return new NioEventLoopGroup(1, new ThreadFactoryImpl("NettyNIOBoss_"));
}
}
private ExecutorService buildPublicExecutor(NettyServerConfig nettyServerConfig) {
int publicThreadNums = nettyServerConfig.getServerCallbackExecutorThreads();
if (publicThreadNums <= 0) {
publicThreadNums = 4;
}
return Executors.newFixedThreadPool(publicThreadNums, new ThreadFactoryImpl("NettyServerPublicExecutor_"));
}
private ScheduledExecutorService buildScheduleExecutor() {
return ThreadUtils.newScheduledThreadPool(1,
new ThreadFactoryImpl("NettyServerScheduler_", true),
new ThreadPoolExecutor.DiscardOldestPolicy());
}
public void loadSslContext() {
TlsMode tlsMode = TlsSystemConfig.tlsMode;
log.info("Server is running in TLS {} mode", tlsMode.getName());
if (tlsMode != TlsMode.DISABLED) {
try {
sslContext = TlsHelper.buildSslContext(false);
log.info("SslContext created for server");
} catch (CertificateException | IOException e) {
log.error("Failed to create SslContext for server", e);
}
}
}
private boolean useEpoll() {
return NetworkUtil.isLinuxPlatform()
&& nettyServerConfig.isUseEpollNativeSelector()
&& Epoll.isAvailable();
}
protected void initServerBootstrap(ServerBootstrap serverBootstrap) {
serverBootstrap.group(this.eventLoopGroupBoss, this.eventLoopGroupSelector)
.channel(useEpoll() ? EpollServerSocketChannel.class : NioServerSocketChannel.class)
.option(ChannelOption.SO_BACKLOG, 1024)
.option(ChannelOption.SO_REUSEADDR, true)
.childOption(ChannelOption.SO_KEEPALIVE, false)
.childOption(ChannelOption.TCP_NODELAY, true)
.localAddress(new InetSocketAddress(this.nettyServerConfig.getBindAddress(),
this.nettyServerConfig.getListenPort()))
.childHandler(new ChannelInitializer<SocketChannel>() {
@Override
public void initChannel(SocketChannel ch) {
configChannel(ch);
}
});
addCustomConfig(serverBootstrap);
}
/**
 * Starts the remoting server: creates the codec worker group, binds the
 * server socket, registers this instance by listen port, starts the
 * channel-event dispatcher (when a listener is configured) and schedules the
 * periodic housekeeping tasks.
 *
 * @throws IllegalStateException if the server socket cannot be bound
 */
@Override
public void start() {
    // Worker group used by the codec handlers set up in configChannel().
    this.defaultEventExecutorGroup = new DefaultEventExecutorGroup(nettyServerConfig.getServerWorkerThreads(),
        new ThreadFactoryImpl("NettyServerCodecThread_"));
    initServerBootstrap(serverBootstrap);
    try {
        // Block until the bind completes so start() fails fast on port conflicts.
        ChannelFuture sync = serverBootstrap.bind().sync();
        InetSocketAddress addr = (InetSocketAddress) sync.channel().localAddress();
        // Port 0 requests an ephemeral port; record the one the OS assigned.
        if (0 == nettyServerConfig.getListenPort()) {
            this.nettyServerConfig.setListenPort(addr.getPort());
        }
        log.info("RemotingServer started, listening {}:{}", this.nettyServerConfig.getBindAddress(),
            this.nettyServerConfig.getListenPort());
        this.remotingServerTable.put(this.nettyServerConfig.getListenPort(), this);
    } catch (Exception e) {
        throw new IllegalStateException(String.format("Failed to bind to %s:%d", nettyServerConfig.getBindAddress(),
            nettyServerConfig.getListenPort()), e);
    }
    // Only start the event dispatcher when someone listens for channel events.
    if (this.channelEventListener != null) {
        this.nettyEventExecutor.start();
    }
    // Self-rescheduling timer task that scans the response table every second.
    TimerTask timerScanResponseTable = new TimerTask() {
        @Override
        public void run(Timeout timeout) {
            try {
                NettyRemotingServer.this.scanResponseTable();
            } catch (Throwable e) {
                log.error("scanResponseTable exception", e);
            } finally {
                // Re-arm in finally so a single failure does not stop the scan.
                timer.newTimeout(this, 1000, TimeUnit.MILLISECONDS);
            }
        }
    };
    // Initial 3s delay before the first response-table scan.
    this.timer.newTimeout(timerScanResponseTable, 1000 * 3, TimeUnit.MILLISECONDS);
    // Periodic traffic statistics; failures are logged and do not cancel the task.
    scheduledExecutorService.scheduleWithFixedDelay(() -> {
        try {
            NettyRemotingServer.this.printRemotingCodeDistribution();
        } catch (Throwable e) {
            TRAFFIC_LOGGER.error("NettyRemotingServer print remoting code distribution exception", e);
        }
    }, 1, 1, TimeUnit.SECONDS);
}
/**
* config channel in ChannelInitializer
*
* @param ch the SocketChannel needed to init
* @return the initialized ChannelPipeline, sub | NettyRemotingServer |
java | reactor__reactor-core | reactor-core/src/test/java/reactor/core/publisher/FluxTimestampTest.java | {
"start": 819,
"end": 1284
} | class ____ {
Flux<Tuple2<Long, String>> scenario_aFluxCanBeTimestamped(){
return Flux.just("test")
.timestamp();
}
@Test
public void aFluxCanBeTimestamped(){
StepVerifier.withVirtualTime(this::scenario_aFluxCanBeTimestamped, 0)
.thenAwait(Duration.ofSeconds(2))
.thenRequest(1)
.expectNextMatches(t -> t.getT1() == 2000 && t.getT2().equals("test"))
.verifyComplete();
}
}
| FluxTimestampTest |
java | apache__dubbo | dubbo-common/src/test/java/org/apache/dubbo/common/extension/ext8_add/impl/AddExt2_ManualAdaptive.java | {
"start": 1082,
"end": 1341
} | class ____ implements AddExt2 {
public String echo(URL url, String s) {
AddExt2 addExt1 = ExtensionLoader.getExtensionLoader(AddExt2.class).getExtension(url.getParameter("add.ext2"));
return addExt1.echo(url, s);
}
}
| AddExt2_ManualAdaptive |
java | spring-projects__spring-boot | core/spring-boot/src/test/java/org/springframework/boot/logging/log4j2/Log4j2XmlTests.java | {
"start": 1372,
"end": 3891
} | class ____ {
protected @Nullable Configuration configuration;
@AfterEach
void stopConfiguration() {
if (this.configuration != null) {
this.configuration.stop();
}
}
@Test
void whenLogExceptionConversionWordIsNotConfiguredThenConsoleUsesDefault() {
assertThat(consolePattern()).contains("%xwEx");
}
@Test
void whenLogExceptionConversionWordIsSetThenConsoleUsesIt() {
withSystemProperty(LoggingSystemProperty.EXCEPTION_CONVERSION_WORD.getEnvironmentVariableName(), "custom",
() -> assertThat(consolePattern()).contains("custom"));
}
@Test
void whenLogLevelPatternIsNotConfiguredThenConsoleUsesDefault() {
assertThat(consolePattern()).contains("%5p");
}
@Test
void whenLogLevelPatternIsSetThenConsoleUsesIt() {
withSystemProperty(LoggingSystemProperty.LEVEL_PATTERN.getEnvironmentVariableName(), "custom",
() -> assertThat(consolePattern()).contains("custom"));
}
@Test
void whenLogLDateformatPatternIsNotConfiguredThenConsoleUsesDefault() {
assertThat(consolePattern()).contains("yyyy-MM-dd'T'HH:mm:ss.SSSXXX");
}
@Test
void whenLogDateformatPatternIsSetThenConsoleUsesIt() {
withSystemProperty(LoggingSystemProperty.DATEFORMAT_PATTERN.getEnvironmentVariableName(), "dd-MM-yyyy",
() -> assertThat(consolePattern()).contains("dd-MM-yyyy"));
}
protected void withSystemProperty(String name, String value, Runnable action) {
String previous = System.setProperty(name, value);
action.run();
if (previous == null) {
System.clearProperty(name);
}
else {
System.setProperty(name, previous);
}
}
private String consolePattern() {
prepareConfiguration();
assertThat(this.configuration).isNotNull();
return ((PatternLayout) this.configuration.getAppender("Console").getLayout()).getConversionPattern();
}
protected void prepareConfiguration() {
this.configuration = initializeConfiguration();
this.configuration.start();
}
protected Configuration initializeConfiguration() {
LoggerContext context = new LoggerContext("test");
Configuration configuration = ConfigurationFactory.getInstance()
.getConfiguration(context, configurationSource());
configuration.initialize();
return configuration;
}
private ConfigurationSource configurationSource() {
try (InputStream in = getClass().getResourceAsStream(getConfigFileName())) {
return new ConfigurationSource(in);
}
catch (IOException ex) {
throw new RuntimeException(ex);
}
}
protected String getConfigFileName() {
return "log4j2.xml";
}
}
| Log4j2XmlTests |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java | {
"start": 53408,
"end": 53965
} | interface ____ {
void writeTo(XContentBuilder builder) throws IOException;
boolean hasValue();
record FieldLoader(SourceLoader.SyntheticFieldLoader loader) implements FieldWriter {
@Override
public void writeTo(XContentBuilder builder) throws IOException {
loader.write(builder);
}
@Override
public boolean hasValue() {
return loader.hasValue();
}
}
| FieldWriter |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mixed/Document.java | {
"start": 221,
"end": 1014
} | class ____ extends Item {
private Blob content;
private Calendar modified;
private Calendar created;
/**
* @return Returns the created.
*/
public Calendar getCreated() {
return created;
}
/**
* @param created The created to set.
*/
public void setCreated(Calendar created) {
this.created = created;
}
/**
* @return Returns the modified.
*/
public Calendar getModified() {
return modified;
}
/**
* @param modified The modified to set.
*/
public void setModified(Calendar modified) {
this.modified = modified;
}
/**
* @return Returns the content.
*/
public Blob getContent() {
return content;
}
/**
* @param content The content to set.
*/
public void setContent(Blob content) {
this.content = content;
}
}
| Document |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/engine/collection/Mother.java | {
"start": 553,
"end": 935
} | class ____ {
@Id
@GeneratedValue
public Integer getId() { return id; }
public void setId(Integer id) { this.id = id; }
private Integer id;
@OneToMany(mappedBy = "mother")
@Cascade({ CascadeType.PERSIST, CascadeType.MERGE })
public Set<Son> getSons() { return sons; }
public void setSons(Set<Son> sons) { this.sons = sons; }
private Set<Son> sons = new HashSet<>();
}
| Mother |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/core/type/classreading/MetadataReaderFactory.java | {
"start": 1244,
"end": 1422
} | class ____ (to be resolved to a ".class" file)
* @return a holder for the ClassReader instance (never {@code null})
* @throws ClassFormatException in case of an incompatible | name |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/cache/spi/support/NaturalIdReadWriteAccess.java | {
"start": 758,
"end": 3337
} | class ____ extends AbstractReadWriteAccess implements NaturalIdDataAccess {
private final CacheKeysFactory keysFactory;
public NaturalIdReadWriteAccess(
DomainDataRegion region,
CacheKeysFactory keysFactory,
DomainDataStorageAccess storageAccess,
NaturalIdDataCachingConfig naturalIdDataCachingConfig) {
super( region, storageAccess );
this.keysFactory = keysFactory;
}
@Override
protected AccessedDataClassification getAccessedDataClassification() {
return AccessedDataClassification.NATURAL_ID;
}
@Override
public AccessType getAccessType() {
return AccessType.READ_WRITE;
}
@Override
protected Comparator<?> getVersionComparator() {
// natural id has no comparator
return null;
}
@Override
public Object generateCacheKey(
Object naturalIdValues,
EntityPersister rootEntityDescriptor,
SharedSessionContractImplementor session) {
return keysFactory.createNaturalIdKey( naturalIdValues, rootEntityDescriptor, session );
}
@Override
public Object getNaturalIdValues(Object cacheKey) {
return keysFactory.getNaturalIdValues( cacheKey );
}
private void put(SharedSessionContractImplementor session, Object key, Object value) {
getStorageAccess().putIntoCache( key, new Item( value, null, nextTimestamp() ), session );
}
@Override
public boolean insert(SharedSessionContractImplementor session, Object key, Object value) {
return false;
}
@Override
public boolean afterInsert(SharedSessionContractImplementor session, Object key, Object value) {
try {
writeLock().lock();
final var item = (Lockable) getStorageAccess().getFromCache( key, session );
if ( item == null ) {
put( session, key, value );
return true;
}
else {
return false;
}
}
finally {
writeLock().unlock();
}
}
@Override
public boolean update(SharedSessionContractImplementor session, Object key, Object value) {
return false;
}
@Override
public boolean afterUpdate(SharedSessionContractImplementor session, Object key, Object value, SoftLock lock) {
try {
writeLock().lock();
final var item = (Lockable) getStorageAccess().getFromCache( key, session );
if ( item != null && item.isUnlockable( lock ) ) {
final var lockItem = (SoftLockImpl) item;
if ( lockItem.wasLockedConcurrently() ) {
decrementLock( session, key, lockItem );
return false;
}
else {
put( session, key, value );
return true;
}
}
else {
handleLockExpiry( session, key );
return false;
}
}
finally {
writeLock().unlock();
}
}
}
| NaturalIdReadWriteAccess |
java | apache__kafka | trogdor/src/main/java/org/apache/kafka/trogdor/rest/TaskPending.java | {
"start": 1112,
"end": 1375
} | class ____ extends TaskState {
@JsonCreator
public TaskPending(@JsonProperty("spec") TaskSpec spec) {
super(spec, NullNode.instance);
}
@Override
public TaskStateType stateType() {
return TaskStateType.PENDING;
}
}
| TaskPending |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/StringSplitterTest.java | {
"start": 8303,
"end": 8622
} | class ____ {
void f() {
for (String s : "".split(".*foo\\\\t")) {}
}
}
""")
.addOutputLines(
"Test.java",
"""
import com.google.common.base.Splitter;
import java.util.regex.Pattern;
| Test |
java | elastic__elasticsearch | x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/index/IndexResolver.java | {
"start": 48581,
"end": 49046
} | interface ____ extends BiConsumer<EsField, InvalidMappedField> {};
/**
* Preserve the properties (sub fields) of an existing field even when marking it as invalid.
*/
public static final ExistingFieldInvalidCallback PRESERVE_PROPERTIES = (oldField, newField) -> {
var oldProps = oldField.getProperties();
if (oldProps.size() > 0) {
newField.getProperties().putAll(oldProps);
}
};
}
| ExistingFieldInvalidCallback |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/inheritance/DiscriminatorNotNullSingleTableTest.java | {
"start": 5286,
"end": 5795
} | class ____ extends Account {
private BigDecimal creditLimit;
//Getters and setters are omitted for brevity
//end::entity-inheritance-single-table-discriminator-value-example[]
public BigDecimal getCreditLimit() {
return creditLimit;
}
public void setCreditLimit(BigDecimal creditLimit) {
this.creditLimit = creditLimit;
}
//tag::entity-inheritance-single-table-discriminator-value-example[]
}
@Entity(name = "OtherAccount")
@DiscriminatorValue("not null")
public static | CreditAccount |
java | junit-team__junit5 | platform-tests/src/test/java/org/junit/platform/suite/engine/testsuites/LifecycleMethodsSuites.java | {
"start": 3742,
"end": 4079
} | class ____ extends SuperclassWithBeforeAndAfterSuite {
@BeforeSuite
static void setUp() {
StatefulTestCase.callSequence.add("subclassBeforeSuiteMethod");
}
@AfterSuite
static void tearDown() {
StatefulTestCase.callSequence.add("subclassAfterSuiteMethod");
}
}
@TestSuite
public static | SubclassWithBeforeAndAfterSuite |
java | grpc__grpc-java | services/src/test/java/io/grpc/protobuf/services/BinaryLogProviderImplTest.java | {
"start": 1000,
"end": 2017
} | class ____ {
@Test
public void configStrNullTest() throws Exception {
BinaryLogSink sink = mock(BinaryLogSink.class);
BinaryLogProviderImpl binlog = new BinaryLogProviderImpl(sink, /*configStr=*/ null);
assertNull(binlog.getServerInterceptor("package.service/method"));
assertNull(binlog.getClientInterceptor("package.service/method", CallOptions.DEFAULT));
}
@Test
public void configStrEmptyTest() throws Exception {
BinaryLogSink sink = mock(BinaryLogSink.class);
BinaryLogProviderImpl binlog = new BinaryLogProviderImpl(sink, "");
assertNull(binlog.getServerInterceptor("package.service/method"));
assertNull(binlog.getClientInterceptor("package.service/method", CallOptions.DEFAULT));
}
@Test
public void closeTest() throws Exception {
BinaryLogSink sink = mock(BinaryLogSink.class);
BinaryLogProviderImpl log = new BinaryLogProviderImpl(sink, "*");
verify(sink, never()).close();
log.close();
verify(sink).close();
}
}
| BinaryLogProviderImplTest |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/selection/qualifier/iterable/CityDto.java | {
"start": 252,
"end": 487
} | class ____ extends TopologyFeatureDto {
private int population;
public int getPopulation() {
return population;
}
public void setPopulation(int population) {
this.population = population;
}
}
| CityDto |
java | elastic__elasticsearch | server/src/internalClusterTest/java/org/elasticsearch/action/fieldcaps/FieldCapsWithFilterIT.java | {
"start": 1961,
"end": 2125
} | class ____ extends ESIntegTestCase {
@Override
protected boolean addMockInternalEngine() {
return false;
}
private static | FieldCapsWithFilterIT |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/rest/action/cat/RestSegmentsAction.java | {
"start": 2015,
"end": 8538
} | class ____ extends AbstractCatAction {
private static final Set<String> CAPABILITIES = Set.of("allow_closed");
@Override
public List<Route> routes() {
return List.of(new Route(GET, "/_cat/segments"), new Route(GET, "/_cat/segments/{index}"));
}
@Override
public String getName() {
return "cat_segments_action";
}
@Override
public boolean allowSystemIndexAccessByDefault() {
return true;
}
@Override
protected RestChannelConsumer doCatRequest(final RestRequest request, final NodeClient client) {
final String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(getMasterNodeTimeout(request));
RestUtils.consumeDeprecatedLocalParameter(request);
final boolean allowClosed = request.paramAsBoolean("allow_closed", false);
final IndicesOptions defaultOptions = allowClosed
? IndicesOptions.strictExpandHidden()
: IndicesOptions.strictExpandOpenAndForbidClosed();
final IndicesOptions indicesOptions = IndicesOptions.fromRequest(request, defaultOptions);
clusterStateRequest.clear().nodes(true);
final RestCancellableNodeClient cancelClient = new RestCancellableNodeClient(client, request.getHttpChannel());
return channel -> cancelClient.admin().cluster().state(clusterStateRequest, new RestActionListener<>(channel) {
@Override
public void processResponse(final ClusterStateResponse clusterStateResponse) {
final IndicesSegmentsRequest indicesSegmentsRequest = new IndicesSegmentsRequest();
indicesSegmentsRequest.indices(indices).indicesOptions(indicesOptions);
cancelClient.admin().indices().segments(indicesSegmentsRequest, new RestResponseListener<>(channel) {
@Override
public RestResponse buildResponse(final IndicesSegmentResponse indicesSegmentResponse) throws Exception {
if (request.getHttpChannel().isOpen() == false) {
throw new TaskCancelledException("response channel [" + request.getHttpChannel() + "] closed");
}
final Map<String, IndexSegments> indicesSegments = indicesSegmentResponse.getIndices();
Table tab = buildTable(request, clusterStateResponse, indicesSegments);
return RestTable.buildResponse(tab, channel);
}
});
}
});
}
@Override
protected void documentation(StringBuilder sb) {
sb.append("/_cat/segments\n");
sb.append("/_cat/segments/{index}\n");
}
@Override
protected Table getTableWithHeader(RestRequest request) {
Table table = new Table();
table.startHeaders();
table.addCell("index", "default:true;alias:i,idx;desc:index name");
table.addCell("shard", "default:true;alias:s,sh;desc:shard name");
table.addCell("prirep", "alias:p,pr,primaryOrReplica;default:true;desc:primary or replica");
table.addCell("ip", "default:true;desc:ip of node where it lives");
table.addCell("id", "default:false;desc:unique id of node where it lives");
table.addCell("segment", "default:true;alias:seg;desc:segment name");
table.addCell("generation", "default:true;alias:g,gen;text-align:right;desc:segment generation");
table.addCell("docs.count", "default:true;alias:dc,docsCount;text-align:right;desc:number of docs in segment");
table.addCell("docs.deleted", "default:true;alias:dd,docsDeleted;text-align:right;desc:number of deleted docs in segment");
table.addCell("size", "default:true;alias:si;text-align:right;desc:segment size in bytes");
table.addCell("size.memory", "default:true;alias:sm,sizeMemory;text-align:right;desc:segment memory in bytes");
table.addCell("committed", "default:true;alias:ic,isCommitted;desc:is segment committed");
table.addCell("searchable", "default:true;alias:is,isSearchable;desc:is segment searched");
table.addCell("version", "default:true;alias:v,ver;desc:version");
table.addCell("compound", "default:true;alias:ico,isCompound;desc:is segment compound");
table.endHeaders();
return table;
}
private Table buildTable(final RestRequest request, ClusterStateResponse state, Map<String, IndexSegments> indicesSegments) {
Table table = getTableWithHeader(request);
DiscoveryNodes nodes = state.getState().nodes();
for (IndexSegments indexSegments : indicesSegments.values()) {
Map<Integer, IndexShardSegments> shards = indexSegments.getShards();
for (IndexShardSegments indexShardSegments : shards.values()) {
ShardSegments[] shardSegments = indexShardSegments.shards();
for (ShardSegments shardSegment : shardSegments) {
List<Segment> segments = shardSegment.getSegments();
for (Segment segment : segments) {
table.startRow();
table.addCell(shardSegment.getShardRouting().getIndexName());
table.addCell(shardSegment.getShardRouting().getId());
table.addCell(shardSegment.getShardRouting().primary() ? "p" : "r");
table.addCell(nodes.get(shardSegment.getShardRouting().currentNodeId()).getHostAddress());
table.addCell(shardSegment.getShardRouting().currentNodeId());
table.addCell(segment.getName());
table.addCell(segment.getGeneration());
table.addCell(segment.getNumDocs());
table.addCell(segment.getDeletedDocs());
table.addCell(segment.getSize());
table.addCell(0L);
table.addCell(segment.isCommitted());
table.addCell(segment.isSearch());
table.addCell(segment.getVersion());
table.addCell(segment.isCompound());
table.endRow();
}
}
}
}
return table;
}
@Override
public Set<String> supportedCapabilities() {
return CAPABILITIES;
}
}
| RestSegmentsAction |
java | elastic__elasticsearch | modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistMethod.java | {
"start": 1507,
"end": 1653
} | class ____ {
/** Information about where this method was whitelisted from. */
public final String origin;
/**
* The | WhitelistMethod |
java | spring-projects__spring-framework | spring-r2dbc/src/test/java/org/springframework/r2dbc/core/H2TransactionalDatabaseClientIntegrationTests.java | {
"start": 841,
"end": 1421
} | class ____ extends AbstractTransactionalDatabaseClientIntegrationTests {
private static final String CREATE_TABLE_LEGOSET = """
CREATE TABLE legoset (
id serial CONSTRAINT id PRIMARY KEY,
version integer NULL,
name varchar(255) NOT NULL,
manual integer NULL
);""";
@Override
protected ConnectionFactory createConnectionFactory() {
return H2ConnectionFactory.inMemory("r2dbc-transactional");
}
@Override
protected String getCreateTableStatement() {
return CREATE_TABLE_LEGOSET;
}
}
| H2TransactionalDatabaseClientIntegrationTests |
java | micronaut-projects__micronaut-core | inject-groovy/src/main/groovy/io/micronaut/ast/groovy/visitor/GroovyElementFactory.java | {
"start": 8552,
"end": 9217
} | class ____ be a GroovyEnumElement");
}
return new GroovyEnumConstantElement(
(GroovyClassElement) declaringClass,
visitorContext,
enumConstant,
annotationMetadataFactory
);
}
@NonNull
@Override
public GroovyFieldElement newFieldElement(@NonNull ClassElement owningType,
@NonNull FieldNode field,
@NonNull ElementAnnotationMetadataFactory annotationMetadataFactory) {
if (!(owningType instanceof GroovyClassElement)) {
throw new IllegalArgumentException("Declaring | must |
java | redisson__redisson | redisson-spring-data/redisson-spring-data-33/src/main/java/org/redisson/spring/data/connection/RedissonConnection.java | {
"start": 2972,
"end": 110252
} | class ____ extends AbstractRedisConnection {
private boolean closed;
protected final Redisson redisson;
private boolean filterOkResponses = false;
CommandAsyncExecutor executorService;
private RedissonSubscription subscription;
public RedissonConnection(RedissonClient redisson) {
super();
this.redisson = (Redisson) redisson;
executorService = this.redisson.getCommandExecutor();
}
public RedissonConnection(RedissonClient redisson, boolean filterOkResponses) {
super();
this.redisson = (Redisson) redisson;
this.filterOkResponses = filterOkResponses;
executorService = this.redisson.getCommandExecutor();
}
public boolean isFilterOkResponses() {
return filterOkResponses;
}
public void setFilterOkResponses(boolean filterOkResponses) {
this.filterOkResponses = filterOkResponses;
}
@Override
public void close() throws DataAccessException {
super.close();
if (isQueueing()) {
CommandBatchService es = (CommandBatchService) executorService;
if (!es.isExecuted()) {
discard();
}
}
closed = true;
}
@Override
public boolean isClosed() {
return closed;
}
@Override
public Object getNativeConnection() {
return redisson;
}
@Override
public boolean isQueueing() {
if (executorService instanceof CommandBatchService) {
CommandBatchService es = (CommandBatchService) executorService;
return es.getOptions().getExecutionMode() == ExecutionMode.REDIS_WRITE_ATOMIC;
}
return false;
}
@Override
public boolean isPipelined() {
if (executorService instanceof CommandBatchService) {
CommandBatchService es = (CommandBatchService) executorService;
return es.getOptions().getExecutionMode() == ExecutionMode.IN_MEMORY || es.getOptions().getExecutionMode() == ExecutionMode.IN_MEMORY_ATOMIC;
}
return false;
}
public boolean isPipelinedAtomic() {
if (executorService instanceof CommandBatchService) {
CommandBatchService es = (CommandBatchService) executorService;
return es.getOptions().getExecutionMode() == ExecutionMode.IN_MEMORY_ATOMIC;
}
return false;
}
@Override
public void openPipeline() {
BatchOptions options = BatchOptions.defaults()
.executionMode(ExecutionMode.IN_MEMORY);
this.executorService = executorService.createCommandBatchService(options);
}
@Override
public List<Object> closePipeline() throws RedisPipelineException {
if (isPipelined()) {
CommandBatchService es = (CommandBatchService) executorService;
try {
BatchResult<?> result = es.execute();
filterResults(result);
if (isPipelinedAtomic()) {
return Arrays.<Object>asList((List<Object>) result.getResponses());
}
return (List<Object>) result.getResponses();
} catch (Exception ex) {
throw new RedisPipelineException(ex);
} finally {
resetConnection();
}
}
return Collections.emptyList();
}
@Override
public Object execute(String command, byte[]... args) {
for (Method method : this.getClass().getDeclaredMethods()) {
if (method.getName().equalsIgnoreCase(command)
&& Modifier.isPublic(method.getModifiers())
&& (method.getParameterTypes().length == args.length)) {
try {
Object t = execute(method, args);
if (t instanceof String) {
return ((String) t).getBytes();
}
return t;
} catch (IllegalArgumentException e) {
if (isPipelined()) {
throw new RedisPipelineException(e);
}
throw new InvalidDataAccessApiUsageException(e.getMessage(), e);
}
}
}
throw new UnsupportedOperationException();
}
private Object execute(Method method, byte[]... args) {
if (method.getParameterTypes().length > 0 && method.getParameterTypes()[0] == byte[][].class) {
return ReflectionUtils.invokeMethod(method, this, args);
}
if (args == null) {
return ReflectionUtils.invokeMethod(method, this);
}
return ReflectionUtils.invokeMethod(method, this, Arrays.asList(args).toArray());
}
<V> V syncFuture(RFuture<V> future) {
try {
return executorService.get(future);
} catch (Exception ex) {
throw transform(ex);
}
}
protected RuntimeException transform(Exception ex) {
DataAccessException exception = RedissonConnectionFactory.EXCEPTION_TRANSLATION.translate(ex);
if (exception != null) {
return exception;
}
return new RedisSystemException(ex.getMessage(), ex);
}
@Override
public Boolean exists(byte[] key) {
return read(key, StringCodec.INSTANCE, RedisCommands.EXISTS, key);
}
@Override
public Long del(byte[]... keys) {
return write(keys[0], LongCodec.INSTANCE, RedisCommands.DEL, Arrays.asList(keys).toArray());
}
@Override
public Long unlink(byte[]... keys) {
return write(keys[0], LongCodec.INSTANCE, RedisCommands.UNLINK, Arrays.asList(keys).toArray());
}
private static final RedisStrictCommand<DataType> TYPE = new RedisStrictCommand<DataType>("TYPE", new DataTypeConvertor());
@Override
public DataType type(byte[] key) {
return read(key, StringCodec.INSTANCE, TYPE, key);
}
private static final RedisStrictCommand<Set<byte[]>> KEYS = new RedisStrictCommand<Set<byte[]>>("KEYS", new ObjectSetReplayDecoder<byte[]>());
@Override
public Set<byte[]> keys(byte[] pattern) {
if (isQueueing()) {
return read(null, ByteArrayCodec.INSTANCE, KEYS, pattern);
}
List<CompletableFuture<Set<byte[]>>> futures = executorService.readAllAsync(ByteArrayCodec.INSTANCE, KEYS, pattern);
CompletableFuture<Void> ff = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
CompletableFuture<Set<byte[]>> future = ff.thenApply(r -> {
return futures.stream().flatMap(f -> f.getNow(new HashSet<>()).stream()).collect(Collectors.toSet());
}).toCompletableFuture();
return sync(new CompletableFutureWrapper<>(future));
}
@Override
public Cursor<byte[]> scan(ScanOptions options) {
return new ScanCursor<byte[]>(Cursor.CursorId.initial(), options) {
private RedisClient client;
private Iterator<MasterSlaveEntry> entries = executorService.getConnectionManager().getEntrySet().iterator();
private MasterSlaveEntry entry = entries.next();
@Override
protected ScanIteration<byte[]> doScan(CursorId cursorId, ScanOptions options) {
if (isQueueing() || isPipelined()) {
throw new UnsupportedOperationException("'SSCAN' cannot be called in pipeline / transaction mode.");
}
if (entry == null) {
return null;
}
List<Object> args = new ArrayList<Object>();
if (CursorId.of("101010101010101010").equals(cursorId)) {
cursorId = CursorId.initial();
}
args.add(cursorId);
if (options.getPattern() != null) {
args.add("MATCH");
args.add(options.getPattern());
}
if (options.getCount() != null) {
args.add("COUNT");
args.add(options.getCount());
}
RFuture<ListScanResult<byte[]>> f = executorService.readAsync(client, entry, ByteArrayCodec.INSTANCE, RedisCommands.SCAN, args.toArray());
ListScanResult<byte[]> res = syncFuture(f);
String pos = res.getPos();
client = res.getRedisClient();
if (CursorId.isInitial(pos)) {
if (entries.hasNext()) {
pos = "101010101010101010";
entry = entries.next();
client = null;
} else {
entry = null;
}
}
return new ScanIteration<byte[]>(CursorId.of(pos), res.getValues());
}
}.open();
}
@Override
public byte[] randomKey() {
if (isQueueing()) {
return read(null, ByteArrayCodec.INSTANCE, RedisCommands.RANDOM_KEY);
}
RFuture<byte[]> f = executorService.readRandomAsync(ByteArrayCodec.INSTANCE, RedisCommands.RANDOM_KEY);
return sync(f);
}
@Override
public void rename(byte[] oldName, byte[] newName) {
write(oldName, StringCodec.INSTANCE, RedisCommands.RENAME, oldName, newName);
}
@Override
public Boolean renameNX(byte[] oldName, byte[] newName) {
return write(oldName, StringCodec.INSTANCE, RedisCommands.RENAMENX, oldName, newName);
}
private static final RedisStrictCommand<Boolean> EXPIRE = new RedisStrictCommand<Boolean>("EXPIRE", new BooleanReplayConvertor());
@Override
public Boolean expire(byte[] key, long seconds) {
return write(key, StringCodec.INSTANCE, EXPIRE, key, seconds);
}
@Override
public Boolean pExpire(byte[] key, long millis) {
return write(key, StringCodec.INSTANCE, RedisCommands.PEXPIRE, key, millis);
}
private static final RedisStrictCommand<Boolean> EXPIREAT = new RedisStrictCommand<Boolean>("EXPIREAT", new BooleanReplayConvertor());
@Override
public Boolean expireAt(byte[] key, long unixTime) {
return write(key, StringCodec.INSTANCE, EXPIREAT, key, unixTime);
}
@Override
public Boolean pExpireAt(byte[] key, long unixTimeInMillis) {
return write(key, StringCodec.INSTANCE, RedisCommands.PEXPIREAT, key, unixTimeInMillis);
}
@Override
public Boolean persist(byte[] key) {
return write(key, StringCodec.INSTANCE, RedisCommands.PERSIST, key);
}
@Override
public Boolean move(byte[] key, int dbIndex) {
return write(key, StringCodec.INSTANCE, RedisCommands.MOVE, key, dbIndex);
}
private static final RedisStrictCommand<Long> TTL = new RedisStrictCommand<Long>("TTL");
@Override
public Long ttl(byte[] key) {
return read(key, StringCodec.INSTANCE, TTL, key);
}
protected <T> T sync(RFuture<T> f) {
if (isPipelined()) {
return null;
}
if (isQueueing()) {
((BatchPromise)f.toCompletableFuture()).getSentPromise().join();
return null;
}
return syncFuture(f);
}
@Override
public Long ttl(byte[] key, TimeUnit timeUnit) {
return read(key, StringCodec.INSTANCE, new RedisStrictCommand<Long>("TTL", new SecondsConvertor(timeUnit, TimeUnit.SECONDS)), key);
}
@Override
public Long pTtl(byte[] key) {
return read(key, StringCodec.INSTANCE, RedisCommands.PTTL, key);
}
@Override
public Long pTtl(byte[] key, TimeUnit timeUnit) {
return read(key, StringCodec.INSTANCE, new RedisStrictCommand<Long>("PTTL", new SecondsConvertor(timeUnit, TimeUnit.MILLISECONDS)), key);
}
@Override
public List<byte[]> sort(byte[] key, SortParameters sortParams) {
List<Object> params = new ArrayList<Object>();
params.add(key);
if (sortParams != null) {
if (sortParams.getByPattern() != null) {
params.add("BY");
params.add(sortParams.getByPattern());
}
if (sortParams.getLimit() != null) {
params.add("LIMIT");
if (sortParams.getLimit().getStart() != -1) {
params.add(sortParams.getLimit().getStart());
}
if (sortParams.getLimit().getCount() != -1) {
params.add(sortParams.getLimit().getCount());
}
}
if (sortParams.getGetPattern() != null) {
for (byte[] pattern : sortParams.getGetPattern()) {
params.add("GET");
params.add(pattern);
}
}
if (sortParams.getOrder() != null) {
params.add(sortParams.getOrder());
}
Boolean isAlpha = sortParams.isAlphabetic();
if (isAlpha != null && isAlpha) {
params.add("ALPHA");
}
}
return read(key, ByteArrayCodec.INSTANCE, RedisCommands.SORT_LIST, params.toArray());
}
private static final RedisCommand<Long> SORT_TO = new RedisCommand<Long>("SORT");
@Override
public Long sort(byte[] key, SortParameters sortParams, byte[] storeKey) {
List<Object> params = new ArrayList<Object>();
params.add(key);
if (sortParams != null) {
if (sortParams.getByPattern() != null) {
params.add("BY");
params.add(sortParams.getByPattern());
}
if (sortParams.getLimit() != null) {
params.add("LIMIT");
if (sortParams.getLimit().getStart() != -1) {
params.add(sortParams.getLimit().getStart());
}
if (sortParams.getLimit().getCount() != -1) {
params.add(sortParams.getLimit().getCount());
}
}
if (sortParams.getGetPattern() != null) {
for (byte[] pattern : sortParams.getGetPattern()) {
params.add("GET");
params.add(pattern);
}
}
if (sortParams.getOrder() != null) {
params.add(sortParams.getOrder());
}
Boolean isAlpha = sortParams.isAlphabetic();
if (isAlpha != null && isAlpha) {
params.add("ALPHA");
}
}
params.add("STORE");
params.add(storeKey);
return read(key, ByteArrayCodec.INSTANCE, SORT_TO, params.toArray());
}
    // DUMP: serialized (RDB) representation of the value, or null if absent.
    @Override
    public byte[] dump(byte[] key) {
        return read(key, ByteArrayCodec.INSTANCE, RedisCommands.DUMP, key);
    }
    // RESTORE a previously DUMPed value under key with the given TTL (0 = none).
    @Override
    public void restore(byte[] key, long ttlInMillis, byte[] serializedValue) {
        write(key, StringCodec.INSTANCE, RedisCommands.RESTORE, key, ttlInMillis, serializedValue);
    }
    @Override
    public byte[] get(byte[] key) {
        return read(key, ByteArrayCodec.INSTANCE, RedisCommands.GET, key);
    }
    // GETSET: atomically sets value and returns the old value.
    @Override
    public byte[] getSet(byte[] key, byte[] value) {
        return write(key, ByteArrayCodec.INSTANCE, RedisCommands.GETSET, key, value);
    }
    private static final RedisCommand<List<Object>> MGET = new RedisCommand<List<Object>>("MGET", new ObjectListReplayDecoder<Object>());
    // MGET; routed by the first key.
    @Override
    public List<byte[]> mGet(byte[]... keys) {
        return read(keys[0], ByteArrayCodec.INSTANCE, MGET, Arrays.asList(keys).toArray());
    }
    private static final RedisCommand<Boolean> SET = new RedisCommand<>("SET", new BooleanNullSafeReplayConvertor());
    @Override
    public Boolean set(byte[] key, byte[] value) {
        return write(key, StringCodec.INSTANCE, SET, key, value);
    }
    // SET with optional expiration and existence condition. Maps Spring's
    // Expiration/SetOption onto SET's PX / NX / XX modifiers.
    @Override
    public Boolean set(byte[] key, byte[] value, Expiration expiration, SetOption option) {
        if (expiration == null) {
            return set(key, value);
        } else if (expiration.isPersistent()) {
            // No TTL requested: plain SET plus the existence condition.
            if (option == null || option == SetOption.UPSERT) {
                return set(key, value);
            }
            if (option == SetOption.SET_IF_ABSENT) {
                return write(key, StringCodec.INSTANCE, SET, key, value, "NX");
            }
            if (option == SetOption.SET_IF_PRESENT) {
                return write(key, StringCodec.INSTANCE, SET, key, value, "XX");
            }
        } else {
            // TTL always sent in milliseconds via PX.
            if (option == null || option == SetOption.UPSERT) {
                return write(key, StringCodec.INSTANCE, SET, key, value, "PX", expiration.getExpirationTimeInMilliseconds());
            }
            if (option == SetOption.SET_IF_ABSENT) {
                return write(key, StringCodec.INSTANCE, SET, key, value, "PX", expiration.getExpirationTimeInMilliseconds(), "NX");
            }
            if (option == SetOption.SET_IF_PRESENT) {
                return write(key, StringCodec.INSTANCE, SET, key, value, "PX", expiration.getExpirationTimeInMilliseconds(), "XX");
            }
        }
        // Unreachable for known SetOption values; defends against new enum constants.
        throw new IllegalArgumentException();
    }
    @Override
    public Boolean setNX(byte[] key, byte[] value) {
        return write(key, StringCodec.INSTANCE, RedisCommands.SETNX, key, value);
    }
    private static final RedisCommand<Boolean> SETEX = new RedisCommand<Boolean>("SETEX", new BooleanReplayConvertor());
    // SETEX: set value with TTL in seconds.
    @Override
    public Boolean setEx(byte[] key, long seconds, byte[] value) {
        return write(key, StringCodec.INSTANCE, SETEX, key, seconds, value);
    }
    private static final RedisCommand<Boolean> PSETEX = new RedisCommand<Boolean>("PSETEX", new BooleanReplayConvertor());
    // PSETEX: set value with TTL in milliseconds.
    @Override
    public Boolean pSetEx(byte[] key, long milliseconds, byte[] value) {
        return write(key, StringCodec.INSTANCE, PSETEX, key, milliseconds, value);
    }
    private static final RedisCommand<Boolean> MSET = new RedisCommand<Boolean>("MSET", new BooleanReplayConvertor());
    // MSET; routed by the first key in the map's iteration order.
    @Override
    public Boolean mSet(Map<byte[], byte[]> tuple) {
        List<byte[]> params = convert(tuple);
        return write(tuple.keySet().iterator().next(), StringCodec.INSTANCE, MSET, params.toArray());
    }
    // Flattens a key/value map into the alternating key,value,... argument
    // list that MSET/MSETNX expect.
    protected List<byte[]> convert(Map<byte[], byte[]> tuple) {
        List<byte[]> params = new ArrayList<byte[]>(tuple.size()*2);
        for (Entry<byte[], byte[]> entry : tuple.entrySet()) {
            params.add(entry.getKey());
            params.add(entry.getValue());
        }
        return params;
    }
    // MSETNX: sets all pairs only if none of the keys exist.
    @Override
    public Boolean mSetNX(Map<byte[], byte[]> tuple) {
        List<byte[]> params = convert(tuple);
        return write(tuple.keySet().iterator().next(), StringCodec.INSTANCE, RedisCommands.MSETNX, params.toArray());
    }
    // Numeric string commands: INCR/INCRBY/INCRBYFLOAT/DECR/DECRBY, plus APPEND.
    @Override
    public Long incr(byte[] key) {
        return write(key, StringCodec.INSTANCE, RedisCommands.INCR, key);
    }
    @Override
    public Long incrBy(byte[] key, long value) {
        return write(key, StringCodec.INSTANCE, RedisCommands.INCRBY, key, value);
    }
    // Double increment serialized via plain-string form (no scientific notation).
    @Override
    public Double incrBy(byte[] key, double value) {
        return write(key, StringCodec.INSTANCE, RedisCommands.INCRBYFLOAT, key, BigDecimal.valueOf(value).toPlainString());
    }
    @Override
    public Long decr(byte[] key) {
        return write(key, StringCodec.INSTANCE, RedisCommands.DECR, key);
    }
    private static final RedisStrictCommand<Long> DECRBY = new RedisStrictCommand<Long>("DECRBY");
    @Override
    public Long decrBy(byte[] key, long value) {
        return write(key, StringCodec.INSTANCE, DECRBY, key, value);
    }
    private static final RedisStrictCommand<Long> APPEND = new RedisStrictCommand<Long>("APPEND");
    // APPEND: returns the new string length.
    @Override
    public Long append(byte[] key, byte[] value) {
        return write(key, StringCodec.INSTANCE, APPEND, key, value);
    }
    private static final RedisCommand<Object> GETRANGE = new RedisCommand<Object>("GETRANGE");
    // GETRANGE: substring of the value between begin and end (inclusive).
    @Override
    public byte[] getRange(byte[] key, long begin, long end) {
        return read(key, ByteArrayCodec.INSTANCE, GETRANGE, key, begin, end);
    }
    private static final RedisCommand<Void> SETRANGE = new RedisCommand<Void>("SETRANGE", new VoidReplayConvertor());
    // SETRANGE: overwrite part of the value starting at offset.
    @Override
    public void setRange(byte[] key, byte[] value, long offset) {
        write(key, ByteArrayCodec.INSTANCE, SETRANGE, key, offset, value);
    }
    @Override
    public Boolean getBit(byte[] key, long offset) {
        return read(key, StringCodec.INSTANCE, RedisCommands.GETBIT, key, offset);
    }
    // SETBIT: returns the previous bit value; boolean mapped to 1/0.
    @Override
    public Boolean setBit(byte[] key, long offset, boolean value) {
        return write(key, StringCodec.INSTANCE, RedisCommands.SETBIT, key, offset, value ? 1 : 0);
    }
    @Override
    public Long bitCount(byte[] key) {
        return read(key, StringCodec.INSTANCE, RedisCommands.BITCOUNT, key);
    }
    // BITCOUNT over a byte range (begin/end are byte indexes).
    @Override
    public Long bitCount(byte[] key, long begin, long end) {
        return read(key, StringCodec.INSTANCE, RedisCommands.BITCOUNT, key, begin, end);
    }
    private static final RedisStrictCommand<Long> BITOP = new RedisStrictCommand<Long>("BITOP");
    // BITOP op destination key [key ...]; NOT takes exactly one source key.
    @Override
    public Long bitOp(BitOperation op, byte[] destination, byte[]... keys) {
        if (op == BitOperation.NOT && keys.length > 1) {
            throw new UnsupportedOperationException("NOT operation doesn't support more than single source key");
        }
        List<Object> params = new ArrayList<Object>(keys.length + 2);
        params.add(op);
        params.add(destination);
        params.addAll(Arrays.asList(keys));
        return write(keys[0], StringCodec.INSTANCE, BITOP, params.toArray());
    }
    @Override
    public Long strLen(byte[] key) {
        return read(key, StringCodec.INSTANCE, RedisCommands.STRLEN, key);
    }
    // List commands. Push variants return the new list length.
    private static final RedisStrictCommand<Long> RPUSH = new RedisStrictCommand<Long>("RPUSH");
    @Override
    public Long rPush(byte[] key, byte[]... values) {
        List<Object> args = new ArrayList<Object>(values.length + 1);
        args.add(key);
        args.addAll(Arrays.asList(values));
        return write(key, StringCodec.INSTANCE, RPUSH, args.toArray());
    }
    private static final RedisStrictCommand<Long> LPUSH = new RedisStrictCommand<Long>("LPUSH");
    @Override
    public Long lPush(byte[] key, byte[]... values) {
        List<Object> args = new ArrayList<Object>(values.length + 1);
        args.add(key);
        args.addAll(Arrays.asList(values));
        return write(key, StringCodec.INSTANCE, LPUSH, args.toArray());
    }
    // *PUSHX: push only if the list already exists.
    private static final RedisStrictCommand<Long> RPUSHX = new RedisStrictCommand<Long>("RPUSHX");
    @Override
    public Long rPushX(byte[] key, byte[] value) {
        return write(key, StringCodec.INSTANCE, RPUSHX, key, value);
    }
    private static final RedisStrictCommand<Long> LPUSHX = new RedisStrictCommand<Long>("LPUSHX");
    @Override
    public Long lPushX(byte[] key, byte[] value) {
        return write(key, StringCodec.INSTANCE, LPUSHX, key, value);
    }
    private static final RedisStrictCommand<Long> LLEN = new RedisStrictCommand<Long>("LLEN");
    @Override
    public Long lLen(byte[] key) {
        return read(key, StringCodec.INSTANCE, LLEN, key);
    }
    @Override
    public List<byte[]> lRange(byte[] key, long start, long end) {
        return read(key, ByteArrayCodec.INSTANCE, LRANGE, key, start, end);
    }
    // LTRIM: keep only the elements in [start, end].
    @Override
    public void lTrim(byte[] key, long start, long end) {
        write(key, StringCodec.INSTANCE, RedisCommands.LTRIM, key, start, end);
    }
    @Override
    public byte[] lIndex(byte[] key, long index) {
        return read(key, ByteArrayCodec.INSTANCE, RedisCommands.LINDEX, key, index);
    }
    // LINSERT BEFORE|AFTER pivot value; returns new length or -1 if pivot missing.
    private static final RedisStrictCommand<Long> LINSERT = new RedisStrictCommand<Long>("LINSERT");
    @Override
    public Long lInsert(byte[] key, Position where, byte[] pivot, byte[] value) {
        return write(key, StringCodec.INSTANCE, LINSERT, key, where, pivot, value);
    }
    // Commands whose status ("OK") replies are stripped from batch results
    // when filterOkResponses is set -- Spring expects no entry for these.
    private final List<String> commandsToRemove = Arrays.asList("SET",
            "RESTORE", "LTRIM", "SETEX", "SETRANGE", "FLUSHDB", "LSET", "MSET", "HMSET", "RENAME");
    // Positions (within the current batch) of responses to strip.
    private final List<Integer> indexToRemove = new ArrayList<Integer>();
    // Index of the last command queued in the current batch; -1 outside a batch.
    private int index = -1;
    // Dispatches a mutating command (master node) and resolves it via sync().
    <T> T write(byte[] key, Codec codec, RedisCommand<?> command, Object... params) {
        RFuture<T> f = executorService.writeAsync(key, codec, command, params);
        indexCommand(command);
        return sync(f);
    }
    // Batch bookkeeping: advances the position counter and records positions
    // whose "OK" reply must later be filtered out of exec() results.
    protected void indexCommand(RedisCommand<?> command) {
        if (isQueueing() || isPipelined()) {
            index++;
            if (filterOkResponses && commandsToRemove.contains(command.getName())) {
                indexToRemove.add(index);
            }
        }
    }
    // Dispatches a read-only command (may route to a replica) via sync().
    <T> T read(byte[] key, Codec codec, RedisCommand<?> command, Object... params) {
        RFuture<T> f = executorService.readAsync(key, codec, command, params);
        indexCommand(command);
        return sync(f);
    }
    @Override
    public void lSet(byte[] key, long index, byte[] value) {
        write(key, StringCodec.INSTANCE, RedisCommands.LSET, key, index, value);
    }
    // LREM: removes count occurrences of value; returns number removed.
    private static final RedisStrictCommand<Long> LREM = new RedisStrictCommand<Long>("LREM");
    @Override
    public Long lRem(byte[] key, long count, byte[] value) {
        return write(key, StringCodec.INSTANCE, LREM, key, count, value);
    }
    @Override
    public byte[] lPop(byte[] key) {
        return write(key, ByteArrayCodec.INSTANCE, RedisCommands.LPOP, key);
    }
    @Override
    public byte[] rPop(byte[] key) {
        return write(key, ByteArrayCodec.INSTANCE, RedisCommands.RPOP, key);
    }
    // Blocking pops: keys first, timeout (seconds) last, per Redis syntax.
    @Override
    public List<byte[]> bLPop(int timeout, byte[]... keys) {
        List<Object> params = new ArrayList<Object>(keys.length + 1);
        params.addAll(Arrays.asList(keys));
        params.add(timeout);
        return write(keys[0], ByteArrayCodec.INSTANCE, RedisCommands.BLPOP, params.toArray());
    }
    @Override
    public List<byte[]> bRPop(int timeout, byte[]... keys) {
        List<Object> params = new ArrayList<Object>(keys.length + 1);
        params.addAll(Arrays.asList(keys));
        params.add(timeout);
        return write(keys[0], ByteArrayCodec.INSTANCE, RedisCommands.BRPOP, params.toArray());
    }
    @Override
    public byte[] rPopLPush(byte[] srcKey, byte[] dstKey) {
        return write(srcKey, ByteArrayCodec.INSTANCE, RedisCommands.RPOPLPUSH, srcKey, dstKey);
    }
    // Blocking RPOPLPUSH with timeout in seconds.
    @Override
    public byte[] bRPopLPush(int timeout, byte[] srcKey, byte[] dstKey) {
        return write(srcKey, ByteArrayCodec.INSTANCE, RedisCommands.BRPOPLPUSH, srcKey, dstKey, timeout);
    }
private static final RedisCommand<List<Long>> LPOS = new RedisCommand<>("LPOS", new ObjectListReplayDecoder<>());
@Override
public List<Long> lPos(byte[] key, byte[] element, Integer rank, Integer count) {
List<Object> args = new ArrayList<>();
args.add(key);
args.add(element);
if (rank != null) {
args.add("RANK");
args.add(rank);
}
if (count != null) {
args.add("COUNT");
args.add(count);
}
Object read = read(key, ByteArrayCodec.INSTANCE, LPOS, args.toArray());
if (read == null) {
return Collections.emptyList();
} else if (read instanceof Long) {
return Collections.singletonList((Long) read);
} else {
return (List<Long>) read;
}
}
    // Set commands.
    private static final RedisCommand<Long> SADD = new RedisCommand<Long>("SADD");
    @Override
    public Long sAdd(byte[] key, byte[]... values) {
        List<Object> args = new ArrayList<Object>(values.length + 1);
        args.add(key);
        args.addAll(Arrays.asList(values));
        return write(key, StringCodec.INSTANCE, SADD, args.toArray());
    }
    private static final RedisStrictCommand<Long> SREM = new RedisStrictCommand<Long>("SREM");
    @Override
    public Long sRem(byte[] key, byte[]... values) {
        List<Object> args = new ArrayList<Object>(values.length + 1);
        args.add(key);
        args.addAll(Arrays.asList(values));
        return write(key, StringCodec.INSTANCE, SREM, args.toArray());
    }
    @Override
    public byte[] sPop(byte[] key) {
        return write(key, ByteArrayCodec.INSTANCE, RedisCommands.SPOP_SINGLE, key);
    }
    // SPOP with count returns a list reply, hence the dedicated decoder.
    private static final RedisCommand<List<Object>> SPOP = new RedisCommand<List<Object>>("SPOP", new ObjectListReplayDecoder<Object>());
    @Override
    public List<byte[]> sPop(byte[] key, long count) {
        return write(key, ByteArrayCodec.INSTANCE, SPOP, key, count);
    }
    @Override
    public Boolean sMove(byte[] srcKey, byte[] destKey, byte[] value) {
        return write(srcKey, StringCodec.INSTANCE, RedisCommands.SMOVE, srcKey, destKey, value);
    }
    private static final RedisStrictCommand<Long> SCARD = new RedisStrictCommand<Long>("SCARD");
    @Override
    public Long sCard(byte[] key) {
        return read(key, StringCodec.INSTANCE, SCARD, key);
    }
    @Override
    public Boolean sIsMember(byte[] key, byte[] value) {
        return read(key, StringCodec.INSTANCE, RedisCommands.SISMEMBER, key, value);
    }
    // Multi-key set algebra; all routed by the first key.
    @Override
    public Set<byte[]> sInter(byte[]... keys) {
        return write(keys[0], ByteArrayCodec.INSTANCE, RedisCommands.SINTER, Arrays.asList(keys).toArray());
    }
    @Override
    public Long sInterStore(byte[] destKey, byte[]... keys) {
        List<Object> args = new ArrayList<Object>(keys.length + 1);
        args.add(destKey);
        args.addAll(Arrays.asList(keys));
        return write(keys[0], StringCodec.INSTANCE, RedisCommands.SINTERSTORE, args.toArray());
    }
    @Override
    public Set<byte[]> sUnion(byte[]... keys) {
        return write(keys[0], ByteArrayCodec.INSTANCE, RedisCommands.SUNION, Arrays.asList(keys).toArray());
    }
    @Override
    public Long sUnionStore(byte[] destKey, byte[]... keys) {
        List<Object> args = new ArrayList<Object>(keys.length + 1);
        args.add(destKey);
        args.addAll(Arrays.asList(keys));
        return write(keys[0], StringCodec.INSTANCE, RedisCommands.SUNIONSTORE, args.toArray());
    }
    @Override
    public Set<byte[]> sDiff(byte[]... keys) {
        return write(keys[0], ByteArrayCodec.INSTANCE, RedisCommands.SDIFF, Arrays.asList(keys).toArray());
    }
    @Override
    public Long sDiffStore(byte[] destKey, byte[]... keys) {
        List<Object> args = new ArrayList<Object>(keys.length + 1);
        args.add(destKey);
        args.addAll(Arrays.asList(keys));
        return write(keys[0], StringCodec.INSTANCE, RedisCommands.SDIFFSTORE, args.toArray());
    }
    @Override
    public Set<byte[]> sMembers(byte[] key) {
        return read(key, ByteArrayCodec.INSTANCE, RedisCommands.SMEMBERS, key);
    }
    @Override
    public byte[] sRandMember(byte[] key) {
        return read(key, ByteArrayCodec.INSTANCE, RedisCommands.SRANDMEMBER_SINGLE, key);
    }
    private static final RedisCommand<List<Object>> SRANDMEMBER = new RedisCommand<>("SRANDMEMBER", new ObjectListReplayDecoder<>());
    @Override
    public List<byte[]> sRandMember(byte[] key, long count) {
        return read(key, ByteArrayCodec.INSTANCE, SRANDMEMBER, key, count);
    }
    // SSCAN as a Spring Cursor. Each doScan() round-trip pins subsequent
    // iterations to the same Redis node (the cursor state lives server-side
    // on that node), hence the captured client.
    @Override
    public Cursor<byte[]> sScan(byte[] key, ScanOptions options) {
        return new KeyBoundCursor<byte[]>(key, 0, options) {
            // Node that served the first SSCAN; reused for cursor continuity.
            private RedisClient client;
            @Override
            protected ScanIteration<byte[]> doScan(byte[] key, CursorId cursorId, ScanOptions options) {
                if (isQueueing() || isPipelined()) {
                    throw new UnsupportedOperationException("'SSCAN' cannot be called in pipeline / transaction mode.");
                }
                List<Object> args = new ArrayList<Object>();
                args.add(key);
                args.add(cursorId.getCursorId());
                if (options.getPattern() != null) {
                    args.add("MATCH");
                    args.add(options.getPattern());
                }
                if (options.getCount() != null) {
                    args.add("COUNT");
                    args.add(options.getCount());
                }
                RFuture<ListScanResult<byte[]>> f = executorService.readAsync(client, key, ByteArrayCodec.INSTANCE, RedisCommands.SSCAN, args.toArray());
                ListScanResult<byte[]> res = syncFuture(f);
                client = res.getRedisClient();
                // Cursor ids are unsigned 64-bit values.
                return new ScanIteration<byte[]>(Long.parseUnsignedLong(res.getPos()), res.getValues());
            }
        }.open();
    }
@Override
public Boolean zAdd(byte[] key, double score, byte[] value, ZAddArgs args) {
List<Object> params = new ArrayList<>();
params.add(key);
if (args.contains(ZAddArgs.Flag.XX)) {
params.add("XX");
}
if (args.contains(ZAddArgs.Flag.NX)) {
params.add("NX");
}
if (args.contains(ZAddArgs.Flag.GT)) {
params.add("GT");
}
if (args.contains(ZAddArgs.Flag.LT)) {
params.add("LT");
}
if (args.contains(ZAddArgs.Flag.CH)) {
params.add("CH");
}
params.add(BigDecimal.valueOf(score).toPlainString());
params.add(value);
return write(key, StringCodec.INSTANCE, RedisCommands.ZADD_BOOL, params.toArray());
}
    // ZADD with multiple (score, member) tuples; same flag handling as the
    // single-member overload. Returns the number of elements added
    // (or changed, when CH is set).
    @Override
    public Long zAdd(byte[] key, Set<Tuple> tuples, ZAddArgs args) {
        List<Object> params = new ArrayList<>(tuples.size()*2+1);
        params.add(key);
        if (args.contains(ZAddArgs.Flag.XX)) {
            params.add("XX");
        }
        if (args.contains(ZAddArgs.Flag.NX)) {
            params.add("NX");
        }
        if (args.contains(ZAddArgs.Flag.GT)) {
            params.add("GT");
        }
        if (args.contains(ZAddArgs.Flag.LT)) {
            params.add("LT");
        }
        if (args.contains(ZAddArgs.Flag.CH)) {
            params.add("CH");
        }
        for (Tuple entry : tuples) {
            // Plain-string form avoids scientific notation in score arguments.
            params.add(BigDecimal.valueOf(entry.getScore()).toPlainString());
            params.add(entry.getValue());
        }
        return write(key, StringCodec.INSTANCE, RedisCommands.ZADD, params.toArray());
    }
    @Override
    public Long zRem(byte[] key, byte[]... values) {
        List<Object> params = new ArrayList<Object>(values.length+1);
        params.add(key);
        params.addAll(Arrays.asList(values));
        return write(key, StringCodec.INSTANCE, RedisCommands.ZREM_LONG, params.toArray());
    }
@Override
public Double zIncrBy(byte[] key, double increment, byte[] value) {
return write(key, DoubleCodec.INSTANCE, RedisCommands.ZINCRBY,
key, new BigDecimal(increment).toPlainString(), value);
}
    // ZRANK/ZREVRANK: 0-based position of the member, or null when absent.
    @Override
    public Long zRank(byte[] key, byte[] value) {
        return read(key, StringCodec.INSTANCE, RedisCommands.ZRANK, key, value);
    }
    @Override
    public Long zRevRank(byte[] key, byte[] value) {
        return read(key, StringCodec.INSTANCE, RedisCommands.ZREVRANK, key, value);
    }
    private static final RedisCommand<Set<Object>> ZRANGE = new RedisCommand<Set<Object>>("ZRANGE", new ObjectSetReplayDecoder<Object>());
    @Override
    public Set<byte[]> zRange(byte[] key, long start, long end) {
        return read(key, ByteArrayCodec.INSTANCE, ZRANGE, key, start, end);
    }
    // WITHSCORES variants: RESP2 and RESP3 return differently shaped replies,
    // hence the two decoder variants selected at call time.
    private static final RedisCommand<Set<Tuple>> ZRANGE_ENTRY = new RedisCommand<Set<Tuple>>("ZRANGE", new ScoredSortedSetReplayDecoder());
    private static final RedisCommand<Set<Tuple>> ZRANGE_ENTRY_V2 = new RedisCommand<Set<Tuple>>("ZRANGE",
            new ListMultiDecoder2(new ObjectSetReplayDecoder(), new ScoredSortedSetReplayDecoderV2()));
    @Override
    public Set<Tuple> zRangeWithScores(byte[] key, long start, long end) {
        if (executorService.getServiceManager().isResp3()) {
            return read(key, ByteArrayCodec.INSTANCE, ZRANGE_ENTRY_V2, key, start, end, "WITHSCORES");
        }
        return read(key, ByteArrayCodec.INSTANCE, ZRANGE_ENTRY, key, start, end, "WITHSCORES");
    }
    // Renders a Spring Range.Bound as a Redis range argument.
    // Bracket rules: exclusive bounds get a "(" prefix; inclusive non-Double
    // bounds (lexicographic ranges) get a "[" prefix; inclusive Double bounds
    // (score ranges) get no prefix, since bare numbers are inclusive in
    // ZRANGEBYSCORE syntax. Infinite doubles map to "+inf"/"-inf"; missing
    // bounds fall back to defaultValue ("-inf"/"+inf" or "-"/"+").
    private String value(org.springframework.data.domain.Range.Bound<?> boundary, String defaultValue) {
        if (boundary == null) {
            return defaultValue;
        }
        Object score = boundary.getValue().orElse(null);
        if (score == null) {
            return defaultValue;
        }
        StringBuilder element = new StringBuilder();
        if (!boundary.isInclusive()) {
            element.append("(");
        } else {
            if (!(score instanceof Double)) {
                element.append("[");
            }
        }
        if (score instanceof Double) {
            if (Double.isInfinite((Double) score)) {
                element.append((Double)score > 0 ? "+inf" : "-inf");
            } else {
                // Plain-string form avoids scientific notation.
                element.append(BigDecimal.valueOf((Double)score).toPlainString());
            }
        } else {
            if (score instanceof byte[]) {
                element.append(new String((byte[]) score, StandardCharsets.UTF_8));
            } else {
                element.append(score);
            }
        }
        return element.toString();
    }
    private static final RedisCommand<Set<Tuple>> ZRANGEBYSCORE = new RedisCommand<Set<Tuple>>("ZRANGEBYSCORE", new ScoredSortedSetReplayDecoder());
    // RESP3 connections use a different reply decoder.
    private static final RedisCommand<Set<Tuple>> ZRANGEBYSCORE_V2 = new RedisCommand<Set<Tuple>>("ZRANGEBYSCORE",
            new ListMultiDecoder2(new ObjectSetReplayDecoder(), new ScoredSortedSetReplayDecoderV2()));
    // ZRANGEBYSCORE key min max WITHSCORES [LIMIT offset count]
    @Override
    public Set<Tuple> zRangeByScoreWithScores(byte[] key, org.springframework.data.domain.Range range, org.springframework.data.redis.connection.Limit limit) {
        String min = value(range.getLowerBound(), "-inf");
        String max = value(range.getUpperBound(), "+inf");
        List<Object> args = new ArrayList<Object>();
        args.add(key);
        args.add(min);
        args.add(max);
        args.add("WITHSCORES");
        if (limit != null) {
            args.add("LIMIT");
            args.add(limit.getOffset());
            args.add(limit.getCount());
        }
        if (executorService.getServiceManager().isResp3()) {
            return read(key, ByteArrayCodec.INSTANCE, ZRANGEBYSCORE_V2, args.toArray());
        }
        return read(key, ByteArrayCodec.INSTANCE, ZRANGEBYSCORE, args.toArray());
    }
private static final RedisCommand<Set<Object>> ZREVRANGE = new RedisCommand<Set<Object>>("ZREVRANGE", new ObjectSetReplayDecoder<Object>());
@Override
public Set<byte[]> zRevRange(byte[] key, long start, long end) {
return read(key, ByteArrayCodec.INSTANCE, ZREVRANGE, key, start, end);
}
private static final RedisCommand<Set<Tuple>> ZREVRANGE_ENTRY = new RedisCommand<Set<Tuple>>("ZREVRANGE", new ScoredSortedSetReplayDecoder());
private static final RedisCommand<Set<Tuple>> ZREVRANGE_ENTRY_V2 = new RedisCommand("ZREVRANGE",
new ListMultiDecoder2(new ObjectSetReplayDecoder(), new ScoredSortedSetReplayDecoderV2()));
@Override
public Set<Tuple> zRevRangeWithScores(byte[] key, long start, long end) {
if (executorService.getServiceManager().isResp3()) {
return read(key, ByteArrayCodec.INSTANCE, ZREVRANGE_ENTRY_V2, key, start, end, "WITHSCORES");
}
return read(key, ByteArrayCodec.INSTANCE, ZREVRANGE_ENTRY, key, start, end, "WITHSCORES");
}
    @Override
    public Set<byte[]> zRevRangeByScore(byte[] key, double min, double max) {
        return zRevRangeByScore(key, org.springframework.data.domain.Range.closed(min, max));
    }
    private static final RedisCommand<Set<byte[]>> ZREVRANGEBYSCORE = new RedisCommand<Set<byte[]>>("ZREVRANGEBYSCORE", new ObjectSetReplayDecoder<byte[]>());
    private static final RedisCommand<Set<Tuple>> ZREVRANGEBYSCOREWITHSCORES = new RedisCommand<Set<Tuple>>("ZREVRANGEBYSCORE", new ScoredSortedSetReplayDecoder());
    // RESP3 connections use a different reply decoder.
    private static final RedisCommand<Set<Tuple>> ZREVRANGEBYSCOREWITHSCORES_V2 = new RedisCommand<Set<Tuple>>("ZREVRANGEBYSCORE",
            new ListMultiDecoder2(new ObjectSetReplayDecoder(), new ScoredSortedSetReplayDecoderV2()));
    // ZREVRANGEBYSCORE takes max before min, hence the reversed arg order.
    @Override
    public Set<byte[]> zRevRangeByScore(byte[] key, org.springframework.data.domain.Range range, org.springframework.data.redis.connection.Limit limit) {
        String min = value(range.getLowerBound(), "-inf");
        String max = value(range.getUpperBound(), "+inf");
        List<Object> args = new ArrayList<Object>();
        args.add(key);
        args.add(max);
        args.add(min);
        if (limit != null) {
            args.add("LIMIT");
            args.add(limit.getOffset());
            args.add(limit.getCount());
        }
        return read(key, ByteArrayCodec.INSTANCE, ZREVRANGEBYSCORE, args.toArray());
    }
    @Override
    public Set<Tuple> zRevRangeByScoreWithScores(byte[] key, org.springframework.data.domain.Range range, org.springframework.data.redis.connection.Limit limit) {
        String min = value(range.getLowerBound(), "-inf");
        String max = value(range.getUpperBound(), "+inf");
        List<Object> args = new ArrayList<Object>();
        args.add(key);
        args.add(max);
        args.add(min);
        args.add("WITHSCORES");
        if (limit != null) {
            args.add("LIMIT");
            args.add(limit.getOffset());
            args.add(limit.getCount());
        }
        if (executorService.getServiceManager().isResp3()) {
            return read(key, ByteArrayCodec.INSTANCE, ZREVRANGEBYSCOREWITHSCORES_V2, args.toArray());
        }
        return read(key, ByteArrayCodec.INSTANCE, ZREVRANGEBYSCOREWITHSCORES, args.toArray());
    }
    @Override
    public Long zCount(byte[] key, double min, double max) {
        return zCount(key, org.springframework.data.domain.Range.closed(min, max));
    }
    private static final RedisStrictCommand<Long> ZCOUNT = new RedisStrictCommand<Long>("ZCOUNT");
    // ZCOUNT: number of members with score inside the range.
    @Override
    public Long zCount(byte[] key, org.springframework.data.domain.Range range) {
        String min = value(range.getLowerBound(), "-inf");
        String max = value(range.getUpperBound(), "+inf");
        return read(key, StringCodec.INSTANCE, ZCOUNT, key, min, max);
    }
    @Override
    public Long zCard(byte[] key) {
        return read(key, StringCodec.INSTANCE, RedisCommands.ZCARD, key);
    }
    // ZSCORE: member's score, or null when absent.
    @Override
    public Double zScore(byte[] key, byte[] value) {
        return read(key, StringCodec.INSTANCE, RedisCommands.ZSCORE, key, value);
    }
    private static final RedisStrictCommand<Long> ZREMRANGEBYRANK = new RedisStrictCommand<Long>("ZREMRANGEBYRANK");
    private static final RedisStrictCommand<Long> ZREMRANGEBYSCORE = new RedisStrictCommand<Long>("ZREMRANGEBYSCORE");
    // Removes members by rank interval; returns number removed.
    @Override
    public Long zRemRange(byte[] key, long start, long end) {
        return write(key, StringCodec.INSTANCE, ZREMRANGEBYRANK, key, start, end);
    }
    @Override
    public Long zRemRangeByScore(byte[] key, double min, double max) {
        return zRemRangeByScore(key, org.springframework.data.domain.Range.closed(min, max));
    }
    @Override
    public Long zRemRangeByScore(byte[] key, org.springframework.data.domain.Range range) {
        String min = value(range.getLowerBound(), "-inf");
        String max = value(range.getUpperBound(), "+inf");
        return write(key, StringCodec.INSTANCE, ZREMRANGEBYSCORE, key, min, max);
    }
    @Override
    public Long zUnionStore(byte[] destKey, byte[]... sets) {
        return zUnionStore(destKey, null, (Weights)null, sets);
    }
    private static final RedisStrictCommand<Long> ZUNIONSTORE = new RedisStrictCommand<Long>("ZUNIONSTORE");
    // ZUNIONSTORE dest numkeys key [key ...] [WEIGHTS ...] [AGGREGATE ...]
    @Override
    public Long zUnionStore(byte[] destKey, Aggregate aggregate, Weights weights, byte[]... sets) {
        List<Object> args = new ArrayList<Object>(sets.length*2 + 5);
        args.add(destKey);
        args.add(sets.length);
        args.addAll(Arrays.asList(sets));
        if (weights != null) {
            args.add("WEIGHTS");
            for (double weight : weights.toArray()) {
                args.add(BigDecimal.valueOf(weight).toPlainString());
            }
        }
        if (aggregate != null) {
            args.add("AGGREGATE");
            args.add(aggregate.name());
        }
        return write(destKey, LongCodec.INSTANCE, ZUNIONSTORE, args.toArray());
    }
    private static final RedisStrictCommand<Long> ZINTERSTORE = new RedisStrictCommand<Long>("ZINTERSTORE");
    @Override
    public Long zInterStore(byte[] destKey, byte[]... sets) {
        return zInterStore(destKey, null, (Weights)null, sets);
    }
    // ZINTERSTORE: same argument layout as ZUNIONSTORE above.
    // NOTE(review): uses StringCodec while zUnionStore uses LongCodec --
    // presumably both decode the integer reply identically via the strict
    // command; verify before unifying.
    @Override
    public Long zInterStore(byte[] destKey, Aggregate aggregate, Weights weights, byte[]... sets) {
        List<Object> args = new ArrayList<Object>(sets.length*2 + 5);
        args.add(destKey);
        args.add(sets.length);
        args.addAll(Arrays.asList(sets));
        if (weights != null) {
            args.add("WEIGHTS");
            for (double weight : weights.toArray()) {
                args.add(BigDecimal.valueOf(weight).toPlainString());
            }
        }
        if (aggregate != null) {
            args.add("AGGREGATE");
            args.add(aggregate.name());
        }
        return write(destKey, StringCodec.INSTANCE, ZINTERSTORE, args.toArray());
    }
    private static final RedisCommand<ListScanResult<Object>> ZSCAN = new RedisCommand<>("ZSCAN", new ListMultiDecoder2(new ListScanResultReplayDecoder(), new ScoredSortedListReplayDecoder()));
    // ZSCAN as a Spring Cursor; subsequent pages are pinned to the node that
    // served the first page (server-side cursor state), hence the captured client.
    @Override
    public Cursor<Tuple> zScan(byte[] key, ScanOptions options) {
        return new KeyBoundCursor<Tuple>(key, 0, options) {
            private RedisClient client;
            @Override
            protected ScanIteration<Tuple> doScan(byte[] key, CursorId cursorId, ScanOptions options) {
                if (isQueueing() || isPipelined()) {
                    throw new UnsupportedOperationException("'ZSCAN' cannot be called in pipeline / transaction mode.");
                }
                List<Object> args = new ArrayList<Object>();
                args.add(key);
                args.add(cursorId.getCursorId());
                if (options.getPattern() != null) {
                    args.add("MATCH");
                    args.add(options.getPattern());
                }
                if (options.getCount() != null) {
                    args.add("COUNT");
                    args.add(options.getCount());
                }
                RFuture<ListScanResult<Tuple>> f = executorService.readAsync(client, key, ByteArrayCodec.INSTANCE, ZSCAN, args.toArray());
                ListScanResult<Tuple> res = syncFuture(f);
                client = res.getRedisClient();
                // Cursor ids are unsigned 64-bit values.
                return new ScanIteration<Tuple>(Long.parseUnsignedLong(res.getPos()), res.getValues());
            }
        }.open();
    }
    // ZRANGEBYSCORE with pre-formatted min/max strings (e.g. "(1", "+inf").
    @Override
    public Set<byte[]> zRangeByScore(byte[] key, String min, String max) {
        return read(key, ByteArrayCodec.INSTANCE, RedisCommands.ZRANGEBYSCORE, key, min, max);
    }
    @Override
    public Set<byte[]> zRangeByScore(byte[] key, String min, String max, long offset, long count) {
        return read(key, ByteArrayCodec.INSTANCE, RedisCommands.ZRANGEBYSCORE, key, min, max, "LIMIT", offset, count);
    }
    @Override
    public Set<byte[]> zRangeByScore(byte[] key, org.springframework.data.domain.Range range, org.springframework.data.redis.connection.Limit limit) {
        String min = value(range.getLowerBound(), "-inf");
        String max = value(range.getUpperBound(), "+inf");
        List<Object> args = new ArrayList<Object>();
        args.add(key);
        args.add(min);
        args.add(max);
        if (limit != null) {
            args.add("LIMIT");
            args.add(limit.getOffset());
            args.add(limit.getCount());
        }
        return read(key, ByteArrayCodec.INSTANCE, RedisCommands.ZRANGEBYSCORE, args.toArray());
    }
    private static final RedisCommand<Set<Object>> ZRANGEBYLEX = new RedisCommand<Set<Object>>("ZRANGEBYLEX", new ObjectSetReplayDecoder<Object>());
    // ZRANGEBYLEX: lexicographic range; open bounds default to "-"/"+"
    // per Redis lex-range syntax.
    @Override
    public Set<byte[]> zRangeByLex(byte[] key, org.springframework.data.domain.Range<byte[]> range, org.springframework.data.redis.connection.Limit limit) {
        String min = value(range.getLowerBound(), "-");
        String max = value(range.getUpperBound(), "+");
        List<Object> args = new ArrayList<Object>();
        args.add(key);
        args.add(min);
        args.add(max);
        if (limit != null) {
            args.add("LIMIT");
            args.add(limit.getOffset());
            args.add(limit.getCount());
        }
        return read(key, ByteArrayCodec.INSTANCE, ZRANGEBYLEX, args.toArray());
    }
    // Hash commands.
    @Override
    public Boolean hSet(byte[] key, byte[] field, byte[] value) {
        return write(key, StringCodec.INSTANCE, RedisCommands.HSET, key, field, value);
    }
    @Override
    public Boolean hSetNX(byte[] key, byte[] field, byte[] value) {
        return write(key, StringCodec.INSTANCE, RedisCommands.HSETNX, key, field, value);
    }
    @Override
    public byte[] hGet(byte[] key, byte[] field) {
        return read(key, ByteArrayCodec.INSTANCE, RedisCommands.HGET, key, field);
    }
    private static final RedisCommand<List<Object>> HMGET = new RedisCommand<List<Object>>("HMGET", new ObjectListReplayDecoder<Object>());
    @Override
    public List<byte[]> hMGet(byte[] key, byte[]... fields) {
        List<Object> args = new ArrayList<Object>(fields.length + 1);
        args.add(key);
        args.addAll(Arrays.asList(fields));
        return read(key, ByteArrayCodec.INSTANCE, HMGET, args.toArray());
    }
    // HMSET: field/value pairs flattened into alternating arguments.
    @Override
    public void hMSet(byte[] key, Map<byte[], byte[]> hashes) {
        List<Object> params = new ArrayList<Object>(hashes.size()*2 + 1);
        params.add(key);
        for (Map.Entry<byte[], byte[]> entry : hashes.entrySet()) {
            params.add(entry.getKey());
            params.add(entry.getValue());
        }
        write(key, StringCodec.INSTANCE, RedisCommands.HMSET, params.toArray());
    }
    private static final RedisCommand<Long> HINCRBY = new RedisCommand<Long>("HINCRBY");
    @Override
    public Long hIncrBy(byte[] key, byte[] field, long delta) {
        return write(key, StringCodec.INSTANCE, HINCRBY, key, field, delta);
    }
    private static final RedisCommand<Double> HINCRBYFLOAT = new RedisCommand<Double>("HINCRBYFLOAT", new DoubleReplayConvertor());
    // Double delta serialized in plain-string form (no scientific notation).
    @Override
    public Double hIncrBy(byte[] key, byte[] field, double delta) {
        return write(key, StringCodec.INSTANCE, HINCRBYFLOAT, key, field, BigDecimal.valueOf(delta).toPlainString());
    }
    @Override
    public Boolean hExists(byte[] key, byte[] field) {
        return read(key, StringCodec.INSTANCE, RedisCommands.HEXISTS, key, field);
    }
    @Override
    public Long hDel(byte[] key, byte[]... fields) {
        List<Object> args = new ArrayList<Object>(fields.length + 1);
        args.add(key);
        args.addAll(Arrays.asList(fields));
        return write(key, StringCodec.INSTANCE, RedisCommands.HDEL, args.toArray());
    }
    private static final RedisStrictCommand<Long> HLEN = new RedisStrictCommand<Long>("HLEN");
    @Override
    public Long hLen(byte[] key) {
        return read(key, StringCodec.INSTANCE, HLEN, key);
    }
    @Override
    public Set<byte[]> hKeys(byte[] key) {
        return read(key, ByteArrayCodec.INSTANCE, RedisCommands.HKEYS, key);
    }
@Override
public List<byte[]> hVals(byte[] key) {
return read(key, ByteArrayCodec.INSTANCE, RedisCommands.HVALS, key);
}
@Override
public Map<byte[], byte[]> hGetAll(byte[] key) {
return read(key, ByteArrayCodec.INSTANCE, RedisCommands.HGETALL, key);
}
    @Override
    public Cursor<Entry<byte[], byte[]>> hScan(byte[] key, ScanOptions options) {
        // Wraps HSCAN in a Spring Data Cursor; each doScan() call fetches one page.
        return new KeyBoundCursor<Entry<byte[], byte[]>>(key, 0, options) {
            // Pinned after the first page so subsequent HSCAN calls hit the same
            // node and the server-side cursor stays valid.
            private RedisClient client;
            @Override
            protected ScanIteration<Entry<byte[], byte[]>> doScan(byte[] key, CursorId cursorId, ScanOptions options) {
                if (isQueueing() || isPipelined()) {
                    throw new UnsupportedOperationException("'HSCAN' cannot be called in pipeline / transaction mode.");
                }
                // Assemble HSCAN key cursor [MATCH pattern] [COUNT count].
                List<Object> args = new ArrayList<Object>();
                args.add(key);
                args.add(cursorId.getCursorId());
                if (options.getPattern() != null) {
                    args.add("MATCH");
                    args.add(options.getPattern());
                }
                if (options.getCount() != null) {
                    args.add("COUNT");
                    args.add(options.getCount());
                }
                RFuture<MapScanResult<byte[], byte[]>> f = executorService.readAsync(client, key, ByteArrayCodec.INSTANCE, RedisCommands.HSCAN, args.toArray());
                MapScanResult<byte[], byte[]> res = syncFuture(f);
                client = res.getRedisClient();
                // Cursor is unsigned on the wire; parse accordingly.
                return new ScanIteration<>(Long.parseUnsignedLong(res.getPos()), res.getValues());
            }
        }.open();
    }
    @Override
    public void multi() {
        // Starts a MULTI transaction by swapping in a batching executor.
        if (isQueueing()) {
            // Already inside a transaction; MULTI is a no-op here.
            return;
        }
        if (isPipelined()) {
            // Pipeline already open: upgrade it to an atomic in-memory batch.
            BatchOptions options = BatchOptions.defaults()
                    .executionMode(ExecutionMode.IN_MEMORY_ATOMIC);
            this.executorService = executorService.createCommandBatchService(options);
            return;
        }
        // Plain transaction: queue commands server-side (MULTI/EXEC semantics).
        BatchOptions options = BatchOptions.defaults()
                .executionMode(ExecutionMode.REDIS_WRITE_ATOMIC);
        this.executorService = executorService.createCommandBatchService(options);
    }
    @Override
    public List<Object> exec() {
        // Executes the queued transaction and returns the per-command replies.
        if (isPipelinedAtomic()) {
            // Atomic pipeline results are surfaced via closePipeline(), not exec().
            return null;
        }
        if (isQueueing()) {
            try {
                BatchResult<?> result = ((CommandBatchService)executorService).execute();
                filterResults(result);
                return (List<Object>) result.getResponses();
            } catch (Exception ex) {
                throw transform(ex);
            } finally {
                // Always restore the non-batching executor, even on failure.
                resetConnection();
            }
        } else {
            throw new InvalidDataAccessApiUsageException("Not in transaction mode. Please invoke multi method");
        }
    }
    protected void filterResults(BatchResult<?> result) {
        // Drops responses recorded in indexToRemove and normalizes String
        // replies to byte[] for the Spring Data contract.
        if (result.getResponses().isEmpty()) {
            return;
        }
        int t = 0;
        // indexToRemove holds original positions; 't' compensates for the list
        // shrinking as earlier entries are removed (assumes ascending order).
        for (Integer index : indexToRemove) {
            index -= t;
            result.getResponses().remove((int)index);
            t++;
        }
        for (ListIterator<Object> iterator = (ListIterator<Object>) result.getResponses().listIterator(); iterator.hasNext();) {
            Object object = iterator.next();
            if (object instanceof String) {
                iterator.set(((String) object).getBytes());
            }
        }
    }
    protected void resetConnection() {
        // Restores the shared executor and clears per-transaction bookkeeping.
        executorService = this.redisson.getCommandExecutor();
        index = -1;
        indexToRemove.clear();
    }
    @Override
    public void discard() {
        // DISCARD: aborts the open transaction and resets the executor.
        if (isQueueing()) {
            syncFuture(executorService.writeAsync(null, RedisCommands.DISCARD));
            resetConnection();
        } else {
            throw new InvalidDataAccessApiUsageException("Not in transaction mode. Please invoke multi method");
        }
    }
    @Override
    public void watch(byte[]... keys) {
        // WATCH must be issued before MULTI; inside a transaction it is invalid.
        if (isQueueing()) {
            throw new UnsupportedOperationException();
        }
        syncFuture(executorService.writeAsync(null, RedisCommands.WATCH, keys));
    }
    @Override
    public void unwatch() {
        // UNWATCH: releases all watched keys.
        syncFuture(executorService.writeAsync(null, RedisCommands.UNWATCH));
    }
    @Override
    public boolean isSubscribed() {
        // True while a live subscription (channel or pattern) exists.
        return subscription != null && subscription.isAlive();
    }
    @Override
    public Subscription getSubscription() {
        return subscription;
    }
    @Override
    public Long publish(byte[] channel, byte[] message) {
        // PUBLISH: returns the number of clients that received the message.
        return write(channel, StringCodec.INSTANCE, RedisCommands.PUBLISH, channel, message);
    }
    @Override
    public void subscribe(MessageListener listener, byte[]... channels) {
        // Only one subscription per connection; reject queueing/pipeline modes.
        checkSubscription();
        subscription = new RedissonSubscription(executorService, listener);
        subscription.subscribe(channels);
    }
    private void checkSubscription() {
        // Guards subscribe/pSubscribe preconditions.
        if (subscription != null) {
            throw new RedisSubscribedConnectionException("Connection already subscribed");
        }
        if (isQueueing()) {
            throw new UnsupportedOperationException("Not supported in queueing mode");
        }
        if (isPipelined()) {
            throw new UnsupportedOperationException("Not supported in pipelined mode");
        }
    }
    @Override
    public void pSubscribe(MessageListener listener, byte[]... patterns) {
        // PSUBSCRIBE: pattern-based variant of subscribe().
        checkSubscription();
        subscription = new RedissonSubscription(executorService, listener);
        subscription.pSubscribe(patterns);
    }
    @Override
    public void select(int dbIndex) {
        // Database selection is fixed by the Redisson configuration; SELECT is
        // deliberately unsupported on this connection.
        throw new UnsupportedOperationException();
    }
    private static final RedisCommand<Object> ECHO = new RedisCommand<Object>("ECHO");
    @Override
    public byte[] echo(byte[] message) {
        // ECHO: returns the message unchanged.
        return read(null, ByteArrayCodec.INSTANCE, ECHO, message);
    }
    @Override
    public String ping() {
        // PING: liveness probe, replies "PONG".
        return read(null, StringCodec.INSTANCE, RedisCommands.PING);
    }
//    @Override
//    public void bgWriteAof() {
//        throw new UnsupportedOperationException();
//    }
    @Override
    public void bgReWriteAof() {
        // BGREWRITEAOF: asynchronous append-only-file rewrite.
        write(null, StringCodec.INSTANCE, RedisCommands.BGREWRITEAOF);
    }
    @Override
    public void bgSave() {
        // BGSAVE: asynchronous RDB snapshot.
        write(null, StringCodec.INSTANCE, RedisCommands.BGSAVE);
    }
    @Override
    public Long lastSave() {
        // LASTSAVE: UNIX timestamp of the last successful save.
        return write(null, StringCodec.INSTANCE, RedisCommands.LASTSAVE);
    }
    private static final RedisStrictCommand<Void> SAVE = new RedisStrictCommand<Void>("SAVE", new VoidReplayConvertor());
    @Override
    public void save() {
        // SAVE: synchronous (blocking) RDB snapshot.
        write(null, StringCodec.INSTANCE, SAVE);
    }
    @Override
    public Long dbSize() {
        // DBSIZE: in queueing mode hits one node; otherwise sums the key count
        // across all nodes (cluster-aware).
        if (isQueueing()) {
            return read(null, StringCodec.INSTANCE, RedisCommands.DBSIZE);
        }
        List<CompletableFuture<Long>> futures = executorService.readAllAsync(RedisCommands.DBSIZE);
        CompletableFuture<Void> f = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
        CompletableFuture<Long> s = f.thenApply(r -> futures.stream().mapToLong(v -> v.getNow(0L)).sum());
        CompletableFutureWrapper<Long> ff = new CompletableFutureWrapper<>(s);
        return sync(ff);
    }
    @Override
    public void flushDb() {
        // FLUSHDB on every node; in queueing/pipeline mode only the routed node.
        if (isQueueing() || isPipelined()) {
            write(null, StringCodec.INSTANCE, RedisCommands.FLUSHDB);
            return;
        }
        RFuture<Void> f = executorService.writeAllVoidAsync(RedisCommands.FLUSHDB);
        sync(f);
    }
    @Override
    public void flushAll() {
        // FLUSHALL on every node.
        RFuture<Void> f = executorService.writeAllVoidAsync(RedisCommands.FLUSHALL);
        sync(f);
    }
    private static final RedisStrictCommand<Properties> INFO_DEFAULT = new RedisStrictCommand<Properties>("INFO", "DEFAULT", new ObjectDecoder(new PropertiesDecoder()));
    private static final RedisStrictCommand<Properties> INFO = new RedisStrictCommand<Properties>("INFO", new ObjectDecoder(new PropertiesDecoder()));
    @Override
    public Properties info() {
        // INFO DEFAULT: standard server statistics sections.
        return read(null, StringCodec.INSTANCE, INFO_DEFAULT);
    }
    @Override
    public Properties info(String section) {
        // INFO <section>: a single statistics section.
        return read(null, StringCodec.INSTANCE, INFO, section);
    }
    @Override
    public void shutdown() {
        throw new UnsupportedOperationException();
    }
    @Override
    public void shutdown(ShutdownOption option) {
        throw new UnsupportedOperationException();
    }
    private static final RedisStrictCommand<Properties> CONFIG_GET = new RedisStrictCommand<Properties>("CONFIG", "GET", new PropertiesListDecoder());
    @Override
    public Properties getConfig(String pattern) {
        // CONFIG GET <pattern>: matching configuration parameters.
        return read(null, StringCodec.INSTANCE, CONFIG_GET, pattern);
    }
    @Override
    public void setConfig(String param, String value) {
        // CONFIG SET: change a configuration parameter at runtime.
        write(null, StringCodec.INSTANCE, RedisCommands.CONFIG_SET, param, value);
    }
    @Override
    public void resetConfigStats() {
        // CONFIG RESETSTAT: reset INFO statistics counters.
        write(null, StringCodec.INSTANCE, RedisCommands.CONFIG_RESETSTAT);
    }
    @Override
    public void killClient(String host, int port) {
        throw new UnsupportedOperationException();
    }
    @Override
    public void setClientName(byte[] name) {
        throw new UnsupportedOperationException("Should be defined through Redisson Config object");
    }
    @Override
    public String getClientName() {
        throw new UnsupportedOperationException();
    }
    @Override
    public List<RedisClientInfo> getClientList() {
        // CLIENT LIST: connected-client descriptors.
        return read(null, StringCodec.INSTANCE, RedisCommands.CLIENT_LIST);
    }
// @Override
// public void slaveOf(String host, int port) {
// throw new UnsupportedOperationException();
// }
//
// @Override
// public void slaveOfNoOne() {
// throw new UnsupportedOperationException();
// }
    @Override
    public void migrate(byte[] key, RedisNode target, int dbIndex, MigrateOption option) {
        // Delegates with an effectively unlimited timeout.
        migrate(key, target, dbIndex, option, Long.MAX_VALUE);
    }
    @Override
    public void migrate(byte[] key, RedisNode target, int dbIndex, MigrateOption option, long timeout) {
        // NOTE(review): the 'option' argument (COPY/REPLACE) is not forwarded to
        // the MIGRATE command here — confirm whether that is intentional.
        write(key, StringCodec.INSTANCE, RedisCommands.MIGRATE, target.getHost(), target.getPort(), key, dbIndex, timeout);
    }
    @Override
    public void scriptFlush() {
        // SCRIPT FLUSH on every node; unsupported in queueing/pipeline mode.
        if (isQueueing() || isPipelined()) {
            throw new UnsupportedOperationException();
        }
        RFuture<Void> f = executorService.writeAllVoidAsync(RedisCommands.SCRIPT_FLUSH);
        sync(f);
    }
    @Override
    public void scriptKill() {
        throw new UnsupportedOperationException();
    }
    @Override
    public String scriptLoad(byte[] script) {
        // SCRIPT LOAD on every node so the SHA is usable cluster-wide; the SHA
        // from the first node is returned (it is identical on all nodes).
        if (isQueueing()) {
            throw new UnsupportedOperationException();
        }
        if (isPipelined()) {
            throw new UnsupportedOperationException();
        }
        List<CompletableFuture<String>> futures = executorService.executeAllAsync(RedisCommands.SCRIPT_LOAD, (Object)script);
        CompletableFuture<Void> f = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
        CompletableFuture<String> s = f.thenApply(r -> futures.get(0).getNow(null));
        return sync(new CompletableFutureWrapper<>(s));
    }
    @Override
    public List<Boolean> scriptExists(final String... scriptShas) {
        // SCRIPT EXISTS on every node; a script counts as existing if ANY node
        // has it, hence the element-wise boolean OR across node results.
        if (isQueueing() || isPipelined()) {
            throw new UnsupportedOperationException();
        }
        List<CompletableFuture<List<Boolean>>> futures = executorService.writeAllAsync(RedisCommands.SCRIPT_EXISTS, (Object[]) scriptShas);
        CompletableFuture<Void> f = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
        CompletableFuture<List<Boolean>> s = f.thenApply(r -> {
            List<Boolean> result = futures.get(0).getNow(new ArrayList<>());
            for (CompletableFuture<List<Boolean>> future : futures.subList(1, futures.size())) {
                List<Boolean> l = future.getNow(new ArrayList<>());
                for (int i = 0; i < l.size(); i++) {
                    result.set(i, result.get(i) | l.get(i));
                }
            }
            return result;
        });
        return sync(new CompletableFutureWrapper<>(s));
    }
    @Override
    public <T> T eval(byte[] script, ReturnType returnType, int numKeys, byte[]... keysAndArgs) {
        // EVAL script numKeys key [key ...] arg [arg ...]; routed by first key.
        if (isQueueing()) {
            throw new UnsupportedOperationException();
        }
        if (isPipelined()) {
            throw new UnsupportedOperationException();
        }
        RedisCommand<?> c = toCommand(returnType, "EVAL");
        List<Object> params = new ArrayList<Object>();
        params.add(script);
        params.add(numKeys);
        params.addAll(Arrays.asList(keysAndArgs));
        byte[] key = getKey(numKeys, keysAndArgs);
        return write(key, ByteArrayCodec.INSTANCE, c, params.toArray());
    }
    protected RedisCommand<?> toCommand(ReturnType returnType, String name) {
        // Maps Spring Data's ReturnType onto a Redisson command with a matching
        // reply decoder; MULTI and VALUE additionally convert replies to byte[].
        RedisCommand<?> c = null;
        if (returnType == ReturnType.BOOLEAN) {
            c = org.redisson.api.RScript.ReturnType.BOOLEAN.getCommand();
        } else if (returnType == ReturnType.INTEGER) {
            c = org.redisson.api.RScript.ReturnType.INTEGER.getCommand();
        } else if (returnType == ReturnType.MULTI) {
            c = org.redisson.api.RScript.ReturnType.MULTI.getCommand();
            return new RedisCommand(c, name, new BinaryConvertor());
        } else if (returnType == ReturnType.STATUS) {
            c = org.redisson.api.RScript.ReturnType.STATUS.getCommand();
        } else if (returnType == ReturnType.VALUE) {
            c = org.redisson.api.RScript.ReturnType.VALUE.getCommand();
            return new RedisCommand(c, name, new BinaryConvertor());
        }
        return new RedisCommand(c, name);
    }
    @Override
    public <T> T evalSha(String scriptSha, ReturnType returnType, int numKeys, byte[]... keysAndArgs) {
        // EVALSHA with a hex-string SHA; same argument layout as eval().
        if (isQueueing()) {
            throw new UnsupportedOperationException();
        }
        if (isPipelined()) {
            throw new UnsupportedOperationException();
        }
        RedisCommand<?> c = toCommand(returnType, "EVALSHA");
        List<Object> params = new ArrayList<Object>();
        params.add(scriptSha);
        params.add(numKeys);
        params.addAll(Arrays.asList(keysAndArgs));
        byte[] key = getKey(numKeys, keysAndArgs);
        return write(key, ByteArrayCodec.INSTANCE, c, params.toArray());
    }
    @Override
    public <T> T evalSha(byte[] scriptSha, ReturnType returnType, int numKeys, byte[]... keysAndArgs) {
        // byte[] SHA variant; unlike the String overload this one does not
        // reject queueing/pipeline modes.
        RedisCommand<?> c = toCommand(returnType, "EVALSHA");
        List<Object> params = new ArrayList<Object>();
        params.add(scriptSha);
        params.add(numKeys);
        params.addAll(Arrays.asList(keysAndArgs));
        byte[] key = getKey(numKeys, keysAndArgs);
        return write(key, ByteArrayCodec.INSTANCE, c, params.toArray());
    }
    private static byte[] getKey(int numKeys, byte[][] keysAndArgs) {
        // First declared key is used for cluster-slot routing; null routes to a
        // default node when the script declares no keys.
        if (numKeys > 0 && keysAndArgs.length > 0) {
            return keysAndArgs[0];
        }
        return null;
    }
    @Override
    public Long geoAdd(byte[] key, Point point, byte[] member) {
        // GEOADD key longitude latitude member.
        return write(key, StringCodec.INSTANCE, RedisCommands.GEOADD, key, point.getX(), point.getY(), member);
    }
    @Override
    public Long geoAdd(byte[] key, GeoLocation<byte[]> location) {
        // GEOADD using the name/point pair carried by the GeoLocation.
        return write(key, StringCodec.INSTANCE, RedisCommands.GEOADD, key, location.getPoint().getX(), location.getPoint().getY(), location.getName());
    }
    @Override
    public Long geoAdd(byte[] key, Map<byte[], Point> memberCoordinateMap) {
        // Bulk GEOADD: key lon1 lat1 member1 lon2 lat2 member2 ...
        List<Object> params = new ArrayList<Object>(memberCoordinateMap.size()*3 + 1);
        params.add(key);
        for (Entry<byte[], Point> entry : memberCoordinateMap.entrySet()) {
            params.add(entry.getValue().getX());
            params.add(entry.getValue().getY());
            params.add(entry.getKey());
        }
        return write(key, StringCodec.INSTANCE, RedisCommands.GEOADD, params.toArray());
    }
    @Override
    public Long geoAdd(byte[] key, Iterable<GeoLocation<byte[]>> locations) {
        // Bulk GEOADD from an iterable of GeoLocation entries.
        List<Object> params = new ArrayList<Object>();
        params.add(key);
        for (GeoLocation<byte[]> location : locations) {
            params.add(location.getPoint().getX());
            params.add(location.getPoint().getY());
            params.add(location.getName());
        }
        return write(key, StringCodec.INSTANCE, RedisCommands.GEOADD, params.toArray());
    }
    @Override
    public Distance geoDist(byte[] key, byte[] member1, byte[] member2) {
        // Defaults to meters when no metric is supplied.
        return geoDist(key, member1, member2, DistanceUnit.METERS);
    }
    @Override
    public Distance geoDist(byte[] key, byte[] member1, byte[] member2, Metric metric) {
        // GEODIST with an explicit unit; the convertor wraps the raw double
        // back into a Distance carrying the requested metric.
        return read(key, DoubleCodec.INSTANCE, new RedisCommand<Distance>("GEODIST", new DistanceConvertor(metric)), key, member1, member2, getAbbreviation(metric));
    }
private static final RedisCommand<List<Object>> GEOHASH = new RedisCommand<List<Object>>("GEOHASH", new ObjectListReplayDecoder<Object>());
@Override
public List<String> geoHash(byte[] key, byte[]... members) {
List<Object> params = new ArrayList<Object>(members.length + 1);
params.add(key);
for (byte[] member : members) {
params.add(member);
}
return read(key, StringCodec.INSTANCE, GEOHASH, params.toArray());
}
    // Decoder chain for GEOPOS replies: list of coordinate pairs -> Points.
    private final MultiDecoder<Map<Object, Object>> geoDecoder = new ListMultiDecoder2(new ObjectListReplayDecoder2(), new PointDecoder());
    @Override
    public List<Point> geoPos(byte[] key, byte[]... members) {
        // GEOPOS key member [member ...]: coordinates in request order.
        List<Object> params = new ArrayList<Object>(members.length + 1);
        params.add(key);
        params.addAll(Arrays.asList(members));
        RedisCommand<Map<Object, Object>> command = new RedisCommand<Map<Object, Object>>("GEOPOS", geoDecoder);
        return read(key, StringCodec.INSTANCE, command, params.toArray());
    }
    private String convert(double longitude) {
        // Plain-string formatting avoids scientific notation in the wire protocol.
        return BigDecimal.valueOf(longitude).toPlainString();
    }
    // Decoder chain for geo queries returning WITHCOORD results.
    private final MultiDecoder<GeoResults<GeoLocation<byte[]>>> postitionDecoder = new ListMultiDecoder2(new GeoResultsDecoder(), new CodecDecoder(), new PointDecoder(), new ObjectListReplayDecoder());
    @Override
    public GeoResults<GeoLocation<byte[]>> geoRadius(byte[] key, Circle within) {
        // Read-only GEORADIUS_RO variant so the query may run on replicas.
        RedisCommand<GeoResults<GeoLocation<byte[]>>> command = new RedisCommand<GeoResults<GeoLocation<byte[]>>>("GEORADIUS_RO", new GeoResultsDecoder());
        return read(key, ByteArrayCodec.INSTANCE, command, key,
                convert(within.getCenter().getX()), convert(within.getCenter().getY()),
                within.getRadius().getValue(), getAbbreviation(within.getRadius().getMetric()));
    }
    @Override
    public GeoResults<GeoLocation<byte[]>> geoRadius(byte[] key, Circle within, GeoRadiusCommandArgs args) {
        // GEORADIUS_RO with optional WITHCOORD/WITHDIST, COUNT and sort flags.
        List<Object> params = new ArrayList<Object>();
        params.add(key);
        params.add(convert(within.getCenter().getX()));
        params.add(convert(within.getCenter().getY()));
        params.add(within.getRadius().getValue());
        params.add(getAbbreviation(within.getRadius().getMetric()));
        RedisCommand<GeoResults<GeoLocation<byte[]>>> command;
        if (args.getFlags().contains(GeoRadiusCommandArgs.Flag.WITHCOORD)) {
            command = new RedisCommand<GeoResults<GeoLocation<byte[]>>>("GEORADIUS_RO", postitionDecoder);
            params.add("WITHCOORD");
        } else {
            // WITHDIST is always requested when coordinates are not, so the
            // decoder can populate distances in the result metric.
            MultiDecoder<GeoResults<GeoLocation<byte[]>>> distanceDecoder = new ListMultiDecoder2(new GeoResultsDecoder(within.getRadius().getMetric()), new GeoDistanceDecoder());
            command = new RedisCommand<GeoResults<GeoLocation<byte[]>>>("GEORADIUS_RO", distanceDecoder);
            params.add("WITHDIST");
        }
        if (args.getLimit() != null) {
            params.add("COUNT");
            params.add(args.getLimit());
        }
        if (args.getSortDirection() != null) {
            params.add(args.getSortDirection().name());
        }
        return read(key, ByteArrayCodec.INSTANCE, command, params.toArray());
    }
private String getAbbreviation(Metric metric) {
if (ObjectUtils.nullSafeEquals(Metrics.NEUTRAL, metric)) {
return DistanceUnit.METERS.getAbbreviation();
}
return metric.getAbbreviation();
}
    @Override
    public GeoResults<GeoLocation<byte[]>> geoRadiusByMember(byte[] key, byte[] member, double radius) {
        // Radius given as a raw double defaults to meters.
        return geoRadiusByMember(key, member, new Distance(radius, DistanceUnit.METERS));
    }
    // Read-only variant so the query may run on replicas.
    private static final RedisCommand<GeoResults<GeoLocation<byte[]>>> GEORADIUSBYMEMBER = new RedisCommand<GeoResults<GeoLocation<byte[]>>>("GEORADIUSBYMEMBER_RO", new GeoResultsDecoder());
    @Override
    public GeoResults<GeoLocation<byte[]>> geoRadiusByMember(byte[] key, byte[] member, Distance radius) {
        // GEORADIUSBYMEMBER_RO key member radius unit.
        return read(key, ByteArrayCodec.INSTANCE, GEORADIUSBYMEMBER, key, member, radius.getValue(), getAbbreviation(radius.getMetric()));
    }
    @Override
    public GeoResults<GeoLocation<byte[]>> geoRadiusByMember(byte[] key, byte[] member, Distance radius,
            GeoRadiusCommandArgs args) {
        // Same as geoRadius(key, within, args) but centered on an existing member.
        List<Object> params = new ArrayList<Object>();
        params.add(key);
        params.add(member);
        params.add(radius.getValue());
        params.add(getAbbreviation(radius.getMetric()));
        RedisCommand<GeoResults<GeoLocation<byte[]>>> command;
        if (args.getFlags().contains(GeoRadiusCommandArgs.Flag.WITHCOORD)) {
            command = new RedisCommand<GeoResults<GeoLocation<byte[]>>>("GEORADIUSBYMEMBER_RO", postitionDecoder);
            params.add("WITHCOORD");
        } else {
            // WITHDIST requested by default so distances come back in 'radius' metric.
            MultiDecoder<GeoResults<GeoLocation<byte[]>>> distanceDecoder = new ListMultiDecoder2(new GeoResultsDecoder(radius.getMetric()), new GeoDistanceDecoder());
            command = new RedisCommand<GeoResults<GeoLocation<byte[]>>>("GEORADIUSBYMEMBER_RO", distanceDecoder);
            params.add("WITHDIST");
        }
        if (args.getLimit() != null) {
            params.add("COUNT");
            params.add(args.getLimit());
        }
        if (args.getSortDirection() != null) {
            params.add(args.getSortDirection().name());
        }
        return read(key, ByteArrayCodec.INSTANCE, command, params.toArray());
    }
    @Override
    public Long geoRemove(byte[] key, byte[]... members) {
        // Geo sets are sorted sets under the hood; removal is ZREM.
        return zRem(key, members);
    }
    @Override
    public GeoResults<GeoLocation<byte[]>> geoSearch(byte[] key, GeoReference<byte[]> reference, GeoShape predicate, GeoSearchCommandArgs args) {
        // GEOSEARCH key (FROMLONLAT lon lat | FROMMEMBER m)
        //           (BYRADIUS r unit | BYBOX w h unit)
        //           [ASC|DESC] [COUNT n [ANY]] (WITHCOORD | WITHDIST)
        Assert.notNull(args, "Args must not be null!");
        Assert.notNull(key, "Key must not be null!");
        Assert.notNull(predicate, "Shape must not be null!");
        Assert.notNull(reference, "Reference must not be null!");
        List<Object> commandParams = new ArrayList<>();
        commandParams.add(key);
        // Center of the search: explicit coordinates or an existing member.
        if (reference instanceof GeoReference.GeoCoordinateReference) {
            GeoReference.GeoCoordinateReference ref = (GeoReference.GeoCoordinateReference) reference;
            commandParams.add("FROMLONLAT");
            commandParams.add(convert(ref.getLongitude()));
            commandParams.add(convert(ref.getLatitude()));
        } else if (reference instanceof GeoReference.GeoMemberReference) {
            GeoReference.GeoMemberReference ref = (GeoReference.GeoMemberReference) reference;
            commandParams.add("FROMMEMBER");
            commandParams.add(encode(ref.getMember()));
        }
        // Search area: circle or bounding box.
        if (predicate instanceof RadiusShape) {
            commandParams.add("BYRADIUS");
            RadiusShape shape = (RadiusShape) predicate;
            commandParams.add(shape.getRadius().getValue());
            commandParams.add(convert(shape.getMetric()).getAbbreviation());
        } else if (predicate instanceof BoxShape) {
            BoxShape shape = (BoxShape) predicate;
            commandParams.add("BYBOX");
            commandParams.add(shape.getBoundingBox().getWidth().getValue());
            commandParams.add(shape.getBoundingBox().getHeight().getValue());
            commandParams.add(convert(shape.getMetric()).getAbbreviation());
        }
        if (args.hasSortDirection()) {
            commandParams.add(args.getSortDirection());
        }
        if (args.getLimit() != null) {
            commandParams.add("COUNT");
            commandParams.add(args.getLimit());
            if (args.hasAnyLimit()) {
                commandParams.add("ANY");
            }
        }
        RedisCommand<GeoResults<GeoLocation<byte[]>>> cmd;
        if (args.getFlags().contains(GeoRadiusCommandArgs.Flag.WITHCOORD)) {
            cmd = new RedisCommand<>("GEOSEARCH", postitionDecoder);
            commandParams.add("WITHCOORD");
        } else {
            // WITHDIST requested by default; decoder yields distances in the shape's metric.
            MultiDecoder<GeoResults<GeoLocation<byte[]>>> distanceDecoder = new ListMultiDecoder2(new GeoResultsDecoder(predicate.getMetric()), new GeoDistanceDecoder());
            cmd = new RedisCommand<>("GEOSEARCH", distanceDecoder);
            commandParams.add("WITHDIST");
        }
        return read(key, ByteArrayCodec.INSTANCE, cmd, commandParams.toArray());
    }
    @Override
    public Long geoSearchStore(byte[] destKey, byte[] key, GeoReference<byte[]> reference, GeoShape predicate, GeoSearchStoreCommandArgs args) {
        // GEOSEARCHSTORE dest src (FROMLONLAT|FROMMEMBER) (BYRADIUS|BYBOX)
        //                [ASC|DESC] [COUNT n [ANY]] [STOREDIST]
        // Returns the number of members stored in destKey.
        Assert.notNull(args, "Args must not be null!");
        Assert.notNull(key, "Key must not be null!");
        Assert.notNull(destKey, "DestKey must not be null!");
        Assert.notNull(predicate, "Shape must not be null!");
        Assert.notNull(reference, "Reference must not be null!");
        List<Object> commandParams = new ArrayList<>();
        commandParams.add(destKey);
        commandParams.add(key);
        if (reference instanceof GeoReference.GeoCoordinateReference) {
            GeoReference.GeoCoordinateReference ref = (GeoReference.GeoCoordinateReference) reference;
            commandParams.add("FROMLONLAT");
            commandParams.add(convert(ref.getLongitude()));
            commandParams.add(convert(ref.getLatitude()));
        } else if (reference instanceof GeoReference.GeoMemberReference) {
            GeoReference.GeoMemberReference ref = (GeoReference.GeoMemberReference) reference;
            commandParams.add("FROMMEMBER");
            commandParams.add(encode(ref.getMember()));
        }
        if (predicate instanceof RadiusShape) {
            RadiusShape shape = (RadiusShape) predicate;
            commandParams.add("BYRADIUS");
            commandParams.add(shape.getRadius().getValue());
            commandParams.add(convert(shape.getMetric()).getAbbreviation());
        } else if (predicate instanceof BoxShape) {
            BoxShape shape = (BoxShape) predicate;
            commandParams.add("BYBOX");
            commandParams.add(shape.getBoundingBox().getWidth().getValue());
            commandParams.add(shape.getBoundingBox().getHeight().getValue());
            commandParams.add(convert(shape.getMetric()).getAbbreviation());
        }
        if (args.hasSortDirection()) {
            commandParams.add(args.getSortDirection());
        }
        if (args.getLimit() != null) {
            commandParams.add("COUNT");
            commandParams.add(args.getLimit());
            if (args.hasAnyLimit()) {
                commandParams.add("ANY");
            }
        }
        if (args.isStoreDistance()) {
            commandParams.add("STOREDIST");
        }
        // NOTE(review): routed by the source 'key', not 'destKey' — in cluster
        // mode both keys must share a slot anyway; confirm this is intentional.
        return write(key, LongCodec.INSTANCE, RedisCommands.GEOSEARCHSTORE_STORE, commandParams.toArray());
    }
private Metric convert(Metric metric) {
if (metric == Metrics.NEUTRAL) {
return DistanceUnit.METERS;
}
return metric;
}
private ByteBuf encode(Object value) {
return executorService.encode(ByteArrayCodec.INSTANCE, value);
}
private static final RedisCommand<Long> PFADD = new RedisCommand<Long>("PFADD");
@Override
public Long pfAdd(byte[] key, byte[]... values) {
List<Object> params = new ArrayList<Object>(values.length + 1);
params.add(key);
for (byte[] member : values) {
params.add(member);
}
return write(key, StringCodec.INSTANCE, PFADD, params.toArray());
}
@Override
public Long pfCount(byte[]... keys) {
Assert.notEmpty(keys, "PFCOUNT requires at least one non 'null' key.");
Assert.noNullElements(keys, "Keys for PFOUNT must not contain 'null'.");
return write(keys[0], StringCodec.INSTANCE, RedisCommands.PFCOUNT, Arrays.asList(keys).toArray());
}
    @Override
    public void pfMerge(byte[] destinationKey, byte[]... sourceKeys) {
        // PFMERGE dest src [src ...]: union of HyperLogLogs into dest.
        List<Object> args = new ArrayList<Object>(sourceKeys.length + 1);
        args.add(destinationKey);
        args.addAll(Arrays.asList(sourceKeys));
        write(destinationKey, StringCodec.INSTANCE, RedisCommands.PFMERGE, args.toArray());
    }
    private static final RedisCommand<Long> HSTRLEN = new RedisCommand<Long>("HSTRLEN");
    @Override
    public Long hStrLen(byte[] key, byte[] field) {
        // HSTRLEN: byte length of a hash field's value (0 when absent).
        return read(key, StringCodec.INSTANCE, HSTRLEN, key, field);
    }
    @Override
    public RedisStreamCommands streamCommands() {
        // Stream commands are delegated to a dedicated adapter bound to this connection.
        return new RedissonStreamCommands(this, executorService);
    }
    private static final RedisStrictCommand<List<Object>> BITFIELD = new RedisStrictCommand<>("BITFIELD", new ObjectListReplayDecoder<>());
    @Override
    public List<Long> bitField(byte[] key, BitFieldSubCommands subCommands) {
        // Translates Spring Data BitFieldSubCommands into the BITFIELD syntax:
        // GET/SET/INCRBY <i|u><bits> <[#]offset> [value] [OVERFLOW mode].
        List<Object> params = new ArrayList<>();
        params.add(key);
        // Routed as a read unless at least one subcommand mutates the value.
        boolean writeOp = false;
        for (BitFieldSubCommands.BitFieldSubCommand subCommand : subCommands) {
            // Type token: 'i'/'u' prefix for signed/unsigned plus the bit width.
            String size = "u";
            if (subCommand.getType().isSigned()) {
                size = "i";
            }
            size += subCommand.getType().getBits();
            // '#' prefix means the offset is in typed-field units, not bits.
            String offset = "#";
            if (subCommand.getOffset().isZeroBased()) {
                offset = "";
            }
            offset += subCommand.getOffset().getValue();
            if (subCommand instanceof BitFieldSubCommands.BitFieldGet) {
                params.add("GET");
                params.add(size);
                params.add(offset);
            } else if (subCommand instanceof BitFieldSubCommands.BitFieldSet) {
                writeOp = true;
                params.add("SET");
                params.add(size);
                params.add(offset);
                params.add(((BitFieldSubCommands.BitFieldSet) subCommand).getValue());
            } else if (subCommand instanceof BitFieldSubCommands.BitFieldIncrBy) {
                writeOp = true;
                params.add("INCRBY");
                params.add(size);
                params.add(offset);
                params.add(((BitFieldSubCommands.BitFieldIncrBy) subCommand).getValue());
                BitFieldSubCommands.BitFieldIncrBy.Overflow overflow = ((BitFieldSubCommands.BitFieldIncrBy) subCommand).getOverflow();
                if (overflow != null) {
                    params.add("OVERFLOW");
                    params.add(overflow);
                }
            }
        }
        if (writeOp) {
            return write(key, StringCodec.INSTANCE, BITFIELD, params.toArray());
        }
        return read(key, StringCodec.INSTANCE, BITFIELD, params.toArray());
    }
    @Override
    public Long exists(byte[]... keys) {
        // EXISTS key [key ...]: count of existing keys; routed by the first key.
        return read(keys[0], StringCodec.INSTANCE, RedisCommands.EXISTS_LONG, Arrays.asList(keys).toArray());
    }
    @Override
    public Long touch(byte[]... keys) {
        // TOUCH key [key ...]: updates last-access time, returns touched count.
        return read(keys[0], StringCodec.INSTANCE, RedisCommands.TOUCH_LONG, Arrays.asList(keys).toArray());
    }
    // OBJECT ENCODING reply (e.g. "ziplist") mapped onto the ValueEncoding enum.
    private static final RedisStrictCommand<ValueEncoding> OBJECT_ENCODING = new RedisStrictCommand<ValueEncoding>("OBJECT", "ENCODING", new Convertor<ValueEncoding>() {
        @Override
        public ValueEncoding convert(Object obj) {
            return ValueEncoding.of((String) obj);
        }
    });
    @Override
    public ValueEncoding encodingOf(byte[] key) {
        Assert.notNull(key, "Key must not be null!");
        return read(key, StringCodec.INSTANCE, OBJECT_ENCODING, key);
    }
    // OBJECT IDLETIME replies in seconds; wrapped in a Duration.
    private static final RedisStrictCommand<Duration> OBJECT_IDLETIME = new RedisStrictCommand<>("OBJECT", "IDLETIME", new Convertor<Duration>() {
        @Override
        public Duration convert(Object obj) {
            return Duration.ofSeconds((Long)obj);
        }
    });
    @Override
    public Duration idletime(byte[] key) {
        Assert.notNull(key, "Key must not be null!");
        return read(key, StringCodec.INSTANCE, OBJECT_IDLETIME, key);
    }
    private static final RedisStrictCommand<Long> OBJECT_REFCOUNT = new RedisStrictCommand<Long>("OBJECT", "REFCOUNT");
    @Override
    public Long refcount(byte[] key) {
        // OBJECT REFCOUNT: reference count of the value stored at key.
        Assert.notNull(key, "Key must not be null!");
        return read(key, StringCodec.INSTANCE, OBJECT_REFCOUNT, key);
    }
    private static final RedisStrictCommand<Long> BITPOS = new RedisStrictCommand<>("BITPOS");
    @Override
    public Long bitPos(byte[] key, boolean bit, org.springframework.data.domain.Range<Long> range) {
        // BITPOS key bit [start [end]]: index of the first bit set to 'bit'.
        Assert.notNull(key, "Key must not be null!");
        Assert.notNull(range, "Range must not be null! Use Range.unbounded() instead.");
        List<Object> params = new ArrayList<>();
        params.add(key);
        if (bit) {
            params.add(1);
        } else {
            params.add(0);
        }
        // BITPOS cannot take an end without a start, so an upper bound is only
        // sent when a lower bound is present (an upper-only bound is ignored).
        if (range.getLowerBound().isBounded()) {
            params.add(range.getLowerBound().getValue().get());
            if (range.getUpperBound().isBounded()) {
                params.add(range.getUpperBound().getValue().get());
            }
        }
        return read(key, StringCodec.INSTANCE, BITPOS, params.toArray());
    }
    @Override
    public void restore(byte[] key, long ttlInMillis, byte[] serializedValue, boolean replace) {
        // RESTORE with optional REPLACE to overwrite an existing key.
        if (replace) {
            write(key, StringCodec.INSTANCE, RedisCommands.RESTORE, key, ttlInMillis, serializedValue, "REPLACE");
            return;
        }
        restore(key, ttlInMillis, serializedValue);
    }
    @Override
    public byte[] zRandMember(byte[] key) {
        // ZRANDMEMBER: one random member of the sorted set.
        Assert.notNull(key, "Key must not be null!");
        return write(key, ByteArrayCodec.INSTANCE, RedisCommands.ZRANDMEMBER_SINGLE, (Object) key);
    }
    private static final RedisCommand<List<Object>> ZRANDMEMBER_LIST = new RedisCommand<>("ZRANDMEMBER", new ObjectListReplayDecoder<>());
    @Override
    public List<byte[]> zRandMember(byte[] key, long count) {
        // ZRANDMEMBER key count: multiple random members.
        Assert.notNull(key, "Key must not be null!");
        return write(key, ByteArrayCodec.INSTANCE, ZRANDMEMBER_LIST, key, count);
    }
    private static final RedisCommand<Tuple> ZRANDMEMBER_SCORE = new RedisCommand<>("ZRANDMEMBER", new ScoredSortedSingleReplayDecoder());
    @Override
    public Tuple zRandMemberWithScore(byte[] key) {
        // ZRANDMEMBER ... WITHSCORES: random member plus its score.
        Assert.notNull(key, "Key must not be null!");
        return write(key, ByteArrayCodec.INSTANCE, ZRANDMEMBER_SCORE, key, "WITHSCORES");
    }
    private static final RedisCommand<List<Tuple>> ZRANDMEMBER_SCORE_LIST = new RedisCommand<>("ZRANDMEMBER", new ScoredSortedListReplayDecoder());
    @Override
    public List<Tuple> zRandMemberWithScore(byte[] key, long count) {
        Assert.notNull(key, "Key must not be null!");
        return write(key, ByteArrayCodec.INSTANCE, ZRANDMEMBER_SCORE_LIST, key, count, "WITHSCORES");
    }
    private static final RedisCommand<Tuple> ZPOPMIN = new RedisCommand<>("ZPOPMIN", new ScoredSortedSingleReplayDecoder());
    @Override
    public Tuple zPopMin(byte[] key) {
        // ZPOPMIN: removes and returns the lowest-scored member.
        Assert.notNull(key, "Key must not be null!");
        return write(key, ByteArrayCodec.INSTANCE, ZPOPMIN, (Object) key);
    }
    private static final RedisCommand<Set<Tuple>> ZPOPMIN_FOR_SET = new RedisCommand<>("ZPOPMIN", new ScoredSortedSetReplayDecoder());
    @Override
    public Set<Tuple> zPopMin(byte[] key, long count) {
        Assert.notNull(key, "Key must not be null!");
        return write(key, ByteArrayCodec.INSTANCE, ZPOPMIN_FOR_SET, key, count);
    }
    private static final RedisCommand<Tuple> BZPOPMIN = new RedisCommand<>("BZPOPMIN", new ScoredSortedSingleBlockingReplayDecoder());
    @Override
    public Tuple bZPopMin(byte[] key, long timeout, TimeUnit unit) {
        // BZPOPMIN: blocking variant; Redis accepts the timeout in seconds.
        Assert.notNull(key, "Key must not be null!");
        long seconds = unit.toSeconds(timeout);
        return write(key, ByteArrayCodec.INSTANCE, BZPOPMIN , key, seconds);
    }
    private static final RedisCommand<Tuple> ZPOPMAX = new RedisCommand<>("ZPOPMAX", new ScoredSortedSingleReplayDecoder());
    @Override
    public Tuple zPopMax(byte[] key) {
        // ZPOPMAX: removes and returns the highest-scored member.
        Assert.notNull(key, "Key must not be null!");
        return write(key, ByteArrayCodec.INSTANCE, ZPOPMAX, (Object) key);
    }
    private static final RedisCommand<Set<Tuple>> ZPOPMAX_FOR_SET = new RedisCommand<>("ZPOPMAX", new ScoredSortedSetReplayDecoder());
    @Override
    public Set<Tuple> zPopMax(byte[] key, long count) {
        Assert.notNull(key, "Key must not be null!");
        return write(key, ByteArrayCodec.INSTANCE, ZPOPMAX_FOR_SET, key, count);
    }
    private static final RedisCommand<Tuple> BZPOPMAX = new RedisCommand<>("BZPOPMAX", new ScoredSortedSingleBlockingReplayDecoder());
    @Override
    public Tuple bZPopMax(byte[] key, long timeout, TimeUnit unit) {
        // BZPOPMAX: blocking variant; timeout converted to whole seconds.
        Assert.notNull(key, "Key must not be null!");
        long seconds = unit.toSeconds(timeout);
        return write(key, ByteArrayCodec.INSTANCE, BZPOPMAX , key, seconds);
    }
    private static final RedisCommand<List<Object>> ZMSCORE = new RedisCommand<>("ZMSCORE", new ObjectListReplayDecoder<>());
    @Override
    public List<Double> zMScore(byte[] key, byte[]... values) {
        // ZMSCORE key member [member ...]: scores in request order.
        Assert.notNull(key, "Key must not be null!");
        List<Object> args = new ArrayList<>(values.length + 1);
        args.add(key);
        args.addAll(Arrays.asList(values));
        return write(key, DoubleCodec.INSTANCE, ZMSCORE, args.toArray());
    }
    private static final RedisCommand<Set<Object>> ZDIFF = new RedisCommand<>("ZDIFF", new ObjectSetReplayDecoder());
    @Override
    public Set<byte[]> zDiff(byte[]... sets) {
        // ZDIFF numkeys key [key ...]: members of the first set minus the rest.
        List<Object> args = new ArrayList<>(sets.length + 1);
        args.add(sets.length);
        args.addAll(Arrays.asList(sets));
        return write(sets[0], ByteArrayCodec.INSTANCE, ZDIFF, args.toArray());
    }
    private static final RedisCommand<Set<Tuple>> ZDIFF_SCORE = new RedisCommand<>("ZDIFF", new ScoredSortedSetReplayDecoder());
    @Override
    public Set<Tuple> zDiffWithScores(byte[]... sets) {
        // ZDIFF ... WITHSCORES: difference with each member's score.
        List<Object> args = new ArrayList<>(sets.length + 1);
        args.add(sets.length);
        args.addAll(Arrays.asList(sets));
        args.add("WITHSCORES");
        return write(sets[0], ByteArrayCodec.INSTANCE, ZDIFF_SCORE, args.toArray());
    }
    private static final RedisStrictCommand<Long> ZDIFFSTORE = new RedisStrictCommand<>("ZDIFFSTORE");
    @Override
    public Long zDiffStore(byte[] destKey, byte[]... sets) {
        // ZDIFFSTORE dest numkeys key [key ...]: stores the difference in dest.
        Assert.notNull(destKey, "Key must not be null!");
        List<Object> args = new ArrayList<>(sets.length + 2);
        args.add(destKey);
        args.add(sets.length);
        args.addAll(Arrays.asList(sets));
        return write(destKey, LongCodec.INSTANCE, ZDIFFSTORE, args.toArray());
    }
    private static final RedisCommand<Set<Object>> ZINTER = new RedisCommand<>("ZINTER", new ObjectSetReplayDecoder<>());
    @Override
    public Set<byte[]> zInter(byte[]... sets) {
        // ZINTER numkeys key [key ...]: intersection of the given sorted sets.
        List<Object> args = new ArrayList<>(sets.length + 1);
        args.add(sets.length);
        args.addAll(Arrays.asList(sets));
        return write(sets[0], ByteArrayCodec.INSTANCE, ZINTER, args.toArray());
    }
private static final RedisCommand<Set<Tuple>> ZINTER_SCORE = new RedisCommand<>("ZINTER", new ScoredSortedSetReplayDecoder());
@Override
public Set<Tuple> zInterWithScores(Aggregate aggregate, Weights weights, byte[]... sets) {
List<Object> args = new ArrayList<>(sets.length * 2 + 6);
args.add(sets.length);
args.addAll(Arrays.asList(sets));
if (weights != null) {
args.add("WEIGHTS");
for (double weight : weights.toArray()) {
args.add(BigDecimal.valueOf(weight).toPlainString());
}
}
if (aggregate != null) {
args.add("AGGREGATE");
args.add(aggregate.name());
}
args.add("WITHSCORES");
return write(sets[0], ByteArrayCodec.INSTANCE, ZINTER_SCORE, args.toArray());
}
@Override
public Set<Tuple> zInterWithScores(byte[]... sets) {
return zInterWithScores(null, (Weights) null, sets);
}
private static final RedisCommand<Set<Object>> ZUNION = new RedisCommand<>("ZUNION", new ObjectSetReplayDecoder<>());
@Override
public Set<byte[]> zUnion(byte[]... sets) {
List<Object> args = new ArrayList<>(sets.length + 1);
args.add(sets.length);
args.addAll(Arrays.asList(sets));
return write(sets[0], ByteArrayCodec.INSTANCE, ZUNION, args.toArray());
}
private static final RedisCommand<Set<Tuple>> ZUNION_SCORE = new RedisCommand<>("ZUNION", new ScoredSortedSetReplayDecoder());
@Override
public Set<Tuple> zUnionWithScores(Aggregate aggregate, Weights weights, byte[]... sets) {
List<Object> args = new ArrayList<>(sets.length * 2 + 6);
args.add(sets.length);
args.addAll(Arrays.asList(sets));
if (weights != null) {
args.add("WEIGHTS");
for (double weight : weights.toArray()) {
args.add(BigDecimal.valueOf(weight).toPlainString());
}
}
if (aggregate != null) {
args.add("AGGREGATE");
args.add(aggregate.name());
}
args.add("WITHSCORES");
return write(sets[0], ByteArrayCodec.INSTANCE, ZUNION_SCORE, args.toArray());
}
@Override
public Set<Tuple> zUnionWithScores(byte[]... sets) {
return zUnionWithScores(null, (Weights) null, sets);
}
private static final RedisCommand<Object> HRANDFIELD = new RedisCommand<>("HRANDFIELD");
@Override
public byte[] hRandField(byte[] key) {
Assert.notNull(key, "Key must not be null!");
return read(key, ByteArrayCodec.INSTANCE, HRANDFIELD, key);
}
private static final RedisCommand<Entry<Object, Object>> HRANDFIELD_SINGLE_V2 = new RedisCommand("HRANDFIELD",
new ListMultiDecoder2(new ListFirstObjectDecoder(), new SingleMapEntryDecoder()));
private static final RedisCommand<Entry<Object, Object>> HRANDFIELD_SINGLE = new RedisCommand("HRANDFIELD", new SingleMapEntryDecoder());
@Override
public Entry<byte[], byte[]> hRandFieldWithValues(byte[] key) {
Assert.notNull(key, "Key must not be null!");
if (executorService.getServiceManager().isResp3()) {
return read(key, ByteArrayCodec.INSTANCE, HRANDFIELD_SINGLE_V2, key, 1, "WITHVALUES");
}
return read(key, ByteArrayCodec.INSTANCE, HRANDFIELD_SINGLE, key, 1, "WITHVALUES");
}
private static final RedisCommand<List<Object>> HRANDFIELD_LIST = new RedisCommand<>("HRANDFIELD", new ObjectListReplayDecoder<>());
@Override
public List<byte[]> hRandField(byte[] key, long count) {
Assert.notNull(key, "Key must not be null!");
return read(key, ByteArrayCodec.INSTANCE, HRANDFIELD_LIST, key, count);
}
private static final RedisCommand<List<Entry<Object, Object>>> HRANDFIELD_VALUES_V2 = new RedisCommand("HRANDFIELD",
new ListMultiDecoder2(new ListMergeDecoder(), new ObjectMapEntryReplayDecoder()),
new EmptyListConvertor());
private static final RedisCommand<List<Entry<Object, Object>>> HRANDFIELD_VALUES = new RedisCommand("HRANDFIELD",
new ObjectMapEntryReplayDecoder(), new EmptyListConvertor());
@Override
public List<Entry<byte[], byte[]>> hRandFieldWithValues(byte[] key, long count) {
Assert.notNull(key, "Key must not be null!");
if (executorService.getServiceManager().isResp3()) {
return read(key, ByteArrayCodec.INSTANCE, HRANDFIELD_VALUES_V2, key, count, "WITHVALUES");
}
return read(key, ByteArrayCodec.INSTANCE, HRANDFIELD_VALUES, key, count, "WITHVALUES");
}
@Override
public Boolean copy(byte[] sourceKey, byte[] targetKey, boolean replace) {
Assert.notNull(sourceKey, "Key must not be null!");
Assert.notNull(targetKey, "Target must not be null!");
List<Object> params = new ArrayList<>();
params.add(sourceKey);
params.add(targetKey);
if (replace) {
params.add("REPLACE");
}
return write(sourceKey, StringCodec.INSTANCE, RedisCommands.COPY, params.toArray());
}
@Override
public byte[] lMove(byte[] sourceKey, byte[] destinationKey, Direction from, Direction to) {
Assert.notNull(sourceKey, "Key must not be null!");
Assert.notNull(destinationKey, "Destination key must not be null!");
Assert.notNull(from, "From must not be null!");
Assert.notNull(from, "To must not be null!");
return write(sourceKey, ByteArrayCodec.INSTANCE, RedisCommands.LMOVE,
sourceKey, destinationKey, from, to);
}
@Override
public byte[] bLMove(byte[] sourceKey, byte[] destinationKey, Direction from, Direction to, double timeout) {
Assert.notNull(sourceKey, "Key must not be null!");
Assert.notNull(destinationKey, "Destination key must not be null!");
Assert.notNull(from, "From must not be null!");
Assert.notNull(to, "To must not be null!");
Assert.notNull(timeout, "Timeout must not be null!");
return write(sourceKey, ByteArrayCodec.INSTANCE, RedisCommands.BLMOVE,
sourceKey, destinationKey, from, to, destinationKey);
}
@Override
public List<byte[]> lPop(byte[] key, long count) {
Assert.notNull(key, "Key must not be null!");
return write(key, ByteArrayCodec.INSTANCE, RedisCommands.LPOP_LIST, key, count);
}
@Override
public List<byte[]> rPop(byte[] key, long count) {
Assert.notNull(key, "Key must not be null!");
return write(key, ByteArrayCodec.INSTANCE, RedisCommands.RPOP_LIST, key, count);
}
private static final RedisCommand<List<Boolean>> SMISMEMBER = new RedisCommand("SMISMEMBER", new ObjectListReplayDecoder<>(), new BooleanReplayConvertor());
@Override
public List<Boolean> sMIsMember(byte[] key, byte[]... value) {
Assert.notNull(key, "Key must not be null!");
List<Object> args = new ArrayList<>();
args.add(key);
args.addAll(Arrays.asList(value));
return read(key, StringCodec.INSTANCE, SMISMEMBER, args.toArray());
}
private static final RedisCommand<Object> GETEX = new RedisCommand<>("GETEX");
@Override
public byte[] getEx(byte[] key, Expiration expiration) {
Assert.notNull(key, "Key must not be null!");
return write(key, ByteArrayCodec.INSTANCE, GETEX, key,
"PX", expiration.getExpirationTimeInMilliseconds());
}
private static final RedisCommand<Object> GETDEL = new RedisCommand<>("GETDEL");
@Override
public byte[] getDel(byte[] key) {
Assert.notNull(key, "Key must not be null!");
return write(key, ByteArrayCodec.INSTANCE, GETDEL, key);
}
private static final RedisCommand<Set<byte[]>> ZREVRANGEBYLEX = new RedisCommand<>("ZREVRANGEBYLEX", new ObjectSetReplayDecoder<byte[]>());
@Override
public Set<byte[]> zRevRangeByLex(byte[] key, org.springframework.data.domain.Range range, org.springframework.data.redis.connection.Limit limit) {
String min = value(range.getLowerBound(), "-");
String max = value(range.getUpperBound(), "+");
List<Object> args = new ArrayList<Object>();
args.add(key);
args.add(max);
args.add(min);
if (!limit.isUnlimited()) {
args.add("LIMIT");
args.add(limit.getOffset());
args.add(limit.getCount());
}
return read(key, ByteArrayCodec.INSTANCE, ZREVRANGEBYLEX, args.toArray());
}
@Override
public Long time(TimeUnit timeUnit) {
return read(null, LongCodec.INSTANCE, new RedisStrictCommand<>("TIME", new TimeLongObjectDecoder(timeUnit)));
}
private static final RedisStrictCommand<Long> ZREMRANGEBYLEX = new RedisStrictCommand<>("ZREMRANGEBYLEX");
@Override
public Long zRemRangeByLex(byte[] key, org.springframework.data.domain.Range range) {
String min = value(range.getLowerBound(), "-");
String max = value(range.getUpperBound(), "+");
return write(key, StringCodec.INSTANCE, ZREMRANGEBYLEX, key, min, max);
}
private static final RedisStrictCommand<Long> ZLEXCOUNT = new RedisStrictCommand<>("ZLEXCOUNT");
@Override
public Long zLexCount(byte[] key, org.springframework.data.domain.Range range) {
String min = value(range.getLowerBound(), "-");
String max = value(range.getUpperBound(), "+");
return read(key, StringCodec.INSTANCE, ZLEXCOUNT, key, min, max);
}
@Override
public void rewriteConfig() {
write(null, StringCodec.INSTANCE, RedisCommands.CONFIG_REWRITE);
}
public static final RedisCommand<Long> ZRANGESTORE = new RedisCommand<>("ZRANGESTORE");
@Override
public Long zRangeStoreByLex(byte[] dstKey, byte[] srcKey, org.springframework.data.domain.Range<byte[]> range, org.springframework.data.redis.connection.Limit limit) {
String max = value(range.getUpperBound(), "+");
String min = value(range.getLowerBound(), "-");
List<Object> args = new LinkedList<>();
args.add(dstKey);
args.add(srcKey);
args.add(min);
args.add(max);
args.add("BYLEX");
if (!limit.isUnlimited()) {
args.add("LIMIT");
args.add(limit.getOffset());
args.add(limit.getCount());
}
return write(srcKey, LongCodec.INSTANCE, ZRANGESTORE, args.toArray());
}
@Override
public Long zRangeStoreRevByLex(byte[] dstKey, byte[] srcKey, org.springframework.data.domain.Range<byte[]> range, org.springframework.data.redis.connection.Limit limit) {
String max = value(range.getUpperBound(), "+");
String min = value(range.getLowerBound(), "-");
List<Object> args = new LinkedList<>();
args.add(dstKey);
args.add(srcKey);
args.add(min);
args.add(max);
args.add("BYLEX");
args.add("REV");
if (!limit.isUnlimited()) {
args.add("LIMIT");
args.add(limit.getOffset());
args.add(limit.getCount());
}
return write(srcKey, LongCodec.INSTANCE, ZRANGESTORE, args.toArray());
}
@Override
public Long zRangeStoreByScore(byte[] dstKey, byte[] srcKey, org.springframework.data.domain.Range<? extends Number> range, org.springframework.data.redis.connection.Limit limit) {
String min = value(range.getLowerBound(), "-inf");
String max = value(range.getUpperBound(), "+inf");
List<Object> args = new LinkedList<>();
args.add(dstKey);
args.add(srcKey);
args.add(min);
args.add(max);
args.add("BYSCORE");
if (!limit.isUnlimited()) {
args.add("LIMIT");
args.add(limit.getOffset());
args.add(limit.getCount());
}
return write(srcKey, LongCodec.INSTANCE, ZRANGESTORE, args.toArray());
}
@Override
public Long zRangeStoreRevByScore(byte[] dstKey, byte[] srcKey, org.springframework.data.domain.Range<? extends Number> range, org.springframework.data.redis.connection.Limit limit) {
String min = value(range.getLowerBound(), "-inf");
String max = value(range.getUpperBound(), "+inf");
List<Object> args = new LinkedList<>();
args.add(dstKey);
args.add(srcKey);
args.add(min);
args.add(max);
args.add("BYSCORE");
args.add("REV");
if (!limit.isUnlimited()) {
args.add("LIMIT");
args.add(limit.getOffset());
args.add(limit.getCount());
}
return write(srcKey, LongCodec.INSTANCE, ZRANGESTORE, args.toArray());
}
@Override
public void flushDb(FlushOption option) {
write(null, StringCodec.INSTANCE, RedisCommands.FLUSHDB, option.toString());
}
@Override
public void flushAll(FlushOption option) {
write(null, StringCodec.INSTANCE, RedisCommands.FLUSHALL, option.toString());
}
private static final RedisStrictCommand<Void> REPLICAOF = new RedisStrictCommand<>("REPLICAOF");
@Override
public void replicaOf(String host, int port) {
write(null, StringCodec.INSTANCE, REPLICAOF, host, port);
}
@Override
public void replicaOfNoOne() {
write(null, StringCodec.INSTANCE, REPLICAOF, "NO", "ONE");
}
@Override
public org.springframework.data.redis.connection.RedisCommands commands() {
return this;
}
@Override
public RedisGeoCommands geoCommands() {
return this;
}
@Override
public RedisHashCommands hashCommands() {
return this;
}
@Override
public RedisHyperLogLogCommands hyperLogLogCommands() {
return this;
}
@Override
public RedisKeyCommands keyCommands() {
return this;
}
@Override
public RedisListCommands listCommands() {
return this;
}
@Override
public RedisSetCommands setCommands() {
return this;
}
@Override
public RedisScriptingCommands scriptingCommands() {
return this;
}
@Override
public RedisServerCommands serverCommands() {
return this;
}
@Override
public RedisStringCommands stringCommands() {
return this;
}
@Override
public RedisZSetCommands zSetCommands() {
return this;
}
}
| RedissonConnection |
java | spring-projects__spring-boot | module/spring-boot-devtools/src/test/java/org/springframework/boot/loader/launch/FakeJarLauncher.java | {
"start": 875,
"end": 1048
} | class ____ {
public static Consumer<String[]> action;
private FakeJarLauncher() {
}
public static void main(String... args) {
action.accept(args);
}
}
| FakeJarLauncher |
java | elastic__elasticsearch | test/framework/src/main/java/org/elasticsearch/http/ResponseInjectingHttpHandler.java | {
"start": 1948,
"end": 2262
} | interface ____ {
void writeResponse(HttpExchange exchange, HttpHandler delegate) throws IOException;
default boolean matchesRequest(HttpExchange exchange) {
return true;
}
}
@SuppressForbidden(reason = "We use HttpServer for the fixtures")
public static | RequestHandler |
java | elastic__elasticsearch | x-pack/plugin/rank-rrf/src/test/java/org/elasticsearch/xpack/rank/rrf/RRFRankBuilderTests.java | {
"start": 744,
"end": 2890
} | class ____ extends AbstractXContentSerializingTestCase<RRFRankBuilder> {
@Override
protected RRFRankBuilder createTestInstance() {
return new RRFRankBuilder(randomIntBetween(0, 100000), randomIntBetween(1, Integer.MAX_VALUE));
}
@Override
protected RRFRankBuilder mutateInstance(RRFRankBuilder instance) throws IOException {
if (randomBoolean()) {
return new RRFRankBuilder(instance.rankWindowSize(), instance.rankConstant() == 1 ? 2 : instance.rankConstant() - 1);
} else {
return new RRFRankBuilder(instance.rankWindowSize() == 0 ? 1 : instance.rankWindowSize() - 1, instance.rankConstant());
}
}
@Override
protected Writeable.Reader<RRFRankBuilder> instanceReader() {
return RRFRankBuilder::new;
}
@Override
protected RRFRankBuilder doParseInstance(XContentParser parser) throws IOException {
parser.nextToken();
Assert.assertEquals(parser.currentToken(), XContentParser.Token.START_OBJECT);
parser.nextToken();
assertEquals(parser.currentToken(), XContentParser.Token.FIELD_NAME);
assertEquals(parser.currentName(), RRFRankPlugin.NAME);
RRFRankBuilder builder = RRFRankBuilder.PARSER.parse(parser, null);
parser.nextToken();
Assert.assertEquals(parser.currentToken(), XContentParser.Token.END_OBJECT);
parser.nextToken();
Assert.assertNull(parser.currentToken());
return builder;
}
public void testCreateRankContexts() {
RRFRankBuilder rrfRankBuilder = createTestInstance();
List<Query> queries = List.of(new TermQuery(new Term("field0", "test0")), new TermQuery(new Term("field1", "test1")));
QueryPhaseRankShardContext queryPhaseRankShardContext = rrfRankBuilder.buildQueryPhaseShardContext(queries, randomInt());
assertEquals(queries, queryPhaseRankShardContext.queries());
assertEquals(rrfRankBuilder.rankWindowSize(), queryPhaseRankShardContext.rankWindowSize());
assertNotNull(rrfRankBuilder.buildQueryPhaseCoordinatorContext(randomInt(), randomInt()));
}
}
| RRFRankBuilderTests |
java | spring-projects__spring-data-jpa | spring-data-jpa/src/test/java/org/springframework/data/jpa/mapping/JpaPersistentPropertyImplUnitTests.java | {
"start": 9129,
"end": 9219
} | interface ____ extends AggregateRoot<JMoleculesAggregate, Identifier> {}
}
| JMoleculesAggregate |
java | micronaut-projects__micronaut-core | core/src/main/java/io/micronaut/core/util/ArgumentUtils.java | {
"start": 5957,
"end": 6086
} | interface ____ {
/**
* @return Whether the condition is true
*/
boolean condition();
}
}
| Check |
java | apache__camel | components/camel-thymeleaf/src/generated/java/org/apache/camel/component/thymeleaf/ThymeleafComponentConfigurer.java | {
"start": 736,
"end": 3075
} | class ____ extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
ThymeleafComponent target = (ThymeleafComponent) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "allowcontextmapall":
case "allowContextMapAll": target.setAllowContextMapAll(property(camelContext, boolean.class, value)); return true;
case "allowtemplatefromheader":
case "allowTemplateFromHeader": target.setAllowTemplateFromHeader(property(camelContext, boolean.class, value)); return true;
case "autowiredenabled":
case "autowiredEnabled": target.setAutowiredEnabled(property(camelContext, boolean.class, value)); return true;
case "lazystartproducer":
case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
default: return false;
}
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "allowcontextmapall":
case "allowContextMapAll": return boolean.class;
case "allowtemplatefromheader":
case "allowTemplateFromHeader": return boolean.class;
case "autowiredenabled":
case "autowiredEnabled": return boolean.class;
case "lazystartproducer":
case "lazyStartProducer": return boolean.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
ThymeleafComponent target = (ThymeleafComponent) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "allowcontextmapall":
case "allowContextMapAll": return target.isAllowContextMapAll();
case "allowtemplatefromheader":
case "allowTemplateFromHeader": return target.isAllowTemplateFromHeader();
case "autowiredenabled":
case "autowiredEnabled": return target.isAutowiredEnabled();
case "lazystartproducer":
case "lazyStartProducer": return target.isLazyStartProducer();
default: return null;
}
}
}
| ThymeleafComponentConfigurer |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest/deployment/src/test/java/io/quarkus/resteasy/reactive/server/test/security/AnnotationBasedAuthMechanismSelectionTest.java | {
"start": 15312,
"end": 15585
} | interface ____ mechanism is going to be used
return super.defaultImplementedClassLevelInterfaceMethod();
}
@Override
public String overriddenParentClassEndpoint() {
// here we do not repeat Path annotation, therefore parent | auth |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/observers/metadata/EventMetadataTest.java | {
"start": 3009,
"end": 3409
} | class ____ {
static final AtomicReference<EventMetadata> METADATA = new AtomicReference<EventMetadata>();
void observe(@Observes BigDecimal value, EventMetadata metadata) {
METADATA.set(metadata);
}
void observe(@Observes BigInteger value, EventMetadata metadata) {
METADATA.set(metadata);
}
}
@Dependent
static | BigObserver |
java | micronaut-projects__micronaut-core | http/src/main/java/io/micronaut/http/filter/FilterOrder.java | {
"start": 910,
"end": 1901
} | interface ____ {
/**
* Compute the order value for the given bean.
*
* @param bean The bean to compute the order value for, potentially implementing
* {@link Ordered}
* @return The order value
*/
int getOrder(Object bean);
/**
* Fixed order value.
*
* @param value The order value
*/
record Fixed(int value) implements FilterOrder {
@Override
public int getOrder(Object bean) {
return value;
}
}
/**
* Dynamic order value (from {@link Ordered#getOrder()}).
*
* @param fallbackValue The order value to use if the bean does not implement {@link Ordered}
*/
record Dynamic(int fallbackValue) implements FilterOrder {
@Override
public int getOrder(Object bean) {
if (bean instanceof Ordered o) {
return o.getOrder();
} else {
return fallbackValue;
}
}
}
}
| FilterOrder |
java | apache__camel | components/camel-mybatis/src/test/java/org/apache/camel/component/mybatis/MyBatisShutdownAllTasksTest.java | {
"start": 1183,
"end": 4052
} | class ____ extends MyBatisTestSupport {
@Override
public void doPostSetup() {
// super will insert 2 accounts already
Account account = new Account();
account.setId(881);
account.setFirstName("A");
account.setLastName("A");
account.setEmailAddress("a@gmail.com");
template.sendBody("mybatis:insertAccount?statementType=Insert", account);
account = new Account();
account.setId(882);
account.setFirstName("B");
account.setLastName("B");
account.setEmailAddress("b@gmail.com");
template.sendBody("mybatis:insertAccount?statementType=Insert", account);
account = new Account();
account.setId(883);
account.setFirstName("C");
account.setLastName("C");
account.setEmailAddress("c@gmail.com");
template.sendBody("mybatis:insertAccount?statementType=Insert", account);
account = new Account();
account.setId(884);
account.setFirstName("D");
account.setLastName("D");
account.setEmailAddress("d@gmail.com");
template.sendBody("mybatis:insertAccount?statementType=Insert", account);
account = new Account();
account.setId(885);
account.setFirstName("E");
account.setLastName("E");
account.setEmailAddress("e@gmail.com");
template.sendBody("mybatis:insertAccount?statementType=Insert", account);
account = new Account();
account.setId(886);
account.setFirstName("F");
account.setLastName("F");
account.setEmailAddress("f@gmail.com");
template.sendBody("mybatis:insertAccount?statementType=Insert", account);
}
@Test
public void testShutdownAllTasks() throws Exception {
context.getRouteController().startRoute("route1");
MockEndpoint bar = getMockEndpoint("mock:bar");
bar.expectedMinimumMessageCount(1);
bar.setResultWaitTime(3000);
MockEndpoint.assertIsSatisfied(context);
// shutdown during processing
context.stop();
// should route all 8
await()
.atMost(1, TimeUnit.SECONDS)
.untilAsserted(() -> assertEquals(8, bar.getReceivedCounter(), "Should complete all messages"));
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("mybatis:selectAllAccounts").noAutoStartup().routeId("route1")
// let it complete all tasks
.shutdownRunningTask(ShutdownRunningTask.CompleteAllTasks)
.delay(1000).to("seda:foo");
from("seda:foo").routeId("route2").to("mock:bar");
}
};
}
}
| MyBatisShutdownAllTasksTest |
java | apache__flink | flink-metrics/flink-metrics-core/src/test/java/org/apache/flink/metrics/AbstractHistogramTest.java | {
"start": 976,
"end": 1075
} | class ____ testing {@link Histogram} and {@link HistogramStatistics}
* implementations.
*/
public | for |
java | apache__camel | core/camel-support/src/main/java/org/apache/camel/support/resume/OffsetKeys.java | {
"start": 952,
"end": 1100
} | class ____ {
/**
* For creating anonymous offset keys
*
* @param <T> the type of the offset key
*/
private static | OffsetKeys |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/collection/spi/PersistentList.java | {
"start": 12840,
"end": 13058
} | class ____ extends AbstractListValueDelayedOperation {
public Remove(int index, E orphan) {
super( index, null, orphan );
}
@Override
public void operate() {
list.remove( getIndex() );
}
}
final | Remove |
java | apache__camel | components/camel-kubernetes/src/test/java/org/apache/camel/component/kubernetes/consumer/integration/events/KubernetesEventsConsumerNamespaceIT.java | {
"start": 1607,
"end": 2326
} | class ____ extends KubernetesConsumerTestSupport {
@Test
public void namespaceTest() throws Exception {
result.expectedBodiesReceived("Event e2 " + ns2 + " ADDED");
createEvent(ns1, "e1", null);
createEvent(ns2, "e2", null);
result.assertIsSatisfied();
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
fromF("kubernetes-events://%s?oauthToken=%s&namespace=%s", host, authToken, ns2)
.process(new KubernetesProcessor())
.to(result);
}
};
}
}
| KubernetesEventsConsumerNamespaceIT |
java | quarkusio__quarkus | extensions/qute/deployment/src/test/java/io/quarkus/qute/deployment/jsonobject/JsonObjectValueResolverTest.java | {
"start": 387,
"end": 1345
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest quarkusApp = new QuarkusUnitTest()
.withApplicationRoot(
app -> app.addClass(foo.class)
.addAsResource(new StringAsset(
"{tool.name} {tool.fieldNames} {tool.fields} {tool.size} {tool.empty} {tool.isEmpty} {tool.get('name')} {tool.containsKey('name')}"),
"templates/JsonObjectValueResolverTest/foo.txt"));
record foo(JsonObject tool) implements TemplateInstance {
}
@Test
void testJsonObjectValueResolver() {
HashMap<String, Object> toolMap = new HashMap<>();
toolMap.put("name", "Roq");
JsonObject jsonObject = new JsonObject(toolMap);
String result = new foo(jsonObject).render();
Assertions.assertThat(result).isEqualTo("Roq [name] [name] 1 false false Roq true");
}
}
| JsonObjectValueResolverTest |
java | apache__hadoop | hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/synthetic/SynthTraceJobProducer.java | {
"start": 8979,
"end": 10054
} | class ____ {
@JsonProperty("description")
String description;
@JsonProperty("num_nodes")
int num_nodes;
@JsonProperty("nodes_per_rack")
int nodes_per_rack;
@JsonProperty("num_jobs")
int num_jobs;
// in sec (selects a portion of time_distribution
@JsonProperty("rand_seed")
long rand_seed;
@JsonProperty("workloads")
List<Workload> workloads;
List<Double> workload_weights;
JDKRandomGenerator rand;
public void init(JDKRandomGenerator random){
this.rand = random;
// Pass rand forward
for(Workload w : workloads){
w.init(rand);
}
// Initialize workload weights
workload_weights = new ArrayList<>();
for(Workload w : workloads){
workload_weights.add(w.workload_weight);
}
}
Workload generateWorkload(){
return workloads.get(SynthUtils.getWeighted(workload_weights, rand));
}
}
/**
* Class used to parse a workload from file.
*/
@SuppressWarnings({ "membername", "checkstyle:visibilitymodifier" })
public static | Trace |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest-client/deployment/src/test/java/io/quarkus/rest/client/reactive/multipart/MultiByteWithRemoteErrorTest.java | {
"start": 2719,
"end": 2879
} | interface ____ {
@POST
@Consumes(MediaType.MULTIPART_FORM_DATA)
String post(@MultipartForm Form clientForm);
}
public static | Client |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/ExecutionDeploymentReconciler.java | {
"start": 1153,
"end": 1262
} | interface ____ {
/** Factory for {@link ExecutionDeploymentReconciler}. */
| ExecutionDeploymentReconciler |
java | elastic__elasticsearch | x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningUsageTransportActionTests.java | {
"start": 1042,
"end": 2550
} | class ____ extends ESTestCase {
public void testAddTrainedModelStatsHandlesMultipleDeployments() {
Map<String, Object> usage = new HashMap<>();
var deploymentConfig = TrainedModelConfigTests.createTestInstance("id1").build();
var stats = new GetTrainedModelsStatsAction.Response.TrainedModelStats(
"id1",
TrainedModelSizeStatsTests.createRandom(),
GetTrainedModelsStatsActionResponseTests.randomIngestStats(),
randomIntBetween(0, 10),
null,
null
);
StatsAccumulator actualMemoryUsage = new StatsAccumulator();
actualMemoryUsage.add(stats.getModelSizeStats().getModelSizeBytes());
var modelsResponse = new GetTrainedModelsAction.Response(
new QueryPage<>(List.of(deploymentConfig), 1, GetTrainedModelsAction.Response.RESULTS_FIELD)
);
var statsResponse = new GetTrainedModelsStatsAction.Response(
new QueryPage<>(List.of(stats), 1, GetTrainedModelsStatsAction.Response.RESULTS_FIELD)
);
MachineLearningUsageTransportAction.addTrainedModelStats(modelsResponse, statsResponse, usage);
@SuppressWarnings("unchecked")
var expectedModelMemoryUsage = ((Map<String, Object>) usage.get("trained_models")).get(
TrainedModelConfig.MODEL_SIZE_BYTES.getPreferredName()
);
assertThat(expectedModelMemoryUsage, is(actualMemoryUsage.asMap()));
}
}
| MachineLearningUsageTransportActionTests |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java | {
"start": 5487,
"end": 6666
} | class ____ implements Iterator<Slot> {
int slotIdx = -1;
@Override
public boolean hasNext() {
synchronized (ShortCircuitShm.this) {
return allocatedSlots.nextSetBit(slotIdx + 1) != -1;
}
}
@Override
public Slot next() {
synchronized (ShortCircuitShm.this) {
int nextSlotIdx = allocatedSlots.nextSetBit(slotIdx + 1);
if (nextSlotIdx == -1) {
throw new NoSuchElementException();
}
slotIdx = nextSlotIdx;
return slots[nextSlotIdx];
}
}
@Override
public void remove() {
throw new UnsupportedOperationException("SlotIterator " +
"doesn't support removal");
}
}
/**
* A slot containing information about a replica.
*
* The format is:
* word 0
* bit 0:32 Slot flags (see below).
* bit 33:63 Anchor count.
* word 1:7
* Reserved for future use, such as statistics.
* Padding is also useful for avoiding false sharing.
*
* Little-endian versus big-endian is not relevant here since both the client
* and the server reside on the same computer and use the same orientation.
*/
public | SlotIterator |
java | spring-projects__spring-security | oauth2/oauth2-client/src/main/java/org/springframework/security/oauth2/client/registration/ClientRegistration.java | {
"start": 24669,
"end": 25457
} | class ____ {
private boolean requireProofKey = true;
private Builder() {
}
/**
* Set to {@code true} if the client is required to provide a proof key
* challenge and verifier when performing the Authorization Code Grant flow.
* @param requireProofKey {@code true} if the client is required to provide a
* proof key challenge and verifier, {@code false} otherwise
* @return the {@link Builder} for further configuration
*/
public Builder requireProofKey(boolean requireProofKey) {
this.requireProofKey = requireProofKey;
return this;
}
public ClientSettings build() {
ClientSettings clientSettings = new ClientSettings();
clientSettings.requireProofKey = this.requireProofKey;
return clientSettings;
}
}
}
}
| Builder |
java | apache__camel | dsl/camel-yaml-dsl/camel-yaml-dsl-deserializers/src/generated/java/org/apache/camel/dsl/yaml/deserializers/ModelDeserializers.java | {
"start": 1169832,
"end": 1171373
} | class ____ extends YamlDeserializerBase<TokenizerImplementationDefinition> {
public TokenizerImplementationDefinitionDeserializer() {
super(TokenizerImplementationDefinition.class);
}
@Override
protected TokenizerImplementationDefinition newInstance() {
return new TokenizerImplementationDefinition();
}
@Override
protected boolean setProperty(TokenizerImplementationDefinition target, String propertyKey,
String propertyName, Node node) {
propertyKey = org.apache.camel.util.StringHelper.dashToCamelCase(propertyKey);
switch(propertyKey) {
case "id": {
String val = asText(node);
target.setId(val);
break;
}
default: {
return false;
}
}
return true;
}
}
@YamlType(
nodes = "topicLoadBalancer",
types = org.apache.camel.model.loadbalancer.TopicLoadBalancerDefinition.class,
order = org.apache.camel.dsl.yaml.common.YamlDeserializerResolver.ORDER_LOWEST - 1,
displayName = "Topic Load Balancer",
description = "Topic which sends to all destinations.",
deprecated = false,
properties = @YamlProperty(name = "id", type = "string", description = "The id of this node", displayName = "Id")
)
public static | TokenizerImplementationDefinitionDeserializer |
java | redisson__redisson | redisson/src/main/java/org/redisson/api/RLiveObjectService.java | {
"start": 10873,
"end": 11151
} | class ____ registered in one RLiveObjectService instance it is also
* accessible in another RLiveObjectService instance so long as they are
* created by the same RedissonClient instance.
*
* @param cls - type of instance
* @return <code>true</code> if | is |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/CurrencyTest_2.java | {
"start": 532,
"end": 952
} | class ____ {
private Currency value;
private Currency value1;
public Currency getValue() {
return value;
}
public void setValue(Currency value) {
this.value = value;
}
public Currency getValue1() {
return value1;
}
public void setValue1(Currency value1) {
this.value1 = value1;
}
}
}
| VO |
java | apache__flink | flink-table/flink-sql-parser/src/main/java/org/apache/flink/sql/parser/dql/SqlShowColumns.java | {
"start": 1269,
"end": 2235
} | class ____ extends SqlShowCall {
public static final SqlSpecialOperator OPERATOR =
new SqlSpecialOperator("SHOW COLUMNS", SqlKind.OTHER);
public SqlShowColumns(
SqlParserPos pos,
String preposition,
SqlIdentifier tableName,
boolean notLike,
SqlCharStringLiteral likeLiteral) {
// only LIKE currently supported for SHOW COLUMNS
super(
pos,
preposition,
tableName,
likeLiteral == null ? null : "LIKE",
likeLiteral,
notLike);
requireNonNull(preposition, "Preposition of 'SHOW COLUMNS' must be 'FROM' or 'IN'.");
requireNonNull(tableName, "tableName should not be null.");
}
@Override
public SqlOperator getOperator() {
return OPERATOR;
}
@Override
String getOperationName() {
return "SHOW COLUMNS";
}
}
| SqlShowColumns |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/injectionpoint/InjectionPointMetadataWithDynamicLookupTest.java | {
"start": 923,
"end": 7200
} | class ____ {
@RegisterExtension
private ArcTestContainer container = new ArcTestContainer(BeanWithInjectionPointMetadata.class,
MyDependentBean.class, MySingletonBean.class);
@Test
public void arcContainerInstance() {
// the "current" injection point of `Arc.container().instance(...)` doesn't seem to be well defined
// but the injection point used does support the required type and qualifiers
BeanWithInjectionPointMetadata bean = Arc.container().instance(BeanWithInjectionPointMetadata.class).get();
bean.assertPresent(ip -> {
assertEquals(BeanWithInjectionPointMetadata.class, ip.getType());
assertEquals(Set.of(), ip.getQualifiers());
assertNull(ip.getMember());
assertNull(ip.getBean());
});
}
@Test
public void arcContainerSelect() {
BeanWithInjectionPointMetadata bean = Arc.container().select(BeanWithInjectionPointMetadata.class).get();
bean.assertPresent(ip -> {
// the `Instance<BeanWithInjectionPointMetadata>` through which the `bean` was looked up
assertEquals(BeanWithInjectionPointMetadata.class, ip.getType());
assertEquals(Set.of(), ip.getQualifiers());
assertNull(ip.getMember());
assertNull(ip.getBean());
});
}
@Test
public void cdiCurrentSelect() {
BeanWithInjectionPointMetadata bean = CDI.current().select(BeanWithInjectionPointMetadata.class).get();
bean.assertPresent(ip -> {
// the `Instance<BeanWithInjectionPointMetadata>` through which the `bean` was looked up
assertEquals(BeanWithInjectionPointMetadata.class, ip.getType());
assertEquals(Set.of(), ip.getQualifiers());
assertNull(ip.getMember());
assertNull(ip.getBean());
});
}
@Test
public void beanManagerCreateInstanceAndSelect() {
BeanManager bm = Arc.container().beanManager();
BeanWithInjectionPointMetadata bean = bm.createInstance().select(BeanWithInjectionPointMetadata.class).get();
bean.assertPresent(ip -> {
// the `Instance<BeanWithInjectionPointMetadata>` through which the `bean` was looked up
assertEquals(BeanWithInjectionPointMetadata.class, ip.getType());
assertEquals(Set.of(), ip.getQualifiers());
assertNull(ip.getMember());
assertNull(ip.getBean());
});
}
@Test
public void beanManagerGetReference() {
BeanManager bm = Arc.container().beanManager();
Bean<BeanWithInjectionPointMetadata> bean = (Bean<BeanWithInjectionPointMetadata>) bm.resolve(
bm.getBeans(BeanWithInjectionPointMetadata.class));
CreationalContext<BeanWithInjectionPointMetadata> cc = bm.createCreationalContext(bean);
BeanWithInjectionPointMetadata instance = (BeanWithInjectionPointMetadata) bm.getReference(
bean, BeanWithInjectionPointMetadata.class, cc);
instance.assertAbsent();
}
@Test
public void beanManagerGetInjectableReference() {
InjectionPoint lookup = new DummyInjectionPoint(BeanWithInjectionPointMetadata.class, Default.Literal.INSTANCE);
BeanManager bm = Arc.container().beanManager();
CreationalContext<BeanWithInjectionPointMetadata> cc = bm.createCreationalContext(null);
BeanWithInjectionPointMetadata instance = (BeanWithInjectionPointMetadata) bm.getInjectableReference(lookup, cc);
instance.assertPresent(ip -> {
assertSame(lookup, ip);
});
}
@Test
public void injectionIntoDependentBean() {
MyDependentBean bean = Arc.container().select(MyDependentBean.class).get();
// the `Instance<MyDependentBean>` through which the `bean` was looked up
assertEquals(MyDependentBean.class, bean.ip.getType());
assertEquals(Set.of(), bean.ip.getQualifiers());
assertNull(bean.ip.getMember());
assertNull(bean.ip.getBean());
assertNotNull(bean.dependency);
bean.dependency.assertPresent(ip -> {
assertEquals(BeanWithInjectionPointMetadata.class, ip.getType());
assertEquals(Set.of(Default.Literal.INSTANCE), ip.getQualifiers());
assertEquals(MyDependentBean.class, ip.getMember().getDeclaringClass());
assertEquals("dependency", ip.getMember().getName());
assertEquals(MyDependentBean.class, ip.getBean().getBeanClass());
});
assertNotNull(bean.dependencyInstance);
bean.dependencyInstance.get().assertPresent(ip -> {
assertEquals(BeanWithInjectionPointMetadata.class, ip.getType());
assertEquals(Set.of(Default.Literal.INSTANCE), ip.getQualifiers());
assertEquals(MyDependentBean.class, ip.getMember().getDeclaringClass());
assertEquals("dependencyInstance", ip.getMember().getName());
assertEquals(MyDependentBean.class, ip.getBean().getBeanClass());
});
}
@Test
public void injectionIntoSingletonBean() {
MySingletonBean bean = Arc.container().select(MySingletonBean.class).get();
assertNotNull(bean.dependency);
bean.dependency.assertPresent(ip -> {
assertEquals(BeanWithInjectionPointMetadata.class, ip.getType());
assertEquals(Set.of(Default.Literal.INSTANCE), ip.getQualifiers());
assertEquals(MySingletonBean.class, ip.getMember().getDeclaringClass());
assertEquals("dependency", ip.getMember().getName());
assertEquals(MySingletonBean.class, ip.getBean().getBeanClass());
});
assertNotNull(bean.dependencyInstance);
bean.dependencyInstance.get().assertPresent(ip -> {
assertEquals(BeanWithInjectionPointMetadata.class, ip.getType());
assertEquals(Set.of(Default.Literal.INSTANCE), ip.getQualifiers());
assertEquals(MySingletonBean.class, ip.getMember().getDeclaringClass());
assertEquals("dependencyInstance", ip.getMember().getName());
assertEquals(MySingletonBean.class, ip.getBean().getBeanClass());
});
}
@Dependent
static | InjectionPointMetadataWithDynamicLookupTest |
java | mapstruct__mapstruct | processor/src/main/java/org/mapstruct/ap/internal/model/MethodReferencePresenceCheck.java | {
"start": 454,
"end": 1780
} | class ____ extends ModelElement implements PresenceCheck {
protected final MethodReference methodReference;
protected final boolean negate;
public MethodReferencePresenceCheck(MethodReference methodReference) {
this( methodReference, false );
}
public MethodReferencePresenceCheck(MethodReference methodReference, boolean negate) {
this.methodReference = methodReference;
this.negate = negate;
}
@Override
public Set<Type> getImportTypes() {
return methodReference.getImportTypes();
}
public MethodReference getMethodReference() {
return methodReference;
}
public boolean isNegate() {
return negate;
}
@Override
public PresenceCheck negate() {
return new MethodReferencePresenceCheck( methodReference, true );
}
@Override
public boolean equals(Object o) {
if ( this == o ) {
return true;
}
if ( o == null || getClass() != o.getClass() ) {
return false;
}
MethodReferencePresenceCheck that = (MethodReferencePresenceCheck) o;
return Objects.equals( methodReference, that.methodReference );
}
@Override
public int hashCode() {
return Objects.hash( methodReference );
}
}
| MethodReferencePresenceCheck |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/caching/CachingAndBatchTest.java | {
"start": 3376,
"end": 3633
} | class ____ {
@Id()
@GeneratedValue(strategy = GenerationType.SEQUENCE, generator = "id_sequence")
private Long id;
public MyEntity2() {
}
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
}
}
| MyEntity2 |
java | elastic__elasticsearch | x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/FromPartialGroupingAggregatorFunction.java | {
"start": 829,
"end": 4838
} | class ____ implements GroupingAggregatorFunction {
private static final List<IntermediateStateDesc> INTERMEDIATE_STATE_DESC = List.of(
new IntermediateStateDesc("partial", ElementType.COMPOSITE, "partial_agg")
);
public static List<IntermediateStateDesc> intermediateStateDesc() {
return INTERMEDIATE_STATE_DESC;
}
private final GroupingAggregatorFunction delegate;
private final int inputChannel;
public FromPartialGroupingAggregatorFunction(GroupingAggregatorFunction delegate, int inputChannel) {
this.delegate = delegate;
this.inputChannel = inputChannel;
}
@Override
public AddInput prepareProcessRawInputPage(SeenGroupIds seenGroupIds, Page page) {
return new AddInput() {
@Override
public void add(int positionOffset, IntBlock groupIds) {
assert false : "Intermediate group id must not have nulls";
throw new IllegalStateException("Intermediate group id must not have nulls");
}
@Override
public void add(int positionOffset, IntArrayBlock groupIds) {
assert false : "Intermediate group id must not have nulls";
throw new IllegalStateException("Intermediate group id must not have nulls");
}
@Override
public void add(int positionOffset, IntBigArrayBlock groupIds) {
assert false : "Intermediate group id must not have nulls";
throw new IllegalStateException("Intermediate group id must not have nulls");
}
@Override
public void add(int positionOffset, IntVector groupIds) {
addIntermediateInput(positionOffset, groupIds, page);
}
@Override
public void close() {}
};
}
@Override
public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) {
delegate.selectedMayContainUnseenGroups(seenGroupIds);
}
@Override
public void addIntermediateInput(int positionOffset, IntArrayBlock groupIdVector, Page page) {
final CompositeBlock inputBlock = page.getBlock(inputChannel);
delegate.addIntermediateInput(positionOffset, groupIdVector, inputBlock.asPage());
}
@Override
public void addIntermediateInput(int positionOffset, IntBigArrayBlock groupIdVector, Page page) {
final CompositeBlock inputBlock = page.getBlock(inputChannel);
delegate.addIntermediateInput(positionOffset, groupIdVector, inputBlock.asPage());
}
@Override
public void addIntermediateInput(int positionOffset, IntVector groupIdVector, Page page) {
final CompositeBlock inputBlock = page.getBlock(inputChannel);
delegate.addIntermediateInput(positionOffset, groupIdVector, inputBlock.asPage());
}
@Override
public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) {
Block[] partialBlocks = new Block[delegate.intermediateBlockCount()];
boolean success = false;
try {
delegate.evaluateIntermediate(partialBlocks, 0, selected);
blocks[offset] = new CompositeBlock(partialBlocks);
success = true;
} finally {
if (success == false) {
Releasables.close(partialBlocks);
}
}
}
@Override
public void evaluateFinal(Block[] blocks, int offset, IntVector selected, GroupingAggregatorEvaluationContext evaluationContext) {
delegate.evaluateFinal(blocks, offset, selected, evaluationContext);
}
@Override
public int intermediateBlockCount() {
return INTERMEDIATE_STATE_DESC.size();
}
@Override
public void close() {
Releasables.close(delegate);
}
@Override
public String toString() {
return getClass().getSimpleName() + "[" + "channel=" + inputChannel + ",delegate=" + delegate + "]";
}
}
| FromPartialGroupingAggregatorFunction |
java | elastic__elasticsearch | x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java | {
"start": 5787,
"end": 55035
} | class ____ implements ClusterStateListener {
private static final Logger logger = LogManager.getLogger(AutodetectProcessManager.class);
private final Client client;
private final ThreadPool threadPool;
private final JobManager jobManager;
private final JobResultsProvider jobResultsProvider;
private final AutodetectProcessFactory autodetectProcessFactory;
private final NormalizerFactory normalizerFactory;
private final IndexNameExpressionResolver expressionResolver;
private final JobResultsPersister jobResultsPersister;
private final JobDataCountsPersister jobDataCountsPersister;
private final AnnotationPersister annotationPersister;
private final NativeStorageProvider nativeStorageProvider;
private final ConcurrentMap<Long, ProcessContext> processByAllocation = new ConcurrentHashMap<>();
private volatile int maxAllowedRunningJobs;
private final NamedXContentRegistry xContentRegistry;
private final AnomalyDetectionAuditor auditor;
private volatile boolean upgradeInProgress;
private volatile boolean resetInProgress;
private volatile boolean nodeDying;
public AutodetectProcessManager(
Settings settings,
Client client,
ThreadPool threadPool,
NamedXContentRegistry xContentRegistry,
AnomalyDetectionAuditor auditor,
ClusterService clusterService,
JobManager jobManager,
JobResultsProvider jobResultsProvider,
JobResultsPersister jobResultsPersister,
JobDataCountsPersister jobDataCountsPersister,
AnnotationPersister annotationPersister,
AutodetectProcessFactory autodetectProcessFactory,
NormalizerFactory normalizerFactory,
NativeStorageProvider nativeStorageProvider,
IndexNameExpressionResolver expressionResolver
) {
this.client = client;
this.threadPool = threadPool;
this.xContentRegistry = xContentRegistry;
this.maxAllowedRunningJobs = MachineLearning.MAX_OPEN_JOBS_PER_NODE.get(settings);
this.autodetectProcessFactory = autodetectProcessFactory;
this.normalizerFactory = normalizerFactory;
this.expressionResolver = expressionResolver;
this.jobManager = jobManager;
this.jobResultsProvider = jobResultsProvider;
this.jobResultsPersister = jobResultsPersister;
this.jobDataCountsPersister = jobDataCountsPersister;
this.annotationPersister = annotationPersister;
this.auditor = auditor;
this.nativeStorageProvider = Objects.requireNonNull(nativeStorageProvider);
clusterService.addListener(this);
clusterService.getClusterSettings()
.addSettingsUpdateConsumer(MachineLearning.MAX_OPEN_JOBS_PER_NODE, this::setMaxAllowedRunningJobs);
}
void setMaxAllowedRunningJobs(int maxAllowedRunningJobs) {
this.maxAllowedRunningJobs = maxAllowedRunningJobs;
}
// The primary use of this is for license expiry
public synchronized void closeAllJobsOnThisNode(String reason) {
// Note, snapshot upgrader processes could still be running, but those are short lived
// Leaving them running is OK.
int numJobs = processByAllocation.size();
if (numJobs != 0) {
logger.info("Closing [{}] jobs, because [{}]", numJobs, reason);
for (ProcessContext processContext : processByAllocation.values()) {
JobTask jobTask = processContext.getJobTask();
setJobState(jobTask, JobState.CLOSING, reason);
jobTask.closeJob(reason);
}
}
}
public void killProcess(JobTask jobTask, boolean awaitCompletion, String reason) {
logger.trace(
() -> format("[%s] Killing process: awaitCompletion = [%s]; reason = [%s]", jobTask.getJobId(), awaitCompletion, reason)
);
ProcessContext processContext = processByAllocation.remove(jobTask.getAllocationId());
if (processContext != null) {
processContext.newKillBuilder()
.setAwaitCompletion(awaitCompletion)
.setFinish(true)
.setReason(reason)
.setShouldFinalizeJob(upgradeInProgress == false && resetInProgress == false)
.kill();
} else {
// If the process is missing but the task exists this is most likely
// due to 3 reasons. The first is because the job went into the failed
// state then the node restarted causing the task to be recreated
// but the failed process wasn't. The second is that the job went into
// the failed state and the user tries to remove it force-deleting it.
// Force-delete issues a kill but the process will not be present
// as it is cleaned up already. The third is that the kill has been
// received before the process has even started. In all cases, we still
// need to remove the task from the TaskManager (which is what the kill would do)
logger.trace(() -> "[" + jobTask.getJobId() + "] Marking job task as completed");
jobTask.markAsCompleted();
}
}
public void killAllProcessesOnThisNode() {
// For snapshot upgrade tasks, they don't exist in `processByAllocation`
// They are short lived, but once they are marked as "started" they cannot be restarted as the snapshot could be corrupted
// Consequently, just let them die with the node. But try not to move forward with saving the upgraded state if the node
// is dying
nodeDying = true;
Iterator<ProcessContext> iterator = processByAllocation.values().iterator();
while (iterator.hasNext()) {
ProcessContext processContext = iterator.next();
processContext.newKillBuilder().setAwaitCompletion(false).setFinish(false).setSilent(true).kill();
iterator.remove();
}
}
public boolean isNodeDying() {
return nodeDying;
}
/**
* Makes open jobs on this node go through the motions of closing but
* without completing the persistent task and instead telling the
* master node to assign the persistent task to a different node.
* The intended user of this functionality is the node shutdown API.
* Jobs that are already closing continue to close.
*/
public synchronized void vacateOpenJobsOnThisNode() {
for (ProcessContext processContext : processByAllocation.values()) {
// We ignore jobs that either don't have a running process yet or already closing.
// - The ones that don't yet have a running process will get picked up on a subsequent call to this
// method. This is simpler than trying to interact with a job before its process is started,
// and importantly, when it eventually does get picked up it will be fast to shut down again
// since it will only just have been started.
// - For jobs that are already closing we might as well let them close on the current node
// rather than trying to vacate them to a different node first.
if (processContext.getState() == ProcessContext.ProcessStateName.RUNNING && processContext.getJobTask().triggerVacate()) {
// We need to fork here, as persisting state is a potentially long-running operation
threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME)
.execute(() -> closeProcessAndTask(processContext, processContext.getJobTask(), "node is shutting down"));
}
}
}
/**
* Initiate background persistence of the job
* @param jobTask The job task
* @param handler Listener
*/
public void persistJob(JobTask jobTask, Consumer<Exception> handler) {
AutodetectCommunicator communicator = getOpenAutodetectCommunicator(jobTask);
if (communicator == null) {
String message = String.format(
Locale.ROOT,
"Cannot persist because job [%s] does not have a corresponding autodetect process",
jobTask.getJobId()
);
logger.debug(message);
handler.accept(ExceptionsHelper.conflictStatusException(message));
return;
}
communicator.persistJob((aVoid, e) -> handler.accept(e));
}
/**
* Passes data to the native process.
* This is a blocking call that won't return until all the data has been
* written to the process.
* <p>
* An ElasticsearchStatusException will be thrown is any of these error conditions occur:
* <ol>
* <li>If a configured field is missing from the input</li>
* <li>If JSON data is malformed and we cannot recover parsing</li>
* <li>If a high proportion of the records the timestamp field that cannot be parsed</li>
* <li>If a high proportion of the records chronologically out of order</li>
* </ol>
*
* @param jobTask The job task
* @param analysisRegistry Registry of analyzer components - this is used to build a categorization analyzer if necessary
* @param input Data input stream
* @param xContentType the {@link XContentType} of the input
* @param params Data processing parameters
* @param handler Delegate error or datacount results (Count of records, fields, bytes, etc written as a result of this call)
*/
public void processData(
JobTask jobTask,
AnalysisRegistry analysisRegistry,
InputStream input,
XContentType xContentType,
DataLoadParams params,
BiConsumer<DataCounts, Exception> handler
) {
AutodetectCommunicator communicator = getOpenAutodetectCommunicator(jobTask);
if (communicator == null) {
throw ExceptionsHelper.conflictStatusException(
"Cannot process data because job [" + jobTask.getJobId() + "] does not have a corresponding autodetect process"
);
}
communicator.writeToJob(input, analysisRegistry, xContentType, params, handler);
}
/**
* Flush the running job, ensuring that the native process has had the
* opportunity to process all data previously sent to it with none left
* sitting in buffers.
*
* @param jobTask The job task
* @param params Parameters describing the controls that will accompany the flushing
* (e.g. calculating interim results, time control, etc.)
*/
public void flushJob(JobTask jobTask, FlushJobParams params, ActionListener<FlushAcknowledgement> handler) {
logger.debug("Flushing job {}", jobTask.getJobId());
AutodetectCommunicator communicator = getOpenAutodetectCommunicator(jobTask);
if (communicator == null) {
String message = String.format(
Locale.ROOT,
"Cannot flush because job [%s] does not have a corresponding autodetect process",
jobTask.getJobId()
);
logger.debug(message);
handler.onFailure(ExceptionsHelper.conflictStatusException(message));
return;
}
communicator.flushJob(params, (flushAcknowledgement, e) -> {
if (e != null) {
String msg = String.format(Locale.ROOT, "[%s] exception while flushing job", jobTask.getJobId());
logger.error(msg);
handler.onFailure(ExceptionsHelper.serverError(msg, e));
} else {
handler.onResponse(flushAcknowledgement);
}
});
}
/**
* Do a forecast for the running job.
*
* @param jobTask The job task
* @param params Forecast parameters
*/
public void forecastJob(JobTask jobTask, ForecastParams params, Consumer<Exception> handler) {
String jobId = jobTask.getJobId();
logger.debug("Forecasting job {}", jobId);
AutodetectCommunicator communicator = getOpenAutodetectCommunicator(jobTask);
if (communicator == null) {
String message = String.format(
Locale.ROOT,
"Cannot forecast because job [%s] does not have a corresponding autodetect process",
jobId
);
logger.debug(message);
handler.accept(ExceptionsHelper.conflictStatusException(message));
return;
}
communicator.forecastJob(params, (aVoid, e) -> {
if (e == null) {
handler.accept(null);
} else {
String msg = String.format(Locale.ROOT, "[%s] exception while forecasting job", jobId);
logger.error(msg, e);
handler.accept(ExceptionsHelper.serverError(msg, e));
}
});
}
public void writeUpdateProcessMessage(JobTask jobTask, UpdateParams updateParams, Consumer<Exception> handler) {
AutodetectCommunicator communicator = getOpenAutodetectCommunicator(jobTask);
if (communicator == null) {
String message = "Cannot update the job config because job ["
+ jobTask.getJobId()
+ "] does not have a corresponding autodetect process";
logger.debug(message);
handler.accept(ExceptionsHelper.conflictStatusException(message));
return;
}
UpdateProcessMessage.Builder updateProcessMessage = new UpdateProcessMessage.Builder();
updateProcessMessage.setModelPlotConfig(updateParams.getModelPlotConfig());
updateProcessMessage.setDetectorUpdates(updateParams.getDetectorUpdates());
// Step 3. Set scheduled events on message and write update process message
ActionListener<QueryPage<ScheduledEvent>> eventsListener = ActionListener.wrap(events -> {
updateProcessMessage.setScheduledEvents(events == null ? null : events.results());
communicator.writeUpdateProcessMessage(updateProcessMessage.build(), (aVoid, e) -> handler.accept(e));
}, handler);
// Step 2. Set the filters on the message and get scheduled events
ActionListener<List<MlFilter>> filtersListener = ActionListener.wrap(filters -> {
updateProcessMessage.setFilters(filters);
if (updateParams.isUpdateScheduledEvents()) {
jobManager.getJob(jobTask.getJobId(), new ActionListener<>() {
@Override
public void onResponse(Job job) {
Optional<Tuple<DataCounts, Tuple<ModelSizeStats, TimingStats>>> stats = getStatistics(jobTask);
DataCounts dataCounts = stats.isPresent() ? stats.get().v1() : new DataCounts(job.getId());
ScheduledEventsQueryBuilder query = new ScheduledEventsQueryBuilder().start(job.earliestValidTimestamp(dataCounts));
logger.debug(
"[{}] Fetching scheduled events for calendar update, time range: [{}]",
jobTask.getJobId(),
job.earliestValidTimestamp(dataCounts)
);
jobResultsProvider.scheduledEventsForJob(jobTask.getJobId(), job.getGroups(), query, eventsListener);
}
@Override
public void onFailure(Exception e) {
handler.accept(e);
}
});
} else {
eventsListener.onResponse(null);
}
}, handler);
// All referenced filters must also be updated
Set<String> filterIds = updateParams.extractReferencedFilters();
// Step 1. Get the filters
if (filterIds.isEmpty()) {
filtersListener.onResponse(null);
} else {
GetFiltersAction.Request getFilterRequest = new GetFiltersAction.Request(String.join(",", filterIds));
getFilterRequest.setPageParams(new PageParams(0, filterIds.size()));
executeAsyncWithOrigin(
client,
ML_ORIGIN,
GetFiltersAction.INSTANCE,
getFilterRequest,
ActionListener.wrap(getFilterResponse -> filtersListener.onResponse(getFilterResponse.getFilters().results()), handler)
);
}
}
public void upgradeSnapshot(SnapshotUpgradeTask task, Consumer<Exception> closeHandler) {
final String jobId = task.getJobId();
final String snapshotId = task.getSnapshotId();
final Function<String, SnapshotUpgradeTaskState> failureBuilder = (reason) -> new SnapshotUpgradeTaskState(
SnapshotUpgradeState.FAILED,
task.getAllocationId(),
reason
);
// Start the process
jobManager.getJob(jobId, ActionListener.wrap(job -> {
if (job.getJobVersion() == null) {
closeHandler.accept(
ExceptionsHelper.badRequestException(
"Cannot open job [" + jobId + "] because jobs created prior to version 5.5 are not supported"
)
);
return;
}
jobResultsProvider.getAutodetectParams(job, snapshotId, params -> {
if (params.modelSnapshot() == null) {
closeHandler.accept(
new ElasticsearchStatusException(
"cannot find snapshot [{}] for job [{}] to upgrade",
RestStatus.NOT_FOUND,
jobId,
snapshotId
)
);
return;
}
if (resetInProgress) {
logger.trace(
() -> format("Aborted upgrading snapshot [%s] for job [%s] as ML feature is being reset", snapshotId, jobId)
);
closeHandler.accept(null);
return;
}
// We need to fork, otherwise we restore model state from a network thread (several GET api calls):
threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME).execute(new AbstractRunnable() {
@Override
public void onFailure(Exception e) {
closeHandler.accept(e);
}
@Override
protected void doRun() {
if (nodeDying) {
logger.info(() -> format("Aborted upgrading snapshot [%s] for job [%s] as node is dying", snapshotId, jobId));
closeHandler.accept(null);
return;
}
if (resetInProgress) {
logger.trace(
() -> format("Aborted upgrading snapshot [%s] for job [%s] as ML feature is being reset", snapshotId, jobId)
);
closeHandler.accept(null);
return;
}
runSnapshotUpgrade(task, job, params, closeHandler);
}
});
}, e1 -> {
logger.warn(() -> format("[%s] [%s] Failed to gather information required to upgrade snapshot job", jobId, snapshotId), e1);
task.updatePersistentTaskState(
failureBuilder.apply(e1.getMessage()),
ActionListener.wrap(t -> closeHandler.accept(e1), e2 -> {
logger.warn(() -> format("[%s] [%s] failed to set task to failed", jobId, snapshotId), e2);
closeHandler.accept(e1);
})
);
});
}, closeHandler));
}
public void openJob(
JobTask jobTask,
ClusterState clusterState,
TimeValue masterNodeTimeout,
BiConsumer<Exception, Boolean> closeHandler
) {
String jobId = jobTask.getJobId();
if (jobTask.isClosing()) {
logger.info("Aborting opening of job [{}] as it is being closed", jobId);
jobTask.markAsCompleted();
return;
}
logger.info("Opening job [{}]", jobId);
// Start the process
ActionListener<Boolean> stateAliasHandler = ActionListener.wrap(
r -> jobManager.getJob(
jobId,
ActionListener.wrap(job -> startProcess(jobTask, job, closeHandler), e -> closeHandler.accept(e, true))
),
e -> {
if (ExceptionsHelper.unwrapCause(e) instanceof InvalidAliasNameException) {
String msg = "Detected a problem with your setup of machine learning, the state index alias ["
+ AnomalyDetectorsIndex.jobStateIndexWriteAlias()
+ "] exists as index but must be an alias.";
logger.error(() -> format("[%s] %s", jobId, msg), e);
// The close handler is responsible for auditing this and setting the job state to failed
closeHandler.accept(new IllegalStateException(msg, e), true);
} else {
closeHandler.accept(e, true);
}
}
);
// Make sure the state index and alias exist and are writeable
ActionListener<Boolean> resultsMappingUpdateHandler = ActionListener.wrap(
ack -> AnomalyDetectorsIndex.createStateIndexAndAliasIfNecessaryAndWaitForYellow(
client,
clusterState,
expressionResolver,
masterNodeTimeout,
stateAliasHandler
),
e -> {
logger.error(() -> "[" + jobId + "] ML state index alias could not be updated", e);
closeHandler.accept(e, true);
}
);
// Try adding the results doc mapping - this updates to the latest version if an old mapping is present
ActionListener<Boolean> annotationsIndexUpdateHandler = ActionListener.wrap(
ack -> ElasticsearchMappings.addDocMappingIfMissing(
AnomalyDetectorsIndex.jobResultsAliasedName(jobId),
AnomalyDetectorsIndex::wrappedResultsMapping,
client,
clusterState,
masterNodeTimeout,
resultsMappingUpdateHandler,
AnomalyDetectorsIndex.RESULTS_INDEX_MAPPINGS_VERSION
),
e -> {
// Due to a bug in 7.9.0 it's possible that the annotations index already has incorrect mappings
// and it would cause more harm than good to block jobs from opening in subsequent releases
logger.warn(() -> "[" + jobId + "] ML annotations index could not be updated with latest mappings", e);
ElasticsearchMappings.addDocMappingIfMissing(
AnomalyDetectorsIndex.jobResultsAliasedName(jobId),
AnomalyDetectorsIndex::wrappedResultsMapping,
client,
clusterState,
masterNodeTimeout,
resultsMappingUpdateHandler,
AnomalyDetectorsIndex.RESULTS_INDEX_MAPPINGS_VERSION
);
}
);
// Create the annotations index if necessary - this also updates the mappings if an old mapping is present
AnnotationIndex.createAnnotationsIndexIfNecessaryAndWaitForYellow(
client,
clusterState,
masterNodeTimeout,
annotationsIndexUpdateHandler
);
}
    /**
     * Starts the native autodetect process for an opening job: registers a {@link ProcessContext}
     * for the task's allocation, fetches the job's autodetect parameters, then forks to the
     * utility thread pool to create the process, restore model state and move the task to
     * {@link JobState#OPENED}. Failures are reported through {@code closeHandler}.
     *
     * @param jobTask the persistent task for the job being opened
     * @param job the job configuration
     * @param closeHandler invoked on failure (and by downstream close paths); the boolean flag is
     *        always {@code true} here — its exact semantics are defined by the caller
     */
    private void startProcess(JobTask jobTask, Job job, BiConsumer<Exception, Boolean> closeHandler) {
        if (job.getJobVersion() == null) {
            closeHandler.accept(
                ExceptionsHelper.badRequestException(
                    "Cannot open job [" + job.getId() + "] because jobs created prior to version 5.5 are not supported"
                ),
                true
            );
            return;
        }
        processByAllocation.putIfAbsent(jobTask.getAllocationId(), new ProcessContext(jobTask));
        jobResultsProvider.getAutodetectParams(job, params -> {
            // We need to fork, otherwise we restore model state from a network thread (several GET api calls):
            threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME).execute(new AbstractRunnable() {
                @Override
                public void onFailure(Exception e) {
                    closeHandler.accept(e, true);
                }
                @Override
                protected void doRun() {
                    // Context may have been removed by a concurrent close/kill since we registered it above.
                    ProcessContext processContext = processByAllocation.get(jobTask.getAllocationId());
                    if (processContext == null) {
                        logger.debug("Aborted opening job [{}] as it has been closed or killed", job.getId());
                        return;
                    }
                    // We check again after the process state is locked to ensure no race conditions are hit.
                    if (processContext.getJobTask().isClosing()) {
                        logger.debug("Aborted opening job [{}] as it is being closed (before starting process)", job.getId());
                        jobTask.markAsCompleted();
                        return;
                    }
                    try {
                        if (createProcessAndSetRunning(processContext, job, params, closeHandler)) {
                            // This next check also covers the case of a process being killed while it was being started.
                            // It relies on callers setting the closing flag on the job task before calling this method.
                            // It also relies on the fact that at this stage of the process lifecycle kill and close are
                            // basically identical, i.e. the process has done so little work that making it exit by closing
                            // its input stream will not result in side effects.
                            if (processContext.getJobTask().isClosing()) {
                                logger.debug(
                                    "Aborted opening job [{}] as it is being closed or killed (after starting process)",
                                    job.getId()
                                );
                                closeProcessAndTask(processContext, jobTask, "job is already closing");
                                return;
                            }
                            processContext.getAutodetectCommunicator().restoreState(params.modelSnapshot());
                            setJobState(jobTask, JobState.OPENED, null, e -> {
                                if (e != null) {
                                    logSetJobStateFailure(JobState.OPENED, job.getId(), e);
                                    if (ExceptionsHelper.unwrapCause(e) instanceof ResourceNotFoundException) {
                                        // Don't leave a process with no persistent task hanging around
                                        processContext.newKillBuilder().setAwaitCompletion(false).setFinish(false).kill();
                                        processByAllocation.remove(jobTask.getAllocationId());
                                    }
                                }
                            });
                        }
                    } catch (Exception e1) {
                        // No need to log here as the persistent task framework will log it
                        try {
                            // Don't leave a partially initialised process hanging around
                            processContext.newKillBuilder().setAwaitCompletion(false).setFinish(false).kill();
                            processByAllocation.remove(jobTask.getAllocationId());
                        } finally {
                            setJobState(jobTask, JobState.FAILED, e1.getMessage(), e2 -> closeHandler.accept(e1, true));
                        }
                    }
                }
            });
        }, e1 -> {
            logger.warn("Failed to gather information required to open job [" + job.getId() + "]", e1);
            setJobState(jobTask, JobState.FAILED, e1.getMessage(), e2 -> closeHandler.accept(e1, true));
        });
    }
    /**
     * Launches a model snapshot upgrade for the given task. The upgrader keeps running only while
     * this node stays alive ({@code nodeDying == false}) and reports completion or failure to
     * {@code handler}.
     */
    private void runSnapshotUpgrade(SnapshotUpgradeTask task, Job job, AutodetectParams params, Consumer<Exception> handler) {
        JobModelSnapshotUpgrader jobModelSnapshotUpgrader = new JobModelSnapshotUpgrader(
            task,
            job,
            params,
            threadPool,
            autodetectProcessFactory,
            jobResultsPersister,
            client,
            nativeStorageProvider,
            handler,
            () -> nodeDying == false
        );
        jobModelSnapshotUpgrader.start();
    }
    /**
     * Creates the autodetect communicator (and with it the native process) for a job and marks the
     * process context as running, under the context lock so a concurrent close cannot slip in
     * between process start and the state update.
     *
     * @return {@code true} if the process was created and set running; {@code false} if the job
     *         was already running or is closing
     * @throws IOException if writing the process header fails
     */
    private boolean createProcessAndSetRunning(
        ProcessContext processContext,
        Job job,
        AutodetectParams params,
        BiConsumer<Exception, Boolean> handler
    ) throws IOException {
        // At this point we lock the process context until the process has been started.
        // The reason behind this is to ensure closing the job does not happen before
        // the process is started as that can result to the job getting seemingly closed
        // but the actual process is hanging alive.
        processContext.tryLock();
        try {
            if (processContext.getState() != ProcessContext.ProcessStateName.NOT_RUNNING) {
                logger.debug("Cannot open job [{}] when its state is [{}]", job.getId(), processContext.getState().getClass().getName());
                return false;
            }
            if (processContext.getJobTask().isClosing()) {
                logger.debug("Cannot open job [{}] as it is closing", job.getId());
                processContext.getJobTask().markAsCompleted();
                return false;
            }
            AutodetectCommunicator communicator = create(processContext.getJobTask(), job, params, handler);
            communicator.writeHeader();
            processContext.setRunning(communicator);
            return true;
        } finally {
            // Now that the process is running and we have updated its state we can unlock.
            // It is important to unlock before we initialize the communicator (ie. load the model state)
            // as that may be a long-running method.
            processContext.unlock();
        }
    }
    /**
     * Builds the full autodetect pipeline for a job: capacity check, native process creation,
     * result processor, renormalizer and the communicator that ties them together.
     * Package-private for testing.
     *
     * @throws ElasticsearchStatusException (429) when the node already runs the maximum number of jobs
     */
    AutodetectCommunicator create(JobTask jobTask, Job job, AutodetectParams autodetectParams, BiConsumer<Exception, Boolean> handler) {
        // Copy for consistency within a single method call
        int localMaxAllowedRunningJobs = maxAllowedRunningJobs;
        // Closing jobs can still be using some or all threads in MachineLearning.JOB_COMMS_THREAD_POOL_NAME
        // that an open job uses, so include them too when considering if enough threads are available.
        int currentRunningJobs = processByAllocation.size();
        // TODO: in future this will also need to consider jobs that are not anomaly detector jobs
        if (currentRunningJobs > localMaxAllowedRunningJobs) {
            throw new ElasticsearchStatusException(
                "max running job capacity [" + localMaxAllowedRunningJobs + "] reached",
                RestStatus.TOO_MANY_REQUESTS
            );
        }
        String jobId = jobTask.getJobId();
        notifyLoadingSnapshot(jobId, autodetectParams);
        // A job that has processed records should normally also have a snapshot and quantiles;
        // warn (but continue) if either is missing.
        if (autodetectParams.dataCounts().getLatestRecordTimeStamp() != null) {
            if (autodetectParams.modelSnapshot() == null) {
                String msg = "No model snapshot could be found for a job with processed records";
                logger.warn("[{}] {}", jobId, msg);
                auditor.warning(jobId, "No model snapshot could be found for a job with processed records");
            }
            if (autodetectParams.quantiles() == null) {
                String msg = "No quantiles could be found for a job with processed records";
                logger.warn("[{}] {}", jobId, msg);
                auditor.warning(jobId, msg);
            }
        }
        // A TP with no queue, so that we fail immediately if there are no threads available
        ExecutorService autodetectExecutorService = threadPool.executor(MachineLearning.JOB_COMMS_THREAD_POOL_NAME);
        DataCountsReporter dataCountsReporter = new DataCountsReporter(job, autodetectParams.dataCounts(), jobDataCountsPersister);
        ScoresUpdater scoresUpdater = new ScoresUpdater(
            job,
            jobResultsProvider,
            new JobRenormalizedResultsPersister(job.getId(), client),
            normalizerFactory
        );
        ExecutorService renormalizerExecutorService = threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME);
        Renormalizer renormalizer = new ShortCircuitingRenormalizer(jobId, scoresUpdater, renormalizerExecutorService);
        AutodetectProcess process = autodetectProcessFactory.createAutodetectProcess(
            job,
            autodetectParams,
            autodetectExecutorService,
            onProcessCrash(jobTask)
        );
        AutodetectResultProcessor processor = new AutodetectResultProcessor(
            client,
            auditor,
            jobId,
            renormalizer,
            jobResultsPersister,
            annotationPersister,
            process,
            autodetectParams.modelSizeStats(),
            autodetectParams.timingStats()
        );
        ExecutorService autodetectWorkerExecutor;
        try (ThreadContext.StoredContext ignore = threadPool.getThreadContext().stashContext()) {
            autodetectWorkerExecutor = createAutodetectExecutorService(autodetectExecutorService);
            autodetectExecutorService.submit(processor::process);
        } catch (EsRejectedExecutionException e) {
            // If submitting the operation to read the results from the process fails we need to close
            // the process too, so that other submitted operations to threadpool are stopped.
            try {
                IOUtils.close(process);
            } catch (IOException ioe) {
                logger.error("Can't close autodetect", ioe);
            }
            throw e;
        }
        return new AutodetectCommunicator(
            job,
            process,
            new StateStreamer(client),
            dataCountsReporter,
            processor,
            handler,
            xContentRegistry,
            autodetectWorkerExecutor
        );
    }
private void notifyLoadingSnapshot(String jobId, AutodetectParams autodetectParams) {
ModelSnapshot modelSnapshot = autodetectParams.modelSnapshot();
StringBuilder msgBuilder = new StringBuilder("Loading model snapshot [");
if (modelSnapshot == null) {
msgBuilder.append("N/A");
} else {
msgBuilder.append(modelSnapshot.getSnapshotId());
msgBuilder.append("] with latest_record_timestamp [");
Date snapshotLatestRecordTimestamp = modelSnapshot.getLatestRecordTimeStamp();
msgBuilder.append(
snapshotLatestRecordTimestamp == null
? "N/A"
: XContentElasticsearchExtension.DEFAULT_FORMATTER.format(snapshotLatestRecordTimestamp.toInstant())
);
}
msgBuilder.append("], job latest_record_timestamp [");
Date jobLatestRecordTimestamp = autodetectParams.dataCounts().getLatestRecordTimeStamp();
msgBuilder.append(
jobLatestRecordTimestamp == null
? "N/A"
: XContentElasticsearchExtension.DEFAULT_FORMATTER.format(jobLatestRecordTimestamp.toInstant())
);
msgBuilder.append("]");
String msg = msgBuilder.toString();
logger.info("[{}] {}", jobId, msg);
auditor.info(jobId, msg);
}
    /**
     * Returns the callback invoked when a job's native process dies unexpectedly: deregisters the
     * allocation, tears down the categorization analyzer, fails the task and deletes temp storage.
     */
    private Consumer<String> onProcessCrash(JobTask jobTask) {
        return (reason) -> {
            ProcessContext processContext = processByAllocation.remove(jobTask.getAllocationId());
            if (processContext != null) {
                AutodetectCommunicator communicator = processContext.getAutodetectCommunicator();
                if (communicator != null) {
                    communicator.destroyCategorizationAnalyzer();
                }
            }
            setJobState(jobTask, JobState.FAILED, reason);
            try {
                nativeStorageProvider.cleanupLocalTmpStorage(jobTask.getDescription());
            } catch (IOException e) {
                logger.error(() -> "[" + jobTask.getJobId() + "] Failed to delete temporary files", e);
            }
        };
    }
    /**
     * Shuts down the native process for a job (or cleans up after a kill) and completes the task.
     * Holds the process context lock for the whole procedure so that concurrent close calls for
     * the same job block until the first one finishes; kill requests deliberately do NOT take the
     * lock and may run concurrently (see the comments below).
     */
    private void closeProcessAndTask(ProcessContext processContext, JobTask jobTask, String reason) {
        String jobId = jobTask.getJobId();
        long allocationId = jobTask.getAllocationId();
        // We use a lock to prevent simultaneous open and close from conflicting. However, we found
        // that we could not use the lock to stop kill from conflicting because that could lead to
        // a kill taking an unacceptably long time to have an effect, which largely defeats the point
        // of having an option to quickly kill a process. Therefore we have to deal with the effects
        // of kill running simultaneously with open and close.
        boolean jobKilled = false;
        processContext.tryLock();
        try {
            if (processContext.setDying() == false) {
                logger.debug("Cannot {} job [{}] as it has been marked as dying", jobTask.isVacating() ? "vacate" : "close", jobId);
                // The only way we can get here is if 2 close requests are made very close together.
                // The other close has done the work so it's safe to return here without doing anything.
                return;
            }
            // If the job was killed early on during its open sequence then
            // its context will already have been removed from this map
            jobKilled = (processByAllocation.containsKey(allocationId) == false);
            if (jobKilled) {
                logger.debug("[{}] Cleaning up job opened after kill", jobId);
            } else if (reason == null) {
                logger.info("{} job [{}]", jobTask.isVacating() ? "Vacating" : "Closing", jobId);
            } else {
                logger.info("{} job [{}], because [{}]", jobTask.isVacating() ? "Vacating" : "Closing", jobId, reason);
            }
            AutodetectCommunicator communicator = processContext.getAutodetectCommunicator();
            if (communicator == null) {
                assert jobKilled == false
                    : "Job " + jobId + " killed before process started yet still had no communicator during cleanup after process started";
                assert jobTask.isVacating() == false
                    : "Job " + jobId + " was vacated before it had a communicator - should not be possible";
                logger.debug("Job [{}] is being closed before its process is started", jobId);
                jobTask.markAsCompleted();
                processByAllocation.remove(allocationId);
            } else {
                if (jobKilled) {
                    communicator.killProcess(true, false, false);
                } else {
                    communicator.setVacating(jobTask.isVacating());
                    // communicator.close() may take a long time to run, if the job persists a large model state as a
                    // result of calling it. We want to leave open the option to kill the job during this time, which
                    // is why the allocation ID must remain in the map until after the close is complete.
                    communicator.close();
                    processByAllocation.remove(allocationId);
                }
            }
        } catch (Exception e) {
            // If the close failed because the process has explicitly been killed by us then just pass on that exception.
            // (Note that jobKilled may be false in this case, if the kill is executed while communicator.close() is running.)
            if (e instanceof ElasticsearchStatusException exception && exception.status() == RestStatus.CONFLICT) {
                logger.trace(
                    "[{}] Conflict between kill and {} during autodetect process cleanup - job {} before cleanup started",
                    jobId,
                    jobTask.isVacating() ? "vacate" : "close",
                    jobKilled ? "killed" : "not killed"
                );
                throw exception;
            }
            String msg = jobKilled
                ? "Exception cleaning up autodetect process started after kill"
                : "Exception " + (jobTask.isVacating() ? "vacating" : "closing") + " autodetect process";
            logger.warn("[" + jobId + "] " + msg, e);
            setJobState(jobTask, JobState.FAILED, e.getMessage());
            throw ExceptionsHelper.serverError(msg, e);
        } finally {
            // to ensure the contract that multiple simultaneous close calls for the same job wait until
            // the job is closed is honoured, hold the lock throughout the close procedure so that another
            // thread that gets into this method blocks until the first thread has finished closing the job
            processContext.unlock();
        }
        // delete any tmp storage
        try {
            nativeStorageProvider.cleanupLocalTmpStorage(jobTask.getDescription());
        } catch (IOException e) {
            logger.error(() -> "[" + jobId + "] Failed to delete temporary files", e);
        }
    }
/**
* Stop the running job and mark it as finished. For consistency with the job task,
* other than for testing this method should only be called via {@link JobTask#closeJob}.
* @param jobTask The job to stop
* @param reason The reason for closing the job
*/
public void closeJob(JobTask jobTask, String reason) {
String jobId = jobTask.getJobId();
long allocationId = jobTask.getAllocationId();
logger.debug("Attempting to close job [{}], because [{}]", jobId, reason);
// don't remove the process context immediately, because we need to ensure
// it is reachable to enable killing a job while it is closing
ProcessContext processContext = processByAllocation.get(allocationId);
if (processContext == null) {
logger.debug("Cannot close job [{}] as it has already been closed or is closing", jobId);
return;
}
closeProcessAndTask(processContext, jobTask, reason);
}
int numberOfOpenJobs() {
return (int) processByAllocation.values().stream().filter(p -> p.getState() != ProcessContext.ProcessStateName.DYING).count();
}
boolean jobHasActiveAutodetectProcess(JobTask jobTask) {
return getAutodetectCommunicator(jobTask) != null;
}
    // Returns the communicator for the task's allocation, or null when there is none.
    // NOTE(review): getOrDefault allocates a throwaway ProcessContext on every miss, presumably
    // because a fresh context's communicator is null - confirm before simplifying to a null check.
    private AutodetectCommunicator getAutodetectCommunicator(JobTask jobTask) {
        return processByAllocation.getOrDefault(jobTask.getAllocationId(), new ProcessContext(jobTask)).getAutodetectCommunicator();
    }
    // Like getAutodetectCommunicator, but only returns the communicator when the process context
    // is in the RUNNING state; otherwise null.
    private AutodetectCommunicator getOpenAutodetectCommunicator(JobTask jobTask) {
        ProcessContext processContext = processByAllocation.get(jobTask.getAllocationId());
        if (processContext != null && processContext.getState() == ProcessContext.ProcessStateName.RUNNING) {
            return processContext.getAutodetectCommunicator();
        }
        return null;
    }
public boolean hasOpenAutodetectCommunicator(long jobAllocationId) {
ProcessContext processContext = processByAllocation.get(jobAllocationId);
if (processContext != null && processContext.getState() == ProcessContext.ProcessStateName.RUNNING) {
return processContext.getAutodetectCommunicator() != null;
}
return false;
}
public Optional<Duration> jobOpenTime(JobTask jobTask) {
AutodetectCommunicator communicator = getAutodetectCommunicator(jobTask);
if (communicator == null) {
return Optional.empty();
}
return Optional.of(Duration.between(communicator.getProcessStartTime(), ZonedDateTime.now()));
}
    /**
     * Persists the given task state, retrying on failure so the cluster state stays consistent.
     * Failures are logged but not propagated to the caller.
     */
    void setJobState(JobTask jobTask, JobState state, String reason) {
        JobTaskState jobTaskState = new JobTaskState(state, jobTask.getAllocationId(), reason, Instant.now());
        // retry state update to ensure that cluster state stays consistent
        new UpdateStateRetryableAction(
            logger,
            threadPool,
            jobTask,
            jobTaskState,
            ActionListener.wrap(
                persistentTask -> logger.info("Successfully set job state to [{}] for job [{}]", state, jobTask.getJobId()),
                e -> logSetJobStateFailure(state, jobTask.getJobId(), e)
            )
        ).run();
    }
    /** Logs a failed state update: at debug when the task is already gone, at error otherwise. */
    private static void logSetJobStateFailure(JobState state, String jobId, Exception e) {
        if (ExceptionsHelper.unwrapCause(e) instanceof ResourceNotFoundException) {
            logger.debug("Could not set job state to [{}] for job [{}] as it has been closed", state, jobId);
        } else {
            logger.error(() -> format("Could not set job state to [%s] for job [%s]", state, jobId), e);
        }
    }
    /**
     * Persists the given task state with retries, then invokes {@code handler} with {@code null}
     * on success or with the failure. Exceptions thrown by the handler itself are only logged.
     */
    void setJobState(JobTask jobTask, JobState state, String reason, CheckedConsumer<Exception, IOException> handler) {
        JobTaskState jobTaskState = new JobTaskState(state, jobTask.getAllocationId(), reason, Instant.now());
        // retry state update to ensure that cluster state stays consistent
        new UpdateStateRetryableAction(logger, threadPool, jobTask, jobTaskState, ActionListener.wrap(persistentTask -> {
            try {
                handler.accept(null);
            } catch (IOException e1) {
                logger.warn("Error while delegating response", e1);
            }
        }, e -> {
            try {
                handler.accept(e);
            } catch (IOException e1) {
                logger.warn("Error while delegating exception [" + e.getMessage() + "]", e1);
            }
        })).run();
    }
    /**
     * @return the job's data counts plus (model size stats, timing stats), or empty when the job
     *         has no communicator (i.e. no running process)
     */
    public Optional<Tuple<DataCounts, Tuple<ModelSizeStats, TimingStats>>> getStatistics(JobTask jobTask) {
        AutodetectCommunicator communicator = getAutodetectCommunicator(jobTask);
        if (communicator == null) {
            return Optional.empty();
        }
        return Optional.of(
            new Tuple<>(communicator.getDataCounts(), new Tuple<>(communicator.getModelSizeStats(), communicator.getTimingStats()))
        );
    }
    /**
     * Creates the per-job worker executor and starts its dispatch loop on the given pool.
     * Package-private for testing.
     */
    ExecutorService createAutodetectExecutorService(ExecutorService executorService) {
        ProcessWorkerExecutorService autodetectWorkerExecutor = new AutodetectWorkerExecutorService(threadPool.getThreadContext());
        executorService.submit(autodetectWorkerExecutor::start);
        return autodetectWorkerExecutor;
    }
    /** @return the minimum local storage that must remain available, as reported by the native storage provider. */
    public ByteSizeValue getMinLocalStorageAvailable() {
        return nativeStorageProvider.getMinLocalStorageAvailable();
    }
    // Keep the upgrade/reset mode flags in sync with ML metadata on every cluster state change.
    @Override
    public void clusterChanged(ClusterChangedEvent event) {
        upgradeInProgress = MlMetadata.getMlMetadata(event.state()).isUpgradeMode();
        resetInProgress = MlMetadata.getMlMetadata(event.state()).isResetMode();
    }
    /**
     * Finds the memory used by open autodetect processes on the current node.
     * @return Memory used by open autodetect processes on the current node.
     */
    public ByteSizeValue getOpenProcessMemoryUsage() {
        long memoryUsedBytes = 0;
        for (ProcessContext processContext : processByAllocation.values()) {
            if (processContext.getState() == ProcessContext.ProcessStateName.RUNNING) {
                ModelSizeStats modelSizeStats = processContext.getAutodetectCommunicator().getModelSizeStats();
                ModelSizeStats.AssignmentMemoryBasis basis = modelSizeStats.getAssignmentMemoryBasis();
                // When no explicit basis was recorded, fall back to the configured model memory limit.
                memoryUsedBytes += switch (basis != null ? basis : ModelSizeStats.AssignmentMemoryBasis.MODEL_MEMORY_LIMIT) {
                    case MODEL_MEMORY_LIMIT -> Optional.ofNullable(modelSizeStats.getModelBytesMemoryLimit()).orElse(0L);
                    case CURRENT_MODEL_BYTES -> modelSizeStats.getModelBytes();
                    case PEAK_MODEL_BYTES -> Optional.ofNullable(modelSizeStats.getPeakModelBytes()).orElse(modelSizeStats.getModelBytes());
                };
                // Each native process also carries a fixed per-process overhead.
                memoryUsedBytes += Job.PROCESS_MEMORY_OVERHEAD.getBytes();
            }
        }
        return ByteSizeValue.ofBytes(memoryUsedBytes);
    }
private static | AutodetectProcessManager |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/execution/CancelTaskException.java | {
"start": 993,
"end": 1312
} | class ____ extends RuntimeException {
private static final long serialVersionUID = 1L;
public CancelTaskException(Throwable cause) {
super(cause);
}
public CancelTaskException(String msg) {
super(msg);
}
public CancelTaskException() {
super();
}
}
| CancelTaskException |
java | apache__flink | flink-core/src/main/java/org/apache/flink/core/plugin/DefaultPluginManager.java | {
"start": 1573,
"end": 5044
} | class ____ implements PluginManager {
private static final Logger LOG = LoggerFactory.getLogger(DefaultPluginManager.class);
/**
* Parent-classloader to all classloader that are used for plugin loading. We expect that this
* is thread-safe.
*/
private final ClassLoader parentClassLoader;
/** A collection of descriptions of all plugins known to this plugin manager. */
private final Collection<PluginDescriptor> pluginDescriptors;
private final Lock pluginLoadersLock;
@GuardedBy("pluginLoadersLock")
private final Map<String, PluginLoader> pluginLoaders;
/** List of patterns for classes that should always be resolved from the parent ClassLoader. */
private final String[] alwaysParentFirstPatterns;
    /** Test-only constructor; leaves every field {@code null}. */
    @VisibleForTesting
    DefaultPluginManager() {
        parentClassLoader = null;
        pluginDescriptors = null;
        pluginLoadersLock = null;
        pluginLoaders = null;
        alwaysParentFirstPatterns = null;
    }
    /**
     * Creates a manager whose plugin class loaders are parented to this class's own class loader.
     *
     * @param pluginDescriptors descriptions of all plugins to manage
     * @param alwaysParentFirstPatterns class-name patterns always resolved from the parent loader
     */
    public DefaultPluginManager(
            Collection<PluginDescriptor> pluginDescriptors, String[] alwaysParentFirstPatterns) {
        this(
                pluginDescriptors,
                DefaultPluginManager.class.getClassLoader(),
                alwaysParentFirstPatterns);
    }
    /**
     * @param pluginDescriptors descriptions of all plugins to manage
     * @param parentClassLoader parent of all plugin class loaders; expected to be thread-safe
     * @param alwaysParentFirstPatterns class-name patterns always resolved from the parent loader
     */
    public DefaultPluginManager(
            Collection<PluginDescriptor> pluginDescriptors,
            ClassLoader parentClassLoader,
            String[] alwaysParentFirstPatterns) {
        this.pluginDescriptors = pluginDescriptors;
        this.pluginLoadersLock = new ReentrantLock();
        this.pluginLoaders = new HashMap<>();
        this.parentClassLoader = parentClassLoader;
        this.alwaysParentFirstPatterns = alwaysParentFirstPatterns;
    }
@Override
public <P> Iterator<P> load(Class<P> service) {
ArrayList<Iterator<P>> combinedIterators = new ArrayList<>(pluginDescriptors.size());
for (PluginDescriptor pluginDescriptor : pluginDescriptors) {
PluginLoader pluginLoader;
String pluginId = pluginDescriptor.getPluginId();
pluginLoadersLock.lock();
try {
if (pluginLoaders.containsKey(pluginId)) {
LOG.info("Plugin loader with ID found, reusing it: {}", pluginId);
pluginLoader = pluginLoaders.get(pluginId);
} else {
LOG.info("Plugin loader with ID not found, creating it: {}", pluginId);
pluginLoader =
PluginLoader.create(
pluginDescriptor, parentClassLoader, alwaysParentFirstPatterns);
pluginLoaders.putIfAbsent(pluginId, pluginLoader);
}
} finally {
pluginLoadersLock.unlock();
}
combinedIterators.add(pluginLoader.load(service));
}
return Iterators.concat(combinedIterators.iterator());
}
    /** Diagnostic description listing the parent loader, descriptors, cached loaders and patterns. */
    @Override
    public String toString() {
        return "PluginManager{"
                + "parentClassLoader="
                + parentClassLoader
                + ", pluginDescriptors="
                + pluginDescriptors
                + ", pluginLoaders="
                + Joiner.on(",").withKeyValueSeparator("=").join(pluginLoaders)
                + ", alwaysParentFirstPatterns="
                + Arrays.toString(alwaysParentFirstPatterns)
                + '}';
    }
}
| DefaultPluginManager |
java | apache__camel | components/camel-http-base/src/main/java/org/apache/camel/http/base/HttpProtocolHeaderFilterStrategy.java | {
"start": 956,
"end": 2706
} | class ____ extends DefaultHeaderFilterStrategy {
    /** Creates the strategy and registers the HTTP protocol headers to be filtered. */
    public HttpProtocolHeaderFilterStrategy() {
        initialize();
    }
// Just add the http headers here
protected void initialize() {
getInFilter().add("content-encoding");
getInFilter().add("content-language");
getInFilter().add("content-location");
getInFilter().add("content-md5");
getInFilter().add("content-range");
getInFilter().add("dav");
getInFilter().add("depth");
getInFilter().add("destination");
getInFilter().add("etag");
getInFilter().add("expect");
getInFilter().add("expires");
getInFilter().add("from");
getInFilter().add("if");
getInFilter().add("if-match");
getInFilter().add("if-modified-since");
getInFilter().add("if-none-match");
getInFilter().add("if-range");
getInFilter().add("if-unmodified-since");
getInFilter().add("last-modified");
getInFilter().add("location");
getInFilter().add("lock-token");
getInFilter().add("max-forwards");
getInFilter().add("overwrite");
getInFilter().add("proxy-authenticate");
getInFilter().add("proxy-authorization");
getInFilter().add("range");
getInFilter().add("referer");
getInFilter().add("retry-after");
getInFilter().add("server");
getInFilter().add("status-uri");
getInFilter().add("te");
getInFilter().add("timeout");
getInFilter().add("user-agent");
getInFilter().add("vary");
getInFilter().add("www-authenticate");
HttpUtil.addCommonFilters(getInFilter());
setLowerCase(true);
}
}
| HttpProtocolHeaderFilterStrategy |
java | apache__maven | compat/maven-embedder/src/main/java/org/apache/maven/cli/CliRequest.java | {
"start": 1174,
"end": 2885
} | class ____ {
    // Raw command-line arguments as passed to the launcher.
    String[] args;
    // Parsed representation of the arguments; populated during CLI parsing.
    CommandLine commandLine;
    // Plexus class world the embedder was launched with.
    ClassWorld classWorld;
    // Directory from which the build was invoked.
    String workingDirectory;
    // Base directory of a multi-module build, when detected.
    File multiModuleProjectDirectory;
    // Root and top directories of the project tree (populated elsewhere during startup).
    Path rootDirectory;
    Path topDirectory;
    // Output verbosity flags; error output is enabled by default.
    boolean verbose;
    boolean quiet;
    boolean showErrors = true;
    // User-supplied and system properties gathered for the execution.
    Properties userProperties = new Properties();
    Properties systemProperties = new Properties();
    // Execution request that accumulates the effective configuration.
    MavenExecutionRequest request;
    /** Creates a request for the given raw arguments and class world, with a fresh default execution request. */
    CliRequest(String[] args, ClassWorld classWorld) {
        this.args = args;
        this.classWorld = classWorld;
        this.request = new DefaultMavenExecutionRequest();
    }
    /** @return the raw command-line arguments */
    public String[] getArgs() {
        return args;
    }
    /** @return the parsed command line, or {@code null} before parsing */
    public CommandLine getCommandLine() {
        return commandLine;
    }
    /** @return the class world the CLI was launched with */
    public ClassWorld getClassWorld() {
        return classWorld;
    }
    /** @return the invocation working directory */
    public String getWorkingDirectory() {
        return workingDirectory;
    }
    /** @return the multi-module project base directory, or {@code null} */
    public File getMultiModuleProjectDirectory() {
        return multiModuleProjectDirectory;
    }
    /** @return whether verbose output was requested */
    public boolean isVerbose() {
        return verbose;
    }
    /** @return whether quiet output was requested */
    public boolean isQuiet() {
        return quiet;
    }
    /** @return whether error stack traces should be shown (default {@code true}) */
    public boolean isShowErrors() {
        return showErrors;
    }
    /** @return the user-supplied properties */
    public Properties getUserProperties() {
        return userProperties;
    }
    /** @return the system properties in effect */
    public Properties getSystemProperties() {
        return systemProperties;
    }
    /** @return the execution request being built for this invocation */
    public MavenExecutionRequest getRequest() {
        return request;
    }
    /** Merges the given properties into the user properties (existing keys are overwritten). */
    public void setUserProperties(Properties properties) {
        this.userProperties.putAll(properties);
    }
    /** @return the detected root directory, or {@code null} */
    public Path getRootDirectory() {
        return rootDirectory;
    }
    /** @return the detected top directory, or {@code null} */
    public Path getTopDirectory() {
        return topDirectory;
    }
}
| CliRequest |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/error/ElementsShouldBeAtMost_create_Test.java | {
"start": 1218,
"end": 1783
} | class ____ {
    /**
     * elementsShouldBeAtMost should render the actual elements followed by the violated
     * "to be at most n times <condition>" clause.
     */
    @Test
    void should_create_error_message() {
        // GIVEN
        ErrorMessageFactory factory = elementsShouldBeAtMost(list("Yoda", "Luke", "Obiwan"), 2, new TestCondition<>("a Jedi"));
        // WHEN
        String message = factory.create(new TextDescription("Test"), STANDARD_REPRESENTATION);
        // THEN
        then(message).isEqualTo(format("[Test] %nExpecting elements:%n" +
                                       " [\"Yoda\", \"Luke\", \"Obiwan\"]%n" +
                                       "to be at most 2 times a Jedi"));
    }
}
| ElementsShouldBeAtMost_create_Test |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/util/NameTransformer.java | {
"start": 539,
"end": 4259
} | class ____
extends NameTransformer
implements java.io.Serializable
{
private static final long serialVersionUID = 1L;
        /** Identity: returns the name unchanged. */
        @Override
        public String transform(String name) {
            return name;
        }
        /** Identity is trivially reversible: returns the input unchanged. */
        @Override
        public String reverse(String transformed) {
            // identity transformation is always reversible:
            return transformed;
        }
}
protected NameTransformer() { }
    /**
     * Factory method for constructing a simple transformer based on
     * prefix and/or suffix.
     *
     * @param prefix prefix to prepend, or {@code null}/empty for none
     * @param suffix suffix to append, or {@code null}/empty for none
     * @return a transformer applying the given affixes; {@link #NOP} when both are absent
     */
    public static NameTransformer simpleTransformer(final String prefix, final String suffix)
    {
        boolean hasPrefix = (prefix != null) && !prefix.isEmpty();
        boolean hasSuffix = (suffix != null) && !suffix.isEmpty();

        if (hasPrefix) {
            if (hasSuffix) {
                // Both affixes: reverse() must strip the prefix first, then the suffix,
                // and returns null when either is missing from the input.
                return new NameTransformer() {
                    @Override
                    public String transform(String name) { return prefix + name + suffix; }
                    @Override
                    public String reverse(String transformed) {
                        if (transformed.startsWith(prefix)) {
                            String str = transformed.substring(prefix.length());
                            if (str.endsWith(suffix)) {
                                return str.substring(0, str.length() - suffix.length());
                            }
                        }
                        return null;
                    }
                    @Override
                    public String toString() { return "[PreAndSuffixTransformer('"+prefix+"','"+suffix+"')]"; }
                };
            }
            // Prefix only.
            return new NameTransformer() {
                @Override
                public String transform(String name) { return prefix + name; }
                @Override
                public String reverse(String transformed) {
                    if (transformed.startsWith(prefix)) {
                        return transformed.substring(prefix.length());
                    }
                    return null;
                }
                @Override
                public String toString() { return "[PrefixTransformer('"+prefix+"')]"; }
            };
        }
        if (hasSuffix) {
            // Suffix only.
            return new NameTransformer() {
                @Override
                public String transform(String name) { return name + suffix; }
                @Override
                public String reverse(String transformed) {
                    if (transformed.endsWith(suffix)) {
                        return transformed.substring(0, transformed.length() - suffix.length());
                    }
                    return null;
                }
                @Override
                public String toString() { return "[SuffixTransformer('"+suffix+"')]"; }
            };
        }
        return NOP;
    }
    /**
     * Method that constructs transformer that applies given transformers
     * as a sequence; essentially combines separate transform operations
     * into one logical transformation.
     *
     * @param t1 first transformer of the pair to combine
     * @param t2 second transformer of the pair to combine; the exact composition
     *           order is defined by {@code Chained}
     * @return a transformer applying both given transformers in sequence
     */
    public static NameTransformer chainedTransformer(NameTransformer t1, NameTransformer t2) {
        return new Chained(t1, t2);
    }
/**
* Method called when (forward) transformation is needed.
*/
public abstract String transform(String name);
/**
* Method called when reversal of transformation is needed; should return
* null if this is not possible, that is, given name cannot have been
* result of calling {@link #transform} of this object.
*/
public abstract String reverse(String transformed);
public static | NopTransformer |
java | apache__kafka | streams/src/test/java/org/apache/kafka/streams/kstream/internals/graph/GraphNodeTest.java | {
"start": 4476,
"end": 4722
} | class ____ extends GraphNode {
        // Minimal concrete GraphNode used for testing; delegates the node name to the superclass.
        ExtendedGraphNode(final String nodeName) {
            super(nodeName);
        }
        /** No-op: this test node contributes nothing to the topology. */
        @Override
        public void writeToTopology(final InternalTopologyBuilder topologyBuilder) {}
} | ExtendedGraphNode |
java | junit-team__junit5 | junit-jupiter-engine/src/main/java/org/junit/jupiter/engine/discovery/MethodSelectorResolver.java | {
"start": 2637,
"end": 7436
} | class ____ implements SelectorResolver {
    private static final MethodSegmentResolver methodSegmentResolver = new MethodSegmentResolver();
    // Identifies classes that look like nested or standalone test classes.
    private final Predicate<Class<?>> testClassPredicate;
    private final JupiterConfiguration configuration;
    private final DiscoveryIssueReporter issueReporter;
    // All method types a selected method may resolve to (see MethodType.allPossibilities).
    private final List<MethodType> methodTypes;
    /** Creates a resolver that reports discovery issues via the given reporter. */
    MethodSelectorResolver(JupiterConfiguration configuration, DiscoveryIssueReporter issueReporter) {
        this.configuration = configuration;
        this.issueReporter = issueReporter;
        this.methodTypes = MethodType.allPossibilities(issueReporter);
        this.testClassPredicate = new TestClassPredicates(issueReporter).looksLikeNestedOrStandaloneTestClass;
    }
    /** Resolves a selector for a method of a (non-nested) test class. */
    @Override
    public Resolution resolve(MethodSelector selector, Context context) {
        return resolve(context, emptyList(), selector.getJavaClass(), selector::getJavaMethod, Match::exact);
    }
    /** Resolves a selector for a method in a nested test class, preserving its enclosing classes. */
    @Override
    public Resolution resolve(NestedMethodSelector selector, Context context) {
        return resolve(context, selector.getEnclosingClasses(), selector.getNestedClass(), selector::getMethod,
            Match::exact);
    }
    /**
     * Resolves a {@link DeclaredMethodSelector}; other selector types are left unresolved.
     * The last entry of {@code testClasses} is the class declaring the method; any preceding
     * entries are its enclosing classes.
     */
    @Override
    public Resolution resolve(DiscoverySelector selector, Context context) {
        if (selector instanceof DeclaredMethodSelector methodSelector) {
            var testClasses = methodSelector.testClasses();
            if (testClasses.size() == 1) {
                return resolve(context, emptyList(), testClasses.get(0), methodSelector::method, Match::exact);
            }
            int lastIndex = testClasses.size() - 1;
            return resolve(context, testClasses.subList(0, lastIndex), testClasses.get(lastIndex),
                methodSelector::method, Match::exact);
        }
        return unresolved();
    }
    /**
     * Common resolution path: verifies {@code testClass} looks like a test class, then asks every
     * known method type to resolve {@code methodSupplier}'s method into a test descriptor. More
     * than one resulting descriptor indicates competing annotations on the same method and is
     * reported as a discovery warning.
     */
    private Resolution resolve(Context context, List<Class<?>> enclosingClasses, Class<?> testClass,
            Supplier<Method> methodSupplier,
            BiFunction<TestDescriptor, Supplier<Set<? extends DiscoverySelector>>, Match> matchFactory) {
        if (!testClassPredicate.test(testClass)) {
            return unresolved();
        }
        Method method = methodSupplier.get();
        // @formatter:off
        Set<Match> matches = methodTypes.stream()
                .map(methodType -> methodType.resolve(enclosingClasses, testClass, method, context, configuration))
                .flatMap(Optional::stream)
                .map(testDescriptor -> matchFactory.apply(testDescriptor, expansionCallback(testDescriptor)))
                .collect(toSet());
        // @formatter:on
        if (matches.size() > 1) {
            Stream<TestDescriptor> testDescriptors = matches.stream().map(Match::getTestDescriptor);
            String message = """
                    Possible configuration error: method [%s] resulted in multiple TestDescriptors %s. \
                    This is typically the result of annotating a method with multiple competing annotations \
                    such as @Test, @RepeatedTest, @ParameterizedTest, @TestFactory, etc.""".formatted(
                method.toGenericString(), testDescriptors.map(d -> d.getClass().getName()).toList());
            issueReporter.reportIssue(
                DiscoveryIssue.builder(Severity.WARNING, message).source(MethodSource.from(method)));
        }
        return matches.isEmpty() ? unresolved() : matches(matches);
    }
@Override
public Resolution resolve(UniqueIdSelector selector, Context context) {
UniqueId uniqueId = selector.getUniqueId();
// @formatter:off
return methodTypes.stream()
.map(methodType -> methodType.resolveUniqueIdIntoTestDescriptor(uniqueId, context, configuration))
.flatMap(Optional::stream)
.map(testDescriptor -> {
boolean exactMatch = uniqueId.equals(testDescriptor.getUniqueId());
if (testDescriptor instanceof Filterable filterable) {
if (exactMatch) {
filterable.getDynamicDescendantFilter().allowAll();
}
else {
filterable.getDynamicDescendantFilter().allowUniqueIdPrefix(uniqueId);
}
}
return Resolution.match(exactMatch ? Match.exact(testDescriptor) : Match.partial(testDescriptor, expansionCallback(testDescriptor)));
})
.findFirst()
.orElse(unresolved());
// @formatter:on
}
@Override
public Resolution resolve(IterationSelector selector, Context context) {
if (selector.getParentSelector() instanceof MethodSelector methodSelector) {
return resolve(context, emptyList(), methodSelector.getJavaClass(), methodSelector::getJavaMethod,
(testDescriptor, childSelectorsSupplier) -> {
if (testDescriptor instanceof Filterable filterable) {
filterable.getDynamicDescendantFilter().allowIndex(selector.getIterationIndices());
}
return Match.partial(testDescriptor, childSelectorsSupplier);
});
}
return unresolved();
}
private Supplier<Set<? extends DiscoverySelector>> expansionCallback(TestDescriptor testDescriptor) {
return () -> {
if (testDescriptor instanceof Filterable filterable) {
filterable.getDynamicDescendantFilter().allowAll();
}
return emptySet();
};
}
private static | MethodSelectorResolver |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/refcolnames/misc/Misc3Test.java | {
"start": 1323,
"end": 1630
} | class ____ {
@Id
@GeneratedValue
private Long id;
@Basic
private String uniqueName;
@ManyToOne
@JoinColumn(name="a_id", referencedColumnName="id")
private A a;
}
@Entity
@Table(name = "C", uniqueConstraints = {@UniqueConstraint(columnNames = {"a_id", "uniqueName"})})
public static final | B |
java | google__dagger | javatests/dagger/hilt/android/MultiTestRoot1Test.java | {
"start": 4206,
"end": 4284
} | class ____ {
@Module
@InstallIn(SingletonComponent.class)
public | Outer |
java | apache__flink | flink-libraries/flink-cep/src/main/java/org/apache/flink/cep/nfa/sharedbuffer/EventId.java | {
"start": 1387,
"end": 2586
} | class ____ implements Comparable<EventId> {
private final int id;
private final long timestamp;
public EventId(int id, long timestamp) {
this.id = id;
this.timestamp = timestamp;
}
public int getId() {
return id;
}
public long getTimestamp() {
return timestamp;
}
public static final Comparator<EventId> COMPARATOR =
Comparator.comparingLong(EventId::getTimestamp).thenComparingInt(EventId::getId);
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
EventId eventId = (EventId) o;
return id == eventId.id && timestamp == eventId.timestamp;
}
@Override
public int hashCode() {
return Objects.hash(id, timestamp);
}
@Override
public String toString() {
return "EventId{" + "id=" + id + ", timestamp=" + timestamp + '}';
}
@Override
public int compareTo(EventId o) {
return COMPARATOR.compare(this, o);
}
/** {@link TypeSerializer} for {@link EventId}. */
public static | EventId |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/write/OptionalTableUpdateTests.java | {
"start": 2855,
"end": 4201
} | class ____ {
@Id
private Integer id;
@Basic
private String name;
@Basic
@Column( table = "supplements" )
private String details;
// Used to provoke https://hibernate.atlassian.net/browse/HHH-19749
@Column( precision = 6, scale = 2, table = "supplements" )
private BigDecimal theBigDecimal;
// Used to provoke https://hibernate.atlassian.net/browse/HHH-18860
@JdbcTypeCode( SqlTypes.NUMERIC )
@Column( precision = 6, scale = 2, table = "supplements" )
private Double theDoubleDecimal;
private TheEntity() {
// for use by Hibernate
}
public TheEntity(Integer id, String name, String details) {
this.id = id;
this.name = name;
this.details = details;
}
public Integer getId() {
return id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getDetails() {
return details;
}
public void setDetails(String details) {
this.details = details;
}
public BigDecimal getTheBigDecimal() {
return theBigDecimal;
}
public void setTheBigDecimal(BigDecimal discount) {
this.theBigDecimal = discount;
}
public Double getTheDoubleDecimal() {
return theDoubleDecimal;
}
public void setTheDoubleDecimal(Double theDoubleDecimal) {
this.theDoubleDecimal = theDoubleDecimal;
}
}
}
| TheEntity |
java | apache__camel | components/camel-pgevent/src/generated/java/org/apache/camel/component/pgevent/PgEventEndpointUriFactory.java | {
"start": 517,
"end": 2949
} | class ____ extends org.apache.camel.support.component.EndpointUriFactorySupport implements EndpointUriFactory {
private static final String BASE = ":host:port/database/channel";
private static final Set<String> PROPERTY_NAMES;
private static final Set<String> SECRET_PROPERTY_NAMES;
private static final Map<String, String> MULTI_VALUE_PREFIXES;
static {
Set<String> props = new HashSet<>(15);
props.add("bridgeErrorHandler");
props.add("channel");
props.add("database");
props.add("datasource");
props.add("exceptionHandler");
props.add("exchangePattern");
props.add("host");
props.add("lazyStartProducer");
props.add("pass");
props.add("port");
props.add("reconnectDelay");
props.add("user");
props.add("workerPool");
props.add("workerPoolCoreSize");
props.add("workerPoolMaxSize");
PROPERTY_NAMES = Collections.unmodifiableSet(props);
Set<String> secretProps = new HashSet<>(2);
secretProps.add("pass");
secretProps.add("user");
SECRET_PROPERTY_NAMES = Collections.unmodifiableSet(secretProps);
MULTI_VALUE_PREFIXES = Collections.emptyMap();
}
@Override
public boolean isEnabled(String scheme) {
return "pgevent".equals(scheme);
}
@Override
public String buildUri(String scheme, Map<String, Object> properties, boolean encode) throws URISyntaxException {
String syntax = scheme + BASE;
String uri = syntax;
Map<String, Object> copy = new HashMap<>(properties);
uri = buildPathParameter(syntax, uri, "host", "localhost", false, copy);
uri = buildPathParameter(syntax, uri, "port", 5432, false, copy);
uri = buildPathParameter(syntax, uri, "database", null, true, copy);
uri = buildPathParameter(syntax, uri, "channel", null, true, copy);
uri = buildQueryParameters(uri, copy, encode);
return uri;
}
@Override
public Set<String> propertyNames() {
return PROPERTY_NAMES;
}
@Override
public Set<String> secretPropertyNames() {
return SECRET_PROPERTY_NAMES;
}
@Override
public Map<String, String> multiValuePrefixes() {
return MULTI_VALUE_PREFIXES;
}
@Override
public boolean isLenientProperties() {
return false;
}
}
| PgEventEndpointUriFactory |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java | {
"start": 20303,
"end": 20452
} | class ____ maintained for backwards compatibility and performance purposes. We use it for serialisation along with {@link Option}.
*/
private | is |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/where/hbm/LazyElementCollectionWithLazyManyToOneNonUniqueIdWhereTest.java | {
"start": 6315,
"end": 6592
} | class ____ {
private int id;
private String name;
public int getId() {
return id;
}
public void setId(int id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
public static | Size |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestParsingTests.java | {
"start": 2354,
"end": 21182
} | class ____ extends ESTestCase {
private IngestService ingestService;
@Before
public void init() throws IOException {
TestProcessor processor = new TestProcessor(ingestDocument -> {});
CompoundProcessor pipelineCompoundProcessor = new CompoundProcessor(processor);
Pipeline pipeline = new Pipeline(SIMULATED_PIPELINE_ID, null, null, null, pipelineCompoundProcessor);
Map<String, Processor.Factory> registry = Collections.singletonMap(
"mock_processor",
(factories, tag, description, config, projectId) -> processor
);
ingestService = mock(IngestService.class);
when(ingestService.getPipeline(any(), eq(SIMULATED_PIPELINE_ID))).thenReturn(pipeline);
when(ingestService.getProcessorFactories()).thenReturn(registry);
}
public void testParseUsingPipelineStore() throws Exception {
int numDocs = randomIntBetween(1, 10);
Map<String, Object> requestContent = new HashMap<>();
List<Map<String, Object>> docs = new ArrayList<>();
List<Map<String, Object>> expectedDocs = new ArrayList<>();
requestContent.put(Fields.DOCS, docs);
for (int i = 0; i < numDocs; i++) {
Map<String, Object> doc = new HashMap<>();
String index = randomAlphaOfLengthBetween(1, 10);
String id = randomAlphaOfLengthBetween(1, 10);
doc.put(INDEX.getFieldName(), index);
doc.put(ID.getFieldName(), id);
String fieldName = randomAlphaOfLengthBetween(1, 10);
String fieldValue = randomAlphaOfLengthBetween(1, 10);
doc.put(Fields.SOURCE, Collections.singletonMap(fieldName, fieldValue));
docs.add(doc);
Map<String, Object> expectedDoc = new HashMap<>();
expectedDoc.put(INDEX.getFieldName(), index);
expectedDoc.put(ID.getFieldName(), id);
expectedDoc.put(Fields.SOURCE, Collections.singletonMap(fieldName, fieldValue));
expectedDocs.add(expectedDoc);
}
var projectId = randomProjectIdOrDefault();
SimulatePipelineRequest.Parsed actualRequest = SimulatePipelineRequest.parseWithPipelineId(
projectId,
SIMULATED_PIPELINE_ID,
requestContent,
false,
ingestService,
RestApiVersion.current()
);
assertThat(actualRequest.verbose(), equalTo(false));
assertThat(actualRequest.documents().size(), equalTo(numDocs));
Iterator<Map<String, Object>> expectedDocsIterator = expectedDocs.iterator();
for (IngestDocument ingestDocument : actualRequest.documents()) {
Map<String, Object> expectedDocument = expectedDocsIterator.next();
assertThat(ingestDocument.getMetadata().get(INDEX.getFieldName()), equalTo(expectedDocument.get(INDEX.getFieldName())));
assertThat(ingestDocument.getMetadata().get(ID.getFieldName()), equalTo(expectedDocument.get(ID.getFieldName())));
assertThat(ingestDocument.getSource(), equalTo(expectedDocument.get(Fields.SOURCE)));
}
assertThat(actualRequest.pipeline().getId(), equalTo(SIMULATED_PIPELINE_ID));
assertThat(actualRequest.pipeline().getDescription(), nullValue());
assertThat(actualRequest.pipeline().getProcessors().size(), equalTo(1));
}
public void testParseWithProvidedPipeline() throws Exception {
int numDocs = randomIntBetween(1, 10);
Map<String, Object> requestContent = new HashMap<>();
List<Map<String, Object>> docs = new ArrayList<>();
List<Map<String, Object>> expectedDocs = new ArrayList<>();
requestContent.put(Fields.DOCS, docs);
for (int i = 0; i < numDocs; i++) {
Map<String, Object> doc = new HashMap<>();
Map<String, Object> expectedDoc = new HashMap<>();
List<Metadata> fields = Arrays.asList(INDEX, ID, ROUTING, VERSION, VERSION_TYPE, IF_SEQ_NO, IF_PRIMARY_TERM);
for (Metadata field : fields) {
if (field == VERSION) {
Object value = randomBoolean() ? randomLong() : randomInt();
doc.put(field.getFieldName(), randomBoolean() ? value : value.toString());
long longValue = (long) value;
expectedDoc.put(field.getFieldName(), longValue);
} else if (field == VERSION_TYPE) {
String value = VersionType.toString(randomFrom(VersionType.INTERNAL, VersionType.EXTERNAL, VersionType.EXTERNAL_GTE));
doc.put(field.getFieldName(), value);
expectedDoc.put(field.getFieldName(), value);
} else if (field == IF_SEQ_NO || field == IF_PRIMARY_TERM) {
Object value = randomBoolean() ? randomNonNegativeLong() : randomInt(1000);
doc.put(field.getFieldName(), randomBoolean() ? value : value.toString());
long longValue = (long) value;
expectedDoc.put(field.getFieldName(), longValue);
} else {
if (randomBoolean()) {
String value = randomAlphaOfLengthBetween(1, 10);
doc.put(field.getFieldName(), value);
expectedDoc.put(field.getFieldName(), value);
} else {
Integer value = randomIntBetween(1, 1000000);
doc.put(field.getFieldName(), value);
expectedDoc.put(field.getFieldName(), String.valueOf(value));
}
}
}
String fieldName = randomAlphaOfLengthBetween(1, 10);
String fieldValue = randomAlphaOfLengthBetween(1, 10);
doc.put(Fields.SOURCE, Collections.singletonMap(fieldName, fieldValue));
docs.add(doc);
expectedDoc.put(Fields.SOURCE, Collections.singletonMap(fieldName, fieldValue));
expectedDocs.add(expectedDoc);
}
Map<String, Object> pipelineConfig = new HashMap<>();
List<Map<String, Object>> processors = new ArrayList<>();
int numProcessors = randomIntBetween(1, 10);
for (int i = 0; i < numProcessors; i++) {
Map<String, Object> processorConfig = new HashMap<>();
List<Map<String, Object>> onFailureProcessors = new ArrayList<>();
int numOnFailureProcessors = randomIntBetween(0, 1);
for (int j = 0; j < numOnFailureProcessors; j++) {
onFailureProcessors.add(Collections.singletonMap("mock_processor", Collections.emptyMap()));
}
if (numOnFailureProcessors > 0) {
processorConfig.put("on_failure", onFailureProcessors);
}
processors.add(Collections.singletonMap("mock_processor", processorConfig));
}
pipelineConfig.put("processors", processors);
List<Map<String, Object>> onFailureProcessors = new ArrayList<>();
int numOnFailureProcessors = randomIntBetween(0, 1);
for (int i = 0; i < numOnFailureProcessors; i++) {
onFailureProcessors.add(Collections.singletonMap("mock_processor", Collections.emptyMap()));
}
if (numOnFailureProcessors > 0) {
pipelineConfig.put("on_failure", onFailureProcessors);
}
requestContent.put(Fields.PIPELINE, pipelineConfig);
var projectId = randomProjectIdOrDefault();
SimulatePipelineRequest.Parsed actualRequest = SimulatePipelineRequest.parse(
projectId,
requestContent,
false,
ingestService,
RestApiVersion.current(),
(nodeFeature) -> true
);
assertThat(actualRequest.verbose(), equalTo(false));
assertThat(actualRequest.documents().size(), equalTo(numDocs));
Iterator<Map<String, Object>> expectedDocsIterator = expectedDocs.iterator();
for (IngestDocument ingestDocument : actualRequest.documents()) {
Map<String, Object> expectedDocument = expectedDocsIterator.next();
org.elasticsearch.script.Metadata metadata = ingestDocument.getMetadata();
assertThat(metadata.get(INDEX.getFieldName()), equalTo(expectedDocument.get(INDEX.getFieldName())));
assertThat(metadata.get(ID.getFieldName()), equalTo(expectedDocument.get(ID.getFieldName())));
assertThat(metadata.get(ROUTING.getFieldName()), equalTo(expectedDocument.get(ROUTING.getFieldName())));
assertThat(metadata.get(VERSION.getFieldName()), equalTo(expectedDocument.get(VERSION.getFieldName())));
assertThat(metadata.get(VERSION_TYPE.getFieldName()), equalTo(expectedDocument.get(VERSION_TYPE.getFieldName())));
assertThat(metadata.get(IF_SEQ_NO.getFieldName()), equalTo(expectedDocument.get(IF_SEQ_NO.getFieldName())));
assertThat(metadata.get(IF_PRIMARY_TERM.getFieldName()), equalTo(expectedDocument.get(IF_PRIMARY_TERM.getFieldName())));
assertThat(ingestDocument.getSource(), equalTo(expectedDocument.get(Fields.SOURCE)));
}
assertThat(actualRequest.pipeline().getId(), equalTo(SIMULATED_PIPELINE_ID));
assertThat(actualRequest.pipeline().getDescription(), nullValue());
assertThat(actualRequest.pipeline().getProcessors().size(), equalTo(numProcessors));
}
public void testNullPipelineId() {
var projectId = randomProjectIdOrDefault();
Map<String, Object> requestContent = new HashMap<>();
List<Map<String, Object>> docs = new ArrayList<>();
requestContent.put(Fields.DOCS, docs);
Exception e = expectThrows(
IllegalArgumentException.class,
() -> SimulatePipelineRequest.parseWithPipelineId(
projectId,
null,
requestContent,
false,
ingestService,
RestApiVersion.current()
)
);
assertThat(e.getMessage(), equalTo("param [pipeline] is null"));
}
public void testNonExistentPipelineId() {
var projectId = randomProjectIdOrDefault();
String pipelineId = randomAlphaOfLengthBetween(1, 10);
Map<String, Object> requestContent = new HashMap<>();
List<Map<String, Object>> docs = new ArrayList<>();
requestContent.put(Fields.DOCS, docs);
Exception e = expectThrows(
IllegalArgumentException.class,
() -> SimulatePipelineRequest.parseWithPipelineId(
projectId,
pipelineId,
requestContent,
false,
ingestService,
RestApiVersion.current()
)
);
assertThat(e.getMessage(), equalTo("pipeline [" + pipelineId + "] does not exist"));
}
public void testNotValidDocs() {
var projectId = randomProjectIdOrDefault();
Map<String, Object> requestContent = new HashMap<>();
List<Map<String, Object>> docs = new ArrayList<>();
Map<String, Object> pipelineConfig = new HashMap<>();
List<Map<String, Object>> processors = new ArrayList<>();
pipelineConfig.put("processors", processors);
requestContent.put(Fields.DOCS, docs);
requestContent.put(Fields.PIPELINE, pipelineConfig);
Exception e1 = expectThrows(
IllegalArgumentException.class,
() -> SimulatePipelineRequest.parse(
projectId,
requestContent,
false,
ingestService,
RestApiVersion.current(),
(nodeFeature) -> true
)
);
assertThat(e1.getMessage(), equalTo("must specify at least one document in [docs]"));
List<String> stringList = new ArrayList<>();
stringList.add("test");
pipelineConfig.put("processors", processors);
requestContent.put(Fields.DOCS, stringList);
requestContent.put(Fields.PIPELINE, pipelineConfig);
Exception e2 = expectThrows(
IllegalArgumentException.class,
() -> SimulatePipelineRequest.parse(
projectId,
requestContent,
false,
ingestService,
RestApiVersion.current(),
(nodeFeature) -> true
)
);
assertThat(e2.getMessage(), equalTo("malformed [docs] section, should include an inner object"));
docs.add(new HashMap<>());
requestContent.put(Fields.DOCS, docs);
requestContent.put(Fields.PIPELINE, pipelineConfig);
Exception e3 = expectThrows(
ElasticsearchParseException.class,
() -> SimulatePipelineRequest.parse(
projectId,
requestContent,
false,
ingestService,
RestApiVersion.current(),
(nodeFeature) -> true
)
);
assertThat(e3.getMessage(), containsString("required property is missing"));
}
public void testIngestPipelineWithDocumentsWithType() throws Exception {
int numDocs = randomIntBetween(1, 10);
Map<String, Object> requestContent = new HashMap<>();
List<Map<String, Object>> docs = new ArrayList<>();
List<Map<String, Object>> expectedDocs = new ArrayList<>();
requestContent.put(Fields.DOCS, docs);
for (int i = 0; i < numDocs; i++) {
Map<String, Object> doc = new HashMap<>();
Map<String, Object> expectedDoc = new HashMap<>();
List<Metadata> fields = Arrays.asList(INDEX, TYPE, ID, ROUTING, VERSION, VERSION_TYPE);
for (Metadata field : fields) {
if (field == VERSION) {
Long value = randomLong();
doc.put(field.getFieldName(), value);
expectedDoc.put(field.getFieldName(), value);
} else if (field == VERSION_TYPE) {
String value = VersionType.toString(randomFrom(VersionType.INTERNAL, VersionType.EXTERNAL, VersionType.EXTERNAL_GTE));
doc.put(field.getFieldName(), value);
expectedDoc.put(field.getFieldName(), value);
} else if (field == TYPE) {
String value = randomAlphaOfLengthBetween(1, 10);
doc.put(field.getFieldName(), value);
expectedDoc.put(field.getFieldName(), value);
} else {
if (randomBoolean()) {
String value = randomAlphaOfLengthBetween(1, 10);
doc.put(field.getFieldName(), value);
expectedDoc.put(field.getFieldName(), value);
} else {
Integer value = randomIntBetween(1, 1000000);
doc.put(field.getFieldName(), value);
expectedDoc.put(field.getFieldName(), String.valueOf(value));
}
}
}
String fieldName = randomAlphaOfLengthBetween(1, 10);
String fieldValue = randomAlphaOfLengthBetween(1, 10);
doc.put(Fields.SOURCE, Collections.singletonMap(fieldName, fieldValue));
docs.add(doc);
expectedDoc.put(Fields.SOURCE, Collections.singletonMap(fieldName, fieldValue));
expectedDocs.add(expectedDoc);
}
Map<String, Object> pipelineConfig = new HashMap<>();
List<Map<String, Object>> processors = new ArrayList<>();
int numProcessors = randomIntBetween(1, 10);
for (int i = 0; i < numProcessors; i++) {
Map<String, Object> processorConfig = new HashMap<>();
List<Map<String, Object>> onFailureProcessors = new ArrayList<>();
int numOnFailureProcessors = randomIntBetween(0, 1);
for (int j = 0; j < numOnFailureProcessors; j++) {
onFailureProcessors.add(Collections.singletonMap("mock_processor", Collections.emptyMap()));
}
if (numOnFailureProcessors > 0) {
processorConfig.put("on_failure", onFailureProcessors);
}
processors.add(Collections.singletonMap("mock_processor", processorConfig));
}
pipelineConfig.put("processors", processors);
List<Map<String, Object>> onFailureProcessors = new ArrayList<>();
int numOnFailureProcessors = randomIntBetween(0, 1);
for (int i = 0; i < numOnFailureProcessors; i++) {
onFailureProcessors.add(Collections.singletonMap("mock_processor", Collections.emptyMap()));
}
if (numOnFailureProcessors > 0) {
pipelineConfig.put("on_failure", onFailureProcessors);
}
requestContent.put(Fields.PIPELINE, pipelineConfig);
var projectId = randomProjectIdOrDefault();
SimulatePipelineRequest.Parsed actualRequest = SimulatePipelineRequest.parse(
projectId,
requestContent,
false,
ingestService,
RestApiVersion.V_8,
(nodeFeature) -> true
);
assertThat(actualRequest.verbose(), equalTo(false));
assertThat(actualRequest.documents().size(), equalTo(numDocs));
Iterator<Map<String, Object>> expectedDocsIterator = expectedDocs.iterator();
for (IngestDocument ingestDocument : actualRequest.documents()) {
Map<String, Object> expectedDocument = expectedDocsIterator.next();
org.elasticsearch.script.Metadata metadata = ingestDocument.getMetadata();
assertThat(metadata.get(INDEX.getFieldName()), equalTo(expectedDocument.get(INDEX.getFieldName())));
assertThat(metadata.get(ID.getFieldName()), equalTo(expectedDocument.get(ID.getFieldName())));
assertThat(metadata.get(ROUTING.getFieldName()), equalTo(expectedDocument.get(ROUTING.getFieldName())));
assertThat(metadata.get(VERSION.getFieldName()), equalTo(expectedDocument.get(VERSION.getFieldName())));
assertThat(metadata.get(VERSION_TYPE.getFieldName()), equalTo(expectedDocument.get(VERSION_TYPE.getFieldName())));
assertThat(ingestDocument.getSource(), equalTo(expectedDocument.get(Fields.SOURCE)));
}
assertThat(actualRequest.pipeline().getId(), equalTo(SIMULATED_PIPELINE_ID));
assertThat(actualRequest.pipeline().getDescription(), nullValue());
assertThat(actualRequest.pipeline().getProcessors().size(), equalTo(numProcessors));
assertCriticalWarnings("[types removal] specifying _type in pipeline simulation requests is deprecated");
}
}
| SimulatePipelineRequestParsingTests |
java | quarkusio__quarkus | integration-tests/kubernetes/quarkus-standard-way-kafka/src/test/java/io/quarkus/it/kubernetes/kafka/BasicOpenshiftDeploymentConfigTest.java | {
"start": 581,
"end": 3054
} | class ____ {
@RegisterExtension
static final QuarkusProdModeTest config = new QuarkusProdModeTest()
.withApplicationRoot((jar) -> jar.addClasses(DummyProcessor.class))
.setApplicationName("basic-openshift")
.setApplicationVersion("0.1-SNAPSHOT")
.withConfigurationResource("basic-openshift.properties")
.overrideConfigKey("quarkus.openshift.deployment-kind", "deployment-config");
@ProdBuildResults
private ProdModeTestResults prodModeTestResults;
@SuppressWarnings("unchecked")
@Test
public void assertGeneratedResources() throws IOException {
Path kubernetesDir = prodModeTestResults.getBuildDir().resolve("kubernetes");
assertThat(kubernetesDir)
.isDirectoryContaining(p -> p.getFileName().endsWith("openshift.json"))
.isDirectoryContaining(p -> p.getFileName().endsWith("openshift.yml"))
.satisfies(p -> assertThat(p.toFile().listFiles()).hasSize(2));
List<HasMetadata> openshiftList = DeserializationUtil
.deserializeAsList(kubernetesDir.resolve("openshift.yml"));
assertThat(openshiftList).filteredOn(h -> "DeploymentConfig".equals(h.getKind())).singleElement().satisfies(h -> {
assertThat(h.getMetadata()).satisfies(m -> {
assertThat(m.getName()).isEqualTo("basic-openshift");
assertThat(m.getLabels().get("app.openshift.io/runtime")).isEqualTo("quarkus");
assertThat(m.getNamespace()).isNull();
});
AbstractObjectAssert<?, ?> specAssert = assertThat(h).extracting("spec");
specAssert.extracting("replicas").isEqualTo(1);
specAssert.extracting("selector").isInstanceOfSatisfying(Map.class, selectorsMap -> {
assertThat(selectorsMap).containsOnly(entry("app.kubernetes.io/name", "basic-openshift"),
entry("app.kubernetes.io/version", "0.1-SNAPSHOT"));
});
specAssert.extracting("template").isInstanceOfSatisfying(PodTemplateSpec.class, templateMap -> {
assertThat(templateMap.getSpec()).satisfies(podSpec -> {
assertThat(podSpec.getContainers()).singleElement().satisfies(container -> {
assertThat(container.getPorts()).isNullOrEmpty();
});
});
});
});
}
}
| BasicOpenshiftDeploymentConfigTest |
java | elastic__elasticsearch | libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyUtilsTests.java | {
"start": 2011,
"end": 15873
} | class ____ extends ESTestCase {
public void testCreatePluginPolicyWithPatch() {
var policyPatch = """
versions:
- 9.0.0
- 9.0.0-SNAPSHOT
policy:
entitlement-module-name:
- load_native_libraries
entitlement-module-name-2:
- set_https_connection_properties
""";
var base64EncodedPolicy = new String(
Base64.getEncoder().encode(policyPatch.getBytes(StandardCharsets.UTF_8)),
StandardCharsets.UTF_8
);
final Policy expectedPolicy = new Policy(
"test-plugin",
List.of(
new Scope("entitlement-module-name", List.of(new LoadNativeLibrariesEntitlement())),
new Scope("entitlement-module-name-2", List.of(new SetHttpsConnectionPropertiesEntitlement()))
)
);
var policy = PolicyUtils.parseEncodedPolicyIfExists(
base64EncodedPolicy,
"9.0.0",
true,
"test-plugin",
Set.of("entitlement-module-name", "entitlement-module-name-2")
);
assertThat(policy, equalTo(expectedPolicy));
}
public void testCreatePluginPolicyWithPatchAnyVersion() {
var policyPatch = """
policy:
entitlement-module-name:
- load_native_libraries
entitlement-module-name-2:
- set_https_connection_properties
""";
var base64EncodedPolicy = new String(
Base64.getEncoder().encode(policyPatch.getBytes(StandardCharsets.UTF_8)),
StandardCharsets.UTF_8
);
final Policy expectedPolicy = new Policy(
"test-plugin",
List.of(
new Scope("entitlement-module-name", List.of(new LoadNativeLibrariesEntitlement())),
new Scope("entitlement-module-name-2", List.of(new SetHttpsConnectionPropertiesEntitlement()))
)
);
var policy = PolicyUtils.parseEncodedPolicyIfExists(
base64EncodedPolicy,
"abcdef",
true,
"test-plugin",
Set.of("entitlement-module-name", "entitlement-module-name-2")
);
assertThat(policy, equalTo(expectedPolicy));
}
public void testNoPatchWithVersionMismatch() {
var policyPatch = """
versions:
- 9.0.0
- 9.0.0-SNAPSHOT
policy:
entitlement-module-name:
- load_native_libraries
entitlement-module-name-2:
- set_https_connection_properties
""";
var base64EncodedPolicy = new String(
Base64.getEncoder().encode(policyPatch.getBytes(StandardCharsets.UTF_8)),
StandardCharsets.UTF_8
);
var policy = PolicyUtils.parseEncodedPolicyIfExists(
base64EncodedPolicy,
"9.1.0",
true,
"test-plugin",
Set.of("entitlement-module-name", "entitlement-module-name-2")
);
assertThat(policy, nullValue());
}
public void testNoPatchWithValidationError() {
// Nonexistent module names
var policyPatch = """
versions:
- 9.0.0
- 9.0.0-SNAPSHOT
policy:
entitlement-module-name:
- load_native_libraries
entitlement-module-name-2:
- set_https_connection_properties
""";
var base64EncodedPolicy = new String(
Base64.getEncoder().encode(policyPatch.getBytes(StandardCharsets.UTF_8)),
StandardCharsets.UTF_8
);
assertThrows(
IllegalStateException.class,
() -> PolicyUtils.parseEncodedPolicyIfExists(base64EncodedPolicy, "9.0.0", true, "test-plugin", Set.of())
);
}
public void testNoPatchWithParsingError() {
// no <version> or <policy> field
var policyPatch = """
entitlement-module-name:
- load_native_libraries
entitlement-module-name-2:
- set_https_connection_properties
""";
var base64EncodedPolicy = new String(
Base64.getEncoder().encode(policyPatch.getBytes(StandardCharsets.UTF_8)),
StandardCharsets.UTF_8
);
assertThrows(
IllegalStateException.class,
() -> PolicyUtils.parseEncodedPolicyIfExists(base64EncodedPolicy, "9.0.0", true, "test-plugin", Set.of())
);
}
public void testMergeScopes() {
var originalPolicy = List.of(
new Scope("module1", List.of(new LoadNativeLibrariesEntitlement())),
new Scope("module2", List.of(new ManageThreadsEntitlement())),
new Scope("module3", List.of(new InboundNetworkEntitlement()))
);
var patchPolicy = List.of(
new Scope("module2", List.of(new ManageThreadsEntitlement())),
new Scope("module3", List.of(new OutboundNetworkEntitlement())),
new Scope("module4", List.of(new WriteAllSystemPropertiesEntitlement()))
);
var resultPolicy = PolicyUtils.mergeScopes(originalPolicy, patchPolicy);
assertThat(
resultPolicy,
containsInAnyOrder(
equalTo(new Scope("module1", List.of(new LoadNativeLibrariesEntitlement()))),
equalTo(new Scope("module2", List.of(new ManageThreadsEntitlement()))),
both(transformedMatch(Scope::moduleName, equalTo("module3"))).and(
transformedMatch(
Scope::entitlements,
containsInAnyOrder(new InboundNetworkEntitlement(), new OutboundNetworkEntitlement())
)
),
equalTo(new Scope("module4", List.of(new WriteAllSystemPropertiesEntitlement())))
)
);
}
public void testMergeSameFlagEntitlement() {
var e1 = new InboundNetworkEntitlement();
var e2 = new InboundNetworkEntitlement();
assertThat(PolicyUtils.mergeEntitlement(e1, e2), equalTo(new InboundNetworkEntitlement()));
}
public void testMergeFilesEntitlement() {
var e1 = new FilesEntitlement(
List.of(
FilesEntitlement.FileData.ofPath(Path.of("/a/b"), FilesEntitlement.Mode.READ),
FilesEntitlement.FileData.ofPath(Path.of("/a/c"), FilesEntitlement.Mode.READ_WRITE),
FilesEntitlement.FileData.ofRelativePath(Path.of("c/d"), PathLookup.BaseDir.CONFIG, FilesEntitlement.Mode.READ)
)
);
var e2 = new FilesEntitlement(
List.of(
FilesEntitlement.FileData.ofPath(Path.of("/a/b"), FilesEntitlement.Mode.READ), // identical
FilesEntitlement.FileData.ofPath(Path.of("/a/c"), FilesEntitlement.Mode.READ), // different mode
FilesEntitlement.FileData.ofPath(Path.of("/c/d"), FilesEntitlement.Mode.READ) // different type
)
);
var merged = PolicyUtils.mergeEntitlement(e1, e2);
assertThat(
merged,
transformedMatch(
x -> ((FilesEntitlement) x).filesData(),
containsInAnyOrder(
FilesEntitlement.FileData.ofPath(Path.of("/a/b"), FilesEntitlement.Mode.READ),
FilesEntitlement.FileData.ofPath(Path.of("/a/c"), FilesEntitlement.Mode.READ),
FilesEntitlement.FileData.ofPath(Path.of("/a/c"), FilesEntitlement.Mode.READ_WRITE),
FilesEntitlement.FileData.ofRelativePath(Path.of("c/d"), PathLookup.BaseDir.CONFIG, FilesEntitlement.Mode.READ),
FilesEntitlement.FileData.ofPath(Path.of("/c/d"), FilesEntitlement.Mode.READ)
)
)
);
}
public void testMergeWritePropertyEntitlement() {
var e1 = new WriteSystemPropertiesEntitlement(List.of("a", "b", "c"));
var e2 = new WriteSystemPropertiesEntitlement(List.of("b", "c", "d"));
var merged = PolicyUtils.mergeEntitlement(e1, e2);
assertThat(
merged,
transformedMatch(x -> ((WriteSystemPropertiesEntitlement) x).properties(), containsInAnyOrder("a", "b", "c", "d"))
);
}
public void testMergeEntitlements() {
List<Entitlement> a = List.of(
new InboundNetworkEntitlement(),
new OutboundNetworkEntitlement(),
new FilesEntitlement(
List.of(
FilesEntitlement.FileData.ofPath(Path.of("/a/b"), FilesEntitlement.Mode.READ),
FilesEntitlement.FileData.ofPath(Path.of("/a/c"), FilesEntitlement.Mode.READ_WRITE)
)
)
);
List<Entitlement> b = List.of(
new InboundNetworkEntitlement(),
new LoadNativeLibrariesEntitlement(),
new FilesEntitlement(List.of()),
new WriteSystemPropertiesEntitlement(List.of("a"))
);
var merged = PolicyUtils.mergeEntitlements(a, b);
assertThat(
merged,
containsInAnyOrder(
new InboundNetworkEntitlement(),
new OutboundNetworkEntitlement(),
new LoadNativeLibrariesEntitlement(),
new FilesEntitlement(
List.of(
FilesEntitlement.FileData.ofPath(Path.of("/a/b"), FilesEntitlement.Mode.READ),
FilesEntitlement.FileData.ofPath(Path.of("/a/c"), FilesEntitlement.Mode.READ_WRITE)
)
),
new WriteSystemPropertiesEntitlement(List.of("a"))
)
);
}
/** Test that we can parse the set of entitlements correctly for a simple policy */
public void testFormatSimplePolicy() {
var pluginPolicy = new Policy(
"test-plugin",
List.of(new Scope("module1", List.of(new WriteSystemPropertiesEntitlement(List.of("property1", "property2")))))
);
Set<String> actual = PolicyUtils.getEntitlementsDescriptions(pluginPolicy);
assertThat(actual, containsInAnyOrder("write_system_properties [property1]", "write_system_properties [property2]"));
}
/** Test that we can format the set of entitlements correctly for a complex policy */
public void testFormatPolicyWithMultipleScopes() {
var pluginPolicy = new Policy(
"test-plugin",
List.of(
new Scope("module1", List.of(new CreateClassLoaderEntitlement())),
new Scope("module2", List.of(new CreateClassLoaderEntitlement(), new OutboundNetworkEntitlement())),
new Scope("module3", List.of(new InboundNetworkEntitlement(), new OutboundNetworkEntitlement()))
)
);
Set<String> actual = PolicyUtils.getEntitlementsDescriptions(pluginPolicy);
assertThat(actual, containsInAnyOrder("create_class_loader", "outbound_network", "inbound_network"));
}
/** Test that we can format some simple files entitlement properly */
public void testFormatFilesEntitlement() {
var pathAB = Path.of("/a/b");
var pathCD = Path.of("c/d");
var policy = new Policy(
"test-plugin",
List.of(
new Scope(
"module1",
List.of(
new FilesEntitlement(
List.of(
FilesEntitlement.FileData.ofPath(pathAB, FilesEntitlement.Mode.READ_WRITE),
FilesEntitlement.FileData.ofRelativePath(pathCD, PathLookup.BaseDir.DATA, FilesEntitlement.Mode.READ)
)
)
)
),
new Scope(
"module2",
List.of(
new FilesEntitlement(
List.of(
FilesEntitlement.FileData.ofPath(pathAB, FilesEntitlement.Mode.READ_WRITE),
FilesEntitlement.FileData.ofPathSetting("setting", PathLookup.BaseDir.DATA, FilesEntitlement.Mode.READ)
)
)
)
)
)
);
Set<String> actual = PolicyUtils.getEntitlementsDescriptions(policy);
var pathABString = pathAB.toAbsolutePath().toString();
var pathCDString = SEPARATOR + pathCD.toString();
var pathSettingString = SEPARATOR + "<setting>";
assertThat(
actual,
containsInAnyOrder(
"files [READ_WRITE] " + pathABString,
"files [READ] <DATA>" + pathCDString,
"files [READ] <DATA>" + pathSettingString
)
);
}
/** Test that we can format some simple files entitlement properly */
public void testFormatWriteSystemPropertiesEntitlement() {
var policy = new Policy(
"test-plugin",
List.of(
new Scope("module1", List.of(new WriteSystemPropertiesEntitlement(List.of("property1", "property2")))),
new Scope("module2", List.of(new WriteSystemPropertiesEntitlement(List.of("property2", "property3"))))
)
);
Set<String> actual = PolicyUtils.getEntitlementsDescriptions(policy);
assertThat(
actual,
containsInAnyOrder(
"write_system_properties [property1]",
"write_system_properties [property2]",
"write_system_properties [property3]"
)
);
}
}
| PolicyUtilsTests |
java | apache__camel | dsl/camel-yaml-dsl/camel-yaml-dsl-deserializers/src/generated/java/org/apache/camel/dsl/yaml/deserializers/ModelDeserializers.java | {
"start": 64857,
"end": 66345
} | class ____ extends YamlDeserializerBase<BeanConstructorDefinition> {
public BeanConstructorDefinitionDeserializer() {
super(BeanConstructorDefinition.class);
}
@Override
protected BeanConstructorDefinition newInstance() {
return new BeanConstructorDefinition();
}
@Override
protected boolean setProperty(BeanConstructorDefinition target, String propertyKey,
String propertyName, Node node) {
propertyKey = org.apache.camel.util.StringHelper.dashToCamelCase(propertyKey);
switch(propertyKey) {
case "index": {
String val = asText(node);
target.setIndex(java.lang.Integer.valueOf(val));
break;
}
case "value": {
String val = asText(node);
target.setValue(val);
break;
}
default: {
return false;
}
}
return true;
}
}
@YamlType(
types = org.apache.camel.model.app.BeanConstructorsDefinition.class,
order = org.apache.camel.dsl.yaml.common.YamlDeserializerResolver.ORDER_LOWEST - 1,
properties = @YamlProperty(name = "constructor", type = "array:org.apache.camel.model.app.BeanConstructorDefinition")
)
public static | BeanConstructorDefinitionDeserializer |
java | hibernate__hibernate-orm | hibernate-spatial/src/test/java/org/hibernate/spatial/testing/SpatialTest.java | {
"start": 2717,
"end": 3296
} | class ____ {
@Id
private Long id;
private String name;
private Point location;
//Getters and setters are omitted for brevity
//end::spatial-types-mapping-example[]
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public Point getLocation() {
return location;
}
public void setLocation(Point location) {
this.location = location;
}
//tag::spatial-types-mapping-example[]
}
//end::spatial-types-mapping-example[]
}
| Event |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/support/ActiveProfilesUtilsTests.java | {
"start": 10223,
"end": 10508
} | class ____ implements ActiveProfilesResolver {
@SuppressWarnings("unused")
NoDefaultConstructorActiveProfilesResolver(Object argument) {
}
@Override
public String[] resolve(Class<?> testClass) {
return null;
}
}
private static | NoDefaultConstructorActiveProfilesResolver |
java | apache__camel | components/camel-kubernetes/src/generated/java/org/apache/camel/component/kubernetes/namespaces/KubernetesNamespacesComponentConfigurer.java | {
"start": 748,
"end": 3275
} | class ____ extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
KubernetesNamespacesComponent target = (KubernetesNamespacesComponent) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "autowiredenabled":
case "autowiredEnabled": target.setAutowiredEnabled(property(camelContext, boolean.class, value)); return true;
case "bridgeerrorhandler":
case "bridgeErrorHandler": target.setBridgeErrorHandler(property(camelContext, boolean.class, value)); return true;
case "kubernetesclient":
case "kubernetesClient": target.setKubernetesClient(property(camelContext, io.fabric8.kubernetes.client.KubernetesClient.class, value)); return true;
case "lazystartproducer":
case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
default: return false;
}
}
@Override
public String[] getAutowiredNames() {
return new String[]{"kubernetesClient"};
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "autowiredenabled":
case "autowiredEnabled": return boolean.class;
case "bridgeerrorhandler":
case "bridgeErrorHandler": return boolean.class;
case "kubernetesclient":
case "kubernetesClient": return io.fabric8.kubernetes.client.KubernetesClient.class;
case "lazystartproducer":
case "lazyStartProducer": return boolean.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
KubernetesNamespacesComponent target = (KubernetesNamespacesComponent) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "autowiredenabled":
case "autowiredEnabled": return target.isAutowiredEnabled();
case "bridgeerrorhandler":
case "bridgeErrorHandler": return target.isBridgeErrorHandler();
case "kubernetesclient":
case "kubernetesClient": return target.getKubernetesClient();
case "lazystartproducer":
case "lazyStartProducer": return target.isLazyStartProducer();
default: return null;
}
}
}
| KubernetesNamespacesComponentConfigurer |
java | spring-projects__spring-boot | core/spring-boot-autoconfigure/src/test/java/org/springframework/boot/autoconfigure/condition/ConditionalOnMissingBeanTests.java | {
"start": 28578,
"end": 28795
} | class ____ {
@Bean
String bar() {
return "bar";
}
}
@Configuration(proxyBeanMethods = false)
@ConditionalOnMissingBean(name = "foo", search = SearchStrategy.CURRENT)
static | HierarchyConsideredConfiguration |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/appender/db/jdbc/JdbcDatabaseManager.java | {
"start": 3025,
"end": 5090
} | class ____ extends AbstractDatabaseManager.AbstractFactoryData {
private final ConnectionSource connectionSource;
private final String tableName;
private final ColumnConfig[] columnConfigs;
final ColumnMapping[] columnMappings;
private final boolean immediateFail;
private final boolean retry;
private final long reconnectIntervalMillis;
private final boolean truncateStrings;
protected FactoryData(
final int bufferSize,
final Layout<? extends Serializable> layout,
final ConnectionSource connectionSource,
final String tableName,
final ColumnConfig[] columnConfigs,
final ColumnMapping[] columnMappings,
final boolean immediateFail,
final long reconnectIntervalMillis,
final boolean truncateStrings) {
super(bufferSize, layout);
this.connectionSource = connectionSource;
this.tableName = tableName;
this.columnConfigs = columnConfigs;
this.columnMappings = columnMappings;
this.immediateFail = immediateFail;
this.retry = reconnectIntervalMillis > 0;
this.reconnectIntervalMillis = reconnectIntervalMillis;
this.truncateStrings = truncateStrings;
}
@Override
public String toString() {
return String.format(
"FactoryData [connectionSource=%s, tableName=%s, columnConfigs=%s, columnMappings=%s, immediateFail=%s, retry=%s, reconnectIntervalMillis=%s, truncateStrings=%s]",
connectionSource,
tableName,
Arrays.toString(columnConfigs),
Arrays.toString(columnMappings),
immediateFail,
retry,
reconnectIntervalMillis,
truncateStrings);
}
}
/**
* Creates managers.
*/
private static final | FactoryData |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/streaming/api/functions/source/legacy/RichSourceFunction.java | {
"start": 1335,
"end": 2263
} | class ____ useful when implementing parallel sources where different parallel subtasks need
* to perform different work. Typical patterns for that are:
*
* <ul>
* <li>Use {@link #getRuntimeContext()} to obtain the runtime context.
* <li>Use the number of parallel subtasks in {@link RuntimeContext#getTaskInfo()} to determine
* the current parallelism. It is strongly encouraged to use this method, rather than
* hard-wiring the parallelism, because the configured parallelism may change depending on
* program configuration. The parallelism may also change after recovering failures, when
* fewer than desired parallel worker as available.
* <li>Use the index of task in {@link RuntimeContext#getTaskInfo()}} to determine which subtask
* the current instance of the function executes.
* </ul>
*
* @param <OUT> The type of the records produced by this source.
* @deprecated This | is |
java | elastic__elasticsearch | test/framework/src/main/java/org/elasticsearch/test/JnaCleanerThreadsFilter.java | {
"start": 708,
"end": 874
} | class ____ implements ThreadFilter {
@Override
public boolean reject(Thread t) {
return t.getName().equals("JNA Cleaner");
}
}
| JnaCleanerThreadsFilter |
java | spring-projects__spring-boot | smoke-test/spring-boot-smoke-test-jetty/src/main/java/smoketest/jetty/ExampleServletContextListener.java | {
"start": 878,
"end": 1193
} | class ____ implements ServletContextListener {
@Override
public void contextInitialized(ServletContextEvent sce) {
System.out.println("*** contextInitialized");
}
@Override
public void contextDestroyed(ServletContextEvent sce) {
System.out.println("*** contextDestroyed");
}
}
| ExampleServletContextListener |
java | grpc__grpc-java | util/src/test/java/io/grpc/util/RandomSubsettingLoadBalancerTest.java | {
"start": 10407,
"end": 12030
} | class ____ {
private final List<EquivalentAddressGroup> servers;
private final Map<List<EquivalentAddressGroup>, Subchannel> subchannels;
private final Map<Subchannel, LoadBalancer.SubchannelStateListener> subchannelStateListeners;
BackendDetails(List<EquivalentAddressGroup> servers,
Map<List<EquivalentAddressGroup>, Subchannel> subchannels) {
this.servers = servers;
this.subchannels = subchannels;
this.subchannelStateListeners = Maps.newLinkedHashMap();
when(mockHelper.createSubchannel(any(LoadBalancer.CreateSubchannelArgs.class))).then(
new Answer<Subchannel>() {
@Override
public Subchannel answer(InvocationOnMock invocation) throws Throwable {
CreateSubchannelArgs args = (CreateSubchannelArgs) invocation.getArguments()[0];
final Subchannel subchannel = backendDetails.subchannels.get(args.getAddresses());
when(subchannel.getAllAddresses()).thenReturn(args.getAddresses());
when(subchannel.getAttributes()).thenReturn(args.getAttributes());
doAnswer(new Answer<Void>() {
@Override
public Void answer(InvocationOnMock invocation) throws Throwable {
subchannelStateListeners.put(subchannel,
(SubchannelStateListener) invocation.getArguments()[0]);
return null;
}
}).when(subchannel).start(any(SubchannelStateListener.class));
return subchannel;
}
});
}
}
private static | BackendDetails |
java | mybatis__mybatis-3 | src/test/java/org/apache/ibatis/submitted/xml_external_ref/ShortNameTest.java | {
"start": 1145,
"end": 2100
} | class ____ {
@Test
void getStatementByShortName() throws Exception {
Configuration configuration = getConfiguration();
// statement can be referenced by its short name.
MappedStatement selectPet = configuration.getMappedStatement("selectPet");
assertNotNull(selectPet);
}
@Test
void ambiguousShortNameShouldFail() throws Exception {
Configuration configuration = getConfiguration();
// ambiguous short name should throw an exception.
Assertions.assertThrows(IllegalArgumentException.class, () -> configuration.getMappedStatement("select"));
}
private Configuration getConfiguration() throws IOException {
try (Reader configReader = Resources
.getResourceAsReader("org/apache/ibatis/submitted/xml_external_ref/MapperConfig.xml")) {
SqlSessionFactory sqlSessionFactory = new SqlSessionFactoryBuilder().build(configReader);
return sqlSessionFactory.getConfiguration();
}
}
}
| ShortNameTest |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java | {
"start": 1018,
"end": 3721
} | class ____ {
private static final Logger LOG =
LoggerFactory.getLogger(ServerSocketUtil.class);
private static Random rand = new Random();
/**
* Port scan & allocate is how most other apps find ports
*
* @param port given port
* @param retries number of retries
* @return
* @throws IOException
*/
public static int getPort(int port, int retries) throws IOException {
int tryPort = port;
int tries = 0;
while (true) {
if (tries > 0 || tryPort == 0) {
tryPort = port + rand.nextInt(65535 - port);
}
if (tryPort == 0) {
continue;
}
try (ServerSocket s = new ServerSocket(tryPort, 50,
InetAddress.getLoopbackAddress())) {
LOG.info("Using port " + tryPort);
return tryPort;
} catch (IOException e) {
tries++;
if (tries >= retries) {
LOG.info("Port is already in use; giving up");
throw e;
} else {
LOG.info("Port is already in use; trying again");
}
}
}
}
/**
* Check whether port is available or not.
*
* @param port given port
* @return
*/
private static boolean isPortAvailable(int port) {
try (ServerSocket s = new ServerSocket(port)) {
return true;
} catch (IOException e) {
return false;
}
}
/**
* Wait till the port available.
*
* @param port given port
* @param retries number of retries for given port
* @return
* @throws InterruptedException
* @throws IOException
*/
public static int waitForPort(int port, int retries)
throws InterruptedException, IOException {
int tries = 0;
while (true) {
if (isPortAvailable(port)) {
return port;
} else {
tries++;
if (tries >= retries) {
throw new IOException(
"Port is already in use; giving up after " + tries + " times.");
}
Thread.sleep(1000);
}
}
}
/**
* Find the specified number of unique ports available.
* The ports are all closed afterwards,
* so other network services started may grab those same ports.
*
* @param numPorts number of required port nubmers
* @return array of available port numbers
* @throws IOException
*/
public static int[] getPorts(int numPorts) throws IOException {
ServerSocket[] sockets = new ServerSocket[numPorts];
int[] ports = new int[numPorts];
for (int i = 0; i < numPorts; i++) {
ServerSocket sock = new ServerSocket(0);
sockets[i] = sock;
ports[i] = sock.getLocalPort();
}
for (ServerSocket sock : sockets) {
sock.close();
}
return ports;
}
}
| ServerSocketUtil |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.