language stringclasses 1
value | repo stringclasses 60
values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | netty__netty | handler/src/main/java/io/netty/handler/ipfilter/IpSubnetFilterRuleComparator.java | {
"start": 798,
"end": 1199
} | class ____ implements Comparator<Object> {
static final IpSubnetFilterRuleComparator INSTANCE = new IpSubnetFilterRuleComparator();
private IpSubnetFilterRuleComparator() {
// Prevent outside initialization
}
@Override
public int compare(Object o1, Object o2) {
return ((IpSubnetFilterRule) o1).compareTo((InetSocketAddress) o2);
}
}
| IpSubnetFilterRuleComparator |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/index/TimeSeriesModeTests.java | {
"start": 1394,
"end": 15165
} | class ____ extends MapperServiceTestCase {
public void testConfigureIndex() {
Settings s = getSettings();
assertSame(IndexMode.TIME_SERIES, IndexSettings.MODE.get(s));
}
public void testPartitioned() {
Settings s = Settings.builder()
.put(getSettings())
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 4)
.put(IndexMetadata.INDEX_ROUTING_PARTITION_SIZE_SETTING.getKey(), 2)
.build();
IndexMetadata metadata = IndexSettingsTests.newIndexMeta("test", s);
Exception e = expectThrows(IllegalArgumentException.class, () -> new IndexSettings(metadata, Settings.EMPTY));
assertThat(e.getMessage(), equalTo("[index.mode=time_series] is incompatible with [index.routing_partition_size]"));
}
public void testSortField() {
Settings s = Settings.builder().put(getSettings()).put(IndexSortConfig.INDEX_SORT_FIELD_SETTING.getKey(), "a").build();
IndexMetadata metadata = IndexSettingsTests.newIndexMeta("test", s);
Exception e = expectThrows(IllegalArgumentException.class, () -> new IndexSettings(metadata, Settings.EMPTY));
assertThat(e.getMessage(), equalTo("[index.mode=time_series] is incompatible with [index.sort.field]"));
}
public void testSortMode() {
Settings s = Settings.builder().put(getSettings()).put(IndexSortConfig.INDEX_SORT_MISSING_SETTING.getKey(), "_last").build();
IndexMetadata metadata = IndexSettingsTests.newIndexMeta("test", s);
Exception e = expectThrows(IllegalArgumentException.class, () -> new IndexSettings(metadata, Settings.EMPTY));
assertThat(e.getMessage(), equalTo("[index.mode=time_series] is incompatible with [index.sort.missing]"));
}
public void testSortOrder() {
Settings s = Settings.builder().put(getSettings()).put(IndexSortConfig.INDEX_SORT_ORDER_SETTING.getKey(), "desc").build();
IndexMetadata metadata = IndexSettingsTests.newIndexMeta("test", s);
Exception e = expectThrows(IllegalArgumentException.class, () -> new IndexSettings(metadata, Settings.EMPTY));
assertThat(e.getMessage(), equalTo("[index.mode=time_series] is incompatible with [index.sort.order]"));
}
public void testWithoutRoutingPath() {
Settings s = Settings.builder().put(IndexSettings.MODE.getKey(), "time_series").build();
Exception e = expectThrows(
IllegalArgumentException.class,
() -> new IndexSettings(IndexSettingsTests.newIndexMeta("test", s), Settings.EMPTY)
);
assertThat(e.getMessage(), containsString("[index.mode=time_series] requires a non-empty [index.routing_path]"));
}
public void testWithEmptyRoutingPath() {
Settings s = getSettings("");
Exception e = expectThrows(
IllegalArgumentException.class,
() -> new IndexSettings(IndexSettingsTests.newIndexMeta("test", s), Settings.EMPTY)
);
assertThat(e.getMessage(), containsString("[index.mode=time_series] requires a non-empty [index.routing_path]"));
}
public void testWithoutStartTime() {
final Settings settings = Settings.builder()
.put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES)
.put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "foo")
.build();
IndexMetadata metadata = IndexSettingsTests.newIndexMeta("test", settings);
IndexSettings indexSettings = new IndexSettings(metadata, Settings.EMPTY);
assertThat(indexSettings.getTimestampBounds().startTime(), CoreMatchers.equalTo(DateUtils.MAX_MILLIS_BEFORE_MINUS_9999));
assertThat(indexSettings.getTimestampBounds().endTime(), CoreMatchers.equalTo(DateUtils.MAX_MILLIS_BEFORE_9999));
}
public void testWithoutEndTime() {
final Settings settings = Settings.builder()
.put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES)
.put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "foo")
.put(TIME_SERIES_START_TIME.getKey(), "1970-01-01T00:00:00Z")
.build();
IndexMetadata metadata = IndexSettingsTests.newIndexMeta("test", settings);
IndexSettings indexSettings = new IndexSettings(metadata, Settings.EMPTY);
assertThat(indexSettings.getTimestampBounds().startTime(), CoreMatchers.equalTo(0L));
assertThat(indexSettings.getTimestampBounds().endTime(), CoreMatchers.equalTo(DateUtils.MAX_MILLIS_BEFORE_9999));
}
public void testSetDefaultTimeRangeValue() {
final Settings settings = Settings.builder()
.put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES)
.put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "foo")
.put(TIME_SERIES_START_TIME.getKey(), Instant.ofEpochMilli(DateUtils.MAX_MILLIS_BEFORE_MINUS_9999).toString())
.put(TIME_SERIES_END_TIME.getKey(), Instant.ofEpochMilli(DateUtils.MAX_MILLIS_BEFORE_9999).toString())
.build();
IndexMetadata metadata = IndexSettingsTests.newIndexMeta("test", settings);
IndexSettings indexSettings = new IndexSettings(metadata, Settings.EMPTY);
assertThat(indexSettings.getTimestampBounds().startTime(), CoreMatchers.equalTo(DateUtils.MAX_MILLIS_BEFORE_MINUS_9999));
assertThat(indexSettings.getTimestampBounds().endTime(), CoreMatchers.equalTo(DateUtils.MAX_MILLIS_BEFORE_9999));
}
public void testRequiredRouting() {
Settings s = getSettings();
var mapperService = new TestMapperServiceBuilder().settings(s).applyDefaultMapping(false).build();
Exception e = expectThrows(
IllegalArgumentException.class,
() -> withMapping(mapperService, topMapping(b -> b.startObject("_routing").field("required", true).endObject()))
);
assertThat(e.getMessage(), equalTo("routing is forbidden on CRUD operations that target indices in [index.mode=time_series]"));
}
public void testValidateAlias() {
Settings s = getSettings();
IndexSettings.MODE.get(s).validateAlias(null, null); // Doesn't throw exception
}
public void testValidateAliasWithIndexRouting() {
Settings s = getSettings();
Exception e = expectThrows(IllegalArgumentException.class, () -> IndexSettings.MODE.get(s).validateAlias("r", null));
assertThat(e.getMessage(), equalTo("routing is forbidden on CRUD operations that target indices in [index.mode=time_series]"));
}
public void testValidateAliasWithSearchRouting() {
Settings s = getSettings();
Exception e = expectThrows(IllegalArgumentException.class, () -> IndexSettings.MODE.get(s).validateAlias(null, "r"));
assertThat(e.getMessage(), equalTo("routing is forbidden on CRUD operations that target indices in [index.mode=time_series]"));
}
public void testRoutingPathMatchesObject() throws IOException {
Settings s = getSettings("dim.o*");
createMapperService(s, mapping(b -> {
b.startObject("dim").startObject("properties");
{
b.startObject("o").startObject("properties");
b.startObject("inner_dim").field("type", "keyword").field("time_series_dimension", true).endObject();
b.endObject().endObject();
}
b.startObject("dim").field("type", "keyword").field("time_series_dimension", true).endObject();
b.endObject().endObject();
}));
}
public void testRoutingPathEqualsObjectNameError() {
Settings s = getSettings("dim.o");
Exception e = expectThrows(IllegalArgumentException.class, () -> createMapperService(s, mapping(b -> {
b.startObject("dim").startObject("properties");
{
b.startObject("o").startObject("properties");
b.startObject("inner_dim").field("type", "keyword").field("time_series_dimension", true).endObject();
b.endObject().endObject();
}
b.startObject("dim").field("type", "keyword").field("time_series_dimension", true).endObject();
b.endObject().endObject();
})));
assertThat(e.getMessage(), equalTo("All fields that match routing_path must be flattened fields. [dim.o] was [object]."));
}
public void testRoutingPathMatchesNonDimensionKeyword() {
Settings s = getSettings(randomBoolean() ? "dim.non_dim" : "dim.*");
Exception e = expectThrows(IllegalArgumentException.class, () -> createMapperService(s, mapping(b -> {
b.startObject("dim").startObject("properties");
b.startObject("non_dim").field("type", "keyword").endObject();
b.startObject("dim").field("type", "keyword").field("time_series_dimension", true).endObject();
b.endObject().endObject();
})));
assertThat(
e.getMessage(),
equalTo(
"All fields that match routing_path must be configured with [time_series_dimension: true] "
+ "or flattened fields with a list of dimensions in [time_series_dimensions] and "
+ "without the [script] parameter. [dim.non_dim] was not a dimension."
)
);
}
public void testRoutingPathMatchesNonKeyword() throws IOException {
Settings s = getSettings(randomBoolean() ? "dim.non_kwd" : "dim.*");
createMapperService(s, mapping(b -> {
b.startObject("dim").startObject("properties");
b.startObject("non_kwd").field("type", "integer").field("time_series_dimension", true).endObject();
b.startObject("dim").field("type", "keyword").field("time_series_dimension", true).endObject();
b.endObject().endObject();
}));
}
public void testRoutingPathMatchesScriptedKeyword() {
Settings s = getSettings(randomBoolean() ? "dim.kwd" : "dim.*");
Exception e = expectThrows(IllegalArgumentException.class, () -> createMapperService(s, mapping(b -> {
b.startObject("dim.kwd");
b.field("type", "keyword");
b.field("time_series_dimension", true);
b.startObject("script").field("lang", "mock").field("source", "mock").endObject();
b.endObject();
})));
assertThat(
e.getMessage(),
equalTo(
"All fields that match routing_path must be configured with [time_series_dimension: true] "
+ "or flattened fields with a list of dimensions in [time_series_dimensions] and "
+ "without the [script] parameter. [dim.kwd] has a [script] parameter."
)
);
}
public void testRoutingPathMatchesRuntimeKeyword() {
Settings s = getSettings(randomBoolean() ? "dim.kwd" : "dim.*");
Exception e = expectThrows(
IllegalArgumentException.class,
() -> createMapperService(s, runtimeMapping(b -> b.startObject("dim.kwd").field("type", "keyword").endObject()))
);
assertThat(
e.getMessage(),
equalTo(
"All fields that match routing_path must be configured with [time_series_dimension: true] "
+ "or flattened fields with a list of dimensions in [time_series_dimensions] and "
+ "without the [script] parameter. [dim.kwd] was a runtime [keyword]."
)
);
}
public void testRoutingPathMatchesOnlyKeywordDimensions() throws IOException {
Settings s = getSettings(randomBoolean() ? "dim.metric_type,dim.server,dim.species,dim.uuid" : "dim.*");
createMapperService(s, mapping(b -> {
b.startObject("dim").startObject("properties");
b.startObject("metric_type").field("type", "keyword").field("time_series_dimension", true).endObject();
b.startObject("server").field("type", "keyword").field("time_series_dimension", true).endObject();
b.startObject("species").field("type", "keyword").field("time_series_dimension", true).endObject();
b.startObject("uuid").field("type", "keyword").field("time_series_dimension", true).endObject();
b.endObject().endObject();
})); // doesn't throw
}
@Override
@SuppressWarnings("unchecked")
protected <T> T compileScript(Script script, ScriptContext<T> context) {
if (context.equals(StringFieldScript.CONTEXT) && script.getLang().equals("mock")) {
return (T) new StringFieldScript.Factory() {
@Override
public LeafFactory newFactory(
String fieldName,
Map<String, Object> params,
SearchLookup searchLookup,
OnScriptError onScriptError
) {
throw new UnsupportedOperationException("error should be thrown before getting here");
}
};
}
return super.compileScript(script, context);
}
private Settings getSettings() {
return getSettings(randomAlphaOfLength(5), "2021-04-28T00:00:00Z", "2021-04-29T00:00:00Z");
}
private Settings getSettings(String routingPath) {
return getSettings(routingPath, "2021-04-28T00:00:00Z", "2021-04-29T00:00:00Z");
}
private Settings getSettings(String routingPath, String startTime, String endTime) {
return Settings.builder()
.put(IndexSettings.MODE.getKey(), "time_series")
.put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), routingPath)
.put(IndexSettings.TIME_SERIES_START_TIME.getKey(), startTime)
.put(IndexSettings.TIME_SERIES_END_TIME.getKey(), endTime)
.build();
}
}
| TimeSeriesModeTests |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/fetch/depth/DepthOneBatchTest.java | {
"start": 1995,
"end": 4100
} | class ____ {
@BeforeAll
public void setUp(SessionFactoryScope scope) {
scope.inTransaction(
session -> {
Agency agency = new Agency( 1, "Test Agency" );
session.persist( agency );
Group group = new Group( 1, "Test Group 1" );
agency.addGroup( group );
session.persist( group );
for ( int i = 1; i < 9; i++ ) {
User user = new User( i, "User " + i );
group.addUser( user );
agency.addUser( user );
session.persist( user );
}
}
);
}
@Test
public void tesGetAgency(SessionFactoryScope scope) {
SQLStatementInspector statementInspector = (SQLStatementInspector) scope.getStatementInspector();
statementInspector.clear();
scope.inTransaction(
session -> {
Agency agency = session.get( Agency.class, 1 );
assertThat( agency ).isNotNull();
List<String> executedQueries = statementInspector.getSqlQueries();
assertThat( executedQueries.size() ).isEqualTo( 5 );
assertThat( executedQueries.get( 0 ).toLowerCase() ).isEqualTo(
"select a1_0.agency_id,a1_0.agency_txt from agency_table a1_0 where a1_0.agency_id=?"
);
assertThat( executedQueries.get( 1 ).toLowerCase() ).isEqualTo(
"select ad1_0.agency_id,ad1_0.agency_detail from agency_detail_table ad1_0 where ad1_0.agency_id=?"
);
assertThat( executedQueries.get( 2 ).toLowerCase() ).isEqualTo(
"select u1_0.agency_id,u1_0.user_id,u1_0.user_name from user_table u1_0 where u1_0.agency_id=?"
);
assertThat( executedQueries.get( 3 ).toLowerCase() ).isEqualTo(
"select g1_0.agency_id,g1_0.group_id,g1_0.group_name from group_table g1_0 where g1_0.agency_id=?"
);
assertThat( executedQueries.get( 4 ).toLowerCase() ).isEqualTo(
"select u1_0.group_id,u1_1.user_id,a1_0.agency_id,a1_0.agency_txt,u1_1.user_name from group_user u1_0 join user_table u1_1 on u1_1.user_id=u1_0.user_id left join agency_table a1_0 on a1_0.agency_id=u1_1.agency_id where u1_0.group_id=?"
);
}
);
}
@Entity(name = "Agency")
@Table(name = "AGENCY_TABLE")
public static | DepthOneBatchTest |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java | {
"start": 4577,
"end": 17703
} | enum ____ {
TIMESTAMPS, OWNERSHIP, PERMISSION, ACL, XATTR;
public static FileAttribute getAttribute(char symbol) {
for (FileAttribute attribute : values()) {
if (attribute.name().charAt(0) == Character.toUpperCase(symbol)) {
return attribute;
}
}
throw new NoSuchElementException("No attribute for " + symbol);
}
}
private EnumSet<FileAttribute> preserveStatus =
EnumSet.noneOf(FileAttribute.class);
/**
* Checks if the input attribute should be preserved or not
*
* @param attribute - Attribute to check
* @return boolean true if attribute should be preserved, false otherwise
*/
private boolean shouldPreserve(FileAttribute attribute) {
return preserveStatus.contains(attribute);
}
/**
* Add file attributes that need to be preserved. This method may be
* called multiple times to add attributes.
*
* @param fileAttribute - Attribute to add, one at a time
*/
protected void preserve(FileAttribute fileAttribute) {
for (FileAttribute attribute : preserveStatus) {
if (attribute.equals(fileAttribute)) {
return;
}
}
preserveStatus.add(fileAttribute);
}
/**
* The last arg is expected to be a local path, if only one argument is
* given then the destination will be the current directory
* @param args is the list of arguments
* @throws IOException raised on errors performing I/O.
*/
protected void getLocalDestination(LinkedList<String> args)
throws IOException {
String pathString = (args.size() < 2) ? Path.CUR_DIR : args.removeLast();
try {
dst = new PathData(new URI(pathString), getConf());
} catch (URISyntaxException e) {
if (Path.WINDOWS) {
// Unlike URI, PathData knows how to parse Windows drive-letter paths.
dst = new PathData(pathString, getConf());
} else {
throw new IOException("unexpected URISyntaxException", e);
}
}
}
/**
* The last arg is expected to be a remote path, if only one argument is
* given then the destination will be the remote user's directory
* @param args is the list of arguments
* @throws PathIOException if path doesn't exist or matches too many times
*/
protected void getRemoteDestination(LinkedList<String> args)
throws IOException {
if (args.size() < 2) {
dst = new PathData(Path.CUR_DIR, getConf());
} else {
String pathString = args.removeLast();
// if the path is a glob, then it must match one and only one path
PathData[] items = PathData.expandAsGlob(pathString, getConf());
switch (items.length) {
case 0:
throw new PathNotFoundException(pathString);
case 1:
dst = items[0];
break;
default:
throw new PathIOException(pathString, "Too many matches");
}
}
}
@Override
protected void processArguments(LinkedList<PathData> args)
throws IOException {
// if more than one arg, the destination must be a directory
// if one arg, the dst must not exist or must be a directory
if (args.size() > 1) {
if (!dst.exists) {
throw new PathNotFoundException(dst.toString());
}
if (!dst.stat.isDirectory()) {
throw new PathIsNotDirectoryException(dst.toString());
}
} else if (dst.exists) {
if (!dst.stat.isDirectory() && !overwrite) {
LOG.debug("Destination file exists: {}", dst.stat);
throw new PathExistsException(dst.toString());
}
} else if (!dst.parentExists()) {
throw new PathNotFoundException(dst.toString())
.withFullyQualifiedPath(dst.path.toUri().toString());
}
super.processArguments(args);
}
@Override
protected void processPathArgument(PathData src)
throws IOException {
if (src.stat.isDirectory() && src.fs.equals(dst.fs)) {
PathData target = getTargetPath(src);
String srcPath = src.fs.makeQualified(src.path).toString();
String dstPath = dst.fs.makeQualified(target.path).toString();
if (dstPath.equals(srcPath)) {
PathIOException e = new PathIOException(src.toString(),
"are identical");
e.setTargetPath(dstPath.toString());
throw e;
}
// When a path is normalized, all trailing slashes are removed
// except for the root
if(!srcPath.endsWith(Path.SEPARATOR)) {
srcPath += Path.SEPARATOR;
}
if(dstPath.startsWith(srcPath)) {
PathIOException e = new PathIOException(src.toString(),
"is a subdirectory of itself");
e.setTargetPath(target.toString());
throw e;
}
}
super.processPathArgument(src);
}
@Override
protected void processPath(PathData src) throws IOException {
processPath(src, getTargetPath(src));
}
/**
* Called with a source and target destination pair
* @param src for the operation
* @param dst for the operation
* @throws IOException if anything goes wrong
*/
protected void processPath(PathData src, PathData dst) throws IOException {
if (src.stat.isSymlink()) {
// TODO: remove when FileContext is supported, this needs to either
// copy the symlink or deref the symlink
throw new PathOperationException(src.toString());
} else if (src.stat.isFile()) {
copyFileToTarget(src, dst);
} else if (src.stat.isDirectory() && !isRecursive()) {
throw new PathIsDirectoryException(src.toString());
}
}
@Override
protected void recursePath(PathData src) throws IOException {
PathData savedDst = dst;
try {
// modify dst as we descend to append the basename of the
// current directory being processed
dst = getTargetPath(src);
final boolean preserveRawXattrs =
checkPathsForReservedRaw(src.path, dst.path);
if (dst.exists) {
if (!dst.stat.isDirectory()) {
throw new PathIsNotDirectoryException(dst.toString());
}
} else {
if (!dst.fs.mkdirs(dst.path)) {
// too bad we have no clue what failed
PathIOException e = new PathIOException(dst.toString());
e.setOperation("mkdir");
throw e;
}
dst.refreshStatus(); // need to update stat to know it exists now
}
super.recursePath(src);
if (dst.stat.isDirectory()) {
preserveAttributes(src, dst, preserveRawXattrs);
}
} finally {
dst = savedDst;
}
}
protected PathData getTargetPath(PathData src) throws IOException {
PathData target;
// on the first loop, the dst may be directory or a file, so only create
// a child path if dst is a dir; after recursion, it's always a dir
if ((getDepth() > 0) || (dst.exists && dst.stat.isDirectory())) {
target = dst.getPathDataForChild(src);
} else if (dst.representsDirectory()) { // see if path looks like a dir
target = dst.getPathDataForChild(src);
} else {
target = dst;
}
return target;
}
/**
* Copies the source file to the target.
* @param src item to copy
* @param target where to copy the item
* @throws IOException if copy fails
*/
protected void copyFileToTarget(PathData src, PathData target)
throws IOException {
final boolean preserveRawXattrs =
checkPathsForReservedRaw(src.path, target.path);
src.fs.setVerifyChecksum(verifyChecksum);
InputStream in = null;
try {
in = awaitFuture(src.fs.openFile(src.path)
.withFileStatus(src.stat)
.opt(FS_OPTION_OPENFILE_READ_POLICY,
FS_OPTION_OPENFILE_READ_POLICY_WHOLE_FILE)
.build());
copyStreamToTarget(in, target);
preserveAttributes(src, target, preserveRawXattrs);
} finally {
IOUtils.closeStream(in);
}
}
/**
* Check the source and target paths to ensure that they are either both in
* /.reserved/raw or neither in /.reserved/raw. If neither src nor target are
* in /.reserved/raw, then return false, indicating not to preserve raw.*
* xattrs. If both src/target are in /.reserved/raw, then return true,
* indicating raw.* xattrs should be preserved. If only one of src/target is
* in /.reserved/raw then throw an exception.
*
* @param src The source path to check. This should be a fully-qualified
* path, not relative.
* @param target The target path to check. This should be a fully-qualified
* path, not relative.
* @return true if raw.* xattrs should be preserved.
* @throws PathOperationException is only one of src/target are in
* /.reserved/raw.
*/
private boolean checkPathsForReservedRaw(Path src, Path target)
throws PathOperationException {
final boolean srcIsRR = Path.getPathWithoutSchemeAndAuthority(src).
toString().startsWith(RESERVED_RAW);
final boolean dstIsRR = Path.getPathWithoutSchemeAndAuthority(target).
toString().startsWith(RESERVED_RAW);
boolean preserveRawXattrs = false;
if (srcIsRR && !dstIsRR) {
final String s = "' copy from '" + RESERVED_RAW + "' to non '" +
RESERVED_RAW + "'. Either both source and target must be in '" +
RESERVED_RAW + "' or neither.";
throw new PathOperationException("'" + src.toString() + s);
} else if (!srcIsRR && dstIsRR) {
final String s = "' copy from non '" + RESERVED_RAW +"' to '" +
RESERVED_RAW + "'. Either both source and target must be in '" +
RESERVED_RAW + "' or neither.";
throw new PathOperationException("'" + dst.toString() + s);
} else if (srcIsRR && dstIsRR) {
preserveRawXattrs = true;
}
return preserveRawXattrs;
}
/**
* If direct write is disabled ,copies the stream contents to a temporary
* file "target._COPYING_". If the copy is successful, the temporary file
* will be renamed to the real path, else the temporary file will be deleted.
* if direct write is enabled , then creation temporary file is skipped.
*
* @param in the input stream for the copy
* @param target where to store the contents of the stream
* @throws IOException if copy fails
*/
protected void copyStreamToTarget(InputStream in, PathData target)
throws IOException {
if (target.exists && (target.stat.isDirectory() || !overwrite)) {
throw new PathExistsException(target.toString());
}
TargetFileSystem targetFs = new TargetFileSystem(target.fs);
try {
PathData tempTarget = direct ? target : target.suffix("._COPYING_");
targetFs.setWriteChecksum(writeChecksum);
targetFs.writeStreamToFile(in, tempTarget, lazyPersist, direct);
if (!direct) {
targetFs.rename(tempTarget, target);
}
} finally {
targetFs.close(); // last ditch effort to ensure temp file is removed
}
}
/**
* Preserve the attributes of the source to the target.
* The method calls {@link #shouldPreserve(FileAttribute)} to check what
* attribute to preserve.
* @param src source to preserve
* @param target where to preserve attributes
* @param preserveRawXAttrs true if raw.* xattrs should be preserved
* @throws IOException if fails to preserve attributes
*/
protected void preserveAttributes(PathData src, PathData target,
boolean preserveRawXAttrs)
throws IOException {
if (shouldPreserve(FileAttribute.TIMESTAMPS)) {
target.fs.setTimes(
target.path,
src.stat.getModificationTime(),
src.stat.getAccessTime());
}
if (shouldPreserve(FileAttribute.OWNERSHIP)) {
target.fs.setOwner(
target.path,
src.stat.getOwner(),
src.stat.getGroup());
}
if (shouldPreserve(FileAttribute.PERMISSION) ||
shouldPreserve(FileAttribute.ACL)) {
target.fs.setPermission(
target.path,
src.stat.getPermission());
}
if (shouldPreserve(FileAttribute.ACL)) {
if (src.stat.hasAcl()) {
FsPermission perm = src.stat.getPermission();
List<AclEntry> srcEntries =
src.fs.getAclStatus(src.path).getEntries();
List<AclEntry> srcFullEntries =
AclUtil.getAclFromPermAndEntries(perm, srcEntries);
target.fs.setAcl(target.path, srcFullEntries);
}
}
final boolean preserveXAttrs = shouldPreserve(FileAttribute.XATTR);
if (preserveXAttrs || preserveRawXAttrs) {
Map<String, byte[]> srcXAttrs = src.fs.getXAttrs(src.path);
if (srcXAttrs != null) {
Iterator<Entry<String, byte[]>> iter = srcXAttrs.entrySet().iterator();
while (iter.hasNext()) {
Entry<String, byte[]> entry = iter.next();
final String xattrName = entry.getKey();
if (xattrName.startsWith(RAW) || preserveXAttrs) {
target.fs.setXAttr(target.path, entry.getKey(), entry.getValue());
}
}
}
}
}
// Helper filter filesystem that registers created files as temp files to
// be deleted on exit unless successfully renamed
private static | FileAttribute |
java | apache__flink | flink-table/flink-sql-jdbc-driver/src/test/java/org/apache/flink/table/jdbc/FlinkDataSourceTest.java | {
"start": 1044,
"end": 1493
} | class ____ extends FlinkJdbcDriverTestBase {
@Test
public void testDataSource() throws Exception {
FlinkDataSource dataSource = new FlinkDataSource(getDriverUri().getURL(), new Properties());
try (Connection connection = dataSource.getConnection()) {
assertEquals("default_catalog", connection.getCatalog());
assertEquals("default_database", connection.getSchema());
}
}
}
| FlinkDataSourceTest |
java | apache__rocketmq | remoting/src/main/java/org/apache/rocketmq/remoting/protocol/body/PopProcessQueueInfo.java | {
"start": 862,
"end": 1712
} | class ____ {
private int waitAckCount;
private boolean droped;
private long lastPopTimestamp;
public int getWaitAckCount() {
return waitAckCount;
}
public void setWaitAckCount(int waitAckCount) {
this.waitAckCount = waitAckCount;
}
public boolean isDroped() {
return droped;
}
public void setDroped(boolean droped) {
this.droped = droped;
}
public long getLastPopTimestamp() {
return lastPopTimestamp;
}
public void setLastPopTimestamp(long lastPopTimestamp) {
this.lastPopTimestamp = lastPopTimestamp;
}
@Override
public String toString() {
return "PopProcessQueueInfo [waitAckCount:" + waitAckCount +
", droped:" + droped + ", lastPopTimestamp:" + lastPopTimestamp + "]";
}
}
| PopProcessQueueInfo |
java | apache__flink | flink-core/src/test/java/org/apache/flink/core/fs/LimitedConnectionsFileSystemDelegationTest.java | {
"start": 1634,
"end": 7587
} | class ____ {
@TempDir public File tempFolder;
@Test
@SuppressWarnings("deprecation")
void testDelegateFsMethods() throws IOException {
final FileSystem fs = mock(FileSystem.class);
when(fs.open(any(Path.class))).thenReturn(mock(FSDataInputStream.class));
when(fs.open(any(Path.class), anyInt())).thenReturn(mock(FSDataInputStream.class));
when(fs.create(any(Path.class), anyBoolean())).thenReturn(mock(FSDataOutputStream.class));
when(fs.create(any(Path.class), any(WriteMode.class)))
.thenReturn(mock(FSDataOutputStream.class));
when(fs.create(any(Path.class), anyBoolean(), anyInt(), anyShort(), anyLong()))
.thenReturn(mock(FSDataOutputStream.class));
final LimitedConnectionsFileSystem lfs = new LimitedConnectionsFileSystem(fs, 1000);
final Random rnd = new Random();
lfs.isDistributedFS();
verify(fs).isDistributedFS();
lfs.getWorkingDirectory();
verify(fs).isDistributedFS();
lfs.getHomeDirectory();
verify(fs).getHomeDirectory();
lfs.getUri();
verify(fs).getUri();
{
Path path = mock(Path.class);
lfs.getFileStatus(path);
verify(fs).getFileStatus(path);
}
{
FileStatus path = mock(FileStatus.class);
int pos = rnd.nextInt();
int len = rnd.nextInt();
lfs.getFileBlockLocations(path, pos, len);
verify(fs).getFileBlockLocations(path, pos, len);
}
{
Path path = mock(Path.class);
int bufferSize = rnd.nextInt();
lfs.open(path, bufferSize);
verify(fs).open(path, bufferSize);
}
{
Path path = mock(Path.class);
lfs.open(path);
verify(fs).open(path);
}
lfs.getDefaultBlockSize();
verify(fs).getDefaultBlockSize();
{
Path path = mock(Path.class);
lfs.listStatus(path);
verify(fs).listStatus(path);
}
{
Path path = mock(Path.class);
lfs.exists(path);
verify(fs).exists(path);
}
{
Path path = mock(Path.class);
boolean recursive = rnd.nextBoolean();
lfs.delete(path, recursive);
verify(fs).delete(path, recursive);
}
{
Path path = mock(Path.class);
lfs.mkdirs(path);
verify(fs).mkdirs(path);
}
{
Path path = mock(Path.class);
boolean overwrite = rnd.nextBoolean();
int bufferSize = rnd.nextInt();
short replication = (short) rnd.nextInt();
long blockSize = rnd.nextInt();
lfs.create(path, overwrite, bufferSize, replication, blockSize);
verify(fs).create(path, overwrite, bufferSize, replication, blockSize);
}
{
Path path = mock(Path.class);
WriteMode mode = rnd.nextBoolean() ? WriteMode.OVERWRITE : WriteMode.NO_OVERWRITE;
lfs.create(path, mode);
verify(fs).create(path, mode);
}
{
Path path1 = mock(Path.class);
Path path2 = mock(Path.class);
lfs.rename(path1, path2);
verify(fs).rename(path1, path2);
}
}
@Test
void testDelegateOutStreamMethods() throws IOException {
// mock the output stream
final FSDataOutputStream mockOut = mock(FSDataOutputStream.class);
final long outPos = 46651L;
when(mockOut.getPos()).thenReturn(outPos);
final FileSystem fs = mock(FileSystem.class);
when(fs.create(any(Path.class), any(WriteMode.class))).thenReturn(mockOut);
final LimitedConnectionsFileSystem lfs = new LimitedConnectionsFileSystem(fs, 100);
final FSDataOutputStream out = lfs.create(mock(Path.class), WriteMode.OVERWRITE);
// validate the output stream
out.write(77);
verify(mockOut).write(77);
{
byte[] bytes = new byte[1786];
out.write(bytes, 100, 111);
verify(mockOut).write(bytes, 100, 111);
}
assertThat(out.getPos()).isEqualTo(outPos);
out.flush();
verify(mockOut).flush();
out.sync();
verify(mockOut).sync();
out.close();
verify(mockOut).close();
}
@Test
void testDelegateInStreamMethods() throws IOException {
// mock the input stream
final FSDataInputStream mockIn = mock(FSDataInputStream.class);
final int value = 93;
final int bytesRead = 11;
final long inPos = 93;
final int available = 17;
final boolean markSupported = true;
when(mockIn.read()).thenReturn(value);
when(mockIn.read(any(byte[].class), anyInt(), anyInt())).thenReturn(11);
when(mockIn.getPos()).thenReturn(inPos);
when(mockIn.available()).thenReturn(available);
when(mockIn.markSupported()).thenReturn(markSupported);
final FileSystem fs = mock(FileSystem.class);
when(fs.open(any(Path.class))).thenReturn(mockIn);
final LimitedConnectionsFileSystem lfs = new LimitedConnectionsFileSystem(fs, 100);
final FSDataInputStream in = lfs.open(mock(Path.class));
// validate the input stream
assertThat(in.read()).isEqualTo(value);
assertThat(in.read(new byte[11], 2, 5)).isEqualTo(bytesRead);
assertThat(in.getPos()).isEqualTo(inPos);
in.seek(17876);
verify(mockIn).seek(17876);
assertThat(in.available()).isEqualTo(available);
assertThat(in.markSupported()).isEqualTo(markSupported);
in.mark(9876);
verify(mockIn).mark(9876);
in.close();
verify(mockIn).close();
}
}
| LimitedConnectionsFileSystemDelegationTest |
java | elastic__elasticsearch | x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/gen/pipeline/Pipe.java | {
"start": 1224,
"end": 2581
} | class ____ extends Node<Pipe> implements FieldExtraction, Resolvable {
private final Expression expression;
public Pipe(Source source, Expression expression, List<Pipe> children) {
super(source, children);
this.expression = expression;
}
public Expression expression() {
return expression;
}
@Override
public boolean resolved() {
return Resolvables.resolved(children());
}
@Override
public void collectFields(QlSourceBuilder sourceBuilder) {
children().forEach(c -> c.collectFields(sourceBuilder));
}
@Override
public boolean supportedByAggsOnlyQuery() {
return children().stream().anyMatch(Pipe::supportedByAggsOnlyQuery);
}
public abstract Processor asProcessor();
/**
* Resolve {@link Attribute}s which are unprocessable into
* {@link Pipe}s that are.
*
* @return {@code this} if the resolution doesn't change the
* definition, a new {@link Pipe} otherwise
*/
public Pipe resolveAttributes(AttributeResolver resolver) {
List<Pipe> newPipes = new ArrayList<>(children().size());
for (Pipe p : children()) {
newPipes.add(p.resolveAttributes(resolver));
}
return children().equals(newPipes) ? this : replaceChildrenSameSize(newPipes);
}
public | Pipe |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/event/monitor/spi/EventMonitor.java | {
"start": 904,
"end": 1277
} | interface ____ implemented by Hibernate JFR to report
* events to Java Flight Recorder.
* <p>
* Note that event reporting is different to aggregate <em>metrics</em>,
* which Hibernate exposes via the {@link org.hibernate.stat.Statistics}
* interface.
*
* @apiNote This an incubating API, subject to change.
*
* @since 7.0
*/
@JavaServiceLoadable
@Incubating
public | is |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/Order.java | {
"start": 1010,
"end": 3614
} | class ____ extends Expression {
public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Order", Order::new);
private final Expression child;
private final OrderDirection direction;
private final NullsPosition nulls;
public Order(Source source, Expression child, OrderDirection direction, NullsPosition nulls) {
super(source, List.of(child));
this.child = child;
this.direction = direction;
this.nulls = nulls == null ? NullsPosition.ANY : nulls;
}
public Order(StreamInput in) throws IOException {
this(
Source.readFrom((PlanStreamInput) in),
in.readNamedWriteable(Expression.class),
in.readEnum(OrderDirection.class),
in.readEnum(NullsPosition.class)
);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
Source.EMPTY.writeTo(out);
out.writeNamedWriteable(child);
out.writeEnum(direction);
out.writeEnum(nulls);
}
@Override
public String getWriteableName() {
return ENTRY.name;
}
@Override
protected TypeResolution resolveType() {
if (DataType.isString(child.dataType())) {
return TypeResolution.TYPE_RESOLVED;
}
return isExact(child, "ORDER BY cannot be applied to field of data type [{}]: {}");
}
@Override
public DataType dataType() {
return child.dataType();
}
@Override
public Order replaceChildren(List<Expression> newChildren) {
return new Order(source(), newChildren.get(0), direction, nulls);
}
@Override
protected NodeInfo<Order> info() {
return NodeInfo.create(this, Order::new, child, direction, nulls);
}
@Override
public Nullability nullable() {
return Nullability.FALSE;
}
public Expression child() {
return child;
}
public OrderDirection direction() {
return direction;
}
public NullsPosition nullsPosition() {
return nulls;
}
@Override
public int hashCode() {
return Objects.hash(child, direction, nulls);
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
Order other = (Order) obj;
return Objects.equals(direction, other.direction) && Objects.equals(nulls, other.nulls) && Objects.equals(child, other.child);
}
public | Order |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/jsontype/CustomTypeIdResolverTest.java | {
"start": 867,
"end": 1039
} | class ____ extends CustomBean {
public int x;
public CustomBeanImpl() { }
public CustomBeanImpl(int x) { this.x = x; }
}
static | CustomBeanImpl |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/annotations/NamedNativeQueries.java | {
"start": 639,
"end": 767
} | interface ____ {
/**
* The grouping of Hibernate named native SQL queries.
*/
NamedNativeQuery[] value();
}
| NamedNativeQueries |
java | apache__kafka | streams/src/test/java/org/apache/kafka/streams/kstream/SessionWindowedSerializerTest.java | {
"start": 1465,
"end": 5252
} | class ____ {
private final SessionWindowedSerializer<?> sessionWindowedSerializer = new SessionWindowedSerializer<>(Serdes.String().serializer());
private final Map<String, String> props = new HashMap<>();
@Test
public void testSessionWindowedSerializerConstructor() {
sessionWindowedSerializer.configure(props, true);
final Serializer<?> inner = sessionWindowedSerializer.innerSerializer();
assertNotNull(inner, "Inner serializer should be not null");
assertInstanceOf(StringSerializer.class, inner, "Inner serializer type should be StringSerializer");
}
@Deprecated
@Test
public void shouldSetSerializerThroughWindowedInnerClassSerdeConfig() {
props.put(StreamsConfig.WINDOWED_INNER_CLASS_SERDE, Serdes.ByteArraySerde.class.getName());
try (final SessionWindowedSerializer<?> serializer = new SessionWindowedSerializer<>()) {
serializer.configure(props, false);
assertInstanceOf(ByteArraySerializer.class, serializer.innerSerializer());
}
}
@Test
public void shouldSetSerializerThroughWindowedInnerSerializerClassConfig() {
props.put(SessionWindowedSerializer.WINDOWED_INNER_SERIALIZER_CLASS, Serdes.ByteArraySerde.class.getName());
try (final SessionWindowedSerializer<?> serializer = new SessionWindowedSerializer<>()) {
serializer.configure(props, false);
assertInstanceOf(ByteArraySerializer.class, serializer.innerSerializer());
}
}
@Deprecated
@Test
public void shouldIgnoreWindowedInnerClassSerdeConfigIfWindowedInnerSerializerClassConfigIsSet() {
props.put(SessionWindowedSerializer.WINDOWED_INNER_SERIALIZER_CLASS, Serdes.ByteArraySerde.class.getName());
props.put(StreamsConfig.WINDOWED_INNER_CLASS_SERDE, "some.non.existent.class");
try (final SessionWindowedSerializer<?> serializer = new SessionWindowedSerializer<>()) {
serializer.configure(props, false);
assertInstanceOf(ByteArraySerializer.class, serializer.innerSerializer());
}
}
@Test
public void shouldThrowErrorIfWindowedInnerClassSerdeAndWindowedInnerSerializerClassAreNotSet() {
try (final SessionWindowedSerializer<?> serializer = new SessionWindowedSerializer<>()) {
assertThrows(IllegalArgumentException.class, () -> serializer.configure(props, false));
}
}
@Deprecated
@Test
public void shouldThrowErrorIfSerializersConflictInConstructorAndWindowedInnerClassSerdeConfig() {
props.put(StreamsConfig.WINDOWED_INNER_CLASS_SERDE, Serdes.ByteArraySerde.class.getName());
assertThrows(IllegalArgumentException.class, () -> sessionWindowedSerializer.configure(props, false));
}
@Test
public void shouldThrowErrorIfSerializersConflictInConstructorAndWindowedInnerSerializerClassConfig() {
props.put(SessionWindowedSerializer.WINDOWED_INNER_SERIALIZER_CLASS, Serdes.ByteArraySerde.class.getName());
assertThrows(IllegalArgumentException.class, () -> sessionWindowedSerializer.configure(props, false));
}
@Deprecated
@Test
public void shouldThrowConfigExceptionWhenInvalidWindowedInnerClassSerdeSupplied() {
props.put(StreamsConfig.WINDOWED_INNER_CLASS_SERDE, "some.non.existent.class");
assertThrows(ConfigException.class, () -> sessionWindowedSerializer.configure(props, false));
}
@Test
public void shouldThrowConfigExceptionWhenInvalidWindowedInnerSerializerClassSupplied() {
props.put(SessionWindowedSerializer.WINDOWED_INNER_SERIALIZER_CLASS, "some.non.existent.class");
assertThrows(ConfigException.class, () -> sessionWindowedSerializer.configure(props, false));
}
}
| SessionWindowedSerializerTest |
java | spring-projects__spring-framework | spring-core/src/test/java/org/springframework/core/annotation/MergedAnnotationsTests.java | {
"start": 140961,
"end": 141058
} | interface ____ {
}
@Retention(RetentionPolicy.RUNTIME)
@DefaultOverrideMeta
@ | DefaultOverrideMeta |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/atomic/integer/AtomicIntegerAssert_customRepresentation_Test.java | {
"start": 943,
"end": 1414
} | class ____ {
@Test
void should_honor_customRepresentation() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> assertThat(new AtomicInteger(0)).withRepresentation(new CustomRepresentation())
.isEqualTo(-1))
.withMessageContaining("@0@");
}
private | AtomicIntegerAssert_customRepresentation_Test |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/CastLongToDoubleEvaluator.java | {
"start": 1184,
"end": 3933
} | class ____ implements EvalOperator.ExpressionEvaluator {
private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(CastLongToDoubleEvaluator.class);
private final Source source;
private final EvalOperator.ExpressionEvaluator v;
private final DriverContext driverContext;
private Warnings warnings;
public CastLongToDoubleEvaluator(Source source, EvalOperator.ExpressionEvaluator v,
DriverContext driverContext) {
this.source = source;
this.v = v;
this.driverContext = driverContext;
}
@Override
public Block eval(Page page) {
try (LongBlock vBlock = (LongBlock) v.eval(page)) {
LongVector vVector = vBlock.asVector();
if (vVector == null) {
return eval(page.getPositionCount(), vBlock);
}
return eval(page.getPositionCount(), vVector).asBlock();
}
}
@Override
public long baseRamBytesUsed() {
long baseRamBytesUsed = BASE_RAM_BYTES_USED;
baseRamBytesUsed += v.baseRamBytesUsed();
return baseRamBytesUsed;
}
public DoubleBlock eval(int positionCount, LongBlock vBlock) {
try(DoubleBlock.Builder result = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) {
position: for (int p = 0; p < positionCount; p++) {
switch (vBlock.getValueCount(p)) {
case 0:
result.appendNull();
continue position;
case 1:
break;
default:
warnings().registerException(new IllegalArgumentException("single-value function encountered multi-value"));
result.appendNull();
continue position;
}
long v = vBlock.getLong(vBlock.getFirstValueIndex(p));
result.appendDouble(Cast.castLongToDouble(v));
}
return result.build();
}
}
public DoubleVector eval(int positionCount, LongVector vVector) {
try(DoubleVector.FixedBuilder result = driverContext.blockFactory().newDoubleVectorFixedBuilder(positionCount)) {
position: for (int p = 0; p < positionCount; p++) {
long v = vVector.getLong(p);
result.appendDouble(p, Cast.castLongToDouble(v));
}
return result.build();
}
}
@Override
public String toString() {
return "CastLongToDoubleEvaluator[" + "v=" + v + "]";
}
@Override
public void close() {
Releasables.closeExpectNoException(v);
}
private Warnings warnings() {
if (warnings == null) {
this.warnings = Warnings.createWarnings(
driverContext.warningsMode(),
source.source().getLineNumber(),
source.source().getColumnNumber(),
source.text()
);
}
return warnings;
}
static | CastLongToDoubleEvaluator |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest/deployment/src/test/java/io/quarkus/resteasy/reactive/server/test/headers/VertxHeadersTest.java | {
"start": 1512,
"end": 1870
} | class ____ implements ContainerResponseFilter {
@Override
public void filter(final ContainerRequestContext requestContext, final ContainerResponseContext responseContext)
throws IOException {
responseContext.getHeaders().add(HttpHeaders.VARY, "Prefer");
}
}
@Path("test")
public static | JaxRsFilter |
java | reactor__reactor-core | reactor-core/src/test/java/reactor/core/publisher/MonoSubscribeOnTest.java | {
"start": 1419,
"end": 7777
} | class ____ {
@RegisterExtension
public AutoDisposingExtension afterTest = new AutoDisposingExtension();
/*@Test
public void constructors() {
ConstructorTestBuilder ctb = new ConstructorTestBuilder(FluxPublishOn.class);
ctb.addRef("source", Flux.never());
ctb.addRef("executor", Schedulers.single());
ctb.addRef("schedulerFactory", (Callable<? extends Consumer<Runnable>>)() -> r -> { });
ctb.test();
}*/
@Test
public void classic() {
AssertSubscriber<Integer> ts = AssertSubscriber.create();
Mono.fromSupplier(() -> 1)
.subscribeOn(Schedulers.fromExecutorService(ForkJoinPool.commonPool()))
.subscribe(ts);
ts.await(Duration.ofSeconds(5));
ts.assertValueCount(1)
.assertNoError()
.assertComplete();
}
@Test
public void classicBackpressured() throws Exception {
AssertSubscriber<Integer> ts = AssertSubscriber.create(0);
Mono.fromCallable(() -> 1)
.log()
.subscribeOn(Schedulers.fromExecutorService(ForkJoinPool.commonPool()))
.subscribe(ts);
Thread.sleep(100);
ts.assertNoValues()
.assertNoError()
.assertNotComplete();
ts.request(500);
Thread.sleep(2000);
ts.assertValueCount(1)
.assertNoError()
.assertComplete();
ts.request(500);
}
@Test
public void classicJust() {
AssertSubscriber<Integer> ts = AssertSubscriber.create();
Mono.just(1)
.subscribeOn(Schedulers.fromExecutorService(ForkJoinPool.commonPool()))
.subscribe(ts);
ts.await(Duration.ofSeconds(5));
ts.assertValues(1)
.assertNoError()
.assertComplete();
}
@Test
public void classicJustBackpressured() throws Exception {
AssertSubscriber<Integer> ts = AssertSubscriber.create(0);
Mono.just(1)
.subscribeOn(Schedulers.fromExecutorService(ForkJoinPool.commonPool()))
.subscribe(ts);
Thread.sleep(100);
ts.assertNoValues()
.assertNoError()
.assertNotComplete();
ts.request(500);
ts.await(Duration.ofSeconds(5));
ts.assertValues(1)
.assertNoError()
.assertComplete();
}
@Test
public void classicEmpty() {
AssertSubscriber<Integer> ts = AssertSubscriber.create();
Mono.<Integer>empty().subscribeOn(Schedulers.fromExecutorService(ForkJoinPool.commonPool()))
.subscribe(ts);
ts.await(Duration.ofSeconds(5));
ts.assertNoValues()
.assertNoError()
.assertComplete();
}
@Test
public void classicEmptyBackpressured() throws Exception {
AssertSubscriber<Integer> ts = AssertSubscriber.create(0);
Mono.<Integer>empty().subscribeOn(Schedulers.fromExecutorService(ForkJoinPool.commonPool()))
.subscribe(ts);
ts.await(Duration.ofSeconds(5));
ts.assertNoValues()
.assertNoError()
.assertComplete();
}
@Test
public void classicWithTimeout() {
final Scheduler timeoutScheduler = afterTest.autoDispose(Schedulers.newBoundedElastic(4, 100, "timeout"));
StepVerifier.create(
Mono.fromCallable(() -> {
try {
TimeUnit.SECONDS.sleep(2L);
}
catch (InterruptedException ignore) {
}
return 0;
})
.subscribeOn(timeoutScheduler)
.timeout(Duration.ofMillis(100L))
.onErrorResume(t -> Mono.fromCallable(() -> 1)),
0)
.expectSubscription()
.thenRequest(1)
.expectNext(1)
.expectComplete()
.verify(Duration.ofMillis(500));
}
@Test
public void callableEvaluatedTheRightTime() {
AtomicInteger count = new AtomicInteger();
Mono<Integer> p = Mono.fromCallable(count::incrementAndGet)
.subscribeOn(Schedulers.fromExecutorService(ForkJoinPool.commonPool()));
assertThat(count).hasValue(0);
p.subscribeWith(AssertSubscriber.create())
.await();
assertThat(count).hasValue(1);
}
@Test
public void scanOperator() {
MonoSubscribeOn<String> test = new MonoSubscribeOn<>(Mono.empty(), Schedulers.immediate());
assertThat(test.scan(Scannable.Attr.RUN_ON)).isSameAs(Schedulers.immediate());
assertThat(test.scan(Scannable.Attr.RUN_STYLE)).isSameAs(Scannable.Attr.RunStyle.ASYNC);
}
@Test
public void scanSubscribeOnSubscriber() {
Scheduler.Worker worker = Schedulers.single().createWorker();
try {
final Flux<String> source = Flux.just("foo");
CoreSubscriber<String>
actual = new LambdaMonoSubscriber<>(null, e -> {}, null, null);
MonoSubscribeOn.SubscribeOnSubscriber<String> test = new MonoSubscribeOn.SubscribeOnSubscriber<>(
source, actual, worker);
Subscription parent = Operators.emptySubscription();
test.onSubscribe(parent);
test.requested = 3L;
assertThat(test.scan(Scannable.Attr.REQUESTED_FROM_DOWNSTREAM)).isEqualTo(3L);
assertThat(test.scan(Scannable.Attr.RUN_ON)).isSameAs(worker);
assertThat(test.scan(Scannable.Attr.RUN_STYLE)).isSameAs(Scannable.Attr.RunStyle.ASYNC);
assertThat(test.scan(Scannable.Attr.PARENT)).isSameAs(parent);
assertThat(test.scan(Scannable.Attr.ACTUAL)).isSameAs(actual);
assertThat(test.scan(Scannable.Attr.CANCELLED)).isFalse();
test.cancel();
assertThat(test.scan(Scannable.Attr.CANCELLED)).isTrue();
}
finally {
worker.dispose();
}
}
@Test
public void error() {
StepVerifier.create(Mono.error(new RuntimeException("forced failure"))
.subscribeOn(Schedulers.single()))
.verifyErrorMessage("forced failure");
}
@Test
public void errorHide() {
StepVerifier.create(Mono.error(new RuntimeException("forced failure"))
.hide()
.subscribeOn(Schedulers.single()))
.verifyErrorMessage("forced failure");
}
@Test
public void disposeWorkerIfCancelledBeforeOnSubscribe() {
AtomicInteger disposeCount = new AtomicInteger();
Scheduler.Worker countingWorker = new Scheduler.Worker() {
@Override
public Disposable schedule(Runnable task) {
return Disposables.disposed();
}
@Override
public void dispose() {
disposeCount.incrementAndGet();
}
};
MonoSubscribeOn.SubscribeOnSubscriber<Integer> sosub =
new MonoSubscribeOn.SubscribeOnSubscriber<>(ignoredSubscribe -> {}, null, countingWorker);
for (int i = 1; i <= 10_000; i++) {
RaceTestUtils.race(sosub::cancel, () -> sosub.onSubscribe(Operators.emptySubscription()));
assertThat(disposeCount).as("idle/disposed in round %d", i).hasValue(i);
//reset
sosub.s = null;
}
}
}
| MonoSubscribeOnTest |
java | spring-projects__spring-boot | module/spring-boot-http-client/src/test/java/org/springframework/boot/http/client/reactive/JettyClientHttpConnectorBuilderTests.java | {
"start": 1343,
"end": 3639
} | class ____ extends AbstractClientHttpConnectorBuilderTests<JettyClientHttpConnector> {
JettyClientHttpConnectorBuilderTests() {
super(JettyClientHttpConnector.class, ClientHttpConnectorBuilder.jetty());
}
@Test
void withCustomizers() {
TestCustomizer<HttpClient> httpClientCustomizer1 = new TestCustomizer<>();
TestCustomizer<HttpClient> httpClientCustomizer2 = new TestCustomizer<>();
TestCustomizer<HttpClientTransport> httpClientTransportCustomizer = new TestCustomizer<>();
TestCustomizer<ClientConnector> clientConnectorCustomizerCustomizer = new TestCustomizer<>();
ClientHttpConnectorBuilder.jetty()
.withHttpClientCustomizer(httpClientCustomizer1)
.withHttpClientCustomizer(httpClientCustomizer2)
.withHttpClientTransportCustomizer(httpClientTransportCustomizer)
.withClientConnectorCustomizerCustomizer(clientConnectorCustomizerCustomizer)
.build();
httpClientCustomizer1.assertCalled();
httpClientCustomizer2.assertCalled();
httpClientTransportCustomizer.assertCalled();
clientConnectorCustomizerCustomizer.assertCalled();
}
@Test
void with() {
TestCustomizer<HttpClient> customizer = new TestCustomizer<>();
ClientHttpConnectorBuilder.jetty().with((builder) -> builder.withHttpClientCustomizer(customizer)).build();
customizer.assertCalled();
}
@Test
void withHttpClientTransportFactory() {
JettyClientHttpConnector connector = ClientHttpConnectorBuilder.jetty()
.withHttpClientTransportFactory(TestHttpClientTransport::new)
.build();
assertThat(connector).extracting("httpClient")
.extracting("transport")
.isInstanceOf(TestHttpClientTransport.class);
}
@Override
protected long connectTimeout(JettyClientHttpConnector connector) {
HttpClient httpClient = (HttpClient) ReflectionTestUtils.getField(connector, "httpClient");
assertThat(httpClient).isNotNull();
return httpClient.getConnectTimeout();
}
@Override
protected long readTimeout(JettyClientHttpConnector connector) {
HttpClient httpClient = (HttpClient) ReflectionTestUtils.getField(connector, "httpClient");
assertThat(httpClient).isNotNull();
Object field = ReflectionTestUtils.getField(httpClient, "readTimeout");
assertThat(field).isNotNull();
return ((Duration) field).toMillis();
}
static | JettyClientHttpConnectorBuilderTests |
java | apache__flink | flink-filesystems/flink-s3-fs-base/src/test/java/org/apache/flink/fs/s3/common/writer/IncompletePartPrefixTest.java | {
"start": 1128,
"end": 2574
} | class ____ {
@Test
void nullObjectNameShouldThroughException() {
assertThatThrownBy(
() ->
RecoverableMultiPartUploadImpl.createIncompletePartObjectNamePrefix(
null))
.isInstanceOf(NullPointerException.class);
}
@Test
void emptyInitialNameShouldSucceed() {
String objectNamePrefix =
RecoverableMultiPartUploadImpl.createIncompletePartObjectNamePrefix("");
assertThat(objectNamePrefix).isEqualTo("_tmp_");
}
@Test
void nameWithoutSlashShouldSucceed() {
String objectNamePrefix =
RecoverableMultiPartUploadImpl.createIncompletePartObjectNamePrefix(
"no_slash_path");
assertThat(objectNamePrefix).isEqualTo("_no_slash_path_tmp_");
}
@Test
void nameWithOnlySlashShouldSucceed() {
String objectNamePrefix =
RecoverableMultiPartUploadImpl.createIncompletePartObjectNamePrefix("/");
assertThat(objectNamePrefix).isEqualTo("/_tmp_");
}
@Test
void normalPathShouldSucceed() {
String objectNamePrefix =
RecoverableMultiPartUploadImpl.createIncompletePartObjectNamePrefix(
"/root/home/test-file");
assertThat(objectNamePrefix).isEqualTo("/root/home/_test-file_tmp_");
}
}
| IncompletePartPrefixTest |
java | apache__avro | lang/java/mapred/src/main/java/org/apache/avro/hadoop/io/AvroSequenceFile.java | {
"start": 8687,
"end": 8904
} | class ____ the value records to be written.
*
* <p>
* If the values will be Avro data, use
* {@link #withValueSchema(org.apache.avro.Schema)} to specify the writer
* schema. The value | of |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/util/subpackage/Person.java | {
"start": 924,
"end": 1075
} | interface ____ {
long getId();
String getName();
int getAge();
String getEyeColor();
boolean likesPets();
Number getFavoriteNumber();
}
| Person |
java | apache__logging-log4j2 | log4j-to-slf4j/src/test/java/org/apache/logging/slf4j/LoggerContextResolver.java | {
"start": 5704,
"end": 7779
} | class ____ implements Store.CloseableResource {
private final LoggerContext context;
private final Logger logger;
private LoggerContextHolder(final LoggerContextSource source, final ExtensionContext extensionContext) {
this.context = (LoggerContext) LoggerFactory.getILoggerFactory();
Class<?> clazz = extensionContext.getRequiredTestClass();
this.logger = context.getLogger(clazz);
final JoranConfigurator configurator = new JoranConfigurator();
final URL configLocation = getConfigLocation(source, extensionContext);
configurator.setContext(context);
try {
configurator.doConfigure(configLocation);
} catch (final JoranException e) {
throw new ExtensionContextException("Failed to initialize Logback logger context for " + clazz, e);
}
}
private static URL getConfigLocation(
final LoggerContextSource source, final ExtensionContext extensionContext) {
final String value = source.value();
Class<?> clazz = extensionContext.getRequiredTestClass();
URL url = null;
if (value.isEmpty()) {
while (clazz != null) {
url = clazz.getResource(clazz.getSimpleName() + ".xml");
if (url != null) {
break;
}
clazz = clazz.getSuperclass();
}
} else {
url = clazz.getClassLoader().getResource(value);
}
if (url != null) {
return url;
}
throw new ExtensionContextException("Failed to find a default configuration for " + clazz);
}
public LoggerContext getLoggerContext() {
return context;
}
public Logger getLogger() {
return logger;
}
@Override
public void close() {
context.stop();
}
}
}
| LoggerContextHolder |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/ext/javatime/deser/LocalTimeDeserTest.java | {
"start": 1803,
"end": 2033
} | class ____ {
@JsonFormat(pattern="HH:mm", lenient = OptBoolean.FALSE)
public LocalTime value;
public StrictWrapper() { }
public StrictWrapper(LocalTime v) { value = v; }
}
static | StrictWrapper |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/MockEndpointBuilderFactory.java | {
"start": 5820,
"end": 23715
} | interface ____
extends
EndpointProducerBuilder {
default MockEndpointBuilder basic() {
return (MockEndpointBuilder) this;
}
/**
* Sets whether to make a deep copy of the incoming Exchange when
* received at this mock endpoint.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: producer (advanced)
*
* @param copyOnExchange the value to set
* @return the dsl builder
*/
default AdvancedMockEndpointBuilder copyOnExchange(boolean copyOnExchange) {
doSetProperty("copyOnExchange", copyOnExchange);
return this;
}
/**
* Sets whether to make a deep copy of the incoming Exchange when
* received at this mock endpoint.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: producer (advanced)
*
* @param copyOnExchange the value to set
* @return the dsl builder
*/
default AdvancedMockEndpointBuilder copyOnExchange(String copyOnExchange) {
doSetProperty("copyOnExchange", copyOnExchange);
return this;
}
/**
* Sets whether assertIsSatisfied() should fail fast at the first
* detected failed expectation while it may otherwise wait for all
* expected messages to arrive before performing expectations
* verifications. Is by default true. Set to false to use behavior as in
* Camel 2.x.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: producer (advanced)
*
* @param failFast the value to set
* @return the dsl builder
*/
default AdvancedMockEndpointBuilder failFast(boolean failFast) {
doSetProperty("failFast", failFast);
return this;
}
/**
* Sets whether assertIsSatisfied() should fail fast at the first
* detected failed expectation while it may otherwise wait for all
* expected messages to arrive before performing expectations
* verifications. Is by default true. Set to false to use behavior as in
* Camel 2.x.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: producer (advanced)
*
* @param failFast the value to set
* @return the dsl builder
*/
default AdvancedMockEndpointBuilder failFast(String failFast) {
doSetProperty("failFast", failFast);
return this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default AdvancedMockEndpointBuilder lazyStartProducer(boolean lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default AdvancedMockEndpointBuilder lazyStartProducer(String lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* To turn on logging when the mock receives an incoming message. This
* will log only one time at INFO level for the incoming message. For
* more detailed logging, then set the logger to DEBUG level for the
* org.apache.camel.component.mock.MockEndpoint class.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param log the value to set
* @return the dsl builder
*/
default AdvancedMockEndpointBuilder log(boolean log) {
doSetProperty("log", log);
return this;
}
/**
* To turn on logging when the mock receives an incoming message. This
* will log only one time at INFO level for the incoming message. For
* more detailed logging, then set the logger to DEBUG level for the
* org.apache.camel.component.mock.MockEndpoint class.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param log the value to set
* @return the dsl builder
*/
default AdvancedMockEndpointBuilder log(String log) {
doSetProperty("log", log);
return this;
}
/**
* A number that is used to turn on throughput logging based on groups
* of the size.
*
* The option is a: <code>int</code> type.
*
* Group: producer (advanced)
*
* @param reportGroup the value to set
* @return the dsl builder
*/
default AdvancedMockEndpointBuilder reportGroup(int reportGroup) {
doSetProperty("reportGroup", reportGroup);
return this;
}
/**
* A number that is used to turn on throughput logging based on groups
* of the size.
*
* The option will be converted to a <code>int</code> type.
*
* Group: producer (advanced)
*
* @param reportGroup the value to set
* @return the dsl builder
*/
default AdvancedMockEndpointBuilder reportGroup(String reportGroup) {
doSetProperty("reportGroup", reportGroup);
return this;
}
/**
* Sets the minimum expected amount of time the assertIsSatisfied() will
* wait on a latch until it is satisfied.
*
* The option is a: <code>long</code> type.
*
* Group: producer (advanced)
*
* @param resultMinimumWaitTime the value to set
* @return the dsl builder
*/
default AdvancedMockEndpointBuilder resultMinimumWaitTime(long resultMinimumWaitTime) {
doSetProperty("resultMinimumWaitTime", resultMinimumWaitTime);
return this;
}
/**
* Sets the minimum expected amount of time the assertIsSatisfied() will
* wait on a latch until it is satisfied.
*
* The option will be converted to a <code>long</code> type.
*
* Group: producer (advanced)
*
* @param resultMinimumWaitTime the value to set
* @return the dsl builder
*/
default AdvancedMockEndpointBuilder resultMinimumWaitTime(String resultMinimumWaitTime) {
doSetProperty("resultMinimumWaitTime", resultMinimumWaitTime);
return this;
}
/**
* Sets the maximum amount of time the assertIsSatisfied() will wait on
* a latch until it is satisfied.
*
* The option is a: <code>long</code> type.
*
* Group: producer (advanced)
*
* @param resultWaitTime the value to set
* @return the dsl builder
*/
default AdvancedMockEndpointBuilder resultWaitTime(long resultWaitTime) {
doSetProperty("resultWaitTime", resultWaitTime);
return this;
}
/**
* Sets the maximum amount of time the assertIsSatisfied() will wait on
* a latch until it is satisfied.
*
* The option will be converted to a <code>long</code> type.
*
* Group: producer (advanced)
*
* @param resultWaitTime the value to set
* @return the dsl builder
*/
default AdvancedMockEndpointBuilder resultWaitTime(String resultWaitTime) {
doSetProperty("resultWaitTime", resultWaitTime);
return this;
}
/**
* Specifies to only retain the first nth number of received Exchanges.
* This is used when testing with big data, to reduce memory consumption
* by not storing copies of every Exchange this mock endpoint receives.
* Important: When using this limitation, then the getReceivedCounter()
* will still return the actual number of received message. For example
* if we have received 5000 messages and have configured to only retain
* the first 10 Exchanges, then the getReceivedCounter() will still
* return 5000 but there is only the first 10 Exchanges in the
* getExchanges() and getReceivedExchanges() methods. When using this
* method, then some of the other expectation methods is not supported,
* for example the expectedBodiesReceived(Object...) sets a expectation
* on the first number of bodies received. You can configure both
* retainFirst and retainLast options, to limit both the first and last
* received.
*
* The option is a: <code>int</code> type.
*
* Default: -1
* Group: producer (advanced)
*
* @param retainFirst the value to set
* @return the dsl builder
*/
default AdvancedMockEndpointBuilder retainFirst(int retainFirst) {
doSetProperty("retainFirst", retainFirst);
return this;
}
/**
* Specifies to only retain the first nth number of received Exchanges.
* This is used when testing with big data, to reduce memory consumption
* by not storing copies of every Exchange this mock endpoint receives.
* Important: When using this limitation, then the getReceivedCounter()
* will still return the actual number of received message. For example
* if we have received 5000 messages and have configured to only retain
* the first 10 Exchanges, then the getReceivedCounter() will still
* return 5000 but there is only the first 10 Exchanges in the
* getExchanges() and getReceivedExchanges() methods. When using this
* method, then some of the other expectation methods is not supported,
* for example the expectedBodiesReceived(Object...) sets a expectation
* on the first number of bodies received. You can configure both
* retainFirst and retainLast options, to limit both the first and last
* received.
*
* The option will be converted to a <code>int</code> type.
*
* Default: -1
* Group: producer (advanced)
*
* @param retainFirst the value to set
* @return the dsl builder
*/
default AdvancedMockEndpointBuilder retainFirst(String retainFirst) {
doSetProperty("retainFirst", retainFirst);
return this;
}
/**
* Specifies to only retain the last nth number of received Exchanges.
* This is used when testing with big data, to reduce memory consumption
* by not storing copies of every Exchange this mock endpoint receives.
* Important: When using this limitation, then the getReceivedCounter()
* will still return the actual number of received message. For example
* if we have received 5000 messages and have configured to only retain
* the last 20 Exchanges, then the getReceivedCounter() will still
* return 5000 but there is only the last 20 Exchanges in the
* getExchanges() and getReceivedExchanges() methods. When using this
* method, then some of the other expectation methods is not supported,
* for example the expectedBodiesReceived(Object...) sets a expectation
* on the first number of bodies received. You can configure both
* retainFirst and retainLast options, to limit both the first and last
* received.
*
* The option is a: <code>int</code> type.
*
* Default: -1
* Group: producer (advanced)
*
* @param retainLast the value to set
* @return the dsl builder
*/
default AdvancedMockEndpointBuilder retainLast(int retainLast) {
doSetProperty("retainLast", retainLast);
return this;
}
/**
* Specifies to only retain the last nth number of received Exchanges.
* This is used when testing with big data, to reduce memory consumption
* by not storing copies of every Exchange this mock endpoint receives.
* Important: When using this limitation, then the getReceivedCounter()
* will still return the actual number of received message. For example
* if we have received 5000 messages and have configured to only retain
* the last 20 Exchanges, then the getReceivedCounter() will still
* return 5000 but there is only the last 20 Exchanges in the
* getExchanges() and getReceivedExchanges() methods. When using this
* method, then some of the other expectation methods is not supported,
* for example the expectedBodiesReceived(Object...) sets a expectation
* on the first number of bodies received. You can configure both
* retainFirst and retainLast options, to limit both the first and last
* received.
*
* The option will be converted to a <code>int</code> type.
*
* Default: -1
* Group: producer (advanced)
*
* @param retainLast the value to set
* @return the dsl builder
*/
default AdvancedMockEndpointBuilder retainLast(String retainLast) {
doSetProperty("retainLast", retainLast);
return this;
}
/**
* Allows a sleep to be specified to wait to check that this mock really
* is empty when expectedMessageCount(int) is called with zero value.
*
* The option is a: <code>long</code> type.
*
* Group: producer (advanced)
*
* @param sleepForEmptyTest the value to set
* @return the dsl builder
*/
default AdvancedMockEndpointBuilder sleepForEmptyTest(long sleepForEmptyTest) {
doSetProperty("sleepForEmptyTest", sleepForEmptyTest);
return this;
}
/**
* Allows a sleep to be specified to wait to check that this mock really
* is empty when expectedMessageCount(int) is called with zero value.
*
* The option will be converted to a <code>long</code> type.
*
* Group: producer (advanced)
*
* @param sleepForEmptyTest the value to set
* @return the dsl builder
*/
default AdvancedMockEndpointBuilder sleepForEmptyTest(String sleepForEmptyTest) {
doSetProperty("sleepForEmptyTest", sleepForEmptyTest);
return this;
}
/**
* Maximum number of messages to keep in memory available for browsing.
* Use 0 for unlimited.
*
* The option is a: <code>int</code> type.
*
* Default: 100
* Group: advanced
*
* @param browseLimit the value to set
* @return the dsl builder
*/
default AdvancedMockEndpointBuilder browseLimit(int browseLimit) {
doSetProperty("browseLimit", browseLimit);
return this;
}
/**
* Maximum number of messages to keep in memory available for browsing.
* Use 0 for unlimited.
*
* The option will be converted to a <code>int</code> type.
*
* Default: 100
* Group: advanced
*
* @param browseLimit the value to set
* @return the dsl builder
*/
default AdvancedMockEndpointBuilder browseLimit(String browseLimit) {
doSetProperty("browseLimit", browseLimit);
return this;
}
}
public | AdvancedMockEndpointBuilder |
java | alibaba__nacos | core/src/main/java/com/alibaba/nacos/core/namespace/model/NamespaceTypeEnum.java | {
"start": 846,
"end": 2014
} | enum ____ {
/**
* Global configuration.
*/
GLOBAL(0, "Global configuration"),
/**
* Custom namespace for naming and config.
*/
CUSTOM(1, "Custom namespace for naming and config"),
/**
* Nacos AI module MCP type namespace.
*/
AI_MCP(2, "Default private namespace");
/**
* the namespace type.
*/
private final int type;
/**
* the description.
*/
private final String description;
NamespaceTypeEnum(int type, String description) {
this.type = type;
this.description = description;
}
public int getType() {
return type;
}
public String getDescription() {
return description;
}
public static NamespaceTypeEnum getByType(String type) {
try {
int typeInt = Integer.parseInt(type);
for (NamespaceTypeEnum value : values()) {
if (value.getType() == typeInt) {
return value;
}
}
} catch (NumberFormatException ignored) {
}
return null;
}
}
| NamespaceTypeEnum |
java | apache__camel | core/camel-core-model/src/main/java/org/apache/camel/model/rest/VerbDefinition.java | {
"start": 10418,
"end": 10599
} | class ____ to use for binding from input to POJO for the incoming data This option will override what
* may be configured on a parent level.
* <p/>
* The name of the | name |
java | google__error-prone | core/src/test/java/com/google/errorprone/refaster/testdata/input/TryTemplateExample.java | {
"start": 746,
"end": 958
} | class ____ {
int foo(String str) {
int result;
try {
result = Integer.parseInt(str);
} catch (NumberFormatException tolerated) {
result = 0;
}
return result;
}
}
| TryTemplateExample |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/net/SmtpManager.java | {
"start": 10685,
"end": 13527
} | class ____ implements MailManagerFactory {
@Override
public SmtpManager createManager(final String name, final FactoryData data) {
final String smtpProtocol = data.getSmtpProtocol();
final String prefix = "mail." + smtpProtocol;
final Properties properties = PropertiesUtil.getSystemProperties();
properties.setProperty("mail.transport.protocol", smtpProtocol);
if (properties.getProperty("mail.host") == null) {
// Prevent an UnknownHostException in Java 7
properties.setProperty("mail.host", NetUtils.getLocalHostname());
}
final String smtpHost = data.getSmtpHost();
if (null != smtpHost) {
properties.setProperty(prefix + ".host", smtpHost);
}
if (data.getSmtpPort() > 0) {
properties.setProperty(prefix + ".port", String.valueOf(data.getSmtpPort()));
}
final Authenticator authenticator = buildAuthenticator(data.getSmtpUsername(), data.getSmtpPassword());
if (null != authenticator) {
properties.setProperty(prefix + ".auth", "true");
}
if (smtpProtocol.equals("smtps")) {
final SslConfiguration sslConfiguration = data.getSslConfiguration();
if (sslConfiguration != null) {
final SSLContext sslContext = sslConfiguration.getSslContext();
if (sslContext != null) {
final SSLSocketFactory sslSocketFactory = sslContext.getSocketFactory();
properties.put(prefix + ".ssl.socketFactory", sslSocketFactory);
}
properties.setProperty(
prefix + ".ssl.checkserveridentity", Boolean.toString(sslConfiguration.isVerifyHostName()));
}
}
final Session session = Session.getInstance(properties, authenticator);
session.setProtocolForAddress("rfc822", smtpProtocol);
session.setDebug(data.isSmtpDebug());
return new SmtpManager(name, session, null, data);
}
private Authenticator buildAuthenticator(final String username, final String password) {
if (null != password && null != username) {
return new Authenticator() {
private final PasswordAuthentication passwordAuthentication =
new PasswordAuthentication(username, password);
@Override
protected PasswordAuthentication getPasswordAuthentication() {
return passwordAuthentication;
}
};
}
return null;
}
}
}
| SMTPManagerFactory |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/TestContextAnnotationUtilsTests.java | {
"start": 25873,
"end": 25995
} | class ____ {
}
@MetaConfig(classes = TestContextAnnotationUtilsTests.class)
static | MetaConfigWithDefaultAttributesTestCase |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/inheritance/MapManyToManyTreatJoinTest.java | {
"start": 2456,
"end": 2926
} | class ____ {
@Id
private Integer id;
private String name;
@ManyToMany
@JoinTable( name = "sub_map" )
private Map<Integer, JoinedBase> subMap = new HashMap<>();
public JoinedBase() {
}
public JoinedBase(Integer id) {
this.id = id;
}
public Map<Integer, JoinedBase> getSubMap() {
return subMap;
}
}
@SuppressWarnings({"FieldCanBeLocal", "unused"})
@Entity( name = "JoinedSub1" )
@Table( name = "joined_sub_1" )
public static | JoinedBase |
java | elastic__elasticsearch | libs/plugin-analysis-api/src/main/java/org/elasticsearch/plugin/analysis/TokenFilterFactory.java | {
"start": 729,
"end": 1565
} | interface ____ extends Nameable {
/**
* Transform the specified input TokenStream.
* @param tokenStream a token stream to be transformed
* @return transformed token stream
*/
TokenStream create(TokenStream tokenStream);
/**
* Normalize a tokenStream for use in multi-term queries.
* The default implementation returns a given token stream.
*/
default TokenStream normalize(TokenStream tokenStream) {
return tokenStream;
}
/**
* Get the {@link AnalysisMode} this filter is allowed to be used in. The default is
* {@link AnalysisMode#ALL}. Instances need to override this method to define their
* own restrictions.
* @return analysis mode
*/
default AnalysisMode getAnalysisMode() {
return AnalysisMode.ALL;
}
}
| TokenFilterFactory |
java | spring-projects__spring-boot | module/spring-boot-http-converter/src/main/java/org/springframework/boot/http/converter/autoconfigure/GsonHttpMessageConvertersConfiguration.java | {
"start": 2520,
"end": 2615
} | class ____ {
}
@Conditional(JacksonAndJsonbUnavailableCondition.class)
static | GsonPreferred |
java | apache__flink | flink-metrics/flink-metrics-core/src/test/java/org/apache/flink/metrics/util/TestHistogram.java | {
"start": 1028,
"end": 2664
} | class ____ implements Histogram {
private long count = 1;
private int size = 3;
private double mean = 4;
private double stdDev = 5;
private long max = 6;
private long min = 7;
@Override
public void update(long value) {}
@Override
public long getCount() {
return count;
}
@Override
public HistogramStatistics getStatistics() {
return new HistogramStatistics() {
@Override
public double getQuantile(double quantile) {
return quantile;
}
@Override
public long[] getValues() {
return new long[0];
}
@Override
public int size() {
return size;
}
@Override
public double getMean() {
return mean;
}
@Override
public double getStdDev() {
return stdDev;
}
@Override
public long getMax() {
return max;
}
@Override
public long getMin() {
return min;
}
};
}
public void setSize(int size) {
this.size = size;
}
public void setMean(double mean) {
this.mean = mean;
}
public void setStdDev(double stdDev) {
this.stdDev = stdDev;
}
public void setMax(long max) {
this.max = max;
}
public void setMin(long min) {
this.min = min;
}
public void setCount(long count) {
this.count = count;
}
}
| TestHistogram |
java | apache__flink | flink-libraries/flink-state-processing-api/src/test/java/org/apache/flink/state/api/output/KeyedStateBootstrapOperatorTest.java | {
"start": 7458,
"end": 8056
} | class ____
extends KeyedProcessFunction<Long, Long, Tuple3<Long, Long, TimeDomain>> {
@Override
public void processElement(
Long value, Context ctx, Collector<Tuple3<Long, Long, TimeDomain>> out)
throws Exception {}
@Override
public void onTimer(
long timestamp, OnTimerContext ctx, Collector<Tuple3<Long, Long, TimeDomain>> out)
throws Exception {
out.collect(Tuple3.of(ctx.getCurrentKey(), timestamp, ctx.timeDomain()));
}
}
private static | SimpleProcessFunction |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/internal/operators/flowable/FlowableSkipLastTimed.java | {
"start": 1764,
"end": 6518
} | class ____<T> extends AtomicInteger implements FlowableSubscriber<T>, Subscription {
private static final long serialVersionUID = -5677354903406201275L;
final Subscriber<? super T> downstream;
final long time;
final TimeUnit unit;
final Scheduler scheduler;
final SpscLinkedArrayQueue<Object> queue;
final boolean delayError;
Subscription upstream;
final AtomicLong requested = new AtomicLong();
volatile boolean cancelled;
volatile boolean done;
Throwable error;
SkipLastTimedSubscriber(Subscriber<? super T> actual, long time, TimeUnit unit, Scheduler scheduler, int bufferSize, boolean delayError) {
this.downstream = actual;
this.time = time;
this.unit = unit;
this.scheduler = scheduler;
this.queue = new SpscLinkedArrayQueue<>(bufferSize);
this.delayError = delayError;
}
@Override
public void onSubscribe(Subscription s) {
if (SubscriptionHelper.validate(this.upstream, s)) {
this.upstream = s;
downstream.onSubscribe(this);
s.request(Long.MAX_VALUE);
}
}
@Override
public void onNext(T t) {
long now = scheduler.now(unit);
queue.offer(now, t);
drain();
}
@Override
public void onError(Throwable t) {
error = t;
done = true;
drain();
}
@Override
public void onComplete() {
done = true;
drain();
}
@Override
public void request(long n) {
if (SubscriptionHelper.validate(n)) {
BackpressureHelper.add(requested, n);
drain();
}
}
@Override
public void cancel() {
if (!cancelled) {
cancelled = true;
upstream.cancel();
if (getAndIncrement() == 0) {
queue.clear();
}
}
}
void drain() {
if (getAndIncrement() != 0) {
return;
}
int missed = 1;
final Subscriber<? super T> a = downstream;
final SpscLinkedArrayQueue<Object> q = queue;
final boolean delayError = this.delayError;
final TimeUnit unit = this.unit;
final Scheduler scheduler = this.scheduler;
final long time = this.time;
for (;;) {
long r = requested.get();
long e = 0L;
while (e != r) {
boolean d = done;
Long ts = (Long)q.peek();
boolean empty = ts == null;
long now = scheduler.now(unit);
if (!empty && ts > now - time) {
empty = true;
}
if (checkTerminated(d, empty, a, delayError)) {
return;
}
if (empty) {
break;
}
q.poll();
@SuppressWarnings("unchecked")
T v = (T)q.poll();
a.onNext(v);
e++;
}
if (e != 0L) {
BackpressureHelper.produced(requested, e);
}
missed = addAndGet(-missed);
if (missed == 0) {
break;
}
}
}
boolean checkTerminated(boolean d, boolean empty, Subscriber<? super T> a, boolean delayError) {
if (cancelled) {
queue.clear();
return true;
}
if (d) {
if (delayError) {
if (empty) {
Throwable e = error;
if (e != null) {
a.onError(e);
} else {
a.onComplete();
}
return true;
}
} else {
Throwable e = error;
if (e != null) {
queue.clear();
a.onError(e);
return true;
} else
if (empty) {
a.onComplete();
return true;
}
}
}
return false;
}
}
}
| SkipLastTimedSubscriber |
java | apache__flink | flink-filesystems/flink-oss-fs-hadoop/src/main/java/org/apache/flink/fs/osshadoop/OSSAccessor.java | {
"start": 1398,
"end": 3867
} | class ____ {
private AliyunOSSFileSystem fs;
private AliyunOSSFileSystemStore store;
public OSSAccessor(AliyunOSSFileSystem fs) {
this.fs = fs;
this.store = fs.getStore();
}
public String pathToObject(final Path path) {
org.apache.hadoop.fs.Path hadoopPath = HadoopFileSystem.toHadoopPath(path);
if (!hadoopPath.isAbsolute()) {
hadoopPath = new org.apache.hadoop.fs.Path(fs.getWorkingDirectory(), hadoopPath);
}
return hadoopPath.toUri().getPath().substring(1);
}
public Path objectToPath(String object) {
return new Path("/" + object);
}
public String startMultipartUpload(String objectName) {
return store.getUploadId(objectName);
}
public boolean deleteObject(String objectName) throws IOException {
return fs.delete(new org.apache.hadoop.fs.Path('/' + objectName), false);
}
public CompleteMultipartUploadResult completeMultipartUpload(
String objectName, String uploadId, List<PartETag> partETags) {
return store.completeMultipartUpload(objectName, uploadId, partETags);
}
public PartETag uploadPart(File file, String objectName, String uploadId, int idx)
throws IOException {
return store.uploadPart(file, objectName, uploadId, idx);
}
public void putObject(String objectName, File file) throws IOException {
store.uploadObject(objectName, file);
}
public void getObject(String objectName, String dstPath, long length) throws IOException {
long contentLength = store.getObjectMetadata(objectName).getContentLength();
if (contentLength != length) {
throw new IOException(
String.format(
"Error recovering writer: "
+ "Downloading the last data chunk file gives incorrect length."
+ "File length is %d bytes, RecoveryData indicates %d bytes",
contentLength, length));
}
org.apache.hadoop.fs.Path srcPath = new org.apache.hadoop.fs.Path("/" + objectName);
org.apache.hadoop.fs.Path localPath = new org.apache.hadoop.fs.Path(dstPath);
fs.copyToLocalFile(srcPath, localPath);
String crcFileName = "." + localPath.getName() + ".crc";
(new File(localPath.getParent().toString() + "/" + crcFileName)).delete();
}
}
| OSSAccessor |
java | netty__netty | codec-http/src/main/java/io/netty/handler/codec/http/multipart/AbstractMixedHttpData.java | {
"start": 884,
"end": 6974
} | class ____<D extends HttpData> extends AbstractReferenceCounted implements HttpData {
final String baseDir;
final boolean deleteOnExit;
D wrapped;
private final long limitSize;
AbstractMixedHttpData(long limitSize, String baseDir, boolean deleteOnExit, D initial) {
this.limitSize = limitSize;
this.wrapped = initial;
this.baseDir = baseDir;
this.deleteOnExit = deleteOnExit;
}
abstract D makeDiskData();
@Override
public long getMaxSize() {
return wrapped.getMaxSize();
}
@Override
public void setMaxSize(long maxSize) {
wrapped.setMaxSize(maxSize);
}
@Override
public ByteBuf content() {
return wrapped.content();
}
@Override
public void checkSize(long newSize) throws IOException {
wrapped.checkSize(newSize);
}
@Override
public long definedLength() {
return wrapped.definedLength();
}
@Override
public Charset getCharset() {
return wrapped.getCharset();
}
@Override
public String getName() {
return wrapped.getName();
}
@Override
public void addContent(ByteBuf buffer, boolean last) throws IOException {
if (wrapped instanceof AbstractMemoryHttpData) {
try {
checkSize(wrapped.length() + buffer.readableBytes());
if (wrapped.length() + buffer.readableBytes() > limitSize) {
D diskData = makeDiskData();
ByteBuf data = ((AbstractMemoryHttpData) wrapped).getByteBuf();
if (data != null && data.isReadable()) {
diskData.addContent(data.retain(), false);
}
wrapped.release();
wrapped = diskData;
}
} catch (IOException e) {
buffer.release();
throw e;
}
}
wrapped.addContent(buffer, last);
}
@Override
protected void deallocate() {
delete();
}
@Override
public void delete() {
wrapped.delete();
}
@Override
public byte[] get() throws IOException {
return wrapped.get();
}
@Override
public ByteBuf getByteBuf() throws IOException {
return wrapped.getByteBuf();
}
@Override
public String getString() throws IOException {
return wrapped.getString();
}
@Override
public String getString(Charset encoding) throws IOException {
return wrapped.getString(encoding);
}
@Override
public boolean isInMemory() {
return wrapped.isInMemory();
}
@Override
public long length() {
return wrapped.length();
}
@Override
public boolean renameTo(File dest) throws IOException {
return wrapped.renameTo(dest);
}
@Override
public void setCharset(Charset charset) {
wrapped.setCharset(charset);
}
@Override
public void setContent(ByteBuf buffer) throws IOException {
try {
checkSize(buffer.readableBytes());
} catch (IOException e) {
buffer.release();
throw e;
}
if (buffer.readableBytes() > limitSize) {
if (wrapped instanceof AbstractMemoryHttpData) {
// change to Disk
wrapped.release();
wrapped = makeDiskData();
}
}
wrapped.setContent(buffer);
}
@Override
public void setContent(File file) throws IOException {
checkSize(file.length());
if (file.length() > limitSize) {
if (wrapped instanceof AbstractMemoryHttpData) {
// change to Disk
wrapped.release();
wrapped = makeDiskData();
}
}
wrapped.setContent(file);
}
@Override
public void setContent(InputStream inputStream) throws IOException {
if (wrapped instanceof AbstractMemoryHttpData) {
// change to Disk even if we don't know the size
wrapped.release();
wrapped = makeDiskData();
}
wrapped.setContent(inputStream);
}
@Override
public boolean isCompleted() {
return wrapped.isCompleted();
}
@Override
public HttpDataType getHttpDataType() {
return wrapped.getHttpDataType();
}
@Override
public int hashCode() {
return wrapped.hashCode();
}
@Override
public boolean equals(Object obj) {
return wrapped.equals(obj);
}
@Override
public int compareTo(InterfaceHttpData o) {
return wrapped.compareTo(o);
}
@Override
public String toString() {
return "Mixed: " + wrapped;
}
@Override
public ByteBuf getChunk(int length) throws IOException {
return wrapped.getChunk(length);
}
@Override
public File getFile() throws IOException {
return wrapped.getFile();
}
@SuppressWarnings("unchecked")
@Override
public D copy() {
return (D) wrapped.copy();
}
@SuppressWarnings("unchecked")
@Override
public D duplicate() {
return (D) wrapped.duplicate();
}
@SuppressWarnings("unchecked")
@Override
public D retainedDuplicate() {
return (D) wrapped.retainedDuplicate();
}
@SuppressWarnings("unchecked")
@Override
public D replace(ByteBuf content) {
return (D) wrapped.replace(content);
}
@SuppressWarnings("unchecked")
@Override
public D touch() {
wrapped.touch();
return (D) this;
}
@SuppressWarnings("unchecked")
@Override
public D touch(Object hint) {
wrapped.touch(hint);
return (D) this;
}
@SuppressWarnings("unchecked")
@Override
public D retain() {
return (D) super.retain();
}
@SuppressWarnings("unchecked")
@Override
public D retain(int increment) {
return (D) super.retain(increment);
}
}
| AbstractMixedHttpData |
java | spring-projects__spring-boot | documentation/spring-boot-docs/src/main/java/org/springframework/boot/docs/actuator/cloudfoundry/customcontextpath/MyCloudFoundryConfiguration.java | {
"start": 1484,
"end": 2732
} | class ____ {
@Bean
public TomcatServletWebServerFactory servletWebServerFactory() {
return new TomcatServletWebServerFactory() {
@Override
protected void prepareContext(Host host, ServletContextInitializer[] initializers) {
super.prepareContext(host, initializers);
StandardContext child = new StandardContext();
child.addLifecycleListener(new Tomcat.FixContextListener());
child.setPath("/cloudfoundryapplication");
ServletContainerInitializer initializer = getServletContextInitializer(getContextPath());
child.addServletContainerInitializer(initializer, Collections.emptySet());
child.setCrossContext(true);
host.addChild(child);
}
};
}
private ServletContainerInitializer getServletContextInitializer(String contextPath) {
return (classes, context) -> {
Servlet servlet = new GenericServlet() {
@Override
public void service(ServletRequest req, ServletResponse res) throws ServletException, IOException {
ServletContext context = req.getServletContext().getContext(contextPath);
context.getRequestDispatcher("/cloudfoundryapplication").forward(req, res);
}
};
context.addServlet("cloudfoundry", servlet).addMapping("/*");
};
}
}
| MyCloudFoundryConfiguration |
java | spring-projects__spring-framework | spring-context/src/main/java/org/springframework/validation/method/MethodValidator.java | {
"start": 1082,
"end": 1308
} | interface ____ {
/**
* Determine the applicable validation groups. By default, obtained from an
* {@link org.springframework.validation.annotation.Validated @Validated}
* annotation on the method, or on the | MethodValidator |
java | apache__avro | lang/java/grpc/src/main/java/org/apache/avro/grpc/AvroInputStream.java | {
"start": 1159,
"end": 2135
} | class ____ extends InputStream implements Drainable {
/**
* Container to hold the serialized Avro payload when its read before draining
* it.
*/
private ByteArrayInputStream partial;
@Override
public int read(byte[] b, int off, int len) throws IOException {
return getPartialInternal().read(b, off, len);
}
@Override
public int read() throws IOException {
return getPartialInternal().read();
}
private ByteArrayInputStream getPartialInternal() throws IOException {
if (partial == null) {
ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
drainTo(outputStream);
partial = new ByteArrayInputStream(outputStream.toByteArray());
}
return partial;
}
protected ByteArrayInputStream getPartial() {
return partial;
}
/**
* An {@link OutputStream} that writes to a target {@link OutputStream} and
* provides total number of bytes written to it.
*/
protected static | AvroInputStream |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/serializer/SerializeWriterTest_12.java | {
"start": 606,
"end": 958
} | class ____ extends Writer {
@Override
public void write(char[] cbuf, int off, int len) throws IOException {
throw new IOException();
}
@Override
public void flush() throws IOException {
throw new IOException();
}
@Override
public void close() throws IOException {
}
}
}
| ErrorWriter |
java | apache__camel | core/camel-main/src/main/java/org/apache/camel/main/DefaultConfigurationProperties.java | {
"start": 27959,
"end": 72636
} | class ____ (org.apache.camel.processor.LogProcessor) <br/>
* ${contextId} - the camel context id <br/>
* ${routeId} - the route id <br/>
* ${groupId} - the route group id <br/>
* ${nodeId} - the node id <br/>
* ${nodePrefixId} - the node prefix id <br/>
* ${source} - the source:line (source location must be enabled) <br/>
* ${source.name} - the source filename (source location must be enabled) <br/>
* ${source.line} - the source line number (source location must be enabled)
*
* For example to use the route and node id you can specify the name as: ${routeId}/${nodeId}
*/
public void setLogName(String logName) {
this.logName = logName;
}
public String getLogLanguage() {
return logLanguage;
}
/**
* To configure the language to use for Log EIP. By default, the simple language is used. However, Camel also
* supports other languages such as groovy.
*/
public void setLogLanguage(String logLanguage) {
this.logLanguage = logLanguage;
}
public String getAdditionalSensitiveKeywords() {
return additionalSensitiveKeywords;
}
/**
* Camel comes with a default set of sensitive keywords which are automatically masked. This option allows to add
* additional custom keywords to be masked as well. Multiple keywords can be separated by comma.
*/
public void setAdditionalSensitiveKeywords(String additionalSensitiveKeywords) {
this.additionalSensitiveKeywords = additionalSensitiveKeywords;
}
public boolean isAutoStartup() {
return autoStartup;
}
/**
* Sets whether the object should automatically start when Camel starts. Important: Currently only routes can be
* disabled, as CamelContext's are always started. Note: When setting auto startup false on CamelContext then that
* takes precedence and no routes are started. You would need to start CamelContext explicit using the
* org.apache.camel.CamelContext.start() method, to start the context, and then you would need to start the routes
* manually using CamelContext.getRouteController().startRoute(String).
*
* Default is true to always start up.
*/
public void setAutoStartup(boolean autoStartup) {
this.autoStartup = autoStartup;
}
public String getAutoStartupExcludePattern() {
return autoStartupExcludePattern;
}
/**
* Used for exclusive filtering of routes to not automatically start with Camel starts.
*
* The pattern support matching by route id or endpoint urls.
*
* Multiple patterns can be specified separated by comma, as example, to exclude all the routes starting from kafka
* or jms use: kafka,jms.
*/
public void setAutoStartupExcludePattern(String autoStartupExcludePattern) {
this.autoStartupExcludePattern = autoStartupExcludePattern;
}
public boolean isAllowUseOriginalMessage() {
return allowUseOriginalMessage;
}
/**
* Sets whether to allow access to the original message from Camel's error handler, or from
* org.apache.camel.spi.UnitOfWork.getOriginalInMessage(). Turning this off can optimize performance, as defensive
* copy of the original message is not needed.
*
* Default is false.
*/
public void setAllowUseOriginalMessage(boolean allowUseOriginalMessage) {
this.allowUseOriginalMessage = allowUseOriginalMessage;
}
public boolean isCaseInsensitiveHeaders() {
return caseInsensitiveHeaders;
}
/**
* Whether to use case sensitive or insensitive headers.
*
* Important: When using case sensitive (this is set to false). Then the map is case sensitive which means headers
* such as content-type and Content-Type are two different keys which can be a problem for some protocols such as
* HTTP based, which rely on case insensitive headers. However case sensitive implementations can yield faster
* performance. Therefore use case sensitive implementation with care.
*
* Default is true.
*/
public void setCaseInsensitiveHeaders(boolean caseInsensitiveHeaders) {
this.caseInsensitiveHeaders = caseInsensitiveHeaders;
}
public boolean isAutowiredEnabled() {
return autowiredEnabled;
}
/**
* Whether autowiring is enabled. This is used for automatic autowiring options (the option must be marked as
* autowired) by looking up in the registry to find if there is a single instance of matching type, which then gets
* configured on the component. This can be used for automatic configuring JDBC data sources, JMS connection
* factories, AWS Clients, etc.
*
* Default is true.
*/
public void setAutowiredEnabled(boolean autowiredEnabled) {
this.autowiredEnabled = autowiredEnabled;
}
public boolean isEndpointRuntimeStatisticsEnabled() {
return endpointRuntimeStatisticsEnabled;
}
/**
* Sets whether endpoint runtime statistics is enabled (gathers runtime usage of each incoming and outgoing
* endpoints).
*
* The default value is false.
*/
public void setEndpointRuntimeStatisticsEnabled(boolean endpointRuntimeStatisticsEnabled) {
this.endpointRuntimeStatisticsEnabled = endpointRuntimeStatisticsEnabled;
}
    /** Whether Camel load (inflight messages, not cpu) statistics are enabled. Default is false. */
    public boolean isLoadStatisticsEnabled() {
        return loadStatisticsEnabled;
    }

    /**
     * Sets whether Camel load (inflight messages, not cpu) statistics is enabled (something like the unix load
     * average). The statistics requires to have camel-management on the classpath as JMX is required.
     *
     * The default value is false.
     */
    public void setLoadStatisticsEnabled(boolean loadStatisticsEnabled) {
        this.loadStatisticsEnabled = loadStatisticsEnabled;
    }

    /** Whether producers should be started lazily (on the first message). Default is false. */
    public boolean isEndpointLazyStartProducer() {
        return endpointLazyStartProducer;
    }

    /**
     * Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow
     * CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause
     * the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled
     * during routing messages via Camel's routing error handlers. Beware that when the first message is processed then
     * creating and starting the producer may take a little time and prolong the total processing time of the
     * processing.
     *
     * The default value is false.
     */
    public void setEndpointLazyStartProducer(boolean endpointLazyStartProducer) {
        this.endpointLazyStartProducer = endpointLazyStartProducer;
    }

    /** Whether consumers bridge to the Camel routing Error Handler. Default is false. */
    public boolean isEndpointBridgeErrorHandler() {
        return endpointBridgeErrorHandler;
    }

    /**
     * Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions occurred while the
     * consumer is trying to pickup incoming messages, or the likes, will now be processed as a message and handled by
     * the routing Error Handler.
     * <p/>
     * By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, that will be
     * logged at WARN/ERROR level and ignored.
     *
     * The default value is false.
     */
    public void setEndpointBridgeErrorHandler(boolean endpointBridgeErrorHandler) {
        this.endpointBridgeErrorHandler = endpointBridgeErrorHandler;
    }

    /** Whether using data type on Camel messages is enabled. */
    public boolean isUseDataType() {
        return useDataType;
    }

    /**
     * Whether to enable using data type on Camel messages.
     *
     * Data type are automatic turned on if one or more routes has been explicit configured with input and output
     * types. Otherwise data type is default off.
     */
    public void setUseDataType(boolean useDataType) {
        this.useDataType = useDataType;
    }
    /** Whether breadcrumb is enabled. Default is false. */
    public boolean isUseBreadcrumb() {
        return useBreadcrumb;
    }

    /**
     * Set whether breadcrumb is enabled. The default value is false.
     */
    public void setUseBreadcrumb(boolean useBreadcrumb) {
        this.useBreadcrumb = useBreadcrumb;
    }

    /** Whether bean post processing is enabled. Default is true. */
    public boolean isBeanPostProcessorEnabled() {
        return beanPostProcessorEnabled;
    }

    /**
     * Can be used to turn off bean post processing.
     *
     * Be careful to turn this off, as this means that beans that use Camel annotations such as
     * {@link org.apache.camel.EndpointInject}, {@link org.apache.camel.ProducerTemplate},
     * {@link org.apache.camel.Produce}, {@link org.apache.camel.Consume} etc will not be injected and in use.
     *
     * Turning this off should only be done if you are sure you do not use any of these Camel features.
     *
     * Not all runtimes allow turning this off.
     *
     * The default value is true (enabled).
     */
    public void setBeanPostProcessorEnabled(boolean beanPostProcessorEnabled) {
        this.beanPostProcessorEnabled = beanPostProcessorEnabled;
    }
    /** The mbeans registration level. Default is Default. */
    public ManagementMBeansLevel getJmxManagementMBeansLevel() {
        return jmxManagementMBeansLevel;
    }

    /**
     * Sets the mbeans registration level.
     *
     * The default value is Default.
     */
    public void setJmxManagementMBeansLevel(ManagementMBeansLevel jmxManagementMBeansLevel) {
        this.jmxManagementMBeansLevel = jmxManagementMBeansLevel;
    }

    /** The JMX statistics level. Default is Default. */
    public ManagementStatisticsLevel getJmxManagementStatisticsLevel() {
        return jmxManagementStatisticsLevel;
    }

    /**
     * Sets the JMX statistics level, the level can be set to Extended to gather additional information
     *
     * The default value is Default.
     */
    public void setJmxManagementStatisticsLevel(ManagementStatisticsLevel jmxManagementStatisticsLevel) {
        this.jmxManagementStatisticsLevel = jmxManagementStatisticsLevel;
    }

    /** The naming pattern for the CamelContext JMX management name. Default is #name#. */
    public String getJmxManagementNamePattern() {
        return jmxManagementNamePattern;
    }

    /**
     * The naming pattern for creating the CamelContext JMX management name.
     *
     * The default pattern is #name#
     */
    public void setJmxManagementNamePattern(String jmxManagementNamePattern) {
        this.jmxManagementNamePattern = jmxManagementNamePattern;
    }

    /** Whether routes created by Kamelets are registered for JMX management. Default is false. */
    public boolean isJmxManagementRegisterRoutesCreateByKamelet() {
        return jmxManagementRegisterRoutesCreateByKamelet;
    }

    /**
     * Whether routes created by Kamelets should be registered for JMX management. Enabling this allows to have
     * fine-grained monitoring and management of every route created via Kamelets.
     *
     * This is default disabled as a Kamelet is intended as a component (black-box) and its implementation details as
     * Camel route makes the overall management and monitoring of Camel applications more verbose.
     *
     * During development of Kamelets then enabling this will make it possible for developers to do fine-grained
     * performance inspection and identify potential bottlenecks in the Kamelet routes.
     *
     * However, for production usage then keeping this disabled is recommended.
     */
    public void setJmxManagementRegisterRoutesCreateByKamelet(boolean jmxManagementRegisterRoutesCreateByKamelet) {
        this.jmxManagementRegisterRoutesCreateByKamelet = jmxManagementRegisterRoutesCreateByKamelet;
    }

    /** Whether routes created by route templates are registered for JMX management. Default is true. */
    public boolean isJmxManagementRegisterRoutesCreateByTemplate() {
        return jmxManagementRegisterRoutesCreateByTemplate;
    }

    /**
     * Whether routes created by route templates (not Kamelets) should be registered for JMX management. Enabling this
     * allows to have fine-grained monitoring and management of every route created via route templates.
     *
     * This is default enabled (unlike Kamelets) as routes created via templates is regarded as standard routes, and
     * should be available for management and monitoring.
     */
    public void setJmxManagementRegisterRoutesCreateByTemplate(boolean jmxManagementRegisterRoutesCreateByTemplate) {
        this.jmxManagementRegisterRoutesCreateByTemplate = jmxManagementRegisterRoutesCreateByTemplate;
    }
    /** Whether timestamps are included in emitted Camel Events. Default is false. */
    public boolean isCamelEventsTimestampEnabled() {
        return camelEventsTimestampEnabled;
    }

    /**
     * Whether to include timestamps for all emitted Camel Events. Enabling this allows to know fine-grained at what
     * time each event was emitted, which can be used for reporting to report exactly the time of the events. This is by
     * default false to avoid the overhead of including this information.
     */
    public void setCamelEventsTimestampEnabled(boolean camelEventsTimestampEnabled) {
        this.camelEventsTimestampEnabled = camelEventsTimestampEnabled;
    }

    /** Whether MDC logging is turned on. */
    public boolean isUseMdcLogging() {
        return useMdcLogging;
    }

    /**
     * To turn on MDC logging
     */
    public void setUseMdcLogging(boolean useMdcLogging) {
        this.useMdcLogging = useMdcLogging;
    }

    /** The pattern used to determine which custom MDC keys to propagate during message routing. */
    public String getMdcLoggingKeysPattern() {
        return mdcLoggingKeysPattern;
    }

    /**
     * Sets the pattern used for determine which custom MDC keys to propagate during message routing when the routing
     * engine continues routing asynchronously for the given message. Setting this pattern to * will propagate all
     * custom keys. Or setting the pattern to foo*,bar* will propagate any keys starting with either foo or bar. Notice
     * that a set of standard Camel MDC keys are always propagated which starts with camel. as key name.
     *
     * The match rules are applied in this order (case insensitive):
     *
     * 1. exact match, returns true
     * 2. wildcard match (pattern ends with a * and the name starts with the pattern), returns true
     * 3. regular expression match, returns true
     * 4. otherwise returns false
     */
    public void setMdcLoggingKeysPattern(String mdcLoggingKeysPattern) {
        this.mdcLoggingKeysPattern = mdcLoggingKeysPattern;
    }

    /** The thread name pattern used for creating the full thread name. */
    public String getThreadNamePattern() {
        return threadNamePattern;
    }

    /**
     * Sets the thread name pattern used for creating the full thread name.
     *
     * The default pattern is: Camel (#camelId#) thread ##counter# - #name#
     *
     * Where #camelId# is the name of the CamelContext. and #counter# is a unique incrementing counter. and #name# is
     * the regular thread name.
     *
     * You can also use #longName# which is the long thread name which can include endpoint parameters etc.
     */
    public void setThreadNamePattern(String threadNamePattern) {
        this.threadNamePattern = threadNamePattern;
    }
    /** Pattern for including routes matched by route id or route input endpoint uri. */
    public String getRouteFilterIncludePattern() {
        return routeFilterIncludePattern;
    }

    /**
     * Used for filtering routes matching the given pattern, which follows the following rules:
     *
     * - Match by route id - Match by route input endpoint uri
     *
     * The matching is using exact match, by wildcard and regular expression as documented by
     * {@link PatternHelper#matchPattern(String, String)}.
     *
     * For example to only include routes which starts with foo in their route id's, use: include=foo* And to
     * exclude routes which starts from JMS endpoints, use: exclude=jms:*
     *
     * Multiple patterns can be separated by comma, for example to exclude both foo and bar routes, use:
     * exclude=foo*,bar*
     *
     * Exclude takes precedence over include.
     */
    public void setRouteFilterIncludePattern(String include) {
        this.routeFilterIncludePattern = include;
    }

    /** Pattern for excluding routes matched by route id or route input endpoint uri. */
    public String getRouteFilterExcludePattern() {
        return routeFilterExcludePattern;
    }

    /**
     * Used for filtering routes matching the given pattern, which follows the following rules:
     *
     * - Match by route id - Match by route input endpoint uri
     *
     * The matching is using exact match, by wildcard and regular expression as documented by
     * {@link PatternHelper#matchPattern(String, String)}.
     *
     * For example to only include routes which starts with foo in their route id's, use: include=foo* And to
     * exclude routes which starts from JMS endpoints, use: exclude=jms:*
     *
     * Multiple patterns can be separated by comma, for example to exclude both foo and bar routes, use:
     * exclude=foo*,bar*
     *
     * Exclude takes precedence over include.
     */
    public void setRouteFilterExcludePattern(String exclude) {
        this.routeFilterExcludePattern = exclude;
    }
    /** Whether bean introspection uses extended statistics. Default is false. */
    public boolean isBeanIntrospectionExtendedStatistics() {
        return beanIntrospectionExtendedStatistics;
    }

    /**
     * Sets whether bean introspection uses extended statistics. The default is false.
     */
    public void setBeanIntrospectionExtendedStatistics(boolean beanIntrospectionExtendedStatistics) {
        this.beanIntrospectionExtendedStatistics = beanIntrospectionExtendedStatistics;
    }

    /** The logging level used by bean introspection. Default is TRACE. */
    public LoggingLevel getBeanIntrospectionLoggingLevel() {
        return beanIntrospectionLoggingLevel;
    }

    /**
     * Sets the logging level used by bean introspection, logging activity of its usage. The default is TRACE.
     */
    public void setBeanIntrospectionLoggingLevel(LoggingLevel beanIntrospectionLoggingLevel) {
        this.beanIntrospectionLoggingLevel = beanIntrospectionLoggingLevel;
    }

    /** Whether the routes collector is enabled. Default is true. */
    public boolean isRoutesCollectorEnabled() {
        return routesCollectorEnabled;
    }

    /**
     * Whether the routes collector is enabled or not.
     *
     * When enabled Camel will auto-discover routes (RouteBuilder instances from the registry and also load additional
     * routes from the file system).
     *
     * The routes collector is default enabled.
     */
    public void setRoutesCollectorEnabled(boolean routesCollectorEnabled) {
        this.routesCollectorEnabled = routesCollectorEnabled;
    }

    /** Whether the routes collector ignores errors during loading and compiling routes. */
    public boolean isRoutesCollectorIgnoreLoadingError() {
        return routesCollectorIgnoreLoadingError;
    }

    /**
     * Whether the routes collector should ignore any errors during loading and compiling routes.
     *
     * This is only intended for development or tooling.
     */
    public void setRoutesCollectorIgnoreLoadingError(boolean routesCollectorIgnoreLoadingError) {
        this.routesCollectorIgnoreLoadingError = routesCollectorIgnoreLoadingError;
    }
    /** Work directory for the compiler. */
    public String getCompileWorkDir() {
        return compileWorkDir;
    }

    /**
     * Work directory for compiler. Can be used to write compiled classes or other resources.
     */
    public void setCompileWorkDir(String compileWorkDir) {
        this.compileWorkDir = compileWorkDir;
    }

    /** Ant-path style pattern for inclusive filtering of RouteBuilder classes. */
    public String getJavaRoutesIncludePattern() {
        return javaRoutesIncludePattern;
    }

    /**
     * Used for inclusive filtering RouteBuilder classes which are collected from the registry or via classpath
     * scanning. The exclusive filtering takes precedence over inclusive filtering. The pattern is using Ant-path style
     * pattern. Multiple patterns can be specified separated by comma.
     *
     * Multiple patterns can be specified separated by comma. For example to include all classes starting with Foo use:
     * **&#47;Foo* To include all routes form a specific package use: com/mycompany/foo/* To include all routes
     * form a specific package and its sub-packages use double wildcards: com/mycompany/foo/** And to include
     * all routes from two specific packages use: com/mycompany/foo/*,com/mycompany/stuff/*
     */
    public void setJavaRoutesIncludePattern(String javaRoutesIncludePattern) {
        this.javaRoutesIncludePattern = javaRoutesIncludePattern;
    }

    /** Ant-path style pattern for exclusive filtering of RouteBuilder classes. */
    public String getJavaRoutesExcludePattern() {
        return javaRoutesExcludePattern;
    }

    /**
     * Used for exclusive filtering RouteBuilder classes which are collected from the registry or via classpath
     * scanning. The exclusive filtering takes precedence over inclusive filtering. The pattern is using Ant-path style
     * pattern. Multiple patterns can be specified separated by comma.
     *
     * For example to exclude all classes starting with Bar use: **&#47;Bar* To exclude all routes form a
     * specific package use: com/mycompany/bar/* To exclude all routes form a specific package and its sub-packages
     * use double wildcards: com/mycompany/bar/** And to exclude all routes from two specific packages use:
     * com/mycompany/bar/*,com/mycompany/stuff/*
     */
    public void setJavaRoutesExcludePattern(String javaRoutesExcludePattern) {
        this.javaRoutesExcludePattern = javaRoutesExcludePattern;
    }

    /** Ant-path style pattern for inclusive filtering of routes from directories. */
    public String getRoutesIncludePattern() {
        return routesIncludePattern;
    }

    /**
     * Used for inclusive filtering of routes from directories. The exclusive filtering takes precedence over inclusive
     * filtering. The pattern is using Ant-path style pattern.
     *
     * Multiple patterns can be specified separated by comma, as example, to include all the routes from a directory
     * whose name contains foo use: **&#47;*foo*.
     */
    public void setRoutesIncludePattern(String routesIncludePattern) {
        this.routesIncludePattern = routesIncludePattern;
    }

    /** Ant-path style pattern for exclusive filtering of routes from directories. */
    public String getRoutesExcludePattern() {
        return routesExcludePattern;
    }

    /**
     * Used for exclusive filtering of routes from directories. The exclusive filtering takes precedence over inclusive
     * filtering. The pattern is using Ant-path style pattern.
     *
     * Multiple patterns can be specified separated by comma, as example, to exclude all the routes from a directory
     * whose name contains foo use: **&#47;*foo*.
     */
    public void setRoutesExcludePattern(String routesExcludePattern) {
        this.routesExcludePattern = routesExcludePattern;
    }
    /** Whether automatic routes reloading is enabled. */
    public boolean isRoutesReloadEnabled() {
        return routesReloadEnabled;
    }

    /**
     * Used for enabling automatic routes reloading. If enabled then Camel will watch for file changes in the given
     * reload directory, and trigger reloading routes if files are changed.
     */
    public void setRoutesReloadEnabled(boolean routesReloadEnabled) {
        this.routesReloadEnabled = routesReloadEnabled;
    }

    /** Whether context reloading is enabled. */
    public boolean isContextReloadEnabled() {
        return contextReloadEnabled;
    }

    /**
     * Used for enabling context reloading. If enabled then Camel allow external systems such as security vaults (AWS
     * secrets manager, etc.) to trigger refreshing Camel by updating property placeholders and reload all existing
     * routes to take changes into effect.
     */
    public void setContextReloadEnabled(boolean contextReloadEnabled) {
        this.contextReloadEnabled = contextReloadEnabled;
    }

    /** Directory to scan for route changes. */
    public String getRoutesReloadDirectory() {
        return routesReloadDirectory;
    }

    /**
     * Directory to scan for route changes. Camel cannot scan the classpath, so this must be configured to a file
     * directory. Development with Maven as build tool, you can configure the directory to be src/main/resources to scan
     * for Camel routes in XML or YAML files.
     */
    public void setRoutesReloadDirectory(String routesReloadDirectory) {
        this.routesReloadDirectory = routesReloadDirectory;
    }

    /** Whether the reload directory scan includes sub directories. */
    public boolean isRoutesReloadDirectoryRecursive() {
        return routesReloadDirectoryRecursive;
    }

    /**
     * Whether the directory to scan should include sub directories.
     *
     * Depending on the number of sub directories, then this can cause the JVM to startup slower as Camel uses the JDK
     * file-watch service to scan for file changes.
     */
    public void setRoutesReloadDirectoryRecursive(boolean routesReloadDirectoryRecursive) {
        this.routesReloadDirectoryRecursive = routesReloadDirectoryRecursive;
    }

    /** Pattern for inclusive filtering of routes to reload. */
    public String getRoutesReloadPattern() {
        return routesReloadPattern;
    }

    /**
     * Used for inclusive filtering of routes from directories.
     *
     * Typical used for specifying to accept routes in XML or YAML files, such as <tt>*.yaml,*.xml</tt>. Multiple
     * patterns can be specified separated by comma.
     */
    public void setRoutesReloadPattern(String routesReloadPattern) {
        this.routesReloadPattern = routesReloadPattern;
    }

    /** Whether all existing routes are stopped and removed when routes are reloaded. */
    public boolean isRoutesReloadRemoveAllRoutes() {
        return routesReloadRemoveAllRoutes;
    }

    /**
     * When reloading routes should all existing routes be stopped and removed.
     *
     * By default, Camel will stop and remove all existing routes before reloading routes. This ensures that only the
     * reloaded routes will be active. If disabled then only routes with the same route id is updated, and any existing
     * routes are continued to run.
     */
    public void setRoutesReloadRemoveAllRoutes(boolean routesReloadRemoveAllRoutes) {
        this.routesReloadRemoveAllRoutes = routesReloadRemoveAllRoutes;
    }

    /** Whether the max duration is restarted when routes are reloaded. */
    public boolean isRoutesReloadRestartDuration() {
        return routesReloadRestartDuration;
    }

    /**
     * Whether to restart max duration when routes are reloaded. For example if max duration is 60 seconds, and a route
     * is reloaded after 25 seconds, then this will restart the count and wait 60 seconds again.
     */
    public void setRoutesReloadRestartDuration(boolean routesReloadRestartDuration) {
        this.routesReloadRestartDuration = routesReloadRestartDuration;
    }
    /** Whether updating routes at runtime via JMX is allowed. Default is false. */
    public boolean isJmxUpdateRouteEnabled() {
        return jmxUpdateRouteEnabled;
    }

    /**
     * Whether to allow updating routes at runtime via JMX using the ManagedRouteMBean.
     *
     * This is disabled by default, but can be enabled for development and troubleshooting purposes, such as updating
     * routes in an existing running Camel via JMX and other tools.
     */
    public void setJmxUpdateRouteEnabled(boolean jmxUpdateRouteEnabled) {
        this.jmxUpdateRouteEnabled = jmxUpdateRouteEnabled;
    }

    /** Ant-path style directories to scan for groovy sources to pre-compile. */
    public String getGroovyScriptPattern() {
        return groovyScriptPattern;
    }

    /**
     * Directories to scan for groovy source to be pre-compiled. For example: scripts/*.groovy will scan inside the
     * classpath folder scripts for all groovy source files.
     *
     * By default, sources are scanned from the classpath, but you can prefix with file: to use file system.
     *
     * The directories are using Ant-path style pattern, and multiple directories can be specified separated by comma.
     *
     * This requires having camel-groovy JAR on the classpath.
     */
    public void setGroovyScriptPattern(String groovyScriptPattern) {
        this.groovyScriptPattern = groovyScriptPattern;
    }

    /** Whether compiled Groovy sources are preloaded from the compileWorkDir on startup. */
    public boolean isGroovyPreloadCompiled() {
        return groovyPreloadCompiled;
    }

    /**
     * Whether to preload existing compiled Groovy sources from the compileWorkDir option on startup. This can be
     * enabled to avoid compiling sources that already has been compiled during a build phase.
     */
    public void setGroovyPreloadCompiled(boolean groovyPreloadCompiled) {
        this.groovyPreloadCompiled = groovyPreloadCompiled;
    }
    /** Whether exchanges are pooled (reuse) or created per message (prototype). Default is prototype. */
    public String getExchangeFactory() {
        return exchangeFactory;
    }

    /**
     * Controls whether to pool (reuse) exchanges or create new exchanges (prototype). Using pooled will reduce JVM
     * garbage collection overhead by avoiding to re-create Exchange instances per message each consumer receives. The
     * default is prototype mode.
     */
    public void setExchangeFactory(String exchangeFactory) {
        this.exchangeFactory = exchangeFactory;
    }

    /**
     * The capacity the pool (for each consumer) uses for storing exchanges. The default capacity is 100.
     */
    public int getExchangeFactoryCapacity() {
        return exchangeFactoryCapacity;
    }

    /**
     * The capacity the pool (for each consumer) uses for storing exchanges. The default capacity is 100.
     */
    public void setExchangeFactoryCapacity(int exchangeFactoryCapacity) {
        this.exchangeFactoryCapacity = exchangeFactoryCapacity;
    }

    /** Whether statistics is enabled on the exchange factory. */
    public boolean isExchangeFactoryStatisticsEnabled() {
        return exchangeFactoryStatisticsEnabled;
    }

    /**
     * Configures whether statistics is enabled on exchange factory.
     */
    public void setExchangeFactoryStatisticsEnabled(boolean exchangeFactoryStatisticsEnabled) {
        this.exchangeFactoryStatisticsEnabled = exchangeFactoryStatisticsEnabled;
    }
    /** Whether to dump all loaded routes during startup. */
    public String getDumpRoutes() {
        return dumpRoutes;
    }

    /**
     * If dumping is enabled then Camel will during startup dump all loaded routes (incl rests and route templates)
     * represented as XML/YAML DSL into the log. This is intended for trouble shooting or to assist during development.
     *
     * Sensitive information that may be configured in the route endpoints could potentially be included in the dump
     * output and is therefore not recommended being used for production usage.
     *
     * This requires to have camel-xml-io/camel-yaml-io on the classpath to be able to dump the routes as XML/YAML.
     */
    public void setDumpRoutes(String dumpRoutes) {
        this.dumpRoutes = dumpRoutes;
    }

    /** What to include in route dump output. Default is routes. */
    public String getDumpRoutesInclude() {
        return dumpRoutesInclude;
    }

    /**
     * Controls what to include in output for route dumping.
     *
     * Possible values: all, routes, rests, routeConfigurations, routeTemplates, beans, dataFormats. Multiple values can
     * be separated by comma. Default is routes.
     */
    public void setDumpRoutesInclude(String dumpRoutesInclude) {
        this.dumpRoutesInclude = dumpRoutesInclude;
    }

    /** Whether route dumps are logged to the Logger. */
    public boolean isDumpRoutesLog() {
        return dumpRoutesLog;
    }

    /**
     * Whether to log route dumps to Logger
     */
    public void setDumpRoutesLog(boolean dumpRoutesLog) {
        this.dumpRoutesLog = dumpRoutesLog;
    }

    /** Whether property placeholders are resolved in the dumped output. Default is true. */
    public boolean isDumpRoutesResolvePlaceholders() {
        return dumpRoutesResolvePlaceholders;
    }

    /**
     * Whether to resolve property placeholders in the dumped output. Default is true.
     */
    public void setDumpRoutesResolvePlaceholders(boolean dumpRoutesResolvePlaceholders) {
        this.dumpRoutesResolvePlaceholders = dumpRoutesResolvePlaceholders;
    }

    /** Whether endpoint URIs are expanded into key/value parameters when dumping to YAML. */
    public boolean isDumpRoutesUriAsParameters() {
        return dumpRoutesUriAsParameters;
    }

    /**
     * When dumping routes to YAML format, then this option controls whether endpoint URIs should be expanded into a
     * key/value parameters.
     */
    public void setDumpRoutesUriAsParameters(boolean dumpRoutesUriAsParameters) {
        this.dumpRoutesUriAsParameters = dumpRoutesUriAsParameters;
    }

    /** Whether auto generated IDs are included in the dumped output. Default is false. */
    public boolean isDumpRoutesGeneratedIds() {
        return dumpRoutesGeneratedIds;
    }

    /**
     * Whether to include auto generated IDs in the dumped output. Default is false.
     */
    public void setDumpRoutesGeneratedIds(boolean dumpRoutesGeneratedIds) {
        this.dumpRoutesGeneratedIds = dumpRoutesGeneratedIds;
    }

    /** Output file or directory where route dumps are saved. */
    public String getDumpRoutesOutput() {
        return dumpRoutesOutput;
    }

    /**
     * Whether to save route dumps to an output file.
     *
     * If the output is a filename, then all content is saved to this file. If the output is a directory name, then one
     * or more files are saved to the directory, where the names are based on the original source file names, or auto
     * generated names.
     */
    public void setDumpRoutesOutput(String dumpRoutesOutput) {
        this.dumpRoutesOutput = dumpRoutesOutput;
    }
    /** The global options configured on the CamelContext; may be null if none have been set. */
    public Map<String, String> getGlobalOptions() {
        return globalOptions;
    }

    /**
     * Sets global options that can be referenced in the camel context
     * <p/>
     * <b>Important:</b> This has nothing to do with property placeholders, and is just a plain set of key/value pairs
     * which are used to configure global options on CamelContext, such as a maximum debug logging length etc.
     */
    public void setGlobalOptions(Map<String, String> globalOptions) {
        this.globalOptions = globalOptions;
    }
/**
* Adds a global options that can be referenced in the camel context
* <p/>
* <b>Important:</b> This has nothing to do with property placeholders, and is just a plain set of key/value pairs
* which are used to configure global options on CamelContext, such as a maximum debug logging length etc.
*/
public void addGlobalOption(String key, Object value) {
if (this.globalOptions == null) {
this.globalOptions = new HashMap<>();
}
this.globalOptions.put(key, value.toString());
}
    /** The startup recorder in use (false/off, logging, backlog, java-flight-recorder/jfr). */
    public String getStartupRecorder() {
        return startupRecorder;
    }

    /**
     * To use startup recorder for capturing execution time during starting Camel. The recorder can be one of: false (or
     * off), logging, backlog, java-flight-recorder (or jfr).
     */
    public void setStartupRecorder(String startupRecorder) {
        this.startupRecorder = startupRecorder;
    }

    /** Maximum depth for filtering sub steps. Default is -1 (no maximum). */
    public int getStartupRecorderMaxDepth() {
        return startupRecorderMaxDepth;
    }

    /**
     * To filter our sub steps at a maximum depth.
     *
     * Use -1 for no maximum. Use 0 for no sub steps. Use 1 for max 1 sub step, and so forth.
     *
     * The default is -1.
     */
    public void setStartupRecorderMaxDepth(int startupRecorderMaxDepth) {
        this.startupRecorderMaxDepth = startupRecorderMaxDepth;
    }

    /** Whether Java Flight Recorder starts a recording and dumps it to disk after startup. */
    public boolean isStartupRecorderRecording() {
        return startupRecorderRecording;
    }

    /**
     * To enable Java Flight Recorder to start a recording and automatic dump the recording to disk after startup is
     * complete.
     *
     * This requires that camel-jfr is on the classpath, and to enable this option.
     */
    public void setStartupRecorderRecording(boolean startupRecorderRecording) {
        this.startupRecorderRecording = startupRecorderRecording;
    }

    /** The Java Flight Recorder profile configuration in use. Default is default. */
    public String getStartupRecorderProfile() {
        return startupRecorderProfile;
    }

    /**
     * To use a specific Java Flight Recorder profile configuration, such as default or profile.
     *
     * The default is default.
     */
    public void setStartupRecorderProfile(String startupRecorderProfile) {
        this.startupRecorderProfile = startupRecorderProfile;
    }

    /** How long time to run the startup recorder. */
    public long getStartupRecorderDuration() {
        return startupRecorderDuration;
    }

    /**
     * How long time to run the startup recorder.
     *
     * Use 0 (default) to keep the recorder running until the JVM is exited. Use -1 to stop the recorder right after
     * Camel has been started (to only focus on potential Camel startup performance bottlenecks) Use a positive value to
     * keep recording for N seconds.
     *
     * When the recorder is stopped then the recording is auto saved to disk (note: save to disk can be disabled by
     * setting startupRecorderDir to false)
     */
    public void setStartupRecorderDuration(long startupRecorderDuration) {
        this.startupRecorderDuration = startupRecorderDuration;
    }

    /** Directory to store the recording, or false to turn off saving to disk. */
    public String getStartupRecorderDir() {
        return startupRecorderDir;
    }

    /**
     * Directory to store the recording. By default the current directory will be used. Use false to turn off saving
     * recording to disk.
     */
    public void setStartupRecorderDir(String startupRecorderDir) {
        this.startupRecorderDir = startupRecorderDir;
    }
    /** Locations (comma separated) for cloud native properties configuration. */
    public String getCloudPropertiesLocation() {
        return cloudPropertiesLocation;
    }

    /**
     * Sets the locations (comma separated values) where to find properties configuration as defined for cloud native
     * environments such as Kubernetes. You should only scan text based mounted configuration.
     */
    public void setCloudPropertiesLocation(String cloudPropertiesLocation) {
        this.cloudPropertiesLocation = cloudPropertiesLocation;
    }
// fluent builders
// --------------------------------------------------------------
    /**
     * Sets the name of the CamelContext.
     *
     * @return this builder
     */
    public T withName(String name) {
        this.name = name;
        return (T) this;
    }

    /**
     * Sets the description (intended for humans) of the Camel application.
     *
     * @return this builder
     */
    public T withDescription(String description) {
        this.description = description;
        return (T) this;
    }

    /**
     * To specify for how long time in seconds to keep running the JVM before automatic terminating the JVM. You can use
     * this to run Camel for a short while.
     *
     * @return this builder
     */
    public T withDurationMaxSeconds(int durationMaxSeconds) {
        this.durationMaxSeconds = durationMaxSeconds;
        return (T) this;
    }

    /**
     * To specify for how long time in seconds Camel can be idle before automatic terminating the JVM. You can use this
     * to run Camel for a short while.
     *
     * @return this builder
     */
    public T withDurationMaxIdleSeconds(int durationMaxIdleSeconds) {
        this.durationMaxIdleSeconds = durationMaxIdleSeconds;
        return (T) this;
    }

    /**
     * To specify how many messages to process by Camel before automatic terminating the JVM. You can use this to run
     * Camel for a short while.
     *
     * @return this builder
     */
    public T withDurationMaxMessages(int durationMaxMessages) {
        this.durationMaxMessages = durationMaxMessages;
        return (T) this;
    }

    /**
     * Controls whether the Camel application should shutdown the JVM, or stop all routes, when duration max is
     * triggered.
     *
     * @return this builder
     */
    public T withDurationMaxAction(String durationMaxAction) {
        this.durationMaxAction = durationMaxAction;
        return (T) this;
    }
    /**
     * Timeout in seconds to graceful shutdown all the Camel routes.
     *
     * @return this builder
     */
    public T withShutdownTimeout(int shutdownTimeout) {
        this.shutdownTimeout = shutdownTimeout;
        return (T) this;
    }

    /**
     * Whether Camel should try to suppress logging during shutdown and timeout was triggered, meaning forced shutdown
     * is happening. And during forced shutdown we want to avoid logging errors/warnings at all in the logs as a
     * side-effect of the forced timeout. Notice the suppress is a best effort as there may still be some logs coming
     * from 3rd party libraries and whatnot, which Camel cannot control. This option is default false.
     *
     * @return this builder
     */
    public T withShutdownSuppressLoggingOnTimeout(boolean shutdownSuppressLoggingOnTimeout) {
        this.shutdownSuppressLoggingOnTimeout = shutdownSuppressLoggingOnTimeout;
        return (T) this;
    }

    /**
     * Sets whether to force shutdown of all consumers when a timeout occurred and thus not all consumers was shutdown
     * within that period.
     *
     * You should have good reasons to set this option to false as it means that the routes keep running and is halted
     * abruptly when CamelContext has been shutdown.
     *
     * @return this builder
     */
    public T withShutdownNowOnTimeout(boolean shutdownNowOnTimeout) {
        this.shutdownNowOnTimeout = shutdownNowOnTimeout;
        return (T) this;
    }

    /**
     * Sets whether routes should be shutdown in reverse or the same order as they were started.
     *
     * @return this builder
     */
    public T withShutdownRoutesInReverseOrder(boolean shutdownRoutesInReverseOrder) {
        this.shutdownRoutesInReverseOrder = shutdownRoutesInReverseOrder;
        return (T) this;
    }

    /**
     * Sets whether to log information about the inflight Exchanges which are still running during a shutdown which
     * didn't complete without the given timeout.
     *
     * This requires to enable the option inflightRepositoryExchangeEnabled.
     *
     * @return this builder
     */
    public T withShutdownLogInflightExchangesOnTimeout(boolean shutdownLogInflightExchangesOnTimeout) {
        this.shutdownLogInflightExchangesOnTimeout = shutdownLogInflightExchangesOnTimeout;
        return (T) this;
    }

    /**
     * Sets whether the inflight repository should allow browsing each inflight exchange.
     *
     * This is by default disabled as there is a very slight performance overhead when enabled.
     *
     * @return this builder
     */
    public T withInflightRepositoryBrowseEnabled(boolean inflightRepositoryBrowseEnabled) {
        this.inflightRepositoryBrowseEnabled = inflightRepositoryBrowseEnabled;
        return (T) this;
    }
    /**
     * Directory to load additional configuration files that contains configuration values that takes precedence over
     * any other configuration. This can be used to refer to files that may have secret configuration that has been
     * mounted on the file system for containers.
     *
     * You can specify a pattern to load from sub directories and a name pattern such as /var/app/secret/*.properties,
     * multiple directories can be separated by comma.
     *
     * @return this builder
     */
    public T withFileConfigurations(String fileConfigurations) {
        this.fileConfigurations = fileConfigurations;
        return (T) this;
    }

    /**
     * Enable JMX in your Camel application.
     *
     * @return this builder
     */
    public T withJmxEnabled(boolean jmxEnabled) {
        this.jmxEnabled = jmxEnabled;
        return (T) this;
    }

    /**
     * Producer template endpoints cache size.
     *
     * @return this builder
     */
    public T withProducerTemplateCacheSize(int producerTemplateCacheSize) {
        this.producerTemplateCacheSize = producerTemplateCacheSize;
        return (T) this;
    }

    /**
     * Consumer template endpoints cache size.
     *
     * @return this builder
     */
    public T withConsumerTemplateCacheSize(int consumerTemplateCacheSize) {
        this.consumerTemplateCacheSize = consumerTemplateCacheSize;
        return (T) this;
    }

    /**
     * Whether to load custom type converters by scanning classpath. This is used for backwards compatibility with Camel
     * 2.x. Its recommended to migrate to use fast type converter loading by setting <tt>@Converter(loader = true)</tt>
     * on your custom type converter classes.
     *
     * @return this builder
     */
    public T withLoadTypeConverters(boolean loadTypeConverters) {
        this.loadTypeConverters = loadTypeConverters;
        return (T) this;
    }

    /**
     * Whether to load custom health checks by scanning classpath.
     *
     * @return this builder
     */
    public T withLoadHealthChecks(boolean loadHealthChecks) {
        this.loadHealthChecks = loadHealthChecks;
        return (T) this;
    }

    /**
     * Whether to support JBang style //DEPS to specify additional dependencies when running Camel JBang
     *
     * @return this builder
     */
    public T withModeline(boolean modeline) {
        this.modeline = modeline;
        return (T) this;
    }

    /**
     * Whether to enable developer console (requires camel-console on classpath).
     *
     * The developer console is only for assisting during development. This is NOT for production usage.
     *
     * @return this builder
     */
    public T withDevConsoleEnabled(boolean devConsoleEnabled) {
        this.devConsoleEnabled = devConsoleEnabled;
        return (T) this;
    }
/**
* Is used to limit the maximum length of the logging Camel message bodies. If the message body is longer than the
* limit, the log message is clipped. Use -1 to have unlimited length. Use for example 1000 to log at most 1000
* characters.
*/
public T withLogDebugMaxChars(int logDebugMaxChars) {
this.logDebugMaxChars = logDebugMaxChars;
return (T) this;
}
/**
* Sets whether stream caching is enabled or not.
*
* While stream types (like StreamSource, InputStream and Reader) are commonly used in messaging for performance
* reasons, they also have an important drawback: they can only be read once. In order to be able to work with
* message content multiple times, the stream needs to be cached.
*
* Streams are cached in memory only (by default).
*
* If streamCachingSpoolEnabled=true, then, for large stream messages (over 128 KB by default) will be cached in a
* temporary file instead, and Camel will handle deleting the temporary file once the cached stream is no longer
* necessary.
*
* Default is true.
*/
public T withStreamCachingEnabled(boolean streamCachingEnabled) {
this.streamCachingEnabled = streamCachingEnabled;
return (T) this;
}
/**
* To filter stream caching of a given set of allowed/denied classes. By default, all classes that are
* {@link java.io.InputStream} is allowed. Multiple | name |
java | apache__hadoop | hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/RoleModel.java | {
"start": 8079,
"end": 8184
} | enum ____ {
Allow,
Deny
}
/**
* Any element in a role.
*/
public static abstract | Effects |
java | apache__flink | flink-table/flink-table-common/src/test/java/org/apache/flink/table/types/extraction/ExtractionUtilsTest.java | {
"start": 7720,
"end": 7918
} | class ____ extends ClassBase2<CompletableFuture<Long>> {}
/**
* A test function that contains multi local variable blocks without initialization at first.
*/
public static | FutureClass |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/embeddable/EmbeddableOverrideTest.java | {
"start": 4439,
"end": 4963
} | class ____ {
@Id
@GeneratedValue
private Long id;
@NaturalId
private String name;
//Getters and setters are omitted for brevity
//end::embeddable-type-association-mapping-example[]
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
//tag::embeddable-type-association-mapping-example[]
}
//end::embeddable-type-association-mapping-example[]
}
| Country |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest-client-jackson/deployment/src/test/java/io/quarkus/rest/client/reactive/jackson/test/DifferentObjectMapperForClientAndServerTest.java | {
"start": 1266,
"end": 4105
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest TEST = new QuarkusUnitTest().withEmptyApplication();
@TestHTTPResource
URI uri;
MyClientUnwrappingRootElement clientUnwrappingRootElement;
MyClientNotUnwrappingRootElement clientNotUnwrappingRootElement;
@BeforeEach
public void setup() {
clientUnwrappingRootElement = QuarkusRestClientBuilder.newBuilder().baseUri(uri)
.build(MyClientUnwrappingRootElement.class);
clientNotUnwrappingRootElement = QuarkusRestClientBuilder.newBuilder().baseUri(uri)
.build(MyClientNotUnwrappingRootElement.class);
}
/**
* Because we have configured the server Object Mapper instance with:
* `objectMapper.enable(SerializationFeature.WRAP_ROOT_VALUE);`
*/
@Test
void serverShouldWrapRootElement() {
given().get("/server").then()
.statusCode(HttpStatus.SC_OK)
.body("Request.value", equalTo("good"));
}
/**
* Because MyClientUnwrappingRootElement is using `@RegisterProvider(ClientObjectMapperUnwrappingRootElement.class)` which
* is configured with: `.enable(DeserializationFeature.UNWRAP_ROOT_VALUE)`.
*/
@Test
void shouldClientUseCustomObjectMapperUnwrappingRootElement() {
AtomicLong count = ClientObjectMapperUnwrappingRootElement.COUNT;
assertEquals(0, count.get());
Request request = clientUnwrappingRootElement.get();
assertEquals("good", request.value);
assertEquals(1, count.get());
assertEquals("good", clientUnwrappingRootElement.get().value);
assertEquals("good", clientUnwrappingRootElement.get().value);
assertEquals("good", clientUnwrappingRootElement.get().value);
// count should not change as the resolution of the ObjectMapper should be cached
assertEquals(1, count.get());
}
/**
* Because MyClientNotUnwrappingRootElement uses `@ClientObjectMapper`
* which is configured with: `.disable(DeserializationFeature.UNWRAP_ROOT_VALUE)`.
*/
@Test
void shouldClientUseCustomObjectMapperNotUnwrappingRootElement() {
AtomicLong count = MyClientNotUnwrappingRootElement.CUSTOM_OBJECT_MAPPER_COUNT;
assertEquals(0, count.get());
Request request = clientNotUnwrappingRootElement.get();
assertNull(request.value);
assertEquals(1, count.get());
assertNull(clientNotUnwrappingRootElement.get().value);
assertNull(clientNotUnwrappingRootElement.get().value);
assertNull(clientNotUnwrappingRootElement.get().value);
// count should not change as the resolution of the ObjectMapper should be cached
assertEquals(1, count.get());
}
@Path("/server")
public static | DifferentObjectMapperForClientAndServerTest |
java | spring-projects__spring-boot | buildSrc/src/main/java/org/springframework/boot/build/antora/GenerateAntoraPlaybook.java | {
"start": 8177,
"end": 8297
} | class ____ {
@Input
@Optional
public abstract ListProperty<String> getStubs();
}
public abstract static | Xref |
java | google__auto | value/src/test/java/com/google/auto/value/processor/ExtensionTest.java | {
"start": 34417,
"end": 34578
} | class ____ extends EmptyExtension {
@Override
public boolean mustBeFinal(Context context) {
return false;
}
}
private static | NonFinalExtension |
java | quarkusio__quarkus | test-framework/junit5/src/main/java/io/quarkus/test/junit/DisableIfBuiltWithGraalVMNewerThan.java | {
"start": 280,
"end": 756
} | class ____ method should be disabled if the version of GraalVM used to build the native binary
* under test was newer than the supplied version.
*
* This annotation should only be used on a test classes annotated with {@link QuarkusIntegrationTest}.
* If it is used on other test classes, it will have no effect.
*/
@Target({ ElementType.TYPE, ElementType.METHOD })
@Retention(RetentionPolicy.RUNTIME)
@ExtendWith(DisableIfBuiltWithGraalVMNewerThanCondition.class)
public @ | or |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/dynamic/DynConstructors.java | {
"start": 6999,
"end": 8283
} | class ____ implements PrivilegedAction<Void> {
private Constructor<?> hidden;
private MakeAccessible(Constructor<?> hidden) {
this.hidden = hidden;
}
@Override
public Void run() {
hidden.setAccessible(true);
return null;
}
}
private static String formatProblems(Map<String, Throwable> problems) {
StringBuilder sb = new StringBuilder();
boolean first = true;
for (Map.Entry<String, Throwable> problem : problems.entrySet()) {
if (first) {
first = false;
} else {
sb.append("\n");
}
sb.append("\tMissing ")
.append(problem.getKey())
.append(" [")
.append(problem.getValue().getClass().getName())
.append(": ")
.append(problem.getValue().getMessage())
.append("]");
}
return sb.toString();
}
private static String methodName(Class<?> targetClass, Class<?>... types) {
StringBuilder sb = new StringBuilder();
sb.append(targetClass.getName()).append("(");
boolean first = true;
for (Class<?> type : types) {
if (first) {
first = false;
} else {
sb.append(",");
}
sb.append(type.getName());
}
sb.append(")");
return sb.toString();
}
}
| MakeAccessible |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestResourceInformation.java | {
"start": 1424,
"end": 4769
} | class ____ {
@Test
void testName() {
String name = "yarn.io/test";
ResourceInformation ri = ResourceInformation.newInstance(name);
assertEquals(name, ri.getName(), "Resource name incorrect");
}
@Test
void testUnits() {
String name = "yarn.io/test";
String units = "m";
ResourceInformation ri = ResourceInformation.newInstance(name, units);
assertEquals(name, ri.getName(), "Resource name incorrect");
assertEquals(units, ri.getUnits(), "Resource units incorrect");
units = "z";
try {
ResourceInformation.newInstance(name, units).setUnits(units);
fail(units + "is not a valid unit");
} catch (IllegalArgumentException ie) {
// do nothing
}
}
@Test
void testValue() {
String name = "yarn.io/test";
long value = 1L;
ResourceInformation ri = ResourceInformation.newInstance(name, value);
assertEquals(name, ri.getName(), "Resource name incorrect");
assertEquals(value, ri.getValue(), "Resource value incorrect");
}
@Test
void testResourceInformation() {
String name = "yarn.io/test";
long value = 1L;
String units = "m";
ResourceInformation ri =
ResourceInformation.newInstance(name, units, value);
assertEquals(name, ri.getName(), "Resource name incorrect");
assertEquals(value, ri.getValue(), "Resource value incorrect");
assertEquals(units, ri.getUnits(), "Resource units incorrect");
}
@Test
void testEqualsWithTagsAndAttributes() {
// Same tags but different order
ResourceInformation ri01 = ResourceInformation.newInstance("r1", "M", 100,
ResourceTypes.COUNTABLE, 0, 100,
ImmutableSet.of("A", "B"), null);
ResourceInformation ri02 = ResourceInformation.newInstance("r1", "M", 100,
ResourceTypes.COUNTABLE, 0, 100, ImmutableSet.of("B", "A"), null);
assertEquals(ri01, ri02);
// Different tags
ResourceInformation ri11 = ResourceInformation.newInstance("r1", "M", 100,
ResourceTypes.COUNTABLE, 0, 100, null, null);
ResourceInformation ri12 = ResourceInformation.newInstance("r1", "M", 100,
ResourceTypes.COUNTABLE, 0, 100, ImmutableSet.of("B", "A"), null);
assertNotEquals(ri11, ri12);
// Different attributes
ResourceInformation ri21 = ResourceInformation.newInstance("r1", "M", 100,
ResourceTypes.COUNTABLE, 0, 100, null,
ImmutableMap.of("A", "A1", "B", "B1"));
ResourceInformation ri22 = ResourceInformation.newInstance("r1", "M", 100,
ResourceTypes.COUNTABLE, 0, 100, null,
ImmutableMap.of("A", "A1", "B", "B2"));
assertNotEquals(ri21, ri22);
// No tags or attributes
ResourceInformation ri31 = ResourceInformation.newInstance("r1", "M", 100,
ResourceTypes.COUNTABLE, 0, 100, null, null);
ResourceInformation ri32 = ResourceInformation.newInstance("r1", "M", 100,
ResourceTypes.COUNTABLE, 0, 100, null, null);
assertEquals(ri31, ri32);
// Null tags/attributes same as empty ones
ResourceInformation ri41 = ResourceInformation.newInstance("r1", "M", 100,
ResourceTypes.COUNTABLE, 0, 100, ImmutableSet.of(), null);
ResourceInformation ri42 = ResourceInformation.newInstance("r1", "M", 100,
ResourceTypes.COUNTABLE, 0, 100, null, ImmutableMap.of());
assertEquals(ri41, ri42);
}
}
| TestResourceInformation |
java | eclipse-vertx__vert.x | vertx-core/src/test/java/io/vertx/tests/http/Http1xTest.java | {
"start": 2459,
"end": 57746
} | class ____ extends HttpTest {
public Http1xTest() {
super(HttpConfig.Http1x.DEFAULT);
}
@Override
protected VertxOptions getOptions() {
VertxOptions options = super.getOptions();
options.getAddressResolverOptions().setHostsValue(Buffer.buffer("" +
"127.0.0.1 localhost\n" +
"127.0.0.1 host0\n" +
"127.0.0.1 host1\n" +
"127.0.0.1 host2\n"));
return options;
}
@Test
public void testClientOptions() {
HttpClientOptions options = new HttpClientOptions();
assertEquals(NetworkOptions.DEFAULT_SEND_BUFFER_SIZE, options.getSendBufferSize());
int rand = TestUtils.randomPositiveInt();
assertEquals(options, options.setSendBufferSize(rand));
assertEquals(rand, options.getSendBufferSize());
assertIllegalArgumentException(() -> options.setSendBufferSize(0));
assertIllegalArgumentException(() -> options.setSendBufferSize(-123));
assertEquals(NetworkOptions.DEFAULT_RECEIVE_BUFFER_SIZE, options.getReceiveBufferSize());
rand = TestUtils.randomPositiveInt();
assertEquals(options, options.setReceiveBufferSize(rand));
assertEquals(rand, options.getReceiveBufferSize());
assertIllegalArgumentException(() -> options.setReceiveBufferSize(0));
assertIllegalArgumentException(() -> options.setReceiveBufferSize(-123));
assertTrue(options.isReuseAddress());
assertEquals(options, options.setReuseAddress(false));
assertFalse(options.isReuseAddress());
assertEquals(NetworkOptions.DEFAULT_TRAFFIC_CLASS, options.getTrafficClass());
rand = 23;
assertEquals(options, options.setTrafficClass(rand));
assertEquals(rand, options.getTrafficClass());
assertIllegalArgumentException(() -> options.setTrafficClass(-2));
assertIllegalArgumentException(() -> options.setTrafficClass(256));
assertTrue(options.isTcpNoDelay());
assertEquals(options, options.setTcpNoDelay(false));
assertFalse(options.isTcpNoDelay());
boolean tcpKeepAlive = false;
assertEquals(tcpKeepAlive, options.isTcpKeepAlive());
assertEquals(options, options.setTcpKeepAlive(!tcpKeepAlive));
assertEquals(!tcpKeepAlive, options.isTcpKeepAlive());
int soLinger = -1;
assertEquals(soLinger, options.getSoLinger());
rand = TestUtils.randomPositiveInt();
assertEquals(options, options.setSoLinger(rand));
assertEquals(rand, options.getSoLinger());
assertIllegalArgumentException(() -> options.setSoLinger(-2));
assertEquals(0, options.getIdleTimeout());
assertEquals(TimeUnit.SECONDS, options.getIdleTimeoutUnit());
assertEquals(options, options.setIdleTimeout(10));
assertEquals(options, options.setIdleTimeoutUnit(TimeUnit.MILLISECONDS));
assertEquals(10, options.getIdleTimeout());
assertEquals(TimeUnit.MILLISECONDS, options.getIdleTimeoutUnit());
assertIllegalArgumentException(() -> options.setIdleTimeout(-1));
assertFalse(options.isSsl());
assertEquals(options, options.setSsl(true));
assertTrue(options.isSsl());
assertNull(options.getKeyCertOptions());
JksOptions keyStoreOptions = new JksOptions().setPath(TestUtils.randomAlphaString(100)).setPassword(TestUtils.randomAlphaString(100));
assertEquals(options, options.setKeyCertOptions(keyStoreOptions));
assertEquals(keyStoreOptions, options.getKeyCertOptions());
assertNull(options.getTrustOptions());
JksOptions trustStoreOptions = new JksOptions().setPath(TestUtils.randomAlphaString(100)).setPassword(TestUtils.randomAlphaString(100));
assertEquals(options, options.setTrustOptions(trustStoreOptions));
assertEquals(trustStoreOptions, options.getTrustOptions());
assertFalse(options.isTrustAll());
assertEquals(options, options.setTrustAll(true));
assertTrue(options.isTrustAll());
assertTrue(options.isVerifyHost());
assertEquals(options, options.setVerifyHost(false));
assertFalse(options.isVerifyHost());
rand = TestUtils.randomPositiveInt();
assertTrue(options.isKeepAlive());
assertEquals(options, options.setKeepAlive(false));
assertFalse(options.isKeepAlive());
assertFalse(options.isPipelining());
assertEquals(options, options.setPipelining(true));
assertTrue(options.isPipelining());
assertEquals(HttpClientOptions.DEFAULT_PIPELINING_LIMIT, options.getPipeliningLimit());
rand = TestUtils.randomPositiveInt();
assertEquals(options, options.setPipeliningLimit(rand));
assertEquals(rand, options.getPipeliningLimit());
assertIllegalArgumentException(() -> options.setPipeliningLimit(0));
assertIllegalArgumentException(() -> options.setPipeliningLimit(-1));
assertEquals(HttpClientOptions.DEFAULT_HTTP2_MULTIPLEXING_LIMIT, options.getHttp2MultiplexingLimit());
rand = TestUtils.randomPositiveInt();
assertEquals(options, options.setHttp2MultiplexingLimit(rand));
assertEquals(rand, options.getHttp2MultiplexingLimit());
assertIllegalArgumentException(() -> options.setHttp2MultiplexingLimit(0));
assertEquals(options, options.setHttp2MultiplexingLimit(-1));
assertEquals(-1, options.getHttp2MultiplexingLimit());
assertEquals(HttpClientOptions.DEFAULT_HTTP2_CONNECTION_WINDOW_SIZE, options.getHttp2ConnectionWindowSize());
rand = TestUtils.randomPositiveInt();
assertEquals(options, options.setHttp2ConnectionWindowSize(rand));
assertEquals(rand, options.getHttp2ConnectionWindowSize());
assertEquals(options, options.setHttp2ConnectionWindowSize(-1));
assertEquals(-1, options.getHttp2ConnectionWindowSize());
assertEquals(HttpClientOptions.DEFAULT_HTTP2_UPGRADE_MAX_CONTENT_LENGTH, options.getHttp2UpgradeMaxContentLength());
rand = TestUtils.randomPositiveInt();
assertEquals(options, options.setHttp2UpgradeMaxContentLength(rand));
assertEquals(rand, options.getHttp2UpgradeMaxContentLength());
assertEquals(options, options.setHttp2UpgradeMaxContentLength(-1));
assertEquals(-1, options.getHttp2UpgradeMaxContentLength());
assertEquals(60000, options.getConnectTimeout());
rand = TestUtils.randomPositiveInt();
assertEquals(options, options.setConnectTimeout(rand));
assertEquals(rand, options.getConnectTimeout());
assertIllegalArgumentException(() -> options.setConnectTimeout(-2));
assertFalse(options.isDecompressionSupported());
assertEquals(options, options.setDecompressionSupported(true));
assertEquals(true, options.isDecompressionSupported());
assertTrue(options.getEnabledCipherSuites().isEmpty());
assertEquals(options, options.addEnabledCipherSuite("foo"));
assertEquals(options, options.addEnabledCipherSuite("bar"));
assertNotNull(options.getEnabledCipherSuites());
assertTrue(options.getEnabledCipherSuites().contains("foo"));
assertTrue(options.getEnabledCipherSuites().contains("bar"));
assertEquals(HttpVersion.HTTP_1_1, options.getProtocolVersion());
assertEquals(options, options.setProtocolVersion(HttpVersion.HTTP_1_0));
assertEquals(HttpVersion.HTTP_1_0, options.getProtocolVersion());
assertIllegalArgumentException(() -> options.setProtocolVersion(null));
assertEquals(HttpClientOptions.DEFAULT_MAX_CHUNK_SIZE, options.getMaxChunkSize());
assertEquals(options, options.setMaxChunkSize(100));
assertEquals(100, options.getMaxChunkSize());
assertEquals(HttpClientOptions.DEFAULT_MAX_INITIAL_LINE_LENGTH, options.getMaxInitialLineLength());
assertEquals(options, options.setMaxInitialLineLength(100));
assertEquals(100, options.getMaxInitialLineLength());
assertEquals(HttpClientOptions.DEFAULT_MAX_HEADER_SIZE, options.getMaxHeaderSize());
assertEquals(options, options.setMaxHeaderSize(100));
assertEquals(100, options.getMaxHeaderSize());
Http2Settings initialSettings = randomHttp2Settings();
assertEquals(new Http2Settings(), options.getInitialSettings());
assertEquals(options, options.setInitialSettings(initialSettings));
assertEquals(initialSettings, options.getInitialSettings());
assertEquals(false, options.isUseAlpn());
assertEquals(options, options.setUseAlpn(true));
assertEquals(true, options.isUseAlpn());
assertNull(options.getSslEngineOptions());
assertEquals(options, options.setSslEngineOptions(new JdkSSLEngineOptions()));
assertTrue(options.getSslEngineOptions() instanceof JdkSSLEngineOptions);
List<HttpVersion> alpnVersions = Collections.singletonList(HttpVersion.HTTP_1_1);
assertEquals(HttpClientOptions.DEFAULT_ALPN_VERSIONS, options.getAlpnVersions());
assertEquals(options, options.setAlpnVersions(alpnVersions));
assertEquals(alpnVersions, options.getAlpnVersions());
assertEquals(true, options.isHttp2ClearTextUpgrade());
assertEquals(options, options.setHttp2ClearTextUpgrade(false));
assertEquals(false, options.isHttp2ClearTextUpgrade());
assertEquals(null, options.getLocalAddress());
assertEquals(HttpClientOptions.DEFAULT_DECODER_INITIAL_BUFFER_SIZE, options.getDecoderInitialBufferSize());
assertEquals(options, options.setDecoderInitialBufferSize(256));
assertEquals(256, options.getDecoderInitialBufferSize());
assertIllegalArgumentException(() -> options.setDecoderInitialBufferSize(-1));
assertEquals(HttpClientOptions.DEFAULT_KEEP_ALIVE_TIMEOUT, options.getKeepAliveTimeout());
assertEquals(options, options.setKeepAliveTimeout(10));
assertEquals(10, options.getKeepAliveTimeout());
assertIllegalArgumentException(() -> options.setKeepAliveTimeout(-1));
assertEquals(HttpClientOptions.DEFAULT_HTTP2_KEEP_ALIVE_TIMEOUT, options.getHttp2KeepAliveTimeout());
assertEquals(options, options.setHttp2KeepAliveTimeout(10));
assertEquals(10, options.getHttp2KeepAliveTimeout());
assertIllegalArgumentException(() -> options.setHttp2KeepAliveTimeout(-1));
}
@Test
public void testServerOptions() {
HttpServerOptions options = new HttpServerOptions();
assertEquals(NetworkOptions.DEFAULT_SEND_BUFFER_SIZE, options.getSendBufferSize());
int rand = TestUtils.randomPositiveInt();
assertEquals(options, options.setSendBufferSize(rand));
assertEquals(rand, options.getSendBufferSize());
assertIllegalArgumentException(() -> options.setSendBufferSize(0));
assertIllegalArgumentException(() -> options.setSendBufferSize(-123));
assertEquals(NetworkOptions.DEFAULT_RECEIVE_BUFFER_SIZE, options.getReceiveBufferSize());
rand = TestUtils.randomPositiveInt();
assertEquals(options, options.setReceiveBufferSize(rand));
assertEquals(rand, options.getReceiveBufferSize());
assertIllegalArgumentException(() -> options.setReceiveBufferSize(0));
assertIllegalArgumentException(() -> options.setReceiveBufferSize(-123));
assertTrue(options.isReuseAddress());
assertEquals(options, options.setReuseAddress(false));
assertFalse(options.isReuseAddress());
assertEquals(NetworkOptions.DEFAULT_TRAFFIC_CLASS, options.getTrafficClass());
rand = 23;
assertEquals(options, options.setTrafficClass(rand));
assertEquals(rand, options.getTrafficClass());
assertIllegalArgumentException(() -> options.setTrafficClass(-2));
assertIllegalArgumentException(() -> options.setTrafficClass(256));
assertTrue(options.isTcpNoDelay());
assertEquals(options, options.setTcpNoDelay(false));
assertFalse(options.isTcpNoDelay());
boolean tcpKeepAlive = false;
assertEquals(tcpKeepAlive, options.isTcpKeepAlive());
assertEquals(options, options.setTcpKeepAlive(!tcpKeepAlive));
assertEquals(!tcpKeepAlive, options.isTcpKeepAlive());
int soLinger = -1;
assertEquals(soLinger, options.getSoLinger());
rand = TestUtils.randomPositiveInt();
assertEquals(options, options.setSoLinger(rand));
assertEquals(rand, options.getSoLinger());
assertIllegalArgumentException(() -> options.setSoLinger(-2));
assertEquals(0, options.getIdleTimeout());
assertEquals(options, options.setIdleTimeout(10));
assertEquals(10, options.getIdleTimeout());
assertIllegalArgumentException(() -> options.setIdleTimeout(-1));
assertFalse(options.isSsl());
assertEquals(options, options.setSsl(true));
assertTrue(options.isSsl());
assertNull(options.getKeyCertOptions());
JksOptions keyStoreOptions = new JksOptions().setPath(TestUtils.randomAlphaString(100)).setPassword(TestUtils.randomAlphaString(100));
assertEquals(options, options.setKeyCertOptions(keyStoreOptions));
assertEquals(keyStoreOptions, options.getKeyCertOptions());
assertNull(options.getTrustOptions());
JksOptions trustStoreOptions = new JksOptions().setPath(TestUtils.randomAlphaString(100)).setPassword(TestUtils.randomAlphaString(100));
assertEquals(options, options.setTrustOptions(trustStoreOptions));
assertEquals(trustStoreOptions, options.getTrustOptions());
assertEquals(-1, options.getAcceptBacklog());
rand = TestUtils.randomPositiveInt();
assertEquals(options, options.setAcceptBacklog(rand));
assertEquals(rand, options.getAcceptBacklog());
assertFalse(options.isCompressionSupported());
assertEquals(options, options.setCompressionSupported(true));
assertTrue(options.isCompressionSupported());
assertEquals(65536, options.getMaxWebSocketFrameSize());
rand = TestUtils.randomPositiveInt();
assertEquals(options, options.setMaxWebSocketFrameSize(rand));
assertEquals(rand, options.getMaxWebSocketFrameSize());
assertEquals(80, options.getPort());
assertEquals(options, options.setPort(1234));
assertEquals(1234, options.getPort());
assertIllegalArgumentException(() -> options.setPort(65536));
assertEquals("0.0.0.0", options.getHost());
String randString = TestUtils.randomUnicodeString(100);
assertEquals(options, options.setHost(randString));
assertEquals(randString, options.getHost());
assertNull(options.getWebSocketSubProtocols());
assertEquals(options, options.setWebSocketSubProtocols(Collections.singletonList("foo")));
assertEquals(Collections.singletonList("foo"), options.getWebSocketSubProtocols());
HttpServerOptions optionsCopy = new HttpServerOptions(options);
assertEquals(options.toJson(), optionsCopy.setWebSocketSubProtocols(options.getWebSocketSubProtocols()).toJson());
assertTrue(options.getEnabledCipherSuites().isEmpty());
assertEquals(options, options.addEnabledCipherSuite("foo"));
assertEquals(options, options.addEnabledCipherSuite("bar"));
assertNotNull(options.getEnabledCipherSuites());
assertTrue(options.getEnabledCipherSuites().contains("foo"));
assertTrue(options.getEnabledCipherSuites().contains("bar"));
assertFalse(options.isHandle100ContinueAutomatically());
assertEquals(options, options.setHandle100ContinueAutomatically(true));
assertTrue(options.isHandle100ContinueAutomatically());
assertEquals(false, options.isUseAlpn());
assertEquals(options, options.setUseAlpn(true));
assertEquals(true, options.isUseAlpn());
assertNull(options.getSslEngineOptions());
assertEquals(options, options.setSslEngineOptions(new JdkSSLEngineOptions()));
assertTrue(options.getSslEngineOptions() instanceof JdkSSLEngineOptions);
Http2Settings initialSettings = randomHttp2Settings();
assertEquals(new Http2Settings().setMaxConcurrentStreams(HttpServerOptions.DEFAULT_INITIAL_SETTINGS_MAX_CONCURRENT_STREAMS), options.getInitialSettings());
assertEquals(options, options.setInitialSettings(initialSettings));
assertEquals(initialSettings, options.getInitialSettings());
List<HttpVersion> alpnVersions = Collections.singletonList(HttpVersion.HTTP_1_1);
assertEquals(HttpServerOptions.DEFAULT_ALPN_VERSIONS, options.getAlpnVersions());
assertEquals(options, options.setAlpnVersions(alpnVersions));
assertEquals(alpnVersions, options.getAlpnVersions());
assertEquals(HttpClientOptions.DEFAULT_HTTP2_CONNECTION_WINDOW_SIZE, options.getHttp2ConnectionWindowSize());
rand = TestUtils.randomPositiveInt();
assertEquals(options, options.setHttp2ConnectionWindowSize(rand));
assertEquals(rand, options.getHttp2ConnectionWindowSize());
assertEquals(options, options.setHttp2ConnectionWindowSize(-1));
assertEquals(-1, options.getHttp2ConnectionWindowSize());
assertFalse(options.isDecompressionSupported());
assertEquals(options, options.setDecompressionSupported(true));
assertTrue(options.isDecompressionSupported());
assertEquals(HttpServerOptions.DEFAULT_DECODER_INITIAL_BUFFER_SIZE, options.getDecoderInitialBufferSize());
assertEquals(options, options.setDecoderInitialBufferSize(256));
assertEquals(256, options.getDecoderInitialBufferSize());
assertIllegalArgumentException(() -> options.setDecoderInitialBufferSize(-1));
}
@Test
public void testCopyClientOptions() {
HttpClientOptions options = new HttpClientOptions();
int sendBufferSize = TestUtils.randomPositiveInt();
int receiverBufferSize = TestUtils.randomPortInt();
Random rand = new Random();
boolean reuseAddress = rand.nextBoolean();
int trafficClass = TestUtils.randomByte() + 128;
boolean tcpNoDelay = rand.nextBoolean();
boolean tcpKeepAlive = rand.nextBoolean();
int soLinger = TestUtils.randomPositiveInt();
int idleTimeout = TestUtils.randomPositiveInt();
boolean ssl = rand.nextBoolean();
KeyCertOptions keyCertOptions = randomKeyCertOptions();
TrustOptions trustOptions = randomTrustOptions();
String enabledCipher = TestUtils.randomAlphaString(100);
int connectTimeout = TestUtils.randomPositiveInt();
boolean trustAll = rand.nextBoolean();
String crlPath = TestUtils.randomUnicodeString(100);
Buffer crlValue = TestUtils.randomBuffer(100);
int keepAliveTimeout = TestUtils.randomPositiveInt();
int http2KeepAliveTimeout = TestUtils.randomPositiveInt();
boolean verifyHost = rand.nextBoolean();
int maxPoolSize = TestUtils.randomPositiveInt();
boolean keepAlive = rand.nextBoolean();
boolean pipelining = rand.nextBoolean();
int pipeliningLimit = TestUtils.randomPositiveInt();
int http2MaxPoolSize = TestUtils.randomPositiveInt();
int http2MultiplexingLimit = TestUtils.randomPositiveInt();
int http2ConnectionWindowSize = TestUtils.randomPositiveInt();
int http2UpgradeMaxContentLength = TestUtils.randomPositiveInt();
boolean decompressionSupported = rand.nextBoolean();
HttpVersion protocolVersion = HttpVersion.HTTP_1_0;
int maxChunkSize = TestUtils.randomPositiveInt();
int maxInitialLineLength = TestUtils.randomPositiveInt();
int maxHeaderSize = TestUtils.randomPositiveInt();
int maxWaitQueueSize = TestUtils.randomPositiveInt();
Http2Settings initialSettings = randomHttp2Settings();
boolean useAlpn = TestUtils.randomBoolean();
SSLEngineOptions sslEngine = TestUtils.randomBoolean() ? new JdkSSLEngineOptions() : new OpenSSLEngineOptions();
List<HttpVersion> alpnVersions = Collections.singletonList(HttpVersion.values()[TestUtils.randomPositiveInt() % 3]);
boolean h2cUpgrade = TestUtils.randomBoolean();
boolean openSslSessionCacheEnabled = rand.nextBoolean();
boolean sendUnmaskedFrame = rand.nextBoolean();
String localAddress = TestUtils.randomAlphaString(10);
int decoderInitialBufferSize = TestUtils.randomPositiveInt();
options.setSendBufferSize(sendBufferSize);
options.setReceiveBufferSize(receiverBufferSize);
options.setReuseAddress(reuseAddress);
options.setTrafficClass(trafficClass);
options.setSsl(ssl);
options.setTcpNoDelay(tcpNoDelay);
options.setTcpKeepAlive(tcpKeepAlive);
options.setSoLinger(soLinger);
options.setIdleTimeout(idleTimeout);
options.setKeyCertOptions(keyCertOptions);
options.setTrustOptions(trustOptions);
options.addEnabledCipherSuite(enabledCipher);
options.setConnectTimeout(connectTimeout);
options.setTrustAll(trustAll);
options.addCrlPath(crlPath);
options.addCrlValue(crlValue);
options.setVerifyHost(verifyHost);
options.setKeepAlive(keepAlive);
options.setPipelining(pipelining);
options.setPipeliningLimit(pipeliningLimit);
options.setHttp2MultiplexingLimit(http2MultiplexingLimit);
options.setHttp2ConnectionWindowSize(http2ConnectionWindowSize);
options.setDecompressionSupported(decompressionSupported);
options.setProtocolVersion(protocolVersion);
options.setMaxChunkSize(maxChunkSize);
options.setMaxInitialLineLength(maxInitialLineLength);
options.setMaxHeaderSize(maxHeaderSize);
options.setInitialSettings(initialSettings);
options.setUseAlpn(useAlpn);
options.setSslEngineOptions(sslEngine);
options.setAlpnVersions(alpnVersions);
options.setHttp2ClearTextUpgrade(h2cUpgrade);
options.setLocalAddress(localAddress);
options.setDecoderInitialBufferSize(decoderInitialBufferSize);
options.setKeepAliveTimeout(keepAliveTimeout);
options.setHttp2KeepAliveTimeout(http2KeepAliveTimeout);
options.setHttp2UpgradeMaxContentLength(http2UpgradeMaxContentLength);
HttpClientOptions copy = new HttpClientOptions(options);
checkCopyHttpClientOptions(options, copy);
HttpClientOptions copy2 = new HttpClientOptions(options.toJson());
checkCopyHttpClientOptions(options, copy2);
}
/**
 * Verifies that {@code copy} is a faithful deep copy of {@code options}: identical JSON
 * representation, distinct key/trust option instances, and — for each supported trust
 * option type — the same trust content as the original.
 */
private void checkCopyHttpClientOptions(HttpClientOptions options, HttpClientOptions copy) {
  assertEquals(options.toJson(), copy.toJson());
  // The copy must not share mutable key/trust configuration objects with the source
  assertNotSame(options.getKeyCertOptions(), copy.getKeyCertOptions());
  assertNotSame(options.getTrustOptions(), copy.getTrustOptions());
  TrustOptions originalTrust = options.getTrustOptions();
  TrustOptions copiedTrust = copy.getTrustOptions();
  if (copiedTrust instanceof PemTrustOptions) {
    PemTrustOptions expected = (PemTrustOptions) originalTrust;
    PemTrustOptions actual = (PemTrustOptions) copiedTrust;
    assertEquals(expected.getCertValues(), actual.getCertValues());
  } else if (copiedTrust instanceof JksOptions) {
    JksOptions expected = (JksOptions) originalTrust;
    JksOptions actual = (JksOptions) copiedTrust;
    assertEquals(expected.getPath(), actual.getPath());
    assertEquals(expected.getPassword(), actual.getPassword());
    assertEquals(expected.getValue(), actual.getValue());
  } else if (copiedTrust instanceof PfxOptions) {
    PfxOptions expected = (PfxOptions) originalTrust;
    PfxOptions actual = (PfxOptions) copiedTrust;
    assertEquals(expected.getPath(), actual.getPath());
    assertEquals(expected.getPassword(), actual.getPassword());
    assertEquals(expected.getValue(), actual.getValue());
  }
}
// Decoding an empty JSON object must yield exactly the same configuration as the
// no-arg constructor defaults, property by property.
@Test
public void testDefaultClientOptionsJson() {
HttpClientOptions def = new HttpClientOptions();
HttpClientOptions json = new HttpClientOptions(new JsonObject());
assertEquals(def.isKeepAlive(), json.isKeepAlive());
assertEquals(def.isPipelining(), json.isPipelining());
assertEquals(def.getPipeliningLimit(), json.getPipeliningLimit());
assertEquals(def.getHttp2MultiplexingLimit(), json.getHttp2MultiplexingLimit());
assertEquals(def.getHttp2ConnectionWindowSize(), json.getHttp2ConnectionWindowSize());
assertEquals(def.getHttp2UpgradeMaxContentLength(), json.getHttp2UpgradeMaxContentLength());
assertEquals(def.isVerifyHost(), json.isVerifyHost());
assertEquals(def.isDecompressionSupported(), json.isDecompressionSupported());
assertEquals(def.isTrustAll(), json.isTrustAll());
assertEquals(def.getCrlPaths(), json.getCrlPaths());
assertEquals(def.getCrlValues(), json.getCrlValues());
assertEquals(def.getConnectTimeout(), json.getConnectTimeout());
assertEquals(def.isTcpNoDelay(), json.isTcpNoDelay());
assertEquals(def.isTcpKeepAlive(), json.isTcpKeepAlive());
assertEquals(def.getSoLinger(), json.getSoLinger());
assertEquals(def.isSsl(), json.isSsl());
assertEquals(def.getProtocolVersion(), json.getProtocolVersion());
assertEquals(def.getMaxChunkSize(), json.getMaxChunkSize());
assertEquals(def.getMaxInitialLineLength(), json.getMaxInitialLineLength());
assertEquals(def.getMaxHeaderSize(), json.getMaxHeaderSize());
assertEquals(def.getInitialSettings(), json.getInitialSettings());
assertEquals(def.isUseAlpn(), json.isUseAlpn());
assertEquals(def.getSslEngineOptions(), json.getSslEngineOptions());
assertEquals(def.getAlpnVersions(), json.getAlpnVersions());
assertEquals(def.isHttp2ClearTextUpgrade(), json.isHttp2ClearTextUpgrade());
assertEquals(def.getLocalAddress(), json.getLocalAddress());
assertEquals(def.getDecoderInitialBufferSize(), json.getDecoderInitialBufferSize());
assertEquals(def.getKeepAliveTimeout(), json.getKeepAliveTimeout());
assertEquals(def.getHttp2KeepAliveTimeout(), json.getHttp2KeepAliveTimeout());
}
// Round-trip test: decode a fully-populated JSON document into HttpClientOptions and
// verify every property, then exercise the alternative keystore/truststore JSON shapes
// and finally the rejection of an invalid protocol version.
@Test
public void testClientOptionsJson() {
  // Randomized values for every JSON-configurable property.
  int sendBufferSize = TestUtils.randomPositiveInt();
  int receiverBufferSize = TestUtils.randomPortInt();
  Random rand = new Random();
  boolean reuseAddress = rand.nextBoolean();
  int trafficClass = TestUtils.randomByte() + 128;
  boolean tcpNoDelay = rand.nextBoolean();
  boolean tcpKeepAlive = rand.nextBoolean();
  int soLinger = TestUtils.randomPositiveInt();
  int idleTimeout = TestUtils.randomPositiveInt();
  boolean ssl = rand.nextBoolean();
  JksOptions keyStoreOptions = new JksOptions();
  String ksPassword = TestUtils.randomAlphaString(100);
  keyStoreOptions.setPassword(ksPassword);
  String ksPath = TestUtils.randomAlphaString(100);
  keyStoreOptions.setPath(ksPath);
  JksOptions trustStoreOptions = new JksOptions();
  String tsPassword = TestUtils.randomAlphaString(100);
  trustStoreOptions.setPassword(tsPassword);
  String tsPath = TestUtils.randomAlphaString(100);
  trustStoreOptions.setPath(tsPath);
  String enabledCipher = TestUtils.randomAlphaString(100);
  int connectTimeout = TestUtils.randomPositiveInt();
  boolean trustAll = rand.nextBoolean();
  String crlPath = TestUtils.randomUnicodeString(100);
  boolean verifyHost = rand.nextBoolean();
  int maxPoolSize = TestUtils.randomPositiveInt();
  boolean keepAlive = rand.nextBoolean();
  boolean pipelining = rand.nextBoolean();
  int pipeliningLimit = TestUtils.randomPositiveInt();
  int http2MaxPoolSize = TestUtils.randomPositiveInt();
  int http2MultiplexingLimit = TestUtils.randomPositiveInt();
  int http2ConnectionWindowSize = TestUtils.randomPositiveInt();
  int http2UpgradeMaxContentLength = TestUtils.randomPositiveInt();
  boolean decompressionSupported = rand.nextBoolean();
  HttpVersion protocolVersion = HttpVersion.HTTP_1_1;
  int maxChunkSize = TestUtils.randomPositiveInt();
  int maxInitialLineLength = TestUtils.randomPositiveInt();
  int maxHeaderSize = TestUtils.randomPositiveInt();
  int maxWaitQueueSize = TestUtils.randomPositiveInt();
  Http2Settings initialSettings = randomHttp2Settings();
  boolean useAlpn = TestUtils.randomBoolean();
  String sslEngine = TestUtils.randomBoolean() ? "jdkSslEngineOptions" : "openSslEngineOptions";
  List<HttpVersion> alpnVersions = Collections.singletonList(HttpVersion.values()[TestUtils.randomPositiveInt() % 3]);
  boolean h2cUpgrade = rand.nextBoolean();
  boolean openSslSessionCacheEnabled = rand.nextBoolean();
  String localAddress = TestUtils.randomAlphaString(10);
  int decoderInitialBufferSize = TestUtils.randomPositiveInt();
  int keepAliveTimeout = TestUtils.randomPositiveInt();
  int http2KeepAliveTimeout = TestUtils.randomPositiveInt();
  // Build the JSON document the options are decoded from.
  JsonObject json = new JsonObject();
  json.put("sendBufferSize", sendBufferSize)
    .put("receiveBufferSize", receiverBufferSize)
    .put("reuseAddress", reuseAddress)
    .put("trafficClass", trafficClass)
    .put("tcpNoDelay", tcpNoDelay)
    .put("tcpKeepAlive", tcpKeepAlive)
    .put("soLinger", soLinger)
    .put("idleTimeout", idleTimeout)
    .put("ssl", ssl)
    .put("enabledCipherSuites", new JsonArray().add(enabledCipher))
    .put("connectTimeout", connectTimeout)
    .put("trustAll", trustAll)
    .put("crlPaths", new JsonArray().add(crlPath))
    .put("keyStoreOptions", new JsonObject().put("password", ksPassword).put("path", ksPath))
    .put("trustStoreOptions", new JsonObject().put("password", tsPassword).put("path", tsPath))
    .put("verifyHost", verifyHost)
    .put("maxPoolSize", maxPoolSize)
    .put("keepAlive", keepAlive)
    .put("pipelining", pipelining)
    .put("pipeliningLimit", pipeliningLimit)
    .put("http2MaxPoolSize", http2MaxPoolSize)
    .put("http2MultiplexingLimit", http2MultiplexingLimit)
    .put("http2ConnectionWindowSize", http2ConnectionWindowSize)
    .put("http2UpgradeMaxContentLength", http2UpgradeMaxContentLength)
    .put("decompressionSupported", decompressionSupported)
    .put("protocolVersion", protocolVersion.name())
    .put("maxChunkSize", maxChunkSize)
    .put("maxInitialLineLength", maxInitialLineLength)
    .put("maxHeaderSize", maxHeaderSize)
    .put("maxWaitQueueSize", maxWaitQueueSize)
    .put("initialSettings", new JsonObject()
      .put("pushEnabled", initialSettings.isPushEnabled())
      .put("headerTableSize", initialSettings.getHeaderTableSize())
      .put("maxHeaderListSize", initialSettings.getMaxHeaderListSize())
      .put("maxConcurrentStreams", initialSettings.getMaxConcurrentStreams())
      .put("initialWindowSize", initialSettings.getInitialWindowSize())
      .put("maxFrameSize", initialSettings.getMaxFrameSize()))
    .put("useAlpn", useAlpn)
    .put(sslEngine, new JsonObject())
    .put("alpnVersions", new JsonArray().add(alpnVersions.get(0).name()))
    .put("http2ClearTextUpgrade", h2cUpgrade)
    .put("openSslSessionCacheEnabled", openSslSessionCacheEnabled)
    .put("localAddress", localAddress)
    .put("decoderInitialBufferSize", decoderInitialBufferSize)
    .put("keepAliveTimeout", keepAliveTimeout)
    .put("http2KeepAliveTimeout", http2KeepAliveTimeout);
  // Decode and verify every property against the randomized inputs.
  HttpClientOptions options = new HttpClientOptions(json);
  assertEquals(sendBufferSize, options.getSendBufferSize());
  assertEquals(receiverBufferSize, options.getReceiveBufferSize());
  assertEquals(reuseAddress, options.isReuseAddress());
  assertEquals(trafficClass, options.getTrafficClass());
  assertEquals(tcpKeepAlive, options.isTcpKeepAlive());
  assertEquals(tcpNoDelay, options.isTcpNoDelay());
  assertEquals(soLinger, options.getSoLinger());
  assertEquals(idleTimeout, options.getIdleTimeout());
  assertEquals(ssl, options.isSsl());
  assertNotSame(keyStoreOptions, options.getKeyCertOptions());
  assertEquals(ksPassword, ((JksOptions) options.getKeyCertOptions()).getPassword());
  assertEquals(ksPath, ((JksOptions) options.getKeyCertOptions()).getPath());
  assertNotSame(trustStoreOptions, options.getTrustOptions());
  assertEquals(tsPassword, ((JksOptions) options.getTrustOptions()).getPassword());
  assertEquals(tsPath, ((JksOptions) options.getTrustOptions()).getPath());
  assertEquals(1, options.getEnabledCipherSuites().size());
  assertTrue(options.getEnabledCipherSuites().contains(enabledCipher));
  assertEquals(connectTimeout, options.getConnectTimeout());
  assertEquals(trustAll, options.isTrustAll());
  assertEquals(1, options.getCrlPaths().size());
  assertEquals(crlPath, options.getCrlPaths().get(0));
  assertEquals(verifyHost, options.isVerifyHost());
  assertEquals(keepAlive, options.isKeepAlive());
  assertEquals(pipelining, options.isPipelining());
  assertEquals(pipeliningLimit, options.getPipeliningLimit());
  assertEquals(http2MultiplexingLimit, options.getHttp2MultiplexingLimit());
  assertEquals(http2ConnectionWindowSize, options.getHttp2ConnectionWindowSize());
  assertEquals(http2UpgradeMaxContentLength, options.getHttp2UpgradeMaxContentLength());
  assertEquals(decompressionSupported, options.isDecompressionSupported());
  assertEquals(protocolVersion, options.getProtocolVersion());
  assertEquals(maxChunkSize, options.getMaxChunkSize());
  assertEquals(maxInitialLineLength, options.getMaxInitialLineLength());
  assertEquals(maxHeaderSize, options.getMaxHeaderSize());
  assertEquals(initialSettings, options.getInitialSettings());
  assertEquals(useAlpn, options.isUseAlpn());
  switch (sslEngine) {
    case "jdkSslEngineOptions":
      assertTrue(options.getSslEngineOptions() instanceof JdkSSLEngineOptions);
      break;
    case "openSslEngineOptions":
      assertTrue(options.getSslEngineOptions() instanceof OpenSSLEngineOptions);
      break;
    default:
      fail();
      break;
  }
  assertEquals(alpnVersions, options.getAlpnVersions());
  assertEquals(h2cUpgrade, options.isHttp2ClearTextUpgrade());
  assertEquals(localAddress, options.getLocalAddress());
  assertEquals(decoderInitialBufferSize, options.getDecoderInitialBufferSize());
  // Fix: these two assertions previously ran at the very end of the method, after
  // 'options' had been reassigned by the keystore-type checks below, so they were
  // asserting against the wrong instance. Assert them on the instance decoded from
  // the original JSON document, together with the other property checks.
  assertEquals(keepAliveTimeout, options.getKeepAliveTimeout());
  assertEquals(http2KeepAliveTimeout, options.getHttp2KeepAliveTimeout());
  // Test other keystore/truststore types
  json.remove("keyStoreOptions");
  json.remove("trustStoreOptions");
  json.put("pfxKeyCertOptions", new JsonObject().put("password", ksPassword))
    .put("pfxTrustOptions", new JsonObject().put("password", tsPassword));
  options = new HttpClientOptions(json);
  assertTrue(options.getTrustOptions() instanceof PfxOptions);
  assertTrue(options.getKeyCertOptions() instanceof PfxOptions);
  json.remove("pfxKeyCertOptions");
  json.remove("pfxTrustOptions");
  json.put("pemKeyCertOptions", new JsonObject())
    .put("pemTrustOptions", new JsonObject());
  options = new HttpClientOptions(json);
  assertTrue(options.getTrustOptions() instanceof PemTrustOptions);
  assertTrue(options.getKeyCertOptions() instanceof PemKeyCertOptions);
  // Test invalid protocolVersion
  json.put("protocolVersion", "invalidProtocolVersion");
  assertIllegalArgumentException(() -> new HttpClientOptions(json));
}
// Populates an HttpServerOptions with randomized values, then checks that both the
// copy constructor and a JSON round-trip (toJson -> constructor) produce a faithful
// deep copy, via checkCopyHttpServerOptions.
@Test
public void testCopyServerOptions() {
HttpServerOptions options = new HttpServerOptions();
// Randomized values for every copyable property.
int sendBufferSize = TestUtils.randomPositiveInt();
int receiverBufferSize = TestUtils.randomPortInt();
Random rand = new Random();
boolean reuseAddress = rand.nextBoolean();
int trafficClass = TestUtils.randomByte() + 128;
boolean tcpNoDelay = rand.nextBoolean();
boolean tcpKeepAlive = rand.nextBoolean();
int soLinger = TestUtils.randomPositiveInt();
int idleTimeout = TestUtils.randomPositiveInt();
boolean ssl = rand.nextBoolean();
KeyCertOptions keyCertOptions = randomKeyCertOptions();
TrustOptions trustOptions = randomTrustOptions();
String enabledCipher = TestUtils.randomAlphaString(100);
String crlPath = TestUtils.randomUnicodeString(100);
Buffer crlValue = TestUtils.randomBuffer(100);
int port = 1234;
String host = TestUtils.randomAlphaString(100);
int acceptBacklog = TestUtils.randomPortInt();
boolean compressionSupported = rand.nextBoolean();
int maxWebSocketFrameSize = TestUtils.randomPositiveInt();
List<String> wsSubProtocols = Arrays.asList(TestUtils.randomAlphaString(10));
boolean is100ContinueHandledAutomatically = rand.nextBoolean();
int maxChunkSize = rand.nextInt(10000);
Http2Settings initialSettings = randomHttp2Settings();
boolean useAlpn = TestUtils.randomBoolean();
int http2ConnectionWindowSize = TestUtils.randomInt();
boolean openSslSessionCacheEnabled = rand.nextBoolean();
SSLEngineOptions sslEngine = TestUtils.randomBoolean() ? new JdkSSLEngineOptions() : new OpenSSLEngineOptions();
List<HttpVersion> alpnVersions = Collections.singletonList(HttpVersion.values()[TestUtils.randomPositiveInt() % 3]);
boolean decompressionSupported = rand.nextBoolean();
boolean acceptUnmaskedFrames = rand.nextBoolean();
int decoderInitialBufferSize = TestUtils.randomPositiveInt();
// Apply all the values to the source options.
options.setSendBufferSize(sendBufferSize);
options.setReceiveBufferSize(receiverBufferSize);
options.setReuseAddress(reuseAddress);
options.setTrafficClass(trafficClass);
options.setTcpNoDelay(tcpNoDelay);
options.setTcpKeepAlive(tcpKeepAlive);
options.setSoLinger(soLinger);
options.setIdleTimeout(idleTimeout);
options.setSsl(ssl);
options.setKeyCertOptions(keyCertOptions);
options.setTrustOptions(trustOptions);
options.addEnabledCipherSuite(enabledCipher);
options.addCrlPath(crlPath);
options.addCrlValue(crlValue);
options.setPort(port);
options.setHost(host);
options.setAcceptBacklog(acceptBacklog);
options.setCompressionSupported(compressionSupported);
options.setMaxWebSocketFrameSize(maxWebSocketFrameSize);
options.setWebSocketSubProtocols(wsSubProtocols);
options.setHandle100ContinueAutomatically(is100ContinueHandledAutomatically);
options.setMaxChunkSize(maxChunkSize);
options.setUseAlpn(useAlpn);
options.setHttp2ConnectionWindowSize(http2ConnectionWindowSize);
options.setSslEngineOptions(sslEngine);
options.setInitialSettings(initialSettings);
options.setAlpnVersions(alpnVersions);
options.setDecompressionSupported(decompressionSupported);
options.setAcceptUnmaskedFrames(acceptUnmaskedFrames);
options.setDecoderInitialBufferSize(decoderInitialBufferSize);
// Copy via the copy constructor and via JSON; both must be equivalent deep copies.
HttpServerOptions copy = new HttpServerOptions(options);
checkCopyHttpServerOptions(options, copy);
HttpServerOptions copy2 = new HttpServerOptions(options.toJson());
checkCopyHttpServerOptions(options, copy2);
}
/**
 * Verifies that {@code copy} is a faithful deep copy of {@code options}: identical JSON
 * representation, distinct key/trust option instances, and — for each supported trust
 * option type — the same trust content as the original.
 */
private void checkCopyHttpServerOptions(HttpServerOptions options, HttpServerOptions copy) {
  assertEquals(options.toJson(), copy.toJson());
  // The copy must not share mutable key/trust configuration objects with the source
  assertNotSame(options.getKeyCertOptions(), copy.getKeyCertOptions());
  assertNotSame(options.getTrustOptions(), copy.getTrustOptions());
  TrustOptions originalTrust = options.getTrustOptions();
  TrustOptions copiedTrust = copy.getTrustOptions();
  if (copiedTrust instanceof PemTrustOptions) {
    PemTrustOptions expected = (PemTrustOptions) originalTrust;
    PemTrustOptions actual = (PemTrustOptions) copiedTrust;
    assertEquals(expected.getCertValues(), actual.getCertValues());
  } else if (copiedTrust instanceof JksOptions) {
    JksOptions expected = (JksOptions) originalTrust;
    JksOptions actual = (JksOptions) copiedTrust;
    assertEquals(expected.getPath(), actual.getPath());
    assertEquals(expected.getPassword(), actual.getPassword());
    assertEquals(expected.getValue(), actual.getValue());
  } else if (copiedTrust instanceof PfxOptions) {
    PfxOptions expected = (PfxOptions) originalTrust;
    PfxOptions actual = (PfxOptions) copiedTrust;
    assertEquals(expected.getPath(), actual.getPath());
    assertEquals(expected.getPassword(), actual.getPassword());
    assertEquals(expected.getValue(), actual.getValue());
  }
}
// Decoding an empty JSON object must yield exactly the same configuration as the
// no-arg constructor defaults, property by property.
@Test
@SuppressWarnings("deprecation")
public void testDefaultServerOptionsJson() {
HttpServerOptions def = new HttpServerOptions();
HttpServerOptions json = new HttpServerOptions(new JsonObject());
assertEquals(def.getMaxWebSocketFrameSize(), json.getMaxWebSocketFrameSize());
assertEquals(def.getWebSocketSubProtocols(), json.getWebSocketSubProtocols());
assertEquals(def.isCompressionSupported(), json.isCompressionSupported());
assertEquals(def.getCrlPaths(), json.getCrlPaths());
assertEquals(def.getCrlValues(), json.getCrlValues());
assertEquals(def.getAcceptBacklog(), json.getAcceptBacklog());
assertEquals(def.getPort(), json.getPort());
assertEquals(def.getHost(), json.getHost());
assertEquals(def.isTcpNoDelay(), json.isTcpNoDelay());
assertEquals(def.isTcpKeepAlive(), json.isTcpKeepAlive());
assertEquals(def.getSoLinger(), json.getSoLinger());
assertEquals(def.isSsl(), json.isSsl());
assertEquals(def.isHandle100ContinueAutomatically(), json.isHandle100ContinueAutomatically())
assertEquals(def.getMaxChunkSize(), json.getMaxChunkSize());
assertEquals(def.getMaxInitialLineLength(), json.getMaxInitialLineLength());
assertEquals(def.getMaxHeaderSize(), json.getMaxHeaderSize());
assertEquals(def.getInitialSettings(), json.getInitialSettings());
assertEquals(def.isUseAlpn(), json.isUseAlpn());
assertEquals(def.getSslEngineOptions(), json.getSslEngineOptions());
assertEquals(def.getAlpnVersions(), json.getAlpnVersions());
assertEquals(def.getHttp2ConnectionWindowSize(), json.getHttp2ConnectionWindowSize());
assertEquals(def.isDecompressionSupported(), json.isDecompressionSupported());
assertEquals(def.isAcceptUnmaskedFrames(), json.isAcceptUnmaskedFrames());
assertEquals(def.getDecoderInitialBufferSize(), json.getDecoderInitialBufferSize());
}
// Round-trip test: decode a fully-populated JSON document into HttpServerOptions and
// verify every property, then exercise the alternative keystore/truststore JSON shapes.
@Test
public void testServerOptionsJson() {
// Randomized values for every JSON-configurable property.
int sendBufferSize = TestUtils.randomPositiveInt();
int receiverBufferSize = TestUtils.randomPortInt();
Random rand = new Random();
boolean reuseAddress = rand.nextBoolean();
int trafficClass = TestUtils.randomByte() + 128;
boolean tcpNoDelay = rand.nextBoolean();
boolean tcpKeepAlive = rand.nextBoolean();
int soLinger = TestUtils.randomPositiveInt();
int idleTimeout = TestUtils.randomPositiveInt();
boolean ssl = rand.nextBoolean();
JksOptions keyStoreOptions = new JksOptions();
String ksPassword = TestUtils.randomAlphaString(100);
keyStoreOptions.setPassword(ksPassword);
String ksPath = TestUtils.randomAlphaString(100);
keyStoreOptions.setPath(ksPath);
JksOptions trustStoreOptions = new JksOptions();
String tsPassword = TestUtils.randomAlphaString(100);
trustStoreOptions.setPassword(tsPassword);
String tsPath = TestUtils.randomAlphaString(100);
trustStoreOptions.setPath(tsPath);
String enabledCipher = TestUtils.randomAlphaString(100);
String crlPath = TestUtils.randomUnicodeString(100);
int port = 1234;
String host = TestUtils.randomAlphaString(100);
int acceptBacklog = TestUtils.randomPortInt();
boolean compressionSupported = rand.nextBoolean();
int maxWebSocketFrameSize = TestUtils.randomPositiveInt();
List<String> wsSubProtocols = Collections.singletonList(randomAlphaString(10));
boolean is100ContinueHandledAutomatically = rand.nextBoolean();
int maxChunkSize = rand.nextInt(10000);
int maxInitialLineLength = rand.nextInt(10000);
int maxHeaderSize = rand.nextInt(10000);
HttpVersion enabledProtocol = HttpVersion.values()[rand.nextInt(HttpVersion.values().length)];
Http2Settings initialSettings = TestUtils.randomHttp2Settings();
boolean useAlpn = TestUtils.randomBoolean();
int http2ConnectionWindowSize = TestUtils.randomInt();
String sslEngine = TestUtils.randomBoolean() ? "jdkSslEngineOptions" : "openSslEngineOptions";
List<HttpVersion> alpnVersions = Collections.singletonList(HttpVersion.values()[TestUtils.randomPositiveInt() % 3]);
boolean openSslSessionCacheEnabled = TestUtils.randomBoolean();
boolean decompressionSupported = TestUtils.randomBoolean();
boolean acceptUnmaskedFrames = TestUtils.randomBoolean();
int decoderInitialBufferSize = TestUtils.randomPositiveInt();
// Build the JSON document the options are decoded from.
JsonObject json = new JsonObject();
json.put("sendBufferSize", sendBufferSize)
.put("receiveBufferSize", receiverBufferSize)
.put("reuseAddress", reuseAddress)
.put("trafficClass", trafficClass)
.put("tcpNoDelay", tcpNoDelay)
.put("tcpKeepAlive", tcpKeepAlive)
.put("soLinger", soLinger)
.put("idleTimeout", idleTimeout)
.put("ssl", ssl)
.put("enabledCipherSuites", new JsonArray().add(enabledCipher))
.put("crlPaths", new JsonArray().add(crlPath))
.put("keyStoreOptions", new JsonObject().put("password", ksPassword).put("path", ksPath))
.put("trustStoreOptions", new JsonObject().put("password", tsPassword).put("path", tsPath))
.put("port", port)
.put("host", host)
.put("acceptBacklog", acceptBacklog)
.put("compressionSupported", compressionSupported)
.put("maxWebSocketFrameSize", maxWebSocketFrameSize)
.put("webSocketSubProtocols", wsSubProtocols)
.put("handle100ContinueAutomatically", is100ContinueHandledAutomatically)
.put("maxChunkSize", maxChunkSize)
.put("maxInitialLineLength", maxInitialLineLength)
.put("maxHeaderSize", maxHeaderSize)
.put("enabledProtocols", new JsonArray().add(enabledProtocol.name()))
.put("initialSettings", new JsonObject()
.put("pushEnabled", initialSettings.isPushEnabled())
.put("headerTableSize", initialSettings.getHeaderTableSize())
.put("maxHeaderListSize", initialSettings.getMaxHeaderListSize())
.put("maxConcurrentStreams", initialSettings.getMaxConcurrentStreams())
.put("initialWindowSize", initialSettings.getInitialWindowSize())
.put("maxFrameSize", initialSettings.getMaxFrameSize()))
.put("useAlpn", useAlpn)
.put("http2ConnectionWindowSize", http2ConnectionWindowSize)
.put(sslEngine, new JsonObject())
.put("alpnVersions", new JsonArray().add(alpnVersions.get(0).name()))
.put("openSslSessionCacheEnabled", openSslSessionCacheEnabled)
.put("decompressionSupported", decompressionSupported)
.put("acceptUnmaskedFrames", acceptUnmaskedFrames)
.put("decoderInitialBufferSize", decoderInitialBufferSize);
// Decode and verify every property against the randomized inputs.
HttpServerOptions options = new HttpServerOptions(json);
assertEquals(sendBufferSize, options.getSendBufferSize());
assertEquals(receiverBufferSize, options.getReceiveBufferSize());
assertEquals(reuseAddress, options.isReuseAddress());
assertEquals(trafficClass, options.getTrafficClass());
assertEquals(tcpKeepAlive, options.isTcpKeepAlive());
assertEquals(tcpNoDelay, options.isTcpNoDelay());
assertEquals(soLinger, options.getSoLinger());
assertEquals(idleTimeout, options.getIdleTimeout());
assertEquals(ssl, options.isSsl());
assertNotSame(keyStoreOptions, options.getKeyCertOptions());
assertEquals(ksPassword, ((JksOptions) options.getKeyCertOptions()).getPassword());
assertEquals(ksPath, ((JksOptions) options.getKeyCertOptions()).getPath());
assertNotSame(trustStoreOptions, options.getTrustOptions());
assertEquals(tsPassword, ((JksOptions) options.getTrustOptions()).getPassword());
assertEquals(tsPath, ((JksOptions) options.getTrustOptions()).getPath());
assertEquals(1, options.getEnabledCipherSuites().size());
assertTrue(options.getEnabledCipherSuites().contains(enabledCipher));
assertEquals(1, options.getCrlPaths().size());
assertEquals(crlPath, options.getCrlPaths().get(0));
assertEquals(port, options.getPort());
assertEquals(host, options.getHost());
assertEquals(acceptBacklog, options.getAcceptBacklog());
assertEquals(compressionSupported, options.isCompressionSupported());
assertEquals(maxWebSocketFrameSize, options.getMaxWebSocketFrameSize());
assertEquals(wsSubProtocols, options.getWebSocketSubProtocols());
assertEquals(is100ContinueHandledAutomatically, options.isHandle100ContinueAutomatically());
assertEquals(maxChunkSize, options.getMaxChunkSize());
assertEquals(maxInitialLineLength, options.getMaxInitialLineLength());
assertEquals(maxHeaderSize, options.getMaxHeaderSize());
assertEquals(initialSettings, options.getInitialSettings());
assertEquals(useAlpn, options.isUseAlpn());
assertEquals(http2ConnectionWindowSize, options.getHttp2ConnectionWindowSize());
// The SSL engine type in the JSON determines the decoded engine options class.
switch (sslEngine) {
case "jdkSslEngineOptions":
assertTrue(options.getSslEngineOptions() instanceof JdkSSLEngineOptions);
break;
case "openSslEngineOptions":
assertTrue(options.getSslEngineOptions() instanceof OpenSSLEngineOptions);
break;
default:
fail();
break;
}
assertEquals(alpnVersions, options.getAlpnVersions());
assertEquals(decompressionSupported, options.isDecompressionSupported());
assertEquals(acceptUnmaskedFrames, options.isAcceptUnmaskedFrames());
assertEquals(decoderInitialBufferSize, options.getDecoderInitialBufferSize());
// Test other keystore/truststore types
json.remove("keyStoreOptions");
json.remove("trustStoreOptions");
json.put("pfxKeyCertOptions", new JsonObject().put("password", ksPassword))
.put("pfxTrustOptions", new JsonObject().put("password", tsPassword));
options = new HttpServerOptions(json);
assertTrue(options.getTrustOptions() instanceof PfxOptions);
assertTrue(options.getKeyCertOptions() instanceof PfxOptions);
json.remove("pfxKeyCertOptions");
json.remove("pfxTrustOptions");
json.put("pemKeyCertOptions", new JsonObject())
.put("pemTrustOptions", new JsonObject());
options = new HttpServerOptions(json);
assertTrue(options.getTrustOptions() instanceof PemTrustOptions);
assertTrue(options.getKeyCertOptions() instanceof PemKeyCertOptions);
}
// Delegates to the parameterized variant with a zero delay argument.
@Test
public void testCloseHandlerNotCalledWhenConnectionClosedAfterEnd() throws Exception {
testCloseHandlerNotCalledWhenConnectionClosedAfterEnd(0);
}
// Extra tests
// Sends many pipelined requests over a single keep-alive connection and verifies that
// responses come back in request order even though the server replies after random
// delays.
@Test
public void testPipeliningOrder() throws Exception {
Assume.assumeFalse(TRANSPORT == Transport.IO_URING);
client.close();
// Single connection + pipelining forces all requests onto one ordered pipeline.
client = vertx.createHttpClient(new HttpClientOptions().setKeepAlive(true).setPipelining(true), new PoolOptions().setHttp1MaxSize(1));
int requests = 100;
AtomicInteger reqCount = new AtomicInteger(0);
server.requestHandler(req -> {
assertSame(Vertx.currentContext(), ((HttpServerRequestInternal)req).context());
// Requests must arrive in the order they were written by the client.
int theCount = reqCount.get();
assertEquals(theCount, Integer.parseInt(req.headers().get("count")));
reqCount.incrementAndGet();
req.response().setChunked(true);
req.bodyHandler(buff -> {
assertEquals("This is content " + theCount, buff.toString());
// We write the response back after a random time to increase the chances of responses written in the
// wrong order if we didn't implement pipelining correctly
vertx.setTimer(1 + (long) (10 * Math.random()), id -> {
req.response().headers().set("count", String.valueOf(theCount));
req.response().write(buff);
req.response().end();
});
});
});
waitFor(requests);
startServer(testAddress);
vertx.setTimer(500, id -> {
for (int count = 0; count < requests; count++) {
int theCount = count;
client
.request(new RequestOptions(requestOptions)
.setMethod(PUT)
.putHeader("count", String.valueOf(theCount)))
.compose(req -> req
.send(Buffer.buffer("This is content " + theCount))
// Each response must echo back the count of its own request.
.expecting(that(resp -> assertEquals(theCount, Integer.parseInt(resp.headers().get("count")))))
.compose(resp -> resp
.body()
.expecting(that(buff -> assertEquals("This is content " + theCount, buff.toString())))))
.onComplete(onSuccess(v -> complete()));
}
});
await();
}
// Verifies the client never has more than 'pipeliningLimit' in-flight requests on a
// single connection: a raw NetServer counts received requests and only responds once
// exactly 'limit' of them have accumulated, in batches, until all requests complete.
@Test
public void testPipeliningLimit() throws Exception {
int limit = 25;
int requests = limit * 4;
client.close();
client = vertx.createHttpClient(new HttpClientOptions().
setKeepAlive(true).
setPipelining(true).
setPipeliningLimit(limit), new PoolOptions().setHttp1MaxSize(1));
AtomicInteger count = new AtomicInteger();
// The exact request bytes the client is expected to write for each GET.
String data = "GET /somepath HTTP/1.1\r\n" +
"host: " + config.host() + ":" + config.port() + "\r\n" +
"\r\n";
NetServer server = vertx.createNetServer(new NetServerOptions().setPort(config.port()).setHost(config.host()));
server.connectHandler(so -> {
StringBuilder total = new StringBuilder();
so.handler(buff -> {
total.append(buff);
// Consume complete requests from the front of the buffer.
while (total.indexOf(data) == 0) {
total.delete(0, data.length());
if (count.incrementAndGet() == limit) {
// Wait briefly: if the client respected the limit, no extra request
// can have arrived beyond the current batch of 'limit'.
vertx.setTimer(100, timerID -> {
assertEquals(limit, count.get());
count.set(0);
for (int i = 0;i < limit;i++) {
// NOTE(review): "Content-Length : 0" has a space before the colon, which is
// not valid per RFC 7230 — presumably tolerated by the client parser; confirm
// this is intentional.
so.write("HTTP/1.1 200 OK\r\nContent-Length : 0\r\n\r\n");
}
});
}
}
});
});
server
.listen(testAddress)
.await(20, TimeUnit.SECONDS);
AtomicInteger responses = new AtomicInteger();
for (int i = 0;i < requests;i++) {
client.request(new RequestOptions(requestOptions).setURI("/somepath"))
.compose(HttpClientRequest::send)
.onComplete(onSuccess(resp -> {
assertEquals(200, resp.statusCode());
if (responses.incrementAndGet() == requests) {
testComplete();
}
}));
}
await();
}
// Floods the pool with more requests than connections, has the server close each
// connection after a delay, and verifies the client-side close handler fires (once)
// while the pending requests fail.
@Test
@Repeat(times = 10)
public void testCloseServerConnectionWithPendingMessages() throws Exception {
int n = 5;
server.requestHandler(req -> {
// Close the connection without responding, after a short delay.
vertx.setTimer(100, id -> {
req.connection().close();
});
});
startServer(testAddress);
client.close();
AtomicBoolean completed = new AtomicBoolean();
client = vertx.httpClientBuilder()
.with(new HttpClientOptions().setPipelining(true))
.with(new PoolOptions().setHttp1MaxSize(n))
.withConnectHandler(conn -> {
conn.closeHandler(v -> {
// Guard so the test completes only once even if several connections close.
if (completed.compareAndSet(false, true)) {
testComplete();
}
});
})
.build();
for (int i = 0; i < n * 2; i++) {
client.request(requestOptions)
.compose(HttpClientRequest::send)
.onComplete(onFailure(resp -> {}));
}
await();
}
// Verifies pipelined request recovery: the first request's connection is closed by the
// server mid-pipeline, and the client must still complete the remaining requests by
// retrying/reconnecting, with exactly n*2 - n = n requests eventually succeeding...
// all issued requests must terminate (success or failure) for the test to finish.
@Test
public void testPipeliningFailure() throws Exception {
int n = 5;
client.close();
client = vertx.createHttpClient(new HttpClientOptions().setKeepAlive(true).setPipelining(true).setPipeliningLimit(n), new PoolOptions().setHttp1MaxSize(1));
AtomicBoolean first = new AtomicBoolean(true);
CompletableFuture<Void> latch = new CompletableFuture<>();
server.requestHandler(req -> {
// The very first request never gets a response: its connection is closed once
// the latch is completed (i.e. once n requests have been issued).
if (first.compareAndSet(true, false)) {
latch.whenComplete((v, err) -> {
req.connection().close();
});
} else {
req.response().end();
}
});
startServer(testAddress);
AtomicInteger succeeded = new AtomicInteger();
List<HttpClientRequest> requests = new CopyOnWriteArrayList<>();
// Completes the test once every issued request has terminated and exactly n succeeded.
Consumer<HttpClientRequest> checkEnd = req -> {
requests.remove(req);
if (requests.isEmpty() && succeeded.get() == n) {
testComplete();
}
};
for (int i = 0;i < n * 2;i++) {
// Release the server-side close once the first n requests are in flight.
boolean countDown = i + 1 == n;
client.request(new RequestOptions(requestOptions).setURI("/" + i)).onComplete(onSuccess(req -> {
req
.send().onComplete(ar -> {
if (ar.succeeded()) {
succeeded.incrementAndGet();
}
checkEnd.accept(req);
});
requests.add(req);
if (countDown) {
latch.complete(null);
}
}));
}
await();
}
/**
* A test that stress HTTP server pipe-lining.
*/
@Test
public void testPipelineStress() throws Exception {
// A client that will aggressively pipeline HTTP requests and close the connection abruptly after one second
| Http1xTest |
java | spring-projects__spring-security | core/src/main/java/org/springframework/security/core/session/SessionRegistryImpl.java | {
"start": 1803,
"end": 1903
} | class ____ notified of sessions that expire.
*
* @author Ben Alex
* @author Luke Taylor
*/
public | is |
java | quarkusio__quarkus | independent-projects/bootstrap/app-model/src/main/java/io/quarkus/bootstrap/workspace/DefaultSourceDir.java | {
"start": 288,
"end": 3496
} | class ____ implements SourceDir, Serializable {
private static final long serialVersionUID = 6544177650615687691L;
private final PathTree srcTree;
private final PathTree outputTree;
private final PathTree generatedSourcesTree;
private final Map<Object, Object> data;
public DefaultSourceDir(Path srcDir, Path destinationDir, Path generatedSourcesDir) {
this(srcDir, destinationDir, generatedSourcesDir, Collections.emptyMap());
}
public DefaultSourceDir(Path srcDir, Path destinationDir, Path generatedSourcesDir, Map<Object, Object> data) {
this(new DirectoryPathTree(srcDir), new DirectoryPathTree(destinationDir),
generatedSourcesDir != null ? new DirectoryPathTree(generatedSourcesDir) : null,
data);
}
public DefaultSourceDir(PathTree srcTree, PathTree outputTree, PathTree generatedSourcesTree, Map<Object, Object> data) {
this.srcTree = srcTree;
this.outputTree = outputTree;
this.generatedSourcesTree = generatedSourcesTree;
this.data = data;
}
@Override
public Path getDir() {
return srcTree.getRoots().iterator().next();
}
@Override
public PathTree getSourceTree() {
return srcTree;
}
@Override
public Path getOutputDir() {
return outputTree.getRoots().iterator().next();
}
@Override
public Path getAptSourcesDir() {
return generatedSourcesTree != null ? generatedSourcesTree.getRoots().iterator().next() : null;
}
@Override
public PathTree getOutputTree() {
return outputTree;
}
public <T> T getValue(Object key, Class<T> type) {
final Object o = data.get(key);
return o == null ? null : type.cast(o);
}
@Override
public int hashCode() {
return Objects.hash(data, outputTree, srcTree, generatedSourcesTree);
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
DefaultSourceDir other = (DefaultSourceDir) obj;
return Objects.equals(data, other.data) && Objects.equals(outputTree, other.outputTree)
&& Objects.equals(srcTree, other.srcTree)
&& Objects.equals(generatedSourcesTree, other.generatedSourcesTree);
}
@Override
public String toString() {
final StringBuilder buf = new StringBuilder();
buf.append(srcTree.getRoots()).append(" -> ").append(outputTree.getRoots());
buf.append(" generated sources: ").append(generatedSourcesTree != null ? generatedSourcesTree.getRoots() : null);
if (!data.isEmpty()) {
final Iterator<Map.Entry<Object, Object>> i = data.entrySet().iterator();
Map.Entry<Object, Object> e = i.next();
buf.append(" ").append(e.getKey()).append("=").append(e.getValue());
while (i.hasNext()) {
e = i.next();
buf.append(",").append(e.getKey()).append("=").append(e.getValue());
}
}
return buf.toString();
}
}
| DefaultSourceDir |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/internal/StatelessSessionImpl.java | {
"start": 6378,
"end": 40846
} | class ____ extends AbstractSharedSessionContract implements StatelessSessionImplementor {
public static final MultiIdLoadOptions MULTI_ID_LOAD_OPTIONS = new MultiLoadOptions();
private final LoadQueryInfluencers influencers;
private final PersistenceContext temporaryPersistenceContext;
private final boolean connectionProvided;
private final TransactionCompletionCallbacksImplementor transactionCompletionCallbacks;
private final FlushMode flushMode;
private final EventListenerGroups eventListenerGroups;
public StatelessSessionImpl(SessionFactoryImpl factory, SessionCreationOptions options) {
super( factory, options );
connectionProvided = options.getConnection() != null;
if ( options instanceof SharedSessionCreationOptions sharedOptions
&& sharedOptions.isTransactionCoordinatorShared() ) {
transactionCompletionCallbacks = sharedOptions.getTransactionCompletionCallbacks();
// // register a callback with the child session to propagate auto flushing
// transactionCompletionCallbacks.registerCallback( session -> {
// // NOTE: `session` here is the parent
// if ( !isClosed() ) {
// triggerChildAutoFlush();
// }
// } );
flushMode = FlushMode.AUTO;
}
else {
transactionCompletionCallbacks = new TransactionCompletionCallbacksImpl( this );
flushMode = FlushMode.MANUAL;
}
temporaryPersistenceContext = createPersistenceContext( this );
influencers = new LoadQueryInfluencers( getFactory() );
eventListenerGroups = factory.getEventListenerGroups();
setUpMultitenancy( factory, influencers );
// a nonzero batch size forces use of write-behind
// therefore ignore the value of hibernate.jdbc.batch_size
setJdbcBatchSize( 0 );
}
@Override
public boolean shouldAutoJoinTransaction() {
return true;
}
@Override
public FlushMode getHibernateFlushMode() {
// NOTE: only ever *not* MANUAL when this is a "child session"
return flushMode;
}
private StatisticsImplementor getStatistics() {
return getFactory().getStatistics();
}
// inserts ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@Override
public Object insert(Object entity) {
return insert( null, entity );
}
@Override
public void insertMultiple(List<?> entities) {
final Integer batchSize = getJdbcBatchSize();
setJdbcBatchSize( entities.size() );
try {
for ( Object entity : entities ) {
insert( null, entity );
}
}
finally {
setJdbcBatchSize( batchSize );
}
}
@Override
public Object insert(String entityName, Object entity) {
checkOpen();
checkNotReadOnly();
final var persister = getEntityPersister( entityName, entity );
final Object id;
final Object[] state = persister.getValues( entity );
if ( persister.isVersioned() ) {
if ( seedVersion( entity, state, persister, this ) ) {
persister.setValues( entity, state );
}
}
final var generator = persister.getGenerator();
if ( generator.generatedBeforeExecution( entity, this ) ) {
if ( !generator.generatesOnInsert() ) {
throw new IdentifierGenerationException( "Identifier generator must generate on insert" );
}
final Object currentValue = generator.allowAssignedIdentifiers() ? persister.getIdentifier( entity ) : null;
id = ( (BeforeExecutionGenerator) generator ).generate( this, entity, currentValue, INSERT );
persister.setIdentifier( entity, id, this );
if ( firePreInsert(entity, id, state, persister) ) {
return id;
}
else {
getInterceptor().onInsert( entity, id, state, persister.getPropertyNames(), persister.getPropertyTypes() );
final var eventMonitor = getEventMonitor();
final var event = eventMonitor.beginEntityInsertEvent();
boolean success = false;
try {
persister.getInsertCoordinator().insert( entity, id, state, this );
success = true;
}
finally {
eventMonitor.completeEntityInsertEvent( event, id, persister.getEntityName(), success, this );
}
}
}
else if ( generator.generatedOnExecution( entity, this ) ) {
if ( !generator.generatesOnInsert() ) {
throw new IdentifierGenerationException( "Identifier generator must generate on insert" );
}
if ( firePreInsert(entity, null, state, persister) ) {
return null;
}
else {
getInterceptor().onInsert( entity, null, state, persister.getPropertyNames(), persister.getPropertyTypes() );
final var eventMonitor = getEventMonitor();
final var event = eventMonitor.beginEntityInsertEvent();
boolean success = false;
Object generatedId = null;
try {
final var generatedValues = persister.getInsertCoordinator().insert( entity, state, this );
generatedId = castNonNull( generatedValues ).getGeneratedValue( persister.getIdentifierMapping() );
id = generatedId;
success = true;
}
finally {
eventMonitor.completeEntityInsertEvent( event, generatedId, persister.getEntityName(), success, this );
}
persister.setIdentifier( entity, id, this );
}
}
else { // assigned identifier
id = persister.getIdentifier( entity, this );
if ( id == null ) {
throw new IdentifierGenerationException( "Identifier of entity '" + persister.getEntityName() + "' must be manually assigned before calling 'insert()'" );
}
if ( firePreInsert(entity, id, state, persister) ) {
return id;
}
else {
getInterceptor().onInsert( entity, id, state, persister.getPropertyNames(), persister.getPropertyTypes() );
final var eventMonitor = getEventMonitor();
final var event = eventMonitor.beginEntityInsertEvent();
boolean success = false;
try {
persister.getInsertCoordinator().insert( entity, id, state, this );
success = true;
}
finally {
eventMonitor.completeEntityInsertEvent( event, id, persister.getEntityName(), success, this );
}
}
}
recreateCollections( entity, id, persister );
firePostInsert( entity, id, state, persister );
final var statistics = getStatistics();
if ( statistics.isStatisticsEnabled() ) {
statistics.insertEntity( persister.getEntityName() );
}
return id;
}
private void recreateCollections(Object entity, Object id, EntityPersister persister) {
if ( persister.hasOwnedCollections() ) {
final String entityName = persister.getEntityName();
final var eventMonitor = getEventMonitor();
final var statistics = getStatistics();
forEachOwnedCollection( entity, id, persister,
(descriptor, collection) -> {
final String role = descriptor.getRole();
firePreRecreate( collection, id, entityName, entity );
final var event = eventMonitor.beginCollectionRecreateEvent();
boolean success = false;
try {
descriptor.recreate( collection, id, this );
success = true;
}
finally {
eventMonitor.completeCollectionRecreateEvent( event, id, role, success, this );
}
if ( statistics.isStatisticsEnabled() ) {
statistics.recreateCollection( role );
}
firePostRecreate( collection, id, entityName, entity );
} );
}
}
// deletes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@Override
public void delete(Object entity) {
delete( null, entity );
}
@Override
public void deleteMultiple(List<?> entities) {
final Integer batchSize = getJdbcBatchSize();
setJdbcBatchSize( entities.size() );
try {
for ( Object entity : entities ) {
delete( null, entity );
}
}
finally {
setJdbcBatchSize( batchSize );
}
}
@Override
public void delete(String entityName, Object entity) {
checkOpen();
checkNotReadOnly();
final var persister = getEntityPersister( entityName, entity );
final Object id = persister.getIdentifier( entity, this );
final Object version = persister.getVersion( entity );
if ( !firePreDelete(entity, id, persister) ) {
getInterceptor().onDelete( entity, id, persister.getPropertyNames(), persister.getPropertyTypes() );
removeCollections( entity, id, persister );
final Object cacheKey = lockCacheItem( id, version, persister );
final var eventMonitor = getEventMonitor();
final var event = eventMonitor.beginEntityDeleteEvent();
boolean success = false;
try {
persister.getDeleteCoordinator().delete( entity, id, version, this );
success = true;
}
finally {
eventMonitor.completeEntityDeleteEvent( event, id, persister.getEntityName(), success, this );
}
removeCacheItem( cacheKey, persister );
firePostDelete( entity, id, persister );
final var statistics = getStatistics();
if ( statistics.isStatisticsEnabled() ) {
statistics.deleteEntity( persister.getEntityName() );
}
}
}
private void removeCollections(Object entity, Object id, EntityPersister persister) {
if ( persister.hasOwnedCollections() ) {
final String entityName = persister.getEntityName();
final var eventMonitor = getEventMonitor();
final var statistics = getStatistics();
forEachOwnedCollection( entity, id, persister,
(descriptor, collection) -> {
final String role = descriptor.getRole();
firePreRemove( collection, id, entityName, entity );
final DiagnosticEvent event = eventMonitor.beginCollectionRemoveEvent();
boolean success = false;
try {
descriptor.remove( id, this );
success = true;
}
finally {
eventMonitor.completeCollectionRemoveEvent( event, id, role, success, this );
}
firePostRemove( collection, id, entityName, entity );
if ( statistics.isStatisticsEnabled() ) {
statistics.removeCollection( role );
}
} );
}
}
// updates ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@Override
public void update(Object entity) {
update( null, entity );
}
@Override
public void updateMultiple(List<?> entities) {
final Integer batchSize = getJdbcBatchSize();
setJdbcBatchSize( entities.size() );
try {
for ( Object entity : entities ) {
update( null, entity );
}
}
finally {
setJdbcBatchSize( batchSize );
}
}
@Override
public void update(String entityName, Object entity) {
checkOpen();
checkNotReadOnly();
final var persister = getEntityPersister( entityName, entity );
final Object id = persister.getIdentifier( entity, this );
final Object[] state = persister.getValues( entity );
final Object oldVersion;
if ( persister.isVersioned() ) {
oldVersion = persister.getVersion( entity );
final Object newVersion = incrementVersion( entity, oldVersion, persister, this );
setVersion( state, newVersion, persister );
persister.setValues( entity, state );
}
else {
oldVersion = null;
}
if ( !firePreUpdate(entity, id, state, persister) ) {
getInterceptor().onUpdate( entity, id, state, persister.getPropertyNames(), persister.getPropertyTypes() );
final Object cacheKey = lockCacheItem( id, oldVersion, persister );
final var eventMonitor = getEventMonitor();
final var event = eventMonitor.beginEntityUpdateEvent();
boolean success = false;
try {
persister.getUpdateCoordinator().update( entity, id, null, state, oldVersion, null, null, false, this );
success = true;
}
finally {
eventMonitor.completeEntityUpdateEvent( event, id, persister.getEntityName(), success, this );
}
removeCacheItem( cacheKey, persister );
removeAndRecreateCollections( entity, id, persister );
firePostUpdate( entity, id, state, persister );
final var statistics = getStatistics();
if ( statistics.isStatisticsEnabled() ) {
statistics.updateEntity( persister.getEntityName() );
}
}
}
private void removeAndRecreateCollections(Object entity, Object id, EntityPersister persister) {
if ( persister.hasOwnedCollections() ) {
final String entityName = persister.getEntityName();
final var eventMonitor = getEventMonitor();
final var statistics = getStatistics();
forEachOwnedCollection( entity, id, persister,
(descriptor, collection) -> {
final String role = descriptor.getRole();
firePreUpdate( collection, id, entityName, entity );
final DiagnosticEvent event = eventMonitor.beginCollectionRemoveEvent();
boolean success = false;
try {
// TODO: can we do better here?
descriptor.remove( id, this );
descriptor.recreate( collection, id, this );
success = true;
}
finally {
eventMonitor.completeCollectionRemoveEvent( event, id, role, success, this );
}
firePostUpdate( collection, id, entityName, entity );
if ( statistics.isStatisticsEnabled() ) {
statistics.updateCollection( role );
}
} );
}
}
@Override
public void upsert(Object entity) {
upsert( null, entity );
}
@Override
public void upsertMultiple(List<?> entities) {
final Integer batchSize = getJdbcBatchSize();
setJdbcBatchSize( entities.size() );
try {
for ( Object entity : entities ) {
upsert( null, entity );
}
}
finally {
setJdbcBatchSize( batchSize );
}
}
@Override
public void upsert(String entityName, Object entity) {
checkOpen();
checkNotReadOnly();
final var persister = getEntityPersister( entityName, entity );
final Object id = idToUpsert( entity, persister );
final Object[] state = persister.getValues( entity );
if ( !firePreUpsert(entity, id, state, persister) ) {
getInterceptor().onUpsert( entity, id, state, persister.getPropertyNames(), persister.getPropertyTypes() );
final Object oldVersion = versionToUpsert( entity, persister, state );
final Object cacheKey = lockCacheItem( id, oldVersion, persister );
final var eventMonitor = getEventMonitor();
final var event = eventMonitor.beginEntityUpsertEvent();
boolean success = false;
try {
persister.getMergeCoordinator().update( entity, id, null, state, oldVersion, null, null, false, this );
success = true;
}
finally {
eventMonitor.completeEntityUpsertEvent( event, id, persister.getEntityName(), success, this );
}
removeCacheItem( cacheKey, persister );
final var statistics = getStatistics();
if ( statistics.isStatisticsEnabled() ) {
statistics.upsertEntity( persister.getEntityName() );
}
removeAndRecreateCollections( entity, id, persister );
firePostUpsert(entity, id, state, persister);
}
}
// Hibernate Reactive calls this
protected Object versionToUpsert(Object entity, EntityPersister persister, Object[] state) {
if ( persister.isVersioned() ) {
final Object oldVersion = persister.getVersion( entity );
final Boolean knownTransient =
persister.getVersionMapping()
.getUnsavedStrategy()
.isUnsaved( oldVersion );
if ( knownTransient != null && knownTransient ) {
if ( seedVersion( entity, state, persister, this ) ) {
persister.setValues( entity, state );
}
// this is a nonsense but avoids setting version restriction
// parameter to null later on deep in the guts
return state[persister.getVersionPropertyIndex()];
}
else {
final Object newVersion = incrementVersion( entity, oldVersion, persister, this );
setVersion( state, newVersion, persister );
persister.setValues( entity, state );
return oldVersion;
}
}
else {
return null;
}
}
// Hibernate Reactive calls this
protected Object idToUpsert(Object entity, EntityPersister persister) {
final Object id = persister.getIdentifier( entity, this );
final Boolean unsaved =
persister.getIdentifierMapping()
.getUnsavedStrategy()
.isUnsaved( id );
if ( unsaved != null && unsaved ) {
throw new TransientObjectException( "Object passed to upsert() has an unsaved identifier value: "
+ persister.getEntityName() );
}
return id;
}
// event processing ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Hibernate Reactive may need to call this
protected boolean firePreInsert(Object entity, Object id, Object[] state, EntityPersister persister) {
getFactory().getEventEngine().getCallbackRegistry().preCreate( entity );
if ( eventListenerGroups.eventListenerGroup_PRE_INSERT.isEmpty() ) {
return false;
}
else {
boolean veto = false;
final var event = new PreInsertEvent( entity, id, state, persister, this );
for ( var listener : eventListenerGroups.eventListenerGroup_PRE_INSERT.listeners() ) {
veto |= listener.onPreInsert( event );
}
return veto;
}
}
// Hibernate Reactive may need to call this
protected boolean firePreUpdate(Object entity, Object id, Object[] state, EntityPersister persister) {
getFactory().getEventEngine().getCallbackRegistry().preUpdate( entity );
if ( eventListenerGroups.eventListenerGroup_PRE_UPDATE.isEmpty() ) {
return false;
}
else {
boolean veto = false;
final var event = new PreUpdateEvent( entity, id, state, null, persister, this );
for ( var listener : eventListenerGroups.eventListenerGroup_PRE_UPDATE.listeners() ) {
veto |= listener.onPreUpdate( event );
}
return veto;
}
}
// Hibernate Reactive may need to call this
protected boolean firePreUpsert(Object entity, Object id, Object[] state, EntityPersister persister) {
if ( eventListenerGroups.eventListenerGroup_PRE_UPSERT.isEmpty() ) {
return false;
}
else {
boolean veto = false;
final var event = new PreUpsertEvent( entity, id, state, persister, this );
for ( var listener : eventListenerGroups.eventListenerGroup_PRE_UPSERT.listeners() ) {
veto |= listener.onPreUpsert( event );
}
return veto;
}
}
// Hibernate Reactive may need to call this
protected boolean firePreDelete(Object entity, Object id, EntityPersister persister) {
getFactory().getEventEngine().getCallbackRegistry().preRemove( entity );
if ( eventListenerGroups.eventListenerGroup_PRE_DELETE.isEmpty() ) {
return false;
}
else {
boolean veto = false;
final var event = new PreDeleteEvent( entity, id, null, persister, this );
for ( var listener : eventListenerGroups.eventListenerGroup_PRE_DELETE.listeners() ) {
veto |= listener.onPreDelete( event );
}
return veto;
}
}
// Hibernate Reactive may need to call this
protected void firePostInsert(Object entity, Object id, Object[] state, EntityPersister persister) {
eventListenerGroups.eventListenerGroup_POST_INSERT.fireLazyEventOnEachListener(
() -> new PostInsertEvent( entity, id, state, persister, this ),
PostInsertEventListener::onPostInsert );
}
// Hibernate Reactive may need to call this
protected void firePostUpdate(Object entity, Object id, Object[] state, EntityPersister persister) {
eventListenerGroups.eventListenerGroup_POST_UPDATE.fireLazyEventOnEachListener(
() -> new PostUpdateEvent( entity, id, state, null, null, persister, this ),
PostUpdateEventListener::onPostUpdate );
}
// Hibernate Reactive may need to call this
protected void firePostUpsert(Object entity, Object id, Object[] state, EntityPersister persister) {
eventListenerGroups.eventListenerGroup_POST_UPSERT.fireLazyEventOnEachListener(
() -> new PostUpsertEvent( entity, id, state, null, persister, this ),
PostUpsertEventListener::onPostUpsert );
}
// Hibernate Reactive may need to call this
protected void firePostDelete(Object entity, Object id, EntityPersister persister) {
eventListenerGroups.eventListenerGroup_POST_DELETE.fireLazyEventOnEachListener(
() -> new PostDeleteEvent( entity, id, null, persister, this ),
PostDeleteEventListener::onPostDelete );
}
// Hibernate Reactive may need to call this
protected void firePreRecreate(PersistentCollection<?> collection, Object id, String entityName, Object owner) {
eventListenerGroups.eventListenerGroup_PRE_COLLECTION_RECREATE.fireLazyEventOnEachListener(
() -> new PreCollectionRecreateEvent( collection, id, entityName, owner ),
PreCollectionRecreateEventListener::onPreRecreateCollection );
}
// Hibernate Reactive may need to call this
protected void firePreUpdate(PersistentCollection<?> collection, Object id, String entityName, Object owner) {
eventListenerGroups.eventListenerGroup_PRE_COLLECTION_UPDATE.fireLazyEventOnEachListener(
() -> new PreCollectionUpdateEvent( collection, id, entityName, owner ),
PreCollectionUpdateEventListener::onPreUpdateCollection );
}
// Hibernate Reactive may need to call this
protected void firePreRemove(PersistentCollection<?> collection, Object id, String entityName, Object owner) {
eventListenerGroups.eventListenerGroup_PRE_COLLECTION_REMOVE.fireLazyEventOnEachListener(
() -> new PreCollectionRemoveEvent( collection, id, entityName, owner ),
PreCollectionRemoveEventListener::onPreRemoveCollection );
}
// Hibernate Reactive may need to call this
protected void firePostRecreate(PersistentCollection<?> collection, Object id, String entityName, Object owner) {
eventListenerGroups.eventListenerGroup_POST_COLLECTION_RECREATE.fireLazyEventOnEachListener(
() -> new PostCollectionRecreateEvent( collection, id, entityName, owner ),
PostCollectionRecreateEventListener::onPostRecreateCollection );
}
// Hibernate Reactive may need to call this
protected void firePostUpdate(PersistentCollection<?> collection, Object id, String entityName, Object owner) {
eventListenerGroups.eventListenerGroup_POST_COLLECTION_UPDATE.fireLazyEventOnEachListener(
() -> new PostCollectionUpdateEvent( collection, id, entityName, owner ),
PostCollectionUpdateEventListener::onPostUpdateCollection );
}
// Hibernate Reactive may need to call this
protected void firePostRemove(PersistentCollection<?> collection, Object id, String entityName, Object owner) {
eventListenerGroups.eventListenerGroup_POST_COLLECTION_REMOVE.fireLazyEventOnEachListener(
() -> new PostCollectionRemoveEvent( collection, id, entityName, owner ),
PostCollectionRemoveEventListener::onPostRemoveCollection );
}
// collections ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Hibernate Reactive calls this
protected void forEachOwnedCollection(
Object entity, Object key,
EntityPersister persister, BiConsumer<CollectionPersister, PersistentCollection<?>> action) {
persister.visitAttributeMappings( attribute -> {
if ( attribute.isPluralAttributeMapping() ) {
final var descriptor = attribute.asPluralAttributeMapping().getCollectionDescriptor();
final Object cacheKey = lockCacheItem( key, descriptor );
if ( !descriptor.isInverse() ) {
final Object value = attribute.getPropertyAccess().getGetter().get(entity);
final PersistentCollection<?> collection;
if ( value instanceof PersistentCollection<?> persistentCollection ) {
if ( !persistentCollection.wasInitialized() ) {
return;
}
collection = persistentCollection;
}
else {
collection = wrapOrInstantiateCollection( entity, key, value, descriptor );
}
action.accept( descriptor, collection );
}
removeCacheItem( cacheKey, descriptor );
}
} );
}
private PersistentCollection<?> wrapOrInstantiateCollection(
Object entity, Object key, Object value, CollectionPersister descriptor) {
return value == null
? instantiateEmpty( key, descriptor )
: wrap( descriptor, value, entity );
}
// Hibernate Reactive calls this
protected PersistentCollection<?> instantiateEmpty(Object key, CollectionPersister descriptor) {
return descriptor.getCollectionSemantics().instantiateWrapper(key, descriptor, this);
}
@SuppressWarnings({"rawtypes", "unchecked"})
protected PersistentCollection<?> wrap(CollectionPersister descriptor, Object collection, Object owner) {
final CollectionSemantics collectionSemantics = descriptor.getCollectionSemantics();
var wrapped = collectionSemantics.wrap( collection, descriptor, this );
wrapped.setOwner( owner );
return wrapped;
}
// loading ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@Override @SuppressWarnings("unchecked")
public <T> T get(Class<T> entityClass, Object id) {
return (T) get( entityClass.getName(), id );
}
@Override @SuppressWarnings("unchecked")
public <T> T get(Class<T> entityClass, Object id, LockMode lockMode) {
return (T) get( entityClass.getName(), id, lockMode );
}
@Override
public Object get(String entityName, Object id) {
return get( entityName, id, LockMode.NONE );
}
@Override
public Object get(String entityName, Object id, LockMode lockMode) {
checkOpen();
final var persister = requireEntityPersister( entityName );
if ( persister.canReadFromCache() ) {
final Object cachedEntity =
loadFromSecondLevelCache( persister, generateEntityKey( id, persister ), null, lockMode );
if ( cachedEntity != null ) {
temporaryPersistenceContext.clear();
return cachedEntity;
}
}
final Object result = persister.load( id, null, getNullSafeLockMode( lockMode ), this );
if ( temporaryPersistenceContext.isLoadFinished() ) {
temporaryPersistenceContext.clear();
}
return result;
}
@Override
public <T> T get(EntityGraph<T> graph, Object id) {
return get( graph, GraphSemantic.LOAD , id);
}
@Override
public <T> T get(EntityGraph<T> graph, Object id, LockMode lockMode) {
return get( graph, GraphSemantic.LOAD, id, lockMode);
}
@Override
public <T> T get(EntityGraph<T> graph, GraphSemantic graphSemantic, Object id) {
return get( graph, graphSemantic, id, LockMode.NONE );
}
@Override @SuppressWarnings("unchecked")
public <T> T get(
EntityGraph<T> graph, GraphSemantic graphSemantic,
Object id, LockMode lockMode) {
final var rootGraph = (RootGraphImplementor<T>) graph;
checkOpen();
final var effectiveEntityGraph = getLoadQueryInfluencers().getEffectiveEntityGraph();
effectiveEntityGraph.applyGraph( rootGraph, graphSemantic );
try {
return (T) get( rootGraph.getGraphedType().getTypeName(), id, lockMode );
}
finally {
effectiveEntityGraph.clear();
}
}
@Override
public <T> List<T> getMultiple(Class<T> entityClass, List<?> ids, LockMode lockMode) {
for ( Object id : ids ) {
if ( id == null ) {
throw new IllegalArgumentException( "Null id" );
}
}
final var persister = requireEntityPersister( entityClass.getName() );
final var results = persister.multiLoad( ids.toArray(), this, new MultiLoadOptions(lockMode) );
//noinspection unchecked
return (List<T>) results;
}
@Override
public <T> List<T> getMultiple(EntityGraph<T> entityGraph, List<?> ids) {
return getMultiple( entityGraph, GraphSemantic.LOAD, ids );
}
@Override
public <T> List<T> getMultiple(EntityGraph<T> entityGraph, GraphSemantic graphSemantic, List<?> ids) {
for ( Object id : ids ) {
if ( id == null ) {
throw new IllegalArgumentException( "Null id" );
}
}
final var rootGraph = (RootGraphImplementor<T>) entityGraph;
final var effectiveEntityGraph = getLoadQueryInfluencers().getEffectiveEntityGraph();
effectiveEntityGraph.applyGraph( rootGraph, graphSemantic );
try {
final var persister = requireEntityPersister( rootGraph.getGraphedType().getTypeName() );
final var results = persister.multiLoad( ids.toArray(), this, MULTI_ID_LOAD_OPTIONS );
//noinspection unchecked
return (List<T>) results;
}
finally {
effectiveEntityGraph.clear();
}
}
@Override
public <T> List<T> getMultiple(Class<T> entityClass, List<?> ids) {
for ( Object id : ids ) {
if ( id == null ) {
throw new IllegalArgumentException("Null id");
}
}
final var persister = requireEntityPersister( entityClass.getName() );
final var results = persister.multiLoad( ids.toArray(), this, MULTI_ID_LOAD_OPTIONS );
//noinspection unchecked
return (List<T>) results;
// final List<Object> uncachedIds;
// final List<T> list = new ArrayList<>( ids.size() );
// if ( persister.canReadFromCache() ) {
// uncachedIds = new ArrayList<>( ids.size() );
// for (Object id : ids) {
// final Object cachedEntity =
// loadFromSecondLevelCache( persister, generateEntityKey( id, persister ), null, LockMode.NONE );
// if ( cachedEntity == null ) {
// uncachedIds.add( id );
// list.add( null );
// }
// else {
// //noinspection unchecked
// list.add( (T) cachedEntity );
// }
// }
// }
// else {
// uncachedIds = unmodifiableList(ids);
// for (int i = 0; i < ids.size(); i++) {
// list.add( null );
// }
// }
//
// final JpaCriteriaQuery<T> query = getCriteriaBuilder().createQuery(entityClass);
// final JpaRoot<T> from = query.from(entityClass);
// query.where( from.get( persister.getIdentifierPropertyName() ).in(uncachedIds) );
// final List<T> resultList = createSelectionQuery(query).getResultList();
// for (int i = 0; i < ids.size(); i++) {
// if ( list.get(i) == null ) {
// final Object id = ids.get(i);
// list.set( i, resultList.stream()
// .filter( entity -> entity != null && persister.getIdentifier( entity, this ).equals(id) )
// .findFirst().orElse( null ) );
// }
// }
// return list;
}
@Override
public void refresh(Object entity) {
refresh( bestGuessEntityName( entity ), entity, LockMode.NONE );
}
@Override
public void refresh(String entityName, Object entity) {
refresh( entityName, entity, LockMode.NONE );
}
@Override
public void refresh(Object entity, LockMode lockMode) {
refresh( bestGuessEntityName( entity ), entity, lockMode );
}
@Override
public void refresh(String entityName, Object entity, LockMode lockMode) {
checkOpen();
final var persister = getEntityPersister( entityName, entity );
final Object id = persister.getIdentifier( entity, this );
if ( SESSION_LOGGER.isTraceEnabled() ) {
SESSION_LOGGER.refreshingTransient( infoString( persister, id, getFactory() ) );
}
if ( persister.canWriteToCache() ) {
final var cacheAccess = persister.getCacheAccessStrategy();
if ( cacheAccess != null ) {
final Object cacheKey = cacheAccess.generateCacheKey(
id,
persister,
getFactory(),
getTenantIdentifier()
);
cacheAccess.evict( cacheKey );
}
}
final Object result =
getLoadQueryInfluencers()
.fromInternalFetchProfile( CascadingFetchProfile.REFRESH,
() -> persister.load( id, entity, getNullSafeLockMode( lockMode ), this ) );
UnresolvableObjectException.throwIfNull( result, id, persister.getEntityName() );
if ( temporaryPersistenceContext.isLoadFinished() ) {
temporaryPersistenceContext.clear();
}
}
@Override
public Object immediateLoad(String entityName, Object id) {
if ( getPersistenceContextInternal().isLoadFinished() ) {
throw new SessionException( "proxies cannot be fetched by a stateless session" );
}
// unless we are still in the process of handling a top-level load
return get( entityName, id );
}
@Override
public void initializeCollection(PersistentCollection<?> collection, boolean writing) {
checkOpen();
final var persistenceContext = getPersistenceContextInternal();
final var ce = persistenceContext.getCollectionEntry( collection );
if ( ce == null ) {
throw new HibernateException( "no entry for collection" );
}
if ( !collection.wasInitialized() ) {
final var loadedPersister = ce.getLoadedPersister();
final Object loadedKey = ce.getLoadedKey();
if ( SESSION_LOGGER.isTraceEnabled() ) {
SESSION_LOGGER.initializingCollection(
collectionInfoString( loadedPersister, collection, loadedKey, this ) );
}
final boolean foundInCache =
initializeCollectionFromCache( loadedKey, loadedPersister, collection, this );
if ( foundInCache ) {
SESSION_LOGGER.collectionInitializedFromCache();
}
else {
loadedPersister.initialize( loadedKey, this );
handlePotentiallyEmptyCollection( collection, persistenceContext, loadedKey, loadedPersister );
SESSION_LOGGER.collectionInitialized();
final var statistics = getStatistics();
if ( statistics.isStatisticsEnabled() ) {
statistics.fetchCollection( loadedPersister.getRole() );
}
}
}
}
@Override @Deprecated
public Object instantiate(String entityName, Object id) {
return instantiate( requireEntityPersister( entityName ), id );
}
@Override
public Object instantiate(EntityPersister persister, Object id) {
checkOpen();
return persister.instantiate( id, this );
}
@Override
public Object internalLoad(
String entityName,
Object id,
boolean eager,
boolean nullable) {
checkOpen();
final var persister = requireEntityPersister( entityName );
final var entityKey = generateEntityKey( id, persister );
// first, try to load it from the temp PC associated to this SS
final var persistenceContext = getPersistenceContext();
final var holder = persistenceContext.getEntityHolder( entityKey );
if ( holder != null && holder.getEntity() != null ) {
// we found it in the temp PC. Should indicate we are in the midst of processing a result set
// containing eager fetches via join fetch
return holder.getEntity();
}
if ( !eager ) {
// caller did not request forceful eager loading, see if we can create
// some form of proxy
// first, check to see if we can use "bytecode proxies"
final var enhancementMetadata = persister.getBytecodeEnhancementMetadata();
if ( enhancementMetadata.isEnhancedForLazyLoading() ) {
// if the entity defines a HibernateProxy factory, see if there is an
// existing proxy associated with the PC - and if so, use it
if ( persister.getRepresentationStrategy().getProxyFactory() != null ) {
final Object proxy = holder == null ? null : holder.getProxy();
if ( proxy != null ) {
SESSION_LOGGER.entityProxyFoundInSessionCache();
if ( SESSION_LOGGER.isDebugEnabled() && extractLazyInitializer( proxy ).isUnwrap() ) {
SESSION_LOGGER.ignoringNoProxyToHonorLaziness();
}
return persistenceContext.narrowProxy( proxy, persister, entityKey, null );
}
// specialized handling for entities with subclasses with a HibernateProxy factory
if ( persister.hasSubclasses() ) {
// entities with subclasses that define a ProxyFactory can create a HibernateProxy.
SESSION_LOGGER.creatingHibernateProxyToHonorLaziness();
return createProxy( entityKey );
}
return enhancementMetadata.createEnhancedProxy( entityKey, false, this );
}
else if ( !persister.hasSubclasses() ) {
return enhancementMetadata.createEnhancedProxy( entityKey, false, this );
}
// If we get here, then the entity | StatelessSessionImpl |
java | elastic__elasticsearch | x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/script/field/CartesianPointDocValuesField.java | {
"start": 3159,
"end": 5881
} | class ____ extends CartesianGeometry<CartesianPoint> {
private final GeometrySupplier<CartesianPoint, CartesianPoint> geometrySupplier;
public CartesianPointScriptValues(GeometrySupplier<CartesianPoint, CartesianPoint> supplier) {
super(supplier);
geometrySupplier = supplier;
}
public CartesianPoint getValue() {
return get(0);
}
public double getX() {
return getValue().getX();
}
public double[] getXs() {
double[] xs = new double[size()];
for (int i = 0; i < size(); i++) {
xs[i] = get(i).getX();
}
return xs;
}
public double[] getYs() {
double[] ys = new double[size()];
for (int i = 0; i < size(); i++) {
ys[i] = get(i).getY();
}
return ys;
}
public double getY() {
return getValue().getY();
}
@Override
public CartesianPoint get(int index) {
if (supplier.size() == 0) {
throw new IllegalStateException(
"A document doesn't have a value for a field! "
+ "Use doc[<field>].size()==0 to check if a document is missing a field!"
);
}
final CartesianPoint point = supplier.getInternal(index);
return new CartesianPoint(point.getX(), point.getY());
}
@Override
public int size() {
return supplier.size();
}
public double planeDistance(double x, double y) {
CartesianPoint point = getValue();
double dx = x - point.getX();
double dy = y - point.getY();
return Math.sqrt(dx * dx + dy * dy);
}
public double planeDistanceWithDefault(double lat, double lon, double defaultValue) {
if (isEmpty()) {
return defaultValue;
}
return planeDistance(lat, lon);
}
@Override
public int getDimensionalType() {
return size() == 0 ? -1 : 0;
}
@Override
public CartesianPoint getCentroid() {
return size() == 0 ? null : geometrySupplier.getInternalCentroid();
}
@Override
public CartesianBoundingBox getBoundingBox() {
return size() == 0 ? null : (CartesianBoundingBox) geometrySupplier.getInternalBoundingBox();
}
@Override
public CartesianPoint getLabelPosition() {
return size() == 0 ? null : geometrySupplier.getInternalLabelPosition();
}
}
}
| CartesianPointScriptValues |
java | jhy__jsoup | src/main/java/org/jsoup/parser/XmlTreeBuilder.java | {
"start": 1097,
"end": 10232
} | class ____ extends TreeBuilder {
static final String XmlnsKey = "xmlns";
static final String XmlnsPrefix = "xmlns:";
private final ArrayDeque<HashMap<String, String>> namespacesStack = new ArrayDeque<>(); // stack of namespaces, prefix => urn
@Override ParseSettings defaultSettings() {
return ParseSettings.preserveCase;
}
@Override
protected void initialiseParse(Reader input, String baseUri, Parser parser) {
super.initialiseParse(input, baseUri, parser);
doc.outputSettings()
.syntax(Document.OutputSettings.Syntax.xml)
.escapeMode(Entities.EscapeMode.xhtml)
.prettyPrint(false); // as XML, we don't understand what whitespace is significant or not
namespacesStack.clear();
HashMap<String, String> ns = new HashMap<>();
ns.put("xml", NamespaceXml);
ns.put("", NamespaceXml);
namespacesStack.push(ns);
}
@Override
void initialiseParseFragment(@Nullable Element context) {
super.initialiseParseFragment(context);
if (context == null) return;
// transition to the tag's text state if available
TokeniserState textState = context.tag().textState();
if (textState != null) tokeniser.transition(textState);
// reconstitute the namespace stack by traversing the element and its parents (top down)
Elements chain = context.parents();
chain.add(0, context);
for (int i = chain.size() - 1; i >= 0; i--) {
Element el = chain.get(i);
HashMap<String, String> namespaces = new HashMap<>(namespacesStack.peek());
namespacesStack.push(namespaces);
if (el.attributesSize() > 0) {
processNamespaces(el.attributes(), namespaces);
}
}
}
Document parse(Reader input, String baseUri) {
return parse(input, baseUri, new Parser(this));
}
Document parse(String input, String baseUri) {
return parse(new StringReader(input), baseUri, new Parser(this));
}
@Override List<Node> completeParseFragment() {
return doc.childNodes();
}
@Override
XmlTreeBuilder newInstance() {
return new XmlTreeBuilder();
}
@Override public String defaultNamespace() {
return NamespaceXml;
}
@Override
TagSet defaultTagSet() {
return new TagSet(); // an empty tagset
}
@Override
int defaultMaxDepth() {
return Integer.MAX_VALUE;
}
@Override
protected boolean process(Token token) {
currentToken = token;
// start tag, end tag, doctype, xmldecl, comment, character, eof
switch (token.type) {
case StartTag:
insertElementFor(token.asStartTag());
break;
case EndTag:
popStackToClose(token.asEndTag());
break;
case Comment:
insertCommentFor(token.asComment());
break;
case Character:
insertCharacterFor(token.asCharacter());
break;
case Doctype:
insertDoctypeFor(token.asDoctype());
break;
case XmlDecl:
insertXmlDeclarationFor(token.asXmlDecl());
break;
case EOF: // could put some normalisation here if desired
break;
default:
Validate.fail("Unexpected token type: " + token.type);
}
return true;
}
void insertElementFor(Token.StartTag startTag) {
// handle namespace for tag
HashMap<String, String> namespaces = new HashMap<>(namespacesStack.peek());
namespacesStack.push(namespaces);
Attributes attributes = startTag.attributes;
if (attributes != null) {
settings.normalizeAttributes(attributes);
attributes.deduplicate(settings);
processNamespaces(attributes, namespaces);
applyNamespacesToAttributes(attributes, namespaces);
}
enforceStackDepthLimit();
String tagName = startTag.tagName.value();
String ns = resolveNamespace(tagName, namespaces);
Tag tag = tagFor(tagName, startTag.normalName, ns, settings);
Element el = new Element(tag, null, attributes);
currentElement().appendChild(el);
push(el);
if (startTag.isSelfClosing()) {
tag.setSeenSelfClose();
pop(); // push & pop ensures onNodeInserted & onNodeClosed
} else if (tag.isEmpty()) {
pop(); // custom defined void tag
} else {
TokeniserState textState = tag.textState();
if (textState != null) tokeniser.transition(textState);
}
}
private static void processNamespaces(Attributes attributes, HashMap<String, String> namespaces) {
// process attributes for namespaces (xmlns, xmlns:)
for (Attribute attr : attributes) {
String key = attr.getKey();
String value = attr.getValue();
if (key.equals(XmlnsKey)) {
namespaces.put("", value); // new default for this level
} else if (key.startsWith(XmlnsPrefix)) {
String nsPrefix = key.substring(XmlnsPrefix.length());
namespaces.put(nsPrefix, value);
}
}
}
private static void applyNamespacesToAttributes(Attributes attributes, HashMap<String, String> namespaces) {
// second pass, apply namespace to attributes. Collects them first then adds (as userData is an attribute)
Map<String, String> attrPrefix = new HashMap<>();
for (Attribute attr: attributes) {
String prefix = attr.prefix();
if (!prefix.isEmpty()) {
if (prefix.equals(XmlnsKey)) continue;
String ns = namespaces.get(prefix);
if (ns != null) attrPrefix.put(SharedConstants.XmlnsAttr + prefix, ns);
}
}
for (Map.Entry<String, String> entry : attrPrefix.entrySet())
attributes.userData(entry.getKey(), entry.getValue());
}
private static String resolveNamespace(String tagName, HashMap<String, String> namespaces) {
String ns = namespaces.get("");
int pos = tagName.indexOf(':');
if (pos > 0) {
String prefix = tagName.substring(0, pos);
if (namespaces.containsKey(prefix))
ns = namespaces.get(prefix);
}
return ns;
}
void insertLeafNode(LeafNode node) {
currentElement().appendChild(node);
onNodeInserted(node);
}
void insertCommentFor(Token.Comment commentToken) {
Comment comment = new Comment(commentToken.getData());
insertLeafNode(comment);
}
void insertCharacterFor(Token.Character token) {
final String data = token.getData();
LeafNode node;
if (token.isCData()) node = new CDataNode(data);
else if (currentElement().tag().is(Tag.Data)) node = new DataNode(data);
else node = new TextNode(data);
insertLeafNode(node);
}
void insertDoctypeFor(Token.Doctype token) {
DocumentType doctypeNode = new DocumentType(settings.normalizeTag(token.getName()), token.getPublicIdentifier(), token.getSystemIdentifier());
doctypeNode.setPubSysKey(token.getPubSysKey());
insertLeafNode(doctypeNode);
}
void insertXmlDeclarationFor(Token.XmlDecl token) {
XmlDeclaration decl = new XmlDeclaration(token.name(), token.isDeclaration);
if (token.attributes != null) decl.attributes().addAll(token.attributes);
insertLeafNode(decl);
}
@Override
Element pop() {
namespacesStack.pop();
return super.pop();
}
/**
* If the stack contains an element with this tag's name, pop up the stack to remove the first occurrence. If not
* found, skips.
*
* @param endTag tag to close
*/
protected void popStackToClose(Token.EndTag endTag) {
// like in HtmlTreeBuilder - don't scan up forever for very (artificially) deeply nested stacks
String elName = settings.normalizeTag(endTag.name());
Element firstFound = null;
final int bottom = stack.size() - 1;
final int upper = bottom >= maxQueueDepth ? bottom - maxQueueDepth : 0;
for (int pos = stack.size() -1; pos >= upper; pos--) {
Element next = stack.get(pos);
if (next.nodeName().equals(elName)) {
firstFound = next;
break;
}
}
if (firstFound == null)
return; // not found, skip
for (int pos = stack.size() -1; pos >= 0; pos--) {
Element next = pop();
if (next == firstFound) {
break;
}
}
}
private static final int maxQueueDepth = 256; // an arbitrary tension point between real XML and crafted pain
}
| XmlTreeBuilder |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/internal/maps/Maps_assertAnySatisfyingConsumer_Test.java | {
"start": 1879,
"end": 4711
} | class ____ extends MapsBaseTest {
private Map<String, Player> greatPlayers;
@Mock
private BiConsumer<String, Player> consumer;
@Override
@BeforeEach
public void setUp() {
super.setUp();
greatPlayers = mapOf(entry("Bulls", jordan), entry("Spurs", duncan), entry("Lakers", magic));
}
@Test
void must_not_check_all_entries() {
// GIVEN
assertThat(greatPlayers).hasSizeGreaterThan(2); // This test requires a map with size > 2
// first entry does not match -> assertion error, 2nd entry does match -> doNothing()
doThrow(new AssertionError("some error message")).doNothing().when(consumer).accept(anyString(), any(Player.class));
// WHEN
maps.assertAnySatisfy(INFO, greatPlayers, consumer);
// THEN
// make sure that we only evaluated 2 out of 3 entries
verify(consumer, times(2)).accept(anyString(), any(Player.class));
}
@Test
void should_pass_if_one_entry_satisfies_the_given_requirements() {
maps.assertAnySatisfy(INFO, greatPlayers, (team, player) -> {
assertThat(team).isEqualTo("Lakers");
assertThat(player.getPointsPerGame()).isGreaterThan(18);
});
}
@Test
void should_fail_if_the_map_under_test_is_empty_whatever_the_assertions_requirements_are() {
// GIVEN
actual.clear();
// WHEN
var error = expectAssertionError(() -> maps.assertAnySatisfy(INFO, actual, ($1, $2) -> assertThat(true).isTrue()));
// THEN
then(error).hasMessage(elementsShouldSatisfyAny(actual, emptyList(), INFO).create());
}
@Test
void should_fail_if_no_entry_satisfies_the_given_requirements() {
// WHEN
BiConsumer<String, String> requirements = ($1, $2) -> assertThat(true).isFalse();
var error = expectAssertionError(() -> maps.assertAnySatisfy(INFO, actual, requirements));
// THEN
// can't build the exact error message due to internal stack traces
then(error).hasMessageStartingWith(format("%n" +
"Expecting any element of:%n" +
" %s%n" +
"to satisfy the given assertions requirements but none did:%n%n",
info.representation().toStringOf(actual)));
}
@Test
void should_fail_if_actual_is_null() {
// WHEN
var error = expectAssertionError(() -> maps.assertAnySatisfy(INFO, null, (team, player) -> {}));
// THEN
then(error).hasMessage(actualIsNull());
}
@Test
void should_fail_if_given_requirements_are_null() {
assertThatNullPointerException().isThrownBy(() -> maps.assertAnySatisfy(INFO, greatPlayers, null))
.withMessage("The BiConsumer<K, V> expressing the assertions requirements must not be null");
}
}
| Maps_assertAnySatisfyingConsumer_Test |
java | google__guava | android/guava/src/com/google/common/hash/Fingerprint2011.java | {
"start": 1128,
"end": 6645
} | class ____ extends AbstractNonStreamingHashFunction {
static final HashFunction FINGERPRINT_2011 = new Fingerprint2011();
// Some primes between 2^63 and 2^64 for various uses.
private static final long K0 = 0xa5b85c5e198ed849L;
private static final long K1 = 0x8d58ac26afe12e47L;
private static final long K2 = 0xc47b6e9e3a970ed3L;
private static final long K3 = 0xc6a4a7935bd1e995L;
@Override
public HashCode hashBytes(byte[] input, int off, int len) {
checkPositionIndexes(off, off + len, input.length);
return HashCode.fromLong(fingerprint(input, off, len));
}
@Override
public int bits() {
return 64;
}
@Override
public String toString() {
return "Hashing.fingerprint2011()";
}
// End of public functions.
@VisibleForTesting
static long fingerprint(byte[] bytes, int offset, int length) {
long result;
if (length <= 32) {
result = murmurHash64WithSeed(bytes, offset, length, K0 ^ K1 ^ K2);
} else if (length <= 64) {
result = hashLength33To64(bytes, offset, length);
} else {
result = fullFingerprint(bytes, offset, length);
}
long u = length >= 8 ? load64(bytes, offset) : K0;
long v = length >= 9 ? load64(bytes, offset + length - 8) : K0;
result = hash128to64(result + v, u);
return result == 0 || result == 1 ? result + ~1 : result;
}
private static long shiftMix(long val) {
return val ^ (val >>> 47);
}
/** Implementation of Hash128to64 from util/hash/hash128to64.h */
@VisibleForTesting
static long hash128to64(long high, long low) {
long a = (low ^ high) * K3;
a ^= a >>> 47;
long b = (high ^ a) * K3;
b ^= b >>> 47;
b *= K3;
return b;
}
/**
* Computes intermediate hash of 32 bytes of byte array from the given offset. Results are
* returned in the output array - this is 12% faster than allocating new arrays every time.
*/
private static void weakHashLength32WithSeeds(
byte[] bytes, int offset, long seedA, long seedB, long[] output) {
long part1 = load64(bytes, offset);
long part2 = load64(bytes, offset + 8);
long part3 = load64(bytes, offset + 16);
long part4 = load64(bytes, offset + 24);
seedA += part1;
seedB = rotateRight(seedB + seedA + part4, 51);
long c = seedA;
seedA += part2;
seedA += part3;
seedB += rotateRight(seedA, 23);
output[0] = seedA + part4;
output[1] = seedB + c;
}
/*
* Compute an 8-byte hash of a byte array of length greater than 64 bytes.
*/
private static long fullFingerprint(byte[] bytes, int offset, int length) {
// For lengths over 64 bytes we hash the end first, and then as we
// loop we keep 56 bytes of state: v, w, x, y, and z.
long x = load64(bytes, offset);
long y = load64(bytes, offset + length - 16) ^ K1;
long z = load64(bytes, offset + length - 56) ^ K0;
long[] v = new long[2];
long[] w = new long[2];
weakHashLength32WithSeeds(bytes, offset + length - 64, length, y, v);
weakHashLength32WithSeeds(bytes, offset + length - 32, length * K1, K0, w);
z += shiftMix(v[1]) * K1;
x = rotateRight(z + x, 39) * K1;
y = rotateRight(y, 33) * K1;
// Decrease length to the nearest multiple of 64, and operate on 64-byte chunks.
length = (length - 1) & ~63;
do {
x = rotateRight(x + y + v[0] + load64(bytes, offset + 16), 37) * K1;
y = rotateRight(y + v[1] + load64(bytes, offset + 48), 42) * K1;
x ^= w[1];
y ^= v[0];
z = rotateRight(z ^ w[0], 33);
weakHashLength32WithSeeds(bytes, offset, v[1] * K1, x + w[0], v);
weakHashLength32WithSeeds(bytes, offset + 32, z + w[1], y, w);
long tmp = z;
z = x;
x = tmp;
offset += 64;
length -= 64;
} while (length != 0);
return hash128to64(hash128to64(v[0], w[0]) + shiftMix(y) * K1 + z, hash128to64(v[1], w[1]) + x);
}
private static long hashLength33To64(byte[] bytes, int offset, int length) {
long z = load64(bytes, offset + 24);
long a = load64(bytes, offset) + (length + load64(bytes, offset + length - 16)) * K0;
long b = rotateRight(a + z, 52);
long c = rotateRight(a, 37);
a += load64(bytes, offset + 8);
c += rotateRight(a, 7);
a += load64(bytes, offset + 16);
long vf = a + z;
long vs = b + rotateRight(a, 31) + c;
a = load64(bytes, offset + 16) + load64(bytes, offset + length - 32);
z = load64(bytes, offset + length - 8);
b = rotateRight(a + z, 52);
c = rotateRight(a, 37);
a += load64(bytes, offset + length - 24);
c += rotateRight(a, 7);
a += load64(bytes, offset + length - 16);
long wf = a + z;
long ws = b + rotateRight(a, 31) + c;
long r = shiftMix((vf + ws) * K2 + (wf + vs) * K0);
return shiftMix(r * K0 + vs) * K2;
}
@VisibleForTesting
static long murmurHash64WithSeed(byte[] bytes, int offset, int length, long seed) {
long mul = K3;
int topBit = 0x7;
int lengthAligned = length & ~topBit;
int lengthRemainder = length & topBit;
long hash = seed ^ (length * mul);
for (int i = 0; i < lengthAligned; i += 8) {
long loaded = load64(bytes, offset + i);
long data = shiftMix(loaded * mul) * mul;
hash ^= data;
hash *= mul;
}
if (lengthRemainder != 0) {
long data = load64Safely(bytes, offset + lengthAligned, lengthRemainder);
hash ^= data;
hash *= mul;
}
hash = shiftMix(hash) * mul;
hash = shiftMix(hash);
return hash;
}
}
| Fingerprint2011 |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/cglib/core/ClassesKey.java | {
"start": 764,
"end": 1471
} | interface ____ {
Object newInstance(Object[] array);
}
private ClassesKey() {
}
public static Object create(Object[] array) {
return FACTORY.newInstance(classNames(array));
}
private static String[] classNames(Object[] objects) {
if (objects == null) {
return null;
}
String[] classNames = new String[objects.length];
for (int i = 0; i < objects.length; i++) {
Object object = objects[i];
if (object != null) {
Class<?> aClass = object.getClass();
classNames[i] = aClass == null ? null : aClass.getName();
}
}
return classNames;
}
}
| Key |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/src/main/java/org/apache/hadoop/yarn/appcatalog/application/AppCatalogInitializer.java | {
"start": 1158,
"end": 1211
} | class ____ setting Kerberos configuration.
*/
public | for |
java | spring-projects__spring-framework | spring-core/src/testFixtures/java/org/springframework/core/testfixture/io/buffer/LeakAwareDataBufferFactory.java | {
"start": 1344,
"end": 1744
} | interface ____ keeps track of
* memory leaks.
* <p>Useful for unit tests that handle data buffers. Simply inherit from
* {@link AbstractLeakCheckingTests} or call {@link #checkForLeaks()} in
* a JUnit <em>after</em> method yourself, and any buffers that have not been
* released will result in an {@link AssertionError}.
*
* @author Arjen Poutsma
* @see LeakAwareDataBufferFactory
*/
public | that |
java | spring-projects__spring-security | core/src/test/java/org/springframework/security/authentication/jaas/JaasAuthenticationTokenTests.java | {
"start": 959,
"end": 2031
} | class ____ {
@Test
void toBuilderWhenApplyThenCopies() {
JaasAuthenticationToken factorOne = new JaasAuthenticationToken("alice", "pass",
AuthorityUtils.createAuthorityList("FACTOR_ONE"), mock(LoginContext.class));
JaasAuthenticationToken factorTwo = new JaasAuthenticationToken("bob", "ssap",
AuthorityUtils.createAuthorityList("FACTOR_TWO"), mock(LoginContext.class));
JaasAuthenticationToken result = factorOne.toBuilder()
.authorities((a) -> a.addAll(factorTwo.getAuthorities()))
.principal(factorTwo.getPrincipal())
.credentials(factorTwo.getCredentials())
.loginContext(factorTwo.getLoginContext())
.build();
Set<String> authorities = AuthorityUtils.authorityListToSet(result.getAuthorities());
assertThat(result.getPrincipal()).isSameAs(factorTwo.getPrincipal());
assertThat(result.getCredentials()).isSameAs(factorTwo.getCredentials());
assertThat(result.getLoginContext()).isSameAs(factorTwo.getLoginContext());
assertThat(authorities).containsExactlyInAnyOrder("FACTOR_ONE", "FACTOR_TWO");
}
}
| JaasAuthenticationTokenTests |
java | apache__hadoop | hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractCreate.java | {
"start": 1055,
"end": 1268
} | class ____ extends AbstractContractCreateTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new NativeAzureFileSystemContract(conf);
}
}
| ITestAzureNativeContractCreate |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/appender/mom/JmsAppender.java | {
"start": 2345,
"end": 2409
} | class ____ extends AbstractAppender {
public static | JmsAppender |
java | google__guava | android/guava/src/com/google/common/collect/TransformedIterator.java | {
"start": 1081,
"end": 1721
} | class ____<F extends @Nullable Object, T extends @Nullable Object>
implements Iterator<T> {
final Iterator<? extends F> backingIterator;
TransformedIterator(Iterator<? extends F> backingIterator) {
this.backingIterator = checkNotNull(backingIterator);
}
@ParametricNullness
abstract T transform(@ParametricNullness F from);
@Override
public final boolean hasNext() {
return backingIterator.hasNext();
}
@Override
@ParametricNullness
public final T next() {
return transform(backingIterator.next());
}
@Override
public final void remove() {
backingIterator.remove();
}
}
| TransformedIterator |
java | redisson__redisson | redisson/src/main/java/org/redisson/api/queue/QueueRemoveParams.java | {
"start": 690,
"end": 952
} | class ____ extends BaseSyncParams<QueueRemoveArgs> implements QueueRemoveArgs {
private final String[] ids;
public QueueRemoveParams(String[] ids) {
this.ids = ids;
}
public String[] getIds() {
return ids;
}
}
| QueueRemoveParams |
java | google__dagger | javatests/dagger/internal/codegen/InjectConstructorFactoryGeneratorTest.java | {
"start": 35365,
"end": 36037
} | class ____ {",
" @Inject private String s;",
"}");
daggerCompiler(file)
.withProcessingOptions(ImmutableMap.of("dagger.privateMemberValidation", "WARNING"))
.compile(
subject -> {
subject.hasErrorCount(0);
// TODO: Verify warning message when supported
// subject.hasWarningCount(1);
});
}
@Test public void staticInjectFieldError() {
Source file =
CompilerTests.javaSource(
"test.StaticInjectField",
"package test;",
"",
"import javax.inject.Inject;",
"",
" | PrivateInjectField |
java | elastic__elasticsearch | modules/lang-painless/src/test/java/org/elasticsearch/painless/ErrorCauseWrapperTests.java | {
"start": 1040,
"end": 2721
} | class ____ extends ESTestCase {
ErrorCauseWrapper assertWraps(Error realError) {
var e = ErrorCauseWrapper.maybeWrap(realError);
assertThat(e.getCause(), nullValue());
assertThat(e, instanceOf(ErrorCauseWrapper.class));
var wrapper = (ErrorCauseWrapper) e;
assertThat(wrapper.realCause, is(realError));
return wrapper;
}
public void testOutOfMemoryError() {
assertWraps(new OutOfMemoryError("oom"));
}
public void testStackOverflowError() {
assertWraps(new StackOverflowError("soe"));
}
public void testLinkageError() {
assertWraps(new LinkageError("le"));
}
public void testPainlessError() {
assertWraps(new PainlessError("pe"));
}
public void testNotWrapped() {
var realError = new AssertionError("not wrapped");
var e = ErrorCauseWrapper.maybeWrap(realError);
assertThat(e, is(realError));
}
public void testXContent() throws IOException {
var e = assertWraps(new PainlessError("some error"));
var output = new ByteArrayOutputStream();
var builder = XContentFactory.jsonBuilder(output);
builder.startObject();
e.toXContent(builder, ToXContent.EMPTY_PARAMS);
builder.endObject();
builder.flush();
try (var parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, output.toByteArray())) {
Map<String, String> content = parser.mapStrings();
assertThat(content, hasEntry("type", "painless_error"));
assertThat(content, hasEntry("reason", "some error"));
}
}
}
| ErrorCauseWrapperTests |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/time/TimeUnitMismatchTest.java | {
"start": 1326,
"end": 2091
} | class ____ {
private final CompilationTestHelper compilationHelper =
CompilationTestHelper.newInstance(TimeUnitMismatch.class, getClass());
private final BugCheckerRefactoringTestHelper refactoringHelper =
BugCheckerRefactoringTestHelper.newInstance(TimeUnitMismatch.class, getClass());
@Test
public void testPositiveCase() {
compilationHelper
.addSourceLines(
"TimeUnitMismatchPositiveCases.java",
"""
package com.google.errorprone.bugpatterns.time.testdata;
import static java.util.concurrent.TimeUnit.NANOSECONDS;
import java.util.Optional;
/**
* @author cpovirk@google.com (Chris Povirk)
*/
public | TimeUnitMismatchTest |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/operators/windowing/AsyncWindowOperator.java | {
"start": 25322,
"end": 25426
} | class ____ extends DefaultKeyedStateStore {
// we have this in the base | AbstractPerWindowStateStore |
java | apache__flink | flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/CoordinatorEventsToStreamOperatorRecipientExactlyOnceITCase.java | {
"start": 4151,
"end": 4401
} | class ____
* verifies situations when the tested operators are not sources, which means when checkpoint
* barriers are injected into sources, these operators may not have started checkpoint yet.
*
* <h2>Unaligned checkpoint</h2>
*
* <p>This | further |
java | google__guava | android/guava/src/com/google/common/collect/StandardTable.java | {
"start": 15058,
"end": 16342
} | class ____ extends ImprovedAbstractSet<Entry<R, V>> {
@Override
public Iterator<Entry<R, V>> iterator() {
return new EntrySetIterator();
}
@Override
public int size() {
int size = 0;
for (Map<C, V> map : backingMap.values()) {
if (map.containsKey(columnKey)) {
size++;
}
}
return size;
}
@Override
public boolean isEmpty() {
return !containsColumn(columnKey);
}
@Override
public void clear() {
removeFromColumnIf(alwaysTrue());
}
@Override
public boolean contains(@Nullable Object o) {
if (o instanceof Entry) {
Entry<?, ?> entry = (Entry<?, ?>) o;
return containsMapping(entry.getKey(), columnKey, entry.getValue());
}
return false;
}
@Override
public boolean remove(@Nullable Object obj) {
if (obj instanceof Entry) {
Entry<?, ?> entry = (Entry<?, ?>) obj;
return removeMapping(entry.getKey(), columnKey, entry.getValue());
}
return false;
}
@Override
public boolean retainAll(Collection<?> c) {
return removeFromColumnIf(not(in(c)));
}
}
private final | EntrySet |
java | elastic__elasticsearch | x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/inference/RestPutTrainedModelAction.java | {
"start": 1069,
"end": 2306
} | class ____ extends BaseRestHandler {
@Override
public List<Route> routes() {
return List.of(new Route(PUT, BASE_PATH + "trained_models/{" + TrainedModelConfig.MODEL_ID + "}"));
}
@Override
public String getName() {
return "xpack_ml_put_trained_model_action";
}
@Override
protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException {
String id = restRequest.param(TrainedModelConfig.MODEL_ID.getPreferredName());
XContentParser parser = restRequest.contentParser();
boolean deferDefinitionDecompression = restRequest.paramAsBoolean(PutTrainedModelAction.DEFER_DEFINITION_DECOMPRESSION, false);
boolean waitForCompletion = restRequest.paramAsBoolean("wait_for_completion", false);
PutTrainedModelAction.Request putRequest = PutTrainedModelAction.Request.parseRequest(
id,
deferDefinitionDecompression,
waitForCompletion,
parser
);
putRequest.ackTimeout(getAckTimeout(restRequest));
return channel -> client.execute(PutTrainedModelAction.INSTANCE, putRequest, new RestToXContentListener<>(channel));
}
}
| RestPutTrainedModelAction |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/resource/MappingSocketAddressResolver.java | {
"start": 598,
"end": 3752
} | class ____ extends SocketAddressResolver {
private final Function<HostAndPort, HostAndPort> mappingFunction;
private final DnsResolver dnsResolver;
/**
* Create a new {@link SocketAddressResolver} given {@link Function mapping function}.
*
* @param mappingFunction must not be {@code null}.
* @since 6.1
*/
private MappingSocketAddressResolver(Function<HostAndPort, HostAndPort> mappingFunction) {
this(DnsResolver.unresolved(), mappingFunction);
}
/**
* Create a new {@link SocketAddressResolver} given {@link DnsResolver} and {@link Function mapping function}.
*
* @param dnsResolver must not be {@code null}.
* @param mappingFunction must not be {@code null}.
*/
private MappingSocketAddressResolver(DnsResolver dnsResolver, Function<HostAndPort, HostAndPort> mappingFunction) {
super(dnsResolver);
LettuceAssert.notNull(mappingFunction, "Mapping function must not be null!");
this.dnsResolver = dnsResolver;
this.mappingFunction = mappingFunction;
}
/**
* Create a new {@link SocketAddressResolver} given {@link DnsResolver} and {@link Function mapping function}.
*
* @param mappingFunction must not be {@code null}.
* @return the {@link MappingSocketAddressResolver}.
* @since 6.1
*/
public static MappingSocketAddressResolver create(Function<HostAndPort, HostAndPort> mappingFunction) {
return new MappingSocketAddressResolver(mappingFunction);
}
/**
* Create a new {@link SocketAddressResolver} given {@link DnsResolver} and {@link Function mapping function}.
*
* @param dnsResolver must not be {@code null}.
* @param mappingFunction must not be {@code null}.
* @return the {@link MappingSocketAddressResolver}.
*/
public static MappingSocketAddressResolver create(DnsResolver dnsResolver,
Function<HostAndPort, HostAndPort> mappingFunction) {
return new MappingSocketAddressResolver(dnsResolver, mappingFunction);
}
@Override
public SocketAddress resolve(RedisURI redisURI) {
if (redisURI.getSocket() != null) {
return getDomainSocketAddress(redisURI);
}
HostAndPort hostAndPort = HostAndPort.of(redisURI.getHost(), redisURI.getPort());
HostAndPort mapped = mappingFunction.apply(hostAndPort);
if (mapped == null) {
throw new IllegalStateException("Mapping function must not return null for HostAndPort");
}
try {
return doResolve(mapped);
} catch (UnknownHostException e) {
return new InetSocketAddress(redisURI.getHost(), redisURI.getPort());
}
}
private SocketAddress doResolve(HostAndPort mapped) throws UnknownHostException {
InetAddress[] inetAddress = dnsResolver.resolve(mapped.getHostText());
if (inetAddress.length == 0) {
return InetSocketAddress.createUnresolved(mapped.getHostText(), mapped.getPort());
}
return new InetSocketAddress(inetAddress[0], mapped.getPort());
}
}
| MappingSocketAddressResolver |
java | quarkusio__quarkus | extensions/credentials/runtime/src/main/java/io/quarkus/credentials/runtime/CredentialsProviderFinder.java | {
"start": 163,
"end": 735
} | class ____ {
public static CredentialsProvider find(String type) {
ArcContainer container = Arc.container();
CredentialsProvider credentialsProvider = type != null
? (CredentialsProvider) container.instance(type).get()
: container.instance(CredentialsProvider.class).get();
if (credentialsProvider == null) {
throw new RuntimeException("unable to find credentials provider of type " + (type == null ? "default" : type));
}
return credentialsProvider;
}
}
| CredentialsProviderFinder |
java | apache__commons-lang | src/main/java/org/apache/commons/lang3/function/FailableDoublePredicate.java | {
"start": 939,
"end": 1116
} | interface ____ {@link DoublePredicate} that declares a {@link Throwable}.
*
* @param <E> The kind of thrown exception or error.
* @since 3.11
*/
@FunctionalInterface
public | like |
java | spring-projects__spring-boot | core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/condition/ConditionalOnSingleCandidate.java | {
"start": 3491,
"end": 3925
} | class ____ name of the bean to check
* @see Bean#autowireCandidate()
* @see BeanDefinition#isAutowireCandidate
* @see Bean#defaultCandidate()
* @see AbstractBeanDefinition#isDefaultCandidate
*/
String type() default "";
/**
* Strategy to decide if the application context hierarchy (parent contexts) should be
* considered.
* @return the search strategy
*/
SearchStrategy search() default SearchStrategy.ALL;
}
| type |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/predicate/PredicateAssert_rejects_Test.java | {
"start": 1540,
"end": 5157
} | class ____ extends PredicateAssertBaseTest {
@Test
void should_fail_when_predicate_is_null() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> assertThat((Predicate<String>) null).rejects("first",
"second"))
.withMessage(actualIsNull());
}
@Test
void should_pass_when_predicate_does_not_accept_value() {
Predicate<String> predicate = val -> val.equals("something");
assertThat(predicate).rejects("something else");
}
@Test
void should_fail_when_predicate_accepts_value_with_no_description() {
Predicate<String> predicate = val -> val.equals("something");
String expectedValue = "something";
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> assertThat(predicate).rejects("something"))
.withMessage(shouldNotAccept(predicate, expectedValue,
PredicateDescription.GIVEN).create());
}
@Test
void should_fail_when_predicate_accepts_value_with_given_string_description() {
Predicate<String> predicate = val -> val.equals("something");
String expectedValue = "something";
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> assertThat(predicate).as("test").rejects("something"))
.withMessage("[test] " + shouldNotAccept(predicate, expectedValue,
PredicateDescription.GIVEN).create());
}
@Test
void should_fail_when_predicate_accepts_some_value() {
Predicate<String> ballSportPredicate = sport -> sport.contains("ball");
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> assertThat(ballSportPredicate).rejects("curling",
"judo",
"football"))
.withMessage(noElementsShouldMatch(newArrayList("curling", "judo",
"football"),
"football",
PredicateDescription.GIVEN).create());
}
@Test
void should_pass_when_predicate_accepts_no_value() {
Predicate<String> ballSportPredicate = sport -> sport.contains("ball");
assertThat(ballSportPredicate).rejects("curling", "judo", "marathon");
}
@Test
void should_pass_and_only_invoke_predicate_once_for_single_value() {
// GIVEN
Predicate<Object> predicate = mock(Predicate.class);
when(predicate.test(any())).thenReturn(false);
// WHEN
assertThat(predicate).rejects("something");
// THEN
verify(predicate, times(1)).test("something");
}
@Override
protected PredicateAssert<Boolean> invoke_api_method() {
return assertions.rejects(false, false);
}
@Override
protected void verify_internal_effects() {
verify(iterables).assertNoneMatch(getInfo(assertions), newArrayList(false, false), getActual(assertions),
PredicateDescription.GIVEN);
}
}
| PredicateAssert_rejects_Test |
java | assertj__assertj-core | assertj-core/src/main/java/org/assertj/core/error/uri/ShouldHaveAnchor.java | {
"start": 791,
"end": 1552
} | class ____ extends BasicErrorMessageFactory {
private static final String SHOULD_HAVE_ANCHOR = "%nExpecting anchor of%n <%s>%nto be:%n <%s>%nbut was:%n <%s>";
private static final String SHOULD_NOT_HAVE_ANCHOR = "%nExpecting actual:%n <%s>%nnot to have an anchor but had:%n <%s>";
public static ErrorMessageFactory shouldHaveAnchor(URL actual, String expectedAnchor) {
return expectedAnchor == null ? new ShouldHaveAnchor(actual) : new ShouldHaveAnchor(actual, expectedAnchor);
}
private ShouldHaveAnchor(URL actual, String expectedAnchor) {
super(SHOULD_HAVE_ANCHOR, actual, expectedAnchor, actual.getRef());
}
private ShouldHaveAnchor(URL actual) {
super(SHOULD_NOT_HAVE_ANCHOR, actual, actual.getRef());
}
}
| ShouldHaveAnchor |
java | micronaut-projects__micronaut-core | core/src/main/java/io/micronaut/core/value/MapPropertyResolver.java | {
"start": 1143,
"end": 3404
} | class ____ implements PropertyResolver {
private final Map<String, Object> map;
private final ConversionService conversionService;
/**
* @param map The map to resolves the properties from
*/
public MapPropertyResolver(Map<String, Object> map) {
this.map = map;
this.conversionService = ConversionService.SHARED;
}
/**
* @param map The map to resolves the properties from
* @param conversionService The conversion service
*/
public MapPropertyResolver(Map<String, Object> map, ConversionService conversionService) {
this.map = map;
this.conversionService = conversionService;
}
@Override
public boolean containsProperty(String name) {
return map.containsKey(name);
}
@Override
public boolean containsProperties(String name) {
return map.keySet().stream().anyMatch(k -> k.startsWith(name));
}
@Override
public <T> Optional<T> getProperty(String name, ArgumentConversionContext<T> conversionContext) {
Object value = map.get(name);
return conversionService.convert(value, conversionContext);
}
@NonNull
@Override
public Collection<String> getPropertyEntries(@NonNull String name) {
if (StringUtils.isNotEmpty(name)) {
String prefix = name + ".";
Set<String> strings = map.keySet();
// to list to retain order from linked hash map
List<String> entries = new ArrayList<>(strings.size());
for (String k : strings) {
if (k.startsWith(prefix)) {
String withoutPrefix = k.substring(prefix.length());
int i = withoutPrefix.indexOf('.');
String e;
if (i > -1) {
e = withoutPrefix.substring(0, i);
} else {
e = withoutPrefix;
}
entries.add(e);
}
}
return entries;
}
return Collections.emptySet();
}
@Override
public List<List<String>> getPropertyPathMatches(String pathPattern) {
return Collections.emptyList();
}
}
| MapPropertyResolver |
java | netty__netty | transport/src/main/java/io/netty/bootstrap/Bootstrap.java | {
"start": 1810,
"end": 12583
} | class ____ extends AbstractBootstrap<Bootstrap, Channel> {
private static final InternalLogger logger = InternalLoggerFactory.getInstance(Bootstrap.class);
private final BootstrapConfig config = new BootstrapConfig(this);
private ExternalAddressResolver externalResolver;
private volatile boolean disableResolver;
private volatile SocketAddress remoteAddress;
public Bootstrap() { }
private Bootstrap(Bootstrap bootstrap) {
super(bootstrap);
externalResolver = bootstrap.externalResolver;
disableResolver = bootstrap.disableResolver;
remoteAddress = bootstrap.remoteAddress;
}
/**
* Sets the {@link NameResolver} which will resolve the address of the unresolved named address.
*
* @param resolver the {@link NameResolver} for this {@code Bootstrap}; may be {@code null}, in which case a default
* resolver will be used
*
* @see io.netty.resolver.DefaultAddressResolverGroup
*/
public Bootstrap resolver(AddressResolverGroup<?> resolver) {
externalResolver = resolver == null ? null : new ExternalAddressResolver(resolver);
disableResolver = false;
return this;
}
/**
* Disables address name resolution. Name resolution may be re-enabled with
* {@link Bootstrap#resolver(AddressResolverGroup)}
*/
public Bootstrap disableResolver() {
externalResolver = null;
disableResolver = true;
return this;
}
/**
* The {@link SocketAddress} to connect to once the {@link #connect()} method
* is called.
*/
public Bootstrap remoteAddress(SocketAddress remoteAddress) {
this.remoteAddress = remoteAddress;
return this;
}
/**
* @see #remoteAddress(SocketAddress)
*/
public Bootstrap remoteAddress(String inetHost, int inetPort) {
remoteAddress = InetSocketAddress.createUnresolved(inetHost, inetPort);
return this;
}
/**
* @see #remoteAddress(SocketAddress)
*/
public Bootstrap remoteAddress(InetAddress inetHost, int inetPort) {
remoteAddress = new InetSocketAddress(inetHost, inetPort);
return this;
}
/**
* Connect a {@link Channel} to the remote peer.
*/
public ChannelFuture connect() {
validate();
SocketAddress remoteAddress = this.remoteAddress;
if (remoteAddress == null) {
throw new IllegalStateException("remoteAddress not set");
}
return doResolveAndConnect(remoteAddress, config.localAddress());
}
/**
* Connect a {@link Channel} to the remote peer.
*/
public ChannelFuture connect(String inetHost, int inetPort) {
return connect(InetSocketAddress.createUnresolved(inetHost, inetPort));
}
/**
* Connect a {@link Channel} to the remote peer.
*/
public ChannelFuture connect(InetAddress inetHost, int inetPort) {
return connect(new InetSocketAddress(inetHost, inetPort));
}
/**
* Connect a {@link Channel} to the remote peer.
*/
public ChannelFuture connect(SocketAddress remoteAddress) {
ObjectUtil.checkNotNull(remoteAddress, "remoteAddress");
validate();
return doResolveAndConnect(remoteAddress, config.localAddress());
}
/**
* Connect a {@link Channel} to the remote peer.
*/
public ChannelFuture connect(SocketAddress remoteAddress, SocketAddress localAddress) {
ObjectUtil.checkNotNull(remoteAddress, "remoteAddress");
validate();
return doResolveAndConnect(remoteAddress, localAddress);
}
/**
* @see #connect()
*/
private ChannelFuture doResolveAndConnect(final SocketAddress remoteAddress, final SocketAddress localAddress) {
final ChannelFuture regFuture = initAndRegister();
final Channel channel = regFuture.channel();
if (regFuture.isDone()) {
if (!regFuture.isSuccess()) {
return regFuture;
}
return doResolveAndConnect0(channel, remoteAddress, localAddress, channel.newPromise());
} else {
// Registration future is almost always fulfilled already, but just in case it's not.
final PendingRegistrationPromise promise = new PendingRegistrationPromise(channel);
regFuture.addListener(new ChannelFutureListener() {
@Override
public void operationComplete(ChannelFuture future) throws Exception {
// Directly obtain the cause and do a null check so we only need one volatile read in case of a
// failure.
Throwable cause = future.cause();
if (cause != null) {
// Registration on the EventLoop failed so fail the ChannelPromise directly to not cause an
// IllegalStateException once we try to access the EventLoop of the Channel.
promise.setFailure(cause);
} else {
// Registration was successful, so set the correct executor to use.
// See https://github.com/netty/netty/issues/2586
promise.registered();
doResolveAndConnect0(channel, remoteAddress, localAddress, promise);
}
}
});
return promise;
}
}
private ChannelFuture doResolveAndConnect0(final Channel channel, SocketAddress remoteAddress,
final SocketAddress localAddress, final ChannelPromise promise) {
try {
if (disableResolver) {
doConnect(remoteAddress, localAddress, promise);
return promise;
}
final EventLoop eventLoop = channel.eventLoop();
AddressResolver<SocketAddress> resolver;
try {
resolver = ExternalAddressResolver.getOrDefault(externalResolver).getResolver(eventLoop);
} catch (Throwable cause) {
channel.close();
return promise.setFailure(cause);
}
if (!resolver.isSupported(remoteAddress) || resolver.isResolved(remoteAddress)) {
// Resolver has no idea about what to do with the specified remote address or it's resolved already.
doConnect(remoteAddress, localAddress, promise);
return promise;
}
final Future<SocketAddress> resolveFuture = resolver.resolve(remoteAddress);
if (resolveFuture.isDone()) {
final Throwable resolveFailureCause = resolveFuture.cause();
if (resolveFailureCause != null) {
// Failed to resolve immediately
channel.close();
promise.setFailure(resolveFailureCause);
} else {
// Succeeded to resolve immediately; cached? (or did a blocking lookup)
doConnect(resolveFuture.getNow(), localAddress, promise);
}
return promise;
}
// Wait until the name resolution is finished.
resolveFuture.addListener(new FutureListener<SocketAddress>() {
@Override
public void operationComplete(Future<SocketAddress> future) throws Exception {
if (future.cause() != null) {
channel.close();
promise.setFailure(future.cause());
} else {
doConnect(future.getNow(), localAddress, promise);
}
}
});
} catch (Throwable cause) {
promise.tryFailure(cause);
}
return promise;
}
private static void doConnect(
final SocketAddress remoteAddress, final SocketAddress localAddress, final ChannelPromise connectPromise) {
// This method is invoked before channelRegistered() is triggered. Give user handlers a chance to set up
// the pipeline in its channelRegistered() implementation.
final Channel channel = connectPromise.channel();
channel.eventLoop().execute(new Runnable() {
@Override
public void run() {
if (localAddress == null) {
channel.connect(remoteAddress, connectPromise);
} else {
channel.connect(remoteAddress, localAddress, connectPromise);
}
connectPromise.addListener(ChannelFutureListener.CLOSE_ON_FAILURE);
}
});
}
@Override
void init(Channel channel) {
ChannelPipeline p = channel.pipeline();
p.addLast(config.handler());
setChannelOptions(channel, newOptionsArray(), logger);
setAttributes(channel, newAttributesArray());
Collection<ChannelInitializerExtension> extensions = getInitializerExtensions();
if (!extensions.isEmpty()) {
for (ChannelInitializerExtension extension : extensions) {
try {
extension.postInitializeClientChannel(channel);
} catch (Exception e) {
logger.warn("Exception thrown from postInitializeClientChannel", e);
}
}
}
}
@Override
public Bootstrap validate() {
super.validate();
if (config.handler() == null) {
throw new IllegalStateException("handler not set");
}
return this;
}
@Override
@SuppressWarnings("CloneDoesntCallSuperClone")
public Bootstrap clone() {
return new Bootstrap(this);
}
/**
* Returns a deep clone of this bootstrap which has the identical configuration except that it uses
* the given {@link EventLoopGroup}. This method is useful when making multiple {@link Channel}s with similar
* settings.
*/
public Bootstrap clone(EventLoopGroup group) {
Bootstrap bs = new Bootstrap(this);
bs.group = group;
return bs;
}
@Override
public final BootstrapConfig config() {
return config;
}
final SocketAddress remoteAddress() {
return remoteAddress;
}
final AddressResolverGroup<?> resolver() {
if (disableResolver) {
return null;
}
return ExternalAddressResolver.getOrDefault(externalResolver);
}
/* Holder to avoid NoClassDefFoundError in case netty-resolver dependency is excluded
(e.g. some address families do not need name resolution) */
static final | Bootstrap |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/util/datetime/DatePrinter.java | {
"start": 2811,
"end": 3244
} | class ____, usually StringBuilder or StringBuffer.
* @return the specified string buffer
* @since 3.5
*/
<B extends Appendable> B format(long millis, B buf);
/**
* <p>Formats a {@code Date} object into the
* supplied {@code Appendable} using a {@code GregorianCalendar}.</p>
*
* @param date the date to format
* @param buf the buffer to format into
* @param <B> the Appendable | type |
java | google__truth | core/src/test/java/com/google/common/truth/StackTraceCleanerTest.java | {
"start": 10749,
"end": 10822
} | class ____ extends Runner {}
/**
* This scenario where truth | SomeRunner |
java | quarkusio__quarkus | extensions/devui/deployment-spi/src/main/java/io/quarkus/devui/spi/workspace/Display.java | {
"start": 87,
"end": 375
} | enum ____ {
nothing, // Nothing will be displayed
dialog, // Content will be displayed in a dialog popup
replace, // Content will replace the original (input) content
split, // Content will display in a split screen
notification // Content will in a notification
}
| Display |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/repositories/blobstore/RequestedRangeNotSatisfiedException.java | {
"start": 598,
"end": 1700
} | class ____ extends IOException {
private final String resource;
private final long position;
private final long length;
public RequestedRangeNotSatisfiedException(String resource, long position, long length) {
super(message(resource, position, length));
this.resource = resource;
this.position = position;
this.length = length;
}
public RequestedRangeNotSatisfiedException(String resource, long position, long length, Throwable cause) {
super(message(resource, position, length), cause);
this.resource = resource;
this.position = position;
this.length = length;
}
public String getResource() {
return resource;
}
public long getPosition() {
return position;
}
public long getLength() {
return length;
}
private static String message(String resource, long position, long length) {
return Strings.format("Requested range [position=%d, length=%d] cannot be satisfied for [%s]", position, length, resource);
}
}
| RequestedRangeNotSatisfiedException |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReadWriteDiskValidatorMetrics.java | {
"start": 1377,
"end": 5350
} | class ____ {
@Metric("# of disk failure") MutableCounterInt failureCount;
@Metric("Time of last failure") MutableGaugeLong lastFailureTime;
private final MetricsRegistry registry;
private static final MetricsInfo RECORD_INFO =
info("ReadWriteDiskValidatorMetrics", "Metrics for the DiskValidator");
private final int[] quantileIntervals = new int[] {
60 * 60, // 1h
24 * 60 * 60, //1 day
10 * 24 * 60 * 60 //10 day
};
private final MutableQuantiles[] fileReadQuantiles;
private final MutableQuantiles[] fileWriteQuantiles;
public ReadWriteDiskValidatorMetrics() {
registry = new MetricsRegistry(RECORD_INFO);
fileReadQuantiles = new MutableQuantiles[quantileIntervals.length];
for (int i = 0; i < fileReadQuantiles.length; i++) {
int interval = quantileIntervals[i];
fileReadQuantiles[i] = registry.newQuantiles(
"readLatency" + interval + "s",
"File read latency", "Ops", "latencyMicros", interval);
}
fileWriteQuantiles = new MutableQuantiles[quantileIntervals.length];
for (int i = 0; i < fileWriteQuantiles.length; i++) {
int interval = quantileIntervals[i];
fileWriteQuantiles[i] = registry.newQuantiles(
"writeLatency" + interval + "s",
"File write latency", "Ops", "latencyMicros", interval);
}
}
/**
* Simple metrics cache to help prevent re-registrations and help to access
* metrics.
*/
protected final static Map<String, ReadWriteDiskValidatorMetrics> DIR_METRICS
= new HashMap<>();
/**
* Get a metric by given directory name.
*
* @param dirName directory name
* @return the metric
*/
public synchronized static ReadWriteDiskValidatorMetrics getMetric(
String dirName) {
MetricsSystem ms = DefaultMetricsSystem.instance();
ReadWriteDiskValidatorMetrics metrics = DIR_METRICS.get(dirName);
if (metrics == null) {
metrics = new ReadWriteDiskValidatorMetrics();
// Register with the MetricsSystems
if (ms != null) {
metrics = ms.register(sourceName(dirName),
"Metrics for directory: " + dirName, metrics);
}
DIR_METRICS.put(dirName, metrics);
}
return metrics;
}
/**
* Add the file write latency to {@link MutableQuantiles} metrics.
*
* @param writeLatency file write latency in microseconds
*/
public void addWriteFileLatency(long writeLatency) {
if (fileWriteQuantiles != null) {
for (MutableQuantiles q : fileWriteQuantiles) {
q.add(writeLatency);
}
}
}
/**
* Add the file read latency to {@link MutableQuantiles} metrics.
*
* @param readLatency file read latency in microseconds
*/
public void addReadFileLatency(long readLatency) {
if (fileReadQuantiles!= null) {
for (MutableQuantiles q : fileReadQuantiles) {
q.add(readLatency);
}
}
}
/**
* Get a source name by given directory name.
*
* @param dirName directory name
* @return the source name
*/
protected static String sourceName(String dirName) {
StringBuilder sb = new StringBuilder(RECORD_INFO.name());
sb.append(",dir=").append(dirName);
return sb.toString();
}
/**
* Increase the failure count and update the last failure timestamp.
*/
public void diskCheckFailed() {
failureCount.incr();
lastFailureTime.set(System.nanoTime());
}
/**
* Get {@link MutableQuantiles} metrics for the file read time.
*
* @return {@link MutableQuantiles} metrics for the file read time
*/
@VisibleForTesting
protected MutableQuantiles[] getFileReadQuantiles() {
return fileReadQuantiles;
}
/**
* Get {@link MutableQuantiles} metrics for the file write time.
*
* @return {@link MutableQuantiles} metrics for the file write time
*/
@VisibleForTesting
protected MutableQuantiles[] getFileWriteQuantiles() {
return fileWriteQuantiles;
}
}
| ReadWriteDiskValidatorMetrics |
java | resilience4j__resilience4j | resilience4j-spring-boot2/src/test/java/io/github/resilience4j/ratelimiter/autoconfigure/RateLimiterAutoConfigurationCustomizerTest.java | {
"start": 9620,
"end": 10373
} | class ____ {
@Bean
public RateLimiterConfigCustomizer defaultCustomizer() {
return RateLimiterConfigCustomizer.of("default",
builder -> builder.limitForPeriod(1000)
.timeoutDuration(Duration.ofMillis(1000))
.limitRefreshPeriod(Duration.ofMillis(1000))
);
}
@Bean
public RateLimiterConfigCustomizer sharedConfigCustomizer() {
return RateLimiterConfigCustomizer.of("sharedConfig",
builder -> builder.limitForPeriod(2000)
.timeoutDuration(Duration.ofMillis(2000))
.limitRefreshPeriod(Duration.ofMillis(2000))
);
}
}
}
| ConfigCustomizerConfiguration |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/inject/ScopeAnnotationOnInterfaceOrAbstractClassTest.java | {
"start": 3433,
"end": 3554
} | interface ____ {
@Subcomponent
@CustomScope
abstract | DaggerInterfaceComponent |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest-client/deployment/src/test/java/io/quarkus/rest/client/reactive/queries/ClientQueryParamFromPropertyTest.java | {
"start": 2575,
"end": 3204
} | interface ____ {
@GET
String getWithParam();
@GET
@ClientQueryParam(name = "some-other-param", value = "${non-existent-property}")
String missingRequiredProperty();
@GET
@ClientQueryParam(name = "some-other-param", value = "${non-existent-property}", required = false)
String missingNonRequiredProperty();
@GET
@ClientQueryParam(name = "some-other-param", value = "${non-existent-property}", required = false)
@ClientQueryParam(name = "my-param", value = "other")
String missingNonRequiredPropertyAndOverriddenValue();
}
}
| Client |
java | dropwizard__dropwizard | dropwizard-logging/src/main/java/io/dropwizard/logging/common/filter/FilterFactory.java | {
"start": 729,
"end": 837
} | interface ____<E extends DeferredProcessingAware> extends Discoverable {
Filter<E> build();
}
| FilterFactory |
java | spring-projects__spring-security | config/src/main/java/org/springframework/security/config/annotation/SecurityConfigurer.java | {
"start": 1262,
"end": 1937
} | interface ____<O, B extends SecurityBuilder<O>> {
/**
* Initialize the {@link SecurityBuilder}. Here only shared state should be created
* and modified, but not properties on the {@link SecurityBuilder} used for building
* the object. This ensures that the {@link #configure(SecurityBuilder)} method uses
* the correct shared objects when building. Configurers should be applied here.
* @param builder
* @throws Exception
*/
void init(B builder);
/**
* Configure the {@link SecurityBuilder} by setting the necessary properties on the
* {@link SecurityBuilder}.
* @param builder
* @throws Exception
*/
void configure(B builder);
}
| SecurityConfigurer |
java | junit-team__junit5 | junit-jupiter-api/src/main/java/org/junit/jupiter/api/condition/EnabledInNativeImage.java | {
"start": 1439,
"end": 3876
} | class ____ being
* instantiated, and it does not prevent the execution of class-level lifecycle
* callbacks such as {@code @BeforeAll} methods, {@code @AfterAll} methods, and
* corresponding extension APIs.
*
* <p>This annotation may be used as a meta-annotation in order to create a
* custom <em>composed annotation</em> that inherits the semantics of this
* annotation.
*
* <h2>Technical Details</h2>
*
* <p>JUnit detects whether tests are executing within a GraalVM native image by
* checking for the presence of the {@code org.graalvm.nativeimage.imagecode}
* system property (see
* <a href="https://github.com/oracle/graal/blob/master/sdk/src/org.graalvm.nativeimage/src/org/graalvm/nativeimage/ImageInfo.java">org.graalvm.nativeimage.ImageInfo</a>
* for details). The GraalVM compiler sets the property to {@code buildtime} while
* compiling a native image; the property is set to {@code runtime} while a native
* image is executing; and the Gradle and Maven plug-ins in the GraalVM
* <a href="https://graalvm.github.io/native-build-tools/latest/">Native Build Tools</a>
* project set the property to {@code agent} while executing tests with the GraalVM
* <a href="https://www.graalvm.org/reference-manual/native-image/metadata/AutomaticMetadataCollection/">tracing agent</a>.
*
* @since 5.9.1
* @see org.junit.jupiter.api.condition.EnabledIf
* @see org.junit.jupiter.api.condition.DisabledIf
* @see org.junit.jupiter.api.condition.EnabledOnOs
* @see org.junit.jupiter.api.condition.DisabledOnOs
* @see org.junit.jupiter.api.condition.EnabledOnJre
* @see org.junit.jupiter.api.condition.DisabledOnJre
* @see org.junit.jupiter.api.condition.EnabledForJreRange
* @see org.junit.jupiter.api.condition.DisabledForJreRange
* @see org.junit.jupiter.api.condition.DisabledInNativeImage
* @see org.junit.jupiter.api.condition.EnabledIfSystemProperty
* @see org.junit.jupiter.api.condition.DisabledIfSystemProperty
* @see org.junit.jupiter.api.condition.EnabledIfEnvironmentVariable
* @see org.junit.jupiter.api.condition.DisabledIfEnvironmentVariable
* @see org.junit.jupiter.api.Disabled
*/
@Target({ ElementType.TYPE, ElementType.METHOD })
@Retention(RetentionPolicy.RUNTIME)
@Documented
@EnabledIfSystemProperty(named = "org.graalvm.nativeimage.imagecode", matches = ".+", //
disabledReason = "Not currently executing within a GraalVM native image")
@API(status = STABLE, since = "5.9.1")
public @ | from |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/MyBatisBeanEndpointBuilderFactory.java | {
"start": 1613,
"end": 5101
} | interface ____
extends
EndpointProducerBuilder {
default AdvancedMyBatisBeanEndpointBuilder advanced() {
return (AdvancedMyBatisBeanEndpointBuilder) this;
}
/**
* The executor type to be used while executing statements. simple -
* executor does nothing special. reuse - executor reuses prepared
* statements. batch - executor reuses statements and batches updates.
*
* The option is a: <code>org.apache.ibatis.session.ExecutorType</code>
* type.
*
* Default: SIMPLE
* Group: producer
*
* @param executorType the value to set
* @return the dsl builder
*/
default MyBatisBeanEndpointBuilder executorType(org.apache.ibatis.session.ExecutorType executorType) {
doSetProperty("executorType", executorType);
return this;
}
/**
* The executor type to be used while executing statements. simple -
* executor does nothing special. reuse - executor reuses prepared
* statements. batch - executor reuses statements and batches updates.
*
* The option will be converted to a
* <code>org.apache.ibatis.session.ExecutorType</code> type.
*
* Default: SIMPLE
* Group: producer
*
* @param executorType the value to set
* @return the dsl builder
*/
default MyBatisBeanEndpointBuilder executorType(String executorType) {
doSetProperty("executorType", executorType);
return this;
}
/**
* User the header value for input parameters instead of the message
* body. By default, inputHeader == null and the input parameters are
* taken from the message body. If outputHeader is set, the value is
* used and query parameters will be taken from the header instead of
* the body.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*
* @param inputHeader the value to set
* @return the dsl builder
*/
default MyBatisBeanEndpointBuilder inputHeader(String inputHeader) {
doSetProperty("inputHeader", inputHeader);
return this;
}
/**
* Store the query result in a header instead of the message body. By
* default, outputHeader == null and the query result is stored in the
* message body, any existing content in the message body is discarded.
* If outputHeader is set, the value is used as the name of the header
* to store the query result and the original message body is preserved.
* Setting outputHeader will also omit populating the default
* CamelMyBatisResult header since it would be the same as outputHeader
* all the time.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*
* @param outputHeader the value to set
* @return the dsl builder
*/
default MyBatisBeanEndpointBuilder outputHeader(String outputHeader) {
doSetProperty("outputHeader", outputHeader);
return this;
}
}
/**
* Advanced builder for endpoint for the MyBatis Bean component.
*/
public | MyBatisBeanEndpointBuilder |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest/runtime/src/test/java/io/quarkus/resteasy/reactive/runtime/mapping/TypeSignatureParserTest.java | {
"start": 579,
"end": 3653
} | class ____<T> extends InnerGeneric<T> {
}
@Test
public void testSignatures() throws NoSuchMethodException, SecurityException {
assertType("B", byte.class);
assertType("C", char.class);
assertType("D", double.class);
assertType("F", float.class);
assertType("I", int.class);
assertType("J", long.class);
assertType("S", short.class);
assertType("Z", boolean.class);
assertType("[Z", boolean[].class);
assertType("[[Z", boolean[][].class);
assertType("Ljava/lang/Class;", Class.class);
assertType("[Ljava/lang/Class;", Class[].class);
assertType("[[Ljava/lang/Class;", Class[][].class);
assertType("Lio/quarkus/resteasy/reactive/runtime/mapping/TypeSignatureParserTest;", TypeSignatureParserTest.class);
assertType("Lio/quarkus/resteasy/reactive/runtime/mapping/TypeSignatureParserTest.StaticInner;", StaticInner.class);
assertType("Lio/quarkus/resteasy/reactive/runtime/mapping/TypeSignatureParserTest.Inner;", Inner.class);
assertType("Ljava/util/List<Ljava/lang/String;>;", new TypeLiteral<List<String>>() {
});
assertType("[Ljava/util/List<Ljava/lang/String;>;", new TypeLiteral<List<String>[]>() {
});
assertType("[[Ljava/util/List<Ljava/lang/String;>;", new TypeLiteral<List<String>[][]>() {
});
assertType("Ljava/util/Map<Ljava/lang/String;Ljava/lang/Integer;>;", new TypeLiteral<Map<String, Integer>>() {
});
assertType("Lio/quarkus/resteasy/reactive/runtime/mapping/TypeSignatureParserTest.InnerGeneric<Ljava/lang/String;>;",
new TypeLiteral<InnerGeneric<String>>() {
});
assertType(
"Lio/quarkus/resteasy/reactive/runtime/mapping/TypeSignatureParserTest.InnerGeneric<Ljava/lang/String;>.Inner<Ljava/lang/Integer;>;",
new TypeLiteral<InnerGeneric<String>.Inner<Integer>>() {
});
assertType("Ljava/util/List<+Ljava/lang/String;>;", new TypeLiteral<List<? extends String>>() {
});
assertType("Ljava/util/List<-Ljava/lang/String;>;", new TypeLiteral<List<? super String>>() {
});
assertType("Ljava/util/List<*>;", new TypeLiteral<List<?>>() {
});
// assertType("TT;", ((ParameterizedType)Foo.class.getGenericSuperclass()).getActualTypeArguments()[0]);
}
private void assertType(String signature, TypeLiteral<?> actual) {
assertType(signature, actual.getType());
}
private void assertType(String signature, Type actual) {
Type parsedType = new TypeSignatureParser(signature).parseType();
// the JDK impl has reasonable hashCode/equals for lots of stuff but not TypeVariable, so we do a switcheroo for those
if (actual instanceof TypeVariable) {
Assertions.assertTrue(parsedType.equals(actual), () -> "expecting " + actual + " but got " + parsedType);
} else {
Assertions.assertEquals(actual, parsedType);
}
}
}
| Foo |
java | elastic__elasticsearch | x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportValidateJobConfigAction.java | {
"start": 812,
"end": 1501
} | class ____ extends HandledTransportAction<ValidateJobConfigAction.Request, AcknowledgedResponse> {
@Inject
public TransportValidateJobConfigAction(TransportService transportService, ActionFilters actionFilters) {
super(
ValidateJobConfigAction.NAME,
transportService,
actionFilters,
ValidateJobConfigAction.Request::new,
EsExecutors.DIRECT_EXECUTOR_SERVICE
);
}
@Override
protected void doExecute(Task task, ValidateJobConfigAction.Request request, ActionListener<AcknowledgedResponse> listener) {
listener.onResponse(AcknowledgedResponse.TRUE);
}
}
| TransportValidateJobConfigAction |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineWriteResponse.java | {
"start": 1257,
"end": 1612
} | class ____ holds a list of put errors. This is the response returned when a
* list of {@link TimelineEntity} objects is added to the timeline. If there are
* errors in storing individual entity objects, they will be indicated in the
* list of errors.
*/
@XmlRootElement(name = "response")
@XmlAccessorType(XmlAccessType.NONE)
@Public
@Unstable
public | that |
java | apache__camel | dsl/camel-kamelet-main/src/main/java/org/apache/camel/main/injection/AnnotationDependencyInjection.java | {
"start": 15040,
"end": 19019
} | class ____ implements CamelBeanPostProcessorInjector {
private final CamelContext context;
private final CamelPostProcessorHelper helper;
public QuarkusBeanPostProcessorInjector(CamelContext context) {
this.context = context;
this.helper = new CamelPostProcessorHelper(context);
}
@Override
public void onFieldInject(Field field, Object bean, String beanName) {
boolean inject = AnnotationHelper.hasAnnotation(field, QUARKUS_INJECT);
if (inject) {
String name = null;
String named = AnnotationHelper.getAnnotationValue(field, QUARKUS_NAMED);
if (named != null) {
name = named;
}
ReflectionHelper.setField(field, bean,
helper.getInjectionBeanValue(field.getType(), name));
}
if (AnnotationHelper.hasAnnotation(field, QUARKUS_CONFIG_PROPERTY)) {
String name = (String) AnnotationHelper.getAnnotationValue(field,
QUARKUS_CONFIG_PROPERTY, "name");
String df = (String) AnnotationHelper.getAnnotationValue(field,
QUARKUS_CONFIG_PROPERTY, "defaultValue");
if ("org.eclipse.microprofile.config.configproperty.unconfigureddvalue".equals(df)) {
df = null;
}
ReflectionHelper.setField(field, bean,
helper.getInjectionPropertyValue(field.getType(), field.getGenericType(), name, df, null));
}
}
@Override
public void onMethodInject(Method method, Object bean, String beanName) {
boolean produces = AnnotationHelper.hasAnnotation(method, QUARKUS_PRODUCES);
boolean inject = AnnotationHelper.hasAnnotation(method, QUARKUS_INJECT);
boolean bi = AnnotationHelper.hasAnnotation(method, QUARKUS_NAMED);
if (produces || inject || bi) {
String an = produces ? "Produces" : "Inject";
Object instance;
if (lazyBean) {
instance = (Supplier<Object>) () -> helper.getInjectionBeanMethodValue(context, method, bean, beanName,
an);
} else {
instance = helper.getInjectionBeanMethodValue(context, method, bean, beanName, an);
}
if (instance != null) {
String name = method.getName();
String named = AnnotationHelper.getAnnotationValue(method, QUARKUS_NAMED);
if (ObjectHelper.isNotEmpty(named)) {
name = named;
}
bindBean(context, name, instance, method.getReturnType(), false);
}
}
}
}
private static void bindBean(CamelContext context, String name, Object instance, Class<?> type, boolean postProcess) {
// to support hot reloading of beans then we need to enable unbind mode in bean post processor
Registry registry = context.getRegistry();
CamelBeanPostProcessor bpp = PluginHelper.getBeanPostProcessor(context);
bpp.setUnbindEnabled(true);
try {
// re-bind the bean to the registry
registry.unbind(name);
if (instance instanceof Supplier sup) {
registry.bind(name, type, (Supplier<Object>) sup);
} else {
registry.bind(name, type, instance);
}
if (postProcess) {
bpp.postProcessBeforeInitialization(instance, name);
bpp.postProcessAfterInitialization(instance, name);
}
} catch (Exception e) {
throw RuntimeCamelException.wrapRuntimeException(e);
} finally {
bpp.setUnbindEnabled(false);
}
}
}
| QuarkusBeanPostProcessorInjector |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/component/dataset/CustomDataSetTest.java | {
"start": 1333,
"end": 2932
} | class ____ extends ContextTestSupport {
protected final DataSet dataSet = new DataSetSupport() {
final Expression expression = new XPathBuilder("/message/@index").resultType(Long.class);
@Override
public void assertMessageExpected(DataSetEndpoint dataSetEndpoint, Exchange expected, Exchange actual, long index) {
// lets compare the XPath result
Predicate predicate = PredicateBuilder.isEqualTo(expression, ExpressionBuilder.constantExpression(index));
log.debug("evaluating predicate: {}", predicate);
PredicateAssertHelper.assertMatches(predicate, "Actual: " + actual, actual);
}
protected Object createMessageBody(long messageIndex) {
return "<message index='" + messageIndex + "'>someBody" + messageIndex + "</message>";
}
};
@Override
protected Registry createCamelRegistry() throws Exception {
Registry answer = super.createCamelRegistry();
answer.bind("foo", dataSet);
return answer;
}
@Test
public void testUsingCustomDataSet() throws Exception {
// data set will itself set its assertions so we should just
// assert that all mocks is ok
assertMockEndpointsSatisfied();
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
from("dataset:foo?initialDelay=0").to("direct:foo");
from("direct:foo").to("dataset:foo?initialDelay=0");
}
};
}
}
| CustomDataSetTest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.