language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/mysql/createTable/MySqlCreateTableTest143_position.java | {
"start": 457,
"end": 4757
} | class ____ extends MysqlTest {
public void test_0() throws Exception {
String sql = "CREATE TABLE `resume_position_portrait` (\n" +
" `resume_id` varchar(36) NOT NULL,\n" +
" `method` varchar(50) NOT NULL,\n" +
" `position` varchar(256) NOT NULL,\n" +
" `position_level1` longtext,\n" +
" `position_level2` longtext,\n" +
" `probability` float NOT NULL,\n" +
" `success` bit(1) NOT NULL,\n" +
" `updated` datetime(6) NOT NULL,\n" +
" `created` datetime DEFAULT NULL,\n" +
" PRIMARY KEY (`resume_id`),\n" +
" INDEX `IX_resume_position_portrait_probability` (`probability`) USING BTREE,\n" +
" KEY `IX_resume_position_portrait_method` (`method`) USING BTREE,\n" +
" KEY `IX_resume_position_portrait_method_probability` (`method`,`probability`) USING BTREE,\n" +
" KEY `IX_resume_position_portrait_position_probability` (`position`(191),`probability`) USING BTREE,\n" +
" KEY `ix_probability_method` (`probability`,`method`)\n" +
") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 dbpartition by hash(`resume_id`) tbpartition by hash(`resume_id`) tbpartitions 4";
MySqlStatementParser parser = new MySqlStatementParser(sql);
List<SQLStatement> statementList = parser.parseStatementList();
MySqlCreateTableStatement stmt = (MySqlCreateTableStatement) statementList.get(0);
assertEquals(1, statementList.size());
assertEquals("CREATE TABLE `resume_position_portrait` (\n" +
"\t`resume_id` varchar(36) NOT NULL,\n" +
"\t`method` varchar(50) NOT NULL,\n" +
"\t`position` varchar(256) NOT NULL,\n" +
"\t`position_level1` longtext,\n" +
"\t`position_level2` longtext,\n" +
"\t`probability` float NOT NULL,\n" +
"\t`success` bit(1) NOT NULL,\n" +
"\t`updated` datetime(6) NOT NULL,\n" +
"\t`created` datetime DEFAULT NULL,\n" +
"\tPRIMARY KEY (`resume_id`),\n" +
"\tINDEX `IX_resume_position_portrait_probability` USING BTREE(`probability`),\n" +
"\tKEY `IX_resume_position_portrait_method` USING BTREE (`method`),\n" +
"\tKEY `IX_resume_position_portrait_method_probability` USING BTREE (`method`, `probability`),\n" +
"\tKEY `IX_resume_position_portrait_position_probability` USING BTREE (`position`(191), `probability`),\n" +
"\tKEY `ix_probability_method` (`probability`, `method`)\n" +
") ENGINE = InnoDB CHARSET = utf8mb4\n" +
"DBPARTITION BY hash(`resume_id`)\n" +
"TBPARTITION BY hash(`resume_id`) TBPARTITIONS 4", stmt.toString());
assertEquals("create table `resume_position_portrait` (\n" +
"\t`resume_id` varchar(36) not null,\n" +
"\t`method` varchar(50) not null,\n" +
"\t`position` varchar(256) not null,\n" +
"\t`position_level1` longtext,\n" +
"\t`position_level2` longtext,\n" +
"\t`probability` float not null,\n" +
"\t`success` bit(1) not null,\n" +
"\t`updated` datetime(6) not null,\n" +
"\t`created` datetime default null,\n" +
"\tprimary key (`resume_id`),\n" +
"\tindex `IX_resume_position_portrait_probability` using BTREE(`probability`),\n" +
"\tkey `IX_resume_position_portrait_method` using BTREE (`method`),\n" +
"\tkey `IX_resume_position_portrait_method_probability` using BTREE (`method`, `probability`),\n" +
"\tkey `IX_resume_position_portrait_position_probability` using BTREE (`position`(191), `probability`),\n" +
"\tkey `ix_probability_method` (`probability`, `method`)\n" +
") engine = InnoDB charset = utf8mb4\n" +
"dbpartition by hash(`resume_id`)\n" +
"tbpartition by hash(`resume_id`) tbpartitions 4", stmt.toLowerCaseString());
}
}
| MySqlCreateTableTest143_position |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/core/annotation/AnnotationUtils.java | {
"start": 3426,
"end": 3763
} | class ____ details. For fine-grained support for
* meta-annotations with <em>attribute overrides</em> in <em>composed annotations</em>,
* consider using the {@link MergedAnnotations} API directly or the more specific
* methods in {@link AnnotatedElementUtils} instead.
*
* <h3>Attribute Aliases</h3>
* <p>All public methods in this | for |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/junit/jupiter/nested/SqlScriptNestedTests.java | {
"start": 3646,
"end": 4407
} | class ____ {
@Test
void classLevelScripts() {
assertUsers("Catbert");
}
@Test
@Sql("/org/springframework/test/context/jdbc/data-add-dogbert.sql")
void merged() {
assertUsers("Catbert", "Dogbert");
}
@Test
@Sql({
"/org/springframework/test/context/jdbc/recreate-schema.sql",
"/org/springframework/test/context/jdbc/data.sql",
"/org/springframework/test/context/jdbc/data-add-dogbert.sql",
"/org/springframework/test/context/jdbc/data-add-catbert.sql"
})
@SqlMergeMode(MergeMode.OVERRIDE)
void overridden() {
assertUsers("Dilbert", "Dogbert", "Catbert");
}
}
@Nested
@NestedTestConfiguration(EnclosingConfiguration.INHERIT)
@SqlMergeMode(OVERRIDE)
| NestedClassLevelMergeSqlMergeModeTests |
java | apache__camel | dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/MllpComponentBuilderFactory.java | {
"start": 1382,
"end": 1828
} | interface ____ {
/**
* MLLP (camel-mllp)
* Communicate with external systems using the MLLP protocol.
*
* Category: health
* Since: 2.17
* Maven coordinates: org.apache.camel:camel-mllp
*
* @return the dsl builder
*/
static MllpComponentBuilder mllp() {
return new MllpComponentBuilderImpl();
}
/**
* Builder for the MLLP component.
*/
| MllpComponentBuilderFactory |
java | apache__flink | flink-table/flink-table-api-java/src/main/java/org/apache/flink/table/catalog/listener/AlterDatabaseEvent.java | {
"start": 1171,
"end": 2329
} | interface ____ extends DatabaseModificationEvent {
CatalogDatabase newDatabase();
boolean ignoreIfNotExists();
static CatalogModificationEvent createEvent(
final CatalogContext context,
final String databaseName,
final CatalogDatabase newDatabase,
final boolean ignoreIfNotExists) {
return new AlterDatabaseEvent() {
@Override
public CatalogDatabase newDatabase() {
return newDatabase;
}
@Override
public boolean ignoreIfNotExists() {
return ignoreIfNotExists;
}
@Nullable
@Override
public CatalogDatabase database() {
throw new IllegalStateException(
"There is no database in AlterDatabaseEvent, use database name instead.");
}
@Override
public String databaseName() {
return databaseName;
}
@Override
public CatalogContext context() {
return context;
}
};
}
}
| AlterDatabaseEvent |
java | apache__flink | flink-python/src/main/java/org/apache/flink/python/util/PythonDependencyUtils.java | {
"start": 6995,
"end": 19186
} | class ____ {
private static final String PYTHON_FILE_PREFIX = "python_file";
private static final String PYTHON_REQUIREMENTS_FILE_PREFIX = "python_requirements_file";
private static final String PYTHON_REQUIREMENTS_CACHE_PREFIX = "python_requirements_cache";
private static final String PYTHON_ARCHIVE_PREFIX = "python_archive";
private final ReadableConfig config;
private PythonDependencyManager(ReadableConfig config) {
Preconditions.checkArgument(config instanceof WritableConfig);
this.config = config;
}
/**
* Adds a Python dependency which could be .py files, Python packages(.zip, .egg etc.) or
* local directories. The dependencies will be added to the PYTHONPATH of the Python UDF
* worker and the local Py4J python client.
*
* @param filePath The path of the Python dependency.
*/
private void addPythonFile(Configuration pythonDependencyConfig, String filePath) {
Preconditions.checkNotNull(filePath);
String fileKey = generateUniqueFileKey(PYTHON_FILE_PREFIX, filePath);
registerCachedFileIfNotExist(fileKey, filePath);
if (!pythonDependencyConfig.contains(PYTHON_FILES_DISTRIBUTED_CACHE_INFO)) {
pythonDependencyConfig.set(
PYTHON_FILES_DISTRIBUTED_CACHE_INFO, new LinkedHashMap<>());
}
pythonDependencyConfig
.get(PYTHON_FILES_DISTRIBUTED_CACHE_INFO)
.put(fileKey, new File(filePath).getName());
}
/**
* Specifies the third-party dependencies via a requirements file. These dependencies will
* be installed by the command "pip install -r [requirements file]" before launching the
* Python UDF worker.
*
* @param requirementsFilePath The path of the requirements file.
*/
private void setPythonRequirements(
Configuration pythonDependencyConfig, String requirementsFilePath) {
setPythonRequirements(pythonDependencyConfig, requirementsFilePath, null);
}
/**
* Specifies the third-party dependencies via a requirements file. The
* `requirementsCachedDir` will be uploaded to support offline installation. These
* dependencies will be installed by the command "pip install -r [requirements file]
* --find-links [requirements cached dir]" before launching the Python UDF worker.
*
* @param requirementsFilePath The path of the requirements file.
* @param requirementsCachedDir The path of the requirements cached directory.
*/
private void setPythonRequirements(
Configuration pythonDependencyConfig,
String requirementsFilePath,
@Nullable String requirementsCachedDir) {
Preconditions.checkNotNull(requirementsFilePath);
if (!pythonDependencyConfig.contains(PYTHON_REQUIREMENTS_FILE_DISTRIBUTED_CACHE_INFO)) {
pythonDependencyConfig.set(
PYTHON_REQUIREMENTS_FILE_DISTRIBUTED_CACHE_INFO, new HashMap<>());
}
pythonDependencyConfig.get(PYTHON_REQUIREMENTS_FILE_DISTRIBUTED_CACHE_INFO).clear();
removeCachedFilesByPrefix(PYTHON_REQUIREMENTS_FILE_PREFIX);
removeCachedFilesByPrefix(PYTHON_REQUIREMENTS_CACHE_PREFIX);
String fileKey =
generateUniqueFileKey(PYTHON_REQUIREMENTS_FILE_PREFIX, requirementsFilePath);
registerCachedFileIfNotExist(fileKey, requirementsFilePath);
pythonDependencyConfig
.get(PYTHON_REQUIREMENTS_FILE_DISTRIBUTED_CACHE_INFO)
.put(FILE, fileKey);
if (requirementsCachedDir != null) {
String cacheDirKey =
generateUniqueFileKey(
PYTHON_REQUIREMENTS_CACHE_PREFIX, requirementsCachedDir);
registerCachedFileIfNotExist(cacheDirKey, requirementsCachedDir);
pythonDependencyConfig
.get(PYTHON_REQUIREMENTS_FILE_DISTRIBUTED_CACHE_INFO)
.put(CACHE, cacheDirKey);
}
}
/**
* Adds a Python archive file (zip format). The file will be extracted and moved to a
* dedicated directory under the working directory of the Python UDF workers. The param
* `targetDir` is the name of the dedicated directory. The Python UDFs and the config option
* "python.executable" could access the extracted files via relative path.
*
* @param archivePath The path of the archive file.
* @param targetDir The name of the target directory.
*/
private void addPythonArchive(
Configuration pythonDependencyConfig, String archivePath, String targetDir) {
Preconditions.checkNotNull(archivePath);
if (!pythonDependencyConfig.contains(PYTHON_ARCHIVES_DISTRIBUTED_CACHE_INFO)) {
pythonDependencyConfig.set(PYTHON_ARCHIVES_DISTRIBUTED_CACHE_INFO, new HashMap<>());
}
String fileKey =
generateUniqueFileKey(
PYTHON_ARCHIVE_PREFIX, archivePath + PARAM_DELIMITER + targetDir);
registerCachedFileIfNotExist(fileKey, archivePath);
pythonDependencyConfig
.get(PYTHON_ARCHIVES_DISTRIBUTED_CACHE_INFO)
.put(fileKey, targetDir);
}
private void applyToConfiguration(Configuration pythonDependencyConfig) {
config.getOptional(PythonOptions.PYTHON_FILES)
.ifPresent(
pyFiles -> {
for (String filePath : pyFiles.split(FILE_DELIMITER)) {
addPythonFile(pythonDependencyConfig, filePath);
}
});
config.getOptional(PythonOptions.PYTHON_REQUIREMENTS)
.ifPresent(
pyRequirements -> {
if (pyRequirements.contains(PARAM_DELIMITER)) {
String[] requirementFileAndCache =
pyRequirements.split(PARAM_DELIMITER, 2);
setPythonRequirements(
pythonDependencyConfig,
requirementFileAndCache[0],
requirementFileAndCache[1]);
} else {
setPythonRequirements(pythonDependencyConfig, pyRequirements);
}
});
config.getOptional(PythonOptions.PYTHON_ARCHIVES)
.ifPresent(
pyArchives -> {
for (String archive : pyArchives.split(FILE_DELIMITER)) {
String archivePath;
String targetDir;
if (archive.contains(PARAM_DELIMITER)) {
String[] filePathAndTargetDir =
archive.split(PARAM_DELIMITER, 2);
archivePath = filePathAndTargetDir[0];
targetDir =
new File(archivePath).getName()
+ PARAM_DELIMITER
+ filePathAndTargetDir[1];
} else {
archivePath = archive;
targetDir = new File(archivePath).getName();
}
addPythonArchive(
pythonDependencyConfig, archivePath, targetDir);
}
});
config.getOptional(PYTHON_EXECUTABLE)
.ifPresent(e -> pythonDependencyConfig.set(PYTHON_EXECUTABLE, e));
config.getOptional(PYTHON_CLIENT_EXECUTABLE)
.ifPresent(e -> pythonDependencyConfig.set(PYTHON_CLIENT_EXECUTABLE, e));
config.getOptional(PythonOptions.PYTHON_PATH)
.ifPresent(
pyPath ->
pythonDependencyConfig.set(PythonOptions.PYTHON_PATH, pyPath));
}
private String generateUniqueFileKey(String prefix, String hashString) {
MessageDigest messageDigest;
try {
messageDigest = MessageDigest.getInstance(HASH_ALGORITHM);
} catch (NoSuchAlgorithmException e) {
throw new RuntimeException(e);
}
messageDigest.update(hashString.getBytes(StandardCharsets.UTF_8));
return String.format(
"%s_%s", prefix, StringUtils.byteToHexString(messageDigest.digest()));
}
private void registerCachedFileIfNotExist(String name, String path) {
final List<Tuple2<String, String>> cachedFilePairs =
config
.getOptional(PipelineOptions.CACHED_FILES)
.orElse(new ArrayList<>())
.stream()
.map(
m ->
Tuple2.of(
ConfigurationUtils.parseStringToMap(m)
.get("name"),
m))
.collect(Collectors.toList());
final Set<String> cachedFileNames =
cachedFilePairs.stream()
.map(f -> (String) f.getField(0))
.collect(Collectors.toSet());
if (cachedFileNames.contains(name)) {
return;
}
final List<String> cachedFiles =
cachedFilePairs.stream()
.map(f -> (String) f.getField(1))
.collect(Collectors.toList());
Map<String, String> map = new HashMap<>();
map.put("name", name);
map.put("path", path);
cachedFiles.add(ConfigurationUtils.convertValue(map, String.class));
((WritableConfig) config).set(PipelineOptions.CACHED_FILES, cachedFiles);
}
private void removeCachedFilesByPrefix(String prefix) {
final List<String> cachedFiles =
config
.getOptional(PipelineOptions.CACHED_FILES)
.orElse(new ArrayList<>())
.stream()
.map(m -> Tuple2.of(ConfigurationUtils.parseStringToMap(m), m))
.filter(
t ->
t.f0.get("name") != null
&& !(t.f0.get("name")
.matches(
"^"
+ prefix
+ "_[a-z0-9]{64}$")))
.map(t -> t.f1)
.collect(Collectors.toList());
((WritableConfig) config)
.set(PipelineOptions.CACHED_FILES, new ArrayList<>(cachedFiles));
}
}
}
| PythonDependencyManager |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/android/testdata/stubs/android/graphics/Rect.java | {
"start": 642,
"end": 831
} | class ____ {
public boolean intersect(int x, int y, int x2, int y2) {
return false;
}
public boolean intersect(Rect other) {
return false;
}
public void setEmpty() {}
}
| Rect |
java | spring-projects__spring-boot | module/spring-boot-webmvc/src/test/java/org/springframework/boot/webmvc/autoconfigure/error/DefaultErrorViewIntegrationTests.java | {
"start": 3645,
"end": 3800
} | class ____ {
// For manual testing
static void main(String[] args) {
SpringApplication.run(TestConfiguration.class, args);
}
}
}
| TestConfiguration |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/scheduler/adaptivebatch/SpeculativeExecutionHandler.java | {
"start": 1434,
"end": 3146
} | interface ____ {
/** Initial speculative execution handler. */
void init(
ExecutionGraph executionGraph,
ComponentMainThreadExecutor mainThreadExecutor,
MetricGroup metricGroup);
/** Stops the slow task detector. */
void stopSlowTaskDetector();
/**
* Notifies that a task has finished its execution.
*
* @param execution the execution that has finished
* @param cancelPendingExecutionsFunction the function to cancel pending executions
*/
void notifyTaskFinished(
Execution execution,
Function<ExecutionVertexID, CompletableFuture<?>> cancelPendingExecutionsFunction);
/**
* Notifies that a task has failed its execution.
*
* @param execution the execution that has failed
*/
void notifyTaskFailed(Execution execution);
/**
* Handles a task failure.
*
* @param failedExecution the execution that failed
* @param error the error that caused the failure, if available
* @param handleLocalExecutionAttemptFailure a consumer that handles local execution attempt
* failure
* @return true if the failure was handled as a local failure, false otherwise
*/
boolean handleTaskFailure(
Execution failedExecution,
@Nullable Throwable error,
BiConsumer<Execution, Throwable> handleLocalExecutionAttemptFailure);
/**
* Resets the state of the component for a new execution of a specific execution vertex.
*
* @param executionVertexId the ID of the execution vertex to reset
*/
void resetForNewExecution(ExecutionVertexID executionVertexId);
}
| SpeculativeExecutionHandler |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/NettyMessage.java | {
"start": 20784,
"end": 23330
} | class ____ extends NettyMessage {
static final byte ID = 1;
final Throwable cause;
@Nullable final InputChannelID receiverId;
ErrorResponse(Throwable cause) {
this.cause = checkNotNull(cause);
this.receiverId = null;
}
ErrorResponse(Throwable cause, InputChannelID receiverId) {
this.cause = checkNotNull(cause);
this.receiverId = receiverId;
}
boolean isFatalError() {
return receiverId == null;
}
@Override
void write(ChannelOutboundInvoker out, ChannelPromise promise, ByteBufAllocator allocator)
throws IOException {
final ByteBuf result = allocateBuffer(allocator, ID);
try (ObjectOutputStream oos = new ObjectOutputStream(new ByteBufOutputStream(result))) {
oos.writeObject(cause);
if (receiverId != null) {
result.writeBoolean(true);
receiverId.writeTo(result);
} else {
result.writeBoolean(false);
}
// Update frame length...
result.setInt(0, result.readableBytes());
out.write(result, promise);
} catch (Throwable t) {
handleException(result, null, t);
}
}
static ErrorResponse readFrom(ByteBuf buffer) throws Exception {
try (ObjectInputStream ois = new ObjectInputStream(new ByteBufInputStream(buffer))) {
Object obj = ois.readObject();
if (!(obj instanceof Throwable)) {
throw new ClassCastException(
"Read object expected to be of type Throwable, "
+ "actual type is "
+ obj.getClass()
+ ".");
} else {
if (buffer.readBoolean()) {
InputChannelID receiverId = InputChannelID.fromByteBuf(buffer);
return new ErrorResponse((Throwable) obj, receiverId);
} else {
return new ErrorResponse((Throwable) obj);
}
}
}
}
}
// ------------------------------------------------------------------------
// Client requests
// ------------------------------------------------------------------------
static | ErrorResponse |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDirectoryCollection.java | {
"start": 1955,
"end": 21376
} | class ____ {
private File testDir;
private File testFile;
private Configuration conf;
private FileContext localFs;
@BeforeEach
public void setupForTests() throws IOException {
conf = new Configuration();
localFs = FileContext.getLocalFSFileContext(conf);
testDir = Files.createTempDirectory(TestDirectoryCollection.class.getName()).toFile();
testFile = new File(testDir, "testfile");
testFile.createNewFile();
}
@AfterEach
public void teardown() {
FileUtil.fullyDelete(testDir);
}
@Test
public void testConcurrentAccess() throws IOException {
// Initialize DirectoryCollection with a file instead of a directory
String[] dirs = {testFile.getPath()};
DirectoryCollection dc =
new DirectoryCollection(dirs, conf.getFloat(
YarnConfiguration.NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE,
YarnConfiguration.DEFAULT_NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE));
// Create an iterator before checkDirs is called to reliable test case
List<String> list = dc.getGoodDirs();
ListIterator<String> li = list.listIterator();
// DiskErrorException will invalidate iterator of non-concurrent
// collections. ConcurrentModificationException will be thrown upon next
// use of the iterator.
assertTrue(dc.checkDirs(),
"checkDirs did not remove test file from directory list");
// Verify no ConcurrentModification is thrown
li.next();
}
@Test
public void testCreateDirectories() throws IOException {
conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");
String dirA = new File(testDir, "dirA").getPath();
String dirB = new File(dirA, "dirB").getPath();
String dirC = new File(testDir, "dirC").getPath();
Path pathC = new Path(dirC);
FsPermission permDirC = new FsPermission((short)0710);
localFs.mkdir(pathC, null, true);
localFs.setPermission(pathC, permDirC);
String[] dirs = { dirA, dirB, dirC };
DirectoryCollection dc =
new DirectoryCollection(dirs, conf.getFloat(
YarnConfiguration.NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE,
YarnConfiguration.DEFAULT_NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE));
FsPermission defaultPerm = FsPermission.getDefault()
.applyUMask(new FsPermission((short)FsPermission.DEFAULT_UMASK));
boolean createResult = dc.createNonExistentDirs(localFs, defaultPerm);
assertTrue(createResult);
FileStatus status = localFs.getFileStatus(new Path(dirA));
assertEquals(defaultPerm, status.getPermission(),
"local dir parent not created with proper permissions");
status = localFs.getFileStatus(new Path(dirB));
assertEquals(defaultPerm, status.getPermission(),
"local dir not created with proper permissions");
status = localFs.getFileStatus(pathC);
assertEquals(permDirC, status.getPermission(),
"existing local directory permissions modified");
}
@Test
public void testDiskSpaceUtilizationLimit() throws IOException {
String dirA = new File(testDir, "dirA").getPath();
String[] dirs = { dirA };
DirectoryCollection dc = new DirectoryCollection(dirs, 0.0F);
dc.checkDirs();
assertEquals(0, dc.getGoodDirs().size());
assertEquals(0, dc.getErroredDirs().size());
assertEquals(1, dc.getFailedDirs().size());
assertEquals(1, dc.getFullDirs().size());
assertNotNull(dc.getDirectoryErrorInfo(dirA));
assertEquals(DirectoryCollection.DiskErrorCause.DISK_FULL,
dc.getDirectoryErrorInfo(dirA).cause);
// no good dirs
assertEquals(0, dc.getGoodDirsDiskUtilizationPercentage());
dc = new DirectoryCollection(dirs, 100.0F);
int utilizedSpacePerc =
(int) ((testDir.getTotalSpace() - testDir.getUsableSpace()) * 100 /
testDir.getTotalSpace());
dc.checkDirs();
assertEquals(1, dc.getGoodDirs().size());
assertEquals(0, dc.getErroredDirs().size());
assertEquals(0, dc.getFailedDirs().size());
assertEquals(0, dc.getFullDirs().size());
assertNull(dc.getDirectoryErrorInfo(dirA));
assertEquals(utilizedSpacePerc,
dc.getGoodDirsDiskUtilizationPercentage());
dc = new DirectoryCollection(dirs, testDir.getTotalSpace() / (1024 * 1024));
dc.checkDirs();
assertEquals(0, dc.getGoodDirs().size());
assertEquals(0, dc.getErroredDirs().size());
assertEquals(1, dc.getFailedDirs().size());
assertEquals(1, dc.getFullDirs().size());
assertNotNull(dc.getDirectoryErrorInfo(dirA));
// no good dirs
assertEquals(0, dc.getGoodDirsDiskUtilizationPercentage());
dc = new DirectoryCollection(dirs, 100.0F, 100.0F, 0);
utilizedSpacePerc =
(int)((testDir.getTotalSpace() - testDir.getUsableSpace()) * 100 /
testDir.getTotalSpace());
dc.checkDirs();
assertEquals(1, dc.getGoodDirs().size());
assertEquals(0, dc.getErroredDirs().size());
assertEquals(0, dc.getFailedDirs().size());
assertEquals(0, dc.getFullDirs().size());
assertNull(dc.getDirectoryErrorInfo(dirA));
assertEquals(utilizedSpacePerc,
dc.getGoodDirsDiskUtilizationPercentage());
}
@Test
public void testDiskSpaceUtilizationThresholdEnabled() throws IOException {
String dirA = new File(testDir, "dirA").getPath();
String[] dirs = {dirA};
DirectoryCollection dc = new DirectoryCollection(dirs, 0.0F);
// Disable disk utilization threshold.
dc.setDiskUtilizationThresholdEnabled(false);
assertFalse(dc.getDiskUtilizationThresholdEnabled());
dc.checkDirs();
assertEquals(1, dc.getGoodDirs().size());
assertEquals(0, dc.getErroredDirs().size());
assertEquals(0, dc.getFailedDirs().size());
assertEquals(0, dc.getFullDirs().size());
assertNull(dc.getDirectoryErrorInfo(dirA));
// Enable disk utilization threshold.
dc.setDiskUtilizationThresholdEnabled(true);
assertTrue(dc.getDiskUtilizationThresholdEnabled());
dc.checkDirs();
assertEquals(0, dc.getGoodDirs().size());
assertEquals(0, dc.getErroredDirs().size());
assertEquals(1, dc.getFailedDirs().size());
assertEquals(1, dc.getFullDirs().size());
assertNotNull(dc.getDirectoryErrorInfo(dirA));
assertEquals(DirectoryCollection.DiskErrorCause.DISK_FULL,
dc.getDirectoryErrorInfo(dirA).cause);
// no good dirs
assertEquals(0,
dc.getGoodDirsDiskUtilizationPercentage());
dc = new DirectoryCollection(dirs, 100.0F);
int utilizedSpacePerc =
(int) ((testDir.getTotalSpace() - testDir.getUsableSpace()) * 100 /
testDir.getTotalSpace());
dc.checkDirs();
assertEquals(1, dc.getGoodDirs().size());
assertEquals(0, dc.getErroredDirs().size());
assertEquals(0, dc.getFailedDirs().size());
assertEquals(0, dc.getFullDirs().size());
assertNull(dc.getDirectoryErrorInfo(dirA));
assertEquals(utilizedSpacePerc,
dc.getGoodDirsDiskUtilizationPercentage());
dc = new DirectoryCollection(dirs,
testDir.getTotalSpace() / (1024 * 1024));
// Disable disk utilization threshold.
dc.setDiskUtilizationThresholdEnabled(false);
assertFalse(dc.getDiskUtilizationThresholdEnabled());
// Disable disk free space threshold.
dc.setDiskFreeSpaceThresholdEnabled(false);
assertFalse(dc.getDiskFreeSpaceThresholdEnabled());
dc.checkDirs();
assertEquals(1, dc.getGoodDirs().size());
assertEquals(0, dc.getErroredDirs().size());
assertEquals(0, dc.getFailedDirs().size());
assertEquals(0, dc.getFullDirs().size());
assertNull(dc.getDirectoryErrorInfo(dirA));
dc = new DirectoryCollection(dirs,
testDir.getTotalSpace() / (1024 * 1024));
// Enable disk free space threshold.
dc.setDiskFreeSpaceThresholdEnabled(true);
assertTrue(dc.getDiskFreeSpaceThresholdEnabled());
dc.checkDirs();
assertEquals(0, dc.getGoodDirs().size());
assertEquals(0, dc.getErroredDirs().size());
assertEquals(1, dc.getFailedDirs().size());
assertEquals(1, dc.getFullDirs().size());
assertNotNull(dc.getDirectoryErrorInfo(dirA));
// no good dirs
assertEquals(0, dc.getGoodDirsDiskUtilizationPercentage());
dc = new DirectoryCollection(dirs, 100.0F, 100.0F, 0);
utilizedSpacePerc =
(int)((testDir.getTotalSpace() - testDir.getUsableSpace()) * 100 /
testDir.getTotalSpace());
dc.checkDirs();
assertEquals(1, dc.getGoodDirs().size());
assertEquals(0, dc.getErroredDirs().size());
assertEquals(0, dc.getFailedDirs().size());
assertEquals(0, dc.getFullDirs().size());
assertNull(dc.getDirectoryErrorInfo(dirA));
assertEquals(utilizedSpacePerc,
dc.getGoodDirsDiskUtilizationPercentage());
}
@Test
public void testDiskLimitsCutoffSetters() throws IOException {
String[] dirs = { "dir" };
DirectoryCollection dc = new DirectoryCollection(dirs, 0.0F, 0.0F, 100);
float testValue = 57.5F;
float delta = 0.1F;
dc.setDiskUtilizationPercentageCutoff(testValue, 50.0F);
assertEquals(testValue, dc.getDiskUtilizationPercentageCutoffHigh(),
delta);
assertEquals(50.0F, dc.getDiskUtilizationPercentageCutoffLow(),
delta);
testValue = -57.5F;
dc.setDiskUtilizationPercentageCutoff(testValue, testValue);
assertEquals(0.0F, dc.getDiskUtilizationPercentageCutoffHigh(),
delta);
assertEquals(0.0F, dc.getDiskUtilizationPercentageCutoffLow(),
delta);
testValue = 157.5F;
dc.setDiskUtilizationPercentageCutoff(testValue, testValue);
assertEquals(100.0F, dc.getDiskUtilizationPercentageCutoffHigh(),
delta);
assertEquals(100.0F, dc.getDiskUtilizationPercentageCutoffLow(),
delta);
long lowSpaceValue = 57;
dc.setDiskUtilizationSpaceCutoff(lowSpaceValue);
assertEquals(lowSpaceValue, dc.getDiskUtilizationSpaceCutoffLow());
assertEquals(lowSpaceValue, dc.getDiskUtilizationSpaceCutoffHigh());
long highSpaceValue = 73;
dc.setDiskUtilizationSpaceCutoff(lowSpaceValue, highSpaceValue);
assertEquals(lowSpaceValue, dc.getDiskUtilizationSpaceCutoffLow());
assertEquals(highSpaceValue, dc.getDiskUtilizationSpaceCutoffHigh());
lowSpaceValue = -57;
dc.setDiskUtilizationSpaceCutoff(lowSpaceValue);
assertEquals(0, dc.getDiskUtilizationSpaceCutoffLow());
assertEquals(0, dc.getDiskUtilizationSpaceCutoffHigh());
dc.setDiskUtilizationSpaceCutoff(lowSpaceValue, highSpaceValue);
assertEquals(0, dc.getDiskUtilizationSpaceCutoffLow());
assertEquals(highSpaceValue, dc.getDiskUtilizationSpaceCutoffHigh());
highSpaceValue = -10;
dc.setDiskUtilizationSpaceCutoff(lowSpaceValue, highSpaceValue);
assertEquals(0, dc.getDiskUtilizationSpaceCutoffLow());
assertEquals(0, dc.getDiskUtilizationSpaceCutoffHigh());
lowSpaceValue = 33;
dc.setDiskUtilizationSpaceCutoff(lowSpaceValue, highSpaceValue);
assertEquals(lowSpaceValue, dc.getDiskUtilizationSpaceCutoffLow());
assertEquals(lowSpaceValue, dc.getDiskUtilizationSpaceCutoffHigh());
}
@Test
public void testFailedDisksBecomingGoodAgain() throws Exception {
String dirA = new File(testDir, "dirA").getPath();
String[] dirs = { dirA };
DirectoryCollection dc = new DirectoryCollection(dirs, 0.0F);
dc.checkDirs();
assertEquals(0, dc.getGoodDirs().size());
assertEquals(1, dc.getFailedDirs().size());
assertEquals(1, dc.getFullDirs().size());
assertEquals(0, dc.getErroredDirs().size());
assertNotNull(dc.getDirectoryErrorInfo(dirA));
assertEquals(DirectoryCollection.DiskErrorCause.DISK_FULL,
dc.getDirectoryErrorInfo(dirA).cause);
dc.setDiskUtilizationPercentageCutoff(100.0F, 100.0F);
dc.checkDirs();
assertEquals(1, dc.getGoodDirs().size());
assertEquals(0, dc.getFailedDirs().size());
assertEquals(0, dc.getFullDirs().size());
assertEquals(0, dc.getErroredDirs().size());
assertNull(dc.getDirectoryErrorInfo(dirA));
conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");
String dirB = new File(testDir, "dirB").getPath();
Path pathB = new Path(dirB);
FsPermission permDirB = new FsPermission((short) 0400);
localFs.mkdir(pathB, null, true);
localFs.setPermission(pathB, permDirB);
String[] dirs2 = { dirB };
dc = new DirectoryCollection(dirs2, 100.0F);
dc.checkDirs();
assertEquals(0, dc.getGoodDirs().size());
assertEquals(1, dc.getFailedDirs().size());
assertEquals(0, dc.getFullDirs().size());
assertEquals(1, dc.getErroredDirs().size());
assertNotNull(dc.getDirectoryErrorInfo(dirB));
assertEquals(DirectoryCollection.DiskErrorCause.OTHER, dc.getDirectoryErrorInfo(dirB).cause);
permDirB = new FsPermission((short) 0700);
localFs.setPermission(pathB, permDirB);
dc.checkDirs();
assertEquals(1, dc.getGoodDirs().size());
assertEquals(0, dc.getFailedDirs().size());
assertEquals(0, dc.getFullDirs().size());
assertEquals(0, dc.getErroredDirs().size());
assertNull(dc.getDirectoryErrorInfo(dirA));
}
@Test
public void testConstructors() {
String[] dirs = { "dir" };
float delta = 0.1F;
DirectoryCollection dc = new DirectoryCollection(dirs);
assertEquals(100.0F, dc.getDiskUtilizationPercentageCutoffHigh(),
delta);
assertEquals(100.0F, dc.getDiskUtilizationPercentageCutoffLow(),
delta);
assertEquals(0, dc.getDiskUtilizationSpaceCutoffLow());
assertEquals(0, dc.getDiskUtilizationSpaceCutoffHigh());
dc = new DirectoryCollection(dirs, 57.5F);
assertEquals(57.5F, dc.getDiskUtilizationPercentageCutoffHigh(),
delta);
assertEquals(57.5F, dc.getDiskUtilizationPercentageCutoffLow(),
delta);
assertEquals(0, dc.getDiskUtilizationSpaceCutoffLow());
assertEquals(0, dc.getDiskUtilizationSpaceCutoffHigh());
dc = new DirectoryCollection(dirs, 57);
assertEquals(100.0F, dc.getDiskUtilizationPercentageCutoffHigh(),
delta);
assertEquals(100.0F, dc.getDiskUtilizationPercentageCutoffLow(),
delta);
assertEquals(57, dc.getDiskUtilizationSpaceCutoffLow());
assertEquals(57, dc.getDiskUtilizationSpaceCutoffHigh());
dc = new DirectoryCollection(dirs, 57, 73);
assertEquals(100.0F, dc.getDiskUtilizationPercentageCutoffHigh(),
delta);
assertEquals(100.0F, dc.getDiskUtilizationPercentageCutoffLow(),
delta);
assertEquals(57, dc.getDiskUtilizationSpaceCutoffLow());
assertEquals(73, dc.getDiskUtilizationSpaceCutoffHigh());
dc = new DirectoryCollection(dirs, 57, 33);
assertEquals(100.0F, dc.getDiskUtilizationPercentageCutoffHigh(),
delta);
assertEquals(100.0F, dc.getDiskUtilizationPercentageCutoffLow(),
delta);
assertEquals(57, dc.getDiskUtilizationSpaceCutoffLow());
assertEquals(57, dc.getDiskUtilizationSpaceCutoffHigh());
dc = new DirectoryCollection(dirs, 57, -33);
assertEquals(100.0F, dc.getDiskUtilizationPercentageCutoffHigh(),
delta);
assertEquals(100.0F, dc.getDiskUtilizationPercentageCutoffLow(),
delta);
assertEquals(57, dc.getDiskUtilizationSpaceCutoffLow());
assertEquals(57, dc.getDiskUtilizationSpaceCutoffHigh());
dc = new DirectoryCollection(dirs, -57, -33);
assertEquals(100.0F, dc.getDiskUtilizationPercentageCutoffHigh(),
delta);
assertEquals(100.0F, dc.getDiskUtilizationPercentageCutoffLow(),
delta);
assertEquals(0, dc.getDiskUtilizationSpaceCutoffLow());
assertEquals(0, dc.getDiskUtilizationSpaceCutoffHigh());
dc = new DirectoryCollection(dirs, -57, 33);
assertEquals(100.0F, dc.getDiskUtilizationPercentageCutoffHigh(),
delta);
assertEquals(100.0F, dc.getDiskUtilizationPercentageCutoffLow(),
delta);
assertEquals(0, dc.getDiskUtilizationSpaceCutoffLow());
assertEquals(33, dc.getDiskUtilizationSpaceCutoffHigh());
dc = new DirectoryCollection(dirs, 57.5F, 50.5F, 67);
assertEquals(57.5F, dc.getDiskUtilizationPercentageCutoffHigh(),
delta);
assertEquals(50.5F, dc.getDiskUtilizationPercentageCutoffLow(),
delta);
assertEquals(67, dc.getDiskUtilizationSpaceCutoffLow());
assertEquals(67, dc.getDiskUtilizationSpaceCutoffHigh());
dc = new DirectoryCollection(dirs, -57.5F, -57.5F, -67);
assertEquals(0.0F, dc.getDiskUtilizationPercentageCutoffHigh(),
delta);
assertEquals(0.0F, dc.getDiskUtilizationPercentageCutoffLow(),
delta);
assertEquals(0, dc.getDiskUtilizationSpaceCutoffLow());
assertEquals(0, dc.getDiskUtilizationSpaceCutoffHigh());
dc = new DirectoryCollection(dirs, 157.5F, 157.5F, -67);
assertEquals(100.0F, dc.getDiskUtilizationPercentageCutoffHigh(),
delta);
assertEquals(100.0F, dc.getDiskUtilizationPercentageCutoffLow(),
delta);
assertEquals(0, dc.getDiskUtilizationSpaceCutoffLow());
assertEquals(0, dc.getDiskUtilizationSpaceCutoffHigh());
dc = new DirectoryCollection(dirs, 157.5F, 157.5F, 5, 10);
assertEquals(100.0F, dc.getDiskUtilizationPercentageCutoffHigh(),
delta);
assertEquals(100.0F, dc.getDiskUtilizationPercentageCutoffLow(),
delta);
assertEquals(5, dc.getDiskUtilizationSpaceCutoffLow());
assertEquals(10, dc.getDiskUtilizationSpaceCutoffHigh());
}
@Test
public void testDirsChangeListener() {
DirsChangeListenerTest listener1 = new DirsChangeListenerTest();
DirsChangeListenerTest listener2 = new DirsChangeListenerTest();
DirsChangeListenerTest listener3 = new DirsChangeListenerTest();
String dirA = new File(testDir, "dirA").getPath();
String[] dirs = { dirA };
DirectoryCollection dc = new DirectoryCollection(dirs, 0.0F);
assertEquals(1, dc.getGoodDirs().size());
assertEquals(listener1.num, 0);
assertEquals(listener2.num, 0);
assertEquals(listener3.num, 0);
dc.registerDirsChangeListener(listener1);
dc.registerDirsChangeListener(listener2);
dc.registerDirsChangeListener(listener3);
assertEquals(listener1.num, 1);
assertEquals(listener2.num, 1);
assertEquals(listener3.num, 1);
dc.deregisterDirsChangeListener(listener3);
dc.checkDirs();
assertEquals(0, dc.getGoodDirs().size());
assertEquals(listener1.num, 2);
assertEquals(listener2.num, 2);
assertEquals(listener3.num, 1);
dc.deregisterDirsChangeListener(listener2);
dc.setDiskUtilizationPercentageCutoff(100.0F, 100.0F);
dc.checkDirs();
assertEquals(1, dc.getGoodDirs().size());
assertEquals(listener1.num, 3);
assertEquals(listener2.num, 2);
assertEquals(listener3.num, 1);
}
@Test
public void testNonAccessibleSub() throws IOException {
Files.setPosixFilePermissions(testDir.toPath(),
PosixFilePermissions.fromString("rwx------"));
Files.setPosixFilePermissions(testFile.toPath(),
PosixFilePermissions.fromString("-w--w--w-"));
DirectoryCollection dc = new DirectoryCollection(new String[]{testDir.toString()});
dc.setSubAccessibilityValidationEnabled(true);
Map<String, DirectoryCollection.DiskErrorInformation> diskErrorInformationMap =
dc.testDirs(Collections.singletonList(testDir.toString()), Collections.emptySet());
assertEquals(1, diskErrorInformationMap.size());
assertTrue(diskErrorInformationMap.values().iterator().next()
.message.contains(testFile.getName()));
}
static | TestDirectoryCollection |
java | spring-projects__spring-framework | spring-context/src/testFixtures/java/org/springframework/context/testfixture/SimpleMapScope.java | {
"start": 987,
"end": 1902
} | class ____ implements Scope, Serializable {
private final Map<String, Object> map = new HashMap<>();
private final List<Runnable> callbacks = new ArrayList<>();
public SimpleMapScope() {
}
public final Map<String, Object> getMap() {
return this.map;
}
@Override
public Object get(String name, ObjectFactory<?> objectFactory) {
synchronized (this.map) {
Object scopedObject = this.map.get(name);
if (scopedObject == null) {
scopedObject = objectFactory.getObject();
this.map.put(name, scopedObject);
}
return scopedObject;
}
}
@Override
public Object remove(String name) {
synchronized (this.map) {
return this.map.remove(name);
}
}
@Override
public void registerDestructionCallback(String name, Runnable callback) {
this.callbacks.add(callback);
}
public void close() {
for (Runnable runnable : this.callbacks) {
runnable.run();
}
}
}
| SimpleMapScope |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCloneSnapshotAction.java | {
"start": 1389,
"end": 2611
} | class ____ extends BaseRestHandler {
@Override
public List<Route> routes() {
return List.of(new Route(PUT, "/_snapshot/{repository}/{snapshot}/_clone/{target_snapshot}"));
}
@Override
public String getName() {
return "clone_snapshot_action";
}
@Override
public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
try (var parser = request.contentParser()) {
final Map<String, Object> source = parser.map();
final CloneSnapshotRequest cloneSnapshotRequest = new CloneSnapshotRequest(
getMasterNodeTimeout(request),
request.param("repository"),
request.param("snapshot"),
request.param("target_snapshot"),
XContentMapValues.nodeStringArrayValue(source.getOrDefault("indices", Collections.emptyList()))
);
cloneSnapshotRequest.indicesOptions(IndicesOptions.fromMap(source, cloneSnapshotRequest.indicesOptions()));
return channel -> client.admin().cluster().cloneSnapshot(cloneSnapshotRequest, new RestToXContentListener<>(channel));
}
}
}
| RestCloneSnapshotAction |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/action/admin/indices/rollover/Condition.java | {
"start": 832,
"end": 1126
} | class ____<T> implements NamedWriteable, ToXContentFragment {
/*
* Describes the type of condition - a min_* condition (MIN), max_* condition (MAX), or an automatic condition (automatic conditions
* are something that the platform configures and manages)
*/
public | Condition |
java | apache__rocketmq | store/src/main/java/org/apache/rocketmq/store/index/IndexFile.java | {
"start": 1246,
"end": 10492
} | class ____ {
private static final Logger log = LoggerFactory.getLogger(LoggerName.STORE_LOGGER_NAME);
private static int hashSlotSize = 4;
/**
* Each index's store unit. Format:
* <pre>
* ┌───────────────┬───────────────────────────────┬───────────────┬───────────────┐
* │ Key HashCode │ Physical Offset │ Time Diff │ Next Index Pos│
* │ (4 Bytes) │ (8 Bytes) │ (4 Bytes) │ (4 Bytes) │
* ├───────────────┴───────────────────────────────┴───────────────┴───────────────┤
* │ Index Store Unit │
* │ │
* </pre>
* Each index's store unit. Size:
* Key HashCode(4) + Physical Offset(8) + Time Diff(4) + Next Index Pos(4) = 20 Bytes
*/
private static int indexSize = 20;
private static int invalidIndex = 0;
private final int hashSlotNum;
private final int indexNum;
private final int fileTotalSize;
private final MappedFile mappedFile;
private final MappedByteBuffer mappedByteBuffer;
private final IndexHeader indexHeader;
public IndexFile(final String fileName, final int hashSlotNum, final int indexNum,
final long endPhyOffset, final long endTimestamp) throws IOException {
this.fileTotalSize =
IndexHeader.INDEX_HEADER_SIZE + (hashSlotNum * hashSlotSize) + (indexNum * indexSize);
this.mappedFile = new DefaultMappedFile(fileName, fileTotalSize);
this.mappedByteBuffer = this.mappedFile.getMappedByteBuffer();
this.hashSlotNum = hashSlotNum;
this.indexNum = indexNum;
ByteBuffer byteBuffer = this.mappedByteBuffer.slice();
this.indexHeader = new IndexHeader(byteBuffer);
if (endPhyOffset > 0) {
this.indexHeader.setBeginPhyOffset(endPhyOffset);
this.indexHeader.setEndPhyOffset(endPhyOffset);
}
if (endTimestamp > 0) {
this.indexHeader.setBeginTimestamp(endTimestamp);
this.indexHeader.setEndTimestamp(endTimestamp);
}
}
public String getFileName() {
return this.mappedFile.getFileName();
}
public int getFileSize() {
return this.fileTotalSize;
}
public void load() {
this.indexHeader.load();
}
public void shutdown() {
try {
this.flush();
} catch (Throwable e) {
log.error("flush error when shutdown", e);
}
mappedFile.cleanResources();
}
public void flush() {
long beginTime = System.currentTimeMillis();
if (this.mappedFile.hold()) {
this.indexHeader.updateByteBuffer();
this.mappedByteBuffer.force();
this.mappedFile.release();
log.info("flush index file elapsed time(ms) " + (System.currentTimeMillis() - beginTime));
}
}
public boolean isWriteFull() {
return this.indexHeader.getIndexCount() >= this.indexNum;
}
public boolean destroy(final long intervalForcibly) {
return this.mappedFile.destroy(intervalForcibly);
}
public boolean putKey(final String key, final long phyOffset, final long storeTimestamp) {
if (this.indexHeader.getIndexCount() < this.indexNum) {
int keyHash = indexKeyHashMethod(key);
int slotPos = keyHash % this.hashSlotNum;
int absSlotPos = IndexHeader.INDEX_HEADER_SIZE + slotPos * hashSlotSize;
try {
int slotValue = this.mappedByteBuffer.getInt(absSlotPos);
if (slotValue <= invalidIndex || slotValue > this.indexHeader.getIndexCount()) {
slotValue = invalidIndex;
}
long timeDiff = storeTimestamp - this.indexHeader.getBeginTimestamp();
timeDiff = timeDiff / 1000;
if (this.indexHeader.getBeginTimestamp() <= 0) {
timeDiff = 0;
} else if (timeDiff > Integer.MAX_VALUE) {
timeDiff = Integer.MAX_VALUE;
} else if (timeDiff < 0) {
timeDiff = 0;
}
int absIndexPos =
IndexHeader.INDEX_HEADER_SIZE + this.hashSlotNum * hashSlotSize
+ this.indexHeader.getIndexCount() * indexSize;
this.mappedByteBuffer.putInt(absIndexPos, keyHash);
this.mappedByteBuffer.putLong(absIndexPos + 4, phyOffset);
this.mappedByteBuffer.putInt(absIndexPos + 4 + 8, (int) timeDiff);
this.mappedByteBuffer.putInt(absIndexPos + 4 + 8 + 4, slotValue);
this.mappedByteBuffer.putInt(absSlotPos, this.indexHeader.getIndexCount());
if (this.indexHeader.getIndexCount() <= 1) {
this.indexHeader.setBeginPhyOffset(phyOffset);
this.indexHeader.setBeginTimestamp(storeTimestamp);
}
if (invalidIndex == slotValue) {
this.indexHeader.incHashSlotCount();
}
this.indexHeader.incIndexCount();
this.indexHeader.setEndPhyOffset(phyOffset);
this.indexHeader.setEndTimestamp(storeTimestamp);
return true;
} catch (Exception e) {
log.error("putKey exception, Key: " + key + " KeyHashCode: " + key.hashCode(), e);
}
} else {
log.warn("Over index file capacity: index count = " + this.indexHeader.getIndexCount()
+ "; index max num = " + this.indexNum);
}
return false;
}
public int indexKeyHashMethod(final String key) {
int keyHash = key.hashCode();
int keyHashPositive = Math.abs(keyHash);
if (keyHashPositive < 0) {
keyHashPositive = 0;
}
return keyHashPositive;
}
public long getBeginTimestamp() {
return this.indexHeader.getBeginTimestamp();
}
public long getEndTimestamp() {
return this.indexHeader.getEndTimestamp();
}
public long getEndPhyOffset() {
return this.indexHeader.getEndPhyOffset();
}
public boolean isTimeMatched(final long begin, final long end) {
boolean result = begin < this.indexHeader.getBeginTimestamp() && end > this.indexHeader.getEndTimestamp();
result = result || begin >= this.indexHeader.getBeginTimestamp() && begin <= this.indexHeader.getEndTimestamp();
result = result || end >= this.indexHeader.getBeginTimestamp() && end <= this.indexHeader.getEndTimestamp();
return result;
}
public void selectPhyOffset(final List<Long> phyOffsets, final String key, final int maxNum,
final long begin, final long end) {
if (this.mappedFile.hold()) {
int keyHash = indexKeyHashMethod(key);
int slotPos = keyHash % this.hashSlotNum;
int absSlotPos = IndexHeader.INDEX_HEADER_SIZE + slotPos * hashSlotSize;
try {
int slotValue = this.mappedByteBuffer.getInt(absSlotPos);
if (slotValue <= invalidIndex || slotValue > this.indexHeader.getIndexCount()
|| this.indexHeader.getIndexCount() <= 1) {
} else {
for (int nextIndexToRead = slotValue; ; ) {
if (phyOffsets.size() >= maxNum) {
break;
}
int absIndexPos =
IndexHeader.INDEX_HEADER_SIZE + this.hashSlotNum * hashSlotSize
+ nextIndexToRead * indexSize;
int keyHashRead = this.mappedByteBuffer.getInt(absIndexPos);
long phyOffsetRead = this.mappedByteBuffer.getLong(absIndexPos + 4);
long timeDiff = this.mappedByteBuffer.getInt(absIndexPos + 4 + 8);
int prevIndexRead = this.mappedByteBuffer.getInt(absIndexPos + 4 + 8 + 4);
if (timeDiff < 0) {
break;
}
timeDiff *= 1000L;
long timeRead = this.indexHeader.getBeginTimestamp() + timeDiff;
boolean timeMatched = timeRead >= begin && timeRead <= end;
if (keyHash == keyHashRead && timeMatched) {
phyOffsets.add(phyOffsetRead);
}
if (prevIndexRead <= invalidIndex
|| prevIndexRead > this.indexHeader.getIndexCount()
|| prevIndexRead == nextIndexToRead || timeRead < begin) {
break;
}
nextIndexToRead = prevIndexRead;
}
}
} catch (Exception e) {
log.error("selectPhyOffset exception ", e);
} finally {
this.mappedFile.release();
}
}
}
}
| IndexFile |
java | alibaba__druid | core/src/main/java/com/alibaba/druid/sql/ast/expr/SQLUnaryOperator.java | {
"start": 664,
"end": 1020
} | enum ____ {
Plus("+"),
Negative("-"),
Not("!"),
Compl("~"),
Prior("PRIOR"),
ConnectByRoot("CONNECT BY"),
BINARY("BINARY"),
RAW("RAW"),
NOT("NOT"),
// Number of points in path or polygon
Pound("#");
public final String name;
SQLUnaryOperator(String name) {
this.name = name;
}
}
| SQLUnaryOperator |
java | spring-projects__spring-framework | spring-webmvc/src/main/java/org/springframework/web/servlet/mvc/method/annotation/MatrixVariableMethodArgumentResolver.java | {
"start": 2034,
"end": 4928
} | class ____ extends AbstractNamedValueMethodArgumentResolver {
public MatrixVariableMethodArgumentResolver() {
super(null);
}
@Override
public boolean supportsParameter(MethodParameter parameter) {
if (!parameter.hasParameterAnnotation(MatrixVariable.class)) {
return false;
}
if (Map.class.isAssignableFrom(parameter.nestedIfOptional().getNestedParameterType())) {
MatrixVariable matrixVariable = parameter.getParameterAnnotation(MatrixVariable.class);
return (matrixVariable != null && StringUtils.hasText(matrixVariable.name()));
}
return true;
}
@Override
protected NamedValueInfo createNamedValueInfo(MethodParameter parameter) {
MatrixVariable ann = parameter.getParameterAnnotation(MatrixVariable.class);
Assert.state(ann != null, "No MatrixVariable annotation");
return new MatrixVariableNamedValueInfo(ann);
}
@Override
@SuppressWarnings("unchecked")
protected @Nullable Object resolveName(String name, MethodParameter parameter, NativeWebRequest request) throws Exception {
Map<String, MultiValueMap<String, String>> pathParameters = (Map<String, MultiValueMap<String, String>>)
request.getAttribute(HandlerMapping.MATRIX_VARIABLES_ATTRIBUTE, RequestAttributes.SCOPE_REQUEST);
if (CollectionUtils.isEmpty(pathParameters)) {
return null;
}
MatrixVariable ann = parameter.getParameterAnnotation(MatrixVariable.class);
Assert.state(ann != null, "No MatrixVariable annotation");
String pathVar = ann.pathVar();
List<String> paramValues = null;
if (!pathVar.equals(ValueConstants.DEFAULT_NONE)) {
if (pathParameters.containsKey(pathVar)) {
paramValues = pathParameters.get(pathVar).get(name);
}
}
else {
boolean found = false;
paramValues = new ArrayList<>();
for (MultiValueMap<String, String> params : pathParameters.values()) {
if (params.containsKey(name)) {
if (found) {
String paramType = parameter.getNestedParameterType().getName();
throw new ServletRequestBindingException(
"Found more than one match for URI path parameter '" + name +
"' for parameter type [" + paramType + "]. Use 'pathVar' attribute to disambiguate.");
}
paramValues.addAll(params.get(name));
found = true;
}
}
}
if (CollectionUtils.isEmpty(paramValues)) {
return null;
}
else if (paramValues.size() == 1) {
return paramValues.get(0);
}
else {
return paramValues;
}
}
@Override
protected void handleMissingValue(String name, MethodParameter parameter) throws ServletRequestBindingException {
throw new MissingMatrixVariableException(name, parameter);
}
@Override
protected void handleMissingValueAfterConversion(
String name, MethodParameter parameter, NativeWebRequest request) throws Exception {
throw new MissingMatrixVariableException(name, parameter, true);
}
private static final | MatrixVariableMethodArgumentResolver |
java | quarkusio__quarkus | test-framework/junit5/src/test/java/io/quarkus/test/junit/TestResourceUtilTest.java | {
"start": 345,
"end": 1638
} | class ____ {
// Basic sense check, since most of the heavy lifting is done by TestResourceManager#getReloadGroupIdentifier
@Test
public void testReloadGroupIdentifierIsEqualForTestsWithNoResources() {
String identifier1 = TestResourceUtil.getReloadGroupIdentifier(TestClass.class, ProfileClass.class);
String identifier2 = TestResourceUtil.getReloadGroupIdentifier(TestClass.class, AnotherProfileClass.class);
assertEquals(identifier2, identifier1);
}
@Test
public void testReloadGroupIdentifierIsEqualForTestsWithIdenticalResources() {
String identifier1 = TestResourceUtil.getReloadGroupIdentifier(TestClass.class, ProfileClassWithResources.class);
String identifier2 = TestResourceUtil.getReloadGroupIdentifier(TestClass.class, AnotherProfileClassWithResources.class);
assertEquals(identifier2, identifier1);
}
@Test
public void testReloadGroupIdentifierIsEqualForTestsWithDifferentResources() {
String identifier1 = TestResourceUtil.getReloadGroupIdentifier(TestClass.class, ProfileClassWithResources.class);
String identifier2 = TestResourceUtil.getReloadGroupIdentifier(TestClass.class, ProfileClass.class);
assertNotEquals(identifier2, identifier1);
}
}
| TestResourceUtilTest |
java | quarkusio__quarkus | extensions/cache/deployment/src/test/java/io/quarkus/cache/test/runtime/CacheKeyGeneratorTest.java | {
"start": 4924,
"end": 5448
} | class ____ implements CacheKeyGenerator {
static final AtomicBoolean DESTROYED = new AtomicBoolean();
@ConfigProperty(name = "cache-key-element")
String cacheKeyElement;
@Override
public Object generate(Method method, Object... methodParams) {
return new CompositeCacheKey(cacheKeyElement, methodParams[0]);
}
@PreDestroy
void preDestroy() {
DESTROYED.set(true);
}
}
@ApplicationScoped
public static | SingletonKeyGen |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/plugins/internal/DocumentSizeAccumulator.java | {
"start": 593,
"end": 713
} | interface ____ allow accumulating results of document parsing (collected with {@link XContentParserDecorator})
*/
public | to |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/SystemConsoleNullTest.java | {
"start": 825,
"end": 1151
} | class ____ {
private final CompilationTestHelper testHelper =
CompilationTestHelper.newInstance(SystemConsoleNull.class, getClass());
@Test
public void positive() {
testHelper
.addSourceLines(
"Test.java",
"""
import java.io.Console;
| SystemConsoleNullTest |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/readonly/ReadOnlySessionLazyNonLazyTest.java | {
"start": 1998,
"end": 40132
} | class ____ extends AbstractReadOnlyTest {
@Test
@SuppressWarnings( {"unchecked"})
public void testExistingModifiableAfterSetSessionReadOnly(SessionFactoryScope scope) {
Container cOrig = createContainer();
Set expectedInitializedObjects = new HashSet(
Arrays.asList(
cOrig,
cOrig.getNoProxyInfo(),
cOrig.getProxyInfo(),
cOrig.getNonLazyInfo(),
cOrig.getNoProxyOwner(),
cOrig.getProxyOwner(),
cOrig.getNonLazyOwner(),
cOrig.getLazyDataPoints().iterator().next(),
cOrig.getNonLazyJoinDataPoints().iterator().next(),
cOrig.getNonLazySelectDataPoints().iterator().next()
)
);
Set expectedReadOnlyObjects = new HashSet();
Session s = openSession(scope);
assertFalse( s.isDefaultReadOnly() );
Transaction t = s.beginTransaction();
s.persist( cOrig );
checkContainer( cOrig, expectedInitializedObjects, expectedReadOnlyObjects, s );
s.setDefaultReadOnly( true );
assertTrue( s.isDefaultReadOnly() );
checkContainer( cOrig, expectedInitializedObjects, expectedReadOnlyObjects, s );
t.commit();
t = s.beginTransaction();
assertTrue( s.isDefaultReadOnly() );
checkContainer( cOrig, expectedInitializedObjects, expectedReadOnlyObjects, s );
Container c = ( Container ) s.getReference( Container.class, cOrig.getId() );
assertSame( cOrig, c );
checkContainer( c, expectedInitializedObjects, expectedReadOnlyObjects, s );
c = ( Container ) s.get( Container.class, cOrig.getId() );
assertSame( cOrig, c );
checkContainer( cOrig, expectedInitializedObjects, expectedReadOnlyObjects, s );
s.refresh( cOrig );
assertSame( cOrig, c );
checkContainer( cOrig, expectedInitializedObjects, expectedReadOnlyObjects, s );
s.evict( cOrig );
c = s.get( Container.class, cOrig.getId() );
assertNotSame( cOrig, c );
expectedInitializedObjects = new HashSet(
Arrays.asList(
c,
c.getNonLazyInfo(),
c.getNoProxyOwner(),
c.getProxyOwner(),
c.getNonLazyOwner(),
c.getNonLazyJoinDataPoints().iterator().next(),
c.getNonLazySelectDataPoints().iterator().next()
)
);
expectedReadOnlyObjects = new HashSet(
Arrays.asList(
c,
c.getNoProxyInfo(),
c.getProxyInfo(),
c.getNonLazyInfo(),
c.getNoProxyOwner(),
c.getProxyOwner(),
c.getNonLazyOwner(),
//c.getLazyDataPoints(),
c.getNonLazyJoinDataPoints().iterator().next(),
c.getNonLazySelectDataPoints().iterator().next()
)
);
checkContainer( c, expectedInitializedObjects, expectedReadOnlyObjects, s );
assertFalse( Hibernate.isInitialized( c.getNoProxyInfo() ) );
Hibernate.initialize( c.getNoProxyInfo() );
expectedInitializedObjects.add( c.getNoProxyInfo() );
checkContainer( c, expectedInitializedObjects, expectedReadOnlyObjects, s );
assertFalse( Hibernate.isInitialized( c.getProxyInfo() ) );
Hibernate.initialize( c.getProxyInfo() );
expectedInitializedObjects.add( c.getProxyInfo() );
checkContainer( c, expectedInitializedObjects, expectedReadOnlyObjects, s );
assertFalse( Hibernate.isInitialized( c.getLazyDataPoints() ) );
Hibernate.initialize( c.getLazyDataPoints() );
expectedInitializedObjects.add( c.getLazyDataPoints().iterator().next() );
expectedReadOnlyObjects.add(c.getLazyDataPoints().iterator().next() );
checkContainer( c, expectedInitializedObjects, expectedReadOnlyObjects, s );
t.commit();
s.close();
s = openSession(scope);
t = s.beginTransaction();
s.createQuery("delete from DataPoint").executeUpdate();
s.createQuery("delete from Container").executeUpdate();
s.createQuery("delete from Info").executeUpdate();
s.createQuery("delete from Owner").executeUpdate();
t.commit();
s.close();
}
@Test
@SuppressWarnings( {"unchecked"})
public void testExistingReadOnlyAfterSetSessionModifiable(SessionFactoryScope scope) {
Container cOrig = createContainer();
Set expectedInitializedObjects = new HashSet(
Arrays.asList(
cOrig,
cOrig.getNoProxyInfo(),
cOrig.getProxyInfo(),
cOrig.getNonLazyInfo(),
cOrig.getNoProxyOwner(),
cOrig.getProxyOwner(),
cOrig.getNonLazyOwner(),
cOrig.getLazyDataPoints().iterator().next(),
cOrig.getNonLazyJoinDataPoints().iterator().next(),
cOrig.getNonLazySelectDataPoints().iterator().next()
)
);
Set expectedReadOnlyObjects = new HashSet();
Session s = openSession(scope);
assertFalse( s.isDefaultReadOnly() );
Transaction t = s.beginTransaction();
s.persist( cOrig );
checkContainer( cOrig, expectedInitializedObjects, expectedReadOnlyObjects, s );
s.setDefaultReadOnly( true );
assertTrue( s.isDefaultReadOnly() );
checkContainer( cOrig, expectedInitializedObjects, expectedReadOnlyObjects, s );
t.commit();
s.close();
s = openSession(scope);
t = s.beginTransaction();
s.setDefaultReadOnly( true );
Container c = ( Container ) s.get( Container.class, cOrig.getId() );
assertNotSame( cOrig, c );
expectedInitializedObjects = new HashSet(
Arrays.asList(
c,
c.getNonLazyInfo(),
c.getNoProxyOwner(),
c.getProxyOwner(),
c.getNonLazyOwner(),
c.getNonLazyJoinDataPoints().iterator().next(),
c.getNonLazySelectDataPoints().iterator().next()
)
);
expectedReadOnlyObjects = new HashSet(
Arrays.asList(
c,
c.getNoProxyInfo(),
c.getProxyInfo(),
c.getNonLazyInfo(),
c.getNoProxyOwner(),
c.getProxyOwner(),
c.getNonLazyOwner(),
//c.getLazyDataPoints(),
c.getNonLazyJoinDataPoints().iterator().next(),
c.getNonLazySelectDataPoints().iterator().next()
)
);
checkContainer( c, expectedInitializedObjects, expectedReadOnlyObjects, s );
s.setDefaultReadOnly( false );
checkContainer( c, expectedInitializedObjects, expectedReadOnlyObjects, s );
assertFalse( Hibernate.isInitialized( c.getNoProxyInfo() ) );
Hibernate.initialize( c.getNoProxyInfo() );
expectedInitializedObjects.add( c.getNoProxyInfo() );
checkContainer( c, expectedInitializedObjects, expectedReadOnlyObjects, s );
assertFalse( Hibernate.isInitialized( c.getProxyInfo() ) );
Hibernate.initialize( c.getProxyInfo() );
expectedInitializedObjects.add( c.getProxyInfo() );
checkContainer( c, expectedInitializedObjects, expectedReadOnlyObjects, s );
assertFalse( Hibernate.isInitialized( c.getLazyDataPoints() ) );
Hibernate.initialize( c.getLazyDataPoints() );
expectedInitializedObjects.add( c.getLazyDataPoints().iterator().next() );
//expectedReadOnlyObjects.add(c.getLazyDataPoints().iterator().next() );
checkContainer( c, expectedInitializedObjects, expectedReadOnlyObjects, s );
t.commit();
s.close();
s = openSession(scope);
t = s.beginTransaction();
s.createQuery("delete from DataPoint").executeUpdate();
s.createQuery("delete from Container").executeUpdate();
s.createQuery("delete from Info").executeUpdate();
s.createQuery("delete from Owner").executeUpdate();
t.commit();
s.close();
}
@Test
@SuppressWarnings( {"unchecked"})
public void testExistingReadOnlyAfterSetSessionModifiableExisting(SessionFactoryScope scope) {
Container cOrig = createContainer();
Set expectedInitializedObjects = new HashSet(
Arrays.asList(
cOrig,
cOrig.getNoProxyInfo(),
cOrig.getProxyInfo(),
cOrig.getNonLazyInfo(),
cOrig.getNoProxyOwner(),
cOrig.getProxyOwner(),
cOrig.getNonLazyOwner(),
cOrig.getLazyDataPoints().iterator().next(),
cOrig.getNonLazyJoinDataPoints().iterator().next(),
cOrig.getNonLazySelectDataPoints().iterator().next()
)
);
Set expectedReadOnlyObjects = new HashSet();
DataPoint lazyDataPointOrig = ( DataPoint ) cOrig.getLazyDataPoints().iterator().next();
Session s = openSession(scope);
assertFalse( s.isDefaultReadOnly() );
Transaction t = s.beginTransaction();
s.persist( cOrig );
checkContainer( cOrig, expectedInitializedObjects, expectedReadOnlyObjects, s );
s.setDefaultReadOnly( true );
assertTrue( s.isDefaultReadOnly() );
checkContainer( cOrig, expectedInitializedObjects, expectedReadOnlyObjects, s );
t.commit();
s.close();
s = openSession(scope);
t = s.beginTransaction();
s.setDefaultReadOnly( true );
Container c = ( Container ) s.get( Container.class, cOrig.getId() );
assertNotSame( cOrig, c );
expectedInitializedObjects = new HashSet(
Arrays.asList(
c,
c.getNonLazyInfo(),
c.getNoProxyOwner(),
c.getProxyOwner(),
c.getNonLazyOwner(),
c.getNonLazyJoinDataPoints().iterator().next(),
c.getNonLazySelectDataPoints().iterator().next()
)
);
expectedReadOnlyObjects = new HashSet(
Arrays.asList(
c,
c.getNoProxyInfo(),
c.getProxyInfo(),
c.getNonLazyInfo(),
c.getNoProxyOwner(),
c.getProxyOwner(),
c.getNonLazyOwner(),
//c.getLazyDataPoints(),
c.getNonLazyJoinDataPoints().iterator().next(),
c.getNonLazySelectDataPoints().iterator().next()
)
);
checkContainer( c, expectedInitializedObjects, expectedReadOnlyObjects, s );
s.setDefaultReadOnly( false );
checkContainer( c, expectedInitializedObjects, expectedReadOnlyObjects, s );
assertFalse( Hibernate.isInitialized( c.getNoProxyInfo() ) );
Hibernate.initialize( c.getNoProxyInfo() );
expectedInitializedObjects.add( c.getNoProxyInfo() );
checkContainer( c, expectedInitializedObjects, expectedReadOnlyObjects, s );
assertFalse( Hibernate.isInitialized( c.getProxyInfo() ) );
Hibernate.initialize( c.getProxyInfo() );
expectedInitializedObjects.add( c.getProxyInfo() );
checkContainer( c, expectedInitializedObjects, expectedReadOnlyObjects, s );
DataPoint lazyDataPoint = ( DataPoint ) s.get( DataPoint.class, lazyDataPointOrig.getId() );
assertFalse( Hibernate.isInitialized( c.getLazyDataPoints() ) );
Hibernate.initialize( c.getLazyDataPoints() );
assertSame( lazyDataPoint, c.getLazyDataPoints().iterator().next() );
expectedInitializedObjects.add( c.getLazyDataPoints().iterator().next() );
checkContainer( c, expectedInitializedObjects, expectedReadOnlyObjects, s );
t.commit();
s.close();
s = openSession(scope);
t = s.beginTransaction();
s.createQuery("delete from DataPoint").executeUpdate();
s.createQuery("delete from Container").executeUpdate();
s.createQuery("delete from Info").executeUpdate();
s.createQuery("delete from Owner").executeUpdate();
t.commit();
s.close();
}
/**
 * Verifies that an entity loaded via {@code Session#get} while default read-only is
 * temporarily re-enabled becomes read-only, and that toggling the session's default
 * read-only mode never retroactively changes entities already managed by the session.
 */
@SuppressWarnings( {"unchecked"})
@Test
public void testExistingReadOnlyAfterSetSessionModifiableExistingEntityReadOnly(SessionFactoryScope scope) {
// Persist a fully-initialized graph; nothing is read-only in this first session.
Container cOrig = createContainer();
Set expectedInitializedObjects = new HashSet(
Arrays.asList(
cOrig,
cOrig.getNoProxyInfo(),
cOrig.getProxyInfo(),
cOrig.getNonLazyInfo(),
cOrig.getNoProxyOwner(),
cOrig.getProxyOwner(),
cOrig.getNonLazyOwner(),
cOrig.getLazyDataPoints().iterator().next(),
cOrig.getNonLazyJoinDataPoints().iterator().next(),
cOrig.getNonLazySelectDataPoints().iterator().next()
)
);
Set expectedReadOnlyObjects = new HashSet();
DataPoint lazyDataPointOrig = ( DataPoint ) cOrig.getLazyDataPoints().iterator().next();
Session s = openSession(scope);
assertFalse( s.isDefaultReadOnly() );
Transaction t = s.beginTransaction();
s.persist( cOrig );
checkContainer( cOrig, expectedInitializedObjects, expectedReadOnlyObjects, s );
// Enabling default read-only must not affect entities already in the persistence context.
s.setDefaultReadOnly( true );
assertTrue( s.isDefaultReadOnly() );
checkContainer( cOrig, expectedInitializedObjects, expectedReadOnlyObjects, s );
t.commit();
s.close();
// Reload in a fresh session with default read-only enabled: everything loaded now is read-only.
s = openSession(scope);
t = s.beginTransaction();
s.setDefaultReadOnly( true );
Container c = ( Container ) s.get( Container.class, cOrig.getId() );
assertNotSame( cOrig, c );
expectedInitializedObjects = new HashSet(
Arrays.asList(
c,
c.getNonLazyInfo(),
c.getNoProxyOwner(),
c.getProxyOwner(),
c.getNonLazyOwner(),
c.getNonLazyJoinDataPoints().iterator().next(),
c.getNonLazySelectDataPoints().iterator().next()
)
);
expectedReadOnlyObjects = new HashSet(
Arrays.asList(
c,
c.getNoProxyInfo(),
c.getProxyInfo(),
c.getNonLazyInfo(),
c.getNoProxyOwner(),
c.getProxyOwner(),
c.getNonLazyOwner(),
//c.getLazyDataPoints(),
c.getNonLazyJoinDataPoints().iterator().next(),
c.getNonLazySelectDataPoints().iterator().next()
)
);
checkContainer( c, expectedInitializedObjects, expectedReadOnlyObjects, s );
// Switching back to modifiable leaves the already-loaded (read-only) objects untouched.
s.setDefaultReadOnly( false );
checkContainer( c, expectedInitializedObjects, expectedReadOnlyObjects, s );
assertFalse( Hibernate.isInitialized( c.getNoProxyInfo() ) );
Hibernate.initialize( c.getNoProxyInfo() );
expectedInitializedObjects.add( c.getNoProxyInfo() );
checkContainer( c, expectedInitializedObjects, expectedReadOnlyObjects, s );
assertFalse( Hibernate.isInitialized( c.getProxyInfo() ) );
Hibernate.initialize( c.getProxyInfo() );
expectedInitializedObjects.add( c.getProxyInfo() );
checkContainer( c, expectedInitializedObjects, expectedReadOnlyObjects, s );
// Fetch the lazy DataPoint while default read-only is re-enabled: it becomes read-only.
s.setDefaultReadOnly( true );
DataPoint lazyDataPoint = ( DataPoint ) s.get( DataPoint.class, lazyDataPointOrig.getId() );
s.setDefaultReadOnly( false );
assertFalse( Hibernate.isInitialized( c.getLazyDataPoints() ) );
Hibernate.initialize( c.getLazyDataPoints() );
// Collection initialization must resolve to the instance already managed by the session.
assertSame( lazyDataPoint, c.getLazyDataPoints().iterator().next() );
expectedInitializedObjects.add( c.getLazyDataPoints().iterator().next() );
expectedReadOnlyObjects.add( lazyDataPoint );
checkContainer( c, expectedInitializedObjects, expectedReadOnlyObjects, s );
t.commit();
s.close();
// Clean up all test data so subsequent tests start from an empty database.
s = openSession(scope);
t = s.beginTransaction();
s.createQuery("delete from DataPoint").executeUpdate();
s.createQuery("delete from Container").executeUpdate();
s.createQuery("delete from Info").executeUpdate();
s.createQuery("delete from Owner").executeUpdate();
t.commit();
s.close();
}
/**
 * Verifies that a proxy obtained via {@code Session#getReference} while the session is
 * modifiable stays modifiable even though the rest of the graph was loaded read-only,
 * and that lazy-collection initialization resolves to that same managed proxy.
 */
@SuppressWarnings( {"unchecked"})
@Test
public void testExistingReadOnlyAfterSetSessionModifiableProxyExisting(SessionFactoryScope scope) {
// Persist a fully-initialized graph; nothing is read-only in this first session.
Container cOrig = createContainer();
Set expectedInitializedObjects = new HashSet(
Arrays.asList(
cOrig,
cOrig.getNoProxyInfo(),
cOrig.getProxyInfo(),
cOrig.getNonLazyInfo(),
cOrig.getNoProxyOwner(),
cOrig.getProxyOwner(),
cOrig.getNonLazyOwner(),
cOrig.getLazyDataPoints().iterator().next(),
cOrig.getNonLazyJoinDataPoints().iterator().next(),
cOrig.getNonLazySelectDataPoints().iterator().next()
)
);
Set expectedReadOnlyObjects = new HashSet();
DataPoint lazyDataPointOrig = ( DataPoint ) cOrig.getLazyDataPoints().iterator().next();
Session s = openSession(scope);
assertFalse( s.isDefaultReadOnly() );
Transaction t = s.beginTransaction();
s.persist( cOrig );
checkContainer( cOrig, expectedInitializedObjects, expectedReadOnlyObjects, s );
// Enabling default read-only must not affect entities already in the persistence context.
s.setDefaultReadOnly( true );
assertTrue( s.isDefaultReadOnly() );
checkContainer( cOrig, expectedInitializedObjects, expectedReadOnlyObjects, s );
t.commit();
s.close();
// Reload in a fresh session with default read-only enabled: everything loaded now is read-only.
s = openSession(scope);
t = s.beginTransaction();
s.setDefaultReadOnly( true );
Container c = ( Container ) s.get( Container.class, cOrig.getId() );
assertNotSame( cOrig, c );
expectedInitializedObjects = new HashSet(
Arrays.asList(
c,
c.getNonLazyInfo(),
c.getNoProxyOwner(),
c.getProxyOwner(),
c.getNonLazyOwner(),
c.getNonLazyJoinDataPoints().iterator().next(),
c.getNonLazySelectDataPoints().iterator().next()
)
);
expectedReadOnlyObjects = new HashSet(
Arrays.asList(
c,
c.getNoProxyInfo(),
c.getProxyInfo(),
c.getNonLazyInfo(),
c.getNoProxyOwner(),
c.getProxyOwner(),
c.getNonLazyOwner(),
//c.getLazyDataPoints(),
c.getNonLazyJoinDataPoints().iterator().next(),
c.getNonLazySelectDataPoints().iterator().next()
)
);
checkContainer( c, expectedInitializedObjects, expectedReadOnlyObjects, s );
// Switching back to modifiable leaves the already-loaded (read-only) objects untouched.
s.setDefaultReadOnly( false );
checkContainer( c, expectedInitializedObjects, expectedReadOnlyObjects, s );
assertFalse( Hibernate.isInitialized( c.getNoProxyInfo() ) );
Hibernate.initialize( c.getNoProxyInfo() );
expectedInitializedObjects.add( c.getNoProxyInfo() );
checkContainer( c, expectedInitializedObjects, expectedReadOnlyObjects, s );
assertFalse( Hibernate.isInitialized( c.getProxyInfo() ) );
Hibernate.initialize( c.getProxyInfo() );
expectedInitializedObjects.add( c.getProxyInfo() );
checkContainer( c, expectedInitializedObjects, expectedReadOnlyObjects, s );
// Proxy created while the session is modifiable: it is NOT added to the read-only set.
DataPoint lazyDataPoint = ( DataPoint ) s.getReference( DataPoint.class, lazyDataPointOrig.getId() );
assertFalse( Hibernate.isInitialized( c.getLazyDataPoints() ) );
Hibernate.initialize( c.getLazyDataPoints() );
// Collection initialization must resolve to the existing managed proxy instance.
assertSame( lazyDataPoint, c.getLazyDataPoints().iterator().next() );
expectedInitializedObjects.add( c.getLazyDataPoints().iterator().next() );
checkContainer( c, expectedInitializedObjects, expectedReadOnlyObjects, s );
t.commit();
s.close();
// Clean up all test data so subsequent tests start from an empty database.
s = openSession(scope);
t = s.beginTransaction();
s.createQuery("delete from DataPoint").executeUpdate();
s.createQuery("delete from Container").executeUpdate();
s.createQuery("delete from Info").executeUpdate();
s.createQuery("delete from Owner").executeUpdate();
t.commit();
s.close();
}
/**
 * Verifies that a proxy obtained via {@code Session#getReference} while default read-only
 * is temporarily re-enabled becomes read-only, and that initializing the owning lazy
 * collection afterwards resolves to that same read-only proxy.
 */
@SuppressWarnings( {"unchecked"})
@Test
public void testExistingReadOnlyAfterSetSessionModifiableExistingProxyReadOnly(SessionFactoryScope scope) {
// Persist a fully-initialized graph; nothing is read-only in this first session.
Container cOrig = createContainer();
Set expectedInitializedObjects = new HashSet(
Arrays.asList(
cOrig,
cOrig.getNoProxyInfo(),
cOrig.getProxyInfo(),
cOrig.getNonLazyInfo(),
cOrig.getNoProxyOwner(),
cOrig.getProxyOwner(),
cOrig.getNonLazyOwner(),
cOrig.getLazyDataPoints().iterator().next(),
cOrig.getNonLazyJoinDataPoints().iterator().next(),
cOrig.getNonLazySelectDataPoints().iterator().next()
)
);
Set expectedReadOnlyObjects = new HashSet();
DataPoint lazyDataPointOrig = ( DataPoint ) cOrig.getLazyDataPoints().iterator().next();
Session s = openSession(scope);
assertFalse( s.isDefaultReadOnly() );
Transaction t = s.beginTransaction();
s.persist( cOrig );
checkContainer( cOrig, expectedInitializedObjects, expectedReadOnlyObjects, s );
// Enabling default read-only must not affect entities already in the persistence context.
s.setDefaultReadOnly( true );
assertTrue( s.isDefaultReadOnly() );
checkContainer( cOrig, expectedInitializedObjects, expectedReadOnlyObjects, s );
t.commit();
s.close();
// Reload in a fresh session with default read-only enabled: everything loaded now is read-only.
s = openSession(scope);
t = s.beginTransaction();
s.setDefaultReadOnly( true );
Container c = ( Container ) s.get( Container.class, cOrig.getId() );
assertNotSame( cOrig, c );
expectedInitializedObjects = new HashSet(
Arrays.asList(
c,
c.getNonLazyInfo(),
c.getNoProxyOwner(),
c.getProxyOwner(),
c.getNonLazyOwner(),
c.getNonLazyJoinDataPoints().iterator().next(),
c.getNonLazySelectDataPoints().iterator().next()
)
);
expectedReadOnlyObjects = new HashSet(
Arrays.asList(
c,
c.getNoProxyInfo(),
c.getProxyInfo(),
c.getNonLazyInfo(),
c.getNoProxyOwner(),
c.getProxyOwner(),
c.getNonLazyOwner(),
//c.getLazyDataPoints(),
c.getNonLazyJoinDataPoints().iterator().next(),
c.getNonLazySelectDataPoints().iterator().next()
)
);
checkContainer( c, expectedInitializedObjects, expectedReadOnlyObjects, s );
// Switching back to modifiable leaves the already-loaded (read-only) objects untouched.
s.setDefaultReadOnly( false );
checkContainer( c, expectedInitializedObjects, expectedReadOnlyObjects, s );
assertFalse( Hibernate.isInitialized( c.getNoProxyInfo() ) );
Hibernate.initialize( c.getNoProxyInfo() );
expectedInitializedObjects.add( c.getNoProxyInfo() );
checkContainer( c, expectedInitializedObjects, expectedReadOnlyObjects, s );
assertFalse( Hibernate.isInitialized( c.getProxyInfo() ) );
Hibernate.initialize( c.getProxyInfo() );
expectedInitializedObjects.add( c.getProxyInfo() );
checkContainer( c, expectedInitializedObjects, expectedReadOnlyObjects, s );
// Create the proxy while default read-only is re-enabled: the proxy becomes read-only.
s.setDefaultReadOnly( true );
DataPoint lazyDataPoint = ( DataPoint ) s.getReference( DataPoint.class, lazyDataPointOrig.getId() );
s.setDefaultReadOnly( false );
assertFalse( Hibernate.isInitialized( c.getLazyDataPoints() ) );
Hibernate.initialize( c.getLazyDataPoints() );
// Collection initialization must resolve to the existing read-only proxy instance.
assertSame( lazyDataPoint, c.getLazyDataPoints().iterator().next() );
expectedInitializedObjects.add( c.getLazyDataPoints().iterator().next() );
expectedReadOnlyObjects.add( lazyDataPoint );
checkContainer( c, expectedInitializedObjects, expectedReadOnlyObjects, s );
t.commit();
s.close();
// Clean up all test data so subsequent tests start from an empty database.
s = openSession(scope);
t = s.beginTransaction();
s.createQuery("delete from DataPoint").executeUpdate();
s.createQuery("delete from Container").executeUpdate();
s.createQuery("delete from Info").executeUpdate();
s.createQuery("delete from Owner").executeUpdate();
t.commit();
s.close();
}
/**
 * Verifies that in a default-modifiable session a query executed with
 * {@code setReadOnly(true)} loads the returned entity graph read-only, while
 * objects lazily initialized afterwards are loaded modifiable (session default).
 */
@SuppressWarnings( {"unchecked"})
@Test
public void testDefaultModifiableWithReadOnlyQueryForEntity(SessionFactoryScope scope) {
// Persist a fully-initialized graph; nothing is read-only in this first session.
Container cOrig = createContainer();
Set expectedInitializedObjects = new HashSet(
Arrays.asList(
cOrig,
cOrig.getNoProxyInfo(),
cOrig.getProxyInfo(),
cOrig.getNonLazyInfo(),
cOrig.getNoProxyOwner(),
cOrig.getProxyOwner(),
cOrig.getNonLazyOwner(),
cOrig.getLazyDataPoints().iterator().next(),
cOrig.getNonLazyJoinDataPoints().iterator().next(),
cOrig.getNonLazySelectDataPoints().iterator().next()
)
);
Set expectedReadOnlyObjects = new HashSet();
Session s = openSession(scope);
assertFalse( s.isDefaultReadOnly() );
Transaction t = s.beginTransaction();
s.persist( cOrig );
checkContainer( cOrig, expectedInitializedObjects, expectedReadOnlyObjects, s );
// Enabling default read-only must not affect entities already in the persistence context.
s.setDefaultReadOnly( true );
assertTrue( s.isDefaultReadOnly() );
checkContainer( cOrig, expectedInitializedObjects, expectedReadOnlyObjects, s );
t.commit();
s.close();
// Fresh session stays default-modifiable; the read-only flag is set on the query instead.
s = openSession(scope);
t = s.beginTransaction();
assertFalse( s.isDefaultReadOnly() );
Container c = ( Container ) s.createQuery( "from Container where id=" + cOrig.getId() )
.setReadOnly( true ).uniqueResult();
expectedInitializedObjects = new HashSet(
Arrays.asList(
c,
c.getNonLazyInfo(),
c.getNoProxyOwner(),
c.getProxyOwner(),
c.getNonLazyOwner(),
c.getNonLazyJoinDataPoints().iterator().next(),
c.getNonLazySelectDataPoints().iterator().next()
)
);
expectedReadOnlyObjects = new HashSet(
Arrays.asList(
c,
c.getNoProxyInfo(),
c.getProxyInfo(),
c.getNonLazyInfo(),
c.getNoProxyOwner(),
c.getProxyOwner(),
c.getNonLazyOwner(),
//c.getLazyDataPoints(),
c.getNonLazyJoinDataPoints().iterator().next(),
c.getNonLazySelectDataPoints().iterator().next()
)
);
checkContainer( c, expectedInitializedObjects, expectedReadOnlyObjects, s );
assertFalse( Hibernate.isInitialized( c.getNoProxyInfo() ) );
Hibernate.initialize( c.getNoProxyInfo() );
expectedInitializedObjects.add( c.getNoProxyInfo() );
checkContainer( c, expectedInitializedObjects, expectedReadOnlyObjects, s );
assertFalse( Hibernate.isInitialized( c.getProxyInfo() ) );
Hibernate.initialize( c.getProxyInfo() );
expectedInitializedObjects.add( c.getProxyInfo() );
checkContainer( c, expectedInitializedObjects, expectedReadOnlyObjects, s );
assertFalse( Hibernate.isInitialized( c.getLazyDataPoints() ) );
Hibernate.initialize( c.getLazyDataPoints() );
expectedInitializedObjects.add( c.getLazyDataPoints().iterator().next() );
// Lazily-loaded elements follow the session default (modifiable), not the query flag.
//expectedReadOnlyObjects.add(c.getLazyDataPoints().iterator().next() );
checkContainer( c, expectedInitializedObjects, expectedReadOnlyObjects, s );
t.commit();
s.close();
// Clean up all test data so subsequent tests start from an empty database.
s = openSession(scope);
t = s.beginTransaction();
s.createQuery("delete from DataPoint").executeUpdate();
s.createQuery("delete from Container").executeUpdate();
s.createQuery("delete from Info").executeUpdate();
s.createQuery("delete from Owner").executeUpdate();
t.commit();
s.close();
}
/**
 * Verifies that in a default-read-only session a query executed with
 * {@code setReadOnly(false)} loads the returned entity graph modifiable, while
 * objects lazily initialized afterwards are loaded read-only (session default).
 */
@SuppressWarnings( {"unchecked"})
@Test
public void testDefaultReadOnlyWithModifiableQueryForEntity(SessionFactoryScope scope) {
// Persist a fully-initialized graph; nothing is read-only in this first session.
Container cOrig = createContainer();
Set expectedInitializedObjects = new HashSet(
Arrays.asList(
cOrig,
cOrig.getNoProxyInfo(),
cOrig.getProxyInfo(),
cOrig.getNonLazyInfo(),
cOrig.getNoProxyOwner(),
cOrig.getProxyOwner(),
cOrig.getNonLazyOwner(),
cOrig.getLazyDataPoints().iterator().next(),
cOrig.getNonLazyJoinDataPoints().iterator().next(),
cOrig.getNonLazySelectDataPoints().iterator().next()
)
);
Set expectedReadOnlyObjects = new HashSet();
Session s = openSession(scope);
assertFalse( s.isDefaultReadOnly() );
Transaction t = s.beginTransaction();
s.persist( cOrig );
checkContainer( cOrig, expectedInitializedObjects, expectedReadOnlyObjects, s );
// Enabling default read-only must not affect entities already in the persistence context.
s.setDefaultReadOnly( true );
assertTrue( s.isDefaultReadOnly() );
checkContainer( cOrig, expectedInitializedObjects, expectedReadOnlyObjects, s );
t.commit();
s.close();
// Fresh session is default read-only; the query explicitly requests modifiable results.
s = openSession(scope);
t = s.beginTransaction();
s.setDefaultReadOnly( true );
assertTrue( s.isDefaultReadOnly() );
Container c = ( Container ) s.createQuery( "from Container where id=" + cOrig.getId() )
.setReadOnly( false ).uniqueResult();
expectedInitializedObjects = new HashSet(
Arrays.asList(
c,
c.getNonLazyInfo(),
c.getNoProxyOwner(),
c.getProxyOwner(),
c.getNonLazyOwner(),
c.getNonLazyJoinDataPoints().iterator().next(),
c.getNonLazySelectDataPoints().iterator().next()
)
);
// Nothing from the query result is read-only because of setReadOnly(false).
expectedReadOnlyObjects = new HashSet();
checkContainer( c, expectedInitializedObjects, expectedReadOnlyObjects, s );
assertFalse( Hibernate.isInitialized( c.getNoProxyInfo() ) );
Hibernate.initialize( c.getNoProxyInfo() );
expectedInitializedObjects.add( c.getNoProxyInfo() );
checkContainer( c, expectedInitializedObjects, expectedReadOnlyObjects, s );
assertFalse( Hibernate.isInitialized( c.getProxyInfo() ) );
Hibernate.initialize( c.getProxyInfo() );
expectedInitializedObjects.add( c.getProxyInfo() );
checkContainer( c, expectedInitializedObjects, expectedReadOnlyObjects, s );
assertFalse( Hibernate.isInitialized( c.getLazyDataPoints() ) );
Hibernate.initialize( c.getLazyDataPoints() );
expectedInitializedObjects.add( c.getLazyDataPoints().iterator().next() );
// Lazily-loaded elements follow the session default (read-only), not the query flag.
expectedReadOnlyObjects.add(c.getLazyDataPoints().iterator().next() );
checkContainer( c, expectedInitializedObjects, expectedReadOnlyObjects, s );
t.commit();
s.close();
// Clean up all test data so subsequent tests start from an empty database.
s = openSession(scope);
t = s.beginTransaction();
s.createQuery("delete from DataPoint").executeUpdate();
s.createQuery("delete from Container").executeUpdate();
s.createQuery("delete from Info").executeUpdate();
s.createQuery("delete from Owner").executeUpdate();
t.commit();
s.close();
}
/**
 * Verifies that in a default-read-only session a query with no explicit read-only
 * setting loads the returned entity graph read-only, and that objects lazily
 * initialized afterwards are also loaded read-only.
 */
@SuppressWarnings( {"unchecked"})
@Test
public void testDefaultReadOnlyWithQueryForEntity(SessionFactoryScope scope) {
// Persist a fully-initialized graph; nothing is read-only in this first session.
Container cOrig = createContainer();
Set expectedInitializedObjects = new HashSet(
Arrays.asList(
cOrig,
cOrig.getNoProxyInfo(),
cOrig.getProxyInfo(),
cOrig.getNonLazyInfo(),
cOrig.getNoProxyOwner(),
cOrig.getProxyOwner(),
cOrig.getNonLazyOwner(),
cOrig.getLazyDataPoints().iterator().next(),
cOrig.getNonLazyJoinDataPoints().iterator().next(),
cOrig.getNonLazySelectDataPoints().iterator().next()
)
);
Set expectedReadOnlyObjects = new HashSet();
Session s = openSession(scope);
assertFalse( s.isDefaultReadOnly() );
Transaction t = s.beginTransaction();
s.persist( cOrig );
checkContainer( cOrig, expectedInitializedObjects, expectedReadOnlyObjects, s );
// Enabling default read-only must not affect entities already in the persistence context.
s.setDefaultReadOnly( true );
assertTrue( s.isDefaultReadOnly() );
checkContainer( cOrig, expectedInitializedObjects, expectedReadOnlyObjects, s );
t.commit();
s.close();
// Fresh session is default read-only; the query inherits that default.
s = openSession(scope);
t = s.beginTransaction();
s.setDefaultReadOnly( true );
assertTrue( s.isDefaultReadOnly() );
Container c = ( Container ) s.createQuery( "from Container where id=" + cOrig.getId() )
.uniqueResult();
expectedInitializedObjects = new HashSet(
Arrays.asList(
c,
c.getNonLazyInfo(),
c.getNoProxyOwner(),
c.getProxyOwner(),
c.getNonLazyOwner(),
c.getNonLazyJoinDataPoints().iterator().next(),
c.getNonLazySelectDataPoints().iterator().next()
)
);
expectedReadOnlyObjects = new HashSet(
Arrays.asList(
c,
c.getNoProxyInfo(),
c.getProxyInfo(),
c.getNonLazyInfo(),
c.getNoProxyOwner(),
c.getProxyOwner(),
c.getNonLazyOwner(),
//c.getLazyDataPoints(),
c.getNonLazyJoinDataPoints().iterator().next(),
c.getNonLazySelectDataPoints().iterator().next()
)
);
checkContainer( c, expectedInitializedObjects, expectedReadOnlyObjects, s );
assertFalse( Hibernate.isInitialized( c.getNoProxyInfo() ) );
Hibernate.initialize( c.getNoProxyInfo() );
expectedInitializedObjects.add( c.getNoProxyInfo() );
checkContainer( c, expectedInitializedObjects, expectedReadOnlyObjects, s );
assertFalse( Hibernate.isInitialized( c.getProxyInfo() ) );
Hibernate.initialize( c.getProxyInfo() );
expectedInitializedObjects.add( c.getProxyInfo() );
checkContainer( c, expectedInitializedObjects, expectedReadOnlyObjects, s );
assertFalse( Hibernate.isInitialized( c.getLazyDataPoints() ) );
Hibernate.initialize( c.getLazyDataPoints() );
expectedInitializedObjects.add( c.getLazyDataPoints().iterator().next() );
// Lazily-loaded elements also follow the session default and become read-only.
expectedReadOnlyObjects.add(c.getLazyDataPoints().iterator().next() );
checkContainer( c, expectedInitializedObjects, expectedReadOnlyObjects, s );
t.commit();
s.close();
// Clean up all test data so subsequent tests start from an empty database.
s = openSession(scope);
t = s.beginTransaction();
s.createQuery("delete from DataPoint").executeUpdate();
s.createQuery("delete from Container").executeUpdate();
s.createQuery("delete from Info").executeUpdate();
s.createQuery("delete from Owner").executeUpdate();
t.commit();
s.close();
}
/**
 * Verifies that in a default-modifiable session a query with no explicit read-only
 * setting loads the returned entity graph modifiable, including objects lazily
 * initialized afterwards.
 */
@SuppressWarnings( {"unchecked"})
@Test
public void testDefaultModifiableWithQueryForEntity(SessionFactoryScope scope) {
// Persist a fully-initialized graph; nothing is read-only in this first session.
Container cOrig = createContainer();
Set expectedInitializedObjects = new HashSet(
Arrays.asList(
cOrig,
cOrig.getNoProxyInfo(),
cOrig.getProxyInfo(),
cOrig.getNonLazyInfo(),
cOrig.getNoProxyOwner(),
cOrig.getProxyOwner(),
cOrig.getNonLazyOwner(),
cOrig.getLazyDataPoints().iterator().next(),
cOrig.getNonLazyJoinDataPoints().iterator().next(),
cOrig.getNonLazySelectDataPoints().iterator().next()
)
);
Set expectedReadOnlyObjects = new HashSet();
Session s = openSession(scope);
assertFalse( s.isDefaultReadOnly() );
Transaction t = s.beginTransaction();
s.persist( cOrig );
checkContainer( cOrig, expectedInitializedObjects, expectedReadOnlyObjects, s );
// Enabling default read-only must not affect entities already in the persistence context.
s.setDefaultReadOnly( true );
assertTrue( s.isDefaultReadOnly() );
checkContainer( cOrig, expectedInitializedObjects, expectedReadOnlyObjects, s );
t.commit();
s.close();
// Fresh session is default-modifiable; the query inherits that default.
s = openSession(scope);
t = s.beginTransaction();
assertFalse( s.isDefaultReadOnly() );
Container c = ( Container ) s.createQuery( "from Container where id=" + cOrig.getId() )
.uniqueResult();
expectedInitializedObjects = new HashSet(
Arrays.asList(
c,
c.getNonLazyInfo(),
c.getNoProxyOwner(),
c.getProxyOwner(),
c.getNonLazyOwner(),
c.getNonLazyJoinDataPoints().iterator().next(),
c.getNonLazySelectDataPoints().iterator().next()
)
);
// Nothing should be read-only in a modifiable session with a default query.
expectedReadOnlyObjects = new HashSet();
checkContainer( c, expectedInitializedObjects, expectedReadOnlyObjects, s );
assertFalse( Hibernate.isInitialized( c.getNoProxyInfo() ) );
Hibernate.initialize( c.getNoProxyInfo() );
expectedInitializedObjects.add( c.getNoProxyInfo() );
checkContainer( c, expectedInitializedObjects, expectedReadOnlyObjects, s );
assertFalse( Hibernate.isInitialized( c.getProxyInfo() ) );
Hibernate.initialize( c.getProxyInfo() );
expectedInitializedObjects.add( c.getProxyInfo() );
checkContainer( c, expectedInitializedObjects, expectedReadOnlyObjects, s );
assertFalse( Hibernate.isInitialized( c.getLazyDataPoints() ) );
Hibernate.initialize( c.getLazyDataPoints() );
expectedInitializedObjects.add( c.getLazyDataPoints().iterator().next() );
// Lazily-loaded elements also stay modifiable under the session default.
//expectedReadOnlyObjects.add(c.getLazyDataPoints().iterator().next() );
checkContainer( c, expectedInitializedObjects, expectedReadOnlyObjects, s );
t.commit();
s.close();
// Clean up all test data so subsequent tests start from an empty database.
s = openSession(scope);
t = s.beginTransaction();
s.createQuery("delete from DataPoint").executeUpdate();
s.createQuery("delete from Container").executeUpdate();
s.createQuery("delete from Info").executeUpdate();
s.createQuery("delete from Owner").executeUpdate();
t.commit();
s.close();
}
/**
 * Verifies that a read-only query selecting collection elements (rather than the
 * owning entity) loads those elements read-only in a default-modifiable session.
 */
@SuppressWarnings( {"unchecked"})
@Test
public void testDefaultModifiableWithReadOnlyQueryForCollectionEntities(SessionFactoryScope scope) {
// Persist a fully-initialized graph; nothing is read-only in this first session.
Container cOrig = createContainer();
Set expectedInitializedObjects = new HashSet(
Arrays.asList(
cOrig,
cOrig.getNoProxyInfo(),
cOrig.getProxyInfo(),
cOrig.getNonLazyInfo(),
cOrig.getNoProxyOwner(),
cOrig.getProxyOwner(),
cOrig.getNonLazyOwner(),
cOrig.getLazyDataPoints().iterator().next(),
cOrig.getNonLazyJoinDataPoints().iterator().next(),
cOrig.getNonLazySelectDataPoints().iterator().next()
)
);
Set expectedReadOnlyObjects = new HashSet();
Session s = openSession(scope);
assertFalse( s.isDefaultReadOnly() );
Transaction t = s.beginTransaction();
s.persist( cOrig );
checkContainer( cOrig, expectedInitializedObjects, expectedReadOnlyObjects, s );
// Enabling default read-only must not affect entities already in the persistence context.
s.setDefaultReadOnly( true );
assertTrue( s.isDefaultReadOnly() );
checkContainer( cOrig, expectedInitializedObjects, expectedReadOnlyObjects, s );
t.commit();
s.close();
// Select a collection element via a read-only query in a default-modifiable session.
s = openSession(scope);
t = s.beginTransaction();
assertFalse( s.isDefaultReadOnly() );
DataPoint dp = ( DataPoint ) s.createQuery( "select c.lazyDataPoints from Container c join c.lazyDataPoints where c.id=" + cOrig.getId() )
.setReadOnly( true ).uniqueResult();
// The query's read-only flag applies to the selected collection element.
assertTrue( s.isReadOnly( dp ) );
t.commit();
s.close();
// Clean up all test data so subsequent tests start from an empty database.
s = openSession(scope);
t = s.beginTransaction();
s.createQuery("delete from DataPoint").executeUpdate();
s.createQuery("delete from Container").executeUpdate();
s.createQuery("delete from Info").executeUpdate();
s.createQuery("delete from Owner").executeUpdate();
t.commit();
s.close();
}
/**
 * Builds the transient test object graph: one {@code Container} holding three
 * {@code Info} associations, three {@code Owner} associations, and one element in
 * each of the lazy, non-lazy-join, and non-lazy-select {@code DataPoint} collections.
 */
private Container createContainer() {
    Container container = new Container("container");
    container.setNoProxyInfo(new Info("no-proxy info"));
    container.setProxyInfo(new Info("proxy info"));
    container.setNonLazyInfo(new Info("non-lazy info"));
    container.setNoProxyOwner(new Owner("no-proxy owner"));
    container.setProxyOwner(new Owner("proxy owner"));
    container.setNonLazyOwner(new Owner("non-lazy owner"));
    container.getLazyDataPoints().add(new DataPoint(new BigDecimal(1), new BigDecimal(1), "lazy data point"));
    container.getNonLazyJoinDataPoints().add(new DataPoint(new BigDecimal(2), new BigDecimal(2), "non-lazy join data point"));
    container.getNonLazySelectDataPoints().add(new DataPoint(new BigDecimal(3), new BigDecimal(3), "non-lazy select data point"));
    return container;
}
/**
 * Asserts the initialization and read-only state of the container and of every
 * reachable associated object against the expected sets. Traversal stops at the
 * container itself when it is not expected to be initialized.
 */
private void checkContainer(Container c, Set expectedInitializedObjects, Set expectedReadOnlyObjects, Session s) {
    checkObject(c, expectedInitializedObjects, expectedReadOnlyObjects, s);
    if (!expectedInitializedObjects.contains(c)) {
        // An uninitialized container cannot be traversed any further.
        return;
    }
    checkObject(c.getNoProxyInfo(), expectedInitializedObjects, expectedReadOnlyObjects, s);
    checkObject(c.getProxyInfo(), expectedInitializedObjects, expectedReadOnlyObjects, s);
    checkObject(c.getNonLazyInfo(), expectedInitializedObjects, expectedReadOnlyObjects, s);
    checkObject(c.getNoProxyOwner(), expectedInitializedObjects, expectedReadOnlyObjects, s);
    checkObject(c.getProxyOwner(), expectedInitializedObjects, expectedReadOnlyObjects, s);
    checkObject(c.getNonLazyOwner(), expectedInitializedObjects, expectedReadOnlyObjects, s);
    // Only inspect the lazy collection's elements if it has already been initialized;
    // touching it otherwise would trigger initialization as a side effect.
    if (Hibernate.isInitialized(c.getLazyDataPoints())) {
        for (Object dataPoint : c.getLazyDataPoints()) {
            checkObject(dataPoint, expectedInitializedObjects, expectedReadOnlyObjects, s);
        }
    }
    for (Object dataPoint : c.getNonLazyJoinDataPoints()) {
        checkObject(dataPoint, expectedInitializedObjects, expectedReadOnlyObjects, s);
    }
    for (Object dataPoint : c.getNonLazySelectDataPoints()) {
        checkObject(dataPoint, expectedInitializedObjects, expectedReadOnlyObjects, s);
    }
}
/**
 * Asserts a single entity or proxy's initialization and read-only state. When the
 * object is initialized, the check is repeated on the underlying implementation of
 * a proxy to confirm entity and proxy agree on the read-only flag.
 */
private void checkObject(Object entityOrProxy, Set expectedInitializedObjects, Set expectedReadOnlyObjects, Session s) {
    boolean shouldBeInitialized = expectedInitializedObjects.contains(entityOrProxy);
    boolean shouldBeReadOnly = expectedReadOnlyObjects.contains(entityOrProxy);
    SessionImplementor sessionImplementor = (SessionImplementor) s;
    assertEquals(shouldBeInitialized, Hibernate.isInitialized(entityOrProxy));
    assertEquals(shouldBeReadOnly, s.isReadOnly(entityOrProxy));
    if (Hibernate.isInitialized(entityOrProxy)) {
        final Object entity;
        if (entityOrProxy instanceof HibernateProxy) {
            // Unwrap the proxy to reach the actual entity instance.
            entity = ((HibernateProxy) entityOrProxy).getHibernateLazyInitializer().getImplementation(sessionImplementor);
        } else {
            entity = entityOrProxy;
        }
        assertNotNull(entity);
        assertEquals(shouldBeReadOnly, s.isReadOnly(entity));
    }
}
/** Opens a new {@code Session} from the factory managed by the given test scope. */
private Session openSession(SessionFactoryScope scope) {
    return scope
            .getSessionFactory()
            .openSession();
}
}
| ReadOnlySessionLazyNonLazyTest |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/indices/AbstractIndexShardCacheEntity.java | {
"start": 854,
"end": 1712
} | class ____ implements IndicesRequestCache.CacheEntity {
/**
* Get the {@linkplain ShardRequestCache} used to track cache statistics.
*/
protected abstract ShardRequestCache stats();
@Override
public final void onCached(IndicesRequestCache.Key key, BytesReference value) {
stats().onCached(key, value);
}
@Override
public final void onHit() {
stats().onHit();
}
@Override
public final void onMiss() {
stats().onMiss();
}
@Override
public final void onRemoval(RemovalNotification<IndicesRequestCache.Key, BytesReference> notification) {
stats().onRemoval(
notification.getKey(),
notification.getValue(),
notification.getRemovalReason() == RemovalNotification.RemovalReason.EVICTED
);
}
}
| AbstractIndexShardCacheEntity |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/ser/TestRootType.java | {
"start": 637,
"end": 867
} | class ____
extends DatabindTestUtil
{
/*
/**********************************************************
/* Annotated helper classes
/**********************************************************
*/
| TestRootType |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java | {
"start": 117066,
"end": 182263
} | enum ____ {
CREATE_FILE,
APPEND_FILE,
TRUNCATE_FILE,
RECOVER_LEASE;
public String getExceptionMessage(String src, String holder,
String clientMachine, String reason) {
return "Failed to " + this + " " + src + " for " + holder +
" on " + clientMachine + " because " + reason;
}
}
boolean recoverLeaseInternal(RecoverLeaseOp op, INodesInPath iip,
String src, String holder, String clientMachine, boolean force)
throws IOException {
assert hasWriteLock(RwLockMode.GLOBAL);
INodeFile file = iip.getLastINode().asFile();
if (file.isUnderConstruction()) {
//
// If the file is under construction , then it must be in our
// leases. Find the appropriate lease record.
//
Lease lease = leaseManager.getLease(holder);
if (!force && lease != null) {
Lease leaseFile = leaseManager.getLease(file);
if (leaseFile != null && leaseFile.equals(lease)) {
// We found the lease for this file but the original
// holder is trying to obtain it again.
throw new AlreadyBeingCreatedException(
op.getExceptionMessage(src, holder, clientMachine,
holder + " is already the current lease holder."));
}
}
//
// Find the original holder.
//
FileUnderConstructionFeature uc = file.getFileUnderConstructionFeature();
String clientName = uc.getClientName();
lease = leaseManager.getLease(clientName);
if (lease == null) {
throw new AlreadyBeingCreatedException(
op.getExceptionMessage(src, holder, clientMachine,
"the file is under construction but no leases found."));
}
if (force) {
// close now: no need to wait for soft lease expiration and
// close only the file src
LOG.info("recoverLease: " + lease + ", src=" + src +
" from client " + clientName);
return internalReleaseLease(lease, src, iip, holder);
} else {
assert lease.getHolder().equals(clientName) :
"Current lease holder " + lease.getHolder() +
" does not match file creator " + clientName;
//
// If the original holder has not renewed in the last SOFTLIMIT
// period, then start lease recovery.
//
if (lease.expiredSoftLimit()) {
LOG.info("startFile: recover " + lease + ", src=" + src + " client "
+ clientName);
if (internalReleaseLease(lease, src, iip, null)) {
return true;
} else {
throw new RecoveryInProgressException(
op.getExceptionMessage(src, holder, clientMachine,
"lease recovery is in progress. Try again later."));
}
} else {
final BlockInfo lastBlock = file.getLastBlock();
if (lastBlock != null
&& lastBlock.getBlockUCState() == BlockUCState.UNDER_RECOVERY) {
throw new RecoveryInProgressException(
op.getExceptionMessage(src, holder, clientMachine,
"another recovery is in progress by "
+ clientName + " on " + uc.getClientMachine()));
} else {
throw new AlreadyBeingCreatedException(
op.getExceptionMessage(src, holder, clientMachine,
"this file lease is currently owned by "
+ clientName + " on " + uc.getClientMachine()));
}
}
}
} else {
return true;
}
}
/**
* Append to an existing file in the namespace.
*/
LastBlockWithStatus appendFile(String srcArg, String holder,
String clientMachine, EnumSet<CreateFlag> flag, boolean logRetryCache)
throws IOException {
final String operationName = "append";
boolean newBlock = flag.contains(CreateFlag.NEW_BLOCK);
if (newBlock) {
requireEffectiveLayoutVersionForFeature(Feature.APPEND_NEW_BLOCK);
}
NameNode.stateChangeLog.debug("DIR* NameSystem.appendFile: src={}, holder={}, clientMachine={}",
srcArg, holder, clientMachine);
try {
boolean skipSync = false;
LastBlockWithStatus lbs = null;
checkOperation(OperationCategory.WRITE);
final FSPermissionChecker pc = getPermissionChecker();
FSPermissionChecker.setOperationType(operationName);
writeLock(RwLockMode.GLOBAL);
try {
checkOperation(OperationCategory.WRITE);
checkNameNodeSafeMode("Cannot append to file" + srcArg);
lbs = FSDirAppendOp.appendFile(this, srcArg, pc, holder, clientMachine,
newBlock, logRetryCache);
} catch (StandbyException se) {
skipSync = true;
throw se;
} finally {
writeUnlock(RwLockMode.GLOBAL, operationName, getLockReportInfoSupplier(srcArg));
// There might be transactions logged while trying to recover the lease
// They need to be sync'ed even when an exception was thrown.
if (!skipSync) {
getEditLog().logSync();
}
}
logAuditEvent(true, operationName, srcArg);
return lbs;
} catch (AccessControlException e) {
logAuditEvent(false, operationName, srcArg);
throw e;
}
}
ExtendedBlock getExtendedBlock(Block blk) {
return new ExtendedBlock(getBlockPoolId(), blk);
}
void setBlockPoolId(String bpid) {
blockManager.setBlockPoolId(bpid);
}
/**
* The client would like to obtain an additional block for the indicated
* filename (which is being written-to). Return an array that consists
* of the block, plus a set of machines. The first on this list should
* be where the client writes data. Subsequent items in the list must
* be provided in the connection to the first datanode.
*
* Make sure the previous blocks have been reported by datanodes and
* are replicated. Will return an empty 2-elt array if we want the
* client to "try again later".
*/
LocatedBlock getAdditionalBlock(
String src, long fileId, String clientName, ExtendedBlock previous,
DatanodeInfo[] excludedNodes, String[] favoredNodes,
EnumSet<AddBlockFlag> flags) throws IOException {
final String operationName = "getAdditionalBlock";
NameNode.stateChangeLog.debug("BLOCK* getAdditionalBlock: {} inodeId {} for {}",
src, fileId, clientName);
LocatedBlock[] onRetryBlock = new LocatedBlock[1];
FSDirWriteFileOp.ValidateAddBlockResult r;
checkOperation(OperationCategory.WRITE);
final FSPermissionChecker pc = getPermissionChecker();
FSPermissionChecker.setOperationType(operationName);
readLock(RwLockMode.GLOBAL);
try {
checkOperation(OperationCategory.WRITE);
r = FSDirWriteFileOp.validateAddBlock(this, pc, src, fileId, clientName,
previous, onRetryBlock);
} finally {
readUnlock(RwLockMode.GLOBAL, operationName);
}
if (r == null) {
assert onRetryBlock[0] != null : "Retry block is null";
// This is a retry. Just return the last block.
return onRetryBlock[0];
}
DatanodeStorageInfo[] targets = FSDirWriteFileOp.chooseTargetForNewBlock(
blockManager, src, excludedNodes, favoredNodes, flags, r);
checkOperation(OperationCategory.WRITE);
writeLock(RwLockMode.GLOBAL);
LocatedBlock lb;
try {
checkOperation(OperationCategory.WRITE);
lb = FSDirWriteFileOp.storeAllocatedBlock(
this, src, fileId, clientName, previous, targets);
} finally {
writeUnlock(RwLockMode.GLOBAL, operationName);
}
getEditLog().logSync();
return lb;
}
  /**
   * Add additional datanode(s) to an existing write pipeline, e.g. to replace
   * a failed datanode during pipeline recovery.
   *
   * @see ClientProtocol#getAdditionalDatanode
   */
  LocatedBlock getAdditionalDatanode(String src, long fileId,
      final ExtendedBlock blk, final DatanodeInfo[] existings,
      final String[] storageIDs,
      final Set<Node> excludes,
      final int numAdditionalNodes, final String clientName
      ) throws IOException {
    //check if the feature is enabled
    dtpReplaceDatanodeOnFailure.checkEnabled();
    Node clientnode = null;
    String clientMachine;
    final long preferredblocksize;
    final byte storagePolicyID;
    final List<DatanodeStorageInfo> chosen;
    final BlockType blockType;
    final String operationName = "getAdditionalDatanode";
    checkOperation(OperationCategory.WRITE);
    final FSPermissionChecker pc = getPermissionChecker();
    FSPermissionChecker.setOperationType(operationName);
    // Snapshot all file metadata needed for placement under the read lock,
    // then do the (potentially slow) target selection outside the lock.
    readLock(RwLockMode.FS);
    try {
      // Changing this operation category to WRITE instead of making getAdditionalDatanode as a
      // read method is aim to let Active NameNode to handle this RPC, because Active NameNode
      // contains a more complete DN selection context than Observer NameNode.
      checkOperation(OperationCategory.WRITE);
      //check safe mode
      checkNameNodeSafeMode("Cannot add datanode; src=" + src + ", blk=" + blk);
      final INodesInPath iip = dir.resolvePath(pc, src, fileId);
      src = iip.getPath();
      //check lease
      final INodeFile file = checkLease(iip, clientName, fileId);
      clientMachine = file.getFileUnderConstructionFeature().getClientMachine();
      clientnode = blockManager.getDatanodeManager().getDatanodeByHost(clientMachine);
      preferredblocksize = file.getPreferredBlockSize();
      storagePolicyID = file.getStoragePolicyID();
      blockType = file.getBlockType();
      //find datanode storages
      final DatanodeManager dm = blockManager.getDatanodeManager();
      chosen = Arrays.asList(dm.getDatanodeStorageInfos(existings, storageIDs,
          "src=%s, fileId=%d, blk=%s, clientName=%s, clientMachine=%s",
          src, fileId, blk, clientName, clientMachine));
    } finally {
      readUnlock(RwLockMode.FS, operationName);
    }
    if (clientnode == null) {
      // Client is not a datanode host; resolve its network location instead.
      clientnode = FSDirWriteFileOp.getClientNode(blockManager, clientMachine);
    }
    // choose new datanodes.
    final DatanodeStorageInfo[] targets = blockManager.chooseTarget4AdditionalDatanode(
        src, numAdditionalNodes, clientnode, chosen,
        excludes, preferredblocksize, storagePolicyID, blockType);
    final LocatedBlock lb = BlockManager.newLocatedBlock(
        blk, targets, -1, false);
    blockManager.setBlockToken(lb, BlockTokenIdentifier.AccessMode.COPY);
    return lb;
  }
/**
* The client would like to let go of the given block
*/
void abandonBlock(ExtendedBlock b, long fileId, String src, String holder)
throws IOException {
final String operationName = "abandonBlock";
NameNode.stateChangeLog.debug("BLOCK* NameSystem.abandonBlock: {} of file {}", b, src);
checkOperation(OperationCategory.WRITE);
final FSPermissionChecker pc = getPermissionChecker();
FSPermissionChecker.setOperationType(operationName);
writeLock(RwLockMode.GLOBAL);
try {
checkOperation(OperationCategory.WRITE);
checkNameNodeSafeMode("Cannot abandon block " + b + " for file" + src);
FSDirWriteFileOp.abandonBlock(dir, pc, b, fileId, src, holder);
NameNode.stateChangeLog.debug(
"BLOCK* NameSystem.abandonBlock: {} is removed from pendingCreates", b);
} finally {
writeUnlock(RwLockMode.GLOBAL, operationName);
}
getEditLog().logSync();
}
private String leaseExceptionString(String src, long fileId, String holder) {
final Lease lease = leaseManager.getLease(holder);
return src + " (inode " + fileId + ") " + (lease != null? lease.toString()
: "Holder " + holder + " does not have any open files.");
}
INodeFile checkLease(INodesInPath iip, String holder, long fileId)
throws LeaseExpiredException, FileNotFoundException {
String src = iip.getPath();
INode inode = iip.getLastINode();
assert hasReadLock(RwLockMode.FS);
if (inode == null) {
throw new FileNotFoundException("File does not exist: "
+ leaseExceptionString(src, fileId, holder));
}
if (!inode.isFile()) {
throw new LeaseExpiredException("INode is not a regular file: "
+ leaseExceptionString(src, fileId, holder));
}
final INodeFile file = inode.asFile();
if (!file.isUnderConstruction()) {
throw new LeaseExpiredException("File is not open for writing: "
+ leaseExceptionString(src, fileId, holder));
}
// No further modification is allowed on a deleted file.
// A file is considered deleted, if it is not in the inodeMap or is marked
// as deleted in the snapshot feature.
if (isFileDeleted(file)) {
throw new FileNotFoundException("File is deleted: "
+ leaseExceptionString(src, fileId, holder));
}
final String owner = file.getFileUnderConstructionFeature().getClientName();
if (holder != null && !owner.equals(holder)) {
throw new LeaseExpiredException("Client (=" + holder
+ ") is not the lease owner (=" + owner + ": "
+ leaseExceptionString(src, fileId, holder));
}
return file;
}
  /**
   * Complete in-progress write to the given file.
   * @return true if successful, false if the client should continue to retry
   *         (e.g if not all blocks have reached minimum replication yet)
   * @throws IOException on error (eg lease mismatch, file not open, file deleted)
   */
  boolean completeFile(final String src, String holder,
                       ExtendedBlock last, long fileId)
    throws IOException {
    boolean success = false;
    final String operationName = "completeFile";
    checkOperation(OperationCategory.WRITE);
    final FSPermissionChecker pc = getPermissionChecker();
    FSPermissionChecker.setOperationType(operationName);
    writeLock(RwLockMode.GLOBAL);
    try {
      // Re-check under the lock: HA state may have changed.
      checkOperation(OperationCategory.WRITE);
      checkNameNodeSafeMode("Cannot complete file " + src);
      success = FSDirWriteFileOp.completeFile(this, pc, src, holder, last,
                                              fileId);
    } finally {
      writeUnlock(RwLockMode.GLOBAL, operationName);
    }
    // Sync even on failure so any partial edits are durable.
    getEditLog().logSync();
    if (success) {
      NameNode.stateChangeLog.info("DIR* completeFile: " + src
          + " is closed by " + holder);
    }
    return success;
  }
  /**
   * Create new block with a unique block id and a new generation stamp.
   * @param blockType is the file under striping or contiguous layout?
   * @return a fresh {@link Block} with zero length and zero initial stamp
   *         replaced by the next generation stamp
   * @throws IOException if allocating the id or stamp fails
   */
  Block createNewBlock(BlockType blockType) throws IOException {
    // nextBlockId and nextGenerationStamp need to write edit log, so it needs FSLock.
    assert hasWriteLock(RwLockMode.GLOBAL);
    Block b = new Block(nextBlockId(blockType), 0, 0);
    // Increment the generation stamp for every new block.
    b.setGenerationStamp(nextGenerationStamp(false));
    return b;
  }
/**
* Check that the indicated file's blocks are present and
* replicated. If not, return false. If checkall is true, then check
* all blocks, otherwise check only penultimate block.
*/
boolean checkFileProgress(String src, INodeFile v, boolean checkall) {
assert hasReadLock(RwLockMode.GLOBAL);
if (checkall) {
return checkBlocksComplete(src, true, v.getBlocks());
} else {
final BlockInfo[] blocks = v.getBlocks();
final int i = blocks.length - numCommittedAllowed - 2;
return i < 0 || blocks[i] == null
|| checkBlocksComplete(src, false, blocks[i]);
}
}
/**
* Check if the blocks are COMPLETE;
* it may allow the last block to be COMMITTED.
*/
private boolean checkBlocksComplete(String src, boolean allowCommittedBlock,
BlockInfo... blocks) {
final int n = allowCommittedBlock? numCommittedAllowed: 0;
for(int i = 0; i < blocks.length; i++) {
final short min = blockManager.getMinStorageNum(blocks[i]);
final String err = INodeFile.checkBlockComplete(blocks, i, n, min);
if (err != null) {
final int numNodes = blocks[i].numNodes();
LOG.info("BLOCK* " + err + "(numNodes= " + numNodes
+ (numNodes < min ? " < " : " >= ")
+ " minimum = " + min + ") in file " + src);
return false;
}
}
return true;
}
  /**
   * Change the indicated filename.
   * @deprecated Use {@link #renameTo(String, String, boolean,
   * Options.Rename...)} instead.
   */
  @Deprecated
  boolean renameTo(String src, String dst, boolean logRetryCache)
      throws IOException {
    final String operationName = "rename";
    FSDirRenameOp.RenameResult ret = null;
    checkOperation(OperationCategory.WRITE);
    final FSPermissionChecker pc = getPermissionChecker();
    FSPermissionChecker.setOperationType(operationName);
    try {
      writeLock(RwLockMode.FS);
      try {
        // Re-check under the lock: HA state may have changed.
        checkOperation(OperationCategory.WRITE);
        checkNameNodeSafeMode("Cannot rename " + src);
        ret = FSDirRenameOp.renameToInt(dir, pc, src, dst, logRetryCache);
      } finally {
        // ret may be null if renameToInt threw; guard before dereferencing.
        FileStatus status = ret != null ? ret.auditStat : null;
        writeUnlock(RwLockMode.FS, operationName,
            getLockReportInfoSupplier(src, dst, status));
      }
    } catch (AccessControlException e) {
      // Audit the denial before propagating it to the caller.
      logAuditEvent(false, operationName, src, dst, null);
      throw e;
    }
    assert ret != null;
    boolean success = ret.success;
    if (success) {
      getEditLog().logSync();
      logAuditEvent(true, operationName, src, dst, ret.auditStat);
    }
    return success;
  }
  /**
   * Rename {@code src} to {@code dst} honoring the given {@link Options.Rename}
   * flags (e.g. OVERWRITE). Blocks belonging to an overwritten destination are
   * queued for deletion after the edit log is synced.
   *
   * @throws IOException if the rename fails or the namenode is in safe mode
   */
  void renameTo(final String src, final String dst,
                boolean logRetryCache, Options.Rename... options)
      throws IOException {
    final String operationName = "rename";
    FSDirRenameOp.RenameResult res = null;
    checkOperation(OperationCategory.WRITE);
    final FSPermissionChecker pc = getPermissionChecker();
    FSPermissionChecker.setOperationType(operationName);
    try {
      writeLock(RwLockMode.GLOBAL);
      try {
        // Re-check under the lock: HA state may have changed.
        checkOperation(OperationCategory.WRITE);
        checkNameNodeSafeMode("Cannot rename " + src);
        res = FSDirRenameOp.renameToInt(dir, pc, src, dst, logRetryCache,
            options);
      } finally {
        // res may be null if renameToInt threw; guard before dereferencing.
        FileStatus status = res != null ? res.auditStat : null;
        writeUnlock(RwLockMode.GLOBAL, operationName,
            getLockReportInfoSupplier(src, dst, status));
      }
    } catch (AccessControlException e) {
      logAuditEvent(false, operationName + " (options=" +
          Arrays.toString(options) + ")", src, dst, null);
      throw e;
    }
    getEditLog().logSync();
    assert res != null;
    // Deleting an overwritten destination's blocks is deferred until after
    // the edit-log sync for better performance.
    BlocksMapUpdateInfo collectedBlocks = res.collectedBlocks;
    if (!collectedBlocks.getToDeleteList().isEmpty()) {
      blockManager.addBLocksToMarkedDeleteQueue(
          collectedBlocks.getToDeleteList());
    }
    logAuditEvent(true, operationName + " (options=" +
        Arrays.toString(options) + ")", src, dst, res.auditStat);
  }
  /**
   * Remove the indicated file from namespace.
   *
   * @see ClientProtocol#delete(String, boolean) for detailed description and
   * description of exceptions
   */
  boolean delete(String src, boolean recursive, boolean logRetryCache)
      throws IOException {
    final String operationName = "delete";
    BlocksMapUpdateInfo toRemovedBlocks = null;
    checkOperation(OperationCategory.WRITE);
    final FSPermissionChecker pc = getPermissionChecker();
    FSPermissionChecker.setOperationType(operationName);
    boolean ret = false;
    try {
      writeLock(RwLockMode.GLOBAL);
      try {
        // Re-check under the lock: HA state may have changed.
        checkOperation(OperationCategory.WRITE);
        checkNameNodeSafeMode("Cannot delete " + src);
        toRemovedBlocks = FSDirDeleteOp.delete(
            this, pc, src, recursive, logRetryCache);
        // A null result means nothing was deleted.
        ret = toRemovedBlocks != null;
      } finally {
        writeUnlock(RwLockMode.GLOBAL, operationName, getLockReportInfoSupplier(src));
      }
    } catch (AccessControlException e) {
      logAuditEvent(false, operationName, src);
      throw e;
    }
    getEditLog().logSync();
    logAuditEvent(ret, operationName, src);
    // Block removal is deferred until after the edit-log sync and performed
    // asynchronously via the marked-delete queue.
    if (toRemovedBlocks != null) {
      blockManager.addBLocksToMarkedDeleteQueue(
          toRemovedBlocks.getToDeleteList());
    }
    return ret;
  }
  /**
   * Returns a permission checker for the current caller, delegating to the
   * directory tree.
   *
   * @throws AccessControlException if the caller's identity cannot be resolved
   */
  FSPermissionChecker getPermissionChecker()
      throws AccessControlException {
    return dir.getPermissionChecker();
  }
/**
* Remove leases and inodes related to a given path
* @param removedUCFiles INodes whose leases need to be released
* @param removedINodes Containing the list of inodes to be removed from
* inodesMap
* @param acquireINodeMapLock Whether to acquire the lock for inode removal
*/
void removeLeasesAndINodes(List<Long> removedUCFiles,
List<INode> removedINodes,
final boolean acquireINodeMapLock) {
assert hasWriteLock(RwLockMode.FS);
for(long i : removedUCFiles) {
leaseManager.removeLease(i);
}
// remove inodes from inodesMap
if (removedINodes != null) {
if (acquireINodeMapLock) {
dir.writeLock();
}
try {
dir.removeFromInodeMap(removedINodes);
} finally {
if (acquireINodeMapLock) {
dir.writeUnlock();
}
}
removedINodes.clear();
}
}
  /**
   * Get the file info for a specific file.
   *
   * @param src The string representation of the path to the file
   * @param resolveLink whether to throw UnresolvedLinkException
   *        if src refers to a symlink
   *
   * @param needLocation Include {@link LocatedBlocks} in result.
   * @param needBlockToken Include block tokens in {@link LocatedBlocks}
   * @throws AccessControlException if access is denied
   * @throws UnresolvedLinkException if a symlink is encountered.
   *
   * @return object containing information regarding the file
   *         or null if file not found
   * @throws StandbyException
   */
  HdfsFileStatus getFileInfo(final String src, boolean resolveLink,
      boolean needLocation, boolean needBlockToken) throws IOException {
    // if the client requests block tokens, then it can read data blocks
    // and should appear in the audit log as if getBlockLocations had been
    // called
    final String operationName = needBlockToken ? "open" : "getfileinfo";
    checkOperation(OperationCategory.READ);
    HdfsFileStatus stat = null;
    final FSPermissionChecker pc = getPermissionChecker();
    FSPermissionChecker.setOperationType(operationName);
    try {
      readLock(RwLockMode.FS);
      try {
        // Re-check under the lock: HA state may have changed.
        checkOperation(OperationCategory.READ);
        stat = FSDirStatAndListingOp.getFileInfo(
            dir, pc, src, resolveLink, needLocation, needBlockToken);
      } finally {
        readUnlock(RwLockMode.FS, operationName, getLockReportInfoSupplier(src));
      }
    } catch (AccessControlException e) {
      logAuditEvent(false, operationName, src);
      throw e;
    }
    // On an Observer, verify the returned block locations are current enough
    // to serve to the client.
    if (needLocation && isObserver() && stat instanceof HdfsLocatedFileStatus) {
      LocatedBlocks lbs = ((HdfsLocatedFileStatus) stat).getLocatedBlocks();
      checkBlockLocationsWhenObserver(lbs, src);
    }
    logAuditEvent(true, operationName, src);
    return stat;
  }
  /**
   * Returns true if the file is closed.
   *
   * @param src path of the file to check
   * @throws IOException if the path cannot be resolved or access is denied
   */
  boolean isFileClosed(final String src) throws IOException {
    final String operationName = "isFileClosed";
    checkOperation(OperationCategory.READ);
    final FSPermissionChecker pc = getPermissionChecker();
    FSPermissionChecker.setOperationType(operationName);
    boolean success = false;
    try {
      readLock(RwLockMode.FS);
      try {
        // Re-check under the lock: HA state may have changed.
        checkOperation(OperationCategory.READ);
        success = FSDirStatAndListingOp.isFileClosed(dir, pc, src);
      } finally {
        readUnlock(RwLockMode.FS, operationName, getLockReportInfoSupplier(src));
      }
    } catch (AccessControlException e) {
      logAuditEvent(false, operationName, src);
      throw e;
    }
    // Note: only a positive result is audited as success.
    if (success) {
      logAuditEvent(true, operationName, src);
    }
    return success;
  }
  /**
   * Create all the necessary directories.
   *
   * @param src path of the directory to create
   * @param permissions permissions to apply to the created directories
   * @param createParent whether missing parent directories should be created
   * @return always true on success (failure is reported via exceptions)
   * @throws IOException if in safe mode, access is denied, or creation fails
   */
  boolean mkdirs(String src, PermissionStatus permissions,
      boolean createParent) throws IOException {
    final String operationName = "mkdirs";
    FileStatus auditStat = null;
    checkOperation(OperationCategory.WRITE);
    final FSPermissionChecker pc = getPermissionChecker();
    FSPermissionChecker.setOperationType(operationName);
    try {
      writeLock(RwLockMode.FS);
      try {
        // Re-check under the lock: HA state may have changed.
        checkOperation(OperationCategory.WRITE);
        checkNameNodeSafeMode("Cannot create directory " + src);
        auditStat = FSDirMkdirOp.mkdirs(this, pc, src, permissions,
            createParent);
      } finally {
        writeUnlock(RwLockMode.FS, operationName,
            getLockReportInfoSupplier(src, null, auditStat));
      }
    } catch (AccessControlException e) {
      logAuditEvent(false, operationName, src);
      throw e;
    }
    getEditLog().logSync();
    logAuditEvent(true, operationName, src, null, auditStat);
    return true;
  }
  /**
   * Get the content summary for a specific file/dir.
   *
   * @param src The string representation of the path to the file
   *
   * @throws AccessControlException if access is denied
   * @throws UnresolvedLinkException if a symlink is encountered.
   * @throws FileNotFoundException if no file exists
   * @throws StandbyException
   * @throws IOException for issues with writing to the audit log
   *
   * @return object containing information regarding the file
   *         or null if file not found
   */
  ContentSummary getContentSummary(final String src) throws IOException {
    checkOperation(OperationCategory.READ);
    final String operationName = "contentSummary";
    ContentSummary cs;
    final FSPermissionChecker pc = getPermissionChecker();
    FSPermissionChecker.setOperationType(operationName);
    try {
      readLock(RwLockMode.GLOBAL);
      try {
        // Re-check under the lock: HA state may have changed.
        checkOperation(OperationCategory.READ);
        cs = FSDirStatAndListingOp.getContentSummary(dir, pc, src);
      } finally {
        readUnlock(RwLockMode.GLOBAL, operationName, getLockReportInfoSupplier(src));
      }
    } catch (AccessControlException ace) {
      logAuditEvent(false, operationName, src);
      throw ace;
    }
    logAuditEvent(true, operationName, src);
    return cs;
  }
  /**
   * Get the quota usage for a specific file/dir.
   *
   * @param src The string representation of the path to the file
   *
   * @throws AccessControlException if access is denied
   * @throws UnresolvedLinkException if a symlink is encountered.
   * @throws FileNotFoundException if no file exists
   * @throws StandbyException
   * @throws IOException for issues with writing to the audit log
   *
   * @return object containing information regarding the file
   *         or null if file not found
   */
  QuotaUsage getQuotaUsage(final String src) throws IOException {
    checkOperation(OperationCategory.READ);
    final String operationName = "quotaUsage";
    QuotaUsage quotaUsage;
    final FSPermissionChecker pc = getPermissionChecker();
    FSPermissionChecker.setOperationType(operationName);
    try {
      readLock(RwLockMode.GLOBAL);
      try {
        // Re-check under the lock: HA state may have changed.
        checkOperation(OperationCategory.READ);
        quotaUsage = FSDirStatAndListingOp.getQuotaUsage(dir, pc, src);
      } finally {
        readUnlock(RwLockMode.GLOBAL, operationName, getLockReportInfoSupplier(src));
      }
    } catch (AccessControlException ace) {
      logAuditEvent(false, operationName, src);
      throw ace;
    }
    logAuditEvent(true, operationName, src);
    return quotaUsage;
  }
  /**
   * Set the namespace quota and storage space quota for a directory.
   * See {@link ClientProtocol#setQuota(String, long, long, StorageType)} for the
   * contract.
   *
   * Note: This does not support ".inodes" relative path.
   */
  void setQuota(String src, long nsQuota, long ssQuota, StorageType type)
      throws IOException {
    // Per-storage-type and NVDIMM quotas require newer layout versions;
    // reject early if the running layout does not support them.
    if (type != null) {
      requireEffectiveLayoutVersionForFeature(Feature.QUOTA_BY_STORAGE_TYPE);
    }
    if (type == StorageType.NVDIMM) {
      requireEffectiveLayoutVersionForFeature(Feature.NVDIMM_SUPPORT);
    }
    checkOperation(OperationCategory.WRITE);
    final String operationName = getQuotaCommand(nsQuota, ssQuota);
    final FSPermissionChecker pc = getPermissionChecker();
    FSPermissionChecker.setOperationType(operationName);
    // Unless owners may set quotas, this is a superuser-only operation.
    if(!allowOwnerSetQuota) {
      checkSuperuserPrivilege(operationName, src);
    }
    try {
      // Need to compute the current space usage
      writeLock(RwLockMode.GLOBAL);
      try {
        // Re-check under the lock: HA state may have changed.
        checkOperation(OperationCategory.WRITE);
        checkNameNodeSafeMode("Cannot set quota on " + src);
        FSDirAttrOp.setQuota(dir, pc, src, nsQuota, ssQuota, type,
            allowOwnerSetQuota);
      } finally {
        writeUnlock(RwLockMode.GLOBAL, operationName, getLockReportInfoSupplier(src));
      }
    } catch (AccessControlException ace) {
      logAuditEvent(false, operationName, src);
      throw ace;
    }
    getEditLog().logSync();
    logAuditEvent(true, operationName, src);
  }
  /** Persist all metadata about this file.
   * @param src The string representation of the path
   * @param fileId The inode ID that we're fsyncing. Older clients will pass
   *               INodeId.GRANDFATHER_INODE_ID here.
   * @param clientName The string representation of the client
   * @param lastBlockLength The length of the last block
   *                        under construction reported from client.
   * @throws IOException if path does not exist
   */
  void fsync(String src, long fileId, String clientName, long lastBlockLength)
      throws IOException {
    final String operationName = "fsync";
    NameNode.stateChangeLog.info("BLOCK* fsync: " + src + " for " + clientName);
    checkOperation(OperationCategory.WRITE);
    final FSPermissionChecker pc = getPermissionChecker();
    FSPermissionChecker.setOperationType(operationName);
    writeLock(RwLockMode.GLOBAL);
    try {
      // Re-check under the lock: HA state may have changed.
      checkOperation(OperationCategory.WRITE);
      checkNameNodeSafeMode("Cannot fsync file " + src);
      INodesInPath iip = dir.resolvePath(pc, src, fileId);
      src = iip.getPath();
      // Verify the caller still holds the write lease before persisting.
      final INodeFile pendingFile = checkLease(iip, clientName, fileId);
      if (lastBlockLength > 0) {
        // Record the client-reported length of the in-progress last block.
        pendingFile.getFileUnderConstructionFeature().updateLengthOfLastBlock(
            pendingFile, lastBlockLength);
      }
      FSDirWriteFileOp.persistBlocks(dir, src, pendingFile, false);
    } finally {
      writeUnlock(RwLockMode.GLOBAL, operationName);
    }
    getEditLog().logSync();
  }
  /**
   * Move a file that is being written to be immutable.
   * @param src The filename
   * @param lease The lease for the client creating the file
   * @param recoveryLeaseHolder reassign lease to this holder if the last block
   *        needs recovery; keep current holder if null.
   * @throws AlreadyBeingCreatedException if file is waiting to achieve minimal
   *         replication;<br>
   *         RecoveryInProgressException if lease recovery is in progress.<br>
   *         IOException in case of an error.
   * @return true if file has been successfully finalized and closed or
   *         false if block recovery has been initiated. Since the lease owner
   *         has been changed and logged, caller should call logSync().
   */
  boolean internalReleaseLease(Lease lease, String src, INodesInPath iip,
      String recoveryLeaseHolder) throws IOException {
    LOG.info("Recovering " + lease + ", src=" + src);
    assert !isInSafeMode();
    // finalizeINodeFileUnderConstruction needs global write lock.
    assert hasWriteLock(RwLockMode.GLOBAL);
    final INodeFile pendingFile = iip.getLastINode().asFile();
    int nrBlocks = pendingFile.numBlocks();
    BlockInfo[] blocks = pendingFile.getBlocks();
    // Count the leading run of COMPLETE blocks; curBlock ends up as the first
    // non-COMPLETE block (if any).
    int nrCompleteBlocks;
    BlockInfo curBlock = null;
    for(nrCompleteBlocks = 0; nrCompleteBlocks < nrBlocks; nrCompleteBlocks++) {
      curBlock = blocks[nrCompleteBlocks];
      if(!curBlock.isComplete())
        break;
      assert blockManager.hasMinStorage(curBlock) :
        "A COMPLETE block is not minimally replicated in " + src;
    }
    // If there are no incomplete blocks associated with this file,
    // then reap lease immediately and close the file.
    if(nrCompleteBlocks == nrBlocks) {
      finalizeINodeFileUnderConstruction(src, pendingFile,
          iip.getLatestSnapshotId(), false);
      NameNode.stateChangeLog.warn("BLOCK*" +
          " internalReleaseLease: All existing blocks are COMPLETE," +
          " lease removed, file " + src + " closed.");
      return true;  // closed!
    }
    // Only the last and the penultimate blocks may be in non COMPLETE state.
    // If the penultimate block is not COMPLETE, then it must be COMMITTED.
    if(nrCompleteBlocks < nrBlocks - 2 ||
       nrCompleteBlocks == nrBlocks - 2 &&
         curBlock != null &&
         curBlock.getBlockUCState() != BlockUCState.COMMITTED) {
      final String message = "DIR* NameSystem.internalReleaseLease: "
        + "attempt to release a create lock on "
        + src + " but file is already closed.";
      NameNode.stateChangeLog.warn(message);
      throw new IOException(message);
    }
    // The last block is not COMPLETE, and
    // that the penultimate block if exists is either COMPLETE or COMMITTED
    final BlockInfo lastBlock = pendingFile.getLastBlock();
    BlockUCState lastBlockState = lastBlock.getBlockUCState();
    BlockInfo penultimateBlock = pendingFile.getPenultimateBlock();
    // If penultimate block doesn't exist then its minReplication is met
    boolean penultimateBlockMinStorage = penultimateBlock == null ||
        blockManager.hasMinStorage(penultimateBlock);
    switch(lastBlockState) {
    case COMPLETE:
      assert false : "Already checked that the last block is incomplete";
      break;
    case COMMITTED:
      // Close file if committed blocks are minimally replicated
      if(penultimateBlockMinStorage &&
          blockManager.hasMinStorage(lastBlock)) {
        finalizeINodeFileUnderConstruction(src, pendingFile,
            iip.getLatestSnapshotId(), false);
        NameNode.stateChangeLog.warn("BLOCK*" +
            " internalReleaseLease: Committed blocks are minimally" +
            " replicated, lease removed, file" + src + " closed.");
        return true;  // closed!
      }
      // Cannot close file right now, since some blocks
      // are not yet minimally replicated.
      // This may potentially cause infinite loop in lease recovery
      // if there are no valid replicas on data-nodes.
      String message = "DIR* NameSystem.internalReleaseLease: " +
          "Failed to release lease for file " + src +
          ". Committed blocks are waiting to be minimally replicated.";
      NameNode.stateChangeLog.warn(message);
      if (!penultimateBlockMinStorage) {
        throw new AlreadyBeingCreatedException(message);
      }
      // Intentionally fall through to UNDER_RECOVERY so BLOCK_RECOVERY is
      // attempted
    case UNDER_CONSTRUCTION:
    case UNDER_RECOVERY:
      BlockUnderConstructionFeature uc =
          lastBlock.getUnderConstructionFeature();
      // determine if last block was intended to be truncated
      BlockInfo recoveryBlock = uc.getTruncateBlock();
      boolean truncateRecovery = recoveryBlock != null;
      // copy-on-truncate: the truncated data is being recovered into a block
      // with a different id than the current last block.
      boolean copyOnTruncate = truncateRecovery &&
          recoveryBlock.getBlockId() != lastBlock.getBlockId();
      assert !copyOnTruncate ||
          recoveryBlock.getBlockId() < lastBlock.getBlockId() &&
          recoveryBlock.getGenerationStamp() < lastBlock.getGenerationStamp() &&
          recoveryBlock.getNumBytes() > lastBlock.getNumBytes() :
            "wrong recoveryBlock";
      // setup the last block locations from the blockManager if not known
      if (uc.getNumExpectedLocations() == 0) {
        uc.setExpectedLocations(lastBlock, blockManager.getStorages(lastBlock),
            lastBlock.getBlockType());
      }
      // For striped blocks, recovery needs at least the real data-block count.
      int minLocationsNum = 1;
      if (lastBlock.isStriped()) {
        minLocationsNum = ((BlockInfoStriped) lastBlock).getRealDataBlockNum();
      }
      if (uc.getNumExpectedLocations() < minLocationsNum &&
          lastBlock.getNumBytes() == 0) {
        // There is no datanode reported to this block.
        // may be client have crashed before writing data to pipeline.
        // This blocks doesn't need any recovery.
        // We can remove this block and close the file.
        BlockInfo lastBlockInfo = pendingFile.removeLastBlock(lastBlock);
        if (lastBlockInfo != null) {
          blockManager.removeBlock(lastBlockInfo);
        }
        finalizeINodeFileUnderConstruction(src, pendingFile,
            iip.getLatestSnapshotId(), false);
        if (uc.getNumExpectedLocations() == 0) {
          // If uc.getNumExpectedLocations() is 0, regardless of whether it
          // is a striped block or not, we should consider it as an empty block.
          NameNode.stateChangeLog.warn("BLOCK* internalReleaseLease: "
              + "Removed empty last block and closed file " + src);
        } else {
          // If uc.getNumExpectedLocations() is greater than 0, it means that
          // minLocationsNum must be greater than 1, so this must be a striped
          // block.
          NameNode.stateChangeLog.warn("BLOCK* internalReleaseLease: "
              + "Removed last unrecoverable block group and closed file " + src);
        }
        return true;
      }
      // Start recovery of the last block for this file
      // Only do so if there is no ongoing recovery for this block,
      // or the previous recovery for this block timed out.
      if (blockManager.addBlockRecoveryAttempt(lastBlock)) {
        long blockRecoveryId = nextGenerationStamp(
            blockManager.isLegacyBlock(lastBlock));
        if(copyOnTruncate) {
          lastBlock.setGenerationStamp(blockRecoveryId);
        } else if(truncateRecovery) {
          recoveryBlock.setGenerationStamp(blockRecoveryId);
        }
        uc.initializeBlockRecovery(lastBlock, blockRecoveryId, true);
        // Cannot close file right now, since the last block requires recovery.
        // This may potentially cause infinite loop in lease recovery
        // if there are no valid replicas on data-nodes.
        NameNode.stateChangeLog.warn(
            "DIR* NameSystem.internalReleaseLease: " +
            "File " + src + " has not been closed." +
            " Lease recovery is in progress. " +
            "RecoveryId = " + blockRecoveryId + " for block " + lastBlock);
      }
      // Hand the lease to the recovery holder (if given) so the recovering
      // client can finish the file; otherwise renew the current lease.
      lease = reassignLease(lease, src, recoveryLeaseHolder, pendingFile);
      if (recoveryLeaseHolder == null) {
        leaseManager.renewLease(lease);
      }
      break;
    }
    return false;
  }
private Lease reassignLease(Lease lease, String src, String newHolder,
INodeFile pendingFile) {
assert hasWriteLock(RwLockMode.FS);
if(newHolder == null)
return lease;
// The following transaction is not synced. Make sure it's sync'ed later.
logReassignLease(lease.getHolder(), src, newHolder);
return reassignLeaseInternal(lease, newHolder, pendingFile);
}
  /**
   * Updates the under-construction file's client name and moves the lease to
   * {@code newHolder} in the lease manager. Does not write to the edit log;
   * see {@link #reassignLease}.
   */
  Lease reassignLeaseInternal(Lease lease, String newHolder, INodeFile pendingFile) {
    assert hasWriteLock(RwLockMode.FS);
    pendingFile.getFileUnderConstructionFeature().setClientName(newHolder);
    return leaseManager.reassignLease(lease, pendingFile, newHolder);
  }
  /**
   * Commits or completes the last block of an under-construction file via the
   * block manager.
   *
   * @param commitBlock the block, with length/genstamp, as reported by the client
   * @throws IOException if the block manager rejects the commit
   */
  void commitOrCompleteLastBlock(
      final INodeFile fileINode, final INodesInPath iip,
      final Block commitBlock) throws IOException {
    assert hasWriteLock(RwLockMode.GLOBAL);
    Preconditions.checkArgument(fileINode.isUnderConstruction());
    blockManager.commitOrCompleteLastBlock(fileINode, commitBlock, iip);
  }
void addCommittedBlocksToPending(final INodeFile pendingFile) {
final BlockInfo[] blocks = pendingFile.getBlocks();
int i = blocks.length - numCommittedAllowed;
if (i < 0) {
i = 0;
}
for(; i < blocks.length; i++) {
final BlockInfo b = blocks[i];
if (b != null && b.getBlockUCState() == BlockUCState.COMMITTED) {
// b is COMMITTED but not yet COMPLETE, add it to pending replication.
blockManager.addExpectedReplicasToPending(b);
}
}
}
  /**
   * Finalize an under-construction file: remove its UC feature, release its
   * lease, persist the close in the edit log, and trigger a redundancy check.
   *
   * @param latestSnapshot latest snapshot id to record the modification against
   * @param allowCommittedBlock whether trailing COMMITTED blocks may remain
   * @throws IOException if the file is not under construction
   */
  void finalizeINodeFileUnderConstruction(String src, INodeFile pendingFile,
      int latestSnapshot, boolean allowCommittedBlock) throws IOException {
    assert hasWriteLock(RwLockMode.GLOBAL);
    FileUnderConstructionFeature uc = pendingFile.getFileUnderConstructionFeature();
    if (uc == null) {
      throw new IOException("Cannot finalize file " + src
          + " because it is not under construction");
    }
    pendingFile.recordModification(latestSnapshot);
    // The file is no longer pending.
    // Create permanent INode, update blocks. No need to replace the inode here
    // since we just remove the uc feature from pendingFile
    pendingFile.toCompleteFile(now(),
        allowCommittedBlock? numCommittedAllowed: 0,
        blockManager.getMinReplication());
    leaseManager.removeLease(uc.getClientName(), pendingFile);
    // close file and persist block allocations for this file
    closeFile(src, pendingFile);
    // Closing may change the file's effective replication needs; re-check.
    blockManager.checkRedundancy(pendingFile);
  }
  /** Returns the stored {@link BlockInfo} for the given block, via the block manager. */
  @VisibleForTesting
  BlockInfo getStoredBlock(Block block) {
    return blockManager.getStoredBlock(block);
  }
  /**
   * Returns true if the block collection with the given id refers to a file
   * that now exists only in a snapshot (i.e. an under-construction file no
   * longer reachable through the live namespace).
   */
  @Override
  public boolean isInSnapshot(long blockCollectionID) {
    assert hasReadLock(RwLockMode.FS);
    final INodeFile bc = getBlockCollection(blockCollectionID);
    // Only under-construction files are of interest here.
    if (bc == null || !bc.isUnderConstruction()) {
      return false;
    }
    String fullName = bc.getName();
    try {
      if (fullName != null && fullName.startsWith(Path.SEPARATOR)
          && dir.getINode(fullName, DirOp.READ) == bc) {
        // If file exists in normal path then no need to look in snapshot
        return false;
      }
    } catch (IOException e) {
      // the snapshot path and current path may contain symlinks, ancestor
      // dirs replaced by files, etc.
      LOG.error("Error while resolving the path : " + fullName, e);
      return false;
    }
    /*
     * 1. if bc is under construction and also with snapshot, and
     * bc is not in the current fsdirectory tree, bc must represent a snapshot
     * file.
     * 2. if fullName is not an absolute path, bc cannot be existent in the
     * current fsdirectory tree.
     * 3. if bc is not the current node associated with fullName, bc must be a
     * snapshot inode.
     */
    return true;
  }
  /** Convenience overload: look up the file owning {@code b} by its collection id. */
  INodeFile getBlockCollection(BlockInfo b) {
    return getBlockCollection(b.getBlockCollectionId());
  }
  /**
   * Resolves a block-collection id to its owning file inode, or null if the
   * inode no longer exists. Requires the FS read lock.
   */
  @Override
  public INodeFile getBlockCollection(long id) {
    assert hasReadLock(RwLockMode.FS)
        : "Accessing INode id = " + id + " without read lock";
    INode inode = getFSDirectory().getInode(id);
    return inode == null ? null : inode.asFile();
  }
  /**
   * Commit the outcome of a block recovery round for {@code oldBlock}.
   * Depending on the flags this updates the last block's generation stamp
   * and length, removes the block entirely ({@code deleteblock}), and/or
   * closes the owning file ({@code closeFile}).
   *
   * @param oldBlock the block as known before recovery
   * @param newgenerationstamp new generation stamp; must equal the recovery
   *        id recorded on the block's under-construction feature
   * @param newlength new length of the recovered block
   * @param closeFile whether to close the file after committing
   * @param deleteblock whether to remove the last block instead of updating it
   * @param newtargets datanodes now holding a replica of the recovered block
   * @param newtargetstorages storage ids matching {@code newtargets} by index
   * @throws IOException if the block cannot be found, belongs to a deleted
   *         file, or the recovery id does not match
   */
  void commitBlockSynchronization(ExtendedBlock oldBlock,
      long newgenerationstamp, long newlength,
      boolean closeFile, boolean deleteblock, DatanodeID[] newtargets,
      String[] newtargetstorages) throws IOException {
    LOG.info("commitBlockSynchronization(oldBlock=" + oldBlock
        + ", newgenerationstamp=" + newgenerationstamp
        + ", newlength=" + newlength
        + ", newtargets=" + Arrays.asList(newtargets)
        + ", closeFile=" + closeFile
        + ", deleteBlock=" + deleteblock
        + ")");
    checkOperation(OperationCategory.WRITE);
    final String src;
    writeLock(RwLockMode.GLOBAL);
    boolean copyTruncate = false;
    BlockInfo truncatedBlock = null;
    try {
      checkOperation(OperationCategory.WRITE);
      // If a DN tries to commit to the standby, the recovery will
      // fail, and the next retry will succeed on the new NN.
      checkNameNodeSafeMode(
          "Cannot commitBlockSynchronization while in safe mode");
      final BlockInfo storedBlock = getStoredBlock(
          ExtendedBlock.getLocalBlock(oldBlock));
      if (storedBlock == null) {
        if (deleteblock) {
          // This may be a retry attempt so ignore the failure
          // to locate the block.
          LOG.debug("Block (={}) not found", oldBlock);
          return;
        } else {
          throw new IOException("Block (=" + oldBlock + ") not found");
        }
      }
      // Capture the pre-recovery generation stamp and length; they are used
      // below to identify replicas that still carry the stale values.
      final long oldGenerationStamp = storedBlock.getGenerationStamp();
      final long oldNumBytes = storedBlock.getNumBytes();
      //
      // The implementation of delete operation (see @deleteInternal method)
      // first removes the file paths from namespace, and delays the removal
      // of blocks to later time for better performance. When
      // commitBlockSynchronization (this method) is called in between, the
      // blockCollection of storedBlock could have been assigned to null by
      // the delete operation, throw IOException here instead of NPE; if the
      // file path is already removed from namespace by the delete operation,
      // throw FileNotFoundException here, so not to proceed to the end of
      // this method to add a CloseOp to the edit log for an already deleted
      // file (See HDFS-6825).
      //
      if (storedBlock.isDeleted()) {
        throw new IOException("The blockCollection of " + storedBlock
            + " is null, likely because the file owning this block was"
            + " deleted and the block removal is delayed");
      }
      final INodeFile iFile = getBlockCollection(storedBlock);
      src = iFile.getFullPathName();
      if (isFileDeleted(iFile)) {
        throw new FileNotFoundException("File not found: "
            + src + ", likely due to delayed block removal");
      }
      // A complete last block on a non-UC file means there is nothing left
      // to commit (e.g. a retried RPC after a successful commit).
      if ((!iFile.isUnderConstruction() || storedBlock.isComplete()) &&
          iFile.getLastBlock().isComplete()) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Unexpected block (={}) since the file (={}) is not under construction",
              oldBlock, iFile.getLocalName());
        }
        return;
      }
      truncatedBlock = iFile.getLastBlock();
      final long recoveryId = truncatedBlock.getUnderConstructionFeature()
          .getBlockRecoveryId();
      // When the last block's id differs from the stored block's id, this is
      // a truncate that copied the block to a new id ("copy on truncate").
      copyTruncate = truncatedBlock.getBlockId() != storedBlock.getBlockId();
      if(recoveryId != newgenerationstamp) {
        throw new IOException("The recovery id " + newgenerationstamp
            + " does not match current recovery id "
            + recoveryId + " for block " + oldBlock);
      }
      if (deleteblock) {
        Block blockToDel = ExtendedBlock.getLocalBlock(oldBlock);
        boolean remove = iFile.removeLastBlock(blockToDel) != null;
        if (remove) {
          blockManager.removeBlock(storedBlock);
        }
      } else {
        // update last block
        if(!copyTruncate) {
          storedBlock.setGenerationStamp(newgenerationstamp);
          storedBlock.setNumBytes(newlength);
        }
        // Find the target DatanodeStorageInfos. If not found because of invalid
        // or empty DatanodeID/StorageID, the slot of same offset in dsInfos is
        // null
        final DatanodeStorageInfo[] dsInfos = blockManager.getDatanodeManager().
            getDatanodeStorageInfos(newtargets, newtargetstorages,
                "src=%s, oldBlock=%s, newgenerationstamp=%d, newlength=%d",
                src, oldBlock, newgenerationstamp, newlength);
        if (closeFile && dsInfos != null) {
          // the file is getting closed. Insert block locations into blockManager.
          // Otherwise fsck will report these blocks as MISSING, especially if the
          // blocksReceived from Datanodes take a long time to arrive.
          for (int i = 0; i < dsInfos.length; i++) {
            if (dsInfos[i] != null) {
              if(copyTruncate) {
                dsInfos[i].addBlock(truncatedBlock, truncatedBlock);
              } else {
                Block bi = new Block(storedBlock);
                if (storedBlock.isStriped()) {
                  // Striped: each target storage records its own internal
                  // block id (base id offset by the storage index).
                  bi.setBlockId(bi.getBlockId() + i);
                }
                dsInfos[i].addBlock(storedBlock, bi);
              }
            }
          }
        }
        // add pipeline locations into the INodeUnderConstruction
        if(copyTruncate) {
          iFile.convertLastBlockToUC(truncatedBlock, dsInfos);
        } else {
          iFile.convertLastBlockToUC(storedBlock, dsInfos);
          if (closeFile) {
            // Replicas still carrying the pre-recovery generation stamp /
            // length are stale; mark them corrupt.
            blockManager.markBlockReplicasAsCorrupt(oldBlock.getLocalBlock(),
                storedBlock, oldGenerationStamp, oldNumBytes,
                dsInfos);
          }
        }
      }
      if (closeFile) {
        if(copyTruncate) {
          closeFileCommitBlocks(src, iFile, truncatedBlock);
          if(!iFile.isBlockInLatestSnapshot(storedBlock)) {
            blockManager.removeBlock(storedBlock);
          }
        } else {
          closeFileCommitBlocks(src, iFile, storedBlock);
        }
      } else {
        // If this commit does not want to close the file, persist blocks
        FSDirWriteFileOp.persistBlocks(dir, src, iFile, false);
      }
      blockManager.successfulBlockRecovery(storedBlock);
    } finally {
      writeUnlock(RwLockMode.GLOBAL, "commitBlockSynchronization");
    }
    // Flush the edit log outside the namesystem lock.
    getEditLog().logSync();
    if (closeFile) {
      LOG.info("commitBlockSynchronization(oldBlock=" + oldBlock
          + ", file=" + src
          + (copyTruncate ? ", newBlock=" + truncatedBlock
              : ", newgenerationstamp=" + newgenerationstamp)
          + ", newlength=" + newlength
          + ", newtargets=" + Arrays.asList(newtargets) + ") successful");
    } else {
      LOG.info("commitBlockSynchronization(" + oldBlock + ") successful");
    }
  }
  /**
   * Commit (and, if it has the minimum number of replicas, complete) the
   * last block of an open file, then finalize the file: release its lease
   * and close it.
   *
   * @param src full path of the file being closed
   * @param pendingFile open file that needs to be closed
   * @param storedBlock last block
   * @throws IOException on error
   */
  @VisibleForTesting
  void closeFileCommitBlocks(String src, INodeFile pendingFile,
      BlockInfo storedBlock) throws IOException {
    final INodesInPath iip = INodesInPath.fromINode(pendingFile);
    // commit the last block and complete it if it has minimum replicas
    commitOrCompleteLastBlock(pendingFile, iip, storedBlock);
    // remove lease, close file; finalize against the latest snapshot id
    int s = Snapshot.findLatestSnapshot(pendingFile, Snapshot.CURRENT_STATE_ID);
    finalizeINodeFileUnderConstruction(src, pendingFile, s, false);
  }
  /**
   * Renew the lease(s) held by the given client.
   *
   * @param holder client name whose leases should be renewed
   * @throws IOException if the namenode is in safe mode or the operation
   *         category check fails
   */
  void renewLease(String holder) throws IOException {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot renew lease for " + holder);
    // fsn is not mutated so lock is not required. the leaseManager is also
    // thread-safe.
    leaseManager.renewLease(holder);
  }
  /**
   * Get a partial listing of the indicated directory.
   *
   * @param src the directory name
   * @param startAfter the name to start after
   * @param needLocation if blockLocations need to be returned
   * @return a partial listing starting after startAfter
   *
   * @throws AccessControlException if access is denied
   * @throws UnresolvedLinkException if symbolic link is encountered
   * @throws IOException if other I/O error occurred
   */
  DirectoryListing getListing(String src, byte[] startAfter,
      boolean needLocation)
      throws IOException {
    checkOperation(OperationCategory.READ);
    final String operationName = "listStatus";
    DirectoryListing dl = null;
    final FSPermissionChecker pc = getPermissionChecker();
    FSPermissionChecker.setOperationType(operationName);
    try {
      // The listing itself happens under the FS read lock.
      readLock(RwLockMode.FS);
      try {
        checkOperation(NameNode.OperationCategory.READ);
        dl = getListingInt(dir, pc, src, startAfter, needLocation);
      } finally {
        readUnlock(RwLockMode.FS, operationName, getLockReportInfoSupplier(src));
      }
    } catch (AccessControlException e) {
      // Audit the denial before propagating it.
      logAuditEvent(false, operationName, src);
      throw e;
    }
    // On an observer namenode, verify the block locations of located file
    // statuses before returning them to the client.
    if (dl != null && needLocation && isObserver()) {
      for (HdfsFileStatus fs : dl.getPartialListing()) {
        if (fs instanceof HdfsLocatedFileStatus) {
          LocatedBlocks lbs = ((HdfsLocatedFileStatus) fs).getLocatedBlocks();
          checkBlockLocationsWhenObserver(lbs, fs.toString());
        }
      }
    }
    logAuditEvent(true, operationName, src);
    return dl;
  }
public byte[] getSrcPathsHash(String[] srcs) {
synchronized (digest) {
for (String src : srcs) {
digest.update(src.getBytes(StandardCharsets.UTF_8));
}
byte[] result = digest.digest();
digest.reset();
return result;
}
}
  /**
   * List multiple source paths in one batched call. Iteration resumes from
   * {@code startAfter} — an opaque, serialized {@code BatchedListingKeyProto}
   * carrying a checksum of the paths, the path index to resume at, and the
   * per-directory continuation key — and stops once the aggregate entry
   * count reaches the directory list limit.
   *
   * @param srcs source paths to list, in order; must match the paths of the
   *        call that produced {@code startAfter} (enforced via checksum)
   * @param startAfter opaque continuation key from a previous call, or an
   *        empty array to start from the beginning
   * @param needLocation if block locations need to be returned
   * @return the batched listing, with a fresh continuation key if more
   *         entries remain
   * @throws IOException if listing fails
   */
  BatchedDirectoryListing getBatchedListing(String[] srcs, byte[] startAfter,
      boolean needLocation) throws IOException {
    if (srcs.length > this.batchedListingLimit) {
      String msg = String.format("Too many source paths (%d > %d)",
          srcs.length, batchedListingLimit);
      throw new IllegalArgumentException(msg);
    }
    // Parse the startAfter key if present
    int srcsIndex = 0;
    byte[] indexStartAfter = new byte[0];
    if (startAfter.length > 0) {
      BatchedListingKeyProto startAfterProto =
          BatchedListingKeyProto.parseFrom(startAfter);
      // Validate that the passed paths match the checksum from key
      Preconditions.checkArgument(
          Arrays.equals(
              startAfterProto.getChecksum().toByteArray(),
              getSrcPathsHash(srcs)));
      srcsIndex = startAfterProto.getPathIndex();
      indexStartAfter = startAfterProto.getStartAfter().toByteArray();
      // Special case: if the indexStartAfter key is an empty array, it
      // means the last element we listed was a file, not a directory.
      // Skip it so we don't list it twice.
      if (indexStartAfter.length == 0) {
        srcsIndex++;
      }
    }
    final int startSrcsIndex = srcsIndex;
    final String operationName = "listStatus";
    final FSPermissionChecker pc = getPermissionChecker();
    BatchedDirectoryListing bdl;
    checkOperation(OperationCategory.READ);
    readLock(RwLockMode.FS);
    try {
      checkOperation(NameNode.OperationCategory.READ);
      // List all directories from the starting index until we've reached
      // ls limit OR finished listing all srcs.
      LinkedHashMap<Integer, HdfsPartialListing> listings =
          Maps.newLinkedHashMap();
      DirectoryListing lastListing = null;
      int numEntries = 0;
      for (; srcsIndex < srcs.length; srcsIndex++) {
        String src = srcs[srcsIndex];
        HdfsPartialListing listing;
        try {
          DirectoryListing dirListing =
              getListingInt(dir, pc, src, indexStartAfter, needLocation);
          if (dirListing == null) {
            throw new FileNotFoundException("Path " + src + " does not exist");
          }
          // On an observer namenode, verify returned block locations.
          if (needLocation && isObserver()) {
            for (HdfsFileStatus fs : dirListing.getPartialListing()) {
              if (fs instanceof HdfsLocatedFileStatus) {
                LocatedBlocks lbs = ((HdfsLocatedFileStatus) fs).getLocatedBlocks();
                checkBlockLocationsWhenObserver(lbs, fs.toString());
              }
            }
          }
          listing = new HdfsPartialListing(
              srcsIndex, Lists.newArrayList(dirListing.getPartialListing()));
          numEntries += listing.getPartialListing().size();
          lastListing = dirListing;
        } catch (Exception e) {
          if (e instanceof ObserverRetryOnActiveException) {
            throw (ObserverRetryOnActiveException) e;
          }
          if (e instanceof AccessControlException) {
            logAuditEvent(false, operationName, src);
          }
          // A per-path failure is embedded in that path's partial listing as
          // a RemoteException instead of failing the whole batch.
          listing = new HdfsPartialListing(
              srcsIndex,
              new RemoteException(
                  e.getClass().getCanonicalName(),
                  e.getMessage()));
          lastListing = null;
          LOG.info("Exception listing src {}", src, e);
        }
        listings.put(srcsIndex, listing);
        // Null out the indexStartAfter after the first time.
        // If we get a partial result, we're done iterating because we're also
        // over the list limit.
        if (indexStartAfter.length != 0) {
          indexStartAfter = new byte[0];
        }
        // Terminate if we've reached the maximum listing size
        if (numEntries >= dir.getListLimit()) {
          break;
        }
      }
      HdfsPartialListing[] partialListingArray =
          listings.values().toArray(new HdfsPartialListing[] {});
      // Check whether there are more dirs/files to be listed, and if so setting
      // up the index to start within the first dir to be listed next time.
      if (srcsIndex >= srcs.length) {
        // If the loop finished normally, there are no more srcs and we're done.
        bdl = new BatchedDirectoryListing(
            partialListingArray,
            false,
            new byte[0]);
      } else if (srcsIndex == srcs.length-1 &&
          lastListing != null &&
          !lastListing.hasMore()) {
        // If we're on the last srcsIndex, then we might be done exactly on an
        // lsLimit boundary.
        bdl = new BatchedDirectoryListing(
            partialListingArray,
            false,
            new byte[0]
        );
      } else {
        byte[] lastName = lastListing != null && lastListing.getLastName() !=
            null ? lastListing.getLastName() : new byte[0];
        // Build the continuation key: paths checksum + resume index + the
        // last listed name within that directory.
        BatchedListingKeyProto proto = BatchedListingKeyProto.newBuilder()
            .setChecksum(ByteString.copyFrom(getSrcPathsHash(srcs)))
            .setPathIndex(srcsIndex)
            .setStartAfter(ByteString.copyFrom(lastName))
            .build();
        byte[] returnedStartAfter = proto.toByteArray();
        // Set the startAfter key if the last listing has more entries
        bdl = new BatchedDirectoryListing(
            partialListingArray,
            true,
            returnedStartAfter);
      }
    } catch(ObserverRetryOnActiveException e){
      throw e;
    } finally {
      readUnlock(RwLockMode.FS, operationName,
          getLockReportInfoSupplier(Arrays.toString(srcs)));
    }
    // Audit events are logged after the lock is released.
    for (int i = startSrcsIndex; i < srcsIndex; i++) {
      logAuditEvent(true, operationName, srcs[i]);
    }
    return bdl;
  }
/////////////////////////////////////////////////////////
//
// These methods are called by datanodes
//
/////////////////////////////////////////////////////////
  /**
   * Register Datanode.
   * <p>
   * The purpose of registration is to identify whether the new datanode
   * serves a new data storage, and will report new data block copies,
   * which the namenode was not aware of; or the datanode is a replacement
   * node for the data storage that was previously served by a different
   * or the same (in terms of host:port) datanode.
   * The data storages are distinguished by their storageIDs. When a new
   * data storage is reported the namenode issues a new unique storageID.
   * <p>
   * Finally, the namenode returns its namespaceID as the registrationID
   * for the datanodes.
   * namespaceID is a persistent attribute of the name space.
   * The registrationID is checked every time the datanode is communicating
   * with the namenode.
   * Datanodes with inappropriate registrationID are rejected.
   * If the namenode stops, and then restarts it can restore its
   * namespaceID and will continue serving the datanodes that has previously
   * registered with the namenode without restarting the whole cluster.
   *
   * @param nodeReg registration info sent by the datanode
   * @throws IOException if registration fails
   * @see org.apache.hadoop.hdfs.server.datanode.DataNode
   */
  void registerDatanode(DatanodeRegistration nodeReg) throws IOException {
    // Registration mutates block-manager state, so take the BM write lock.
    writeLock(RwLockMode.BM);
    try {
      blockManager.registerDatanode(nodeReg);
    } finally {
      writeUnlock(RwLockMode.BM, "registerDatanode");
    }
  }
  /**
   * Get registrationID for datanodes based on the namespaceID.
   * Derived from the current FSImage storage info.
   *
   * @see #registerDatanode(DatanodeRegistration)
   * @return registration ID
   */
  String getRegistrationID() {
    return Storage.getRegistrationID(getFSImage().getStorage());
  }
  /**
   * The given node has reported in. This method should:
   * 1) Record the heartbeat, so the datanode isn't timed out
   * 2) Adjust usage stats for future block allocation
   *
   * If a substantial amount of time passed since the last datanode
   * heartbeat then request an immediate block report.
   *
   * @return an array of datanode commands
   * @throws IOException
   */
  HeartbeatResponse handleHeartbeat(DatanodeRegistration nodeReg,
      StorageReport[] reports, long cacheCapacity, long cacheUsed,
      int xceiverCount, int xmitsInProgress, int failedVolumes,
      VolumeFailureSummary volumeFailureSummary,
      boolean requestFullBlockReportLease,
      @Nonnull SlowPeerReports slowPeers,
      @Nonnull SlowDiskReports slowDisks)
      throws IOException {
    // Heartbeat processing only needs the BM read lock.
    readLock(RwLockMode.BM);
    try {
      //get datanode commands
      DatanodeCommand[] cmds = blockManager.getDatanodeManager().handleHeartbeat(
          nodeReg, reports, getBlockPoolId(), cacheCapacity, cacheUsed,
          xceiverCount, xmitsInProgress, failedVolumes, volumeFailureSummary,
          slowPeers, slowDisks);
      // A lease id of 0 is returned when no full-block-report lease was
      // requested (or granted).
      long blockReportLeaseId = 0;
      if (requestFullBlockReportLease) {
        blockReportLeaseId = blockManager.requestBlockReportLeaseId(nodeReg);
      }
      //create ha status
      final NNHAStatusHeartbeat haState = new NNHAStatusHeartbeat(
          haContext.getState().getServiceState(),
          getFSImage().getCorrectLastAppliedOrWrittenTxId());
      // Tell the datanode whether it is currently considered a slow node.
      Set<String> slownodes = DatanodeManager.getSlowNodesUuidSet();
      boolean isSlownode = slownodes.contains(nodeReg.getDatanodeUuid());
      return new HeartbeatResponse(cmds, haState, rollingUpgradeInfo,
          blockReportLeaseId, isSlownode);
    } finally {
      readUnlock(RwLockMode.BM, "handleHeartbeat");
    }
  }
  /**
   * Handles a lifeline message sent by a DataNode. This method updates contact
   * information and statistics for the DataNode, so that it doesn't time out.
   * Unlike a heartbeat, this method does not dispatch any commands back to the
   * DataNode for local execution. This method also cannot request a lease for
   * sending a full block report. Lifeline messages are used only as a fallback
   * in case something prevents successful delivery of heartbeat messages.
   * Therefore, the implementation of this method must remain lightweight
   * compared to heartbeat handling. It should avoid lock contention and
   * expensive computation.
   *
   * @param nodeReg registration info for DataNode sending the lifeline
   * @param reports storage reports from DataNode
   * @param cacheCapacity cache capacity at DataNode
   * @param cacheUsed cache used at DataNode
   * @param xceiverCount estimated count of transfer threads running at DataNode
   * @param xmitsInProgress count of transfers running at DataNode
   * @param failedVolumes count of failed volumes at DataNode
   * @param volumeFailureSummary info on failed volumes at DataNode
   * @throws IOException if there is an error
   */
  void handleLifeline(DatanodeRegistration nodeReg, StorageReport[] reports,
      long cacheCapacity, long cacheUsed, int xceiverCount, int xmitsInProgress,
      int failedVolumes, VolumeFailureSummary volumeFailureSummary)
      throws IOException {
    // NOTE(review): xmitsInProgress is accepted but not forwarded to the
    // datanode manager below — confirm this is intentional.
    blockManager.getDatanodeManager().handleLifeline(nodeReg, reports,
        cacheCapacity, cacheUsed, xceiverCount,
        failedVolumes, volumeFailureSummary);
  }
  /**
   * Returns whether or not there were available resources at the last check of
   * resources. The value is cached by {@link #checkAvailableResources()}.
   *
   * @return true if there were sufficient resources available, false otherwise.
   */
  boolean nameNodeHasResourcesAvailable() {
    return hasResourcesAvailable;
  }
/**
* Perform resource checks and cache the results.
*/
void checkAvailableResources() {
long resourceCheckTime = monotonicNow();
Preconditions.checkState(nnResourceChecker != null,
"nnResourceChecker not initialized");
hasResourcesAvailable = nnResourceChecker.hasAvailableDiskSpace();
resourceCheckTime = monotonicNow() - resourceCheckTime;
NameNode.getNameNodeMetrics().addResourceCheckTime(resourceCheckTime);
}
  /**
   * Record the closing of a file by logging a close op to the edit log.
   * Caller must hold the FS write lock.
   *
   * @param path full path of the file being closed
   * @param file the closed file's inode
   */
  private void closeFile(String path, INodeFile file) {
    assert hasWriteLock(RwLockMode.FS);
    // file is closed
    getEditLog().logCloseFile(path, file);
    NameNode.stateChangeLog.debug("closeFile: {} with {} blocks is persisted to the file system",
        path, file.getBlocks().length);
  }
/**
* Periodically calls hasAvailableResources of NameNodeResourceChecker, and if
* there are found to be insufficient resources available, causes the NN to
* enter safe mode. If resources are later found to have returned to
* acceptable levels, this daemon will cause the NN to exit safe mode.
*/
| RecoverLeaseOp |
java | apache__logging-log4j2 | log4j-api/src/main/java/org/apache/logging/log4j/message/Message.java | {
"start": 2278,
"end": 2869
} | interface ____ extends Serializable {
/**
* Gets the Message formatted as a String. Each Message implementation determines the
* appropriate way to format the data encapsulated in the Message. Messages that provide
* more than one way of formatting the Message will implement MultiformatMessage.
* <p>
* When configured to log asynchronously, this method is called before the Message is queued, unless this
* message implements {@link ReusableMessage} or is annotated with {@link AsynchronouslyFormattable}.
* This gives the Message implementation | Message |
java | elastic__elasticsearch | x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/InternalResetTrackingRate.java | {
"start": 1053,
"end": 5753
} | class ____ extends InternalNumericMetricsAggregation.SingleValue implements Rate {
public static final String NAME = "rate_with_resets";
private static final int MILLIS_IN_SECOND = 1_000;
private final double startValue;
private final double endValue;
private final long startTime;
private final long endTime;
private final double resetCompensation;
private final Rounding.DateTimeUnit rateUnit;
protected InternalResetTrackingRate(
String name,
DocValueFormat format,
Map<String, Object> metadata,
double startValue,
double endValue,
long startTime,
long endTime,
double resetCompensation,
Rounding.DateTimeUnit rateUnit
) {
super(name, format, metadata);
this.startValue = startValue;
this.endValue = endValue;
this.startTime = startTime;
this.endTime = endTime;
this.resetCompensation = resetCompensation;
this.rateUnit = Objects.requireNonNull(rateUnit);
}
public InternalResetTrackingRate(StreamInput in) throws IOException {
super(in, false);
this.startValue = in.readDouble();
this.endValue = in.readDouble();
this.startTime = in.readLong();
this.endTime = in.readLong();
this.resetCompensation = in.readDouble();
if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
this.rateUnit = Rounding.DateTimeUnit.resolve(in.readByte());
} else {
this.rateUnit = Rounding.DateTimeUnit.SECOND_OF_MINUTE;
}
}
@Override
public String getWriteableName() {
return NAME;
}
@Override
protected void doWriteTo(StreamOutput out) throws IOException {
out.writeDouble(startValue);
out.writeDouble(endValue);
out.writeLong(startTime);
out.writeLong(endTime);
out.writeDouble(resetCompensation);
if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X) && rateUnit != null) {
out.writeByte(rateUnit.getId());
} else {
out.writeByte(Rounding.DateTimeUnit.SECOND_OF_MINUTE.getId());
}
}
@Override
protected AggregatorReducer getLeaderReducer(AggregationReduceContext reduceContext, int size) {
final List<InternalResetTrackingRate> aggregations = new ArrayList<>(size);
return new AggregatorReducer() {
@Override
public void accept(InternalAggregation aggregation) {
aggregations.add((InternalResetTrackingRate) aggregation);
}
@Override
public InternalAggregation get() {
List<InternalResetTrackingRate> toReduce = aggregations.stream()
.sorted(Comparator.comparingLong(o -> o.startTime))
.toList();
double resetComp = toReduce.get(0).resetCompensation;
double startValue = toReduce.get(0).startValue;
double endValue = toReduce.get(0).endValue;
final int endIndex = toReduce.size() - 1;
for (int i = 1; i < endIndex + 1; i++) {
InternalResetTrackingRate rate = toReduce.get(i);
assert rate.startTime >= toReduce.get(i - 1).endTime;
resetComp += rate.resetCompensation;
if (endValue > rate.startValue) {
resetComp += endValue;
}
endValue = rate.endValue;
}
return new InternalResetTrackingRate(
name,
format,
metadata,
startValue,
endValue,
toReduce.get(0).startTime,
toReduce.get(endIndex).endTime,
resetComp,
toReduce.get(0).rateUnit
);
}
};
}
@Override
public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
return builder.field(CommonFields.VALUE.getPreferredName(), value());
}
@Override
public double value() {
long rateUnitSeconds = rateUnit.getField().getBaseUnit().getDuration().toSeconds();
return (endValue - startValue + resetCompensation) / (endTime - startTime) * MILLIS_IN_SECOND * rateUnitSeconds;
}
@Override
public double getValue() {
return value();
}
boolean includes(InternalResetTrackingRate other) {
return this.startTime < other.startTime && this.endTime > other.endTime;
}
}
| InternalResetTrackingRate |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/rest/messages/checkpoints/TaskCheckpointStatisticsWithSubtaskDetails.java | {
"start": 1457,
"end": 4799
} | class ____ extends TaskCheckpointStatistics {
public static final String FIELD_NAME_SUMMARY = "summary";
public static final String FIELD_NAME_SUBTASKS_CHECKPOINT_STATISTICS = "subtasks";
@JsonProperty(FIELD_NAME_SUMMARY)
private final Summary summary;
@JsonProperty(FIELD_NAME_SUBTASKS_CHECKPOINT_STATISTICS)
private final List<SubtaskCheckpointStatistics> subtaskCheckpointStatistics;
@JsonCreator
public TaskCheckpointStatisticsWithSubtaskDetails(
@JsonProperty(FIELD_NAME_ID) long checkpointId,
@JsonProperty(FIELD_NAME_CHECKPOINT_STATUS) CheckpointStatsStatus checkpointStatus,
@JsonProperty(FIELD_NAME_LATEST_ACK_TIMESTAMP) long latestAckTimestamp,
@JsonProperty(FIELD_NAME_CHECKPOINTED_SIZE) long checkpointedSize,
@JsonProperty(FIELD_NAME_STATE_SIZE) long stateSize,
@JsonProperty(FIELD_NAME_DURATION) long duration,
@JsonProperty(FIELD_NAME_ALIGNMENT_BUFFERED) long alignmentBuffered,
@JsonProperty(FIELD_NAME_PROCESSED_DATA) long processedData,
@JsonProperty(FIELD_NAME_PERSISTED_DATA) long persistedData,
@JsonProperty(FIELD_NAME_NUM_SUBTASKS) int numSubtasks,
@JsonProperty(FIELD_NAME_NUM_ACK_SUBTASKS) int numAckSubtasks,
@JsonProperty(FIELD_NAME_SUMMARY) Summary summary,
@JsonProperty(FIELD_NAME_SUBTASKS_CHECKPOINT_STATISTICS)
List<SubtaskCheckpointStatistics> subtaskCheckpointStatistics) {
super(
checkpointId,
checkpointStatus,
latestAckTimestamp,
checkpointedSize,
stateSize,
duration,
alignmentBuffered,
processedData,
persistedData,
numSubtasks,
numAckSubtasks);
this.summary = Preconditions.checkNotNull(summary);
this.subtaskCheckpointStatistics = Preconditions.checkNotNull(subtaskCheckpointStatistics);
}
public Summary getSummary() {
return summary;
}
public List<SubtaskCheckpointStatistics> getSubtaskCheckpointStatistics() {
return subtaskCheckpointStatistics;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
if (!super.equals(o)) {
return false;
}
TaskCheckpointStatisticsWithSubtaskDetails that =
(TaskCheckpointStatisticsWithSubtaskDetails) o;
return Objects.equals(summary, that.summary)
&& Objects.equals(subtaskCheckpointStatistics, that.subtaskCheckpointStatistics);
}
@Override
public int hashCode() {
return Objects.hash(super.hashCode(), summary, subtaskCheckpointStatistics);
}
// -----------------------------------------------------------------------------------
// Static inner classes
// -----------------------------------------------------------------------------------
/** Summary of the checkpoint statistics for a given task. */
@Schema(name = "TaskCheckpointStatisticsWithSubtaskDetailsSummary")
public static final | TaskCheckpointStatisticsWithSubtaskDetails |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java | {
"start": 39221,
"end": 39287
} | class ____ extends FilterXContentParserWrapper {
| CopyToParser |
java | apache__camel | core/camel-management/src/test/java/org/apache/camel/management/ManagedMBeansLevelContextOnlyTest.java | {
"start": 1136,
"end": 1574
} | class ____ extends ManagedMBeansLevelTestSupport {
public ManagedMBeansLevelContextOnlyTest() {
super(ManagementMBeansLevel.ContextOnly);
}
@Override
void assertResults(Set<ObjectName> contexts, Set<ObjectName> routes, Set<ObjectName> processors) {
assertEquals(1, contexts.size());
assertEquals(0, routes.size());
assertEquals(0, processors.size());
}
}
| ManagedMBeansLevelContextOnlyTest |
java | spring-projects__spring-boot | loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/launch/Launcher.java | {
"start": 3066,
"end": 5188
} | class ____ run
* @param args the incoming arguments
* @throws Exception if the launch fails
*/
protected void launch(ClassLoader classLoader, String mainClassName, String[] args) throws Exception {
Thread.currentThread().setContextClassLoader(classLoader);
Class<?> mainClass = Class.forName(mainClassName, false, classLoader);
Method mainMethod = getMainMethod(mainClass);
mainMethod.setAccessible(true);
if (mainMethod.getParameterCount() == 0) {
mainMethod.invoke(null);
}
else {
mainMethod.invoke(null, new Object[] { args });
}
}
private Method getMainMethod(Class<?> mainClass) throws Exception {
try {
return mainClass.getDeclaredMethod("main", String[].class);
}
catch (NoSuchMethodException ex) {
return mainClass.getDeclaredMethod("main");
}
}
/**
* Returns if the launcher is running in an exploded mode. If this method returns
* {@code true} then only regular JARs are supported and the additional URL and
* ClassLoader support infrastructure can be optimized.
* @return if the jar is exploded.
*/
protected boolean isExploded() {
Archive archive = getArchive();
return (archive != null) && archive.isExploded();
}
ClassPathIndexFile getClassPathIndex(Archive archive) throws IOException {
if (!archive.isExploded()) {
return null; // Regular archives already have a defined order
}
String location = getClassPathIndexFileLocation(archive);
return ClassPathIndexFile.loadIfPossible(archive.getRootDirectory(), location);
}
private String getClassPathIndexFileLocation(Archive archive) throws IOException {
Manifest manifest = archive.getManifest();
Attributes attributes = (manifest != null) ? manifest.getMainAttributes() : null;
String location = (attributes != null) ? attributes.getValue(BOOT_CLASSPATH_INDEX_ATTRIBUTE) : null;
return (location != null) ? location : getEntryPathPrefix() + DEFAULT_CLASSPATH_INDEX_FILE_NAME;
}
/**
* Return the archive being launched or {@code null} if there is no archive.
* @return the launched archive
*/
protected abstract Archive getArchive();
/**
* Returns the main | to |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/query/specification/MutationSpecification.java | {
"start": 2077,
"end": 4990
} | interface ____<T> {
void augment(CriteriaBuilder builder, CommonAbstractCriteria query, Root<T> mutationTarget);
}
/**
* Add an {@linkplain Augmentation augmentation} to the specification.
*
* @param augmentation A function capable of modifying or augmenting a criteria query.
*
* @return {@code this} for method chaining.
*/
MutationSpecification<T> augment(Augmentation<T> augmentation);
/**
* Finalize the building and create the {@linkplain MutationQuery} instance.
*/
@Override
MutationQuery createQuery(Session session);
/**
* Finalize the building and create the {@linkplain MutationQuery} instance.
*/
@Override
MutationQuery createQuery(StatelessSession session);
@Override
MutationSpecification<T> validate(CriteriaBuilder builder);
@Override
TypedQueryReference<Void> reference();
/**
* Returns a specification reference which can be used to programmatically,
* iteratively build a {@linkplain MutationQuery} based on a base HQL statement,
* allowing the addition of {@linkplain #restrict restrictions}.
*
* @param hql The base HQL query (expected to be an {@code update} or {@code delete} query).
* @param mutationTarget The entity which is the target of the mutation.
*
* @param <T> The root entity type for the mutation (the "target").
* {@code mutationTarget} and {@code <T>} are both expected to refer to the mutation target.
*
* @throws IllegalMutationQueryException Only {@code update} and {@code delete} are supported;
* this method will throw an exception if the given HQL query is not an {@code update} or {@code delete}.
*/
static <T> MutationSpecification<T> create(Class<T> mutationTarget, String hql) {
return new MutationSpecificationImpl<>( hql, mutationTarget );
}
/**
* Returns a specification reference which can be used to programmatically,
* iteratively build a {@linkplain MutationQuery} based on the given criteria update,
* allowing the addition of {@linkplain #restrict restrictions}.
*
* @param criteriaUpdate The criteria update query
*
* @param <T> The root entity type for the mutation (the "target").
*
* @see UpdateSpecification#create(CriteriaUpdate)
*/
static <T> MutationSpecification<T> create(CriteriaUpdate<T> criteriaUpdate) {
return new MutationSpecificationImpl<>( criteriaUpdate );
}
/**
* Returns a specification reference which can be used to programmatically,
* iteratively build a {@linkplain MutationQuery} based on the given criteria delete,
* allowing the addition of {@linkplain #restrict restrictions}.
*
* @param criteriaDelete The criteria delete query
*
* @param <T> The root entity type for the mutation (the "target").
*
* @see DeleteSpecification#create(CriteriaDelete)
*/
static <T> MutationSpecification<T> create(CriteriaDelete<T> criteriaDelete) {
return new MutationSpecificationImpl<>( criteriaDelete );
}
}
| Augmentation |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/query/hql/FunctionAndEnumsTest.java | {
"start": 3829,
"end": 4054
} | class ____ {
@Enumerated(EnumType.STRING)
private Level lastLevel;
public EmbeddableThirdLevel() {
}
public EmbeddableThirdLevel(Level lastLevel) {
this.lastLevel = lastLevel;
}
}
public | EmbeddableThirdLevel |
java | apache__maven | impl/maven-cli/src/main/java/org/apache/maven/cling/invoker/cisupport/TeamcityCIDetector.java | {
"start": 977,
"end": 1545
} | class ____ implements CIDetector {
public static final String NAME = "TeamCity";
private static final String TEAMCITY_VERSION = "TEAMCITY_VERSION";
@Override
public Optional<CIInfo> detectCI() {
String ciEnv = System.getenv(TEAMCITY_VERSION);
if (ciEnv != null && !ciEnv.trim().isEmpty()) {
return Optional.of(new CIInfo() {
@Override
public String name() {
return NAME;
}
});
}
return Optional.empty();
}
}
| TeamcityCIDetector |
java | google__guava | android/guava-testlib/src/com/google/common/collect/testing/google/MapGenerators.java | {
"start": 8624,
"end": 8768
} | class ____ static utility methods.
*
* @deprecated Do not instantiate this utility class.
*/
@Deprecated
public MapGenerators() {}
}
| of |
java | hibernate__hibernate-orm | hibernate-testing/src/main/java/org/hibernate/testing/orm/domain/gambit/EntityWithOneToOneSharingPrimaryKey.java | {
"start": 416,
"end": 1315
} | class ____ {
private Integer id;
// alphabetical
private String name;
private SimpleEntity other;
private Integer someInteger;
public EntityWithOneToOneSharingPrimaryKey() {
}
public EntityWithOneToOneSharingPrimaryKey(Integer id, String name, Integer someInteger) {
this.id = id;
this.name = name;
this.someInteger = someInteger;
}
@Id
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
@OneToOne
@PrimaryKeyJoinColumn
public SimpleEntity getOther() {
return other;
}
public void setOther(SimpleEntity other) {
this.other = other;
}
public Integer getSomeInteger() {
return someInteger;
}
public void setSomeInteger(Integer someInteger) {
this.someInteger = someInteger;
}
}
| EntityWithOneToOneSharingPrimaryKey |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/SpringAiVectorStoreEndpointBuilderFactory.java | {
"start": 15530,
"end": 15912
} | class ____ extends AbstractEndpointBuilder implements SpringAiVectorStoreEndpointBuilder, AdvancedSpringAiVectorStoreEndpointBuilder {
public SpringAiVectorStoreEndpointBuilderImpl(String path) {
super(componentName, path);
}
}
return new SpringAiVectorStoreEndpointBuilderImpl(path);
}
} | SpringAiVectorStoreEndpointBuilderImpl |
java | apache__camel | components/camel-ibm/camel-ibm-watson-discovery/src/generated/java/org/apache/camel/component/ibm/watson/discovery/WatsonDiscoveryEndpointUriFactory.java | {
"start": 530,
"end": 2444
} | class ____ extends org.apache.camel.support.component.EndpointUriFactorySupport implements EndpointUriFactory {
private static final String BASE = ":label";
private static final Set<String> PROPERTY_NAMES;
private static final Set<String> SECRET_PROPERTY_NAMES;
private static final Map<String, String> MULTI_VALUE_PREFIXES;
static {
Set<String> props = new HashSet<>(8);
props.add("apiKey");
props.add("collectionId");
props.add("label");
props.add("lazyStartProducer");
props.add("operation");
props.add("projectId");
props.add("serviceUrl");
props.add("version");
PROPERTY_NAMES = Collections.unmodifiableSet(props);
Set<String> secretProps = new HashSet<>(1);
secretProps.add("apiKey");
SECRET_PROPERTY_NAMES = Collections.unmodifiableSet(secretProps);
MULTI_VALUE_PREFIXES = Collections.emptyMap();
}
@Override
public boolean isEnabled(String scheme) {
return "ibm-watson-discovery".equals(scheme);
}
@Override
public String buildUri(String scheme, Map<String, Object> properties, boolean encode) throws URISyntaxException {
String syntax = scheme + BASE;
String uri = syntax;
Map<String, Object> copy = new HashMap<>(properties);
uri = buildPathParameter(syntax, uri, "label", null, true, copy);
uri = buildQueryParameters(uri, copy, encode);
return uri;
}
@Override
public Set<String> propertyNames() {
return PROPERTY_NAMES;
}
@Override
public Set<String> secretPropertyNames() {
return SECRET_PROPERTY_NAMES;
}
@Override
public Map<String, String> multiValuePrefixes() {
return MULTI_VALUE_PREFIXES;
}
@Override
public boolean isLenientProperties() {
return false;
}
}
| WatsonDiscoveryEndpointUriFactory |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/ser/JsonValueSerializationTest.java | {
"start": 2796,
"end": 2930
} | class ____ extends HashMap<String,String>
{
@JsonValue
public int value() { return 42; }
}
static | MapAsNumber |
java | spring-projects__spring-framework | spring-web/src/main/java/org/springframework/web/filter/RequestContextFilter.java | {
"start": 2106,
"end": 4626
} | class ____ extends OncePerRequestFilter {
private boolean threadContextInheritable = false;
/**
* Set whether to expose the LocaleContext and RequestAttributes as inheritable
* for child threads (using an {@link java.lang.InheritableThreadLocal}).
* <p>Default is "false", to avoid side effects on spawned background threads.
* Switch this to "true" to enable inheritance for custom child threads which
* are spawned during request processing and only used for this request
* (that is, ending after their initial task, without reuse of the thread).
* <p><b>WARNING:</b> Do not use inheritance for child threads if you are
* accessing a thread pool which is configured to potentially add new threads
* on demand (for example, a JDK {@link java.util.concurrent.ThreadPoolExecutor}),
* since this will expose the inherited context to such a pooled thread.
*/
public void setThreadContextInheritable(boolean threadContextInheritable) {
this.threadContextInheritable = threadContextInheritable;
}
/**
* Returns "false" so that the filter may set up the request context in each
* asynchronously dispatched thread.
*/
@Override
protected boolean shouldNotFilterAsyncDispatch() {
return false;
}
/**
* Returns "false" so that the filter may set up the request context in an
* error dispatch.
*/
@Override
protected boolean shouldNotFilterErrorDispatch() {
return false;
}
@Override
protected void doFilterInternal(
HttpServletRequest request, HttpServletResponse response, FilterChain filterChain)
throws ServletException, IOException {
ServletRequestAttributes attributes = new ServletRequestAttributes(request, response);
initContextHolders(request, attributes);
try {
filterChain.doFilter(request, response);
}
finally {
resetContextHolders();
if (logger.isTraceEnabled()) {
logger.trace("Cleared thread-bound request context: " + request);
}
attributes.requestCompleted();
}
}
private void initContextHolders(HttpServletRequest request, ServletRequestAttributes requestAttributes) {
LocaleContextHolder.setLocale(request.getLocale(), this.threadContextInheritable);
RequestContextHolder.setRequestAttributes(requestAttributes, this.threadContextInheritable);
if (logger.isTraceEnabled()) {
logger.trace("Bound request context to thread: " + request);
}
}
private void resetContextHolders() {
LocaleContextHolder.resetLocaleContext();
RequestContextHolder.resetRequestAttributes();
}
}
| RequestContextFilter |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/ExtendsAutoValueTest.java | {
"start": 3349,
"end": 3677
} | class ____ extends AutoClass.Builder {
AutoClass build() {
throw new RuntimeException();
}
}
""")
.doTest();
}
@Test
public void implementsAutoValue_builder_bad() {
helper
.addSourceLines(
"TestBuilder.java",
"""
import com.google.auto.value.AutoValue;
@AutoValue
abstract | TestBuilder |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/layout/AbstractJacksonLayout.java | {
"start": 12406,
"end": 12656
} | class ____ LogEvent with AdditionalFields during serialization
return new LogEventWithAdditionalFields(event, additionalFieldsMap);
} else if (event instanceof Message) {
// If the LogEvent implements the Messagee | combines |
java | spring-projects__spring-security | web/src/main/java/org/springframework/security/web/authentication/ForwardAuthenticationFailureHandler.java | {
"start": 1185,
"end": 1893
} | class ____ implements AuthenticationFailureHandler {
private final String forwardUrl;
/**
* @param forwardUrl
*/
public ForwardAuthenticationFailureHandler(String forwardUrl) {
Assert.isTrue(UrlUtils.isValidRedirectUrl(forwardUrl), () -> "'" + forwardUrl + "' is not a valid forward URL");
this.forwardUrl = forwardUrl;
}
@Override
public void onAuthenticationFailure(HttpServletRequest request, HttpServletResponse response,
AuthenticationException exception) throws IOException, ServletException {
request.setAttribute(WebAttributes.AUTHENTICATION_EXCEPTION, exception);
request.getRequestDispatcher(this.forwardUrl).forward(request, response);
}
}
| ForwardAuthenticationFailureHandler |
java | junit-team__junit5 | junit-jupiter-params/src/main/java/org/junit/jupiter/params/provider/EnumArgumentsProvider.java | {
"start": 2084,
"end": 2495
} | enum ____ in " + enumClass.getSimpleName() + ", but 'from' or 'to' is not empty.");
return EnumSet.noneOf(enumClass);
}
E from = enumSource.from().isEmpty() ? constants[0] : Enum.valueOf(enumClass, enumSource.from());
E to = enumSource.to().isEmpty() ? constants[constants.length - 1] : Enum.valueOf(enumClass, enumSource.to());
Preconditions.condition(from.compareTo(to) <= 0,
() -> "Invalid | constant |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/jobmanager/Tasks.java | {
"start": 2857,
"end": 3232
} | class ____ extends AbstractInvokable {
public AgnosticBinaryReceiver(Environment environment) {
super(environment);
}
@Override
public void invoke() throws Exception {
consumeInputs(2, this);
}
}
/** An {@link AbstractInvokable} that consumes 3 input channels. */
public static | AgnosticBinaryReceiver |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/kotlin/DataClassSimpleTest.java | {
"start": 937,
"end": 1879
} | class ____ extends ClassLoader {
Map<String, byte[]> resources = new HashMap<String, byte[]>();
public ExtClassLoader() throws IOException {
super(Thread.currentThread().getContextClassLoader());
{
byte[] bytes;
InputStream is = Thread.currentThread().getContextClassLoader().getResourceAsStream("kotlin/DataClassSimple.clazz");
bytes = IOUtils.toByteArray(is);
is.close();
resources.put("DataClassSimple.class", bytes);
super.defineClass("DataClassSimple", bytes, 0, bytes.length);
}
}
public InputStream getResourceAsStream(String name) {
byte[] bytes = resources.get(name);
if (bytes != null) {
return new ByteArrayInputStream(bytes);
}
return super.getResourceAsStream(name);
}
}
}
| ExtClassLoader |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/exceptions/CompositeException.java | {
"start": 10954,
"end": 11469
} | class ____ extends RuntimeException {
private static final long serialVersionUID = 3875212506787802066L;
ExceptionOverview(String message) {
super(message);
}
@Override
public synchronized Throwable fillInStackTrace() {
return this;
}
}
/**
* Returns the number of suppressed exceptions.
* @return the number of suppressed exceptions
*/
public int size() {
return exceptions.size();
}
}
| ExceptionOverview |
java | mockito__mockito | mockito-core/src/test/java/org/mockito/internal/util/collections/IdentitySetTest.java | {
"start": 234,
"end": 582
} | class ____ {
IdentitySet set = new IdentitySet();
@Test
public void shouldWork() throws Exception {
// when
Object o = new Object();
set.add(o);
// then
assertTrue(set.contains(o));
assertFalse(set.contains(new Object()));
}
@SuppressWarnings("EqualsHashCode")
| IdentitySetTest |
java | elastic__elasticsearch | x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsTaskTests.java | {
"start": 16588,
"end": 17299
} | class ____ implements DataFrameAnalyticsStep {
private final ProgressTracker progressTracker;
StubReindexingStep(ProgressTracker progressTracker) {
this.progressTracker = progressTracker;
}
@Override
public Name name() {
return Name.REINDEXING;
}
@Override
public void execute(ActionListener<StepResponse> listener) {}
@Override
public void cancel(String reason, TimeValue timeout) {}
@Override
public void updateProgress(ActionListener<Void> listener) {
progressTracker.updateReindexingProgress(100);
listener.onResponse(null);
}
}
}
| StubReindexingStep |
java | spring-projects__spring-boot | module/spring-boot-micrometer-observation/src/main/java/org/springframework/boot/micrometer/observation/autoconfigure/ObservationRegistryCustomizer.java | {
"start": 769,
"end": 1005
} | interface ____ can be used to customize auto-configured
* {@link ObservationRegistry observation registries}.
*
* @param <T> the registry type to customize
* @author Moritz Halbritter
* @since 4.0.0
*/
@FunctionalInterface
public | that |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java | {
"start": 2653,
"end": 2918
} | class ____ extends KeyProvider implements
CryptoExtension,
KeyProviderDelegationTokenExtension.DelegationTokenExtension {
public static Logger LOG =
LoggerFactory.getLogger(LoadBalancingKMSClientProvider.class);
static | LoadBalancingKMSClientProvider |
java | apache__flink | flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/reader/BulkFormat.java | {
"start": 4962,
"end": 6541
} | interface ____<T, SplitT extends FileSourceSplit>
extends Serializable, ResultTypeQueryable<T> {
/**
* Creates a new reader that reads from the {@link FileSourceSplit#path() split's path} starting
* at the {@link FileSourceSplit#offset() split's offset} and reads {@link
* FileSourceSplit#length() length} bytes after the offset.
*/
BulkFormat.Reader<T> createReader(Configuration config, SplitT split) throws IOException;
/**
* Creates a new reader that reads from {@code split.path()} starting at {@code offset} and
* reads until {@code length} bytes after the offset. A number of {@code recordsToSkip} records
* should be read and discarded after the offset. This is typically part of restoring a reader
* to a checkpointed position.
*/
BulkFormat.Reader<T> restoreReader(Configuration config, SplitT split) throws IOException;
/**
* Checks whether this format is splittable. Splittable formats allow Flink to create multiple
* splits per file, so that Flink can read multiple regions of the file concurrently.
*
* <p>See {@link BulkFormat top-level JavaDocs} (section "Splitting") for details.
*/
boolean isSplittable();
/**
* Gets the type produced by this format. This type will be the type produced by the file source
* as a whole.
*/
@Override
TypeInformation<T> getProducedType();
// ------------------------------------------------------------------------
/** The actual reader that reads the batches of records. */
| BulkFormat |
java | apache__flink | flink-datastream-api/src/main/java/org/apache/flink/datastream/api/extension/eventtime/EventTimeExtension.java | {
"start": 3855,
"end": 13007
} | class ____ {
// =============== Event Time related Watermark Declarations ===============
/**
* Definition of EventTimeWatermark. The EventTimeWatermark represents a specific timestamp,
* signifying the passage of time. Once a process function receives an EventTimeWatermark, it
* will no longer receive events with a timestamp earlier than that watermark.
*/
public static final LongWatermarkDeclaration EVENT_TIME_WATERMARK_DECLARATION =
WatermarkDeclarations.newBuilder("BUILTIN_API_EVENT_TIME")
.typeLong()
.combineFunctionMin()
.combineWaitForAllChannels(true)
.defaultHandlingStrategyForward()
.build();
/**
* Definition of IdleStatusWatermark. The IdleStatusWatermark indicates that a particular input
* is in an idle state. When a ProcessFunction receives an IdleStatusWatermark from an input, it
* should ignore that input when combining EventTimeWatermarks.
*/
public static final BoolWatermarkDeclaration IDLE_STATUS_WATERMARK_DECLARATION =
WatermarkDeclarations.newBuilder("BUILTIN_API_EVENT_TIME_IDLE")
.typeBool()
.combineFunctionAND()
.combineWaitForAllChannels(true)
.defaultHandlingStrategyForward()
.build();
/**
* Determine if the received watermark is an EventTimeWatermark.
*
* @param watermark The watermark to be checked.
* @return true if the watermark is an EventTimeWatermark; false otherwise.
*/
public static boolean isEventTimeWatermark(Watermark watermark) {
return isEventTimeWatermark(watermark.getIdentifier());
}
/**
* Determine if the received watermark is an EventTimeWatermark by watermark identifier.
*
* @param watermarkIdentifier The identifier of the watermark to be checked.
* @return true if the watermark is an EventTimeWatermark; false otherwise.
*/
public static boolean isEventTimeWatermark(String watermarkIdentifier) {
return watermarkIdentifier.equals(EVENT_TIME_WATERMARK_DECLARATION.getIdentifier());
}
/**
* Determine if the received watermark is an IdleStatusWatermark.
*
* @param watermark The watermark to be checked.
* @return true if the watermark is an IdleStatusWatermark; false otherwise.
*/
public static boolean isIdleStatusWatermark(Watermark watermark) {
return isIdleStatusWatermark(watermark.getIdentifier());
}
/**
* Determine if the received watermark is an IdleStatusWatermark by watermark identifier.
*
* @param watermarkIdentifier The identifier of the watermark to be checked.
* @return true if the watermark is an IdleStatusWatermark; false otherwise.
*/
public static boolean isIdleStatusWatermark(String watermarkIdentifier) {
return watermarkIdentifier.equals(IDLE_STATUS_WATERMARK_DECLARATION.getIdentifier());
}
// ======== EventTimeWatermarkGeneratorBuilder to generate event time watermarks =========
/**
* Create an instance of {@link EventTimeWatermarkGeneratorBuilder}, which contains a {@code
* EventTimeExtractor}.
*
* @param eventTimeExtractor An instance of {@code EventTimeExtractor} used to extract event
* time information from data records.
* @param <T> The type of data records.
* @return An instance of {@code EventTimeWatermarkGeneratorBuilder} containing the specified
* event time extractor.
*/
public static <T> EventTimeWatermarkGeneratorBuilder<T> newWatermarkGeneratorBuilder(
EventTimeExtractor<T> eventTimeExtractor) {
return new EventTimeWatermarkGeneratorBuilder<>(eventTimeExtractor);
}
// ======== Wrap user-defined {@link EventTimeProcessFunction} =========
/**
* Wrap the user-defined {@link OneInputEventTimeStreamProcessFunction}, which will provide
* related components such as {@link EventTimeManager} and declare the necessary built-in state
* required for the Timer, etc. Note that registering event timers of {@link
* EventTimeProcessFunction} can only be used with {@link KeyedPartitionStream}.
*
* @param processFunction The user-defined {@link OneInputEventTimeStreamProcessFunction} that
* needs to be wrapped.
* @return The wrapped {@link OneInputStreamProcessFunction}.
*/
public static <IN, OUT> OneInputStreamProcessFunction<IN, OUT> wrapProcessFunction(
OneInputEventTimeStreamProcessFunction<IN, OUT> processFunction) {
try {
return (OneInputStreamProcessFunction<IN, OUT>)
getEventTimeExtensionImplClass()
.getMethod(
"wrapProcessFunction",
OneInputEventTimeStreamProcessFunction.class)
.invoke(null, processFunction);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
/**
* Wrap the user-defined {@link TwoOutputStreamProcessFunction}, which will provide related
* components such as {@link EventTimeManager} and declare the necessary built-in state required
* for the Timer, etc. Note that registering event timers of {@link EventTimeProcessFunction}
* can only be used with {@link KeyedPartitionStream}.
*
* @param processFunction The user-defined {@link TwoOutputEventTimeStreamProcessFunction} that
* needs to be wrapped.
* @return The wrapped {@link TwoOutputStreamProcessFunction}.
*/
public static <IN, OUT1, OUT2>
TwoOutputStreamProcessFunction<IN, OUT1, OUT2> wrapProcessFunction(
TwoOutputEventTimeStreamProcessFunction<IN, OUT1, OUT2> processFunction) {
try {
return (TwoOutputStreamProcessFunction<IN, OUT1, OUT2>)
getEventTimeExtensionImplClass()
.getMethod(
"wrapProcessFunction",
TwoOutputEventTimeStreamProcessFunction.class)
.invoke(null, processFunction);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
/**
* Wrap the user-defined {@link TwoInputNonBroadcastEventTimeStreamProcessFunction}, which will
* provide related components such as {@link EventTimeManager} and declare the necessary
* built-in state required for the Timer, etc. Note that registering event timers of {@link
* EventTimeProcessFunction} can only be used with {@link KeyedPartitionStream}.
*
* @param processFunction The user-defined {@link
* TwoInputNonBroadcastEventTimeStreamProcessFunction} that needs to be wrapped.
* @return The wrapped {@link TwoInputNonBroadcastStreamProcessFunction}.
*/
public static <IN1, IN2, OUT>
TwoInputNonBroadcastStreamProcessFunction<IN1, IN2, OUT> wrapProcessFunction(
TwoInputNonBroadcastEventTimeStreamProcessFunction<IN1, IN2, OUT>
processFunction) {
try {
return (TwoInputNonBroadcastStreamProcessFunction<IN1, IN2, OUT>)
getEventTimeExtensionImplClass()
.getMethod(
"wrapProcessFunction",
TwoInputNonBroadcastEventTimeStreamProcessFunction.class)
.invoke(null, processFunction);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
/**
* Wrap the user-defined {@link TwoInputBroadcastEventTimeStreamProcessFunction}, which will
* provide related components such as {@link EventTimeManager} and declare the necessary
* built-in state required for the Timer, etc. Note that registering event timers of {@link
* EventTimeProcessFunction} can only be used with {@link KeyedPartitionStream}.
*
* @param processFunction The user-defined {@link
* TwoInputBroadcastEventTimeStreamProcessFunction} that needs to be wrapped.
* @return The wrapped {@link TwoInputBroadcastStreamProcessFunction}.
*/
public static <IN1, IN2, OUT>
TwoInputBroadcastStreamProcessFunction<IN1, IN2, OUT> wrapProcessFunction(
TwoInputBroadcastEventTimeStreamProcessFunction<IN1, IN2, OUT>
processFunction) {
try {
return (TwoInputBroadcastStreamProcessFunction<IN1, IN2, OUT>)
getEventTimeExtensionImplClass()
.getMethod(
"wrapProcessFunction",
TwoInputBroadcastEventTimeStreamProcessFunction.class)
.invoke(null, processFunction);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
/** Get the implementation | EventTimeExtension |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/util/EnumValuesTest.java | {
"start": 660,
"end": 3776
} | enum ____ {
A("A"),
B("b"),
C("C");
private final String desc;
private ABC(String d) { desc = d; }
@Override
public String toString() { return desc; }
}
private final ObjectMapper MAPPER = newJsonMapper();
@Test
public void testConstructFromName() {
SerializationConfig cfg = MAPPER.serializationConfig()
.without(EnumFeature.WRITE_ENUMS_USING_TO_STRING);
AnnotatedClass enumClass = resolve(MAPPER, ABC.class);
EnumValues values = EnumValues.constructFromName(cfg, enumClass);
assertEquals("A", values.serializedValueFor(ABC.A).toString());
assertEquals("B", values.serializedValueFor(ABC.B).toString());
assertEquals("C", values.serializedValueFor(ABC.C).toString());
assertEquals(3, values.values().size());
assertEquals(3, values.internalMap().size());
}
@Test
public void testConstructWithToString() {
SerializationConfig cfg = MAPPER.serializationConfig()
.with(EnumFeature.WRITE_ENUMS_USING_TO_STRING);
AnnotatedClass enumClass = resolve(MAPPER, ABC.class);
EnumValues values = EnumValues.constructFromToString(cfg, enumClass);
assertEquals("A", values.serializedValueFor(ABC.A).toString());
assertEquals("b", values.serializedValueFor(ABC.B).toString());
assertEquals("C", values.serializedValueFor(ABC.C).toString());
assertEquals(3, values.values().size());
assertEquals(3, values.internalMap().size());
}
@Test
public void testEnumResolverNew()
{
AnnotatedClass annotatedClass = resolve(MAPPER, ABC.class);
EnumResolver enumRes = EnumResolver.constructUsingToString(MAPPER.deserializationConfig(),
annotatedClass);
assertEquals(ABC.B, enumRes.getEnum(1));
assertNull(enumRes.getEnum(-1));
assertNull(enumRes.getEnum(3));
assertEquals(2, enumRes.lastValidIndex());
List<Enum<?>> enums = enumRes.getEnums();
assertEquals(3, enums.size());
assertEquals(ABC.A, enums.get(0));
assertEquals(ABC.B, enums.get(1));
assertEquals(ABC.C, enums.get(2));
}
// [databind#3053]
@Test
public void testConstructFromNameLowerCased() {
SerializationConfig cfg = MAPPER.serializationConfig()
.with(EnumFeature.WRITE_ENUMS_TO_LOWERCASE);
AnnotatedClass enumClass = resolve(MAPPER, ABC.class);
EnumValues values = EnumValues.constructFromName(cfg, enumClass);
assertEquals("a", values.serializedValueFor(ABC.A).toString());
assertEquals("b", values.serializedValueFor(ABC.B).toString());
assertEquals("c", values.serializedValueFor(ABC.C).toString());
assertEquals(3, values.values().size());
assertEquals(3, values.internalMap().size());
}
private AnnotatedClass resolve(ObjectMapper mapper, Class<?> enumClass) {
return AnnotatedClassResolver.resolve(mapper.serializationConfig(),
mapper.constructType(enumClass), null);
}
}
| ABC |
java | apache__camel | core/camel-util/src/main/java/org/apache/camel/util/ObjectHelper.java | {
"start": 21755,
"end": 23068
} | class ____
if (loader != null) {
url = loader.getResource(resolvedName);
if (url != null) {
return url;
}
}
// #2 Next, is the TCCL
ClassLoader tccl = Thread.currentThread().getContextClassLoader();
if (tccl != null) {
url = tccl.getResource(resolvedName);
if (url != null) {
return url;
}
// #3 The TCCL may be able to see camel-core, but not META-INF resources
try {
Class<?> clazz = tccl.loadClass("org.apache.camel.impl.DefaultCamelContext");
url = clazz.getClassLoader().getResource(resolvedName);
if (url != null) {
return url;
}
} catch (ClassNotFoundException e) {
// ignore
}
}
// #4 Last, for the unlikely case that stuff can be loaded from camel-util
url = ObjectHelper.class.getClassLoader().getResource(resolvedName);
if (url != null) {
return url;
}
url = ObjectHelper.class.getResource(resolvedName);
return url;
}
/**
* Attempts to load the given resources from the given package name using the thread context | loader |
java | google__dagger | hilt-android/main/java/dagger/hilt/android/internal/managers/ViewComponentManager.java | {
"start": 6386,
"end": 6483
} | class ____ expose the {@link Fragment} to the views they're inflating.
*/
public static final | to |
java | spring-projects__spring-security | core/src/test/java/org/springframework/security/jackson/SecurityContextMixinTests.java | {
"start": 1347,
"end": 2900
} | class ____ extends AbstractMixinTests {
// @formatter:off
public static final String SECURITY_CONTEXT_JSON = "{"
+ "\"@class\": \"org.springframework.security.core.context.SecurityContextImpl\", "
+ "\"authentication\": " + UsernamePasswordAuthenticationTokenMixinTests.AUTHENTICATED_STRINGPRINCIPAL_JSON
+ "}";
// @formatter:on
@Test
public void securityContextSerializeTest() throws JsonProcessingException, JSONException {
SecurityContext context = new SecurityContextImpl();
context.setAuthentication(UsernamePasswordAuthenticationToken.authenticated("admin", "1234",
Collections.singleton(new SimpleGrantedAuthority("ROLE_USER"))));
String actualJson = this.mapper.writeValueAsString(context);
JSONAssert.assertEquals(SECURITY_CONTEXT_JSON, actualJson, true);
}
@Test
public void securityContextDeserializeTest() throws IOException {
SecurityContext context = this.mapper.readValue(SECURITY_CONTEXT_JSON, SecurityContextImpl.class);
assertThat(context).isNotNull();
assertThat(context.getAuthentication()).isNotNull().isInstanceOf(UsernamePasswordAuthenticationToken.class);
assertThat(context.getAuthentication().getPrincipal()).isEqualTo("admin");
assertThat(context.getAuthentication().getCredentials()).isEqualTo("1234");
assertThat(context.getAuthentication().isAuthenticated()).isTrue();
Collection authorities = context.getAuthentication().getAuthorities();
assertThat(authorities).hasSize(1);
assertThat(authorities).contains(new SimpleGrantedAuthority("ROLE_USER"));
}
}
| SecurityContextMixinTests |
java | netty__netty | codec-dns/src/main/java/io/netty/handler/codec/dns/DefaultDnsRecordDecoder.java | {
"start": 2563,
"end": 5087
} | class ____ the record
* @param timeToLive the TTL of the record
* @param in the {@link ByteBuf} that contains the RDATA
* @param offset the start offset of the RDATA in {@code in}
* @param length the length of the RDATA
*
* @return a {@link DnsRawRecord}. Override this method to decode RDATA and return other record implementation.
*/
protected DnsRecord decodeRecord(
String name, DnsRecordType type, int dnsClass, long timeToLive,
ByteBuf in, int offset, int length) throws Exception {
// DNS message compression means that domain names may contain "pointers" to other positions in the packet
// to build a full message. This means the indexes are meaningful and we need the ability to reference the
// indexes un-obstructed, and thus we cannot use a slice here.
// See https://www.ietf.org/rfc/rfc1035 [4.1.4. Message compression]
if (type == DnsRecordType.PTR) {
return new DefaultDnsPtrRecord(
name, dnsClass, timeToLive, decodeName0(in.duplicate().setIndex(offset, offset + length)));
}
if (type == DnsRecordType.CNAME || type == DnsRecordType.NS) {
return new DefaultDnsRawRecord(name, type, dnsClass, timeToLive,
DnsCodecUtil.decompressDomainName(
in.duplicate().setIndex(offset, offset + length)));
}
return new DefaultDnsRawRecord(
name, type, dnsClass, timeToLive, in.retainedDuplicate().setIndex(offset, offset + length));
}
/**
* Retrieves a domain name given a buffer containing a DNS packet. If the
* name contains a pointer, the position of the buffer will be set to
* directly after the pointer's index after the name has been read.
*
* @param in the byte buffer containing the DNS packet
* @return the domain name for an entry
*/
protected String decodeName0(ByteBuf in) {
return decodeName(in);
}
/**
* Retrieves a domain name given a buffer containing a DNS packet. If the
* name contains a pointer, the position of the buffer will be set to
* directly after the pointer's index after the name has been read.
*
* @param in the byte buffer containing the DNS packet
* @return the domain name for an entry
*/
public static String decodeName(ByteBuf in) {
return DnsCodecUtil.decodeDomainName(in);
}
}
| of |
java | quarkusio__quarkus | extensions/hibernate-orm/runtime/src/main/java/io/quarkus/hibernate/orm/runtime/service/bytecodeprovider/RuntimeBytecodeProvider.java | {
"start": 518,
"end": 1444
} | class ____ implements BytecodeProvider {
private final QuarkusRuntimeProxyFactoryFactory preGeneratedProxyFactory;
public RuntimeBytecodeProvider(QuarkusRuntimeProxyFactoryFactory preGeneratedProxyFactory) {
this.preGeneratedProxyFactory = preGeneratedProxyFactory;
}
@Override
public ProxyFactoryFactory getProxyFactoryFactory() {
return preGeneratedProxyFactory;
}
@Override
public ReflectionOptimizer getReflectionOptimizer(
Class clazz,
String[] getterNames,
String[] setterNames,
Class[] types) {
return null;
}
@Override
public ReflectionOptimizer getReflectionOptimizer(Class<?> clazz, Map<String, PropertyAccess> propertyAccessMap) {
return null;
}
@Override
public Enhancer getEnhancer(EnhancementContext enhancementContext) {
return null;
}
}
| RuntimeBytecodeProvider |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/PQCEndpointBuilderFactory.java | {
"start": 1455,
"end": 1583
} | interface ____ {
/**
* Builder for endpoint for the PQC Algorithms component.
*/
public | PQCEndpointBuilderFactory |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/io/disk/iomanager/IOManager.java | {
"start": 1641,
"end": 10413
} | class ____ implements AutoCloseable {
protected static final Logger LOG = LoggerFactory.getLogger(IOManager.class);
private static final String DIR_NAME_PREFIX = "io";
private final FileChannelManager fileChannelManager;
protected final ExecutorService executorService;
// -------------------------------------------------------------------------
// Constructors / Destructors
// -------------------------------------------------------------------------
/**
* Constructs a new IOManager.
*
* @param tempDirs The basic directories for files underlying anonymous channels.
*/
protected IOManager(String[] tempDirs, ExecutorService executorService) {
this.fileChannelManager =
new FileChannelManagerImpl(Preconditions.checkNotNull(tempDirs), DIR_NAME_PREFIX);
if (LOG.isInfoEnabled()) {
LOG.info(
"Created a new {} for spilling of task related data to disk (joins, sorting, ...). Used directories:\n\t{}",
FileChannelManager.class.getSimpleName(),
Arrays.stream(fileChannelManager.getPaths())
.map(File::getAbsolutePath)
.collect(Collectors.joining("\n\t")));
}
this.executorService = executorService;
}
/** Removes all temporary files. */
@Override
public void close() throws Exception {
fileChannelManager.close();
}
// ------------------------------------------------------------------------
// Channel Instantiations
// ------------------------------------------------------------------------
/**
* Creates a new {@link ID} in one of the temp directories. Multiple invocations of this method
* spread the channels evenly across the different directories.
*
* @return A channel to a temporary directory.
*/
public ID createChannel() {
return fileChannelManager.createChannel();
}
/**
* Creates a new {@link Enumerator}, spreading the channels in a round-robin fashion across the
* temporary file directories.
*
* @return An enumerator for channels.
*/
public Enumerator createChannelEnumerator() {
return fileChannelManager.createChannelEnumerator();
}
/**
* Deletes the file underlying the given channel. If the channel is still open, this call may
* fail.
*
* @param channel The channel to be deleted.
*/
public static void deleteChannel(ID channel) {
if (channel != null) {
if (channel.getPathFile().exists() && !channel.getPathFile().delete()) {
LOG.warn("IOManager failed to delete temporary file {}", channel.getPath());
}
}
}
/**
* Gets the directories that the I/O manager spills to.
*
* @return The directories that the I/O manager spills to.
*/
public File[] getSpillingDirectories() {
return fileChannelManager.getPaths();
}
/**
* Gets the directories that the I/O manager spills to, as path strings.
*
* @return The directories that the I/O manager spills to, as path strings.
*/
public String[] getSpillingDirectoriesPaths() {
File[] paths = fileChannelManager.getPaths();
String[] strings = new String[paths.length];
for (int i = 0; i < strings.length; i++) {
strings[i] = paths[i].getAbsolutePath();
}
return strings;
}
// ------------------------------------------------------------------------
// Reader / Writer instantiations
// ------------------------------------------------------------------------
/**
* Creates a block channel writer that writes to the given channel. The writer adds the written
* segment to its return-queue afterwards (to allow for asynchronous implementations).
*
* @param channelID The descriptor for the channel to write to.
* @return A block channel writer that writes to the given channel.
* @throws IOException Thrown, if the channel for the writer could not be opened.
*/
public BlockChannelWriter<MemorySegment> createBlockChannelWriter(ID channelID)
throws IOException {
return createBlockChannelWriter(channelID, new LinkedBlockingQueue<>());
}
/**
* Creates a block channel writer that writes to the given channel. The writer adds the written
* segment to the given queue (to allow for asynchronous implementations).
*
* @param channelID The descriptor for the channel to write to.
* @param returnQueue The queue to put the written buffers into.
* @return A block channel writer that writes to the given channel.
* @throws IOException Thrown, if the channel for the writer could not be opened.
*/
public abstract BlockChannelWriter<MemorySegment> createBlockChannelWriter(
ID channelID, LinkedBlockingQueue<MemorySegment> returnQueue) throws IOException;
/**
* Creates a block channel writer that writes to the given channel. The writer calls the given
* callback after the I/O operation has been performed (successfully or unsuccessfully), to
* allow for asynchronous implementations.
*
* @param channelID The descriptor for the channel to write to.
* @param callback The callback to be called for
* @return A block channel writer that writes to the given channel.
* @throws IOException Thrown, if the channel for the writer could not be opened.
*/
public abstract BlockChannelWriterWithCallback<MemorySegment> createBlockChannelWriter(
ID channelID, RequestDoneCallback<MemorySegment> callback) throws IOException;
/**
* Creates a block channel reader that reads blocks from the given channel. The reader pushed
* full memory segments (with the read data) to its "return queue", to allow for asynchronous
* read implementations.
*
* @param channelID The descriptor for the channel to write to.
* @return A block channel reader that reads from the given channel.
* @throws IOException Thrown, if the channel for the reader could not be opened.
*/
public BlockChannelReader<MemorySegment> createBlockChannelReader(ID channelID)
throws IOException {
return createBlockChannelReader(channelID, new LinkedBlockingQueue<>());
}
/**
* Creates a block channel reader that reads blocks from the given channel. The reader pushes
* the full segments to the given queue, to allow for asynchronous implementations.
*
* @param channelID The descriptor for the channel to write to.
* @param returnQueue The queue to put the full buffers into.
* @return A block channel reader that reads from the given channel.
* @throws IOException Thrown, if the channel for the reader could not be opened.
*/
public abstract BlockChannelReader<MemorySegment> createBlockChannelReader(
ID channelID, LinkedBlockingQueue<MemorySegment> returnQueue) throws IOException;
public abstract BufferFileWriter createBufferFileWriter(ID channelID) throws IOException;
public abstract BufferFileReader createBufferFileReader(
ID channelID, RequestDoneCallback<Buffer> callback) throws IOException;
public abstract BufferFileSegmentReader createBufferFileSegmentReader(
ID channelID, RequestDoneCallback<FileSegment> callback) throws IOException;
/**
* Creates a block channel reader that reads all blocks from the given channel directly in one
* bulk. The reader draws segments to read the blocks into from a supplied list, which must
* contain as many segments as the channel has blocks. After the reader is done, the list with
* the full segments can be obtained from the reader.
*
* <p>If a channel is not to be read in one bulk, but in multiple smaller batches, a {@link
* BlockChannelReader} should be used.
*
* @param channelID The descriptor for the channel to write to.
* @param targetSegments The list to take the segments from into which to read the data.
* @param numBlocks The number of blocks in the channel to read.
* @return A block channel reader that reads from the given channel.
* @throws IOException Thrown, if the channel for the reader could not be opened.
*/
public abstract BulkBlockChannelReader createBulkBlockChannelReader(
ID channelID, List<MemorySegment> targetSegments, int numBlocks) throws IOException;
public ExecutorService getExecutorService() {
return executorService;
}
}
| IOManager |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/dialect/function/NvlCoalesceEmulation.java | {
"start": 1030,
"end": 2144
} | class ____
extends AbstractSqmFunctionDescriptor {
public NvlCoalesceEmulation() {
super(
"coalesce",
StandardArgumentsValidators.min( 2 ),
StandardFunctionReturnTypeResolvers.useFirstNonNull(),
StandardFunctionArgumentTypeResolvers.IMPLIED_RESULT_TYPE
);
}
@Override
protected <T> SelfRenderingSqmFunction<T> generateSqmFunctionExpression(
List<? extends SqmTypedNode<?>> arguments,
ReturnableType<T> impliedResultType,
QueryEngine queryEngine) {
SqmFunctionDescriptor nvl =
queryEngine.getSqmFunctionRegistry()
.namedDescriptorBuilder("nvl")
.setExactArgumentCount(2)
.descriptor();
int pos = arguments.size();
SqmExpression<?> result = (SqmExpression<?>) arguments.get( --pos );
ReturnableType<?> type =
(ReturnableType<?>) result.getNodeType();
while (pos>0) {
SqmExpression<?> next = (SqmExpression<?>) arguments.get( --pos );
result = nvl.generateSqmExpression(
asList( next, result ),
type,
queryEngine
);
}
//noinspection unchecked
return (SelfRenderingSqmFunction<T>) result;
}
}
| NvlCoalesceEmulation |
java | jhy__jsoup | src/main/java/org/jsoup/Connection.java | {
"start": 2791,
"end": 2869
} | interface ____ {
/**
* GET and POST http methods.
*/
| Connection |
java | apache__dubbo | dubbo-rpc/dubbo-rpc-api/src/main/java/org/apache/dubbo/rpc/AttachmentsAdapter.java | {
"start": 1091,
"end": 2415
} | class ____ extends HashMap<String, String> {
private final Map<String, Object> attachments;
public ObjectToStringMap(Map<String, Object> attachments) {
for (Entry<String, Object> entry : attachments.entrySet()) {
String convertResult = convert(entry.getValue());
if (convertResult != null) {
super.put(entry.getKey(), convertResult);
}
}
this.attachments = attachments;
}
@Override
public String put(String key, String value) {
attachments.put(key, value);
return super.put(key, value);
}
@Override
public String remove(Object key) {
attachments.remove(key);
return super.remove(key);
}
private String convert(Object obj) {
if (obj instanceof String) {
return (String) obj;
}
return null; // or JSON.toString(obj);
}
@Override
public void clear() {
attachments.clear();
super.clear();
}
@Override
public void putAll(Map<? extends String, ? extends String> map) {
attachments.putAll(map);
super.putAll(map);
}
}
}
| ObjectToStringMap |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/ZStoreArgs.java | {
"start": 1470,
"end": 5088
} | class ____ {
/**
* Utility constructor.
*/
private Builder() {
}
/**
* Creates new {@link ZStoreArgs} setting {@literal WEIGHTS} using long.
*
* @return new {@link ZAddArgs} with {@literal WEIGHTS} set.
* @see ZStoreArgs#weights(long[])
* @deprecated use {@link #weights(double...)}.
*/
@Deprecated
public static ZStoreArgs weights(long[] weights) {
return new ZStoreArgs().weights(toDoubleArray(weights));
}
/**
* Creates new {@link ZStoreArgs} setting {@literal WEIGHTS}.
*
* @return new {@link ZAddArgs} with {@literal WEIGHTS} set.
* @see ZStoreArgs#weights(double...)
*/
public static ZStoreArgs weights(double... weights) {
return new ZStoreArgs().weights(weights);
}
/**
* Creates new {@link ZStoreArgs} setting {@literal AGGREGATE SUM}.
*
* @return new {@link ZAddArgs} with {@literal AGGREGATE SUM} set.
* @see ZStoreArgs#sum()
*/
public static ZStoreArgs sum() {
return new ZStoreArgs().sum();
}
/**
* Creates new {@link ZStoreArgs} setting {@literal AGGREGATE MIN}.
*
* @return new {@link ZAddArgs} with {@literal AGGREGATE MIN} set.
* @see ZStoreArgs#sum()
*/
public static ZStoreArgs min() {
return new ZStoreArgs().min();
}
/**
* Creates new {@link ZStoreArgs} setting {@literal AGGREGATE MAX}.
*
* @return new {@link ZAddArgs} with {@literal AGGREGATE MAX} set.
* @see ZStoreArgs#sum()
*/
public static ZStoreArgs max() {
return new ZStoreArgs().max();
}
}
/**
* Specify a multiplication factor for each input sorted set.
*
* @param weights must not be {@code null}.
* @return {@code this} {@link ZStoreArgs}.
* @deprecated use {@link #weights(double...)}
*/
@Deprecated
public static ZStoreArgs weights(long[] weights) {
LettuceAssert.notNull(weights, "Weights must not be null");
return new ZStoreArgs().weights(toDoubleArray(weights));
}
/**
* Specify a multiplication factor for each input sorted set.
*
* @param weights must not be {@code null}.
* @return {@code this} {@link ZStoreArgs}.
*/
@Override
public ZStoreArgs weights(double... weights) {
super.weights(weights);
return this;
}
/**
* Aggregate scores of elements existing across multiple sets by summing up.
*
* @return {@code this} {@link ZStoreArgs}.
*/
@Override
public ZStoreArgs sum() {
super.sum();
return this;
}
/**
* Aggregate scores of elements existing across multiple sets by using the lowest score.
*
* @return {@code this} {@link ZStoreArgs}.
*/
@Override
public ZStoreArgs min() {
super.min();
return this;
}
/**
* Aggregate scores of elements existing across multiple sets by using the highest score.
*
* @return {@code this} {@link ZStoreArgs}.
*/
@Override
public ZStoreArgs max() {
super.max();
return this;
}
private static double[] toDoubleArray(long[] weights) {
double[] result = new double[weights.length];
for (int i = 0; i < weights.length; i++) {
result[i] = weights[i];
}
return result;
}
}
| Builder |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/bugs/_1164/GenericHolder.java | {
"start": 225,
"end": 400
} | class ____<T> {
private T value;
public T getValue() {
return value;
}
public void setValue(T value) {
this.value = value;
}
}
| GenericHolder |
java | apache__commons-lang | src/main/java/org/apache/commons/lang3/concurrent/ConstantInitializer.java | {
"start": 1975,
"end": 4628
} | class ____ not place any restrictions
* on the object. It may be <strong>null</strong>, then {@code get()} will return
* <strong>null</strong>, too.
*
* @param obj the object to be managed by this initializer
*/
public ConstantInitializer(final T obj) {
object = obj;
}
/**
* Compares this object with another one. This implementation returns
* <strong>true</strong> if and only if the passed in object is an instance of
* {@link ConstantInitializer} which refers to an object equals to the
* object managed by this instance.
*
* @param obj the object to compare to
* @return a flag whether the objects are equal
*/
@Override
public boolean equals(final Object obj) {
if (this == obj) {
return true;
}
if (!(obj instanceof ConstantInitializer<?>)) {
return false;
}
final ConstantInitializer<?> c = (ConstantInitializer<?>) obj;
return Objects.equals(getObject(), c.getObject());
}
/**
* Gets the object managed by this initializer. This implementation just
* returns the object passed to the constructor.
*
* @return the object managed by this initializer
* @throws ConcurrentException if an error occurs
*/
@Override
public T get() throws ConcurrentException {
return getObject();
}
/**
* Directly returns the object that was passed to the constructor. This is
* the same object as returned by {@code get()}. However, this method does
* not declare that it throws an exception.
*
* @return the object managed by this initializer
*/
public final T getObject() {
return object;
}
/**
* Returns a hash code for this object. This implementation returns the hash
* code of the managed object.
*
* @return a hash code for this object
*/
@Override
public int hashCode() {
return Objects.hashCode(object);
}
/**
* As a {@link ConstantInitializer} is initialized on construction this will
* always return true.
*
* @return true.
* @since 3.14.0
*/
public boolean isInitialized() {
return true;
}
/**
* Returns a string representation for this object. This string also
* contains a string representation of the object managed by this
* initializer.
*
* @return a string for this object
*/
@Override
public String toString() {
return String.format(FMT_TO_STRING, Integer.valueOf(System.identityHashCode(this)), getObject());
}
}
| does |
java | quarkusio__quarkus | integration-tests/gradle/src/main/resources/dev-deps-leak-into-prod-48992/src/test/java/org/acme/GreetingResourceTest.java | {
"start": 203,
"end": 428
} | class ____ {
@Test
void testHelloEndpoint() {
given()
.when().get("/hello")
.then()
.statusCode(200)
.body(is("Hello from Quarkus REST"));
}
} | GreetingResourceTest |
java | spring-projects__spring-boot | smoke-test/spring-boot-smoke-test-websocket-jetty/src/main/java/smoketest/websocket/jetty/snake/SnakeTimer.java | {
"start": 1124,
"end": 3131
} | class ____ {
private static final long TICK_DELAY = 100;
private static final Object MONITOR = new Object();
private static final Log logger = LogFactory.getLog(SnakeTimer.class);
private static final ConcurrentHashMap<Integer, Snake> snakes = new ConcurrentHashMap<>();
private static @Nullable Timer gameTimer;
private SnakeTimer() {
}
public static void addSnake(Snake snake) {
synchronized (MONITOR) {
if (snakes.isEmpty()) {
startTimer();
}
snakes.put(snake.getId(), snake);
}
}
public static Collection<Snake> getSnakes() {
return Collections.unmodifiableCollection(snakes.values());
}
public static void removeSnake(Snake snake) {
synchronized (MONITOR) {
snakes.remove(snake.getId());
if (snakes.isEmpty()) {
stopTimer();
}
}
}
public static void tick() throws Exception {
StringBuilder sb = new StringBuilder();
for (Iterator<Snake> iterator = SnakeTimer.getSnakes().iterator(); iterator.hasNext();) {
Snake snake = iterator.next();
snake.update(SnakeTimer.getSnakes());
sb.append(snake.getLocationsJson());
if (iterator.hasNext()) {
sb.append(',');
}
}
broadcast(String.format("{'type': 'update', 'data' : [%s]}", sb));
}
public static void broadcast(String message) {
Collection<Snake> snakes = new CopyOnWriteArrayList<>(SnakeTimer.getSnakes());
for (Snake snake : snakes) {
try {
snake.sendMessage(message);
}
catch (Throwable ex) {
// if Snake#sendMessage fails the client is removed
removeSnake(snake);
}
}
}
public static void startTimer() {
gameTimer = new Timer(SnakeTimer.class.getSimpleName() + " Timer");
gameTimer.scheduleAtFixedRate(new TimerTask() {
@Override
public void run() {
try {
tick();
}
catch (Throwable ex) {
logger.error("Caught to prevent timer from shutting down", ex);
}
}
}, TICK_DELAY, TICK_DELAY);
}
public static void stopTimer() {
if (gameTimer != null) {
gameTimer.cancel();
}
}
}
| SnakeTimer |
java | spring-projects__spring-boot | build-plugin/spring-boot-maven-plugin/src/intTest/projects/aot-arguments/src/main/java/org/test/SampleApplication.java | {
"start": 900,
"end": 1031
} | class ____ {
public static void main(String[] args) {
SpringApplication.run(SampleApplication.class, args);
}
}
| SampleApplication |
java | apache__camel | components/camel-braintree/src/generated/java/org/apache/camel/component/braintree/internal/PaymentMethodGatewayApiMethod.java | {
"start": 653,
"end": 2641
} | enum ____ implements ApiMethod {
CREATE(
com.braintreegateway.Result.class,
"create",
arg("request", com.braintreegateway.PaymentMethodRequest.class)),
DELETE(
com.braintreegateway.Result.class,
"delete",
arg("token", String.class)),
DELETE_1(
com.braintreegateway.Result.class,
"delete",
arg("token", String.class),
arg("deleteRequest", com.braintreegateway.PaymentMethodDeleteRequest.class)),
FIND(
com.braintreegateway.PaymentMethod.class,
"find",
arg("token", String.class)),
GRANT(
com.braintreegateway.Result.class,
"grant",
arg("token", String.class)),
GRANT_1(
com.braintreegateway.Result.class,
"grant",
arg("token", String.class),
arg("grantRequest", com.braintreegateway.PaymentMethodGrantRequest.class)),
REVOKE(
com.braintreegateway.Result.class,
"revoke",
arg("token", String.class)),
UPDATE(
com.braintreegateway.Result.class,
"update",
arg("token", String.class),
arg("request", com.braintreegateway.PaymentMethodRequest.class));
private final ApiMethod apiMethod;
PaymentMethodGatewayApiMethod(Class<?> resultType, String name, ApiMethodArg... args) {
this.apiMethod = new ApiMethodImpl(PaymentMethodGateway.class, resultType, name, args);
}
@Override
public String getName() { return apiMethod.getName(); }
@Override
public Class<?> getResultType() { return apiMethod.getResultType(); }
@Override
public List<String> getArgNames() { return apiMethod.getArgNames(); }
@Override
public List<String> getSetterArgNames() { return apiMethod.getSetterArgNames(); }
@Override
public List<Class<?>> getArgTypes() { return apiMethod.getArgTypes(); }
@Override
public Method getMethod() { return apiMethod.getMethod(); }
}
| PaymentMethodGatewayApiMethod |
java | apache__kafka | clients/src/main/java/org/apache/kafka/common/record/MemoryRecords.java | {
"start": 18340,
"end": 19266
} | enum ____ {
DELETE, // Delete the batch without inspecting records
RETAIN_EMPTY, // Retain the batch even if it is empty
DELETE_EMPTY // Delete the batch if it is empty
}
/**
* Check whether the full batch can be discarded (i.e. whether we even need to
* check the records individually).
*/
protected abstract BatchRetentionResult checkBatchRetention(RecordBatch batch);
/**
* Check whether a record should be retained in the log. Note that {@link #checkBatchRetention(RecordBatch)}
* is used prior to checking individual record retention. Only records from batches which were not
* explicitly discarded with {@link BatchRetention#DELETE} will be considered.
*/
protected abstract boolean shouldRetainRecord(RecordBatch recordBatch, Record record);
}
public static | BatchRetention |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/mysql/createTable/MySqlCreateTableTest70.java | {
"start": 1022,
"end": 5500
} | class ____ extends MysqlTest {
@Test
public void test_one() throws Exception {
String sql = "CREATE TABLE `app_customer_license` ("
+ " `id` bigint(20) NOT NULL AUTO_INCREMENT ,"
+ " `created_by` varchar(50) CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL ,"
+ " `created_date` datetime NOT NULL ,"
+ " `last_modified_by` varchar(50) CHARACTER SET utf8 COLLATE utf8_general_ci NULL DEFAULT NULL ,"
+ " `last_modified_date` datetime NULL DEFAULT NULL ,"
+ " `version` bigint(20) NOT NULL ,"
+ " `device_id` varchar(20) CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL ,"
+ " `customer_info` bigint(20) NULL DEFAULT NULL ,"
+ " PRIMARY KEY (`id`),"
+ " FOREIGN KEY (`customer_info`) REFERENCES `app_customer_info` (`id`) ON DELETE RESTRICT ON UPDATE RESTRICT,"
+ " INDEX `fk_app_customer_info_id` (`customer_info`) USING BTREE,"
+ " UNIQUE `idx_app_customer_license_deviceId` (`device_id`) USING BTREE"
+ ")"
+ "ENGINE=InnoDB "
+ "DEFAULT CHARACTER SET=utf8 COLLATE=utf8_general_ci "
+ "AUTO_INCREMENT=1 "
+ "ROW_FORMAT=DYNAMIC "
+ ";;";
MySqlStatementParser parser = new MySqlStatementParser(sql);
SQLStatement stmt = parser.parseCreateTable();
MySqlSchemaStatVisitor visitor = new MySqlSchemaStatVisitor();
stmt.accept(visitor);
Column column = visitor.getColumn("app_customer_license", "version");
assertNotNull(column);
assertEquals("bigint", column.getDataType());
{
String output = SQLUtils.toMySqlString(stmt);
assertEquals("CREATE TABLE `app_customer_license` ("
+ "\n\t`id` bigint(20) NOT NULL AUTO_INCREMENT,"
+ "\n\t`created_by` varchar(50) CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL,"
+ "\n\t`created_date` datetime NOT NULL,"
+ "\n\t`last_modified_by` varchar(50) CHARACTER SET utf8 COLLATE utf8_general_ci NULL DEFAULT NULL,"
+ "\n\t`last_modified_date` datetime NULL DEFAULT NULL,"
+ "\n\t`version` bigint(20) NOT NULL,"
+ "\n\t`device_id` varchar(20) CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL,"
+ "\n\t`customer_info` bigint(20) NULL DEFAULT NULL,"
+ "\n\tPRIMARY KEY (`id`),"
+ "\n\tFOREIGN KEY (`customer_info`) REFERENCES `app_customer_info` (`id`) ON DELETE RESTRICT ON UPDATE RESTRICT,"
+ "\n\tINDEX `fk_app_customer_info_id` USING BTREE(`customer_info`),"
+ "\n\tUNIQUE `idx_app_customer_license_deviceId` USING BTREE (`device_id`)"
+ "\n) ENGINE = InnoDB CHARACTER SET = utf8 COLLATE = utf8_general_ci AUTO_INCREMENT = 1 ROW_FORMAT = DYNAMIC", output);
}
{
String output = SQLUtils.toMySqlString(stmt, SQLUtils.DEFAULT_LCASE_FORMAT_OPTION);
assertEquals("create table `app_customer_license` ("
+ "\n\t`id` bigint(20) not null auto_increment,"
+ "\n\t`created_by` varchar(50) character set utf8 collate utf8_general_ci not null,"
+ "\n\t`created_date` datetime not null,"
+ "\n\t`last_modified_by` varchar(50) character set utf8 collate utf8_general_ci null default null,"
+ "\n\t`last_modified_date` datetime null default null,"
+ "\n\t`version` bigint(20) not null,"
+ "\n\t`device_id` varchar(20) character set utf8 collate utf8_general_ci not null,"
+ "\n\t`customer_info` bigint(20) null default null,"
+ "\n\tprimary key (`id`),"
+ "\n\tforeign key (`customer_info`) references `app_customer_info` (`id`) on delete restrict on update restrict,"
+ "\n\tindex `fk_app_customer_info_id` using BTREE(`customer_info`),"
+ "\n\tunique `idx_app_customer_license_deviceId` using BTREE (`device_id`)"
+ "\n) engine = InnoDB character set = utf8 collate = utf8_general_ci auto_increment = 1 row_format = DYNAMIC", output);
}
}
}
| MySqlCreateTableTest70 |
java | processing__processing4 | app/src/processing/app/ui/Editor.java | {
"start": 1924,
"end": 14047
} | class ____ extends JFrame implements RunnerListener {
protected Base base;
protected EditorState state;
protected Mode mode;
// There may be certain gutter sizes that cause text bounds
// inside the console to be calculated incorrectly.
// 45 seems to work but change with caution. [sampottinger 191107]
static public final int LEFT_GUTTER = Toolkit.zoom(45);
static public final int RIGHT_GUTTER = Toolkit.zoom(12);
static public final int GUTTER_MARGIN = Toolkit.zoom(5);
protected MarkerColumn errorColumn;
// Otherwise, if the window is resized with the message label
// set to blank, its preferredSize() will be fuckered
static protected final String EMPTY =
" " +
" " +
" ";
private PageFormat pageFormat;
private PrinterJob printerJob;
// File and sketch menus for re-inserting items
private JMenu fileMenu;
private JMenu sketchMenu;
protected JPanel spacer = new JPanel();
protected EditorHeader header;
protected EditorToolbar toolbar;
protected JEditTextArea textarea;
protected EditorStatus status;
protected JSplitPane splitPane;
protected EditorFooter footer;
protected EditorConsole console;
protected ErrorTable errorTable;
// currently opened program
protected Sketch sketch;
// runtime information and window placement
private Point sketchWindowLocation;
// undo fellers
private JMenuItem undoItem, redoItem;
protected UndoAction undoAction;
protected RedoAction redoAction;
protected CutAction cutAction;
protected CopyAction copyAction;
protected CopyAsHtmlAction copyAsHtmlAction;
protected PasteAction pasteAction;
/** Menu Actions updated on the opening of the edit menu. */
protected List<UpdatableAction> editMenuUpdatable = new ArrayList<>();
protected FindNextAction findNextAction;
protected FindPreviousAction findPreviousAction;
/** The currently selected tab's undo manager and caret positions*/
private UndoManager undo;
// maintain caret position during undo operations
private Stack<Integer> caretUndoStack = new Stack<>();
private Stack<Integer> caretRedoStack = new Stack<>();
// Used internally for every edit. Groups hot key event text manipulations
// and multi-character inputs into a single undo objects.
private CompoundEdit compoundEdit;
// timer to decide when to group characters into an undo
private final Timer timer;
private TimerTask endUndoEvent;
// true if inserting text, false if removing text
private boolean isInserting;
private FindReplace find;
JMenu toolsMenu;
JMenu modePopup;
JMenu developMenu;
protected List<Problem> problems = Collections.emptyList();
protected Editor(final Base base, String path, final EditorState state,
final Mode mode) throws EditorException {
super("Processing", state.getConfig());
this.base = base;
this.state = state;
this.mode = mode;
// Make sure Base.getActiveEditor() never returns null
base.checkFirstEditor(this);
// This is a Processing window. Get rid of that ugly ass coffee cup.
Toolkit.setIcon(this);
// add listener to handle window close box hit event
addWindowListener(new WindowAdapter() {
public void windowClosing(WindowEvent e) {
base.handleClose(Editor.this, false);
}
});
// don't close the window when clicked, the app will take care
// of that via the handleQuitInternal() methods
// https://download.processing.org/bugzilla/440.html
setDefaultCloseOperation(WindowConstants.DO_NOTHING_ON_CLOSE);
// When bringing a window to front, let the Base know
addWindowListener(new WindowAdapter() {
public void windowActivated(WindowEvent e) {
base.handleActivated(Editor.this);
fileMenu.insert(Recent.getMenu(), 2);
Toolkit.setMenuMnemsInside(fileMenu);
mode.insertImportMenu(sketchMenu);
Toolkit.setMenuMnemsInside(sketchMenu);
mode.insertToolbarRecentMenu();
}
public void windowDeactivated(WindowEvent e) {
// TODO call handleActivated(null)? or do we run the risk of the
// deactivate call for old window being called after the activate?
fileMenu.remove(Recent.getMenu());
mode.removeImportMenu(sketchMenu);
mode.removeToolbarRecentMenu();
}
});
timer = new Timer();
buildMenuBar();
JPanel contentPain = new JPanel();
setContentPane(contentPain);
contentPain.setLayout(new BorderLayout());
Box box = Box.createVerticalBox();
Box upper = Box.createVerticalBox();
if(Platform.isMacOS() && SystemInfo.isMacFullWindowContentSupported) {
getRootPane().putClientProperty( "apple.awt.fullWindowContent", true );
getRootPane().putClientProperty( "apple.awt.transparentTitleBar", true );
spacer.setPreferredSize(new Dimension(1, Toolkit.zoom(18)));
spacer.setMinimumSize(new Dimension(1, Toolkit.zoom(18)));
spacer.setAlignmentX(Component.LEFT_ALIGNMENT);
box.add(spacer);
}
rebuildModePopup();
toolbar = createToolbar();
upper.add(toolbar);
header = createHeader();
upper.add(header);
textarea = createTextArea();
textarea.setRightClickPopup(new TextAreaPopup());
textarea.setHorizontalOffset(JEditTextArea.leftHandGutter);
{ // Hack: add Numpad Slash as alternative shortcut for Comment/Uncomment
KeyStroke keyStroke =
KeyStroke.getKeyStroke(KeyEvent.VK_DIVIDE, Toolkit.SHORTCUT_KEY_MASK);
final String ACTION_KEY = "COMMENT_UNCOMMENT_ALT";
textarea.getInputMap().put(keyStroke, ACTION_KEY);
textarea.getActionMap().put(ACTION_KEY, new AbstractAction() {
@Override
public void actionPerformed(ActionEvent e) {
handleCommentUncomment();
}
});
}
textarea.addCaretListener(e -> updateEditorStatus());
footer = createFooter();
// build the central panel with the text area & error marker column
JPanel editorPanel = new JPanel(new BorderLayout());
errorColumn = new MarkerColumn(this, textarea.getMinimumSize().height);
editorPanel.add(errorColumn, BorderLayout.EAST);
textarea.setBounds(0, 0, errorColumn.getX() - 1, textarea.getHeight());
editorPanel.add(textarea);
upper.add(editorPanel);
splitPane = new JSplitPane(JSplitPane.VERTICAL_SPLIT, upper, footer);
// disable this because it hides the message area
// https://github.com/processing/processing/issues/784
splitPane.setOneTouchExpandable(false);
// repaint child panes while resizing
splitPane.setContinuousLayout(true);
// if window increases in size, give all the increase to
// the textarea in the upper pane
splitPane.setResizeWeight(1D);
// remove any ugly borders added by PLAFs (doesn't fix everything)
splitPane.setBorder(null);
// remove an ugly border around anything in a SplitPane !$*&!%
UIManager.getDefaults().put("SplitPane.border", BorderFactory.createEmptyBorder());
// set the height per our gui design
splitPane.setDividerSize(EditorStatus.HIGH);
// override the look of the SplitPane so that it's identical across OSes
splitPane.setUI(new BasicSplitPaneUI() {
public BasicSplitPaneDivider createDefaultDivider() {
status = new EditorStatus(this, Editor.this);
return status;
}
@Override
public void finishDraggingTo(int location) {
super.finishDraggingTo(location);
// JSplitPane issue: if you only make the lower component visible at
// the last minute, its minimum size is ignored.
if (location > splitPane.getMaximumDividerLocation()) {
splitPane.setDividerLocation(splitPane.getMaximumDividerLocation());
}
}
});
box.add(splitPane);
contentPain.add(box);
// end an undo-chunk any time the caret moves unless it's when text is edited
textarea.addCaretListener(new CaretListener() {
String lastText = textarea.getText();
public void caretUpdate(CaretEvent e) {
String newText = textarea.getText();
if (lastText.equals(newText) && isDirectEdit() && !textarea.isOverwriteEnabled()) {
endTextEditHistory();
}
lastText = newText;
}
});
textarea.addKeyListener(toolbar);
contentPain.setTransferHandler(new FileDropHandler());
// set all fonts and colors
updateTheme();
// Finish preparing Editor
pack();
// Set the window bounds and the divider location before setting it visible
state.apply(this);
// Set the minimum size for the editor window
int minWidth =
Toolkit.zoom(Preferences.getInteger("editor.window.width.min"));
int minHeight =
Toolkit.zoom(Preferences.getInteger("editor.window.height.min"));
setMinimumSize(new Dimension(minWidth, minHeight));
// Bring back the general options for the editor
applyPreferences();
// Make textField get the focus whenever frame is activated.
// http://download.oracle.com/javase/tutorial/uiswing/misc/focus.html
// May not be necessary, but helps avoid random situations with
// the editor not being able to request its own focus.
addWindowFocusListener(new WindowAdapter() {
public void windowGainedFocus(WindowEvent e) {
textarea.requestFocusInWindow();
}
});
// TODO: Subclasses can't initialize anything before Doc Open happens since
// super() has to be the first line in subclass constructor; we might
// want to keep constructor light and call methods later [jv 160318]
// Open the document that was passed in
handleOpenInternal(path);
// Add a window listener to watch for changes to the files in the sketch
addWindowFocusListener(new ChangeDetector(this));
// Enable window resizing (which allows for full screen button)
setResizable(true);
{
// Move Lines Keyboard Shortcut (Alt + Arrow Up/Down)
KeyStroke moveUpKeyStroke = KeyStroke.getKeyStroke(KeyEvent.VK_UP, InputEvent.ALT_DOWN_MASK);
final String MOVE_UP_ACTION_KEY = "moveLinesUp";
textarea.getInputMap(JComponent.WHEN_FOCUSED).put(moveUpKeyStroke, MOVE_UP_ACTION_KEY);
textarea.getActionMap().put(MOVE_UP_ACTION_KEY, new AbstractAction() {
@Override
public void actionPerformed(ActionEvent e) {
handleMoveLines(true);
}
});
KeyStroke moveDownKeyStroke = KeyStroke.getKeyStroke(KeyEvent.VK_DOWN, InputEvent.ALT_DOWN_MASK);
final String MOVE_DOWN_ACTION_KEY = "moveLinesDown";
textarea.getInputMap(JComponent.WHEN_FOCUSED).put(moveDownKeyStroke, MOVE_DOWN_ACTION_KEY);
textarea.getActionMap().put(MOVE_DOWN_ACTION_KEY, new AbstractAction() {
@Override
public void actionPerformed(ActionEvent e) {
handleMoveLines(false);
}
});
}
}
/**
 * Build the main code-editing text area, configured with the PDE text-area
 * defaults and an input handler bound to this editor instance.
 */
protected JEditTextArea createTextArea() {
  PdeTextAreaDefaults defaults = new PdeTextAreaDefaults();
  PdeInputHandler inputHandler = new PdeInputHandler(this);
  return new JEditTextArea(defaults, inputHandler);
}
/**
 * Create the footer area for this editor window with the console installed
 * as its first panel. Also initializes the {@code console} field so other
 * editor code can reference it.
 */
public EditorFooter createFooter() {
  EditorFooter footer = new EditorFooter(this);
  console = new EditorConsole(this);
  footer.addPanel(console, Language.text("editor.footer.console"), "/lib/footer/console");
  return footer;
}
/**
 * Add the error-listing table, wrapped in a borderless scroll pane, as a
 * panel on the given footer. Initializes the {@code errorTable} field.
 *
 * @param ef the footer that receives the errors panel
 */
public void addErrorTable(EditorFooter ef) {
  errorTable = new ErrorTable(this);
  JScrollPane scroller = new JScrollPane();
  scroller.setBorder(BorderFactory.createEmptyBorder());
  scroller.setViewportView(errorTable);
  ef.addPanel(scroller, Language.text("editor.footer.errors"), "/lib/footer/error");
}
/** Expose the {@link EditorState} used to apply window bounds and divider location to this editor. */
public EditorState getEditorState() {
  return state;
}
/**
* Handles files dragged & dropped from the desktop and into the editor
* window. Dragging files into the editor window is the same as using
* "Sketch → Add File" for each file.
*/
| Editor |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/h2/H2_InsertTest_0.java | {
"start": 305,
"end": 642
} | class ____ {
@Test
public void test_insertSet() {
String sql = "insert into tb1 set name='n1',age=12,date='1990-11-11 12:12:12'";
H2StatementParser parser = new H2StatementParser(sql);
List<SQLStatement> stmtList = parser.parseStatementList();
assertEquals(1, stmtList.size());
}
}
| H2_InsertTest_0 |
java | reactor__reactor-core | reactor-core/src/main/java/reactor/core/publisher/Operators.java | {
"start": 5405,
"end": 56675
} | enum ____ implements Fuseable.QueueSubscription so operators expecting a
* QueueSubscription from a Fuseable source don't have to double-check their Subscription
* received in onSubscribe.
*
* @return a singleton noop {@link Subscription}
*/
public static Subscription emptySubscription() {
    // Shared stateless no-op instance; per the doc above it also implements
    // Fuseable.QueueSubscription so fuseable operators can accept it without a second check.
    return EmptySubscription.INSTANCE;
}
/**
* Check whether the provided {@link Subscription} is the one used to satisfy Spec's §1.9 rule
* before signalling an error.
*
* @param subscription the subscription to test.
* @return true if passed subscription is a subscription created in {@link #reportThrowInSubscribe(CoreSubscriber, Throwable)}.
*/
public static boolean canAppearAfterOnSubscribe(Subscription subscription) {
    // Identity comparison on purpose: only the marker instance handed out by
    // reportThrowInSubscribe qualifies, not equal-but-distinct subscriptions.
    return subscription == EmptySubscription.FROM_SUBSCRIBE_INSTANCE;
}
/**
* Calls onSubscribe on the target Subscriber with the empty instance followed by a call to onError with the
* supplied error.
*
* @param s target Subscriber to error
* @param e the actual error
*/
public static void error(Subscriber<?> s, Throwable e) {
    // onSubscribe must precede any terminal signal (Reactive Streams rule 1.9),
    // so a no-op subscription is installed before the error is delivered.
    s.onSubscribe(EmptySubscription.INSTANCE);
    s.onError(e);
}
/**
* Report a {@link Throwable} that was thrown from a call to {@link Publisher#subscribe(Subscriber)},
* attempting to notify the {@link Subscriber} by:
* <ol>
* <li>providing a special {@link Subscription} via {@link Subscriber#onSubscribe(Subscription)}</li>
* <li>immediately delivering an {@link Subscriber#onError(Throwable) onError} signal after that</li>
* </ol>
* <p>
* As at that point the subscriber MAY have already been provided with a {@link Subscription}, we
* assume most well formed subscribers will ignore this second {@link Subscription} per Reactive
* Streams rule 1.9. Subscribers that don't usually ignore may recognize this special case and ignore
* it by checking {@link #canAppearAfterOnSubscribe(Subscription)}.
* <p>
* Note that if the {@link Subscriber#onSubscribe(Subscription) onSubscribe} attempt throws,
* {@link Exceptions#throwIfFatal(Throwable) fatal} exceptions are thrown. Other exceptions
* are added as {@link Throwable#addSuppressed(Throwable) suppressed} on the original exception,
* which is then directly notified as an {@link Subscriber#onError(Throwable) onError} signal
* (again assuming that such exceptions occur because a {@link Subscription} is already set).
*
* @param subscriber the {@link Subscriber} being subscribed when the error happened
* @param e the {@link Throwable} that was thrown from {@link Publisher#subscribe(Subscriber)}
* @see #canAppearAfterOnSubscribe(Subscription)
*/
public static void reportThrowInSubscribe(CoreSubscriber<?> subscriber, Throwable e) {
    try {
        // Use the dedicated marker instance so subscribers can detect this special
        // second onSubscribe via canAppearAfterOnSubscribe(Subscription).
        subscriber.onSubscribe(EmptySubscription.FROM_SUBSCRIBE_INSTANCE);
    }
    catch (Throwable onSubscribeError) {
        // Fatal errors propagate immediately; anything else is kept attached to
        // the original error rather than replacing it.
        Exceptions.throwIfFatal(onSubscribeError);
        e.addSuppressed(onSubscribeError);
    }
    // Run the error through the operator-error hook before signalling it downstream.
    subscriber.onError(onOperatorError(e, subscriber.currentContext()));
}
/**
* Create a function that can be used to support a custom operator via
* {@link CoreSubscriber} decoration. The function is compatible with
* {@link Flux#transform(Function)}, {@link Mono#transform(Function)},
* {@link Hooks#onEachOperator(Function)} and {@link Hooks#onLastOperator(Function)},
* but requires that the original {@link Publisher} be {@link Scannable}.
* <p>
* This variant attempts to expose the {@link Publisher} as a {@link Scannable} for
* convenience of introspection. You should however avoid instanceof checks or any
* other processing that depends on identity of the {@link Publisher}, as it might
* get hidden if {@link Scannable#isScanAvailable()} returns {@code false}.
* Use {@link #liftPublisher(BiFunction)} instead for that kind of use case.
*
* @param lifter the bifunction taking {@link Scannable} from the enclosing
* publisher (assuming it is compatible) and consuming {@link CoreSubscriber}.
* It must return a receiving {@link CoreSubscriber} that will immediately subscribe
* to the applied {@link Publisher}.
*
* @param <I> the input type
* @param <O> the output type
*
* @return a new {@link Function}
* @see #liftPublisher(BiFunction)
*/
public static <I, O> Function<? super Publisher<I>, ? extends Publisher<O>> lift(BiFunction<Scannable, ? super CoreSubscriber<? super O>, ? extends CoreSubscriber<? super I>> lifter) {
    // Unconditional variant: null predicate means the lifter applies to every publisher.
    return LiftFunction.liftScannable(null, lifter);
}
/**
* Create a function that can be used to support a custom operator via
* {@link CoreSubscriber} decoration. The function is compatible with
* {@link Flux#transform(Function)}, {@link Mono#transform(Function)},
* {@link Hooks#onEachOperator(Function)} and {@link Hooks#onLastOperator(Function)},
* but requires that the original {@link Publisher} be {@link Scannable}.
* <p>
* This variant attempts to expose the {@link Publisher} as a {@link Scannable} for
* convenience of introspection. You should however avoid instanceof checks or any
* other processing that depends on identity of the {@link Publisher}, as it might
* get hidden if {@link Scannable#isScanAvailable()} returns {@code false}.
* Use {@link #liftPublisher(Predicate, BiFunction)} instead for that kind of use case.
*
* <p>
* The function will be invoked only if the passed {@link Predicate} matches.
* Therefore the transformed type O must be the same than the input type since
* unmatched predicate will return the applied {@link Publisher}.
*
* @param filter the predicate to match taking {@link Scannable} from the applied
* publisher to operate on. Assumes original is scan-compatible.
* @param lifter the bifunction taking {@link Scannable} from the enclosing
* publisher and consuming {@link CoreSubscriber}. It must return a receiving
* {@link CoreSubscriber} that will immediately subscribe to the applied
* {@link Publisher}. Assumes the original is scan-compatible.
*
* @param <O> the input and output type
*
* @return a new {@link Function}
* @see #liftPublisher(Predicate, BiFunction)
*/
public static <O> Function<? super Publisher<O>, ? extends Publisher<O>> lift(
        Predicate<Scannable> filter,
        BiFunction<Scannable, ? super CoreSubscriber<? super O>, ? extends CoreSubscriber<? super O>> lifter) {
    // Filtered variant: the lifter is only applied when the predicate matches (see javadoc).
    return LiftFunction.liftScannable(filter, lifter);
}
/**
* Create a function that can be used to support a custom operator via
* {@link CoreSubscriber} decoration. The function is compatible with
* {@link Flux#transform(Function)}, {@link Mono#transform(Function)},
* {@link Hooks#onEachOperator(Function)} and {@link Hooks#onLastOperator(Function)},
* and works with the raw {@link Publisher} as input, which is useful if you need to
* detect the precise type of the source (eg. instanceof checks to detect Mono, Flux,
* true Scannable, etc...).
*
* @param lifter the bifunction taking the raw {@link Publisher} and
* {@link CoreSubscriber}. The publisher can be double-checked (including with
* {@code instanceof}, and the function must return a receiving {@link CoreSubscriber}
* that will immediately subscribe to the {@link Publisher}.
*
* @param <I> the input type
* @param <O> the output type
*
* @return a new {@link Function}
*/
public static <I, O> Function<? super Publisher<I>, ? extends Publisher<O>> liftPublisher(
        BiFunction<Publisher, ? super CoreSubscriber<? super O>, ? extends CoreSubscriber<? super I>> lifter) {
    // Raw-Publisher variant (no Scannable wrapping), applied unconditionally.
    return LiftFunction.liftPublisher(null, lifter);
}
/**
* Create a function that can be used to support a custom operator via
* {@link CoreSubscriber} decoration. The function is compatible with
* {@link Flux#transform(Function)}, {@link Mono#transform(Function)},
* {@link Hooks#onEachOperator(Function)} and {@link Hooks#onLastOperator(Function)},
* and works with the raw {@link Publisher} as input, which is useful if you need to
* detect the precise type of the source (eg. instanceof checks to detect Mono, Flux,
* true Scannable, etc...).
*
* <p>
* The function will be invoked only if the passed {@link Predicate} matches.
* Therefore the transformed type O must be the same than the input type since
* unmatched predicate will return the applied {@link Publisher}.
*
* @param filter the {@link Predicate} that the raw {@link Publisher} must pass for
* the transformation to occur
* @param lifter the {@link BiFunction} taking the raw {@link Publisher} and
* {@link CoreSubscriber}. The publisher can be double-checked (including with
* {@code instanceof}, and the function must return a receiving {@link CoreSubscriber}
* that will immediately subscribe to the {@link Publisher}.
*
* @param <O> the input and output type
*
* @return a new {@link Function}
*/
public static <O> Function<? super Publisher<O>, ? extends Publisher<O>> liftPublisher(
        Predicate<Publisher> filter,
        BiFunction<Publisher, ? super CoreSubscriber<? super O>, ? extends CoreSubscriber<? super O>> lifter) {
    // Raw-Publisher variant gated by the predicate; unmatched publishers pass through unchanged.
    return LiftFunction.liftPublisher(filter, lifter);
}
/**
* Cap a multiplication to Long.MAX_VALUE
*
* @param a left operand
* @param b right operand
*
* @return Product result or Long.MAX_VALUE if overflow
*/
/**
 * Multiply two (non-negative) operands, capping the result at Long.MAX_VALUE on overflow.
 *
 * @param a left operand
 * @param b right operand
 * @return the product, or Long.MAX_VALUE if the multiplication overflows
 */
public static long multiplyCap(long a, long b) {
    // Guard zero operands explicitly: the overflow check below divides by 'a',
    // which previously threw ArithmeticException for a == 0 combined with a
    // 'b' large enough (>= 2^31) to enter the check.
    if (a == 0 || b == 0) {
        return 0;
    }
    long u = a * b;
    // Fast pre-filter: overflow is impossible when both operands fit in 31 bits,
    // so the division-based verification only runs for large inputs.
    if (((a | b) >>> 31) != 0) {
        if (u / a != b) {
            return Long.MAX_VALUE;
        }
    }
    return u;
}
/**
* Create an adapter for local onDiscard hooks that check the element
* being discarded is of a given {@link Class}. The resulting {@link Function} adds the
* hook to the {@link Context}, potentially chaining it to an existing hook in the {@link Context}.
*
* @param type the type of elements to take into account
* @param discardHook the discarding handler for this type of elements
* @param <R> element type
* @return a {@link Function} that can be used to modify a {@link Context}, adding or
* updating a context-local discard hook.
*/
static final <R> Function<Context, Context> discardLocalAdapter(Class<R> type, Consumer<? super R> discardHook) {
    Objects.requireNonNull(type, "onDiscard must be based on a type");
    Objects.requireNonNull(discardHook, "onDiscard must be provided a discardHook Consumer");
    // Type-filtering wrapper: only instances of the requested class reach the user hook,
    // everything else is silently ignored.
    final Consumer<Object> safeConsumer = obj -> {
        if (type.isInstance(obj)) {
            discardHook.accept(type.cast(obj));
        }
    };
    return ctx -> {
        Consumer<Object> consumer = ctx.getOrDefault(Hooks.KEY_ON_DISCARD, null);
        if (consumer == null) {
            // First hook registered under the key.
            return ctx.put(Hooks.KEY_ON_DISCARD, safeConsumer);
        }
        else {
            // Chain with any pre-existing hook; the new hook runs first, then the old one.
            return ctx.put(Hooks.KEY_ON_DISCARD, safeConsumer.andThen(consumer));
        }
    };
}
/**
* Utility method to activate the onDiscard feature (see {@link Flux#doOnDiscard(Class, Consumer)})
* in a target {@link Context}. Prefer using the {@link Flux} API, and reserve this for
* testing purposes.
*
* @param target the original {@link Context}
* @param discardConsumer the consumer that will be used to cleanup discarded elements
* @return a new {@link Context} that holds (potentially combined) cleanup {@link Consumer}
*/
public static final Context enableOnDiscard(@Nullable Context target, Consumer<?> discardConsumer) {
    Objects.requireNonNull(discardConsumer, "discardConsumer must be provided");
    if (target == null) {
        // No base context supplied: build a fresh one holding only the discard hook.
        return Context.of(Hooks.KEY_ON_DISCARD, discardConsumer);
    }
    // NOTE: unlike discardLocalAdapter, this overwrites any hook already stored
    // under the key instead of chaining — acceptable for the testing use case.
    return target.put(Hooks.KEY_ON_DISCARD, discardConsumer);
}
/**
* Invoke a (local or global) hook that processes elements that get discarded. This
* includes elements that are dropped (for malformed sources), but also filtered out
* (eg. not passing a {@code filter()} predicate).
* <p>
* For elements that are buffered or enqueued, but subsequently discarded due to
* cancellation or error, see {@link #onDiscardMultiple(Stream, Context)} and
* {@link #onDiscardQueueWithClear(Queue, Context, Function)}.
*
* @param element the element that is being discarded
* @param context the context in which to look for a local hook
* @param <T> the type of the element
* @see #onDiscardMultiple(Stream, Context)
* @see #onDiscardMultiple(Collection, Context)
* @see #onDiscardQueueWithClear(Queue, Context, Function)
*/
/**
 * Pass a single discarded element to the context-local discard hook. A no-op
 * when either the element is null or no hook is registered; hook failures are
 * logged at WARN and never rethrown.
 */
public static <T> void onDiscard(@Nullable T element, Context context) {
    Consumer<Object> discardHook = context.getOrDefault(Hooks.KEY_ON_DISCARD, null);
    if (discardHook == null || element == null) {
        return;
    }
    try {
        discardHook.accept(element);
    }
    catch (Throwable t) {
        log.warn("Error in discard hook", t);
    }
}
/**
* Invoke a (local or global) hook that processes elements that get discarded
* en masse after having been enqueued, due to cancellation or error. This method
* also empties the {@link Queue} (either by repeated {@link Queue#poll()} calls if
* a hook is defined, or by {@link Queue#clear()} as a shortcut if no hook is defined).
*
* @param queue the queue that is being discarded and cleared
* @param context the context in which to look for a local hook
* @param extract an optional extractor method for cases where the queue doesn't
* directly contain the elements to discard
* @param <T> the type of the element
* @see #onDiscardMultiple(Stream, Context)
* @see #onDiscardMultiple(Collection, Context)
* @see #onDiscard(Object, Context)
*/
public static <T> void onDiscardQueueWithClear(
        @Nullable Queue<T> queue,
        Context context,
        @Nullable Function<T, Stream<?>> extract) {
    if (queue == null) {
        return;
    }
    Consumer<Object> hook = context.getOrDefault(Hooks.KEY_ON_DISCARD, null);
    if (hook == null) {
        // No hook registered: clear() is the cheap path, no per-element work needed.
        queue.clear();
        return;
    }
    try {
        // Drain via poll() so each element is both removed and handed to the hook.
        for(;;) {
            T toDiscard = queue.poll();
            if (toDiscard == null) {
                break;
            }
            if (extract != null) {
                // Queue holds containers: expand each into the actual elements to discard.
                try {
                    extract.apply(toDiscard)
                           .forEach(elementToDiscard -> {
                               try {
                                   hook.accept(elementToDiscard);
                               }
                               catch (Throwable t) {
                                   // Per-element failure: log and keep going with the rest.
                                   log.warn("Error while discarding item extracted from a queue element, continuing with next item", t);
                               }
                           });
                }
                catch (Throwable t) {
                    // Extraction itself failed: skip this queue element, continue draining.
                    log.warn("Error while extracting items to discard from queue element, continuing with next queue element", t);
                }
            }
            else {
                try {
                    hook.accept(toDiscard);
                }
                catch (Throwable t) {
                    log.warn("Error while discarding a queue element, continuing with next queue element", t);
                }
            }
        }
    }
    catch (Throwable t) {
        // Failure outside the per-element guards (e.g. poll() itself): stop draining.
        log.warn("Cannot further apply discard hook while discarding and clearing a queue", t);
    }
}
/**
* Invoke a (local or global) hook that processes elements that get discarded en masse.
* This includes elements that are buffered but subsequently discarded due to
* cancellation or error.
*
* @param multiple the collection of elements to discard (possibly extracted from other
* collections/arrays/queues)
* @param context the {@link Context} in which to look for local hook
* @see #onDiscard(Object, Context)
* @see #onDiscardMultiple(Collection, Context)
* @see #onDiscardQueueWithClear(Queue, Context, Function)
*/
public static void onDiscardMultiple(Stream<?> multiple, Context context) {
    Consumer<Object> hook = context.getOrDefault(Hooks.KEY_ON_DISCARD, null);
    if (hook != null) {
        try {
            // Null elements are skipped; each surviving element gets its own
            // try/catch so one bad element doesn't abort the rest of the stream.
            multiple.filter(Objects::nonNull)
                    .forEach(v -> {
                        try {
                            hook.accept(v);
                        }
                        catch (Throwable t) {
                            log.warn("Error while discarding a stream element, continuing with next element", t);
                        }
                    });
        }
        catch (Throwable t) {
            // Failure in stream traversal itself: stop the whole discard pass.
            log.warn("Error while discarding stream, stopping", t);
        }
    }
}
/**
* Invoke a (local or global) hook that processes elements that get discarded en masse.
* This includes elements that are buffered but subsequently discarded due to
* cancellation or error.
*
* @param multiple the collection of elements to discard
* @param context the {@link Context} in which to look for local hook
* @see #onDiscard(Object, Context)
* @see #onDiscardMultiple(Stream, Context)
* @see #onDiscardQueueWithClear(Queue, Context, Function)
*/
/**
 * Feed every non-null element of the collection to the context-local discard hook.
 * Per-element hook failures are logged and iteration continues; a failure in the
 * iteration itself stops the discard pass (logged, never rethrown).
 */
public static void onDiscardMultiple(@Nullable Collection<?> multiple, Context context) {
    if (multiple == null) {
        return;
    }
    Consumer<Object> hook = context.getOrDefault(Hooks.KEY_ON_DISCARD, null);
    if (hook == null) {
        return;
    }
    try {
        if (multiple.isEmpty()) {
            return;
        }
        for (Object discarded : multiple) {
            if (discarded == null) {
                continue;
            }
            try {
                hook.accept(discarded);
            }
            catch (Throwable t) {
                log.warn("Error while discarding element from a Collection, continuing with next element", t);
            }
        }
    }
    catch (Throwable t) {
        log.warn("Error while discarding collection, stopping", t);
    }
}
/**
* Invoke a (local or global) hook that processes elements that remains in an {@link java.util.Iterator}.
* Since iterators can be infinite, this method requires that you explicitly ensure the iterator is
* {@code knownToBeFinite}. Typically, operating on an {@link Iterable} one can get such a
* guarantee by looking at the {@link Iterable#spliterator() Spliterator's} {@link Spliterator#getExactSizeIfKnown()}.
*
* @param multiple the {@link Iterator} whose remainder to discard
* @param knownToBeFinite is the caller guaranteeing that the iterator is finite and can be iterated over
* @param context the {@link Context} in which to look for local hook
* @see #onDiscard(Object, Context)
* @see #onDiscardMultiple(Collection, Context)
* @see #onDiscardQueueWithClear(Queue, Context, Function)
*/
public static void onDiscardMultiple(@Nullable Iterator<?> multiple, boolean knownToBeFinite, Context context) {
    if (multiple == null) return;
    // Caller must vouch for finiteness: an infinite iterator would hang forEachRemaining.
    if (!knownToBeFinite) return;
    Consumer<Object> hook = context.getOrDefault(Hooks.KEY_ON_DISCARD, null);
    if (hook != null) {
        try {
            multiple.forEachRemaining(o -> {
                if (o != null) {
                    try {
                        hook.accept(o);
                    }
                    catch (Throwable t) {
                        // One bad element doesn't abort the remainder.
                        log.warn("Error while discarding element from an Iterator, continuing with next element", t);
                    }
                }
            });
        }
        catch (Throwable t) {
            // Traversal failure: stop discarding entirely.
            log.warn("Error while discarding Iterator, stopping", t);
        }
    }
}
/**
* Invoke a (local or global) hook that processes elements that remains in an {@link java.util.Spliterator}.
* Since spliterators can be infinite, this method requires that you explicitly ensure the spliterator is
* {@code knownToBeFinite}. Typically, one can get such a guarantee by looking at the {@link Spliterator#getExactSizeIfKnown()}.
*
* @param multiple the {@link Spliterator} whose remainder to discard
* @param knownToBeFinite is the caller guaranteeing that the iterator is finite and can be iterated over
* @param context the {@link Context} in which to look for local hook
* @see #onDiscard(Object, Context)
* @see #onDiscardMultiple(Collection, Context)
* @see #onDiscardQueueWithClear(Queue, Context, Function)
*/
public static void onDiscardMultiple(@Nullable Spliterator<?> multiple, boolean knownToBeFinite, Context context) {
    if (multiple == null) return;
    // Caller must vouch for finiteness: an infinite spliterator would hang forEachRemaining.
    if (!knownToBeFinite) return;
    Consumer<Object> hook = context.getOrDefault(Hooks.KEY_ON_DISCARD, null);
    if (hook != null) {
        try {
            multiple.forEachRemaining(o -> {
                if (o != null) {
                    try {
                        hook.accept(o);
                    }
                    catch (Throwable t) {
                        // NOTE(review): "an Spliterator" is a grammar typo in this log message;
                        // left untouched in case log output is asserted on elsewhere.
                        log.warn("Error while discarding element from an Spliterator, continuing with next element", t);
                    }
                }
            });
        }
        catch (Throwable t) {
            log.warn("Error while discarding Spliterator, stopping", t);
        }
    }
}
/**
* An unexpected exception is about to be dropped.
* <p>
* If no hook is registered for {@link Hooks#onErrorDropped(Consumer)}, the dropped
* error is logged at ERROR level.
*
* @param e the dropped exception
* @param context a context that might hold a local error consumer
*/
/**
 * Route a dropped error to the context-local hook if present, otherwise to the
 * global hook; when neither is registered, log it at ERROR level instead.
 */
public static void onErrorDropped(Throwable e, Context context) {
    Consumer<? super Throwable> hook = context.getOrDefault(Hooks.KEY_ON_ERROR_DROPPED, null);
    if (hook == null) {
        hook = Hooks.onErrorDroppedHook;
    }
    if (hook != null) {
        hook.accept(e);
        return;
    }
    log.error("Operator called default onErrorDropped", e);
}
/**
* An unexpected event is about to be dropped.
* <p>
* If no hook is registered for {@link Hooks#onNextDropped(Consumer)}, the dropped
* element is just logged at DEBUG level.
*
* @param <T> the dropped value type
* @param t the dropped data
* @param context a context that might hold a local next consumer
*/
/**
 * Route a dropped onNext value to the context-local hook if present, otherwise
 * to the global hook; when neither is registered, log it at DEBUG level.
 * Both the value and the context must be non-null.
 */
public static <T> void onNextDropped(T t, Context context) {
    Objects.requireNonNull(t, "onNext");
    Objects.requireNonNull(context, "context");
    // Local (context) hook takes precedence over the global one.
    Consumer<Object> dropHook = context.getOrDefault(Hooks.KEY_ON_NEXT_DROPPED, null);
    if (dropHook == null) {
        dropHook = Hooks.onNextDroppedHook;
    }
    if (dropHook != null) {
        dropHook.accept(t);
        return;
    }
    if (log.isDebugEnabled()) {
        log.debug("onNextDropped: " + t);
    }
}
/**
* Map an "operator" error. The
* result error will be passed via onError to the operator downstream after
* checking for fatal error via
* {@link Exceptions#throwIfFatal(Throwable)}.
*
* @param error the callback or operator error
* @param context a context that might hold a local error consumer
* @return mapped {@link Throwable}
*
*/
public static Throwable onOperatorError(Throwable error, Context context) {
    // Delegate with no subscription to cancel.
    return onOperatorError(null, error, context);
}
/**
* Map an "operator" error given an operator parent {@link Subscription}. The
* result error will be passed via onError to the operator downstream.
* {@link Subscription} will be cancelled after checking for fatal error via
* {@link Exceptions#throwIfFatal(Throwable)}.
*
* @param subscription the linked operator parent {@link Subscription}
* @param error the callback or operator error
* @param context a context that might hold a local error consumer
* @return mapped {@link Throwable}
*
*/
public static Throwable onOperatorError(@Nullable Subscription subscription,
        Throwable error,
        Context context) {
    // Delegate with no data signal associated with the failure.
    return onOperatorError(subscription, error, null, context);
}
/**
* Map an "operator" error given an operator parent {@link Subscription}. The
* result error will be passed via onError to the operator downstream.
* {@link Subscription} will be cancelled after checking for fatal error via
* {@link Exceptions#throwIfFatal(Throwable)}. Takes an additional signal, which
* can be added as a suppressed exception if it is a {@link Throwable} and the
* default {@link Hooks#onOperatorError(BiFunction) hook} is in place.
*
* @param subscription the linked operator parent {@link Subscription}
* @param error the callback or operator error
* @param dataSignal the value (onNext or onError) signal processed during failure
* @param context a context that might hold a local error consumer
* @return mapped {@link Throwable}
*
*/
public static Throwable onOperatorError(@Nullable Subscription subscription,
        Throwable error,
        @Nullable Object dataSignal, Context context) {
    // Fatal errors are rethrown immediately, before the subscription is touched.
    Exceptions.throwIfFatal(error);
    if(subscription != null) {
        subscription.cancel();
    }
    Throwable t = Exceptions.unwrap(error);
    // Resolution order: context-local hook first, then the global hook.
    BiFunction<? super Throwable, @Nullable Object, ? extends Throwable> hook =
            context.getOrDefault(Hooks.KEY_ON_OPERATOR_ERROR, null);
    if (hook == null) {
        hook = Hooks.onOperatorErrorHook;
    }
    if (hook == null) {
        // Default behavior: attach a Throwable data signal as suppressed (unless it
        // IS the unwrapped error itself).
        if (dataSignal != null) {
            if (dataSignal != t && dataSignal instanceof Throwable) {
                t = Exceptions.addSuppressed(t, (Throwable) dataSignal);
            }
            //do not wrap original value to avoid strong references
            /*else {
            }*/
        }
        return t;
    }
    // Hook sees the ORIGINAL (possibly wrapped) error, not the unwrapped one.
    return hook.apply(error, dataSignal);
}
/**
* Return a wrapped {@link RejectedExecutionException} which can be thrown by the
* operator. This exception denotes that an execution was rejected by a
* {@link reactor.core.scheduler.Scheduler}, notably when it was already disposed.
* <p>
* Wrapping is done by calling both {@link Exceptions#failWithRejected(Throwable)} and
* {@link #onOperatorError(Subscription, Throwable, Object, Context)}.
*
* @param original the original execution error
* @param context a context that might hold a local error consumer
*
*/
public static RuntimeException onRejectedExecution(Throwable original, Context context) {
    // Delegate with no subscription, no suppressed error and no data signal.
    return onRejectedExecution(original, null, null, null, context);
}
/**
 * Resolve the {@link OnNextFailureStrategy} for the current context.
 * Resolution order: context-local entry (used directly if already a strategy,
 * otherwise adapted from the raw BiFunction), then the global hook, then STOP.
 */
static final OnNextFailureStrategy onNextErrorStrategy(Context context) {
    BiFunction<? super Throwable, Object, ? extends Throwable> fn = context.getOrDefault(
            OnNextFailureStrategy.KEY_ON_NEXT_ERROR_STRATEGY, null);
    OnNextFailureStrategy strategy;
    if (fn instanceof OnNextFailureStrategy) {
        strategy = (OnNextFailureStrategy) fn;
    }
    else if (fn != null) {
        // Plain BiFunction registered in the context: adapt it to the strategy contract.
        strategy = new OnNextFailureStrategy.LambdaOnNextErrorStrategy(fn);
    }
    else {
        strategy = Hooks.onNextErrorHook;
    }
    return strategy == null ? OnNextFailureStrategy.STOP : strategy;
}
public static final BiFunction<? super Throwable, Object, ? extends Throwable> onNextErrorFunction(Context context) {
    // OnNextFailureStrategy is itself a BiFunction, so the resolved strategy can be returned directly.
    return onNextErrorStrategy(context);
}
/**
* Find the {@link OnNextFailureStrategy} to apply to the calling operator (which could be a local
* error mode defined in the {@link Context}) and apply it. For poll(), prefer
* {@link #onNextPollError(Object, Throwable, Context)} as it returns a {@link RuntimeException}.
* <p>
* Cancels the {@link Subscription} and return a {@link Throwable} if errors are
* fatal for the error mode, in which case the operator should call onError with the
* returned error. On the contrary, if the error mode allows the sequence to
* continue, does not cancel the Subscription and returns {@code null}.
* <p>
* Typical usage pattern differs depending on the calling method:
* <ul>
* <li>{@code onNext}: check for a throwable return value and call
* {@link Subscriber#onError(Throwable)} if not null, otherwise perform a direct
* {@link Subscription#request(long) request(1)} on the upstream.</li>
*
* <li>{@code tryOnNext}: check for a throwable return value and call
* {@link Subscriber#onError(Throwable)} if not null, otherwise
* return {@code false} to indicate value was not consumed and more must be
* tried.</li>
*
* <li>any of the above where the error is going to be propagated through onError but the
* subscription shouldn't be cancelled: use {@link #onNextError(Object, Throwable, Context)} instead.</li>
*
* <li>{@code poll} (where the error will be thrown): use {@link #onNextPollError(Object, Throwable, Context)} instead.</li>
* </ul>
*
* @param value The onNext value that caused an error. Can be null.
* @param error The error.
* @param context The most significant {@link Context} in which to look for an {@link OnNextFailureStrategy}.
* @param subscriptionForCancel The mandatory {@link Subscription} that should be cancelled if the
* strategy is terminal. See also {@link #onNextError(Object, Throwable, Context)} and
* {@link #onNextPollError(Object, Throwable, Context)} for alternatives that don't cancel a subscription
* @param <T> The type of the value causing the error.
* @return a {@link Throwable} to propagate through onError if the strategy is
* terminal and cancelled the subscription, null if not.
*/
public static <T> @Nullable Throwable onNextError(@Nullable T value, Throwable error, Context context,
        Subscription subscriptionForCancel) {
    error = unwrapOnNextError(error);
    OnNextFailureStrategy strategy = onNextErrorStrategy(context);
    if (strategy.test(error, value)) {
        //some strategies could still return an exception, eg. if the consumer throws
        Throwable t = strategy.process(error, value, context);
        if (t != null) {
            // A non-null result means the strategy turned terminal: cancel upstream first.
            subscriptionForCancel.cancel();
        }
        // null here tells the caller to continue the sequence instead of erroring.
        return t;
    }
    else {
        //falls back to operator errors
        // onOperatorError cancels the subscription itself as part of its contract.
        return onOperatorError(subscriptionForCancel, error, value, context);
    }
}
/**
* Find the {@link OnNextFailureStrategy} to apply to the calling async operator (which could be
* a local error mode defined in the {@link Context}) and apply it.
* <p>
* This variant never cancels a {@link Subscription}. It returns a {@link Throwable} if the error is
* fatal for the error mode, in which case the operator should call onError with the
* returned error. On the contrary, if the error mode allows the sequence to
* continue, this method returns {@code null}.
*
* @param value The onNext value that caused an error.
* @param error The error.
* @param context The most significant {@link Context} in which to look for an {@link OnNextFailureStrategy}.
* @param <T> The type of the value causing the error.
* @return a {@link Throwable} to propagate through onError if the strategy is terminal, null if not.
* @see #onNextError(Object, Throwable, Context, Subscription)
*/
public static <T> @Nullable Throwable onNextError(@Nullable T value, Throwable error, Context context) {
    error = unwrapOnNextError(error);
    OnNextFailureStrategy strategy = onNextErrorStrategy(context);
    if (strategy.test(error, value)) {
        //some strategies could still return an exception, eg. if the consumer throws
        // null return = sequence may continue; non-null = caller should onError with it.
        return strategy.process(error, value, context);
    }
    else {
        // Fall back to the operator-error path, without a subscription to cancel.
        return onOperatorError(null, error, value, context);
    }
}
/**
* Find the {@link OnNextFailureStrategy} to apply to the calling operator (which could be a local
* error mode defined in the {@link Context}) and apply it.
*
* @param error The error.
* @param context The most significant {@link Context} in which to look for an {@link OnNextFailureStrategy}.
* @param subscriptionForCancel The {@link Subscription} that should be cancelled if the
* strategy is terminal. Null to ignore (for poll, use {@link #onNextPollError(Object, Throwable, Context)}
* rather than passing null).
* @param <T> The type of the value causing the error.
* @return a {@link Throwable} to propagate through onError if the strategy is
* terminal and cancelled the subscription, null if not.
*/
public static <T> @Nullable Throwable onNextInnerError(Throwable error, Context context,
        @Nullable Subscription subscriptionForCancel) {
    error = unwrapOnNextError(error);
    OnNextFailureStrategy strategy = onNextErrorStrategy(context);
    // No value is associated with an inner error, hence the null second argument.
    if (strategy.test(error, null)) {
        //some strategies could still return an exception, eg. if the consumer throws
        Throwable t = strategy.process(error, null, context);
        if (t != null && subscriptionForCancel != null) {
            // Terminal outcome with a subscription supplied: cancel it before returning.
            subscriptionForCancel.cancel();
        }
        return t;
    }
    else {
        // Strategy doesn't apply: hand the (unwrapped) error back unchanged.
        return error;
    }
}
/**
 * Find the {@link OnNextFailureStrategy} to apply to the calling async operator (which could be
 * a local error mode defined in the {@link Context}) and apply it.
 * <p>
 * Returns a {@link RuntimeException} if the error is fatal for the error mode, in which
 * case the operator poll should throw the returned error. On the contrary if the
 * error mode allows the sequence to continue, returns {@code null} in which case
 * the operator should retry the {@link Queue#poll() poll()}.
 * <p>
 * Note that this method {@link Exceptions#propagate(Throwable) wraps} checked exceptions in order to
 * return a {@link RuntimeException} that can be thrown from an arbitrary method. If you don't want to
 * throw the returned exception and this wrapping behavior is undesirable, but you still don't want to
 * cancel a subscription, you can use {@link #onNextError(Object, Throwable, Context)} instead.
 *
 * @param value The onNext value that caused an error.
 * @param error The error.
 * @param context The most significant {@link Context} in which to look for an {@link OnNextFailureStrategy}.
 * @param <T> The type of the value causing the error.
 * @return a {@link RuntimeException} to be thrown (eg. within {@link Queue#poll()} if the error is terminal in
 * the strategy, null if not.
 * @see #onNextError(Object, Throwable, Context)
 */
public static <T> @Nullable RuntimeException onNextPollError(@Nullable T value, Throwable error, Context context) {
	Throwable unwrapped = unwrapOnNextError(error);
	OnNextFailureStrategy strategy = onNextErrorStrategy(context);
	if (!strategy.test(unwrapped, value)) {
		// Not handled by the strategy: route through the generic operator error hook and throw.
		return Exceptions.propagate(onOperatorError(null, unwrapped, value, context));
	}
	// The strategy matched, but it may still produce an exception (eg. a throwing consumer).
	Throwable processed = strategy.process(unwrapped, value, context);
	return processed == null ? null : Exceptions.propagate(processed);
}
/**
 * Applies the hooks registered with {@link Hooks#onLastOperator} and returns
 * {@link CorePublisher} ready to be subscribed on.
 *
 * @param source the original {@link CorePublisher}.
 * @param <T> the type of the value.
 * @return a {@link CorePublisher} to subscribe on.
 */
@SuppressWarnings("unchecked")
public static <T> CorePublisher<T> onLastAssembly(CorePublisher<T> source) {
	Function<Publisher, Publisher> hook = Hooks.onLastOperatorHook;
	if (hook != null) {
		Publisher<T> decorated = Objects.requireNonNull(hook.apply(source),"LastOperator hook returned null");
		// The hook may return an arbitrary Publisher: adapt it unless it is already a CorePublisher.
		return decorated instanceof CorePublisher
				? (CorePublisher<T>) decorated
				: new CorePublisherAdapter<>(decorated);
	}
	return source;
}
/**
 * Adapt an arbitrary {@link Publisher} to a reactor {@link CorePublisher},
 * preserving Mono-ness: a {@link Mono} source stays a Mono, anything else
 * becomes a {@link Flux}.
 *
 * @param publisher the source to adapt
 * @param <T> the value type
 * @return a {@link CorePublisher} view of the source
 */
public static <T> CorePublisher<T> toFluxOrMono(Publisher<T> publisher) {
	return publisher instanceof Mono
			? Mono.fromDirect(publisher)
			: Flux.from(publisher);
}
/**
 * In-place variant of {@link #toFluxOrMono(Publisher)}: adapt every non-null
 * element of the array to a {@link CorePublisher}, leaving null slots untouched.
 *
 * @param sources the array of publishers to adapt in place
 * @param <T> the value type
 */
public static <T> void toFluxOrMono(Publisher<? extends T>[] sources) {
	for (int idx = 0; idx < sources.length; idx++) {
		Publisher<? extends T> source = sources[idx];
		if (source != null) {
			sources[idx] = toFluxOrMono(source);
		}
	}
}
// Wrap the subscriber with thread-local restoration only when the publisher is
// considered external (per ContextPropagationSupport); internal publishers pass through.
static <T> CoreSubscriber<T> restoreContextOnSubscriberIfPublisherNonInternal(
		Publisher<?> publisher, CoreSubscriber<T> subscriber) {
	return ContextPropagationSupport.shouldWrapPublisher(publisher)
			? restoreContextOnSubscriber(publisher, subscriber)
			: subscriber;
}
// Wrap the subscriber with thread-local restoration only when automatic context
// propagation is globally enabled; otherwise return the subscriber untouched.
static <T> CoreSubscriber<? super T> restoreContextOnSubscriberIfAutoCPEnabled(
		Publisher<?> publisher, CoreSubscriber<? super T> subscriber) {
	return ContextPropagationSupport.shouldPropagateContextToThreadLocals()
			? restoreContextOnSubscriber(publisher, subscriber)
			: subscriber;
}
// Wrap the subscriber so the downstream Context's thread-locals are restored around
// signals. A fuseable publisher gets the fuseable wrapper so fusion is not broken.
static <T> CoreSubscriber<T> restoreContextOnSubscriber(Publisher<?> publisher, CoreSubscriber<T> subscriber) {
	Context downstreamContext = subscriber.currentContext();
	if (!(publisher instanceof Fuseable)) {
		return new FluxContextWriteRestoringThreadLocals.ContextWriteRestoringThreadLocalsSubscriber<>(
				subscriber, downstreamContext);
	}
	return new FluxContextWriteRestoringThreadLocalsFuseable.FuseableContextWriteRestoringThreadLocalsSubscriber<>(
			subscriber, downstreamContext);
}
// Bulk variant of restoreContextOnSubscriber: returns a new array where every
// subscriber has been wrapped; the input array is never mutated.
static <T> CoreSubscriber<? super T>[] restoreContextOnSubscribers(
		Publisher<?> publisher, CoreSubscriber<? super T>[] subscribers) {
	@SuppressWarnings("unchecked")
	CoreSubscriber<? super T>[] wrapped = new CoreSubscriber[subscribers.length];
	for (int idx = 0; idx < wrapped.length; idx++) {
		wrapped[idx] = restoreContextOnSubscriber(publisher, subscribers[idx]);
	}
	return wrapped;
}
// Unwrap a propagated error before handing it to an OnNextFailureStrategy.
// Bubbling errors are deliberately left wrapped so they keep bypassing hooks.
private static Throwable unwrapOnNextError(Throwable error) {
	if (Exceptions.isBubbling(error)) {
		return error;
	}
	return Exceptions.unwrap(error);
}
/**
 * Return a wrapped {@link RejectedExecutionException} which can be thrown by the
 * operator. This exception denotes that an execution was rejected by a
 * {@link reactor.core.scheduler.Scheduler}, notably when it was already disposed.
 * <p>
 * Wrapping is done by calling both {@link Exceptions#failWithRejected(Throwable)} and
 * {@link #onOperatorError(Subscription, Throwable, Object, Context)} (with the passed
 * {@link Subscription}).
 *
 * @param original the original execution error
 * @param subscription the subscription to pass to onOperatorError.
 * @param suppressed a Throwable to be suppressed by the {@link RejectedExecutionException} (or null if not relevant)
 * @param dataSignal a value to be passed to {@link #onOperatorError(Subscription, Throwable, Object, Context)} (or null if not relevant)
 * @param context a context that might hold a local error consumer
 */
public static RuntimeException onRejectedExecution(Throwable original,
		@Nullable Subscription subscription,
		@Nullable Throwable suppressed,
		@Nullable Object dataSignal,
		Context context) {
	// "Cheat" by promoting the dedicated onRejectedExecution handler (if any) to the
	// generic onOperatorError key so the standard error path picks it up below.
	if (context.hasKey(Hooks.KEY_ON_REJECTED_EXECUTION)) {
		context = context.put(Hooks.KEY_ON_OPERATOR_ERROR, context.get(Hooks.KEY_ON_REJECTED_EXECUTION));
	}
	// failWithRejected avoids double-wrapping reactor-produced REEs (singletons excepted).
	RejectedExecutionException ree = Exceptions.failWithRejected(original);
	if (suppressed != null) {
		ree.addSuppressed(suppressed);
	}
	Throwable resolved = dataSignal != null
			? Operators.onOperatorError(subscription, ree, dataSignal, context)
			: Operators.onOperatorError(subscription, ree, context);
	return Exceptions.propagate(resolved);
}
/**
 * Concurrent subtraction bound to 0, mostly used to decrement a request tracker by
 * the amount produced by the operator. Any concurrent write will "happen before"
 * this operation.
 *
 * @param <T> the parent instance type
 * @param updater current field updater
 * @param instance current instance to update
 * @param toSub delta to subtract
 * @return value after subtraction or zero
 */
public static <T> long produced(AtomicLongFieldUpdater<T> updater, T instance, long toSub) {
	for (;;) {
		long current = updater.get(instance);
		// 0 (no demand) and MAX_VALUE (unbounded) are terminal states: leave untouched.
		if (current == 0 || current == Long.MAX_VALUE) {
			return current;
		}
		long next = subOrZero(current, toSub);
		if (updater.compareAndSet(instance, current, next)) {
			return next;
		}
		// CAS lost: another thread updated the field, retry with the fresh value.
	}
}
/**
 * A generic utility to atomically replace a subscription or cancel the replacement
 * if the current subscription is marked as already cancelled (as in
 * {@link #cancelledSubscription()}).
 * <p>
 * Unlike {@link #set}, the previous subscription is NOT cancelled on replacement.
 *
 * @param field The Atomic container
 * @param instance the instance reference
 * @param s the subscription
 * @param <F> the instance type
 *
 * @return true if replaced
 */
public static <F> boolean replace(AtomicReferenceFieldUpdater<F, @Nullable Subscription> field,
		F instance,
		Subscription s) {
	Subscription current;
	do {
		current = field.get(instance);
		if (current == CancelledSubscription.INSTANCE) {
			// Already terminated: the incoming subscription must be cancelled, not stored.
			s.cancel();
			return false;
		}
	} while (!field.compareAndSet(instance, current, s));
	return true;
}
/**
 * Log an {@link IllegalArgumentException} if the request is null or negative.
 *
 * @param n the failing demand
 *
 * @see Exceptions#nullOrNegativeRequestException(long)
 */
public static void reportBadRequest(long n) {
	if (!log.isDebugEnabled()) {
		return;
	}
	log.debug("Negative request",
			Exceptions.nullOrNegativeRequestException(n));
}
/**
 * Log an {@link IllegalStateException} that indicates more than the requested
 * amount was produced.
 *
 * @see Exceptions#failWithOverflow()
 */
public static void reportMoreProduced() {
	if (!log.isDebugEnabled()) {
		return;
	}
	log.debug("More data produced than requested",
			Exceptions.failWithOverflow());
}
/**
 * Log a {@link Exceptions#duplicateOnSubscribeException() duplicate subscription} error.
 *
 * @see Exceptions#duplicateOnSubscribeException()
 */
public static void reportSubscriptionSet() {
	if (!log.isDebugEnabled()) {
		return;
	}
	log.debug("Duplicate Subscription has been detected",
			Exceptions.duplicateOnSubscribeException());
}
/**
 * Create a fuseable {@link Subscription} that emits a single constant value synchronously
 * to a Subscriber or consumer, once that subscriber requests it.
 *
 * @param subscriber the delegate {@link Subscriber} that will be requesting the value
 * @param value the single value to be emitted
 * @param <T> the value type
 * @return a new scalar {@link Subscription}
 */
public static <T> Subscription scalarSubscription(CoreSubscriber<? super T> subscriber,
		T value){
	return new ScalarSubscription<>(subscriber, value);
}
/**
 * Create a fuseable {@link Subscription} that emits a single constant value synchronously
 * to a Subscriber or consumer. Also give the subscription a user-defined {@code stepName}
 * for the purpose of {@link Scannable#stepName()}.
 *
 * @param subscriber the delegate {@link Subscriber} that will be requesting the value
 * @param value the single value to be emitted
 * @param stepName the {@link String} to represent the {@link Subscription} in {@link Scannable#stepName()}
 * @param <T> the value type
 * @return a new scalar {@link Subscription}
 */
public static <T> Subscription scalarSubscription(CoreSubscriber<? super T> subscriber,
		T value, String stepName){
	return new ScalarSubscription<>(subscriber, value, stepName);
}
/**
 * Safely gate a {@link Subscriber} by making sure onNext signals are delivered
 * sequentially (serialized).
 * Serialization uses thread-stealing and a potentially unbounded queue that might
 * starve a calling thread if races are too important and {@link Subscriber} is slower.
 *
 * <p>
 * <img class="marble" src="https://raw.githubusercontent.com/reactor/reactor-core/v3.1.3.RELEASE/src/docs/marble/serialize.png" alt="">
 *
 * @param <T> the relayed type
 * @param subscriber the subscriber to serialize
 * @return a serializing {@link Subscriber}
 */
public static <T> CoreSubscriber<T> serialize(CoreSubscriber<? super T> subscriber) {
	return new SerializedSubscriber<>(subscriber);
}
/**
 * A generic utility to atomically replace a subscription or cancel the replacement
 * if current subscription is marked as cancelled (as in {@link #cancelledSubscription()})
 * or was concurrently updated before.
 * <p>
 * The replaced subscription is itself cancelled.
 *
 * @param field The Atomic container
 * @param instance the instance reference
 * @param s the subscription
 * @param <F> the instance type
 *
 * @return true if replaced
 */
public static <F> boolean set(AtomicReferenceFieldUpdater<F, @Nullable Subscription> field,
		F instance,
		Subscription s) {
	Subscription previous;
	do {
		previous = field.get(instance);
		if (previous == CancelledSubscription.INSTANCE) {
			// Terminated: reject and cancel the incoming subscription.
			s.cancel();
			return false;
		}
	} while (!field.compareAndSet(instance, previous, s));
	// Successfully swapped in: the displaced subscription (if any) must be cancelled.
	if (previous != null) {
		previous.cancel();
	}
	return true;
}
/**
 * Sets the given subscription once and returns true if successful, false
 * if the field has a subscription already or has been cancelled.
 * <p>
 * If the field already has a subscription, it is cancelled and the duplicate
 * subscription is reported (see {@link #reportSubscriptionSet()}).
 *
 * @param <F> the instance type containing the field
 * @param field the field accessor
 * @param instance the parent instance
 * @param s the subscription to set once
 * @return true if successful, false if the target was not empty or has been cancelled
 */
public static <F> boolean setOnce(AtomicReferenceFieldUpdater<F, @Nullable Subscription> field, F instance, Subscription s) {
	Objects.requireNonNull(s, "subscription");
	// Fast-path pre-checks before attempting the CAS.
	Subscription a = field.get(instance);
	if (a == CancelledSubscription.INSTANCE) {
		// Already terminated: the incoming subscription must not leak, cancel it quietly.
		s.cancel();
		return false;
	}
	if (a != null) {
		// A subscription is already installed: this is a spec violation, report it.
		s.cancel();
		reportSubscriptionSet();
		return false;
	}
	if (field.compareAndSet(instance, null, s)) {
		return true;
	}
	// CAS lost: re-read to distinguish a concurrent cancel (silent) from a
	// concurrent duplicate subscription (reported).
	a = field.get(instance);
	if (a == CancelledSubscription.INSTANCE) {
		s.cancel();
		return false;
	}
	s.cancel();
	reportSubscriptionSet();
	return false;
}
/**
 * Cap a subtraction to 0
 *
 * @param a left operand
 * @param b right operand
 * @return Subtraction result or 0 if overflow
 */
public static long subOrZero(long a, long b) {
	long result = a - b;
	// Any negative outcome (including overflow wrap-around) is clamped to zero.
	return result < 0L ? 0 : result;
}
/**
 * Atomically terminates the subscription if it is not already a
 * {@link #cancelledSubscription()}, cancelling the subscription and setting the field
 * to the singleton {@link #cancelledSubscription()}.
 *
 * @param <F> the instance type containing the field
 * @param field the field accessor
 * @param instance the parent instance
 * @return true if this call cancelled an actual subscription, false if the field
 * was empty or already terminated
 */
public static <F> boolean terminate(AtomicReferenceFieldUpdater<F, @Nullable Subscription> field,
		F instance) {
	Subscription current = field.get(instance);
	if (current == CancelledSubscription.INSTANCE) {
		return false;
	}
	// Swap in the cancelled singleton; the winner of this race owns the cancellation.
	Subscription previous = field.getAndSet(instance, CancelledSubscription.INSTANCE);
	if (previous == null || previous == CancelledSubscription.INSTANCE) {
		return false;
	}
	previous.cancel();
	return true;
}
/**
 * Check Subscription current state and cancel new Subscription if current is set,
 * or return true if ready to subscribe.
 *
 * @param current current Subscription, expected to be null
 * @param next new Subscription
 * @return true if Subscription can be used
 */
public static boolean validate(@Nullable Subscription current, Subscription next) {
	Objects.requireNonNull(next, "Subscription cannot be null");
	if (current == null) {
		return true;
	}
	// Duplicate subscription: cancel it silently.
	// NOTE: reportSubscriptionSet() is deliberately not called here.
	next.cancel();
	return false;
}
/**
 * Evaluate if a request is strictly positive otherwise {@link #reportBadRequest(long)}
 * @param n the request value
 * @return true if valid
 */
public static boolean validate(long n) {
	if (n > 0) {
		return true;
	}
	reportBadRequest(n);
	return false;
}
/**
 * If the actual {@link Subscriber} is not a {@link CoreSubscriber}, it will apply
 * safe strict wrapping to apply all reactive streams rules including the ones
 * relaxed by internal operators based on {@link CoreSubscriber}.
 *
 * @param <T> passed subscriber type
 *
 * @param actual the {@link Subscriber} to apply hook on
 * @return an eventually transformed {@link Subscriber}
 */
public static <T> CoreSubscriber<? super T> toCoreSubscriber(Subscriber<? super T> actual) {
	Objects.requireNonNull(actual, "actual");
	if (actual instanceof CoreSubscriber) {
		return (CoreSubscriber<? super T>) actual;
	}
	// Plain Subscriber: wrap to enforce the full strict reactive-streams contract.
	return new StrictSubscriber<>(actual);
}
/**
 * If the actual {@link CoreSubscriber} is not {@link reactor.core.Fuseable.ConditionalSubscriber},
 * it will apply an adapter which directly maps all
 * {@link reactor.core.Fuseable.ConditionalSubscriber#tryOnNext(Object)} to
 * {@link Subscriber#onNext(Object)}
 * and always returns true as the result
 *
 * @param <T> passed subscriber type
 *
 * @param actual the {@link Subscriber} to adapt
 * @return a potentially adapted {@link reactor.core.Fuseable.ConditionalSubscriber}
 */
public static <T> Fuseable.ConditionalSubscriber<? super T> toConditionalSubscriber(CoreSubscriber<? super T> actual) {
	Objects.requireNonNull(actual, "actual");
	if (actual instanceof Fuseable.ConditionalSubscriber) {
		return (Fuseable.ConditionalSubscriber<? super T>) actual;
	}
	// Not conditional: adapt so tryOnNext delegates to onNext and reports consumption.
	return new ConditionalSubscriberAdapter<>(actual);
}
// Best-effort Context for a multicast: use the first inner subscriber's context when
// available, otherwise fall back to the empty Context.
static Context multiSubscribersContext(InnerProducer<?>[] multicastInners){
	if (multicastInners.length == 0) {
		return Context.empty();
	}
	CoreSubscriber<?> first = multicastInners[0].actual();
	return first == null ? Context.empty() : first.currentContext();
}
/**
 * Add the amount {@code n} to the given field, capped to {@link Long#MAX_VALUE},
 * unless the field is already at {@link Long#MAX_VALUE} OR {@link Long#MIN_VALUE}.
 * Return the value before the update.
 *
 * @param updater the field to update
 * @param instance the instance bearing the field
 * @param n the value to add
 * @param <T> the type of the field-bearing instance
 *
 * @return the old value of the field, before update.
 */
static <T> long addCapCancellable(AtomicLongFieldUpdater<T> updater, T instance,
		long n) {
	long current;
	long updated;
	do {
		current = updater.get(instance);
		// MIN_VALUE marks cancellation, MAX_VALUE marks unbounded demand: both are sticky.
		if (current == Long.MIN_VALUE || current == Long.MAX_VALUE) {
			return current;
		}
		updated = addCap(current, n);
	} while (!updater.compareAndSet(instance, current, updated));
	return current;
}
/**
 * An unexpected exception is about to be dropped from an operator that has multiple
 * subscribers (and thus potentially multiple Context with local onErrorDropped handlers).
 * Currently delegates to the single-context hook using the first inner subscriber's
 * Context (see {@link #multiSubscribersContext(InnerProducer[])}).
 *
 * @param e the dropped exception
 * @param multicastInners the inner targets of the multicast
 * @see #onErrorDropped(Throwable, Context)
 */
static void onErrorDroppedMulticast(Throwable e, InnerProducer<?>[] multicastInners) {
	//TODO let this method go through multiple contexts and use their local handlers
	//if at least one has no local handler, also call onErrorDropped(e, Context.empty())
	onErrorDropped(e, multiSubscribersContext(multicastInners));
}
/**
 * An unexpected event is about to be dropped from an operator that has multiple
 * subscribers (and thus potentially multiple Context with local onNextDropped handlers).
 * Currently delegates to the single-context hook using the first inner subscriber's
 * Context (see {@link #multiSubscribersContext(InnerProducer[])}).
 * <p>
 * If no hook is registered for {@link Hooks#onNextDropped(Consumer)}, the dropped
 * element is just logged at DEBUG level.
 *
 * @param <T> the dropped value type
 * @param t the dropped data
 * @param multicastInners the inner targets of the multicast
 * @see #onNextDropped(Object, Context)
 */
static <T> void onNextDroppedMulticast(T t, InnerProducer<?>[] multicastInners) {
	//TODO let this method go through multiple contexts and use their local handlers
	//if at least one has no local handler, also call onNextDropped(t, Context.empty())
	onNextDropped(t, multiSubscribersContext(multicastInners));
}
// Decrement a cancellable request tracker by n, clamping at 0 and reporting a bad
// request if the decrement would go negative. MIN_VALUE (cancelled) and MAX_VALUE
// (unbounded) are sticky terminal states and are returned unchanged.
static <T> long producedCancellable(AtomicLongFieldUpdater<T> updater, T instance, long n) {
	for (;;) {
		long current = updater.get(instance);
		if (current == Long.MIN_VALUE || current == Long.MAX_VALUE) {
			return current;
		}
		long next = current - n;
		if (next < 0L) {
			// More produced than requested: log it and clamp the tracker at zero.
			reportBadRequest(next);
			next = 0L;
		}
		if (updater.compareAndSet(instance, current, next)) {
			return next;
		}
	}
}
// Translate an int prefetch into a long request amount: Integer.MAX_VALUE is the
// "unbounded" sentinel and maps to Long.MAX_VALUE.
static long unboundedOrPrefetch(int prefetch) {
	if (prefetch == Integer.MAX_VALUE) {
		return Long.MAX_VALUE;
	}
	return prefetch;
}
// Derive the replenish threshold from a prefetch amount: 75% of prefetch, except the
// Integer.MAX_VALUE "unbounded" sentinel which is passed through untouched.
static int unboundedOrLimit(int prefetch) {
	if (prefetch == Integer.MAX_VALUE) {
		return Integer.MAX_VALUE;
	}
	return prefetch - (prefetch >> 2);
}
// Variant of unboundedOrLimit with an explicit low tide: a non-positive lowTide means
// "replenish at prefetch", a lowTide at or above prefetch falls back to the 75% rule,
// and anything in between is used as-is (unless prefetch is the unbounded sentinel).
static int unboundedOrLimit(int prefetch, int lowTide) {
	if (lowTide <= 0) {
		return prefetch;
	}
	if (lowTide < prefetch) {
		return prefetch == Integer.MAX_VALUE ? Integer.MAX_VALUE : lowTide;
	}
	return unboundedOrLimit(prefetch);
}
// Package-private constructor: this is a static utility holder and is not meant to
// be instantiated outside the package.
Operators() {
}
static final | also |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestNodeAttributesCLI.java | {
"start": 3204,
"end": 23007
} | class ____ {
private static final Logger LOG =
LoggerFactory.getLogger(TestNodeAttributesCLI.class);
private ResourceManagerAdministrationProtocol admin;
private ApplicationClientProtocol client;
private NodesToAttributesMappingRequest nodeToAttrRequest;
private NodeAttributesCLI nodeAttributesCLI;
private ByteArrayOutputStream errOutBytes = new ByteArrayOutputStream();
private ByteArrayOutputStream sysOutBytes = new ByteArrayOutputStream();
private String errOutput;
private String sysOutput;
@BeforeEach
public void configure() throws IOException, YarnException {
admin = mock(ResourceManagerAdministrationProtocol.class);
client = mock(ApplicationClientProtocol.class);
when(admin.mapAttributesToNodes(any(NodesToAttributesMappingRequest.class)))
.thenAnswer(new Answer<NodesToAttributesMappingResponse>() {
@Override
public NodesToAttributesMappingResponse answer(
InvocationOnMock invocation) throws Throwable {
nodeToAttrRequest =
(NodesToAttributesMappingRequest) invocation.getArguments()[0];
return NodesToAttributesMappingResponse.newInstance();
}
});
nodeAttributesCLI = new NodeAttributesCLI() {
@Override
protected AdminCommandHandler getAdminCommandHandler() {
return new AdminCommandHandler() {
@Override
protected ResourceManagerAdministrationProtocol createAdminProtocol()
throws IOException {
return admin;
}
};
}
@Override
protected ClientCommandHandler getClientCommandHandler() {
ClientCommandHandler handler = new ClientCommandHandler() {
@Override
protected ApplicationClientProtocol createApplicationProtocol()
throws IOException {
return client;
}
};
handler.setSysOut(new PrintStream(sysOutBytes));
return handler;
}
};
nodeAttributesCLI.setErrOut(new PrintStream(errOutBytes));
}
@Test
public void testHelp() throws Exception {
String[] args = new String[] {"-help", "-replace"};
assertTrue(0 == runTool(args), "It should have succeeded help for replace");
assertErrorContains("-replace <\"node1:attribute[(type)][=value],attribute1"
+ "[=value],attribute2 node2:attribute2[=value],attribute3\">");
assertErrorContains("Replace the node to attributes mapping information at"
+ " the ResourceManager with the new mapping. Currently supported"
+ " attribute type. And string is the default type too. Attribute value"
+ " if not specified for string type value will be considered as empty"
+ " string. Replaced node-attributes should not violate the existing"
+ " attribute to attribute type mapping.");
args = new String[] {"-help", "-remove"};
assertTrue(0 == runTool(args), "It should have succeeded help for replace");
assertErrorContains(
"-remove <\"node1:attribute,attribute1" + " node2:attribute2\">");
assertErrorContains("Removes the specified node to attributes mapping"
+ " information at the ResourceManager");
args = new String[] {"-help", "-add"};
assertTrue(0 == runTool(args), "It should have succeeded help for replace");
assertErrorContains("-add <\"node1:attribute[(type)][=value],"
+ "attribute1[=value],attribute2 node2:attribute2[=value],"
+ "attribute3\">");
assertErrorContains("Adds or updates the node to attributes mapping"
+ " information at the ResourceManager. Currently supported attribute"
+ " type is string. And string is the default type too. Attribute value"
+ " if not specified for string type value will be considered as empty"
+ " string. Added or updated node-attributes should not violate the"
+ " existing attribute to attribute type mapping.");
args = new String[] {"-help", "-failOnUnknownNodes"};
assertTrue(0 == runTool(args), "It should have succeeded help for replace");
assertErrorContains("-failOnUnknownNodes");
assertErrorContains("Can be used optionally along with [add,remove,"
+ "replace] options. When set, command will fail if specified nodes "
+ "are unknown.");
args = new String[] {"-help", "-list"};
assertTrue(0 == runTool(args), "It should have succeeded help for replace");
assertErrorContains("-list");
assertErrorContains("List all attributes in cluster");
args = new String[] {"-help", "-nodes"};
assertTrue(0 == runTool(args), "It should have succeeded help for replace");
assertErrorContains("-nodes");
assertErrorContains(
"Works with [list] to specify node hostnames whose mappings "
+ "are required to be displayed.");
args = new String[] {"-help", "-attributes"};
assertTrue(0 == runTool(args), "It should have succeeded help for replace");
assertErrorContains("-attributes");
assertErrorContains(
"Works with [attributestonodes] to specify attributes whose mapping "
+ "are required to be displayed.");
args = new String[] {"-help", "-attributestonodes"};
assertTrue(0 == runTool(args), "It should have succeeded help for replace");
assertErrorContains("-attributestonodes");
assertErrorContains("Displays mapping of attributes to nodes and attribute "
+ "values grouped by attributes");
}
@Test
public void testReplace() throws Exception {
// --------------------------------
// failure scenarios
// --------------------------------
// parenthesis not match
String[] args = new String[] {"-replace", "x("};
assertTrue(0 != runTool(args), "It should have failed as no node is specified");
assertFailureMessageContains(NodeAttributesCLI.INVALID_MAPPING_ERR_MSG);
// parenthesis not match
args = new String[] {"-replace", "x:(=abc"};
assertTrue(0 != runTool(args),
"It should have failed as no closing parenthesis is not specified");
assertFailureMessageContains(
"Attribute for node x is not properly configured : (=abc");
args = new String[] {"-replace", "x:()=abc"};
assertTrue(0 != runTool(args),
"It should have failed as no type specified inside parenthesis");
assertFailureMessageContains(
"Attribute for node x is not properly configured : ()=abc");
args = new String[] {"-replace", ":x(string)"};
assertTrue(0 != runTool(args), "It should have failed as no node is specified");
assertFailureMessageContains("Node name cannot be empty");
// Not expected key=value specifying inner parenthesis
args = new String[] {"-replace", "x:(key=value)"};
assertTrue(0 != runTool(args));
assertFailureMessageContains(
"Attribute for node x is not properly configured : (key=value)");
// Should fail as no attributes specified
args = new String[] {"-replace"};
assertTrue(0 != runTool(args), "Should fail as no attribute mappings specified");
assertFailureMessageContains(NodeAttributesCLI.MISSING_ARGUMENT);
// no labels, should fail
args = new String[] {"-replace", "-failOnUnknownNodes",
"x:key(string)=value,key2=val2"};
assertTrue(0 != runTool(args),
"Should fail as no attribute mappings specified for replace");
assertFailureMessageContains(NodeAttributesCLI.MISSING_ARGUMENT);
// no labels, should fail
args = new String[] {"-replace", " "};
assertTrue(0 != runTool(args));
assertFailureMessageContains(NodeAttributesCLI.NO_MAPPING_ERR_MSG);
args = new String[] {"-replace", ", "};
assertTrue(0 != runTool(args));
assertFailureMessageContains(NodeAttributesCLI.INVALID_MAPPING_ERR_MSG);
// --------------------------------
// success scenarios
// --------------------------------
args = new String[] {"-replace",
"x:key(string)=value,key2=val2 y:key2=val23,key3 z:key4"};
assertTrue(0 == runTool(args),
"Should not fail as attribute has been properly mapped");
List<NodeToAttributes> nodeAttributesList = new ArrayList<>();
List<NodeAttribute> attributes = new ArrayList<>();
attributes.add(
NodeAttribute.newInstance("key", NodeAttributeType.STRING, "value"));
attributes.add(
NodeAttribute.newInstance("key2", NodeAttributeType.STRING, "val2"));
nodeAttributesList.add(NodeToAttributes.newInstance("x", attributes));
// for node y
attributes = new ArrayList<>();
attributes.add(
NodeAttribute.newInstance("key2", NodeAttributeType.STRING, "val23"));
attributes
.add(NodeAttribute.newInstance("key3", NodeAttributeType.STRING, ""));
nodeAttributesList.add(NodeToAttributes.newInstance("y", attributes));
// for node y
attributes = new ArrayList<>();
attributes.add(
NodeAttribute.newInstance("key2", NodeAttributeType.STRING, "val23"));
attributes
.add(NodeAttribute.newInstance("key3", NodeAttributeType.STRING, ""));
nodeAttributesList.add(NodeToAttributes.newInstance("y", attributes));
// for node z
attributes = new ArrayList<>();
attributes
.add(NodeAttribute.newInstance("key4", NodeAttributeType.STRING, ""));
nodeAttributesList.add(NodeToAttributes.newInstance("z", attributes));
NodesToAttributesMappingRequest expected = NodesToAttributesMappingRequest
.newInstance(AttributeMappingOperationType.REPLACE, nodeAttributesList,
false);
assertTrue(nodeToAttrRequest.equals(expected));
}
@Test
public void testRemove() throws Exception {
// --------------------------------
// failure scenarios
// --------------------------------
// parenthesis not match
String[] args = new String[] {"-remove", "x:"};
assertTrue(0 != runTool(args),
"It should have failed as no node is specified");
assertFailureMessageContains(
"Attributes cannot be null or empty for Operation [remove] on the "
+ "node x");
// --------------------------------
// success scenarios
// --------------------------------
args =
new String[] {"-remove", "x:key2,key3 z:key4", "-failOnUnknownNodes"};
assertTrue(0 == runTool(args),
"Should not fail as attribute has been properly mapped");
List<NodeToAttributes> nodeAttributesList = new ArrayList<>();
List<NodeAttribute> attributes = new ArrayList<>();
attributes
.add(NodeAttribute.newInstance("key2", NodeAttributeType.STRING, ""));
attributes
.add(NodeAttribute.newInstance("key3", NodeAttributeType.STRING, ""));
nodeAttributesList.add(NodeToAttributes.newInstance("x", attributes));
// for node z
attributes = new ArrayList<>();
attributes
.add(NodeAttribute.newInstance("key4", NodeAttributeType.STRING, ""));
nodeAttributesList.add(NodeToAttributes.newInstance("z", attributes));
NodesToAttributesMappingRequest expected = NodesToAttributesMappingRequest
.newInstance(AttributeMappingOperationType.REMOVE, nodeAttributesList,
true);
assertTrue(nodeToAttrRequest.equals(expected));
}
@Test
public void testAdd() throws Exception {
// --------------------------------
// failure scenarios
// --------------------------------
// parenthesis not match
String[] args = new String[] {"-add", "x:"};
assertTrue(0 != runTool(args),
"It should have failed as no node is specified");
assertFailureMessageContains(
"Attributes cannot be null or empty for Operation [add] on the node x");
// --------------------------------
// success scenarios
// --------------------------------
args = new String[] {"-add", "x:key2=123,key3=abc z:key4(string)",
"-failOnUnknownNodes"};
assertTrue(0 == runTool(args),
"Should not fail as attribute has been properly mapped");
List<NodeToAttributes> nodeAttributesList = new ArrayList<>();
List<NodeAttribute> attributes = new ArrayList<>();
attributes.add(
NodeAttribute.newInstance("key2", NodeAttributeType.STRING, "123"));
attributes.add(
NodeAttribute.newInstance("key3", NodeAttributeType.STRING, "abc"));
nodeAttributesList.add(NodeToAttributes.newInstance("x", attributes));
// for node z
attributes = new ArrayList<>();
attributes
.add(NodeAttribute.newInstance("key4", NodeAttributeType.STRING, ""));
nodeAttributesList.add(NodeToAttributes.newInstance("z", attributes));
NodesToAttributesMappingRequest expected = NodesToAttributesMappingRequest
.newInstance(AttributeMappingOperationType.ADD, nodeAttributesList,
true);
assertTrue(nodeToAttrRequest.equals(expected));
// --------------------------------
// with Duplicate mappings for a host
// --------------------------------
args = new String[] {"-add", "x:key2=123,key3=abc x:key4(string)",
"-failOnUnknownNodes"};
assertTrue(0 == runTool(args),
"Should not fail as attribute has been properly mapped");
nodeAttributesList = new ArrayList<>();
attributes = new ArrayList<>();
attributes
.add(NodeAttribute.newInstance("key4", NodeAttributeType.STRING, ""));
nodeAttributesList.add(NodeToAttributes.newInstance("x", attributes));
expected = NodesToAttributesMappingRequest
.newInstance(AttributeMappingOperationType.ADD, nodeAttributesList,
true);
assertTrue(nodeToAttrRequest.equals(expected));
}
@Test
public void testListAttributes() throws Exception {
// GetClusterNodeAttributesRequest
when(client
.getClusterNodeAttributes(any(GetClusterNodeAttributesRequest.class)))
.thenAnswer(new Answer<GetClusterNodeAttributesResponse>() {
@Override
public GetClusterNodeAttributesResponse answer(
InvocationOnMock invocation) throws Throwable {
GetClusterNodeAttributesRequest nodeAttrReq =
(GetClusterNodeAttributesRequest) invocation.getArguments()[0];
return GetClusterNodeAttributesResponse.newInstance(ImmutableSet
.of(NodeAttributeInfo
.newInstance(NodeAttributeKey.newInstance("GPU"),
NodeAttributeType.STRING)));
}
});
// --------------------------------
// Success scenarios
// --------------------------------
String[] args = new String[] {"-list"};
assertTrue(0 == runTool(args),
"It should be success since it list all attributes");
assertSysOutContains("Attribute\t Type",
"rm.yarn.io/GPU\t STRING");
}
@Test
public void testNodeToAttributes() throws Exception {
// GetNodesToAttributesRequest response
when(client.getNodesToAttributes(any(GetNodesToAttributesRequest.class)))
.thenAnswer(new Answer<GetNodesToAttributesResponse>() {
@Override
public GetNodesToAttributesResponse answer(
InvocationOnMock invocation) throws Throwable {
GetNodesToAttributesRequest nodeToAttributes =
(GetNodesToAttributesRequest) invocation.getArguments()[0];
return GetNodesToAttributesResponse.newInstance(
ImmutableMap.<String, Set<NodeAttribute>>builder()
.put("hostname", ImmutableSet.of(NodeAttribute
.newInstance("GPU", NodeAttributeType.STRING, "ARM")))
.build());
}
});
// --------------------------------
// Failure scenarios
// --------------------------------
String[] args = new String[] {"-nodetoattributes", "-nodes"};
assertTrue(0 != runTool(args),
"It should not success since nodes are not specified");
assertErrorContains(NodeAttributesCLI.INVALID_COMMAND_USAGE);
// Missing argument for nodes
args = new String[] {"-nodestoattributes", "-nodes"};
assertTrue(0 != runTool(args),
"It should not success since nodes are not specified");
assertErrorContains(NodeAttributesCLI.MISSING_ARGUMENT);
// --------------------------------
// Success with hostname param
// --------------------------------
args = new String[] {"-nodestoattributes", "-nodes", "hostname"};
assertTrue(0 == runTool(args), "Should return hostname to attributed list");
assertSysOutContains("hostname");
}
@Test
public void testAttributesToNodes() throws Exception {
// GetAttributesToNodesResponse response
when(client.getAttributesToNodes(any(GetAttributesToNodesRequest.class)))
.thenAnswer(new Answer<GetAttributesToNodesResponse>() {
@Override
public GetAttributesToNodesResponse answer(
InvocationOnMock invocation) throws Throwable {
GetAttributesToNodesRequest attrToNodes =
(GetAttributesToNodesRequest) invocation.getArguments()[0];
return GetAttributesToNodesResponse.newInstance(
ImmutableMap.<NodeAttributeKey,
List<NodeToAttributeValue>>builder()
.put(NodeAttributeKey.newInstance("GPU"), ImmutableList
.of(NodeToAttributeValue.newInstance("host1", "ARM")))
.build());
}
});
// --------------------------------
// Success scenarios
// --------------------------------
String[] args = new String[] {"-attributestonodes"};
assertTrue(0 == runTool(args),
"It should be success since it list all attributes");
assertSysOutContains("Hostname\tAttribute-value", "rm.yarn.io/GPU :",
"host1\t ARM");
// --------------------------------
// fail scenario argument filter missing
// --------------------------------
args = new String[] {"-attributestonodes", "-attributes"};
assertTrue(0 != runTool(args),
"It should not success since attributes for filter are not specified");
assertErrorContains(NodeAttributesCLI.MISSING_ARGUMENT);
// --------------------------------
// fail scenario argument filter missing
// --------------------------------
args = new String[] {"-attributestonodes", "-attributes", "fail/da/fail"};
assertTrue(0 != runTool(args),
"It should not success since attributes format is not correct");
assertErrorContains(
"Attribute format not correct. Should be <[prefix]/[name]> "
+ ":fail/da/fail");
}
private void assertFailureMessageContains(String... messages) {
assertErrorContains(messages);
assertErrorContains(NodeAttributesCLI.USAGE_YARN_NODE_ATTRIBUTES);
}
private void assertErrorContains(String... messages) {
for (String message : messages) {
if (!errOutput.contains(message)) {
fail(
"Expected output to contain '" + message + "' but err_output was:\n"
+ errOutput);
}
}
}
private void assertSysOutContains(String... messages) {
for (String message : messages) {
if (!sysOutput.contains(message)) {
fail(
"Expected output to contain '" + message + "' but sys_output was:\n"
+ sysOutput);
}
}
}
private int runTool(String... args) throws Exception {
errOutBytes.reset();
sysOutBytes.reset();
LOG.info("Running: NodeAttributesCLI " + Joiner.on(" ").join(args));
int ret = nodeAttributesCLI.run(args);
errOutput = new String(errOutBytes.toByteArray(), StandardCharsets.UTF_8);
sysOutput = new String(sysOutBytes.toByteArray(), StandardCharsets.UTF_8);
LOG.info("Err_output:\n" + errOutput);
LOG.info("Sys_output:\n" + sysOutput);
return ret;
}
}
| TestNodeAttributesCLI |
java | spring-cloud__spring-cloud-gateway | spring-cloud-gateway-server-webmvc/src/main/java/org/springframework/cloud/gateway/server/mvc/handler/ClientHttpRequestFactoryProxyExchange.java | {
"start": 1340,
"end": 3609
} | class ____ extends AbstractProxyExchange {
private final ClientHttpRequestFactory requestFactory;
public ClientHttpRequestFactoryProxyExchange(ClientHttpRequestFactory requestFactory,
GatewayMvcProperties properties) {
super(properties);
this.requestFactory = requestFactory;
}
@Override
public ServerResponse exchange(Request request) {
try {
Objects.requireNonNull(request.getUri(), "uri is required");
ClientHttpRequest clientHttpRequest = requestFactory.createRequest(request.getUri(), request.getMethod());
clientHttpRequest.getHeaders().putAll(request.getHeaders());
// copy body from request to clientHttpRequest
StreamUtils.copy(request.getServerRequest().servletRequest().getInputStream(), clientHttpRequest.getBody());
ClientHttpResponse clientHttpResponse = clientHttpRequest.execute();
InputStream body = clientHttpResponse.getBody();
// put the body input stream in a request attribute so filters can read it.
MvcUtils.putAttribute(request.getServerRequest(), MvcUtils.CLIENT_RESPONSE_INPUT_STREAM_ATTR, body);
MvcUtils.putAttribute(request.getServerRequest(), MvcUtils.CLIENT_RESPONSE_ATTR, clientHttpResponse);
ServerResponse serverResponse = GatewayServerResponse.status(clientHttpResponse.getStatusCode())
.build((req, httpServletResponse) -> {
try (clientHttpResponse) {
// get input stream from request attribute in case it was
// modified.
InputStream inputStream = MvcUtils.getAttribute(request.getServerRequest(),
MvcUtils.CLIENT_RESPONSE_INPUT_STREAM_ATTR);
Objects.requireNonNull(inputStream, "input stream cannot be null");
// copy body from request to clientHttpRequest
ClientHttpRequestFactoryProxyExchange.this.copyResponseBody(clientHttpResponse, inputStream,
httpServletResponse.getOutputStream());
}
return null;
});
ClientHttpResponseAdapter proxyExchangeResponse = new ClientHttpResponseAdapter(clientHttpResponse);
request.getResponseConsumers()
.forEach(responseConsumer -> responseConsumer.accept(proxyExchangeResponse, serverResponse));
return serverResponse;
}
catch (IOException e) {
// TODO: log error?
throw new UncheckedIOException(e);
}
}
}
| ClientHttpRequestFactoryProxyExchange |
java | google__dagger | javatests/dagger/functional/componentdependency/ComponentDependenciesTest.java | {
"start": 1562,
"end": 1624
} | interface ____ {
Object getString();
}
public | OneOverride |
java | elastic__elasticsearch | x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVector.java | {
"start": 6498,
"end": 6783
} | interface ____ extends Builder permits DoubleVectorFixedBuilder {
/**
* Appends a double to the current entry.
*/
@Override
FixedBuilder appendDouble(double value);
FixedBuilder appendDouble(int index, double value);
}
}
| FixedBuilder |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/cache/FederationCache.java | {
"start": 13519,
"end": 14312
} | class ____<K, V> {
private K key;
private V value;
CacheRequest(K pKey, V pValue) {
this.key = pKey;
this.value = pValue;
}
public V getValue() throws Exception {
return value;
}
@Override
public int hashCode() {
return new HashCodeBuilder().append(key).toHashCode();
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (obj instanceof CacheRequest) {
Class<CacheRequest> cacheRequestClass = CacheRequest.class;
CacheRequest other = cacheRequestClass.cast(obj);
return new EqualsBuilder().append(key, other.key).isEquals();
}
return false;
}
}
public | CacheRequest |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/manytomany/generic/ManyToManyNonGenericTest.java | {
"start": 1290,
"end": 2168
} | class ____ {
@Test
void testSelfReferencingGeneric(final EntityManagerFactoryScope scope) {
final UUID treeId = scope.fromTransaction(em -> {
final NodeTree tree = new NodeTree();
final Node root = new Node();
root.tree = tree;
final Node branch = new Node();
branch.tree = tree;
tree.nodes.add(root);
tree.nodes.add(branch);
root.children.add(branch);
em.persist(tree);
return tree.id;
});
final NodeTree nodeTree = scope.fromEntityManager(em -> em.find(NodeTree.class, treeId));
assertThat(nodeTree, is(notNullValue()));
assertThat(nodeTree.id, is(treeId));
assertThat(nodeTree.nodes, iterableWithSize(2));
assertThat(nodeTree.nodes, containsInAnyOrder(List.of(
hasProperty("children", iterableWithSize(1)),
hasProperty("children", emptyIterable())
)));
}
@Entity(name = "tree")
public static | ManyToManyNonGenericTest |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/rank/feature/RankFeatureDoc.java | {
"start": 1011,
"end": 2984
} | class ____ extends RankDoc {
public static final String NAME = "rank_feature_doc";
private static final TransportVersion RERANK_SNIPPETS = TransportVersion.fromName("rerank_snippets");
// TODO: update to support more than 1 fields; and not restrict to string data
public List<String> featureData;
public RankFeatureDoc(int doc, float score, int shardIndex) {
super(doc, score, shardIndex);
}
public RankFeatureDoc(StreamInput in) throws IOException {
super(in);
if (in.getTransportVersion().supports(RERANK_SNIPPETS)) {
featureData = in.readOptionalStringCollectionAsList();
} else {
String featureDataString = in.readOptionalString();
featureData = featureDataString == null ? null : List.of(featureDataString);
}
}
@Override
public Explanation explain(Explanation[] sources, String[] queryNames) {
throw new UnsupportedOperationException("explain is not supported for {" + getClass() + "}");
}
public void featureData(List<String> featureData) {
this.featureData = featureData;
}
@Override
protected void doWriteTo(StreamOutput out) throws IOException {
if (out.getTransportVersion().supports(RERANK_SNIPPETS)) {
out.writeOptionalStringCollection(featureData);
} else {
out.writeOptionalString(featureData.get(0));
}
}
@Override
protected boolean doEquals(RankDoc rd) {
RankFeatureDoc other = (RankFeatureDoc) rd;
return Objects.equals(this.featureData, other.featureData);
}
@Override
protected int doHashCode() {
return Objects.hash(featureData);
}
@Override
public String getWriteableName() {
return NAME;
}
@Override
protected void doToXContent(XContentBuilder builder, Params params) throws IOException {
builder.array("featureData", featureData);
}
}
| RankFeatureDoc |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/LocalReplica.java | {
"start": 1826,
"end": 1945
} | class ____ used for all replicas which are on local storage media
* and hence, are backed by files.
*/
abstract public | is |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/eval/EvalMethodTest_insert_2.java | {
"start": 185,
"end": 429
} | class ____ extends TestCase {
public void test_method() throws Exception {
assertEquals("QuWhat",
SQLEvalVisitorUtils.evalExpr(JdbcConstants.MYSQL, "INSERT('Quadratic', 3, 100, 'What')"));
}
}
| EvalMethodTest_insert_2 |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/issue_2100/Issue2179.java | {
"start": 6458,
"end": 7024
} | class ____ implements ObjectDeserializer {
@SuppressWarnings("unchecked")
public <T> T deserialze(DefaultJSONParser parser, Type type, Object fieldName) {
String val = StringCodec.instance.deserialze(parser, type, fieldName);
System.out.println("-----------------EnumAwareSerializer2.deserialze-----------------------------");
System.out.println(val);
return (T) ProductType2.get(JSON.parseObject(val).getInteger("code"));
}
@Override
public int getFastMatchToken() {
return JSONToken.LITERAL_STRING;
}
}
public static | EnumAwareSerializer2 |
java | google__guice | extensions/servlet/src/com/google/inject/servlet/ContinuingHttpServletRequest.java | {
"start": 1151,
"end": 3434
} | class ____ extends HttpServletRequestWrapper {
// We clear out the attributes as they are mutable and not thread-safe.
private final Map<String, Object> attributes = Maps.newHashMap();
private final Cookie[] cookies;
public ContinuingHttpServletRequest(HttpServletRequest request) {
super(request);
Cookie[] originalCookies = request.getCookies();
if (originalCookies != null) {
int numberOfCookies = originalCookies.length;
cookies = new Cookie[numberOfCookies];
for (int i = 0; i < numberOfCookies; i++) {
Cookie originalCookie = originalCookies[i];
// Snapshot each cookie + freeze.
// No snapshot is required if this is a snapshot of a snapshot(!)
if (originalCookie instanceof ImmutableCookie) {
cookies[i] = originalCookie;
} else {
cookies[i] = new ImmutableCookie(originalCookie);
}
}
} else {
cookies = null;
}
}
@Override
public HttpSession getSession() {
throw new OutOfScopeException("Cannot access the session in a continued request");
}
@Override
public HttpSession getSession(boolean create) {
throw new UnsupportedOperationException("Cannot access the session in a continued request");
}
@Override
public ServletInputStream getInputStream() throws IOException {
throw new UnsupportedOperationException("Cannot access raw request on a continued request");
}
@Override
public void setAttribute(String name, Object o) {
attributes.put(name, o);
}
@Override
public void removeAttribute(String name) {
attributes.remove(name);
}
@Override
public Object getAttribute(String name) {
return attributes.get(name);
}
@Override
public Cookie[] getCookies() {
// NOTE(user): Cookies themselves are mutable. However a ContinuingHttpServletRequest
// snapshots the original set of cookies it received and imprisons them in immutable
// form. Unfortunately, the cookie array itself is mutable and there is no way for us
// to avoid this. At worst, however, mutation effects are restricted within the scope
// of a single request. Continued requests are not affected after snapshot time.
return cookies;
}
private static final | ContinuingHttpServletRequest |
java | apache__camel | core/camel-util/src/main/java/org/apache/camel/util/ReflectionHelper.java | {
"start": 5531,
"end": 5747
} | class ____ the supplied name and parameter types. Searches all
* superclasses up to {@code Object}.
* <p>
* Returns {@code null} if no {@link Method} can be found.
*
* @param clazz the | with |
java | bumptech__glide | library/src/main/java/com/bumptech/glide/request/transition/TransitionFactory.java | {
"start": 289,
"end": 645
} | interface ____<R> {
/**
* Returns a new {@link Transition}.
*
* @param dataSource The {@link com.bumptech.glide.load.DataSource} the resource was loaded from.
* @param isFirstResource True if this is the first resource to be loaded into the target.
*/
Transition<R> build(DataSource dataSource, boolean isFirstResource);
}
| TransitionFactory |
java | spring-projects__spring-framework | spring-core/src/test/java/org/springframework/core/convert/support/ByteBufferConverterTests.java | {
"start": 3044,
"end": 3237
} | class ____ implements Converter<byte[], OtherType> {
@Override
public OtherType convert(byte[] source) {
return new OtherType(source);
}
}
private static | ByteArrayToOtherTypeConverter |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/inheritfromconfig/CarMapperReverseWithAutoInheritance.java | {
"start": 402,
"end": 700
} | class ____ {
public static final CarMapperReverseWithAutoInheritance INSTANCE =
Mappers.getMapper( CarMapperReverseWithAutoInheritance.class );
@Mapping( target = "colour", source = "color" )
public abstract CarDto toCarDto(CarEntity entity);
}
| CarMapperReverseWithAutoInheritance |
java | apache__kafka | connect/api/src/main/java/org/apache/kafka/connect/sink/SinkConnectorContext.java | {
"start": 1005,
"end": 1065
} | interface ____ extends ConnectorContext {
}
| SinkConnectorContext |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/query/sqm/tree/domain/SqmCorrelatedRoot.java | {
"start": 626,
"end": 2907
} | class ____<T> extends SqmRoot<T> implements SqmPathWrapper<T, T>, SqmCorrelation<T, T> {
private final SqmRoot<T> correlationParent;
public SqmCorrelatedRoot(SqmRoot<T> correlationParent) {
super(
correlationParent.getNavigablePath(),
correlationParent.getModel(),
correlationParent.getExplicitAlias(),
correlationParent.nodeBuilder()
);
this.correlationParent = correlationParent;
}
protected SqmCorrelatedRoot(NavigablePath navigablePath, SqmPathSource<T> referencedNavigable, NodeBuilder nodeBuilder, SqmRoot<T> correlationParent) {
super( navigablePath, referencedNavigable, nodeBuilder );
this.correlationParent = correlationParent;
}
@Override
public SqmCorrelatedRoot<T> copy(SqmCopyContext context) {
final SqmCorrelatedRoot<T> existing = context.getCopy( this );
if ( existing != null ) {
return existing;
}
final SqmCorrelatedRoot<T> path = context.registerCopy(
this,
new SqmCorrelatedRoot<>( correlationParent.copy( context ) )
);
copyTo( path, context );
return path;
}
@Override
public SqmRoot<T> getCorrelationParent() {
return correlationParent;
}
@Override
public SqmPath<T> getWrappedPath() {
return getCorrelationParent();
}
@Override
public @Nullable String getExplicitAlias() {
return correlationParent.getExplicitAlias();
}
@Override
public void setExplicitAlias(@Nullable String explicitAlias) {
throw new UnsupportedOperationException( "Can't set alias on a correlated root" );
}
@Override
public JpaSelection<T> alias(String name) {
setAlias( name );
return this;
}
@Override
public boolean isCorrelated() {
return true;
}
@Override
public SqmRoot<T> getCorrelatedRoot() {
return this;
}
@Override
public <X> X accept(SemanticQueryWalker<X> walker) {
return walker.visitCorrelatedRoot( this );
}
@Override
public boolean deepEquals(SqmFrom<?, ?> other) {
return super.deepEquals( other )
&& other instanceof SqmCorrelatedRoot<?> that
&& correlationParent.equals( that.correlationParent );
}
@Override
public boolean isDeepCompatible(SqmFrom<?, ?> other) {
return super.isDeepCompatible( other )
&& other instanceof SqmCorrelatedRoot<?> that
&& correlationParent.isCompatible( that.correlationParent );
}
}
| SqmCorrelatedRoot |
java | apache__flink | flink-architecture-tests/flink-architecture-tests-test/src/main/java/org/apache/flink/architecture/TestCodeArchitectureTestBase.java | {
"start": 1214,
"end": 1292
} | class ____ {@link
* ArchTests#in(Class)} to cover the common part.
*/
public | via |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMRIntermediateDataEncryption.java | {
"start": 3851,
"end": 20153
} | class ____ {
public static final Logger LOG =
LoggerFactory.getLogger(TestMRIntermediateDataEncryption.class);
/**
* The number of bytes generated by the input generator.
*/
public static final long TOTAL_MBS_DEFAULT = 128L;
public static final long BLOCK_SIZE_DEFAULT = 32 * 1024 * 1024L;
public static final int INPUT_GEN_NUM_THREADS = 16;
public static final long TASK_SORT_IO_MB_DEFAULT = 128L;
public static final String JOB_DIR_PATH = "jobs-data-path";
/**
* Directory of the test data.
*/
private static File testRootDir;
private static volatile BufferedWriter inputBufferedWriter;
private static Configuration commonConfig;
private static MiniDFSCluster dfsCluster;
private static MiniMRClientCluster mrCluster;
private static FileSystem fs;
private static FileChecksum checkSumReference;
private static Path jobInputDirPath;
private static long inputFileSize;
/**
* Test parameters.
*/
private String testTitleName;
private int numMappers;
private int numReducers;
private boolean isUber;
private Configuration config;
private Path jobOutputPath;
/**
* Initialized the parametrized JUnit test.
* @param pTestName the name of the unit test to be executed.
* @param pMappers number of mappers in the tests.
* @param pReducers number of the reducers.
* @param pUberEnabled boolean flag for isUber
* @throws Exception unit test error.
*/
public void initTestMRIntermediateDataEncryption(String pTestName,
int pMappers, int pReducers, boolean pUberEnabled) throws Exception {
this.testTitleName = pTestName;
this.numMappers = pMappers;
this.numReducers = pReducers;
this.isUber = pUberEnabled;
setup();
}
/**
* List of arguments to run the JunitTest.
* @return
*/
public static Collection<Object[]> getTestParameters() {
return Arrays.asList(new Object[][]{
{"testSingleReducer", 3, 1, false},
{"testUberMode", 3, 1, true},
{"testMultipleMapsPerNode", 8, 1, false},
{"testMultipleReducers", 2, 4, false}
});
}
@BeforeAll
public static void setupClass() throws Exception {
// setup the test root directory
testRootDir =
GenericTestUtils.setupTestRootDir(
TestMRIntermediateDataEncryption.class);
// setup the base configurations and the clusters
final File dfsFolder = new File(testRootDir, "dfs");
final Path jobsDirPath = new Path(JOB_DIR_PATH);
commonConfig = createBaseConfiguration();
dfsCluster =
new MiniDFSCluster.Builder(commonConfig, dfsFolder)
.numDataNodes(2).build();
dfsCluster.waitActive();
mrCluster = MiniMRClientClusterFactory.create(
TestMRIntermediateDataEncryption.class, 2, commonConfig);
mrCluster.start();
fs = dfsCluster.getFileSystem();
if (fs.exists(jobsDirPath) && !fs.delete(jobsDirPath, true)) {
throw new IOException("Could not delete JobsDirPath" + jobsDirPath);
}
fs.mkdirs(jobsDirPath);
jobInputDirPath = new Path(jobsDirPath, "in-dir");
// run the input generator job.
assertEquals(0,
generateInputTextFile(), "Generating input should succeed");
// run the reference job
runReferenceJob();
}
@AfterAll
public static void tearDown() throws IOException {
// shutdown clusters
if (mrCluster != null) {
mrCluster.stop();
}
if (dfsCluster != null) {
dfsCluster.shutdown();
}
// make sure that generated input file is deleted
final File textInputFile = new File(testRootDir, "input.txt");
if (textInputFile.exists()) {
assertTrue(textInputFile.delete());
}
}
/**
* Creates a configuration object setting the common properties before
* initializing the clusters.
* @return configuration to be used as a base for the unit tests.
*/
private static Configuration createBaseConfiguration() {
// Set the jvm arguments to enable intermediate encryption.
Configuration conf =
MRJobConfUtil.initEncryptedIntermediateConfigsForTesting(null);
// Set the temp directories a subDir of the test directory.
conf = MRJobConfUtil.setLocalDirectoriesConfigForTesting(conf, testRootDir);
conf.setLong("dfs.blocksize", BLOCK_SIZE_DEFAULT);
return conf;
}
/**
* Creates a thread safe BufferedWriter to be used among the task generators.
* @return A synchronized <code>BufferedWriter</code> to the input file.
* @throws IOException opening a new {@link FileWriter}.
*/
private static synchronized BufferedWriter getTextInputWriter()
throws IOException {
if (inputBufferedWriter == null) {
final File textInputFile = new File(testRootDir, "input.txt");
inputBufferedWriter = new BufferedWriter(new FileWriter(textInputFile));
}
return inputBufferedWriter;
}
/**
* Generates input text file of size <code>TOTAL_MBS_DEFAULT</code>.
* It creates a total <code>INPUT_GEN_NUM_THREADS</code> future tasks.
*
* @return the result of the input generation. 0 for success.
* @throws Exception during the I/O of job.
*/
private static int generateInputTextFile() throws Exception {
final File textInputFile = new File(testRootDir, "input.txt");
final AtomicLong actualWrittenBytes = new AtomicLong(0);
// create INPUT_GEN_NUM_THREADS callables
final ExecutorService executor =
Executors.newFixedThreadPool(INPUT_GEN_NUM_THREADS);
//create a list to hold the Future object associated with Callable
final List<Future<Long>> inputGenerators = new ArrayList<>();
final Callable<Long> callableGen = new InputGeneratorTask();
final long startTime = Time.monotonicNow();
for (int i = 0; i < INPUT_GEN_NUM_THREADS; i++) {
//submit Callable tasks to be executed by thread pool
Future<Long> genFutureTask = executor.submit(callableGen);
inputGenerators.add(genFutureTask);
}
for (Future<Long> genFutureTask : inputGenerators) {
// print the return value of Future, notice the output delay in console
// because Future.get() waits for task to get completed
LOG.info("Received one task. Current total bytes: {}",
actualWrittenBytes.addAndGet(genFutureTask.get()));
}
getTextInputWriter().close();
final long endTime = Time.monotonicNow();
LOG.info("Finished generating input. Wrote {} bytes in {} seconds",
actualWrittenBytes.get(), ((endTime - startTime) * 1.0) / 1000);
executor.shutdown();
// copy text file to HDFS deleting the source.
fs.mkdirs(jobInputDirPath);
Path textInputPath =
fs.makeQualified(new Path(jobInputDirPath, "input.txt"));
fs.copyFromLocalFile(true, new Path(textInputFile.getAbsolutePath()),
textInputPath);
if (!fs.exists(textInputPath)) {
// the file was not generated. Fail.
return 1;
}
// update the input size.
FileStatus[] fileStatus =
fs.listStatus(textInputPath);
inputFileSize = fileStatus[0].getLen();
LOG.info("Text input file; path: {}, size: {}",
textInputPath, inputFileSize);
return 0;
}
/**
* Runs a WordCount job with encryption disabled and stores the checksum of
* the output file.
* @throws Exception due to I/O errors.
*/
private static void runReferenceJob() throws Exception {
final String jobRefLabel = "job-reference";
final Path jobRefDirPath = new Path(JOB_DIR_PATH, jobRefLabel);
if (fs.exists(jobRefDirPath) && !fs.delete(jobRefDirPath, true)) {
throw new IOException("Could not delete " + jobRefDirPath);
}
assertTrue(fs.mkdirs(jobRefDirPath));
Path jobRefOutputPath = new Path(jobRefDirPath, "out-dir");
Configuration referenceConf = new Configuration(commonConfig);
referenceConf.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, false);
Job jobReference = runWordCountJob(jobRefLabel, jobRefOutputPath,
referenceConf, 4, 1);
assertTrue(jobReference.isSuccessful());
FileStatus[] fileStatusArr =
fs.listStatus(jobRefOutputPath,
new Utils.OutputFileUtils.OutputFilesFilter());
assertEquals(1, fileStatusArr.length);
checkSumReference = fs.getFileChecksum(fileStatusArr[0].getPath());
assertTrue(fs.delete(jobRefDirPath, true));
}
private static Job runWordCountJob(String postfixName, Path jOutputPath,
Configuration jConf, int mappers, int reducers) throws Exception {
Job job = Job.getInstance(jConf);
job.getConfiguration().setInt(MRJobConfig.NUM_MAPS, mappers);
job.setJarByClass(TestMRIntermediateDataEncryption.class);
job.setJobName("mr-spill-" + postfixName);
// Mapper configuration
job.setMapperClass(TokenizerMapper.class);
job.setInputFormatClass(TextInputFormat.class);
job.setCombinerClass(LongSumReducer.class);
FileInputFormat.setMinInputSplitSize(job,
(inputFileSize + mappers) / mappers);
// Reducer configuration
job.setReducerClass(LongSumReducer.class);
job.setNumReduceTasks(reducers);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(LongWritable.class);
// Set the IO paths for the job.
FileInputFormat.addInputPath(job, jobInputDirPath);
FileOutputFormat.setOutputPath(job, jOutputPath);
if (job.waitForCompletion(true)) {
FileStatus[] fileStatusArr =
fs.listStatus(jOutputPath,
new Utils.OutputFileUtils.OutputFilesFilter());
for (FileStatus fStatus : fileStatusArr) {
LOG.info("Job: {} .. Output file {} .. Size = {}",
postfixName, fStatus.getPath(), fStatus.getLen());
}
}
return job;
}
/**
* Compares the checksum of the output file to the
* <code>checkSumReference</code>.
* If the job has a multiple reducers, the output files are combined by
* launching another job.
* @return true if the checksums are equal.
* @throws Exception if the output is missing or the combiner job fails.
*/
private boolean validateJobOutput() throws Exception {
assertTrue(fs.exists(jobOutputPath),
"Job Output path [" + jobOutputPath + "] should exist");
Path outputPath = jobOutputPath;
if (numReducers != 1) {
// combine the result into one file by running a combiner job
final String jobRefLabel = testTitleName + "-combine";
final Path jobRefDirPath = new Path(JOB_DIR_PATH, jobRefLabel);
if (fs.exists(jobRefDirPath) && !fs.delete(jobRefDirPath, true)) {
throw new IOException("Could not delete " + jobRefDirPath);
}
fs.mkdirs(jobRefDirPath);
outputPath = new Path(jobRefDirPath, "out-dir");
Configuration referenceConf = new Configuration(commonConfig);
referenceConf.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA,
false);
Job combinerJob = Job.getInstance(referenceConf);
combinerJob.setJarByClass(TestMRIntermediateDataEncryption.class);
combinerJob.setJobName("mr-spill-" + jobRefLabel);
combinerJob.setMapperClass(CombinerJobMapper.class);
FileInputFormat.addInputPath(combinerJob, jobOutputPath);
// Reducer configuration
combinerJob.setReducerClass(LongSumReducer.class);
combinerJob.setNumReduceTasks(1);
combinerJob.setOutputKeyClass(Text.class);
combinerJob.setOutputValueClass(LongWritable.class);
// Set the IO paths for the job.
FileOutputFormat.setOutputPath(combinerJob, outputPath);
if (!combinerJob.waitForCompletion(true)) {
return false;
}
FileStatus[] fileStatusArr =
fs.listStatus(outputPath,
new Utils.OutputFileUtils.OutputFilesFilter());
LOG.info("Job-Combination: {} .. Output file {} .. Size = {}",
jobRefDirPath, fileStatusArr[0].getPath(), fileStatusArr[0].getLen());
}
// Get the output files of the job.
FileStatus[] fileStatusArr =
fs.listStatus(outputPath,
new Utils.OutputFileUtils.OutputFilesFilter());
FileChecksum jobFileChecksum =
fs.getFileChecksum(fileStatusArr[0].getPath());
return checkSumReference.equals(jobFileChecksum);
}
public void setup() throws Exception {
LOG.info("Starting TestMRIntermediateDataEncryption#{}.......",
testTitleName);
final Path jobDirPath = new Path(JOB_DIR_PATH, testTitleName);
if (fs.exists(jobDirPath) && !fs.delete(jobDirPath, true)) {
throw new IOException("Could not delete " + jobDirPath);
}
fs.mkdirs(jobDirPath);
jobOutputPath = new Path(jobDirPath, "out-dir");
// Set the configuration for the job.
config = new Configuration(commonConfig);
config.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, isUber);
config.setFloat(MRJobConfig.COMPLETED_MAPS_FOR_REDUCE_SLOWSTART, 1.0F);
// Set the configuration to make sure that we get spilled files.
long ioSortMb = TASK_SORT_IO_MB_DEFAULT;
config.setLong(MRJobConfig.IO_SORT_MB, ioSortMb);
long mapMb = Math.max(2 * ioSortMb, config.getInt(MRJobConfig.MAP_MEMORY_MB,
MRJobConfig.DEFAULT_MAP_MEMORY_MB));
// Make sure the map tasks will spill to disk.
config.setLong(MRJobConfig.MAP_MEMORY_MB, mapMb);
config.set(MRJobConfig.MAP_JAVA_OPTS, "-Xmx" + (mapMb - 200) + "m");
config.setInt(MRJobConfig.NUM_MAPS, numMappers);
// Max attempts have to be set to 1 when intermediate encryption is enabled.
config.setInt("mapreduce.map.maxattempts", 1);
config.setInt("mapreduce.reduce.maxattempts", 1);
}
@ParameterizedTest(name = "{index}: TestMRIntermediateDataEncryption.{0} .. "
+ "mappers:{1}, reducers:{2}, isUber:{3})")
@MethodSource("getTestParameters")
public void testWordCount(String pTestName,
int pMappers, int pReducers, boolean pUberEnabled) throws Exception {
initTestMRIntermediateDataEncryption(pTestName, pMappers, pReducers, pUberEnabled);
LOG.info("........Starting main Job Driver #{} starting at {}.......",
testTitleName, Time.formatTime(System.currentTimeMillis()));
SpillCallBackPathsFinder spillInjector =
(SpillCallBackPathsFinder) IntermediateEncryptedStream
.setSpillCBInjector(new SpillCallBackPathsFinder());
StringBuilder testSummary =
new StringBuilder(String.format("%n ===== test %s summary ======",
testTitleName));
try {
long startTime = Time.monotonicNow();
testSummary.append(String.format("%nJob %s started at %s",
testTitleName, Time.formatTime(System.currentTimeMillis())));
Job job = runWordCountJob(testTitleName, jobOutputPath, config,
numMappers, numReducers);
assertTrue(job.isSuccessful());
long endTime = Time.monotonicNow();
testSummary.append(String.format("%nJob %s ended at %s",
job.getJobName(), Time.formatTime(System.currentTimeMillis())));
testSummary.append(String.format("%n\tThe job took %.3f seconds",
(1.0 * (endTime - startTime)) / 1000));
FileStatus[] fileStatusArr =
fs.listStatus(jobOutputPath,
new Utils.OutputFileUtils.OutputFilesFilter());
for (FileStatus fStatus : fileStatusArr) {
long fileSize = fStatus.getLen();
testSummary.append(
String.format("%n\tOutput file %s: %d",
fStatus.getPath(), fileSize));
}
// Validate the checksum of the output.
assertTrue(validateJobOutput());
// Check intermediate files and spilling.
long spilledRecords =
job.getCounters().findCounter(TaskCounter.SPILLED_RECORDS).getValue();
assertTrue(
spilledRecords > 0, "Spill records must be greater than 0");
assertFalse(spillInjector.getEncryptedSpilledFiles().isEmpty(),
"The encrypted spilled files should not be empty.");
assertTrue(spillInjector.getInvalidSpillEntries().isEmpty(),
"Invalid access to spill file positions");
} finally {
testSummary.append(spillInjector.getSpilledFileReport());
LOG.info(testSummary.toString());
IntermediateEncryptedStream.resetSpillCBInjector();
}
}
/**
* A callable implementation that generates a portion of the
* <code>TOTAL_MBS_DEFAULT</code> into {@link #inputBufferedWriter}.
*/
static | TestMRIntermediateDataEncryption |
java | apache__camel | components/camel-grpc/src/main/java/org/apache/camel/component/grpc/client/GrpcExchangeForwarder.java | {
"start": 1053,
"end": 1256
} | interface ____ {
boolean forward(Exchange exchange, StreamObserver<Object> responseObserver, AsyncCallback callback);
void forward(Exchange exchange);
void shutdown();
}
| GrpcExchangeForwarder |
java | apache__flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/delegation/FlinkSqlParserFactories.java | {
"start": 1234,
"end": 1609
} | class ____ {
private FlinkSqlParserFactories() {}
public static SqlParserImplFactory create(SqlConformance conformance) {
if (conformance == FlinkSqlConformance.DEFAULT) {
return FlinkSqlParserImpl.FACTORY;
} else {
throw new TableException("Unsupported SqlConformance: " + conformance);
}
}
}
| FlinkSqlParserFactories |
java | google__guava | android/guava/src/com/google/common/base/CharMatcher.java | {
"start": 46133,
"end": 47081
} | class ____ extends RangesMatcher {
// Plug the following UnicodeSet pattern into
// https://unicode.org/cldr/utility/list-unicodeset.jsp
// [[[:Zs:][:Zl:][:Zp:][:Cc:][:Cf:][:Cs:][:Co:]]&[\u0000-\uFFFF]]
// with the "Abbreviate" option, and get the ranges from there.
private static final String RANGE_STARTS =
"\u0000\u007f\u00ad\u0600\u061c\u06dd\u070f\u0890\u08e2\u1680\u180e\u2000\u2028\u205f\u2066"
+ "\u3000\ud800\ufeff\ufff9";
private static final String RANGE_ENDS = // inclusive ends
"\u0020\u00a0\u00ad\u0605\u061c\u06dd\u070f\u0891\u08e2\u1680\u180e\u200f\u202f\u2064\u206f"
+ "\u3000\uf8ff\ufeff\ufffb";
static final CharMatcher INSTANCE = new Invisible();
private Invisible() {
super("CharMatcher.invisible()", RANGE_STARTS.toCharArray(), RANGE_ENDS.toCharArray());
}
}
/** Implementation of {@link #singleWidth()}. */
private static final | Invisible |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/factories/assignment/ParameterAssigmentFactoryTest.java | {
"start": 630,
"end": 2490
} | class ____ {
@ProcessorTest
public void shouldUseFactoryMethodWithMultipleParams() {
Foo5A foo5a = new Foo5A();
foo5a.setPropB( "foo5a" );
Foo5B foo5b = new Foo5B();
foo5b.setPropB( "foo5b" );
Bar5 bar5 = ParameterAssignmentFactoryTestMapper.INSTANCE.foos5ToBar5( foo5a, foo5b );
// foo5a and foo5b get merged into bar5 by a custom factory
assertThat( bar5 ).isNotNull();
assertThat( bar5.getPropA() ).isEqualTo( "foo5a" );
assertThat( bar5.getPropB() ).isEqualTo( "foo5b" );
assertThat( bar5.getSomeTypeProp0() ).isEqualTo( "FOO5A" );
assertThat( bar5.getSomeTypeProp1() ).isEqualTo( "foo5b" );
}
@ProcessorTest
public void shouldUseFactoryMethodWithFirstParamsOfMappingMethod() {
Foo6A foo6a = new Foo6A();
foo6a.setPropB( "foo6a" );
Foo6B foo6b = new Foo6B();
foo6b.setPropB( "foo6b" );
Bar6 bar6 = ParameterAssignmentFactoryTestMapper.INSTANCE.foos6ToBar6( foo6a, foo6b );
assertThat( bar6 ).isNotNull();
assertThat( bar6.getPropA() ).isEqualTo( "foo6a" );
assertThat( bar6.getPropB() ).isEqualTo( "foo6b" );
assertThat( bar6.getSomeTypeProp0() ).isEqualTo( "FOO6A" );
}
@ProcessorTest
public void shouldUseFactoryMethodWithSecondParamsOfMappingMethod() {
Foo7A foo7a = new Foo7A();
foo7a.setPropB( "foo7a" );
Foo7B foo7b = new Foo7B();
foo7b.setPropB( "foo7b" );
Bar7 bar7 = ParameterAssignmentFactoryTestMapper.INSTANCE.foos7ToBar7( foo7a, foo7b );
assertThat( bar7 ).isNotNull();
assertThat( bar7.getPropA() ).isEqualTo( "foo7a" );
assertThat( bar7.getPropB() ).isEqualTo( "foo7b" );
assertThat( bar7.getSomeTypeProp0() ).isEqualTo( "FOO7B" );
}
}
| ParameterAssigmentFactoryTest |
java | apache__dubbo | dubbo-config/dubbo-config-spring/src/main/java/org/apache/dubbo/config/spring/extension/SpringExtensionInjector.java | {
"start": 1221,
"end": 3399
} | class ____ implements ExtensionInjector {
private ApplicationContext context;
@Deprecated
public static void addApplicationContext(final ApplicationContext context) {}
public static SpringExtensionInjector get(final ExtensionAccessor extensionAccessor) {
return (SpringExtensionInjector) extensionAccessor.getExtension(ExtensionInjector.class, "spring");
}
public ApplicationContext getContext() {
return context;
}
public void init(final ApplicationContext context) {
this.context = context;
}
@Override
@SuppressWarnings("unchecked")
public <T> T getInstance(Class<T> type, String name) {
if (context == null) {
// ignore if spring context is not bound
return null;
}
// check @SPI annotation
if (type.isInterface() && type.isAnnotationPresent(SPI.class)) {
return null;
}
T bean = getOptionalBean(context, name, type);
if (bean != null) {
return bean;
}
// logger.warn("No spring extension (bean) named:" + name + ", try to find an extension (bean) of type " +
// type.getName());
return null;
}
private <T> T getOptionalBean(final ListableBeanFactory beanFactory, final String name, final Class<T> type) {
if (StringUtils.isEmpty(name)) {
return getOptionalBeanByType(beanFactory, type);
}
if (beanFactory.containsBean(name)) {
return beanFactory.getBean(name, type);
}
return null;
}
private <T> T getOptionalBeanByType(final ListableBeanFactory beanFactory, final Class<T> type) {
String[] beanNamesForType = beanFactory.getBeanNamesForType(type, true, false);
if (beanNamesForType == null) {
return null;
}
if (beanNamesForType.length > 1) {
throw new IllegalStateException("Expect single but found " + beanNamesForType.length
+ " beans in spring context: " + Arrays.toString(beanNamesForType));
}
return beanFactory.getBean(beanNamesForType[0], type);
}
}
| SpringExtensionInjector |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.