language stringclasses 1
value | repo stringclasses 60
values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | elastic__elasticsearch | x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/comparison/BinaryComparison.java | {
"start": 843,
"end": 1996
} | class ____ extends BinaryOperator<Object, Object, Boolean, BinaryComparisonOperation> {
private final ZoneId zoneId;
protected BinaryComparison(Source source, Expression left, Expression right, BinaryComparisonOperation operation, ZoneId zoneId) {
super(source, left, right, operation);
this.zoneId = zoneId;
}
public ZoneId zoneId() {
return zoneId;
}
@Override
protected TypeResolution resolveInputType(Expression e, ParamOrdinal paramOrdinal) {
return TypeResolutions.isExact(e, sourceText(), paramOrdinal);
}
@Override
public DataType dataType() {
return DataType.BOOLEAN;
}
public static Integer compare(Object left, Object right) {
return Comparisons.compare(left, right);
}
/**
* Reverses the direction of this comparison on the comparison axis.
* Some operations like Greater/LessThan/OrEqual will behave as if the operands of a numerical comparison get multiplied with a
* negative number. Others like Not/Equal can be immutable to this operation.
*/
public abstract BinaryComparison reverse();
}
| BinaryComparison |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/monitor/os/OsProbe.java | {
"start": 2615,
"end": 38693
} | class ____ {
private static final OperatingSystemMXBean osMxBean = ManagementFactory.getOperatingSystemMXBean();
// This property is specified without units because it also needs to be parsed by the launcher
// code, which does not have access to all the utility classes of the Elasticsearch server.
private static final String memoryOverrideProperty = System.getProperty("es.total_memory_bytes");
private static final Method getFreePhysicalMemorySize;
private static final Method getTotalPhysicalMemorySize;
private static final Method getFreeSwapSpaceSize;
private static final Method getTotalSwapSpaceSize;
private static final Method getSystemLoadAverage;
private static final Method getSystemCpuLoad;
private static final Method getAvailableProcessors;
static {
getFreePhysicalMemorySize = getMethod("getFreePhysicalMemorySize");
getTotalPhysicalMemorySize = getMethod("getTotalPhysicalMemorySize");
getFreeSwapSpaceSize = getMethod("getFreeSwapSpaceSize");
getTotalSwapSpaceSize = getMethod("getTotalSwapSpaceSize");
getSystemLoadAverage = getMethod("getSystemLoadAverage");
getSystemCpuLoad = getMethod("getSystemCpuLoad");
getAvailableProcessors = getMethod("getAvailableProcessors");
}
/**
* Returns the amount of free physical memory in bytes.
*/
public long getFreePhysicalMemorySize() {
if (getFreePhysicalMemorySize == null) {
logger.warn("getFreePhysicalMemorySize is not available");
return 0;
}
try {
final long freeMem = (long) getFreePhysicalMemorySize.invoke(osMxBean);
if (freeMem < 0) {
logger.debug("OS reported a negative free memory value [{}]", freeMem);
return 0;
}
return freeMem;
} catch (Exception e) {
logger.warn("exception retrieving free physical memory", e);
return 0;
}
}
/**
* Returns the total amount of physical memory in bytes.
*/
public long getTotalPhysicalMemorySize() {
if (getTotalPhysicalMemorySize == null) {
logger.warn("getTotalPhysicalMemorySize is not available");
return 0;
}
try {
long totalMem = (long) getTotalPhysicalMemorySize.invoke(osMxBean);
if (totalMem < 0) {
logger.debug("OS reported a negative total memory value [{}]", totalMem);
return 0;
}
if (totalMem == 0 && isDebian8()) {
// workaround for JDK bug on debian8: https://github.com/elastic/elasticsearch/issues/67089#issuecomment-756114654
totalMem = getTotalMemFromProcMeminfo();
}
return totalMem;
} catch (Exception e) {
logger.warn("exception retrieving total physical memory", e);
return 0;
}
}
/**
* Returns the adjusted total amount of physical memory in bytes.
* Total memory may be overridden when some other process is running
* that is known to consume a non-negligible amount of memory. This
* is read from the "es.total_memory_bytes" system property. When
* there is no override this method returns the same value as
* {@link #getTotalPhysicalMemorySize}.
*/
public long getAdjustedTotalMemorySize() {
return Optional.ofNullable(getTotalMemoryOverride(memoryOverrideProperty)).orElse(getTotalPhysicalMemorySize());
}
static Long getTotalMemoryOverride(String memoryOverrideProperty) {
if (memoryOverrideProperty == null) {
return null;
}
try {
long memoryOverride = Long.parseLong(memoryOverrideProperty);
if (memoryOverride < 0) {
throw new IllegalArgumentException(
"Negative memory size specified in [es.total_memory_bytes]: [" + memoryOverrideProperty + "]"
);
}
return memoryOverride;
} catch (NumberFormatException e) {
throw new IllegalArgumentException("Invalid value for [es.total_memory_bytes]: [" + memoryOverrideProperty + "]", e);
}
}
/**
* Returns the amount of free swap space in bytes.
*/
public long getFreeSwapSpaceSize() {
if (getFreeSwapSpaceSize == null) {
logger.warn("getFreeSwapSpaceSize is not available");
return 0;
}
try {
final long mem = (long) getFreeSwapSpaceSize.invoke(osMxBean);
if (mem < 0) {
logger.debug("OS reported a negative free swap space size [{}]", mem);
return 0;
}
return mem;
} catch (Exception e) {
logger.warn("exception retrieving free swap space size", e);
return 0;
}
}
/**
* Returns the total amount of swap space in bytes.
*/
public long getTotalSwapSpaceSize() {
if (getTotalSwapSpaceSize == null) {
logger.warn("getTotalSwapSpaceSize is not available");
return 0;
}
try {
final long mem = (long) getTotalSwapSpaceSize.invoke(osMxBean);
if (mem < 0) {
logger.debug("OS reported a negative total swap space size [{}]", mem);
return 0;
}
return mem;
} catch (Exception e) {
logger.warn("exception retrieving total swap space size", e);
return 0;
}
}
/**
* The system load averages as an array.
*
* On Windows, this method returns {@code null}.
*
* On Linux, this method returns the 1, 5, and 15-minute load averages.
*
* On macOS, this method should return the 1-minute load average.
*
* @return the available system load averages or {@code null}
*/
final double[] getSystemLoadAverage() {
if (Constants.WINDOWS) {
return null;
} else if (Constants.LINUX) {
try {
final String procLoadAvg = readProcLoadavg();
assert procLoadAvg.matches("(\\d+\\.\\d+\\s+){3}\\d+/\\d+\\s+\\d+");
final String[] fields = procLoadAvg.split("\\s+");
return new double[] { Double.parseDouble(fields[0]), Double.parseDouble(fields[1]), Double.parseDouble(fields[2]) };
} catch (final IOException e) {
if (logger.isDebugEnabled()) {
logger.debug("error reading /proc/loadavg", e);
}
return null;
}
} else {
assert Constants.MAC_OS_X;
if (getSystemLoadAverage == null) {
return null;
}
try {
final double oneMinuteLoadAverage = (double) getSystemLoadAverage.invoke(osMxBean);
return new double[] { oneMinuteLoadAverage >= 0 ? oneMinuteLoadAverage : -1, -1, -1 };
} catch (IllegalAccessException | InvocationTargetException e) {
if (logger.isDebugEnabled()) {
logger.debug("error reading one minute load average from operating system", e);
}
return null;
}
}
}
/**
* The line from {@code /proc/loadavg}. The first three fields are the load averages averaged over 1, 5, and 15 minutes. The fourth
* field is two numbers separated by a slash, the first is the number of currently runnable scheduling entities, the second is the
* number of scheduling entities on the system. The fifth field is the PID of the most recently created process.
*
* @return the line from {@code /proc/loadavg} or {@code null}
*/
@SuppressForbidden(reason = "access /proc/loadavg")
String readProcLoadavg() throws IOException {
return readSingleLine(PathUtils.get("/proc/loadavg"));
}
public static short getSystemCpuPercent() {
return Probes.getLoadAndScaleToPercent(getSystemCpuLoad, osMxBean);
}
public static int getAvailableProcessors() {
if (getAvailableProcessors == null) {
logger.warn("getAvailableProcessors is not available");
return 0;
}
try {
int availableProcessors = (int) getAvailableProcessors.invoke(osMxBean);
if (availableProcessors <= 0) {
logger.debug("OS reported a non-positive number of available processors [{}]", availableProcessors);
return 0;
}
return availableProcessors;
} catch (Exception e) {
logger.warn("exception retrieving available processors", e);
return 0;
}
}
/**
* Reads a file containing a single line.
*
* @param path path to the file to read
* @return the single line
* @throws IOException if an I/O exception occurs reading the file
*/
private static String readSingleLine(final Path path) throws IOException {
final List<String> lines = Files.readAllLines(path);
assert lines.size() == 1 : String.join("\n", lines);
return lines.get(0);
}
// this property is to support a hack to workaround an issue with Docker containers mounting the cgroups hierarchy inconsistently with
// respect to /proc/self/cgroup; for Docker containers this should be set to "/"
private static final String CONTROL_GROUPS_HIERARCHY_OVERRIDE = System.getProperty("es.cgroups.hierarchy.override");
/**
* A map of the control groups to which the Elasticsearch process belongs. Note that this is a map because the control groups can vary
* from subsystem to subsystem. Additionally, this map can not be cached because a running process can be reclassified.
*
* @return a map from subsystems to the control group for the Elasticsearch process.
* @throws IOException if an I/O exception occurs reading {@code /proc/self/cgroup}
*/
private Map<String, String> getControlGroups() throws IOException {
final List<String> lines = readProcSelfCgroup();
final Map<String, String> controllerMap = new HashMap<>();
for (final String line : lines) {
/*
* The virtual file /proc/self/cgroup lists the control groups that the Elasticsearch process is a member of. Each line contains
* three colon-separated fields of the form hierarchy-ID:subsystem-list:cgroup-path. For cgroups version 1 hierarchies, the
* subsystem-list is a comma-separated list of subsystems. The subsystem-list can be empty if the hierarchy represents a cgroups
* version 2 hierarchy. For cgroups version 1
*/
final String[] fields = line.split(":");
assert fields.length == 3;
final String[] controllers = fields[1].split(",");
for (final String controller : controllers) {
final String controlGroupPath;
if (CONTROL_GROUPS_HIERARCHY_OVERRIDE != null) {
/*
* Docker violates the relationship between /proc/self/cgroup and the /sys/fs/cgroup hierarchy. It's possible that this
* will be fixed in future versions of Docker with cgroup namespaces, but this requires modern kernels. Thus, we provide
* an undocumented hack for overriding the control group path. Do not rely on this hack, it will be removed.
*/
controlGroupPath = CONTROL_GROUPS_HIERARCHY_OVERRIDE;
} else {
controlGroupPath = fields[2];
}
final String previous = controllerMap.put(controller, controlGroupPath);
assert previous == null;
}
}
return controllerMap;
}
/**
* The lines from {@code /proc/self/cgroup}. This file represents the control groups to which the Elasticsearch process belongs. Each
* line in this file represents a control group hierarchy of the form
* <p>
* {@code \d+:([^:,]+(?:,[^:,]+)?):(/.*)}
* <p>
* with the first field representing the hierarchy ID, the second field representing a comma-separated list of the subsystems bound to
* the hierarchy, and the last field representing the control group.
*
* @return the lines from {@code /proc/self/cgroup}
* @throws IOException if an I/O exception occurs reading {@code /proc/self/cgroup}
*/
@SuppressForbidden(reason = "access /proc/self/cgroup")
List<String> readProcSelfCgroup() throws IOException {
final List<String> lines = Files.readAllLines(PathUtils.get("/proc/self/cgroup"));
assert lines != null && lines.isEmpty() == false;
return lines;
}
/**
* The total CPU time in nanoseconds consumed by all tasks in the cgroup to which the Elasticsearch process belongs for the {@code
* cpuacct} subsystem.
*
* @param controlGroup the control group for the Elasticsearch process for the {@code cpuacct} subsystem
* @return the total CPU time in nanoseconds
* @throws IOException if an I/O exception occurs reading {@code cpuacct.usage} for the control group
*/
private BigInteger getCgroupCpuAcctUsageNanos(final String controlGroup) throws IOException {
return new BigInteger(readSysFsCgroupCpuAcctCpuAcctUsage(controlGroup));
}
/**
* Returns the line from {@code cpuacct.usage} for the control group to which the Elasticsearch process belongs for the {@code cpuacct}
* subsystem. This line represents the total CPU time in nanoseconds consumed by all tasks in the same control group.
*
* @param controlGroup the control group to which the Elasticsearch process belongs for the {@code cpuacct} subsystem
* @return the line from {@code cpuacct.usage}
* @throws IOException if an I/O exception occurs reading {@code cpuacct.usage} for the control group
*/
@SuppressForbidden(reason = "access /sys/fs/cgroup/cpuacct")
String readSysFsCgroupCpuAcctCpuAcctUsage(final String controlGroup) throws IOException {
return readSingleLine(PathUtils.get("/sys/fs/cgroup/cpuacct", controlGroup, "cpuacct.usage"));
}
private long[] getCgroupV2CpuLimit(String controlGroup) throws IOException {
String entry = readCgroupV2CpuLimit(controlGroup);
String[] parts = entry.split("\\s+");
assert parts.length == 2 : "Expected 2 fields in [cpu.max]";
long[] values = new long[2];
values[0] = "max".equals(parts[0]) ? -1L : Long.parseLong(parts[0]);
values[1] = Long.parseLong(parts[1]);
return values;
}
@SuppressForbidden(reason = "access /sys/fs/cgroup/cpu.max")
String readCgroupV2CpuLimit(String controlGroup) throws IOException {
return readSingleLine(PathUtils.get("/sys/fs/cgroup/", controlGroup, "cpu.max"));
}
/**
* The total period of time in microseconds for how frequently the Elasticsearch control group's access to CPU resources will be
* reallocated.
*
* @param controlGroup the control group for the Elasticsearch process for the {@code cpuacct} subsystem
* @return the CFS quota period in microseconds
* @throws IOException if an I/O exception occurs reading {@code cpu.cfs_period_us} for the control group
*/
private long getCgroupCpuAcctCpuCfsPeriodMicros(final String controlGroup) throws IOException {
return Long.parseLong(readSysFsCgroupCpuAcctCpuCfsPeriod(controlGroup));
}
/**
* Returns the line from {@code cpu.cfs_period_us} for the control group to which the Elasticsearch process belongs for the {@code cpu}
* subsystem. This line represents the period of time in microseconds for how frequently the control group's access to CPU resources
* will be reallocated.
*
* @param controlGroup the control group to which the Elasticsearch process belongs for the {@code cpu} subsystem
* @return the line from {@code cpu.cfs_period_us}
* @throws IOException if an I/O exception occurs reading {@code cpu.cfs_period_us} for the control group
*/
@SuppressForbidden(reason = "access /sys/fs/cgroup/cpu")
String readSysFsCgroupCpuAcctCpuCfsPeriod(final String controlGroup) throws IOException {
return readSingleLine(PathUtils.get("/sys/fs/cgroup/cpu", controlGroup, "cpu.cfs_period_us"));
}
/**
* The total time in microseconds that all tasks in the Elasticsearch control group can run during one period as specified by {@code
* cpu.cfs_period_us}.
*
* @param controlGroup the control group for the Elasticsearch process for the {@code cpuacct} subsystem
* @return the CFS quota in microseconds
* @throws IOException if an I/O exception occurs reading {@code cpu.cfs_quota_us} for the control group
*/
private long getCgroupCpuAcctCpuCfsQuotaMicros(final String controlGroup) throws IOException {
return Long.parseLong(readSysFsCgroupCpuAcctCpuAcctCfsQuota(controlGroup));
}
/**
* Returns the line from {@code cpu.cfs_quota_us} for the control group to which the Elasticsearch process belongs for the {@code cpu}
* subsystem. This line represents the total time in microseconds that all tasks in the control group can run during one period as
* specified by {@code cpu.cfs_period_us}.
*
* @param controlGroup the control group to which the Elasticsearch process belongs for the {@code cpu} subsystem
* @return the line from {@code cpu.cfs_quota_us}
* @throws IOException if an I/O exception occurs reading {@code cpu.cfs_quota_us} for the control group
*/
@SuppressForbidden(reason = "access /sys/fs/cgroup/cpu")
String readSysFsCgroupCpuAcctCpuAcctCfsQuota(final String controlGroup) throws IOException {
return readSingleLine(PathUtils.get("/sys/fs/cgroup/cpu", controlGroup, "cpu.cfs_quota_us"));
}
/**
* The CPU time statistics for all tasks in the Elasticsearch control group.
*
* @param controlGroup the control group for the Elasticsearch process for the {@code cpuacct} subsystem
* @return the CPU time statistics
* @throws IOException if an I/O exception occurs reading {@code cpu.stat} for the control group
*/
private OsStats.Cgroup.CpuStat getCgroupCpuAcctCpuStat(final String controlGroup) throws IOException {
final var SENTINEL_VALUE = BigInteger.valueOf(-1);
final List<String> lines = readSysFsCgroupCpuAcctCpuStat(controlGroup);
var numberOfPeriods = SENTINEL_VALUE;
var numberOfTimesThrottled = SENTINEL_VALUE;
var timeThrottledNanos = SENTINEL_VALUE;
for (final String line : lines) {
final String[] fields = line.split("\\s+");
switch (fields[0]) {
case "nr_periods" -> numberOfPeriods = new BigInteger(fields[1]);
case "nr_throttled" -> numberOfTimesThrottled = new BigInteger(fields[1]);
case "throttled_time" -> timeThrottledNanos = new BigInteger(fields[1]);
}
}
assert numberOfPeriods.equals(SENTINEL_VALUE) == false;
assert numberOfTimesThrottled.equals(SENTINEL_VALUE) == false;
assert timeThrottledNanos.equals(SENTINEL_VALUE) == false;
return new OsStats.Cgroup.CpuStat(numberOfPeriods, numberOfTimesThrottled, timeThrottledNanos);
}
/**
* Returns the lines from {@code cpu.stat} for the control group to which the Elasticsearch process belongs for the {@code cpu}
* subsystem. These lines represent the CPU time statistics and have the form
* <blockquote><pre>
* nr_periods \d+
* nr_throttled \d+
* throttled_time \d+
* </pre></blockquote>
* where {@code nr_periods} is the number of period intervals as specified by {@code cpu.cfs_period_us} that have elapsed, {@code
* nr_throttled} is the number of times tasks in the given control group have been throttled, and {@code throttled_time} is the total
* time in nanoseconds for which tasks in the given control group have been throttled.
*
* If the burst feature of the scheduler is enabled, the statistics contain an additional two fields of the form
* <blockquote><pre>
* nr_bursts \d+
* burst_time
* </pre></blockquote>
*
* When schedstat_enabled is enabled, an additional statistics information {@code wait_sum} will also be available
* <blockquote><pre>
* wait_sum \d+
* </pre></blockquote>
* {@code wait_sum} represent the conflict between task groups, which is simply sum the wait time of group's cfs_rq
* These three additional fields are currently ignored.
* @param controlGroup the control group to which the Elasticsearch process belongs for the {@code cpu} subsystem
* @return the lines from {@code cpu.stat}
* @throws IOException if an I/O exception occurs reading {@code cpu.stat} for the control group
*/
@SuppressForbidden(reason = "access /sys/fs/cgroup/cpu")
List<String> readSysFsCgroupCpuAcctCpuStat(final String controlGroup) throws IOException {
final List<String> lines = Files.readAllLines(PathUtils.get("/sys/fs/cgroup/cpu", controlGroup, "cpu.stat"));
assert lines != null && (lines.size() >= 3);
return lines;
}
/**
* The maximum amount of user memory (including file cache).
* If there is no limit then some Linux versions return the maximum value that can be stored in an
* unsigned 64 bit number, and this will overflow a long, hence the result type is <code>String</code>.
* (The alternative would have been <code>BigInteger</code> but then it would not be possible to index
* the OS stats document into Elasticsearch without losing information, as <code>BigInteger</code> is
* not a supported Elasticsearch type.)
*
* @param controlGroup the control group for the Elasticsearch process for the {@code memory} subsystem
* @return the maximum amount of user memory (including file cache)
* @throws IOException if an I/O exception occurs reading {@code memory.limit_in_bytes} for the control group
*/
private String getCgroupMemoryLimitInBytes(final String controlGroup) throws IOException {
return readSysFsCgroupMemoryLimitInBytes(controlGroup);
}
/**
* Returns the line from {@code memory.limit_in_bytes} for the control group to which the Elasticsearch process belongs for the
* {@code memory} subsystem. This line represents the maximum amount of user memory (including file cache).
*
* @param controlGroup the control group to which the Elasticsearch process belongs for the {@code memory} subsystem
* @return the line from {@code memory.limit_in_bytes}
* @throws IOException if an I/O exception occurs reading {@code memory.limit_in_bytes} for the control group
*/
@SuppressForbidden(reason = "access /sys/fs/cgroup/memory")
String readSysFsCgroupMemoryLimitInBytes(final String controlGroup) throws IOException {
return readSingleLine(PathUtils.get("/sys/fs/cgroup/memory", controlGroup, "memory.limit_in_bytes"));
}
/**
* The maximum amount of user memory (including file cache).
* If there is no limit then some Linux versions return the maximum value that can be stored in an
* unsigned 64 bit number, and this will overflow a long, hence the result type is <code>String</code>.
* (The alternative would have been <code>BigInteger</code> but then it would not be possible to index
* the OS stats document into Elasticsearch without losing information, as <code>BigInteger</code> is
* not a supported Elasticsearch type.)
*
* @param controlGroup the control group for the Elasticsearch process for the {@code memory} subsystem
* @return the maximum amount of user memory (including file cache)
* @throws IOException if an I/O exception occurs reading {@code memory.limit_in_bytes} for the control group
*/
private String getCgroupV2MemoryLimitInBytes(final String controlGroup) throws IOException {
return readSysFsCgroupV2MemoryLimitInBytes(controlGroup);
}
/**
* Returns the line from {@code memory.max} for the control group to which the Elasticsearch process belongs for the
* {@code memory} subsystem. This line represents the maximum amount of user memory (including file cache).
*
* @param controlGroup the control group to which the Elasticsearch process belongs for the {@code memory} subsystem
* @return the line from {@code memory.max}
* @throws IOException if an I/O exception occurs reading {@code memory.max} for the control group
*/
@SuppressForbidden(reason = "access /sys/fs/cgroup/memory.max")
String readSysFsCgroupV2MemoryLimitInBytes(final String controlGroup) throws IOException {
return readSingleLine(PathUtils.get("/sys/fs/cgroup/", controlGroup, "memory.max"));
}
/**
* The total current memory usage by processes in the cgroup (in bytes).
* If there is no limit then some Linux versions return the maximum value that can be stored in an
* unsigned 64 bit number, and this will overflow a long, hence the result type is <code>String</code>.
* (The alternative would have been <code>BigInteger</code> but then it would not be possible to index
* the OS stats document into Elasticsearch without losing information, as <code>BigInteger</code> is
* not a supported Elasticsearch type.)
*
* @param controlGroup the control group for the Elasticsearch process for the {@code memory} subsystem
* @return the total current memory usage by processes in the cgroup (in bytes)
* @throws IOException if an I/O exception occurs reading {@code memory.limit_in_bytes} for the control group
*/
private String getCgroupMemoryUsageInBytes(final String controlGroup) throws IOException {
return readSysFsCgroupMemoryUsageInBytes(controlGroup);
}
/**
* Returns the line from {@code memory.usage_in_bytes} for the control group to which the Elasticsearch process belongs for the
* {@code memory} subsystem. This line represents the total current memory usage by processes in the cgroup (in bytes).
*
* @param controlGroup the control group to which the Elasticsearch process belongs for the {@code memory} subsystem
* @return the line from {@code memory.usage_in_bytes}
* @throws IOException if an I/O exception occurs reading {@code memory.usage_in_bytes} for the control group
*/
@SuppressForbidden(reason = "access /sys/fs/cgroup/memory")
String readSysFsCgroupMemoryUsageInBytes(final String controlGroup) throws IOException {
return readSingleLine(PathUtils.get("/sys/fs/cgroup/memory", controlGroup, "memory.usage_in_bytes"));
}
/**
* The total current memory usage by processes in the cgroup (in bytes).
* If there is no limit then some Linux versions return the maximum value that can be stored in an
* unsigned 64 bit number, and this will overflow a long, hence the result type is <code>String</code>.
* (The alternative would have been <code>BigInteger</code> but then it would not be possible to index
* the OS stats document into Elasticsearch without losing information, as <code>BigInteger</code> is
* not a supported Elasticsearch type.)
*
* @param controlGroup the control group for the Elasticsearch process for the {@code memory} subsystem
* @return the total current memory usage by processes in the cgroup (in bytes)
* @throws IOException if an I/O exception occurs reading {@code memory.current} for the control group
*/
private String getCgroupV2MemoryUsageInBytes(final String controlGroup) throws IOException {
return readSysFsCgroupV2MemoryUsageInBytes(controlGroup);
}
/**
* Returns the line from {@code memory.current} for the control group to which the Elasticsearch process belongs for the
* {@code memory} subsystem. This line represents the total current memory usage by processes in the cgroup (in bytes).
*
* @param controlGroup the control group to which the Elasticsearch process belongs for the {@code memory} subsystem
* @return the line from {@code memory.current}
* @throws IOException if an I/O exception occurs reading {@code memory.current} for the control group
*/
@SuppressForbidden(reason = "access /sys/fs/cgroup/memory.current")
String readSysFsCgroupV2MemoryUsageInBytes(final String controlGroup) throws IOException {
return readSingleLine(PathUtils.get("/sys/fs/cgroup/", controlGroup, "memory.current"));
}
/**
* Checks if cgroup stats are available by checking for the existence of {@code /proc/self/cgroup}, {@code /sys/fs/cgroup/cpu},
* {@code /sys/fs/cgroup/cpuacct} and {@code /sys/fs/cgroup/memory}.
*
* @return {@code true} if the stats are available, otherwise {@code false}
*/
@SuppressForbidden(reason = "access /proc/self/cgroup, /sys/fs/cgroup/cpu, /sys/fs/cgroup/cpuacct and /sys/fs/cgroup/memory")
boolean areCgroupStatsAvailable() throws IOException {
if (Files.exists(PathUtils.get("/proc/self/cgroup")) == false) {
return false;
}
List<String> lines = readProcSelfCgroup();
// cgroup v2
if (lines.size() == 1 && lines.get(0).startsWith("0::")) {
return Stream.of("/sys/fs/cgroup/cpu.stat", "/sys/fs/cgroup/memory.stat").allMatch(path -> Files.exists(PathUtils.get(path)));
}
return Stream.of("/sys/fs/cgroup/cpu", "/sys/fs/cgroup/cpuacct", "/sys/fs/cgroup/memory")
.allMatch(path -> Files.exists(PathUtils.get(path)));
}
/**
* The CPU statistics for all tasks in the Elasticsearch control group.
*
* @param controlGroup the control group to which the Elasticsearch process belongs for the {@code memory} subsystem
* @return the CPU statistics
* @throws IOException if an I/O exception occurs reading {@code cpu.stat} for the control group
*/
@SuppressForbidden(reason = "Uses PathUtils.get to generate meaningful assertion messages")
private Map<String, BigInteger> getCgroupV2CpuStats(String controlGroup) throws IOException {
final List<String> lines = readCgroupV2CpuStats(controlGroup);
final Map<String, BigInteger> stats = new HashMap<>();
final BigInteger SENTINEL_VALUE = BigInteger.valueOf(-1);
for (String line : lines) {
String[] parts = line.split("\\s+");
assert parts.length == 2 : "Corrupt cpu.stat line: [" + line + "]";
stats.put(parts[0], new BigInteger(parts[1]));
}
final List<String> expectedKeys = List.of("system_usec", "usage_usec", "user_usec");
expectedKeys.forEach(key -> {
assert stats.containsKey(key) : "[" + key + "] missing from " + PathUtils.get("/sys/fs/cgroup", controlGroup, "cpu.stat");
assert stats.get(key).compareTo(SENTINEL_VALUE) != 0 : stats.get(key).toString();
});
final List<String> optionalKeys = List.of("nr_periods", "nr_throttled", "throttled_usec");
optionalKeys.forEach(key -> {
if (stats.containsKey(key) == false) {
stats.put(key, BigInteger.ZERO);
}
assert stats.get(key).compareTo(SENTINEL_VALUE) != 0
: "[" + key + "] in " + PathUtils.get("/sys/fs/cgroup", controlGroup, "cpu.stat") + " is -1";
});
return stats;
}
@SuppressForbidden(reason = "access /sys/fs/cgroup/cpu.stat")
List<String> readCgroupV2CpuStats(final String controlGroup) throws IOException {
return Files.readAllLines(PathUtils.get("/sys/fs/cgroup", controlGroup, "cpu.stat"));
}
/**
* Basic cgroup stats.
*
* @return basic cgroup stats, or {@code null} if an I/O exception occurred reading the cgroup stats
*/
private OsStats.Cgroup getCgroup() {
try {
if (areCgroupStatsAvailable() == false) {
return null;
}
final Map<String, String> controllerMap = getControlGroups();
assert controllerMap.isEmpty() == false;
final String cpuAcctControlGroup;
final BigInteger cgroupCpuAcctUsageNanos;
final long cgroupCpuAcctCpuCfsPeriodMicros;
final long cgroupCpuAcctCpuCfsQuotaMicros;
final String cpuControlGroup;
final OsStats.Cgroup.CpuStat cpuStat;
final String memoryControlGroup;
final String cgroupMemoryLimitInBytes;
final String cgroupMemoryUsageInBytes;
if (controllerMap.size() == 1 && controllerMap.containsKey("")) {
// There's a single hierarchy for all controllers
cpuControlGroup = cpuAcctControlGroup = memoryControlGroup = controllerMap.get("");
// `cpuacct` was merged with `cpu` in v2
final Map<String, BigInteger> cpuStatsMap = getCgroupV2CpuStats(cpuControlGroup);
final BigInteger THOUSAND = BigInteger.valueOf(1000);
cgroupCpuAcctUsageNanos = cpuStatsMap.get("usage_usec").multiply(THOUSAND); // convert from micros to nanos
long[] cpuLimits = getCgroupV2CpuLimit(cpuControlGroup);
cgroupCpuAcctCpuCfsQuotaMicros = cpuLimits[0];
cgroupCpuAcctCpuCfsPeriodMicros = cpuLimits[1];
cpuStat = new OsStats.Cgroup.CpuStat(
cpuStatsMap.get("nr_periods"),
cpuStatsMap.get("nr_throttled"),
cpuStatsMap.get("throttled_usec").multiply(THOUSAND)
);
cgroupMemoryLimitInBytes = getCgroupV2MemoryLimitInBytes(memoryControlGroup);
cgroupMemoryUsageInBytes = getCgroupV2MemoryUsageInBytes(memoryControlGroup);
} else {
cpuAcctControlGroup = controllerMap.get("cpuacct");
if (cpuAcctControlGroup == null) {
logger.debug("no [cpuacct] data found in cgroup stats");
return null;
}
cgroupCpuAcctUsageNanos = getCgroupCpuAcctUsageNanos(cpuAcctControlGroup);
cpuControlGroup = controllerMap.get("cpu");
if (cpuControlGroup == null) {
logger.debug("no [cpu] data found in cgroup stats");
return null;
}
cgroupCpuAcctCpuCfsPeriodMicros = getCgroupCpuAcctCpuCfsPeriodMicros(cpuControlGroup);
cgroupCpuAcctCpuCfsQuotaMicros = getCgroupCpuAcctCpuCfsQuotaMicros(cpuControlGroup);
cpuStat = getCgroupCpuAcctCpuStat(cpuControlGroup);
memoryControlGroup = controllerMap.get("memory");
if (memoryControlGroup == null) {
logger.debug("no [memory] data found in cgroup stats");
return null;
}
cgroupMemoryLimitInBytes = getCgroupMemoryLimitInBytes(memoryControlGroup);
cgroupMemoryUsageInBytes = getCgroupMemoryUsageInBytes(memoryControlGroup);
}
return new OsStats.Cgroup(
cpuAcctControlGroup,
cgroupCpuAcctUsageNanos,
cpuControlGroup,
cgroupCpuAcctCpuCfsPeriodMicros,
cgroupCpuAcctCpuCfsQuotaMicros,
cpuStat,
memoryControlGroup,
cgroupMemoryLimitInBytes,
cgroupMemoryUsageInBytes
);
} catch (final IOException e) {
logger.debug("error reading control group stats", e);
return null;
}
}
private static | OsProbe |
java | apache__kafka | streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBStore.java | {
"start": 35135,
"end": 36970
} | interface ____ {
void put(final DBAccessor accessor, final byte[] key, final byte[] value);
void prepareBatch(final List<KeyValue<Bytes, byte[]>> entries,
final WriteBatchInterface batch) throws RocksDBException;
byte[] get(final DBAccessor accessor, final byte[] key) throws RocksDBException;
byte[] get(final DBAccessor accessor, final byte[] key, final ReadOptions readOptions) throws RocksDBException;
/**
* In contrast to get(), we don't migrate the key to new CF.
* <p>
* Use for get() within delete() -- no need to migrate, as it's deleted anyway
*/
byte[] getOnly(final DBAccessor accessor, final byte[] key) throws RocksDBException;
ManagedKeyValueIterator<Bytes, byte[]> range(final DBAccessor accessor,
final Bytes from,
final Bytes to,
final boolean forward);
/**
* Deletes keys entries in the range ['from', 'to'], including 'from' and excluding 'to'.
*/
void deleteRange(final DBAccessor accessor, final byte[] from, final byte[] to);
ManagedKeyValueIterator<Bytes, byte[]> all(final DBAccessor accessor, final boolean forward);
ManagedKeyValueIterator<Bytes, byte[]> prefixScan(final DBAccessor accessor, final Bytes prefix);
long approximateNumEntries(final DBAccessor accessor) throws RocksDBException;
void flush(final DBAccessor accessor) throws RocksDBException;
void addToBatch(final byte[] key,
final byte[] value,
final WriteBatchInterface batch) throws RocksDBException;
void close();
}
| ColumnFamilyAccessor |
java | spring-projects__spring-boot | smoke-test/spring-boot-smoke-test-tomcat/src/test/java/smoketest/tomcat/SampleTomcatApplicationTests.java | {
"start": 5638,
"end": 5909
} | class ____ {
@Bean
RestTemplateBuilder restTemplateBuilder() {
return new RestTemplateBuilder().requestFactoryBuilder(ClientHttpRequestFactoryBuilder.jdk()
.withCustomizer((factory) -> factory.enableCompression(false)));
}
}
}
| DisableCompressionConfiguration |
java | apache__camel | core/camel-support/src/main/java/org/apache/camel/support/task/ForegroundTask.java | {
"start": 1393,
"end": 1554
} | class ____ extends AbstractTask implements BlockingTask {
/**
* A builder helper for building new foreground tasks
*/
public static | ForegroundTask |
java | hibernate__hibernate-orm | hibernate-envers/src/test/java/org/hibernate/orm/test/envers/integration/strategy/RevisionEndTimestampJoinedInheritanceTest.java | {
"start": 6662,
"end": 7521
} | class ____ extends FullTimeEmployee {
private String title;
Executive() {
}
Executive(String name, Integer salary, String title) {
super( name, salary );
this.title = title;
}
public String getTitle() {
return title;
}
public void setTitle(String title) {
this.title = title;
}
@Override
public int hashCode() {
int result = super.hashCode();
result = result * 31 + ( title != null ? title.hashCode() : 0 );
return result;
}
@Override
public boolean equals(Object object) {
if ( this == object ) {
return true;
}
if ( object == null || !( object instanceof Executive ) ) {
return false;
}
if ( !super.equals( object ) ) {
return false;
}
Executive that = (Executive) object;
return !( title != null ? !title.equals( that.title ) : that.title != null );
}
}
}
| Executive |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/BrowseEndpointBuilderFactory.java | {
"start": 1597,
"end": 1948
} | interface ____
extends
EndpointConsumerBuilder {
default AdvancedBrowseEndpointConsumerBuilder advanced() {
return (AdvancedBrowseEndpointConsumerBuilder) this;
}
}
/**
* Advanced builder for endpoint consumers for the Browse component.
*/
public | BrowseEndpointConsumerBuilder |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/persistence/ResourceVersion.java | {
"start": 1151,
"end": 1461
} | interface ____<R> extends Comparable<R>, Serializable {
/**
* Check whether the state handle is existing.
*
* @return true if state handle exists with current {@link ResourceVersion} on external storage.
* Or false it does not exist.
*/
boolean isExisting();
}
| ResourceVersion |
java | micronaut-projects__micronaut-core | http-server-tck/src/main/java/io/micronaut/http/server/tck/CorsUtils.java | {
"start": 832,
"end": 928
} | class ____ do CORS related assertions.
* @author Sergio del Amo
* @since 3.9.0
*/
public final | to |
java | apache__avro | lang/java/ipc/src/test/java/org/apache/avro/io/Perf.java | {
"start": 50671,
"end": 50961
} | class ____ extends ReflectTest<double[]> {
ReflectDoubleArrayTest() throws IOException {
super("ReflectDoubleArray", new double[0], 20);
}
@Override
protected double[] createDatum(Random r) {
return populateDoubleArray(r);
}
}
static | ReflectDoubleArrayTest |
java | apache__hadoop | hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AzureBlockManager.java | {
"start": 1090,
"end": 4623
} | class ____ {
private static final Logger LOG = LoggerFactory.getLogger(
AbfsOutputStream.class);
/** Factory for blocks. */
private final DataBlocks.BlockFactory blockFactory;
/** Current data block. Null means none currently active. */
private AbfsBlock activeBlock;
/** Count of blocks uploaded. */
private long blockCount = 0;
/** The size of a single block. */
private final int blockSize;
private AbfsOutputStream abfsOutputStream;
/**
* Constructs an AzureBlockManager.
*
* @param abfsOutputStream the output stream associated with this block manager
* @param blockFactory the factory to create blocks
* @param blockSize the size of each block
*/
protected AzureBlockManager(AbfsOutputStream abfsOutputStream,
DataBlocks.BlockFactory blockFactory,
final int blockSize) {
this.abfsOutputStream = abfsOutputStream;
this.blockFactory = blockFactory;
this.blockSize = blockSize;
}
/**
* Creates a new block at the given position.
*
* @param position the position in the output stream where the block should be created
* @return the created block
* @throws IOException if an I/O error occurs
*/
protected final synchronized AbfsBlock createBlock(final long position)
throws IOException {
return createBlockInternal(position);
}
/**
* Internal method to create a new block at the given position.
*
* @param position the position in the output stream where the block should be created.
* @return the created block.
* @throws IOException if an I/O error occurs.
*/
protected abstract AbfsBlock createBlockInternal(long position)
throws IOException;
/**
* Gets the active block.
*
* @return the active block
*/
public synchronized AbfsBlock getActiveBlock() {
return activeBlock;
}
/**
* Sets the active block.
*
* @param activeBlock the block to set as active
*/
public synchronized void setActiveBlock(final AbfsBlock activeBlock) {
this.activeBlock = activeBlock;
}
/**
* Checks if there is an active block.
*
* @return true if there is an active block, false otherwise
*/
protected synchronized boolean hasActiveBlock() {
return activeBlock != null;
}
/**
* Gets the block factory.
*
* @return the block factory
*/
protected DataBlocks.BlockFactory getBlockFactory() {
return blockFactory;
}
/**
* Gets the count of blocks uploaded.
*
* @return the block count
*/
public long getBlockCount() {
return blockCount;
}
/**
* Sets the count of blocks uploaded.
*
* @param blockCount the count of blocks to set
*/
protected void setBlockCount(final long blockCount) {
this.blockCount = blockCount;
}
/**
* Gets the block size.
*
* @return the block size
*/
protected int getBlockSize() {
return blockSize;
}
/**
* Gets the AbfsOutputStream associated with this block manager.
*
* @return the AbfsOutputStream
*/
protected AbfsOutputStream getAbfsOutputStream() {
return abfsOutputStream;
}
/**
* Clears the active block.
*/
void clearActiveBlock() {
synchronized (this) {
if (activeBlock != null) {
LOG.debug("Clearing active block");
}
activeBlock = null;
}
}
// Used to clear any resources used by the block manager.
void close() {
if (hasActiveBlock()) {
clearActiveBlock();
}
LOG.debug("AzureBlockManager closed.");
}
}
| AzureBlockManager |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java | {
"start": 14636,
"end": 16135
} | class ____ extends FilterXContentParserWrapper {
private final String value;
GeoHashMultiFieldParser(XContentParser innerParser, String value) {
super(innerParser);
this.value = value;
}
@Override
public XContentString optimizedTextOrNull() throws IOException {
return new Text(value);
}
@Override
public String textOrNull() throws IOException {
return value;
}
@Override
public String text() throws IOException {
return value;
}
@Override
public Token currentToken() {
return Token.VALUE_STRING;
}
@Override
public Token nextToken() throws IOException {
throw new UnsupportedOperationException();
}
}
@Override
protected void indexScriptValues(
SearchLookup searchLookup,
LeafReaderContext readerContext,
int doc,
DocumentParserContext documentParserContext
) {
this.scriptValues.valuesForDoc(searchLookup, readerContext, doc, point -> {
try {
index(documentParserContext, point);
} catch (IOException e) {
throw new UncheckedIOException(e); // only thrown by MultiFields which is always null
}
});
}
@Override
protected String contentType() {
return CONTENT_TYPE;
}
public static | GeoHashMultiFieldParser |
java | quarkusio__quarkus | independent-projects/resteasy-reactive/server/runtime/src/main/java/org/jboss/resteasy/reactive/server/spi/ServerMessageBodyWriter.java | {
"start": 1061,
"end": 1567
} | class ____ implements ServerMessageBodyWriter<Object>, AllWriteableMarker {
@Override
public final boolean isWriteable(Class<?> type, Type genericType, ResteasyReactiveResourceInfo target,
MediaType mediaType) {
return true;
}
@Override
public final boolean isWriteable(Class<?> type, Type genericType,
Annotation[] annotations, MediaType mediaType) {
return true;
}
}
}
| AllWriteableMessageBodyWriter |
java | micronaut-projects__micronaut-core | inject-java/src/test/java/io/micronaut/aop/adapter/ByteBuddyRuntimeAdapterBean.java | {
"start": 1039,
"end": 1309
} | class ____ {
private String message;
@ByteBuddyStacktraceVerified
@Adapter(MyAdapter.class)
void onMessage(String message) {
this.message = message;
}
public String getMessage() {
return message;
}
}
| ByteBuddyRuntimeAdapterBean |
java | micronaut-projects__micronaut-core | core-processor/src/main/java/io/micronaut/inject/ast/ReflectGenericPlaceholderElement.java | {
"start": 1108,
"end": 2346
} | class ____
extends ReflectTypeElement<TypeVariable<?>>
implements GenericPlaceholderElement, ArrayableClassElement {
private final int arrayDimensions;
ReflectGenericPlaceholderElement(TypeVariable<?> typeVariable, int arrayDimensions) {
super(typeVariable);
this.arrayDimensions = arrayDimensions;
}
@Override
public ClassElement withArrayDimensions(int arrayDimensions) {
return new ReflectGenericPlaceholderElement(type, arrayDimensions);
}
@Override
public int getArrayDimensions() {
return arrayDimensions;
}
@NonNull
@Override
public List<? extends ClassElement> getBounds() {
return Arrays.stream(type.getBounds()).map(ClassElement::of).collect(Collectors.toList());
}
@Override
@NonNull
public String getVariableName() {
return type.getName();
}
@Override
public Optional<Element> getDeclaringElement() {
GenericDeclaration declaration = type.getGenericDeclaration();
if (declaration instanceof Class class1) {
return Optional.of(ClassElement.of(class1));
} else {
return Optional.empty();
}
}
}
| ReflectGenericPlaceholderElement |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/core/io/support/SpringFactoriesLoader.java | {
"start": 4677,
"end": 5070
} | class ____
* and a default argument resolver that expects a no-arg constructor.
* <p>The returned factories are sorted using {@link AnnotationAwareOrderComparator}.
* <p>If a custom instantiation strategy is required, use {@code load(...)}
* with a custom {@link ArgumentResolver ArgumentResolver} and/or
* {@link FailureHandler FailureHandler}.
* <p>If duplicate implementation | loader |
java | grpc__grpc-java | api/src/test/java/io/grpc/AttributesTest.java | {
"start": 2271,
"end": 4138
} | class ____ {
@Override public boolean equals(Object o) {
return o instanceof EqualObject;
}
@Override public int hashCode() {
return 42;
}
}
Attributes.Key<EqualObject> key = Attributes.Key.create("ints");
EqualObject v1 = new EqualObject();
EqualObject v2 = new EqualObject();
assertNotSame(v1, v2);
assertEquals(v1, v2);
Attributes attr1 = Attributes.newBuilder().set(key, v1).build();
Attributes attr2 = Attributes.newBuilder().set(key, v2).build();
assertEquals(attr1, attr2);
assertEquals(attr1.hashCode(), attr2.hashCode());
}
@Test
public void discard_baseAttributes() {
Attributes attrs = Attributes.newBuilder().set(YOLO_KEY, "value").build();
Attributes newAttrs = attrs.toBuilder().discard(YOLO_KEY).build();
assertNull(newAttrs.get(YOLO_KEY));
assertThat(newAttrs.keysForTest()).doesNotContain(YOLO_KEY);
}
@Test
public void discard_noBase() {
Attributes.Builder attrs = Attributes.newBuilder().set(YOLO_KEY, "value");
Attributes newAttrs = attrs.discard(YOLO_KEY).build();
assertNull(newAttrs.get(YOLO_KEY));
assertThat(newAttrs.keysForTest()).doesNotContain(YOLO_KEY);
}
@Test
public void discard_baseAttributesAndBuilder() {
Attributes attrs = Attributes.newBuilder().set(YOLO_KEY, "value").build();
Attributes.Builder attrsBuilder = attrs.toBuilder().set(YOLO_KEY, "other value");
Attributes newAttrs = attrsBuilder.discard(YOLO_KEY).build();
assertNull(newAttrs.get(YOLO_KEY));
assertThat(newAttrs.keysForTest()).doesNotContain(YOLO_KEY);
}
@Test
public void discard_empty() {
Attributes newAttrs = Attributes.EMPTY.toBuilder().discard(YOLO_KEY).build();
assertNull(newAttrs.get(YOLO_KEY));
assertThat(newAttrs.keysForTest()).doesNotContain(YOLO_KEY);
}
}
| EqualObject |
java | hibernate__hibernate-orm | hibernate-envers/src/main/java/org/hibernate/envers/internal/reader/AuditReaderImplementor.java | {
"start": 402,
"end": 568
} | interface ____ extends AuditReader {
SessionImplementor getSessionImplementor();
Session getSession();
FirstLevelCache getFirstLevelCache();
}
| AuditReaderImplementor |
java | spring-projects__spring-framework | spring-core/src/test/java/org/springframework/core/annotation/AnnotationUtilsTests.java | {
"start": 63882,
"end": 64266
} | interface ____ {
@AliasFor(annotation = ContextConfig.class, attribute = "location")
String location1() default "foo";
@AliasFor(annotation = ContextConfig.class, attribute = "location")
String location2() default "bar";
}
@ImplicitAliasesWithDifferentDefaultValuesContextConfig(location1 = "1", location2 = "2")
static | ImplicitAliasesWithDifferentDefaultValuesContextConfig |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/annotation/CustomSerializerTest.java | {
"start": 814,
"end": 1257
} | class ____ implements ObjectSerializer {
@Override
public void write(JSONSerializer serializer, Object object, Object fieldName, Type fieldType,
int features) throws IOException {
Model model = (Model) object;
SerializeWriter out = serializer.getWriter();
out.writeFieldValue('{', "ID", model.id);
out.write('}');
}
}
}
| ModelSerializer |
java | apache__camel | components/camel-jt400/src/test/java/org/apache/camel/component/jt400/Jt400EndpointTest.java | {
"start": 1057,
"end": 1907
} | class ____ extends Jt400TestSupport {
private Jt400Endpoint endpoint;
@Override
public void doPostSetup() throws Exception {
endpoint = (Jt400Endpoint) resolveMandatoryEndpoint(
"jt400://user:password@host/qsys.lib/library.lib/queue.dtaq?ccsid=500&format=binary&guiAvailable=true&connectionPool=#mockPool");
}
/**
* Check that the AS/400 connection is correctly configured for the URL
*/
@Test
public void testSystemConfiguration() {
assertEquals("USER", endpoint.getSystem().getUserId());
assertEquals("host", endpoint.getSystem().getSystemName());
assertEquals(500, endpoint.getSystem().getCcsid());
assertEquals(Jt400Configuration.Format.binary, endpoint.getFormat());
assertTrue(endpoint.getSystem().isGuiAvailable());
}
}
| Jt400EndpointTest |
java | apache__rocketmq | example/src/main/java/org/apache/rocketmq/example/simple/PopConsumer.java | {
"start": 1530,
"end": 3362
} | class ____ {
public static final String TOPIC = "TopicTest";
public static final String CONSUMER_GROUP = "CID_JODIE_1";
public static final String NAMESRV_ADDR = "127.0.0.1:9876";
public static void main(String[] args) throws Exception {
switchPop();
DefaultMQPushConsumer consumer = new DefaultMQPushConsumer(CONSUMER_GROUP);
consumer.subscribe(TOPIC, "*");
consumer.setConsumeFromWhere(ConsumeFromWhere.CONSUME_FROM_FIRST_OFFSET);
consumer.registerMessageListener(new MessageListenerConcurrently() {
@Override
public ConsumeConcurrentlyStatus consumeMessage(List<MessageExt> msgs, ConsumeConcurrentlyContext context) {
System.out.printf("%s Receive New Messages: %s %n", Thread.currentThread().getName(), msgs);
return ConsumeConcurrentlyStatus.CONSUME_SUCCESS;
}
});
// Uncomment the following line while debugging, namesrvAddr should be set to your local address
// consumer.setNamesrvAddr(NAMESRV_ADDR);
consumer.setClientRebalance(false);
consumer.start();
System.out.printf("Consumer Started.%n");
}
private static void switchPop() throws Exception {
DefaultMQAdminExt mqAdminExt = new DefaultMQAdminExt();
// mqAdminExt.setNamesrvAddr(NAMESRV_ADDR);
mqAdminExt.start();
List<BrokerData> brokerDatas = mqAdminExt.examineTopicRouteInfo(TOPIC).getBrokerDatas();
for (BrokerData brokerData : brokerDatas) {
Set<String> brokerAddrs = new HashSet<>(brokerData.getBrokerAddrs().values());
for (String brokerAddr : brokerAddrs) {
mqAdminExt.setMessageRequestMode(brokerAddr, TOPIC, CONSUMER_GROUP, MessageRequestMode.POP, 8, 3_000);
}
}
}
}
| PopConsumer |
java | quarkusio__quarkus | extensions/jaxp/deployment/src/main/java/io/quarkus/jaxp/deployment/JaxpProcessor.java | {
"start": 460,
"end": 2850
} | class ____ {
@BuildStep
void reflectiveClasses(BuildProducer<ReflectiveClassBuildItem> reflectiveClass) {
reflectiveClass
.produce(ReflectiveClassBuildItem.builder("com.sun.org.apache.xerces.internal.jaxp.DocumentBuilderFactoryImpl",
"com.sun.org.apache.xerces.internal.jaxp.datatype.DatatypeFactoryImpl",
"com.sun.org.apache.xalan.internal.xsltc.trax.TransformerFactoryImpl",
"com.sun.org.apache.xerces.internal.jaxp.SAXParserFactoryImpl",
"com.sun.org.apache.xerces.internal.parsers.SAXParser",
"com.sun.org.apache.xml.internal.utils.FastStringBuffer").build());
reflectiveClass.produce(ReflectiveClassBuildItem.builder("com.sun.xml.internal.stream.XMLInputFactoryImpl",
"com.sun.xml.internal.stream.XMLOutputFactoryImpl",
"com.sun.org.apache.xpath.internal.functions.FuncNot",
"com.sun.org.apache.xerces.internal.impl.dv.xs.SchemaDVFactoryImpl",
"javax.xml.namespace.QName").methods().build());
}
@BuildStep
void resourceBundles(BuildProducer<NativeImageResourceBundleBuildItem> resourceBundle) {
Consumer<String> resourceBundleItemProducer = bundleName -> resourceBundle
.produce(new NativeImageResourceBundleBuildItem(bundleName, "java.xml"));
Stream.of(
"com.sun.org.apache.xml.internal.serializer.utils.SerializerMessages",
"com.sun.org.apache.xml.internal.res.XMLErrorResources",
"com.sun.org.apache.xerces.internal.impl.msg.SAXMessages",
"com.sun.org.apache.xerces.internal.impl.msg.XMLMessages",
"com.sun.org.apache.xerces.internal.impl.msg.XMLSchemaMessages",
"com.sun.org.apache.xerces.internal.impl.xpath.regex.message")
.forEach(resourceBundleItemProducer);
}
@BuildStep
void resources(BuildProducer<NativeImageResourceBuildItem> resource) {
Stream.of(
"html",
"text",
"xml",
"unknown")
.map(s -> "com/sun/org/apache/xml/internal/serializer/output_" + s + ".properties")
.map(NativeImageResourceBuildItem::new)
.forEach(resource::produce);
}
}
| JaxpProcessor |
java | apache__maven | its/core-it-suite/src/test/resources/mng-5771-core-extensions/repo-src/maven-it-plugin-core-extensions-client/src/main/java/org/apache/maven/its/it_core_extensions/plugin/ValidateClasspathMojo.java | {
"start": 1238,
"end": 1591
} | class ____ extends AbstractMojo {
@Component
private TestCoreExtensionComponent component;
public void execute() throws MojoExecutionException, MojoFailureException {
if (this.component == null) {
throw new MojoExecutionException("Expected core extension component is not available");
}
}
}
| ValidateClasspathMojo |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/cglib/core/Customizer.java | {
"start": 910,
"end": 1010
} | interface ____ extends KeyFactoryCustomizer {
void customize(CodeEmitter e, Type type);
}
| Customizer |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebApp.java | {
"start": 17602,
"end": 17925
} | class ____ extends AbstractBinder {
private AppContext context;
JerseyBinder(AppContext context) {
this.context = context;
}
@Override
protected void configure() {
bind(context).to(AppContext.class).named("am");
bind(new App(context)).to(App.class).named("app");
}
}
}
| JerseyBinder |
java | apache__flink | flink-core/src/main/java/org/apache/flink/api/common/operators/CollectionExecutor.java | {
"start": 3591,
"end": 7910
} | class ____ {
private final Map<Operator<?>, List<?>> intermediateResults;
private final Map<String, Accumulator<?, ?>> accumulators;
private final Map<String, Future<Path>> cachedFiles;
private final Map<String, Value> previousAggregates;
private final Map<String, Aggregator<?>> aggregators;
private final ClassLoader userCodeClassLoader;
private final ExecutionConfig executionConfig;
private int iterationSuperstep;
// --------------------------------------------------------------------------------------------
public CollectionExecutor(ExecutionConfig executionConfig) {
this.executionConfig = executionConfig;
this.intermediateResults = new HashMap<Operator<?>, List<?>>();
this.accumulators = new HashMap<String, Accumulator<?, ?>>();
this.previousAggregates = new HashMap<String, Value>();
this.aggregators = new HashMap<String, Aggregator<?>>();
this.cachedFiles = new HashMap<String, Future<Path>>();
this.userCodeClassLoader = Thread.currentThread().getContextClassLoader();
}
// --------------------------------------------------------------------------------------------
// General execution methods
// --------------------------------------------------------------------------------------------
public JobExecutionResult execute(Plan program) throws Exception {
long startTime = System.currentTimeMillis();
JobID jobID = program.getJobId() == null ? new JobID() : program.getJobId();
JobInfo jobInfo = new JobInfoImpl(jobID, program.getJobName());
initCache(program.getCachedFiles());
Collection<? extends GenericDataSinkBase<?>> sinks = program.getDataSinks();
for (Operator<?> sink : sinks) {
execute(sink, jobInfo);
}
long endTime = System.currentTimeMillis();
Map<String, OptionalFailure<Object>> accumulatorResults =
AccumulatorHelper.toResultMap(accumulators);
return new JobExecutionResult(null, endTime - startTime, accumulatorResults);
}
private void initCache(Set<Map.Entry<String, DistributedCache.DistributedCacheEntry>> files) {
for (Map.Entry<String, DistributedCache.DistributedCacheEntry> file : files) {
Future<Path> doNothing = new CompletedFuture(new Path(file.getValue().filePath));
cachedFiles.put(file.getKey(), doNothing);
}
}
private List<?> execute(Operator<?> operator, JobInfo jobInfo) throws Exception {
return execute(operator, 0, jobInfo);
}
private List<?> execute(Operator<?> operator, int superStep, JobInfo jobInfo) throws Exception {
List<?> result = this.intermediateResults.get(operator);
// if it has already been computed, use the cached variant
if (result != null) {
return result;
}
if (operator instanceof BulkIterationBase) {
result = executeBulkIteration((BulkIterationBase<?>) operator, jobInfo);
} else if (operator instanceof DeltaIterationBase) {
result = executeDeltaIteration((DeltaIterationBase<?, ?>) operator, jobInfo);
} else if (operator instanceof SingleInputOperator) {
result =
executeUnaryOperator(
(SingleInputOperator<?, ?, ?>) operator, superStep, jobInfo);
} else if (operator instanceof DualInputOperator) {
result =
executeBinaryOperator(
(DualInputOperator<?, ?, ?, ?>) operator, superStep, jobInfo);
} else if (operator instanceof GenericDataSourceBase) {
result = executeDataSource((GenericDataSourceBase<?, ?>) operator, superStep, jobInfo);
} else if (operator instanceof GenericDataSinkBase) {
executeDataSink((GenericDataSinkBase<?>) operator, superStep, jobInfo);
result = Collections.emptyList();
} else {
throw new RuntimeException("Cannot execute operator " + operator.getClass().getName());
}
this.intermediateResults.put(operator, result);
return result;
}
// --------------------------------------------------------------------------------------------
// Operator | CollectionExecutor |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/search/vectors/DenseVectorQueryFloatsTests.java | {
"start": 673,
"end": 1321
} | class ____ extends AbstractDenseVectorQueryTestCase {
@Override
DenseVectorQuery getDenseVectorQuery(String field, float[] query) {
return new DenseVectorQuery.Floats(query, field);
}
@Override
float[] randomVector(int dim) {
float[] vector = new float[dim];
for (int i = 0; i < vector.length; i++) {
vector[i] = randomFloat();
}
return vector;
}
@Override
Field getKnnVectorField(String name, float[] vector, VectorSimilarityFunction similarityFunction) {
return new KnnFloatVectorField(name, vector, similarityFunction);
}
}
| DenseVectorQueryFloatsTests |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/node/PluginComponentBinding.java | {
"start": 2313,
"end": 2462
} | interface ____
* @param <T> The implementation class
*/
public record PluginComponentBinding<I, T extends I>(Class<? extends I> inter, T impl) {}
| class |
java | apache__camel | components/camel-asn1/src/test/java/org/apache/camel/dataformat/asn1/SpringASN1DataFormatRouteTest.java | {
"start": 1461,
"end": 2682
} | class ____ extends CamelSpringTestSupport {
private String fileName = "src/test/resources/asn1_data/SMS_SINGLE.tt";
private void baseUnmarshalReturnClassObjectTest(String mockEnpointName, String directEndpointName) throws Exception {
getMockEndpoint(mockEnpointName).expectedMessageCount(1);
File testFile = new File(fileName);
ByteArrayInputStream bais = ASN1DataFormatTestHelper.reteriveByteArrayInputStream(testFile);
template.sendBody(directEndpointName, bais);
List<Exchange> exchanges = getMockEndpoint(mockEnpointName).getExchanges();
assertEquals(1, exchanges.size());
for (Exchange exchange : exchanges) {
assertTrue(exchange.getIn().getBody() instanceof SmsCdr);
}
MockEndpoint.assertIsSatisfied(context);
}
@Test
void testUnmarshalReturnClassObject() throws Exception {
baseUnmarshalReturnClassObjectTest("mock:unmarshal", "direct:unmarshal");
}
@Override
protected AbstractApplicationContext createApplicationContext() {
return new ClassPathXmlApplicationContext("org/apache/camel/dataformat/asn1/SpringASN1DataFormatRouteTest.xml");
}
}
| SpringASN1DataFormatRouteTest |
java | apache__camel | components/camel-solr/src/test/java/org/apache/camel/component/solr/integration/SolrTestSupport.java | {
"start": 2498,
"end": 10196
} | class ____ implements CamelTestSupportHelper, ConfigurableRoute, ConfigurableContext {
@Order(1)
@RegisterExtension
public static final SolrService service = SolrServiceFactory.createService();
@Order(2)
@RegisterExtension
public static final CamelContextExtension contextExtension = new DefaultCamelContextExtension();
static final Logger LOG = LoggerFactory.getLogger(SolrTestSupport.class);
static final String DEFAULT_START_ENDPOINT = "direct:start";
static final String DEFAULT_START_ENDPOINT_AUTO_COMMIT = DEFAULT_START_ENDPOINT + "AutoCommit";
static final String DEFAULT_START_ENDPOINT_SPLIT_THEN_COMMIT = DEFAULT_START_ENDPOINT + "SplitThenCommit";
static final String DEFAULT_SOLR_ENDPOINT = "solr:default";
static final String DEFAULT_MOCK_ENDPOINT = "mock:result";
static final String TEST_DATA_PATH_URI = Objects.requireNonNull(Path
.of(Objects.requireNonNull(SolrTestSupport.class.getClassLoader().getResource("data/books.csv")).getFile())
.getParent().toUri().toString());
static final String TEST_ID = "test1";
static final String TEST_ID2 = "test2";
SolrEndpoint solrEndpoint;
CamelContext context;
ProducerTemplate template;
ConsumerTemplate consumer;
String prefix;
@BeforeEach
public void beforeEach(TestInfo testInfo) {
// make use of the test method name to avoid collision
prefix = testInfo.getDisplayName().toLowerCase() + "-";
context = contextExtension.getContext();
template = contextExtension.getProducerTemplate();
consumer = contextExtension.getConsumerTemplate();
// clean solr endpoints
executeDeleteAll();
solrEndpoint = context.getEndpoint(DEFAULT_SOLR_ENDPOINT, SolrEndpoint.class);
solrEndpoint.getConfiguration().setRequestHandler(null);
template.setDefaultEndpoint(solrEndpoint);
}
@Override
public CamelContextExtension getCamelContextExtension() {
return contextExtension;
}
protected CamelContext camelContext() {
return getCamelContextExtension().getContext();
}
protected ProducerTemplate template() {
return getCamelContextExtension().getProducerTemplate();
}
@ContextFixture
@Override
public void configureContext(CamelContext context) {
final SolrComponent solrComponent = new SolrComponent();
solrComponent.setHost(service.getSolrHost());
solrComponent.setPort(service.getPort());
solrComponent.setDefaultCollection(SolrConstants.DEFAULT_COLLECTION);
context.addComponent("solr", solrComponent);
}
@RouteFixture
@Override
public void createRouteBuilder(CamelContext context) throws Exception {
final RouteBuilder routeBuilder = createRouteBuilder();
if (routeBuilder != null) {
context.addRoutes(routeBuilder);
}
}
protected abstract RouteBuilder createRouteBuilder();
public Exchange processRequest(String uri, Object body, Map<String, Object> headers) {
ExchangeBuilder builder = ExchangeBuilder.anExchange(camelContext()).withBody(body);
for (Map.Entry<String, Object> entry : headers.entrySet()) {
builder.withHeader(entry.getKey(), entry.getValue());
}
return processRequest(uri, builder.build());
}
public Exchange processRequest(String uri, Exchange exchange) {
return template.send(uri, exchange);
}
protected void solrInsertTestEntry() {
solrInsertTestEntry(TEST_ID);
}
protected void solrInsertTestEntry(String id) {
Map<String, Object> headers = new HashMap<>();
headers.put(SolrConstants.PARAM_OPERATION, SolrConstants.OPERATION_INSERT);
headers.put("SolrField.id", id);
template.sendBodyAndHeaders(DEFAULT_START_ENDPOINT, "", headers);
}
protected void solrCommit() {
template.sendBodyAndHeaders(
DEFAULT_START_ENDPOINT,
null,
Map.of(
SolrConstants.PARAM_OPERATION, SolrOperation.INSERT.name(),
SolrConstants.HEADER_PARAM_PREFIX + "commit", "true"));
}
public Exchange executeInsertFor(Object body) {
return executeInsertFor(body, new HashMap<>(), true);
}
public Exchange executeInsertFor(Object body, boolean withCommit) {
return executeInsertFor(body, new HashMap<>(), withCommit);
}
public Exchange executeInsertFor(Object body, Map<String, Object> headers) {
return executeInsertFor(body, headers, true);
}
public Exchange executeInsertFor(Object body, Map<String, Object> headers, boolean withCommit) {
ExchangeBuilder builder = ExchangeBuilder.anExchange(camelContext());
builder.withBody(body);
headers.forEach(builder::withHeader);
return executeInsert(builder.build(), withCommit);
}
public Exchange executeInsert(Exchange exchange) {
return executeInsert(exchange, true);
}
public Exchange executeInsert(Exchange exchange, boolean withCommit) {
return executeInsert(DEFAULT_SOLR_ENDPOINT, exchange, withCommit);
}
public Exchange executeInsert(String uri, Exchange exchange, boolean withCommit) {
exchange.getMessage().getHeaders().put(SolrConstants.PARAM_OPERATION, SolrOperation.INSERT);
if (withCommit) {
SolrUtils.addHeadersForCommit(exchange);
}
return processRequest(uri, exchange);
}
public void executeDeleteAll() {
Exchange exchange = ExchangeBuilder.anExchange(camelContext())
.withBody("*:*")
.withHeader(SolrConstants.PARAM_OPERATION, SolrOperation.DELETE)
.withHeader(SolrConstants.PARAM_DELETE_BY_QUERY, true)
.build();
SolrUtils.addHeadersForCommit(exchange);
processRequest(DEFAULT_SOLR_ENDPOINT, exchange);
}
public QueryResponse executeSolrQuery(String queryString) {
return executeSolrQuery(DEFAULT_SOLR_ENDPOINT, queryString);
}
public QueryResponse executeSolrQuery(String uri, String queryString) {
return executeSolrQuery(uri, queryString, Collections.emptyMap());
}
public QueryResponse executeSolrQuery(String uri, String queryString, Map<String, Object> additionalExchangeHeaders) {
var exchangeBuilder = ExchangeBuilder.anExchange(camelContext())
.withHeader(SolrConstants.PARAM_OPERATION, SolrOperation.SEARCH)
.withHeader(SolrConstants.PARAM_QUERY_STRING, queryString)
.withHeader(SolrConstants.PARAM_REQUEST_HANDLER, null);
if (additionalExchangeHeaders != null) {
for (var entry : additionalExchangeHeaders.entrySet()) {
exchangeBuilder.withHeader(entry.getKey(), entry.getValue());
}
}
return processRequest(uri, exchangeBuilder.build()).getMessage().getBody(QueryResponse.class);
}
public Exchange pingInstance(String uri) {
return pingInstance(uri, SolrConstants.DEFAULT_COLLECTION);
}
public Exchange pingInstance(String uri, String collection) {
return pingInstance(uri, collection == null ? Map.of() : Map.of(SolrConstants.PARAM_COLLECTION, collection));
}
public Exchange pingInstance(String uri, Map<String, Object> headers) {
if (!headers.containsKey(SolrConstants.PARAM_OPERATION)) {
headers = new HashMap<>(headers);
headers.put(SolrConstants.PARAM_OPERATION, SolrOperation.PING);
}
return processRequest(uri, null, headers);
}
}
| SolrTestSupport |
java | spring-projects__spring-boot | core/spring-boot/src/test/java/org/springframework/boot/context/properties/ConfigurationPropertiesBeanRegistrationAotProcessorTests.java | {
"start": 8552,
"end": 9117
} | class ____ {
@SuppressWarnings("unused")
private final String name;
@SuppressWarnings("unused")
private final Integer counter;
ValueObjectWithSpecificConstructorSampleBean(String name, Integer counter) {
this.name = name;
this.counter = counter;
}
@SuppressWarnings("unused")
private ValueObjectWithSpecificConstructorSampleBean(String name) {
this(name, 42);
}
}
@Configuration(proxyBeanMethods = false)
@ConfigurationPropertiesScan(basePackageClasses = BScanConfiguration.class)
static | ValueObjectWithSpecificConstructorSampleBean |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/time/TimeUnitConversionCheckerTest.java | {
"start": 1252,
"end": 1720
} | class ____ {
// BUG: Diagnostic contains: private long value = 42L /* milliseconds */;
private long value = TimeUnit.MILLISECONDS.toMillis(42);
}
""")
.doTest();
}
@Test
public void literalConvertToSelf_withStaticImport() {
helper
.addSourceLines(
"TestClass.java",
"""
import static java.util.concurrent.TimeUnit.MILLISECONDS;
public | TestClass |
java | micronaut-projects__micronaut-core | discovery-core/src/main/java/io/micronaut/discovery/event/ServiceStoppedEvent.java | {
"start": 848,
"end": 1195
} | class ____ extends AbstractServiceInstanceEvent {
/**
* Constructs a prototypical Event.
*
* @param source The object on which the Event initially occurred.
* @throws IllegalArgumentException if source is null.
*/
public ServiceStoppedEvent(ServiceInstance source) {
super(source);
}
}
| ServiceStoppedEvent |
java | spring-projects__spring-framework | spring-webflux/src/test/java/org/springframework/web/reactive/result/method/annotation/SseIntegrationTests.java | {
"start": 3304,
"end": 7209
} | class ____ extends AbstractHttpHandlerIntegrationTests {
private AnnotationConfigApplicationContext wac;
private WebClient webClient;
private void startServer(HttpServer httpServer, ClientHttpConnector connector) throws Exception {
super.startServer(httpServer);
this.webClient = WebClient
.builder()
.clientConnector(connector)
.baseUrl("http://localhost:" + this.port + "/sse")
.build();
}
@Override
protected HttpHandler createHttpHandler() {
this.wac = new AnnotationConfigApplicationContext(TestConfiguration.class);
return WebHttpHandlerBuilder.webHandler(new DispatcherHandler(this.wac)).build();
}
@ParameterizedSseTest
void sseAsString(HttpServer httpServer, ClientHttpConnector connector) throws Exception {
startServer(httpServer, connector);
Flux<String> result = this.webClient.get()
.uri("/string")
.accept(TEXT_EVENT_STREAM)
.retrieve()
.bodyToFlux(String.class);
StepVerifier.create(result)
.expectNext("foo 0")
.expectNext("foo 1")
.thenCancel()
.verify(Duration.ofSeconds(5L));
}
@ParameterizedSseTest
void sseAsPerson(HttpServer httpServer, ClientHttpConnector connector) throws Exception {
startServer(httpServer, connector);
Flux<Person> result = this.webClient.get()
.uri("/person")
.accept(TEXT_EVENT_STREAM)
.retrieve()
.bodyToFlux(Person.class);
StepVerifier.create(result)
.expectNext(new Person("foo 0"))
.expectNext(new Person("foo 1"))
.thenCancel()
.verify(Duration.ofSeconds(5L));
}
@ParameterizedSseTest
void sseAsEvent(HttpServer httpServer, ClientHttpConnector connector) throws Exception {
assumeTrue(httpServer instanceof JettyHttpServer || httpServer instanceof JettyCoreHttpServer);
startServer(httpServer, connector);
Flux<ServerSentEvent<Person>> result = this.webClient.get()
.uri("/event")
.accept(TEXT_EVENT_STREAM)
.retrieve()
.bodyToFlux(new ParameterizedTypeReference<>() {});
verifyPersonEvents(result);
}
@ParameterizedSseTest
void sseAsEventWithoutAcceptHeader(HttpServer httpServer, ClientHttpConnector connector) throws Exception {
startServer(httpServer, connector);
Flux<ServerSentEvent<Person>> result = this.webClient.get()
.uri("/event")
.retrieve()
.bodyToFlux(new ParameterizedTypeReference<>() {});
verifyPersonEvents(result);
}
private void verifyPersonEvents(Flux<ServerSentEvent<Person>> result) {
StepVerifier.create(result)
.consumeNextWith( event -> {
assertThat(event.id()).isEqualTo("0");
assertThat(event.data()).isEqualTo(new Person("foo 0"));
assertThat(event.comment()).isEqualTo("bar 0");
assertThat(event.event()).isNull();
assertThat(event.retry()).isNull();
})
.consumeNextWith( event -> {
assertThat(event.id()).isEqualTo("1");
assertThat(event.data()).isEqualTo(new Person("foo 1"));
assertThat(event.comment()).isEqualTo("bar 1");
assertThat(event.event()).isNull();
assertThat(event.retry()).isNull();
})
.thenCancel()
.verify(Duration.ofSeconds(5L));
}
@ParameterizedSseTest // SPR-16494
@Disabled // https://github.com/reactor/reactor-netty/issues/283
void serverDetectsClientDisconnect(HttpServer httpServer, ClientHttpConnector connector) throws Exception {
assumeTrue(httpServer instanceof ReactorHttpServer);
startServer(httpServer, connector);
Flux<String> result = this.webClient.get()
.uri("/infinite")
.accept(TEXT_EVENT_STREAM)
.retrieve()
.bodyToFlux(String.class);
StepVerifier.create(result)
.expectNext("foo 0")
.expectNext("foo 1")
.thenCancel()
.verify(Duration.ofSeconds(5L));
SseController controller = this.wac.getBean(SseController.class);
controller.cancellation.block(Duration.ofSeconds(5));
}
@RestController
@SuppressWarnings("unused")
@RequestMapping("/sse")
static | SseIntegrationTests |
java | redisson__redisson | redisson-hibernate/redisson-hibernate-53/src/main/java/org/redisson/hibernate/RedissonRegionFactory.java | {
"start": 1782,
"end": 9507
} | class ____ extends RegionFactoryTemplate {
private static final long serialVersionUID = 3785315696581773811L;
public static final String QUERY_DEF = "query";
public static final String COLLECTION_DEF = "collection";
public static final String ENTITY_DEF = "entity";
public static final String NATURAL_ID_DEF = "naturalid";
public static final String TIMESTAMPS_DEF = "timestamps";
public static final String MAX_ENTRIES_SUFFIX = ".eviction.max_entries";
public static final String TTL_SUFFIX = ".expiration.time_to_live";
public static final String MAX_IDLE_SUFFIX = ".expiration.max_idle_time";
public static final String CONFIG_PREFIX = "hibernate.cache.redisson.";
public static final String REDISSON_CONFIG_PATH = CONFIG_PREFIX + "config";
public static final String FALLBACK = CONFIG_PREFIX + "fallback";
RedissonClient redisson;
private CacheKeysFactory cacheKeysFactory;
protected boolean fallback;
@Override
protected CacheKeysFactory getImplicitCacheKeysFactory() {
return cacheKeysFactory;
}
@Override
protected void prepareForUse(SessionFactoryOptions settings, @SuppressWarnings("rawtypes") Map properties) throws CacheException {
this.redisson = createRedissonClient(properties);
String fallbackValue = (String) properties.getOrDefault(FALLBACK, "false");
fallback = Boolean.valueOf(fallbackValue);
StrategySelector selector = settings.getServiceRegistry().getService(StrategySelector.class);
cacheKeysFactory = selector.resolveDefaultableStrategy(CacheKeysFactory.class,
properties.get(Environment.CACHE_KEYS_FACTORY), new RedissonCacheKeysFactory(redisson.getConfig().getCodec()));
}
protected RedissonClient createRedissonClient(Map properties) {
Config config = null;
if (!properties.containsKey(REDISSON_CONFIG_PATH)) {
config = loadConfig(RedissonRegionFactory.class.getClassLoader(), "redisson.json");
if (config == null) {
config = loadConfig(RedissonRegionFactory.class.getClassLoader(), "redisson.yaml");
}
} else {
String configPath = ConfigurationHelper.getString(REDISSON_CONFIG_PATH, properties);
config = loadConfig(RedissonRegionFactory.class.getClassLoader(), configPath);
if (config == null) {
config = loadConfig(configPath);
}
}
if (config == null) {
throw new CacheException("Unable to locate Redisson configuration");
}
return Redisson.create(config);
}
private Config loadConfig(String configPath) {
try {
return Config.fromYAML(new File(configPath));
} catch (IOException e) {
// trying next format
try {
return Config.fromJSON(new File(configPath));
} catch (IOException e1) {
e1.addSuppressed(e);
throw new CacheException("Can't parse default config", e1);
}
}
}
private Config loadConfig(ClassLoader classLoader, String fileName) {
InputStream is = classLoader.getResourceAsStream(fileName);
if (is != null) {
try {
return Config.fromYAML(is);
} catch (IOException e) {
try {
is = classLoader.getResourceAsStream(fileName);
return Config.fromJSON(is);
} catch (IOException e1) {
e1.addSuppressed(e);
throw new CacheException("Can't parse config", e1);
}
}
}
return null;
}
@Override
protected void releaseFromUse() {
redisson.shutdown();
}
@Override
public boolean isMinimalPutsEnabledByDefault() {
return true;
}
@Override
public AccessType getDefaultAccessType() {
return AccessType.TRANSACTIONAL;
}
@Override
public long nextTimestamp() {
long time = System.currentTimeMillis() << 12;
try {
return redisson.getScript(LongCodec.INSTANCE).eval(RScript.Mode.READ_WRITE,
"local currentTime = redis.call('get', KEYS[1]);"
+ "if currentTime == false then "
+ "redis.call('set', KEYS[1], ARGV[1]); "
+ "return ARGV[1]; "
+ "end;"
+ "local nextValue = math.max(tonumber(ARGV[1]), tonumber(currentTime) + 1); "
+ "redis.call('set', KEYS[1], nextValue); "
+ "return nextValue;",
RScript.ReturnType.INTEGER, Arrays.<Object>asList(qualifyName("redisson-hibernate-timestamp")), time);
} catch (Exception e) {
if (fallback) {
return super.nextTimestamp();
}
throw e;
}
}
@Override
public DomainDataRegion buildDomainDataRegion(
DomainDataRegionConfig regionConfig,
DomainDataRegionBuildingContext buildingContext) {
verifyStarted();
return new DomainDataRegionImpl(
regionConfig,
this,
createDomainDataStorageAccess( regionConfig, buildingContext ),
getImplicitCacheKeysFactory(),
buildingContext
);
}
@Override
protected DomainDataStorageAccess createDomainDataStorageAccess(DomainDataRegionConfig regionConfig,
DomainDataRegionBuildingContext buildingContext) {
String defaultKey = null;
if (!regionConfig.getCollectionCaching().isEmpty()) {
defaultKey = COLLECTION_DEF;
} else if (!regionConfig.getEntityCaching().isEmpty()) {
defaultKey = ENTITY_DEF;
} else if (!regionConfig.getNaturalIdCaching().isEmpty()) {
defaultKey = NATURAL_ID_DEF;
} else {
throw new IllegalArgumentException("Unable to determine entity cache type!");
}
RMapCache<Object, Object> mapCache = getCache(qualifyName(regionConfig.getRegionName()), buildingContext.getSessionFactory().getProperties(), defaultKey);
return new RedissonStorage(regionConfig.getRegionName(), mapCache, ((Redisson)redisson).getServiceManager(), buildingContext.getSessionFactory().getProperties(), defaultKey);
}
private String qualifyName(String name) {
return RegionNameQualifier.INSTANCE.qualify(name, getOptions());
}
@Override
protected StorageAccess createQueryResultsRegionStorageAccess(String regionName,
SessionFactoryImplementor sessionFactory) {
RMapCache<Object, Object> mapCache = getCache(qualifyName(regionName), sessionFactory.getProperties(), QUERY_DEF);
return new RedissonStorage(regionName, mapCache, ((Redisson)redisson).getServiceManager(), sessionFactory.getProperties(), QUERY_DEF);
}
@Override
protected StorageAccess createTimestampsRegionStorageAccess(String regionName,
SessionFactoryImplementor sessionFactory) {
RMapCache<Object, Object> mapCache = getCache(qualifyName(regionName), sessionFactory.getProperties(), TIMESTAMPS_DEF);
return new RedissonStorage(regionName, mapCache, ((Redisson)redisson).getServiceManager(), sessionFactory.getProperties(), TIMESTAMPS_DEF);
}
protected RMapCache<Object, Object> getCache(String cacheName, Map properties, String defaultKey) {
return redisson.getMapCache(cacheName);
}
}
| RedissonRegionFactory |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/filecache/FileCacheDirectoriesTest.java | {
"start": 8496,
"end": 10104
} | class ____
extends DirectScheduledExecutorService {
FileCache.DeleteProcess lastDeleteProcess;
long lastDelayMillis;
@Override
public ScheduledFuture<?> schedule(Runnable command, long delay, TimeUnit unit) {
if (command instanceof FileCache.DeleteProcess) {
assertNull("Multiple delete process registered", lastDeleteProcess);
lastDeleteProcess = (FileCache.DeleteProcess) command;
lastDelayMillis = unit.toMillis(delay);
return super.schedule(() -> {}, delay, unit);
} else {
return super.schedule(command, delay, unit);
}
}
}
private void testDirectoryDownloaded(DistributedCache.DistributedCacheEntry entry)
throws Exception {
JobID jobID = new JobID();
ExecutionAttemptID attemptID = createExecutionAttemptId();
// copy / create the file
final String fileName = "test_file";
Future<Path> copyResult = fileCache.createTmpFile(fileName, entry, jobID, attemptID);
final Path dstPath = copyResult.get();
final FileSystem fs = dstPath.getFileSystem();
final FileStatus fileStatus = fs.getFileStatus(dstPath);
assertTrue(fileStatus.isDir());
final Path cacheFile = new Path(dstPath, "cacheFile");
assertTrue(fs.exists(cacheFile));
final String actualContent = FileUtils.readFileUtf8(new File(cacheFile.getPath()));
assertEquals(testFileContent, actualContent);
}
}
| DeleteCapturingDirectScheduledExecutorService |
java | apache__hadoop | hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java | {
"start": 3457,
"end": 16405
} | enum ____ {
SLS, RUMEN, SYNTH
}
public static final String NETWORK_CACHE_TTL = "networkaddress.cache.ttl";
public static final String NETWORK_NEGATIVE_CACHE_TTL =
"networkaddress.cache.negative.ttl";
public int getRemainingApps() {
return amRunner.remainingApps;
}
public SLSRunner() throws ClassNotFoundException, YarnException {
Configuration tempConf = new Configuration(false);
init(tempConf);
}
public SLSRunner(Configuration tempConf) throws ClassNotFoundException, YarnException {
init(tempConf);
}
@Override
public void setConf(Configuration conf) {
if (null != conf) {
// Override setConf to make sure all conf added load sls-runner.xml, see
// YARN-6560
conf.addResource("sls-runner.xml");
}
super.setConf(conf);
}
private void init(Configuration tempConf) throws ClassNotFoundException, YarnException {
// runner configuration
setConf(tempConf);
int poolSize = tempConf.getInt(SLSConfiguration.RUNNER_POOL_SIZE,
SLSConfiguration.RUNNER_POOL_SIZE_DEFAULT);
SLSRunner.runner.setQueueSize(poolSize);
rmRunner = new RMRunner(getConf(), this);
nmRunner = new NMRunner(runner, getConf(), rmRunner.getRm(), rmRunner.getTableMapping(), poolSize);
amRunner = new AMRunner(runner, this);
amRunner.init(tempConf);
}
private SynthTraceJobProducer getSynthJobTraceProducer() throws YarnException {
// if we use the nodeFile this could have been not initialized yet.
if (nmRunner.getStjp() != null) {
return nmRunner.getStjp();
} else {
try {
return new SynthTraceJobProducer(getConf(), new Path(inputTraces[0]));
} catch (IOException e) {
throw new YarnException("Failed to initialize SynthTraceJobProducer", e);
}
}
}
/**
* @return an unmodifiable view of the simulated info map.
*/
public static Map<String, Object> getSimulateInfoMap() {
return Collections.unmodifiableMap(simulateInfoMap);
}
/**
* This is invoked before start.
* @param inputType The trace type
* @param inTraces Input traces
* @param nodes The node file
* @param metricsOutputDir Output dir for metrics
* @param trackApps Track these applications
* @param printSimulation Whether to print the simulation
*/
public void setSimulationParams(TraceType inputType, String[] inTraces,
String nodes, String metricsOutputDir, Set<String> trackApps,
boolean printSimulation) throws YarnException {
this.inputType = inputType;
this.inputTraces = inTraces.clone();
this.amRunner.setInputType(inputType);
this.amRunner.setInputTraces(this.inputTraces);
this.amRunner.setTrackedApps(trackApps);
this.nmRunner.setNodeFile(nodes);
this.nmRunner.setInputType(inputType);
this.nmRunner.setInputTraces(this.inputTraces);
this.printSimulation = printSimulation;
this.rmRunner.setMetricsOutputDir(metricsOutputDir);
String tableMapping = metricsOutputDir + "/tableMapping.csv";
this.rmRunner.setTableMapping(tableMapping);
this.nmRunner.setTableMapping(tableMapping);
//We need this.inputTraces to set before creating SynthTraceJobProducer
if (inputType == TraceType.SYNTH) {
this.stjp = getSynthJobTraceProducer();
}
}
public void start() throws IOException, ClassNotFoundException, YarnException,
InterruptedException {
enableDNSCaching(getConf());
// start resource manager
rmRunner.startRM();
nmRunner.setRm(rmRunner.getRm());
amRunner.setResourceManager(rmRunner.getRm());
// start node managers
nmRunner.startNM();
// start application masters
amRunner.startAM();
// set queue & tracked apps information
SchedulerWrapper resourceScheduler =
(SchedulerWrapper) rmRunner.getRm().getResourceScheduler();
resourceScheduler.setSLSRunner(this);
Tracker tracker = resourceScheduler.getTracker();
tracker.setQueueSet(rmRunner.getQueueAppNumMap().keySet());
tracker.setTrackedAppSet(amRunner.getTrackedApps());
// print out simulation info
printSimulationInfo();
// blocked until all nodes RUNNING
nmRunner.waitForNodesRunning();
// starting the runner once everything is ready to go,
runner.start();
}
/**
* Enables DNS Caching based on config. If DNS caching is enabled, then set
* the DNS cache to infinite time. Since in SLS random nodes are added, DNS
* resolution can take significant time which can cause erroneous results.
* For more details, check <a href=
* "https://docs.oracle.com/javase/8/docs/technotes/guides/net/properties.html">
* Java Networking Properties</a>
* @param conf Configuration object.
*/
static void enableDNSCaching(Configuration conf) {
if (conf.getBoolean(SLSConfiguration.DNS_CACHING_ENABLED,
SLSConfiguration.DNS_CACHING_ENABLED_DEFAULT)) {
Security.setProperty(NETWORK_CACHE_TTL, "-1");
Security.setProperty(NETWORK_NEGATIVE_CACHE_TTL, "-1");
}
}
Resource getDefaultContainerResource() {
int containerMemory = getConf().getInt(SLSConfiguration.CONTAINER_MEMORY_MB,
SLSConfiguration.CONTAINER_MEMORY_MB_DEFAULT);
int containerVCores = getConf().getInt(SLSConfiguration.CONTAINER_VCORES,
SLSConfiguration.CONTAINER_VCORES_DEFAULT);
return Resources.createResource(containerMemory, containerVCores);
}
public void increaseQueueAppNum(String queue) throws YarnException {
rmRunner.increaseQueueAppNum(queue);
}
private void printSimulationInfo() {
final int numAMs = amRunner.getNumAMs();
final int numTasks = amRunner.getNumTasks();
final long maxRuntime = amRunner.getMaxRuntime();
Map<String, AMSimulator> amMap = amRunner.getAmMap();
Map<String, Integer> queueAppNumMap = rmRunner.getQueueAppNumMap();
if (printSimulation) {
// node
LOG.info("------------------------------------");
LOG.info("# nodes = {}, # racks = {}, capacity " +
"of each node {}.",
nmRunner.getNumNMs(), nmRunner.getNumRacks(), nmRunner.getNodeManagerResource());
LOG.info("------------------------------------");
// job
LOG.info("# applications = {}, # total " +
"tasks = {}, average # tasks per application = {}",
numAMs, numTasks, (int)(Math.ceil((numTasks + 0.0) / numAMs)));
LOG.info("JobId\tQueue\tAMType\tDuration\t#Tasks");
for (Map.Entry<String, AMSimulator> entry : amMap.entrySet()) {
AMSimulator am = entry.getValue();
LOG.info(entry.getKey() + "\t" + am.getQueue() + "\t" + am.getAMType()
+ "\t" + am.getDuration() + "\t" + am.getNumTasks());
}
LOG.info("------------------------------------");
// queue
LOG.info("number of queues = {} average number of apps = {}",
queueAppNumMap.size(),
(int)(Math.ceil((numAMs + 0.0) / queueAppNumMap.size())));
LOG.info("------------------------------------");
// runtime
LOG.info("estimated simulation time is {} seconds",
(long)(Math.ceil(maxRuntime / 1000.0)));
LOG.info("------------------------------------");
}
// package these information in the simulateInfoMap used by other places
simulateInfoMap.put("Number of racks", nmRunner.getNumRacks());
simulateInfoMap.put("Number of nodes", nmRunner.getNumNMs());
simulateInfoMap.put("Node memory (MB)",
nmRunner.getNodeManagerResource().getResourceValue(ResourceInformation.MEMORY_URI));
simulateInfoMap.put("Node VCores",
nmRunner.getNodeManagerResource().getResourceValue(ResourceInformation.VCORES_URI));
simulateInfoMap.put("Number of applications", numAMs);
simulateInfoMap.put("Number of tasks", numTasks);
simulateInfoMap.put("Average tasks per applicaion",
(int)(Math.ceil((numTasks + 0.0) / numAMs)));
simulateInfoMap.put("Number of queues", queueAppNumMap.size());
simulateInfoMap.put("Average applications per queue",
(int)(Math.ceil((numAMs + 0.0) / queueAppNumMap.size())));
simulateInfoMap.put("Estimated simulate time (s)",
(long)(Math.ceil(maxRuntime / 1000.0)));
}
public Map<NodeId, NMSimulator> getNmMap() {
return nmRunner.getNmMap();
}
public void decreaseRemainingApps() {
amRunner.remainingApps--;
if (amRunner.remainingApps == 0) {
exitSLSRunner();
}
}
public static void exitSLSRunner() {
LOG.info("SLSRunner tears down.");
if (exitAtTheFinish) {
System.exit(0);
}
}
public void stop() throws InterruptedException {
rmRunner.stop();
runner.stop();
}
public int run(final String[] argv) throws IOException, InterruptedException,
ParseException, ClassNotFoundException, YarnException {
Options options = new Options();
// Left for compatibility
options.addOption("inputrumen", true, "input rumen files");
options.addOption("inputsls", true, "input sls files");
// New more general format
options.addOption("tracetype", true, "the type of trace");
options.addOption("tracelocation", true, "input trace files");
options.addOption("nodes", true, "input topology");
options.addOption("output", true, "output directory");
options.addOption("trackjobs", true,
"jobs to be tracked during simulating");
options.addOption("printsimulation", false,
"print out simulation information");
CommandLineParser parser = new GnuParser();
CommandLine cmd = parser.parse(options, argv);
// compatibility with old commandline
boolean hasInputRumenOption = cmd.hasOption("inputrumen");
boolean hasInputSlsOption = cmd.hasOption("inputsls");
boolean hasTraceTypeOption = cmd.hasOption("tracetype");
TraceType traceType = determineTraceType(cmd, hasInputRumenOption,
hasInputSlsOption, hasTraceTypeOption);
String traceLocation = determineTraceLocation(cmd, hasInputRumenOption,
hasInputSlsOption, hasTraceTypeOption);
String output = cmd.getOptionValue("output");
File outputFile = new File(output);
if (!outputFile.exists() && !outputFile.mkdirs()) {
System.err.println("ERROR: Cannot create output directory "
+ outputFile.getAbsolutePath());
throw new YarnException("Cannot create output directory");
}
Set<String> trackedJobSet = new HashSet<>();
if (cmd.hasOption("trackjobs")) {
String trackjobs = cmd.getOptionValue("trackjobs");
String[] jobIds = trackjobs.split(",");
trackedJobSet.addAll(Arrays.asList(jobIds));
}
String tempNodeFile =
cmd.hasOption("nodes") ? cmd.getOptionValue("nodes") : "";
String[] inputFiles = traceLocation.split(",");
setSimulationParams(traceType, inputFiles, tempNodeFile, output,
trackedJobSet, cmd.hasOption("printsimulation"));
start();
return 0;
}
private TraceType determineTraceType(CommandLine cmd, boolean hasInputRumenOption,
boolean hasInputSlsOption, boolean hasTraceTypeOption) throws YarnException {
String traceType = null;
if (hasInputRumenOption) {
traceType = "RUMEN";
}
if (hasInputSlsOption) {
traceType = "SLS";
}
if (hasTraceTypeOption) {
traceType = cmd.getOptionValue("tracetype");
}
if (traceType == null) {
throw new YarnException("Misconfigured input");
}
switch (traceType) {
case "SLS":
return TraceType.SLS;
case "RUMEN":
return TraceType.RUMEN;
case "SYNTH":
return TraceType.SYNTH;
default:
printUsage();
throw new YarnException("Misconfigured input");
}
}
private String determineTraceLocation(CommandLine cmd, boolean hasInputRumenOption,
boolean hasInputSlsOption, boolean hasTraceTypeOption) throws YarnException {
if (hasInputRumenOption) {
return cmd.getOptionValue("inputrumen");
}
if (hasInputSlsOption) {
return cmd.getOptionValue("inputsls");
}
if (hasTraceTypeOption) {
return cmd.getOptionValue("tracelocation");
}
throw new YarnException("Misconfigured input! ");
}
public static void main(String[] argv) throws Exception {
exitAtTheFinish = true;
ToolRunner.run(new Configuration(), new SLSRunner(), argv);
}
static void printUsage() {
System.err.println();
System.err.println("ERROR: Wrong tracetype");
System.err.println();
System.err.println(
"Options: -tracetype " + "SLS|RUMEN|SYNTH -tracelocation FILE,FILE... "
+ "(deprecated alternative options --inputsls FILE, FILE,... "
+ " | --inputrumen FILE,FILE,...)"
+ "-output FILE [-nodes FILE] [-trackjobs JobId,JobId...] "
+ "[-printsimulation]");
System.err.println();
}
/**
* Class to encapsulate all details about the node.
*/
@Private
@Unstable
public static | TraceType |
java | spring-projects__spring-framework | spring-core/src/test/java/org/springframework/core/convert/converter/DefaultConversionServiceTests.java | {
"start": 30895,
"end": 40116
} | class ____ {
private static final TypeDescriptor rawOptionalType = TypeDescriptor.valueOf(Optional.class);
@Test
@SuppressWarnings("unchecked")
void convertObjectToOptional() {
Method method = ClassUtils.getMethod(getClass(), "handleOptionalList", Optional.class);
MethodParameter parameter = new MethodParameter(method, 0);
TypeDescriptor descriptor = new TypeDescriptor(parameter);
Object actual = conversionService.convert("1,2,3", TypeDescriptor.valueOf(String.class), descriptor);
assertThat(((Optional<List<Integer>>) actual)).contains(List.of(1, 2, 3));
}
@Test
void convertNullToOptional() {
assertThat((Object) conversionService.convert(null, Optional.class)).isSameAs(Optional.empty());
assertThat(conversionService.convert(null, TypeDescriptor.valueOf(Object.class), rawOptionalType))
.isSameAs(Optional.empty());
}
@Test
void convertNullOptionalToNull() {
assertThat(conversionService.convert(null, rawOptionalType, TypeDescriptor.valueOf(Object.class))).isNull();
}
@Test // gh-34544
void convertEmptyOptionalToNull() {
Optional<Object> empty = Optional.empty();
assertThat(conversionService.convert(empty, Object.class)).isNull();
assertThat(conversionService.convert(empty, String.class)).isNull();
assertThat(conversionService.convert(empty, rawOptionalType, TypeDescriptor.valueOf(Object.class))).isNull();
assertThat(conversionService.convert(empty, rawOptionalType, TypeDescriptor.valueOf(String.class))).isNull();
assertThat(conversionService.convert(empty, rawOptionalType, TypeDescriptor.valueOf(Integer[].class))).isNull();
assertThat(conversionService.convert(empty, rawOptionalType, TypeDescriptor.valueOf(List.class))).isNull();
}
@Test
void convertEmptyOptionalToOptional() {
assertThat((Object) conversionService.convert(Optional.empty(), Optional.class)).isSameAs(Optional.empty());
assertThat(conversionService.convert(Optional.empty(), TypeDescriptor.valueOf(Object.class), rawOptionalType))
.isSameAs(Optional.empty());
}
@Test // gh-34544
@SuppressWarnings("unchecked")
void convertOptionalToOptionalWithoutConversionOfContainedObject() {
assertThat(conversionService.convert(Optional.of(42), Optional.class)).contains(42);
assertThat(conversionService.convert(Optional.of("enigma"), Optional.class)).contains("enigma");
assertThat((Optional<String>) conversionService.convert(Optional.of("enigma"), rawOptionalType, rawOptionalType))
.contains("enigma");
}
@Test // gh-34544
@SuppressWarnings("unchecked")
void convertOptionalToOptionalWithConversionOfContainedObject() {
TypeDescriptor integerOptionalType =
new TypeDescriptor(ResolvableType.forClassWithGenerics(Optional.class, Integer.class), null, null);
TypeDescriptor stringOptionalType =
new TypeDescriptor(ResolvableType.forClassWithGenerics(Optional.class, String.class), null, null);
assertThat((Optional<String>) conversionService.convert(Optional.of(42), integerOptionalType, stringOptionalType))
.contains("42");
}
@Test // gh-34544
@SuppressWarnings("unchecked")
void convertOptionalToObjectWithoutConversionOfContainedObject() {
assertThat(conversionService.convert(Optional.of("enigma"), String.class)).isEqualTo("enigma");
assertThat(conversionService.convert(Optional.of(42), Integer.class)).isEqualTo(42);
assertThat(conversionService.convert(Optional.of(new int[] {1, 2, 3}), int[].class)).containsExactly(1, 2, 3);
assertThat(conversionService.convert(Optional.of(new Integer[] {1, 2, 3}), Integer[].class)).containsExactly(1, 2, 3);
assertThat(conversionService.convert(Optional.of(List.of(1, 2, 3)), List.class)).containsExactly(1, 2, 3);
}
@Test // gh-34544
@SuppressWarnings("unchecked")
void convertOptionalToObjectWithConversionOfContainedObject() {
assertThat(conversionService.convert(Optional.of(42), String.class)).isEqualTo("42");
assertThat(conversionService.convert(Optional.of(3.14F), Double.class)).isCloseTo(3.14, byLessThan(0.001));
assertThat(conversionService.convert(Optional.of(new int[] {1, 2, 3}), Integer[].class)).containsExactly(1, 2, 3);
assertThat(conversionService.convert(Optional.of(List.of(1, 2, 3)), Set.class)).containsExactly(1, 2, 3);
}
@Test // gh-34544
@SuppressWarnings("unchecked")
void convertNestedOptionalsToObject() {
assertThat(conversionService.convert(Optional.of(Optional.of("unwrap me twice")), String.class))
.isEqualTo("unwrap me twice");
}
@Test // gh-34544
@SuppressWarnings("unchecked")
void convertOptionalToObjectViaTypeDescriptorForMethodParameter() {
Method method = ClassUtils.getMethod(getClass(), "handleList", List.class);
MethodParameter parameter = new MethodParameter(method, 0);
TypeDescriptor descriptor = new TypeDescriptor(parameter);
Optional<List<Integer>> source = Optional.of(List.of(1, 2, 3));
assertThat((List<Integer>) conversionService.convert(source, rawOptionalType, descriptor)).containsExactly(1, 2, 3);
}
public void handleList(List<Integer> value) {
}
public void handleOptionalList(Optional<List<Integer>> value) {
}
}
@Test // gh-35175
void convertDateToInstant() {
TypeDescriptor dateDescriptor = TypeDescriptor.valueOf(Date.class);
TypeDescriptor instantDescriptor = TypeDescriptor.valueOf(Instant.class);
Date date = new Date();
// Conversion performed by DateToInstantConverter.
assertThat(conversionService.convert(date, dateDescriptor, instantDescriptor))
.isEqualTo(date.toInstant());
}
@Test // gh-35175
void convertSqlDateToInstant() {
TypeDescriptor sqlDateDescriptor = TypeDescriptor.valueOf(java.sql.Date.class);
TypeDescriptor instantDescriptor = TypeDescriptor.valueOf(Instant.class);
java.sql.Date sqlDate = new java.sql.Date(System.currentTimeMillis());
// DateToInstantConverter blindly invokes toInstant() on any java.util.Date
// subtype, which results in an UnsupportedOperationException since
// java.sql.Date does not have a time component. However, even if
// DateToInstantConverter were not registered, ObjectToObjectConverter
// would still attempt to invoke toInstant() on a java.sql.Date by convention,
// which results in the same UnsupportedOperationException.
assertThatExceptionOfType(ConversionFailedException.class)
.isThrownBy(() -> conversionService.convert(sqlDate, sqlDateDescriptor, instantDescriptor))
.withCauseExactlyInstanceOf(UnsupportedOperationException.class);
}
@Test // gh-35175
void convertSqlTimeToInstant() {
TypeDescriptor timeDescriptor = TypeDescriptor.valueOf(Time.class);
TypeDescriptor instantDescriptor = TypeDescriptor.valueOf(Instant.class);
Time time = new Time(System.currentTimeMillis());
// DateToInstantConverter blindly invokes toInstant() on any java.util.Date
// subtype, which results in an UnsupportedOperationException since
// java.sql.Date does not have a time component. However, even if
// DateToInstantConverter were not registered, ObjectToObjectConverter
// would still attempt to invoke toInstant() on a java.sql.Date by convention,
// which results in the same UnsupportedOperationException.
assertThatExceptionOfType(ConversionFailedException.class)
.isThrownBy(() -> conversionService.convert(time, timeDescriptor, instantDescriptor))
.withCauseExactlyInstanceOf(UnsupportedOperationException.class);
}
@Test // gh-35175
void convertSqlTimestampToInstant() {
TypeDescriptor timestampDescriptor = TypeDescriptor.valueOf(Timestamp.class);
TypeDescriptor instantDescriptor = TypeDescriptor.valueOf(Instant.class);
Timestamp timestamp = new Timestamp(System.currentTimeMillis());
// Conversion performed by DateToInstantConverter.
assertThat(conversionService.convert(timestamp, timestampDescriptor, instantDescriptor))
.isEqualTo(timestamp.toInstant());
}
@Test // gh-35175
void convertInstantToDate() {
TypeDescriptor instantDescriptor = TypeDescriptor.valueOf(Instant.class);
TypeDescriptor dateDescriptor = TypeDescriptor.valueOf(Date.class);
Date date = new Date();
Instant instant = date.toInstant();
// Conversion performed by InstantToDateConverter.
assertThat(conversionService.convert(instant, instantDescriptor, dateDescriptor))
.isExactlyInstanceOf(Date.class)
.isEqualTo(date);
}
@Test
void convertInstantToSqlTimestamp() {
TypeDescriptor instantDescriptor = TypeDescriptor.valueOf(Instant.class);
TypeDescriptor timestampDescriptor = TypeDescriptor.valueOf(Timestamp.class);
Timestamp timestamp = new Timestamp(System.currentTimeMillis());
Instant instant = timestamp.toInstant();
// Conversion performed by ObjectToObjectConverter.
assertThat(conversionService.convert(instant, instantDescriptor, timestampDescriptor))
.isExactlyInstanceOf(Timestamp.class)
.isEqualTo(timestamp);
}
// test fields and helpers
public List<Integer> genericList = new ArrayList<>();
public Stream<Integer> genericStream;
public Map<Integer, Foo> genericMap = new HashMap<>();
public EnumSet<Foo> enumSet;
public Object assignableTarget;
public void handlerMethod(List<Color> color) {
}
public | OptionalConversionTests |
java | apache__flink | flink-table/flink-table-code-splitter/src/main/java/org/apache/flink/table/codesplit/DeclarationRewriter.java | {
"start": 1854,
"end": 2212
} | class ____ {
* int a;
* int local$0;
* int local$1;
* public void myFun1(int[] arr) {
* a = 1;
* for (int b : arr) {
* local$0 = b;
* System.out.println(local$0);
* }
* }
* public void myFun2() {
* local$1 = 2;
* }
* }
* </code></pre>
*/
@Internal
public | Example |
java | elastic__elasticsearch | test/framework/src/main/java/org/elasticsearch/datageneration/datasource/DefaultWrappersHandler.java | {
"start": 795,
"end": 4985
} | class ____ implements DataSourceHandler {
@Override
public DataSourceResponse.NullWrapper handle(DataSourceRequest.NullWrapper ignored) {
return new DataSourceResponse.NullWrapper(injectNulls());
}
@Override
public DataSourceResponse.ArrayWrapper handle(DataSourceRequest.ArrayWrapper ignored) {
return new DataSourceResponse.ArrayWrapper(wrapInArray());
}
@Override
public DataSourceResponse.RepeatingWrapper handle(DataSourceRequest.RepeatingWrapper ignored) {
return new DataSourceResponse.RepeatingWrapper(repeatValues());
}
@Override
public DataSourceResponse.MalformedWrapper handle(DataSourceRequest.MalformedWrapper request) {
return new DataSourceResponse.MalformedWrapper(injectMalformed(request.malformedValues()));
}
@Override
public DataSourceResponse.TransformWrapper handle(DataSourceRequest.TransformWrapper request) {
return new DataSourceResponse.TransformWrapper(transform(request.transformedProportion(), request.transformation()));
}
@Override
public DataSourceResponse.TransformWeightedWrapper handle(DataSourceRequest.TransformWeightedWrapper<?> request) {
return new DataSourceResponse.TransformWeightedWrapper(transformWeighted(request.transformations()));
}
private static Function<Supplier<Object>, Supplier<Object>> injectNulls() {
// Inject some nulls but majority of data should be non-null (as it likely is in reality).
return transform(0.05, ignored -> null);
}
private static Function<Supplier<Object>, Supplier<Object>> wrapInArray() {
return (values) -> () -> {
if (ESTestCase.randomBoolean()) {
var size = ESTestCase.randomIntBetween(0, 5);
return IntStream.range(0, size).mapToObj((i) -> values.get()).toList();
}
return values.get();
};
}
private static Function<Supplier<Object>, Supplier<Object>> repeatValues() {
return (values) -> {
HashSet<Object> previousValues = new HashSet<>();
return () -> {
if (previousValues.size() > 0 && ESTestCase.randomBoolean()) {
return ESTestCase.randomFrom(previousValues);
} else {
var value = values.get();
previousValues.add(value);
return value;
}
};
};
}
private static Function<Supplier<Object>, Supplier<Object>> injectMalformed(Supplier<Object> malformedValues) {
return transform(0.1, ignored -> malformedValues.get());
}
private static Function<Supplier<Object>, Supplier<Object>> transform(
double transformedProportion,
Function<Object, Object> transformation
) {
return (values) -> () -> ESTestCase.randomDouble() <= transformedProportion ? transformation.apply(values.get()) : values.get();
}
@SuppressWarnings("unchecked")
public static <T> Function<Supplier<Object>, Supplier<Object>> transformWeighted(
List<Tuple<Double, Function<T, Object>>> transformations
) {
double totalWeight = transformations.stream().mapToDouble(Tuple::v1).sum();
if (totalWeight != 1.0) {
throw new IllegalArgumentException("Sum of weights must be equal to 1");
}
List<Tuple<Double, Double>> lookup = new ArrayList<>();
Double leftBound = 0d;
for (var tuple : transformations) {
lookup.add(Tuple.tuple(leftBound, leftBound + tuple.v1()));
leftBound += tuple.v1();
}
return values -> {
var roll = ESTestCase.randomDouble();
for (int i = 0; i < lookup.size(); i++) {
var bounds = lookup.get(i);
if (roll >= bounds.v1() && roll <= bounds.v2()) {
var transformation = transformations.get(i).v2();
return () -> transformation.apply((T) values.get());
}
}
assert false : "Should not get here if weights add up to 1";
return null;
};
}
}
| DefaultWrappersHandler |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/aot/samples/basic/AbstractSpringJupiterParameterizedClassTests.java | {
"start": 1866,
"end": 1937
} | class ____ {
@Nested
public | AbstractSpringJupiterParameterizedClassTests |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java | {
"start": 9454,
"end": 29906
} | class ____
* (eg. org.apache.hadoop.mapred.InterTrackerProtocol)
*/
public String getInterfaceName() {
return interfaceName;
}
/**
* @return Get the client's preferred version.
*/
public long getClientVersion() {
return clientVersion;
}
/**
* @return Get the server's agreed to version.
*/
public long getServerVersion() {
return serverVersion;
}
/**
* get the rpc status corresponding to this exception
*/
public RpcStatusProto getRpcStatusProto() {
return RpcStatusProto.ERROR;
}
/**
* get the detailed rpc status corresponding to this exception
*/
public RpcErrorCodeProto getRpcErrorCodeProto() {
return RpcErrorCodeProto.ERROR_RPC_VERSION_MISMATCH;
}
}
/**
* Get a proxy connection to a remote server.
*
* @param <T> Generics Type T.
* @param protocol protocol class
* @param clientVersion client version
* @param addr remote address
* @param conf configuration to use
* @return the proxy
* @throws IOException if the far end through a RemoteException
*/
public static <T> T waitForProxy(
Class<T> protocol,
long clientVersion,
InetSocketAddress addr,
Configuration conf
) throws IOException {
return waitForProtocolProxy(protocol, clientVersion, addr, conf).getProxy();
}
/**
* Get a protocol proxy that contains a proxy connection to a remote server
* and a set of methods that are supported by the server.
*
* @param <T> Generics Type T.
* @param protocol protocol class
* @param clientVersion client version
* @param addr remote address
* @param conf configuration to use
* @return the protocol proxy
* @throws IOException if the far end through a RemoteException
*/
public static <T> ProtocolProxy<T> waitForProtocolProxy(Class<T> protocol,
long clientVersion,
InetSocketAddress addr,
Configuration conf) throws IOException {
return waitForProtocolProxy(
protocol, clientVersion, addr, conf, Long.MAX_VALUE);
}
/**
* Get a proxy connection to a remote server.
*
* @param <T> Generics Type T.
* @param protocol protocol class
* @param clientVersion client version
* @param addr remote address
* @param conf configuration to use
* @param connTimeout time in milliseconds before giving up
* @return the proxy
* @throws IOException if the far end through a RemoteException
*/
public static <T> T waitForProxy(Class<T> protocol, long clientVersion,
InetSocketAddress addr, Configuration conf,
long connTimeout) throws IOException {
return waitForProtocolProxy(protocol, clientVersion, addr,
conf, connTimeout).getProxy();
}
/**
* Get a protocol proxy that contains a proxy connection to a remote server
* and a set of methods that are supported by the server
*
* @param <T> Generics Type T.
* @param protocol protocol class
* @param clientVersion client version
* @param addr remote address
* @param conf configuration to use
* @param connTimeout time in milliseconds before giving up
* @return the protocol proxy
* @throws IOException if the far end through a RemoteException
*/
public static <T> ProtocolProxy<T> waitForProtocolProxy(Class<T> protocol,
long clientVersion,
InetSocketAddress addr, Configuration conf,
long connTimeout) throws IOException {
return waitForProtocolProxy(protocol, clientVersion, addr, conf,
getRpcTimeout(conf), null, connTimeout);
}
/**
* Get a proxy connection to a remote server.
*
* @param <T> Generics Type T.
* @param protocol protocol class
* @param clientVersion client version
* @param addr remote address
* @param conf configuration to use
* @param rpcTimeout timeout for each RPC
* @param timeout time in milliseconds before giving up
* @return the proxy
* @throws IOException if the far end through a RemoteException
*/
public static <T> T waitForProxy(Class<T> protocol,
long clientVersion,
InetSocketAddress addr, Configuration conf,
int rpcTimeout,
long timeout) throws IOException {
return waitForProtocolProxy(protocol, clientVersion, addr,
conf, rpcTimeout, null, timeout).getProxy();
}
/**
* Get a protocol proxy that contains a proxy connection to a remote server
* and a set of methods that are supported by the server.
*
* @param <T> Generics Type.
* @param protocol protocol class
* @param clientVersion client version
* @param addr remote address
* @param conf configuration to use
* @param rpcTimeout timeout for each RPC
* @param connectionRetryPolicy input connectionRetryPolicy.
* @param timeout time in milliseconds before giving up
* @return the proxy
* @throws IOException if the far end through a RemoteException.
*/
public static <T> ProtocolProxy<T> waitForProtocolProxy(Class<T> protocol,
long clientVersion,
InetSocketAddress addr, Configuration conf,
int rpcTimeout,
RetryPolicy connectionRetryPolicy,
long timeout) throws IOException {
long startTime = Time.now();
IOException ioe;
while (true) {
try {
return getProtocolProxy(protocol, clientVersion, addr,
UserGroupInformation.getCurrentUser(), conf, NetUtils
.getDefaultSocketFactory(conf), rpcTimeout, connectionRetryPolicy);
} catch(ConnectException se) { // namenode has not been started
LOG.info("Server at " + addr + " not available yet, Zzzzz...");
ioe = se;
} catch(SocketTimeoutException te) { // namenode is busy
LOG.info("Problem connecting to server: " + addr);
ioe = te;
} catch(NoRouteToHostException nrthe) { // perhaps a VIP is failing over
LOG.info("No route to host for server: " + addr);
ioe = nrthe;
}
// check if timed out
if (Time.now()-timeout >= startTime) {
throw ioe;
}
if (Thread.currentThread().isInterrupted()) {
// interrupted during some IO; this may not have been caught
throw new InterruptedIOException("Interrupted waiting for the proxy");
}
// wait for retry
try {
Thread.sleep(1000);
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
throw (IOException) new InterruptedIOException(
"Interrupted waiting for the proxy").initCause(ioe);
}
}
}
/**
* Construct a client-side proxy object that implements the named protocol,
* talking to a server at the named address.
* @param <T> Generics Type T.
* @param protocol input protocol.
* @param clientVersion input clientVersion.
* @param addr input addr.
* @param conf input Configuration.
* @param factory input factory.
* @throws IOException raised on errors performing I/O.
* @return proxy.
*/
public static <T> T getProxy(Class<T> protocol,
long clientVersion,
InetSocketAddress addr, Configuration conf,
SocketFactory factory) throws IOException {
return getProtocolProxy(
protocol, clientVersion, addr, conf, factory).getProxy();
}
/**
* Get a protocol proxy that contains a proxy connection to a remote server
* and a set of methods that are supported by the server.
*
* @param <T> Generics Type T.
* @param protocol protocol class
* @param clientVersion client version
* @param addr remote address
* @param conf configuration to use
* @param factory socket factory
* @return the protocol proxy
* @throws IOException if the far end through a RemoteException
*/
public static <T> ProtocolProxy<T> getProtocolProxy(Class<T> protocol,
long clientVersion,
InetSocketAddress addr, Configuration conf,
SocketFactory factory) throws IOException {
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
return getProtocolProxy(protocol, clientVersion, addr, ugi, conf, factory);
}
/**
* Construct a client-side proxy object that implements the named protocol,
* talking to a server at the named address.
*
* @param <T> Generics Type T.
* @param protocol input protocol.
* @param clientVersion input clientVersion.
* @param addr input addr.
* @param ticket input tocket.
* @param conf input conf.
* @param factory input factory.
* @return the protocol proxy.
* @throws IOException raised on errors performing I/O.
*
*/
public static <T> T getProxy(Class<T> protocol,
long clientVersion,
InetSocketAddress addr,
UserGroupInformation ticket,
Configuration conf,
SocketFactory factory) throws IOException {
return getProtocolProxy(
protocol, clientVersion, addr, ticket, conf, factory).getProxy();
}
/**
* Get a protocol proxy that contains a proxy connection to a remote server
* and a set of methods that are supported by the server
*
* @param <T> Generics Type T.
* @param protocol protocol class
* @param clientVersion client version
* @param addr remote address
* @param ticket user group information
* @param conf configuration to use
* @param factory socket factory
* @return the protocol proxy
* @throws IOException if the far end through a RemoteException
*/
public static <T> ProtocolProxy<T> getProtocolProxy(Class<T> protocol,
long clientVersion,
InetSocketAddress addr,
UserGroupInformation ticket,
Configuration conf,
SocketFactory factory) throws IOException {
return getProtocolProxy(protocol, clientVersion, addr, ticket, conf,
factory, getRpcTimeout(conf), null);
}
/**
* Get a protocol proxy that contains a proxy connection to a remote server
* and a set of methods that are supported by the server.
*
* @param <T> Generics Type T
* @param protocol protocol class
* @param clientVersion client's version
* @param connId client connection identifier
* @param conf configuration
* @param factory socket factory
* @return the protocol proxy
* @throws IOException if the far end through a RemoteException
*/
public static <T> ProtocolProxy<T> getProtocolProxy(Class<T> protocol,
long clientVersion, ConnectionId connId, Configuration conf,
SocketFactory factory) throws IOException {
return getProtocolProxy(protocol, clientVersion, connId, conf,
factory, null);
}
/**
* Get a protocol proxy that contains a proxy connection to a remote server
* and a set of methods that are supported by the server.
*
* @param <T> Generics Type T
* @param protocol protocol class
* @param clientVersion client's version
* @param connId client connection identifier
* @param conf configuration
* @param factory socket factory
* @param alignmentContext StateID alignment context
* @return the protocol proxy
* @throws IOException if the far end through a RemoteException
*/
public static <T> ProtocolProxy<T> getProtocolProxy(Class<T> protocol,
long clientVersion, ConnectionId connId, Configuration conf,
SocketFactory factory, AlignmentContext alignmentContext) throws IOException {
if (UserGroupInformation.isSecurityEnabled()) {
SaslRpcServer.init(conf);
}
return getProtocolEngine(protocol, conf).getProxy(
protocol, clientVersion, connId, conf, factory, alignmentContext);
}
/**
* Construct a client-side proxy that implements the named protocol,
* talking to a server at the named address.
*
* @param <T> Generics Type T.
* @param protocol protocol
* @param clientVersion client's version
* @param addr server address
* @param ticket security ticket
* @param conf configuration
* @param factory socket factory
* @param rpcTimeout max time for each rpc; 0 means no timeout
* @return the proxy
* @throws IOException if any error occurs
*/
public static <T> T getProxy(Class<T> protocol,
long clientVersion,
InetSocketAddress addr,
UserGroupInformation ticket,
Configuration conf,
SocketFactory factory,
int rpcTimeout) throws IOException {
return getProtocolProxy(protocol, clientVersion, addr, ticket,
conf, factory, rpcTimeout, null).getProxy();
}
/**
* Get a protocol proxy that contains a proxy connection to a remote server
* and a set of methods that are supported by the server.
*
* @param <T> Generics Type T.
* @param protocol protocol
* @param clientVersion client's version
* @param addr server address
* @param ticket security ticket
* @param conf configuration
* @param factory socket factory
* @param rpcTimeout max time for each rpc; 0 means no timeout
* @param connectionRetryPolicy retry policy
* @return the proxy
* @throws IOException if any error occurs
*/
public static <T> ProtocolProxy<T> getProtocolProxy(Class<T> protocol,
long clientVersion,
InetSocketAddress addr,
UserGroupInformation ticket,
Configuration conf,
SocketFactory factory,
int rpcTimeout,
RetryPolicy connectionRetryPolicy) throws IOException {
return getProtocolProxy(protocol, clientVersion, addr, ticket,
conf, factory, rpcTimeout, connectionRetryPolicy, null);
}
/**
* Get a protocol proxy that contains a proxy connection to a remote server
* and a set of methods that are supported by the server.
*
* @param <T> Generics Type T.
* @param protocol protocol
* @param clientVersion client's version
* @param addr server address
* @param ticket security ticket
* @param conf configuration
* @param factory socket factory
* @param rpcTimeout max time for each rpc; 0 means no timeout
* @param connectionRetryPolicy retry policy
* @param fallbackToSimpleAuth set to true or false during calls to indicate if
* a secure client falls back to simple auth
* @return the proxy
* @throws IOException if any error occurs
*/
public static <T> ProtocolProxy<T> getProtocolProxy(Class<T> protocol,
long clientVersion,
InetSocketAddress addr,
UserGroupInformation ticket,
Configuration conf,
SocketFactory factory,
int rpcTimeout,
RetryPolicy connectionRetryPolicy,
AtomicBoolean fallbackToSimpleAuth)
throws IOException {
if (UserGroupInformation.isSecurityEnabled()) {
SaslRpcServer.init(conf);
}
return getProtocolEngine(protocol, conf).getProxy(protocol, clientVersion,
addr, ticket, conf, factory, rpcTimeout, connectionRetryPolicy,
fallbackToSimpleAuth, null);
}
/**
* Get a protocol proxy that contains a proxy connection to a remote server
* and a set of methods that are supported by the server.
*
* @param protocol protocol
* @param clientVersion client's version
* @param addr server address
* @param ticket security ticket
* @param conf configuration
* @param factory socket factory
* @param rpcTimeout max time for each rpc; 0 means no timeout
* @param connectionRetryPolicy retry policy
* @param fallbackToSimpleAuth set to true or false during calls to indicate
* if a secure client falls back to simple auth
* @param alignmentContext state alignment context
* @param <T> Generics Type T.
* @return the proxy
* @throws IOException if any error occurs
*/
public static <T> ProtocolProxy<T> getProtocolProxy(Class<T> protocol,
long clientVersion,
InetSocketAddress addr,
UserGroupInformation ticket,
Configuration conf,
SocketFactory factory,
int rpcTimeout,
RetryPolicy connectionRetryPolicy,
AtomicBoolean fallbackToSimpleAuth,
AlignmentContext alignmentContext)
throws IOException {
if (UserGroupInformation.isSecurityEnabled()) {
SaslRpcServer.init(conf);
}
return getProtocolEngine(protocol, conf).getProxy(protocol, clientVersion,
addr, ticket, conf, factory, rpcTimeout, connectionRetryPolicy,
fallbackToSimpleAuth, alignmentContext);
}
/**
* Construct a client-side proxy object with the default SocketFactory.
*
* @param <T> Generics Type T.
* @param protocol input protocol.
* @param clientVersion input clientVersion.
* @param addr input addr.
* @param conf input Configuration.
* @return a proxy instance
* @throws IOException if the thread is interrupted.
*/
public static <T> T getProxy(Class<T> protocol,
long clientVersion,
InetSocketAddress addr, Configuration conf)
throws IOException {
return getProtocolProxy(protocol, clientVersion, addr, conf).getProxy();
}
/**
* @return Returns the server address for a given proxy.
* @param proxy input proxy.
*/
public static InetSocketAddress getServerAddress(Object proxy) {
return getConnectionIdForProxy(proxy).getAddress();
}
/**
* Return the connection ID of the given object. If the provided object is in
* fact a protocol translator, we'll get the connection ID of the underlying
* proxy object.
*
* @param proxy the proxy object to get the connection ID of.
* @return the connection ID for the provided proxy object.
*/
public static ConnectionId getConnectionIdForProxy(Object proxy) {
if (proxy instanceof ProtocolTranslator) {
proxy = ((ProtocolTranslator)proxy).getUnderlyingProxyObject();
}
RpcInvocationHandler inv = (RpcInvocationHandler) Proxy
.getInvocationHandler(proxy);
return inv.getConnectionId();
}
/**
* Get a protocol proxy that contains a proxy connection to a remote server
* and a set of methods that are supported by the server
*
* @param protocol input protocol.
* @param clientVersion input clientVersion.
* @param addr input addr.
* @param conf input configuration.
* @param <T> Generics Type T.
* @return a protocol proxy
* @throws IOException if the thread is interrupted.
*/
public static <T> ProtocolProxy<T> getProtocolProxy(Class<T> protocol,
long clientVersion,
InetSocketAddress addr, Configuration conf)
throws IOException {
return getProtocolProxy(protocol, clientVersion, addr, conf, NetUtils
.getDefaultSocketFactory(conf));
}
/**
* Stop the proxy. Proxy must either implement {@link Closeable} or must have
* associated {@link RpcInvocationHandler}.
*
* @param proxy
* the RPC proxy object to be stopped
* @throws HadoopIllegalArgumentException
* if the proxy does not implement {@link Closeable} | name |
java | apache__commons-lang | src/main/java/org/apache/commons/lang3/StringUtils.java | {
"start": 393421,
"end": 393735
} | class ____ be used as {@code StringUtils.trim(" foo ");}.
*
* <p>
* This constructor is public to permit tools that require a JavaBean instance to operate.
* </p>
*
* @deprecated TODO Make private in 4.0.
*/
@Deprecated
public StringUtils() {
// empty
}
}
| should |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/PatternMatchingInstanceofTest.java | {
"start": 10014,
"end": 10433
} | class ____ {
void test(Object o) {
if (o instanceof Long l) {
test(l);
test(l.hashCode());
}
}
}
""")
.doTest();
}
@Test
public void rawType_findingAvoided() {
helper
.addInputLines(
"Test.java",
"""
import java.util.List;
| Test |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/derivedidentities/e3/a/DependentId.java | {
"start": 234,
"end": 413
} | class ____ implements Serializable {
String name; // matches name of @Id attribute
EmployeeId emp; // matches name of @Id attribute and type of embedded id of Employee
}
| DependentId |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/RequiredModifiersCheckerTest.java | {
"start": 5403,
"end": 5936
} | class ____ {
// BUG: Diagnostic contains: The annotation '@PublicAndFinalRequired' has specified that it must
// be used together with the following modifiers: [public]
@PublicAndFinalRequired
final void foo() {}
}
""")
.doTest();
}
@Test
public void hasRequiredModifiersSucceeds() {
compilationHelper
.addSourceLines(
"test/RequiredModifiersTestCase.java",
"""
package test;
import test.AbstractRequired;
abstract | RequiredModifiersTestCase |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/legacy/Custom.java | {
"start": 141,
"end": 453
} | class ____ implements Cloneable {
String id;
private String name;
public Object clone() {
try {
return super.clone();
}
catch (CloneNotSupportedException cnse) {
throw new RuntimeException();
}
}
void setName(String name) {
this.name = name;
}
String getName() {
return name;
}
}
| Custom |
java | ReactiveX__RxJava | src/test/java/io/reactivex/rxjava3/internal/operators/observable/ObservableConcatMapSchedulerTest.java | {
"start": 38147,
"end": 39046
} | class ____ extends Observable<Object>
implements Supplier<Object> {
final TestObserver<Object> to;
EmptyDisposingObservable(TestObserver<Object> to) {
this.to = to;
}
@Override
protected void subscribeActual(@NonNull Observer<? super @NonNull Object> observer) {
EmptyDisposable.complete(observer);
}
@Override
public @NonNull Object get() throws Throwable {
to.dispose();
return null;
}
}
@Test
public void scalarInnerEmptyDisposeDelayError() {
TestObserver<Object> to = new TestObserver<>();
Observable.just(1)
.hide()
.concatMapDelayError(v -> new EmptyDisposingObservable(to),
true, 2, ImmediateThinScheduler.INSTANCE
)
.subscribe(to);
to.assertEmpty();
}
}
| EmptyDisposingObservable |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/streaming/api/graph/util/ChainedSourceInfo.java | {
"start": 974,
"end": 1069
} | class ____ help maintain the chained source info of an operator chain. */
@Internal
public final | to |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/jsontype/jdk/TypedArraySerTest.java | {
"start": 1909,
"end": 1966
} | class ____ {
public int x = 0;
}
static | Bean |
java | spring-projects__spring-framework | spring-web/src/test/java/org/springframework/http/server/DefaultRequestPathTests.java | {
"start": 844,
"end": 2130
} | class ____ {
@Test
void parse() {
// basic
testParse("/app/a/b/c", "/app", "/a/b/c");
// no context path
testParse("/a/b/c", "", "/a/b/c");
// context path only
testParse("/a/b", "/a/b", "");
// root path
testParse("/", "", "/");
// empty path
testParse("", "", "");
testParse("", "/", "");
// trailing slash
testParse("/app/a/", "/app", "/a/");
testParse("/app/a//", "/app", "/a//");
}
private void testParse(String fullPath, String contextPath, String pathWithinApplication) {
RequestPath requestPath = RequestPath.parse(fullPath, contextPath);
Object expected = contextPath.equals("/") ? "" : contextPath;
assertThat(requestPath.contextPath().value()).isEqualTo(expected);
assertThat(requestPath.pathWithinApplication().value()).isEqualTo(pathWithinApplication);
}
@Test
void modifyContextPath() {
RequestPath requestPath = RequestPath.parse("/aA/bB/cC", null);
assertThat(requestPath.contextPath().value()).isEmpty();
assertThat(requestPath.pathWithinApplication().value()).isEqualTo("/aA/bB/cC");
requestPath = requestPath.modifyContextPath("/aA");
assertThat(requestPath.contextPath().value()).isEqualTo("/aA");
assertThat(requestPath.pathWithinApplication().value()).isEqualTo("/bB/cC");
}
}
| DefaultRequestPathTests |
java | quarkusio__quarkus | independent-projects/resteasy-reactive/server/runtime/src/main/java/org/jboss/resteasy/reactive/server/multipart/MultipartFormDataInput.java | {
"start": 294,
"end": 387
} | interface ____ {
Map<String, Collection<FormValue>> getValues();
| MultipartFormDataInput |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/jpa/query/NativeQueryResultTypeAutoDiscoveryTest.java | {
"start": 21351,
"end": 21683
} | class ____ extends TestedEntity<byte[]> {
/**
* The custom type sets the SQL type to {@link Types#BINARY}
* instead of the default {@link Types#VARBINARY}.
*/
@JdbcTypeCode( Types.BINARY )
public byte[] getTestedProperty() {
return testedProperty;
}
}
@Entity(name = "varbinaryEntity")
public static | BinaryEntity |
java | elastic__elasticsearch | libs/core/src/main/java/org/elasticsearch/core/internal/provider/EmbeddedModulePath.java | {
"start": 5686,
"end": 9588
} | class ____ and services.
static ScanResult scan(Path path) throws IOException {
try (var stream = Files.walk(path)) {
Map<Boolean, Set<String>> map = stream.filter(p -> Files.isDirectory(p) == false)
.map(p -> path.relativize(p).toString())
.filter(p -> (p.endsWith(".class") ^ p.startsWith(SERVICES_PREFIX)))
.collect(Collectors.partitioningBy(e -> e.startsWith(SERVICES_PREFIX), Collectors.toSet()));
return new ScanResult(map.get(Boolean.FALSE), map.get(Boolean.TRUE));
}
}
private static final Pattern DASH_VERSION = Pattern.compile("-(\\d+(\\.|$))");
// Determines the module version (of an automatic module), given the jar file name. As per,
// https://docs.oracle.com/en/java/javase/18/docs/api/java.base/java/lang/module/ModuleFinder.html#of(java.nio.file.Path...)
static Optional<ModuleDescriptor.Version> version(String jarName) {
if (jarName.endsWith(".jar") == false) {
throw new IllegalArgumentException("unexpected jar name: " + jarName);
}
// drop ".jar"
String name = jarName.substring(0, jarName.length() - 4);
// find first occurrence of -${NUMBER}. or -${NUMBER}$
Matcher matcher = DASH_VERSION.matcher(name);
if (matcher.find()) {
int start = matcher.start();
try {
String tail = name.substring(start + 1);
return Optional.of(ModuleDescriptor.Version.parse(tail));
} catch (IllegalArgumentException ignore) {}
}
return Optional.empty();
}
// Parses a set of given service files, and returns a map of service name to list of provider
// classes.
static Map<String, List<String>> services(Set<String> serviceFiles, Path path) throws IOException {
// map names of service configuration files to service names
Set<String> serviceNames = serviceFiles.stream()
.map(EmbeddedModulePath::toServiceName)
.flatMap(Optional::stream)
.collect(Collectors.toSet());
Map<String, List<String>> map = new HashMap<>();
// parse each service configuration file
for (String sn : serviceNames) {
Path se = path.resolve(SERVICES_PREFIX + sn);
List<String> providerClasses = Files.readAllLines(se)
.stream()
.map(EmbeddedModulePath::dropCommentAndTrim)
.filter(Predicate.not(String::isEmpty))
.toList();
if (providerClasses.isEmpty() == false) {
map.put(sn, providerClasses);
}
}
return map;
}
// Drops comments and trims the given string.
private static String dropCommentAndTrim(String line) {
int ci = line.indexOf('#');
if (ci >= 0) {
line = line.substring(0, ci);
}
return line.trim();
}
// Returns an optional containing the package name from a given path and separator, or an
// empty optional if none.
static Optional<String> toPackageName(Path file, String separator) {
Path parent = file.getParent();
if (parent == null) {
String name = file.toString();
if (name.endsWith(".class") && name.equals(MODULE_INFO) == false) {
String msg = name + " found in top-level directory (unnamed package not allowed in module)";
throw new InvalidModuleDescriptorException(msg);
}
return Optional.empty();
}
String pn = parent.toString().replace(separator, ".");
if (isPackageName(pn)) {
return Optional.of(pn);
} else {
// not a valid package name
return Optional.empty();
}
}
// Returns an optional containing the package name from a given binary | files |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestResourcePBImpl.java | {
"start": 1810,
"end": 1880
} | class ____ handle various proto related tests for resources.
*/
public | to |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/processor/enricher/EnricherAggregateOnExceptionTest.java | {
"start": 1313,
"end": 3271
} | class ____ extends ContextTestSupport {
@Test
public void testEnrichTrueOk() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedBodiesReceived("Hello World");
template.sendBody("direct:start", "World");
assertMockEndpointsSatisfied();
}
@Test
public void testEnrichTrueKaboom() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedBodiesReceived("I cannot do this");
template.sendBody("direct:start", "Kaboom");
assertMockEndpointsSatisfied();
}
@Test
public void testEnrichFalseOk() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedBodiesReceived("Hello World");
template.sendBody("direct:start2", "World");
assertMockEndpointsSatisfied();
}
@Test
public void testEnrichFalseKaboom() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedMessageCount(0);
try {
template.sendBody("direct:start2", "Kaboom");
fail("Should have thrown exception");
} catch (CamelExecutionException e) {
assertIsInstanceOf(IllegalArgumentException.class, e.getCause());
assertEquals("I cannot do this", e.getCause().getMessage());
}
assertMockEndpointsSatisfied();
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start").enrich("direct:foo", new MyAggregationStrategy(), true).to("mock:result");
from("direct:start2").enrich("direct:foo", new MyAggregationStrategy(), false).to("mock:result");
from("direct:foo").process(new MyProcessor());
}
};
}
public static | EnricherAggregateOnExceptionTest |
java | micronaut-projects__micronaut-core | inject-java/src/test/groovy/io/micronaut/inject/beanbuilder/ApplyAopToTypeVisitor.java | {
"start": 336,
"end": 1140
} | class ____ implements TypeElementVisitor<Object, Object> {
@Override
public void visitClass(ClassElement element, VisitorContext context) {
if (element.getSimpleName().equals("Test")) {
AnnotationValue<Annotation> av = AnnotationValue
.builder("aopbuilder.Mutating")
.value("name")
.build();
context.getClassElement(ApplyAopToMe.class)
.ifPresent((applyAopToMe) -> element
.addAssociatedBean(applyAopToMe)
.intercept(av)
.inject()
);
}
}
@Override
public @NonNull VisitorKind getVisitorKind() {
return VisitorKind.ISOLATING;
}
}
| ApplyAopToTypeVisitor |
java | spring-projects__spring-security | access/src/main/java/org/springframework/security/access/intercept/aspectj/AspectJCallback.java | {
"start": 969,
"end": 1124
} | class ____ be removed from the public API. Please either use
* `spring-security-aspects`, Spring Security's method security support or create your own
* | will |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/issue_1300/Issue1341.java | {
"start": 3866,
"end": 5571
} | class ____ {
@GET
@Path("{id}")
@Produces({"application/javascript", "application/json"})
@Consumes({"application/javascript", "application/json"})
@JSONP(queryParam = "callback")
public Book getBookById(@PathParam("id") Long id) {
Book book = new Book();
book.setBookId(2);
book.setBookName("Python源码剖析");
book.setPublisher("电子工业出版社");
book.setPublishTime(new Date());
book.setIsbn("911122");
return book;
}
}
@Override
protected void configureClient(ClientConfig config) {
config.register(new FastJsonFeature()).register(FastJsonProvider.class);
}
@Override
protected Application configure() {
enable(TestProperties.LOG_TRAFFIC);
enable(TestProperties.DUMP_ENTITY);
ResourceConfig config = new ResourceConfig();
FastJsonProvider fastJsonProvider = new FastJsonProvider();
FastJsonConfig fastJsonConfig = new FastJsonConfig();
fastJsonConfig.setSerializerFeatures(SerializerFeature.DisableCircularReferenceDetect, SerializerFeature.BrowserSecure);
fastJsonProvider.setFastJsonConfig(fastJsonConfig);
config.register(fastJsonProvider);
config.packages("com.alibaba.json.bvt.issue_1300");
return config;
}
@Test
public void test() {
final String reponse = target("book1341").path("123").request().accept("application/javascript").get(String.class);
System.out.println(reponse);
Assert.assertTrue(reponse.indexOf("Python源码剖析") > 0);
Assert.assertTrue(reponse.indexOf("电子工业出版社") > 0);
}
}
| BookRestFul |
java | mybatis__mybatis-3 | src/test/java/org/apache/ibatis/binding/MapperMethodParamTest.java | {
"start": 2830,
"end": 3235
} | interface ____ {
@Insert("insert into param_test (id, size) values(#{id}, #{size})")
void insert(@Param("id") String id, @Param("size") long size);
@Insert("insert into param_test (id, size) values(#{id}, #{size})")
void insertUsingHashMap(HashMap<String, Object> params);
@Select("select size from param_test where id = #{id}")
long selectSize(@Param("id") String id);
}
}
| Mapper |
java | apache__avro | lang/java/mapred/src/main/java/org/apache/avro/mapreduce/AvroMultipleOutputs.java | {
"start": 3229,
"end": 3850
} | class ____ extends
* Reducer<K, V, T, NullWritable> {
* private MultipleOutputs amos;
*
*
* public void setup(Context context) {
* ...
* amos = new AvroMultipleOutputs(context);
* }
*
* public void reduce(K, Iterator<V> values,Context context)
* throws IOException {
* ...
* amos.write("avro1",datum,NullWritable.get());
* amos.write("avro2",datum,NullWritable.get());
* amos.getCollector("avro3",datum); // here the value is taken as NullWritable
* ...
* }
*
* public void cleanup(Context context) throws IOException {
* amos.close();
* ...
* }
*
* }
* </pre>
*/
public | MyAvroReducer |
java | mockito__mockito | mockito-core/src/test/java/org/mockitousage/bugs/ConcurrentModificationExceptionOnMultiThreadedVerificationTest.java | {
"start": 1899,
"end": 2443
} | class ____ implements Callable<Object> {
private final int seq;
TargetInvoker(int seq) {
this.seq = seq;
}
public Object call() throws Exception {
System.err.println("started " + seq);
for (int i = 0; i < TIMES; i++) {
Thread.yield();
target.targetMethod("arg");
Thread.sleep((long) INTERVAL_MILLIS);
}
System.err.println("finished" + seq);
return seq;
}
}
public | TargetInvoker |
java | apache__camel | components/camel-coap/src/generated/java/org/apache/camel/coap/CoAPComponentConfigurer.java | {
"start": 721,
"end": 3006
} | class ____ extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
CoAPComponent target = (CoAPComponent) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "autowiredenabled":
case "autowiredEnabled": target.setAutowiredEnabled(property(camelContext, boolean.class, value)); return true;
case "bridgeerrorhandler":
case "bridgeErrorHandler": target.setBridgeErrorHandler(property(camelContext, boolean.class, value)); return true;
case "configurationfile":
case "configurationFile": target.setConfigurationFile(property(camelContext, java.lang.String.class, value)); return true;
case "lazystartproducer":
case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
default: return false;
}
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "autowiredenabled":
case "autowiredEnabled": return boolean.class;
case "bridgeerrorhandler":
case "bridgeErrorHandler": return boolean.class;
case "configurationfile":
case "configurationFile": return java.lang.String.class;
case "lazystartproducer":
case "lazyStartProducer": return boolean.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
CoAPComponent target = (CoAPComponent) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "autowiredenabled":
case "autowiredEnabled": return target.isAutowiredEnabled();
case "bridgeerrorhandler":
case "bridgeErrorHandler": return target.isBridgeErrorHandler();
case "configurationfile":
case "configurationFile": return target.getConfigurationFile();
case "lazystartproducer":
case "lazyStartProducer": return target.isLazyStartProducer();
default: return null;
}
}
}
| CoAPComponentConfigurer |
java | jhy__jsoup | src/main/java/org/jsoup/nodes/Printer.java | {
"start": 284,
"end": 1781
} | class ____ implements NodeVisitor {
final Node root;
final QuietAppendable accum;
final OutputSettings settings;
Printer(Node root, QuietAppendable accum, OutputSettings settings) {
this.root = root;
this.accum = accum;
this.settings = settings;
}
void addHead(Element el, int depth) {
el.outerHtmlHead(accum, settings);
}
void addTail(Element el, int depth) {
el.outerHtmlTail(accum, settings);
}
void addText(TextNode textNode, int textOptions, int depth) {
int options = Entities.ForText | textOptions;
Entities.escape(accum, textNode.coreValue(), settings, options);
}
void addNode(LeafNode node, int depth) {
node.outerHtmlHead(accum, settings);
}
void indent(int depth) {
accum.append('\n').append(StringUtil.padding(depth * settings.indentAmount(), settings.maxPaddingWidth()));
}
@Override
public void head(Node node, int depth) {
if (node.getClass() == TextNode.class) addText((TextNode) node, 0, depth); // Excludes CData; falls to addNode
else if (node instanceof Element) addHead((Element) node, depth);
else addNode((LeafNode) node, depth);
}
@Override
public void tail(Node node, int depth) {
if (node instanceof Element) { // otherwise a LeafNode
addTail((Element) node, depth);
}
}
/** Pretty Printer */
static | Printer |
java | quarkusio__quarkus | extensions/narayana-jta/runtime/src/main/java/io/quarkus/narayana/jta/QuarkusTransaction.java | {
"start": 416,
"end": 1501
} | class ____ designed to be easier to use. The main features it offers over {@code UserTransaction} are:
*
* <ul>
* <li><b>No Checked Exceptions: </b>All underlying checked exceptions are wrapped in an unchecked
* {@link QuarkusTransactionException}.</li>
* <li><b>No Transaction Leaks: </b>Transactions are tied to the request scope, if the scope is destroyed before the transaction
* is committed the transaction is rolled back. Note that this means this can only currently be used when the request scope is
* active.</li>
* <li><b>Per Transaction Timeouts:</b>
* {{@link BeginOptions#timeout(int)}/{@link TransactionRunnerOptions#timeout(int)}
* can be used to set the new transactions timeout, without affecting the per thread default.</li>
* <li><b>Lambda Style Transactions: </b> {@link Runnable} and {@link Callable} instances can be run inside the scope of a new
* transaction.</li>
* </ul>
* <p>
* Note that any checked exception will be wrapped by a {@link QuarkusTransactionException}, while unchecked exceptions are
* allowed to propagate unchanged.
*/
public | is |
java | spring-projects__spring-boot | core/spring-boot/src/test/java/org/springframework/boot/diagnostics/analyzer/BeanNotOfRequiredTypeFailureAnalyzerTests.java | {
"start": 1509,
"end": 2975
} | class ____ {
private final FailureAnalyzer analyzer = new BeanNotOfRequiredTypeFailureAnalyzer();
@Test
void jdkProxyCausesInjectionFailure() {
FailureAnalysis analysis = performAnalysis(JdkProxyConfiguration.class);
assertThat(analysis.getDescription()).startsWith("The bean 'asyncBean'");
assertThat(analysis.getDescription())
.containsPattern("The bean is of type '" + AsyncBean.class.getPackage().getName() + ".\\$Proxy.*'");
assertThat(analysis.getDescription())
.contains(String.format("and implements:%n\t") + SomeInterface.class.getName());
assertThat(analysis.getDescription()).contains("Expected a bean of type '" + AsyncBean.class.getName() + "'");
assertThat(analysis.getDescription())
.contains(String.format("which implements:%n\t") + SomeInterface.class.getName());
}
private FailureAnalysis performAnalysis(Class<?> configuration) {
FailureAnalysis analysis = this.analyzer.analyze(createFailure(configuration));
assertThat(analysis).isNotNull();
return analysis;
}
private Exception createFailure(Class<?> configuration) {
try (ConfigurableApplicationContext context = new AnnotationConfigApplicationContext(configuration)) {
fail("Expected failure did not occur");
throw new AssertionError("Should not be reached");
}
catch (Exception ex) {
return ex;
}
}
@Configuration(proxyBeanMethods = false)
@EnableAsync
@Import(UserConfiguration.class)
static | BeanNotOfRequiredTypeFailureAnalyzerTests |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/mapper/blockloader/docvalues/fn/MvMinBytesRefsFromOrdsBlockLoader.java | {
"start": 1604,
"end": 3831
} | class ____ extends BlockDocValuesReader {
private final SortedSetDocValues ordinals;
MvMinSortedSet(SortedSetDocValues ordinals) {
this.ordinals = ordinals;
}
@Override
public Block read(BlockFactory factory, Docs docs, int offset, boolean nullsFiltered) throws IOException {
if (docs.count() - offset == 1) {
return readSingleDoc(factory, docs.get(offset));
}
try (var builder = factory.sortedSetOrdinalsBuilder(ordinals, docs.count() - offset)) {
for (int i = offset; i < docs.count(); i++) {
int doc = docs.get(i);
if (doc < ordinals.docID()) {
throw new IllegalStateException("docs within same block must be in order");
}
if (ordinals.advanceExact(doc) == false) {
builder.appendNull();
continue;
}
builder.appendOrd(Math.toIntExact(ordinals.nextOrd()));
}
return builder.build();
}
}
@Override
public void read(int docId, StoredFields storedFields, Builder builder) throws IOException {
read(docId, (BytesRefBuilder) builder);
}
private Block readSingleDoc(BlockFactory factory, int docId) throws IOException {
if (ordinals.advanceExact(docId) == false) {
return factory.constantNulls(1);
}
BytesRef v = ordinals.lookupOrd(ordinals.nextOrd());
return factory.constantBytes(BytesRef.deepCopyOf(v), 1);
}
private void read(int docId, BytesRefBuilder builder) throws IOException {
if (false == ordinals.advanceExact(docId)) {
builder.appendNull();
return;
}
builder.appendBytesRef(ordinals.lookupOrd(ordinals.nextOrd()));
}
@Override
public int docId() {
return ordinals.docID();
}
@Override
public String toString() {
return "MvMinBytesRefsFromOrds.SortedSet";
}
}
}
| MvMinSortedSet |
java | google__dagger | dagger-compiler/main/java/dagger/internal/codegen/base/KeywordValidator.java | {
"start": 1789,
"end": 5168
} | class ____ {
@Inject
KeywordValidator() {}
/**
* Validates that the given element does not have a name that is a Java keyword.
*
* <p>This is not allowed because Dagger currently generates Java code for KSP.
*
* @param element the element to validate
* @return a validation report containing any errors found
*/
ValidationReport validateJavaKeyword(XElement element) {
ValidationReport.Builder report = ValidationReport.about(element);
if (element == null) {
return report.build();
}
if (isTypeElement(element)) {
keywordFromName(getSimpleName(element))
.ifPresent(keyword -> report.addError(javaKeywordErrorMessage(keyword), element));
// For KAPT We need to validate the Kotlin metadata methods name for type elements.
// TODO(emjich): Re-enable this validation once we figure out how to avoid false positives.
// As of now, we are seeing false positives for this validation because KAPT stubs do not
// contain annotation information and we cannot identify which methods to validate for Dagger
// annotations.
} else if (isExecutable(element)) {
if (isMethod(element)) {
// Validate the method name.
keywordFromName(getSimpleName(element))
.ifPresent(keyword -> report.addError(javaKeywordErrorMessage(keyword), element));
// Validate the method return type.
validateJavaKeywordType(asMethod(element).getReturnType(), report);
}
asExecutable(element)
.getParameters()
.forEach(parameter -> validateJavaKeywordType(parameter.getType(), report));
} else if (isField(element)) {
keywordFromName(getSimpleName(element))
.ifPresent(keyword -> report.addError(javaKeywordErrorMessage(keyword), element));
validateJavaKeywordType(asField(element).getType(), report);
} else if (isMethodParameter(element)) {
// Method parameters names do not cause errors, so we only validate the types.
validateJavaKeywordType(asMethodParameter(element).getType(), report);
}
return report.build();
}
private void validateJavaKeywordType(@Nullable XType type, ValidationReport.Builder report) {
if (type == null || type.isError() || isPrimitive(type)) {
return;
}
// Checks the raw types like `List` in `List<Foo>`
if (type.getTypeElement() != null) {
keywordFromName(getSimpleName(type.getTypeElement()))
.ifPresent(
keyword -> report.addError(javaKeywordErrorMessage(keyword), type.getTypeElement()));
}
// Checks the type arguments like `Foo` in `List<Foo>`
for (XType typeArgument : type.getTypeArguments()) {
validateJavaKeywordType(typeArgument, report);
}
// Checks the wildcard bound types like `Foo` in `? extends Foo`
if (type.extendsBound() != null) {
validateJavaKeywordType(type.extendsBound(), report);
}
}
private Optional<String> keywordFromName(String name) {
return isJavaKeyword(name) ? Optional.of(name) : Optional.empty();
}
boolean isJavaKeyword(String name) {
return isKeyword(name);
}
private String javaKeywordErrorMessage(String keyword) {
return String.format(
"The name '%s' cannot be used because it is a Java keyword."
+ " Please use a different name.",
keyword);
}
}
| KeywordValidator |
java | apache__camel | components/camel-spring-parent/camel-spring-xml/src/test/java/org/apache/camel/spring/processor/SpringDelayerTest.java | {
"start": 1032,
"end": 1264
} | class ____ extends DelayerTest {
@Override
protected CamelContext createCamelContext() throws Exception {
return createSpringCamelContext(this, "org/apache/camel/spring/processor/delayer.xml");
}
}
| SpringDelayerTest |
java | grpc__grpc-java | binder/src/main/java/io/grpc/binder/internal/SingleMessageClientStream.java | {
"start": 1866,
"end": 4917
} | class ____ implements ClientStream {
private final Inbound.ClientInbound inbound;
private final Outbound.ClientOutbound outbound;
private final Attributes attributes;
@Nullable private InputStream pendingSingleMessage;
@Nullable private Deadline pendingDeadline;
SingleMessageClientStream(
Inbound.ClientInbound inbound, Outbound.ClientOutbound outbound, Attributes attributes) {
this.inbound = inbound;
this.outbound = outbound;
this.attributes = attributes;
}
@Override
public void start(ClientStreamListener listener) {
synchronized (inbound) {
inbound.init(outbound, listener);
}
if (outbound.isReady()) {
listener.onReady();
}
}
@Override
public boolean isReady() {
return outbound.isReady();
}
@Override
public void request(int numMessages) {
synchronized (inbound) {
inbound.requestMessages(numMessages);
}
}
@Override
public void writeMessage(InputStream message) {
if (pendingSingleMessage != null) {
synchronized (inbound) {
inbound.closeAbnormal(Status.INTERNAL.withDescription("too many messages"));
}
} else {
pendingSingleMessage = message;
}
}
@Override
public void halfClose() {
try {
synchronized (outbound) {
if (pendingDeadline != null) {
outbound.setDeadline(pendingDeadline);
}
outbound.onPrefixReady();
outbound.sendSingleMessageAndHalfClose(pendingSingleMessage);
}
} catch (StatusException se) {
synchronized (inbound) {
inbound.closeAbnormal(se.getStatus());
}
}
}
@Override
public void cancel(Status status) {
synchronized (inbound) {
inbound.closeOnCancel(status);
}
}
@Override
public void setDeadline(@Nonnull Deadline deadline) {
this.pendingDeadline = deadline;
}
@Override
public Attributes getAttributes() {
return attributes;
}
@Override
public final String toString() {
return "SingleMessageClientStream[" + inbound + "/" + outbound + "]";
}
// =====================
// Misc stubbed & unsupported methods.
@Override
public final void flush() {
// Ignore.
}
@Override
public final void setCompressor(Compressor compressor) {
// Ignore.
}
@Override
public final void setMessageCompression(boolean enable) {
// Ignore.
}
@Override
public void setAuthority(String authority) {
// Ignore.
}
@Override
public void setMaxInboundMessageSize(int maxSize) {
// Ignore.
}
@Override
public void setMaxOutboundMessageSize(int maxSize) {
// Ignore.
}
@Override
public void appendTimeoutInsight(InsightBuilder insight) {
// Ignore
}
@Override
public void setFullStreamDecompression(boolean fullStreamDecompression) {
// Ignore.
}
@Override
public void setDecompressorRegistry(DecompressorRegistry decompressorRegistry) {
// Ignore.
}
@Override
public void optimizeForDirectExecutor() {
// Ignore.
}
}
| SingleMessageClientStream |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/exception/JDBCConnectionException.java | {
"start": 364,
"end": 630
} | class ____ extends JDBCException {
public JDBCConnectionException(String string, SQLException root) {
super( string, root );
}
public JDBCConnectionException(String string, SQLException root, String sql) {
super( string, root, sql );
}
}
| JDBCConnectionException |
java | quarkusio__quarkus | extensions/redis-client/runtime/src/main/java/io/quarkus/redis/datasource/search/QueryArgs.java | {
"start": 1009,
"end": 18584
} | class ____ implements RedisCommandExtraArguments {
public boolean nocontent;
private boolean verbatim;
private boolean withScores;
private boolean withPayloads;
private boolean withSortKeys;
private final List<NumericFilter> filters = new ArrayList<>();
private final List<GeoFilter> geoFilters = new ArrayList<>();
private String[] inKeys;
private String[] inFields;
private final List<ReturnClause> returns = new ArrayList<>();
private SummarizeArgs summarize;
private HighlightArgs highlight;
private int slop = -1;
private boolean inOrder;
private String lang;
private String expander;
private String scorer;
private boolean explainScore;
private String asc;
private String desc;
private int offset = -1;
private int count = -1;
private Duration timeout;
private final Map<String, String> params = new HashMap<>();
private final Map<String, byte[]> byteArrayParams = new HashMap<>();
private int dialect = -1;
/**
* Returns the document ids and not the content. This is useful if RedisSearch is only an index on an external
* document collection.
*
* @return the current {@code QueryArgs}
*/
public QueryArgs nocontent() {
this.nocontent = true;
return this;
}
/**
* Does not try to use stemming for query expansion but searches the query terms verbatim.
*
* @return the current {@code QueryArgs}
*/
public QueryArgs verbatim() {
this.verbatim = true;
return this;
}
/**
* Also returns the relative internal score of each document.
* This can be used to merge results from multiple instances.
*
* @return the current {@code QueryArgs}
*/
public QueryArgs withScores() {
this.withScores = true;
return this;
}
/**
* Retrieves optional document payloads.
*
* @return the current {@code QueryArgs}
*/
public QueryArgs withPayloads() {
this.withPayloads = true;
return this;
}
/**
* returns the value of the sorting key, right after the id and score and/or payload, if requested.
* This is usually not needed, and exists for distributed search coordination purposes.
* This option is relevant only if used in conjunction with {@code SORTBY}.
*
* @return the current {@code QueryArgs}
*/
public QueryArgs withSortKeys() {
this.withSortKeys = true;
return this;
}
/**
* Limits results to those having numeric values ranging between min and max, if {@code numberFilter} is defined as
* a numeric attribute in {@code FT.CREATE}.
* Min and max follow {@code ZRANGE} syntax, and can be {@code -inf}, {@code +inf}, and use ( for exclusive ranges. Multiple
* numeric filters for different attributes are supported in one query.
*
* @param filter the filter
* @return the current {@code QueryArgs}
*/
public QueryArgs filter(NumericFilter filter) {
nonNull(filter, "filter");
this.filters.add(filter);
return this;
}
/**
* Filters the results to a given radius from lon and lat.
* Radius is given as a number and units. See {@code GEORADIUS} for more details.
*
* @param filter the filter
* @return the current {@code QueryArgs}
*/
public QueryArgs geoFilter(GeoFilter filter) {
nonNull(filter, "filter");
this.geoFilters.add(filter);
return this;
}
/**
* Limits the result to a given set of keys specified in the list.
* Non-existent keys are ignored, unless all the keys are non-existent.
*
* @param keys the list of keys
* @return the current {@code QueryArgs}
*/
public final QueryArgs inKeys(String... keys) {
doesNotContainNull(notNullOrEmpty(keys, "keys"), "keys");
this.inKeys = keys;
return this;
}
/**
* Filters the results to those appearing only in specific attributes of the document, like title or URL.
*
* @param fields the list of fields
* @return the current {@code QueryArgs}
*/
public QueryArgs inFields(String... fields) {
doesNotContainNull(notNullOrEmpty(fields, "fields"), "fields");
this.inFields = fields;
return this;
}
/**
* Limits the attributes returned from the document.
* If no return clauses are passed, it acts like {@code NOCONTENT}.
* {@code field} is either an attribute name (for hashes and JSON) or a JSON Path expression (for JSON).
* {@code alias} is the optional name used in the result. If not provided, the {@code field} is used in the result.
*
* @param field the field
* @param alias the alias
* @return the current {@code QueryArgs}
*/
public QueryArgs returnAttribute(String field, String alias) {
notNullOrBlank(field, "field");
this.returns.add(new ReturnClause(field, alias));
return this;
}
/**
* Limits the attributes returned from the document.
* If no return clauses are passed, it acts like {@code NOCONTENT}.
* {@code field} is either an attribute name (for hashes and JSON) or a JSON Path expression (for JSON).
* {@code alias} is the name used in the result. As it is not provided, the {@code field} is used in the result.
*
* @param field the field
* @return the current {@code QueryArgs}
*/
public QueryArgs returnAttribute(String field) {
this.returns.add(new ReturnClause(notNullOrBlank(field, "field"), null));
return this;
}
/**
* Returns only the sections of the attribute that contain the matched text.
*
* @param args the summarize argument
* @return the current {@code QueryArgs}
*/
public QueryArgs summarize(SummarizeArgs args) {
this.summarize = nonNull(args, "args");
return this;
}
/**
* formats occurrences of matched text.
*
* @param args the summarize argument
* @return the current {@code QueryArgs}
*/
public QueryArgs highlight(HighlightArgs args) {
this.highlight = nonNull(args, "args");
return this;
}
/**
* Allows a maximum of {@code slop} intervening number of unmatched offsets between phrase terms.
* In other words, the slop for exact phrases is 0.
*
* @param slop the slop
* @return the current {@code QueryArgs}
*/
public QueryArgs slop(int slop) {
this.slop = positive(slop, "slop");
return this;
}
/**
* Puts the query terms in the same order in the document as in the query, regardless of the offsets between them.
* Typically used in conjunction with {@code SLOP}.
*
* @return the current {@code QueryArgs}
*/
public QueryArgs inOrder() {
this.inOrder = true;
return this;
}
/**
* Use a stemmer for the supplied language during search for query expansion.
* If querying documents in Chinese, set to chinese to properly tokenize the query terms.
* Defaults to English.
* If an unsupported language is sent, the command returns an error.
*
* @param lang the language
* @return the current {@code QueryArgs}
*/
public QueryArgs language(String lang) {
this.lang = notNullOrBlank(lang, "lang");
return this;
}
/**
* Uses a custom query expander instead of the stemmer.
*
* @param expander the expander
* @return the current {@code QueryArgs}
*/
public QueryArgs expander(String expander) {
this.expander = notNullOrBlank(expander, "expander");
return this;
}
/**
* Uses a custom scoring function you define
*
* @param scorer the scorer
* @return the current {@code QueryArgs}
*/
public QueryArgs scorer(String scorer) {
this.scorer = notNullOrBlank(scorer, "scorer");
return this;
}
/**
* Returns a textual description of how the scores were calculated. Using this options requires
* the {@code sCORES} option.
*
* @return the current {@code QueryArgs}
*/
public QueryArgs explainScore() {
this.explainScore = true;
return this;
}
/**
* Orders the results by the value of this attribute.
* Use ascending order.
* This applies to both text and numeric attributes.
* Attributes needed for {@code SORTBY} should be declared as {@code SORTABLE} in the index, in order to be
* available with very low latency. Note that this adds memory overhead.
*
* @param field the field
* @return the current {@code QueryArgs}
*/
public QueryArgs sortByAscending(String field) {
this.asc = notNullOrBlank(field, "field");
return this;
}
/**
* Orders the results by the value of this attribute.
* Use descending order.
* This applies to both text and numeric attributes.
* Attributes needed for {@code SORTBY} should be declared as {@code SORTABLE} in the index, in order to be
* available with very low latency. Note that this adds memory overhead.
*
* @param field the field
* @return the current {@code QueryArgs}
*/
public QueryArgs sortByDescending(String field) {
this.desc = notNullOrBlank(field, "field");
return this;
}
/**
* Limits the results to the offset and number of results given.
* Note that the offset is zero-indexed. The default is 0 10, which returns 10 items starting from the first result.
* You can use {@code LIMIT 0 0} to count the number of documents in the result set without actually returning them.
*
* @param offset the offset
* @param count the count
* @return the current {@code QueryArgs}
*/
public QueryArgs limit(int offset, int count) {
this.offset = positiveOrZero(offset, "offset");
this.count = positiveOrZero(count, "count");
return this;
}
/**
* Overrides the timeout parameter of the module.
*
* @param timeout the timeout
* @return the current {@code QueryArgs}
*/
public QueryArgs timeout(Duration timeout) {
validateTimeout(timeout, "timeout");
this.timeout = timeout;
return this;
}
/**
* Defines one or more value parameters. Each parameter has a name and a value.
* You can reference parameters in the query by a $, followed by the parameter name, for example, $user.
* Each such reference in the search query to a parameter name is substituted by the corresponding parameter value.
* For example, with parameter definition PARAMS 4 lon 29.69465 lat 34.95126, the expression @loc:[$lon $lat 10 km]
* is evaluated to @loc:[29.69465 34.95126 10 km]. You cannot reference parameters in the query string where concrete
* values are not allowed, such as in field names, for example, @loc.
* <p>
* To use PARAMS, set DIALECT to 2.
*
* @param name the parameter name
* @param value the parameter value as String
* @return the current {@code QueryArgs}
*/
public QueryArgs param(String name, String value) {
this.params.put(notNullOrBlank(name, "name"), notNullOrBlank(value, "value"));
return this;
}
/**
* Defines a parameter with a byte array value.
*
* @param name the parameter name
* @param value the parameter value as byte array
* @return the current {@code QueryArgs}
*/
public QueryArgs param(String name, byte[] value) {
this.byteArrayParams.put(notNullOrBlank(name, "name"), notNullOrEmpty(value, "value"));
return this;
}
/**
* Defines a parameter with a float array value.
*
* @param name the parameter name
* @param value the parameter value as array of floats
* @return the current {@code QueryArgs}
*/
public QueryArgs param(String name, float[] value) {
this.byteArrayParams.put(notNullOrBlank(name, "name"), toByteArray(notNullOrEmpty(value, "value")));
return this;
}
/**
* Defines a parameter with a double array value.
*
* @param name the parameter name
* @param value the parameter value as array of doubles
* @return the current {@code QueryArgs}
*/
public QueryArgs param(String name, double[] value) {
this.byteArrayParams.put(notNullOrBlank(name, "name"), toByteArray(notNullOrEmpty(value, "value")));
return this;
}
/**
* Defines a parameter with an int array value.
*
* @param name the parameter name
* @param value the parameter value as array of ints
* @return the current {@code QueryArgs}
*/
public QueryArgs param(String name, int[] value) {
this.byteArrayParams.put(notNullOrBlank(name, "name"), toByteArray(notNullOrEmpty(value, "value")));
return this;
}
/**
* Defines a parameter with a long array value.
*
* @param name the parameter name
* @param value the parameter value as array of longs
* @return the current {@code QueryArgs}
*/
public QueryArgs param(String name, long[] value) {
this.byteArrayParams.put(notNullOrBlank(name, "name"), toByteArray(notNullOrEmpty(value, "value")));
return this;
}
/**
* Selects the dialect version under which to execute the query.
* If not specified, the query will execute under the default dialect version set during module initial loading.
*
* @param version the version
* @return the current {@code QueryArgs}
*/
public QueryArgs dialect(int version) {
this.dialect = version;
return this;
}
@Override
public List<Object> toArgs(Codec encoder) {
List<Object> list = new ArrayList<>();
if (nocontent) {
list.add("NOCONTENT");
}
if (verbatim) {
list.add("VERBATIM");
}
if (withScores) {
list.add("WITHSCORES");
}
if (withPayloads) {
list.add("WITHPAYLOADS");
}
if (withSortKeys) {
list.add("WITHSORTKEYS");
}
for (NumericFilter filter : filters) {
list.add("FILTER");
list.add(filter.getField());
list.add(filter.getLowerBound());
list.add(filter.getUpperBound());
}
for (GeoFilter filter : geoFilters) {
list.add(filter.toString());
}
if (inKeys != null && inKeys.length > 0) {
list.add(Integer.toString(inKeys.length));
list.addAll(Arrays.asList(inKeys));
}
if (!returns.isEmpty()) {
list.add("RETURN");
List<String> clauses = new ArrayList<>();
for (ReturnClause clause : returns) {
clauses.addAll(clause.toArgs());
}
list.add(Integer.toString(clauses.size()));
list.addAll(clauses);
}
if (inFields != null && inFields.length > 0) {
list.add(Integer.toString(inFields.length));
Collections.addAll(list, inFields);
}
if (summarize != null) {
list.addAll(summarize.toArgs());
}
if (highlight != null) {
list.addAll(highlight.toArgs());
}
if (slop > -1) {
list.add("SLOP");
list.add(Integer.toString(slop));
}
if (inOrder) {
list.add("INORDER");
}
if (lang != null) {
list.add("LANGUAGE");
list.add(lang);
}
if (expander != null) {
list.add("EXPANDER");
list.add(expander);
}
if (scorer != null) {
list.add("SCORER");
list.add(scorer);
}
if (explainScore) {
list.add("EXPLAINSCORE");
}
if (asc != null || desc != null) {
if (asc != null && desc != null) {
throw new IllegalArgumentException("Cannot use descending and ascending order at the same time");
}
list.add("SORTBY");
if (asc != null) {
list.add(asc);
list.add("ASC");
}
if (desc != null) {
list.add(desc);
list.add("DESC");
}
}
if (offset != -1) {
list.add("LIMIT");
list.add(Integer.toString(offset));
list.add(Integer.toString(count));
}
if (timeout != null) {
list.add("TIMEOUT");
list.add(Long.toString(timeout.toMillis()));
}
if (!params.isEmpty() || !byteArrayParams.isEmpty()) {
list.add("PARAMS");
list.add(Integer.toString((params.size() + byteArrayParams.size()) * 2));
for (Map.Entry<String, byte[]> entry : byteArrayParams.entrySet()) {
list.add(entry.getKey());
list.add(entry.getValue());
}
for (Map.Entry<String, String> entry : params.entrySet()) {
list.add(entry.getKey());
list.add(entry.getValue());
}
}
if (dialect != -1) {
list.add("DIALECT");
list.add(Integer.toString(dialect));
}
return list;
}
private final static | QueryArgs |
java | spring-projects__spring-security | oauth2/oauth2-client/src/test/java/org/springframework/security/oauth2/client/web/client/support/OAuth2RestClientHttpServiceGroupConfigurerTests.java | {
"start": 1612,
"end": 3123
} | class ____ {
@Mock
private OAuth2AuthorizedClientManager authoriedClientManager;
@Mock
private HttpServiceGroupConfigurer.Groups<RestClient.Builder> groups;
@Captor
ArgumentCaptor<HttpServiceGroupConfigurer.ProxyFactoryCallback> forProxyFactory;
@Mock
private HttpServiceProxyFactory.Builder factoryBuilder;
@Captor
private ArgumentCaptor<HttpServiceGroupConfigurer.ClientCallback<RestClient.Builder>> configureClient;
@Mock
private RestClient.Builder clientBuilder;
@Test
void configureGroupsConfigureProxyFactory() {
OAuth2RestClientHttpServiceGroupConfigurer configurer = OAuth2RestClientHttpServiceGroupConfigurer
.from(this.authoriedClientManager);
configurer.configureGroups(this.groups);
verify(this.groups).forEachProxyFactory(this.forProxyFactory.capture());
this.forProxyFactory.getValue().withProxyFactory(null, this.factoryBuilder);
verify(this.factoryBuilder).httpRequestValuesProcessor(ClientRegistrationIdProcessor.DEFAULT_INSTANCE);
}
@Test
void configureGroupsConfigureClient() {
OAuth2RestClientHttpServiceGroupConfigurer configurer = OAuth2RestClientHttpServiceGroupConfigurer
.from(this.authoriedClientManager);
configurer.configureGroups(this.groups);
verify(this.groups).forEachClient(this.configureClient.capture());
this.configureClient.getValue().withClient(null, this.clientBuilder);
verify(this.clientBuilder).requestInterceptor(any(OAuth2ClientHttpRequestInterceptor.class));
}
}
| OAuth2RestClientHttpServiceGroupConfigurerTests |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java | {
"start": 2977,
"end": 12472
} | enum ____ {
INIT_BLOCK,
DECOMPRESS_DATA,
FINISHED,
CORRUPTED
}
private State currentState = State.INIT_BLOCK;
/**
* Underlying decompressor in use.
*/
private LZ4FastDecompressor decompressor;
/**
* Type of current block.
*/
private int blockType;
/**
* Compressed length of current incoming block.
*/
private int compressedLength;
/**
* Decompressed length of current incoming block.
*/
private int decompressedLength;
public Lz4TransportDecompressor(Recycler<BytesRef> recycler) {
super(recycler);
this.decompressor = Compression.Scheme.lz4Decompressor();
}
@Override
public Compression.Scheme getScheme() {
return Compression.Scheme.LZ4;
}
@Override
public int decompress(BytesReference bytesReference) throws IOException {
int bytesConsumed = 0;
if (hasSkippedHeader == false) {
hasSkippedHeader = true;
int esHeaderLength = Compression.Scheme.HEADER_LENGTH;
bytesReference = bytesReference.slice(esHeaderLength, bytesReference.length() - esHeaderLength);
bytesConsumed += esHeaderLength;
}
while (true) {
int consumed = decodeBlock(bytesReference);
bytesConsumed += consumed;
int newLength = bytesReference.length() - consumed;
if (consumed > 0 && newLength > 0) {
bytesReference = bytesReference.slice(consumed, newLength);
} else {
break;
}
}
return bytesConsumed;
}
private int decodeBlock(BytesReference reference) throws IOException {
int bytesConsumed = 0;
try {
switch (currentState) {
case INIT_BLOCK:
if (reference.length() < HEADER_LENGTH) {
return bytesConsumed;
}
try (StreamInput in = reference.streamInput()) {
final long magic = in.readLong();
if (magic != MAGIC_NUMBER) {
throw new IllegalStateException("unexpected block identifier");
}
final int token = in.readByte();
final int compressionLevel = (token & 0x0F) + COMPRESSION_LEVEL_BASE;
int blockType = token & 0xF0;
int compressedLength = Integer.reverseBytes(in.readInt());
if (compressedLength < 0 || compressedLength > MAX_BLOCK_SIZE) {
throw new IllegalStateException(
String.format(
Locale.ROOT,
"invalid compressedLength: %d (expected: 0-%d)",
compressedLength,
MAX_BLOCK_SIZE
)
);
}
int decompressedLength = Integer.reverseBytes(in.readInt());
final int maxDecompressedLength = 1 << compressionLevel;
if (decompressedLength < 0 || decompressedLength > maxDecompressedLength) {
throw new IllegalStateException(
String.format(
Locale.ROOT,
"invalid decompressedLength: %d (expected: 0-%d)",
decompressedLength,
maxDecompressedLength
)
);
}
if (decompressedLength == 0 && compressedLength != 0
|| decompressedLength != 0 && compressedLength == 0
|| blockType == BLOCK_TYPE_NON_COMPRESSED && decompressedLength != compressedLength) {
throw new IllegalStateException(
String.format(
Locale.ROOT,
"stream corrupted: compressedLength(%d) and decompressedLength(%d) mismatch",
compressedLength,
decompressedLength
)
);
}
// Read int where checksum would normally be written
in.readInt();
bytesConsumed += HEADER_LENGTH;
if (decompressedLength == 0) {
currentState = State.FINISHED;
decompressor = null;
break;
}
this.blockType = blockType;
this.compressedLength = compressedLength;
this.decompressedLength = decompressedLength;
}
currentState = State.DECOMPRESS_DATA;
break;
case DECOMPRESS_DATA:
if (reference.length() < compressedLength) {
break;
}
byte[] decompressed = getThreadLocalBuffer(DECOMPRESSED, decompressedLength);
try {
switch (blockType) {
case BLOCK_TYPE_NON_COMPRESSED:
try (StreamInput streamInput = reference.streamInput()) {
streamInput.readBytes(decompressed, 0, decompressedLength);
}
break;
case BLOCK_TYPE_COMPRESSED:
BytesRef ref = reference.iterator().next();
final byte[] compressed;
final int compressedOffset;
if (ref.length >= compressedLength) {
compressed = ref.bytes;
compressedOffset = ref.offset;
} else {
compressed = getThreadLocalBuffer(COMPRESSED, compressedLength);
compressedOffset = 0;
try (StreamInput streamInput = reference.streamInput()) {
streamInput.readBytes(compressed, 0, compressedLength);
}
}
decompressor.decompress(compressed, compressedOffset, decompressed, 0, decompressedLength);
break;
default:
throw new IllegalStateException(
String.format(
Locale.ROOT,
"unexpected blockType: %d (expected: %d or %d)",
blockType,
BLOCK_TYPE_NON_COMPRESSED,
BLOCK_TYPE_COMPRESSED
)
);
}
// Skip inbound bytes after we processed them.
bytesConsumed += compressedLength;
int bytesToCopy = decompressedLength;
int uncompressedOffset = 0;
while (bytesToCopy > 0) {
maybeAddNewPage();
final Recycler.V<BytesRef> page = pages.getLast();
int toCopy = Math.min(bytesToCopy, pageLength - pageOffset);
System.arraycopy(decompressed, uncompressedOffset, page.v().bytes, page.v().offset + pageOffset, toCopy);
pageOffset += toCopy;
bytesToCopy -= toCopy;
uncompressedOffset += toCopy;
}
currentState = State.INIT_BLOCK;
} catch (LZ4Exception e) {
throw new IllegalStateException(e);
}
break;
case FINISHED:
break;
case CORRUPTED:
throw new IllegalStateException("LZ4 stream corrupted.");
default:
throw new IllegalStateException();
}
} catch (IOException e) {
currentState = State.CORRUPTED;
throw e;
}
return bytesConsumed;
}
private static byte[] getThreadLocalBuffer(ThreadLocal<byte[]> threadLocal, int requiredSize) {
byte[] buffer = threadLocal.get();
if (requiredSize > buffer.length) {
buffer = new byte[requiredSize];
threadLocal.set(buffer);
}
return buffer;
}
/**
* Returns {@code true} if and only if the end of the compressed stream
* has been reached.
*/
public boolean isClosed() {
return currentState == State.FINISHED;
}
}
| State |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/bugs/_3668/Child.java | {
"start": 376,
"end": 426
} | class ____ extends Child { }
public static | ChildA |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCWaitForProxy.java | {
"start": 3932,
"end": 5034
} | class ____ extends SubjectInheritingThread {
private Throwable caught;
private int connectRetries;
private volatile boolean waitStarted = false;
private RpcThread(int connectRetries) {
this.connectRetries = connectRetries;
}
@Override
public void work() {
try {
Configuration config = new Configuration(conf);
config.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,
connectRetries);
config.setInt(
IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
connectRetries);
waitStarted = true;
short invalidPort = 20;
InetSocketAddress invalidAddress = new InetSocketAddress(ADDRESS,
invalidPort);
TestRpcBase.TestRpcService proxy = RPC.getProxy(
TestRpcBase.TestRpcService.class,
1L, invalidAddress, conf);
// Test echo method
proxy.echo(null, newEchoRequest("hello"));
} catch (Throwable throwable) {
caught = throwable;
}
}
public Throwable getCaught() {
return caught;
}
}
}
| RpcThread |
java | grpc__grpc-java | binder/src/main/java/io/grpc/binder/AsyncSecurityPolicy.java | {
"start": 995,
"end": 1337
} | class ____ the asynchronous version of {@link SecurityPolicy}, allowing
* implementations of authorization logic that involves slow or asynchronous calls without
* necessarily blocking the calling thread.
*
* @see SecurityPolicy
*/
@ExperimentalApi("https://github.com/grpc/grpc-java/issues/10566")
@CheckReturnValue
public abstract | provides |
java | playframework__playframework | core/play/src/main/java/play/mvc/Security.java | {
"start": 2245,
"end": 3002
} | class ____ extends Results {
/**
* Retrieves the username from the HTTP request; the default is to read from the session cookie.
*
* @param req the current request
* @return the username if the user is authenticated.
*/
public Optional<String> getUsername(Request req) {
return req.session().get("username");
}
/**
* Generates an alternative result if the user is not authenticated; the default a simple '401
* Not Authorized' page.
*
* @param req the current request
* @return a <code>401 Not Authorized</code> result
*/
public Result onUnauthorized(Request req) {
return unauthorized(views.html.defaultpages.unauthorized.render(req.asScala()));
}
}
}
| Authenticator |
java | elastic__elasticsearch | x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/TruncatingRequestManager.java | {
"start": 1136,
"end": 2591
} | class ____ extends BaseRequestManager {
private static final Logger logger = LogManager.getLogger(TruncatingRequestManager.class);
private final ResponseHandler responseHandler;
private final Function<Truncator.TruncationResult, Request> requestCreator;
private final Integer maxInputTokens;
public TruncatingRequestManager(
ThreadPool threadPool,
RateLimitGroupingModel rateLimitGroupingModel,
ResponseHandler responseHandler,
Function<Truncator.TruncationResult, Request> requestCreator,
@Nullable Integer maxInputTokens
) {
super(threadPool, rateLimitGroupingModel);
this.responseHandler = Objects.requireNonNull(responseHandler);
this.requestCreator = Objects.requireNonNull(requestCreator);
this.maxInputTokens = maxInputTokens;
}
@Override
public void execute(
InferenceInputs inferenceInputs,
RequestSender requestSender,
Supplier<Boolean> hasRequestCompletedFunction,
ActionListener<InferenceServiceResults> listener
) {
var docsInput = inferenceInputs.castTo(EmbeddingsInput.class).getTextInputs();
var truncatedInput = truncate(docsInput, maxInputTokens);
var request = requestCreator.apply(truncatedInput);
execute(new ExecutableInferenceRequest(requestSender, logger, request, responseHandler, hasRequestCompletedFunction, listener));
}
}
| TruncatingRequestManager |
java | ReactiveX__RxJava | src/test/java/io/reactivex/rxjava3/internal/operators/flowable/FlowableToListTest.java | {
"start": 1305,
"end": 12025
} | class ____ extends RxJavaTest {
@Test
public void listFlowable() {
Flowable<String> w = Flowable.fromIterable(Arrays.asList("one", "two", "three"));
Flowable<List<String>> flowable = w.toList().toFlowable();
Subscriber<List<String>> subscriber = TestHelper.mockSubscriber();
flowable.subscribe(subscriber);
verify(subscriber, times(1)).onNext(Arrays.asList("one", "two", "three"));
verify(subscriber, Mockito.never()).onError(any(Throwable.class));
verify(subscriber, times(1)).onComplete();
}
@Test
public void listViaFlowableFlowable() {
Flowable<String> w = Flowable.fromIterable(Arrays.asList("one", "two", "three"));
Flowable<List<String>> flowable = w.toList().toFlowable();
Subscriber<List<String>> subscriber = TestHelper.mockSubscriber();
flowable.subscribe(subscriber);
verify(subscriber, times(1)).onNext(Arrays.asList("one", "two", "three"));
verify(subscriber, Mockito.never()).onError(any(Throwable.class));
verify(subscriber, times(1)).onComplete();
}
@Test
public void listMultipleSubscribersFlowable() {
Flowable<String> w = Flowable.fromIterable(Arrays.asList("one", "two", "three"));
Flowable<List<String>> flowable = w.toList().toFlowable();
Subscriber<List<String>> subscriber1 = TestHelper.mockSubscriber();
flowable.subscribe(subscriber1);
Subscriber<List<String>> subscriber2 = TestHelper.mockSubscriber();
flowable.subscribe(subscriber2);
List<String> expected = Arrays.asList("one", "two", "three");
verify(subscriber1, times(1)).onNext(expected);
verify(subscriber1, Mockito.never()).onError(any(Throwable.class));
verify(subscriber1, times(1)).onComplete();
verify(subscriber2, times(1)).onNext(expected);
verify(subscriber2, Mockito.never()).onError(any(Throwable.class));
verify(subscriber2, times(1)).onComplete();
}
@Test
public void listWithBlockingFirstFlowable() {
Flowable<String> f = Flowable.fromIterable(Arrays.asList("one", "two", "three"));
List<String> actual = f.toList().toFlowable().blockingFirst();
Assert.assertEquals(Arrays.asList("one", "two", "three"), actual);
}
@Test
public void backpressureHonoredFlowable() {
Flowable<List<Integer>> w = Flowable.just(1, 2, 3, 4, 5).toList().toFlowable();
TestSubscriber<List<Integer>> ts = new TestSubscriber<>(0L);
w.subscribe(ts);
ts.assertNoValues();
ts.assertNoErrors();
ts.assertNotComplete();
ts.request(1);
ts.assertValue(Arrays.asList(1, 2, 3, 4, 5));
ts.assertNoErrors();
ts.assertComplete();
ts.request(1);
ts.assertValue(Arrays.asList(1, 2, 3, 4, 5));
ts.assertNoErrors();
ts.assertComplete();
}
@Test
public void capacityHintFlowable() {
Flowable.range(1, 10)
.toList(4)
.toFlowable()
.test()
.assertResult(Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10));
}
@Test
public void list() {
Flowable<String> w = Flowable.fromIterable(Arrays.asList("one", "two", "three"));
Single<List<String>> single = w.toList();
SingleObserver<List<String>> observer = TestHelper.mockSingleObserver();
single.subscribe(observer);
verify(observer, times(1)).onSuccess(Arrays.asList("one", "two", "three"));
verify(observer, Mockito.never()).onError(any(Throwable.class));
}
@Test
public void listViaFlowable() {
Flowable<String> w = Flowable.fromIterable(Arrays.asList("one", "two", "three"));
Single<List<String>> single = w.toList();
SingleObserver<List<String>> observer = TestHelper.mockSingleObserver();
single.subscribe(observer);
verify(observer, times(1)).onSuccess(Arrays.asList("one", "two", "three"));
verify(observer, Mockito.never()).onError(any(Throwable.class));
}
@Test
public void listMultipleSubscribers() {
Flowable<String> w = Flowable.fromIterable(Arrays.asList("one", "two", "three"));
Single<List<String>> single = w.toList();
SingleObserver<List<String>> o1 = TestHelper.mockSingleObserver();
single.subscribe(o1);
SingleObserver<List<String>> o2 = TestHelper.mockSingleObserver();
single.subscribe(o2);
List<String> expected = Arrays.asList("one", "two", "three");
verify(o1, times(1)).onSuccess(expected);
verify(o1, Mockito.never()).onError(any(Throwable.class));
verify(o2, times(1)).onSuccess(expected);
verify(o2, Mockito.never()).onError(any(Throwable.class));
}
@Test
public void listWithBlockingFirst() {
Flowable<String> f = Flowable.fromIterable(Arrays.asList("one", "two", "three"));
List<String> actual = f.toList().blockingGet();
Assert.assertEquals(Arrays.asList("one", "two", "three"), actual);
}
static void await(CyclicBarrier cb) {
try {
cb.await();
} catch (InterruptedException ex) {
ex.printStackTrace();
} catch (BrokenBarrierException ex) {
ex.printStackTrace();
}
}
@Test
public void capacityHint() {
Flowable.range(1, 10)
.toList(4)
.test()
.assertResult(Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10));
}
@Test
public void dispose() {
TestHelper.checkDisposed(Flowable.just(1).toList().toFlowable());
TestHelper.checkDisposed(Flowable.just(1).toList());
}
@Test
public void error() {
Flowable.error(new TestException())
.toList()
.toFlowable()
.test()
.assertFailure(TestException.class);
}
@Test
public void errorSingle() {
Flowable.error(new TestException())
.toList()
.test()
.assertFailure(TestException.class);
}
@Test
public void collectionSupplierThrows() {
Flowable.just(1)
.toList(new Supplier<Collection<Integer>>() {
@Override
public Collection<Integer> get() throws Exception {
throw new TestException();
}
})
.toFlowable()
.test()
.assertFailure(TestException.class);
}
@Test
public void collectionSupplierReturnsNull() {
Flowable.just(1)
.toList(new Supplier<Collection<Integer>>() {
@Override
public Collection<Integer> get() throws Exception {
return null;
}
})
.toFlowable()
.to(TestHelper.<Collection<Integer>>testConsumer())
.assertFailure(NullPointerException.class)
.assertErrorMessage(ExceptionHelper.nullWarning("The collectionSupplier returned a null Collection."));
}
@Test
public void singleCollectionSupplierThrows() {
Flowable.just(1)
.toList(new Supplier<Collection<Integer>>() {
@Override
public Collection<Integer> get() throws Exception {
throw new TestException();
}
})
.test()
.assertFailure(TestException.class);
}
@Test
public void singleCollectionSupplierReturnsNull() {
Flowable.just(1)
.toList(new Supplier<Collection<Integer>>() {
@Override
public Collection<Integer> get() throws Exception {
return null;
}
})
.to(TestHelper.<Collection<Integer>>testConsumer())
.assertFailure(NullPointerException.class)
.assertErrorMessage(ExceptionHelper.nullWarning("The collectionSupplier returned a null Collection."));
}
@Test
public void onNextCancelRace() {
for (int i = 0; i < TestHelper.RACE_DEFAULT_LOOPS; i++) {
final PublishProcessor<Integer> pp = PublishProcessor.create();
final TestObserver<List<Integer>> to = pp.toList().test();
Runnable r1 = new Runnable() {
@Override
public void run() {
pp.onNext(1);
}
};
Runnable r2 = new Runnable() {
@Override
public void run() {
to.dispose();
}
};
TestHelper.race(r1, r2);
}
}
@Test
public void onNextCancelRaceFlowable() {
for (int i = 0; i < TestHelper.RACE_DEFAULT_LOOPS; i++) {
final PublishProcessor<Integer> pp = PublishProcessor.create();
final TestSubscriber<List<Integer>> ts = pp.toList().toFlowable().test();
Runnable r1 = new Runnable() {
@Override
public void run() {
pp.onNext(1);
}
};
Runnable r2 = new Runnable() {
@Override
public void run() {
ts.cancel();
}
};
TestHelper.race(r1, r2);
}
}
@Test
public void onCompleteCancelRaceFlowable() {
for (int i = 0; i < TestHelper.RACE_DEFAULT_LOOPS; i++) {
final PublishProcessor<Integer> pp = PublishProcessor.create();
final TestSubscriber<List<Integer>> ts = pp.toList().toFlowable().test();
pp.onNext(1);
Runnable r1 = new Runnable() {
@Override
public void run() {
pp.onComplete();
}
};
Runnable r2 = new Runnable() {
@Override
public void run() {
ts.cancel();
}
};
TestHelper.race(r1, r2);
if (ts.values().size() != 0) {
ts.assertValue(Arrays.asList(1))
.assertNoErrors();
}
}
}
@Test
public void doubleOnSubscribe() {
TestHelper.checkDoubleOnSubscribeFlowable(new Function<Flowable<Object>, Flowable<List<Object>>>() {
@Override
public Flowable<List<Object>> apply(Flowable<Object> f)
throws Exception {
return f.toList().toFlowable();
}
});
TestHelper.checkDoubleOnSubscribeFlowableToSingle(new Function<Flowable<Object>, Single<List<Object>>>() {
@Override
public Single<List<Object>> apply(Flowable<Object> f)
throws Exception {
return f.toList();
}
});
}
}
| FlowableToListTest |
java | google__error-prone | check_api/src/main/java/com/google/errorprone/matchers/method/MethodInvocationMatcher.java | {
"start": 10052,
"end": 19143
} | class ____ {
static Matcher<ExpressionTree> from(
Map<Set<Node>, NodeWithDefault> mappings, NodeWithDefault root) {
BiPredicate<Context, VisitorState> pred = traverse(mappings, root);
return (tree, state) -> {
Optional<Context> ctx = Context.create(tree);
// Could be ctx.map(...).orElse(false), but why pay to box the Boolean?
return ctx.isPresent() && pred.test(ctx.get(), state);
};
}
private static BiPredicate<Context, VisitorState> traverse(
Map<Set<Node>, NodeWithDefault> mappings, NodeWithDefault root) {
if (root.states.contains(ACCEPT)) {
// If there was any path from the root to the accept node, the predicate matched.
return (ctx, state) -> true;
}
SetMultimap<Token, Node> children = root.mapping;
if (children.isEmpty()) {
Preconditions.checkArgument(root.def != null, "Found node with no mappings and no default");
// Since this node is only a default, we don't have to bother checking its token type at
// all, and can just return the next matcher we "would have" unconditionally delegated to.
return traverse(mappings, mappings.get(root.def));
}
ImmutableSet<TokenType> tokenTypes =
children.keySet().stream().map(Token::type).collect(ImmutableSet.toImmutableSet());
Preconditions.checkArgument(
tokenTypes.size() == 1,
"Found mismatched token types in node with mappings %s",
children);
// We have a valid input. Translate each of its children into a Predicate, and return a new
// Predicate that delegates appropriately depending on context
TokenType type = tokenTypes.iterator().next(); // safe since the set is a singleton.
BiPredicate<Context, VisitorState> defaultBehavior;
if (root.def == null) {
defaultBehavior = (ctx, state) -> false;
} else {
defaultBehavior = traverse(mappings, mappings.get(root.def));
}
Map<Object, BiPredicate<Context, VisitorState>> lookup = new HashMap<>();
@SuppressWarnings("UnstableApiUsage")
Set<Map.Entry<Token, Set<Node>>> entries = Multimaps.asMap(children).entrySet();
// Would be cleaner as a stream collecting into a map, but the cost of that stream operation
// is non-negligible and DFA compilation needs to be faster.
for (Map.Entry<Token, Set<Node>> entry : entries) {
lookup.put(
entry.getKey().comparisonKey(), traverse(mappings, mappings.get(entry.getValue())));
}
return switch (type) {
case RECEIVER_SUPERTYPE ->
(ctx, state) -> {
Type receiverType = (Type) TokenType.RECEIVER_SUPERTYPE.extract(ctx, state);
// Have to iterate here because subclassing can't be checked by lookup.
for (Map.Entry<Object, BiPredicate<Context, VisitorState>> child :
lookup.entrySet()) {
if (ASTHelpers.isSubtype(
receiverType, state.getTypeFromString((String) child.getKey()), state)) {
return child.getValue().test(ctx, state);
}
}
return defaultBehavior.test(ctx, state);
};
default ->
(ctx, state) -> {
// All other token types can be checked via a map lookup.
Object lookupKey = type.extract(ctx, state);
BiPredicate<Context, VisitorState> child = lookup.get(lookupKey);
if (child != null) {
return child.test(ctx, state);
}
return defaultBehavior.test(ctx, state);
};
};
}
}
/**
* Constructs a Matcher that matches for method invocations (including constructor invocations)
* satisfying at least one of the given Rule specifications. For an easy way to create such Rules,
* see the factories in {@link com.google.errorprone.matchers.Matchers} returning subtypes of
* {@link com.google.errorprone.matchers.method.MethodMatchers.MethodMatcher}.
*/
public static Matcher<ExpressionTree> compile(Iterable<Rule> rules) {
// A set of Rule objects represents a predicate stated in disjunctive normal form, where the
// atoms are equality tests on parts of a MethodSymbol allowed by TokenType and Token classes.
//
// We optimize for frequent evaluation of this predicate by first compiling it into a graph
// where each node has a single outgoing edge for each relevant token, so that each token
// comparison is just a lookup in a map of edges, instead of a linear scan over N rules.
// The final graph will have one root state, and one accept state; if we reach the accept, we
// say the predicate matches, and if we ever find no matching edge from the current node, we say
// the predicate fails to match. To construct this graph optimally, we start with an NFA with
// one path from the root to accept per rule; and then apply a modified version of the power-set
// construction to reduce it to an equivalent DFA.
Table<Node, Optional<Token>, Node> nfa = HashBasedTable.create();
ImmutableSet.Builder<Node> rootsBuilder = ImmutableSet.builder();
for (Rule rule : rules) {
ImmutableMap<TokenType, ? extends Set<Token>> required = rule.required();
int numTokens = required.size();
if (numTokens == 0) {
// Forget this whole graph business if one of the alternatives is "anything". This isn't
// just an optimization: it simplifies building the graph if we know no rules are empty.
return (tree, state) -> true;
}
Node root = new Node();
rootsBuilder.add(root);
Node src = root;
int tokensHandled = 0;
for (TokenType type : TokenType.values()) {
Optional<Set<Token>> labels = Optional.ofNullable(required.get(type));
if (labels.isPresent()) {
tokensHandled++;
}
boolean lastToken = tokensHandled == numTokens;
Node dst = lastToken ? ACCEPT : new Node();
if (labels.isPresent()) {
for (Token label : labels.get()) {
nfa.put(src, Optional.of(label), dst);
}
} else {
nfa.put(src, Optional.empty(), dst);
}
if (lastToken) {
break; // No transitions out of the accept state.
}
src = dst;
}
}
ImmutableSet<Node> roots = rootsBuilder.build();
// TODO(amalloy): When converting to a DFA, we could use the information that there is
// exactly one accept state
// (and no transitions out of that accept state) to help us prune unnecessary identical states
// from the graph. Instead of starting from the root and proceeding forwards, we could start
// from the
// accept state and proceed backwards through the graph. If we ever have two nodes with exactly
// the same set of outgoing edges, we could replace them with a single node.
Map<Set<Node>, NodeWithDefault> mappings = new HashMap<>();
ArrayDeque<Set<Node>> open = new ArrayDeque<>();
open.add(roots);
while (!open.isEmpty()) {
Set<Node> curr = open.removeFirst();
Set<Node> acceptsAny = new HashSet<>();
SetMultimap<Token, Node> destinations = HashMultimap.create();
// First collect all the nodes that accept any token at all
for (Node node : curr) {
for (Map.Entry<Optional<Token>, Node> entry : nfa.row(node).entrySet()) {
if (entry.getKey().isEmpty()) {
acceptsAny.add(entry.getValue());
}
}
}
// Then collect the pickier nodes that want a specific token. Now instead of going to that
// specific node, that token takes us to the union of that node and all "any" nodes.
for (Node node : curr) {
for (Map.Entry<Optional<Token>, Node> entry : nfa.row(node).entrySet()) {
entry
.getKey()
.ifPresent(
label -> {
destinations.put(label, entry.getValue());
destinations.putAll(label, acceptsAny);
});
}
}
mappings.put(
curr, new NodeWithDefault(curr, acceptsAny.isEmpty() ? null : acceptsAny, destinations));
if (!acceptsAny.isEmpty()) {
open.addLast(acceptsAny);
}
// asMap has been @Beta and unmodified for 6 years. I'll take my chances. If it changes, we
// can just reimplement this simple helper.
@SuppressWarnings("UnstableApiUsage")
Collection<Set<Node>> values = Multimaps.asMap(destinations).values();
open.addAll(values);
}
// At this point, mappings has all the information we need, but it's burdened with a bunch of
// now-unnecessary Set<Node> objects for its map keys. We want to replace those with something
// lightweight, and convert the indirect references through mappings to a direct pointer lookup.
return GraphMatcher.from(mappings, mappings.get(roots));
}
private MethodInvocationMatcher() {}
}
| GraphMatcher |
java | apache__flink | flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/TableChange.java | {
"start": 40329,
"end": 40737
} | interface ____ extends TableChange {}
// --------------------------------------------------------------------------------------------
// Materialized table change
// --------------------------------------------------------------------------------------------
/** {@link MaterializedTableChange} represents the modification of the materialized table. */
@PublicEvolving
| CatalogTableChange |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/query/sqm/mutation/internal/inline/InlineMutationStrategy.java | {
"start": 1267,
"end": 3426
} | class ____ implements SqmMultiTableMutationStrategy {
private final Function<SqmDeleteOrUpdateStatement<?>,MatchingIdRestrictionProducer> matchingIdsStrategy;
public InlineMutationStrategy(Dialect dialect) {
this( determinePredicateProducer( dialect ) );
}
private static Function<SqmDeleteOrUpdateStatement<?>,MatchingIdRestrictionProducer> determinePredicateProducer(Dialect dialect) {
return statement -> new InPredicateRestrictionProducer();
}
public InlineMutationStrategy(Function<SqmDeleteOrUpdateStatement<?>,MatchingIdRestrictionProducer> matchingIdsStrategy) {
this.matchingIdsStrategy = matchingIdsStrategy;
}
@Override
public MultiTableHandlerBuildResult buildHandler(SqmDeleteOrUpdateStatement<?> sqmStatement, DomainParameterXref domainParameterXref, DomainQueryExecutionContext context) {
final MutableObject<JdbcParameterBindings> firstJdbcParameterBindings = new MutableObject<>();
final MultiTableHandler multiTableHandler = sqmStatement instanceof SqmDeleteStatement<?> sqmDelete
? buildHandler( sqmDelete, domainParameterXref, context, firstJdbcParameterBindings )
: buildHandler( (SqmUpdateStatement<?>) sqmStatement, domainParameterXref, context, firstJdbcParameterBindings );
return new MultiTableHandlerBuildResult( multiTableHandler, firstJdbcParameterBindings.get() );
}
public MultiTableHandler buildHandler(SqmUpdateStatement<?> sqmUpdate, DomainParameterXref domainParameterXref, DomainQueryExecutionContext context, MutableObject<JdbcParameterBindings> firstJdbcParameterBindingsConsumer) {
return new InlineUpdateHandler(
matchingIdsStrategy.apply( sqmUpdate ),
sqmUpdate,
domainParameterXref,
context,
firstJdbcParameterBindingsConsumer
);
}
public MultiTableHandler buildHandler(SqmDeleteStatement<?> sqmDelete, DomainParameterXref domainParameterXref, DomainQueryExecutionContext context, MutableObject<JdbcParameterBindings> firstJdbcParameterBindingsConsumer) {
return new InlineDeleteHandler(
matchingIdsStrategy.apply( sqmDelete ),
sqmDelete,
domainParameterXref,
context,
firstJdbcParameterBindingsConsumer
);
}
}
| InlineMutationStrategy |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/optionallong/OptionalLongAssert_isPresent_Test.java | {
"start": 1005,
"end": 1712
} | class ____ {
@Test
void should_pass_when_OptionalLong_is_present() {
assertThat(OptionalLong.of(10L)).isPresent();
}
@Test
void should_fail_when_OptionalLong_is_empty() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> assertThat(OptionalLong.empty()).isPresent())
.withMessage(shouldBePresent(OptionalLong.empty()).create());
}
@Test
void should_fail_when_OptionalLong_is_null() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> assertThat((OptionalLong) null).isPresent())
.withMessage(actualIsNull());
}
}
| OptionalLongAssert_isPresent_Test |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/sql/results/LoadingLogger.java | {
"start": 867,
"end": 1657
} | interface ____ extends BasicLogger {
String LOGGER_NAME = ResultsLogger.LOGGER_NAME + ".loading";
LoadingLogger LOADING_LOGGER = Logger.getMessageLogger( MethodHandles.lookup(), LoadingLogger.class, LOGGER_NAME );
static String subLoggerName(String subName) {
return LOGGER_NAME + "." + subName;
}
static Logger subLogger(String subName) {
return Logger.getLogger( subLoggerName( subName ) );
}
@LogMessage(level = DEBUG)
@Message(id = 90005801,
value = "Found matching entity in context, but it is scheduled for removal (returning null)")
void foundEntityScheduledForRemoval();
@LogMessage(level = DEBUG)
@Message(id = 90005802,
value = "Found matching entity in context, but the matched entity had an inconsistent type")
void foundEntityWrongType();
}
| LoadingLogger |
java | elastic__elasticsearch | x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/settings/RestUpdateSecuritySettingsAction.java | {
"start": 914,
"end": 2213
} | class ____ extends SecurityBaseRestHandler {
public RestUpdateSecuritySettingsAction(Settings settings, XPackLicenseState licenseState) {
super(settings, licenseState);
}
@Override
public String getName() {
return "security_update_settings";
}
@Override
public List<Route> routes() {
return List.of(new Route(PUT, "/_security/settings"));
}
@Override
protected RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) throws IOException {
final UpdateSecuritySettingsAction.Request req;
try (var parser = request.contentParser()) {
req = UpdateSecuritySettingsAction.Request.parse(
parser,
(mainIndexSettings, tokensIndexSettings, profilesIndexSettings) -> new UpdateSecuritySettingsAction.Request(
RestUtils.getMasterNodeTimeout(request),
RestUtils.getAckTimeout(request),
mainIndexSettings,
tokensIndexSettings,
profilesIndexSettings
)
);
}
return restChannel -> client.execute(UpdateSecuritySettingsAction.INSTANCE, req, new RestToXContentListener<>(restChannel));
}
}
| RestUpdateSecuritySettingsAction |
java | google__guice | core/src/com/google/inject/spi/InjectionPoint.java | {
"start": 20444,
"end": 21382
} | class ____ extends InjectableMember {
final Method method;
/**
* true if this method overrode a method that was annotated with com.google.inject.Inject. used
* to allow different override behavior for guice inject vs jsr330 Inject
*/
boolean overrodeGuiceInject;
InjectableMethod(TypeLiteral<?> declaringType, Method method, Annotation atInject) {
super(declaringType, atInject);
this.method = method;
}
@Override
InjectionPoint toInjectionPoint() {
return new InjectionPoint(declaringType, method, optional);
}
public boolean isFinal() {
return Modifier.isFinal(method.getModifiers());
}
}
static Annotation getAtInject(AnnotatedElement member) {
Annotation a = member.getAnnotation(jakarta.inject.Inject.class);
return a == null ? member.getAnnotation(Inject.class) : a;
}
/** Linked list of injectable members. */
static | InjectableMethod |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/cache/LazyOneToOneWithCollectionTest.java | {
"start": 5620,
"end": 5871
} | class ____ {
@Id
@GeneratedValue
private Long id;
private String name;
@ManyToOne
private Diagram diagram;
public Entry() {
}
public Entry(String name, Diagram diagram) {
this.name = name;
this.diagram = diagram;
}
}
}
| Entry |
java | google__error-prone | core/src/test/java/com/google/errorprone/refaster/testdata/output/EmitCommentTemplateExample.java | {
"start": 656,
"end": 772
} | class ____ {
public void example() {
// comment
System.out.println("foobar");
}
}
| EmitCommentTemplateExample |
java | redisson__redisson | redisson/src/main/java/org/redisson/api/RBucketsReactive.java | {
"start": 790,
"end": 1635
} | interface ____ {
/**
* Returns Redis object mapped by key. Result Map is not contains
* key-value entry for null values.
*
* @param <V> type of value
* @param keys - keys
* @return Map with name of bucket as key and bucket as value
*/
<V> Mono<Map<String, V>> get(String... keys);
/**
* Try to save objects mapped by Redis key.
* If at least one of them is already exist then
* don't set none of them.
*
* @param buckets - map of buckets
* @return <code>true</code> if object has been set otherwise <code>false</code>
*/
Mono<Boolean> trySet(Map<String, ?> buckets);
/**
* Saves objects mapped by Redis key.
*
* @param buckets - map of buckets
* @return void
*/
Mono<Void> set(Map<String, ?> buckets);
}
| RBucketsReactive |
java | elastic__elasticsearch | libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/ExponentialHistogramBuilder.java | {
"start": 1240,
"end": 11815
} | class ____ implements Releasable {
private static final int DEFAULT_ESTIMATED_BUCKET_COUNT = 32;
private final ExponentialHistogramCircuitBreaker breaker;
private int scale;
private ZeroBucket zeroBucket = ZeroBucket.minimalEmpty();
private Double sum;
private Double min;
private Double max;
private int estimatedBucketCount = DEFAULT_ESTIMATED_BUCKET_COUNT;
// If the buckets are provided in order, we directly build the histogram to avoid unnecessary copies and allocations
// If a bucket is received out of order, we fallback to storing the buckets in the TreeMaps and build the histogram at the end.
private FixedCapacityExponentialHistogram result;
// Visible for testing to ensure that the low-allocation path is taken for ordered buckets
TreeMap<Long, Long> negativeBuckets;
TreeMap<Long, Long> positiveBuckets;
private boolean resultAlreadyReturned = false;
ExponentialHistogramBuilder(int scale, ExponentialHistogramCircuitBreaker breaker) {
this.breaker = breaker;
this.scale = scale;
}
ExponentialHistogramBuilder(ExponentialHistogram toCopy, ExponentialHistogramCircuitBreaker breaker) {
this(toCopy.scale(), breaker);
zeroBucket(toCopy.zeroBucket());
sum(toCopy.sum());
min(toCopy.min());
max(toCopy.max());
estimatedBucketCount(toCopy.negativeBuckets().bucketCount() + toCopy.positiveBuckets().bucketCount());
BucketIterator negBuckets = toCopy.negativeBuckets().iterator();
while (negBuckets.hasNext()) {
setNegativeBucket(negBuckets.peekIndex(), negBuckets.peekCount());
negBuckets.advance();
}
BucketIterator posBuckets = toCopy.positiveBuckets().iterator();
while (posBuckets.hasNext()) {
setPositiveBucket(posBuckets.peekIndex(), posBuckets.peekCount());
posBuckets.advance();
}
}
/**
* If known, sets the estimated total number of buckets to minimize unnecessary allocations.
* Only has an effect if invoked before the first call to
* {@link #setPositiveBucket(long, long)} and {@link #setNegativeBucket(long, long)}.
*
* @param totalBuckets the total number of buckets expected to be added
* @return the builder
*/
public ExponentialHistogramBuilder estimatedBucketCount(int totalBuckets) {
estimatedBucketCount = totalBuckets;
return this;
}
public ExponentialHistogramBuilder scale(int scale) {
this.scale = scale;
return this;
}
public ExponentialHistogramBuilder zeroBucket(ZeroBucket zeroBucket) {
this.zeroBucket = zeroBucket;
return this;
}
/**
* Sets the sum of the histogram values. If not set, the sum will be estimated from the buckets.
* @param sum the sum value
* @return the builder
*/
public ExponentialHistogramBuilder sum(double sum) {
this.sum = sum;
return this;
}
/**
* Sets the min value of the histogram values. If not set, the min will be estimated from the buckets.
* @param min the min value
* @return the builder
*/
public ExponentialHistogramBuilder min(double min) {
this.min = min;
return this;
}
/**
* Sets the max value of the histogram values. If not set, the max will be estimated from the buckets.
* @param max the max value
* @return the builder
*/
public ExponentialHistogramBuilder max(double max) {
this.max = max;
return this;
}
/**
* Sets the given bucket of the positive buckets. If the bucket already exists, it will be replaced.
* Buckets may be set in arbitrary order. However, for best performance and minimal allocations,
* buckets should be set in order of increasing index and all negative buckets should be set before positive buckets.
*
* @param index the index of the bucket
* @param count the count of the bucket, must be at least 1
* @return the builder
*/
public ExponentialHistogramBuilder setPositiveBucket(long index, long count) {
setBucket(index, count, true);
return this;
}
/**
* Sets the given bucket of the negative buckets. If the bucket already exists, it will be replaced.
* Buckets may be set in arbitrary order. However, for best performance and minimal allocations,
* buckets should be set in order of increasing index and all negative buckets should be set before positive buckets.
*
* @param index the index of the bucket
* @param count the count of the bucket, must be at least 1
* @return the builder
*/
public ExponentialHistogramBuilder setNegativeBucket(long index, long count) {
setBucket(index, count, false);
return this;
}
private void setBucket(long index, long count, boolean isPositive) {
if (count < 1) {
throw new IllegalArgumentException("Bucket count must be at least 1");
}
if (negativeBuckets == null && positiveBuckets == null) {
// so far, all received buckets were in order, try to directly build the result
if (result == null) {
// Initialize the result buffer if required
reallocateResultWithCapacity(estimatedBucketCount, false);
}
if ((isPositive && result.wasLastAddedBucketPositive() == false)
|| (isPositive == result.wasLastAddedBucketPositive() && index > result.getLastAddedBucketIndex())) {
// the new bucket is in order too, we can directly add the bucket
addBucketToResult(index, count, isPositive);
return;
}
}
// fallback to TreeMap if a bucket is received out of order
initializeBucketTreeMapsIfNeeded();
if (isPositive) {
positiveBuckets.put(index, count);
} else {
negativeBuckets.put(index, count);
}
}
private void initializeBucketTreeMapsIfNeeded() {
if (negativeBuckets == null) {
negativeBuckets = new TreeMap<>();
positiveBuckets = new TreeMap<>();
// copy existing buckets to the maps
if (result != null) {
BucketIterator it = result.negativeBuckets().iterator();
while (it.hasNext()) {
negativeBuckets.put(it.peekIndex(), it.peekCount());
it.advance();
}
it = result.positiveBuckets().iterator();
while (it.hasNext()) {
positiveBuckets.put(it.peekIndex(), it.peekCount());
it.advance();
}
}
}
}
private void addBucketToResult(long index, long count, boolean isPositive) {
if (resultAlreadyReturned) {
// we cannot modify the result anymore, create a new one
reallocateResultWithCapacity(result.getCapacity(), true);
}
assert resultAlreadyReturned == false;
boolean sufficientCapacity = result.tryAddBucket(index, count, isPositive);
if (sufficientCapacity == false) {
int newCapacity = Math.max(result.getCapacity() * 2, DEFAULT_ESTIMATED_BUCKET_COUNT);
reallocateResultWithCapacity(newCapacity, true);
boolean bucketAdded = result.tryAddBucket(index, count, isPositive);
assert bucketAdded : "Output histogram should have enough capacity";
}
}
private void reallocateResultWithCapacity(int newCapacity, boolean copyBucketsFromPreviousResult) {
FixedCapacityExponentialHistogram newResult = FixedCapacityExponentialHistogram.create(newCapacity, breaker);
if (copyBucketsFromPreviousResult && result != null) {
BucketIterator it = result.negativeBuckets().iterator();
while (it.hasNext()) {
boolean added = newResult.tryAddBucket(it.peekIndex(), it.peekCount(), false);
assert added : "Output histogram should have enough capacity";
it.advance();
}
it = result.positiveBuckets().iterator();
while (it.hasNext()) {
boolean added = newResult.tryAddBucket(it.peekIndex(), it.peekCount(), true);
assert added : "Output histogram should have enough capacity";
it.advance();
}
}
if (result != null && resultAlreadyReturned == false) {
Releasables.close(result);
}
resultAlreadyReturned = false;
result = newResult;
}
public ReleasableExponentialHistogram build() {
if (resultAlreadyReturned) {
// result was already returned on a previous call, return a new instance
reallocateResultWithCapacity(result.getCapacity(), true);
}
assert resultAlreadyReturned == false;
if (negativeBuckets != null) {
// copy buckets from tree maps into result
reallocateResultWithCapacity(negativeBuckets.size() + positiveBuckets.size(), false);
result.resetBuckets(scale);
negativeBuckets.forEach((index, count) -> result.tryAddBucket(index, count, false));
positiveBuckets.forEach((index, count) -> result.tryAddBucket(index, count, true));
} else {
if (result == null) {
// no buckets were added
reallocateResultWithCapacity(0, false);
}
result.setScale(scale);
}
result.setZeroBucket(zeroBucket);
double sumVal = (sum != null)
? sum
: ExponentialHistogramUtils.estimateSum(result.negativeBuckets().iterator(), result.positiveBuckets().iterator());
double minVal = (min != null)
? min
: ExponentialHistogramUtils.estimateMin(zeroBucket, result.negativeBuckets(), result.positiveBuckets()).orElse(Double.NaN);
double maxVal = (max != null)
? max
: ExponentialHistogramUtils.estimateMax(zeroBucket, result.negativeBuckets(), result.positiveBuckets()).orElse(Double.NaN);
result.setMin(minVal);
result.setMax(maxVal);
result.setSum(sumVal);
resultAlreadyReturned = true;
return result;
}
@Override
public void close() {
if (resultAlreadyReturned == false) {
Releasables.close(result);
}
}
}
| ExponentialHistogramBuilder |
java | elastic__elasticsearch | x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseRegistrationTests.java | {
"start": 828,
"end": 7090
} | class ____ extends AbstractClusterStateLicenseServiceTestCase {
public void testSelfGeneratedTrialLicense() throws Exception {
XPackLicenseState licenseState = TestUtils.newTestLicenseState();
setInitialState(null, licenseState, Settings.EMPTY, "trial");
when(discoveryNodes.isLocalNodeElectedMaster()).thenReturn(true);
licenseService.start();
ClusterState state = ClusterState.builder(new ClusterName("a")).build();
ArgumentCaptor<ClusterStateUpdateTask> stateUpdater = ArgumentCaptor.forClass(ClusterStateUpdateTask.class);
verify(clusterService, Mockito.times(1)).submitUnbatchedStateUpdateTask(any(), stateUpdater.capture());
ClusterState stateWithLicense = stateUpdater.getValue().execute(state);
LicensesMetadata licenseMetadata = stateWithLicense.metadata().custom(LicensesMetadata.TYPE);
assertNotNull(licenseMetadata);
assertNotNull(licenseMetadata.getLicense());
assertFalse(licenseMetadata.isEligibleForTrial());
assertEquals("trial", licenseMetadata.getLicense().type());
assertEquals(
clock.millis() + LicenseSettings.NON_BASIC_SELF_GENERATED_LICENSE_DURATION.millis(),
licenseMetadata.getLicense().expiryDate()
);
}
public void testSelfGeneratedBasicLicense() throws Exception {
XPackLicenseState licenseState = TestUtils.newTestLicenseState();
setInitialState(null, licenseState, Settings.EMPTY, "basic");
when(discoveryNodes.isLocalNodeElectedMaster()).thenReturn(true);
licenseService.start();
ClusterState state = ClusterState.builder(new ClusterName("a")).build();
ArgumentCaptor<ClusterStateUpdateTask> stateUpdater = ArgumentCaptor.forClass(ClusterStateUpdateTask.class);
verify(clusterService, Mockito.times(1)).submitUnbatchedStateUpdateTask(any(), stateUpdater.capture());
ClusterState stateWithLicense = stateUpdater.getValue().execute(state);
LicensesMetadata licenseMetadata = stateWithLicense.metadata().custom(LicensesMetadata.TYPE);
assertNotNull(licenseMetadata);
assertNotNull(licenseMetadata.getLicense());
assertTrue(licenseMetadata.isEligibleForTrial());
assertEquals("basic", licenseMetadata.getLicense().type());
assertEquals(LicenseSettings.BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS, licenseMetadata.getLicense().expiryDate());
}
public void testNonSelfGeneratedBasicLicenseIsReplaced() throws Exception {
long now = System.currentTimeMillis();
String uid = UUID.randomUUID().toString();
final License.Builder builder = License.builder()
.uid(uid)
.version(License.VERSION_CURRENT)
.expiryDate(dateMath("now+2h", now))
.startDate(now)
.issueDate(now)
.type("basic")
.issuedTo("customer")
.issuer("elasticsearch")
.maxNodes(5);
License license = TestUtils.generateSignedLicense(builder);
XPackLicenseState licenseState = TestUtils.newTestLicenseState();
setInitialState(license, licenseState, Settings.EMPTY);
when(discoveryNodes.isLocalNodeElectedMaster()).thenReturn(true);
licenseService.start();
Metadata.Builder mdBuilder = Metadata.builder();
mdBuilder.putCustom(LicensesMetadata.TYPE, new LicensesMetadata(license, null));
ClusterState state = ClusterState.builder(new ClusterName("a")).metadata(mdBuilder.build()).build();
ArgumentCaptor<ClusterStateUpdateTask> stateUpdater = ArgumentCaptor.forClass(ClusterStateUpdateTask.class);
verify(clusterService, Mockito.times(1)).submitUnbatchedStateUpdateTask(any(), stateUpdater.capture());
ClusterState stateWithLicense = stateUpdater.getValue().execute(state);
LicensesMetadata licenseMetadata = stateWithLicense.metadata().custom(LicensesMetadata.TYPE);
assertNotNull(licenseMetadata);
assertNotNull(licenseMetadata.getLicense());
assertTrue(licenseMetadata.isEligibleForTrial());
assertEquals("basic", licenseMetadata.getLicense().type());
assertEquals(LicenseSettings.BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS, licenseMetadata.getLicense().expiryDate());
assertEquals(uid, licenseMetadata.getLicense().uid());
}
public void testExpiredSelfGeneratedBasicLicenseIsExtended() throws Exception {
long now = System.currentTimeMillis();
String uid = UUID.randomUUID().toString();
License.Builder builder = License.builder()
.uid(uid)
.issuedTo("name")
.maxNodes(1000)
.issueDate(dateMath("now-10h", now))
.type("basic")
.expiryDate(dateMath("now-2h", now));
License license = SelfGeneratedLicense.create(builder, License.VERSION_CURRENT);
XPackLicenseState licenseState = TestUtils.newTestLicenseState();
setInitialState(license, licenseState, Settings.EMPTY);
when(discoveryNodes.isLocalNodeElectedMaster()).thenReturn(true);
licenseService.start();
Metadata.Builder mdBuilder = Metadata.builder();
mdBuilder.putCustom(LicensesMetadata.TYPE, new LicensesMetadata(license, null));
ClusterState state = ClusterState.builder(new ClusterName("a")).metadata(mdBuilder.build()).build();
ArgumentCaptor<ClusterStateUpdateTask> stateUpdater = ArgumentCaptor.forClass(ClusterStateUpdateTask.class);
verify(clusterService, Mockito.times(1)).submitUnbatchedStateUpdateTask(any(), stateUpdater.capture());
ClusterState stateWithLicense = stateUpdater.getValue().execute(state);
LicensesMetadata licenseMetadata = stateWithLicense.metadata().custom(LicensesMetadata.TYPE);
assertNotNull(licenseMetadata);
assertNotNull(licenseMetadata.getLicense());
assertTrue(licenseMetadata.isEligibleForTrial());
assertEquals("basic", licenseMetadata.getLicense().type());
assertEquals(LicenseSettings.BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS, licenseMetadata.getLicense().expiryDate());
assertEquals(uid, licenseMetadata.getLicense().uid());
}
}
| LicenseRegistrationTests |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/ConcatSourcesParam.java | {
"start": 939,
"end": 1928
} | class ____ extends StringParam {
/** Parameter name. */
public static final String NAME = "sources";
public static final String DEFAULT = "";
private static final Domain DOMAIN = new Domain(NAME, null);
private static String paths2String(Path[] paths) {
if (paths == null || paths.length == 0) {
return "";
}
final StringBuilder b = new StringBuilder(paths[0].toUri().getPath());
for(int i = 1; i < paths.length; i++) {
b.append(',').append(paths[i].toUri().getPath());
}
return b.toString();
}
/**
* Constructor.
* @param str a string representation of the parameter value.
*/
public ConcatSourcesParam(String str) {
super(DOMAIN, str);
}
public ConcatSourcesParam(Path[] paths) {
this(paths2String(paths));
}
@Override
public String getName() {
return NAME;
}
/** @return the absolute path. */
public final String[] getAbsolutePaths() {
return getValue().split(",");
}
}
| ConcatSourcesParam |
java | apache__commons-lang | src/main/java/org/apache/commons/lang3/reflect/MethodUtils.java | {
"start": 3026,
"end": 3407
} | class ____. Returns -1
* if the arguments aren't assignable. Fills a specific purpose for getMatchingMethod and is not generalized.
*
* @param fromClassArray the Class array to calculate the distance from.
* @param toClassArray the Class array to calculate the distance to.
* @return the aggregate number of inheritance hops between assignable argument | types |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/issue_3100/Issue3150.java | {
"start": 221,
"end": 735
} | class ____ extends TestCase {
public void test_for_issue() throws Exception {
MyRefAfterFilter refAfterFilterTest = new MyRefAfterFilter();
List<Item> items = new ArrayList<Item>(2);
Category category = new Category("category");
items.add(new Item("item1",category));
items.add(new Item("item2",category));
// System.out.println(JSON.toJSONString(items));
System.out.println(JSON.toJSONString(items, refAfterFilterTest));
}
public static | Issue3150 |
java | apache__camel | components/camel-spring-parent/camel-spring-xml/src/test/java/org/apache/camel/spring/processor/SpringInterceptFromUriRegexTest.java | {
"start": 1056,
"end": 1425
} | class ____ extends InterceptFromUriRegexTest {
@Override
public boolean isUseRouteBuilder() {
return false;
}
@Override
protected CamelContext createCamelContext() throws Exception {
return createSpringCamelContext(this, "org/apache/camel/spring/processor/SpringInterceptFromUriRegexTest.xml");
}
}
| SpringInterceptFromUriRegexTest |
java | elastic__elasticsearch | x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/MemoryUsageEstimationProcessManagerTests.java | {
"start": 1821,
"end": 10519
} | class ____ extends ESTestCase {
private static final String TASK_ID = "mem_est_123";
private static final String CONFIG_ID = "dummy";
private static final int NUM_ROWS = 100;
private static final int NUM_COLS = 4;
private static final MemoryUsageEstimationResult PROCESS_RESULT = new MemoryUsageEstimationResult(
ByteSizeValue.parseBytesSizeValue("20kB", ""),
ByteSizeValue.parseBytesSizeValue("10kB", "")
);
private ExecutorService executorServiceForProcess;
private AnalyticsProcess<MemoryUsageEstimationResult> process;
private AnalyticsProcessFactory<MemoryUsageEstimationResult> processFactory;
private DataFrameDataExtractor dataExtractor;
private DataFrameDataExtractorFactory dataExtractorFactory;
private DataFrameAnalyticsConfig dataFrameAnalyticsConfig;
private ActionListener<MemoryUsageEstimationResult> listener;
private ArgumentCaptor<MemoryUsageEstimationResult> resultCaptor;
private ArgumentCaptor<Exception> exceptionCaptor;
private MemoryUsageEstimationProcessManager processManager;
@SuppressWarnings("unchecked")
@Before
public void setUpMocks() {
executorServiceForProcess = mock(ExecutorService.class);
process = mock(AnalyticsProcess.class);
when(process.readAnalyticsResults()).thenReturn(List.of(PROCESS_RESULT).iterator());
processFactory = mock(AnalyticsProcessFactory.class);
when(processFactory.createAnalyticsProcess(any(), any(), anyBoolean(), any(), any())).thenReturn(process);
dataExtractor = mock(DataFrameDataExtractor.class);
when(dataExtractor.collectDataSummary()).thenReturn(new DataFrameDataExtractor.DataSummary(NUM_ROWS, NUM_COLS));
dataExtractorFactory = mock(DataFrameDataExtractorFactory.class);
when(dataExtractorFactory.newExtractor(anyBoolean())).thenReturn(dataExtractor);
when(dataExtractorFactory.getExtractedFields()).thenReturn(mock(ExtractedFields.class));
dataFrameAnalyticsConfig = DataFrameAnalyticsConfigTests.createRandom(CONFIG_ID);
listener = mock(ActionListener.class);
resultCaptor = ArgumentCaptor.forClass(MemoryUsageEstimationResult.class);
exceptionCaptor = ArgumentCaptor.forClass(Exception.class);
processManager = new MemoryUsageEstimationProcessManager(
EsExecutors.DIRECT_EXECUTOR_SERVICE,
executorServiceForProcess,
processFactory
);
}
public void testRunJob_EmptyDataFrame() {
when(dataExtractor.collectDataSummary()).thenReturn(new DataFrameDataExtractor.DataSummary(0, NUM_COLS));
processManager.runJobAsync(TASK_ID, dataFrameAnalyticsConfig, dataExtractorFactory, listener);
verify(listener).onFailure(exceptionCaptor.capture());
ElasticsearchException exception = (ElasticsearchException) exceptionCaptor.getValue();
assertThat(exception.status(), equalTo(RestStatus.BAD_REQUEST));
assertThat(exception.getMessage(), containsString(TASK_ID));
assertThat(exception.getMessage(), containsString("Unable to estimate memory usage"));
verifyNoMoreInteractions(process, listener);
}
public void testRunJob_NoResults() throws Exception {
when(process.readAnalyticsResults()).thenReturn(List.<MemoryUsageEstimationResult>of().iterator());
processManager.runJobAsync(TASK_ID, dataFrameAnalyticsConfig, dataExtractorFactory, listener);
verify(listener).onFailure(exceptionCaptor.capture());
ElasticsearchException exception = (ElasticsearchException) exceptionCaptor.getValue();
assertThat(exception.status(), equalTo(RestStatus.INTERNAL_SERVER_ERROR));
assertThat(exception.getMessage(), containsString(TASK_ID));
assertThat(exception.getMessage(), containsString("no results"));
InOrder inOrder = inOrder(process);
inOrder.verify(process).readAnalyticsResults();
inOrder.verify(process).readError();
inOrder.verify(process).close();
verifyNoMoreInteractions(process, listener);
}
public void testRunJob_MultipleResults() throws Exception {
when(process.readAnalyticsResults()).thenReturn(List.of(PROCESS_RESULT, PROCESS_RESULT).iterator());
processManager.runJobAsync(TASK_ID, dataFrameAnalyticsConfig, dataExtractorFactory, listener);
verify(listener).onFailure(exceptionCaptor.capture());
ElasticsearchException exception = (ElasticsearchException) exceptionCaptor.getValue();
assertThat(exception.status(), equalTo(RestStatus.INTERNAL_SERVER_ERROR));
assertThat(exception.getMessage(), containsString(TASK_ID));
assertThat(exception.getMessage(), containsString("more than one result"));
InOrder inOrder = inOrder(process);
inOrder.verify(process).readAnalyticsResults();
inOrder.verify(process).readError();
inOrder.verify(process).close();
verifyNoMoreInteractions(process, listener);
}
public void testRunJob_OneResult_ParseException() throws Exception {
when(process.readAnalyticsResults()).thenThrow(new ElasticsearchParseException("cannot parse result"));
processManager.runJobAsync(TASK_ID, dataFrameAnalyticsConfig, dataExtractorFactory, listener);
verify(listener).onFailure(exceptionCaptor.capture());
ElasticsearchException exception = (ElasticsearchException) exceptionCaptor.getValue();
assertThat(exception.status(), equalTo(RestStatus.INTERNAL_SERVER_ERROR));
assertThat(exception.getMessage(), containsString(TASK_ID));
assertThat(exception.getMessage(), containsString("cannot parse result"));
InOrder inOrder = inOrder(process);
inOrder.verify(process).readAnalyticsResults();
inOrder.verify(process).readError();
inOrder.verify(process).close();
verifyNoMoreInteractions(process, listener);
}
// close() itself throws: results were read fine, so the failure must be reported
// as a "closing process" error rather than a result-reading error.
public void testRunJob_FailsOnClose() throws Exception {
    doThrow(ExceptionsHelper.serverError("some LOG(ERROR) lines coming from cpp process")).when(process).close();

    processManager.runJobAsync(TASK_ID, dataFrameAnalyticsConfig, dataExtractorFactory, listener);

    verify(listener).onFailure(exceptionCaptor.capture());
    ElasticsearchException exception = (ElasticsearchException) exceptionCaptor.getValue();
    assertThat(exception.status(), equalTo(RestStatus.INTERNAL_SERVER_ERROR));
    assertThat(exception.getMessage(), containsString(TASK_ID));
    assertThat(exception.getMessage(), containsString("Error while closing process"));

    // Note the order differs from the read-failure tests: close() is attempted first,
    // and only after it throws is the process error channel read.
    InOrder inOrder = inOrder(process);
    inOrder.verify(process).readAnalyticsResults();
    inOrder.verify(process).close();
    inOrder.verify(process).readError();
    verifyNoMoreInteractions(process, listener);
}
// close() throws AND the process reports its own error text: the exception handed
// to the listener must aggregate the close failure, the close exception's message,
// and the error reported by the process itself.
public void testRunJob_FailsOnClose_ProcessReportsError() throws Exception {
    doThrow(ExceptionsHelper.serverError("some LOG(ERROR) lines coming from cpp process")).when(process).close();
    when(process.readError()).thenReturn("Error from inside the process");

    processManager.runJobAsync(TASK_ID, dataFrameAnalyticsConfig, dataExtractorFactory, listener);

    verify(listener).onFailure(exceptionCaptor.capture());
    ElasticsearchException exception = (ElasticsearchException) exceptionCaptor.getValue();
    assertThat(exception.status(), equalTo(RestStatus.INTERNAL_SERVER_ERROR));
    assertThat(exception.getMessage(), containsString(TASK_ID));
    assertThat(exception.getMessage(), containsString("Error while closing process"));
    assertThat(exception.getMessage(), containsString("some LOG(ERROR) lines coming from cpp process"));
    assertThat(exception.getMessage(), containsString("Error from inside the process"));

    // close() is attempted first; the error channel is read only after it throws.
    InOrder inOrder = inOrder(process);
    inOrder.verify(process).readAnalyticsResults();
    inOrder.verify(process).close();
    inOrder.verify(process).readError();
    verifyNoMoreInteractions(process, listener);
}
// Happy path: the default stubbing (set up elsewhere in the fixture) yields exactly
// one result, which must be delivered to the listener unchanged.
public void testRunJob_Ok() throws Exception {
    processManager.runJobAsync(TASK_ID, dataFrameAnalyticsConfig, dataExtractorFactory, listener);

    verify(listener).onResponse(resultCaptor.capture());
    MemoryUsageEstimationResult estimation = resultCaptor.getValue();
    assertThat(estimation, equalTo(PROCESS_RESULT));

    // Results are read before the process is closed; nothing else touches the mocks.
    InOrder callOrder = inOrder(process);
    callOrder.verify(process).readAnalyticsResults();
    callOrder.verify(process).close();
    verifyNoMoreInteractions(process, listener);
}
}
| MemoryUsageEstimationProcessManagerTests |
java | apache__camel | dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/JsltComponentBuilderFactory.java | {
"start": 1807,
"end": 6406
} | interface ____ extends ComponentBuilder<JsltComponent> {
/**
 * Whether to allow to use resource template from header or not (default
 * false). Enabling this allows to specify dynamic templates via message
 * header. However this can be seen as a potential security
 * vulnerability if the header is coming from a malicious user, so use
 * this with care.
 *
 * The option is a: <code>boolean</code> type.
 *
 * Default: false
 * Group: producer
 *
 * @param allowTemplateFromHeader the value to set
 * @return the dsl builder
 */
default JsltComponentBuilder allowTemplateFromHeader(boolean allowTemplateFromHeader) {
    // Generated fluent setter: forwards to the inherited doSetProperty under the
    // property name "allowTemplateFromHeader".
    doSetProperty("allowTemplateFromHeader", allowTemplateFromHeader);
    return this;
}
/**
 * Sets whether to use resource content cache or not.
 *
 * The option is a: <code>boolean</code> type.
 *
 * Default: true
 * Group: producer
 *
 * @param contentCache the value to set
 * @return the dsl builder
 */
default JsltComponentBuilder contentCache(boolean contentCache) {
    // Generated fluent setter: forwards to the inherited doSetProperty
    // under the property name "contentCache".
    doSetProperty("contentCache", contentCache);
    return this;
}
/**
 * Whether the producer should be started lazy (on the first message).
 * By starting lazy you can use this to allow CamelContext and routes to
 * startup in situations where a producer may otherwise fail during
 * starting and cause the route to fail being started. By deferring this
 * startup to be lazy then the startup failure can be handled during
 * routing messages via Camel's routing error handlers. Beware that when
 * the first message is processed then creating and starting the
 * producer may take a little time and prolong the total processing time
 * of the processing.
 *
 * The option is a: <code>boolean</code> type.
 *
 * Default: false
 * Group: producer
 *
 * @param lazyStartProducer the value to set
 * @return the dsl builder
 */
default JsltComponentBuilder lazyStartProducer(boolean lazyStartProducer) {
    // Generated fluent setter: forwards to the inherited doSetProperty
    // under the property name "lazyStartProducer".
    doSetProperty("lazyStartProducer", lazyStartProducer);
    return this;
}
/**
 * Whether autowiring is enabled. This is used for automatic autowiring
 * options (the option must be marked as autowired) by looking up in the
 * registry to find if there is a single instance of matching type,
 * which then gets configured on the component. This can be used for
 * automatic configuring JDBC data sources, JMS connection factories,
 * AWS Clients, etc.
 *
 * The option is a: <code>boolean</code> type.
 *
 * Default: true
 * Group: advanced
 *
 * @param autowiredEnabled the value to set
 * @return the dsl builder
 */
default JsltComponentBuilder autowiredEnabled(boolean autowiredEnabled) {
    // Generated fluent setter: forwards to the inherited doSetProperty
    // under the property name "autowiredEnabled".
    doSetProperty("autowiredEnabled", autowiredEnabled);
    return this;
}
/**
 * JSLT can be extended by plugging in functions written in Java.
 *
 * The option is a:
 * <code>java.util.Collection&lt;com.schibsted.spt.data.jslt.Function&gt;</code> type.
 *
 * Group: advanced
 *
 * @param functions the value to set
 * @return the dsl builder
 */
default JsltComponentBuilder functions(java.util.Collection<com.schibsted.spt.data.jslt.Function> functions) {
    // Generated fluent setter: forwards to the inherited doSetProperty
    // under the property name "functions".
    doSetProperty("functions", functions);
    return this;
}
/**
 * JSLT can be extended by plugging in a custom jslt object filter.
 *
 * The option is a:
 * <code>com.schibsted.spt.data.jslt.filters.JsonFilter</code> type.
 *
 * Group: advanced
 *
 * @param objectFilter the value to set
 * @return the dsl builder
 */
default JsltComponentBuilder objectFilter(com.schibsted.spt.data.jslt.filters.JsonFilter objectFilter) {
    // Generated fluent setter: forwards to the inherited doSetProperty
    // under the property name "objectFilter".
    doSetProperty("objectFilter", objectFilter);
    return this;
}
}
| JsltComponentBuilder |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java | {
"start": 1690,
"end": 9511
} | class ____ {
@Test
public void testCreateRandomVolume() throws Exception {
    DiskBalancerTestUtil testUtil = new DiskBalancerTestUtil();
    DiskBalancerVolume volume = testUtil.createRandomVolume(StorageType.DISK);

    // Identity and type fields are always populated on a generated volume.
    assertNotNull(volume.getUuid());
    assertNotNull(volume.getPath());
    assertNotNull(volume.getStorageType());

    // A freshly generated volume is healthy and persistent.
    assertFalse(volume.isFailed());
    assertFalse(volume.isTransient());

    // Capacity accounting must be internally consistent:
    // positive capacity, room left after reservation, and used + reserved below capacity.
    assertTrue(volume.getCapacity() > 0);
    assertTrue((volume.getCapacity() - volume.getReserved()) > 0);
    assertTrue((volume.getReserved() + volume.getUsed()) < volume.getCapacity());
}
@Test
public void testCreateRandomVolumeSet() throws Exception {
    DiskBalancerTestUtil testUtil = new DiskBalancerTestUtil();

    // Ask for ten SSD volumes and verify the set honours both the count and the type.
    DiskBalancerVolumeSet volumeSet =
        testUtil.createRandomVolumeSet(StorageType.SSD, 10);
    assertEquals(10, volumeSet.getVolumeCount());
    assertEquals(StorageType.SSD.toString(),
        volumeSet.getVolumes().get(0).getStorageType());
}
@Test
public void testCreateRandomDataNode() throws Exception {
    DiskBalancerTestUtil testUtil = new DiskBalancerTestUtil();

    // A randomly generated node must come with a computed node-data-density value.
    DiskBalancerDataNode dataNode = testUtil.createRandomDataNode(
        new StorageType[]{StorageType.DISK, StorageType.RAM_DISK}, 10);
    assertNotNull(dataNode.getNodeDataDensity());
}
@Test
public void testDiskQueues() throws Exception {
    DiskBalancerTestUtil util = new DiskBalancerTestUtil();
    DiskBalancerDataNode node = util.createRandomDataNode(
        new StorageType[]{StorageType.DISK, StorageType.RAM_DISK}, 3);
    // Sorted view of the DISK volumes of this node.
    TreeSet<DiskBalancerVolume> sortedQueue =
        node.getVolumeSets().get(StorageType.DISK.toString()).getSortedQueue();
    List<DiskBalancerVolume> reverseList = new LinkedList<>();
    List<DiskBalancerVolume> highList = new LinkedList<>();
    int queueSize = sortedQueue.size();
    // NOTE(review): TreeSet.first() does not remove the element, so every iteration
    // appends the SAME head element to both lists. After the reverse, both lists
    // still contain identical elements and the per-index comparison below is
    // trivially true. This probably intended pollFirst() to drain the queue in
    // order — confirm the test's intent before changing it.
    for (int x = 0; x < queueSize; x++) {
        reverseList.add(sortedQueue.first());
        highList.add(sortedQueue.first());
    }
    Collections.reverse(reverseList);
    for (int x = 0; x < queueSize; x++) {
        assertEquals(reverseList.get(x).getCapacity(),
            highList.get(x).getCapacity());
        assertEquals(reverseList.get(x).getReserved(),
            highList.get(x).getReserved());
        assertEquals(reverseList.get(x).getUsed(),
            highList.get(x).getUsed());
    }
}
@Test
public void testNoBalancingNeededEvenDataSpread() throws Exception {
    DiskBalancerTestUtil testUtil = new DiskBalancerTestUtil();
    DiskBalancerDataNode dataNode =
        new DiskBalancerDataNode(UUID.randomUUID().toString());

    // Two disks with identical capacity, reservation and usage: the spread is
    // perfectly even, so no volume set should ask for balancing.
    DiskBalancerVolume firstDisk = testUtil.createRandomVolume(StorageType.SSD);
    firstDisk.setCapacity(DiskBalancerTestUtil.TB);
    firstDisk.setReserved(100 * DiskBalancerTestUtil.GB);
    firstDisk.setUsed(500 * DiskBalancerTestUtil.GB);

    DiskBalancerVolume secondDisk = testUtil.createRandomVolume(StorageType.SSD);
    secondDisk.setCapacity(DiskBalancerTestUtil.TB);
    secondDisk.setReserved(100 * DiskBalancerTestUtil.GB);
    secondDisk.setUsed(500 * DiskBalancerTestUtil.GB);

    dataNode.addVolume(firstDisk);
    dataNode.addVolume(secondDisk);

    for (DiskBalancerVolumeSet volumeSet : dataNode.getVolumeSets().values()) {
        assertFalse(volumeSet.isBalancingNeeded(10.0f));
    }
}
@Test
public void testNoBalancingNeededTransientDisks() throws Exception {
    DiskBalancerTestUtil util = new DiskBalancerTestUtil();
    DiskBalancerDataNode node =
        new DiskBalancerDataNode(UUID.randomUUID().toString());
    // create two disks which have different data sizes, but
    // transient. isBalancing should say no balancing needed.
    // (RAM_DISK volumes are transient, so even a 1 GB vs 500 GB skew is ignored.)
    DiskBalancerVolume v1 = util.createRandomVolume(StorageType.RAM_DISK);
    v1.setCapacity(DiskBalancerTestUtil.TB);
    v1.setReserved(100 * DiskBalancerTestUtil.GB);
    v1.setUsed(1 * DiskBalancerTestUtil.GB);
    DiskBalancerVolume v2 = util.createRandomVolume(StorageType.RAM_DISK);
    v2.setCapacity(DiskBalancerTestUtil.TB);
    v2.setReserved(100 * DiskBalancerTestUtil.GB);
    v2.setUsed(500 * DiskBalancerTestUtil.GB);
    node.addVolume(v1);
    node.addVolume(v2);
    // 10% threshold: no set on this node may report that balancing is required.
    for (DiskBalancerVolumeSet vsets : node.getVolumeSets().values()) {
        assertFalse(vsets.isBalancingNeeded(10.0f));
    }
}
@Test
public void testNoBalancingNeededFailedDisks() throws Exception {
    DiskBalancerTestUtil util = new DiskBalancerTestUtil();
    DiskBalancerDataNode node =
        new DiskBalancerDataNode(UUID.randomUUID().toString());
    // create two disks which have which are normal disks, but fail
    // one of them. VolumeSet should say no balancing needed.
    // (A failed disk cannot take part in balancing, so the skew must be ignored.)
    DiskBalancerVolume v1 = util.createRandomVolume(StorageType.SSD);
    v1.setCapacity(DiskBalancerTestUtil.TB);
    v1.setReserved(100 * DiskBalancerTestUtil.GB);
    v1.setUsed(1 * DiskBalancerTestUtil.GB);
    v1.setFailed(true);
    DiskBalancerVolume v2 = util.createRandomVolume(StorageType.SSD);
    v2.setCapacity(DiskBalancerTestUtil.TB);
    v2.setReserved(100 * DiskBalancerTestUtil.GB);
    v2.setUsed(500 * DiskBalancerTestUtil.GB);
    node.addVolume(v1);
    node.addVolume(v2);
    // 10% threshold: no set on this node may report that balancing is required.
    for (DiskBalancerVolumeSet vsets : node.getVolumeSets().values()) {
        assertFalse(vsets.isBalancingNeeded(10.0f));
    }
}
@Test
public void testNeedBalancingUnevenDataSpread() throws Exception {
    DiskBalancerTestUtil util = new DiskBalancerTestUtil();
    DiskBalancerDataNode node =
        new DiskBalancerDataNode(UUID.randomUUID().toString());
    // Two healthy SSDs with a large usage skew (0 vs 500 GB): balancing IS needed.
    DiskBalancerVolume v1 = util.createRandomVolume(StorageType.SSD);
    v1.setCapacity(DiskBalancerTestUtil.TB);
    v1.setReserved(100 * DiskBalancerTestUtil.GB);
    v1.setUsed(0);
    DiskBalancerVolume v2 = util.createRandomVolume(StorageType.SSD);
    v2.setCapacity(DiskBalancerTestUtil.TB);
    v2.setReserved(100 * DiskBalancerTestUtil.GB);
    v2.setUsed(500 * DiskBalancerTestUtil.GB);
    node.addVolume(v1);
    node.addVolume(v2);
    // At the 10% threshold every volume set on this node must request balancing.
    for (DiskBalancerVolumeSet vsets : node.getVolumeSets().values()) {
        assertTrue(vsets.isBalancingNeeded(10.0f));
    }
}
@Test
public void testVolumeSerialize() throws Exception {
    DiskBalancerTestUtil testUtil = new DiskBalancerTestUtil();
    DiskBalancerVolume volume = testUtil.createRandomVolume(StorageType.DISK);

    // Round-trip through JSON: serialize, parse back, serialize again.
    // The second serialization must reproduce the first byte-for-byte.
    String jsonBefore = volume.toJson();
    DiskBalancerVolume roundTripped = DiskBalancerVolume.parseJson(jsonBefore);
    String jsonAfter = roundTripped.toJson();

    assertEquals(jsonBefore, jsonAfter);
}
@Test
public void testClusterSerialize() throws Exception {
    DiskBalancerTestUtil util = new DiskBalancerTestUtil();
    // Create a Cluster with 3 datanodes, 3 disk types and 3 disks in each type
    // that is 9 disks in each machine.
    DiskBalancerCluster cluster = util.createRandCluster(3, new StorageType[]{
        StorageType.DISK, StorageType.RAM_DISK, StorageType.SSD}, 3);
    // JSON round-trip must preserve the node list.
    DiskBalancerCluster newCluster =
        DiskBalancerCluster.parseJson(cluster.toJson());
    assertEquals(cluster.getNodes(), newCluster.getNodes());
    // The size check is implied by the equality above; kept as a redundant guard.
    assertEquals(cluster.getNodes().size(), newCluster.getNodes().size());
}
@Test
public void testUsageLimitedToCapacity() throws Exception {
    DiskBalancerTestUtil util = new DiskBalancerTestUtil();

    // If usage is greater than capacity, then it should be set to capacity.
    DiskBalancerVolume v1 = util.createRandomVolume(StorageType.DISK);
    v1.setCapacity(DiskBalancerTestUtil.GB);
    v1.setUsed(2 * DiskBalancerTestUtil.GB);
    assertEquals(v1.getUsed(), v1.getCapacity());

    // If usage is less than capacity, usage should be set to the real usage.
    DiskBalancerVolume v2 = util.createRandomVolume(StorageType.DISK);
    v2.setCapacity(2 * DiskBalancerTestUtil.GB);
    v2.setUsed(DiskBalancerTestUtil.GB);
    // BUG FIX: the original asserted on v1 here (copy/paste error), which merely
    // re-checked the capped volume and left v2's below-capacity behaviour untested.
    assertEquals(DiskBalancerTestUtil.GB, v2.getUsed());
}
}
| TestDataModels |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.