language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | quarkusio__quarkus | extensions/micrometer/runtime/src/test/java/io/quarkus/micrometer/runtime/binder/kafka/KafkaEventObserverTest.java | {
"start": 229,
"end": 854
} | class ____ {
@Test
void testAllKafkaClientMetricsClosed() {
KafkaEventObserver sut = new KafkaEventObserver();
KafkaClientMetrics firstClientMetrics = Mockito.mock(KafkaClientMetrics.class);
KafkaClientMetrics secondClientMetrics = Mockito.mock(KafkaClientMetrics.class);
sut.clientMetrics.put(firstClientMetrics, firstClientMetrics);
sut.clientMetrics.put(secondClientMetrics, secondClientMetrics);
sut.onStop(new ShutdownEvent());
Mockito.verify(firstClientMetrics).close();
Mockito.verify(secondClientMetrics).close();
}
}
| KafkaEventObserverTest |
java | apache__flink | flink-table/flink-table-common/src/main/java/org/apache/flink/table/types/logical/utils/LogicalTypeMerging.java | {
"start": 36445,
"end": 37035
} | class ____ extends AbstractList<LogicalType> {
private final List<LogicalType> types;
private final int childPos;
ChildTypeView(List<LogicalType> types, int childPos) {
this.types = types;
this.childPos = childPos;
}
@Override
public LogicalType get(int index) {
return types.get(index).getChildren().get(childPos);
}
@Override
public int size() {
return types.size();
}
}
private LogicalTypeMerging() {
// no instantiation
}
}
| ChildTypeView |
java | elastic__elasticsearch | libs/h3/src/main/java/org/elasticsearch/h3/CellBoundary.java | {
"start": 1120,
"end": 2298
} | class ____ {
/** Maximum number of cell boundary vertices; worst case is pentagon:
* 5 original verts + 5 edge crossings
*/
static final int MAX_CELL_BNDRY_VERTS = 10;
/** How many points it holds */
private final int numPoints;
/** The actual points */
private final LatLng[] points;
CellBoundary(LatLng[] points, int numPoints) {
this.points = points;
this.numPoints = numPoints;
}
/** Number of points in this boundary */
public int numPoints() {
return numPoints;
}
/** Return the point at the given position*/
public LatLng getLatLon(int i) {
assert i >= 0 && i < numPoints;
return points[i];
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
final CellBoundary that = (CellBoundary) o;
return numPoints == that.numPoints && Arrays.equals(points, that.points);
}
@Override
public int hashCode() {
return Objects.hash(numPoints, Arrays.hashCode(points));
}
}
| CellBoundary |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java | {
"start": 55467,
"end": 71312
} | class ____
extends ShellScriptBuilder {
private static final Pattern VARIABLE_PATTERN = Pattern.compile("%(.*?)%");
private static final Pattern SPLIT_PATTERN = Pattern.compile(":");
private void errorCheck() {
line("@if %errorlevel% neq 0 exit /b %errorlevel%");
}
private void lineWithLenCheck(String... commands) throws IOException {
Shell.checkWindowsCommandLineLength(commands);
line(commands);
}
public WindowsShellScriptBuilder() {
line("@setlocal");
line();
}
@Override
public void command(List<String> command) throws IOException {
lineWithLenCheck("@call ", StringUtils.join(" ", command));
errorCheck();
}
//Dummy implementation
@Override
protected void setStdOut(final Path stdout) throws IOException {
}
//Dummy implementation
@Override
protected void setStdErr(final Path stderr) throws IOException {
}
@Override
public void env(String key, String value) throws IOException {
lineWithLenCheck("@set ", key, "=", value);
errorCheck();
}
@Override
public void whitelistedEnv(String key, String value) throws IOException {
env(key, value);
}
@Override
public void echo(final String echoStr) throws IOException {
lineWithLenCheck("@echo \"", echoStr, "\"");
}
@Override
protected void link(Path src, Path dst) throws IOException {
File srcFile = new File(src.toUri().getPath());
String srcFileStr = srcFile.getPath();
String dstFileStr = new File(dst.toString()).getPath();
lineWithLenCheck(String.format("@%s symlink \"%s\" \"%s\"",
Shell.getWinUtilsPath(), dstFileStr, srcFileStr));
errorCheck();
}
@Override
protected void mkdir(Path path) throws IOException {
lineWithLenCheck(String.format("@if not exist \"%s\" mkdir \"%s\"",
path.toString(), path.toString()));
errorCheck();
}
@Override
public void copyDebugInformation(Path src, Path dest)
throws IOException {
// no need to worry about permissions - in secure mode
// WindowsSecureContainerExecutor will set permissions
// to allow NM to read the file
line("rem Creating copy of launch script");
lineWithLenCheck(String.format("copy \"%s\" \"%s\"", src.toString(),
dest.toString()));
}
@Override
public void listDebugInformation(Path output) throws IOException {
line("rem Determining directory contents");
lineWithLenCheck(
String.format("@echo \"dir:\" > \"%s\"", output.toString()));
lineWithLenCheck(String.format("dir >> \"%s\"", output.toString()));
}
/**
* Parse <code>envVal</code> using cmd/bat-like syntax to extract env
* variables it depends on.
*/
public Set<String> getEnvDependencies(final String envVal) {
if (envVal == null || envVal.isEmpty()) {
return Collections.emptySet();
}
// Example inputs: %var%, %%, %a:b%
Matcher matcher = VARIABLE_PATTERN.matcher(envVal);
final Set<String> deps = new HashSet<>();
while (matcher.find()) {
String match = matcher.group(1);
if (!match.isEmpty()) {
if (match.equals(":")) {
// Special case, variable name can be a single : character
deps.add(match);
} else {
// Either store the variable name before the : string manipulation
// character or the whole match. (%var% -> var, %a:b% -> a)
String[] split = SPLIT_PATTERN.split(match, 2);
if (!split[0].isEmpty()) {
deps.add(split[0]);
}
}
}
}
return deps;
}
}
private static void addToEnvMap(
Map<String, String> envMap, Set<String> envSet,
String envName, String envValue) {
envMap.put(envName, envValue);
envSet.add(envName);
}
public void sanitizeEnv(Map<String, String> environment, Path pwd,
List<Path> appDirs, List<String> userLocalDirs, List<String>
containerLogDirs, Map<Path, List<String>> resources,
Path nmPrivateClasspathJarDir,
Set<String> nmVars) throws IOException {
// Based on discussion in YARN-7654, for ENTRY_POINT enabled
// docker container, we forward user defined environment variables
// without node manager environment variables. This is the reason
// that we skip sanitizeEnv method.
boolean overrideDisable = Boolean.parseBoolean(
environment.get(
Environment.
YARN_CONTAINER_RUNTIME_DOCKER_RUN_OVERRIDE_DISABLE.
name()));
if (overrideDisable) {
environment.remove("WORK_DIR");
return;
}
/**
* Non-modifiable environment variables
*/
addToEnvMap(environment, nmVars, Environment.CONTAINER_ID.name(),
container.getContainerId().toString());
addToEnvMap(environment, nmVars, Environment.NM_PORT.name(),
String.valueOf(this.context.getNodeId().getPort()));
addToEnvMap(environment, nmVars, Environment.NM_HOST.name(),
this.context.getNodeId().getHost());
addToEnvMap(environment, nmVars, Environment.NM_HTTP_PORT.name(),
String.valueOf(this.context.getHttpPort()));
addToEnvMap(environment, nmVars, Environment.LOCAL_DIRS.name(),
StringUtils.join(",", appDirs));
addToEnvMap(environment, nmVars, Environment.LOCAL_USER_DIRS.name(),
StringUtils.join(",", userLocalDirs));
addToEnvMap(environment, nmVars, Environment.LOG_DIRS.name(),
StringUtils.join(",", containerLogDirs));
addToEnvMap(environment, nmVars, Environment.USER.name(),
container.getUser());
addToEnvMap(environment, nmVars, Environment.LOGNAME.name(),
container.getUser());
addToEnvMap(environment, nmVars, Environment.HOME.name(),
conf.get(
YarnConfiguration.NM_USER_HOME_DIR,
YarnConfiguration.DEFAULT_NM_USER_HOME_DIR
)
);
addToEnvMap(environment, nmVars, Environment.PWD.name(), pwd.toString());
addToEnvMap(environment, nmVars, Environment.LOCALIZATION_COUNTERS.name(),
container.localizationCountersAsString());
if (!Shell.WINDOWS) {
addToEnvMap(environment, nmVars, "JVM_PID", "$$");
}
// TODO: Remove Windows check and use this approach on all platforms after
// additional testing. See YARN-358.
if (Shell.WINDOWS) {
sanitizeWindowsEnv(environment, pwd,
resources, nmPrivateClasspathJarDir);
}
// put AuxiliaryService data to environment
for (Map.Entry<String, ByteBuffer> meta : containerManager
.getAuxServiceMetaData().entrySet()) {
AuxiliaryServiceHelper.setServiceDataIntoEnv(
meta.getKey(), meta.getValue(), environment);
nmVars.add(AuxiliaryServiceHelper.getPrefixServiceName(meta.getKey()));
}
}
/**
* There are some configurations (such as {@value YarnConfiguration#NM_ADMIN_USER_ENV}) whose
* values need to be added to the environment variables.
*
* @param environment The environment variables map to add the configuration values to.
*/
public void addConfigsToEnv(Map<String, String> environment) {
// variables here will be forced in, even if the container has
// specified them. Note: we do not track these in nmVars, to
// allow them to be ordered properly if they reference variables
// defined by the user.
String defEnvStr = conf.get(YarnConfiguration.DEFAULT_NM_ADMIN_USER_ENV);
Apps.setEnvFromInputProperty(environment, YarnConfiguration.NM_ADMIN_USER_ENV, defEnvStr, conf,
File.pathSeparator);
if (!Shell.WINDOWS) {
// maybe force path components
String forcePath = conf.get(YarnConfiguration.NM_ADMIN_FORCE_PATH,
YarnConfiguration.DEFAULT_NM_ADMIN_FORCE_PATH);
if (!forcePath.isEmpty()) {
String userPath = environment.get(Environment.PATH.name());
environment.remove(Environment.PATH.name());
if (userPath == null || userPath.isEmpty()) {
Apps.addToEnvironment(environment, Environment.PATH.name(), forcePath,
File.pathSeparator);
Apps.addToEnvironment(environment, Environment.PATH.name(), "$PATH", File.pathSeparator);
} else {
Apps.addToEnvironment(environment, Environment.PATH.name(), forcePath,
File.pathSeparator);
Apps.addToEnvironment(environment, Environment.PATH.name(), userPath, File.pathSeparator);
}
}
}
}
private void sanitizeWindowsEnv(Map<String, String> environment, Path pwd,
Map<Path, List<String>> resources, Path nmPrivateClasspathJarDir)
throws IOException {
String inputClassPath = environment.get(Environment.CLASSPATH.name());
if (inputClassPath != null && !inputClassPath.isEmpty()) {
//On non-windows, localized resources
//from distcache are available via the classpath as they were placed
//there but on windows they are not available when the classpath
//jar is created and so they "are lost" and have to be explicitly
//added to the classpath instead. This also means that their position
//is lost relative to other non-distcache classpath entries which will
//break things like mapreduce.job.user.classpath.first. An environment
//variable can be set to indicate that distcache entries should come
//first
boolean preferLocalizedJars = Boolean.parseBoolean(
environment.get(Environment.CLASSPATH_PREPEND_DISTCACHE.name())
);
boolean needsSeparator = false;
StringBuilder newClassPath = new StringBuilder();
if (!preferLocalizedJars) {
newClassPath.append(inputClassPath);
needsSeparator = true;
}
// Localized resources do not exist at the desired paths yet, because the
// container launch script has not run to create symlinks yet. This
// means that FileUtil.createJarWithClassPath can't automatically expand
// wildcards to separate classpath entries for each file in the manifest.
// To resolve this, append classpath entries explicitly for each
// resource.
for (Map.Entry<Path, List<String>> entry : resources.entrySet()) {
boolean targetIsDirectory = new File(entry.getKey().toUri().getPath())
.isDirectory();
for (String linkName : entry.getValue()) {
// Append resource.
if (needsSeparator) {
newClassPath.append(File.pathSeparator);
} else {
needsSeparator = true;
}
newClassPath.append(pwd.toString())
.append(Path.SEPARATOR).append(linkName);
// FileUtil.createJarWithClassPath must use File.toURI to convert
// each file to a URI to write into the manifest's classpath. For
// directories, the classpath must have a trailing '/', but
// File.toURI only appends the trailing '/' if it is a directory that
// already exists. To resolve this, add the classpath entries with
// explicit trailing '/' here for any localized resource that targets
// a directory. Then, FileUtil.createJarWithClassPath will guarantee
// that the resulting entry in the manifest's classpath will have a
// trailing '/', and thus refer to a directory instead of a file.
if (targetIsDirectory) {
newClassPath.append(Path.SEPARATOR);
}
}
}
if (preferLocalizedJars) {
if (needsSeparator) {
newClassPath.append(File.pathSeparator);
}
newClassPath.append(inputClassPath);
}
// When the container launches, it takes the parent process's environment
// and then adds/overwrites with the entries from the container launch
// context. Do the same thing here for correct substitution of
// environment variables in the classpath jar manifest.
Map<String, String> mergedEnv = new HashMap<String, String>(
System.getenv());
mergedEnv.putAll(environment);
// this is hacky and temporary - it's to preserve the windows secure
// behavior but enable non-secure windows to properly build the class
// path for access to job.jar/lib/xyz and friends (see YARN-2803)
Path jarDir;
if (exec instanceof WindowsSecureContainerExecutor) {
jarDir = nmPrivateClasspathJarDir;
} else {
jarDir = pwd;
}
String[] jarCp = FileUtil.createJarWithClassPath(
newClassPath.toString(), jarDir, pwd, mergedEnv);
// In a secure cluster the classpath jar must be localized to grant access
Path localizedClassPathJar = exec.localizeClasspathJar(
new Path(jarCp[0]), pwd, container.getUser());
String replacementClassPath = localizedClassPathJar.toString() + jarCp[1];
environment.put(Environment.CLASSPATH.name(), replacementClassPath);
}
}
public static String getExitCodeFile(String pidFile) {
return pidFile + EXIT_CODE_FILE_SUFFIX;
}
private void recordContainerLogDir(ContainerId containerId,
String logDir) throws IOException{
container.setLogDir(logDir);
if (container.isRetryContextSet()) {
context.getNMStateStore().storeContainerLogDir(containerId, logDir);
}
}
private void recordContainerWorkDir(ContainerId containerId,
String workDir) throws IOException{
container.setWorkDir(workDir);
if (container.isRetryContextSet()) {
context.getNMStateStore().storeContainerWorkDir(containerId, workDir);
}
}
private void recordContainerCsiVolumesRootDir(ContainerId containerId,
String volumesRoot) throws IOException {
container.setCsiVolumesRootDir(volumesRoot);
// TODO persistent to the NM store...
}
protected Path getContainerWorkDir() throws IOException {
String containerWorkDir = container.getWorkDir();
if (containerWorkDir == null
|| !dirsHandler.isGoodLocalDir(containerWorkDir)) {
throw new IOException(
"Could not find a good work dir " + containerWorkDir
+ " for container " + container);
}
return new Path(containerWorkDir);
}
/**
* Clean up container's files for container relaunch or cleanup.
*/
protected void cleanupContainerFiles(Path containerWorkDir) {
LOG.debug("cleanup container {} files", containerWorkDir);
// delete ContainerScriptPath
deleteAsUser(new Path(containerWorkDir, CONTAINER_SCRIPT));
// delete TokensPath
deleteAsUser(new Path(containerWorkDir, FINAL_CONTAINER_TOKENS_FILE));
// delete sysfs dir
deleteAsUser(new Path(containerWorkDir, SYSFS_DIR));
// delete symlinks because launch script will create symlinks again
try {
exec.cleanupBeforeRelaunch(container);
} catch (IOException | InterruptedException e) {
LOG.warn("{} exec failed to cleanup", container.getContainerId(), e);
}
}
private void deleteAsUser(Path path) {
try {
exec.deleteAsUser(new DeletionAsUserContext.Builder()
.setUser(container.getUser())
.setSubDir(path)
.build());
} catch (Exception e) {
LOG.warn("Failed to delete " + path, e);
}
}
/**
* Returns the PID File Path.
*/
Path getPidFilePath() {
return pidFilePath;
}
/**
* Marks the container to be launched only if it was not launched.
*
* @return true if successful; false otherwise.
*/
boolean markLaunched() {
return containerAlreadyLaunched.compareAndSet(false, true);
}
/**
* Returns if the launch is completed or not.
*/
boolean isLaunchCompleted() {
return completed.get();
}
}
| WindowsShellScriptBuilder |
java | apache__camel | core/camel-core-model/src/main/java/org/apache/camel/model/RouteTemplateParameterDefinition.java | {
"start": 1240,
"end": 2925
} | class ____ {
@XmlAttribute(required = true)
String name;
@XmlAttribute
Boolean required;
@XmlAttribute
String defaultValue;
@XmlAttribute
String description;
public RouteTemplateParameterDefinition() {
}
public RouteTemplateParameterDefinition(String name, String defaultValue, String description) {
this.name = name;
this.description = description;
this.defaultValue = defaultValue;
}
public boolean isRequired() {
// assumed to be required if not set explicit to false
return required == null || required;
}
public String getName() {
return name;
}
/**
* The name of the parameter
*/
public void setName(String name) {
this.name = name;
}
public Boolean getRequired() {
return required;
}
/**
* Whether the parameter is required or not. A parameter is required unless this option is set to false or a default
* value has been configured.
*/
public void setRequired(Boolean required) {
this.required = required;
}
public String getDefaultValue() {
return defaultValue;
}
/**
* Default value of the parameter. If a default value is provided then the parameter is implied not to be required.
*/
public void setDefaultValue(String defaultValue) {
this.defaultValue = defaultValue;
}
public String getDescription() {
return description;
}
/**
* Description of the parameter
*/
public void setDescription(String description) {
this.description = description;
}
}
| RouteTemplateParameterDefinition |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/parser/deser/FieldDeserializerTest10.java | {
"start": 145,
"end": 827
} | class ____ extends TestCase {
public void test_0 () throws Exception {
Assert.assertEquals(Type.Big, JSON.parseObject("{\"id\":\"Big\"\t}", VO.class).id);
Assert.assertEquals(Type.Big, JSON.parseObject("{\"id\":\"Big\"\t}\n\t", VO.class).id);
Assert.assertEquals(Type.Big, JSON.parseObject("{\"id\":\"Big\" }", V1.class).id);
Assert.assertEquals(Type.Big, JSON.parseObject("{\"id\":\"Big\" }\n", V1.class).id);
Assert.assertEquals(Type.Big, JSON.parseObject("{\"id\":\"Big\" }\n\t", V1.class).id);
Assert.assertEquals(Type.Big, JSON.parseObject("{\"id\":\"Big\"\n}", V1.class).id);
}
public static | FieldDeserializerTest10 |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/bugs/_3591/BeanMapper.java | {
"start": 340,
"end": 542
} | interface ____ {
BeanMapper INSTANCE = Mappers.getMapper( BeanMapper.class );
@Mapping(source = "beans", target = "beans")
BeanDto map(Bean bean, @MappingTarget BeanDto beanDto);
}
| BeanMapper |
java | spring-projects__spring-framework | spring-web/src/main/java/org/springframework/http/MediaTypeFactory.java | {
"start": 1403,
"end": 4355
} | class ____ {
private static final String MIME_TYPES_FILE_NAME = "/org/springframework/http/mime.types";
private static final MultiValueMap<String, MediaType> fileExtensionToMediaTypes = parseMimeTypes();
private MediaTypeFactory() {
}
/**
* Parse the {@code mime.types} file found in the resources. Format is:
* <code>
* # comments begin with a '#'<br>
* # the format is <mime type> <space separated file extensions><br>
* # for example:<br>
* text/plain txt text<br>
* # this would map file.txt and file.text to<br>
* # the mime type "text/plain"<br>
* </code>
* @return a multi-value map, mapping media types to file extensions.
*/
private static MultiValueMap<String, MediaType> parseMimeTypes() {
InputStream is = MediaTypeFactory.class.getResourceAsStream(MIME_TYPES_FILE_NAME);
Assert.state(is != null, MIME_TYPES_FILE_NAME + " not found in classpath");
try (BufferedReader reader = new BufferedReader(new InputStreamReader(is, StandardCharsets.US_ASCII))) {
MultiValueMap<String, MediaType> result = new LinkedMultiValueMap<>();
String line;
while ((line = reader.readLine()) != null) {
if (line.isEmpty() || line.charAt(0) == '#') {
continue;
}
String[] tokens = StringUtils.tokenizeToStringArray(line, " \t\n\r\f");
MediaType mediaType = MediaType.parseMediaType(tokens[0]);
for (int i = 1; i < tokens.length; i++) {
String fileExtension = tokens[i].toLowerCase(Locale.ROOT);
result.add(fileExtension, mediaType);
}
}
return result;
}
catch (IOException ex) {
throw new IllegalStateException("Could not read " + MIME_TYPES_FILE_NAME, ex);
}
}
/**
* Determine a media type for the given resource, if possible.
* @param resource the resource to introspect
* @return the corresponding media type, or {@code null} if none found
*/
public static Optional<MediaType> getMediaType(@Nullable Resource resource) {
return Optional.ofNullable(resource)
.map(Resource::getFilename)
.flatMap(MediaTypeFactory::getMediaType);
}
/**
* Determine a media type for the given file name, if possible.
* @param filename the file name plus extension
* @return the corresponding media type, or {@code null} if none found
*/
public static Optional<MediaType> getMediaType(@Nullable String filename) {
return getMediaTypes(filename).stream().findFirst();
}
/**
* Determine the media types for the given file name, if possible.
* @param filename the file name plus extension
* @return the corresponding media types, or an empty list if none found
*/
public static List<MediaType> getMediaTypes(@Nullable String filename) {
List<MediaType> mediaTypes = null;
String ext = StringUtils.getFilenameExtension(filename);
if (ext != null) {
mediaTypes = fileExtensionToMediaTypes.get(ext.toLowerCase(Locale.ROOT));
}
return (mediaTypes != null ? mediaTypes : Collections.emptyList());
}
}
| MediaTypeFactory |
java | google__guice | core/test/com/googlecode/guice/BytecodeGenTest.java | {
"start": 15606,
"end": 16715
} | class ____ {
final Throwable caller;
public LogCreator() {
this.caller = new Throwable();
}
void assertIsFastClassInvoked() throws Throwable {
// 2 because the first 2 elements are
// LogCreator.<init>()
// Subclass.<init>()
if (!caller.getStackTrace()[2].getClassName().contains("$$FastClassByGuice$$")) {
throw new AssertionError("Caller was not FastClass").initCause(caller);
}
}
void assertIsReflectionInvoked() throws Throwable {
// Scan for a call to Constructor.newInstance, but stop if we see the test itself.
for (StackTraceElement element : caller.getStackTrace()) {
if (element.getClassName().equals(BytecodeGenTest.class.getName())) {
// break when we hit the test method.
break;
}
if (element.getClassName().equals(Constructor.class.getName())
&& element.getMethodName().equals("newInstance")) {
return;
}
}
throw new AssertionError("Caller was not Constructor.newInstance").initCause(caller);
}
}
public static | LogCreator |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/oracle/select/OracleSelectTest21.java | {
"start": 1026,
"end": 4717
} | class ____ extends OracleTest {
public void test_0() throws Exception {
String sql = //
"SELECT EVENT, WAITS, TIME, DECODE(WAITS, NULL, TO_NUMBER(NULL), 0, TO_NUMBER(NULL), TIME/WAITS*1000) AVGWT" +
" , PCTWTT, WAIT_CLASS " + "FROM (SELECT EVENT, WAITS, TIME, PCTWTT, WAIT_CLASS " +
" FROM (" +
" SELECT E.EVENT_NAME EVENT, E.TOTAL_WAITS - NVL(B.TOTAL_WAITS,0) WAITS" +
" , (E.TIME_WAITED_MICRO - NVL(B.TIME_WAITED_MICRO,0)) / 1000000 TIME" +
" , 100 * (E.TIME_WAITED_MICRO - NVL(B.TIME_WAITED_MICRO,0)) / :B1 PCTWTT" +
" , E.WAIT_CLASS WAIT_CLASS " +
" FROM DBA_HIST_SYSTEM_EVENT B, DBA_HIST_SYSTEM_EVENT E " +
" WHERE B.SNAP_ID(+) = :B5 AND E.SNAP_ID = :B4 AND B.DBID(+) = :B3 AND E.DBID = :B3 " +
" AND B.INSTANCE_NUMBER(+) = :B2 AND E.INSTANCE_NUMBER = :B2 AND B.EVENT_ID(+) = E.EVENT_ID " +
" AND E.TOTAL_WAITS > NVL(B.TOTAL_WAITS,0) AND E.WAIT_CLASS != 'Idle' " +
" UNION ALL " +
" SELECT 'CPU time' EVENT, TO_NUMBER(NULL) WAITS" +
" , :B6 /1000000 TIME, 100 * :B6 / :B1 PCTWTT, NULL WAIT_CLASS FROM DUAL WHERE :B6 > 0" +
") ORDER BY TIME DESC, WAITS DESC) " +
"WHERE ROWNUM <= :B7 ";
OracleStatementParser parser = new OracleStatementParser(sql);
List<SQLStatement> statementList = parser.parseStatementList();
SQLStatement statemen = statementList.get(0);
print(statementList);
assertEquals(1, statementList.size());
OracleSchemaStatVisitor visitor = new OracleSchemaStatVisitor();
statemen.accept(visitor);
System.out.println("Tables : " + visitor.getTables());
System.out.println("fields : " + visitor.getColumns());
System.out.println("coditions : " + visitor.getConditions());
System.out.println("relationships : " + visitor.getRelationships());
System.out.println("orderBy : " + visitor.getOrderByColumns());
assertEquals(1, visitor.getTables().size());
assertTrue(visitor.getTables().containsKey(new TableStat.Name("DBA_HIST_SYSTEM_EVENT")));
assertTrue(visitor.getColumns().contains(new Column("DBA_HIST_SYSTEM_EVENT", "EVENT_NAME")));
assertTrue(visitor.getColumns().contains(new Column("DBA_HIST_SYSTEM_EVENT", "TOTAL_WAITS")));
assertTrue(visitor.getColumns().contains(new Column("DBA_HIST_SYSTEM_EVENT", "TIME_WAITED_MICRO")));
assertTrue(visitor.getColumns().contains(new Column("DBA_HIST_SYSTEM_EVENT", "SNAP_ID")));
assertTrue(visitor.getColumns().contains(new Column("DBA_HIST_SYSTEM_EVENT", "INSTANCE_NUMBER")));
assertTrue(visitor.getColumns().contains(new Column("DBA_HIST_SYSTEM_EVENT", "DBID")));
assertTrue(visitor.getColumns().contains(new Column("DBA_HIST_SYSTEM_EVENT", "WAIT_CLASS")));
assertTrue(visitor.getColumns().contains(new Column("DBA_HIST_SYSTEM_EVENT", "EVENT_ID")));
assertEquals(8, visitor.getColumns().size());
// assertTrue(visitor.getColumns().contains(new TableStat.Column("pivot_table", "*")));
// assertTrue(visitor.getColumns().contains(new TableStat.Column("pivot_table", "YEAR")));
// assertTrue(visitor.getColumns().contains(new TableStat.Column("pivot_table", "order_mode")));
}
}
| OracleSelectTest21 |
java | apache__camel | components/camel-kafka/src/main/java/org/apache/camel/component/kafka/consumer/support/streaming/KafkaRecordStreamingProcessor.java | {
"start": 1646,
"end": 6406
} | class ____ extends KafkaRecordProcessor {
private static final Logger LOG = LoggerFactory.getLogger(KafkaRecordStreamingProcessor.class);
private final boolean autoCommitEnabled;
private final KafkaConfiguration configuration;
private final Processor processor;
private final CommitManager commitManager;
public KafkaRecordStreamingProcessor(KafkaConfiguration configuration, Processor processor, CommitManager commitManager) {
this.autoCommitEnabled = configuration.isAutoCommitEnable();
this.configuration = configuration;
this.processor = processor;
this.commitManager = commitManager;
}
public ProcessingResult processExchange(
KafkaConsumer camelKafkaConsumer, TopicPartition topicPartition, boolean partitionHasNext,
boolean recordHasNext, ConsumerRecord<Object, Object> consumerRecord) {
final Exchange exchange = camelKafkaConsumer.createExchange(false);
Message message = exchange.getMessage();
setupExchangeMessage(message, consumerRecord);
propagateHeaders(configuration, consumerRecord, exchange);
// if not auto commit then we have additional information on the exchange
if (!autoCommitEnabled) {
message.setHeader(KafkaConstants.LAST_RECORD_BEFORE_COMMIT, !recordHasNext);
message.setHeader(KafkaConstants.LAST_POLL_RECORD, !recordHasNext && !partitionHasNext);
}
if (configuration.isAllowManualCommit()) {
// allow Camel users to access the Kafka consumer API to be able to do for example manual commits
KafkaManualCommit manual = commitManager.getManualCommit(exchange, topicPartition, consumerRecord);
message.setHeader(KafkaConstants.MANUAL_COMMIT, manual);
message.setHeader(KafkaConstants.LAST_POLL_RECORD, !recordHasNext && !partitionHasNext);
}
try {
processor.process(exchange);
} catch (Exception e) {
exchange.setException(e);
}
ProcessingResult result;
if (exchange.getException() != null) {
LOG.debug("An exception was thrown for consumerRecord at partition {} and offset {}",
consumerRecord.partition(), consumerRecord.offset());
final ExceptionHandler exceptionHandler = camelKafkaConsumer.getExceptionHandler();
boolean breakOnErrorExit = processException(exchange, topicPartition, consumerRecord, exceptionHandler);
result = new ProcessingResult(
breakOnErrorExit, true, consumerRecord.topic(), consumerRecord.partition(), consumerRecord.offset());
} else {
result = new ProcessingResult(
false, exchange.getException() != null, consumerRecord.topic(), consumerRecord.partition(),
consumerRecord.offset());
}
if (!result.isBreakOnErrorHit()) {
commitManager.recordOffset(topicPartition, consumerRecord.offset());
}
// success so release the exchange
camelKafkaConsumer.releaseExchange(exchange, false);
return result;
}
private boolean processException(
Exchange exchange, TopicPartition topicPartition,
ConsumerRecord<Object, Object> consumerRecord, ExceptionHandler exceptionHandler) {
// processing failed due to an unhandled exception, what should we do
if (configuration.isBreakOnFirstError()) {
// we are failing and we should break out
if (LOG.isWarnEnabled()) {
Exception exc = exchange.getException();
LOG.warn("Error during processing {} from topic: {} due to {}", exchange, topicPartition.topic(),
exc.getMessage());
LOG.warn("Will seek consumer to offset {} on partition {} and start polling again.",
consumerRecord.offset(), consumerRecord.partition());
}
// we should just do a commit (vs the original forceCommit)
// when route uses NOOP Commit Manager it will rely
// on the route implementation to explicitly commit offset
// when route uses Synch/Asynch Commit Manager it will
// ALWAYS commit the offset for the failing record
// and will ALWAYS retry it
commitManager.commit(topicPartition);
// continue to next partition
return true;
} else {
// will handle/log the exception and then continue to next
exceptionHandler.handleException("Error during processing", exchange, exchange.getException());
}
return false;
}
}
| KafkaRecordStreamingProcessor |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/zoneddatetime/ZonedDateTimeAssert_isIn_Test.java | {
"start": 1099,
"end": 1617
} | class ____ extends ZonedDateTimeAssertBaseTest {
@Test
void isIn_should_compare_datetimes_in_actual_timezone() {
ZonedDateTime utcDateTime = ZonedDateTime.of(2013, 6, 10, 0, 0, 0, 0, ZoneOffset.UTC);
ZoneId cestTimeZone = ZoneId.of("Europe/Berlin");
ZonedDateTime cestDateTime = ZonedDateTime.of(2013, 6, 10, 2, 0, 0, 0, cestTimeZone);
// cestDateTime and utcDateTime are equals in same timezone
assertThat(utcDateTime).isIn(cestDateTime, ZonedDateTime.now());
}
}
| ZonedDateTimeAssert_isIn_Test |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/shuffle/NettyShuffleDescriptor.java | {
"start": 3420,
"end": 3804
} | interface ____ extends Serializable {
InetSocketAddress getAddress();
int getConnectionIndex();
}
/**
* Remote partition connection information with index to query partition.
*
* <p>Normal connection information with network address and port for connection in case of
* distributed execution.
*/
public static | PartitionConnectionInfo |
java | quarkusio__quarkus | test-framework/common/src/main/java/io/quarkus/test/common/TestHostLauncher.java | {
"start": 391,
"end": 1430
} | class ____ implements ArtifactLauncher {
private String previousHost;
@Override
public void start() throws IOException {
// set 'quarkus.http.host' to ensure that RestAssured targets the proper host
previousHost = System.setProperty("quarkus.http.host", System.getProperty("quarkus.http.test-host"));
}
@Override
public void close() throws IOException {
if (previousHost != null) {
System.setProperty("quarkus.http.host", previousHost);
}
}
@Override
public boolean listensOnSsl() {
return Boolean.parseBoolean(System.getProperty("quarkus.http.test-ssl-enabled", "false"));
}
@Override
public void includeAsSysProps(Map systemProps) {
}
@Override
public void init(InitContext initContext) {
throw new IllegalStateException("Should not be called");
}
@Override
public LaunchResult runToCompletion(String[] args) {
throw new IllegalStateException("Should not be called");
}
}
| TestHostLauncher |
java | quarkusio__quarkus | independent-projects/resteasy-reactive/common/processor/src/main/java/org/jboss/resteasy/reactive/common/processor/EndpointIndexer.java | {
"start": 99551,
"end": 100661
} | class ____ {
private final String path;
private final String[] produces;
private final String[] consumes;
private final Set<String> pathParameters;
private final String streamElementType;
public BasicResourceClassInfo(String path, String[] produces, String[] consumes, Set<String> pathParameters,
String streamElementType) {
this.path = path;
this.produces = produces;
this.consumes = consumes;
this.pathParameters = pathParameters;
this.streamElementType = streamElementType;
}
public String getPath() {
return path;
}
public String[] getProduces() {
return produces;
}
public String[] getConsumes() {
return consumes;
}
public Set<String> getPathParameters() {
return pathParameters;
}
public String getStreamElementType() {
return streamElementType;
}
}
@SuppressWarnings("rawtypes")
public static | BasicResourceClassInfo |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/erroneous/ambiguousmapping/ErroneousWithAmbiguousMethodsMapper.java | {
"start": 806,
"end": 864
} | class ____ {
public LeafDTO leaf;
}
| BranchDTO |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/fulltext/FullTextPredicate.java | {
"start": 892,
"end": 949
} | class ____ extends Expression {
public | FullTextPredicate |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/rest/util/RestClientException.java | {
"start": 1134,
"end": 1815
} | class ____ extends FlinkException {
private static final long serialVersionUID = 937914622022344423L;
private final int responseCode;
public RestClientException(String message, HttpResponseStatus responseStatus) {
super(message);
Preconditions.checkNotNull(responseStatus);
responseCode = responseStatus.code();
}
public RestClientException(String message, Throwable cause, HttpResponseStatus responseStatus) {
super(message, cause);
responseCode = responseStatus.code();
}
public HttpResponseStatus getHttpResponseStatus() {
return HttpResponseStatus.valueOf(responseCode);
}
}
| RestClientException |
java | resilience4j__resilience4j | resilience4j-hedge/src/main/java/io/github/resilience4j/hedge/internal/HedgeResult.java | {
"start": 972,
"end": 1901
} | class ____<T> {
public final Optional<Throwable> throwable;
public final boolean fromPrimary;
public final T value;
private HedgeResult(T value, boolean fromPrimary, Optional<Throwable> throwable) {
this.fromPrimary = fromPrimary;
this.value = value;
this.throwable = throwable;
}
/**
* Create a Hedge result
*
* @param value the return value of the call. Undefined for errors.
* @param fromPrimary whether the return value came from the primary call
* @param throwable an Optional containing any failure that occurred.
* @param <T> the type of the underlying return value
* @return a HedgeResult representing the outcome of the hedging
*/
public static <T> HedgeResult<T> of(T value, boolean fromPrimary, Optional<Throwable> throwable) {
return new HedgeResult<>(value, fromPrimary, throwable);
}
}
| HedgeResult |
java | assertj__assertj-core | assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/api/Assertions_assertThat_with_Map_Test.java | {
"start": 961,
"end": 1213
} | class ____ {
@Test
void should_create_Assert() {
AbstractMapAssert<?, ? extends Map<Object, Object>, Object, Object> assertions = Assertions.assertThat(emptyMap());
assertThat(assertions).isNotNull();
}
}
| Assertions_assertThat_with_Map_Test |
java | elastic__elasticsearch | modules/lang-painless/src/test/java/org/elasticsearch/painless/ShiftTests.java | {
"start": 562,
"end": 34411
} | class ____ extends ScriptTestCase {
public void testBasics() {
assertEquals(1 << 2, exec("return 1 << 2;"));
assertEquals(4 >> 2, exec("return 4 >> 2;"));
assertEquals(-1 >>> 29, exec("return -1 >>> 29;"));
assertEquals(4, exec("int x = 1; char y = 2; return x << y;"));
assertEquals(-1, exec("int x = -1; char y = 29; return x >> y;"));
assertEquals(3, exec("int x = -1; char y = 30; return x >>> y;"));
}
public void testLongShifts() {
assertEquals(1L << 2, exec("long x = 1L; int y = 2; return x << y;"));
assertEquals(1 << 2L, exec("int x = 1; long y = 2L; return x << y;"));
assertEquals(4 >> 2L, exec("int x = 4; long y = 2L; return x >> y;"));
assertEquals(4L >> 2, exec("long x = 4L; int y = 2; return x >> y;"));
assertEquals(-1L >>> 29, exec("long x = -1L; int y = 29; return x >>> y;"));
assertEquals(-1 >>> 29L, exec("int x = -1; long y = 29L; return x >>> y;"));
}
public void testLongShiftsConst() {
assertEquals(1L << 2, exec("return 1L << 2;"));
assertEquals(1 << 2L, exec("return 1 << 2L;"));
assertEquals(4 >> 2L, exec("return 4 >> 2L;"));
assertEquals(4L >> 2, exec("return 4L >> 2;"));
assertEquals(-1L >>> 29, exec("return -1L >>> 29;"));
assertEquals(-1 >>> 29L, exec("return -1 >>> 29L;"));
}
public void testBogusShifts() {
expectScriptThrows(ClassCastException.class, () -> { exec("long x = 1L; float y = 2; return x << y;"); });
expectScriptThrows(ClassCastException.class, () -> { exec("int x = 1; double y = 2L; return x << y;"); });
expectScriptThrows(ClassCastException.class, () -> { exec("float x = 1F; int y = 2; return x << y;"); });
expectScriptThrows(ClassCastException.class, () -> { exec("double x = 1D; int y = 2L; return x << y;"); });
}
public void testBogusShiftsConst() {
expectScriptThrows(ClassCastException.class, () -> { exec("return 1L << 2F;"); });
expectScriptThrows(ClassCastException.class, () -> { exec("return 1L << 2.0;"); });
expectScriptThrows(ClassCastException.class, () -> { exec("return 1F << 2;"); });
expectScriptThrows(ClassCastException.class, () -> { exec("return 1D << 2L"); });
}
public void testLshDef() {
assertEquals(2, exec("def x = (byte)1; def y = (byte)1; return x << y"));
assertEquals(2, exec("def x = (short)1; def y = (byte)1; return x << y"));
assertEquals(2, exec("def x = (char)1; def y = (byte)1; return x << y"));
assertEquals(2, exec("def x = (int)1; def y = (byte)1; return x << y"));
assertEquals(2L, exec("def x = (long)1; def y = (byte)1; return x << y"));
assertEquals(2, exec("def x = (byte)1; def y = (short)1; return x << y"));
assertEquals(2, exec("def x = (short)1; def y = (short)1; return x << y"));
assertEquals(2, exec("def x = (char)1; def y = (short)1; return x << y"));
assertEquals(2, exec("def x = (int)1; def y = (short)1; return x << y"));
assertEquals(2L, exec("def x = (long)1; def y = (short)1; return x << y"));
assertEquals(2, exec("def x = (byte)1; def y = (char)1; return x << y"));
assertEquals(2, exec("def x = (short)1; def y = (char)1; return x << y"));
assertEquals(2, exec("def x = (char)1; def y = (char)1; return x << y"));
assertEquals(2, exec("def x = (int)1; def y = (char)1; return x << y"));
assertEquals(2L, exec("def x = (long)1; def y = (char)1; return x << y"));
assertEquals(2, exec("def x = (byte)1; def y = (int)1; return x << y"));
assertEquals(2, exec("def x = (short)1; def y = (int)1; return x << y"));
assertEquals(2, exec("def x = (char)1; def y = (int)1; return x << y"));
assertEquals(2, exec("def x = (int)1; def y = (int)1; return x << y"));
assertEquals(2L, exec("def x = (long)1; def y = (int)1; return x << y"));
assertEquals(2, exec("def x = (byte)1; def y = (long)1; return x << y"));
assertEquals(2, exec("def x = (short)1; def y = (long)1; return x << y"));
assertEquals(2, exec("def x = (char)1; def y = (long)1; return x << y"));
assertEquals(2, exec("def x = (int)1; def y = (long)1; return x << y"));
assertEquals(2L, exec("def x = (long)1; def y = (long)1; return x << y"));
assertEquals(2, exec("def x = (byte)1; def y = (byte)1; return x << y"));
assertEquals(2, exec("def x = (short)1; def y = (short)1; return x << y"));
assertEquals(2, exec("def x = (char)1; def y = (char)1; return x << y"));
assertEquals(2, exec("def x = (int)1; def y = (int)1; return x << y"));
assertEquals(2L, exec("def x = (long)1; def y = (long)1; return x << y"));
}
public void testLshDefTypedLHS() {
assertEquals(2, exec("byte x = (byte)1; def y = (byte)1; return x << y"));
assertEquals(2, exec("short x = (short)1; def y = (byte)1; return x << y"));
assertEquals(2, exec("char x = (char)1; def y = (byte)1; return x << y"));
assertEquals(2, exec("int x = (int)1; def y = (byte)1; return x << y"));
assertEquals(2L, exec("long x = (long)1; def y = (byte)1; return x << y"));
assertEquals(2, exec("byte x = (byte)1; def y = (short)1; return x << y"));
assertEquals(2, exec("short x = (short)1; def y = (short)1; return x << y"));
assertEquals(2, exec("char x = (char)1; def y = (short)1; return x << y"));
assertEquals(2, exec("int x = (int)1; def y = (short)1; return x << y"));
assertEquals(2L, exec("long x = (long)1; def y = (short)1; return x << y"));
assertEquals(2, exec("byte x = (byte)1; def y = (char)1; return x << y"));
assertEquals(2, exec("short x = (short)1; def y = (char)1; return x << y"));
assertEquals(2, exec("char x = (char)1; def y = (char)1; return x << y"));
assertEquals(2, exec("int x = (int)1; def y = (char)1; return x << y"));
assertEquals(2L, exec("long x = (long)1; def y = (char)1; return x << y"));
assertEquals(2, exec("byte x = (byte)1; def y = (int)1; return x << y"));
assertEquals(2, exec("short x = (short)1; def y = (int)1; return x << y"));
assertEquals(2, exec("char x = (char)1; def y = (int)1; return x << y"));
assertEquals(2, exec("int x = (int)1; def y = (int)1; return x << y"));
assertEquals(2L, exec("long x = (long)1; def y = (int)1; return x << y"));
assertEquals(2, exec("byte x = (byte)1; def y = (long)1; return x << y"));
assertEquals(2, exec("short x = (short)1; def y = (long)1; return x << y"));
assertEquals(2, exec("char x = (char)1; def y = (long)1; return x << y"));
assertEquals(2, exec("int x = (int)1; def y = (long)1; return x << y"));
assertEquals(2L, exec("long x = (long)1; def y = (long)1; return x << y"));
assertEquals(2, exec("byte x = (byte)1; def y = (byte)1; return x << y"));
assertEquals(2, exec("short x = (short)1; def y = (short)1; return x << y"));
assertEquals(2, exec("char x = (char)1; def y = (char)1; return x << y"));
assertEquals(2, exec("int x = (int)1; def y = (int)1; return x << y"));
assertEquals(2L, exec("long x = (long)1; def y = (long)1; return x << y"));
}
public void testLshDefTypedRHS() {
assertEquals(2, exec("def x = (byte)1; byte y = (byte)1; return x << y"));
assertEquals(2, exec("def x = (short)1; byte y = (byte)1; return x << y"));
assertEquals(2, exec("def x = (char)1; byte y = (byte)1; return x << y"));
assertEquals(2, exec("def x = (int)1; byte y = (byte)1; return x << y"));
assertEquals(2L, exec("def x = (long)1; byte y = (byte)1; return x << y"));
assertEquals(2, exec("def x = (byte)1; short y = (short)1; return x << y"));
assertEquals(2, exec("def x = (short)1; short y = (short)1; return x << y"));
assertEquals(2, exec("def x = (char)1; short y = (short)1; return x << y"));
assertEquals(2, exec("def x = (int)1; short y = (short)1; return x << y"));
assertEquals(2L, exec("def x = (long)1; short y = (short)1; return x << y"));
assertEquals(2, exec("def x = (byte)1; char y = (char)1; return x << y"));
assertEquals(2, exec("def x = (short)1; char y = (char)1; return x << y"));
assertEquals(2, exec("def x = (char)1; char y = (char)1; return x << y"));
assertEquals(2, exec("def x = (int)1; char y = (char)1; return x << y"));
assertEquals(2L, exec("def x = (long)1; char y = (char)1; return x << y"));
assertEquals(2, exec("def x = (byte)1; int y = (int)1; return x << y"));
assertEquals(2, exec("def x = (short)1; int y = (int)1; return x << y"));
assertEquals(2, exec("def x = (char)1; int y = (int)1; return x << y"));
assertEquals(2, exec("def x = (int)1; int y = (int)1; return x << y"));
assertEquals(2L, exec("def x = (long)1; int y = (int)1; return x << y"));
assertEquals(2, exec("def x = (byte)1; long y = (long)1; return x << y"));
assertEquals(2, exec("def x = (short)1; long y = (long)1; return x << y"));
assertEquals(2, exec("def x = (char)1; long y = (long)1; return x << y"));
assertEquals(2, exec("def x = (int)1; long y = (long)1; return x << y"));
assertEquals(2L, exec("def x = (long)1; long y = (long)1; return x << y"));
assertEquals(2, exec("def x = (byte)1; byte y = (byte)1; return x << y"));
assertEquals(2, exec("def x = (short)1; short y = (short)1; return x << y"));
assertEquals(2, exec("def x = (char)1; char y = (char)1; return x << y"));
assertEquals(2, exec("def x = (int)1; int y = (int)1; return x << y"));
assertEquals(2L, exec("def x = (long)1; long y = (long)1; return x << y"));
}
public void testRshDef() {
assertEquals(2, exec("def x = (byte)4; def y = (byte)1; return x >> y"));
assertEquals(2, exec("def x = (short)4; def y = (byte)1; return x >> y"));
assertEquals(2, exec("def x = (char)4; def y = (byte)1; return x >> y"));
assertEquals(2, exec("def x = (int)4; def y = (byte)1; return x >> y"));
assertEquals(2L, exec("def x = (long)4; def y = (byte)1; return x >> y"));
assertEquals(2, exec("def x = (byte)4; def y = (short)1; return x >> y"));
assertEquals(2, exec("def x = (short)4; def y = (short)1; return x >> y"));
assertEquals(2, exec("def x = (char)4; def y = (short)1; return x >> y"));
assertEquals(2, exec("def x = (int)4; def y = (short)1; return x >> y"));
assertEquals(2L, exec("def x = (long)4; def y = (short)1; return x >> y"));
assertEquals(2, exec("def x = (byte)4; def y = (char)1; return x >> y"));
assertEquals(2, exec("def x = (short)4; def y = (char)1; return x >> y"));
assertEquals(2, exec("def x = (char)4; def y = (char)1; return x >> y"));
assertEquals(2, exec("def x = (int)4; def y = (char)1; return x >> y"));
assertEquals(2L, exec("def x = (long)4; def y = (char)1; return x >> y"));
assertEquals(2, exec("def x = (byte)4; def y = (int)1; return x >> y"));
assertEquals(2, exec("def x = (short)4; def y = (int)1; return x >> y"));
assertEquals(2, exec("def x = (char)4; def y = (int)1; return x >> y"));
assertEquals(2, exec("def x = (int)4; def y = (int)1; return x >> y"));
assertEquals(2L, exec("def x = (long)4; def y = (int)1; return x >> y"));
assertEquals(2, exec("def x = (byte)4; def y = (long)1; return x >> y"));
assertEquals(2, exec("def x = (short)4; def y = (long)1; return x >> y"));
assertEquals(2, exec("def x = (char)4; def y = (long)1; return x >> y"));
assertEquals(2, exec("def x = (int)4; def y = (long)1; return x >> y"));
assertEquals(2L, exec("def x = (long)4; def y = (long)1; return x >> y"));
assertEquals(2, exec("def x = (byte)4; def y = (byte)1; return x >> y"));
assertEquals(2, exec("def x = (short)4; def y = (short)1; return x >> y"));
assertEquals(2, exec("def x = (char)4; def y = (char)1; return x >> y"));
assertEquals(2, exec("def x = (int)4; def y = (int)1; return x >> y"));
assertEquals(2L, exec("def x = (long)4; def y = (long)1; return x >> y"));
}
public void testRshDefTypeLHS() {
assertEquals(2, exec("byte x = (byte)4; def y = (byte)1; return x >> y"));
assertEquals(2, exec("short x = (short)4; def y = (byte)1; return x >> y"));
assertEquals(2, exec("char x = (char)4; def y = (byte)1; return x >> y"));
assertEquals(2, exec("int x = (int)4; def y = (byte)1; return x >> y"));
assertEquals(2L, exec("long x = (long)4; def y = (byte)1; return x >> y"));
assertEquals(2, exec("byte x = (byte)4; def y = (short)1; return x >> y"));
assertEquals(2, exec("short x = (short)4; def y = (short)1; return x >> y"));
assertEquals(2, exec("char x = (char)4; def y = (short)1; return x >> y"));
assertEquals(2, exec("int x = (int)4; def y = (short)1; return x >> y"));
assertEquals(2L, exec("long x = (long)4; def y = (short)1; return x >> y"));
assertEquals(2, exec("byte x = (byte)4; def y = (char)1; return x >> y"));
assertEquals(2, exec("short x = (short)4; def y = (char)1; return x >> y"));
assertEquals(2, exec("char x = (char)4; def y = (char)1; return x >> y"));
assertEquals(2, exec("int x = (int)4; def y = (char)1; return x >> y"));
assertEquals(2L, exec("long x = (long)4; def y = (char)1; return x >> y"));
assertEquals(2, exec("byte x = (byte)4; def y = (int)1; return x >> y"));
assertEquals(2, exec("short x = (short)4; def y = (int)1; return x >> y"));
assertEquals(2, exec("char x = (char)4; def y = (int)1; return x >> y"));
assertEquals(2, exec("int x = (int)4; def y = (int)1; return x >> y"));
assertEquals(2L, exec("long x = (long)4; def y = (int)1; return x >> y"));
assertEquals(2, exec("byte x = (byte)4; def y = (long)1; return x >> y"));
assertEquals(2, exec("short x = (short)4; def y = (long)1; return x >> y"));
assertEquals(2, exec("char x = (char)4; def y = (long)1; return x >> y"));
assertEquals(2, exec("int x = (int)4; def y = (long)1; return x >> y"));
assertEquals(2L, exec("long x = (long)4; def y = (long)1; return x >> y"));
assertEquals(2, exec("byte x = (byte)4; def y = (byte)1; return x >> y"));
assertEquals(2, exec("short x = (short)4; def y = (short)1; return x >> y"));
assertEquals(2, exec("char x = (char)4; def y = (char)1; return x >> y"));
assertEquals(2, exec("int x = (int)4; def y = (int)1; return x >> y"));
assertEquals(2L, exec("long x = (long)4; def y = (long)1; return x >> y"));
}
public void testRshDefTypedLHS() {
assertEquals(2, exec("def x = (byte)4; byte y = (byte)1; return x >> y"));
assertEquals(2, exec("def x = (short)4; byte y = (byte)1; return x >> y"));
assertEquals(2, exec("def x = (char)4; byte y = (byte)1; return x >> y"));
assertEquals(2, exec("def x = (int)4; byte y = (byte)1; return x >> y"));
assertEquals(2L, exec("def x = (long)4; byte y = (byte)1; return x >> y"));
assertEquals(2, exec("def x = (byte)4; short y = (short)1; return x >> y"));
assertEquals(2, exec("def x = (short)4; short y = (short)1; return x >> y"));
assertEquals(2, exec("def x = (char)4; short y = (short)1; return x >> y"));
assertEquals(2, exec("def x = (int)4; short y = (short)1; return x >> y"));
assertEquals(2L, exec("def x = (long)4; short y = (short)1; return x >> y"));
assertEquals(2, exec("def x = (byte)4; char y = (char)1; return x >> y"));
assertEquals(2, exec("def x = (short)4; char y = (char)1; return x >> y"));
assertEquals(2, exec("def x = (char)4; char y = (char)1; return x >> y"));
assertEquals(2, exec("def x = (int)4; char y = (char)1; return x >> y"));
assertEquals(2L, exec("def x = (long)4; char y = (char)1; return x >> y"));
assertEquals(2, exec("def x = (byte)4; int y = (int)1; return x >> y"));
assertEquals(2, exec("def x = (short)4; int y = (int)1; return x >> y"));
assertEquals(2, exec("def x = (char)4; int y = (int)1; return x >> y"));
assertEquals(2, exec("def x = (int)4; int y = (int)1; return x >> y"));
assertEquals(2L, exec("def x = (long)4; int y = (int)1; return x >> y"));
assertEquals(2, exec("def x = (byte)4; long y = (long)1; return x >> y"));
assertEquals(2, exec("def x = (short)4; long y = (long)1; return x >> y"));
assertEquals(2, exec("def x = (char)4; long y = (long)1; return x >> y"));
assertEquals(2, exec("def x = (int)4; long y = (long)1; return x >> y"));
assertEquals(2L, exec("def x = (long)4; long y = (long)1; return x >> y"));
assertEquals(2, exec("def x = (byte)4; byte y = (byte)1; return x >> y"));
assertEquals(2, exec("def x = (short)4; short y = (short)1; return x >> y"));
assertEquals(2, exec("def x = (char)4; char y = (char)1; return x >> y"));
assertEquals(2, exec("def x = (int)4; int y = (int)1; return x >> y"));
assertEquals(2L, exec("def x = (long)4; long y = (long)1; return x >> y"));
}
public void testUshDef() {
assertEquals(2, exec("def x = (byte)4; def y = (byte)1; return x >>> y"));
assertEquals(2, exec("def x = (short)4; def y = (byte)1; return x >>> y"));
assertEquals(2, exec("def x = (char)4; def y = (byte)1; return x >>> y"));
assertEquals(2, exec("def x = (int)4; def y = (byte)1; return x >>> y"));
assertEquals(2L, exec("def x = (long)4; def y = (byte)1; return x >>> y"));
assertEquals(2, exec("def x = (byte)4; def y = (short)1; return x >>> y"));
assertEquals(2, exec("def x = (short)4; def y = (short)1; return x >>> y"));
assertEquals(2, exec("def x = (char)4; def y = (short)1; return x >>> y"));
assertEquals(2, exec("def x = (int)4; def y = (short)1; return x >>> y"));
assertEquals(2L, exec("def x = (long)4; def y = (short)1; return x >>> y"));
assertEquals(2, exec("def x = (byte)4; def y = (char)1; return x >>> y"));
assertEquals(2, exec("def x = (short)4; def y = (char)1; return x >>> y"));
assertEquals(2, exec("def x = (char)4; def y = (char)1; return x >>> y"));
assertEquals(2, exec("def x = (int)4; def y = (char)1; return x >>> y"));
assertEquals(2L, exec("def x = (long)4; def y = (char)1; return x >>> y"));
assertEquals(2, exec("def x = (byte)4; def y = (int)1; return x >>> y"));
assertEquals(2, exec("def x = (short)4; def y = (int)1; return x >>> y"));
assertEquals(2, exec("def x = (char)4; def y = (int)1; return x >>> y"));
assertEquals(2, exec("def x = (int)4; def y = (int)1; return x >>> y"));
assertEquals(2L, exec("def x = (long)4; def y = (int)1; return x >>> y"));
assertEquals(2, exec("def x = (byte)4; def y = (long)1; return x >>> y"));
assertEquals(2, exec("def x = (short)4; def y = (long)1; return x >>> y"));
assertEquals(2, exec("def x = (char)4; def y = (long)1; return x >>> y"));
assertEquals(2, exec("def x = (int)4; def y = (long)1; return x >>> y"));
assertEquals(2L, exec("def x = (long)4; def y = (long)1; return x >>> y"));
assertEquals(2, exec("def x = (byte)4; def y = (byte)1; return x >>> y"));
assertEquals(2, exec("def x = (short)4; def y = (short)1; return x >>> y"));
assertEquals(2, exec("def x = (char)4; def y = (char)1; return x >>> y"));
assertEquals(2, exec("def x = (int)4; def y = (int)1; return x >>> y"));
assertEquals(2L, exec("def x = (long)4; def y = (long)1; return x >>> y"));
}
public void testUshDefTypedLHS() {
assertEquals(2, exec("byte x = (byte)4; def y = (byte)1; return x >>> y"));
assertEquals(2, exec("short x = (short)4; def y = (byte)1; return x >>> y"));
assertEquals(2, exec("char x = (char)4; def y = (byte)1; return x >>> y"));
assertEquals(2, exec("int x = (int)4; def y = (byte)1; return x >>> y"));
assertEquals(2L, exec("long x = (long)4; def y = (byte)1; return x >>> y"));
assertEquals(2, exec("byte x = (byte)4; def y = (short)1; return x >>> y"));
assertEquals(2, exec("short x = (short)4; def y = (short)1; return x >>> y"));
assertEquals(2, exec("char x = (char)4; def y = (short)1; return x >>> y"));
assertEquals(2, exec("int x = (int)4; def y = (short)1; return x >>> y"));
assertEquals(2L, exec("long x = (long)4; def y = (short)1; return x >>> y"));
assertEquals(2, exec("byte x = (byte)4; def y = (char)1; return x >>> y"));
assertEquals(2, exec("short x = (short)4; def y = (char)1; return x >>> y"));
assertEquals(2, exec("char x = (char)4; def y = (char)1; return x >>> y"));
assertEquals(2, exec("int x = (int)4; def y = (char)1; return x >>> y"));
assertEquals(2L, exec("long x = (long)4; def y = (char)1; return x >>> y"));
assertEquals(2, exec("byte x = (byte)4; def y = (int)1; return x >>> y"));
assertEquals(2, exec("short x = (short)4; def y = (int)1; return x >>> y"));
assertEquals(2, exec("char x = (char)4; def y = (int)1; return x >>> y"));
assertEquals(2, exec("int x = (int)4; def y = (int)1; return x >>> y"));
assertEquals(2L, exec("long x = (long)4; def y = (int)1; return x >>> y"));
assertEquals(2, exec("byte x = (byte)4; def y = (long)1; return x >>> y"));
assertEquals(2, exec("short x = (short)4; def y = (long)1; return x >>> y"));
assertEquals(2, exec("char x = (char)4; def y = (long)1; return x >>> y"));
assertEquals(2, exec("int x = (int)4; def y = (long)1; return x >>> y"));
assertEquals(2L, exec("long x = (long)4; def y = (long)1; return x >>> y"));
assertEquals(2, exec("byte x = (byte)4; def y = (byte)1; return x >>> y"));
assertEquals(2, exec("short x = (short)4; def y = (short)1; return x >>> y"));
assertEquals(2, exec("char x = (char)4; def y = (char)1; return x >>> y"));
assertEquals(2, exec("int x = (int)4; def y = (int)1; return x >>> y"));
assertEquals(2L, exec("long x = (long)4; def y = (long)1; return x >>> y"));
}
public void testUshDefTypedRHS() {
assertEquals(2, exec("def x = (byte)4; byte y = (byte)1; return x >>> y"));
assertEquals(2, exec("def x = (short)4; byte y = (byte)1; return x >>> y"));
assertEquals(2, exec("def x = (char)4; byte y = (byte)1; return x >>> y"));
assertEquals(2, exec("def x = (int)4; byte y = (byte)1; return x >>> y"));
assertEquals(2L, exec("def x = (long)4; byte y = (byte)1; return x >>> y"));
assertEquals(2, exec("def x = (byte)4; short y = (short)1; return x >>> y"));
assertEquals(2, exec("def x = (short)4; short y = (short)1; return x >>> y"));
assertEquals(2, exec("def x = (char)4; short y = (short)1; return x >>> y"));
assertEquals(2, exec("def x = (int)4; short y = (short)1; return x >>> y"));
assertEquals(2L, exec("def x = (long)4; short y = (short)1; return x >>> y"));
assertEquals(2, exec("def x = (byte)4; char y = (char)1; return x >>> y"));
assertEquals(2, exec("def x = (short)4; char y = (char)1; return x >>> y"));
assertEquals(2, exec("def x = (char)4; char y = (char)1; return x >>> y"));
assertEquals(2, exec("def x = (int)4; char y = (char)1; return x >>> y"));
assertEquals(2L, exec("def x = (long)4; char y = (char)1; return x >>> y"));
assertEquals(2, exec("def x = (byte)4; int y = (int)1; return x >>> y"));
assertEquals(2, exec("def x = (short)4; int y = (int)1; return x >>> y"));
assertEquals(2, exec("def x = (char)4; int y = (int)1; return x >>> y"));
assertEquals(2, exec("def x = (int)4; int y = (int)1; return x >>> y"));
assertEquals(2L, exec("def x = (long)4; int y = (int)1; return x >>> y"));
assertEquals(2, exec("def x = (byte)4; long y = (long)1; return x >>> y"));
assertEquals(2, exec("def x = (short)4; long y = (long)1; return x >>> y"));
assertEquals(2, exec("def x = (char)4; long y = (long)1; return x >>> y"));
assertEquals(2, exec("def x = (int)4; long y = (long)1; return x >>> y"));
assertEquals(2L, exec("def x = (long)4; long y = (long)1; return x >>> y"));
assertEquals(2, exec("def x = (byte)4; byte y = (byte)1; return x >>> y"));
assertEquals(2, exec("def x = (short)4; short y = (short)1; return x >>> y"));
assertEquals(2, exec("def x = (char)4; char y = (char)1; return x >>> y"));
assertEquals(2, exec("def x = (int)4; int y = (int)1; return x >>> y"));
assertEquals(2L, exec("def x = (long)4; long y = (long)1; return x >>> y"));
}
public void testBogusDefShifts() {
expectScriptThrows(ClassCastException.class, () -> { exec("def x = 1L; def y = 2F; return x << y;"); });
expectScriptThrows(ClassCastException.class, () -> { exec("def x = 1; def y = 2D; return x << y;"); });
expectScriptThrows(ClassCastException.class, () -> { exec("def x = 1F; def y = 2; return x << y;"); });
expectScriptThrows(ClassCastException.class, () -> { exec("def x = 1D; def y = 2L; return x << y;"); });
expectScriptThrows(ClassCastException.class, () -> { exec("def x = 1L; def y = 2F; return x >> y;"); });
expectScriptThrows(ClassCastException.class, () -> { exec("def x = 1; def y = 2D; return x >> y;"); });
expectScriptThrows(ClassCastException.class, () -> { exec("def x = 1F; def y = 2; return x >> y;"); });
expectScriptThrows(ClassCastException.class, () -> { exec("def x = 1D; def y = 2L; return x >> y;"); });
expectScriptThrows(ClassCastException.class, () -> { exec("def x = 1L; def y = 2F; return x >>> y;"); });
expectScriptThrows(ClassCastException.class, () -> { exec("def x = 1; def y = 2D; return x >>> y;"); });
expectScriptThrows(ClassCastException.class, () -> { exec("def x = 1F; def y = 2; return x >>> y;"); });
expectScriptThrows(ClassCastException.class, () -> { exec("def x = 1D; def y = 2L; return x >>> y;"); });
}
public void testBogusDefShiftsTypedLHS() {
expectScriptThrows(ClassCastException.class, () -> { exec("long x = 1L; def y = 2F; return x << y;"); });
expectScriptThrows(ClassCastException.class, () -> { exec("int x = 1; def y = 2D; return x << y;"); });
expectScriptThrows(ClassCastException.class, () -> { exec("float x = 1F; def y = 2; return x << y;"); });
expectScriptThrows(ClassCastException.class, () -> { exec("double x = 1D; def y = 2L; return x << y;"); });
expectScriptThrows(ClassCastException.class, () -> { exec("long x = 1L; def y = 2F; return x >> y;"); });
expectScriptThrows(ClassCastException.class, () -> { exec("int x = 1; def y = 2D; return x >> y;"); });
expectScriptThrows(ClassCastException.class, () -> { exec("float x = 1F; def y = 2; return x >> y;"); });
expectScriptThrows(ClassCastException.class, () -> { exec("double x = 1D; def y = 2L; return x >> y;"); });
expectScriptThrows(ClassCastException.class, () -> { exec("long x = 1L; def y = 2F; return x >>> y;"); });
expectScriptThrows(ClassCastException.class, () -> { exec("int x = 1; def y = 2D; return x >>> y;"); });
expectScriptThrows(ClassCastException.class, () -> { exec("float x = 1F; def y = 2; return x >>> y;"); });
expectScriptThrows(ClassCastException.class, () -> { exec("double x = 1D; def y = 2L; return x >>> y;"); });
}
public void testBogusDefShiftsTypedRHS() {
expectScriptThrows(ClassCastException.class, () -> { exec("def x = 1L; float y = 2F; return x << y;"); });
expectScriptThrows(ClassCastException.class, () -> { exec("def x = 1; double y = 2D; return x << y;"); });
expectScriptThrows(ClassCastException.class, () -> { exec("def x = 1F; int y = 2; return x << y;"); });
expectScriptThrows(ClassCastException.class, () -> { exec("def x = 1D; long y = 2L; return x << y;"); });
expectScriptThrows(ClassCastException.class, () -> { exec("def x = 1L; float y = 2F; return x >> y;"); });
expectScriptThrows(ClassCastException.class, () -> { exec("def x = 1; double y = 2D; return x >> y;"); });
expectScriptThrows(ClassCastException.class, () -> { exec("def x = 1F; int y = 2; return x >> y;"); });
expectScriptThrows(ClassCastException.class, () -> { exec("def x = 1D; long y = 2L; return x >> y;"); });
expectScriptThrows(ClassCastException.class, () -> { exec("def x = 1L; float y = 2F; return x >>> y;"); });
expectScriptThrows(ClassCastException.class, () -> { exec("def x = 1; double y = 2D; return x >>> y;"); });
expectScriptThrows(ClassCastException.class, () -> { exec("def x = 1F; int y = 2; return x >>> y;"); });
expectScriptThrows(ClassCastException.class, () -> { exec("def x = 1D; long y = 2L; return x >>> y;"); });
}
public void testLshCompoundAssignment() {
// byte
assertEquals((byte) 60, exec("byte x = 15; x <<= 2; return x;"));
assertEquals((byte) -60, exec("byte x = (byte) -15; x <<= 2; return x;"));
// short
assertEquals((short) 60, exec("short x = 15; x <<= 2; return x;"));
assertEquals((short) -60, exec("short x = (short) -15; x <<= 2; return x;"));
// char
assertEquals((char) 60, exec("char x = (char) 15; x <<= 2; return x;"));
// int
assertEquals(60, exec("int x = 15; x <<= 2; return x;"));
assertEquals(-60, exec("int x = -15; x <<= 2; return x;"));
// long
assertEquals(60L, exec("long x = 15L; x <<= 2; return x;"));
assertEquals(-60L, exec("long x = -15L; x <<= 2; return x;"));
// long shift distance
assertEquals(60, exec("int x = 15; x <<= 2L; return x;"));
assertEquals(-60, exec("int x = -15; x <<= 2L; return x;"));
}
// Verifies the signed '>>=' compound assignment: the sign bit is propagated,
// so negative values stay negative across all integral types.
public void testRshCompoundAssignment() {
    // byte
    assertEquals((byte) 15, exec("byte x = 60; x >>= 2; return x;"));
    assertEquals((byte) -15, exec("byte x = (byte) -60; x >>= 2; return x;"));
    // short
    assertEquals((short) 15, exec("short x = 60; x >>= 2; return x;"));
    assertEquals((short) -15, exec("short x = (short) -60; x >>= 2; return x;"));
    // char
    assertEquals((char) 15, exec("char x = (char) 60; x >>= 2; return x;"));
    // int
    assertEquals(15, exec("int x = 60; x >>= 2; return x;"));
    assertEquals(-15, exec("int x = -60; x >>= 2; return x;"));
    // long
    assertEquals(15L, exec("long x = 60L; x >>= 2; return x;"));
    assertEquals(-15L, exec("long x = -60L; x >>= 2; return x;"));
    // long shift distance
    assertEquals(15, exec("int x = 60; x >>= 2L; return x;"));
    assertEquals(-15, exec("int x = -60; x >>= 2L; return x;"));
}
// Verifies the unsigned '>>>=' compound assignment. Note the sub-int cases:
// the operand is promoted to int, shifted with zero-fill, then narrowed back,
// so e.g. (byte) -60 >>>= 2 still yields (byte) -15 (low byte of -60 >>> 2).
public void testUshCompoundAssignment() {
    // byte
    assertEquals((byte) 15, exec("byte x = 60; x >>>= 2; return x;"));
    assertEquals((byte) -15, exec("byte x = (byte) -60; x >>>= 2; return x;"));
    // short
    assertEquals((short) 15, exec("short x = 60; x >>>= 2; return x;"));
    assertEquals((short) -15, exec("short x = (short) -60; x >>>= 2; return x;"));
    // char
    assertEquals((char) 15, exec("char x = (char) 60; x >>>= 2; return x;"));
    // int
    assertEquals(15, exec("int x = 60; x >>>= 2; return x;"));
    assertEquals(-60 >>> 2, exec("int x = -60; x >>>= 2; return x;"));
    // long
    assertEquals(15L, exec("long x = 60L; x >>>= 2; return x;"));
    assertEquals(-60L >>> 2, exec("long x = -60L; x >>>= 2; return x;"));
    // long shift distance
    assertEquals(15, exec("int x = 60; x >>>= 2L; return x;"));
    assertEquals(-60 >>> 2, exec("int x = -60; x >>>= 2L; return x;"));
}
// Shift compound assignments require integral operands on both sides; a
// float/double on either side must be rejected with a ClassCastException.
public void testBogusCompoundAssignment() {
    expectScriptThrows(ClassCastException.class, () -> { exec("long x = 1L; float y = 2; x <<= y;"); });
    expectScriptThrows(ClassCastException.class, () -> { exec("int x = 1; double y = 2L; x <<= y;"); });
    expectScriptThrows(ClassCastException.class, () -> { exec("float x = 1F; int y = 2; x <<= y;"); });
    expectScriptThrows(ClassCastException.class, () -> { exec("double x = 1D; int y = 2L; x <<= y;"); });
}
// Same rejection as above, but with a compile-time-constant right-hand side,
// exercising the constant-folding path of the compiler.
public void testBogusCompoundAssignmentConst() {
    expectScriptThrows(ClassCastException.class, () -> { exec("int x = 1L; x <<= 2F;"); });
    expectScriptThrows(ClassCastException.class, () -> { exec("int x = 1L; x <<= 2.0;"); });
    expectScriptThrows(ClassCastException.class, () -> { exec("float x = 1F; x <<= 2;"); });
    expectScriptThrows(ClassCastException.class, () -> { exec("double x = 1D; x <<= 2L;"); });
}
// Same rejection again when one operand is dynamically typed ('def'):
// the check must still fail at runtime with a ClassCastException.
public void testBogusCompoundAssignmentDef() {
    expectScriptThrows(ClassCastException.class, () -> { exec("def x = 1L; float y = 2; x <<= y;"); });
    expectScriptThrows(ClassCastException.class, () -> { exec("def x = 1; double y = 2L; x <<= y;"); });
    expectScriptThrows(ClassCastException.class, () -> { exec("float x = 1F; def y = 2; x <<= y;"); });
    expectScriptThrows(ClassCastException.class, () -> { exec("double x = 1D; def y = 2L; x <<= y;"); });
}
}
| ShiftTests |
java | apache__kafka | share-coordinator/src/main/java/org/apache/kafka/coordinator/share/ShareCoordinatorShard.java | {
"start": 7094,
"end": 51587
} | interface
____ this;
}
@Override
public CoordinatorShardBuilder<ShareCoordinatorShard, CoordinatorRecord> withCoordinatorMetrics(CoordinatorMetrics coordinatorMetrics) {
    // Fluent setter; non-null and correct subtype are validated later in build().
    this.coordinatorMetrics = coordinatorMetrics;
    return this;
}
@Override
public CoordinatorShardBuilder<ShareCoordinatorShard, CoordinatorRecord> withTopicPartition(TopicPartition topicPartition) {
    // Fluent setter for the __share_group_state partition this shard leads; validated in build().
    this.topicPartition = topicPartition;
    return this;
}
@Override
@SuppressWarnings("NPathComplexity")
public ShareCoordinatorShard build() {
    // The log context is the only optional dependency: default it instead of failing.
    if (logContext == null) {
        logContext = new LogContext();
    }
    if (config == null) {
        throw new IllegalArgumentException("Config must be set.");
    }
    if (snapshotRegistry == null) {
        throw new IllegalArgumentException("SnapshotRegistry must be set.");
    }
    // instanceof is false for null, so this single check also rejects an unset metrics object.
    if (!(coordinatorMetrics instanceof ShareCoordinatorMetrics)) {
        throw new IllegalArgumentException("CoordinatorMetrics must be set and be of type ShareCoordinatorMetrics.");
    }
    if (topicPartition == null) {
        throw new IllegalArgumentException("TopicPartition must be set.");
    }
    ShareCoordinatorMetrics shareMetrics = (ShareCoordinatorMetrics) coordinatorMetrics;
    ShareCoordinatorMetricsShard metricsShard = shareMetrics.newMetricsShard(snapshotRegistry, topicPartition);
    return new ShareCoordinatorShard(
        logContext,
        config,
        coordinatorMetrics,
        metricsShard,
        snapshotRegistry,
        time
    );
}
}
// Convenience constructor: wires in a fresh ShareCoordinatorOffsetsManager
// backed by the same snapshot registry as the rest of the shard state.
ShareCoordinatorShard(
    LogContext logContext,
    ShareCoordinatorConfig config,
    CoordinatorMetrics coordinatorMetrics,
    CoordinatorMetricsShard metricsShard,
    SnapshotRegistry snapshotRegistry,
    Time time
) {
    this(logContext, config, coordinatorMetrics, metricsShard, snapshotRegistry, new ShareCoordinatorOffsetsManager(snapshotRegistry), time);
}
// Primary constructor, also used by tests to inject an offsets manager.
// All in-memory state lives in TimelineHashMaps bound to the shared snapshot
// registry so it can be rolled back by the coordinator runtime on failed writes.
ShareCoordinatorShard(
    LogContext logContext,
    ShareCoordinatorConfig config,
    CoordinatorMetrics coordinatorMetrics,
    CoordinatorMetricsShard metricsShard,
    SnapshotRegistry snapshotRegistry,
    ShareCoordinatorOffsetsManager offsetsManager,
    Time time
) {
    this.log = logContext.logger(ShareCoordinatorShard.class);
    this.config = config;
    this.coordinatorMetrics = coordinatorMetrics;
    this.metricsShard = metricsShard;
    this.shareStateMap = new TimelineHashMap<>(snapshotRegistry, 0);
    this.leaderEpochMap = new TimelineHashMap<>(snapshotRegistry, 0);
    this.snapshotUpdateCount = new TimelineHashMap<>(snapshotRegistry, 0);
    this.stateEpochMap = new TimelineHashMap<>(snapshotRegistry, 0);
    this.offsetsManager = offsetsManager;
    this.time = time;
}
@Override
public void onLoaded(CoordinatorMetadataImage newImage) {
    // Called once the shard has replayed its partition; capture the current
    // metadata image and start publishing shard-level metrics.
    this.metadataImage = newImage;
    coordinatorMetrics.activateMetricsShard(metricsShard);
}
@Override
public void onNewMetadataImage(CoordinatorMetadataImage newImage, CoordinatorMetadataDelta delta) {
    // Only the full image is kept; the delta is not needed by this shard.
    this.metadataImage = newImage;
}
@Override
public void onUnloaded() {
    // Stop publishing this shard's metrics when leadership moves away.
    coordinatorMetrics.deactivateMetricsShard(metricsShard);
}
@Override
public void replay(long offset, long producerId, short producerEpoch, CoordinatorRecord record) throws RuntimeException {
    // Applies a single committed record from the __share_group_state partition
    // to the in-memory soft state. Only snapshot and update record types are
    // meaningful here; anything else (including future types) is ignored.
    ApiMessage key = record.key();
    ApiMessageAndVersion value = record.value();
    try {
        switch (CoordinatorRecordType.fromId(key.apiKey())) {
            case SHARE_SNAPSHOT:
                handleShareSnapshot((ShareSnapshotKey) key, (ShareSnapshotValue) messageOrNull(value), offset);
                break;
            case SHARE_UPDATE:
                handleShareUpdate((ShareUpdateKey) key, (ShareUpdateValue) messageOrNull(value));
                break;
            default:
                // Noop
        }
    } catch (UnsupportedVersionException ex) {
        // Ignore records written by a newer, unknown schema version rather than failing replay.
    }
}
// Applies a replayed ShareSnapshot record. A null value is a tombstone and
// wipes all soft state for the key; otherwise the snapshot fully replaces the
// current state. The offsets manager is informed either way so it can track
// the last redundant offset for pruning.
private void handleShareSnapshot(ShareSnapshotKey key, ShareSnapshotValue value, long offset) {
    SharePartitionKey mapKey = SharePartitionKey.getInstance(key.groupId(), key.topicId(), key.partition());
    if (value == null) {
        log.debug("Tombstone records received for share partition key: {}", mapKey);
        // Consider this a tombstone.
        shareStateMap.remove(mapKey);
        leaderEpochMap.remove(mapKey);
        stateEpochMap.remove(mapKey);
        snapshotUpdateCount.remove(mapKey);
    } else {
        maybeUpdateLeaderEpochMap(mapKey, value.leaderEpoch());
        maybeUpdateStateEpochMap(mapKey, value.stateEpoch());

        ShareGroupOffset offsetRecord = ShareGroupOffset.fromRecord(value);
        // This record is the complete snapshot.
        shareStateMap.put(mapKey, offsetRecord);
        // If number of share updates is exceeded, then reset it.
        if (snapshotUpdateCount.containsKey(mapKey)) {
            if (snapshotUpdateCount.get(mapKey) >= config.shareCoordinatorSnapshotUpdateRecordsPerSnapshot()) {
                snapshotUpdateCount.put(mapKey, 0);
            }
        }
    }

    offsetsManager.updateState(mapKey, offset, value == null);
}
// Applies a replayed ShareUpdate record: an incremental delta that is merged
// into the existing snapshot state (or taken as-is when no state exists yet).
// Each update bumps the per-key update counter used to decide when the next
// full snapshot must be written.
private void handleShareUpdate(ShareUpdateKey key, ShareUpdateValue value) {
    SharePartitionKey mapKey = SharePartitionKey.getInstance(key.groupId(), key.topicId(), key.partition());
    maybeUpdateLeaderEpochMap(mapKey, value.leaderEpoch());

    // Share update does not hold state epoch information.

    ShareGroupOffset offsetRecord = ShareGroupOffset.fromRecord(value);
    // This is an incremental snapshot,
    // so we need to apply it to our current soft state.
    shareStateMap.compute(mapKey, (k, v) -> v == null ? offsetRecord : merge(v, value));
    snapshotUpdateCount.compute(mapKey, (k, v) -> v == null ? 0 : v + 1);
}
// Records the given leader epoch for the key if it is new or strictly higher
// than the one already tracked (leader epochs never move backwards).
private void maybeUpdateLeaderEpochMap(SharePartitionKey mapKey, int leaderEpoch) {
    // Single lookup instead of the putIfAbsent + get + put triple access:
    // same final state, one read and at most one write on the timeline map.
    Integer currentEpoch = leaderEpochMap.get(mapKey);
    if (currentEpoch == null || currentEpoch < leaderEpoch) {
        leaderEpochMap.put(mapKey, leaderEpoch);
    }
}
// Records the given state epoch for the key if it is new or strictly higher
// than the one already tracked (state epochs never move backwards).
private void maybeUpdateStateEpochMap(SharePartitionKey mapKey, int stateEpoch) {
    // Single lookup instead of the putIfAbsent + get + put triple access:
    // same final state, one read and at most one write on the timeline map.
    Integer currentEpoch = stateEpochMap.get(mapKey);
    if (currentEpoch == null || currentEpoch < stateEpoch) {
        stateEpochMap.put(mapKey, stateEpoch);
    }
}
@Override
public void replayEndTransactionMarker(long producerId, short producerEpoch, TransactionResult result) throws RuntimeException {
    // The share coordinator does not use transactional writes; delegate to the
    // interface default implementation (presumably a no-op — confirm in CoordinatorShard).
    CoordinatorShard.super.replayEndTransactionMarker(producerId, producerEpoch, result);
}
/**
 * This method generates the ShareSnapshotValue record corresponding to the requested topic partition information.
 * The generated record is then written to the __share_group_state topic and replayed to the in-memory state
 * of the coordinator shard, shareStateMap, by CoordinatorRuntime.
 * <p>
 * This method as called by the ShareCoordinatorService will be provided with
 * the request data which covers only a single key i.e. group1:topic1:partition1. The implementation
 * below was done keeping this in mind.
 *
 * @param request - WriteShareGroupStateRequestData for a single key
 * @return CoordinatorResult(records, response)
 */
public CoordinatorResult<WriteShareGroupStateResponseData, CoordinatorRecord> writeState(
    WriteShareGroupStateRequestData request
) {
    // Records to write (with both key and value of snapshot type), response to caller
    // only one key will be there in the request by design.
    metricsShard.record(ShareCoordinatorMetrics.SHARE_COORDINATOR_WRITE_SENSOR_NAME);
    // Validate the request first; any error short-circuits with an error response and no records.
    Optional<CoordinatorResult<WriteShareGroupStateResponseData, CoordinatorRecord>> error = maybeGetWriteStateError(request);
    if (error.isPresent()) {
        return error.get();
    }

    WriteShareGroupStateRequestData.WriteStateData topicData = request.topics().get(0);
    WriteShareGroupStateRequestData.PartitionData partitionData = topicData.partitions().get(0);
    SharePartitionKey key = SharePartitionKey.getInstance(request.groupId(), topicData.topicId(), partitionData.partition());

    // generateShareStateRecord decides between a full snapshot and an incremental update record.
    CoordinatorRecord record = generateShareStateRecord(partitionData, key, false);
    // build successful response if record is correctly created
    WriteShareGroupStateResponseData responseData = new WriteShareGroupStateResponseData().setResults(
        List.of(WriteShareGroupStateResponse.toResponseWriteStateResult(key.topicId(),
            List.of(WriteShareGroupStateResponse.toResponsePartitionResult(
                key.partition()))
        ))
    );

    return new CoordinatorResult<>(List.of(record), responseData);
}
/**
 * Method reads data from the soft state and if needed updates the leader epoch.
 * It can happen that a read state call for a share partition has a higher leaderEpoch
 * value than seen so far.
 * In case an update is not required, empty record list will be generated along with a success response.
 *
 * @param request - represents ReadShareGroupStateRequestData
 * @return CoordinatorResult object
 */
public CoordinatorResult<ReadShareGroupStateResponseData, CoordinatorRecord> readStateAndMaybeUpdateLeaderEpoch(
    ReadShareGroupStateRequestData request
) {
    // Only one key will be there in the request by design.
    Optional<ReadShareGroupStateResponseData> error = maybeGetReadStateError(request);
    if (error.isPresent()) {
        return new CoordinatorResult<>(List.of(), error.get());
    }

    ReadShareGroupStateRequestData.ReadStateData topicData = request.topics().get(0);
    ReadShareGroupStateRequestData.PartitionData partitionData = topicData.partitions().get(0);

    Uuid topicId = topicData.topicId();
    int partitionId = partitionData.partition();
    int leaderEpoch = partitionData.leaderEpoch();

    SharePartitionKey key = SharePartitionKey.getInstance(request.groupId(), topicId, partitionId);

    // Present and non-null: maybeGetReadStateError already rejected uninitialized partitions.
    ShareGroupOffset offsetValue = shareStateMap.get(key);
    List<ReadShareGroupStateResponseData.StateBatch> stateBatches = (offsetValue.stateBatches() != null && !offsetValue.stateBatches().isEmpty()) ?
        offsetValue.stateBatches().stream()
            .map(
                stateBatch -> new ReadShareGroupStateResponseData.StateBatch()
                    .setFirstOffset(stateBatch.firstOffset())
                    .setLastOffset(stateBatch.lastOffset())
                    .setDeliveryState(stateBatch.deliveryState())
                    .setDeliveryCount(stateBatch.deliveryCount())
            ).toList() : List.of();

    ReadShareGroupStateResponseData responseData = ReadShareGroupStateResponse.toResponseData(
        topicId,
        partitionId,
        offsetValue.startOffset(),
        offsetValue.stateEpoch(),
        stateBatches
    );

    // Optimization in case leaderEpoch update is not required.
    if (leaderEpoch == -1 ||
        (leaderEpochMap.get(key) != null && leaderEpochMap.get(key) == leaderEpoch)) {
        return new CoordinatorResult<>(List.of(), responseData);
    }

    // It is OK to info log this since this reaching this codepoint should be quite infrequent.
    log.info("Read with leader epoch update call for key {} having new leader epoch {}.", key, leaderEpoch);

    // Recording the sensor here as above if condition will not produce any record.
    metricsShard.record(ShareCoordinatorMetrics.SHARE_COORDINATOR_WRITE_SENSOR_NAME);

    // Generate record with leaderEpoch info. Empty batches: only the epoch is being persisted.
    WriteShareGroupStateRequestData.PartitionData writePartitionData = new WriteShareGroupStateRequestData.PartitionData()
        .setPartition(partitionId)
        .setLeaderEpoch(leaderEpoch)
        .setStateBatches(List.of())
        .setStartOffset(responseData.results().get(0).partitions().get(0).startOffset())
        .setDeliveryCompleteCount(offsetValue.deliveryCompleteCount())
        .setStateEpoch(responseData.results().get(0).partitions().get(0).stateEpoch());

    CoordinatorRecord record = generateShareStateRecord(writePartitionData, key, true);
    return new CoordinatorResult<>(List.of(record), responseData);
}
/**
 * This method finds the ShareSnapshotValue record corresponding to the requested topic partition from the
 * in-memory state of coordinator shard, the shareStateMap.
 * <p>
 * This method as called by the ShareCoordinatorService will be provided with
 * the request data which covers only key i.e. group1:topic1:partition1. The implementation
 * below was done keeping this in mind.
 *
 * @param request - ReadShareGroupStateSummaryRequestData for a single key
 * @return CoordinatorResult(records, response)
 */
public CoordinatorResult<ReadShareGroupStateSummaryResponseData, CoordinatorRecord> readStateSummary(
    ReadShareGroupStateSummaryRequestData request
) {
    // Only one key will be there in the request by design.
    Optional<ReadShareGroupStateSummaryResponseData> error = maybeGetReadStateSummaryError(request);
    if (error.isPresent()) {
        return new CoordinatorResult<>(List.of(), error.get());
    }

    ReadShareGroupStateSummaryRequestData.ReadStateSummaryData topicData = request.topics().get(0);
    ReadShareGroupStateSummaryRequestData.PartitionData partitionData = topicData.partitions().get(0);

    Uuid topicId = topicData.topicId();
    int partitionId = partitionData.partition();
    SharePartitionKey key = SharePartitionKey.getInstance(request.groupId(), topicId, partitionId);

    ReadShareGroupStateSummaryResponseData responseData = null;

    if (!shareStateMap.containsKey(key)) {
        // Unknown share partition: answer with uninitialized defaults rather than an error.
        responseData = ReadShareGroupStateSummaryResponse.toResponseData(
            topicId,
            partitionId,
            PartitionFactory.UNINITIALIZED_START_OFFSET,
            PartitionFactory.UNINITIALIZED_DELIVERY_COMPLETE_COUNT,
            PartitionFactory.DEFAULT_LEADER_EPOCH,
            PartitionFactory.DEFAULT_STATE_EPOCH
        );
    } else {
        ShareGroupOffset offsetValue = shareStateMap.get(key);
        // Defensive: containsKey was true just above, so this branch should be unreachable.
        if (offsetValue == null) {
            log.error("Data not found for topic {}, partition {} for group {}, in the in-memory state of share coordinator", topicId, partitionId, request.groupId());
            responseData = ReadShareGroupStateSummaryResponse.toErrorResponseData(
                topicId,
                partitionId,
                Errors.UNKNOWN_SERVER_ERROR,
                "Data not found for the topics " + topicId + ", partition " + partitionId + " for group " + request.groupId() + ", in the in-memory state of share coordinator"
            );
        } else {
            responseData = ReadShareGroupStateSummaryResponse.toResponseData(
                topicId,
                partitionId,
                offsetValue.startOffset(),
                offsetValue.deliveryCompleteCount(),
                offsetValue.leaderEpoch(),
                offsetValue.stateEpoch()
            );
        }
    }

    return new CoordinatorResult<>(List.of(), responseData);
}
/**
 * Method which returns the last known redundant offset from the partition
 * led by this shard.
 *
 * @return CoordinatorResult containing empty record list and an Optional<Long> representing the offset.
 */
public CoordinatorResult<Optional<Long>, CoordinatorRecord> lastRedundantOffset() {
    // Read-only query; no records are produced.
    return new CoordinatorResult<>(
        List.of(),
        this.offsetsManager.lastRedundantOffset()
    );
}
/**
 * This method writes tombstone records corresponding to the requested topic partitions.
 * <p>
 * This method as called by the ShareCoordinatorService will be provided with
 * the request data which covers only key i.e. group1:topic1:partition1. The implementation
 * below was done keeping this in mind.
 *
 * @param request - DeleteShareGroupStateRequestData for a single key
 * @return CoordinatorResult(records, response)
 */
public CoordinatorResult<DeleteShareGroupStateResponseData, CoordinatorRecord> deleteState(
    DeleteShareGroupStateRequestData request
) {
    // Records to write (with both key and value of snapshot type), response to caller
    // only one key will be there in the request by design.
    Optional<CoordinatorResult<DeleteShareGroupStateResponseData, CoordinatorRecord>> error = maybeGetDeleteStateError(request);
    if (error.isPresent()) {
        return error.get();
    }

    DeleteShareGroupStateRequestData.DeleteStateData topicData = request.topics().get(0);
    DeleteShareGroupStateRequestData.PartitionData partitionData = topicData.partitions().get(0);
    SharePartitionKey key = SharePartitionKey.getInstance(request.groupId(), topicData.topicId(), partitionData.partition());

    // Deleting a non-existent partition is treated as success: no tombstone is needed.
    if (!shareStateMap.containsKey(key)) {
        log.warn("Attempted to delete non-existent share partition {}.", key);
        return new CoordinatorResult<>(List.of(), new DeleteShareGroupStateResponseData().setResults(
            List.of(DeleteShareGroupStateResponse.toResponseDeleteStateResult(key.topicId(),
                List.of(DeleteShareGroupStateResponse.toResponsePartitionResult(
                    key.partition()))
            ))
        ));
    }

    CoordinatorRecord record = generateTombstoneRecord(key);

    // build successful response if record is correctly created
    DeleteShareGroupStateResponseData responseData = new DeleteShareGroupStateResponseData().setResults(
        List.of(DeleteShareGroupStateResponse.toResponseDeleteStateResult(key.topicId(),
            List.of(DeleteShareGroupStateResponse.toResponsePartitionResult(
                key.partition()))
        ))
    );

    return new CoordinatorResult<>(List.of(record), responseData);
}
/**
 * This method writes a share snapshot records corresponding to the requested topic partitions.
 * <p>
 * This method as called by the ShareCoordinatorService will be provided with
 * the request data which covers only key i.e. group1:topic1:partition1. The implementation
 * below was done keeping this in mind.
 *
 * @param request - InitializeShareGroupStateRequestData for a single key
 * @return CoordinatorResult(records, response)
 */
public CoordinatorResult<InitializeShareGroupStateResponseData, CoordinatorRecord> initializeState(
    InitializeShareGroupStateRequestData request
) {
    // Records to write (with both key and value of snapshot type), response to caller
    // only one key will be there in the request by design.
    Optional<CoordinatorResult<InitializeShareGroupStateResponseData, CoordinatorRecord>> error = maybeGetInitializeStateError(request);
    if (error.isPresent()) {
        return error.get();
    }

    InitializeShareGroupStateRequestData.InitializeStateData topicData = request.topics().get(0);
    InitializeShareGroupStateRequestData.PartitionData partitionData = topicData.partitions().get(0);
    SharePartitionKey key = SharePartitionKey.getInstance(request.groupId(), topicData.topicId(), partitionData.partition());

    // Fresh full snapshot: any previous state for the key is intentionally not merged in.
    CoordinatorRecord record = generateInitializeStateRecord(partitionData, key);

    // build successful response if record is correctly created
    InitializeShareGroupStateResponseData responseData = new InitializeShareGroupStateResponseData().setResults(
        List.of(InitializeShareGroupStateResponse.toResponseInitializeStateResult(key.topicId(),
            List.of(InitializeShareGroupStateResponse.toResponsePartitionResult(
                key.partition()))
        ))
    );

    return new CoordinatorResult<>(List.of(record), responseData);
}
/**
 * Iterates over the soft state to determine the share partitions whose last snapshot is
 * older than the allowed time interval. The candidate share partitions are force snapshotted.
 *
 * @return A result containing snapshot records, if any, and a void response.
 */
public CoordinatorResult<Void, CoordinatorRecord> snapshotColdPartitions() {
    // A non-zero createTimestamp/writeTimestamp delta marks a state that was
    // already re-written by a cold snapshot (writeTimestamp moved forward).
    long coldSnapshottedPartitionsCount = shareStateMap.values().stream()
        .filter(shareGroupOffset -> shareGroupOffset.createTimestamp() - shareGroupOffset.writeTimestamp() != 0)
        .count();

    // If all share partitions are snapshotted, it means that
    // system is quiet and cold snapshotting will not help much.
    if (coldSnapshottedPartitionsCount == shareStateMap.size()) {
        log.debug("All share snapshot records already cold snapshotted, skipping.");
        return new CoordinatorResult<>(List.of(), null);
    }

    // Some active partitions are there.
    List<CoordinatorRecord> records = new ArrayList<>();

    shareStateMap.forEach((sharePartitionKey, shareGroupOffset) -> {
        long timeSinceLastSnapshot = time.milliseconds() - shareGroupOffset.writeTimestamp();
        if (timeSinceLastSnapshot >= config.shareCoordinatorColdPartitionSnapshotIntervalMs()) {
            // We need to force create a snapshot here
            log.debug("Last snapshot for {} is older than allowed interval (last snapshot delta {}).", sharePartitionKey, timeSinceLastSnapshot);
            records.add(ShareCoordinatorRecordHelpers.newShareSnapshotRecord(
                sharePartitionKey.groupId(),
                sharePartitionKey.topicId(),
                sharePartitionKey.partition(),
                shareGroupOffset.builderSupplier()
                    .setSnapshotEpoch(shareGroupOffset.snapshotEpoch() + 1)    // We need to increment by one as this is a new snapshot.
                    .setWriteTimestamp(time.milliseconds())
                    .build()
            ));
        }
    });
    return new CoordinatorResult<>(records, null);
}
/**
 * Remove share partitions corresponding to the input topic ids, if present.
 * @param deletedTopicIds The topic ids which have been deleted
 * @return A result containing relevant coordinator records and void response
 */
public CoordinatorResult<Void, CoordinatorRecord> maybeCleanupShareState(Set<Uuid> deletedTopicIds) {
    // Fast path: nothing was deleted, so there is nothing to tombstone.
    if (deletedTopicIds.isEmpty()) {
        return new CoordinatorResult<>(List.of());
    }
    // Emit one tombstone per tracked share partition living on a deleted topic.
    List<CoordinatorRecord> tombstones = shareStateMap.keySet().stream()
        .filter(key -> deletedTopicIds.contains(key.topicId()))
        .map(key -> ShareCoordinatorRecordHelpers.newShareStateTombstoneRecord(key.groupId(), key.topicId(), key.partition()))
        .toList();
    return new CoordinatorResult<>(tombstones);
}
/**
 * Util method to generate a ShareSnapshot or ShareUpdate type record for a key, based on various conditions.
 * <p>
 * If number of ShareUpdate records for key >= max allowed per snapshot per key or stateEpoch is highest
 * seen so far => create a new ShareSnapshot record else create a new ShareUpdate record. This method assumes
 * that share partition key is present in shareStateMap since it should be called on initialized share partitions.
 *
 * @param partitionData     - Represents the data which should be written into the share state record.
 * @param key               - The {@link SharePartitionKey} object.
 * @param updateLeaderEpoch - Should the leader epoch be updated, if higher.
 * @return {@link CoordinatorRecord} representing ShareSnapshot or ShareUpdate
 */
private CoordinatorRecord generateShareStateRecord(
    WriteShareGroupStateRequestData.PartitionData partitionData,
    SharePartitionKey key,
    boolean updateLeaderEpoch
) {
    long timestamp = time.milliseconds();
    int updatesPerSnapshotLimit = config.shareCoordinatorSnapshotUpdateRecordsPerSnapshot();
    ShareGroupOffset currentState = shareStateMap.get(key);   // This method assumes containsKey is true.

    // -1 in the request means "leave the leader epoch unchanged".
    int newLeaderEpoch = currentState.leaderEpoch();
    if (updateLeaderEpoch) {
        newLeaderEpoch = partitionData.leaderEpoch() != -1 ? partitionData.leaderEpoch() : newLeaderEpoch;
    }

    if (snapshotUpdateCount.getOrDefault(key, 0) >= updatesPerSnapshotLimit) {
        // shareStateMap will have the entry as containsKey is true
        // -1 start offset in the request means "keep the current start offset".
        long newStartOffset = partitionData.startOffset() == -1 ? currentState.startOffset() : partitionData.startOffset();

        // Since the number of update records for this share part key exceeds snapshotUpdateRecordsPerSnapshot
        // or state epoch has incremented, we should be creating a share snapshot record.
        // The incoming partition data could have overlapping state batches, we must merge them.
        return ShareCoordinatorRecordHelpers.newShareSnapshotRecord(
            key.groupId(), key.topicId(), partitionData.partition(),
            new ShareGroupOffset.Builder()
                .setSnapshotEpoch(currentState.snapshotEpoch() + 1) // We must increment snapshot epoch as this is new snapshot.
                .setStartOffset(newStartOffset)
                .setDeliveryCompleteCount(partitionData.deliveryCompleteCount())
                .setLeaderEpoch(newLeaderEpoch)
                .setStateEpoch(currentState.stateEpoch())
                .setStateBatches(mergeBatches(currentState.stateBatches(), partitionData, newStartOffset))
                .setCreateTimestamp(timestamp)
                .setWriteTimestamp(timestamp)
                .build());
    } else {
        // Share snapshot is present and number of share snapshot update records < snapshotUpdateRecordsPerSnapshot
        // so create a share update record.
        // The incoming partition data could have overlapping state batches, we must merge them.
        return ShareCoordinatorRecordHelpers.newShareUpdateRecord(
            key.groupId(), key.topicId(), partitionData.partition(),
            new ShareGroupOffset.Builder()
                .setSnapshotEpoch(currentState.snapshotEpoch()) // Use same snapshotEpoch as last share snapshot.
                .setStartOffset(partitionData.startOffset())
                .setDeliveryCompleteCount(partitionData.deliveryCompleteCount())
                .setLeaderEpoch(newLeaderEpoch)
                .setStateBatches(mergeBatches(List.of(), partitionData))
                .build());
    }
}
// Builds the tombstone record that, once replayed, erases all soft state for the key.
private CoordinatorRecord generateTombstoneRecord(SharePartitionKey key) {
    return ShareCoordinatorRecordHelpers.newShareStateTombstoneRecord(
        key.groupId(),
        key.topicId(),
        key.partition()
    );
}
// Builds a fresh full snapshot for an initialize request. Prior state is not
// merged (init means fresh start), but the snapshot epoch continues from any
// existing snapshot so epochs remain monotonic across re-initializations.
private CoordinatorRecord generateInitializeStateRecord(
    InitializeShareGroupStateRequestData.PartitionData partitionData,
    SharePartitionKey key
) {
    // We need to create a new share snapshot here, with
    // appropriate state information. We will not be merging
    // state here with previous snapshots as init state implies
    // fresh start.
    int snapshotEpoch = shareStateMap.containsKey(key) ? shareStateMap.get(key).snapshotEpoch() + 1 : 0;
    return ShareCoordinatorRecordHelpers.newShareSnapshotRecord(
        key.groupId(),
        key.topicId(),
        key.partition(),
        ShareGroupOffset.fromRequest(partitionData, snapshotEpoch, time.milliseconds())
    );
}
// Convenience overload: merge using the request's own start offset as the floor.
private List<PersisterStateBatch> mergeBatches(
    List<PersisterStateBatch> soFar,
    WriteShareGroupStateRequestData.PartitionData partitionData) {
    return mergeBatches(soFar, partitionData, partitionData.startOffset());
}
// Combines the existing state batches with the incoming request batches,
// resolving overlaps; batches entirely below startOffset become redundant.
private List<PersisterStateBatch> mergeBatches(
    List<PersisterStateBatch> soFar,
    WriteShareGroupStateRequestData.PartitionData partitionData,
    long startOffset
) {
    return new PersisterStateBatchCombiner(soFar, partitionData.stateBatches().stream()
        .map(PersisterStateBatch::from)
        .toList(),
        startOffset
    ).combineStateBatches();
}
// Validates a write request (single key by design). Returns an error result
// when the write cannot proceed; an empty Optional means the write may go ahead.
// Checks, in order: topic id, partition id, partition initialized, leader epoch
// fencing, state epoch fencing, and topic/partition presence in the metadata image.
private Optional<CoordinatorResult<WriteShareGroupStateResponseData, CoordinatorRecord>> maybeGetWriteStateError(
    WriteShareGroupStateRequestData request
) {
    String groupId = request.groupId();
    WriteShareGroupStateRequestData.WriteStateData topicData = request.topics().get(0);
    WriteShareGroupStateRequestData.PartitionData partitionData = topicData.partitions().get(0);

    Uuid topicId = topicData.topicId();
    int partitionId = partitionData.partition();

    if (topicId == null) {
        return Optional.of(getWriteErrorCoordinatorResult(Errors.INVALID_REQUEST, NULL_TOPIC_ID, null, partitionId));
    }

    if (partitionId < 0) {
        return Optional.of(getWriteErrorCoordinatorResult(Errors.INVALID_REQUEST, NEGATIVE_PARTITION_ID, topicId, partitionId));
    }

    SharePartitionKey mapKey = SharePartitionKey.getInstance(groupId, topicId, partitionId);
    if (!shareStateMap.containsKey(mapKey)) {
        return Optional.of(getWriteErrorCoordinatorResult(Errors.INVALID_REQUEST, WRITE_UNINITIALIZED_SHARE_PARTITION, topicId, partitionId));
    }

    // A request epoch of -1 opts out of fencing; otherwise a stale epoch is rejected.
    if (partitionData.leaderEpoch() != -1 && leaderEpochMap.containsKey(mapKey) && leaderEpochMap.get(mapKey) > partitionData.leaderEpoch()) {
        log.error("Write request leader epoch is smaller than last recorded current: {}, requested: {}.", leaderEpochMap.get(mapKey), partitionData.leaderEpoch());
        return Optional.of(getWriteErrorCoordinatorResult(Errors.FENCED_LEADER_EPOCH, null, topicId, partitionId));
    }
    if (partitionData.stateEpoch() != -1 && stateEpochMap.containsKey(mapKey) && stateEpochMap.get(mapKey) > partitionData.stateEpoch()) {
        log.info("Write request state epoch is smaller than last recorded current: {}, requested: {}.", stateEpochMap.get(mapKey), partitionData.stateEpoch());
        return Optional.of(getWriteErrorCoordinatorResult(Errors.FENCED_STATE_EPOCH, null, topicId, partitionId));
    }
    if (metadataImage == null) {
        log.error("Metadata image is null");
        return Optional.of(getWriteErrorCoordinatorResult(Errors.UNKNOWN_TOPIC_OR_PARTITION, null, topicId, partitionId));
    }

    Optional<CoordinatorMetadataImage.TopicMetadata> topicMetadataOp = metadataImage.topicMetadata(topicId);
    if (topicMetadataOp.isEmpty() ||
        topicMetadataOp.get().partitionCount() <= partitionId) {
        log.error("Topic/TopicPartition not found in metadata image.");
        return Optional.of(getWriteErrorCoordinatorResult(Errors.UNKNOWN_TOPIC_OR_PARTITION, null, topicId, partitionId));
    }

    return Optional.empty();
}
// Validates a read request (single key by design). Returns an error response
// when the read cannot proceed; an empty Optional means the read may go ahead.
// Mirrors the write-side checks minus state-epoch fencing (reads carry none).
private Optional<ReadShareGroupStateResponseData> maybeGetReadStateError(ReadShareGroupStateRequestData request) {
    String groupId = request.groupId();
    ReadShareGroupStateRequestData.ReadStateData topicData = request.topics().get(0);
    ReadShareGroupStateRequestData.PartitionData partitionData = topicData.partitions().get(0);

    Uuid topicId = topicData.topicId();
    int partitionId = partitionData.partition();

    if (topicId == null) {
        log.error("Request topic id is null.");
        return Optional.of(ReadShareGroupStateResponse.toErrorResponseData(
            null, partitionId, Errors.INVALID_REQUEST, NULL_TOPIC_ID.getMessage()));
    }

    if (partitionId < 0) {
        log.error("Request partition id is negative.");
        return Optional.of(ReadShareGroupStateResponse.toErrorResponseData(
            topicId, partitionId, Errors.INVALID_REQUEST, NEGATIVE_PARTITION_ID.getMessage()));
    }

    SharePartitionKey mapKey = SharePartitionKey.getInstance(groupId, topicId, partitionId);
    if (!shareStateMap.containsKey(mapKey)) {
        log.error("Read on uninitialized share partition {}", mapKey);
        return Optional.of(ReadShareGroupStateResponse.toErrorResponseData(
            topicId, partitionId, Errors.INVALID_REQUEST, READ_UNINITIALIZED_SHARE_PARTITION.getMessage()));
    }

    // Stale reader epochs are fenced; a higher-than-known epoch is allowed (and recorded by the caller).
    if (leaderEpochMap.containsKey(mapKey) && leaderEpochMap.get(mapKey) > partitionData.leaderEpoch()) {
        log.error("Read request leader epoch is smaller than last recorded current: {}, requested: {}.", leaderEpochMap.get(mapKey), partitionData.leaderEpoch());
        return Optional.of(ReadShareGroupStateResponse.toErrorResponseData(topicId, partitionId, Errors.FENCED_LEADER_EPOCH, Errors.FENCED_LEADER_EPOCH.message()));
    }

    if (metadataImage == null) {
        log.error("Metadata image is null");
        return Optional.of(ReadShareGroupStateResponse.toErrorResponseData(topicId, partitionId, Errors.UNKNOWN_TOPIC_OR_PARTITION, Errors.UNKNOWN_TOPIC_OR_PARTITION.message()));
    }

    Optional<CoordinatorMetadataImage.TopicMetadata> topicMetadataOp = metadataImage.topicMetadata(topicId);
    if (topicMetadataOp.isEmpty() ||
        topicMetadataOp.get().partitionCount() <= partitionId) {
        log.error("Topic/TopicPartition not found in metadata image.");
        return Optional.of(ReadShareGroupStateResponse.toErrorResponseData(topicId, partitionId, Errors.UNKNOWN_TOPIC_OR_PARTITION, Errors.UNKNOWN_TOPIC_OR_PARTITION.message()));
    }

    return Optional.empty();
}
// Validates a read-summary request (single key by design). Unlike a full read,
// an uninitialized share partition is NOT an error here — the caller answers
// with default values — so there is no shareStateMap/epoch check.
private Optional<ReadShareGroupStateSummaryResponseData> maybeGetReadStateSummaryError(ReadShareGroupStateSummaryRequestData request) {
    ReadShareGroupStateSummaryRequestData.ReadStateSummaryData topicData = request.topics().get(0);
    ReadShareGroupStateSummaryRequestData.PartitionData partitionData = topicData.partitions().get(0);

    Uuid topicId = topicData.topicId();
    int partitionId = partitionData.partition();

    if (topicId == null) {
        log.error("Request topic id is null.");
        return Optional.of(ReadShareGroupStateSummaryResponse.toErrorResponseData(
            null, partitionId, Errors.INVALID_REQUEST, NULL_TOPIC_ID.getMessage()));
    }

    if (partitionId < 0) {
        log.error("Request partition id is negative.");
        return Optional.of(ReadShareGroupStateSummaryResponse.toErrorResponseData(
            topicId, partitionId, Errors.INVALID_REQUEST, NEGATIVE_PARTITION_ID.getMessage()));
    }

    if (metadataImage == null) {
        log.error("Metadata image is null");
        return Optional.of(ReadShareGroupStateSummaryResponse.toErrorResponseData(topicId, partitionId, Errors.UNKNOWN_TOPIC_OR_PARTITION, Errors.UNKNOWN_TOPIC_OR_PARTITION.message()));
    }

    Optional<CoordinatorMetadataImage.TopicMetadata> topicMetadataOp = metadataImage.topicMetadata(topicId);
    if (topicMetadataOp.isEmpty() ||
        topicMetadataOp.get().partitionCount() <= partitionId) {
        log.error("Topic/TopicPartition not found in metadata image.");
        return Optional.of(ReadShareGroupStateSummaryResponse.toErrorResponseData(topicId, partitionId, Errors.UNKNOWN_TOPIC_OR_PARTITION, Errors.UNKNOWN_TOPIC_OR_PARTITION.message()));
    }

    return Optional.empty();
}
private Optional<CoordinatorResult<DeleteShareGroupStateResponseData, CoordinatorRecord>> maybeGetDeleteStateError(
DeleteShareGroupStateRequestData request
) {
DeleteShareGroupStateRequestData.DeleteStateData topicData = request.topics().get(0);
DeleteShareGroupStateRequestData.PartitionData partitionData = topicData.partitions().get(0);
Uuid topicId = topicData.topicId();
int partitionId = partitionData.partition();
if (topicId == null) {
return Optional.of(getDeleteErrorCoordinatorResult(Errors.INVALID_REQUEST, NULL_TOPIC_ID, null, partitionId));
}
if (partitionId < 0) {
return Optional.of(getDeleteErrorCoordinatorResult(Errors.INVALID_REQUEST, NEGATIVE_PARTITION_ID, topicId, partitionId));
}
if (metadataImage == null) {
log.error("Metadata image is null");
return Optional.of(getDeleteErrorCoordinatorResult(Errors.UNKNOWN_TOPIC_OR_PARTITION, null, topicId, partitionId));
}
Optional<CoordinatorMetadataImage.TopicMetadata> topicMetadataOp = metadataImage.topicMetadata(topicId);
if (topicMetadataOp.isEmpty() ||
topicMetadataOp.get().partitionCount() <= partitionId) {
log.error("Topic/TopicPartition not found in metadata image.");
return Optional.of(getDeleteErrorCoordinatorResult(Errors.UNKNOWN_TOPIC_OR_PARTITION, null, topicId, partitionId));
}
return Optional.empty();
}
private Optional<CoordinatorResult<InitializeShareGroupStateResponseData, CoordinatorRecord>> maybeGetInitializeStateError(
InitializeShareGroupStateRequestData request
) {
InitializeShareGroupStateRequestData.InitializeStateData topicData = request.topics().get(0);
InitializeShareGroupStateRequestData.PartitionData partitionData = topicData.partitions().get(0);
Uuid topicId = topicData.topicId();
int partitionId = partitionData.partition();
if (topicId == null) {
return Optional.of(getInitializeErrorCoordinatorResult(Errors.INVALID_REQUEST, NULL_TOPIC_ID, null, partitionId));
}
if (partitionId < 0) {
return Optional.of(getInitializeErrorCoordinatorResult(Errors.INVALID_REQUEST, NEGATIVE_PARTITION_ID, topicId, partitionId));
}
SharePartitionKey key = SharePartitionKey.getInstance(request.groupId(), topicId, partitionId);
if (partitionData.stateEpoch() != -1 && stateEpochMap.containsKey(key) && stateEpochMap.get(key) > partitionData.stateEpoch()) {
log.info("Initialize request state epoch is smaller than last recorded current: {}, requested: {}.", stateEpochMap.get(key), partitionData.stateEpoch());
return Optional.of(getInitializeErrorCoordinatorResult(Errors.FENCED_STATE_EPOCH, Errors.FENCED_STATE_EPOCH.exception(), topicId, partitionId));
}
if (metadataImage == null) {
log.error("Metadata image is null");
return Optional.of(getInitializeErrorCoordinatorResult(Errors.UNKNOWN_TOPIC_OR_PARTITION, null, topicId, partitionId));
}
Optional<CoordinatorMetadataImage.TopicMetadata> topicMetadataOp = metadataImage.topicMetadata(topicId);
if (topicMetadataOp.isEmpty() ||
topicMetadataOp.get().partitionCount() <= partitionId) {
log.error("Topic/TopicPartition not found in metadata image.");
return Optional.of(getInitializeErrorCoordinatorResult(Errors.UNKNOWN_TOPIC_OR_PARTITION, null, topicId, partitionId));
}
return Optional.empty();
}
private CoordinatorResult<WriteShareGroupStateResponseData, CoordinatorRecord> getWriteErrorCoordinatorResult(
Errors error,
Exception exception,
Uuid topicId,
int partitionId
) {
String message = exception == null ? error.message() : exception.getMessage();
WriteShareGroupStateResponseData responseData = WriteShareGroupStateResponse.toErrorResponseData(topicId, partitionId, error, message);
return new CoordinatorResult<>(List.of(), responseData);
}
private CoordinatorResult<DeleteShareGroupStateResponseData, CoordinatorRecord> getDeleteErrorCoordinatorResult(
Errors error,
Exception exception,
Uuid topicId,
int partitionId
) {
String message = exception == null ? error.message() : exception.getMessage();
DeleteShareGroupStateResponseData responseData = DeleteShareGroupStateResponse.toErrorResponseData(topicId, partitionId, error, message);
return new CoordinatorResult<>(List.of(), responseData);
}
private CoordinatorResult<InitializeShareGroupStateResponseData, CoordinatorRecord> getInitializeErrorCoordinatorResult(
Errors error,
Exception exception,
Uuid topicId,
int partitionId
) {
String message = exception == null ? error.message() : exception.getMessage();
InitializeShareGroupStateResponseData responseData = InitializeShareGroupStateResponse.toErrorResponseData(topicId, partitionId, error, message);
return new CoordinatorResult<>(List.of(), responseData);
}
// Visible for testing
Integer getLeaderMapValue(SharePartitionKey key) {
return this.leaderEpochMap.get(key);
}
// Visible for testing
Integer getStateEpochMapValue(SharePartitionKey key) {
return this.stateEpochMap.get(key);
}
// Visible for testing
ShareGroupOffset getShareStateMapValue(SharePartitionKey key) {
return this.shareStateMap.get(key);
}
// Visible for testing
CoordinatorMetricsShard getMetricsShard() {
return metricsShard;
}
private static ShareGroupOffset merge(ShareGroupOffset soFar, ShareUpdateValue newData) {
// Snapshot epoch should be same as last share snapshot.
// state epoch is not present
List<PersisterStateBatch> currentBatches = soFar.stateBatches();
long newStartOffset = newData.startOffset() == -1 ? soFar.startOffset() : newData.startOffset();
int newLeaderEpoch = newData.leaderEpoch() == -1 ? soFar.leaderEpoch() : newData.leaderEpoch();
return new ShareGroupOffset.Builder()
.setSnapshotEpoch(soFar.snapshotEpoch())
.setStateEpoch(soFar.stateEpoch())
.setStartOffset(newStartOffset)
.setDeliveryCompleteCount(newData.deliveryCompleteCount())
.setLeaderEpoch(newLeaderEpoch)
.setStateBatches(new PersisterStateBatchCombiner(currentBatches, newData.stateBatches().stream()
.map(ShareCoordinatorShard::toPersisterStateBatch)
.toList(), newStartOffset)
.combineStateBatches())
.setCreateTimestamp(soFar.createTimestamp())
.setWriteTimestamp(soFar.writeTimestamp())
.build();
}
private static ApiMessage messageOrNull(ApiMessageAndVersion apiMessageAndVersion) {
if (apiMessageAndVersion == null) {
return null;
} else {
return apiMessageAndVersion.message();
}
}
/**
* Util function to convert a state batch of type {@link ShareUpdateValue.StateBatch }
* to {@link PersisterStateBatch}.
*
* @param batch - The object representing {@link ShareUpdateValue.StateBatch}
* @return {@link PersisterStateBatch}
*/
private static PersisterStateBatch toPersisterStateBatch(ShareUpdateValue.StateBatch batch) {
return new PersisterStateBatch(
batch.firstOffset(),
batch.lastOffset(),
batch.deliveryState(),
batch.deliveryCount()
);
}
}
| return |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/dialect/lock/internal/PostgreSQLLockingSupport.java | {
"start": 896,
"end": 3892
} | class ____ implements LockingSupport, LockingSupport.Metadata, ConnectionLockTimeoutStrategy {
public static final LockingSupport LOCKING_SUPPORT = new PostgreSQLLockingSupport();
private final boolean supportsNoWait;
private final boolean supportsSkipLocked;
public PostgreSQLLockingSupport() {
this( true, true );
}
public PostgreSQLLockingSupport(boolean supportsNoWait, boolean supportsSkipLocked) {
this.supportsNoWait = supportsNoWait;
this.supportsSkipLocked = supportsSkipLocked;
}
@Override
public Metadata getMetadata() {
return this;
}
@Override
public RowLockStrategy getWriteRowLockStrategy() {
return RowLockStrategy.TABLE;
}
@Override
public LockTimeoutType getLockTimeoutType(Timeout timeout) {
return switch ( timeout.milliseconds() ) {
case NO_WAIT_MILLI -> supportsNoWait ? QUERY : LockTimeoutType.NONE;
case SKIP_LOCKED_MILLI -> supportsSkipLocked ? QUERY : LockTimeoutType.NONE;
case WAIT_FOREVER_MILLI -> LockTimeoutType.NONE;
// we can apply a timeout via the connection
default -> LockTimeoutType.CONNECTION;
};
}
@Override
public OuterJoinLockingType getOuterJoinLockingType() {
return OuterJoinLockingType.UNSUPPORTED;
}
@Override
public ConnectionLockTimeoutStrategy getConnectionLockTimeoutStrategy() {
return this;
}
@Override
public Level getSupportedLevel() {
return ConnectionLockTimeoutStrategy.Level.SUPPORTED;
}
@Override
public Timeout getLockTimeout(Connection connection, SessionFactoryImplementor factory) {
return Helper.getLockTimeout(
"select current_setting('lock_timeout', true)",
(resultSet) -> {
// even though lock_timeout is "in milliseconds", `current_setting`
// returns a String form which unfortunately varies depending on
// the actual value:
// * for zero (no timeout), "0" is returned
// * for non-zero, `{timeout-in-seconds}s` is returned (e.g. "4s")
// so we need to "parse" that form here
final String value = resultSet.getString( 1 );
if ( "0".equals( value ) ) {
return Timeouts.WAIT_FOREVER;
}
assert value.endsWith( "s" );
final int secondsValue = Integer.parseInt( value.substring( 0, value.length() - 1 ) );
return Timeout.seconds( secondsValue );
},
connection,
factory
);
}
@Override
public void setLockTimeout(Timeout timeout, Connection connection, SessionFactoryImplementor factory) {
Helper.setLockTimeout(
timeout,
(t) -> {
final int milliseconds = timeout.milliseconds();
if ( milliseconds == SKIP_LOCKED_MILLI ) {
throw new HibernateException( "Connection lock-timeout does not accept skip-locked" );
}
if ( milliseconds == NO_WAIT_MILLI ) {
throw new HibernateException( "Connection lock-timeout does not accept no-wait" );
}
return milliseconds == WAIT_FOREVER_MILLI
? 0
: milliseconds;
},
"set local lock_timeout = %s",
connection,
factory
);
}
}
| PostgreSQLLockingSupport |
java | spring-cloud__spring-cloud-gateway | spring-cloud-gateway-server-webflux/src/main/java/org/springframework/cloud/gateway/filter/ForwardPathFilter.java | {
"start": 1236,
"end": 1871
} | class ____ implements GlobalFilter, Ordered {
@Override
public Mono<Void> filter(ServerWebExchange exchange, GatewayFilterChain chain) {
Route route = exchange.getAttribute(GATEWAY_ROUTE_ATTR);
if (route != null) {
URI routeUri = route.getUri();
String scheme = routeUri.getScheme();
if (isAlreadyRouted(exchange) || !"forward".equals(scheme)) {
return chain.filter(exchange);
}
exchange = exchange.mutate()
.request(exchange.getRequest().mutate().path(routeUri.getPath()).build())
.build();
}
return chain.filter(exchange);
}
@Override
public int getOrder() {
return 0;
}
}
| ForwardPathFilter |
java | alibaba__nacos | common/src/main/java/com/alibaba/nacos/common/notify/DefaultSharePublisher.java | {
"start": 2492,
"end": 4013
} | class ____ synchronization.
subscribers.remove(subscriber);
lock.lock();
try {
Set<Subscriber> sets = subMappings.get(subSlowEventType);
if (sets != null) {
sets.remove(subscriber);
}
} finally {
lock.unlock();
}
}
@Override
public void receiveEvent(Event event) {
final long currentEventSequence = event.sequence();
// get subscriber set based on the slow EventType.
final Class<? extends SlowEvent> slowEventType = (Class<? extends SlowEvent>) event.getClass();
// Get for Map, the algorithm is O(1).
Set<Subscriber> subscribers = subMappings.get(slowEventType);
if (null == subscribers) {
LOGGER.debug("[NotifyCenter] No subscribers for slow event {}", slowEventType.getName());
return;
}
// Notification single event subscriber
for (Subscriber subscriber : subscribers) {
// Whether to ignore expiration events
if (subscriber.ignoreExpireEvent() && lastEventSequence > currentEventSequence) {
LOGGER.debug("[NotifyCenter] the {} is unacceptable to this subscriber, because had expire",
event.getClass());
continue;
}
// Notify single subscriber for slow event.
notifySubscriber(subscriber, event);
}
}
}
| attributes |
java | elastic__elasticsearch | plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginTests.java | {
"start": 17077,
"end": 17876
} | class ____ extends Ec2DiscoveryPlugin {
final ApacheHttpClient.Builder httpClientBuilder;
final Ec2ClientBuilder ec2ClientBuilder;
Ec2DiscoveryPluginMock(Settings settings, ApacheHttpClient.Builder httpClientBuilder, Ec2ClientBuilder ec2ClientBuilder) {
super(settings, new AwsEc2ServiceImpl() {
@Override
ApacheHttpClient.Builder getHttpClientBuilder() {
return httpClientBuilder;
}
@Override
Ec2ClientBuilder getEc2ClientBuilder() {
return ec2ClientBuilder;
}
});
this.httpClientBuilder = httpClientBuilder;
this.ec2ClientBuilder = ec2ClientBuilder;
}
}
}
| Ec2DiscoveryPluginMock |
java | spring-projects__spring-data-jpa | spring-data-jpa/src/test/java/org/springframework/data/jpa/domain/sample/PersistableWithIdClassPK.java | {
"start": 826,
"end": 1780
} | class ____ implements Serializable {
@Serial private static final long serialVersionUID = 23126782341L;
private Long first;
private Long second;
public PersistableWithIdClassPK() {
}
public PersistableWithIdClassPK(Long first, Long second) {
this.first = first;
this.second = second;
}
public void setFirst(Long first) {
this.first = first;
}
public void setSecond(Long second) {
this.second = second;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || !(obj.getClass().equals(getClass()))) {
return false;
}
PersistableWithIdClassPK that = (PersistableWithIdClassPK) obj;
return nullSafeEquals(this.first, that.first) && nullSafeEquals(this.second, that.second);
}
@Override
public int hashCode() {
int result = 17;
result += nullSafeHashCode(this.first);
result += nullSafeHashCode(this.second);
return result;
}
}
| PersistableWithIdClassPK |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/arm-java/org/apache/hadoop/ipc/protobuf/TestProtosLegacy.java | {
"start": 132059,
"end": 132480
} | interface ____
extends com.google.protobuf.MessageOrBuilder {
// required bool shouldSlow = 1;
/**
* <code>required bool shouldSlow = 1;</code>
*/
boolean hasShouldSlow();
/**
* <code>required bool shouldSlow = 1;</code>
*/
boolean getShouldSlow();
}
/**
* Protobuf type {@code hadoop.common.SlowPingRequestProto}
*/
public static final | SlowPingRequestProtoOrBuilder |
java | apache__kafka | streams/src/test/java/org/apache/kafka/streams/processor/internals/assignment/TaskAssignorConvergenceTest.java | {
"start": 4144,
"end": 32834
} | class ____ {
private final Set<TaskId> statelessTasks;
private final Map<TaskId, Long> statefulTaskEndOffsetSums;
private final Map<ProcessId, ClientState> clientStates;
private final Map<ProcessId, ClientState> droppedClientStates;
private final StringBuilder history = new StringBuilder();
public final Map<TaskId, Set<TopicPartition>> partitionsForTask;
public final Map<TaskId, Set<TopicPartition>> changelogPartitionsForTask;
public final Map<Subtopology, Set<TaskId>> tasksForTopicGroup;
public final Cluster fullMetadata;
public final Map<ProcessId, Map<String, Optional<String>>> racksForProcessConsumer;
public final InternalTopicManager internalTopicManager;
private static Harness initializeCluster(final int numStatelessTasks,
final int numStatefulTasks,
final int numClients,
final Supplier<Integer> partitionCountSupplier,
final int numNodes) {
int subtopology = 0;
final Set<TaskId> statelessTasks = new TreeSet<>();
int remainingStatelessTasks = numStatelessTasks;
final List<Node> nodes = getRandomNodes(numNodes);
int nodeIndex = 0;
final Set<PartitionInfo> partitionInfoSet = new HashSet<>();
final Map<TaskId, Set<TopicPartition>> partitionsForTask = new HashMap<>();
final Map<TaskId, Set<TopicPartition>> changelogPartitionsForTask = new HashMap<>();
final Map<Subtopology, Set<TaskId>> tasksForTopicGroup = new HashMap<>();
while (remainingStatelessTasks > 0) {
final int partitions = Math.min(remainingStatelessTasks, partitionCountSupplier.get());
for (int i = 0; i < partitions; i++) {
final TaskId taskId = new TaskId(subtopology, i);
statelessTasks.add(taskId);
remainingStatelessTasks--;
final Node[] replica = getRandomReplica(nodes, nodeIndex, i);
partitionInfoSet.add(new PartitionInfo(TOPIC_PREFIX + "_" + subtopology, i, replica[0], replica, replica));
nodeIndex++;
partitionsForTask.put(taskId, Set.of(new TopicPartition(TOPIC_PREFIX + "_" + subtopology, i)));
tasksForTopicGroup.computeIfAbsent(new Subtopology(subtopology, null), k -> new HashSet<>()).add(taskId);
}
subtopology++;
}
final Map<TaskId, Long> statefulTaskEndOffsetSums = new TreeMap<>();
final Map<String, List<TopicPartitionInfo>> topicPartitionInfo = new HashMap<>();
final Set<String> changelogNames = new HashSet<>();
int remainingStatefulTasks = numStatefulTasks;
while (remainingStatefulTasks > 0) {
final String changelogTopicName = CHANGELOG_TOPIC_PREFIX + "_" + subtopology;
changelogNames.add(changelogTopicName);
final int partitions = Math.min(remainingStatefulTasks, partitionCountSupplier.get());
for (int i = 0; i < partitions; i++) {
final TaskId taskId = new TaskId(subtopology, i);
statefulTaskEndOffsetSums.put(taskId, 150000L);
remainingStatefulTasks--;
Node[] replica = getRandomReplica(nodes, nodeIndex, i);
partitionInfoSet.add(new PartitionInfo(TOPIC_PREFIX + "_" + subtopology, i, replica[0], replica, replica));
nodeIndex++;
partitionsForTask.put(taskId, Set.of(new TopicPartition(TOPIC_PREFIX + "_" + subtopology, i)));
changelogPartitionsForTask.put(taskId, Set.of(new TopicPartition(changelogTopicName, i)));
tasksForTopicGroup.computeIfAbsent(new Subtopology(subtopology, null), k -> new HashSet<>()).add(taskId);
final int changelogNodeIndex = random.nextInt(nodes.size());
replica = getRandomReplica(nodes, changelogNodeIndex, i);
final TopicPartitionInfo info = new TopicPartitionInfo(i, replica[0], Arrays.asList(replica[0], replica[1]), Collections.emptyList());
topicPartitionInfo.computeIfAbsent(changelogTopicName, tp -> new ArrayList<>()).add(info);
}
subtopology++;
}
final MockTime time = new MockTime();
final StreamsConfig streamsConfig = new StreamsConfig(configProps(StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC));
final MockClientSupplier mockClientSupplier = new MockClientSupplier();
final MockInternalTopicManager mockInternalTopicManager = new MockInternalTopicManager(
time,
streamsConfig,
mockClientSupplier.restoreConsumer,
false
);
final InternalTopicManager spyTopicManager = spy(mockInternalTopicManager);
doReturn(topicPartitionInfo).when(spyTopicManager).getTopicPartitionInfo(changelogNames);
final Cluster cluster = new Cluster(
"cluster",
new HashSet<>(nodes),
partitionInfoSet,
Collections.emptySet(),
Collections.emptySet()
);
final Map<ProcessId, ClientState> clientStates = new TreeMap<>();
final Map<ProcessId, Map<String, Optional<String>>> racksForProcessConsumer = new HashMap<>();
for (int i = 0; i < numClients; i++) {
final ProcessId uuid = processIdForInt(i);
clientStates.put(uuid, emptyInstance(uuid, statefulTaskEndOffsetSums));
final String rack = RACK_PREFIX + random.nextInt(nodes.size());
racksForProcessConsumer.put(uuid, mkMap(mkEntry("consumer", Optional.of(rack))));
}
return new Harness(statelessTasks, statefulTaskEndOffsetSums, clientStates, cluster, partitionsForTask, changelogPartitionsForTask, tasksForTopicGroup, racksForProcessConsumer, spyTopicManager);
}
private Harness(final Set<TaskId> statelessTasks,
final Map<TaskId, Long> statefulTaskEndOffsetSums,
final Map<ProcessId, ClientState> clientStates,
final Cluster fullMetadata,
final Map<TaskId, Set<TopicPartition>> partitionsForTask,
final Map<TaskId, Set<TopicPartition>> changelogPartitionsForTask,
final Map<Subtopology, Set<TaskId>> tasksForTopicGroup,
final Map<ProcessId, Map<String, Optional<String>>> racksForProcessConsumer,
final InternalTopicManager internalTopicManager) {
this.statelessTasks = statelessTasks;
this.statefulTaskEndOffsetSums = statefulTaskEndOffsetSums;
this.clientStates = clientStates;
this.fullMetadata = fullMetadata;
this.partitionsForTask = partitionsForTask;
this.changelogPartitionsForTask = changelogPartitionsForTask;
this.tasksForTopicGroup = tasksForTopicGroup;
this.racksForProcessConsumer = racksForProcessConsumer;
this.internalTopicManager = internalTopicManager;
droppedClientStates = new TreeMap<>();
history.append('\n');
history.append("Cluster and application initial state: \n");
history.append("Stateless tasks: ").append(statelessTasks).append('\n');
history.append("Stateful tasks: ").append(statefulTaskEndOffsetSums.keySet()).append('\n');
history.append("Full metadata: ").append(fullMetadata).append('\n');
history.append("Partitions for tasks: ").append(partitionsForTask).append('\n');
history.append("Changelog partitions for tasks: ").append(changelogPartitionsForTask).append('\n');
history.append("Tasks for subtopology: ").append(tasksForTopicGroup).append('\n');
history.append("Racks for process consumer: ").append(racksForProcessConsumer).append('\n');
formatClientStates(true);
history.append("History of the cluster: \n");
}
private void addClient() {
final ProcessId uuid = processIdForInt(clientStates.size() + droppedClientStates.size());
history.append("Adding new node ").append(uuid).append('\n');
clientStates.put(uuid, emptyInstance(uuid, statefulTaskEndOffsetSums));
final int nodeSize = fullMetadata.nodes().size();
final String rack = RACK_PREFIX + random.nextInt(nodeSize);
racksForProcessConsumer.computeIfAbsent(uuid, k -> new HashMap<>()).put("consumer", Optional.of(rack));
}
private static ClientState emptyInstance(final ProcessId uuid, final Map<TaskId, Long> allTaskEndOffsetSums) {
final ClientState clientState = new ClientState(uuid, 1);
clientState.computeTaskLags(uuid, allTaskEndOffsetSums);
return clientState;
}
private void addOrResurrectClientsRandomly(final Random prng, final int limit) {
final int numberToAdd = prng.nextInt(limit);
for (int i = 0; i < numberToAdd; i++) {
final boolean addNew = prng.nextBoolean();
if (addNew || droppedClientStates.isEmpty()) {
addClient();
} else {
final ProcessId uuid = selectRandomElement(prng, droppedClientStates);
history.append("Resurrecting node ").append(uuid).append('\n');
clientStates.put(uuid, droppedClientStates.get(uuid));
droppedClientStates.remove(uuid);
}
}
}
private void dropClient() {
if (clientStates.isEmpty()) {
throw new NoSuchElementException("There are no nodes to drop");
} else {
final ProcessId toDrop = clientStates.keySet().iterator().next();
dropClient(toDrop);
}
}
private void dropRandomClients(final int numNode, final Random prng) {
int dropped = 0;
while (!clientStates.isEmpty() && dropped < numNode) {
final ProcessId toDrop = selectRandomElement(prng, clientStates);
dropClient(toDrop);
dropped++;
}
history.append("Stateless tasks: ").append(statelessTasks).append('\n');
history.append("Stateful tasks: ").append(statefulTaskEndOffsetSums.keySet()).append('\n');
formatClientStates(true);
}
private void dropClient(final ProcessId toDrop) {
final ClientState clientState = clientStates.remove(toDrop);
history.append("Dropping node ").append(toDrop).append(": ").append(clientState).append('\n');
droppedClientStates.put(toDrop, clientState);
}
private static ProcessId selectRandomElement(final Random prng, final Map<ProcessId, ClientState> clients) {
int dropIndex = prng.nextInt(clients.size());
ProcessId toDrop = null;
for (final ProcessId uuid : clients.keySet()) {
if (dropIndex == 0) {
toDrop = uuid;
break;
} else {
dropIndex--;
}
}
return toDrop;
}
/**
* Flip the cluster states from "assigned" to "subscribed" so they can be used for another round of assignments.
*/
private void prepareForNextRebalance() {
final Map<ProcessId, ClientState> newClientStates = new TreeMap<>();
for (final Map.Entry<ProcessId, ClientState> entry : clientStates.entrySet()) {
final ProcessId uuid = entry.getKey();
final ClientState newClientState = new ClientState(uuid, 1);
final ClientState clientState = entry.getValue();
final Map<TaskId, Long> taskOffsetSums = new TreeMap<>();
for (final TaskId taskId : clientState.activeTasks()) {
if (statefulTaskEndOffsetSums.containsKey(taskId)) {
taskOffsetSums.put(taskId, statefulTaskEndOffsetSums.get(taskId));
}
}
for (final TaskId taskId : clientState.standbyTasks()) {
if (statefulTaskEndOffsetSums.containsKey(taskId)) {
taskOffsetSums.put(taskId, statefulTaskEndOffsetSums.get(taskId));
}
}
newClientState.addPreviousActiveTasks(clientState.activeTasks());
newClientState.addPreviousStandbyTasks(clientState.standbyTasks());
newClientState.addPreviousTasksAndOffsetSums("consumer", taskOffsetSums);
newClientState.computeTaskLags(uuid, statefulTaskEndOffsetSums);
newClientStates.put(uuid, newClientState);
}
clientStates.clear();
clientStates.putAll(newClientStates);
}
private void recordConfig(final AssignmentConfigs configuration) {
history.append("Creating assignor with configuration: ")
.append(configuration)
.append('\n');
}
private void recordBefore(final int iteration) {
history.append("Starting Iteration: ").append(iteration).append('\n');
formatClientStates(false);
}
private void recordAfter(final int iteration, final boolean rebalancePending) {
history.append("After assignment: ").append(iteration).append('\n');
history.append("Rebalance pending: ").append(rebalancePending).append('\n');
formatClientStates(true);
history.append('\n');
}
private void formatClientStates(final boolean printUnassigned) {
appendClientStates(history, clientStates);
if (printUnassigned) {
final Set<TaskId> unassignedTasks = new TreeSet<>();
unassignedTasks.addAll(statefulTaskEndOffsetSums.keySet());
unassignedTasks.addAll(statelessTasks);
for (final Map.Entry<ProcessId, ClientState> entry : clientStates.entrySet()) {
unassignedTasks.removeAll(entry.getValue().assignedTasks());
}
history.append("Unassigned Tasks: ").append(unassignedTasks).append('\n');
}
}
}
private int skewThreshold = 1;
public void setUp(final String rackAwareStrategy) {
if (rackAwareStrategy.equals(StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY)) {
// We take ceiling of [task_in_subtopology / total_task * original_task_assigned_to_client] as the capacity from
// stage 1 client to stage 2 client which can result in the skew to be at most 2
// For example, suppose there are 2 subtopologies s1 and s2. s1 has 2 tasks [t1, t2], s2 has 1 task t3. There are 2 clients c1 and c2 with
// originally 2 tasks and 1 task. Then the capacity from stage 1 c1 to stage 2 c1 is ceil(2 * 2.0 / 3 * 2) = 2 which can result in both
// t1 and t2 assigned to c1. So the max skew for stateful or stateless assignment could be 2 + 2 = 4.
// Details in https://cwiki.apache.org/confluence/display/KAFKA/KIP-925%3A+Rack+aware+task+assignment+in+Kafka+Streams
skewThreshold = 4;
}
}
@ParameterizedTest
@ValueSource(strings = {
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY
})
public void staticAssignmentShouldConvergeWithTheFirstAssignment(final String rackAwareStrategy) {
setUp(rackAwareStrategy);
final AssignmentConfigs configs = new AssignmentConfigs(100L,
2,
0,
60_000L,
EMPTY_RACK_AWARE_ASSIGNMENT_TAGS,
null,
null,
rackAwareStrategy);
final Harness harness = Harness.initializeCluster(1, 1, 1, () -> 1, 1);
testForConvergence(harness, configs, 1);
verifyValidAssignment(0, harness);
verifyBalancedAssignment(harness, skewThreshold);
}
@ParameterizedTest
@ValueSource(strings = {
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY
})
public void assignmentShouldConvergeAfterAddingNode(final String rackAwareStrategy) {
setUp(rackAwareStrategy);
final int numStatelessTasks = 7;
final int numStatefulTasks = 11;
final int maxWarmupReplicas = 2;
final int numStandbyReplicas = 0;
final int numNodes = 10;
final AssignmentConfigs configs = new AssignmentConfigs(100L,
maxWarmupReplicas,
numStandbyReplicas,
60_000L,
EMPTY_RACK_AWARE_ASSIGNMENT_TAGS,
null,
null,
rackAwareStrategy);
final Harness harness = Harness.initializeCluster(numStatelessTasks, numStatefulTasks, 1, () -> 5, numNodes);
testForConvergence(harness, configs, 1);
harness.addClient();
// we expect convergence to involve moving each task at most once, and we can move "maxWarmupReplicas" number
// of tasks at once, hence the iteration limit
testForConvergence(harness, configs, numStatefulTasks / maxWarmupReplicas + 1);
verifyValidAssignment(numStandbyReplicas, harness);
// min-cost rack aware assignor doesn't balance subtopology
if (!rackAwareStrategy.equals(StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC)) {
verifyBalancedAssignment(harness, skewThreshold);
}
}
@ParameterizedTest
@ValueSource(strings = {
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY
})
public void droppingNodesShouldConverge(final String rackAwareStrategy) {
setUp(rackAwareStrategy);
final int numStatelessTasks = 11;
final int numStatefulTasks = 13;
final int maxWarmupReplicas = 2;
final int numStandbyReplicas = 0;
final int numNodes = 10;
final AssignmentConfigs configs = new AssignmentConfigs(100L,
maxWarmupReplicas,
numStandbyReplicas,
60_000L,
EMPTY_RACK_AWARE_ASSIGNMENT_TAGS,
null,
null,
rackAwareStrategy);
final Harness harness = Harness.initializeCluster(numStatelessTasks, numStatefulTasks, 7, () -> 5, numNodes);
testForConvergence(harness, configs, 1);
harness.dropClient();
// This time, we allow one extra iteration because the
// first stateful task needs to get shuffled back to the first node
testForConvergence(harness, configs, numStatefulTasks / maxWarmupReplicas + 2);
verifyValidAssignment(numStandbyReplicas, harness);
// min-cost rack aware assignor doesn't balance subtopology
if (!rackAwareStrategy.equals(StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC)) {
verifyBalancedAssignment(harness, skewThreshold);
}
}
@ParameterizedTest
@ValueSource(strings = {
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC,
StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY
})
public void randomClusterPerturbationsShouldConverge(final String rackAwareStrategy) {
setUp(rackAwareStrategy);
// do as many tests as we can in 10 seconds
final long deadline = System.currentTimeMillis() + 10_000L;
do {
final long seed = new Random().nextLong();
runRandomizedScenario(seed, rackAwareStrategy);
} while (System.currentTimeMillis() < deadline);
}
private void runRandomizedScenario(final long seed, final String rackAwareStrategy) {
Harness harness = null;
try {
final Random prng = new Random(seed);
// These are all rand(limit)+1 because we need them to be at least 1 and the upper bound is exclusive
final int initialClusterSize = prng.nextInt(10) + 1;
final int numStatelessTasks = prng.nextInt(10) + 1;
final int numStatefulTasks = prng.nextInt(10) + 1;
final int maxWarmupReplicas = prng.nextInt(numStatefulTasks) + 1;
// This one is rand(limit+1) because we _want_ to test zero and the upper bound is exclusive
final int numStandbyReplicas = prng.nextInt(initialClusterSize + 1);
final int numNodes = numStatefulTasks + numStatelessTasks;
final int numberOfEvents = prng.nextInt(10) + 1;
final AssignmentConfigs configs = new AssignmentConfigs(100L,
maxWarmupReplicas,
numStandbyReplicas,
60_000L,
EMPTY_RACK_AWARE_ASSIGNMENT_TAGS,
null,
null,
rackAwareStrategy);
harness = Harness.initializeCluster(
numStatelessTasks,
numStatefulTasks,
initialClusterSize,
() -> prng.nextInt(10) + 1,
numNodes
);
testForConvergence(harness, configs, 1);
verifyValidAssignment(numStandbyReplicas, harness);
// min-cost rack aware assignor doesn't balance subtopology
if (!rackAwareStrategy.equals(StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC)) {
verifyBalancedAssignment(harness, skewThreshold);
}
for (int i = 0; i < numberOfEvents; i++) {
final int event = prng.nextInt(2);
switch (event) {
case 0:
harness.dropRandomClients(prng.nextInt(initialClusterSize), prng);
break;
case 1:
harness.addOrResurrectClientsRandomly(prng, initialClusterSize);
break;
default:
throw new IllegalStateException("Unexpected event: " + event);
}
if (!harness.clientStates.isEmpty()) {
testForConvergence(harness, configs, 2 * (numStatefulTasks + numStatefulTasks * numStandbyReplicas));
verifyValidAssignment(numStandbyReplicas, harness);
// min-cost rack aware assignor doesn't balance subtopology
if (!rackAwareStrategy.equals(StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC)) {
verifyBalancedAssignment(harness, skewThreshold);
}
}
}
} catch (final AssertionError t) {
throw new AssertionError(
"Assertion failed in randomized test. Reproduce with: `runRandomizedScenario(" + seed + ")`.",
t
);
} catch (final Throwable t) {
final StringBuilder builder =
new StringBuilder()
.append("Exception in randomized scenario. Reproduce with: `runRandomizedScenario(")
.append(seed)
.append(")`. ");
if (harness != null) {
builder.append(harness.history);
}
throw new AssertionError(builder.toString(), t);
}
}
private static void verifyBalancedAssignment(final Harness harness, final int skewThreshold) {
final Set<TaskId> allStatefulTasks = harness.statefulTaskEndOffsetSums.keySet();
final Map<ProcessId, ClientState> clientStates = harness.clientStates;
final StringBuilder failureContext = harness.history;
assertBalancedActiveAssignment(clientStates, failureContext);
assertBalancedStatefulAssignment(allStatefulTasks, clientStates, failureContext);
final AssignmentTestUtils.TaskSkewReport taskSkewReport = AssignmentTestUtils.analyzeTaskAssignmentBalance(harness.clientStates, skewThreshold);
if (taskSkewReport.totalSkewedTasks() > 0) {
fail("Expected a balanced task assignment, but was: " + taskSkewReport + '\n' + failureContext);
}
}
private static void verifyValidAssignment(final int numStandbyReplicas, final Harness harness) {
final Set<TaskId> statefulTasks = harness.statefulTaskEndOffsetSums.keySet();
final Set<TaskId> statelessTasks = harness.statelessTasks;
final Map<ProcessId, ClientState> assignedStates = harness.clientStates;
final StringBuilder failureContext = harness.history;
assertValidAssignment(numStandbyReplicas, statefulTasks, statelessTasks, assignedStates, failureContext);
}
private static void testForConvergence(final Harness harness,
final AssignmentConfigs configs,
final int iterationLimit) {
final Set<TaskId> allTasks = new TreeSet<>();
allTasks.addAll(harness.statelessTasks);
allTasks.addAll(harness.statefulTaskEndOffsetSums.keySet());
harness.recordConfig(configs);
boolean rebalancePending = true;
int iteration = 0;
final RackAwareTaskAssignor rackAwareTaskAssignor = new RackAwareTaskAssignor(
harness.fullMetadata,
harness.partitionsForTask,
harness.changelogPartitionsForTask,
harness.tasksForTopicGroup,
harness.racksForProcessConsumer,
harness.internalTopicManager,
configs,
TIME
);
while (rebalancePending && iteration < iterationLimit) {
iteration++;
harness.prepareForNextRebalance();
harness.recordBefore(iteration);
rebalancePending = new HighAvailabilityTaskAssignor().assign(
harness.clientStates,
allTasks,
harness.statefulTaskEndOffsetSums.keySet(),
rackAwareTaskAssignor,
configs
);
harness.recordAfter(iteration, rebalancePending);
}
if (rebalancePending) {
fail("Rebalances have not converged after iteration cutoff: " + iterationLimit + harness.history);
}
}
}
| Harness |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/collections/ElementCollectionCachePerfTest.java | {
"start": 1619,
"end": 5015
} | class ____ {
@BeforeAll
public void setUp(EntityManagerFactoryScope scope) {
scope.inTransaction( entityManager -> {
for ( int i = 0; i < 100; i++ ) {
final String id = UUID.randomUUID().toString();
final Element element = new Element( id );
element.setKeyValueEmbeddable( new KeyValue( "embeddable", "_" + id ) );
element.setAssociation1( new Association( (long) i, "assoc_" + id ) );
final Set<KeyValue> key1Values = new HashSet<>();
key1Values.add( new KeyValue( "key1_1", "_" + id ) );
key1Values.add( new KeyValue( "key1_2", "_" + id ) );
key1Values.add( new KeyValue( "key1_3", "_" + id ) );
element.setKeyValues1( key1Values );
final Set<KeyValue> key2Values = new HashSet<>();
key2Values.add( new KeyValue( "key2_1", "_" + id ) );
key2Values.add( new KeyValue( "key2_2", "_" + id ) );
element.setKeyValues2( key2Values );
final Map<String, KeyValue> map = new HashMap<>();
map.put( "k1", new KeyValue( "k1", "_" + id ) );
map.put( "k2", new KeyValue( "k2", "_" + id ) );
element.setMap( map );
entityManager.persist( element );
}
} );
}
@Test
public void testSelect1(EntityManagerFactoryScope scope) {
scope.inTransaction(
entityManager -> {
assertQuery(
entityManager,
"select e from Element e join fetch e.association1 join fetch e.keyValues1 join fetch e.keyValues2 join fetch e.map"
);
} );
}
@Test
public void testSelect2(EntityManagerFactoryScope scope) {
scope.inTransaction(
entityManager -> {
assertQuery(
entityManager,
"select e from Element e join fetch e.association1 join fetch e.map join fetch e.keyValues1 join fetch e.keyValues2"
);
} );
}
private static void assertQuery(EntityManager entityManager, String query) {
final List<Element> firstResult = entityManager.createQuery( query, Element.class )
.setHint( AvailableHints.HINT_CACHEABLE, true )
.getResultList();
assertResults( firstResult );
final List<Element> secondResult = entityManager.createQuery( query, Element.class )
.setHint( AvailableHints.HINT_CACHEABLE, true )
.getResultList();
assertResults( secondResult );
}
private static void assertResults(List<Element> result) {
for ( Element element : result ) {
final String id = element.getId();
assertThat( element.getAssociation1().getName() ).isEqualTo( "assoc_" + id );
assertThat( element.getKeyValueEmbeddable().getK() ).isEqualTo( "embeddable" );
assertThat( element.getKeyValueEmbeddable().getV() ).isEqualTo( "_" + id );
assertThat( element.getKeyValues1().size() ).isEqualTo( 3 );
assertThat( element.getKeyValues2().size() ).isEqualTo( 2 );
assertThat( element.getMap().size() ).isEqualTo( 2 );
assertThat( element.getKeyValues1() ).containsExactlyInAnyOrder(
new KeyValue( "key1_1", "_" + id ),
new KeyValue( "key1_2", "_" + id ),
new KeyValue( "key1_3", "_" + id )
);
assertThat( element.getKeyValues2() ).containsExactlyInAnyOrder(
new KeyValue( "key2_1", "_" + id ),
new KeyValue( "key2_2", "_" + id )
);
assertThat( element.getMap() ).containsExactly(
new AbstractMap.SimpleEntry<>( "k1", new KeyValue( "k1", "_" + id ) ),
new AbstractMap.SimpleEntry<>( "k2", new KeyValue( "k2", "_" + id ) )
);
}
}
@Entity(name = "Element")
public static | ElementCollectionCachePerfTest |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java | {
"start": 6008,
"end": 6882
} | class ____ will render the /tasks page
*/
protected Class<? extends View> tasksPage() {
return TasksPage.class;
}
/**
* Render the /tasks page
*/
public void tasks() {
try {
requireJob();
}
catch (Exception e) {
renderText(e.getMessage());
return;
}
if (app.getJob() != null) {
try {
String tt = $(TASK_TYPE);
tt = tt.isEmpty() ? "All" : StringUtils.capitalize(
org.apache.hadoop.util.StringUtils.toLowerCase(
MRApps.taskType(tt).toString()));
setTitle(join(tt, " Tasks for ", $(JOB_ID)));
} catch (Exception e) {
LOG.error("Failed to render tasks page with task type : "
+ $(TASK_TYPE) + " for job id : " + $(JOB_ID), e);
badRequest(e.getMessage());
}
}
render(tasksPage());
}
/**
* @return the | that |
java | apache__maven | its/core-it-support/core-it-plugins/maven-it-plugin-plexus-lifecycle/src/main/java/org/apache/maven/its/plugins/plexuslifecycle/FakeComponent.java | {
"start": 902,
"end": 952
} | interface ____ {
void doNothing();
}
| FakeComponent |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/ChainingConstructorIgnoresParameterTest.java | {
"start": 5423,
"end": 5571
} | class ____ {
NonStaticClass(String foo, boolean bar) {}
NonStaticClass(String foo) {
this(foo, false);
}
}
static | NonStaticClass |
java | apache__kafka | server/src/test/java/org/apache/kafka/server/share/CachedSharePartitionTest.java | {
"start": 1046,
"end": 3333
} | class ____ {
@Test
public void testCachedSharePartitionEqualsAndHashCode() {
Uuid topicId = Uuid.randomUuid();
String topicName = "topic";
int partition = 0;
CachedSharePartition cachedSharePartitionWithIdAndName = new
CachedSharePartition(topicName, topicId, partition, false);
CachedSharePartition cachedSharePartitionWithIdAndNoName = new
CachedSharePartition(null, topicId, partition, false);
CachedSharePartition cachedSharePartitionWithDifferentIdAndName = new
CachedSharePartition(topicName, Uuid.randomUuid(), partition, false);
CachedSharePartition cachedSharePartitionWithZeroIdAndName = new
CachedSharePartition(topicName, Uuid.ZERO_UUID, partition, false);
// CachedSharePartitions with valid topic IDs will compare topic ID and partition but not topic name.
assertEquals(cachedSharePartitionWithIdAndName, cachedSharePartitionWithIdAndNoName);
assertEquals(cachedSharePartitionWithIdAndName.hashCode(), cachedSharePartitionWithIdAndNoName.hashCode());
assertNotEquals(cachedSharePartitionWithIdAndName, cachedSharePartitionWithDifferentIdAndName);
assertNotEquals(cachedSharePartitionWithIdAndName.hashCode(), cachedSharePartitionWithDifferentIdAndName.hashCode());
assertNotEquals(cachedSharePartitionWithIdAndName, cachedSharePartitionWithZeroIdAndName);
assertNotEquals(cachedSharePartitionWithIdAndName.hashCode(), cachedSharePartitionWithZeroIdAndName.hashCode());
// CachedSharePartitions with null name and valid IDs will act just like ones with valid names
assertNotEquals(cachedSharePartitionWithIdAndNoName, cachedSharePartitionWithDifferentIdAndName);
assertNotEquals(cachedSharePartitionWithIdAndNoName.hashCode(), cachedSharePartitionWithDifferentIdAndName.hashCode());
assertNotEquals(cachedSharePartitionWithIdAndNoName, cachedSharePartitionWithZeroIdAndName);
assertNotEquals(cachedSharePartitionWithIdAndNoName.hashCode(), cachedSharePartitionWithZeroIdAndName.hashCode());
assertEquals(cachedSharePartitionWithZeroIdAndName.hashCode(), cachedSharePartitionWithZeroIdAndName.hashCode());
}
}
| CachedSharePartitionTest |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PipelinedApproximateSubpartition.java | {
"start": 1397,
"end": 4230
} | class ____ extends PipelinedSubpartition {
private static final Logger LOG =
LoggerFactory.getLogger(PipelinedApproximateSubpartition.class);
@GuardedBy("buffers")
private boolean isPartialBufferCleanupRequired = false;
PipelinedApproximateSubpartition(
int index,
int receiverExclusiveBuffersPerChannel,
int startingBufferSize,
ResultPartition parent) {
super(index, receiverExclusiveBuffersPerChannel, startingBufferSize, parent);
}
/**
* To simply the view releasing threading model, {@link
* PipelinedApproximateSubpartition#releaseView()} is called only before creating a new view.
*
* <p>There is still one corner case when a downstream task fails continuously in a short period
* of time then multiple netty worker threads can createReadView at the same time. TODO: This
* problem will be solved in FLINK-19774
*/
@Override
public PipelinedSubpartitionView createReadView(
BufferAvailabilityListener availabilityListener) {
synchronized (buffers) {
checkState(!isReleased);
releaseView();
LOG.debug(
"{}: Creating read view for subpartition {} of partition {}.",
parent.getOwningTaskName(),
getSubPartitionIndex(),
parent.getPartitionId());
readView = new PipelinedApproximateSubpartitionView(this, availabilityListener);
}
return readView;
}
@Override
Buffer buildSliceBuffer(BufferConsumerWithPartialRecordLength buffer) {
if (isPartialBufferCleanupRequired) {
isPartialBufferCleanupRequired = !buffer.cleanupPartialRecord();
}
return buffer.build();
}
private void releaseView() {
assert Thread.holdsLock(buffers);
if (readView != null) {
// upon reconnecting, two netty threads may require the same view to release
LOG.debug(
"Releasing view of subpartition {} of {}.",
getSubPartitionIndex(),
parent.getPartitionId());
readView.releaseAllResources();
readView = null;
isPartialBufferCleanupRequired = true;
isBlocked = false;
sequenceNumber = 0;
}
}
@Override
public boolean isSupportChannelStateRecover() {
return false;
}
/** for testing only. */
@VisibleForTesting
boolean isPartialBufferCleanupRequired() {
return isPartialBufferCleanupRequired;
}
/** for testing only. */
@VisibleForTesting
void setIsPartialBufferCleanupRequired() {
isPartialBufferCleanupRequired = true;
}
}
| PipelinedApproximateSubpartition |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/ext/javatime/ser/ZonedDateTimeSerWithJsonFormat333Test.java | {
"start": 626,
"end": 807
} | class ____ {
@JsonFormat(shape = JsonFormat.Shape.STRING, pattern = "yyyy-MM-dd HH:mm:ss z")
public ZonedDateTime value;
}
public static | ContainerWithPattern333 |
java | apache__maven | impl/maven-impl/src/main/java/org/apache/maven/impl/model/profile/ConditionParser.java | {
"start": 1393,
"end": 1548
} | interface ____ represents an expression function to be applied
* to a list of arguments. Implementers can define custom functions.
*/
public | that |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/string_/StringAssert_isStrictlyBetween_Test.java | {
"start": 792,
"end": 1162
} | class ____ extends StringAssertBaseTest {
@Override
protected StringAssert invoke_api_method() {
return assertions.isStrictlyBetween("bar", "baz");
}
@Override
protected void verify_internal_effects() {
verify(comparables).assertIsBetween(getInfo(assertions), getActual(assertions), "bar", "baz", false, false);
}
}
| StringAssert_isStrictlyBetween_Test |
java | dropwizard__dropwizard | dropwizard-logging/src/main/java/io/dropwizard/logging/common/ResilientOutputStreamBase.java | {
"start": 1188,
"end": 5107
} | class ____ extends OutputStream {
private static final int STATUS_COUNT_LIMIT = 2 * 4;
private int noContextWarning = 0;
private int statusCount = 0;
private Context context;
private RecoveryCoordinator recoveryCoordinator;
protected OutputStream os;
boolean presumedClean = true;
private boolean isPresumedInError() {
// existence of recoveryCoordinator indicates failed state
return (recoveryCoordinator != null && !presumedClean);
}
@Override
public void write(byte[] b, int off, int len) {
if (isPresumedInError()) {
if (!recoveryCoordinator.isTooSoon()) {
attemptRecovery();
}
return; // return regardless of the success of the recovery attempt
}
try {
os.write(b, off, len);
postSuccessfulWrite();
} catch (IOException e) {
postIOFailure(e);
}
}
@Override
public void write(int b) {
if (isPresumedInError()) {
if (!recoveryCoordinator.isTooSoon()) {
attemptRecovery();
}
return; // return regardless of the success of the recovery attempt
}
try {
os.write(b);
postSuccessfulWrite();
} catch (IOException e) {
postIOFailure(e);
}
}
@Override
public void flush() {
if (os != null) {
try {
os.flush();
postSuccessfulWrite();
} catch (IOException e) {
postIOFailure(e);
}
}
}
abstract String getDescription();
abstract OutputStream openNewOutputStream() throws IOException;
private void postSuccessfulWrite() {
if (recoveryCoordinator != null) {
recoveryCoordinator = null;
statusCount = 0;
addStatus(new InfoStatus("Recovered from IO failure on " + getDescription(), this));
}
}
private void postIOFailure(IOException e) {
addStatusIfCountNotOverLimit(new ErrorStatus("IO failure while writing to " + getDescription(), this, e));
presumedClean = false;
if (recoveryCoordinator == null) {
recoveryCoordinator = new RecoveryCoordinator();
}
}
@Override
public void close() throws IOException {
if (os != null) {
os.close();
}
}
private void attemptRecovery() {
try {
close();
} catch (IOException e) {
// Ignored
}
addStatusIfCountNotOverLimit(new InfoStatus("Attempting to recover from IO failure on " + getDescription(), this));
// subsequent writes must always be in append mode
try {
os = openNewOutputStream();
presumedClean = true;
} catch (IOException e) {
addStatusIfCountNotOverLimit(new ErrorStatus("Failed to open " + getDescription(), this, e));
}
}
private void addStatusIfCountNotOverLimit(Status s) {
++statusCount;
if (statusCount < STATUS_COUNT_LIMIT) {
addStatus(s);
}
if (statusCount == STATUS_COUNT_LIMIT) {
addStatus(s);
addStatus(new InfoStatus("Will supress future messages regarding " + getDescription(), this));
}
}
private void addStatus(Status status) {
if (context == null) {
if (noContextWarning++ == 0) {
System.out.println("LOGBACK: No context given for " + this);
}
return;
}
StatusManager sm = context.getStatusManager();
if (sm != null) {
sm.add(status);
}
}
public Context getContext() {
return context;
}
public void setContext(Context context) {
this.context = context;
}
}
| ResilientOutputStreamBase |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java | {
"start": 3064,
"end": 15338
} | class ____ extends ClientBaseWithFixes {
private static final Logger LOG =
LoggerFactory.getLogger(TestRMFailover.class.getName());
private static final HAServiceProtocol.StateChangeRequestInfo req =
new HAServiceProtocol.StateChangeRequestInfo(
HAServiceProtocol.RequestSource.REQUEST_BY_USER);
private static final String RM1_NODE_ID = "rm1";
private static final int RM1_PORT_BASE = 10000;
private static final String RM2_NODE_ID = "rm2";
private static final int RM2_PORT_BASE = 20000;
private Configuration conf;
private MiniYARNCluster cluster;
private ApplicationId fakeAppId;
@BeforeEach
public void setup() throws IOException {
fakeAppId = ApplicationId.newInstance(System.currentTimeMillis(), 0);
conf = new YarnConfiguration();
conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
conf.set(YarnConfiguration.RM_HA_IDS, RM1_NODE_ID + "," + RM2_NODE_ID);
HATestUtil.setRpcAddressForRM(RM1_NODE_ID, RM1_PORT_BASE, conf);
HATestUtil.setRpcAddressForRM(RM2_NODE_ID, RM2_PORT_BASE, conf);
conf.setLong(YarnConfiguration.CLIENT_FAILOVER_SLEEPTIME_BASE_MS, 100L);
conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS, true);
conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_USE_RPC, true);
cluster = new MiniYARNCluster(TestRMFailover.class.getName(), 2, 1, 1, 1);
}
@AfterEach
public void teardown() {
cluster.stop();
}
private void verifyClientConnection() {
int numRetries = 3;
while(numRetries-- > 0) {
Configuration conf = new YarnConfiguration(this.conf);
YarnClient client = YarnClient.createYarnClient();
client.init(conf);
client.start();
try {
client.getApplications();
return;
} catch (Exception e) {
LOG.error(e.toString());
} finally {
client.stop();
}
}
fail("Client couldn't connect to the Active RM");
}
private void verifyConnections() throws InterruptedException, YarnException {
assertTrue(
cluster.waitForNodeManagersToConnect(20000), "NMs failed to connect to the RM");
verifyClientConnection();
}
private AdminService getAdminService(int index) {
return cluster.getResourceManager(index).getRMContext().getRMAdminService();
}
private void explicitFailover() throws IOException {
int activeRMIndex = cluster.getActiveRMIndex();
int newActiveRMIndex = (activeRMIndex + 1) % 2;
getAdminService(activeRMIndex).transitionToStandby(req);
getAdminService(newActiveRMIndex).transitionToActive(req);
assertEquals(newActiveRMIndex, cluster.getActiveRMIndex(), "Failover failed");
}
private void failover()
throws IOException, InterruptedException, YarnException {
int activeRMIndex = cluster.getActiveRMIndex();
cluster.stopResourceManager(activeRMIndex);
assertEquals(
(activeRMIndex + 1) % 2, cluster.getActiveRMIndex(), "Failover failed");
cluster.restartResourceManager(activeRMIndex);
}
@Test
public void testExplicitFailover()
throws YarnException, InterruptedException, IOException {
conf.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, false);
cluster.init(conf);
cluster.start();
assertFalse(-1 == cluster.getActiveRMIndex(), "RM never turned active");
verifyConnections();
explicitFailover();
verifyConnections();
explicitFailover();
verifyConnections();
}
private void verifyRMTransitionToStandby(ResourceManager rm)
throws InterruptedException {
try {
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
return rm.getRMContext().getHAServiceState() ==
HAServiceState.STANDBY;
}
}, 100, 20000);
} catch (TimeoutException e) {
fail("RM didn't transition to Standby.");
}
}
@Test
public void testAutomaticFailover()
throws YarnException, InterruptedException, IOException {
conf.set(YarnConfiguration.RM_CLUSTER_ID, "yarn-test-cluster");
conf.set(YarnConfiguration.RM_ZK_ADDRESS, hostPort);
conf.setInt(YarnConfiguration.RM_ZK_TIMEOUT_MS, 2000);
cluster.init(conf);
cluster.start();
assertFalse(-1 == cluster.getActiveRMIndex(), "RM never turned active");
verifyConnections();
failover();
verifyConnections();
failover();
verifyConnections();
// Make the current Active handle an RMFatalEvent,
// so it transitions to standby.
ResourceManager rm = cluster.getResourceManager(
cluster.getActiveRMIndex());
rm.getRMContext().getDispatcher().getEventHandler().handle(
new RMFatalEvent(RMFatalEventType.STATE_STORE_FENCED, "test"));
verifyRMTransitionToStandby(rm);
verifyConnections();
}
@Test
public void testWebAppProxyInStandAloneMode() throws YarnException,
InterruptedException, IOException {
conf.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, false);
conf.set(YarnConfiguration.RM_HA_ID, RM1_NODE_ID);
WebAppProxyServer webAppProxyServer = new WebAppProxyServer();
try {
conf.set(YarnConfiguration.PROXY_ADDRESS, "localhost:9099");
cluster.init(conf);
cluster.start();
getAdminService(0).transitionToActive(req);
assertFalse(-1 == cluster.getActiveRMIndex(), "RM never turned active");
verifyConnections();
webAppProxyServer.init(conf);
// Start webAppProxyServer
Assertions.assertEquals(STATE.INITED, webAppProxyServer.getServiceState());
webAppProxyServer.start();
Assertions.assertEquals(STATE.STARTED, webAppProxyServer.getServiceState());
// send httpRequest with fakeApplicationId
// expect to get "Not Found" response and 404 response code
URL wrongUrl = new URL("http://localhost:9099/proxy/" + fakeAppId);
HttpURLConnection proxyConn = (HttpURLConnection) wrongUrl
.openConnection();
proxyConn.connect();
verifyResponse(proxyConn);
explicitFailover();
verifyConnections();
proxyConn.connect();
verifyResponse(proxyConn);
} finally {
webAppProxyServer.stop();
}
}
@Test
public void testEmbeddedWebAppProxy() throws YarnException,
InterruptedException, IOException {
conf.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, false);
cluster.init(conf);
cluster.start();
assertFalse(-1 == cluster.getActiveRMIndex(), "RM never turned active");
verifyConnections();
// send httpRequest with fakeApplicationId
// expect to get "Not Found" response and 404 response code
URL wrongUrl = new URL("http://localhost:18088/proxy/" + fakeAppId);
HttpURLConnection proxyConn = (HttpURLConnection) wrongUrl
.openConnection();
proxyConn.connect();
verifyResponse(proxyConn);
explicitFailover();
verifyConnections();
proxyConn.connect();
verifyResponse(proxyConn);
}
private void verifyResponse(HttpURLConnection response)
throws IOException {
assertEquals("Not Found", response.getResponseMessage());
assertEquals(404, response.getResponseCode());
}
@Test
public void testRMWebAppRedirect() throws YarnException,
InterruptedException, IOException {
cluster = new MiniYARNCluster(TestRMFailover.class.getName(), 2, 0, 1, 1);
conf.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, false);
cluster.init(conf);
cluster.start();
getAdminService(0).transitionToActive(req);
String rm1Url = "http://localhost:18088";
String rm2Url = "http://localhost:28088";
String redirectURL = getRedirectURL(rm2Url);
// if uri is null, RMWebAppFilter will append a slash at the trail of the redirection url
assertEquals(redirectURL,rm1Url+"/");
redirectURL = getRedirectURL(rm2Url + "/metrics");
assertEquals(redirectURL,rm1Url + "/metrics");
// standby RM links /conf, /stacks, /logLevel, /static, /logs, /jmx, /prom
// /cluster/cluster as well as webService
// /ws/v1/cluster/info should not be redirected to active RM
redirectURL = getRedirectURL(rm2Url + "/cluster/cluster");
assertNull(redirectURL);
redirectURL = getRedirectURL(rm2Url + "/conf");
assertNull(redirectURL);
redirectURL = getRedirectURL(rm2Url + "/stacks");
assertNull(redirectURL);
redirectURL = getRedirectURL(rm2Url + "/logLevel");
assertNull(redirectURL);
redirectURL = getRedirectURL(rm2Url + "/static");
assertNull(redirectURL);
redirectURL = getRedirectURL(rm2Url + "/logs");
assertNull(redirectURL);
redirectURL = getRedirectURL(rm2Url + "/jmx?param1=value1+x¶m2=y");
assertNull(redirectURL);
redirectURL = getRedirectURL(rm2Url + "/prom");
assertNull(redirectURL);
redirectURL = getRedirectURL(rm2Url + "/ws/v1/cluster/info");
assertNull(redirectURL);
redirectURL = getRedirectURL(rm2Url + "/ws/v1/cluster/apps");
assertEquals(redirectURL, rm1Url + "/ws/v1/cluster/apps");
redirectURL = getRedirectURL(rm2Url + "/proxy/" + fakeAppId);
assertNull(redirectURL);
// transit the active RM to standby
// Both of RMs are in standby mode
getAdminService(0).transitionToStandby(req);
// RM2 is expected to send the httpRequest to itself.
// The Header Field: Refresh is expected to be set.
redirectURL = getRefreshURL(rm2Url);
assertTrue(redirectURL != null
&& redirectURL.contains(YarnWebParams.NEXT_REFRESH_INTERVAL)
&& redirectURL.contains(rm2Url));
}
// set up http connection with the given url and get the redirection url from the response
// return null if the url is not redirected
static String getRedirectURL(String url) {
String redirectUrl = null;
try {
HttpURLConnection conn = (HttpURLConnection) new URL(url).openConnection();
// do not automatically follow the redirection
// otherwise we get too many redirections exception
conn.setInstanceFollowRedirects(false);
if(conn.getResponseCode() == HttpServletResponse.SC_TEMPORARY_REDIRECT) {
redirectUrl = conn.getHeaderField("Location");
}
} catch (Exception e) {
// throw new RuntimeException(e);
}
return redirectUrl;
}
static String getRefreshURL(String url) {
String redirectUrl = null;
try {
HttpURLConnection conn = (HttpURLConnection) new URL(url).openConnection();
// do not automatically follow the redirection
// otherwise we get too many redirections exception
conn.setInstanceFollowRedirects(false);
redirectUrl = conn.getHeaderField("Refresh");
} catch (Exception e) {
// throw new RuntimeException(e);
}
return redirectUrl;
}
/**
* Throw {@link RuntimeException} inside a thread of
* {@link ResourceManager} with HA enabled and check if the
* {@link ResourceManager} is transited to standby state.
*
* @throws InterruptedException if any
*/
@Test
public void testUncaughtExceptionHandlerWithHAEnabled()
throws InterruptedException {
conf.set(YarnConfiguration.RM_CLUSTER_ID, "yarn-test-cluster");
conf.set(YarnConfiguration.RM_ZK_ADDRESS, hostPort);
cluster.init(conf);
cluster.start();
assertFalse(-1 == cluster.getActiveRMIndex(), "RM never turned active");
ResourceManager resourceManager = cluster.getResourceManager(
cluster.getActiveRMIndex());
final RMCriticalThreadUncaughtExceptionHandler exHandler =
new RMCriticalThreadUncaughtExceptionHandler(
resourceManager.getRMContext());
// Create a thread and throw a RTE inside it
final RuntimeException rte = new RuntimeException("TestRuntimeException");
final Thread testThread = new SubjectInheritingThread(new Runnable() {
@Override
public void run() {
throw rte;
}
});
testThread.setName("TestThread");
testThread.setUncaughtExceptionHandler(exHandler);
testThread.start();
testThread.join();
verifyRMTransitionToStandby(resourceManager);
}
/**
* Throw {@link RuntimeException} inside a thread of
* {@link ResourceManager} with HA disabled and check
* {@link RMCriticalThreadUncaughtExceptionHandler} instance.
*
* Used {@link ExitUtil} | TestRMFailover |
java | apache__flink | flink-formats/flink-json/src/main/java/org/apache/flink/formats/json/maxwell/MaxwellJsonFormatFactory.java | {
"start": 2799,
"end": 6524
} | class ____
implements DeserializationFormatFactory, SerializationFormatFactory {
public static final String IDENTIFIER = "maxwell-json";
@Override
public DecodingFormat<DeserializationSchema<RowData>> createDecodingFormat(
DynamicTableFactory.Context context, ReadableConfig formatOptions) {
FactoryUtil.validateFactoryOptions(this, formatOptions);
validateDecodingFormatOptions(formatOptions);
final boolean ignoreParseErrors = formatOptions.get(IGNORE_PARSE_ERRORS);
final TimestampFormat timestampFormat =
JsonFormatOptionsUtil.getTimestampFormat(formatOptions);
return new MaxwellJsonDecodingFormat(ignoreParseErrors, timestampFormat);
}
@Override
public EncodingFormat<SerializationSchema<RowData>> createEncodingFormat(
DynamicTableFactory.Context context, ReadableConfig formatOptions) {
FactoryUtil.validateFactoryOptions(this, formatOptions);
validateEncodingFormatOptions(formatOptions);
TimestampFormat timestampFormat = JsonFormatOptionsUtil.getTimestampFormat(formatOptions);
JsonFormatOptions.MapNullKeyMode mapNullKeyMode =
JsonFormatOptionsUtil.getMapNullKeyMode(formatOptions);
String mapNullKeyLiteral = formatOptions.get(JSON_MAP_NULL_KEY_LITERAL);
final boolean encodeDecimalAsPlainNumber =
formatOptions.get(ENCODE_DECIMAL_AS_PLAIN_NUMBER);
final boolean ignoreNullFields = formatOptions.get(ENCODE_IGNORE_NULL_FIELDS);
return new EncodingFormat<SerializationSchema<RowData>>() {
@Override
public ChangelogMode getChangelogMode() {
return ChangelogMode.newBuilder()
.addContainedKind(RowKind.INSERT)
.addContainedKind(RowKind.UPDATE_BEFORE)
.addContainedKind(RowKind.UPDATE_AFTER)
.addContainedKind(RowKind.DELETE)
.build();
}
@Override
public SerializationSchema<RowData> createRuntimeEncoder(
DynamicTableSink.Context context, DataType consumedDataType) {
final RowType rowType = (RowType) consumedDataType.getLogicalType();
return new MaxwellJsonSerializationSchema(
rowType,
timestampFormat,
mapNullKeyMode,
mapNullKeyLiteral,
encodeDecimalAsPlainNumber,
ignoreNullFields);
}
};
}
@Override
public String factoryIdentifier() {
return IDENTIFIER;
}
@Override
public Set<ConfigOption<?>> requiredOptions() {
return Collections.emptySet();
}
@Override
public Set<ConfigOption<?>> optionalOptions() {
Set<ConfigOption<?>> options = new HashSet<>();
options.add(IGNORE_PARSE_ERRORS);
options.add(TIMESTAMP_FORMAT);
options.add(JSON_MAP_NULL_KEY_MODE);
options.add(JSON_MAP_NULL_KEY_LITERAL);
options.add(ENCODE_DECIMAL_AS_PLAIN_NUMBER);
options.add(ENCODE_IGNORE_NULL_FIELDS);
return options;
}
/** Validator for maxwell decoding format. */
private static void validateDecodingFormatOptions(ReadableConfig tableOptions) {
JsonFormatOptionsUtil.validateDecodingFormatOptions(tableOptions);
}
/** Validator for maxwell encoding format. */
private static void validateEncodingFormatOptions(ReadableConfig tableOptions) {
JsonFormatOptionsUtil.validateEncodingFormatOptions(tableOptions);
}
}
| MaxwellJsonFormatFactory |
java | spring-projects__spring-boot | configuration-metadata/spring-boot-configuration-processor/src/test/java/org/springframework/boot/configurationsample/name/ConstructorParameterNameAnnotationProperties.java | {
"start": 1062,
"end": 1620
} | class ____ {
/**
* Imports to apply.
*/
private final String imports;
/**
* Whether default mode is enabled.
*/
private final boolean defaultValue;
public ConstructorParameterNameAnnotationProperties(@TestName("import") String imports,
@TestName("default") @TestDefaultValue("true") boolean defaultValue) {
this.imports = imports;
this.defaultValue = defaultValue;
}
public String getImports() {
return this.imports;
}
public boolean isDefaultValue() {
return this.defaultValue;
}
}
| ConstructorParameterNameAnnotationProperties |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/builtin/bean/DatatypeFactory.java | {
"start": 200,
"end": 364
} | class ____ {
public XmlGregorianCalendarFactorizedProperty create() {
return new XmlGregorianCalendarFactorizedProperty( "test" );
}
}
| DatatypeFactory |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/io/network/metrics/CreditBasedInputBuffersUsageGauge.java | {
"start": 1128,
"end": 2264
} | class ____ extends AbstractBuffersUsageGauge {
private final FloatingBuffersUsageGauge floatingBuffersUsageGauge;
private final ExclusiveBuffersUsageGauge exclusiveBuffersUsageGauge;
public CreditBasedInputBuffersUsageGauge(
FloatingBuffersUsageGauge floatingBuffersUsageGauge,
ExclusiveBuffersUsageGauge exclusiveBuffersUsageGauge,
SingleInputGate[] inputGates) {
super(checkNotNull(inputGates));
this.floatingBuffersUsageGauge = checkNotNull(floatingBuffersUsageGauge);
this.exclusiveBuffersUsageGauge = checkNotNull(exclusiveBuffersUsageGauge);
}
@Override
public int calculateUsedBuffers(SingleInputGate inputGate) {
return floatingBuffersUsageGauge.calculateUsedBuffers(inputGate)
+ exclusiveBuffersUsageGauge.calculateUsedBuffers(inputGate);
}
@Override
public int calculateTotalBuffers(SingleInputGate inputGate) {
return floatingBuffersUsageGauge.calculateTotalBuffers(inputGate)
+ exclusiveBuffersUsageGauge.calculateTotalBuffers(inputGate);
}
}
| CreditBasedInputBuffersUsageGauge |
java | elastic__elasticsearch | x-pack/plugin/ml-package-loader/src/test/java/org/elasticsearch/xpack/ml/packageloader/action/ModelDownloadStatusTests.java | {
"start": 465,
"end": 1397
} | class ____ extends AbstractWireSerializingTestCase<ModelDownloadTask.DownloadStatus> {
@Override
protected Writeable.Reader<ModelDownloadTask.DownloadStatus> instanceReader() {
return ModelDownloadTask.DownloadStatus::new;
}
@Override
protected ModelDownloadTask.DownloadStatus createTestInstance() {
return new ModelDownloadTask.DownloadStatus(
new ModelDownloadTask.DownLoadProgress(randomIntBetween(1, 1000), randomIntBetween(0, 1000))
);
}
@Override
protected ModelDownloadTask.DownloadStatus mutateInstance(ModelDownloadTask.DownloadStatus instance) throws IOException {
return new ModelDownloadTask.DownloadStatus(
new ModelDownloadTask.DownLoadProgress(
instance.downloadProgress().totalParts() + 1,
instance.downloadProgress().downloadedParts() - 1
)
);
}
}
| ModelDownloadStatusTests |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/config/plugins/convert/DateTypeConverter.java | {
"start": 2110,
"end": 2518
} | class ____ use
* @return new instance of D or null if there was an error
*/
@SuppressWarnings("unchecked")
public static <D extends Date> D fromMillis(final long millis, final Class<D> type) {
try {
return (D) CONSTRUCTORS.get(type).invoke(millis);
} catch (final Throwable ignored) {
return null;
}
}
private DateTypeConverter() {}
}
| to |
java | google__dagger | javatests/dagger/internal/codegen/ComponentProcessorTest.java | {
"start": 30951,
"end": 31604
} | interface ____ {",
" B b();",
"}");
CompilerTests.daggerCompiler(bFile, arrayComponentFile, bComponentFile)
.withProcessingOptions(compilerMode.processorOptions())
.compile(
subject -> {
subject.hasErrorCount(0);
subject.generatedSource(goldenFileRule.goldenSource("test/DaggerBComponent"));
});
}
@Test
public void dependencyNameCollision() throws Exception {
Source a1 = CompilerTests.javaSource("pkg1.A",
"package pkg1;",
"",
"import javax.inject.Inject;",
"",
"public final | BComponent |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java | {
"start": 1152,
"end": 1273
} | class ____ build information about Hadoop components.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public | returns |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/MissingOverrideTest.java | {
"start": 2607,
"end": 2765
} | interface ____ {
void f();
}
""")
.addSourceLines(
"Test.java",
"""
public | Super |
java | apache__logging-log4j2 | log4j-core-test/src/test/java/org/apache/logging/log4j/core/config/plugins/util/ResolverUtilCustomProtocolTest.java | {
"start": 1778,
"end": 1842
} | class ____ custom protocol like bundleresource, vfs, vfszip.
*/
| for |
java | hibernate__hibernate-orm | tooling/metamodel-generator/src/jakartaData/java/org/hibernate/processor/test/data/namedquery/BookAuthorRepository.java | {
"start": 341,
"end": 462
} | interface ____ {
List<Book> findByTitleLike(String title);
List<Book> findByTypeIn(Set<Type> types);
}
| BookAuthorRepository |
java | grpc__grpc-java | xds/src/test/java/io/grpc/xds/GrpcXdsClientImplDataTest.java | {
"start": 62974,
"end": 98949
} | class ____ implements io.grpc.xds.Filter.Provider {
@Override
public String[] typeUrls() {
return new String[]{"test-url"};
}
@Override
public boolean isClientFilter() {
return true;
}
@Override
public TestFilter newInstance(String name) {
return new TestFilter();
}
@Override
public ConfigOrError<SimpleFilterConfig> parseFilterConfig(Message rawProtoMessage) {
return ConfigOrError.fromConfig(new SimpleFilterConfig(rawProtoMessage));
}
@Override
public ConfigOrError<SimpleFilterConfig> parseFilterConfigOverride(Message rawProtoMessage) {
return ConfigOrError.fromConfig(new SimpleFilterConfig(rawProtoMessage));
}
}
}
@Test
public void parseHttpFilter_typedStructMigration() {
filterRegistry.register(new TestFilter.Provider());
Struct rawStruct = Struct.newBuilder()
.putFields("name", Value.newBuilder().setStringValue("default").build())
.build();
HttpFilter httpFilter = HttpFilter.newBuilder()
.setIsOptional(true)
.setTypedConfig(Any.pack(
com.github.udpa.udpa.type.v1.TypedStruct.newBuilder()
.setTypeUrl("test-url")
.setValue(rawStruct)
.build())).build();
FilterConfig config = XdsListenerResource.parseHttpFilter(httpFilter, filterRegistry,
true).getStruct();
assertThat(((SimpleFilterConfig)config).getConfig()).isEqualTo(rawStruct);
HttpFilter httpFilterNewTypeStruct = HttpFilter.newBuilder()
.setIsOptional(true)
.setTypedConfig(Any.pack(
TypedStruct.newBuilder()
.setTypeUrl("test-url")
.setValue(rawStruct)
.build())).build();
config = XdsListenerResource.parseHttpFilter(httpFilterNewTypeStruct, filterRegistry,
true).getStruct();
assertThat(((SimpleFilterConfig)config).getConfig()).isEqualTo(rawStruct);
}
@Test
public void parseOverrideHttpFilter_typedStructMigration() {
filterRegistry.register(new TestFilter.Provider());
Struct rawStruct0 = Struct.newBuilder()
.putFields("name", Value.newBuilder().setStringValue("default0").build())
.build();
Struct rawStruct1 = Struct.newBuilder()
.putFields("name", Value.newBuilder().setStringValue("default1").build())
.build();
Map<String, Any> rawFilterMap = ImmutableMap.of(
"struct-0", Any.pack(
com.github.udpa.udpa.type.v1.TypedStruct.newBuilder()
.setTypeUrl("test-url")
.setValue(rawStruct0)
.build()),
"struct-1", Any.pack(
TypedStruct.newBuilder()
.setTypeUrl("test-url")
.setValue(rawStruct1)
.build())
);
Map<String, FilterConfig> map = XdsRouteConfigureResource.parseOverrideFilterConfigs(
rawFilterMap, filterRegistry).getStruct();
assertThat(((SimpleFilterConfig)map.get("struct-0")).getConfig()).isEqualTo(rawStruct0);
assertThat(((SimpleFilterConfig)map.get("struct-1")).getConfig()).isEqualTo(rawStruct1);
}
@Test
public void parseHttpFilter_unsupportedAndRequired() {
HttpFilter httpFilter = HttpFilter.newBuilder()
.setIsOptional(false)
.setName("unsupported.filter")
.setTypedConfig(Any.pack(StringValue.of("string value")))
.build();
assertThat(XdsListenerResource.parseHttpFilter(httpFilter, filterRegistry, true)
.getErrorDetail()).isEqualTo(
"HttpFilter [unsupported.filter]"
+ "(type.googleapis.com/google.protobuf.StringValue) is required but unsupported "
+ "for client");
}
@Test
public void parseHttpFilter_routerFilterForClient() {
filterRegistry.register(ROUTER_FILTER_PROVIDER);
HttpFilter httpFilter =
HttpFilter.newBuilder()
.setIsOptional(false)
.setName("envoy.router")
.setTypedConfig(Any.pack(Router.getDefaultInstance()))
.build();
FilterConfig config = XdsListenerResource.parseHttpFilter(
httpFilter, filterRegistry, true /* isForClient */).getStruct();
assertThat(config.typeUrl()).isEqualTo(RouterFilter.TYPE_URL);
}
@Test
public void parseHttpFilter_routerFilterForServer() {
filterRegistry.register(ROUTER_FILTER_PROVIDER);
HttpFilter httpFilter =
HttpFilter.newBuilder()
.setIsOptional(false)
.setName("envoy.router")
.setTypedConfig(Any.pack(Router.getDefaultInstance()))
.build();
FilterConfig config = XdsListenerResource.parseHttpFilter(
httpFilter, filterRegistry, false /* isForClient */).getStruct();
assertThat(config.typeUrl()).isEqualTo(RouterFilter.TYPE_URL);
}
@Test
public void parseHttpFilter_faultConfigForClient() {
filterRegistry.register(FAULT_FILTER_PROVIDER);
HttpFilter httpFilter =
HttpFilter.newBuilder()
.setIsOptional(false)
.setName("envoy.fault")
.setTypedConfig(
Any.pack(
HTTPFault.newBuilder()
.setDelay(
FaultDelay.newBuilder()
.setFixedDelay(Durations.fromNanos(1234L)))
.setAbort(
FaultAbort.newBuilder()
.setHttpStatus(300)
.setPercentage(
FractionalPercent.newBuilder()
.setNumerator(10)
.setDenominator(DenominatorType.HUNDRED)))
.build()))
.build();
FilterConfig config = XdsListenerResource.parseHttpFilter(
httpFilter, filterRegistry, true /* isForClient */).getStruct();
assertThat(config).isInstanceOf(FaultConfig.class);
}
@Test
public void parseHttpFilter_faultConfigUnsupportedForServer() {
filterRegistry.register(FAULT_FILTER_PROVIDER);
HttpFilter httpFilter =
HttpFilter.newBuilder()
.setIsOptional(false)
.setName("envoy.fault")
.setTypedConfig(
Any.pack(
HTTPFault.newBuilder()
.setDelay(
FaultDelay.newBuilder()
.setFixedDelay(Durations.fromNanos(1234L)))
.setAbort(
FaultAbort.newBuilder()
.setHttpStatus(300)
.setPercentage(
FractionalPercent.newBuilder()
.setNumerator(10)
.setDenominator(DenominatorType.HUNDRED)))
.build()))
.build();
StructOrError<FilterConfig> config =
XdsListenerResource.parseHttpFilter(httpFilter, filterRegistry, false /* isForClient */);
assertThat(config.getErrorDetail()).isEqualTo(
"HttpFilter [envoy.fault](" + FaultFilter.TYPE_URL + ") is required but "
+ "unsupported for server");
}
@Test
public void parseHttpFilter_rbacConfigForServer() {
filterRegistry.register(RBAC_FILTER_PROVIDER);
HttpFilter httpFilter =
HttpFilter.newBuilder()
.setIsOptional(false)
.setName("envoy.auth")
.setTypedConfig(
Any.pack(
io.envoyproxy.envoy.extensions.filters.http.rbac.v3.RBAC.newBuilder()
.setRules(
RBAC.newBuilder()
.setAction(Action.ALLOW)
.putPolicies(
"allow-all",
Policy.newBuilder()
.addPrincipals(Principal.newBuilder().setAny(true))
.addPermissions(Permission.newBuilder().setAny(true))
.build())
.build())
.build()))
.build();
FilterConfig config = XdsListenerResource.parseHttpFilter(
httpFilter, filterRegistry, false /* isForClient */).getStruct();
assertThat(config).isInstanceOf(RbacConfig.class);
}
@Test
public void parseHttpFilter_rbacConfigUnsupportedForClient() {
filterRegistry.register(RBAC_FILTER_PROVIDER);
HttpFilter httpFilter =
HttpFilter.newBuilder()
.setIsOptional(false)
.setName("envoy.auth")
.setTypedConfig(
Any.pack(
io.envoyproxy.envoy.extensions.filters.http.rbac.v3.RBAC.newBuilder()
.setRules(
RBAC.newBuilder()
.setAction(Action.ALLOW)
.putPolicies(
"allow-all",
Policy.newBuilder()
.addPrincipals(Principal.newBuilder().setAny(true))
.addPermissions(Permission.newBuilder().setAny(true))
.build())
.build())
.build()))
.build();
StructOrError<FilterConfig> config =
XdsListenerResource.parseHttpFilter(httpFilter, filterRegistry, true /* isForClient */);
assertThat(config.getErrorDetail()).isEqualTo(
"HttpFilter [envoy.auth](" + RbacFilter.TYPE_URL + ") is required but "
+ "unsupported for client");
}
@Test
public void parseOverrideRbacFilterConfig() {
filterRegistry.register(RBAC_FILTER_PROVIDER);
RBACPerRoute rbacPerRoute =
RBACPerRoute.newBuilder()
.setRbac(
io.envoyproxy.envoy.extensions.filters.http.rbac.v3.RBAC.newBuilder()
.setRules(
RBAC.newBuilder()
.setAction(Action.ALLOW)
.putPolicies(
"allow-all",
Policy.newBuilder()
.addPrincipals(Principal.newBuilder().setAny(true))
.addPermissions(Permission.newBuilder().setAny(true))
.build())))
.build();
Map<String, Any> configOverrides = ImmutableMap.of("envoy.auth", Any.pack(rbacPerRoute));
Map<String, FilterConfig> parsedConfigs =
XdsRouteConfigureResource.parseOverrideFilterConfigs(configOverrides, filterRegistry)
.getStruct();
assertThat(parsedConfigs).hasSize(1);
assertThat(parsedConfigs).containsKey("envoy.auth");
assertThat(parsedConfigs.get("envoy.auth")).isInstanceOf(RbacConfig.class);
}
@Test
public void parseOverrideFilterConfigs_unsupportedButOptional() {
filterRegistry.register(FAULT_FILTER_PROVIDER);
HTTPFault httpFault = HTTPFault.newBuilder()
.setDelay(FaultDelay.newBuilder().setFixedDelay(Durations.fromNanos(3000)))
.build();
Map<String, Any> configOverrides = ImmutableMap.of(
"envoy.fault",
Any.pack(httpFault),
"unsupported.filter",
Any.pack(io.envoyproxy.envoy.config.route.v3.FilterConfig.newBuilder()
.setIsOptional(true).setConfig(Any.pack(StringValue.of("string value")))
.build()));
Map<String, FilterConfig> parsedConfigs =
XdsRouteConfigureResource.parseOverrideFilterConfigs(configOverrides, filterRegistry)
.getStruct();
assertThat(parsedConfigs).hasSize(1);
assertThat(parsedConfigs).containsKey("envoy.fault");
}
@Test
public void parseOverrideFilterConfigs_unsupportedAndRequired() {
filterRegistry.register(FAULT_FILTER_PROVIDER);
HTTPFault httpFault = HTTPFault.newBuilder()
.setDelay(FaultDelay.newBuilder().setFixedDelay(Durations.fromNanos(3000)))
.build();
Map<String, Any> configOverrides = ImmutableMap.of(
"envoy.fault",
Any.pack(httpFault),
"unsupported.filter",
Any.pack(io.envoyproxy.envoy.config.route.v3.FilterConfig.newBuilder()
.setIsOptional(false).setConfig(Any.pack(StringValue.of("string value")))
.build()));
assertThat(XdsRouteConfigureResource.parseOverrideFilterConfigs(configOverrides, filterRegistry)
.getErrorDetail()).isEqualTo(
"HttpFilter [unsupported.filter]"
+ "(type.googleapis.com/google.protobuf.StringValue) is required but unsupported");
configOverrides = ImmutableMap.of(
"envoy.fault",
Any.pack(httpFault),
"unsupported.filter",
Any.pack(StringValue.of("string value")));
assertThat(XdsRouteConfigureResource.parseOverrideFilterConfigs(configOverrides, filterRegistry)
.getErrorDetail()).isEqualTo(
"HttpFilter [unsupported.filter]"
+ "(type.googleapis.com/google.protobuf.StringValue) is required but unsupported");
}
@Test
public void parseHttpConnectionManager_xffNumTrustedHopsUnsupported()
throws ResourceInvalidException {
@SuppressWarnings("deprecation")
HttpConnectionManager hcm = HttpConnectionManager.newBuilder().setXffNumTrustedHops(2).build();
ResourceInvalidException e = assertThrows(ResourceInvalidException.class,
() -> XdsListenerResource.parseHttpConnectionManager(
hcm, filterRegistry,
true /* does not matter */, getXdsResourceTypeArgs(true)));
assertThat(e).hasMessageThat()
.isEqualTo("HttpConnectionManager with xff_num_trusted_hops unsupported");
}
@Test
public void parseHttpConnectionManager_OriginalIpDetectionExtensionsMustEmpty()
throws ResourceInvalidException {
@SuppressWarnings("deprecation")
HttpConnectionManager hcm = HttpConnectionManager.newBuilder()
.addOriginalIpDetectionExtensions(TypedExtensionConfig.newBuilder().build())
.build();
ResourceInvalidException e = assertThrows(ResourceInvalidException.class, () ->
XdsListenerResource.parseHttpConnectionManager(
hcm, filterRegistry, false, getXdsResourceTypeArgs(true)));
assertThat(e).hasMessageThat()
.isEqualTo("HttpConnectionManager with original_ip_detection_extensions unsupported");
}
@Test
public void parseHttpConnectionManager_missingRdsAndInlinedRouteConfiguration()
throws ResourceInvalidException {
HttpConnectionManager hcm =
HttpConnectionManager.newBuilder()
.setCommonHttpProtocolOptions(
HttpProtocolOptions.newBuilder()
.setMaxStreamDuration(Durations.fromNanos(1000L)))
.addHttpFilters(
HttpFilter.newBuilder().setName("terminal").setTypedConfig(
Any.pack(Router.newBuilder().build())).setIsOptional(true))
.build();
ResourceInvalidException e = assertThrows(ResourceInvalidException.class, () ->
XdsListenerResource.parseHttpConnectionManager(
hcm, filterRegistry,
true /* does not matter */, getXdsResourceTypeArgs(true)));
assertThat(e).hasMessageThat()
.isEqualTo("HttpConnectionManager neither has inlined route_config nor RDS");
}
@Test
public void parseHttpConnectionManager_duplicateHttpFilters() throws ResourceInvalidException {
HttpConnectionManager hcm =
HttpConnectionManager.newBuilder()
.addHttpFilters(
HttpFilter.newBuilder().setName("envoy.filter.foo").setIsOptional(true))
.addHttpFilters(
HttpFilter.newBuilder().setName("envoy.filter.foo").setIsOptional(true))
.addHttpFilters(
HttpFilter.newBuilder().setName("terminal").setTypedConfig(
Any.pack(Router.newBuilder().build())).setIsOptional(true))
.build();
ResourceInvalidException e = assertThrows(ResourceInvalidException.class, () ->
XdsListenerResource.parseHttpConnectionManager(
hcm, filterRegistry,
true /* does not matter */, getXdsResourceTypeArgs(true)));
assertThat(e).hasMessageThat()
.isEqualTo("HttpConnectionManager contains duplicate HttpFilter: envoy.filter.foo");
}
@Test
public void parseHttpConnectionManager_lastNotTerminal() throws ResourceInvalidException {
filterRegistry.register(FAULT_FILTER_PROVIDER);
HttpConnectionManager hcm =
HttpConnectionManager.newBuilder()
.addHttpFilters(
HttpFilter.newBuilder().setName("envoy.filter.foo").setIsOptional(true))
.addHttpFilters(
HttpFilter.newBuilder().setName("envoy.filter.bar").setIsOptional(true)
.setTypedConfig(Any.pack(HTTPFault.newBuilder().build())))
.build();
ResourceInvalidException e = assertThrows(ResourceInvalidException.class, () ->
XdsListenerResource.parseHttpConnectionManager(
hcm, filterRegistry,
true /* does not matter */, getXdsResourceTypeArgs(true)));
assertThat(e).hasMessageThat()
.isEqualTo("The last HttpFilter must be a terminal filter: envoy.filter.bar");
}
@Test
public void parseHttpConnectionManager_terminalNotLast() throws ResourceInvalidException {
filterRegistry.register(ROUTER_FILTER_PROVIDER);
HttpConnectionManager hcm =
HttpConnectionManager.newBuilder()
.addHttpFilters(
HttpFilter.newBuilder().setName("terminal").setTypedConfig(
Any.pack(Router.newBuilder().build())).setIsOptional(true))
.addHttpFilters(
HttpFilter.newBuilder().setName("envoy.filter.foo").setIsOptional(true))
.build();
ResourceInvalidException e = assertThrows(ResourceInvalidException.class, () ->
XdsListenerResource.parseHttpConnectionManager(
hcm, filterRegistry,
true, getXdsResourceTypeArgs(true)));
assertThat(e).hasMessageThat()
.isEqualTo("A terminal HttpFilter must be the last filter: terminal");
}
@Test
public void parseHttpConnectionManager_unknownFilters() throws ResourceInvalidException {
HttpConnectionManager hcm =
HttpConnectionManager.newBuilder()
.addHttpFilters(
HttpFilter.newBuilder().setName("envoy.filter.foo").setIsOptional(true))
.addHttpFilters(
HttpFilter.newBuilder().setName("envoy.filter.bar").setIsOptional(true))
.build();
ResourceInvalidException e = assertThrows(ResourceInvalidException.class, () ->
XdsListenerResource.parseHttpConnectionManager(
hcm, filterRegistry,
true /* does not matter */, getXdsResourceTypeArgs(true)));
assertThat(e).hasMessageThat()
.isEqualTo("The last HttpFilter must be a terminal filter: envoy.filter.bar");
}
@Test
public void parseHttpConnectionManager_emptyFilters() throws ResourceInvalidException {
HttpConnectionManager hcm =
HttpConnectionManager.newBuilder()
.build();
ResourceInvalidException e = assertThrows(ResourceInvalidException.class, () ->
XdsListenerResource.parseHttpConnectionManager(
hcm, filterRegistry,
true /* does not matter */, getXdsResourceTypeArgs(true)));
assertThat(e).hasMessageThat()
.isEqualTo("Missing HttpFilter in HttpConnectionManager.");
}
@Test
public void parseHttpConnectionManager_clusterSpecifierPlugin() throws Exception {
XdsRouteConfigureResource.enableRouteLookup = true;
RouteLookupConfig routeLookupConfig = RouteLookupConfig.newBuilder()
.addGrpcKeybuilders(
GrpcKeyBuilder.newBuilder()
.addNames(Name.newBuilder().setService("service1"))
.addNames(Name.newBuilder().setService("service2"))
.addHeaders(
NameMatcher.newBuilder().setKey("key1").addNames("v1").setRequiredMatch(true)))
.setLookupService("rls-cbt.googleapis.com")
.setLookupServiceTimeout(Durations.fromMillis(1234))
.setCacheSizeBytes(5000)
.addValidTargets("valid-target")
.build();
RouteLookupClusterSpecifier specifier =
RouteLookupClusterSpecifier.newBuilder().setRouteLookupConfig(routeLookupConfig).build();
TypedExtensionConfig typedExtensionConfig = TypedExtensionConfig.newBuilder()
.setName("rls-plugin-1")
.setTypedConfig(Any.pack(specifier))
.build();
io.envoyproxy.envoy.config.route.v3.Route route =
io.envoyproxy.envoy.config.route.v3.Route.newBuilder()
.setName("route-1")
.setMatch(io.envoyproxy.envoy.config.route.v3.RouteMatch.newBuilder().setPrefix(""))
.setRoute(io.envoyproxy.envoy.config.route.v3.RouteAction.newBuilder()
.setClusterSpecifierPlugin("rls-plugin-1"))
.build();
HttpConnectionManager hcm =
HttpConnectionManager.newBuilder()
.setRouteConfig(
RouteConfiguration.newBuilder()
.addClusterSpecifierPlugins(
io.envoyproxy.envoy.config.route.v3.ClusterSpecifierPlugin.newBuilder()
.setExtension(typedExtensionConfig)
.build())
.addVirtualHosts(io.envoyproxy.envoy.config.route.v3.VirtualHost.newBuilder()
.setName("virtual-host-1")
.addRoutes(route)))
.addHttpFilters(
HttpFilter.newBuilder().setName("terminal").setTypedConfig(
Any.pack(Router.newBuilder().build())).setIsOptional(true))
.build();
io.grpc.xds.HttpConnectionManager parsedHcm = XdsListenerResource.parseHttpConnectionManager(
hcm, filterRegistry,
true /* does not matter */, getXdsResourceTypeArgs(true));
VirtualHost virtualHost = Iterables.getOnlyElement(parsedHcm.virtualHosts());
Route parsedRoute = Iterables.getOnlyElement(virtualHost.routes());
NamedPluginConfig namedPluginConfig =
parsedRoute.routeAction().namedClusterSpecifierPluginConfig();
assertThat(namedPluginConfig.name()).isEqualTo("rls-plugin-1");
assertThat(namedPluginConfig.config()).isInstanceOf(RlsPluginConfig.class);
}
@Test
public void parseHttpConnectionManager_duplicatePluginName() throws Exception {
XdsRouteConfigureResource.enableRouteLookup = true;
RouteLookupConfig routeLookupConfig1 = RouteLookupConfig.newBuilder()
.addGrpcKeybuilders(
GrpcKeyBuilder.newBuilder()
.addNames(Name.newBuilder().setService("service1"))
.addNames(Name.newBuilder().setService("service2"))
.addHeaders(
NameMatcher.newBuilder().setKey("key1").addNames("v1").setRequiredMatch(true)))
.setLookupService("rls-cbt.googleapis.com")
.setLookupServiceTimeout(Durations.fromMillis(1234))
.setCacheSizeBytes(5000)
.addValidTargets("valid-target")
.build();
RouteLookupClusterSpecifier specifier1 =
RouteLookupClusterSpecifier.newBuilder().setRouteLookupConfig(routeLookupConfig1).build();
RouteLookupConfig routeLookupConfig2 = RouteLookupConfig.newBuilder()
.addGrpcKeybuilders(
GrpcKeyBuilder.newBuilder()
.addNames(Name.newBuilder().setService("service3"))
.addHeaders(
NameMatcher.newBuilder().setKey("key1").addNames("v1").setRequiredMatch(true)))
.setLookupService("rls-cbt.googleapis.com")
.setLookupServiceTimeout(Durations.fromMillis(1234))
.setCacheSizeBytes(5000)
.addValidTargets("valid-target")
.build();
RouteLookupClusterSpecifier specifier2 =
RouteLookupClusterSpecifier.newBuilder().setRouteLookupConfig(routeLookupConfig2).build();
TypedExtensionConfig typedExtensionConfig = TypedExtensionConfig.newBuilder()
.setName("rls-plugin-1")
.setTypedConfig(Any.pack(specifier1))
.build();
TypedExtensionConfig typedExtensionConfig2 = TypedExtensionConfig.newBuilder()
.setName("rls-plugin-1")
.setTypedConfig(Any.pack(specifier2))
.build();
io.envoyproxy.envoy.config.route.v3.Route route =
io.envoyproxy.envoy.config.route.v3.Route.newBuilder()
.setName("route-1")
.setMatch(io.envoyproxy.envoy.config.route.v3.RouteMatch.newBuilder().setPrefix(""))
.setRoute(io.envoyproxy.envoy.config.route.v3.RouteAction.newBuilder()
.setClusterSpecifierPlugin("rls-plugin-1"))
.build();
HttpConnectionManager hcm =
HttpConnectionManager.newBuilder()
.setRouteConfig(
RouteConfiguration.newBuilder()
.addClusterSpecifierPlugins(
io.envoyproxy.envoy.config.route.v3.ClusterSpecifierPlugin.newBuilder()
.setExtension(typedExtensionConfig)
.build())
.addClusterSpecifierPlugins(
io.envoyproxy.envoy.config.route.v3.ClusterSpecifierPlugin.newBuilder()
.setExtension(typedExtensionConfig2)
.build())
.addVirtualHosts(io.envoyproxy.envoy.config.route.v3.VirtualHost.newBuilder()
.setName("virtual-host-1")
.addRoutes(route)))
.addHttpFilters(
HttpFilter.newBuilder().setName("terminal").setTypedConfig(
Any.pack(Router.newBuilder().build())).setIsOptional(true))
.build();
ResourceInvalidException e = assertThrows(ResourceInvalidException.class, () ->
XdsListenerResource.parseHttpConnectionManager(
hcm, filterRegistry,
true /* does not matter */, getXdsResourceTypeArgs(true)));
assertThat(e).hasMessageThat()
.isEqualTo("Multiple ClusterSpecifierPlugins with the same name: rls-plugin-1");
}
@Test
public void parseHttpConnectionManager_pluginNameNotFound() throws Exception {
XdsRouteConfigureResource.enableRouteLookup = true;
RouteLookupConfig routeLookupConfig = RouteLookupConfig.newBuilder()
.addGrpcKeybuilders(
GrpcKeyBuilder.newBuilder()
.addNames(Name.newBuilder().setService("service1"))
.addNames(Name.newBuilder().setService("service2"))
.addHeaders(
NameMatcher.newBuilder().setKey("key1").addNames("v1").setRequiredMatch(true)))
.setLookupService("rls-cbt.googleapis.com")
.setLookupServiceTimeout(Durations.fromMillis(1234))
.setCacheSizeBytes(5000)
.addValidTargets("valid-target")
.build();
RouteLookupClusterSpecifier specifier =
RouteLookupClusterSpecifier.newBuilder().setRouteLookupConfig(routeLookupConfig).build();
TypedExtensionConfig typedExtensionConfig = TypedExtensionConfig.newBuilder()
.setName("rls-plugin-1")
.setTypedConfig(Any.pack(specifier))
.build();
io.envoyproxy.envoy.config.route.v3.Route route =
io.envoyproxy.envoy.config.route.v3.Route.newBuilder()
.setName("route-1")
.setMatch(io.envoyproxy.envoy.config.route.v3.RouteMatch.newBuilder().setPrefix(""))
.setRoute(io.envoyproxy.envoy.config.route.v3.RouteAction.newBuilder()
.setClusterSpecifierPlugin("invalid-plugin-name"))
.build();
HttpConnectionManager hcm =
HttpConnectionManager.newBuilder()
.setRouteConfig(
RouteConfiguration.newBuilder()
.addClusterSpecifierPlugins(
io.envoyproxy.envoy.config.route.v3.ClusterSpecifierPlugin.newBuilder()
.setExtension(typedExtensionConfig)
.build())
.addVirtualHosts(io.envoyproxy.envoy.config.route.v3.VirtualHost.newBuilder()
.setName("virtual-host-1")
.addRoutes(route)))
.addHttpFilters(
HttpFilter.newBuilder().setName("terminal").setTypedConfig(
Any.pack(Router.newBuilder().build())).setIsOptional(true))
.build();
ResourceInvalidException e = assertThrows(ResourceInvalidException.class, () ->
XdsListenerResource.parseHttpConnectionManager(
hcm, filterRegistry,
true /* does not matter */, getXdsResourceTypeArgs(true)));
assertThat(e).hasMessageThat()
.contains("ClusterSpecifierPlugin for [invalid-plugin-name] not found");
}
@Test
public void parseHttpConnectionManager_optionalPlugin() throws ResourceInvalidException {
XdsRouteConfigureResource.enableRouteLookup = true;
// RLS Plugin, and a route to it.
RouteLookupConfig routeLookupConfig = RouteLookupConfig.newBuilder()
.addGrpcKeybuilders(
GrpcKeyBuilder.newBuilder()
.addNames(Name.newBuilder().setService("service1"))
.addNames(Name.newBuilder().setService("service2"))
.addHeaders(
NameMatcher.newBuilder().setKey("key1").addNames("v1").setRequiredMatch(true)))
.setLookupService("rls-cbt.googleapis.com")
.setLookupServiceTimeout(Durations.fromMillis(1234))
.setCacheSizeBytes(5000)
.addValidTargets("valid-target")
.build();
io.envoyproxy.envoy.config.route.v3.ClusterSpecifierPlugin rlsPlugin =
io.envoyproxy.envoy.config.route.v3.ClusterSpecifierPlugin.newBuilder()
.setExtension(
TypedExtensionConfig.newBuilder()
.setName("rls-plugin-1")
.setTypedConfig(Any.pack(
RouteLookupClusterSpecifier.newBuilder()
.setRouteLookupConfig(routeLookupConfig)
.build())))
.build();
io.envoyproxy.envoy.config.route.v3.Route rlsRoute =
io.envoyproxy.envoy.config.route.v3.Route.newBuilder()
.setName("rls-route-1")
.setMatch(io.envoyproxy.envoy.config.route.v3.RouteMatch.newBuilder().setPrefix(""))
.setRoute(io.envoyproxy.envoy.config.route.v3.RouteAction.newBuilder()
.setClusterSpecifierPlugin("rls-plugin-1"))
.build();
// Unknown optional plugin, and a route to it.
io.envoyproxy.envoy.config.route.v3.ClusterSpecifierPlugin optionalPlugin =
io.envoyproxy.envoy.config.route.v3.ClusterSpecifierPlugin.newBuilder()
.setIsOptional(true)
.setExtension(
TypedExtensionConfig.newBuilder()
.setName("optional-plugin-1")
.setTypedConfig(Any.pack(StringValue.of("unregistered")))
.build())
.build();
io.envoyproxy.envoy.config.route.v3.Route optionalRoute =
io.envoyproxy.envoy.config.route.v3.Route.newBuilder()
.setName("optional-route-1")
.setMatch(io.envoyproxy.envoy.config.route.v3.RouteMatch.newBuilder().setPrefix(""))
.setRoute(io.envoyproxy.envoy.config.route.v3.RouteAction.newBuilder()
.setClusterSpecifierPlugin("optional-plugin-1"))
.build();
// Build and parse the route.
RouteConfiguration routeConfig = RouteConfiguration.newBuilder()
.addClusterSpecifierPlugins(rlsPlugin)
.addClusterSpecifierPlugins(optionalPlugin)
.addVirtualHosts(
io.envoyproxy.envoy.config.route.v3.VirtualHost.newBuilder()
.setName("virtual-host-1")
.addRoutes(rlsRoute)
.addRoutes(optionalRoute))
.build();
io.grpc.xds.HttpConnectionManager parsedHcm = XdsListenerResource.parseHttpConnectionManager(
HttpConnectionManager.newBuilder().setRouteConfig(routeConfig)
.addHttpFilters(
HttpFilter.newBuilder().setName("terminal").setTypedConfig(
Any.pack(Router.newBuilder().build())).setIsOptional(true))
.build(), filterRegistry,
true /* does not matter */, getXdsResourceTypeArgs(true));
// Verify that the only route left is the one with the registered RLS plugin `rls-plugin-1`,
// while the route with unregistered optional `optional-plugin-`1 has been skipped.
VirtualHost virtualHost = Iterables.getOnlyElement(parsedHcm.virtualHosts());
Route parsedRoute = Iterables.getOnlyElement(virtualHost.routes());
NamedPluginConfig namedPluginConfig =
parsedRoute.routeAction().namedClusterSpecifierPluginConfig();
assertThat(namedPluginConfig.name()).isEqualTo("rls-plugin-1");
assertThat(namedPluginConfig.config()).isInstanceOf(RlsPluginConfig.class);
}
@Test
public void parseHttpConnectionManager_validateRdsConfigSource() throws Exception {
XdsRouteConfigureResource.enableRouteLookup = true;
HttpConnectionManager hcm1 =
HttpConnectionManager.newBuilder()
.setRds(Rds.newBuilder()
.setRouteConfigName("rds-config-foo")
.setConfigSource(
ConfigSource.newBuilder().setAds(AggregatedConfigSource.getDefaultInstance())))
.addHttpFilters(
HttpFilter.newBuilder().setName("terminal").setTypedConfig(
Any.pack(Router.newBuilder().build())).setIsOptional(true))
.build();
XdsListenerResource.parseHttpConnectionManager(
hcm1, filterRegistry,
true /* does not matter */, getXdsResourceTypeArgs(true));
HttpConnectionManager hcm2 =
HttpConnectionManager.newBuilder()
.setRds(Rds.newBuilder()
.setRouteConfigName("rds-config-foo")
.setConfigSource(
ConfigSource.newBuilder().setSelf(SelfConfigSource.getDefaultInstance())))
.addHttpFilters(
HttpFilter.newBuilder().setName("terminal").setTypedConfig(
Any.pack(Router.newBuilder().build())).setIsOptional(true))
.build();
XdsListenerResource.parseHttpConnectionManager(
hcm2, filterRegistry,
true /* does not matter */, getXdsResourceTypeArgs(true));
HttpConnectionManager hcm3 =
HttpConnectionManager.newBuilder()
.setRds(Rds.newBuilder()
.setRouteConfigName("rds-config-foo")
.setConfigSource(
ConfigSource.newBuilder()
.setPathConfigSource(PathConfigSource.newBuilder().setPath("foo-path"))))
.addHttpFilters(
HttpFilter.newBuilder().setName("terminal").setTypedConfig(
Any.pack(Router.newBuilder().build())).setIsOptional(true))
.build();
ResourceInvalidException e = assertThrows(ResourceInvalidException.class, () ->
XdsListenerResource.parseHttpConnectionManager(
hcm3, filterRegistry,
true /* does not matter */, getXdsResourceTypeArgs(true)));
assertThat(e).hasMessageThat().isEqualTo(
"HttpConnectionManager contains invalid RDS: must specify ADS or self ConfigSource");
}
@Test
public void parseClusterSpecifierPlugin_typedStructInTypedExtension() throws Exception {
| Provider |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/embeddables/generics/GenericEmbeddedIdentifierMappedSuperclassTest.java | {
"start": 12409,
"end": 12472
} | class ____ extends GroupAccessReport {
}
}
| GroupAccessReportEntity |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/PropertyNamingStrategies.java | {
"start": 439,
"end": 3648
} | class ____
implements java.io.Serializable
{
private static final long serialVersionUID = 3L;
/*
/**********************************************************************
/* Static instances that may be referenced
/**********************************************************************
*/
/**
* Naming convention used in Java, where words other than first are capitalized
* and no separator is used between words. Since this is the native Java naming convention,
* naming strategy will not do any transformation between names in data (JSON) and
* POJOS.
*<p>
* Example external property names would be "numberValue", "namingStrategy", "theDefiniteProof".
*/
public static final PropertyNamingStrategy LOWER_CAMEL_CASE = new LowerCamelCaseStrategy();
/**
* Naming convention used in languages like Pascal, where all words are capitalized
* and no separator is used between words.
* See {@link UpperCamelCaseStrategy} for details.
*<p>
* Example external property names would be "NumberValue", "NamingStrategy", "TheDefiniteProof".
*/
public static final PropertyNamingStrategy UPPER_CAMEL_CASE = new UpperCamelCaseStrategy();
/**
* Naming convention used in languages like C, where words are in lower-case
* letters, separated by underscores.
* See {@link SnakeCaseStrategy} for details.
*<p>
* Example external property names would be "number_value", "naming_strategy", "the_definite_proof".
*/
public static final PropertyNamingStrategy SNAKE_CASE = new SnakeCaseStrategy();
/**
* Naming convention in which the words are in upper-case letters, separated by underscores.
* See {@link UpperSnakeCaseStrategy} for details.
*/
public static final PropertyNamingStrategy UPPER_SNAKE_CASE = new UpperSnakeCaseStrategy();
/**
* Naming convention in which all words of the logical name are in lower case, and
* no separator is used between words.
* See {@link LowerCaseStrategy} for details.
*<p>
* Example external property names would be "numbervalue", "namingstrategy", "thedefiniteproof".
*/
public static final PropertyNamingStrategy LOWER_CASE = new LowerCaseStrategy();
/**
* Naming convention used in languages like Lisp, where words are in lower-case
* letters, separated by hyphens.
* See {@link KebabCaseStrategy} for details.
*<p>
* Example external property names would be "number-value", "naming-strategy", "the-definite-proof".
*/
public static final PropertyNamingStrategy KEBAB_CASE = new KebabCaseStrategy();
/**
* Naming convention widely used as configuration properties name, where words are in
* lower-case letters, separated by dots.
* See {@link LowerDotCaseStrategy} for details.
*<p>
* Example external property names would be "number.value", "naming.strategy", "the.definite.proof".
*/
public static final PropertyNamingStrategy LOWER_DOT_CASE = new LowerDotCaseStrategy();
/*
/**********************************************************************
/* Public base | PropertyNamingStrategies |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/SmokeTests.java | {
"start": 9054,
"end": 10121
} | class ____ {
private Integer id;
private String name;
private Gender gender;
private Gender gender2;
private Gender gender3;
private Component component;
@Id
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
@Enumerated
public Gender getGender() {
return gender;
}
public void setGender(Gender gender) {
this.gender = gender;
}
@Enumerated(EnumType.STRING)
public Gender getGender2() {
return gender2;
}
public void setGender2(Gender gender2) {
this.gender2 = gender2;
}
@Convert( converter = GenderConverter.class )
@Column( length = 1 )
public Gender getGender3() {
return gender3;
}
public void setGender3(Gender gender3) {
this.gender3 = gender3;
}
@Embedded
public Component getComponent() {
return component;
}
public void setComponent(Component component) {
this.component = component;
}
}
static | SimpleEntity |
java | apache__camel | components/camel-dataformat/src/generated/java/org/apache/camel/component/dataformat/DataFormatEndpointConfigurer.java | {
"start": 737,
"end": 1975
} | class ____ extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
DataFormatEndpoint target = (DataFormatEndpoint) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "lazystartproducer":
case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
default: return false;
}
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "lazystartproducer":
case "lazyStartProducer": return boolean.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
DataFormatEndpoint target = (DataFormatEndpoint) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "lazystartproducer":
case "lazyStartProducer": return target.isLazyStartProducer();
default: return null;
}
}
}
| DataFormatEndpointConfigurer |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/enumerated/mappedSuperclass/EnumeratedWithMappedSuperclassTest.java | {
"start": 4820,
"end": 5602
} | class ____ extends DescriptionEntity {
// @Column(columnDefinition = "varchar", nullable = false, length = 100)
@Enumerated(STRING)
private Nature nature;
@Column(nullable = false)
private Integer rank;
@Column(nullable = false)
private boolean required;
public AddressLevel() { // Do nothing, default constructor needed by JPA / Hibernate
}
public Nature getNature() {
return this.nature;
}
public void setNature(final Nature nature) {
this.nature = nature;
}
public Integer getRank() {
return this.rank;
}
public void setRank(final Integer rank) {
this.rank = rank;
}
public boolean getRequired() {
return this.required;
}
public void isRequired(final boolean required) {
this.required = required;
}
}
}
| AddressLevel |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/records/Version.java | {
"start": 1389,
"end": 2670
} | class ____ {
public static Version newInstance(int majorVersion, int minorVersion) {
Version version = Records.newRecord(Version.class);
version.setMajorVersion(majorVersion);
version.setMinorVersion(minorVersion);
return version;
}
public abstract int getMajorVersion();
public abstract void setMajorVersion(int majorVersion);
public abstract int getMinorVersion();
public abstract void setMinorVersion(int minorVersion);
public String toString() {
return getMajorVersion() + "." + getMinorVersion();
}
public boolean isCompatibleTo(Version version) {
return getMajorVersion() == version.getMajorVersion();
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + getMajorVersion();
result = prime * result + getMinorVersion();
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
Version other = (Version) obj;
if (this.getMajorVersion() == other.getMajorVersion()
&& this.getMinorVersion() == other.getMinorVersion()) {
return true;
} else {
return false;
}
}
}
| Version |
java | spring-projects__spring-security | web/src/main/java/org/springframework/security/web/util/matcher/RequestMatcher.java | {
"start": 926,
"end": 1854
} | interface ____ {
/**
* Decides whether the rule implemented by the strategy matches the supplied request.
* @param request the request to check for a match
* @return true if the request matches, false otherwise
*/
boolean matches(HttpServletRequest request);
/**
* Returns a MatchResult for this RequestMatcher. The default implementation returns
* {@link Collections#emptyMap()} when {@link MatchResult#getVariables()} is invoked.
* @return the MatchResult from comparing this RequestMatcher against the
* HttpServletRequest
* @since 5.2
*/
default MatchResult matcher(HttpServletRequest request) {
boolean match = matches(request);
return new MatchResult(match, Collections.emptyMap());
}
/**
* The result of matching against an HttpServletRequest contains the status, true or
* false, of the match and if present, any variables extracted from the match
*
* @since 5.2
*/
| RequestMatcher |
java | google__dagger | javatests/dagger/internal/codegen/ModuleFactoryGeneratorTest.java | {
"start": 22220,
"end": 23343
} | class ____ {",
" @Provides int i() throws Exception {",
" return 0;",
" }",
"",
" @Provides String s() throws Throwable {",
" return \"\";",
" }",
"}");
daggerCompiler(moduleFile)
.compile(
subject -> {
subject.hasErrorCount(2);
subject.hasErrorContaining("@Provides methods may only throw unchecked exceptions")
.onSource(moduleFile)
.onLine(8);
subject.hasErrorContaining("@Provides methods may only throw unchecked exceptions")
.onSource(moduleFile)
.onLine(12);
});
}
@Test
public void providedTypes() {
Source moduleFile =
CompilerTests.javaSource(
"test.TestModule",
"package test;",
"",
"import dagger.Module;",
"import dagger.Provides;",
"import java.io.Closeable;",
"import java.util.Set;",
"",
"@Module",
"final | TestModule |
java | qos-ch__slf4j | jcl-over-slf4j/src/main/java/org/apache/commons/logging/LogFactory.java | {
"start": 1847,
"end": 2153
} | class ____ (TCCL), or not. By default, the TCCL is used.
*
* <p>
* This property is not used but preserved here for compatibility.
*/
public static final String TCCL_KEY = "use_tccl";
/**
* The name of the property used to identify the LogFactory implementation
* | loader |
java | apache__thrift | lib/java/src/main/java/org/apache/thrift/server/TThreadedSelectorServer.java | {
"start": 10922,
"end": 14960
} | class ____ extends Thread {
// The listen socket to accept on
private final TNonblockingServerTransport serverTransport;
private final Selector acceptSelector;
private final SelectorThreadLoadBalancer threadChooser;
/**
* Set up the AcceptThead
*
* @throws IOException if failed to register selector
*/
public AcceptThread(
TNonblockingServerTransport serverTransport, SelectorThreadLoadBalancer threadChooser)
throws IOException {
this.serverTransport = serverTransport;
this.threadChooser = threadChooser;
this.acceptSelector = SelectorProvider.provider().openSelector();
this.serverTransport.registerSelector(acceptSelector);
}
/**
* The work loop. Selects on the server transport and accepts. If there was a server transport
* that had blocking accepts, and returned on blocking client transports, that should be used
* instead
*/
public void run() {
try {
if (eventHandler_ != null) {
eventHandler_.preServe();
}
while (!stopped_) {
select();
}
} catch (Throwable t) {
LOGGER.error("run() on AcceptThread exiting due to uncaught error", t);
} finally {
try {
acceptSelector.close();
} catch (IOException e) {
LOGGER.error("Got an IOException while closing accept selector!", e);
}
// This will wake up the selector threads
TThreadedSelectorServer.this.stop();
}
}
/** If the selector is blocked, wake it up. */
public void wakeupSelector() {
acceptSelector.wakeup();
}
/**
* Select and process IO events appropriately: If there are connections to be accepted, accept
* them.
*/
private void select() {
try {
// wait for connect events.
acceptSelector.select();
// process the io events we received
Iterator<SelectionKey> selectedKeys = acceptSelector.selectedKeys().iterator();
while (!stopped_ && selectedKeys.hasNext()) {
SelectionKey key = selectedKeys.next();
selectedKeys.remove();
// skip if not valid
if (!key.isValid()) {
continue;
}
if (key.isAcceptable()) {
handleAccept();
} else {
LOGGER.warn("Unexpected state in select! " + key.interestOps());
}
}
} catch (IOException e) {
LOGGER.warn("Got an IOException while selecting!", e);
}
}
/** Accept a new connection. */
private void handleAccept() {
final TNonblockingTransport client = doAccept();
if (client != null) {
// Pass this connection to a selector thread
final SelectorThread targetThread = threadChooser.nextThread();
if (args.acceptPolicy == Args.AcceptPolicy.FAST_ACCEPT || invoker == null) {
doAddAccept(targetThread, client);
} else {
// FAIR_ACCEPT
try {
invoker.submit(
new Runnable() {
public void run() {
doAddAccept(targetThread, client);
}
});
} catch (RejectedExecutionException rx) {
LOGGER.warn("ExecutorService rejected accept registration!", rx);
// close immediately
client.close();
}
}
}
}
private TNonblockingTransport doAccept() {
try {
return serverTransport.accept();
} catch (TTransportException tte) {
// something went wrong accepting.
LOGGER.warn("Exception trying to accept!", tte);
return null;
}
}
private void doAddAccept(SelectorThread thread, TNonblockingTransport client) {
if (!thread.addAcceptedConnection(client)) {
client.close();
}
}
} // AcceptThread
/** The SelectorThread(s) will be doing all the selecting on accepted active connections. */
protected | AcceptThread |
java | google__auto | value/src/it/functional/src/test/java/com/google/auto/value/AutoValueTest.java | {
"start": 105763,
"end": 106013
} | class ____<T> {
public abstract Builder<T> setFoo(int x);
public abstract NonDefaultableInner.Builder<T> innerBuilder();
public abstract OuterWithNonDefaultableInner<T> build();
}
}
@AutoValue
public abstract static | Builder |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java | {
"start": 15473,
"end": 27594
} | class ____ {
final Map<Path, BlockStoragePolicy> map = Maps.newHashMap();
final Path hot = new Path("/hot");
final Path warm = new Path("/warm");
final Path cold = new Path("/cold");
final List<Path> files;
PathPolicyMap(int filesPerDir){
map.put(hot, HOT);
map.put(warm, WARM);
map.put(cold, COLD);
files = new ArrayList<Path>();
for(Path dir : map.keySet()) {
for(int i = 0; i < filesPerDir; i++) {
files.add(new Path(dir, "file" + i));
}
}
}
NamespaceScheme newNamespaceScheme() {
return new NamespaceScheme(Arrays.asList(hot, warm, cold),
files, BLOCK_SIZE/2, null, map);
}
/**
* Move hot files to warm and cold, warm files to hot and cold,
* and cold files to hot and warm.
*/
void moveAround(DistributedFileSystem dfs) throws Exception {
for(Path srcDir : map.keySet()) {
int i = 0;
for(Path dstDir : map.keySet()) {
if (!srcDir.equals(dstDir)) {
final Path src = new Path(srcDir, "file" + i++);
final Path dst = new Path(dstDir, srcDir.getName() + "2" + dstDir.getName());
LOG.info("rename " + src + " to " + dst);
dfs.rename(src, dst);
}
}
}
}
}
/**
* A normal case for Mover: move a file into archival storage
*/
@Test
public void testMigrateFileToArchival() throws Exception {
LOG.info("testMigrateFileToArchival");
final Path foo = new Path("/foo");
Map<Path, BlockStoragePolicy> policyMap = Maps.newHashMap();
policyMap.put(foo, COLD);
NamespaceScheme nsScheme = new NamespaceScheme(null, Arrays.asList(foo),
2*BLOCK_SIZE, null, policyMap);
ClusterScheme clusterScheme = new ClusterScheme(DEFAULT_CONF,
NUM_DATANODES, REPL, genStorageTypes(NUM_DATANODES), null);
new MigrationTest(clusterScheme, nsScheme).runBasicTest(true);
}
/**
* Print a big banner in the test log to make debug easier.
*/
static void banner(String string) {
LOG.info("\n\n\n\n================================================\n" +
string + "\n" +
"==================================================\n\n");
}
/**
* Run Mover with arguments specifying files and directories
*/
@Test
public void testMoveSpecificPaths() throws Exception {
LOG.info("testMoveSpecificPaths");
final Path foo = new Path("/foo");
final Path barFile = new Path(foo, "bar");
final Path foo2 = new Path("/foo2");
final Path bar2File = new Path(foo2, "bar2");
Map<Path, BlockStoragePolicy> policyMap = Maps.newHashMap();
policyMap.put(foo, COLD);
policyMap.put(foo2, WARM);
NamespaceScheme nsScheme = new NamespaceScheme(Arrays.asList(foo, foo2),
Arrays.asList(barFile, bar2File), BLOCK_SIZE, null, policyMap);
ClusterScheme clusterScheme = new ClusterScheme(DEFAULT_CONF,
NUM_DATANODES, REPL, genStorageTypes(NUM_DATANODES), null);
MigrationTest test = new MigrationTest(clusterScheme, nsScheme);
test.setupCluster();
try {
test.prepareNamespace();
test.setStoragePolicy();
Map<URI, List<Path>> map = Mover.Cli.getNameNodePathsToMove(test.conf,
"-p", "/foo/bar", "/foo2");
int result = Mover.run(map, test.conf);
assertEquals(ExitStatus.SUCCESS.getExitCode(), result);
Thread.sleep(5000);
test.verify(true);
} finally {
test.shutdownCluster();
}
}
/**
* Move an open file into archival storage
*/
@Test
public void testMigrateOpenFileToArchival() throws Exception {
LOG.info("testMigrateOpenFileToArchival");
final Path fooDir = new Path("/foo");
Map<Path, BlockStoragePolicy> policyMap = Maps.newHashMap();
policyMap.put(fooDir, COLD);
NamespaceScheme nsScheme = new NamespaceScheme(Arrays.asList(fooDir), null,
BLOCK_SIZE, null, policyMap);
ClusterScheme clusterScheme = new ClusterScheme(DEFAULT_CONF,
NUM_DATANODES, REPL, genStorageTypes(NUM_DATANODES), null);
MigrationTest test = new MigrationTest(clusterScheme, nsScheme);
test.setupCluster();
// create an open file
banner("writing to file /foo/bar");
final Path barFile = new Path(fooDir, "bar");
DFSTestUtil.createFile(test.dfs, barFile, BLOCK_SIZE, (short) 1, 0L);
FSDataOutputStream out = test.dfs.append(barFile);
out.writeBytes("hello, ");
((DFSOutputStream) out.getWrappedStream()).hsync();
try {
banner("start data migration");
test.setStoragePolicy(); // set /foo to COLD
test.migrate(ExitStatus.SUCCESS);
// make sure the under construction block has not been migrated
LocatedBlocks lbs = test.dfs.getClient().getLocatedBlocks(
barFile.toString(), BLOCK_SIZE);
LOG.info("Locations: " + lbs);
List<LocatedBlock> blks = lbs.getLocatedBlocks();
assertEquals(1, blks.size());
assertEquals(1, blks.get(0).getLocations().length);
banner("finish the migration, continue writing");
// make sure the writing can continue
out.writeBytes("world!");
((DFSOutputStream) out.getWrappedStream()).hsync();
IOUtils.cleanupWithLogger(LOG, out);
lbs = test.dfs.getClient().getLocatedBlocks(
barFile.toString(), BLOCK_SIZE);
LOG.info("Locations: " + lbs);
blks = lbs.getLocatedBlocks();
assertEquals(1, blks.size());
assertEquals(1, blks.get(0).getLocations().length);
banner("finish writing, starting reading");
// check the content of /foo/bar
FSDataInputStream in = test.dfs.open(barFile);
byte[] buf = new byte[13];
// read from offset 1024
in.readFully(BLOCK_SIZE, buf, 0, buf.length);
IOUtils.cleanupWithLogger(LOG, in);
assertEquals("hello, world!", new String(buf));
} finally {
test.shutdownCluster();
}
}
/**
* Test directories with Hot, Warm and Cold polices.
*/
@Test
public void testHotWarmColdDirs() throws Exception {
LOG.info("testHotWarmColdDirs");
PathPolicyMap pathPolicyMap = new PathPolicyMap(3);
NamespaceScheme nsScheme = pathPolicyMap.newNamespaceScheme();
ClusterScheme clusterScheme = new ClusterScheme();
MigrationTest test = new MigrationTest(clusterScheme, nsScheme);
try {
test.runBasicTest(false);
pathPolicyMap.moveAround(test.dfs);
test.migrate(ExitStatus.SUCCESS);
test.verify(true);
} finally {
test.shutdownCluster();
}
}
private void waitForAllReplicas(int expectedReplicaNum, Path file,
DistributedFileSystem dfs, int retryCount) throws Exception {
LOG.info("Waiting for replicas count " + expectedReplicaNum
+ ", file name: " + file);
for (int i = 0; i < retryCount; i++) {
LocatedBlocks lbs = dfs.getClient().getLocatedBlocks(file.toString(), 0,
BLOCK_SIZE);
LocatedBlock lb = lbs.get(0);
if (lb.getLocations().length >= expectedReplicaNum) {
return;
} else {
Thread.sleep(1000);
}
}
}
private void setVolumeFull(DataNode dn, StorageType type) {
try (FsDatasetSpi.FsVolumeReferences refs = dn.getFSDataset()
.getFsVolumeReferences()) {
for (FsVolumeSpi fvs : refs) {
FsVolumeImpl volume = (FsVolumeImpl) fvs;
if (volume.getStorageType() == type) {
LOG.info("setCapacity to 0 for [" + volume.getStorageType() + "]"
+ volume.getStorageID());
volume.setCapacityForTesting(0);
}
}
} catch (IOException e) {
LOG.error("Unexpected exception by closing FsVolumeReference", e);
}
}
/**
* Test DISK is running out of spaces.
*/
@Test
public void testNoSpaceDisk() throws Exception {
LOG.info("testNoSpaceDisk");
final PathPolicyMap pathPolicyMap = new PathPolicyMap(0);
final NamespaceScheme nsScheme = pathPolicyMap.newNamespaceScheme();
Configuration conf = new Configuration(DEFAULT_CONF);
final ClusterScheme clusterScheme = new ClusterScheme(conf,
NUM_DATANODES, REPL, genStorageTypes(NUM_DATANODES), null);
final MigrationTest test = new MigrationTest(clusterScheme, nsScheme);
try {
test.runBasicTest(false);
// create 2 hot files with replication 3
final short replication = 3;
for (int i = 0; i < 2; i++) {
final Path p = new Path(pathPolicyMap.hot, "file" + i);
DFSTestUtil.createFile(test.dfs, p, BLOCK_SIZE, replication, 0L);
waitForAllReplicas(replication, p, test.dfs, 10);
}
// set all the DISK volume to full
for (DataNode dn : test.cluster.getDataNodes()) {
setVolumeFull(dn, StorageType.DISK);
DataNodeTestUtils.triggerHeartbeat(dn);
}
// test increasing replication. Since DISK is full,
// new replicas should be stored in ARCHIVE as a fallback storage.
final Path file0 = new Path(pathPolicyMap.hot, "file0");
final Replication r = test.getReplication(file0);
final short newReplication = (short) 5;
test.dfs.setReplication(file0, newReplication);
waitForAllReplicas(newReplication, file0, test.dfs, 10);
test.verifyReplication(file0, r.disk, newReplication - r.disk);
// test creating a cold file and then increase replication
final Path p = new Path(pathPolicyMap.cold, "foo");
DFSTestUtil.createFile(test.dfs, p, BLOCK_SIZE, replication, 0L);
waitForAllReplicas(replication, p, test.dfs, 10);
test.verifyReplication(p, 0, replication);
test.dfs.setReplication(p, newReplication);
waitForAllReplicas(newReplication, p, test.dfs, 10);
test.verifyReplication(p, 0, newReplication);
//test move a hot file to warm
final Path file1 = new Path(pathPolicyMap.hot, "file1");
test.dfs.rename(file1, pathPolicyMap.warm);
test.migrate(ExitStatus.NO_MOVE_BLOCK);
test.verifyFile(new Path(pathPolicyMap.warm, "file1"), WARM.getId());
} finally {
test.shutdownCluster();
}
}
/**
* Test ARCHIVE is running out of spaces.
*/
@Test
public void testNoSpaceArchive() throws Exception {
LOG.info("testNoSpaceArchive");
final PathPolicyMap pathPolicyMap = new PathPolicyMap(0);
final NamespaceScheme nsScheme = pathPolicyMap.newNamespaceScheme();
final ClusterScheme clusterScheme = new ClusterScheme(DEFAULT_CONF,
NUM_DATANODES, REPL, genStorageTypes(NUM_DATANODES), null);
final MigrationTest test = new MigrationTest(clusterScheme, nsScheme);
try {
test.runBasicTest(false);
// create 2 hot files with replication 3
final short replication = 3;
for (int i = 0; i < 2; i++) {
final Path p = new Path(pathPolicyMap.cold, "file" + i);
DFSTestUtil.createFile(test.dfs, p, BLOCK_SIZE, replication, 0L);
waitForAllReplicas(replication, p, test.dfs, 10);
}
// set all the ARCHIVE volume to full
for (DataNode dn : test.cluster.getDataNodes()) {
setVolumeFull(dn, StorageType.ARCHIVE);
DataNodeTestUtils.triggerHeartbeat(dn);
}
{ // test increasing replication but new replicas cannot be created
// since no more ARCHIVE space.
final Path file0 = new Path(pathPolicyMap.cold, "file0");
final Replication r = test.getReplication(file0);
assertEquals(0, r.disk);
final short newReplication = (short) 5;
test.dfs.setReplication(file0, newReplication);
waitForAllReplicas(r.archive, file0, test.dfs, 10);
test.verifyReplication(file0, 0, r.archive);
}
{ // test creating a hot file
final Path p = new Path(pathPolicyMap.hot, "foo");
DFSTestUtil.createFile(test.dfs, p, BLOCK_SIZE, (short) 3, 0L);
}
{ //test move a cold file to warm
final Path file1 = new Path(pathPolicyMap.cold, "file1");
test.dfs.rename(file1, pathPolicyMap.warm);
test.migrate(ExitStatus.SUCCESS);
test.verify(true);
}
} finally {
test.shutdownCluster();
}
}
}
| PathPolicyMap |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/ContainerLogsRequest.java | {
"start": 1066,
"end": 4485
} | class ____ {
private ApplicationId appId;
private ApplicationAttemptId appAttemptId;
private String containerId;
private String nodeId;
private String nodeHttpAddress;
private String appOwner;
private boolean appFinished;
private String outputLocalDir;
private Set<String> logTypes;
private long bytes;
private ContainerState containerState;
public ContainerLogsRequest() {}
public ContainerLogsRequest(ContainerLogsRequest request) {
this.setAppId(request.getAppId());
this.setAppAttemptId(request.getAppAttemptId());
this.setAppFinished(request.isAppFinished());
this.setAppOwner(request.getAppOwner());
this.setNodeId(request.getNodeId());
this.setNodeHttpAddress(request.getNodeHttpAddress());
this.setContainerId(request.getContainerId());
this.setOutputLocalDir(request.getOutputLocalDir());
this.setLogTypes(request.getLogTypes());
this.setBytes(request.getBytes());
this.setContainerState(request.getContainerState());
}
public ContainerLogsRequest(ApplicationId applicationId,
ApplicationAttemptId appAttemptId, boolean isAppFinished, String owner,
String address, String httpAddress, String container, String localDir,
Set<String> logs, long bytes, ContainerState containerState) {
this.setAppId(applicationId);
this.setAppAttemptId(appAttemptId);
this.setAppFinished(isAppFinished);
this.setAppOwner(owner);
this.setNodeId(address);
this.setNodeHttpAddress(httpAddress);
this.setContainerId(container);
this.setOutputLocalDir(localDir);
this.setLogTypes(logs);
this.setBytes(bytes);
this.setContainerState(containerState);
}
public ApplicationId getAppId() {
return appId;
}
public void setAppId(ApplicationId appId) {
this.appId = appId;
}
public ApplicationAttemptId getAppAttemptId() {
return this.appAttemptId;
}
public void setAppAttemptId(ApplicationAttemptId appAttemptId) {
this.appAttemptId = appAttemptId;
}
public String getContainerId() {
return containerId;
}
public void setContainerId(String containerId) {
this.containerId = containerId;
}
public String getNodeId() {
return nodeId;
}
public void setNodeId(String nodeAddress) {
this.nodeId = nodeAddress;
}
public String getAppOwner() {
return appOwner;
}
public void setAppOwner(String appOwner) {
this.appOwner = appOwner;
}
public String getNodeHttpAddress() {
return nodeHttpAddress;
}
public void setNodeHttpAddress(String nodeHttpAddress) {
this.nodeHttpAddress = nodeHttpAddress;
}
public boolean isAppFinished() {
return appFinished;
}
public void setAppFinished(boolean appFinished) {
this.appFinished = appFinished;
}
public String getOutputLocalDir() {
return outputLocalDir;
}
public void setOutputLocalDir(String outputLocalDir) {
this.outputLocalDir = outputLocalDir;
}
public Set<String> getLogTypes() {
return logTypes;
}
public void setLogTypes(Set<String> logTypes) {
this.logTypes = logTypes;
}
public long getBytes() {
return bytes;
}
public void setBytes(long bytes) {
this.bytes = bytes;
}
public ContainerState getContainerState() {
return containerState;
}
public void setContainerState(ContainerState containerState) {
this.containerState = containerState;
}
}
| ContainerLogsRequest |
java | quarkusio__quarkus | extensions/security/deployment/src/test/java/io/quarkus/security/test/permissionsallowed/checker/MultipleCheckersForSamePermissionValidationFailureTest.java | {
"start": 1000,
"end": 1412
} | class ____ {
@PermissionsAllowed("some-value")
void securedBean() {
// EMPTY
}
@PermissionChecker("some-value")
boolean checkSomeValue(SecurityIdentity identity) {
return false;
}
@PermissionChecker("some-value")
boolean alsoCheckSomeValue(SecurityIdentity identity) {
return false;
}
}
}
| SecuredBean |
java | apache__hadoop | hadoop-tools/hadoop-datajoin/src/test/java/org/apache/hadoop/contrib/utils/join/SampleTaggedMapOutput.java | {
"start": 1271,
"end": 1779
} | class ____ extends TaggedMapOutput {
private Text data;
public SampleTaggedMapOutput() {
this.data = new Text("");
}
public SampleTaggedMapOutput(Text data) {
this.data = data;
}
public Writable getData() {
return data;
}
public void write(DataOutput out) throws IOException {
this.tag.write(out);
this.data.write(out);
}
public void readFields(DataInput in) throws IOException {
this.tag.readFields(in);
this.data.readFields(in);
}
}
| SampleTaggedMapOutput |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowInfoAction.java | {
"start": 8733,
"end": 9298
} | enum ____ {
ACTIVE("active"),
PAUSED("paused");
private final String name;
Status(String name) {
this.name = name;
}
public static Status fromString(String value) {
return switch (value) {
case "active" -> Status.ACTIVE;
case "paused" -> Status.PAUSED;
default -> throw new IllegalArgumentException("unexpected status value [" + value + "]");
};
}
}
}
}
| Status |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java | {
"start": 6882,
"end": 7328
} | class ____ will render the /task page
*/
protected Class<? extends View> taskPage() {
return TaskPage.class;
}
/**
* Render the /task page
*/
public void task() {
try {
requireTask();
}
catch (Exception e) {
renderText(e.getMessage());
return;
}
if (app.getTask() != null) {
setTitle(join("Attempts for ", $(TASK_ID)));
}
render(taskPage());
}
/**
* @return the | that |
java | apache__flink | flink-core/src/test/java/org/apache/flink/api/common/typeinfo/FractionalTypeInfoTest.java | {
"start": 969,
"end": 1322
} | class ____ extends TypeInformationTestBase<FractionalTypeInfo<?>> {
@Override
protected FractionalTypeInfo<?>[] getTestData() {
return new FractionalTypeInfo<?>[] {
(FractionalTypeInfo<?>) BasicTypeInfo.FLOAT_TYPE_INFO,
(FractionalTypeInfo<?>) BasicTypeInfo.DOUBLE_TYPE_INFO
};
}
}
| FractionalTypeInfoTest |
java | apache__kafka | raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientAutoJoinTest.java | {
"start": 1408,
"end": 13040
} | class ____ {
@Test
public void testAutoRemoveOldVoter() throws Exception {
final var leader = replicaKey(randomReplicaId(), true);
final var oldFollower = replicaKey(leader.id() + 1, true);
final var newFollowerKey = replicaKey(oldFollower.id(), true);
final int epoch = 1;
final var context = new RaftClientTestContext.Builder(
newFollowerKey.id(),
newFollowerKey.directoryId().get()
)
.withRaftProtocol(KIP_853_PROTOCOL)
.withStartingVoters(
VoterSetTest.voterSet(Stream.of(leader, oldFollower)), KRaftVersion.KRAFT_VERSION_1
)
.withElectedLeader(epoch, leader.id())
.withAutoJoin(true)
.withCanBecomeVoter(true)
.build();
context.advanceTimeAndCompleteFetch(epoch, leader.id(), true);
// the next request should be a remove voter request
pollAndDeliverRemoveVoter(context, oldFollower);
// after sending a remove voter the next request should be a fetch
context.advanceTimeAndCompleteFetch(epoch, leader.id(), true);
// the replica should send remove voter again because the fetch did not update the voter set
pollAndDeliverRemoveVoter(context, oldFollower);
}
@Test
public void testAutoAddNewVoter() throws Exception {
final var leader = replicaKey(randomReplicaId(), true);
final var follower = replicaKey(leader.id() + 1, true);
final var newVoter = replicaKey(follower.id() + 1, true);
final int epoch = 1;
final var context = new RaftClientTestContext.Builder(
newVoter.id(),
newVoter.directoryId().get()
)
.withRaftProtocol(KIP_853_PROTOCOL)
.withStartingVoters(
VoterSetTest.voterSet(Stream.of(leader, follower)), KRaftVersion.KRAFT_VERSION_1
)
.withElectedLeader(epoch, leader.id())
.withAutoJoin(true)
.withCanBecomeVoter(true)
.build();
context.advanceTimeAndCompleteFetch(epoch, leader.id(), true);
// the next request should be an add voter request
pollAndSendAddVoter(context, newVoter);
// expire the add voter request, the next request should be a fetch
context.advanceTimeAndCompleteFetch(epoch, leader.id(), true);
// the replica should send add voter again because the completed fetch
// did not update the voter set, and its timer has expired
final var addVoterRequest = pollAndSendAddVoter(context, newVoter);
// deliver the add voter response, this is possible before a completed fetch because of KIP-1186
context.deliverResponse(
addVoterRequest.correlationId(),
addVoterRequest.destination(),
RaftUtil.addVoterResponse(Errors.NONE, Errors.NONE.message())
);
// verify the replica can perform a fetch to commit the new voter set
pollAndDeliverFetchToUpdateVoterSet(
context,
epoch,
VoterSetTest.voterSet(Stream.of(leader, newVoter))
);
}
@Test
public void testObserverRemovesOldVoterAndAutoJoins() throws Exception {
final var leader = replicaKey(randomReplicaId(), true);
final var oldFollower = replicaKey(leader.id() + 1, true);
final var newFollowerKey = replicaKey(oldFollower.id(), true);
final int epoch = 1;
final var context = new RaftClientTestContext.Builder(
newFollowerKey.id(),
newFollowerKey.directoryId().get()
)
.withRaftProtocol(KIP_853_PROTOCOL)
.withStartingVoters(
VoterSetTest.voterSet(Stream.of(leader, oldFollower)), KRaftVersion.KRAFT_VERSION_1
)
.withElectedLeader(epoch, leader.id())
.withAutoJoin(true)
.withCanBecomeVoter(true)
.build();
// advance time and complete a fetch to trigger the remove voter request
context.advanceTimeAndCompleteFetch(epoch, leader.id(), true);
// the next request should be a remove voter request
pollAndDeliverRemoveVoter(context, oldFollower);
// after sending a remove voter the next request should be a fetch
// this fetch will remove the old follower from the voter set
pollAndDeliverFetchToUpdateVoterSet(
context,
epoch,
VoterSetTest.voterSet(Stream.of(leader))
);
// advance time and complete a fetch to trigger the add voter request
context.advanceTimeAndCompleteFetch(epoch, leader.id(), true);
// the next request should be an add voter request
final var addVoterRequest = pollAndSendAddVoter(context, newFollowerKey);
// deliver the add voter response, this is possible before a completed fetch because of KIP-1186
context.deliverResponse(
addVoterRequest.correlationId(),
addVoterRequest.destination(),
RaftUtil.addVoterResponse(Errors.NONE, Errors.NONE.message())
);
// verify the replica can perform a fetch to commit the new voter set
pollAndDeliverFetchToUpdateVoterSet(
context,
epoch,
VoterSetTest.voterSet(Stream.of(leader, newFollowerKey))
);
// advance time and complete a fetch and expire the update voter set timer
// the next request should be a fetch because the log voter configuration is up-to-date
context.advanceTimeAndCompleteFetch(epoch, leader.id(), true);
context.pollUntilRequest();
context.assertSentFetchRequest();
}
@Test
public void testObserversDoNotAutoJoin() throws Exception {
final var leader = replicaKey(randomReplicaId(), true);
final var follower = replicaKey(leader.id() + 1, true);
final var newObserver = replicaKey(follower.id() + 1, true);
final int epoch = 1;
final var context = new RaftClientTestContext.Builder(
newObserver.id(),
newObserver.directoryId().get()
)
.withRaftProtocol(KIP_853_PROTOCOL)
.withStartingVoters(
VoterSetTest.voterSet(Stream.of(leader, follower)), KRaftVersion.KRAFT_VERSION_1
)
.withElectedLeader(epoch, leader.id())
.withAutoJoin(true)
.withCanBecomeVoter(false)
.build();
context.advanceTimeAndCompleteFetch(epoch, leader.id(), true);
context.time.sleep(context.fetchTimeoutMs - 1);
context.pollUntilRequest();
// When canBecomeVoter == false, the replica should not send an add voter request
final var fetchRequest = context.assertSentFetchRequest();
context.assertFetchRequestData(fetchRequest, epoch, 0L, 0, context.client.highWatermark());
}
@Test
public void testObserverDoesNotAddItselfWhenAutoJoinDisabled() throws Exception {
final var leader = replicaKey(randomReplicaId(), true);
final var follower = replicaKey(leader.id() + 1, true);
final var observer = replicaKey(follower.id() + 1, true);
final int epoch = 1;
final var context = new RaftClientTestContext.Builder(
observer.id(),
observer.directoryId().get()
)
.withRaftProtocol(KIP_853_PROTOCOL)
.withStartingVoters(
VoterSetTest.voterSet(Stream.of(leader, follower)), KRaftVersion.KRAFT_VERSION_1
)
.withElectedLeader(epoch, leader.id())
.withAutoJoin(false)
.withCanBecomeVoter(true)
.build();
context.advanceTimeAndCompleteFetch(epoch, leader.id(), true);
context.time.sleep(context.fetchTimeoutMs - 1);
context.pollUntilRequest();
// When autoJoin == false, the replica should not send an add voter request
final var fetchRequest = context.assertSentFetchRequest();
context.assertFetchRequestData(fetchRequest, epoch, 0L, 0, context.client.highWatermark());
}
@Test
public void testObserverDoesNotAutoJoinWithKRaftVersion0() throws Exception {
final var leader = replicaKey(randomReplicaId(), true);
final var follower = replicaKey(leader.id() + 1, true);
final var observer = replicaKey(follower.id() + 1, true);
final int epoch = 1;
final var context = new RaftClientTestContext.Builder(
observer.id(),
observer.directoryId().get()
)
.withRaftProtocol(KIP_595_PROTOCOL)
.withStartingVoters(
VoterSetTest.voterSet(Stream.of(leader, follower)), KRaftVersion.KRAFT_VERSION_0
)
.withElectedLeader(epoch, leader.id())
.withAutoJoin(true)
.withCanBecomeVoter(true)
.build();
context.advanceTimeAndCompleteFetch(epoch, leader.id(), true);
context.time.sleep(context.fetchTimeoutMs - 1);
context.pollUntilRequest();
// When kraft.version == 0, the replica should not send an add voter request
final var fetchRequest = context.assertSentFetchRequest();
context.assertFetchRequestData(fetchRequest, epoch, 0L, 0, context.client.highWatermark());
}
private void pollAndDeliverRemoveVoter(
RaftClientTestContext context,
ReplicaKey oldFollower
) throws Exception {
context.pollUntilRequest();
final var removeRequest = context.assertSentRemoveVoterRequest(oldFollower);
context.deliverResponse(
removeRequest.correlationId(),
removeRequest.destination(),
RaftUtil.removeVoterResponse(Errors.NONE, Errors.NONE.message())
);
}
private RaftRequest.Outbound pollAndSendAddVoter(
RaftClientTestContext context,
ReplicaKey newVoter
) throws Exception {
context.pollUntilRequest();
return context.assertSentAddVoterRequest(
newVoter,
context.client.quorum().localVoterNodeOrThrow().listeners()
);
}
private void pollAndDeliverFetchToUpdateVoterSet(
RaftClientTestContext context,
int epoch,
VoterSet newVoterSet
) throws Exception {
context.pollUntilRequest();
final var fetchRequest = context.assertSentFetchRequest();
context.assertFetchRequestData(
fetchRequest,
epoch,
context.log.endOffset().offset(),
context.log.lastFetchedEpoch(),
context.client.highWatermark()
);
// deliver the fetch response with the updated voter set
context.deliverResponse(
fetchRequest.correlationId(),
fetchRequest.destination(),
context.fetchResponse(
epoch,
fetchRequest.destination().id(),
MemoryRecords.withVotersRecord(
context.log.endOffset().offset(),
context.time.milliseconds(),
epoch,
BufferSupplier.NO_CACHING.get(300),
newVoterSet.toVotersRecord((short) 0)
),
context.log.endOffset().offset() + 1,
Errors.NONE
)
);
// poll kraft to update the replica's voter set
context.client.poll();
}
private int randomReplicaId() {
return ThreadLocalRandom.current().nextInt(1025);
}
}
| KafkaRaftClientAutoJoinTest |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/ThrowableAssertBaseTest.java | {
"start": 867,
"end": 1329
} | class ____ extends BaseTestTemplate<ThrowableAssert<Throwable>, Throwable> {
protected Throwables throwables;
@Override
protected ThrowableAssert<Throwable> create_assertions() {
return new ThrowableAssert<>(new Throwable("throwable message"));
}
@Override
protected void inject_internal_objects() {
super.inject_internal_objects();
throwables = mock(Throwables.class);
assertions.throwables = throwables;
}
}
| ThrowableAssertBaseTest |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueHistogram.java | {
"start": 1128,
"end": 1290
} | class ____ a value aggregator that computes the
* histogram of a sequence of strings.
*
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public | implements |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/junit/jupiter/nested/ConstructorInjectionTestMethodScopedExtensionContextNestedTests.java | {
"start": 2970,
"end": 3283
} | class ____ {
final String bar;
AutowiredConstructorParameterTests(@Autowired String bar) {
this.bar = bar;
}
@Test
void nestedTest() {
assertThat(foo).isEqualTo("bar");
assertThat(bar).isEqualTo("bar");
}
}
@Nested
@SpringJUnitConfig(NestedConfig.class)
| AutowiredConstructorParameterTests |
java | spring-projects__spring-framework | spring-webmvc/src/test/java/org/springframework/web/servlet/resource/CachingResourceResolverTests.java | {
"start": 1433,
"end": 5545
} | class ____ {
private Cache cache;
private ResourceResolverChain chain;
private List<Resource> locations;
@BeforeEach
void setup() {
this.cache = new ConcurrentMapCache("resourceCache");
List<ResourceResolver> resolvers = new ArrayList<>();
resolvers.add(new CachingResourceResolver(this.cache));
resolvers.add(new PathResourceResolver());
this.chain = new DefaultResourceResolverChain(resolvers);
this.locations = new ArrayList<>();
this.locations.add(new ClassPathResource("test/", getClass()));
}
@Test
void resolveResourceInternal() {
Resource expected = new ClassPathResource("test/bar.css", getClass());
Resource actual = this.chain.resolveResource(null, "bar.css", this.locations);
assertThat(actual).isNotSameAs(expected);
assertThat(actual).isEqualTo(expected);
}
@Test
void resolveResourceInternalFromCache() {
Resource expected = mock();
this.cache.put(resourceKey("bar.css"), expected);
Resource actual = this.chain.resolveResource(null, "bar.css", this.locations);
assertThat(actual).isSameAs(expected);
}
@Test
void resolveResourceInternalNoMatch() {
assertThat(this.chain.resolveResource(null, "invalid.css", this.locations)).isNull();
}
@Test
void resolverUrlPath() {
String expected = "/foo.css";
String actual = this.chain.resolveUrlPath(expected, this.locations);
assertThat(actual).isEqualTo(expected);
}
@Test
void resolverUrlPathFromCache() {
String expected = "cached-imaginary.css";
this.cache.put(CachingResourceResolver.RESOLVED_URL_PATH_CACHE_KEY_PREFIX + "imaginary.css", expected);
String actual = this.chain.resolveUrlPath("imaginary.css", this.locations);
assertThat(actual).isEqualTo(expected);
}
@Test
void resolverUrlPathNoMatch() {
assertThat(this.chain.resolveUrlPath("invalid.css", this.locations)).isNull();
}
@Test
void resolveResourceAcceptEncodingInCacheKey(GzippedFiles gzippedFiles) {
String file = "bar.css";
gzippedFiles.create(file);
// 1. Resolve plain resource
MockHttpServletRequest request = new MockHttpServletRequest("GET", file);
Resource expected = this.chain.resolveResource(request, file, this.locations);
String cacheKey = resourceKey(file);
assertThat(this.cache.get(cacheKey).get()).isSameAs(expected);
// 2. Resolve with Accept-Encoding
request = new MockHttpServletRequest("GET", file);
request.addHeader("Accept-Encoding", "gzip ; a=b , deflate , br ; c=d ");
expected = this.chain.resolveResource(request, file, this.locations);
cacheKey = resourceKey(file + "+encoding=br,gzip");
assertThat(this.cache.get(cacheKey).get()).isSameAs(expected);
// 3. Resolve with Accept-Encoding but no matching codings
request = new MockHttpServletRequest("GET", file);
request.addHeader("Accept-Encoding", "deflate");
expected = this.chain.resolveResource(request, file, this.locations);
cacheKey = resourceKey(file);
assertThat(this.cache.get(cacheKey).get()).isSameAs(expected);
}
@Test
void resolveResourceNoAcceptEncoding() {
String file = "bar.css";
MockHttpServletRequest request = new MockHttpServletRequest("GET", file);
Resource expected = this.chain.resolveResource(request, file, this.locations);
String cacheKey = resourceKey(file);
Object actual = this.cache.get(cacheKey).get();
assertThat(actual).isEqualTo(expected);
}
@Test
void resolveResourceMatchingEncoding() {
Resource resource = mock();
Resource gzipped = mock();
this.cache.put(resourceKey("bar.css"), resource);
this.cache.put(resourceKey("bar.css+encoding=gzip"), gzipped);
MockHttpServletRequest request = new MockHttpServletRequest("GET", "bar.css");
assertThat(this.chain.resolveResource(request, "bar.css", this.locations)).isSameAs(resource);
request = new MockHttpServletRequest("GET", "bar.css");
request.addHeader("Accept-Encoding", "gzip");
assertThat(this.chain.resolveResource(request, "bar.css", this.locations)).isSameAs(gzipped);
}
private static String resourceKey(String key) {
return CachingResourceResolver.RESOLVED_RESOURCE_CACHE_KEY_PREFIX + key;
}
}
| CachingResourceResolverTests |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java | {
"start": 1602,
"end": 5169
} | class ____ {
@AfterEach
public void cleanUp() {
FileUtil.fullyDeleteContents(new File(MiniDFSCluster.getBaseDirectory()));
}
/**
* Tests setting the rpc port to the same as the web port to test that
* an exception
* is thrown when trying to re-use the same port
*/
@Test
@Timeout(value = 300)
public void testThatMatchingRPCandHttpPortsThrowException()
throws IOException {
assertThrows(BindException.class, () -> {
NameNode nameNode = null;
try {
Configuration conf = new HdfsConfiguration();
File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
nameDir.getAbsolutePath());
Random rand = new Random();
final int port = 30000 + rand.nextInt(30000);
// set both of these to the same port. It should fail.
FileSystem.setDefaultUri(conf, "hdfs://localhost:" + port);
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:" + port);
DFSTestUtil.formatNameNode(conf);
nameNode = new NameNode(conf);
} finally {
if (nameNode != null) {
nameNode.stop();
}
}
});
}
/**
* Tests setting the rpc port to a different as the web port that an
* exception is NOT thrown
*/
@Test
@Timeout(value = 300)
public void testThatDifferentRPCandHttpPortsAreOK()
throws IOException {
Configuration conf = new HdfsConfiguration();
File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
nameDir.getAbsolutePath());
Random rand = new Random();
// A few retries in case the ports we choose are in use.
for (int i = 0; i < 5; ++i) {
final int port1 = 30000 + rand.nextInt(10000);
final int port2 = port1 + 1 + rand.nextInt(10000);
FileSystem.setDefaultUri(conf, "hdfs://localhost:" + port1);
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:" + port2);
DFSTestUtil.formatNameNode(conf);
NameNode nameNode = null;
try {
nameNode = new NameNode(conf); // should be OK!
break;
} catch(BindException be) {
continue; // Port in use? Try another.
} finally {
if (nameNode != null) {
nameNode.stop();
}
}
}
}
/**
* HDFS-3013: NameNode format command doesn't pick up
* dfs.namenode.name.dir.NameServiceId configuration.
*/
@Test
@Timeout(value = 300)
public void testGenericKeysForNameNodeFormat()
throws IOException {
Configuration conf = new HdfsConfiguration();
// Set ephemeral ports
conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY,
"127.0.0.1:0");
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY,
"127.0.0.1:0");
conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1");
// Set a nameservice-specific configuration for name dir
File dir = new File(MiniDFSCluster.getBaseDirectory(),
"testGenericKeysForNameNodeFormat");
if (dir.exists()) {
FileUtil.fullyDelete(dir);
}
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + ".ns1",
dir.getAbsolutePath());
// Format and verify the right dir is formatted.
DFSTestUtil.formatNameNode(conf);
GenericTestUtils.assertExists(dir);
// Ensure that the same dir is picked up by the running NN
NameNode nameNode = new NameNode(conf);
nameNode.stop();
}
}
| TestValidateConfigurationSettings |
java | bumptech__glide | annotation/compiler/src/main/java/com/bumptech/glide/annotation/compiler/IndexerGenerator.java | {
"start": 912,
"end": 1146
} | class ____ a LibraryGlideModule looks like this:
*
* <pre>
* <code>
* {@literal @com.bumptech.glide.annotation.compiler.Index(}
* modules = "com.bumptech.glide.integration.okhttp3.OkHttpLibraryGlideModule"
* )
* public | with |
java | spring-projects__spring-framework | spring-webmvc/src/main/java/org/springframework/web/servlet/resource/VersionResourceResolver.java | {
"start": 2540,
"end": 9193
} | class ____ extends AbstractResourceResolver {
private final AntPathMatcher pathMatcher = new AntPathMatcher();
/** Map from path pattern -> VersionStrategy. */
private final Map<String, VersionStrategy> versionStrategyMap = new LinkedHashMap<>();
/**
* Set a Map with URL paths as keys and {@code VersionStrategy} as values.
* <p>Supports direct URL matches and Ant-style pattern matches. For syntax
* details, see the {@link org.springframework.util.AntPathMatcher} javadoc.
* @param map a map with URLs as keys and version strategies as values
*/
public void setStrategyMap(Map<String, VersionStrategy> map) {
this.versionStrategyMap.clear();
this.versionStrategyMap.putAll(map);
}
/**
* Return the map with version strategies keyed by path pattern.
*/
public Map<String, VersionStrategy> getStrategyMap() {
return this.versionStrategyMap;
}
/**
* Insert a content-based version in resource URLs that match the given path
* patterns. The version is computed from the content of the file, for example,
* {@code "css/main-e36d2e05253c6c7085a91522ce43a0b4.css"}. This is a good
* default strategy to use except when it cannot be, for example when using
* JavaScript module loaders, use {@link #addFixedVersionStrategy} instead
* for serving JavaScript files.
* @param pathPatterns one or more resource URL path patterns,
* relative to the pattern configured with the resource handler
* @return the current instance for chained method invocation
* @see ContentVersionStrategy
*/
public VersionResourceResolver addContentVersionStrategy(String... pathPatterns) {
addVersionStrategy(new ContentVersionStrategy(), pathPatterns);
return this;
}
/**
* Insert a fixed, prefix-based version in resource URLs that match the given
* path patterns, for example: <code>"{version}/js/main.js"</code>. This is useful (vs.
* content-based versions) when using JavaScript module loaders.
* <p>The version may be a random number, the current date, or a value
* fetched from a git commit sha, a property file, or environment variable
* and set with SpEL expressions in the configuration (for example, see {@code @Value}
* in Java config).
* <p>If not done already, variants of the given {@code pathPatterns}, prefixed with
* the {@code version} will be also configured. For example, adding a {@code "/js/**"} path pattern
* will also configure automatically a {@code "/v1.0.0/js/**"} with {@code "v1.0.0"} the
* {@code version} String given as an argument.
* @param version a version string
* @param pathPatterns one or more resource URL path patterns,
* relative to the pattern configured with the resource handler
* @return the current instance for chained method invocation
* @see FixedVersionStrategy
*/
public VersionResourceResolver addFixedVersionStrategy(String version, String... pathPatterns) {
List<String> patternsList = Arrays.asList(pathPatterns);
List<String> prefixedPatterns = new ArrayList<>(pathPatterns.length);
String versionPrefix = "/" + version;
for (String pattern : patternsList) {
prefixedPatterns.add(pattern);
if (!pattern.startsWith(versionPrefix) && !patternsList.contains(versionPrefix + pattern)) {
prefixedPatterns.add(versionPrefix + pattern);
}
}
return addVersionStrategy(new FixedVersionStrategy(version), StringUtils.toStringArray(prefixedPatterns));
}
/**
* Register a custom VersionStrategy to apply to resource URLs that match the
* given path patterns.
* @param strategy the custom strategy
* @param pathPatterns one or more resource URL path patterns,
* relative to the pattern configured with the resource handler
* @return the current instance for chained method invocation
* @see VersionStrategy
*/
public VersionResourceResolver addVersionStrategy(VersionStrategy strategy, String... pathPatterns) {
for (String pattern : pathPatterns) {
getStrategyMap().put(pattern, strategy);
}
return this;
}
@Override
protected @Nullable Resource resolveResourceInternal(@Nullable HttpServletRequest request, String requestPath,
List<? extends Resource> locations, ResourceResolverChain chain) {
Resource resolved = chain.resolveResource(request, requestPath, locations);
if (resolved != null) {
return resolved;
}
VersionStrategy versionStrategy = getStrategyForPath(requestPath);
if (versionStrategy == null) {
return null;
}
String candidateVersion = versionStrategy.extractVersion(requestPath);
if (!StringUtils.hasLength(candidateVersion)) {
return null;
}
String simplePath = versionStrategy.removeVersion(requestPath, candidateVersion);
Resource baseResource = chain.resolveResource(request, simplePath, locations);
if (baseResource == null) {
return null;
}
String actualVersion = versionStrategy.getResourceVersion(baseResource);
if (candidateVersion.equals(actualVersion)) {
return new FileNameVersionedResource(baseResource, candidateVersion);
}
else {
if (logger.isTraceEnabled()) {
logger.trace("Found resource for \"" + requestPath + "\", but version [" +
candidateVersion + "] does not match");
}
return null;
}
}
@Override
protected @Nullable String resolveUrlPathInternal(String resourceUrlPath,
List<? extends Resource> locations, ResourceResolverChain chain) {
String baseUrl = chain.resolveUrlPath(resourceUrlPath, locations);
if (StringUtils.hasText(baseUrl)) {
VersionStrategy versionStrategy = getStrategyForPath(resourceUrlPath);
if (versionStrategy == null) {
return baseUrl;
}
Resource resource = chain.resolveResource(null, baseUrl, locations);
Assert.state(resource != null, "Unresolvable resource");
String version = versionStrategy.getResourceVersion(resource);
return versionStrategy.addVersion(baseUrl, version);
}
return baseUrl;
}
/**
* Find a {@code VersionStrategy} for the request path of the requested resource.
* @return an instance of a {@code VersionStrategy} or null if none matches that request path
*/
protected @Nullable VersionStrategy getStrategyForPath(String requestPath) {
String path = "/".concat(requestPath);
List<String> matchingPatterns = new ArrayList<>();
for (String pattern : this.versionStrategyMap.keySet()) {
if (this.pathMatcher.match(pattern, path)) {
matchingPatterns.add(pattern);
}
}
if (!matchingPatterns.isEmpty()) {
Comparator<String> comparator = this.pathMatcher.getPatternComparator(path);
matchingPatterns.sort(comparator);
return this.versionStrategyMap.get(matchingPatterns.get(0));
}
return null;
}
private static | VersionResourceResolver |
java | apache__commons-lang | src/main/java/org/apache/commons/lang3/CharSetUtils.java | {
"start": 1181,
"end": 8557
} | class ____ {
/**
* Takes an argument in set-syntax, see evaluateSet,
* and identifies whether any of the characters are present in the specified string.
*
* <pre>
* CharSetUtils.containsAny(null, *) = false
* CharSetUtils.containsAny("", *) = false
* CharSetUtils.containsAny(*, null) = false
* CharSetUtils.containsAny(*, "") = false
* CharSetUtils.containsAny("hello", "k-p") = true
* CharSetUtils.containsAny("hello", "a-d") = false
* </pre>
*
* @see CharSet#getInstance(String...) for set-syntax.
* @param str String to look for characters in, may be null
* @param set String[] set of characters to identify, may be null
* @return whether or not the characters in the set are in the primary string
* @since 3.2
*/
public static boolean containsAny(final String str, final String... set) {
if (StringUtils.isEmpty(str) || deepEmpty(set)) {
return false;
}
final CharSet chars = CharSet.getInstance(set);
for (final char c : str.toCharArray()) {
if (chars.contains(c)) {
return true;
}
}
return false;
}
/**
* Takes an argument in set-syntax, see evaluateSet,
* and returns the number of characters present in the specified string.
*
* <pre>
* CharSetUtils.count(null, *) = 0
* CharSetUtils.count("", *) = 0
* CharSetUtils.count(*, null) = 0
* CharSetUtils.count(*, "") = 0
* CharSetUtils.count("hello", "k-p") = 3
* CharSetUtils.count("hello", "a-e") = 1
* </pre>
*
* @see CharSet#getInstance(String...) for set-syntax.
* @param str String to count characters in, may be null
* @param set String[] set of characters to count, may be null
* @return the character count, zero if null string input
*/
public static int count(final String str, final String... set) {
if (StringUtils.isEmpty(str) || deepEmpty(set)) {
return 0;
}
final CharSet chars = CharSet.getInstance(set);
int count = 0;
for (final char c : str.toCharArray()) {
if (chars.contains(c)) {
count++;
}
}
return count;
}
/**
* Determines whether or not all the Strings in an array are
* empty or not.
*
* @param strings String[] whose elements are being checked for emptiness
* @return whether or not the String is empty
*/
private static boolean deepEmpty(final String[] strings) {
return Streams.of(strings).allMatch(StringUtils::isEmpty);
}
/**
* Takes an argument in set-syntax, see evaluateSet,
* and deletes any of characters present in the specified string.
*
* <pre>
* CharSetUtils.delete(null, *) = null
* CharSetUtils.delete("", *) = ""
* CharSetUtils.delete(*, null) = *
* CharSetUtils.delete(*, "") = *
* CharSetUtils.delete("hello", "hl") = "eo"
* CharSetUtils.delete("hello", "le") = "ho"
* </pre>
*
* @see CharSet#getInstance(String...) for set-syntax.
* @param str String to delete characters from, may be null
* @param set String[] set of characters to delete, may be null
* @return the modified String, {@code null} if null string input
*/
public static String delete(final String str, final String... set) {
if (StringUtils.isEmpty(str) || deepEmpty(set)) {
return str;
}
return modify(str, set, false);
}
/**
* Takes an argument in set-syntax, see evaluateSet,
* and keeps any of characters present in the specified string.
*
* <pre>
* CharSetUtils.keep(null, *) = null
* CharSetUtils.keep("", *) = ""
* CharSetUtils.keep(*, null) = ""
* CharSetUtils.keep(*, "") = ""
* CharSetUtils.keep("hello", "hl") = "hll"
* CharSetUtils.keep("hello", "le") = "ell"
* </pre>
*
* @see CharSet#getInstance(String...) for set-syntax.
* @param str String to keep characters from, may be null
* @param set String[] set of characters to keep, may be null
* @return the modified String, {@code null} if null string input
* @since 2.0
*/
public static String keep(final String str, final String... set) {
if (str == null) {
return null;
}
if (str.isEmpty() || deepEmpty(set)) {
return StringUtils.EMPTY;
}
return modify(str, set, true);
}
/**
* Implements delete and keep.
*
* @param str String to modify characters within
* @param set String[] set of characters to modify
* @param expect whether to evaluate on match, or non-match
* @return the modified String, not null
*/
private static String modify(final String str, final String[] set, final boolean expect) {
final CharSet chars = CharSet.getInstance(set);
final StringBuilder buffer = new StringBuilder(str.length());
final char[] chrs = str.toCharArray();
for (final char chr : chrs) {
if (chars.contains(chr) == expect) {
buffer.append(chr);
}
}
return buffer.toString();
}
/**
* Squeezes any repetitions of a character that is mentioned in the
* supplied set.
*
* <pre>
* CharSetUtils.squeeze(null, *) = null
* CharSetUtils.squeeze("", *) = ""
* CharSetUtils.squeeze(*, null) = *
* CharSetUtils.squeeze(*, "") = *
* CharSetUtils.squeeze("hello", "k-p") = "helo"
* CharSetUtils.squeeze("hello", "a-e") = "hello"
* </pre>
*
* @see CharSet#getInstance(String...) for set-syntax.
* @param str the string to squeeze, may be null
* @param set the character set to use for manipulation, may be null
* @return the modified String, {@code null} if null string input
*/
public static String squeeze(final String str, final String... set) {
if (StringUtils.isEmpty(str) || deepEmpty(set)) {
return str;
}
final CharSet chars = CharSet.getInstance(set);
final StringBuilder buffer = new StringBuilder(str.length());
final char[] chrs = str.toCharArray();
final int sz = chrs.length;
char lastChar = chrs[0];
char ch;
Character inChars = null;
Character notInChars = null;
buffer.append(lastChar);
for (int i = 1; i < sz; i++) {
ch = chrs[i];
if (ch == lastChar) {
if (inChars != null && ch == inChars) {
continue;
}
if (notInChars == null || ch != notInChars) {
if (chars.contains(ch)) {
inChars = ch;
continue;
}
notInChars = ch;
}
}
buffer.append(ch);
lastChar = ch;
}
return buffer.toString();
}
/**
* CharSetUtils instances should NOT be constructed in standard programming.
* Instead, the | CharSetUtils |
java | quarkusio__quarkus | extensions/amazon-lambda/common-runtime/src/main/java/io/quarkus/amazon/lambda/runtime/ClientContextImpl.java | {
"start": 343,
"end": 1394
} | class ____ implements ClientContext {
private ClientImpl impl;
private Map<String, String> cust;
private Map<String, String> env;
@JsonGetter("client")
public ClientImpl getImpl() {
return impl;
}
@JsonSetter("client")
public void setImpl(ClientImpl impl) {
this.impl = impl;
}
@JsonGetter("custom")
public Map<String, String> getCust() {
return cust;
}
@JsonSetter("custom")
public void setCust(Map<String, String> cust) {
this.cust = cust;
}
@JsonGetter("env")
public Map<String, String> getEnv() {
return env;
}
@JsonSetter("env")
public void setEnv(Map<String, String> env) {
this.env = env;
}
@Override
@JsonIgnore
public Client getClient() {
return impl;
}
@Override
@JsonIgnore
public Map<String, String> getCustom() {
return cust;
}
@Override
@JsonIgnore
public Map<String, String> getEnvironment() {
return env;
}
}
| ClientContextImpl |
java | elastic__elasticsearch | server/src/internalClusterTest/java/org/elasticsearch/search/SearchTimeoutIT.java | {
"start": 3289,
"end": 16065
} | class ____ extends ESIntegTestCase {
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return Collections.singleton(SearchTimeoutPlugin.class);
}
@Override
protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
return Settings.builder().put(super.nodeSettings(nodeOrdinal, otherSettings)).build();
}
@Override
protected void setupSuiteScopeCluster() throws Exception {
super.setupSuiteScopeCluster();
indexRandom(true, "test", randomIntBetween(20, 50));
}
/**
* Test the scenario where the query times out before starting to collect documents, verify that partial hits are not returned
*/
public void testTopHitsTimeoutBeforeCollecting() {
// setting the timeout is necessary only because we check that if a TimeExceededException is thrown, a timeout was set
SearchRequestBuilder searchRequestBuilder = prepareSearch("test").setTimeout(new TimeValue(10, TimeUnit.SECONDS))
.setQuery(new BulkScorerTimeoutQuery(false));
ElasticsearchAssertions.assertResponse(searchRequestBuilder, searchResponse -> {
assertThat(searchResponse.isTimedOut(), equalTo(true));
assertEquals(0, searchResponse.getShardFailures().length);
assertEquals(0, searchResponse.getFailedShards());
assertThat(searchResponse.getSuccessfulShards(), greaterThan(0));
assertEquals(searchResponse.getSuccessfulShards(), searchResponse.getTotalShards());
// timeout happened before we could collect any doc, total hits is 0 and no hits are returned
assertEquals(0, searchResponse.getHits().getTotalHits().value());
assertEquals(0, searchResponse.getHits().getHits().length);
});
}
/**
* Test the scenario where the query times out while collecting documents, verify that partial hits results are returned
*/
public void testTopHitsTimeoutWhileCollecting() {
// setting the timeout is necessary only because we check that if a TimeExceededException is thrown, a timeout was set
SearchRequestBuilder searchRequestBuilder = prepareSearch("test").setTimeout(new TimeValue(10, TimeUnit.SECONDS))
.setQuery(new BulkScorerTimeoutQuery(true));
ElasticsearchAssertions.assertResponse(searchRequestBuilder, searchResponse -> {
assertThat(searchResponse.isTimedOut(), equalTo(true));
assertEquals(0, searchResponse.getShardFailures().length);
assertEquals(0, searchResponse.getFailedShards());
assertThat(searchResponse.getSuccessfulShards(), greaterThan(0));
assertEquals(searchResponse.getSuccessfulShards(), searchResponse.getTotalShards());
assertThat(searchResponse.getHits().getTotalHits().value(), greaterThan(0L));
assertThat(searchResponse.getHits().getHits().length, greaterThan(0));
});
}
/**
* Test the scenario where the query times out before starting to collect documents, verify that partial aggs results are not returned
*/
public void testAggsTimeoutBeforeCollecting() {
SearchRequestBuilder searchRequestBuilder = prepareSearch("test").setSize(0)
// setting the timeout is necessary only because we check that if a TimeExceededException is thrown, a timeout was set
.setTimeout(new TimeValue(10, TimeUnit.SECONDS))
.setQuery(new BulkScorerTimeoutQuery(false))
.addAggregation(new TermsAggregationBuilder("terms").field("field.keyword"));
ElasticsearchAssertions.assertResponse(searchRequestBuilder, searchResponse -> {
assertThat(searchResponse.isTimedOut(), equalTo(true));
assertEquals(0, searchResponse.getShardFailures().length);
assertEquals(0, searchResponse.getFailedShards());
assertThat(searchResponse.getSuccessfulShards(), greaterThan(0));
assertEquals(searchResponse.getSuccessfulShards(), searchResponse.getTotalShards());
assertEquals(0, searchResponse.getHits().getTotalHits().value());
assertEquals(0, searchResponse.getHits().getHits().length);
StringTerms terms = searchResponse.getAggregations().get("terms");
// timeout happened before we could collect any doc, total hits is 0 and no buckets are returned
assertEquals(0, terms.getBuckets().size());
});
}
/**
* Test the scenario where the query times out while collecting documents, verify that partial aggs results are returned
*/
public void testAggsTimeoutWhileCollecting() {
SearchRequestBuilder searchRequestBuilder = prepareSearch("test").setSize(0)
// setting the timeout is necessary only because we check that if a TimeExceededException is thrown, a timeout was set
.setTimeout(new TimeValue(10, TimeUnit.SECONDS))
.setQuery(new BulkScorerTimeoutQuery(true))
.addAggregation(new TermsAggregationBuilder("terms").field("field.keyword"));
ElasticsearchAssertions.assertResponse(searchRequestBuilder, searchResponse -> {
assertThat(searchResponse.isTimedOut(), equalTo(true));
assertEquals(0, searchResponse.getShardFailures().length);
assertEquals(0, searchResponse.getFailedShards());
assertThat(searchResponse.getSuccessfulShards(), greaterThan(0));
assertEquals(searchResponse.getSuccessfulShards(), searchResponse.getTotalShards());
assertThat(searchResponse.getHits().getTotalHits().value(), greaterThan(0L));
assertEquals(0, searchResponse.getHits().getHits().length);
StringTerms terms = searchResponse.getAggregations().get("terms");
assertEquals(1, terms.getBuckets().size());
StringTerms.Bucket bucket = terms.getBuckets().get(0);
assertEquals("value", bucket.getKeyAsString());
assertThat(bucket.getDocCount(), greaterThan(0L));
});
}
/**
* Test the scenario where the suggest phase (part of the query phase) times out, yet there are results
* available coming from executing the query and aggs on each shard.
*/
public void testSuggestTimeoutWithPartialResults() {
SuggestBuilder suggestBuilder = new SuggestBuilder();
suggestBuilder.setGlobalText("text");
TimeoutSuggestionBuilder timeoutSuggestionBuilder = new TimeoutSuggestionBuilder();
suggestBuilder.addSuggestion("suggest", timeoutSuggestionBuilder);
SearchRequestBuilder searchRequestBuilder = prepareSearch("test").suggest(suggestBuilder)
.addAggregation(new TermsAggregationBuilder("terms").field("field.keyword"));
ElasticsearchAssertions.assertResponse(searchRequestBuilder, searchResponse -> {
assertThat(searchResponse.isTimedOut(), equalTo(true));
assertEquals(0, searchResponse.getShardFailures().length);
assertEquals(0, searchResponse.getFailedShards());
assertThat(searchResponse.getSuccessfulShards(), greaterThan(0));
assertEquals(searchResponse.getSuccessfulShards(), searchResponse.getTotalShards());
assertThat(searchResponse.getHits().getTotalHits().value(), greaterThan(0L));
assertThat(searchResponse.getHits().getHits().length, greaterThan(0));
StringTerms terms = searchResponse.getAggregations().get("terms");
assertEquals(1, terms.getBuckets().size());
StringTerms.Bucket bucket = terms.getBuckets().get(0);
assertEquals("value", bucket.getKeyAsString());
assertThat(bucket.getDocCount(), greaterThan(0L));
});
}
/**
* Test the scenario where the rescore phase (part of the query phase) times out, yet there are results
* available coming from executing the query and aggs on each shard.
*/
public void testRescoreTimeoutWithPartialResults() {
SearchRequestBuilder searchRequestBuilder = prepareSearch("test").setRescorer(new TimeoutRescorerBuilder())
.addAggregation(new TermsAggregationBuilder("terms").field("field.keyword"));
ElasticsearchAssertions.assertResponse(searchRequestBuilder, searchResponse -> {
assertThat(searchResponse.isTimedOut(), equalTo(true));
assertEquals(0, searchResponse.getShardFailures().length);
assertEquals(0, searchResponse.getFailedShards());
assertThat(searchResponse.getSuccessfulShards(), greaterThan(0));
assertEquals(searchResponse.getSuccessfulShards(), searchResponse.getTotalShards());
assertThat(searchResponse.getHits().getTotalHits().value(), greaterThan(0L));
assertThat(searchResponse.getHits().getHits().length, greaterThan(0));
StringTerms terms = searchResponse.getAggregations().get("terms");
assertEquals(1, terms.getBuckets().size());
StringTerms.Bucket bucket = terms.getBuckets().get(0);
assertEquals("value", bucket.getKeyAsString());
assertThat(bucket.getDocCount(), greaterThan(0L));
});
}
public void testPartialResultsIntolerantTimeoutBeforeCollecting() {
ElasticsearchException ex = expectThrows(
ElasticsearchException.class,
prepareSearch("test")
// setting the timeout is necessary only because we check that if a TimeExceededException is thrown, a timeout was set
.setTimeout(new TimeValue(10, TimeUnit.SECONDS))
.setQuery(new BulkScorerTimeoutQuery(false))
.setAllowPartialSearchResults(false) // this line causes timeouts to report failures
);
assertTrue(ex.toString().contains("Time exceeded"));
assertEquals(429, ex.status().getStatus());
}
public void testPartialResultsIntolerantTimeoutWhileCollecting() {
ElasticsearchException ex = expectThrows(
ElasticsearchException.class,
prepareSearch("test")
// setting the timeout is necessary only because we check that if a TimeExceededException is thrown, a timeout was set
.setTimeout(new TimeValue(10, TimeUnit.SECONDS))
.setQuery(new BulkScorerTimeoutQuery(true))
.setAllowPartialSearchResults(false) // this line causes timeouts to report failures
);
assertTrue(ex.toString().contains("Time exceeded"));
assertEquals(429, ex.status().getStatus());
}
public void testPartialResultsIntolerantTimeoutWhileSuggestingOnly() {
SuggestBuilder suggestBuilder = new SuggestBuilder();
suggestBuilder.setGlobalText("text");
TimeoutSuggestionBuilder timeoutSuggestionBuilder = new TimeoutSuggestionBuilder();
suggestBuilder.addSuggestion("suggest", timeoutSuggestionBuilder);
ElasticsearchException ex = expectThrows(
ElasticsearchException.class,
prepareSearch("test").suggest(suggestBuilder).setAllowPartialSearchResults(false) // this line causes timeouts to report
// failures
);
assertTrue(ex.toString().contains("Time exceeded"));
assertEquals(429, ex.status().getStatus());
}
public void testPartialResultsIntolerantTimeoutWhileSuggesting() {
SuggestBuilder suggestBuilder = new SuggestBuilder();
suggestBuilder.setGlobalText("text");
TimeoutSuggestionBuilder timeoutSuggestionBuilder = new TimeoutSuggestionBuilder();
suggestBuilder.addSuggestion("suggest", timeoutSuggestionBuilder);
ElasticsearchException ex = expectThrows(
ElasticsearchException.class,
prepareSearch("test").setQuery(new TermQueryBuilder("field", "value"))
.suggest(suggestBuilder)
.setAllowPartialSearchResults(false) // this line causes timeouts to report failures
);
assertTrue(ex.toString().contains("Time exceeded"));
assertEquals(429, ex.status().getStatus());
}
public void testPartialResultsIntolerantTimeoutWhileRescoring() {
ElasticsearchException ex = expectThrows(
ElasticsearchException.class,
prepareSearch("test").setQuery(new TermQueryBuilder("field", "value"))
.setRescorer(new TimeoutRescorerBuilder())
.setAllowPartialSearchResults(false) // this line causes timeouts to report failures
);
assertTrue(ex.toString().contains("Time exceeded"));
assertEquals(429, ex.status().getStatus());
}
public static final | SearchTimeoutIT |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/tenantlongid/TenantLongIdTest.java | {
"start": 1210,
"end": 4054
} | class ____ implements SessionFactoryProducer {
private static final Long mine = 1L;
private static final Long yours = 2L;
Long currentTenant;
@AfterEach
public void cleanup(SessionFactoryScope scope) {
scope.getSessionFactory().getSchemaManager().truncate();
}
@Override
public SessionFactoryImplementor produceSessionFactory(MetadataImplementor model) {
final SessionFactoryBuilder sessionFactoryBuilder = model.getSessionFactoryBuilder();
sessionFactoryBuilder.applyCurrentTenantIdentifierResolver( new CurrentTenantIdentifierResolver<Long>() {
@Override
public Long resolveCurrentTenantIdentifier() {
return currentTenant;
}
@Override
public boolean validateExistingCurrentSessions() {
return false;
}
} );
return (SessionFactoryImplementor) sessionFactoryBuilder.build();
}
@Test
public void test(SessionFactoryScope scope) {
currentTenant = mine;
Client client = new Client("Gavin");
Account acc = new Account(client);
scope.inTransaction( session -> {
session.persist(client);
session.persist(acc);
} );
scope.inTransaction( session -> {
assertNotNull( session.find(Account.class, acc.id) );
assertEquals( 1, session.createQuery("from Account").getResultList().size() );
} );
assertEquals(mine, acc.tenantId);
currentTenant = yours;
scope.inTransaction( session -> {
//HHH-16830 Sessions applies tenantId filter on find()
assertNull( session.find(Account.class, acc.id) );
assertEquals( 0, session.createQuery("from Account").getResultList().size() );
session.disableFilter(TenantIdBinder.FILTER_NAME);
assertNotNull( session.find(Account.class, acc.id) );
assertEquals( 1, session.createQuery("from Account").getResultList().size() );
} );
}
@Test
public void testErrorOnInsert(SessionFactoryScope scope) {
currentTenant = mine;
Client client = new Client("Gavin");
Account acc = new Account(client);
acc.tenantId = yours;
try {
scope.inTransaction( session -> {
session.persist(client);
session.persist(acc);
} );
fail("should have thrown");
}
catch (Throwable e) {
assertTrue( e instanceof PropertyValueException );
}
}
@Test
public void testErrorOnUpdate(SessionFactoryScope scope) {
currentTenant = mine;
Client client = new Client("Gavin");
Account acc = new Account(client);
scope.inTransaction( session -> {
session.persist(client);
session.persist(acc);
acc.tenantId = yours;
client.tenantId = yours;
client.name = "Steve";
} );
//TODO: it would be better if this were an error
scope.inTransaction( session -> {
Account account = session.find(Account.class, acc.id);
assertNotNull(account);
assertEquals( mine, acc.tenantId );
assertEquals( "Steve", acc.client.name );
assertEquals( mine, acc.client.tenantId );
} );
}
}
| TenantLongIdTest |
java | quarkusio__quarkus | extensions/opentelemetry/runtime/src/main/java/io/quarkus/opentelemetry/runtime/config/build/OTelBuildConfig.java | {
"start": 3325,
"end": 4569
} | enum ____ {
/**
* All the security events.
*/
ALL(SecurityEvent.class),
/**
* Authentication success event.
*/
AUTHENTICATION_SUCCESS(AuthenticationSuccessEvent.class),
/**
* Authentication failure event.
*/
AUTHENTICATION_FAILURE(AuthenticationFailureEvent.class),
/**
* Authorization success event.
*/
AUTHORIZATION_SUCCESS(AuthorizationSuccessEvent.class),
/**
* Authorization failure event.
*/
AUTHORIZATION_FAILURE(AuthorizationFailureEvent.class),
/**
* Any other security event. For example the OpenId Connect security event belongs here.
*/
OTHER(SecurityEvent.class);
private final Class<? extends SecurityEvent> observedType;
SecurityEventType(Class<? extends SecurityEvent> observedType) {
this.observedType = observedType;
}
public Class<? extends SecurityEvent> getObservedType() {
return observedType;
}
}
}
}
| SecurityEventType |
java | apache__logging-log4j2 | log4j-layout-template-json/src/main/java/org/apache/logging/log4j/layout/template/json/resolver/MarkerResolver.java | {
"start": 1443,
"end": 3828
} | class ____ implements EventResolver {
private static final TemplateResolver<LogEvent> NAME_RESOLVER =
(final LogEvent logEvent, final JsonWriter jsonWriter) -> {
final Marker marker = logEvent.getMarker();
if (marker == null) {
jsonWriter.writeNull();
} else {
jsonWriter.writeString(marker.getName());
}
};
private static final TemplateResolver<LogEvent> PARENTS_RESOLVER =
(final LogEvent logEvent, final JsonWriter jsonWriter) -> {
// Short-circuit if there are no parents
final Marker marker = logEvent.getMarker();
if (marker == null || !marker.hasParents()) {
jsonWriter.writeNull();
return;
}
// Write parents
final Marker[] parents = marker.getParents();
jsonWriter.writeArrayStart();
for (int parentIndex = 0; parentIndex < parents.length; parentIndex++) {
if (parentIndex > 0) {
jsonWriter.writeSeparator();
}
final Marker parentMarker = parents[parentIndex];
jsonWriter.writeString(parentMarker.getName());
}
jsonWriter.writeArrayEnd();
};
private final TemplateResolver<LogEvent> internalResolver;
MarkerResolver(final TemplateResolverConfig config) {
this.internalResolver = createInternalResolver(config);
}
private TemplateResolver<LogEvent> createInternalResolver(final TemplateResolverConfig config) {
final String fieldName = config.getString("field");
if ("name".equals(fieldName)) {
return NAME_RESOLVER;
}
if ("parents".equals(fieldName)) {
return PARENTS_RESOLVER;
}
throw new IllegalArgumentException("unknown field: " + config);
}
static String getName() {
return "marker";
}
@Override
public boolean isResolvable(final LogEvent logEvent) {
return logEvent.getMarker() != null;
}
@Override
public void resolve(final LogEvent logEvent, final JsonWriter jsonWriter) {
internalResolver.resolve(logEvent, jsonWriter);
}
}
| MarkerResolver |
java | assertj__assertj-core | assertj-core/src/main/java/org/assertj/core/error/ShouldContainSubsequence.java | {
"start": 1266,
"end": 4874
} | class ____ extends BasicErrorMessageFactory {
public static ShouldContainSubsequence actualDoesNotHaveEnoughElementsToContainSubsequence(Object actual, Object subsequence) {
return new ShouldContainSubsequence(actual, subsequence);
}
private ShouldContainSubsequence(Object actual, Object subsequence) {
super("%nExpecting actual to contain the specified subsequence but actual does not have enough elements to contain it, actual size is %s when subsequence size is %s%nactual:%n %s%nsubsequence:%n %s",
sizeOfArrayOrIterable(actual), sizeOf(subsequence), actual, subsequence);
}
public static ShouldContainSubsequence actualDoesNotHaveEnoughElementsLeftToContainSubsequence(Object actual,
Object subsequence,
int actualIndex,
int subsequenceIndex) {
return new ShouldContainSubsequence(actual, subsequence, actualIndex, subsequenceIndex);
}
private ShouldContainSubsequence(Object actual, Object subsequence, int actualIndex, int subsequenceIndex) {
super("%nExpecting actual to contain the specified subsequence but actual does not have enough elements left to compare after reaching element %s out of %s with %s subsequence element(s) still to find."
+ "%nactual:%n %s%nsubsequence:%n %s",
actualIndex + 1, sizeOfArrayOrIterable(actual), sizeOf(subsequence) - subsequenceIndex, actual, subsequence);
}
private static Object sizeOfArrayOrIterable(Object actual) {
return isArray(actual) ? Arrays.sizeOf(actual) : IterableUtil.sizeOf((Iterable<?>) actual);
}
/**
* Creates a new <code>{@link ShouldContainSubsequence}</code>.
*
* @param actual the actual value in the failed assertion.
* @param subsequence the subsequence of values expected to be in {@code actual}.
* @param subsequenceIndex the index of the first token in {@code subsequence} that was not found in {@code actual}.
* @param comparisonStrategy the {@link ComparisonStrategy} used to evaluate assertion.
* @return the created {@code ErrorMessageFactory}.
*/
public static ShouldContainSubsequence shouldContainSubsequence(Object actual, Object subsequence, int subsequenceIndex,
ComparisonStrategy comparisonStrategy) {
return new ShouldContainSubsequence(actual, subsequence, subsequenceIndex, comparisonStrategy);
}
private ShouldContainSubsequence(Object actual, Object subsequence, int subsequenceIndex,
ComparisonStrategy comparisonStrategy) {
// Failed to find token at subsequence index %s in actual:%n %s
super("%nExpecting actual to contain the specified subsequence but failed to find the element at subsequence index %s in actual"
+ describeComparisonStrategy(comparisonStrategy) + ":%n"
+ "subsequence element not found in actual:%n"
+ " %s%n"
+ "actual:%n"
+ " %s%n"
+ "subsequence:%n %s",
subsequenceIndex, Array.get(subsequence, subsequenceIndex), actual, subsequence);
}
private static String describeComparisonStrategy(ComparisonStrategy comparisonStrategy) {
return comparisonStrategy == StandardComparisonStrategy.instance() ? ""
: " when comparing elements using " + comparisonStrategy;
}
}
| ShouldContainSubsequence |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/ReservationUpdateResponse.java | {
"start": 1530,
"end": 1775
} | class ____ {
@Private
@Unstable
public static ReservationUpdateResponse newInstance() {
ReservationUpdateResponse response =
Records.newRecord(ReservationUpdateResponse.class);
return response;
}
}
| ReservationUpdateResponse |
java | elastic__elasticsearch | x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/GeoHexTilerTests.java | {
"start": 1319,
"end": 16281
} | class ____ extends GeoGridTilerTestCase<GeoHexGridTiler> {
@Override
protected GeoHexGridTiler getGridTiler(GeoBoundingBox bbox, int precision) {
return GeoHexGridTiler.makeGridTiler(precision, bbox);
}
@Override
protected int maxPrecision() {
return H3.MAX_H3_RES;
}
@Override
protected Rectangle getCell(double lon, double lat, int precision) {
return H3CartesianUtil.toBoundingBox(H3.geoToH3(lat, lon, precision));
}
/** The H3 tilers does not produce rectangular tiles, and some tests assume this */
@Override
protected boolean isRectangularTiler() {
return false;
}
@Override
protected long getCellsForDiffPrecision(int precisionDiff) {
return GeoHexGridTiler.calcMaxAddresses(precisionDiff);
}
public void testLargeShape() throws Exception {
// We have a shape and a tile both covering all mercator space, so we expect all level0 H3 cells to match
Rectangle shapeRectangle = new Rectangle(-180, 180, 90, -90);
GeoShapeValues.GeoShapeValue value = geoShapeValue(shapeRectangle);
GeoBoundingBox boundingBox = new GeoBoundingBox(
new GeoPoint(shapeRectangle.getMaxLat(), shapeRectangle.getMinLon()),
new GeoPoint(shapeRectangle.getMinLat(), shapeRectangle.getMaxLon())
);
for (int precision = 0; precision < 4; precision++) {
GeoShapeCellValues values = new GeoShapeCellValues(
makeGeoShapeValues(value),
getGridTiler(boundingBox, precision),
NOOP_BREAKER
);
assertTrue(values.advanceExact(0));
int numTiles = values.docValueCount();
int expectedTiles = expectedBuckets(value, precision, boundingBox);
assertThat(expectedTiles, equalTo(numTiles));
}
}
public void testLargeShapeWithBounds() throws Exception {
// We have a shape covering all space
Rectangle shapeRectangle = new Rectangle(-180, 180, 90, -90);
GeoShapeValues.GeoShapeValue value = geoShapeValue(shapeRectangle);
Point point = GeometryTestUtils.randomPoint();
int res = randomIntBetween(0, H3.MAX_H3_RES - 4);
long h3 = H3.geoToH3(point.getLat(), point.getLon(), res);
Rectangle tile = H3CartesianUtil.toBoundingBox(h3);
GeoBoundingBox boundingBox = new GeoBoundingBox(
new GeoPoint(
GeoEncodingUtils.decodeLatitude(GeoEncodingUtils.encodeLatitude(tile.getMaxLat())),
GeoEncodingUtils.decodeLongitude(GeoEncodingUtils.encodeLongitude(tile.getMinLon()))
),
new GeoPoint(
GeoEncodingUtils.decodeLatitude(GeoEncodingUtils.encodeLatitude(tile.getMinLat())),
GeoEncodingUtils.decodeLongitude(GeoEncodingUtils.encodeLongitude(tile.getMaxLon()))
)
);
for (int precision = res; precision < res + 4; precision++) {
String msg = "Failed " + WellKnownText.toWKT(point) + " at resolution " + res + " with precision " + precision;
GeoShapeCellValues values = new GeoShapeCellValues(
makeGeoShapeValues(value),
getGridTiler(boundingBox, precision),
NOOP_BREAKER
);
assertTrue(values.advanceExact(0));
long[] h3bins = ArrayUtil.copyOfSubArray(values.getValues(), 0, values.docValueCount());
assertCorner(h3bins, new Point(tile.getMinLon(), tile.getMinLat()), precision, msg);
assertCorner(h3bins, new Point(tile.getMaxLon(), tile.getMinLat()), precision, msg);
assertCorner(h3bins, new Point(tile.getMinLon(), tile.getMaxLat()), precision, msg);
assertCorner(h3bins, new Point(tile.getMaxLon(), tile.getMaxLat()), precision, msg);
}
}
// Polygons with bounds inside the South Pole cell break a tiler optimization
public void testTroublesomeShapeAlmostWithinSouthPole_BoundedGeoShapeCellValues() throws Exception {
int precision = 1;
String polygon = """
POLYGON((180.0 -90.0, 180.0 -73.80002960532788, 1.401298464324817E-45 -73.80002960532788,
1.401298464324817E-45 -90.0, 180.0 -90.0))""";
GeoBoundingBox geoBoundingBox = new GeoBoundingBox(
new GeoPoint(19.585157879020088, 0.9999999403953552),
new GeoPoint(-90.0, -26.405694642531472)
);
Geometry geometry = WellKnownText.fromWKT(StandardValidator.instance(true), true, polygon);
GeoShapeValues.GeoShapeValue value = geoShapeValue(geometry);
GeoShapeCellValues cellValues = new GeoShapeCellValues(
makeGeoShapeValues(value),
getGridTiler(geoBoundingBox, precision),
NOOP_BREAKER
);
assertTrue(cellValues.advanceExact(0));
int numBuckets = cellValues.docValueCount();
int expected = expectedBuckets(value, precision, geoBoundingBox);
assertThat("[" + precision + "] bucket count", numBuckets, equalTo(expected));
}
// Polygons with bounds inside the South Pole cell break a tiler optimization
public void testTroublesomeShapeAlmostWithinSouthPoleCell_UnboundedGeoShapeCellValues() throws Exception {
int precision = 0;
String polygon = """
POLYGON((1.7481549674935762E-110 -90.0, 180.0 -90.0, 180.0 -75.113250736563,
1.7481549674935762E-110 -75.113250736563, 1.7481549674935762E-110 -90.0))""";
Geometry geometry = WellKnownText.fromWKT(StandardValidator.instance(true), true, polygon);
GeoShapeValues.GeoShapeValue value = geoShapeValue(geometry);
GeoShapeCellValues unboundedCellValues = new GeoShapeCellValues(makeGeoShapeValues(value), getGridTiler(precision), NOOP_BREAKER);
assertTrue(unboundedCellValues.advanceExact(0));
int numBuckets = unboundedCellValues.docValueCount();
int expected = expectedBuckets(value, precision, null);
assertThat("[" + precision + "] bucket count", numBuckets, equalTo(expected));
}
// Polygons with bounds inside the North Pole cell break a tiler optimization
public void testTroublesomeShapeAlmostWithinNorthPoleCell_UnboundedGeoShapeCellValues() throws Exception {
int precision = 1;
String polygon = """
POLYGON((36.98661841690625 69.44049730644747, 180.0 69.44049730644747,
180.0 90.0, 36.98661841690625 90.0, 36.98661841690625 69.44049730644747))""";
Geometry geometry = WellKnownText.fromWKT(StandardValidator.instance(true), true, polygon);
GeoShapeValues.GeoShapeValue value = geoShapeValue(geometry);
GeoShapeCellValues unboundedCellValues = new GeoShapeCellValues(makeGeoShapeValues(value), getGridTiler(precision), NOOP_BREAKER);
assertTrue(unboundedCellValues.advanceExact(0));
int numBuckets = unboundedCellValues.docValueCount();
int expected = expectedBuckets(value, precision, null);
assertThat("[" + precision + "] bucket count", numBuckets, equalTo(expected));
}
public void testTroublesomePolarCellLevel1_UnboundedGeoShapeCellValues() throws Exception {
int precision = 1;
String polygon = "BBOX (-84.24596376729815, 43.36113427778119, 90.0, 83.51476833522361)";
Geometry geometry = WellKnownText.fromWKT(StandardValidator.instance(true), true, polygon);
GeoShapeValues.GeoShapeValue value = geoShapeValue(geometry);
GeoShapeCellValues unboundedCellValues = new GeoShapeCellValues(makeGeoShapeValues(value), getGridTiler(precision), NOOP_BREAKER);
assertTrue(unboundedCellValues.advanceExact(0));
int numBuckets = unboundedCellValues.docValueCount();
int expected = expectedBuckets(value, precision, null);
assertThat("[" + precision + "] bucket count", numBuckets, equalTo(expected));
}
public void testTroublesomeCellLevel2_BoundedGeoShapeCellValues() throws Exception {
int precision = 2;
String wkt = """
GEOMETRYCOLLECTION (
GEOMETRYCOLLECTION (
POINT(-170 0),
POINT (-178.5 0)
)
)
""";
GeoBoundingBox boundingBox = new GeoBoundingBox(new GeoPoint(4E-4, 179.999), new GeoPoint(-4E-4, -179.999));
Geometry geometry = WellKnownText.fromWKT(StandardValidator.instance(true), true, wkt);
GeoShapeValues.GeoShapeValue value = geoShapeValue(geometry);
GeoShapeCellValues cellValues = new GeoShapeCellValues(
makeGeoShapeValues(value),
getGridTiler(boundingBox, precision),
NOOP_BREAKER
);
assertTrue(cellValues.advanceExact(0));
int numBuckets = cellValues.docValueCount();
int expected = expectedBuckets(value, precision, boundingBox);
assertThat("[" + precision + "] bucket count", numBuckets, equalTo(expected));
}
public void testTroublesomeCellLevel4_BoundedGeoShapeCellValues() throws Exception {
int precision = 4;
String polygon = "POLYGON ((150.0 70.0, 150.0 85.91811374669217, 168.77544806565834 85.91811374669217, 150.0 70.0))";
Geometry geometry = WellKnownText.fromWKT(StandardValidator.instance(true), true, polygon);
GeoBoundingBox boundingBox = new GeoBoundingBox(
new GeoPoint(86.17678739494652, 172.21916569181505),
new GeoPoint(83.01600086049713, 179)
);
GeoShapeValues.GeoShapeValue value = geoShapeValue(geometry);
GeoShapeCellValues cellValues = new GeoShapeCellValues(
makeGeoShapeValues(value),
getGridTiler(boundingBox, precision),
NOOP_BREAKER
);
assertTrue(cellValues.advanceExact(0));
int numBuckets = cellValues.docValueCount();
int expected = expectedBuckets(value, precision, boundingBox);
assertThat("[" + precision + "] bucket count", numBuckets, equalTo(expected));
}
public void testIssue96057() throws Exception {
int precision = 3;
Geometry geometry = new Polygon(
new LinearRing(
new double[] { 47.0, 47.0, -98.41711495022405, -98.41711495022405, 47.0 },
new double[] { -43.27504297314639, 23.280704041384652, 23.280704041384652, -43.27504297314639, -43.27504297314639 }
)
);
GeoBoundingBox geoBoundingBox = new GeoBoundingBox(
new GeoPoint(-44.363846082646845, 55.61563600452277),
new GeoPoint(-75.8747796394427, 42.12290817616412)
);
GeoShapeValues.GeoShapeValue value = geoShapeValue(geometry);
GeoShapeCellValues cellValues = new GeoShapeCellValues(
makeGeoShapeValues(value),
getGridTiler(geoBoundingBox, precision),
NOOP_BREAKER
);
assertTrue(cellValues.advanceExact(0));
int numBuckets = cellValues.docValueCount();
int expected = expectedBuckets(value, precision, geoBoundingBox);
assertThat(numBuckets, equalTo(expected));
}
private void assertCorner(long[] h3bins, Point point, int precision, String msg) throws IOException {
GeoShapeValues.GeoShapeValue cornerValue = geoShapeValue(point);
GeoShapeCellValues cornerValues = new GeoShapeCellValues(makeGeoShapeValues(cornerValue), getGridTiler(precision), NOOP_BREAKER);
assertTrue(cornerValues.advanceExact(0));
long[] h3binsCorner = ArrayUtil.copyOfSubArray(cornerValues.getValues(), 0, cornerValues.docValueCount());
for (long corner : h3binsCorner) {
assertTrue(msg, Arrays.binarySearch(h3bins, corner) != -1);
}
}
@Override
protected void assertSetValuesBruteAndRecursive(Geometry geometry) throws Exception {
int precision = randomIntBetween(1, 4);
GeoHexGridTiler tiler = getGridTiler(precision);
GeoShapeValues.GeoShapeValue value = geoShapeValue(geometry);
GeoShapeCellValues recursiveValues = new GeoShapeCellValues(null, tiler, NOOP_BREAKER);
int recursiveCount = tiler.setValuesByRecursion(recursiveValues, value, value.boundingBox());
GeoShapeCellValues bruteForceValues = new GeoShapeCellValues(null, tiler, NOOP_BREAKER);
int bruteForceCount = 0;
for (long h3 : H3.getLongRes0Cells()) {
bruteForceCount = addBruteForce(tiler, bruteForceValues, value, h3, precision, bruteForceCount);
}
long[] recursive = Arrays.copyOf(recursiveValues.getValues(), recursiveCount);
long[] bruteForce = Arrays.copyOf(bruteForceValues.getValues(), bruteForceCount);
Arrays.sort(recursive);
Arrays.sort(bruteForce);
assertArrayEquals(geometry.toString(), recursive, bruteForce);
}
private int addBruteForce(
GeoHexGridTiler tiler,
GeoShapeCellValues values,
GeoShapeValues.GeoShapeValue geoValue,
long h3,
int precision,
int valueIndex
) throws IOException {
if (H3.getResolution(h3) == precision) {
if (tiler.relateTile(geoValue, h3) != GeoRelation.QUERY_DISJOINT) {
values.resizeCell(valueIndex + 1);
values.add(valueIndex++, h3);
}
} else {
for (long child : H3.h3ToChildren(h3)) {
valueIndex = addBruteForce(tiler, values, geoValue, child, precision, valueIndex);
}
}
return valueIndex;
}
@Override
protected int expectedBuckets(GeoShapeValues.GeoShapeValue geoValue, int precision, GeoBoundingBox bbox) throws Exception {
GeoHexGridTiler bounded = bbox == null ? null : getGridTiler(bbox, precision);
GeoHexGridTiler predicate = getGridTiler(precision);
return computeBuckets(H3.getLongRes0Cells(), bounded, predicate, geoValue, precision);
}
private int computeBuckets(
long[] children,
GeoHexGridTiler bounded,
GeoHexGridTiler predicate,
GeoShapeValues.GeoShapeValue geoValue,
int finalPrecision
) throws IOException {
int count = 0;
for (long child : children) {
if (H3.getResolution(child) == finalPrecision) {
if (intersects(child, geoValue, bounded, predicate)) {
count++;
}
} else {
count += computeBuckets(H3.h3ToChildren(child), bounded, predicate, geoValue, finalPrecision);
}
}
return count;
}
private boolean intersects(long h3, GeoShapeValues.GeoShapeValue geoValue, GeoHexGridTiler bounded, GeoHexGridTiler predicate)
throws IOException {
if (bounded != null && bounded.h3IntersectsBounds(h3) == false) {
return false;
}
return predicate.relateTile(geoValue, h3) != GeoRelation.QUERY_DISJOINT;
}
}
| GeoHexTilerTests |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/EventHubsEndpointBuilderFactory.java | {
"start": 47108,
"end": 47420
} | interface ____
extends
AdvancedEventHubsEndpointConsumerBuilder,
AdvancedEventHubsEndpointProducerBuilder {
default EventHubsEndpointBuilder basic() {
return (EventHubsEndpointBuilder) this;
}
}
public | AdvancedEventHubsEndpointBuilder |
java | apache__camel | components/camel-spring-parent/camel-spring-ai/camel-spring-ai-chat/src/main/java/org/apache/camel/component/springai/chat/SpringAiChatConfiguration.java | {
"start": 9071,
"end": 11261
} | class ____ use for BEAN output format conversion. Required when outputFormat is BEAN.
*/
public void setOutputClass(Class<?> outputClass) {
this.outputClass = outputClass;
}
public String getTags() {
return tags;
}
/**
* Tags for discovering and calling Camel route tools. When provided, the chat component will automatically register
* tools from camel-spring-ai-tools routes matching these tags.
*/
public void setTags(String tags) {
this.tags = tags;
}
public String getSafeguardSensitiveWords() {
return safeguardSensitiveWords;
}
/**
* Comma-separated list of sensitive words for SafeGuard advisor. When provided, the SafeGuard advisor will be
* enabled to prevent generation of content containing these words.
*/
public void setSafeguardSensitiveWords(String safeguardSensitiveWords) {
this.safeguardSensitiveWords = safeguardSensitiveWords;
}
public String getSafeguardFailureResponse() {
return safeguardFailureResponse;
}
/**
* Failure response message for SafeGuard advisor when sensitive content is detected. If not specified, a default
* message will be used.
*/
public void setSafeguardFailureResponse(String safeguardFailureResponse) {
this.safeguardFailureResponse = safeguardFailureResponse;
}
public Integer getSafeguardOrder() {
return safeguardOrder;
}
/**
* Order of execution for SafeGuard advisor. Lower numbers execute first. Default is 0.
*/
public void setSafeguardOrder(Integer safeguardOrder) {
this.safeguardOrder = safeguardOrder;
}
public List<Advisor> getAdvisors() {
return advisors;
}
/**
* List of custom advisors to add to the ChatClient. These advisors will be added after the built-in advisors
* (SimpleLogger, SafeGuard, ChatMemory, QuestionAnswer) in the order they are provided in the list.
*/
public void setAdvisors(List<Advisor> advisors) {
this.advisors = advisors;
}
public Class<?> getEntityClass() {
return entityClass;
}
/**
* The Java | to |
java | eclipse-vertx__vert.x | vertx-core/src/test/java/io/vertx/test/proxy/ProxyBase.java | {
"start": 695,
"end": 2700
} | class ____<P extends ProxyBase<P>> {
private Supplier<String> username;
protected int port;
protected String lastUri;
protected String forceUri;
protected List<String> localAddresses = Collections.synchronizedList(new ArrayList<>());
protected long successDelayMillis = 0;
public ProxyBase() {
port = defaultPort();
}
public P username(String username) {
this.username = () -> username;
return (P) this;
}
public P username(Supplier<String> username) {
this.username = username;
return (P) this;
}
public P username(Collection<String> username) {
Iterator<String> it = username.iterator();
this.username = () -> it.hasNext() ? it.next() : null;
return (P) this;
}
public String nextUserName() {
return username != null ? username.get() : null;
}
public P port(int port) {
this.port = port;
return (P)this;
}
public int port() {
return port;
}
public abstract int defaultPort();
public String lastLocalAddress() {
int idx = localAddresses.size();
return idx == 0 ? null : localAddresses.get(idx - 1);
}
public List<String> localAddresses() {
return localAddresses;
}
/**
* check the last accessed host:ip
*
* @return the lastUri
*/
public String getLastUri() {
return lastUri;
}
/**
* check the last HTTP method
*
* @return the last method
*/
public HttpMethod getLastMethod() {
throw new UnsupportedOperationException();
}
/**
* force uri to connect to a given string (e.g. "localhost:4443") this is used to simulate a host that only resolves
* on the proxy
*/
public void setForceUri(String uri) {
forceUri = uri;
}
public MultiMap getLastRequestHeaders() {
throw new UnsupportedOperationException();
}
public abstract ProxyBase start(Vertx vertx) throws Exception;
public abstract void stop();
public void successDelayMillis(long delayMillis) {
this.successDelayMillis = delayMillis;
}
}
| ProxyBase |
java | apache__logging-log4j2 | log4j-iostreams/src/main/java/org/apache/logging/log4j/io/LoggerPrintWriter.java | {
"start": 1849,
"end": 6390
} | class ____ extends PrintWriter {
private static final String FQCN = LoggerPrintWriter.class.getName();
private final InternalPrintWriter writer;
protected LoggerPrintWriter(
final ExtendedLogger logger,
final boolean autoFlush,
final String fqcn,
final Level level,
final Marker marker) {
super(new StringWriter());
writer = new InternalPrintWriter(logger, autoFlush, fqcn == null ? FQCN : fqcn, level, marker);
}
protected LoggerPrintWriter(
final Writer writer,
final boolean autoFlush,
final ExtendedLogger logger,
final String fqcn,
final Level level,
final Marker marker) {
super(writer);
this.writer = new InternalPrintWriter(writer, autoFlush, logger, fqcn == null ? FQCN : fqcn, level, marker);
}
@Override
public LoggerPrintWriter append(final char c) {
writer.append(c);
return this;
}
@Override
public LoggerPrintWriter append(final CharSequence csq) {
writer.append(csq);
return this;
}
@Override
public LoggerPrintWriter append(final CharSequence csq, final int start, final int end) {
writer.append(csq, start, end);
return this;
}
@Override
public boolean checkError() {
return writer.checkError();
}
@Override
public void close() {
writer.close();
}
@Override
public void flush() {
writer.flush();
}
@Override
public LoggerPrintWriter format(final Locale l, final String format, final Object... args) {
writer.format(l, format, args);
return this;
}
@Override
public LoggerPrintWriter format(final String format, final Object... args) {
writer.format(format, args);
return this;
}
@Override
public void print(final boolean b) {
writer.print(b);
}
@Override
public void print(final char c) {
writer.print(c);
}
@Override
public void print(final char[] s) {
writer.print(s);
}
@Override
public void print(final double d) {
writer.print(d);
}
@Override
public void print(final float f) {
writer.print(f);
}
@Override
public void print(final int i) {
writer.print(i);
}
@Override
public void print(final long l) {
writer.print(l);
}
@Override
public void print(final Object obj) {
writer.print(obj);
}
@Override
public void print(final String s) {
writer.print(s);
}
@Override
public LoggerPrintWriter printf(final Locale l, final String format, final Object... args) {
writer.printf(l, format, args);
return this;
}
@Override
public LoggerPrintWriter printf(final String format, final Object... args) {
writer.printf(format, args);
return this;
}
@Override
public void println() {
writer.println();
}
@Override
public void println(final boolean x) {
writer.println(x);
}
@Override
public void println(final char x) {
writer.println(x);
}
@Override
public void println(final char[] x) {
writer.println(x);
}
@Override
public void println(final double x) {
writer.println(x);
}
@Override
public void println(final float x) {
writer.println(x);
}
@Override
public void println(final int x) {
writer.println(x);
}
@Override
public void println(final long x) {
writer.println(x);
}
@Override
public void println(final Object x) {
writer.println(x);
}
@Override
public void println(final String x) {
writer.println(x);
}
@Override
public String toString() {
return LoggerPrintWriter.class.getSimpleName() + writer.toString();
}
@Override
public void write(final char[] buf) {
writer.write(buf);
}
@Override
public void write(final char[] buf, final int off, final int len) {
writer.write(buf, off, len);
}
@Override
public void write(final int c) {
writer.write(c);
}
@Override
public void write(final String s) {
writer.write(s);
}
@Override
public void write(final String s, final int off, final int len) {
writer.write(s, off, len);
}
}
| LoggerPrintWriter |
java | apache__kafka | metadata/src/test/java/org/apache/kafka/metadata/ReplicasTest.java | {
"start": 1224,
"end": 5290
} | class ____ {
@Test
public void testToList() {
assertEquals(List.of(1, 2, 3, 4), Replicas.toList(new int[] {1, 2, 3, 4}));
assertEquals(List.of(), Replicas.toList(Replicas.NONE));
assertEquals(List.of(2), Replicas.toList(new int[] {2}));
}
@Test
public void testToArray() {
assertArrayEquals(new int[] {3, 2, 1}, Replicas.toArray(List.of(3, 2, 1)));
assertArrayEquals(new int[] {}, Replicas.toArray(List.of()));
assertArrayEquals(new int[] {2}, Replicas.toArray(List.of(2)));
}
@Test
public void testClone() {
assertArrayEquals(new int[]{3, 2, 1}, Replicas.clone(new int[]{3, 2, 1}));
assertArrayEquals(new int[]{}, Replicas.clone(new int[]{}));
assertArrayEquals(new int[]{2}, Replicas.clone(new int[]{2}));
}
@Test
public void testValidate() {
assertTrue(Replicas.validate(new int[] {}));
assertTrue(Replicas.validate(new int[] {3}));
assertTrue(Replicas.validate(new int[] {3, 1, 2, 6}));
assertFalse(Replicas.validate(new int[] {3, 3}));
assertFalse(Replicas.validate(new int[] {4, -1, 3}));
assertFalse(Replicas.validate(new int[] {-1}));
assertFalse(Replicas.validate(new int[] {3, 1, 2, 6, 1}));
assertTrue(Replicas.validate(new int[] {1, 100}));
}
@Test
public void testValidateIsr() {
assertTrue(Replicas.validateIsr(new int[] {}, new int[] {}));
assertTrue(Replicas.validateIsr(new int[] {1, 2, 3}, new int[] {}));
assertTrue(Replicas.validateIsr(new int[] {1, 2, 3}, new int[] {1, 2, 3}));
assertTrue(Replicas.validateIsr(new int[] {3, 1, 2}, new int[] {2, 1}));
assertFalse(Replicas.validateIsr(new int[] {3, 1, 2}, new int[] {4, 1}));
assertFalse(Replicas.validateIsr(new int[] {1, 2, 4}, new int[] {4, 4}));
}
@Test
public void testContains() {
assertTrue(Replicas.contains(new int[] {3, 0, 1}, 0));
assertFalse(Replicas.contains(new int[] {}, 0));
assertTrue(Replicas.contains(new int[] {1}, 1));
}
@Test
public void testCopyWithout() {
assertArrayEquals(new int[] {}, Replicas.copyWithout(new int[] {}, 0));
assertArrayEquals(new int[] {}, Replicas.copyWithout(new int[] {1}, 1));
assertArrayEquals(new int[] {1, 3}, Replicas.copyWithout(new int[] {1, 2, 3}, 2));
assertArrayEquals(new int[] {4, 1}, Replicas.copyWithout(new int[] {4, 2, 2, 1}, 2));
}
@Test
public void testCopyWithout2() {
assertArrayEquals(new int[] {}, Replicas.copyWithout(new int[] {}, new int[] {}));
assertArrayEquals(new int[] {}, Replicas.copyWithout(new int[] {1}, new int[] {1}));
assertArrayEquals(new int[] {1, 3},
Replicas.copyWithout(new int[] {1, 2, 3}, new int[]{2, 4}));
assertArrayEquals(new int[] {4},
Replicas.copyWithout(new int[] {4, 2, 2, 1}, new int[]{2, 1}));
}
@Test
public void testCopyWith() {
assertArrayEquals(new int[] {-1}, Replicas.copyWith(new int[] {}, -1));
assertArrayEquals(new int[] {1, 2, 3, 4}, Replicas.copyWith(new int[] {1, 2, 3}, 4));
}
@Test
public void testToSet() {
assertEquals(Set.of(), Replicas.toSet(new int[] {}));
assertEquals(Set.of(3, 1, 5),
Replicas.toSet(new int[] {1, 3, 5}));
assertEquals(Set.of(1, 2, 10),
Replicas.toSet(new int[] {1, 1, 2, 10, 10}));
}
@Test
public void testContains2() {
assertTrue(Replicas.contains(List.of(), Replicas.NONE));
assertFalse(Replicas.contains(List.of(), new int[] {1}));
assertTrue(Replicas.contains(List.of(1, 2, 3), new int[] {3, 2, 1}));
assertTrue(Replicas.contains(List.of(1, 2, 3, 4), new int[] {3}));
assertTrue(Replicas.contains(List.of(1, 2, 3, 4), new int[] {3, 1}));
assertFalse(Replicas.contains(List.of(1, 2, 3, 4), new int[] {3, 1, 7}));
assertTrue(Replicas.contains(List.of(1, 2, 3, 4), new int[] {}));
}
}
| ReplicasTest |
java | micronaut-projects__micronaut-core | management/src/main/java/io/micronaut/management/endpoint/env/EnvironmentFilterSpecification.java | {
"start": 5178,
"end": 5248
} | enum ____ {
HIDE,
MASK,
PLAIN
}
}
| FilterResult |
java | netty__netty | transport/src/test/java/io/netty/channel/local/LocalChannelTest.java | {
"start": 37568,
"end": 48277
} | class ____ extends ChannelInboundHandlerAdapter {
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
logger.info(String.format("Received message: %s", msg));
ReferenceCountUtil.safeRelease(msg);
}
}
@Test
public void testNotLeakBuffersWhenCloseByRemotePeer() throws Exception {
Bootstrap cb = new Bootstrap();
ServerBootstrap sb = new ServerBootstrap();
cb.group(sharedGroup)
.channel(LocalChannel.class)
.handler(new SimpleChannelInboundHandler<ByteBuf>() {
@Override
public void channelActive(final ChannelHandlerContext ctx) throws Exception {
ctx.writeAndFlush(ctx.alloc().buffer().writeZero(100));
}
@Override
public void channelRead0(ChannelHandlerContext ctx, ByteBuf buffer) throws Exception {
// Just drop the buffer
}
});
sb.group(sharedGroup)
.channel(LocalServerChannel.class)
.childHandler(new ChannelInitializer<LocalChannel>() {
@Override
public void initChannel(LocalChannel ch) throws Exception {
ch.pipeline().addLast(new SimpleChannelInboundHandler<ByteBuf>() {
@Override
public void channelRead0(ChannelHandlerContext ctx, ByteBuf buffer) throws Exception {
while (buffer.isReadable()) {
// Fill the ChannelOutboundBuffer with multiple buffers
ctx.write(buffer.readRetainedSlice(1));
}
// Flush and so transfer the written buffers to the inboundBuffer of the remote peer.
// After this point the remote peer is responsible to release all the buffers.
ctx.flush();
// This close call will trigger the remote peer close as well.
ctx.close();
}
});
}
});
Channel sc = null;
LocalChannel cc = null;
try {
// Start server
sc = sb.bind(TEST_ADDRESS).sync().channel();
// Connect to the server
cc = (LocalChannel) cb.connect(sc.localAddress()).sync().channel();
// Close the channel
closeChannel(cc);
assertTrue(cc.inboundBuffer.isEmpty());
closeChannel(sc);
} finally {
closeChannel(cc);
closeChannel(sc);
}
}
private static void writeAndFlushReadOnSuccess(final ChannelHandlerContext ctx, Object msg) {
ctx.writeAndFlush(msg).addListener(new ChannelFutureListener() {
@Override
public void operationComplete(ChannelFuture future) {
if (future.isSuccess()) {
ctx.read();
}
}
});
}
@Test
@Timeout(value = 5000, unit = TimeUnit.MILLISECONDS)
public void testAutoReadDisabledSharedGroup() throws Exception {
testAutoReadDisabled(sharedGroup, sharedGroup);
}
@Test
@Timeout(value = 5000, unit = TimeUnit.MILLISECONDS)
public void testAutoReadDisabledDifferentGroup() throws Exception {
testAutoReadDisabled(group1, group2);
}
private static void testAutoReadDisabled(EventLoopGroup serverGroup, EventLoopGroup clientGroup) throws Exception {
final CountDownLatch latch = new CountDownLatch(100);
Bootstrap cb = new Bootstrap();
ServerBootstrap sb = new ServerBootstrap();
cb.group(serverGroup)
.channel(LocalChannel.class)
.option(ChannelOption.AUTO_READ, false)
.handler(new ChannelInboundHandlerAdapter() {
@Override
public void channelActive(final ChannelHandlerContext ctx) throws Exception {
writeAndFlushReadOnSuccess(ctx, "test");
}
@Override
public void channelRead(final ChannelHandlerContext ctx, Object msg) throws Exception {
writeAndFlushReadOnSuccess(ctx, msg);
}
});
sb.group(clientGroup)
.channel(LocalServerChannel.class)
.childOption(ChannelOption.AUTO_READ, false)
.childHandler(new ChannelInboundHandlerAdapter() {
@Override
public void channelActive(final ChannelHandlerContext ctx) throws Exception {
ctx.read();
}
@Override
public void channelRead(final ChannelHandlerContext ctx, Object msg) throws Exception {
latch.countDown();
if (latch.getCount() > 0) {
writeAndFlushReadOnSuccess(ctx, msg);
}
}
});
Channel sc = null;
Channel cc = null;
try {
// Start server
sc = sb.bind(TEST_ADDRESS).sync().channel();
cc = cb.connect(TEST_ADDRESS).sync().channel();
latch.await();
} finally {
closeChannel(cc);
closeChannel(sc);
}
}
@Test
@Timeout(value = 5000, unit = TimeUnit.MILLISECONDS)
public void testMaxMessagesPerReadRespectedWithAutoReadSharedGroup() throws Exception {
testMaxMessagesPerReadRespected(sharedGroup, sharedGroup, true);
}
@Test
@Timeout(value = 5000, unit = TimeUnit.MILLISECONDS)
public void testMaxMessagesPerReadRespectedWithoutAutoReadSharedGroup() throws Exception {
testMaxMessagesPerReadRespected(sharedGroup, sharedGroup, false);
}
@Test
@Timeout(value = 5000, unit = TimeUnit.MILLISECONDS)
public void testMaxMessagesPerReadRespectedWithAutoReadDifferentGroup() throws Exception {
testMaxMessagesPerReadRespected(group1, group2, true);
}
@Test
@Timeout(value = 5000, unit = TimeUnit.MILLISECONDS)
public void testMaxMessagesPerReadRespectedWithoutAutoReadDifferentGroup() throws Exception {
testMaxMessagesPerReadRespected(group1, group2, false);
}
private static void testMaxMessagesPerReadRespected(
EventLoopGroup serverGroup, EventLoopGroup clientGroup, final boolean autoRead) throws Exception {
final CountDownLatch countDownLatch = new CountDownLatch(5);
Bootstrap cb = new Bootstrap();
ServerBootstrap sb = new ServerBootstrap();
cb.group(serverGroup)
.channel(LocalChannel.class)
.option(ChannelOption.AUTO_READ, autoRead)
.option(ChannelOption.MAX_MESSAGES_PER_READ, 1)
.handler(new ChannelReadHandler(countDownLatch, autoRead));
sb.group(clientGroup)
.channel(LocalServerChannel.class)
.childHandler(new ChannelInboundHandlerAdapter() {
@Override
public void channelActive(final ChannelHandlerContext ctx) {
for (int i = 0; i < 10; i++) {
ctx.write(i);
}
ctx.flush();
}
});
Channel sc = null;
Channel cc = null;
try {
// Start server
sc = sb.bind(TEST_ADDRESS).sync().channel();
cc = cb.connect(TEST_ADDRESS).sync().channel();
countDownLatch.await();
} finally {
closeChannel(cc);
closeChannel(sc);
}
}
@Test
@Timeout(value = 5000, unit = TimeUnit.MILLISECONDS)
public void testServerMaxMessagesPerReadRespectedWithAutoReadSharedGroup() throws Exception {
testServerMaxMessagesPerReadRespected(sharedGroup, sharedGroup, true);
}
@Test
@Timeout(value = 5000, unit = TimeUnit.MILLISECONDS)
public void testServerMaxMessagesPerReadRespectedWithoutAutoReadSharedGroup() throws Exception {
testServerMaxMessagesPerReadRespected(sharedGroup, sharedGroup, false);
}
@Test
@Timeout(value = 5000, unit = TimeUnit.MILLISECONDS)
public void testServerMaxMessagesPerReadRespectedWithAutoReadDifferentGroup() throws Exception {
testServerMaxMessagesPerReadRespected(group1, group2, true);
}
@Test
@Timeout(value = 5000, unit = TimeUnit.MILLISECONDS)
public void testServerMaxMessagesPerReadRespectedWithoutAutoReadDifferentGroup() throws Exception {
testServerMaxMessagesPerReadRespected(group1, group2, false);
}
private void testServerMaxMessagesPerReadRespected(
EventLoopGroup serverGroup, EventLoopGroup clientGroup, final boolean autoRead) throws Exception {
final CountDownLatch countDownLatch = new CountDownLatch(5);
Bootstrap cb = new Bootstrap();
ServerBootstrap sb = new ServerBootstrap();
cb.group(serverGroup)
.channel(LocalChannel.class)
.handler(new ChannelInitializer<Channel>() {
@Override
protected void initChannel(Channel ch) {
// NOOP
}
});
sb.group(clientGroup)
.channel(LocalServerChannel.class)
.option(ChannelOption.AUTO_READ, autoRead)
.option(ChannelOption.MAX_MESSAGES_PER_READ, 1)
.handler(new ChannelReadHandler(countDownLatch, autoRead))
.childHandler(new ChannelInitializer<Channel>() {
@Override
protected void initChannel(Channel ch) {
// NOOP
}
});
Channel sc = null;
Channel cc = null;
try {
// Start server
sc = sb.bind(TEST_ADDRESS).sync().channel();
for (int i = 0; i < 5; i++) {
try {
cc = cb.connect(TEST_ADDRESS).sync().channel();
} finally {
closeChannel(cc);
}
}
countDownLatch.await();
} finally {
closeChannel(sc);
}
}
private static final | TestHandler |
java | apache__kafka | metadata/src/test/java/org/apache/kafka/metadata/properties/MetaPropertiesEnsembleTest.java | {
"start": 12429,
"end": 18539
} | class ____ extends Random {
private final AtomicInteger index = new AtomicInteger(0);
private final List<Long> results = List.of(
0L,
0L,
2336837413447398698L,
1758400403264101670L,
4341931186263415792L,
6389410885970711333L,
7265008559332826740L,
3478747443029687715L
);
@Override
public long nextLong() {
int curIndex = index.getAndIncrement();
return results.get(curIndex % results.size());
}
}
@Test
public void testCopierGenerateValidDirectoryId() {
MetaPropertiesMockRandom random = new MetaPropertiesMockRandom();
MetaPropertiesEnsemble.Copier copier = new MetaPropertiesEnsemble.Copier(EMPTY);
copier.setRandom(random);
copier.logDirProps().put("/tmp/dir1",
new MetaProperties.Builder().
setVersion(MetaPropertiesVersion.V1).
setClusterId("PpYMbsoRQV-589isZzNzEw").
setNodeId(0).
setDirectoryId(new Uuid(2336837413447398698L, 1758400403264101670L)).
build());
copier.logDirProps().put("/tmp/dir2",
new MetaProperties.Builder().
setVersion(MetaPropertiesVersion.V1).
setClusterId("PpYMbsoRQV-589isZzNzEw").
setNodeId(0).
setDirectoryId(new Uuid(4341931186263415792L, 6389410885970711333L)).
build());
// Verify that we ignore the non-safe IDs, or the IDs that have already been used,
// when invoking generateValidDirectoryId.
assertEquals(new Uuid(7265008559332826740L, 3478747443029687715L),
copier.generateValidDirectoryId());
}
@Test
public void testCopierVerificationFailsOnEmptyAndErrorOverlap() {
MetaPropertiesEnsemble.Copier copier = new MetaPropertiesEnsemble.Copier(EMPTY);
copier.emptyLogDirs().add("/tmp/foo");
copier.errorLogDirs().add("/tmp/foo");
assertEquals("Error: log directory /tmp/foo is in both emptyLogDirs and errorLogDirs.",
assertThrows(RuntimeException.class, copier::verify).getMessage());
}
@Test
public void testCopierVerificationFailsOnEmptyAndLogDirsOverlap() {
MetaPropertiesEnsemble.Copier copier = new MetaPropertiesEnsemble.Copier(EMPTY);
copier.emptyLogDirs().add("/tmp/foo");
copier.logDirProps().put("/tmp/foo", new MetaProperties.Builder().build());
assertEquals("Error: log directory /tmp/foo is in both emptyLogDirs and logDirProps.",
assertThrows(RuntimeException.class, copier::verify).getMessage());
}
@Test
public void testCopierVerificationFailsOnErrorAndLogDirsOverlap() {
MetaPropertiesEnsemble.Copier copier = new MetaPropertiesEnsemble.Copier(EMPTY);
copier.errorLogDirs().add("/tmp/foo");
copier.logDirProps().put("/tmp/foo", new MetaProperties.Builder().build());
assertEquals("Error: log directory /tmp/foo is in both errorLogDirs and logDirProps.",
assertThrows(RuntimeException.class, copier::verify).getMessage());
}
private static final List<MetaProperties> SAMPLE_META_PROPS_LIST = List.of(
new MetaProperties.Builder().
setVersion(MetaPropertiesVersion.V1).
setClusterId("AtgGav8yQjiaJ3rTXE7VCA").
setNodeId(1).
setDirectoryId(Uuid.fromString("s33AdXtkR8Gf_xRO-R_dpA")).
build(),
new MetaProperties.Builder().
setVersion(MetaPropertiesVersion.V1).
setClusterId("AtgGav8yQjiaJ3rTXE7VCA").
setNodeId(1).
setDirectoryId(Uuid.fromString("oTM53yT_SbSfzlvkh_PfVA")).
build(),
new MetaProperties.Builder().
setVersion(MetaPropertiesVersion.V1).
setClusterId("AtgGav8yQjiaJ3rTXE7VCA").
setNodeId(1).
setDirectoryId(Uuid.fromString("FcUhIv2mTzmLqGkVEabyag")).
build());
@Test
public void testCopierWriteLogDirChanges() throws Exception {
MetaPropertiesEnsemble.Loader loader = new MetaPropertiesEnsemble.Loader();
loader.addMetadataLogDir(createLogDir(SAMPLE_META_PROPS_LIST.get(0)));
MetaPropertiesEnsemble ensemble = loader.load();
MetaPropertiesEnsemble.Copier copier = new MetaPropertiesEnsemble.Copier(ensemble);
String newLogDir1 = createEmptyLogDir();
copier.logDirProps().put(newLogDir1, SAMPLE_META_PROPS_LIST.get(1));
String newLogDir2 = createEmptyLogDir();
copier.logDirProps().put(newLogDir2, SAMPLE_META_PROPS_LIST.get(2));
copier.writeLogDirChanges();
assertEquals(SAMPLE_META_PROPS_LIST.get(1).toProperties(), PropertiesUtils.readPropertiesFile(
new File(newLogDir1, META_PROPERTIES_NAME).getAbsolutePath()));
assertEquals(SAMPLE_META_PROPS_LIST.get(2).toProperties(), PropertiesUtils.readPropertiesFile(
new File(newLogDir2, META_PROPERTIES_NAME).getAbsolutePath()));
}
@Test
public void testCopierWriteChanged() throws Exception {
MetaPropertiesEnsemble.Loader loader = new MetaPropertiesEnsemble.Loader();
String dir0 = createLogDir(SAMPLE_META_PROPS_LIST.get(0));
loader.addMetadataLogDir(dir0);
String dir1 = createLogDir(SAMPLE_META_PROPS_LIST.get(1));
loader.addLogDirs(List.of(dir0, dir1));
MetaPropertiesEnsemble ensemble = loader.load();
MetaPropertiesEnsemble.Copier copier = new MetaPropertiesEnsemble.Copier(ensemble);
copier.setLogDirProps(dir0, SAMPLE_META_PROPS_LIST.get(2));
copier.writeLogDirChanges();
assertEquals(SAMPLE_META_PROPS_LIST.get(2).toProperties(), PropertiesUtils.readPropertiesFile(
new File(dir0, META_PROPERTIES_NAME).getAbsolutePath()));
assertEquals(SAMPLE_META_PROPS_LIST.get(1).toProperties(), PropertiesUtils.readPropertiesFile(
new File(dir1, META_PROPERTIES_NAME).getAbsolutePath()));
}
}
| MetaPropertiesMockRandom |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/hhh14276/NestedIdClassDerivedIdentifiersTest.java | {
"start": 549,
"end": 973
} | class ____ {
@Test
public void testMapping() {
final Configuration configuration = new Configuration()
.setProperty( GLOBALLY_QUOTED_IDENTIFIERS, Boolean.TRUE )
.addAnnotatedClasses( PlayerStat.class, Score.class );
try (SessionFactory sessionFactory = configuration.buildSessionFactory()) {
sessionFactory.inTransaction( (session) -> {
// do nothing...
} );
}
}
}
| NestedIdClassDerivedIdentifiersTest |
java | apache__camel | core/camel-core-processor/src/main/java/org/apache/camel/processor/resume/NoOffsetException.java | {
"start": 1025,
"end": 1449
} | class ____ extends RuntimeCamelException {
private final Exchange exchange;
public NoOffsetException(Exchange exchange) {
super("There was no " + Exchange.OFFSET + " header defined on the message exchange: " + exchange);
this.exchange = exchange;
}
/**
* The exchange which caused this failure
*/
public Exchange getExchange() {
return exchange;
}
}
| NoOffsetException |
java | apache__rocketmq | broker/src/test/java/org/apache/rocketmq/broker/processor/ConsumerManageProcessorTest.java | {
"start": 3614,
"end": 15302
} | class ____ {
private ConsumerManageProcessor consumerManageProcessor;
@Mock
private ChannelHandlerContext handlerContext;
@Spy
private BrokerController brokerController = new BrokerController(new BrokerConfig(), new NettyServerConfig(), new NettyClientConfig(), new MessageStoreConfig());
@Mock
private MessageStore messageStore;
@Mock
private Channel channel;
@Mock
private ConsumerOffsetManager consumerOffsetManager;
@Mock
private BrokerOuterAPI brokerOuterAPI;
@Mock
private RpcClient rpcClient;
@Mock
private Future<RpcResponse> responseFuture;
@Mock
private TopicQueueMappingContext mappingContext;
private String topic = "FooBar";
private String group = "FooBarGroup";
@Before
public void init() throws RpcException {
brokerController.setMessageStore(messageStore);
TopicConfigManager topicConfigManager = new TopicConfigManager(brokerController);
topicConfigManager.getTopicConfigTable().put(topic, new TopicConfig(topic));
when(brokerController.getTopicConfigManager()).thenReturn(topicConfigManager);
SubscriptionGroupManager subscriptionGroupManager = new SubscriptionGroupManager(brokerController);
subscriptionGroupManager.getSubscriptionGroupTable().put(group, new SubscriptionGroupConfig());
when(brokerController.getSubscriptionGroupManager()).thenReturn(subscriptionGroupManager);
consumerManageProcessor = new ConsumerManageProcessor(brokerController);
when(brokerController.getBrokerOuterAPI()).thenReturn(brokerOuterAPI);
when(brokerOuterAPI.getRpcClient()).thenReturn(rpcClient);
when(rpcClient.invoke(any(),anyLong())).thenReturn(responseFuture);
TopicQueueMappingDetail topicQueueMappingDetail = new TopicQueueMappingDetail();
topicQueueMappingDetail.setBname("BrokerA");
when(mappingContext.getMappingDetail()).thenReturn(topicQueueMappingDetail);
}
@Test
public void testUpdateConsumerOffset_InvalidTopic() throws Exception {
RemotingCommand request = buildUpdateConsumerOffsetRequest(group, "InvalidTopic", 0, 0);
RemotingCommand response = consumerManageProcessor.processRequest(handlerContext, request);
assertThat(response).isNotNull();
assertThat(response.getCode()).isEqualTo(ResponseCode.TOPIC_NOT_EXIST);
}
@Test
public void testUpdateConsumerOffset_GroupNotExist() throws Exception {
RemotingCommand request = buildUpdateConsumerOffsetRequest("NotExistGroup", topic, 0, 0);
RemotingCommand response = consumerManageProcessor.processRequest(handlerContext, request);
assertThat(response).isNotNull();
assertThat(response.getCode()).isEqualTo(ResponseCode.SUBSCRIPTION_GROUP_NOT_EXIST);
}
@Test
public void testUpdateConsumerOffset() throws RemotingCommandException {
when(brokerController.getConsumerOffsetManager()).thenReturn(consumerOffsetManager);
when(consumerOffsetManager.hasOffsetReset(anyString(),anyString(),anyInt())).thenReturn(true);
RemotingCommand request = buildUpdateConsumerOffsetRequest(group, topic, 0, 0);
RemotingCommand response = consumerManageProcessor.processRequest(handlerContext, request);
assertThat(response).isNotNull();
assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);
when(consumerOffsetManager.hasOffsetReset(anyString(),anyString(),anyInt())).thenReturn(false);
response = consumerManageProcessor.processRequest(handlerContext, request);
assertThat(response).isNotNull();
assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);
}
@Test
public void testGetConsumerListByGroup() throws RemotingCommandException {
GetConsumerListByGroupRequestHeader requestHeader = new GetConsumerListByGroupRequestHeader();
requestHeader.setConsumerGroup(group);
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.GET_CONSUMER_LIST_BY_GROUP, requestHeader);
request.makeCustomHeaderToNet();
RemotingCommand response = consumerManageProcessor.processRequest(handlerContext, request);
assertThat(response).isNotNull();
assertThat(response.getCode()).isEqualTo(ResponseCode.SYSTEM_ERROR);
brokerController.getConsumerManager().getConsumerTable().put(group,new ConsumerGroupInfo(group));
response = consumerManageProcessor.processRequest(handlerContext, request);
assertThat(response).isNotNull();
assertThat(response.getCode()).isEqualTo(ResponseCode.SYSTEM_ERROR);
ConsumerGroupInfo consumerGroupInfo =
this.brokerController.getConsumerManager().getConsumerGroupInfo(
requestHeader.getConsumerGroup());
consumerGroupInfo.getChannelInfoTable().put(channel,new ClientChannelInfo(channel));
response = consumerManageProcessor.processRequest(handlerContext, request);
assertThat(response).isNotNull();
assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);
}
@Test
// Exercises QUERY_CONSUMER_OFFSET through several re-stubbed mock phases. The stubbing
// order matters: each phase overrides the previous `when(...)` on the same mock call.
public void testQueryConsumerOffset() throws RemotingCommandException, ExecutionException, InterruptedException {
RemotingCommand request = buildQueryConsumerOffsetRequest(group, topic, 0, true);
// Phase 1: offset manager not stubbed yet (presumably returns Mockito's default null —
// TODO confirm), so the lookup cannot find an offset -> QUERY_NOT_FOUND.
RemotingCommand response = consumerManageProcessor.processRequest(handlerContext, request);
assertThat(response).isNotNull();
assertThat(response.getCode()).isEqualTo(ResponseCode.QUERY_NOT_FOUND);
// Phase 2: a stored offset (0) exists -> SUCCESS.
when(brokerController.getConsumerOffsetManager()).thenReturn(consumerOffsetManager);
when(consumerOffsetManager.queryOffset(anyString(),anyString(),anyInt())).thenReturn(0L);
response = consumerManageProcessor.processRequest(handlerContext, request);
assertThat(response).isNotNull();
assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);
// Phase 3: no stored offset (-1), but the queue's min offset is also -1 and the
// consume offset is in memory -> still SUCCESS (setZeroIfNotFound was true).
when(consumerOffsetManager.queryOffset(anyString(),anyString(),anyInt())).thenReturn(-1L);
when(messageStore.getMinOffsetInQueue(anyString(),anyInt())).thenReturn(-1L);
when(messageStore.checkInMemByConsumeOffset(anyString(),anyInt(),anyLong(),anyInt())).thenReturn(true);
response = consumerManageProcessor.processRequest(handlerContext, request);
assertThat(response).isNotNull();
assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);
// Phase 4: static-topic mapping context present but with no mapping items ->
// this broker is not the leader for the logical queue.
TopicQueueMappingManager topicQueueMappingManager = mock(TopicQueueMappingManager.class);
when(brokerController.getTopicQueueMappingManager()).thenReturn(topicQueueMappingManager);
when(topicQueueMappingManager.buildTopicQueueMappingContext(any(QueryConsumerOffsetRequestHeader.class))).thenReturn(mappingContext);
response = consumerManageProcessor.processRequest(handlerContext, request);
assertThat(response).isNotNull();
assertThat(response.getCode()).isEqualTo(ResponseCode.NOT_LEADER_FOR_QUEUE);
// Phase 5: single mapping item where this broker is leader and current -> handled
// locally, SUCCESS.
List<LogicQueueMappingItem> items = new ArrayList<>();
LogicQueueMappingItem item1 = createLogicQueueMappingItem("BrokerC", 0, 0L, 0L);
items.add(item1);
when(mappingContext.getMappingItemList()).thenReturn(items);
when(mappingContext.getLeaderItem()).thenReturn(item1);
when(mappingContext.getCurrentItem()).thenReturn(item1);
when(mappingContext.isLeader()).thenReturn(true);
response = consumerManageProcessor.processRequest(handlerContext, request);
assertThat(response).isNotNull();
assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);
// Phase 6: a second mapping item on another broker forces a remote RPC; the stubbed
// RPC reply carries offset 0 -> SUCCESS.
LogicQueueMappingItem item2 = createLogicQueueMappingItem("BrokerA", 0, 0L, 0L);
items.add(item2);
QueryConsumerOffsetResponseHeader queryConsumerOffsetResponseHeader = new QueryConsumerOffsetResponseHeader();
queryConsumerOffsetResponseHeader.setOffset(0L);
RpcResponse rpcResponse = new RpcResponse(ResponseCode.SUCCESS,queryConsumerOffsetResponseHeader,null);
when(responseFuture.get()).thenReturn(rpcResponse);
response = consumerManageProcessor.processRequest(handlerContext, request);
assertThat(response).isNotNull();
assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);
// Phase 7: the remote RPC replies with offset -1 (no offset found) -> QUERY_NOT_FOUND.
queryConsumerOffsetResponseHeader.setOffset(-1L);
rpcResponse = new RpcResponse(ResponseCode.SUCCESS,queryConsumerOffsetResponseHeader,null);
when(responseFuture.get()).thenReturn(rpcResponse);
response = consumerManageProcessor.processRequest(handlerContext, request);
assertThat(response).isNotNull();
assertThat(response.getCode()).isEqualTo(ResponseCode.QUERY_NOT_FOUND);
}
@Test
public void testRewriteRequestForStaticTopic() throws RpcException, ExecutionException, InterruptedException {
    UpdateConsumerOffsetRequestHeader header = new UpdateConsumerOffsetRequestHeader();
    header.setConsumerGroup(group);
    header.setTopic(topic);
    header.setQueueId(0);
    header.setCommitOffset(0L);

    // No mapping items / not the leader -> the rewrite is rejected.
    RemotingCommand result = consumerManageProcessor.rewriteRequestForStaticTopic(header, mappingContext);
    assertThat(result).isNotNull();
    assertThat(result.getCode()).isEqualTo(ResponseCode.NOT_LEADER_FOR_QUEUE);

    // Leader mapping item present and the stubbed remote RPC succeeds -> SUCCESS.
    List<LogicQueueMappingItem> mappingItems = new ArrayList<>();
    mappingItems.add(createLogicQueueMappingItem("BrokerC", 0, 0L, 0L));
    when(mappingContext.getMappingItemList()).thenReturn(mappingItems);
    when(mappingContext.isLeader()).thenReturn(true);
    RpcResponse stubbedReply =
            new RpcResponse(ResponseCode.SUCCESS, new UpdateConsumerOffsetResponseHeader(), null);
    when(responseFuture.get()).thenReturn(stubbedReply);
    result = consumerManageProcessor.rewriteRequestForStaticTopic(header, mappingContext);
    assertThat(result).isNotNull();
    assertThat(result.getCode()).isEqualTo(ResponseCode.SUCCESS);
}
/**
 * Builds a QUERY_CONSUMER_OFFSET request command for the given group/topic/queue.
 * The custom header is serialized into the network form before returning.
 */
public RemotingCommand buildQueryConsumerOffsetRequest(String group, String topic, int queueId, boolean setZeroIfNotFound) {
    QueryConsumerOffsetRequestHeader header = new QueryConsumerOffsetRequestHeader();
    header.setConsumerGroup(group);
    header.setTopic(topic);
    header.setQueueId(queueId);
    header.setSetZeroIfNotFound(setZeroIfNotFound);
    RemotingCommand command = RemotingCommand.createRequestCommand(RequestCode.QUERY_CONSUMER_OFFSET, header);
    command.makeCustomHeaderToNet();
    return command;
}
/**
 * Creates a {@link LogicQueueMappingItem} pointing at the given broker/queue with
 * the given physical start offset and logical offset.
 */
public LogicQueueMappingItem createLogicQueueMappingItem(String brokerName, int queueId, long startOffset, long logicOffset) {
    LogicQueueMappingItem mappingItem = new LogicQueueMappingItem();
    mappingItem.setBname(brokerName);
    mappingItem.setQueueId(queueId);
    mappingItem.setStartOffset(startOffset);
    mappingItem.setLogicOffset(logicOffset);
    return mappingItem;
}
/**
 * Builds an UPDATE_CONSUMER_OFFSET request command committing {@code offset} for the
 * given group/topic/queue. The custom header is serialized before returning.
 */
private RemotingCommand buildUpdateConsumerOffsetRequest(String group, String topic, int queueId, long offset) {
    UpdateConsumerOffsetRequestHeader header = new UpdateConsumerOffsetRequestHeader();
    header.setConsumerGroup(group);
    header.setTopic(topic);
    header.setQueueId(queueId);
    header.setCommitOffset(offset);
    RemotingCommand command = RemotingCommand.createRequestCommand(RequestCode.UPDATE_CONSUMER_OFFSET, header);
    command.makeCustomHeaderToNet();
    return command;
}
}
| ConsumerManageProcessorTest |
java | apache__flink | flink-core/src/test/java/org/apache/flink/util/CloseableIteratorTest.java | {
"start": 1156,
"end": 2923
} | class ____ {
// Fixture data shared by the flatten iteration and close-order tests below.
private static final String[] ELEMENTS = new String[] {"element-1", "element-2"};
@Test
void testFlattenEmpty() throws Exception {
    // Every way of flattening "nothing" must produce an already-exhausted iterator.
    List<CloseableIterator<?>> emptyVariants =
            asList(
                    CloseableIterator.flatten(),
                    CloseableIterator.flatten(CloseableIterator.empty()),
                    CloseableIterator.flatten(CloseableIterator.flatten()));
    for (CloseableIterator<?> iterator : emptyVariants) {
        assertThat(iterator).isExhausted();
        iterator.close();
    }
}
@Test
void testFlattenIteration() {
    // Flattening two single-element iterators must yield the elements in order.
    CloseableIterator<String> flattened =
            CloseableIterator.flatten(
                    CloseableIterator.ofElement(ELEMENTS[0], unused -> {}),
                    CloseableIterator.ofElement(ELEMENTS[1], unused -> {}));
    List<String> seen = new ArrayList<>();
    while (flattened.hasNext()) {
        seen.add(flattened.next());
    }
    assertThat(seen.toArray()).isEqualTo(ELEMENTS);
}
@Test
void testFlattenErrorHandling() {
    // Records the order in which the element closers run; the first closer throws.
    List<String> closeOrder = new ArrayList<>();
    CloseableIterator<String> flattened =
            CloseableIterator.flatten(
                    CloseableIterator.ofElement(
                            ELEMENTS[0],
                            element -> {
                                closeOrder.add(element);
                                throw new TestException();
                            }),
                    CloseableIterator.ofElement(ELEMENTS[1], closeOrder::add));
    // Closing must propagate the failure from the first closer ...
    assertThatThrownBy(flattened::close).isInstanceOf(TestException.class);
    // ... while still having closed every inner iterator, in order.
    assertThat(closeOrder.toArray()).isEqualTo(ELEMENTS);
}
private static | CloseableIteratorTest |
java | google__auto | value/src/it/functional/src/test/java/com/google/auto/value/gwt/GwtCompilationTest.java | {
"start": 1473,
"end": 2287
} | interface ____ {",
" boolean serializable() default false;",
"}");
/**
* Test where the serialized properties don't include generics, so no {@code @SuppressWarnings}
* annotation is needed. We explicitly check that one is not included anyway, because Eclipse for
* example can be configured to complain about unnecessary warning suppression.
*/
@Test
public void testBasic() {
JavaFileObject javaFileObject =
JavaFileObjects.forSourceLines(
"foo.bar.Baz",
"package foo.bar;",
"",
"import com.google.auto.value.AutoValue;",
"import com.google.annotations.GwtCompatible;",
"",
"@AutoValue",
"@GwtCompatible(serializable = true)",
"public abstract | GwtCompatible |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.