language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/EqualsIncompatibleTypeTest.java | {
"start": 12939,
"end": 13024
} | interface ____<K extends EntityKey<K>> extends Comparable<K> {}
static final | EntityKey |
java | junit-team__junit5 | platform-tests/src/test/java/org/junit/platform/suite/engine/testsuites/LifecycleMethodsSuites.java | {
"start": 4730,
"end": 4892
} | class ____ {
@AfterSuite
static String nonVoidAfterSuite() {
fail("Should not be called");
return "";
}
}
@TestSuite
public static | NonVoidAfterSuite |
java | spring-projects__spring-boot | core/spring-boot/src/test/java/org/springframework/boot/context/properties/bind/BindableRuntimeHintsRegistrarTests.java | {
"start": 15073,
"end": 15193
} | class ____ {
public List<Address> getAllAddresses() {
return Collections.emptyList();
}
}
public static | WithList |
java | apache__flink | flink-core/src/test/java/org/apache/flink/types/NormalizableKeyTest.java | {
"start": 1045,
"end": 6044
} | class ____ {
@Test
void testIntValue() {
IntValue int0 = new IntValue(10);
IntValue int1 = new IntValue(10);
IntValue int2 = new IntValue(-10);
IntValue int3 = new IntValue(255);
IntValue int4 = new IntValue(Integer.MAX_VALUE);
IntValue int5 = new IntValue(Integer.MAX_VALUE & 0xff800000);
IntValue int6 = new IntValue(Integer.MIN_VALUE);
IntValue int7 = new IntValue(Integer.MIN_VALUE & 0xff800000);
for (int length = 2; length <= 4; length++) {
assertNormalizableKey(int0, int1, length);
assertNormalizableKey(int0, int2, length);
assertNormalizableKey(int0, int3, length);
assertNormalizableKey(int0, int4, length);
assertNormalizableKey(int0, int5, length);
assertNormalizableKey(int0, int6, length);
assertNormalizableKey(int0, int7, length);
assertNormalizableKey(int4, int5, length);
assertNormalizableKey(int6, int7, length);
}
}
@Test
void testLongValue() {
LongValue long0 = new LongValue(10);
LongValue long1 = new LongValue(10);
LongValue long2 = new LongValue(-10);
LongValue long3 = new LongValue(255);
LongValue long4 = new LongValue(Long.MAX_VALUE);
LongValue long5 = new LongValue(Long.MAX_VALUE & 0xff80000000000000L);
LongValue long6 = new LongValue(Long.MIN_VALUE);
LongValue long7 = new LongValue(Long.MIN_VALUE & 0xff80000000000000L);
for (int length = 2; length <= 8; length++) {
assertNormalizableKey(long0, long1, length);
assertNormalizableKey(long0, long2, length);
assertNormalizableKey(long0, long3, length);
assertNormalizableKey(long0, long4, length);
assertNormalizableKey(long0, long5, length);
assertNormalizableKey(long0, long6, length);
assertNormalizableKey(long0, long7, length);
assertNormalizableKey(long4, long5, length);
assertNormalizableKey(long6, long7, length);
}
}
@Test
void testStringValue() {
StringValue string0 = new StringValue("This is a test");
StringValue string1 = new StringValue("This is a test with some longer String");
StringValue string2 = new StringValue("This is a tesa");
StringValue string3 = new StringValue("This");
StringValue string4 = new StringValue("Ünlaut ßtring µ avec é y ¢");
for (int length = 5; length <= 15; length += 10) {
assertNormalizableKey(string0, string1, length);
assertNormalizableKey(string0, string2, length);
assertNormalizableKey(string0, string3, length);
assertNormalizableKey(string0, string4, length);
}
}
@Test
void testPactNull() {
final NullValue pn1 = new NullValue();
final NullValue pn2 = new NullValue();
assertNormalizableKey(pn1, pn2, 0);
}
@Test
void testPactChar() {
final CharValue c1 = new CharValue((char) 0);
final CharValue c2 = new CharValue((char) 1);
final CharValue c3 = new CharValue((char) 0xff);
final CharValue c4 = new CharValue(Character.MAX_VALUE);
final CharValue c5 = new CharValue((char) (Character.MAX_VALUE + (char) 1));
final CharValue c6 = new CharValue(Character.MAX_HIGH_SURROGATE);
final CharValue c7 = new CharValue(Character.MAX_LOW_SURROGATE);
final CharValue c8 = new CharValue(Character.MAX_SURROGATE);
CharValue[] allChars = new CharValue[] {c1, c2, c3, c4, c5, c6, c7, c8};
for (int i = 0; i < 5; i++) {
// self checks
for (CharValue allChar1 : allChars) {
for (CharValue allChar : allChars) {
assertNormalizableKey(allChar1, allChar, i);
}
}
}
}
@SuppressWarnings("unchecked")
private <T extends Comparable<T>> void assertNormalizableKey(
NormalizableKey<T> key1, NormalizableKey<T> key2, int len) {
byte[] normalizedKeys = new byte[32];
MemorySegment wrapper = MemorySegmentFactory.wrap(normalizedKeys);
key1.copyNormalizedKey(wrapper, 0, len);
key2.copyNormalizedKey(wrapper, len, len);
for (int i = 0; i < len; i++) {
int comp;
int normKey1 = normalizedKeys[i] & 0xFF;
int normKey2 = normalizedKeys[len + i] & 0xFF;
if ((comp = (normKey1 - normKey2)) != 0) {
assertThat(Math.signum(key1.compareTo((T) key2)) != Math.signum(comp))
.isFalse()
.describedAs(
"Normalized key comparison differs from actual key comparison");
return;
}
}
assertThat(key1.compareTo((T) key2) == 0 || key1.getMaxNormalizedKeyLen() > len).isTrue();
}
}
| NormalizableKeyTest |
java | mybatis__mybatis-3 | src/test/java/org/apache/ibatis/submitted/dynsql/Parameter.java | {
"start": 757,
"end": 1421
} | class ____ {
private String schema;
private List<Integer> ids;
private boolean enabled;
public String getFred() {
// added this method to check for bug with DynamicContext
// IBATIS-777
throw new RuntimeException("This method should not be called.");
}
public String getSchema() {
return schema;
}
public void setSchema(String schema) {
this.schema = schema;
}
public List<Integer> getIds() {
return ids;
}
public void setIds(List<Integer> ids) {
this.ids = ids;
}
public boolean isEnabled() {
return enabled;
}
public void setEnabled(boolean enabled) {
this.enabled = enabled;
}
}
| Parameter |
java | apache__camel | components/camel-rss/src/test/java/org/apache/camel/component/rss/RssFilterTest.java | {
"start": 1201,
"end": 2241
} | class ____ extends CamelTestSupport {
@Test
public void testFilterOutNonCamelPosts() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedMessageCount(6);
mock.assertIsSatisfied();
}
@Override
protected void bindToRegistry(Registry registry) {
registry.bind("myFilterBean", new FilterBean());
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
// See RssFilterWithXPathTest for an example of how to do this with XPath
// START SNIPPET: ex1
// only entries with Camel in the title will get through the filter
from("rss:file:src/test/data/rss20.xml?splitEntries=true&delay=100").filter()
.method("myFilterBean", "titleContainsCamel").to("mock:result");
// END SNIPPET: ex1
}
};
}
// START SNIPPET: ex2
public static | RssFilterTest |
java | elastic__elasticsearch | x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ibmwatsonx/response/IbmWatsonxRankedResponseEntity.java | {
"start": 1482,
"end": 6851
} | class ____ {
private static final Logger logger = LogManager.getLogger(IbmWatsonxRankedResponseEntity.class);
/**
* Parses the IBM watsonx ranked response.
*
* For a request like:
* "model": "rerank-english-v2.0",
* "query": "database",
* "return_documents": true,
* "top_n": 3,
* "input": ["greenland", "google","john", "mysql","potter", "grammar"]
* <p>
* The response will look like (without whitespace):
* {
* "rerank": [
* {
* "index": 3,
* "relevance_score": 0.7989932
* },
* {
* "index": 5,
* "relevance_score": 0.61281824
* },
* {
* "index": 1,
* "relevance_score": 0.5762553
* },
* {
* "index": 4,
* "relevance_score": 0.47395563
* },
* {
* "index": 0,
* "relevance_score": 0.4338926
* },
* {
* "index": 2,
* "relevance_score": 0.42638257
* }
* ],
* }
*
* @param response the http response from IBM watsonx
* @return the parsed response
* @throws IOException if there is an error parsing the response
*/
public static InferenceServiceResults fromResponse(HttpResult response) throws IOException {
var parserConfig = XContentParserConfiguration.EMPTY.withDeprecationHandler(LoggingDeprecationHandler.INSTANCE);
try (XContentParser jsonParser = XContentFactory.xContent(XContentType.JSON).createParser(parserConfig, response.body())) {
moveToFirstToken(jsonParser);
XContentParser.Token token = jsonParser.currentToken();
ensureExpectedToken(XContentParser.Token.START_OBJECT, token, jsonParser);
positionParserAtTokenAfterField(jsonParser, "results", FAILED_TO_FIND_FIELD_TEMPLATE); // TODO error message
token = jsonParser.currentToken();
if (token == XContentParser.Token.START_ARRAY) {
return new RankedDocsResults(parseList(jsonParser, IbmWatsonxRankedResponseEntity::parseRankedDocObject));
} else {
throwUnknownToken(token, jsonParser);
}
// This should never be reached. The above code should either return successfully or hit the throwUnknownToken
// or throw a parsing exception
throw new IllegalStateException("Reached an invalid state while parsing the Watsonx response");
}
}
private static RankedDocsResults.RankedDoc parseRankedDocObject(XContentParser parser) throws IOException {
ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.currentToken(), parser);
int index = -1;
float score = -1;
String documentText = null;
parser.nextToken();
while (parser.currentToken() != XContentParser.Token.END_OBJECT) {
if (parser.currentToken() == XContentParser.Token.FIELD_NAME) {
switch (parser.currentName()) {
case "index":
parser.nextToken(); // move to VALUE_NUMBER
index = parser.intValue();
parser.nextToken(); // move to next FIELD_NAME or END_OBJECT
break;
case "score":
parser.nextToken(); // move to VALUE_NUMBER
score = parser.floatValue();
parser.nextToken(); // move to next FIELD_NAME or END_OBJECT
break;
case "input":
parser.nextToken(); // move to START_OBJECT; document text is wrapped in an object
ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.currentToken(), parser);
do {
if (parser.currentToken() == XContentParser.Token.FIELD_NAME && parser.currentName().equals("text")) {
parser.nextToken(); // move to VALUE_STRING
documentText = parser.text();
}
} while (parser.nextToken() != XContentParser.Token.END_OBJECT);
parser.nextToken();// move past END_OBJECT
// parser should now be at the next FIELD_NAME or END_OBJECT
break;
default:
throwUnknownField(parser.currentName(), parser);
}
} else {
parser.nextToken();
}
}
if (index == -1) {
logger.warn("Failed to find required field [index] in Watsonx rerank response");
}
if (score == -1) {
logger.warn("Failed to find required field [relevance_score] in Watsonx rerank response");
}
// documentText may or may not be present depending on the request parameter
return new RankedDocsResults.RankedDoc(index, score, documentText);
}
private IbmWatsonxRankedResponseEntity() {}
static String FAILED_TO_FIND_FIELD_TEMPLATE = "Failed to find required field [%s] in Watsonx rerank response";
}
| IbmWatsonxRankedResponseEntity |
java | spring-projects__spring-boot | module/spring-boot-webmvc/src/main/java/org/springframework/boot/webmvc/actuate/endpoint/web/WebMvcEndpointHandlerMapping.java | {
"start": 2275,
"end": 3567
} | class ____ extends AbstractWebMvcEndpointHandlerMapping {
private final EndpointLinksResolver linksResolver;
/**
* Creates a new {@code WebMvcEndpointHandlerMapping} instance that provides mappings
* for the given endpoints.
* @param endpointMapping the base mapping for all endpoints
* @param endpoints the web endpoints
* @param endpointMediaTypes media types consumed and produced by the endpoints
* @param corsConfiguration the CORS configuration for the endpoints or {@code null}
* @param linksResolver resolver for determining links to available endpoints
* @param shouldRegisterLinksMapping whether the links endpoint should be registered
*/
public WebMvcEndpointHandlerMapping(EndpointMapping endpointMapping, Collection<ExposableWebEndpoint> endpoints,
EndpointMediaTypes endpointMediaTypes, @Nullable CorsConfiguration corsConfiguration,
EndpointLinksResolver linksResolver, boolean shouldRegisterLinksMapping) {
super(endpointMapping, endpoints, endpointMediaTypes, corsConfiguration, shouldRegisterLinksMapping);
this.linksResolver = linksResolver;
setOrder(-100);
}
@Override
protected LinksHandler getLinksHandler() {
return new WebMvcLinksHandler();
}
/**
* Handler for root endpoint providing links.
*/
| WebMvcEndpointHandlerMapping |
java | quarkusio__quarkus | extensions/kubernetes-config/runtime/src/main/java/io/quarkus/kubernetes/config/runtime/ConfigMapConfigSourceUtil.java | {
"start": 1862,
"end": 2795
} | class ____ extends MapBackedConfigSource {
private static final String NAME_FORMAT = "ConfigMapStringInputPropertiesConfigSource[configMap=%s,file=%s]";
ConfigMapStringInputPropertiesConfigSource(String configMapName, String fileName, String input, int ordinal) {
super(String.format(NAME_FORMAT, configMapName, fileName), readProperties(input), ordinal);
}
@SuppressWarnings({ "rawtypes", "unchecked" })
private static Map<String, String> readProperties(String rawData) {
try (StringReader br = new StringReader(rawData)) {
final Properties properties = new Properties();
properties.load(br);
return (Map<String, String>) (Map) properties;
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
}
private static | ConfigMapStringInputPropertiesConfigSource |
java | redisson__redisson | redisson-spring-data/redisson-spring-data-16/src/test/java/org/redisson/RedisRunner.java | {
"start": 4358,
"end": 28733
} | enum ____ {
K,
E,
g,
$,
l,
s,
h,
z,
x,
e,
A
}
private final LinkedHashMap<REDIS_OPTIONS, String> options = new LinkedHashMap<>();
protected static RedisRunner.RedisProcess defaultRedisInstance;
private static int defaultRedisInstanceExitCode;
private String path = "";
private String defaultDir = Paths.get("").toString();
private boolean nosave = false;
private boolean randomDir = false;
private ArrayList<String> bindAddr = new ArrayList<>();
private int port = 6379;
private int retryCount = Integer.MAX_VALUE;
private boolean randomPort = false;
private String sentinelFile;
private String clusterFile;
{
this.options.put(REDIS_OPTIONS.BINARY_PATH, RedissonRuntimeEnvironment.redisBinaryPath);
}
/**
* To change the <b>redisBinary</b> system property for running the test,
* use <i>argLine</i> option from surefire plugin:
*
* $ mvn -DargLine="-DredisBinary=`which redis-server`" -Punit-test clean \
* verify
*
* @param configPath
* @return Process running redis instance
* @throws IOException
* @throws InterruptedException
* @see
* <a href="http://maven.apache.org/surefire/maven-surefire-plugin/test-mojo.html#argLine">
* http://maven.apache.org/surefire/maven-surefire-plugin/test-mojo.html#argLine</a>
*/
public static RedisProcess runRedisWithConfigFile(String configPath) throws IOException, InterruptedException {
URL resource = RedisRunner.class.getResource(configPath);
return runWithOptions(new RedisRunner(), RedissonRuntimeEnvironment.redisBinaryPath, resource.getFile());
}
private static RedisProcess runWithOptions(RedisRunner runner, String... options) throws IOException, InterruptedException {
List<String> launchOptions = Arrays.stream(options)
.map(x -> Arrays.asList(x.split(" "))).flatMap(x -> x.stream())
.collect(Collectors.toList());
System.out.println("REDIS LAUNCH OPTIONS: " + Arrays.toString(launchOptions.toArray()));
ProcessBuilder master = new ProcessBuilder(launchOptions)
.redirectErrorStream(true)
.directory(new File(RedissonRuntimeEnvironment.tempDir));
Process p = master.start();
new Thread(() -> {
BufferedReader reader = new BufferedReader(new InputStreamReader(p.getInputStream()));
String line;
try {
while (p.isAlive() && (line = reader.readLine()) != null && !RedissonRuntimeEnvironment.isTravis) {
System.out.println("REDIS PROCESS: " + line);
}
} catch (IOException ex) {
System.out.println("Exception: " + ex.getLocalizedMessage());
}
}).start();
Thread.sleep(1500);
return new RedisProcess(p, runner);
}
public RedisProcess run() throws IOException, InterruptedException, FailedToStartRedisException {
if (!options.containsKey(REDIS_OPTIONS.DIR)) {
addConfigOption(REDIS_OPTIONS.DIR, defaultDir);
}
if (randomPort) {
for (int i = 0; i < retryCount; i++) {
this.port = findFreePort();
addConfigOption(REDIS_OPTIONS.PORT, this.port);
try {
return runAndCheck();
} catch (FailedToStartRedisException e) {
}
}
throw new FailedToStartRedisException();
} else {
return runAndCheck();
}
}
public RedisProcess runAndCheck() throws IOException, InterruptedException, FailedToStartRedisException {
List<String> args = new ArrayList(options.values());
if (sentinelFile != null && sentinelFile.length() > 0) {
String confFile = defaultDir + File.separator + sentinelFile;
try (PrintWriter printer = new PrintWriter(new FileWriter(confFile))) {
args.stream().forEach((arg) -> {
if (arg.contains("--")) {
printer.println(arg.replace("--", ""));
}
});
}
args = args.subList(0, 1);
args.add(confFile);
args.add("--sentinel");
}
RedisProcess rp = runWithOptions(this, args.toArray(new String[0]));
if (!isCluster()
&& rp.redisProcess.waitFor(1000, TimeUnit.MILLISECONDS)) {
throw new FailedToStartRedisException();
}
Runtime.getRuntime().addShutdownHook(new Thread(() -> {
rp.stop();
}));
return rp;
}
public boolean hasOption(REDIS_OPTIONS option) {
return options.containsKey(option);
}
private void addConfigOption(REDIS_OPTIONS option, Object... args) {
StringBuilder sb = new StringBuilder("--")
.append(option.toString()
.replaceAll("_", "-")
.replaceAll("\\$", " ")
.toLowerCase())
.append(" ")
.append(Arrays.stream(args).map(Object::toString)
.collect(Collectors.joining(" ")));
this.options.put(option,
option.isAllowMultiple()
? sb.insert(0, this.options.getOrDefault(option, "")).toString()
: sb.toString());
}
private String convertBoolean(boolean b) {
return b ? "yes" : "no";
}
public RedisRunner daemonize(boolean daemonize) {
addConfigOption(REDIS_OPTIONS.DAEMONIZE, convertBoolean(daemonize));
return this;
}
public RedisRunner pidfile(String pidfile) {
addConfigOption(REDIS_OPTIONS.PIDFILE, pidfile);
return this;
}
public RedisRunner port(int port) {
this.port = port;
this.randomPort = false;
addConfigOption(REDIS_OPTIONS.PORT, port);
return this;
}
public RedisRunner randomPort() {
return randomPort(Integer.MAX_VALUE);
}
public RedisRunner randomPort(int retryCount) {
this.randomPort = true;
this.retryCount = retryCount;
options.remove(REDIS_OPTIONS.PORT);
return this;
}
public int getPort() {
return this.port;
}
public RedisRunner tcpBacklog(long tcpBacklog) {
addConfigOption(REDIS_OPTIONS.TCP_BACKLOG, tcpBacklog);
return this;
}
public RedisRunner bind(String bind) {
this.bindAddr.add(bind);
addConfigOption(REDIS_OPTIONS.BIND, bind);
return this;
}
public ArrayList<String> getBindAddr() {
return this.bindAddr;
}
public RedisRunner unixsocket(String unixsocket) {
addConfigOption(REDIS_OPTIONS.UNIXSOCKET, unixsocket);
return this;
}
public RedisRunner unixsocketperm(int unixsocketperm) {
addConfigOption(REDIS_OPTIONS.UNIXSOCKETPERM, unixsocketperm);
return this;
}
public RedisRunner timeout(long timeout) {
addConfigOption(REDIS_OPTIONS.TIMEOUT, timeout);
return this;
}
public RedisRunner tcpKeepalive(long tcpKeepalive) {
addConfigOption(REDIS_OPTIONS.TCP_KEEPALIVE, tcpKeepalive);
return this;
}
public RedisRunner loglevel(LOGLEVEL_OPTIONS loglevel) {
addConfigOption(REDIS_OPTIONS.LOGLEVEL, loglevel.toString());
return this;
}
public RedisRunner logfile(String logfile) {
addConfigOption(REDIS_OPTIONS.LOGLEVEL, logfile);
return this;
}
public RedisRunner syslogEnabled(boolean syslogEnabled) {
addConfigOption(REDIS_OPTIONS.SYSLOG_ENABLED, convertBoolean(syslogEnabled));
return this;
}
public RedisRunner syslogIdent(String syslogIdent) {
addConfigOption(REDIS_OPTIONS.SYSLOG_IDENT, syslogIdent);
return this;
}
public RedisRunner syslogFacility(SYSLOG_FACILITY_OPTIONS syslogFacility) {
addConfigOption(REDIS_OPTIONS.SYSLOG_IDENT, syslogFacility.toString());
return this;
}
public RedisRunner databases(int databases) {
addConfigOption(REDIS_OPTIONS.DATABASES, databases);
return this;
}
public RedisRunner save(long seconds, long changes) {
if (!nosave) {
addConfigOption(REDIS_OPTIONS.SAVE, seconds, changes);
}
return this;
}
/**
* Phantom option
*
* @return RedisRunner
*/
public RedisRunner nosave() {
this.nosave = true;
options.remove(REDIS_OPTIONS.SAVE);
// addConfigOption(REDIS_OPTIONS.SAVE, "''");
return this;
}
public RedisRunner stopWritesOnBgsaveError(boolean stopWritesOnBgsaveError) {
addConfigOption(REDIS_OPTIONS.STOP_WRITES_ON_BGSAVE_ERROR, convertBoolean(stopWritesOnBgsaveError));
return this;
}
public RedisRunner rdbcompression(boolean rdbcompression) {
addConfigOption(REDIS_OPTIONS.RDBCOMPRESSION, convertBoolean(rdbcompression));
return this;
}
public RedisRunner rdbchecksum(boolean rdbchecksum) {
addConfigOption(REDIS_OPTIONS.RDBCHECKSUM, convertBoolean(rdbchecksum));
return this;
}
public RedisRunner dbfilename(String dbfilename) {
addConfigOption(REDIS_OPTIONS.DBFILENAME, dbfilename);
return this;
}
public RedisRunner dir(String dir) {
if (!randomDir) {
addConfigOption(REDIS_OPTIONS.DIR, dir);
this.path = dir;
}
return this;
}
/**
* Phantom option
*
* @return RedisRunner
*/
public RedisRunner randomDir() {
this.randomDir = true;
options.remove(REDIS_OPTIONS.DIR);
makeRandomDefaultDir();
addConfigOption(REDIS_OPTIONS.DIR, "\"" + defaultDir + "\"");
return this;
}
public RedisRunner slaveof(Inet4Address masterip, int port) {
addConfigOption(REDIS_OPTIONS.SLAVEOF, masterip.getHostAddress(), port);
return this;
}
public RedisRunner slaveof(String masterip, int port) {
addConfigOption(REDIS_OPTIONS.SLAVEOF, masterip, port);
return this;
}
public RedisRunner masterauth(String masterauth) {
addConfigOption(REDIS_OPTIONS.MASTERAUTH, masterauth);
return this;
}
public RedisRunner slaveServeStaleData(boolean slaveServeStaleData) {
addConfigOption(REDIS_OPTIONS.SLAVE_SERVE_STALE_DATA, convertBoolean(slaveServeStaleData));
return this;
}
public RedisRunner slaveReadOnly(boolean slaveReadOnly) {
addConfigOption(REDIS_OPTIONS.SLAVE_READ_ONLY, convertBoolean(slaveReadOnly));
return this;
}
public RedisRunner replDisklessSync(boolean replDisklessSync) {
addConfigOption(REDIS_OPTIONS.REPL_DISKLESS_SYNC, convertBoolean(replDisklessSync));
return this;
}
public RedisRunner replDisklessSyncDelay(long replDisklessSyncDelay) {
addConfigOption(REDIS_OPTIONS.REPL_DISKLESS_SYNC_DELAY, replDisklessSyncDelay);
return this;
}
public RedisRunner replPingSlavePeriod(long replPingSlavePeriod) {
addConfigOption(REDIS_OPTIONS.REPL_PING_SLAVE_PERIOD, replPingSlavePeriod);
return this;
}
public RedisRunner replTimeout(long replTimeout) {
addConfigOption(REDIS_OPTIONS.REPL_TIMEOUT, replTimeout);
return this;
}
public RedisRunner replDisableTcpNodelay(boolean replDisableTcpNodelay) {
addConfigOption(REDIS_OPTIONS.REPL_DISABLE_TCP_NODELAY, convertBoolean(replDisableTcpNodelay));
return this;
}
public RedisRunner replBacklogSize(String replBacklogSize) {
addConfigOption(REDIS_OPTIONS.REPL_BACKLOG_SIZE, replBacklogSize);
return this;
}
public RedisRunner replBacklogTtl(long replBacklogTtl) {
addConfigOption(REDIS_OPTIONS.REPL_BACKLOG_TTL, replBacklogTtl);
return this;
}
public RedisRunner slavePriority(long slavePriority) {
addConfigOption(REDIS_OPTIONS.SLAVE_PRIORITY, slavePriority);
return this;
}
public RedisRunner minSlaveToWrite(long minSlaveToWrite) {
addConfigOption(REDIS_OPTIONS.MIN_SLAVES_TO_WRITE, minSlaveToWrite);
return this;
}
public RedisRunner minSlaveMaxLag(long minSlaveMaxLag) {
addConfigOption(REDIS_OPTIONS.MIN_SLAVES_MAX_LAG, minSlaveMaxLag);
return this;
}
public RedisRunner requirepass(String requirepass) {
addConfigOption(REDIS_OPTIONS.REQUIREPASS, requirepass);
return this;
}
public RedisRunner renameCommand(String renameCommand) {
addConfigOption(REDIS_OPTIONS.RENAME_COMMAND, renameCommand);
return this;
}
public RedisRunner maxclients(long maxclients) {
addConfigOption(REDIS_OPTIONS.MAXCLIENTS, maxclients);
return this;
}
public RedisRunner maxmemory(String maxmemory) {
addConfigOption(REDIS_OPTIONS.MAXMEMORY, maxmemory);
return this;
}
public RedisRunner maxmemoryPolicy(MAX_MEMORY_POLICY_OPTIONS maxmemoryPolicy) {
addConfigOption(REDIS_OPTIONS.MAXMEMORY, maxmemoryPolicy.toString());
return this;
}
public RedisRunner maxmemorySamples(long maxmemorySamples) {
addConfigOption(REDIS_OPTIONS.MAXMEMORY, maxmemorySamples);
return this;
}
public RedisRunner appendonly(boolean appendonly) {
addConfigOption(REDIS_OPTIONS.APPENDONLY, convertBoolean(appendonly));
return this;
}
public RedisRunner appendfilename(String appendfilename) {
addConfigOption(REDIS_OPTIONS.APPENDFILENAME, appendfilename);
return this;
}
public RedisRunner appendfsync(APPEND_FSYNC_MODE_OPTIONS appendfsync) {
addConfigOption(REDIS_OPTIONS.APPENDFSYNC, appendfsync.toString());
return this;
}
public RedisRunner noAppendfsyncOnRewrite(boolean noAppendfsyncOnRewrite) {
addConfigOption(REDIS_OPTIONS.NO_APPENDFSYNC_ON_REWRITE, convertBoolean(noAppendfsyncOnRewrite));
return this;
}
public RedisRunner autoAofRewritePercentage(int autoAofRewritePercentage) {
addConfigOption(REDIS_OPTIONS.AUTO_AOF_REWRITE_PERCENTAGE, autoAofRewritePercentage);
return this;
}
public RedisRunner autoAofRewriteMinSize(String autoAofRewriteMinSize) {
addConfigOption(REDIS_OPTIONS.AUTO_AOF_REWRITE_MIN_SIZE, autoAofRewriteMinSize);
return this;
}
public RedisRunner aofLoadTruncated(boolean aofLoadTruncated) {
addConfigOption(REDIS_OPTIONS.AOF_LOAD_TRUNCATED, convertBoolean(aofLoadTruncated));
return this;
}
public RedisRunner luaTimeLimit(long luaTimeLimit) {
addConfigOption(REDIS_OPTIONS.AOF_LOAD_TRUNCATED, luaTimeLimit);
return this;
}
public RedisRunner clusterEnabled(boolean clusterEnabled) {
addConfigOption(REDIS_OPTIONS.CLUSTER_ENABLED, convertBoolean(clusterEnabled));
return this;
}
public RedisRunner clusterConfigFile(String clusterConfigFile) {
addConfigOption(REDIS_OPTIONS.CLUSTER_CONFIG_FILE, clusterConfigFile);
this.clusterFile = clusterConfigFile;
return this;
}
public RedisRunner clusterNodeTimeout(long clusterNodeTimeout) {
addConfigOption(REDIS_OPTIONS.CLUSTER_NODE_TIMEOUT, clusterNodeTimeout);
return this;
}
public RedisRunner clusterSlaveValidityFactor(long clusterSlaveValidityFactor) {
addConfigOption(REDIS_OPTIONS.CLUSTER_SLAVE_VALIDITY_FACTOR, clusterSlaveValidityFactor);
return this;
}
public RedisRunner clusterMigrationBarrier(long clusterMigrationBarrier) {
addConfigOption(REDIS_OPTIONS.CLUSTER_MIGRATION_BARRIER, clusterMigrationBarrier);
return this;
}
public RedisRunner clusterRequireFullCoverage(boolean clusterRequireFullCoverage) {
addConfigOption(REDIS_OPTIONS.CLUSTER_REQUIRE_FULL_COVERAGE, convertBoolean(clusterRequireFullCoverage));
return this;
}
public RedisRunner slowlogLogSlowerThan(long slowlogLogSlowerThan) {
addConfigOption(REDIS_OPTIONS.SLOWLOG_LOG_SLOWER_THAN, slowlogLogSlowerThan);
return this;
}
public RedisRunner slowlogMaxLen(long slowlogMaxLen) {
addConfigOption(REDIS_OPTIONS.SLOWLOG_MAX_LEN, slowlogMaxLen);
return this;
}
public RedisRunner latencyMonitorThreshold(long latencyMonitorThreshold) {
addConfigOption(REDIS_OPTIONS.LATENCY_MONITOR_THRESHOLD, latencyMonitorThreshold);
return this;
}
public RedisRunner notifyKeyspaceEvents(KEYSPACE_EVENTS_OPTIONS... notifyKeyspaceEvents) {
String existing = this.options.getOrDefault(REDIS_OPTIONS.NOTIFY_KEYSPACE_EVENTS, "");
String events = Arrays.stream(notifyKeyspaceEvents)
.collect(StringBuilder::new, StringBuilder::append, StringBuilder::append).toString();
addConfigOption(REDIS_OPTIONS.NOTIFY_KEYSPACE_EVENTS,
existing.contains(events)
? existing
: (existing + events));
return this;
}
public RedisRunner hashMaxZiplistEntries(long hashMaxZiplistEntries) {
addConfigOption(REDIS_OPTIONS.HASH_MAX_ZIPLIST_ENTRIES, hashMaxZiplistEntries);
return this;
}
public RedisRunner hashMaxZiplistValue(long hashMaxZiplistValue) {
addConfigOption(REDIS_OPTIONS.HASH_MAX_ZIPLIST_VALUE, hashMaxZiplistValue);
return this;
}
public RedisRunner listMaxZiplistEntries(long listMaxZiplistEntries) {
addConfigOption(REDIS_OPTIONS.LIST_MAX_ZIPLIST_ENTRIES, listMaxZiplistEntries);
return this;
}
public RedisRunner listMaxZiplistValue(long listMaxZiplistValue) {
addConfigOption(REDIS_OPTIONS.LIST_MAX_ZIPLIST_VALUE, listMaxZiplistValue);
return this;
}
public RedisRunner setMaxIntsetEntries(long setMaxIntsetEntries) {
addConfigOption(REDIS_OPTIONS.SET_MAX_INTSET_ENTRIES, setMaxIntsetEntries);
return this;
}
public RedisRunner zsetMaxZiplistEntries(long zsetMaxZiplistEntries) {
addConfigOption(REDIS_OPTIONS.ZSET_MAX_ZIPLIST_ENTRIES, zsetMaxZiplistEntries);
return this;
}
public RedisRunner zsetMaxZiplistValue(long zsetMaxZiplistValue) {
addConfigOption(REDIS_OPTIONS.ZSET_MAX_ZIPLIST_VALUE, zsetMaxZiplistValue);
return this;
}
public RedisRunner hllSparseMaxBytes(long hllSparseMaxBytes) {
addConfigOption(REDIS_OPTIONS.HLL_SPARSE_MAX_BYTES, hllSparseMaxBytes);
return this;
}
public RedisRunner activerehashing(boolean activerehashing) {
addConfigOption(REDIS_OPTIONS.ACTIVEREHASHING, convertBoolean(activerehashing));
return this;
}
public RedisRunner clientOutputBufferLimit$Normal(String hardLimit, String softLimit, long softSeconds) {
addConfigOption(REDIS_OPTIONS.CLIENT_OUTPUT_BUFFER_LIMIT$NORMAL, hardLimit, softLimit, softSeconds);
return this;
}
public RedisRunner clientOutputBufferLimit$Slave(String hardLimit, String softLimit, long softSeconds) {
addConfigOption(REDIS_OPTIONS.CLIENT_OUTPUT_BUFFER_LIMIT$SLAVE, hardLimit, softLimit, softSeconds);
return this;
}
public RedisRunner clientOutputBufferLimit$Pubsub(String hardLimit, String softLimit, long softSeconds) {
addConfigOption(REDIS_OPTIONS.CLIENT_OUTPUT_BUFFER_LIMIT$PUBSUB, hardLimit, softLimit, softSeconds);
return this;
}
public RedisRunner hz(int hz) {
addConfigOption(REDIS_OPTIONS.HZ, hz);
return this;
}
public RedisRunner aofRewriteIncrementalFsync(boolean aofRewriteIncrementalFsync) {
addConfigOption(REDIS_OPTIONS.AOF_REWRITE_INCREMENTAL_FSYNC, convertBoolean(aofRewriteIncrementalFsync));
return this;
}
public RedisRunner protectedMode(boolean protectedMode) {
addConfigOption(REDIS_OPTIONS.PROTECTED_MODE, convertBoolean(protectedMode));
return this;
}
public RedisRunner sentinel() {
sentinelFile = "sentinel_conf_" + UUID.randomUUID() + ".conf";
return this;
}
public RedisRunner sentinelAnnounceIP(String sentinelAnnounceIP) {
addConfigOption(REDIS_OPTIONS.SENTINEL$ANNOUNCE_IP, sentinelAnnounceIP);
return this;
}
public RedisRunner sentinelAnnouncePort(int sentinelAnnouncePort) {
addConfigOption(REDIS_OPTIONS.SENTINEL$ANNOUNCE_PORT, sentinelAnnouncePort);
return this;
}
public RedisRunner sentinelMonitor(String masterName, String ip, int port, int quorum) {
addConfigOption(REDIS_OPTIONS.SENTINEL$MONITOR, masterName, ip, port, quorum);
return this;
}
public RedisRunner sentinelAuthPass(String masterName, String password) {
addConfigOption(REDIS_OPTIONS.SENTINEL$AUTH_PASS, masterName, password);
return this;
}
public RedisRunner sentinelDownAfterMilliseconds(String masterName, long downAfterMilliseconds) {
addConfigOption(REDIS_OPTIONS.SENTINEL$DOWN_AFTER_MILLISECONDS, masterName, downAfterMilliseconds);
return this;
}
public RedisRunner sentinelParallelSyncs(String masterName, int numSlaves) {
addConfigOption(REDIS_OPTIONS.SENTINEL$PARALLEL_SYNCS, masterName, numSlaves);
return this;
}
public RedisRunner sentinelFailoverTimeout(String masterName, long failoverTimeout) {
addConfigOption(REDIS_OPTIONS.SENTINEL$FAILOVER_TIMEOUT, masterName, failoverTimeout);
return this;
}
public RedisRunner sentinelNotificationScript(String masterName, String scriptPath) {
addConfigOption(REDIS_OPTIONS.SENTINEL$NOTIFICATION_SCRIPT, masterName, scriptPath);
return this;
}
public RedisRunner sentinelClientReconfigScript(String masterName, String scriptPath) {
addConfigOption(REDIS_OPTIONS.SENTINEL$CLIENT_RECONFIG_SCRIPT, masterName, scriptPath);
return this;
}
public boolean isSentinel() {
return this.sentinelFile != null;
}
public boolean isCluster() {
return this.clusterFile != null;
}
public boolean isRandomDir() {
return this.randomDir;
}
public boolean isNosave() {
return this.nosave;
}
public String defaultDir() {
return this.defaultDir;
}
public String dir() {
return isRandomDir() ? defaultDir() : this.path;
}
public String getInitialBindAddr() {
return bindAddr.size() > 0 ? bindAddr.get(0) : "localhost";
}
public boolean deleteDBfileDir() {
File f = new File(defaultDir);
if (f.exists()) {
System.out.println("REDIS RUNNER: Deleting directory " + f.getAbsolutePath());
return f.delete();
}
return false;
}
public boolean deleteSentinelFile() {
File f = new File(defaultDir + File.separator + sentinelFile);
if (f.exists()) {
System.out.println("REDIS RUNNER: Deleting sentinel config file " + f.getAbsolutePath());
return f.delete();
}
return false;
}
public boolean deleteClusterFile() {
File f = new File(clusterFile);
if (f.exists() && isRandomDir()) {
System.out.println("REDIS RUNNER: Deleting cluster config file " + f.getAbsolutePath());
return f.delete();
}
return false;
}
private void makeRandomDefaultDir() {
File f = new File(RedissonRuntimeEnvironment.tempDir + File.separator + UUID.randomUUID());
if (f.exists()) {
makeRandomDefaultDir();
} else {
System.out.println("REDIS RUNNER: Making directory " + f.getAbsolutePath());
f.mkdirs();
this.defaultDir = f.getAbsolutePath();
if (RedissonRuntimeEnvironment.isWindows) {
defaultDir = defaultDir.replace("\\", "\\\\");
}
}
}
public static final | KEYSPACE_EVENTS_OPTIONS |
java | apache__camel | components/camel-braintree/src/test/java/org/apache/camel/component/braintree/AbstractBraintreeTestSupport.java | {
"start": 1908,
"end": 7499
} | class ____ extends CamelTestSupport {
private static final Logger LOG = LoggerFactory.getLogger(AbstractBraintreeTestSupport.class);
private static final String TEST_OPTIONS_PROPERTIES = "/test-options.properties";
private AuthenticationType authenticationType;
private BraintreeGateway gateway;
protected AbstractBraintreeTestSupport() {
this.gateway = null;
}
@Override
protected CamelContext createCamelContext() throws Exception {
final CamelContext context = super.createCamelContext();
// add BraintreeComponent to Camel context
final BraintreeComponent component = new BraintreeComponent(context);
component.setConfiguration(buildBraintreeConfiguration(context));
context.addComponent("braintree", component);
return context;
}
protected void addOptionIfMissing(Map<String, Object> options, String name, String envName) {
if (!options.containsKey(name)) {
String value = System.getenv(envName);
if (ObjectHelper.isNotEmpty(value)) {
options.put(name, value);
}
}
}
protected BraintreeConfiguration buildBraintreeConfiguration(CamelContext context) throws Exception {
final Properties properties = TestSupport.loadExternalProperties(getClass(), TEST_OPTIONS_PROPERTIES);
Map<String, Object> options = new HashMap<>();
for (Map.Entry<Object, Object> entry : properties.entrySet()) {
options.put(entry.getKey().toString(), entry.getValue());
}
AuthenticationType configurationType = getAuthenticationType();
LOG.info(String.format("Test using %s configuration profile", configurationType));
switch (configurationType) {
case PUBLIC_PRIVATE_KEYS:
addOptionIfMissing(options, "environment", "CAMEL_BRAINTREE_ENVIRONMENT");
addOptionIfMissing(options, "merchantId", "CAMEL_BRAINTREE_MERCHANT_ID");
addOptionIfMissing(options, "publicKey", "CAMEL_BRAINTREE_PUBLIC_KEY");
addOptionIfMissing(options, "privateKey", "CAMEL_BRAINTREE_PRIVATE_KEY");
options.remove("accessToken");
options.remove("clientId");
break;
case ACCESS_TOKEN:
addOptionIfMissing(options, "accessToken", "CAMEL_BRAINTREE_ACCESS_TOKEN");
options.remove("environment");
options.remove("merchantId");
options.remove("publicKey");
options.remove("privateKey");
break;
default:
throw new IllegalArgumentException("Unsupported configuration type");
}
final BraintreeConfiguration configuration = new BraintreeConfiguration();
configuration.setHttpLogLevel(BraintreeLogHandler.DEFAULT_LOGGER_LEVEL.getName());
configuration.setHttpLogName(BraintreeLogHandler.DEFAULT_LOGGER_NAME);
PropertyBindingSupport.bindProperties(context, configuration, options);
return configuration;
}
protected AuthenticationType getAuthenticationType() {
if (authenticationType == null) {
authenticationType = parseAuthenticationType();
}
return authenticationType;
}
protected boolean checkAuthenticationType(AuthenticationType authenticationType) {
return getAuthenticationType().equals(authenticationType);
}
private AuthenticationType parseAuthenticationType() {
String authenticationTypeString = System.getProperty("braintreeAuthenticationType");
if (authenticationTypeString != null) {
AuthenticationType authenticationType = AuthenticationType.valueOf(authenticationTypeString);
if (authenticationType != null) {
return authenticationType;
}
}
return AuthenticationType.PUBLIC_PRIVATE_KEYS;
}
@SuppressWarnings("unchecked")
protected <T> T requestBodyAndHeaders(String endpointUri, Object body, Map<String, Object> headers)
throws CamelExecutionException {
return (T) template().requestBodyAndHeaders(endpointUri, body, headers);
}
protected <T> T requestBodyAndHeaders(String endpointUri, Object body, Map<String, Object> headers, Class<T> type)
throws CamelExecutionException {
return template().requestBodyAndHeaders(endpointUri, body, headers, type);
}
@SuppressWarnings("unchecked")
protected <T> T requestBody(String endpoint, Object body) throws CamelExecutionException {
return (T) template().requestBody(endpoint, body);
}
protected <T> T requestBody(String endpoint, Object body, Class<T> type) throws CamelExecutionException {
return template().requestBody(endpoint, body, type);
}
protected static BraintreeApiName getApiName(Class<? extends ApiMethod> apiMethod) {
return BraintreeApiCollection.getCollection().getApiName(apiMethod);
}
protected static String getApiNameAsString(Class<? extends ApiMethod> apiMethod) {
return getApiName(apiMethod).getName();
}
protected final BraintreeComponent getBraintreeComponent() {
return (BraintreeComponent) context().getComponent("braintree");
}
protected final synchronized BraintreeGateway getGateway() {
if (gateway == null) {
gateway = getBraintreeComponent().getConfiguration().newBraintreeGateway();
}
return gateway;
}
protected final | AbstractBraintreeTestSupport |
java | quarkusio__quarkus | independent-projects/arc/runtime/src/main/java/io/quarkus/arc/impl/AbstractInstanceHandle.java | {
"start": 447,
"end": 3000
} | class ____<T> implements InstanceHandle<T> {
@SuppressWarnings("rawtypes")
private static final AtomicIntegerFieldUpdater<AbstractInstanceHandle> DESTROYED_UPDATER = AtomicIntegerFieldUpdater
.newUpdater(AbstractInstanceHandle.class, "destroyed");
protected final InjectableBean<T> bean;
private final CreationalContext<T> creationalContext;
private final CreationalContext<?> parentCreationalContext;
private final Consumer<T> destroyLogic;
// values: 0="not destroyed", 1="destroyed"
private volatile int destroyed;
AbstractInstanceHandle(InjectableBean<T> bean, CreationalContext<T> creationalContext,
CreationalContext<?> parentCreationalContext, Consumer<T> destroyLogic) {
this.bean = bean;
this.creationalContext = creationalContext;
this.parentCreationalContext = parentCreationalContext;
this.destroyLogic = destroyLogic;
}
@Override
public T get() {
if (destroyed != 0) {
throw new IllegalStateException("Instance already destroyed");
}
return instanceInternal();
}
@Override
public InjectableBean<T> getBean() {
return bean;
}
protected abstract boolean isInstanceCreated();
protected abstract T instanceInternal();
@Override
public void destroy() {
if (isInstanceCreated() && DESTROYED_UPDATER.compareAndSet(this, 0, 1)) {
if (destroyLogic != null) {
destroyLogic.accept(instanceInternal());
} else if (bean != null) {
if (bean.getScope().equals(Dependent.class)) {
destroyInternal();
} else {
InjectableContext context = Arc.requireContainer().getActiveContext(bean.getScope());
if (context == null) {
throw new ContextNotActiveException(
"Cannot destroy instance of " + bean + " - no active context found for: " + bean.getScope());
}
context.destroy(bean);
}
}
}
}
protected void destroyInternal() {
if (parentCreationalContext != null) {
parentCreationalContext.release();
} else {
bean.destroy(instanceInternal(), creationalContext);
}
}
@Override
public String toString() {
return getClass().getSimpleName() + " [bean=" + bean + ", destroyed=" + (destroyed != 0) + "]";
}
}
| AbstractInstanceHandle |
java | alibaba__druid | core/src/main/java/com/alibaba/druid/sql/dialect/clickhouse/ast/ClickhouseColumnCodec.java | {
"start": 227,
"end": 1078
} | class ____ extends ClickhouseColumnConstraint {
private SQLExpr expr;
public ClickhouseColumnCodec() {
super();
}
public SQLExpr getExpr() {
return expr;
}
public void setExpr(SQLExpr expr) {
this.expr = expr;
}
@Override
protected void accept0(SQLASTVisitor v) {
if (v instanceof CKASTVisitor) {
CKASTVisitor vv = (CKASTVisitor) v;
if (vv.visit(this)) {
acceptChild(vv, expr);
}
vv.endVisit(this);
}
}
@Override
public ClickhouseColumnCodec clone() {
ClickhouseColumnCodec clickhouseColumnCodec = new ClickhouseColumnCodec();
super.cloneTo(clickhouseColumnCodec);
clickhouseColumnCodec.setExpr(expr.clone());
return clickhouseColumnCodec;
}
}
| ClickhouseColumnCodec |
java | apache__camel | components/camel-wal/src/main/java/org/apache/camel/component/wal/TransactionLog.java | {
"start": 1749,
"end": 2726
} | class ____ {
private final int index;
private final int layer;
private final boolean isRollingOver;
public LayerInfo(int index, int layer, boolean isRollingOver) {
this.index = index;
this.layer = layer;
this.isRollingOver = isRollingOver;
}
public int getIndex() {
return index;
}
public int getLayer() {
return layer;
}
public boolean isRollingOver() {
return isRollingOver;
}
@Override
public String toString() {
return "LayerInfo{" +
"index=" + index +
", layer=" + layer +
", isRollingOver=" + isRollingOver +
'}';
}
}
/**
* A container for an in-memory entry that can be used to determine the layer where the record is as well as obtain
* it's entry.
*/
static | LayerInfo |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/component/dataset/BigDataSetManualTest.java | {
"start": 1175,
"end": 2130
} | class ____ extends ContextTestSupport {
protected final SimpleDataSet dataSet = new SimpleDataSet(20000);
@Test
public void testDataSet() throws Exception {
// data set will itself set its assertions so we should just
// assert that all mocks is ok
MockEndpoint.assertIsSatisfied(context, 30, TimeUnit.SECONDS);
}
@Override
protected Registry createCamelRegistry() throws Exception {
Registry answer = super.createCamelRegistry();
answer.bind("foo", dataSet);
return answer;
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
// start this first to make sure the "direct:foo" consumer is
// ready
from("direct:foo").to("dataset:foo");
from("dataset:foo").to("direct:foo");
}
};
}
}
| BigDataSetManualTest |
java | apache__kafka | storage/src/main/java/org/apache/kafka/server/log/remote/storage/RemoteLogManagerConfig.java | {
"start": 3794,
"end": 4292
} | class ____ of <code>RemoteStorageManager</code> implementation.";
public static final String REMOTE_STORAGE_MANAGER_CLASS_PATH_PROP = "remote.log.storage.manager.class.path";
public static final String REMOTE_STORAGE_MANAGER_CLASS_PATH_DOC = "Class path of the <code>RemoteStorageManager</code> implementation. " +
"If specified, the RemoteStorageManager implementation and its dependent libraries will be loaded by a dedicated " +
"classloader which searches this | name |
java | eclipse-vertx__vert.x | vertx-core/src/main/java/io/vertx/core/spi/VerticleFactory.java | {
"start": 2069,
"end": 2686
} | class ____
* @param promise the promise to complete with the result
* @deprecated deprecated, instead implement {@link #createVerticle2(String, ClassLoader, Promise)}
*/
@Deprecated
default void createVerticle(String verticleName, ClassLoader classLoader, Promise<Callable<Verticle>> promise) {
promise.fail("Should not be called, now deploys deployable");
}
/**
* Create a verticle instance. If this method is likely to be slow then make sure it is run on a
* worker thread by {@link Vertx#executeBlocking}.
*
* @param verticleName The verticle name
* @param classLoader The | loader |
java | apache__camel | components/camel-spring-parent/camel-spring-xml/src/test/java/org/apache/camel/spring/config/DualCamelContextEndpointOutsideTest.java | {
"start": 1337,
"end": 2664
} | class ____ extends SpringTestSupport {
@Override
protected AbstractXmlApplicationContext createApplicationContext() {
return new ClassPathXmlApplicationContext("org/apache/camel/spring/config/DualCamelContextEndpointOutsideTest.xml");
}
@Test
public void testDualCamelContextEndpoint() throws Exception {
CamelContext camelA = applicationContext.getBean("camel-A", CamelContext.class);
assertNotNull(camelA);
CamelContext camelB = applicationContext.getBean("camel-B", CamelContext.class);
assertNotNull(camelB);
MockEndpoint mockA = camelA.getEndpoint("mock:mock1", MockEndpoint.class);
mockA.expectedBodiesReceived("Hello A");
MockEndpoint mockB = camelB.getEndpoint("mock:mock2", MockEndpoint.class);
mockB.expectedBodiesReceived("Hello B");
ProducerTemplate producer1 = camelA.createProducerTemplate();
producer1.sendBody("direct:start1", "Hello A");
ProducerTemplate producer2 = camelB.createProducerTemplate();
producer2.sendBody("direct:start2", "Hello B");
// make sure we properly stop the services we created
ServiceHelper.stopService(producer1, producer2);
mockA.assertIsSatisfied();
mockB.assertIsSatisfied();
}
}
| DualCamelContextEndpointOutsideTest |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestWhitelistBasedResolver.java | {
"start": 1125,
"end": 6506
} | class ____ {
public static final Map<String, String> SASL_PRIVACY_PROPS =
WhitelistBasedResolver.getSaslProperties(new Configuration());
@Test
public void testFixedVariableAndLocalWhiteList() throws IOException {
String[] fixedIps = {"10.119.103.112", "10.221.102.0/23"};
TestFileBasedIPList.createFileWithEntries ("fixedwhitelist.txt", fixedIps);
String[] variableIps = {"10.222.0.0/16", "10.113.221.221"};
TestFileBasedIPList.createFileWithEntries ("variablewhitelist.txt", variableIps);
Configuration conf = new Configuration();
conf.set(WhitelistBasedResolver.HADOOP_SECURITY_SASL_FIXEDWHITELIST_FILE ,
"fixedwhitelist.txt");
conf.setBoolean(WhitelistBasedResolver.HADOOP_SECURITY_SASL_VARIABLEWHITELIST_ENABLE,
true);
conf.setLong(WhitelistBasedResolver.HADOOP_SECURITY_SASL_VARIABLEWHITELIST_CACHE_SECS,
1);
conf.set(WhitelistBasedResolver.HADOOP_SECURITY_SASL_VARIABLEWHITELIST_FILE ,
"variablewhitelist.txt");
WhitelistBasedResolver wqr = new WhitelistBasedResolver ();
wqr.setConf(conf);
assertEquals (wqr.getDefaultProperties(),
wqr.getServerProperties(InetAddress.getByName("10.119.103.112")));
assertEquals (SASL_PRIVACY_PROPS, wqr.getServerProperties("10.119.103.113"));
assertEquals (wqr.getDefaultProperties(), wqr.getServerProperties("10.221.103.121"));
assertEquals (SASL_PRIVACY_PROPS, wqr.getServerProperties("10.221.104.0"));
assertEquals (wqr.getDefaultProperties(), wqr.getServerProperties("10.222.103.121"));
assertEquals (SASL_PRIVACY_PROPS, wqr.getServerProperties("10.223.104.0"));
assertEquals (wqr.getDefaultProperties(), wqr.getServerProperties("10.113.221.221"));
assertEquals (SASL_PRIVACY_PROPS, wqr.getServerProperties("10.113.221.222"));
assertEquals (wqr.getDefaultProperties(), wqr.getServerProperties("127.0.0.1"));
TestFileBasedIPList.removeFile("fixedwhitelist.txt");
TestFileBasedIPList.removeFile("variablewhitelist.txt");
}
/**
* Add a bunch of subnets and IPSs to the whitelist
* Check for inclusion in whitelist
* Check for exclusion from whitelist
*/
@Test
public void testFixedAndLocalWhiteList() throws IOException {
String[] fixedIps = {"10.119.103.112", "10.221.102.0/23"};
TestFileBasedIPList.createFileWithEntries ("fixedwhitelist.txt", fixedIps);
String[] variableIps = {"10.222.0.0/16", "10.113.221.221"};
TestFileBasedIPList.createFileWithEntries ("variablewhitelist.txt", variableIps);
Configuration conf = new Configuration();
conf.set(WhitelistBasedResolver.HADOOP_SECURITY_SASL_FIXEDWHITELIST_FILE ,
"fixedwhitelist.txt");
conf.setBoolean(WhitelistBasedResolver.HADOOP_SECURITY_SASL_VARIABLEWHITELIST_ENABLE,
false);
conf.setLong(WhitelistBasedResolver.HADOOP_SECURITY_SASL_VARIABLEWHITELIST_CACHE_SECS,
100);
conf.set(WhitelistBasedResolver.HADOOP_SECURITY_SASL_VARIABLEWHITELIST_FILE ,
"variablewhitelist.txt");
WhitelistBasedResolver wqr = new WhitelistBasedResolver();
wqr.setConf(conf);
assertEquals (wqr.getDefaultProperties(),
wqr.getServerProperties(InetAddress.getByName("10.119.103.112")));
assertEquals(SASL_PRIVACY_PROPS, wqr.getServerProperties("10.119.103.113"));
assertEquals(wqr.getDefaultProperties(), wqr.getServerProperties("10.221.103.121"));
assertEquals(SASL_PRIVACY_PROPS, wqr.getServerProperties("10.221.104.0"));
assertEquals(SASL_PRIVACY_PROPS, wqr.getServerProperties("10.222.103.121"));
assertEquals(SASL_PRIVACY_PROPS, wqr.getServerProperties("10.223.104.0"));
assertEquals(SASL_PRIVACY_PROPS, wqr.getServerProperties("10.113.221.221"));
assertEquals(SASL_PRIVACY_PROPS, wqr.getServerProperties("10.113.221.222"));
assertEquals(wqr.getDefaultProperties(), wqr.getServerProperties("127.0.0.1"));
TestFileBasedIPList.removeFile("fixedwhitelist.txt");
TestFileBasedIPList.removeFile("variablewhitelist.txt");
}
/**
* Add a bunch of subnets and IPSs to the whitelist
* Check for inclusion in whitelist with a null value
*/
@Test
public void testNullIPAddress() throws IOException {
String[] fixedIps = {"10.119.103.112", "10.221.102.0/23"};
TestFileBasedIPList.createFileWithEntries ("fixedwhitelist.txt", fixedIps);
String[] variableIps = {"10.222.0.0/16", "10.113.221.221"};
TestFileBasedIPList.createFileWithEntries ("variablewhitelist.txt", variableIps);
Configuration conf = new Configuration();
conf.set(WhitelistBasedResolver.HADOOP_SECURITY_SASL_FIXEDWHITELIST_FILE ,
"fixedwhitelist.txt");
conf.setBoolean(WhitelistBasedResolver.HADOOP_SECURITY_SASL_VARIABLEWHITELIST_ENABLE,
true);
conf.setLong(WhitelistBasedResolver.HADOOP_SECURITY_SASL_VARIABLEWHITELIST_CACHE_SECS,
100);
conf.set(WhitelistBasedResolver.HADOOP_SECURITY_SASL_VARIABLEWHITELIST_FILE ,
"variablewhitelist.txt");
WhitelistBasedResolver wqr = new WhitelistBasedResolver();
wqr.setConf(conf);
assertEquals (SASL_PRIVACY_PROPS, wqr.getServerProperties((InetAddress)null));
assertEquals (SASL_PRIVACY_PROPS, wqr.getServerProperties((String)null));
TestFileBasedIPList.removeFile("fixedwhitelist.txt");
TestFileBasedIPList.removeFile("variablewhitelist.txt");
}
}
| TestWhitelistBasedResolver |
java | elastic__elasticsearch | build-tools/src/testFixtures/java/org/elasticsearch/gradle/internal/test/InMemoryJavaCompiler.java | {
"start": 5167,
"end": 5312
} | class ____ the given name and source code.
*
* @param className The name of the class
* @param sourceCode The source code for the | with |
java | quarkusio__quarkus | extensions/websockets-next/deployment/src/test/java/io/quarkus/websockets/next/test/client/ClientEndpointTest.java | {
"start": 6209,
"end": 7402
} | class ____ {
static final CountDownLatch OPEN_LATCH = new CountDownLatch(1);
static final AtomicReference<Connection> CONNECTION = new AtomicReference<>();
static final CountDownLatch PING_LATCH = new CountDownLatch(1);
static final CountDownLatch MESSAGE_LATCH = new CountDownLatch(2);
static final List<String> MESSAGES = new CopyOnWriteArrayList<>();
static final CountDownLatch CLOSED_LATCH = new CountDownLatch(1);
@OnOpen
void onOpen(Connection connection) {
CONNECTION.set(connection);
OPEN_LATCH.countDown();
}
@OnTextMessage
void onMessage(@PathParam String name, String message, WebSocketClientConnection connection) {
if (!name.equals(connection.pathParam("name"))) {
throw new IllegalArgumentException();
}
MESSAGES.add(name + ":" + message);
MESSAGE_LATCH.countDown();
}
@OnPingMessage
void onPing(Buffer message) {
PING_LATCH.countDown();
}
@OnClose
void close() {
CLOSED_LATCH.countDown();
}
}
}
| ClientEndpoint |
java | redisson__redisson | redisson/src/test/java/org/redisson/RedissonKeysTest.java | {
"start": 992,
"end": 18223
} | class ____ extends RedisDockerTest {
@Test
public void testNewObjectListener() {
testWithParams(redisson -> {
AtomicReference<String> ref = new AtomicReference<>();
int id = redisson.getKeys().addListener(new NewObjectListener() {
@Override
public void onNew(String name) {
ref.set(name);
}
});
redisson.getBucket("test").set("123");
Awaitility.waitAtMost(Duration.ofMillis(500)).untilAsserted(() -> {
assertThat(ref.get()).isEqualTo("test");
});
}, NOTIFY_KEYSPACE_EVENTS, "En");
}
@Test
public void testDeleteListener() {
testWithParams(redisson -> {
AtomicReference<String> ref = new AtomicReference<>();
int id = redisson.getKeys().addListener(new DeletedObjectListener() {
@Override
public void onDeleted(String name) {
ref.set(name);
}
});
redisson.getBucket("test").set("123");
redisson.getBucket("test").delete();
Awaitility.waitAtMost(Duration.ofMillis(500)).untilAsserted(() -> {
assertThat(ref.getAndSet(null)).isEqualTo("test");
});
redisson.getKeys().removeListener(id);
redisson.getBucket("test2").set("123");
redisson.getBucket("test2").delete();
try {
Thread.sleep(100);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
assertThat(ref.get()).isNull();
}, NOTIFY_KEYSPACE_EVENTS, "Eg");
}
@Test
public void testFlushListener() throws InterruptedException {
Config c = redisson.getConfig();
c.setProtocol(Protocol.RESP3);
RedissonClient r = Redisson.create(c);
AtomicInteger counter = new AtomicInteger();
int id = r.getKeys().addListener((FlushListener) address -> {
assertThat(address).isNotNull();
counter.incrementAndGet();
});
int id2 = r.getKeys().addListener((FlushListener) address -> {
assertThat(address).isNotNull();
counter.incrementAndGet();
});
r.getKeys().flushall();
Awaitility.waitAtMost(Duration.ofMillis(500)).untilAsserted(() -> {
assertThat(counter.get()).isEqualTo(2);
});
r.getKeys().removeListener(id);
r.getKeys().removeListener(id2);
r.getKeys().flushall();
Thread.sleep(100);
assertThat(counter.get()).isEqualTo(2);
r.shutdown();
}
@Test
public void testReadKeys() {
for (int i = 0; i < 10; i++) {
redisson.getBucket("test" + i).set(i);
}
Iterable<String> keys = redisson.getKeys().getKeysWithLimit(3);
assertThat(keys).hasSize(3);
Iterable<String> keys2 = redisson.getKeys().getKeysWithLimit(20);
assertThat(keys2).hasSize(10);
}
@Test
public void testReadKeysPattern() {
for (int i = 0; i < 10; i++) {
redisson.getBucket("test" + i).set(i);
}
for (int i = 0; i < 5; i++) {
redisson.getBucket("red" + i).set(i);
}
Iterable<String> keys = redisson.getKeys().getKeysWithLimit("test*", 3);
assertThat(keys).hasSize(3);
Iterable<String> keys2 = redisson.getKeys().getKeysWithLimit("test*", 20);
assertThat(keys2).hasSize(10);
Iterable<String> keys3 = redisson.getKeys().getKeysWithLimit("red*", 3);
assertThat(keys3).hasSize(3);
Iterable<String> keys4 = redisson.getKeys().getKeysWithLimit("red*", 10);
assertThat(keys4).hasSize(5);
}
@Test
public void testTouch() {
redisson.getSet("test").add("1");
redisson.getSet("test10").add("1");
assertThat(redisson.getKeys().touch("test")).isEqualTo(1);
assertThat(redisson.getKeys().touch("test", "test2")).isEqualTo(1);
assertThat(redisson.getKeys().touch("test3", "test2")).isEqualTo(0);
assertThat(redisson.getKeys().touch("test3", "test10", "test")).isEqualTo(2);
}
@Test
public void testExistsInCluster() {
testInCluster(redisson -> {
int size = 10000;
List<String> list = new ArrayList<>();
for (int i = 0; i < size; i++) {
list.add("test" + i);
redisson.getBucket("test" + i).set(i);
}
assertThat(redisson.getKeys().countExists("test1", "test2", "test34", "test45", "asdfl;jasf")).isEqualTo(4);
long deletedSize = redisson.getKeys().delete(list.toArray(new String[list.size()]));
assertThat(deletedSize).isEqualTo(size);
});
}
@Test
public void testExists() {
redisson.getSet("test").add("1");
redisson.getSet("test10").add("1");
assertThat(redisson.getKeys().countExists("test")).isEqualTo(1);
assertThat(redisson.getKeys().countExists("test", "test2")).isEqualTo(1);
assertThat(redisson.getKeys().countExists("test3", "test2")).isEqualTo(0);
assertThat(redisson.getKeys().countExists("test3", "test10", "test")).isEqualTo(2);
}
@Test
public void testType() {
redisson.getSet("test").add("1");
assertThat(redisson.getKeys().getType("test")).isEqualTo(RType.SET);
assertThat(redisson.getKeys().getType("test1")).isNull();
}
@Test
public void testEmptyKeys() {
Iterable<String> keysIterator = redisson.getKeys().getKeys(KeysScanOptions.defaults().pattern("test*").chunkSize(10));
// Iterable<String> keysIterator = redisson.getKeys().getKeysByPattern("test*", 10);
assertThat(keysIterator.iterator().hasNext()).isFalse();
}
@Test
public void testKeysByPattern() {
testInCluster(redisson -> {
int size = 10000;
for (int i = 0; i < size; i++) {
redisson.getBucket("test" + i).set(i);
}
assertThat(redisson.getKeys().count()).isEqualTo(size);
Long noOfKeysDeleted = 0L;
int chunkSize = 20;
// Iterable<String> keysIterator = redisson.getKeys().getKeysByPattern("test*", chunkSize);
Iterable<String> keysIterator = redisson.getKeys().getKeys(KeysScanOptions.defaults().pattern("test*").chunkSize(chunkSize));
Set<String> keys = new HashSet<>();
for (String key : keysIterator) {
keys.add(key);
if (keys.size() % chunkSize == 0) {
long res = redisson.getKeys().delete(keys.toArray(new String[keys.size()]));
assertThat(res).isEqualTo(chunkSize);
noOfKeysDeleted += res;
keys.clear();
}
}
//Delete remaining keys
if (!keys.isEmpty()) {
noOfKeysDeleted += redisson.getKeys().delete(keys.toArray(new String[keys.size()]));
}
assertThat(noOfKeysDeleted).isEqualTo(size);
});
}
@Test
public void testKeysType() {
redisson.getBucket("test1").set("someValue");
redisson.getBucket("test2").set("someValue");
RStream<Object, Object> s = redisson.getStream("test12");
s.createGroup(StreamCreateGroupArgs.name("g").makeStream());
s.add(StreamAddArgs.entry("1", "2"));
Iterable<String> iter = redisson.getKeys().getKeys(KeysScanOptions.defaults().type(RType.OBJECT));
assertThat(iter).containsOnly("test1", "test2");
Iterable<String> iter2 = redisson.getKeys().getKeys(KeysScanOptions.defaults().type(RType.STREAM));
assertThat(iter2).containsOnly("test12");
}
@Test
public void testKeysIterablePattern() {
redisson.getBucket("test1").set("someValue");
redisson.getBucket("test2").set("someValue");
redisson.getBucket("test12").set("someValue");
Iterator<String> iterator = redisson.getKeys().getKeys(KeysScanOptions.defaults().pattern("test?")).iterator();
assertThat(iterator).toIterable().containsOnly("test1", "test2");
}
@Test
public void testKeysIterable() {
Set<String> keys = new HashSet<String>();
for (int i = 0; i < 115; i++) {
String key = "key" + Math.random();
RBucket<String> bucket = redisson.getBucket(key);
keys.add(key);
bucket.set("someValue");
}
Iterator<String> iterator = redisson.getKeys().getKeys().iterator();
for (; iterator.hasNext();) {
String key = iterator.next();
keys.remove(redisson.getConfig().useSingleServer().getNameMapper().map(key));
iterator.remove();
}
Assertions.assertEquals(0, keys.size());
Assertions.assertFalse(redisson.getKeys().getKeys().iterator().hasNext());
}
@Test
public void testKeysAsyncIterable() {
Set<String> keys = new HashSet<String>();
for (int i = 0; i < 115; i++) {
String key = "key" + Math.random();
RBucket<String> bucket = redisson.getBucket(key);
bucket.set("someValue");
}
AsyncIterator<String> iterator = redisson.getKeys().getKeysAsync();
CompletionStage<Void> f = iterateAll(iterator, keys);
f.toCompletableFuture().join();
assertThat(redisson.getKeys().count()).isEqualTo(keys.size());
}
@Test
public void testKeysAsyncIterablePattern() {
Set<String> keys = new HashSet<String>();
for (int i = 0; i < 115; i++) {
String key = "key" + Math.random();
RBucket<String> bucket = redisson.getBucket(key);
bucket.set("someValue");
}
int limit = 23;
AsyncIterator<String> iterator = redisson.getKeys().getKeysAsync(KeysScanOptions.defaults().limit(limit));
CompletionStage<Void> f = iterateAll(iterator, keys);
f.toCompletableFuture().join();
assertThat(limit).isEqualTo(keys.size());
}
public CompletionStage<Void> iterateAll(AsyncIterator<String> iterator, Set<String> keys) {
return iterator.hasNext().thenCompose(r -> {
if (r) {
return iterator.next().thenCompose(k -> {
keys.add(k);
return iterateAll(iterator, keys);
});
} else {
return CompletableFuture.completedFuture(null);
}
});
}
@Test
public void testRandomKey() {
RBucket<String> bucket = redisson.getBucket("test1");
bucket.set("someValue1");
RBucket<String> bucket2 = redisson.getBucket("test2");
bucket2.set("someValue2");
assertThat(redisson.getKeys().randomKey()).isIn("test1", "test2");
redisson.getKeys().delete("test1");
Assertions.assertEquals("test2", redisson.getKeys().randomKey());
redisson.getKeys().flushdb();
Assertions.assertNull(redisson.getKeys().randomKey());
}
@Test
public void testDeleteInCluster() {
testInCluster(redisson -> {
int size = 10000;
List<String> list = new ArrayList<>();
for (int i = 0; i < size; i++) {
list.add("test" + i);
redisson.getBucket("test" + i).set(i);
}
long deletedSize = redisson.getKeys().delete(list.toArray(new String[list.size()]));
assertThat(deletedSize).isEqualTo(size);
});
}
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void testDeleteByPattern(boolean unlinkMode) {
RBucket<String> bucket = redisson.getBucket("test0");
bucket.set("someValue3");
assertThat(bucket.isExists()).isTrue();
RBucket<String> bucket2 = redisson.getBucket("test9");
bucket2.set("someValue4");
assertThat(bucket.isExists()).isTrue();
RMap<String, String> map = redisson.getMap("test2");
map.fastPut("1", "2");
assertThat(map.isExists()).isTrue();
RMap<String, String> map2 = redisson.getMap("test3");
map2.fastPut("1", "5");
assertThat(map2.isExists()).isTrue();
if(unlinkMode) {
assertThat(redisson.getKeys().unlinkByPattern("test?")).isEqualTo(4);
assertThat(redisson.getKeys().unlinkByPattern("test?")).isZero();
} else {
assertThat(redisson.getKeys().deleteByPattern("test?")).isEqualTo(4);
assertThat(redisson.getKeys().deleteByPattern("test?")).isZero();
}
assertThat(redisson.getKeys().count()).isZero();
}
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void testDeleteByPatternBatch(boolean unlinkMode) {
RBucket<String> bucket = redisson.getBucket("test0");
bucket.set("someValue3");
assertThat(bucket.isExists()).isTrue();
RBucket<String> bucket2 = redisson.getBucket("test9");
bucket2.set("someValue4");
assertThat(bucket.isExists()).isTrue();
RMap<String, String> map = redisson.getMap("test2");
map.fastPut("1", "2");
assertThat(map.isExists()).isTrue();
RMap<String, String> map2 = redisson.getMap("test3");
map2.fastPut("1", "5");
assertThat(map2.isExists()).isTrue();
RBatch batch = redisson.createBatch();
if(unlinkMode) {
batch.getKeys().unlinkByPatternAsync("test?");
} else {
batch.getKeys().deleteByPatternAsync("test?");
}
BatchResult<?> r = batch.execute();
Assertions.assertEquals(4L, r.getResponses().get(0));
}
@Test
public void testFindKeys() {
RBucket<String> bucket = redisson.getBucket("test1");
bucket.set("someValue");
RMap<String, String> map = redisson.getMap("test2");
map.fastPut("1", "2");
Iterable<String> keys = redisson.getKeys().getKeys(KeysScanOptions.defaults().pattern("test?"));
// Iterable<String> keys = redisson.getKeys().getKeysByPattern("test?");
assertThat(keys).containsOnly("test1", "test2");
Iterable<String> keys2 = redisson.getKeys().getKeys(KeysScanOptions.defaults().pattern("test"));
// Iterable<String> keys2 = redisson.getKeys().getKeysByPattern("test");
assertThat(keys2).isEmpty();
}
@Test
public void testMassDelete() {
RBucket<String> bucket0 = redisson.getBucket("test0");
bucket0.set("someValue");
RBucket<String> bucket1 = redisson.getBucket("test1");
bucket1.set("someValue");
RBucket<String> bucket2 = redisson.getBucket("test2");
bucket2.set("someValue");
RBucket<String> bucket3 = redisson.getBucket("test3");
bucket3.set("someValue");
RBucket<String> bucket10 = redisson.getBucket("test10");
bucket10.set("someValue");
RBucket<String> bucket12 = redisson.getBucket("test12");
bucket12.set("someValue");
RMap<String, String> map = redisson.getMap("map2");
map.fastPut("1", "2");
Assertions.assertEquals(7, redisson.getKeys().delete("test0", "test1", "test2", "test3", "test10", "test12", "map2"));
Assertions.assertEquals(0, redisson.getKeys().delete("test0", "test1", "test2", "test3", "test10", "test12", "map2"));
}
@Test
public void testCount() {
Long s = redisson.getKeys().count();
assertThat(s).isEqualTo(0);
redisson.getBucket("test1").set(23);
s = redisson.getKeys().count();
assertThat(s).isEqualTo(1);
}
@Test
public void testMigrate(){
String password = "123456";
GenericContainer<?> redis = createRedis("--requirepass " + password);
redis.start();
Config config = createConfigWithPassword(redis, password);
RedissonClient r2 = Redisson.create(config);
List<String> keys = Arrays.asList("{testMigrate}key1", "{testMigrate}key2");
for (String key : keys) {
redisson.getBucket(key).set(key);
r2.getBucket(key).delete();
}
redisson.getKeys()
.migrate(MigrateArgs.keys(keys.toArray(new String[0]))
.host("host.docker.internal")
.port(redis.getFirstMappedPort())
.database(0)
.timeout(5000)
.password(password)
.mode(MigrateMode.COPY_AND_REPLACE));
for (String key : keys) {
assertThat(key.equals(r2.getBucket(key).get()));
}
}
protected static Config createConfigWithPassword(GenericContainer<?> container, String password) {
Config config = new Config();
config.setProtocol(protocol);
config.useSingleServer()
.setAddress("redis://127.0.0.1:" + container.getFirstMappedPort())
.setPassword(password)
;
return config;
}
}
| RedissonKeysTest |
java | elastic__elasticsearch | x-pack/plugin/sql/qa/server/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/CliErrorsIT.java | {
"start": 490,
"end": 882
} | class ____ extends ErrorsTestCase {
@Override
protected Settings restClientSettings() {
return RestSqlIT.securitySettings();
}
@Override
protected String getProtocol() {
return RestSqlIT.SSL_ENABLED ? "https" : "http";
}
@Override
protected SecurityConfig securityConfig() {
return CliSecurityIT.adminSecurityConfig();
}
}
| CliErrorsIT |
java | spring-projects__spring-framework | spring-test/src/main/java/org/springframework/test/context/junit4/statements/RunAfterTestExecutionCallbacks.java | {
"start": 1628,
"end": 3491
} | class ____ extends Statement {
private final Statement next;
private final Object testInstance;
private final Method testMethod;
private final TestContextManager testContextManager;
/**
* Construct a new {@code RunAfterTestExecutionCallbacks} statement.
* @param next the next {@code Statement} in the execution chain
* @param testInstance the current test instance (never {@code null})
* @param testMethod the test method which has just been executed on the
* test instance
* @param testContextManager the TestContextManager upon which to call
* {@code afterTestExecution()}
*/
public RunAfterTestExecutionCallbacks(Statement next, Object testInstance, Method testMethod,
TestContextManager testContextManager) {
this.next = next;
this.testInstance = testInstance;
this.testMethod = testMethod;
this.testContextManager = testContextManager;
}
/**
* Evaluate the next {@link Statement} in the execution chain (typically an
* instance of {@link RunBeforeTestExecutionCallbacks}), catching any exceptions
* thrown, and then invoke {@link TestContextManager#afterTestExecution} supplying
* the first caught exception (if any).
* <p>If the invocation of {@code afterTestExecution()} throws an exception, that
* exception will also be tracked. Multiple exceptions will be combined into a
* {@link MultipleFailureException}.
*/
@Override
public void evaluate() throws Throwable {
Throwable testException = null;
List<Throwable> errors = new ArrayList<>();
try {
this.next.evaluate();
}
catch (Throwable ex) {
testException = ex;
errors.add(ex);
}
try {
this.testContextManager.afterTestExecution(this.testInstance, this.testMethod, testException);
}
catch (Throwable ex) {
errors.add(ex);
}
MultipleFailureException.assertEmpty(errors);
}
}
| RunAfterTestExecutionCallbacks |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/streaming/api/operators/InputSelection.java | {
"start": 7486,
"end": 9425
} | class ____ {
private long inputMask = 0;
/**
* Returns a {@code Builder} that uses the input mask of the specified {@code selection} as
* the initial mask.
*/
public static Builder from(InputSelection selection) {
Builder builder = new Builder();
builder.inputMask = selection.inputMask;
return builder;
}
/**
* Selects an input identified by the given {@code inputId}.
*
* @param inputId the input id numbered starting from 1 to 64, and `1` indicates the first
* input. Specially, `-1` indicates all inputs.
* @return a reference to this object.
*/
public Builder select(int inputId) {
if (inputId > 0 && inputId <= 64) {
inputMask |= 1L << (inputId - 1);
} else if (inputId == -1L) {
inputMask = -1L;
} else {
throw new IllegalArgumentException(
"The inputId must be in the range of 1 to 64, or be -1.");
}
return this;
}
/**
* Build normalized mask, if all inputs were manually selected, inputMask will be normalized
* to -1.
*/
public InputSelection build(int inputCount) {
long allSelectedMask = (1L << inputCount) - 1;
if (inputMask == allSelectedMask) {
inputMask = -1;
} else if (inputMask > allSelectedMask) {
throw new IllegalArgumentException(
String.format(
"inputMask [%d] selects more than expected number of inputs [%d]",
inputMask, inputCount));
}
return build();
}
public InputSelection build() {
return new InputSelection(inputMask);
}
}
}
| Builder |
java | spring-cloud__spring-cloud-gateway | spring-cloud-gateway-server-webflux/src/main/java/org/springframework/cloud/gateway/filter/factory/RewriteLocationResponseHeaderGatewayFilterFactory.java | {
"start": 4057,
"end": 7263
} | class ____
extends AbstractGatewayFilterFactory<RewriteLocationResponseHeaderGatewayFilterFactory.Config> {
private static final String STRIP_VERSION_KEY = "stripVersion";
private static final String LOCATION_HEADER_NAME_KEY = "locationHeaderName";
private static final String HOST_VALUE_KEY = "hostValue";
private static final String PROTOCOLS_KEY = "protocols";
private static final Pattern VERSIONED_PATH = Pattern.compile("^/v\\d+/.*");
private static final String DEFAULT_PROTOCOLS = "https?|ftps?";
private static final Pattern DEFAULT_HOST_PORT = compileHostPortPattern(DEFAULT_PROTOCOLS);
private static final Pattern DEFAULT_HOST_PORT_VERSION = compileHostPortVersionPattern(DEFAULT_PROTOCOLS);
public RewriteLocationResponseHeaderGatewayFilterFactory() {
super(Config.class);
}
private static Pattern compileHostPortPattern(String protocols) {
return Pattern.compile("(?<=^(?:" + protocols + ")://)[^:/]+(?::\\d+)?(?=/)");
}
private static Pattern compileHostPortVersionPattern(String protocols) {
return Pattern.compile("(?<=^(?:" + protocols + ")://)[^:/]+(?::\\d+)?(?:/v\\d+)?(?=/)");
}
@Override
public List<String> shortcutFieldOrder() {
return Arrays.asList(STRIP_VERSION_KEY, LOCATION_HEADER_NAME_KEY, HOST_VALUE_KEY, PROTOCOLS_KEY);
}
@Override
public GatewayFilter apply(Config config) {
return new GatewayFilter() {
@Override
public Mono<Void> filter(ServerWebExchange exchange, GatewayFilterChain chain) {
return chain.filter(exchange).then(Mono.fromRunnable(() -> rewriteLocation(exchange, config)));
}
@Override
public String toString() {
// @formatter:off
return filterToStringCreator(
RewriteLocationResponseHeaderGatewayFilterFactory.this)
.append("stripVersion", config.stripVersion)
.append("locationHeaderName", config.locationHeaderName)
.append("hostValue", config.hostValue)
.append("protocols", config.protocols)
.toString();
// @formatter:on
}
};
}
void rewriteLocation(ServerWebExchange exchange, Config config) {
final String location = exchange.getResponse().getHeaders().getFirst(config.getLocationHeaderName());
final String host = config.getHostValue() != null ? config.getHostValue()
: exchange.getRequest().getHeaders().getFirst(HttpHeaders.HOST);
final String path = exchange.getRequest().getURI().getPath();
if (location != null && host != null) {
final String fixedLocation = fixedLocation(location, host, path, config.getStripVersion(),
config.getHostPortPattern(), config.getHostPortVersionPattern());
exchange.getResponse().getHeaders().set(config.getLocationHeaderName(), fixedLocation);
}
}
String fixedLocation(String location, String host, String path, StripVersion stripVersion, Pattern hostPortPattern,
Pattern hostPortVersionPattern) {
final boolean doStrip = StripVersion.ALWAYS_STRIP.equals(stripVersion)
|| (StripVersion.AS_IN_REQUEST.equals(stripVersion) && !VERSIONED_PATH.matcher(path).matches());
final Pattern pattern = doStrip ? hostPortVersionPattern : hostPortPattern;
return pattern.matcher(location).replaceFirst(host);
}
public | RewriteLocationResponseHeaderGatewayFilterFactory |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/functional/TaskPool.java | {
"start": 4229,
"end": 4604
} | interface ____<I, E extends Exception> {
/**
* process a failure.
* @param item item the task is processing
* @param exception the exception which was raised.
* @throws E Exception of type E
*/
void run(I item, Exception exception) throws E;
}
/**
* Builder for task execution.
* @param <I> item type
*/
public static | FailureTask |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/ref/RefTest_for_huanxige.java | {
"start": 824,
"end": 2459
} | class ____ implements Serializable {
private Long id;
private Long processInsId;
private String name;
private String displayName;
private Integer status;
private String type;
private Boolean success;
private Boolean tail;
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public Long getProcessInsId() {
return processInsId;
}
public void setProcessInsId(Long processInsId) {
this.processInsId = processInsId;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getDisplayName() {
return displayName;
}
public void setDisplayName(String displayName) {
this.displayName = displayName;
}
public Integer getStatus() {
return status;
}
public void setStatus(Integer status) {
this.status = status;
}
public String getType() {
return type;
}
public void setType(String type) {
this.type = type;
}
public Boolean getSuccess() {
return success;
}
public void setSuccess(Boolean success) {
this.success = success;
}
public Boolean getTail() {
return tail;
}
public void setTail(Boolean tail) {
this.tail = tail;
}
}
}
| ProcessNodeInstanceDto |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/common/bytes/ReleasableBytesReferenceStreamInputTests.java | {
"start": 1201,
"end": 5770
} | class ____ extends AbstractStreamTests {
private final List<ReleasableBytesReference> opened = new ArrayList<>();
private final Set<Exception> openTraces = Collections.newSetFromMap(new IdentityHashMap<>());
@After
public void checkAllClosed() throws Exception {
// Decrement one time to simulate closing the netty buffer after the stream is captured
for (ReleasableBytesReference r : opened) {
r.decRef();
}
// Now that we've decremented, we expect all streams will have been closed
Iterator<Exception> iter = openTraces.iterator();
if (iter.hasNext()) {
throw new Exception("didn't close iterator - cause is opening location", iter.next());
}
for (ReleasableBytesReference r : opened) {
assertThat(r.hasReferences(), equalTo(false));
}
}
@Override
protected StreamInput getStreamInput(BytesReference bytesReference) throws IOException {
// Grab an exception at the opening location, so we can throw it if we don't close
Exception trace = new Exception();
openTraces.add(trace);
ReleasableBytesReference counted = new ReleasableBytesReference(bytesReference, () -> openTraces.remove(trace));
/*
* Grab a reference to the bytes ref we're using, so we can close it after the
* test to simulate the underlying netter butter closing after the test.
*/
opened.add(counted);
return counted.streamInput();
}
public void testBigIntArrayLivesAfterReleasableIsDecremented() throws IOException {
IntArray testData = BigArrays.NON_RECYCLING_INSTANCE.newIntArray(1, false);
testData.set(0, 1);
BytesStreamOutput out = new BytesStreamOutput();
testData.writeTo(out);
ReleasableBytesReference ref = wrapAsReleasable(out.bytes());
try (IntArray in = IntArray.readFrom(ref.streamInput())) {
ref.decRef();
assertThat(ref.hasReferences(), equalTo(true));
assertThat(in.size(), equalTo(testData.size()));
assertThat(in.get(0), equalTo(1));
}
assertThat(ref.hasReferences(), equalTo(false));
}
public void testBigDoubleArrayLivesAfterReleasableIsDecremented() throws IOException {
DoubleArray testData = BigArrays.NON_RECYCLING_INSTANCE.newDoubleArray(1, false);
testData.set(0, 1);
BytesStreamOutput out = new BytesStreamOutput();
testData.writeTo(out);
ReleasableBytesReference ref = wrapAsReleasable(out.bytes());
try (DoubleArray in = DoubleArray.readFrom(ref.streamInput())) {
ref.decRef();
assertThat(ref.hasReferences(), equalTo(true));
assertThat(in.size(), equalTo(testData.size()));
assertThat(in.get(0), equalTo(1.0));
}
assertThat(ref.hasReferences(), equalTo(false));
}
public void testBigLongArrayLivesAfterReleasableIsDecremented() throws IOException {
LongArray testData = BigArrays.NON_RECYCLING_INSTANCE.newLongArray(1, false);
testData.set(0, 1);
BytesStreamOutput out = new BytesStreamOutput();
testData.writeTo(out);
ReleasableBytesReference ref = wrapAsReleasable(out.bytes());
try (LongArray in = LongArray.readFrom(ref.streamInput())) {
ref.decRef();
assertThat(ref.hasReferences(), equalTo(true));
assertThat(in.size(), equalTo(testData.size()));
assertThat(in.get(0), equalTo(1L));
}
assertThat(ref.hasReferences(), equalTo(false));
}
public void testBigByteArrayLivesAfterReleasableIsDecremented() throws IOException {
ByteArray testData = BigArrays.NON_RECYCLING_INSTANCE.newByteArray(1, false);
testData.set(0L, (byte) 1);
BytesStreamOutput out = new BytesStreamOutput();
testData.writeTo(out);
ReleasableBytesReference ref = wrapAsReleasable(out.bytes());
try (ByteArray in = ByteArray.readFrom(ref.streamInput())) {
ref.decRef();
assertThat(ref.hasReferences(), equalTo(true));
assertThat(in.size(), equalTo(testData.size()));
assertThat(in.get(0), equalTo((byte) 1));
}
assertThat(ref.hasReferences(), equalTo(false));
}
public static ReleasableBytesReference wrapAsReleasable(BytesReference bytesReference) {
return new ReleasableBytesReference(bytesReference, () -> {});
}
}
| ReleasableBytesReferenceStreamInputTests |
java | google__error-prone | check_api/src/main/java/com/google/errorprone/bugpatterns/BugChecker.java | {
"start": 14501,
"end": 14629
} | interface ____ extends Suppressible {
Description matchClass(ClassTree tree, VisitorState state);
}
public | ClassTreeMatcher |
java | quarkusio__quarkus | tcks/microprofile-config/src/test/java/io/quarkus/tck/config/ConfigApplicationArchiveProcessor.java | {
"start": 661,
"end": 857
} | class ____ a
* library jar too. As a result, we get ambiguous dependency when trying to instantiate the test class. Unfortunately, we can't
* use an ApplicationArchiveProcessor to remove the test | to |
java | elastic__elasticsearch | build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/InternalClusterTestPlugin.java | {
"start": 947,
"end": 2029
} | class ____ implements Plugin<Project> {
public static final String SOURCE_SET_NAME = "internalClusterTest";
@Override
public void apply(Project project) {
project.getRootProject().getPlugins().apply(GlobalBuildInfoPlugin.class);
var buildParams = loadBuildParams(project).get();
TaskProvider<Test> internalClusterTest = GradleUtils.addTestSourceSet(project, SOURCE_SET_NAME);
internalClusterTest.configure(task -> {
// Set GC options to mirror defaults in jvm.options
if (buildParams.getRuntimeJavaVersion().get().compareTo(JavaVersion.VERSION_14) < 0) {
task.jvmArgs("-XX:+UseConcMarkSweepGC", "-XX:CMSInitiatingOccupancyFraction=75", "-XX:+UseCMSInitiatingOccupancyOnly");
} else {
task.jvmArgs("-XX:+UseG1GC");
}
});
// TODO: fix usages of IT tests depending on Tests methods so this extension is not necessary
GradleUtils.extendSourceSet(project, SourceSet.TEST_SOURCE_SET_NAME, SOURCE_SET_NAME);
}
}
| InternalClusterTestPlugin |
java | apache__dubbo | dubbo-config/dubbo-config-spring/src/main/java/org/apache/dubbo/config/spring/reference/ReferenceBeanBuilder.java | {
"start": 1650,
"end": 2386
} | class ____ {
*
* @Bean
* public ReferenceBean<HelloService> helloService() {
* return new ReferenceBeanBuilder()
* .setGroup("demo")
* .build();
* }
*
* @Bean
* public ReferenceBean<HelloService> helloService2() {
* return new ReferenceBean();
* }
*
* @Bean
* public ReferenceBean<GenericService> genericHelloService() {
* return new ReferenceBeanBuilder()
* .setGroup("demo")
* .setInterface(HelloService.class)
* .build();
* }
*
* }
* </pre>
*
* Step 2: Inject ReferenceBean by @Autowired
* <pre class="code">
* public | ReferenceConfiguration |
java | apache__kafka | clients/src/test/java/org/apache/kafka/common/config/AbstractConfigTest.java | {
"start": 35797,
"end": 38253
} | class ____ extends AbstractConfig {
static final Class<?> DEFAULT_CLASS = FakeMetricsReporter.class;
static final Class<?> VISIBLE_CLASS = JmxReporter.class;
static final Class<?> RESTRICTED_CLASS = ConfiguredFakeMetricsReporter.class;
private static final ConfigDef CONFIG;
static {
CONFIG = new ConfigDef().define("class.prop", Type.CLASS, DEFAULT_CLASS, Importance.HIGH, "docs")
.define("list.prop", Type.LIST, Collections.singletonList(DEFAULT_CLASS), Importance.HIGH, "docs");
}
public ClassTestConfig() {
super(CONFIG, new Properties());
}
public ClassTestConfig(Object classPropOverride, Object listPropOverride) {
super(CONFIG, overrideProps(classPropOverride, listPropOverride));
}
void checkInstances(Class<?> expectedClassPropClass, Class<?>... expectedListPropClasses) {
assertEquals(expectedClassPropClass, getConfiguredInstance("class.prop", MetricsReporter.class).getClass());
List<?> list = getConfiguredInstances("list.prop", MetricsReporter.class);
for (int i = 0; i < list.size(); i++)
assertEquals(expectedListPropClasses[i], list.get(i).getClass());
}
static void testOverrides() {
ClassTestConfig testConfig1 = new ClassTestConfig(RESTRICTED_CLASS, Arrays.asList(VISIBLE_CLASS, RESTRICTED_CLASS));
testConfig1.checkInstances(RESTRICTED_CLASS, VISIBLE_CLASS, RESTRICTED_CLASS);
ClassTestConfig testConfig2 = new ClassTestConfig(RESTRICTED_CLASS.getName(), Arrays.asList(VISIBLE_CLASS.getName(), RESTRICTED_CLASS.getName()));
testConfig2.checkInstances(RESTRICTED_CLASS, VISIBLE_CLASS, RESTRICTED_CLASS);
ClassTestConfig testConfig3 = new ClassTestConfig(RESTRICTED_CLASS.getName(), VISIBLE_CLASS.getName() + "," + RESTRICTED_CLASS.getName());
testConfig3.checkInstances(RESTRICTED_CLASS, VISIBLE_CLASS, RESTRICTED_CLASS);
}
private static Map<String, Object> overrideProps(Object classProp, Object listProp) {
Map<String, Object> props = new HashMap<>();
if (classProp != null)
props.put("class.prop", classProp);
if (listProp != null)
props.put("list.prop", listProp);
return props;
}
}
private static | ClassTestConfig |
java | quarkusio__quarkus | extensions/devui/deployment-spi/src/main/java/io/quarkus/devui/spi/page/FooterPageBuildItem.java | {
"start": 92,
"end": 446
} | class ____ extends AbstractPageBuildItem {
public FooterPageBuildItem() {
super();
}
public FooterPageBuildItem(PageBuilder... pageBuilder) {
super(pageBuilder);
}
public FooterPageBuildItem(String customIdentifier, PageBuilder... pageBuilder) {
super(customIdentifier, pageBuilder);
}
}
| FooterPageBuildItem |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/PlacementFactory.java | {
"start": 1879,
"end": 2363
} | class ____ the specified name could be found.
*/
public static PlacementRule getPlacementRule(String ruleStr,
Configuration conf)
throws ClassNotFoundException {
Class<? extends PlacementRule> ruleClass = Class.forName(ruleStr)
.asSubclass(PlacementRule.class);
LOG.info("Using PlacementRule implementation - " + ruleClass);
return ReflectionUtils.newInstance(ruleClass, conf);
}
/**
* Create a new {@link PlacementRule} based on the rule | with |
java | quarkusio__quarkus | extensions/websockets-next/deployment/src/test/java/io/quarkus/websockets/next/test/telemetry/MetricsAsserter.java | {
"start": 1716,
"end": 11234
} | enum ____ {
INBOUND,
OUTBOUND
}
int serverSentCount;
int serverReceivedCount;
int serverReceivedCountBytes;
int serverSentCountBytes;
int clientReceivedCount;
int clientSentCount;
int clientSentCountBytes;
int clientReceivedCountBytes;
int clientErrorCount;
int serverErrorCount;
int clientConnectionOpenedCount;
int serverConnectionOpenedCount;
void assertTotalMetricsForAllPaths(int serverErrorsDelta, int clientErrorsDelta, int serverReceivedCountDelta,
int serverReceivedCountBytesDelta, int serverSentCountBytesDelta, int clientSentCountDelta,
int clientSentCountBytesDelta, int clientReceivedCountBytesDelta, int serverSentCountDelta,
int clientReceivedCountDelta) {
serverReceivedCount += serverReceivedCountDelta;
serverReceivedCountBytes += serverReceivedCountBytesDelta;
serverSentCount += serverSentCountDelta;
serverSentCountBytes += serverSentCountBytesDelta;
clientSentCount += clientSentCountDelta;
clientSentCountBytes += clientSentCountBytesDelta;
clientReceivedCount += clientReceivedCountDelta;
clientReceivedCountBytes += clientReceivedCountBytesDelta;
clientErrorCount += clientErrorsDelta;
serverErrorCount += serverErrorsDelta;
Awaitility.await().atMost(Duration.ofSeconds(12)).untilAsserted(() -> getMetrics()
.body(assertServerConnectionOpenedTotal(serverConnectionOpenedCount))
.body(assertClientConnectionOpenedTotal(clientConnectionOpenedCount))
.body(assertServerErrorTotal(serverErrorCount))
.body(assertClientErrorTotal(clientErrorCount))
.body(assertClientMessagesCountReceived(clientReceivedCount))
.body(assertClientMessagesCountBytesSent(clientSentCountBytes))
.body(assertClientMessagesCountBytesReceived(clientReceivedCountBytes))
.body(assertClientMessagesCountSent(clientSentCount))
.body(assertServerMessagesCountBytesReceived(serverReceivedCountBytes))
.body(assertServerMessagesCountBytesSent(serverSentCountBytes))
.body(assertServerMessagesCountReceived(serverReceivedCount))
.body(assertServerMessagesCountSent(serverSentCount)));
}
static Matcher<String> assertClientMessagesCountBytesSent(String path, int clientSentCountBytes) {
return assertTotal(CLIENT_BYTES, clientSentCountBytes, path, OUTBOUND);
}
static Matcher<String> assertClientMessagesCountBytesReceived(String path, int clientReceivedCountBytes) {
return assertTotal(CLIENT_BYTES, clientReceivedCountBytes, path, INBOUND);
}
static Matcher<String> assertClientMessagesCountSent(String path, int clientSentCount) {
return assertTotal(CLIENT_COUNT, clientSentCount, path, OUTBOUND);
}
static Matcher<String> assertClientMessagesCountReceived(int clientSentCount) {
return assertTotal(CLIENT_COUNT, clientSentCount, null, INBOUND);
}
static Matcher<String> assertClientMessagesCountReceived(String path, int clientSentCount) {
return assertTotal(CLIENT_COUNT, clientSentCount, path, INBOUND);
}
static Matcher<String> assertServerMessagesCountSent(int serverReceivedCount) {
return assertServerMessagesCountSent(null, serverReceivedCount);
}
static Matcher<String> assertServerMessagesCountSent(String path, int serverReceivedCount) {
return assertTotal(SERVER_COUNT, serverReceivedCount, path, OUTBOUND);
}
static Matcher<String> assertServerMessagesCountReceived(String path, int serverReceivedCount) {
return assertTotal(SERVER_COUNT, serverReceivedCount, path, INBOUND);
}
static Matcher<String> assertServerMessagesCountBytesSent(String path, int serverSentCountBytes) {
return assertTotal(SERVER_BYTES, serverSentCountBytes, path, OUTBOUND);
}
static Matcher<String> assertServerMessagesCountBytesReceived(String path, int serverReceivedCountBytes) {
return assertTotal(SERVER_BYTES, serverReceivedCountBytes, path, INBOUND);
}
static Matcher<String> assertServerErrorTotal(String path, int serverErrorCount) {
return assertTotal(SERVER_ENDPOINT_COUNT_ERRORS, serverErrorCount, path, null);
}
static Matcher<String> assertClientErrorTotal(String path, int clientErrorCount) {
return assertTotal(CLIENT_ENDPOINT_COUNT_ERRORS, clientErrorCount, path, null);
}
static Matcher<String> assertServerConnectionOpeningFailedTotal(String path, int serverConnectionOpeningFailedCount) {
return assertTotal(SERVER_CONNECTION_ON_OPEN_ERROR, serverConnectionOpeningFailedCount, path, null);
}
static Matcher<String> assertServerConnectionOpenedTotal(int serverConnectionOpenedCount) {
return assertServerConnectionOpenedTotal(null, serverConnectionOpenedCount);
}
static Matcher<String> assertClientConnectionOpenedTotal(int clientConnectionOpenedCount) {
return assertClientConnectionOpenedTotal(null, clientConnectionOpenedCount);
}
static Matcher<String> assertClientMessagesCountBytesSent(int clientSentCountBytes) {
return assertClientMessagesCountBytesSent(null, clientSentCountBytes);
}
static Matcher<String> assertClientMessagesCountBytesReceived(int clientReceivedCountBytes) {
return assertClientMessagesCountBytesReceived(null, clientReceivedCountBytes);
}
static Matcher<String> assertClientMessagesCountSent(int clientSentCount) {
return assertClientMessagesCountSent(null, clientSentCount);
}
static Matcher<String> assertServerMessagesCountReceived(int serverReceivedCount) {
return assertServerMessagesCountReceived(null, serverReceivedCount);
}
static Matcher<String> assertServerMessagesCountBytesSent(int serverSentCountBytes) {
return assertServerMessagesCountBytesSent(null, serverSentCountBytes);
}
static Matcher<String> assertServerMessagesCountBytesReceived(int serverReceivedCountBytes) {
return assertServerMessagesCountBytesReceived(null, serverReceivedCountBytes);
}
static Matcher<String> assertServerErrorTotal(int serverErrorCount) {
return assertServerErrorTotal(null, serverErrorCount);
}
static Matcher<String> assertClientErrorTotal(int clientErrorCount) {
return assertClientErrorTotal(null, clientErrorCount);
}
static Matcher<String> assertServerConnectionOpenedTotal(String path, int serverConnectionOpenedCount) {
return assertTotal(SERVER_CONNECTION_OPENED, serverConnectionOpenedCount, path, null);
}
static Matcher<String> assertClientConnectionOpenedTotal(String path, int clientConnectionOpenedCount) {
return assertTotal(CLIENT_CONNECTION_OPENED, clientConnectionOpenedCount, path, null);
}
static Matcher<String> assertServerConnectionClosedTotal(String path, int serverConnectionClosedCount) {
return assertTotal(SERVER_CONNECTION_CLOSED, serverConnectionClosedCount, path, null);
}
static Matcher<String> assertClientConnectionClosedTotal(String path, int clientConnectionClosedCount) {
return assertTotal(CLIENT_CONNECTION_CLOSED, clientConnectionClosedCount, path, null);
}
private static Matcher<String> assertTotal(String metricKey, int expectedCount, String path, Direction direction) {
var prometheusFormatKey = "%s_total".formatted(toPrometheusFormat(metricKey));
return new BaseMatcher<>() {
@Override
public boolean matches(Object o) {
if (o instanceof String str) {
var sameKeyMultipleTags = str
.lines()
.filter(l -> l.contains(prometheusFormatKey))
.filter(l -> path == null || l.contains(path)) // filter by path
.filter(l -> direction == null || l.contains(direction.toString()))
.map(String::trim)
.toList();
// quarkus_websockets_server_messages_count_received_total{<<some path tag>>} 2.0
// quarkus_websockets_server_messages_count_received_total{<<different path tag>>} 5.0
// = 7
var totalSum = sameKeyMultipleTags
.stream()
.map(l -> l.substring(l.lastIndexOf(" ")).trim())
.map(Double::parseDouble)
.map(Double::intValue)
.reduce(0, Integer::sum);
return totalSum == expectedCount;
}
return false;
}
@Override
public void describeTo(Description description) {
description.appendText(
"Key '%s' with value '%d' and direction '%s'".formatted(prometheusFormatKey, expectedCount, direction));
}
};
}
private static String toPrometheusFormat(String dottedMicrometerFormat) {
return dottedMicrometerFormat.replace(".", "_").replace("-", "_");
}
static int stringToBytes(String... messages) {
return Arrays.stream(messages).map(msg -> msg.getBytes(StandardCharsets.UTF_8)).map(s -> s.length).reduce(0,
Integer::sum);
}
}
| Direction |
java | apache__maven | impl/maven-impl/src/main/java/org/apache/maven/impl/model/profile/ConditionProfileActivator.java | {
"start": 1657,
"end": 1905
} | class ____ responsible for activating profiles based on conditions specified in the profile's activation section.
* It evaluates the condition expression and determines whether the profile should be active.
*/
@Named("condition")
@Singleton
public | is |
java | quarkusio__quarkus | test-framework/junit5/src/main/java/io/quarkus/test/junit/DisableIfBuiltWithGraalVMNewerThan.java | {
"start": 756,
"end": 833
} | interface ____ {
GraalVMVersion value();
}
| DisableIfBuiltWithGraalVMNewerThan |
java | apache__camel | core/camel-main/src/test/java/org/apache/camel/main/MainIoCTest.java | {
"start": 4215,
"end": 4270
} | class ____ {
// noop
}
public static | MyBar |
java | alibaba__fastjson | src/test/java/com/alibaba/json/test/a/IncomingDataPointBenchmark_file.java | {
"start": 270,
"end": 915
} | class ____ {
static String json;
public static void main(String[] args) throws Exception {
File file = new File("/Users/wenshao/Downloads/datalist");
json = FileUtils.readFileToString(file);
for (int i = 0; i < 10; ++i) {
perf();
}
}
public static void perf() {
long start = System.currentTimeMillis();
for (int i = 0; i < 1000; ++i) {
JSON.parseArray(json, IncomingDataPoint.class);
}
long millis = System.currentTimeMillis() - start;
System.out.println("IncomingDataPoint millis : " + millis);
}
}
| IncomingDataPointBenchmark_file |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/UnnecessaryDefaultInEnumSwitchTest.java | {
"start": 12648,
"end": 13246
} | enum ____ {
ONE,
TWO,
THREE,
UNRECOGNIZED
}
boolean m(Case c) {
switch (c) {
case ONE:
case TWO:
return true;
case THREE:
default:
// This is a comment
System.out.println("Test");
}
return false;
}
}
""")
.addOutputLines(
"out/Test.java",
"""
| Case |
java | google__auto | value/src/test/java/com/google/auto/value/extension/toprettystring/ToPrettyStringValidatorTest.java | {
"start": 2220,
"end": 2981
} | class ____ {",
" @ToPrettyString",
" CharSequence toPretty() {",
" return new String();",
" }",
"}",
"");
Compilation compilation = compile(file);
assertThat(compilation).failed();
assertThat(compilation).hadErrorCount(1);
assertThat(compilation)
.hadErrorContaining("must return String")
.inFile(file)
.onLineContaining("CharSequence toPretty()");
}
@Test
public void noParameters() {
JavaFileObject file =
JavaFileObjects.forSourceLines(
"test.Test",
"package test;",
"",
"import com.google.auto.value.extension.toprettystring.ToPrettyString;",
"",
" | Test |
java | google__dagger | javatests/dagger/internal/codegen/ScopingValidationTest.java | {
"start": 12043,
"end": 12385
} | class ____ {",
" @Inject UsedInRootBlueScoped() {}",
"}"),
CompilerTests.javaSource(
"test.RedScope",
"package test;",
"",
"import javax.inject.Scope;",
"",
"@Scope",
"@ | UsedInRootBlueScoped |
java | grpc__grpc-java | xds/src/test/java/io/grpc/xds/MetadataLoadBalancerProvider.java | {
"start": 2978,
"end": 4253
} | class ____ extends ForwardingLoadBalancer {
private final MetadataHelper helper;
private final LoadBalancer delegateLb;
MetadataLoadBalancer(MetadataHelper helper, LoadBalancer delegateLb) {
this.helper = helper;
this.delegateLb = delegateLb;
}
@Override
protected LoadBalancer delegate() {
return delegateLb;
}
@Override
public void handleResolvedAddresses(ResolvedAddresses resolvedAddresses) {
MetadataLoadBalancerConfig config
= (MetadataLoadBalancerConfig) resolvedAddresses.getLoadBalancingPolicyConfig();
helper.setMetadata(config.metadataKey, config.metadataValue);
delegateLb.handleResolvedAddresses(resolvedAddresses);
}
@Override
public Status acceptResolvedAddresses(ResolvedAddresses resolvedAddresses) {
MetadataLoadBalancerConfig config
= (MetadataLoadBalancerConfig) resolvedAddresses.getLoadBalancingPolicyConfig();
helper.setMetadata(config.metadataKey, config.metadataValue);
return delegateLb.acceptResolvedAddresses(resolvedAddresses);
}
}
/**
* Wraps the picker that is provided when the balancing change updates with the {@link
* MetadataPicker} that injects the metadata entry.
*/
static | MetadataLoadBalancer |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/combinertest/OldAPICombinerTest.java | {
"start": 2138,
"end": 5790
} | class ____ {
private FileSystem fs;
private String inputpath;
@Test
public void testWordCountCombinerWithOldAPI() throws Exception {
final Configuration nativeConf = ScenarioConfiguration.getNativeConfiguration();
nativeConf.addResource(TestConstants.COMBINER_CONF_PATH);
final String nativeoutput = TestConstants.NATIVETASK_OLDAPI_COMBINER_TEST_NATIVE_OUTPUTPATH;
final JobConf nativeJob = getOldAPIJobconf(nativeConf, "nativeCombinerWithOldAPI",
inputpath, nativeoutput);
RunningJob nativeRunning = JobClient.runJob(nativeJob);
Counter nativeReduceGroups = nativeRunning.getCounters().findCounter(
TaskCounter.REDUCE_INPUT_RECORDS);
final Configuration normalConf = ScenarioConfiguration.getNormalConfiguration();
normalConf.addResource(TestConstants.COMBINER_CONF_PATH);
final String normaloutput = TestConstants.NATIVETASK_OLDAPI_COMBINER_TEST_NORMAL_OUTPUTPATH;
final JobConf normalJob = getOldAPIJobconf(normalConf, "normalCombinerWithOldAPI",
inputpath, normaloutput);
RunningJob normalRunning = JobClient.runJob(normalJob);
Counter normalReduceGroups = normalRunning.getCounters().findCounter(
TaskCounter.REDUCE_INPUT_RECORDS);
final boolean compareRet = ResultVerifier.verify(nativeoutput, normaloutput);
assertThat(compareRet)
.withFailMessage(
"file compare result: if they are the same ,then return true")
.isTrue();
assertThat(nativeReduceGroups.getValue())
.withFailMessage("The input reduce record count must be same")
.isEqualTo(normalReduceGroups.getValue());
}
@BeforeEach
public void startUp() throws Exception {
assumeTrue(NativeCodeLoader.isNativeCodeLoaded());
assumeTrue(NativeRuntime.isNativeLibraryLoaded());
final ScenarioConfiguration conf = new ScenarioConfiguration();
conf.addcombinerConf();
this.fs = FileSystem.get(conf);
this.inputpath = TestConstants.NATIVETASK_COMBINER_TEST_INPUTDIR + "/wordcount";
if (!fs.exists(new Path(inputpath))) {
new TestInputFile(conf.getInt(TestConstants.NATIVETASK_COMBINER_WORDCOUNT_FILESIZE, 1000000),
Text.class.getName(),
Text.class.getName(), conf).createSequenceTestFile(inputpath, 1, (byte)('a'));
}
}
@AfterAll
public static void cleanUp() throws IOException {
final FileSystem fs = FileSystem.get(new ScenarioConfiguration());
fs.delete(new Path(TestConstants.NATIVETASK_COMBINER_TEST_DIR), true);
fs.close();
}
private static JobConf getOldAPIJobconf(Configuration configuration, String name,
String input, String output)
throws Exception {
final JobConf jobConf = new JobConf(configuration);
final FileSystem fs = FileSystem.get(configuration);
if (fs.exists(new Path(output))) {
fs.delete(new Path(output), true);
}
fs.close();
jobConf.setJobName(name);
jobConf.setOutputKeyClass(Text.class);
jobConf.setOutputValueClass(IntWritable.class);
jobConf.setMapperClass(WordCountWithOldAPI.TokenizerMapperWithOldAPI.class);
jobConf.setCombinerClass(WordCountWithOldAPI.IntSumReducerWithOldAPI.class);
jobConf.setReducerClass(WordCountWithOldAPI.IntSumReducerWithOldAPI.class);
jobConf.setInputFormat(SequenceFileInputFormat.class);
jobConf.setOutputFormat(TextOutputFormat.class);
FileInputFormat.setInputPaths(jobConf, new Path(input));
FileOutputFormat.setOutputPath(jobConf, new Path(output));
return jobConf;
}
}
| OldAPICombinerTest |
java | google__error-prone | docgen/src/main/java/com/google/errorprone/BugPatternFileGenerator.java | {
"start": 2017,
"end": 8004
} | class ____ implements LineProcessor<List<BugPatternInstance>> {
private final Path outputDir;
private final Path explanationDir;
private final Map<String, BugPatternInstance> result;
private final Function<BugPatternInstance, SeverityLevel> severityRemapper;
/** Controls whether yaml front-matter is generated. */
private final boolean generateFrontMatter;
/** The base url for links to bugpatterns. */
private final @Nullable String baseUrl;
private final Set<String> ignore;
private final Set<String> seen = new HashSet<>();
public BugPatternFileGenerator(
Path bugpatternDir,
Path explanationDir,
boolean generateFrontMatter,
String baseUrl,
Set<String> ignore,
Function<BugPatternInstance, SeverityLevel> severityRemapper) {
this.outputDir = bugpatternDir;
this.explanationDir = explanationDir;
this.severityRemapper = severityRemapper;
this.generateFrontMatter = generateFrontMatter;
this.baseUrl = baseUrl;
this.ignore = ignore;
result = new HashMap<>();
}
@CanIgnoreReturnValue
@Override
public boolean processLine(String line) throws IOException {
if (!seen.add(line)) {
return true;
}
BugPatternInstance pattern = new Gson().fromJson(line, BugPatternInstance.class);
if (ignore.contains(pattern.className)) {
return true;
}
pattern.severity = severityRemapper.apply(pattern);
var existing = result.put(pattern.name, pattern);
if (existing != null) {
throw new AssertionError(
String.format(
"Duplicate entry for %s: %s and %s",
pattern.name, existing.className, pattern.className));
}
// replace spaces in filename with underscores
Path checkPath = Paths.get(pattern.name.replace(' ', '_') + ".md");
try (Writer writer = Files.newBufferedWriter(outputDir.resolve(checkPath), UTF_8)) {
// load side-car explanation file, if it exists
Path sidecarExplanation = explanationDir.resolve(checkPath);
if (Files.exists(sidecarExplanation)) {
if (!pattern.explanation.isEmpty()) {
throw new AssertionError(
String.format(
"%s specifies an explanation via @BugPattern and side-car", pattern.name));
}
pattern.explanation = new String(Files.readAllBytes(sidecarExplanation), UTF_8).trim();
}
// Construct an appropriate page for this {@code BugPattern}. Include altNames if
// there are any, and explain the correct way to suppress.
ImmutableMap.Builder<String, Object> templateData =
ImmutableMap.<String, Object>builder()
.put("tags", Joiner.on(", ").join(pattern.tags))
.put("severity", pattern.severity)
.put("name", pattern.name)
.put("className", pattern.className)
.put("summary", pattern.summary.trim())
.put("altNames", Joiner.on(", ").join(pattern.altNames))
.put("explanation", pattern.explanation.trim());
if (baseUrl != null) {
templateData.put("baseUrl", baseUrl);
}
if (generateFrontMatter) {
ImmutableMap<String, String> frontmatterData =
ImmutableMap.<String, String>builder()
.put("title", pattern.name)
.put("summary", pattern.summary)
.put("layout", "bugpattern")
.put("tags", Joiner.on(", ").join(pattern.tags))
.put("severity", pattern.severity.toString())
.buildOrThrow();
DumperOptions options = new DumperOptions();
options.setDefaultFlowStyle(DumperOptions.FlowStyle.BLOCK);
Yaml yaml =
new Yaml(
new SafeConstructor(new LoaderOptions()),
new Representer(new DumperOptions()),
options);
Writer yamlWriter = new StringWriter();
yamlWriter.write("---\n");
yaml.dump(frontmatterData, yamlWriter);
yamlWriter.write("---\n");
templateData.put("frontmatter", yamlWriter.toString());
}
if (pattern.documentSuppression) {
String suppressionString;
if (pattern.suppressionAnnotations.length == 0) {
suppressionString = "This check may not be suppressed.";
} else {
suppressionString =
pattern.suppressionAnnotations.length == 1
? "Suppress false positives by adding the suppression annotation %s to the "
+ "enclosing element."
: "Suppress false positives by adding one of these suppression annotations to "
+ "the enclosing element: %s";
suppressionString =
String.format(
suppressionString,
Arrays.stream(pattern.suppressionAnnotations)
.map((String anno) -> standardizeAnnotation(anno, pattern.name))
.collect(Collectors.joining(", ")));
}
templateData.put("suppression", suppressionString);
}
MustacheFactory mf = new DefaultMustacheFactory();
Mustache mustache = mf.compile("com/google/errorprone/resources/bugpattern.mustache");
mustache.execute(writer, templateData.buildOrThrow());
}
return true;
}
private String standardizeAnnotation(String fullAnnotationName, String patternName) {
String annotationName =
fullAnnotationName.endsWith(".class")
? fullAnnotationName.substring(0, fullAnnotationName.length() - ".class".length())
: fullAnnotationName;
if (annotationName.equals(SuppressWarnings.class.getName())) {
annotationName = SuppressWarnings.class.getSimpleName() + "(\"" + patternName + "\")";
}
return "`@" + annotationName + "`";
}
@Override
public List<BugPatternInstance> getResult() {
return result.values().stream().collect(toCollection(ArrayList::new));
}
}
| BugPatternFileGenerator |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/DeeplyNestedTest.java | {
"start": 4594,
"end": 5220
} | class ____ {
static final ImmutableList<Integer> XS =
new ImmutableList.Builder<Integer>()
.add(1)
.add(2)
.add(3)
.add(4)
.add(5)
.add(6)
.add(7)
.add(8)
.add(9)
.add(10)
.build();
}
""")
.addOutputLines(
"Test.java",
"""
import com.google.common.collect.ImmutableList;
| Test |
java | elastic__elasticsearch | x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java | {
"start": 2873,
"end": 48912
} | class ____ extends AbstractXpackFullClusterRestartTestCase {
public static final int UPGRADE_FIELD_EXPECTED_INDEX_FORMAT_VERSION = 6;
public static final int SECURITY_EXPECTED_INDEX_FORMAT_VERSION = 6;
public FullClusterRestartIT(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) {
super(upgradeStatus);
}
@Override
protected Settings restClientSettings() {
String token = "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8));
return Settings.builder()
.put(ThreadContext.PREFIX + ".Authorization", token)
// we increase the timeout here to 90 seconds to handle long waits for a green
// cluster health. the waits for green need to be longer than a minute to
// account for delayed shards
.put(ESRestTestCase.CLIENT_SOCKET_TIMEOUT, "90s")
.build();
}
/**
* Tests that a single document survives. Super basic smoke test.
*/
public void testSingleDoc() throws IOException {
String docLocation = "/testsingledoc/_doc/1";
String doc = "{\"test\": \"test\"}";
if (isRunningAgainstOldCluster()) {
Request createDoc = new Request("PUT", docLocation);
createDoc.addParameter("refresh", "true");
createDoc.setJsonEntity(doc);
createDoc.setOptions(RequestOptions.DEFAULT.toBuilder().setWarningsHandler(fieldNamesFieldOk()));
client().performRequest(createDoc);
}
Request getRequest = new Request("GET", docLocation);
assertThat(toStr(client().performRequest(getRequest)), containsString(doc));
}
public void testSecurityNativeRealm() throws Exception {
if (isRunningAgainstOldCluster()) {
createUser(true);
createRole(true);
} else {
waitForYellow(".security");
final Request getSettingsRequest = new Request("GET", "/.security/_settings/index.format");
getSettingsRequest.setOptions(systemIndexWarningHandlerOptions(".security-7"));
Response settingsResponse = client().performRequest(getSettingsRequest);
Map<String, Object> settingsResponseMap = entityAsMap(settingsResponse);
logger.info("settings response map {}", settingsResponseMap);
final String concreteSecurityIndex;
if (settingsResponseMap.isEmpty()) {
fail("The security index does not have the expected setting [index.format]");
} else {
concreteSecurityIndex = settingsResponseMap.keySet().iterator().next();
Map<?, ?> indexSettingsMap = (Map<?, ?>) settingsResponseMap.get(concreteSecurityIndex);
Map<?, ?> settingsMap = (Map<?, ?>) indexSettingsMap.get("settings");
logger.info("settings map {}", settingsMap);
if (settingsMap.containsKey("index")) {
int format = Integer.parseInt(String.valueOf(((Map<?, ?>) settingsMap.get("index")).get("format")));
assertEquals("The security index needs to be upgraded", SECURITY_EXPECTED_INDEX_FORMAT_VERSION, format);
}
}
// create additional user and role
createUser(false);
createRole(false);
}
assertUserInfo(isRunningAgainstOldCluster());
assertRoleInfo(isRunningAgainstOldCluster());
}
public void testWatcher() throws Exception {
if (isRunningAgainstOldCluster()) {
logger.info("Adding a watch on old cluster {}", getOldClusterVersion());
Request createBwcWatch = new Request("PUT", "/_watcher/watch/bwc_watch");
createBwcWatch.setJsonEntity(loadWatch("simple-watch.json"));
client().performRequest(createBwcWatch);
logger.info("Adding a watch with \"fun\" throttle periods on old cluster");
Request createBwcThrottlePeriod = new Request("PUT", "/_watcher/watch/bwc_throttle_period");
createBwcThrottlePeriod.setJsonEntity(loadWatch("throttle-period-watch.json"));
client().performRequest(createBwcThrottlePeriod);
logger.info("Adding a watch with \"fun\" read timeout on old cluster");
Request createFunnyTimeout = new Request("PUT", "/_watcher/watch/bwc_funny_timeout");
createFunnyTimeout.setJsonEntity(loadWatch("funny-timeout-watch.json"));
client().performRequest(createFunnyTimeout);
logger.info("Waiting for watch results index to fill up...");
try {
waitForYellow(".watches,bwc_watch_index,.watcher-history*");
} catch (ResponseException e) {
{
String rsp = toStr(client().performRequest(new Request("GET", "/_cluster/state")));
logger.info("cluster_state_response=\n{}", rsp);
}
{
Request request = new Request("GET", "/_watcher/stats/_all");
request.addParameter("emit_stacktraces", "true");
String rsp = toStr(client().performRequest(request));
logger.info("watcher_stats_response=\n{}", rsp);
}
throw e;
}
waitForHits("bwc_watch_index", 2);
waitForHits(".watcher-history*", 2);
logger.info("Done creating watcher-related indices");
} else {
logger.info("testing against {}", getOldClusterVersion());
try {
waitForYellow(".watches,bwc_watch_index,.watcher-history*");
} catch (ResponseException e) {
String rsp = toStr(client().performRequest(new Request("GET", "/_cluster/state")));
logger.info("cluster_state_response=\n{}", rsp);
throw e;
}
logger.info("checking that the Watches index is the correct version");
// Verify .watches index format:
var getClusterStateResponse = entityAsMap(client().performRequest(new Request("GET", "/_cluster/state/metadata/.watches")));
Map<?, ?> indices = ObjectPath.eval("metadata.indices", getClusterStateResponse);
var dotWatchesIndex = indices.get(".watches"); // ObjectPath.eval(...) doesn't handle keys containing .
var indexFormat = Integer.parseInt(ObjectPath.eval("settings.index.format", dotWatchesIndex));
assertEquals("The watches index needs to be upgraded", UPGRADE_FIELD_EXPECTED_INDEX_FORMAT_VERSION, indexFormat);
// Wait for watcher to actually start....
startWatcher();
try {
assertOldTemplatesAreDeleted();
assertWatchIndexContentsWork();
assertBasicWatchInteractions();
} finally {
/* Shut down watcher after every test because watcher can be a bit finicky about shutting down when the node shuts
* down. This makes super sure it shuts down *and* causes the test to fail in a sensible spot if it doesn't shut down.
*/
stopWatcher();
}
}
}
@SuppressWarnings("unchecked")
public void testWatcherWithApiKey() throws Exception {
final Request getWatchStatusRequest = new Request("GET", "/_watcher/watch/watch_with_api_key");
if (isRunningAgainstOldCluster()) {
final Request createApiKeyRequest = new Request("PUT", "/_security/api_key");
createApiKeyRequest.setJsonEntity("""
{
"name": "key-1",
"role_descriptors": {
"r": {
"cluster": [ "all" ],
"indices": [
{
"names": [ "*" ],
"privileges": [ "all" ]
}
]
}
}
}""");
final Response response = client().performRequest(createApiKeyRequest);
final Map<String, Object> createApiKeyResponse = entityAsMap(response);
Request createWatchWithApiKeyRequest = new Request("PUT", "/_watcher/watch/watch_with_api_key");
createWatchWithApiKeyRequest.setJsonEntity(loadWatch("logging-watch.json"));
final byte[] keyBytes = (createApiKeyResponse.get("id") + ":" + createApiKeyResponse.get("api_key")).getBytes(
StandardCharsets.UTF_8
);
final String authHeader = "ApiKey " + Base64.getEncoder().encodeToString(keyBytes);
createWatchWithApiKeyRequest.setOptions(RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", authHeader));
client().performRequest(createWatchWithApiKeyRequest);
assertBusy(() -> {
final Map<String, Object> getWatchStatusResponse = entityAsMap(client().performRequest(getWatchStatusRequest));
final Map<String, Object> status = (Map<String, Object>) getWatchStatusResponse.get("status");
assertEquals("executed", status.get("execution_state"));
}, 30, TimeUnit.SECONDS);
} else {
logger.info("testing against {}", getOldClusterVersion());
try {
waitForYellow(".watches,.watcher-history*");
} catch (ResponseException e) {
String rsp = toStr(client().performRequest(new Request("GET", "/_cluster/state")));
logger.info("cluster_state_response=\n{}", rsp);
throw e;
}
// Wait for watcher to actually start....
startWatcher();
try {
final Map<String, Object> getWatchStatusResponse = entityAsMap(client().performRequest(getWatchStatusRequest));
final Map<String, Object> status = (Map<String, Object>) getWatchStatusResponse.get("status");
final int version = (int) status.get("version");
final AtomicBoolean versionIncreased = new AtomicBoolean();
final AtomicBoolean executed = new AtomicBoolean();
assertBusy(() -> {
final Map<String, Object> newGetWatchStatusResponse = entityAsMap(client().performRequest(getWatchStatusRequest));
final Map<String, Object> newStatus = (Map<String, Object>) newGetWatchStatusResponse.get("status");
if (false == versionIncreased.get() && version < (int) newStatus.get("version")) {
versionIncreased.set(true);
}
if (false == executed.get() && "executed".equals(newStatus.get("execution_state"))) {
executed.set(true);
}
assertThat(
"version increased: [" + versionIncreased.get() + "], executed: [" + executed.get() + "]",
versionIncreased.get() && executed.get(),
is(true)
);
}, 30, TimeUnit.SECONDS);
} finally {
stopWatcher();
}
}
}
public void testServiceAccountApiKey() throws IOException {
if (isRunningAgainstOldCluster()) {
final Request createServiceTokenRequest = new Request("POST", "/_security/service/elastic/fleet-server/credential/token");
final Response createServiceTokenResponse = client().performRequest(createServiceTokenRequest);
assertOK(createServiceTokenResponse);
@SuppressWarnings("unchecked")
final String serviceToken = ((Map<String, String>) responseAsMap(createServiceTokenResponse).get("token")).get("value");
final Request createApiKeyRequest = new Request("PUT", "/_security/api_key");
createApiKeyRequest.setOptions(RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", "Bearer " + serviceToken));
createApiKeyRequest.setJsonEntity("{\"name\":\"key-1\"}");
final Response createApiKeyResponse = client().performRequest(createApiKeyRequest);
final Map<String, Object> createApiKeyResponseMap = entityAsMap(createApiKeyResponse);
final String authHeader = "ApiKey "
+ Base64.getEncoder()
.encodeToString(
(createApiKeyResponseMap.get("id") + ":" + createApiKeyResponseMap.get("api_key")).getBytes(StandardCharsets.UTF_8)
);
final Request indexRequest = new Request("PUT", "/api_keys/_doc/key-1");
indexRequest.setJsonEntity("{\"auth_header\":\"" + authHeader + "\"}");
assertOK(client().performRequest(indexRequest));
} else {
final Request getRequest = new Request("GET", "/api_keys/_doc/key-1");
final Response getResponse = client().performRequest(getRequest);
assertOK(getResponse);
final Map<String, Object> getResponseMap = responseAsMap(getResponse);
@SuppressWarnings("unchecked")
final String authHeader = ((Map<String, String>) getResponseMap.get("_source")).get("auth_header");
final Request mainRequest = new Request("GET", "/");
mainRequest.setOptions(RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", authHeader));
assertOK(client().performRequest(mainRequest));
final Request getUserRequest = new Request("GET", "/_security/user");
getUserRequest.setOptions(RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", authHeader));
final ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(getUserRequest));
assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(403));
assertThat(e.getMessage(), containsString("is unauthorized"));
}
}
public void testApiKeySuperuser() throws IOException {
if (isRunningAgainstOldCluster()) {
final Request createUserRequest = new Request("PUT", "/_security/user/api_key_super_creator");
createUserRequest.setJsonEntity("""
{
"password" : "l0ng-r4nd0m-p@ssw0rd",
"roles" : [ "superuser", "monitoring_user" ]
}""");
client().performRequest(createUserRequest);
// Create API key
final Request createApiKeyRequest = new Request("PUT", "/_security/api_key");
createApiKeyRequest.setOptions(
RequestOptions.DEFAULT.toBuilder()
.addHeader(
"Authorization",
UsernamePasswordToken.basicAuthHeaderValue(
"api_key_super_creator",
new SecureString("l0ng-r4nd0m-p@ssw0rd".toCharArray())
)
)
);
createApiKeyRequest.setJsonEntity("""
{
"name": "super_legacy_key"
}""");
final Map<String, Object> createApiKeyResponse = entityAsMap(client().performRequest(createApiKeyRequest));
final byte[] keyBytes = (createApiKeyResponse.get("id") + ":" + createApiKeyResponse.get("api_key")).getBytes(
StandardCharsets.UTF_8
);
final String apiKeyAuthHeader = "ApiKey " + Base64.getEncoder().encodeToString(keyBytes);
// Save the API key info across restart
final Request saveApiKeyRequest = new Request("PUT", "/api_keys/_doc/super_legacy_key");
saveApiKeyRequest.setJsonEntity("{\"auth_header\":\"" + apiKeyAuthHeader + "\"}");
assertOK(client().performRequest(saveApiKeyRequest));
} else {
final Request getRequest = new Request("GET", "/api_keys/_doc/super_legacy_key");
final Map<String, Object> getResponseMap = responseAsMap(client().performRequest(getRequest));
@SuppressWarnings("unchecked")
final String apiKeyAuthHeader = ((Map<String, String>) getResponseMap.get("_source")).get("auth_header");
// read is ok
final Request searchRequest = new Request("GET", ".security/_search");
searchRequest.setOptions(systemIndexWarningHandlerOptions(".security-7").addHeader("Authorization", apiKeyAuthHeader));
assertOK(client().performRequest(searchRequest));
// write must not be allowed
final Request indexRequest = new Request("POST", ".security/_doc");
indexRequest.setJsonEntity("""
{
"doc_type": "foo"
}""");
indexRequest.setOptions(systemIndexWarningHandlerOptions(".security-7").addHeader("Authorization", apiKeyAuthHeader));
final ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(indexRequest));
assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(403));
assertThat(e.getMessage(), containsString("is unauthorized"));
}
}
/**
* Tests that a RollUp job created on a old cluster is correctly restarted after the upgrade.
*/
public void testRollupAfterRestart() throws Exception {
if (isRunningAgainstOldCluster()) {
// create dummy rollup index to circumvent the check that prohibits rollup usage in empty clusters:
{
Request req = new Request("PUT", "dummy-rollup-index");
req.setJsonEntity("""
{
"mappings":{
"_meta": {
"_rollup":{
"my-id": {}
}
}
}
}
""");
req.setOptions(RequestOptions.DEFAULT.toBuilder().setWarningsHandler(fieldNamesFieldOk()));
client().performRequest(req);
}
final int numDocs = 59;
final int year = randomIntBetween(1970, 2018);
// index documents for the rollup job
final StringBuilder bulk = new StringBuilder();
for (int i = 0; i < numDocs; i++) {
bulk.append("{\"index\":{\"_index\":\"rollup-docs\"}}\n");
String date = Strings.format("%04d-01-01T00:%02d:00Z", year, i);
bulk.append("{\"timestamp\":\"").append(date).append("\",\"value\":").append(i).append("}\n");
}
bulk.append("\r\n");
final Request bulkRequest = new Request("POST", "/_bulk");
bulkRequest.setJsonEntity(bulk.toString());
bulkRequest.setOptions(RequestOptions.DEFAULT.toBuilder().setWarningsHandler(fieldNamesFieldOk()));
client().performRequest(bulkRequest);
// create the rollup job
final Request createRollupJobRequest = new Request("PUT", "/_rollup/job/rollup-job-test");
createRollupJobRequest.setOptions(ROLLUP_REQUESTS_OPTIONS);
createRollupJobRequest.setJsonEntity("""
{
"index_pattern": "rollup-*",
"rollup_index": "results-rollup",
"cron": "*/30 * * * * ?",
"page_size": 100,
"groups": {
"date_histogram": {
"field": "timestamp",
"fixed_interval": "5m"
}
},
"metrics": [
{
"field": "value",
"metrics": [ "min", "max", "sum" ]
}
]
}""");
Map<String, Object> createRollupJobResponse = entityAsMap(client().performRequest(createRollupJobRequest));
assertThat(createRollupJobResponse.get("acknowledged"), equalTo(Boolean.TRUE));
// start the rollup job
final Request startRollupJobRequest = new Request("POST", "/_rollup/job/rollup-job-test/_start");
startRollupJobRequest.setOptions(ROLLUP_REQUESTS_OPTIONS);
Map<String, Object> startRollupJobResponse = entityAsMap(client().performRequest(startRollupJobRequest));
assertThat(startRollupJobResponse.get("started"), equalTo(Boolean.TRUE));
assertRollUpJob("rollup-job-test");
} else {
final Request clusterHealthRequest = new Request("GET", "/_cluster/health");
clusterHealthRequest.addParameter("wait_for_status", "yellow");
clusterHealthRequest.addParameter("wait_for_no_relocating_shards", "true");
clusterHealthRequest.addParameter("wait_for_no_initializing_shards", "true");
Map<String, Object> clusterHealthResponse = entityAsMap(client().performRequest(clusterHealthRequest));
assertThat(clusterHealthResponse.get("timed_out"), equalTo(Boolean.FALSE));
assertRollUpJob("rollup-job-test");
}
}
public void testTransformLegacyTemplateCleanup() throws Exception {
if (isRunningAgainstOldCluster()) {
// create the source index
final Request createIndexRequest = new Request("PUT", "customers");
createIndexRequest.setJsonEntity("""
{
"mappings": {
"properties": {
"customer_id": {
"type": "keyword"
},
"price": {
"type": "double"
}
}
}
}""");
createIndexRequest.setOptions(RequestOptions.DEFAULT.toBuilder().setWarningsHandler(fieldNamesFieldOk()));
Map<String, Object> createIndexResponse = entityAsMap(client().performRequest(createIndexRequest));
assertThat(createIndexResponse.get("acknowledged"), equalTo(Boolean.TRUE));
// create a transform
final Request createTransformRequest = new Request("PUT", "_transform/transform-full-cluster-restart-test");
createTransformRequest.setJsonEntity("""
{
"source": {
"index": "customers"
},
"description": "testing",
"dest": {
"index": "max_price"
},
"pivot": {
"group_by": {
"customer_id": {
"terms": {
"field": "customer_id"
}
}
},
"aggregations": {
"max_price": {
"max": {
"field": "price"
}
}
}
}
}""");
Map<String, Object> createTransformResponse = entityAsMap(client().performRequest(createTransformRequest));
assertThat(createTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE));
} else {
// legacy index templates created in previous releases should not be present anymore
assertBusy(() -> {
Request request = new Request("GET", "/_template/.transform-*,.data-frame-*");
try {
Response response = client().performRequest(request);
Map<String, Object> responseLevel = entityAsMap(response);
assertNotNull(responseLevel);
assertThat(responseLevel.keySet(), empty());
} catch (ResponseException e) {
// not found is fine
assertThat(
"Unexpected failure getting templates: " + e.getResponse().getStatusLine(),
e.getResponse().getStatusLine().getStatusCode(),
is(404)
);
}
});
}
}
public void testSlmPolicyAndStats() throws IOException {
SnapshotLifecyclePolicy slmPolicy = new SnapshotLifecyclePolicy(
"test-policy",
"test-policy",
"* * * 31 FEB ? *",
"test-repo",
Collections.singletonMap("indices", Collections.singletonList("*")),
null
);
if (isRunningAgainstOldCluster()) {
Request createRepoRequest = new Request("PUT", "_snapshot/test-repo");
String repoCreateJson = "{" + " \"type\": \"fs\"," + " \"settings\": {" + " \"location\": \"test-repo\"" + " }" + "}";
createRepoRequest.setJsonEntity(repoCreateJson);
Request createSlmPolicyRequest = new Request("PUT", "_slm/policy/test-policy");
try (XContentBuilder builder = JsonXContent.contentBuilder()) {
String createSlmPolicyJson = Strings.toString(slmPolicy.toXContent(builder, null));
createSlmPolicyRequest.setJsonEntity(createSlmPolicyJson);
}
client().performRequest(createRepoRequest);
client().performRequest(createSlmPolicyRequest);
}
if (isRunningAgainstOldCluster() == false) {
Request getSlmPolicyRequest = new Request("GET", "_slm/policy/test-policy");
Response response = client().performRequest(getSlmPolicyRequest);
Map<String, Object> responseMap = entityAsMap(response);
Map<?, ?> policy = (Map<?, ?>) ((Map<?, ?>) responseMap.get("test-policy")).get("policy");
assertEquals(slmPolicy.getName(), policy.get("name"));
assertEquals(slmPolicy.getRepository(), policy.get("repository"));
assertEquals(slmPolicy.getSchedule(), policy.get("schedule"));
assertEquals(slmPolicy.getConfig(), policy.get("config"));
}
if (isRunningAgainstOldCluster() == false) {
Response response = client().performRequest(new Request("GET", "_slm/stats"));
XContentType xContentType = XContentType.fromMediaType(response.getEntity().getContentType().getValue());
try (
XContentParser parser = xContentType.xContent()
.createParser(XContentParserConfiguration.EMPTY, response.getEntity().getContent())
) {
assertEquals(new SnapshotLifecycleStats(), SnapshotLifecycleStats.parse(parser));
}
}
}
private String loadWatch(String watch) throws IOException {
return StreamsUtils.copyToStringFromClasspath("/org/elasticsearch/xpack/restart/" + watch);
}
private void assertOldTemplatesAreDeleted() throws IOException {
Map<String, Object> templates = entityAsMap(client().performRequest(new Request("GET", "/_template")));
assertThat(templates.keySet(), not(hasItems(is("watches"), startsWith("watch-history"), is("triggered_watches"))));
}
private void assertWatchIndexContentsWork() throws Exception {
// Fetch a basic watch
Request getRequest = new Request("GET", "_watcher/watch/bwc_watch");
Map<String, Object> bwcWatch = entityAsMap(client().performRequest(getRequest));
logger.error("-----> {}", bwcWatch);
assertThat(bwcWatch.get("found"), equalTo(true));
Map<?, ?> source = (Map<?, ?>) bwcWatch.get("watch");
assertEquals(1000, source.get("throttle_period_in_millis"));
int timeout = (int) timeValueSeconds(100).millis();
assertThat(ObjectPath.eval("input.search.timeout_in_millis", source), equalTo(timeout));
assertThat(ObjectPath.eval("actions.index_payload.transform.search.timeout_in_millis", source), equalTo(timeout));
assertThat(ObjectPath.eval("actions.index_payload.index.index", source), equalTo("bwc_watch_index"));
assertThat(ObjectPath.eval("actions.index_payload.index.timeout_in_millis", source), equalTo(timeout));
// Fetch a watch with "fun" throttle periods
getRequest = new Request("GET", "_watcher/watch/bwc_throttle_period");
bwcWatch = entityAsMap(client().performRequest(getRequest));
assertThat(bwcWatch.get("found"), equalTo(true));
source = (Map<?, ?>) bwcWatch.get("watch");
assertEquals(timeout, source.get("throttle_period_in_millis"));
assertThat(ObjectPath.eval("actions.index_payload.throttle_period_in_millis", source), equalTo(timeout));
/*
* Fetch a watch with a funny timeout to verify loading fractional time
* values.
*/
bwcWatch = entityAsMap(client().performRequest(new Request("GET", "_watcher/watch/bwc_funny_timeout")));
assertThat(bwcWatch.get("found"), equalTo(true));
source = (Map<?, ?>) bwcWatch.get("watch");
Map<String, Object> attachments = ObjectPath.eval("actions.work.email.attachments", source);
Map<?, ?> attachment = (Map<?, ?>) attachments.get("test_report.pdf");
Map<String, Object> request = ObjectPath.eval("http.request", attachment);
assertEquals(timeout, request.get("read_timeout_millis"));
assertEquals("https", request.get("scheme"));
assertEquals("example.com", request.get("host"));
assertEquals("{{ctx.metadata.report_url}}", request.get("path"));
assertEquals(8443, request.get("port"));
Map<String, String> basic = ObjectPath.eval("auth.basic", request);
assertThat(basic, hasEntry("username", "Aladdin"));
// password doesn't come back because it is hidden
assertThat(basic, hasEntry(is("password"), anyOf(startsWith("::es_encrypted::"), is("::es_redacted::"))));
Request searchRequest = new Request("GET", ".watcher-history*/_search");
if (isRunningAgainstOldCluster() == false) {
searchRequest.addParameter(RestSearchAction.TOTAL_HITS_AS_INT_PARAM, "true");
}
Map<String, Object> history = entityAsMap(client().performRequest(searchRequest));
Map<?, ?> hits = (Map<?, ?>) history.get("hits");
assertThat((Integer) hits.get("total"), greaterThanOrEqualTo(2));
}
private void assertBasicWatchInteractions() throws Exception {
String watch = """
{
"trigger": {
"schedule": {
"interval": "1s"
}
},
"input": {
"none": {}
},
"condition": {
"always": {}
},
"actions": {
"awesome": {
"logging": {
"level": "info",
"text": "test"
}
}
}
}""";
Request createWatchRequest = new Request("PUT", "_watcher/watch/new_watch");
createWatchRequest.setJsonEntity(watch);
Map<String, Object> createWatch = entityAsMap(client().performRequest(createWatchRequest));
logger.info("create watch {}", createWatch);
assertThat(createWatch.get("created"), equalTo(true));
assertThat(createWatch.get("_version"), equalTo(1));
Map<String, Object> updateWatch = entityAsMap(client().performRequest(createWatchRequest));
assertThat(updateWatch.get("created"), equalTo(false));
assertThat((int) updateWatch.get("_version"), greaterThanOrEqualTo(2));
Map<String, Object> get = entityAsMap(client().performRequest(new Request("GET", "_watcher/watch/new_watch")));
assertThat(get.get("found"), equalTo(true));
Map<?, ?> source = (Map<?, ?>) get.get("watch");
Map<String, Object> logging = ObjectPath.eval("actions.awesome.logging", source);
assertEquals("info", logging.get("level"));
assertEquals("test", logging.get("text"));
}
private void waitForYellow(String indexName) throws IOException {
Request request = new Request("GET", "/_cluster/health/" + indexName);
request.addParameter("wait_for_status", "yellow");
request.addParameter("timeout", "30s");
request.addParameter("wait_for_no_relocating_shards", "true");
request.addParameter("wait_for_no_initializing_shards", "true");
Map<String, Object> response = entityAsMap(client().performRequest(request));
assertThat(response.get("timed_out"), equalTo(Boolean.FALSE));
}
private void waitForHits(String indexName, int expectedHits) throws Exception {
Request request = new Request("GET", "/" + indexName + "/_search");
request.addParameter("ignore_unavailable", "true");
request.addParameter("size", "0");
assertBusy(() -> {
try {
Map<String, Object> response = entityAsMap(client().performRequest(request));
Map<?, ?> hits = (Map<?, ?>) response.get("hits");
logger.info("Hits are: {}", hits);
Integer total;
total = (Integer) ((Map<?, ?>) hits.get("total")).get("value");
assertThat(total, greaterThanOrEqualTo(expectedHits));
} catch (IOException ioe) {
if (ioe instanceof ResponseException) {
Response response = ((ResponseException) ioe).getResponse();
if (RestStatus.fromCode(response.getStatusLine().getStatusCode()) == RestStatus.SERVICE_UNAVAILABLE) {
fail("shards are not yet active");
}
}
throw ioe;
}
}, 30, TimeUnit.SECONDS);
}
private void startWatcher() throws Exception {
Map<String, Object> startWatchResponse = entityAsMap(client().performRequest(new Request("POST", "_watcher/_start")));
assertThat(startWatchResponse.get("acknowledged"), equalTo(Boolean.TRUE));
assertBusy(() -> {
Map<String, Object> statsWatchResponse = entityAsMap(client().performRequest(new Request("GET", "_watcher/stats")));
List<?> states = ((List<?>) statsWatchResponse.get("stats")).stream()
.map(o -> ((Map<?, ?>) o).get("watcher_state"))
.collect(Collectors.toList());
assertThat(states, everyItem(is("started")));
});
}
private void stopWatcher() throws Exception {
Map<String, Object> stopWatchResponse = entityAsMap(client().performRequest(new Request("POST", "_watcher/_stop")));
assertThat(stopWatchResponse.get("acknowledged"), equalTo(Boolean.TRUE));
assertBusy(() -> {
Map<String, Object> statsStoppedWatchResponse = entityAsMap(client().performRequest(new Request("GET", "_watcher/stats")));
List<?> states = ((List<?>) statsStoppedWatchResponse.get("stats")).stream()
.map(o -> ((Map<?, ?>) o).get("watcher_state"))
.collect(Collectors.toList());
assertThat(states, everyItem(is("stopped")));
});
}
static String toStr(Response response) throws IOException {
return EntityUtils.toString(response.getEntity());
}
private void createUser(final boolean oldCluster) throws Exception {
final String id = oldCluster ? "preupgrade_user" : "postupgrade_user";
Request request = new Request("PUT", "/_security/user/" + id);
request.setJsonEntity(Strings.format("""
{
"password" : "l0ng-r4nd0m-p@ssw0rd",
"roles" : [ "admin", "other_role1" ],
"full_name" : "%s",
"email" : "%s@example.com",
"enabled": true
}""", randomAlphaOfLength(5), id));
client().performRequest(request);
}
private void createRole(final boolean oldCluster) throws Exception {
final String id = oldCluster ? "preupgrade_role" : "postupgrade_role";
Request request = new Request("PUT", "/_security/role/" + id);
request.setJsonEntity("""
{
"run_as": [ "abc" ],
"cluster": [ "monitor" ],
"indices": [
{
"names": [ "events-*" ],
"privileges": [ "read" ],
"field_security" : {
"grant" : [ "category", "@timestamp", "message" ]
},
"query": "{\\"match\\": {\\"category\\": \\"click\\"}}"
}
]
}""");
client().performRequest(request);
}
private void assertUserInfo(final boolean oldCluster) throws Exception {
final String user = oldCluster ? "preupgrade_user" : "postupgrade_user";
Request request = new Request("GET", "/_security/user/" + user);
;
Map<String, Object> response = entityAsMap(client().performRequest(request));
Map<?, ?> userInfo = (Map<?, ?>) response.get(user);
assertEquals(user + "@example.com", userInfo.get("email"));
assertNotNull(userInfo.get("full_name"));
assertNotNull(userInfo.get("roles"));
}
private void assertRoleInfo(final boolean oldCluster) throws Exception {
final String role = oldCluster ? "preupgrade_role" : "postupgrade_role";
Map<?, ?> response = (Map<?, ?>) entityAsMap(client().performRequest(new Request("GET", "/_security/role/" + role))).get(role);
assertNotNull(response.get("run_as"));
assertNotNull(response.get("cluster"));
assertNotNull(response.get("indices"));
}
private void assertRollUpJob(final String rollupJob) throws Exception {
final Matcher<?> expectedStates = anyOf(equalTo("indexing"), equalTo("started"));
waitForRollUpJob(rollupJob, expectedStates);
// check that the rollup job is started using the RollUp API
final Request getRollupJobRequest = new Request("GET", "_rollup/job/" + rollupJob);
getRollupJobRequest.setOptions(ROLLUP_REQUESTS_OPTIONS);
Map<String, Object> getRollupJobResponse = entityAsMap(client().performRequest(getRollupJobRequest));
Map<?, ?> job = getJob(getRollupJobResponse, rollupJob);
assertNotNull(job);
assertThat(ObjectPath.eval("status.job_state", job), expectedStates);
// check that the rollup job is started using the Tasks API
final Request taskRequest = new Request("GET", "_tasks");
taskRequest.addParameter("detailed", "true");
taskRequest.addParameter("actions", "xpack/rollup/*");
Map<String, Object> taskResponse = entityAsMap(client().performRequest(taskRequest));
Map<?, ?> taskResponseNodes = (Map<?, ?>) taskResponse.get("nodes");
Map<?, ?> taskResponseNode = (Map<?, ?>) taskResponseNodes.values().iterator().next();
Map<?, ?> taskResponseTasks = (Map<?, ?>) taskResponseNode.get("tasks");
Map<?, ?> taskResponseStatus = (Map<?, ?>) taskResponseTasks.values().iterator().next();
assertThat(ObjectPath.eval("status.job_state", taskResponseStatus), expectedStates);
// check that the rollup job is started using the Cluster State API
final Request clusterStateRequest = new Request("GET", "_cluster/state/metadata");
Map<String, Object> clusterStateResponse = entityAsMap(client().performRequest(clusterStateRequest));
List<Map<String, Object>> rollupJobTasks = ObjectPath.eval("metadata.persistent_tasks.tasks", clusterStateResponse);
boolean hasRollupTask = false;
for (Map<String, Object> task : rollupJobTasks) {
if (ObjectPath.eval("id", task).equals(rollupJob)) {
hasRollupTask = true;
final String jobStateField = "task.xpack/rollup/job.state.job_state";
assertThat(
"Expected field [" + jobStateField + "] to be started or indexing in " + task.get("id"),
ObjectPath.eval(jobStateField, task),
expectedStates
);
break;
}
}
if (hasRollupTask == false) {
fail("Expected persistent task for [" + rollupJob + "] but none found.");
}
}
private void waitForRollUpJob(final String rollupJob, final Matcher<?> expectedStates) throws Exception {
assertBusy(() -> {
final Request getRollupJobRequest = new Request("GET", "/_rollup/job/" + rollupJob);
getRollupJobRequest.setOptions(ROLLUP_REQUESTS_OPTIONS);
Response getRollupJobResponse = client().performRequest(getRollupJobRequest);
assertThat(getRollupJobResponse.getStatusLine().getStatusCode(), equalTo(RestStatus.OK.getStatus()));
Map<?, ?> job = getJob(getRollupJobResponse, rollupJob);
assertNotNull(job);
assertThat(ObjectPath.eval("status.job_state", job), expectedStates);
}, 30L, TimeUnit.SECONDS);
}
private Map<?, ?> getJob(Response response, String targetJobId) throws IOException {
return getJob(ESRestTestCase.entityAsMap(response), targetJobId);
}
private Map<?, ?> getJob(Map<String, Object> jobsMap, String targetJobId) throws IOException {
List<?> jobs = (List<?>) XContentMapValues.extractValue("jobs", jobsMap);
if (jobs == null) {
return null;
}
for (Object entry : jobs) {
Map<?, ?> job = (Map<?, ?>) entry;
String jobId = (String) ((Map<?, ?>) job.get("config")).get("id");
if (jobId.equals(targetJobId)) {
return job;
}
}
return null;
}
@SuppressWarnings("unchecked")
public void testDataStreams() throws Exception {
if (isRunningAgainstOldCluster()) {
createComposableTemplate(client(), "dst", "ds");
Request indexRequest = new Request("POST", "/ds/_doc/1?op_type=create&refresh");
XContentBuilder builder = JsonXContent.contentBuilder()
.startObject()
.field("f", "v")
.field("@timestamp", System.currentTimeMillis())
.endObject();
indexRequest.setJsonEntity(Strings.toString(builder));
indexRequest.setOptions(RequestOptions.DEFAULT.toBuilder().setWarningsHandler(fieldNamesFieldOk()));
assertOK(client().performRequest(indexRequest));
}
// It's quite possible that this test will run where the data stream backing index is
// created on one day, and then checked on a subsequent day. To avoid this failing the
// test, we store the timestamp used when the document is indexed, then when we go to
// check the backing index name, we retrieve the time and use it for the backing index
// name resolution.
Request getDoc = new Request("GET", "/ds/_search");
Map<String, Object> doc = entityAsMap(client().performRequest(getDoc));
logger.info("--> doc: {}", doc);
Map<String, Object> hits = (Map<String, Object>) doc.get("hits");
Map<String, Object> docBody = (Map<String, Object>) ((List<Object>) hits.get("hits")).get(0);
Long timestamp = (Long) ((Map<String, Object>) docBody.get("_source")).get("@timestamp");
logger.info("--> parsed out timestamp of {}", timestamp);
Request getDataStream = new Request("GET", "/_data_stream/ds");
Response response = client().performRequest(getDataStream);
assertOK(response);
List<Object> dataStreams = (List<Object>) entityAsMap(response).get("data_streams");
assertEquals(1, dataStreams.size());
Map<String, Object> ds = (Map<String, Object>) dataStreams.get(0);
List<Map<String, String>> indices = (List<Map<String, String>>) ds.get("indices");
assertEquals("ds", ds.get("name"));
assertEquals(1, indices.size());
assertEquals(DataStreamTestHelper.getLegacyDefaultBackingIndexName("ds", 1, timestamp), indices.get(0).get("index_name"));
assertNumHits("ds", 1, 1);
}
/**
* Ignore the warning about the {@code _field_names} field. We intentionally
* turn that field off sometimes. And other times old versions spuriously
* send it back to us.
*/
private WarningsHandler fieldNamesFieldOk() {
return warnings -> switch (warnings.size()) {
case 0 -> false; // old versions don't return a warning
case 1 -> false == warnings.get(0).contains("_field_names");
default -> true;
};
}
private static void createComposableTemplate(RestClient client, String templateName, String indexPattern) throws IOException {
StringEntity templateJSON = new StringEntity(Strings.format("""
{
"index_patterns": "%s",
"data_stream": {}
}""", indexPattern), ContentType.APPLICATION_JSON);
Request createIndexTemplateRequest = new Request("PUT", "_index_template/" + templateName);
createIndexTemplateRequest.setEntity(templateJSON);
client.performRequest(createIndexTemplateRequest);
}
private RequestOptions.Builder systemIndexWarningHandlerOptions(String index) {
return RequestOptions.DEFAULT.toBuilder()
.setWarningsHandler(
w -> w.size() > 0
&& w.contains(
"this request accesses system indices: ["
+ index
+ "], but in a future major "
+ "version, direct access to system indices will be prevented by default"
) == false
);
}
}
| FullClusterRestartIT |
java | apache__maven | compat/maven-compat/src/main/java/org/apache/maven/repository/legacy/resolver/transform/DefaultArtifactTransformationManager.java | {
"start": 1600,
"end": 3694
} | class ____ implements ArtifactTransformationManager {
private List<ArtifactTransformation> artifactTransformations;
@Inject
public DefaultArtifactTransformationManager(Map<String, ArtifactTransformation> artifactTransformations) {
this.artifactTransformations = Stream.of("release", "latest", "snapshot")
.map(artifactTransformations::get)
.filter(Objects::nonNull)
.collect(Collectors.toList());
}
@Override
public void transformForResolve(Artifact artifact, RepositoryRequest request)
throws ArtifactResolutionException, ArtifactNotFoundException {
for (ArtifactTransformation transform : artifactTransformations) {
transform.transformForResolve(artifact, request);
}
}
@Override
public void transformForResolve(
Artifact artifact, List<ArtifactRepository> remoteRepositories, ArtifactRepository localRepository)
throws ArtifactResolutionException, ArtifactNotFoundException {
for (ArtifactTransformation transform : artifactTransformations) {
transform.transformForResolve(artifact, remoteRepositories, localRepository);
}
}
@Override
public void transformForInstall(Artifact artifact, ArtifactRepository localRepository)
throws ArtifactInstallationException {
for (ArtifactTransformation transform : artifactTransformations) {
transform.transformForInstall(artifact, localRepository);
}
}
@Override
public void transformForDeployment(
Artifact artifact, ArtifactRepository remoteRepository, ArtifactRepository localRepository)
throws ArtifactDeploymentException {
for (ArtifactTransformation transform : artifactTransformations) {
transform.transformForDeployment(artifact, remoteRepository, localRepository);
}
}
@Override
public List<ArtifactTransformation> getArtifactTransformations() {
return artifactTransformations;
}
}
| DefaultArtifactTransformationManager |
java | quarkusio__quarkus | extensions/funqy/funqy-google-cloud-functions/deployment/src/main/java/io/quarkus/funqy/gcp/functions/deployment/bindings/FunqyCloudFunctionsBuildStep.java | {
"start": 981,
"end": 2459
} | class ____ {
private static final String FEATURE_NAME = "funqy-google-cloud-functions";
@BuildStep
public FeatureBuildItem feature() {
return new FeatureBuildItem(FEATURE_NAME);
}
@BuildStep
public RunTimeConfigurationDefaultBuildItem disableBanner() {
// the banner is not displayed well inside the Google Cloud Function logs
return new RunTimeConfigurationDefaultBuildItem("quarkus.banner.enabled", "false");
}
@BuildStep
@Record(STATIC_INIT)
public void init(List<FunctionBuildItem> functions,
FunqyCloudFunctionsBindingRecorder recorder,
Optional<FunctionInitializedBuildItem> hasFunctions,
BeanContainerBuildItem beanContainer) {
if (!hasFunctions.isPresent())
return;
recorder.init(beanContainer.getValue());
}
@BuildStep
@Record(RUNTIME_INIT)
public void choose(FunqyCloudFunctionsBindingRecorder recorder) {
recorder.chooseInvoker();
}
@BuildStep
public void markObjectMapperUnremovable(BuildProducer<UnremovableBeanBuildItem> unremovable) {
unremovable.produce(new UnremovableBeanBuildItem(
new UnremovableBeanBuildItem.BeanClassNameExclusion(ObjectMapper.class.getName())));
unremovable.produce(new UnremovableBeanBuildItem(
new UnremovableBeanBuildItem.BeanClassNameExclusion(ObjectMapperProducer.class.getName())));
}
}
| FunqyCloudFunctionsBuildStep |
java | spring-projects__spring-framework | spring-aop/src/main/java/org/springframework/aop/framework/AopProxy.java | {
"start": 1239,
"end": 1431
} | class ____.
* @return the new proxy object (never {@code null})
* @see Thread#getContextClassLoader()
*/
Object getProxy();
/**
* Create a new proxy object.
* <p>Uses the given | loader |
java | apache__rocketmq | common/src/main/java/org/apache/rocketmq/common/thread/FutureTaskExtThreadPoolExecutor.java | {
"start": 1181,
"end": 1771
} | class ____ extends ThreadPoolExecutor {
public FutureTaskExtThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime,
TimeUnit unit,
BlockingQueue<Runnable> workQueue,
ThreadFactory threadFactory,
RejectedExecutionHandler handler) {
super(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue, threadFactory, handler);
}
@Override
protected <T> RunnableFuture<T> newTaskFor(final Runnable runnable, final T value) {
return new FutureTaskExt<>(runnable, value);
}
}
| FutureTaskExtThreadPoolExecutor |
java | apache__kafka | connect/mirror-client/src/main/java/org/apache/kafka/connect/mirror/ReplicationPolicy.java | {
"start": 967,
"end": 3659
} | interface ____ {
/**
* Returns the remote topic name for the given topic and source cluster alias.
*/
String formatRemoteTopic(String sourceClusterAlias, String topic);
/**
* Returns the source cluster alias of given topic.
* Returns null if the given topic is not a remote topic.
*/
String topicSource(String topic);
/**
* Return the name of the given topic on the source cluster.
* <p>
* Topics may be replicated multiple hops, so the immediately upstream topic may itself be a remote topic.
* <p>
* Returns null if the given topic is not a remote topic.
*/
String upstreamTopic(String topic);
/**
* Returns the name of the original topic, which may have been replicated multiple hops.
* Returns the topic if it is not a remote topic.
*/
default String originalTopic(String topic) {
String upstream = upstreamTopic(topic);
if (upstream == null || upstream.equals(topic)) {
return topic;
} else {
return originalTopic(upstream);
}
}
/**
* Returns the name of heartbeats topic.
*/
default String heartbeatsTopic() {
return "heartbeats";
}
/**
* Returns the name of the offset-syncs topic for given cluster alias.
*/
default String offsetSyncsTopic(String clusterAlias) {
return "mm2-offset-syncs." + clusterAlias + ".internal";
}
/**
* Returns the name of the checkpoints topic for given cluster alias.
*/
default String checkpointsTopic(String clusterAlias) {
return clusterAlias + ".checkpoints.internal";
}
/**
* Returns true if the topic is a heartbeats topic
*/
default boolean isHeartbeatsTopic(String topic) {
return heartbeatsTopic().equals(originalTopic(topic));
}
/**
* Returns true if the topic is a checkpoints topic.
*/
default boolean isCheckpointsTopic(String topic) {
return topic.endsWith(".checkpoints.internal");
}
/**
* Returns true if the topic is one of MirrorMaker internal topics.
* This is used to make sure the topic doesn't need to be replicated.
*/
default boolean isMM2InternalTopic(String topic) {
return topic.startsWith("mm2") && topic.endsWith(".internal") || isCheckpointsTopic(topic);
}
/**
* Returns true if the topic is considered an internal topic.
*/
default boolean isInternalTopic(String topic) {
boolean isKafkaInternalTopic = topic.startsWith("__") || topic.startsWith(".");
return isMM2InternalTopic(topic) || isKafkaInternalTopic;
}
}
| ReplicationPolicy |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java | {
"start": 18241,
"end": 23442
} | class ____ extends BaseTransition {
// whether the container failed before launched by AM or not.
boolean failedBeforeLaunching = false;
public ContainerStoppedTransition(boolean failedBeforeLaunching) {
this.failedBeforeLaunching = failedBeforeLaunching;
}
public ContainerStoppedTransition() {
this(false);
}
@Override
public void transition(ComponentInstance compInstance,
ComponentInstanceEvent event) {
Component comp = compInstance.component;
ContainerStatus status = event.getStatus();
// status is not available when upgrade fails
String containerDiag = compInstance.getCompInstanceId() + ": " + (
failedBeforeLaunching ? FAILED_BEFORE_LAUNCH_DIAG :
(status != null ? status.getDiagnostics() : UPGRADE_FAILED));
compInstance.diagnostics.append(containerDiag + System.lineSeparator());
compInstance.cancelContainerStatusRetriever();
compInstance.cancelLclRetriever();
if (compInstance.getState().equals(READY)) {
compInstance.component.decContainersReady(true);
}
compInstance.component.decRunningContainers();
// Should we fail (terminate) the service?
boolean shouldFailService = false;
final ServiceScheduler scheduler = comp.getScheduler();
scheduler.getAmRMClient().releaseAssignedContainer(
event.getContainerId());
// Check if it exceeds the failure threshold, but only if health threshold
// monitor is not enabled
if (!comp.isHealthThresholdMonitorEnabled()
&& comp.currentContainerFailure.get()
> comp.maxContainerFailurePerComp) {
String exitDiag = MessageFormat.format(
"[COMPONENT {0}]: Failed {1} times, exceeded the limit - {2}. "
+ "Shutting down now... "
+ System.lineSeparator(), comp.getName(),
comp.currentContainerFailure.get(),
comp.maxContainerFailurePerComp);
compInstance.diagnostics.append(exitDiag);
// append to global diagnostics that will be reported to RM.
scheduler.getDiagnostics().append(containerDiag);
scheduler.getDiagnostics().append(exitDiag);
LOG.warn(exitDiag);
compInstance.getContainerSpec().setState(ContainerState.FAILED);
comp.getComponentSpec().setState(ComponentState.FAILED);
comp.getScheduler().getApp().setState(ServiceState.FAILED);
if (compInstance.timelineServiceEnabled) {
// record in ATS
compInstance.scheduler.getServiceTimelinePublisher()
.componentInstanceFinished(compInstance.getContainer().getId(),
failedBeforeLaunching || status == null ? -1 :
status.getExitStatus(),
ContainerState.FAILED, containerDiag);
// mark other component-instances/containers as STOPPED
for (ContainerId containerId : scheduler.getLiveInstances()
.keySet()) {
if (!compInstance.container.getId().equals(containerId)
&& !isFinalState(compInstance.getContainerSpec().getState())) {
compInstance.getContainerSpec().setState(ContainerState.STOPPED);
compInstance.scheduler.getServiceTimelinePublisher()
.componentInstanceFinished(containerId,
KILLED_AFTER_APP_COMPLETION, ContainerState.STOPPED,
scheduler.getDiagnostics().toString());
}
}
compInstance.scheduler.getServiceTimelinePublisher()
.componentFinished(comp.getComponentSpec(), ComponentState.FAILED,
scheduler.getSystemClock().getTime());
compInstance.scheduler.getServiceTimelinePublisher()
.serviceAttemptUnregistered(comp.getContext(),
FinalApplicationStatus.FAILED,
scheduler.getDiagnostics().toString());
}
shouldFailService = true;
}
if (!failedBeforeLaunching) {
// clean up registry
// If the container failed before launching, no need to cleanup
// registry,
// because it was not registered before.
// hdfs dir content will be overwritten when a new container gets
// started,
// so no need remove.
compInstance.scheduler.executorService.submit(
() -> compInstance.cleanupRegistry(event.getContainerId()));
}
// remove the failed ContainerId -> CompInstance mapping
scheduler.removeLiveCompInstance(event.getContainerId());
// According to component restart policy, handle container restart
// or finish the service (if all components finished)
handleComponentInstanceRelaunch(compInstance, event,
failedBeforeLaunching, containerDiag);
if (shouldFailService) {
scheduler.getTerminationHandler().terminate(-1);
}
}
}
public static boolean isFinalState(ContainerState state) {
return ContainerState.FAILED.equals(state) || ContainerState.STOPPED
.equals(state) || ContainerState.SUCCEEDED.equals(state);
}
private static | ContainerStoppedTransition |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/uniqueconstraint/Building.java | {
"start": 301,
"end": 609
} | class ____ {
public Long height;
private Room room;
public Long getHeight() {
return height;
}
public void setHeight(Long height) {
this.height = height;
}
@ManyToOne(optional = false)
public Room getRoom() {
return room;
}
public void setRoom(Room room) {
this.room = room;
}
}
| Building |
java | apache__camel | components/camel-bean-validator/src/test/java/org/apache/camel/component/bean/validator/CarWithoutAnnotations.java | {
"start": 862,
"end": 1557
} | class ____ implements Car {
private String manufacturer;
private String licensePlate;
public CarWithoutAnnotations(String manufacturer, String licencePlate) {
this.manufacturer = manufacturer;
this.licensePlate = licencePlate;
}
@Override
public String getManufacturer() {
return manufacturer;
}
@Override
public void setManufacturer(String manufacturer) {
this.manufacturer = manufacturer;
}
@Override
public String getLicensePlate() {
return licensePlate;
}
@Override
public void setLicensePlate(String licensePlate) {
this.licensePlate = licensePlate;
}
}
| CarWithoutAnnotations |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/deser/jdk/EnumDeserializer.java | {
"start": 878,
"end": 938
} | class ____ Strings and Integers.
*/
@JacksonStdImpl
public | from |
java | micronaut-projects__micronaut-core | inject-java/src/test/groovy/io/micronaut/inject/beans/inheritance/BankService.java | {
"start": 100,
"end": 154
} | class ____ extends AbstractService<String> {
}
| BankService |
java | quarkusio__quarkus | independent-projects/resteasy-reactive/server/processor/src/main/java/org/jboss/resteasy/reactive/server/processor/ServerEndpointIndexer.java | {
"start": 13091,
"end": 13494
} | interface ____ JAX-RS endpoint defining annotations and also from implementors.
*
* @return method that returns endpoint response
*/
public static MethodInfo findEndpointImplementation(MethodInfo methodInfo, ClassInfo actualEndpointClass, IndexView index) {
// provided that 'actualEndpointClass' is requested from CDI via InstanceHandler factory
// we know that this | with |
java | google__guava | android/guava-testlib/src/com/google/common/collect/testing/NavigableMapTestSuiteBuilder.java | {
"start": 5987,
"end": 7205
} | class ____<K, V>
extends ForwardingTestMapGenerator<K, V> implements TestSortedMapGenerator<K, V> {
DescendingTestMapGenerator(TestSortedMapGenerator<K, V> delegate) {
super(delegate);
}
@Override
public NavigableMap<K, V> create(Object... entries) {
NavigableMap<K, V> map = (NavigableMap<K, V>) delegate.create(entries);
return map.descendingMap();
}
@Override
public Iterable<Entry<K, V>> order(List<Entry<K, V>> insertionOrder) {
insertionOrder = castOrCopyToList(delegate.order(insertionOrder));
reverse(insertionOrder);
return insertionOrder;
}
TestSortedMapGenerator<K, V> delegate() {
return (TestSortedMapGenerator<K, V>) delegate;
}
@Override
public Entry<K, V> belowSamplesLesser() {
return delegate().aboveSamplesGreater();
}
@Override
public Entry<K, V> belowSamplesGreater() {
return delegate().aboveSamplesLesser();
}
@Override
public Entry<K, V> aboveSamplesLesser() {
return delegate().belowSamplesGreater();
}
@Override
public Entry<K, V> aboveSamplesGreater() {
return delegate().belowSamplesLesser();
}
}
}
| DescendingTestMapGenerator |
java | quarkusio__quarkus | independent-projects/tools/analytics-common/src/main/java/io/quarkus/analytics/dto/segment/ContextBuilder.java | {
"start": 3222,
"end": 3798
} | class ____ {
public static final String APP_NAME = "app.name";
public static final String MAVEN_VERSION = "maven.version";
public static final String GRADLE_VERSION = "gradle.version";
public static final String QUARKUS_VERSION = "quarkus.version";
public static final String GRAALVM_VERSION_VERSION = "graalvm.version.version";
public static final String GRAALVM_VERSION_JAVA = "graalvm.version.java";
public static final String GRAALVM_VERSION_DISTRIBUTION = "graalvm.version.distribution";
}
}
| CommonSystemProperties |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/ClassCanBeStaticTest.java | {
"start": 16280,
"end": 16394
} | class ____ {
void f() {}
}
}
""")
.doTest();
}
}
| Inner |
java | apache__logging-log4j2 | log4j-web/src/main/java/org/apache/logging/log4j/web/Log4jWebInitializerImpl.java | {
"start": 2260,
"end": 13300
} | class ____ extends AbstractLifeCycle implements Log4jWebLifeCycle {
private static final String WEB_INF = "/WEB-INF/";
static {
if (Loader.isClassAvailable("org.apache.logging.log4j.core.web.JNDIContextFilter")) {
throw new IllegalStateException("You are using Log4j 2 in a web application with the old, extinct "
+ "log4j-web artifact. This is not supported and could cause serious runtime problems. Please"
+ "remove the log4j-web JAR file from your application.");
}
}
private final Map<String, String> map = new ConcurrentHashMap<>();
private final StrSubstitutor substitutor = new ConfigurationStrSubstitutor(new Interpolator(map));
private final ServletContext servletContext;
private String name;
private NamedContextSelector namedContextSelector;
private LoggerContext loggerContext;
private Log4jWebInitializerImpl(final ServletContext servletContext) {
this.servletContext = servletContext;
this.map.put("hostName", NetUtils.getLocalHostname());
}
/**
* Initializes the Log4jWebLifeCycle attribute of a ServletContext. Those who wish to obtain this object should use
* the {@link org.apache.logging.log4j.web.WebLoggerContextUtils#getWebLifeCycle(javax.servlet.ServletContext)}
* method instead.
*
* @param servletContext
* the ServletContext to initialize
* @return a new Log4jWebLifeCycle
* @since 2.0.1
*/
protected static Log4jWebInitializerImpl initialize(final ServletContext servletContext) {
final Log4jWebInitializerImpl initializer = new Log4jWebInitializerImpl(servletContext);
servletContext.setAttribute(SUPPORT_ATTRIBUTE, initializer);
return initializer;
}
@Override
public synchronized void start() {
if (this.isStopped() || this.isStopping()) {
throw new IllegalStateException("Cannot start this Log4jWebInitializerImpl after it was stopped.");
}
// only do this once
if (this.isInitialized()) {
super.setStarting();
this.name = this.substitutor.replace(this.servletContext.getInitParameter(LOG4J_CONTEXT_NAME));
final String location =
this.substitutor.replace(this.servletContext.getInitParameter(LOG4J_CONFIG_LOCATION));
final boolean isJndi =
"true".equalsIgnoreCase(this.servletContext.getInitParameter(IS_LOG4J_CONTEXT_SELECTOR_NAMED));
if (isJndi) {
this.initializeJndi(location);
} else {
this.initializeNonJndi(location);
}
if (this.loggerContext instanceof AsyncLoggerContext) {
((AsyncLoggerContext) this.loggerContext).setUseThreadLocals(false);
}
this.servletContext.setAttribute(CONTEXT_ATTRIBUTE, this.loggerContext);
super.setStarted();
}
}
private void initializeJndi(final String location) {
final URI configLocation = getConfigURI(location);
if (this.name == null) {
throw new IllegalStateException("A log4jContextName context parameter is required");
}
LoggerContext context;
final LoggerContextFactory factory = LogManager.getFactory();
if (factory instanceof Log4jContextFactory) {
final ContextSelector selector = ((Log4jContextFactory) factory).getSelector();
if (selector instanceof NamedContextSelector) {
this.namedContextSelector = (NamedContextSelector) selector;
context = this.namedContextSelector.locateContext(
this.name, WebLoggerContextUtils.createExternalEntry(this.servletContext), configLocation);
ContextAnchor.THREAD_CONTEXT.set(context);
if (context.isInitialized()) {
context.start();
}
ContextAnchor.THREAD_CONTEXT.remove();
} else {
LOGGER.warn("Potential problem: Selector is not an instance of NamedContextSelector.");
return;
}
} else {
LOGGER.warn("Potential problem: LoggerContextFactory is not an instance of Log4jContextFactory.");
return;
}
this.loggerContext = context;
LOGGER.debug(
"Created logger context for [{}] using [{}].",
this.name,
context.getClass().getClassLoader());
}
private void initializeNonJndi(final String location) {
if (this.name == null) {
this.name = this.servletContext.getServletContextName();
LOGGER.debug("Using the servlet context name \"{}\".", this.name);
}
if (this.name == null) {
this.name = this.servletContext.getContextPath();
LOGGER.debug("Using the servlet context context-path \"{}\".", this.name);
}
if (this.name == null && location == null) {
LOGGER.error("No Log4j context configuration provided. This is very unusual.");
this.name = new SimpleDateFormat("yyyyMMdd_HHmmss.SSS").format(new Date());
}
if (location != null && location.contains(",")) {
final List<URI> uris = getConfigURIs(location);
this.loggerContext = Configurator.initialize(
this.name,
this.getClassLoader(),
uris,
WebLoggerContextUtils.createExternalEntry(this.servletContext));
return;
}
final URI uri = getConfigURI(location);
this.loggerContext = Configurator.initialize(
this.name, this.getClassLoader(), uri, WebLoggerContextUtils.createExternalEntry(this.servletContext));
}
private List<URI> getConfigURIs(final String location) {
final String[] parts = location.split(",");
final List<URI> uris = new ArrayList<>(parts.length);
for (final String part : parts) {
final URI uri = getConfigURI(part);
if (uri != null) {
uris.add(uri);
}
}
return uris;
}
private URI getConfigURI(final String location) {
try {
String configLocation = location;
if (configLocation == null) {
final String[] paths = prefixSet(servletContext.getResourcePaths(WEB_INF), WEB_INF + "log4j2");
LOGGER.debug(
"getConfigURI found resource paths {} in servletContext at [{}]",
Arrays.toString(paths),
WEB_INF);
if (paths.length == 1) {
configLocation = paths[0];
} else if (paths.length > 1) {
final String prefix = WEB_INF + "log4j2-" + this.name + ".";
boolean found = false;
for (final String str : paths) {
if (str.startsWith(prefix)) {
configLocation = str;
found = true;
break;
}
}
if (!found) {
configLocation = paths[0];
}
}
}
if (configLocation != null) {
final URL url = servletContext.getResource(configLocation);
if (url != null) {
final URI uri = url.toURI();
LOGGER.debug("getConfigURI found resource [{}] in servletContext at [{}]", uri, configLocation);
return uri;
}
}
} catch (final Exception ex) {
// Just try passing the location.
}
if (location != null) {
try {
final URI correctedFilePathUri = NetUtils.toURI(location);
LOGGER.debug("getConfigURI found [{}] in servletContext at [{}]", correctedFilePathUri, location);
return correctedFilePathUri;
} catch (final Exception e) {
LOGGER.error("Unable to convert configuration location [{}] to a URI", location, e);
}
}
return null;
}
/**
* Collects strings starting with the given {@code prefix} from the given {@code set}.
*
* @param set a (nullable) set of strings
* @param prefix a prefix to look for in the string set
* @return an array of the matching strings from the given set
*/
@SuppressWarnings("SameParameterValue")
private static String[] prefixSet(final Set<String> set, final String prefix) {
if (set == null) {
return Strings.EMPTY_ARRAY;
}
return set.stream().filter(string -> string.startsWith(prefix)).toArray(String[]::new);
}
@Override
public synchronized boolean stop(final long timeout, final TimeUnit timeUnit) {
if (!this.isStarted() && !this.isStopped()) {
throw new IllegalStateException("Cannot stop this Log4jWebInitializer because it has not started.");
}
// only do this once
if (this.isStarted()) {
this.setStopping();
if (this.loggerContext != null) {
LOGGER.debug("Removing LoggerContext for [{}].", this.name);
this.servletContext.removeAttribute(CONTEXT_ATTRIBUTE);
if (this.namedContextSelector != null) {
this.namedContextSelector.removeContext(this.name);
}
this.loggerContext.stop(timeout, timeUnit);
this.loggerContext.setExternalContext(null);
this.loggerContext = null;
}
this.setStopped();
}
return super.stop(timeout, timeUnit);
}
@Override
public void setLoggerContext() {
if (this.loggerContext != null) {
ContextAnchor.THREAD_CONTEXT.set(this.loggerContext);
}
}
@Override
public void clearLoggerContext() {
ContextAnchor.THREAD_CONTEXT.remove();
}
@Override
public void wrapExecution(final Runnable runnable) {
this.setLoggerContext();
try {
runnable.run();
} finally {
this.clearLoggerContext();
}
}
private ClassLoader getClassLoader() {
try {
// if container is Servlet 3.0, use its getClassLoader method
// this may look odd, but the call below will throw NoSuchMethodError if user is on Servlet 2.5
// we compile against 3.0 to support Log4jServletContainerInitializer, but we don't require 3.0
return this.servletContext.getClassLoader();
} catch (final Throwable ignore) {
// LOG4J2-248: use TCCL if possible
return LoaderUtil.getThreadContextClassLoader();
}
}
}
| Log4jWebInitializerImpl |
java | playframework__playframework | documentation/manual/working/javaGuide/advanced/routing/code/javaguide/binder/controllers/Users.java | {
"start": 255,
"end": 418
} | class ____ extends Controller {
public Result list() {
return ok("List Users");
}
public Result get(Long id) {
return ok("Get user by id");
}
}
| Users |
java | dropwizard__dropwizard | dropwizard-jersey/src/main/java/io/dropwizard/jersey/errors/EarlyEofExceptionMapper.java | {
"start": 650,
"end": 1065
} | class ____ implements ExceptionMapper<EofException> {
private static final Logger LOGGER = LoggerFactory.getLogger(EarlyEofExceptionMapper.class);
@Override
public Response toResponse(EofException e) {
LOGGER.debug("EOF Exception encountered - client disconnected during stream processing.", e);
return Response.status(Response.Status.BAD_REQUEST).build();
}
}
| EarlyEofExceptionMapper |
java | apache__camel | core/camel-support/src/main/java/org/apache/camel/component/ResourceEndpoint.java | {
"start": 1740,
"end": 7598
} | class ____ extends ProcessorEndpoint implements ManagedResourceEndpointMBean {
protected final Logger log = LoggerFactory.getLogger(getClass());
private volatile byte[] buffer;
@UriPath(description = "Path to the resource."
+ " You can prefix with: classpath, file, http, ref, or bean."
+ " classpath, file and http loads the resource using these protocols (classpath is default)."
+ " ref will lookup the resource in the registry."
+ " bean will call a method on a bean to be used as the resource."
+ " For bean you can specify the method name after dot, eg bean:myBean.myMethod.")
@Metadata(required = true, supportFileReference = true)
private String resourceUri;
@UriParam(defaultValue = "true", description = "Sets whether to use resource content cache or not")
private boolean contentCache;
@UriParam(defaultValue = "false", description = "Sets whether the context map should allow access to all details."
+ " By default only the message body and headers can be accessed."
+ " This option can be enabled for full access to the current Exchange and CamelContext."
+ " Doing so impose a potential security risk as this opens access to the full power of CamelContext API.")
private boolean allowContextMapAll;
private final Lock lock = new ReentrantLock();
public ResourceEndpoint() {
}
public ResourceEndpoint(String endpointUri, Component component, String resourceUri) {
super(endpointUri, component);
this.resourceUri = resourceUri;
}
/**
* Gets the resource as an input stream considering the cache flag as well.
* <p/>
* If cache is enabled then the resource content is cached in an internal buffer and this content is returned to
* avoid loading the resource over and over again.
*
* @return the input stream
* @throws IOException is thrown if error loading the content of the resource to the local cache buffer
*/
public InputStream getResourceAsInputStream() throws IOException {
// try to get the resource input stream
if (isContentCache()) {
lock.lock();
try {
if (buffer == null) {
log.debug("Reading resource: {} into the content cache", resourceUri);
try (InputStream is = getResourceAsInputStreamWithoutCache()) {
ByteArrayOutputStream bos = new ByteArrayOutputStream();
IOHelper.copy(IOHelper.buffered(is), bos);
buffer = bos.toByteArray();
}
}
} finally {
lock.unlock();
}
log.debug("Using resource: {} from the content cache", resourceUri);
return new ByteArrayInputStream(buffer);
}
return getResourceAsInputStreamWithoutCache();
}
protected InputStream getResourceAsInputStreamWithoutCache() throws IOException {
return loadResource(resourceUri);
}
/**
* Loads the given resource.
*
* @param uri uri of the resource.
* @return the loaded resource
* @throws IOException is thrown if resource is not found or cannot be loaded
*/
protected InputStream loadResource(String uri) throws IOException {
return ResourceHelper.resolveMandatoryResourceAsInputStream(getCamelContext(), uri);
}
@Override
public boolean isContentCache() {
return contentCache;
}
@Override
public void clearContentCache() {
log.debug("Clearing resource: {} from the content cache", resourceUri);
buffer = null;
}
public boolean isContentCacheCleared() {
return buffer == null;
}
/**
* Whether the context map is limited to only include the message body and headers
*/
public boolean isAllowContextMapAll() {
return allowContextMapAll;
}
/**
* Sets whether the context map should allow access to all details. By default only the message body and headers can
* be accessed. This option can be enabled for full access to the current Exchange and CamelContext. Doing so impose
* a potential security risk as this opens access to the full power of CamelContext API.
*/
public void setAllowContextMapAll(boolean allowContextMapAll) {
this.allowContextMapAll = allowContextMapAll;
}
@Override
public String getCamelId() {
return getCamelContext().getName();
}
@Override
public String getCamelManagementName() {
return getCamelContext().getManagementName();
}
@Override
public String getState() {
return getStatus().name();
}
/**
* Sets whether to use resource content cache or not.
*/
@Override
public void setContentCache(boolean contentCache) {
this.contentCache = contentCache;
}
public String getResourceUri() {
return resourceUri;
}
/**
* Path to the resource.
* <p/>
* You can prefix with: classpath, file, http, ref, or bean. classpath, file and http loads the resource using these
* protocols (classpath is default). ref will lookup the resource in the registry. bean will call a method on a bean
* to be used as the resource. For bean you can specify the method name after dot, eg bean:myBean.myMethod
*
* @param resourceUri the resource path
*/
public void setResourceUri(String resourceUri) {
this.resourceUri = resourceUri;
}
}
| ResourceEndpoint |
java | apache__flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/ExecNodeGraphValidator.java | {
"start": 1163,
"end": 1798
} | class ____ extends AbstractExecNodeExactlyOnceVisitor {
@Override
protected void visitNode(ExecNode<?> node) {
if (node instanceof StreamExecLookupJoin) {
// We need to do this as TemporalTableSourceSpec might be initiated with legacy tables.
StreamExecLookupJoin streamExecLookupJoin = (StreamExecLookupJoin) node;
if (streamExecLookupJoin.getTemporalTableSourceSpec().getTableSourceSpec() == null) {
throw new ValidationException("TemporalTableSourceSpec can not be serialized.");
}
}
super.visitInputs(node);
}
}
| ExecNodeGraphValidator |
java | apache__logging-log4j2 | log4j-jpa/src/main/java/org/apache/logging/log4j/core/appender/db/jpa/AbstractLogEventWrapperEntity.java | {
"start": 1678,
"end": 2110
} | class ____ all of its
* accessor methods (as needed) to map them to the proper table and columns. Accessors you do not want persisted should
* be annotated with {@link Transient @Transient}. All accessors should call {@link #getWrappedEvent()} and delegate the
* call to the underlying event. Users may want to instead extend {@link BasicLogEventEntity}, which takes care of all
* of this for you.
* </p>
* <p>
* The concrete | and |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/parser/creator/JSONCreatorTest7.java | {
"start": 598,
"end": 898
} | class ____ {
private final List<Value> values;
@JSONCreator
public Entity(@JSONField(name = "values") List<Value> values){
this.values = values;
}
public List<Value> getValues() {
return values;
}
}
public static | Entity |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/hive/HiveCreateTableTest_44.java | {
"start": 903,
"end": 3663
} | class ____ extends OracleTest {
public void test_0() throws Exception {
String sql = "--\n-- Sample on record\n" +
"-- {\"bid\":\"360\",\"request_id\":\"12c6b4b7c7d590fc\",\"hour\":\"2018121315\",\"time\":\"1544684400\",\"tagid\":\"2256906\",\"plan_id\":\"636102\",\"sid\":\"288\",\"creative_id\":\"198\",\"cookie_id\":\"7af422884e2fab197c9dfd068181ac0d\",\"ip\":\"117.136.89.121\",\"price\":\"0.2800\",\"bid_price\":\"0.1400\",\"user_agent\":\"Dalvik/2.1.0 (Linux; U; Android 6.0.1; OPPO A57 Build/MMB29M)\",\"refer\":\"\"}\n\n" +
"CREATE EXTERNAL TABLE IF NOT EXISTS `data2`.`table1`(`bid` int, `bid_price` double, `cookie_id` binary, `creative_id` int, `hour` bigint, `ip` string, `plan_id` bigint, `price` double, `refer` binary, `request_id` binary, `sid` int, `tagid` bigint, `time` bigint, `user_agent` string, ) STORED AS JSON LOCATION 'oss://aliyun-oa-query-results-1863300811734283-cn-hangzhou/data/json_data/' TBLPROPERTIES ( \n'skip.header.line.count'='0',\n'recursive.directories'='true');";
List<SQLStatement> statementList = SQLUtils.parseStatements(sql, DbType.hive, SQLParserFeature.KeepComments);
SQLStatement stmt = statementList.get(0);
assertEquals("--\n" +
"-- Sample on record\n" +
"-- {\"bid\":\"360\",\"request_id\":\"12c6b4b7c7d590fc\",\"hour\":\"2018121315\",\"time\":\"1544684400\",\"tagid\":\"2256906\",\"plan_id\":\"636102\",\"sid\":\"288\",\"creative_id\":\"198\",\"cookie_id\":\"7af422884e2fab197c9dfd068181ac0d\",\"ip\":\"117.136.89.121\",\"price\":\"0.2800\",\"bid_price\":\"0.1400\",\"user_agent\":\"Dalvik/2.1.0 (Linux; U; Android 6.0.1; OPPO A57 Build/MMB29M)\",\"refer\":\"\"}\n" +
"CREATE EXTERNAL TABLE IF NOT EXISTS `data2`.`table1` (\n" +
"\t`bid` int,\n" +
"\t`bid_price` double,\n" +
"\t`cookie_id` binary,\n" +
"\t`creative_id` int,\n" +
"\t`hour` bigint,\n" +
"\t`ip` string,\n" +
"\t`plan_id` bigint,\n" +
"\t`price` double,\n" +
"\t`refer` binary,\n" +
"\t`request_id` binary,\n" +
"\t`sid` int,\n" +
"\t`tagid` bigint,\n" +
"\t`time` bigint,\n" +
"\t`user_agent` string\n" +
")\n" +
"STORED AS JSON\n" +
"LOCATION 'oss://aliyun-oa-query-results-1863300811734283-cn-hangzhou/data/json_data/'\n" +
"TBLPROPERTIES (\n" +
"\t'skip.header.line.count' = '0',\n" +
"\t'recursive.directories' = 'true'\n" +
");", stmt.toString());
}
}
| HiveCreateTableTest_44 |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/internal/objects/data/AddressDto.java | {
"start": 700,
"end": 918
} | class ____ {
public int number = 1;
@Override
public int hashCode() {
return Objects.hash(number);
}
@Override
public String toString() {
return "AddressDto [number=" + number + "]";
}
}
| AddressDto |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/index/codec/vectors/diskbbq/ES920DiskBBQVectorsFormatTests.java | {
"start": 2735,
"end": 14790
} | class ____ extends BaseKnnVectorsFormatTestCase {
static {
LogConfigurator.loadLog4jPlugins();
LogConfigurator.configureESLogging(); // native access requires logging to be initialized
}
private KnnVectorsFormat format;
@Before
@Override
public void setUp() throws Exception {
if (rarely()) {
format = new ES920DiskBBQVectorsFormat(
random().nextInt(2 * MIN_VECTORS_PER_CLUSTER, ES920DiskBBQVectorsFormat.MAX_VECTORS_PER_CLUSTER),
random().nextInt(8, ES920DiskBBQVectorsFormat.MAX_CENTROIDS_PER_PARENT_CLUSTER),
DenseVectorFieldMapper.ElementType.FLOAT,
random().nextBoolean()
);
} else {
// run with low numbers to force many clusters with parents
format = new ES920DiskBBQVectorsFormat(
random().nextInt(MIN_VECTORS_PER_CLUSTER, 2 * MIN_VECTORS_PER_CLUSTER),
random().nextInt(MIN_CENTROIDS_PER_PARENT_CLUSTER, 8),
DenseVectorFieldMapper.ElementType.FLOAT,
random().nextBoolean()
);
}
super.setUp();
}
@Override
protected VectorSimilarityFunction randomSimilarity() {
return RandomPicks.randomFrom(
random(),
List.of(
VectorSimilarityFunction.DOT_PRODUCT,
VectorSimilarityFunction.EUCLIDEAN,
VectorSimilarityFunction.MAXIMUM_INNER_PRODUCT
)
);
}
@Override
protected VectorEncoding randomVectorEncoding() {
return VectorEncoding.FLOAT32;
}
@Override
public void testSearchWithVisitedLimit() {
throw new AssumptionViolatedException("ivf doesn't enforce visitation limit");
}
@Override
protected Codec getCodec() {
return TestUtil.alwaysKnnVectorsFormat(format);
}
@Override
protected void assertOffHeapByteSize(LeafReader r, String fieldName) throws IOException {
var fieldInfo = r.getFieldInfos().fieldInfo(fieldName);
if (r instanceof CodecReader codecReader) {
KnnVectorsReader knnVectorsReader = codecReader.getVectorReader();
if (knnVectorsReader instanceof PerFieldKnnVectorsFormat.FieldsReader fieldsReader) {
knnVectorsReader = fieldsReader.getFieldReader(fieldName);
}
var offHeap = knnVectorsReader.getOffHeapByteSize(fieldInfo);
long totalByteSize = offHeap.values().stream().mapToLong(Long::longValue).sum();
assertThat(offHeap.size(), equalTo(3));
assertThat(totalByteSize, equalTo(offHeap.values().stream().mapToLong(Long::longValue).sum()));
} else {
throw new AssertionError("unexpected:" + r.getClass());
}
}
@Override
public void testAdvance() throws Exception {
// TODO re-enable with hierarchical IVF, clustering as it is is flaky
}
public void testToString() {
KnnVectorsFormat format = new ES920DiskBBQVectorsFormat(128, 4);
assertThat(format, hasToString("ES920DiskBBQVectorsFormat(vectorPerCluster=128)"));
}
public void testLimits() {
expectThrows(IllegalArgumentException.class, () -> new ES920DiskBBQVectorsFormat(MIN_VECTORS_PER_CLUSTER - 1, 16));
expectThrows(IllegalArgumentException.class, () -> new ES920DiskBBQVectorsFormat(MAX_VECTORS_PER_CLUSTER + 1, 16));
expectThrows(IllegalArgumentException.class, () -> new ES920DiskBBQVectorsFormat(128, MIN_CENTROIDS_PER_PARENT_CLUSTER - 1));
expectThrows(IllegalArgumentException.class, () -> new ES920DiskBBQVectorsFormat(128, MAX_CENTROIDS_PER_PARENT_CLUSTER + 1));
}
public void testSimpleOffHeapSize() throws IOException {
float[] vector = randomVector(random().nextInt(12, 500));
try (Directory dir = newDirectory(); IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
Document doc = new Document();
doc.add(new KnnFloatVectorField("f", vector, VectorSimilarityFunction.EUCLIDEAN));
w.addDocument(doc);
w.commit();
try (IndexReader reader = DirectoryReader.open(w)) {
LeafReader r = getOnlyLeafReader(reader);
if (r instanceof CodecReader codecReader) {
KnnVectorsReader knnVectorsReader = codecReader.getVectorReader();
if (knnVectorsReader instanceof PerFieldKnnVectorsFormat.FieldsReader fieldsReader) {
knnVectorsReader = fieldsReader.getFieldReader("f");
}
var fieldInfo = r.getFieldInfos().fieldInfo("f");
var offHeap = knnVectorsReader.getOffHeapByteSize(fieldInfo);
assertEquals(3, offHeap.size());
}
}
}
}
public void testDirectIOBackwardsCompatibleRead() throws IOException {
try (Directory dir = newDirectory()) {
IndexWriterConfig bwcConfig = newIndexWriterConfig();
bwcConfig.setCodec(TestUtil.alwaysKnnVectorsFormat(new ES920DiskBBQVectorsFormat() {
@Override
public KnnVectorsWriter fieldsWriter(SegmentWriteState state) throws IOException {
return version0FieldsWriter(state);
}
}));
try (IndexWriter w = new IndexWriter(dir, bwcConfig)) {
// just testing the metadata here, don't need to do anything fancy
float[] vector = randomVector(1024);
Document doc = new Document();
doc.add(new KnnFloatVectorField("f", vector, VectorSimilarityFunction.EUCLIDEAN));
w.addDocument(doc);
w.commit();
try (IndexReader reader = DirectoryReader.open(w)) {
LeafReader r = getOnlyLeafReader(reader);
FloatVectorValues vectorValues = r.getFloatVectorValues("f");
KnnVectorValues.DocIndexIterator iterator = vectorValues.iterator();
assertEquals(0, iterator.nextDoc());
assertArrayEquals(vector, vectorValues.vectorValue(0), 0);
assertEquals(NO_MORE_DOCS, iterator.nextDoc());
}
}
}
}
public void testFewVectorManyTimes() throws IOException {
int numDifferentVectors = random().nextInt(1, 20);
float[][] vectors = new float[numDifferentVectors][];
int dimensions = random().nextInt(12, 500);
for (int i = 0; i < numDifferentVectors; i++) {
vectors[i] = randomVector(dimensions);
}
int numDocs = random().nextInt(100, 10_000);
try (Directory dir = newDirectory(); IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
for (int i = 0; i < numDocs; i++) {
float[] vector = vectors[random().nextInt(numDifferentVectors)];
Document doc = new Document();
doc.add(new KnnFloatVectorField("f", vector, VectorSimilarityFunction.EUCLIDEAN));
w.addDocument(doc);
}
w.commit();
if (rarely()) {
w.forceMerge(1);
}
try (IndexReader reader = DirectoryReader.open(w)) {
List<LeafReaderContext> subReaders = reader.leaves();
for (LeafReaderContext r : subReaders) {
LeafReader leafReader = r.reader();
float[] vector = randomVector(dimensions);
TopDocs topDocs = leafReader.searchNearestVectors(
"f",
vector,
10,
AcceptDocs.fromLiveDocs(leafReader.getLiveDocs(), leafReader.maxDoc()),
Integer.MAX_VALUE
);
assertEquals(Math.min(leafReader.maxDoc(), 10), topDocs.scoreDocs.length);
}
}
}
}
public void testOneRepeatedVector() throws IOException {
int dimensions = random().nextInt(12, 500);
float[] repeatedVector = randomVector(dimensions);
int numDocs = random().nextInt(100, 10_000);
try (Directory dir = newDirectory(); IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
for (int i = 0; i < numDocs; i++) {
float[] vector = random().nextInt(3) == 0 ? repeatedVector : randomVector(dimensions);
Document doc = new Document();
doc.add(new KnnFloatVectorField("f", vector, VectorSimilarityFunction.EUCLIDEAN));
w.addDocument(doc);
}
w.commit();
if (rarely()) {
w.forceMerge(1);
}
try (IndexReader reader = DirectoryReader.open(w)) {
List<LeafReaderContext> subReaders = reader.leaves();
for (LeafReaderContext r : subReaders) {
LeafReader leafReader = r.reader();
float[] vector = randomVector(dimensions);
TopDocs topDocs = leafReader.searchNearestVectors(
"f",
vector,
10,
AcceptDocs.fromLiveDocs(leafReader.getLiveDocs(), leafReader.maxDoc()),
Integer.MAX_VALUE
);
assertEquals(Math.min(leafReader.maxDoc(), 10), topDocs.scoreDocs.length);
}
}
}
}
// this is a modified version of lucene's TestSearchWithThreads test case
public void testWithThreads() throws Exception {
final int numThreads = random().nextInt(2, 5);
final int numSearches = atLeast(100);
final int numDocs = atLeast(1000);
final int dimensions = random().nextInt(12, 500);
try (Directory dir = newDirectory(); IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
for (int docCount = 0; docCount < numDocs; docCount++) {
final Document doc = new Document();
doc.add(new KnnFloatVectorField("f", randomVector(dimensions), VectorSimilarityFunction.EUCLIDEAN));
w.addDocument(doc);
}
w.forceMerge(1);
try (IndexReader reader = DirectoryReader.open(w)) {
final AtomicBoolean failed = new AtomicBoolean();
Thread[] threads = new Thread[numThreads];
for (int threadID = 0; threadID < numThreads; threadID++) {
threads[threadID] = new Thread(() -> {
try {
long totSearch = 0;
for (; totSearch < numSearches && failed.get() == false; totSearch++) {
float[] vector = randomVector(dimensions);
LeafReader leafReader = getOnlyLeafReader(reader);
leafReader.searchNearestVectors(
"f",
vector,
10,
AcceptDocs.fromLiveDocs(leafReader.getLiveDocs(), leafReader.maxDoc()),
Integer.MAX_VALUE
);
}
assertTrue(totSearch > 0);
} catch (Exception exc) {
failed.set(true);
throw new RuntimeException(exc);
}
});
threads[threadID].setDaemon(true);
}
for (Thread t : threads) {
t.start();
}
for (Thread t : threads) {
t.join();
}
}
}
}
}
| ES920DiskBBQVectorsFormatTests |
java | apache__kafka | clients/src/main/java/org/apache/kafka/common/feature/Features.java | {
"start": 3853,
"end": 5619
} | interface ____<V extends BaseVersionRange> {
/**
* Convert the map representation of an object of type <V>, to an object of type <V>.
*
* @param baseVersionRangeMap the map representation of a BaseVersionRange object.
*
* @return the object of type <V>
*/
V fromMap(Map<String, Short> baseVersionRangeMap);
}
private static <V extends BaseVersionRange> Features<V> fromFeaturesMap(
Map<String, Map<String, Short>> featuresMap, MapToBaseVersionRangeConverter<V> converter) {
return new Features<>(featuresMap.entrySet().stream().collect(
Collectors.toMap(
Map.Entry::getKey,
entry -> converter.fromMap(entry.getValue()))));
}
/**
* Converts from a map to Features<SupportedVersionRange>.
*
* @param featuresMap the map representation of a Features<SupportedVersionRange> object,
* generated using the toMap() API.
*
* @return the Features<SupportedVersionRange> object
*/
public static Features<SupportedVersionRange> fromSupportedFeaturesMap(
Map<String, Map<String, Short>> featuresMap) {
return fromFeaturesMap(featuresMap, SupportedVersionRange::fromMap);
}
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
if (!(other instanceof Features)) {
return false;
}
final Features<?> that = (Features<?>) other;
return Objects.equals(this.features, that.features);
}
@Override
public int hashCode() {
return Objects.hash(features);
}
}
| MapToBaseVersionRangeConverter |
java | elastic__elasticsearch | x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/changepoint/ChangeType.java | {
"start": 10053,
"end": 10491
} | class ____ extends AbstractChangePoint {
public static final String NAME = "spike";
public Spike(double pValue, int changePoint) {
super(pValue, changePoint);
}
public Spike(StreamInput in) throws IOException {
super(in);
}
@Override
public String getName() {
return NAME;
}
}
/**
* Indicates a dip occurred
*/
| Spike |
java | apache__camel | components/camel-disruptor/src/main/java/org/apache/camel/component/disruptor/DisruptorConsumer.java | {
"start": 7760,
"end": 9113
} | class ____ extends AbstractLifecycleAwareExchangeEventHandler {
private final int ordinal;
private final int concurrentConsumers;
ConsumerEventHandler(final int ordinal, final int concurrentConsumers) {
this.ordinal = ordinal;
this.concurrentConsumers = concurrentConsumers;
}
@Override
public void onEvent(final ExchangeEvent event, final long sequence, final boolean endOfBatch) throws Exception {
// Consumer threads are managed at the endpoint to achieve the optimal performance.
// However, both multiple consumers (pub-sub style multicasting) as well as 'worker-pool' consumers dividing
// exchanges amongst them are scheduled on their own threads and are provided with all exchanges.
// To prevent duplicate exchange processing by worker-pool event handlers, they are all given an ordinal,
// which can be used to determine whether he should process the exchange, or leave it for his brethren.
//see http://code.google.com/p/disruptor/wiki/FrequentlyAskedQuestions#How_do_you_arrange_a_Disruptor_with_multiple_consumers_so_that_e
if (sequence % concurrentConsumers == ordinal) {
process(event.getSynchronizedExchange());
}
}
}
}
| ConsumerEventHandler |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/issues/AdviceWithPropertyPlaceholderTest.java | {
"start": 1093,
"end": 2350
} | class ____ extends ContextTestSupport {
@Test
public void testAdvicePropertyPlaceholder() throws Exception {
Properties props = new Properties();
props.put("myPattern", "seda*");
props.put("myEnd", "mock:result");
PropertiesComponent pc = context.getPropertiesComponent();
pc.setInitialProperties(props);
AdviceWith.adviceWith(context, null, r -> {
r.mockEndpointsAndSkip("{{myPattern}}");
r.weaveAddLast().to("{{myEnd}}");
});
getMockEndpoint("mock:seda:a").expectedMessageCount(1);
getMockEndpoint("mock:seda:b").expectedMessageCount(1);
getMockEndpoint("mock:seda:c").expectedMessageCount(1);
getMockEndpoint("mock:result").expectedMessageCount(1);
template.sendBody("direct:start", "Hello World");
assertMockEndpointsSatisfied();
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start")
.to("seda:a")
.to("seda:b")
.to("seda:c");
}
};
}
}
| AdviceWithPropertyPlaceholderTest |
java | spring-projects__spring-framework | spring-beans/src/main/java/org/springframework/beans/factory/support/ReplaceOverride.java | {
"start": 1200,
"end": 2573
} | class ____ extends MethodOverride {
private final String methodReplacerBeanName;
private final List<String> typeIdentifiers = new ArrayList<>();
/**
* Construct a new ReplaceOverride.
* @param methodName the name of the method to override
* @param methodReplacerBeanName the bean name of the {@link MethodReplacer}
*/
public ReplaceOverride(String methodName, String methodReplacerBeanName) {
super(methodName);
Assert.notNull(methodReplacerBeanName, "Method replacer bean name must not be null");
this.methodReplacerBeanName = methodReplacerBeanName;
}
/**
* Construct a new ReplaceOverride.
* @param methodName the name of the method to override
* @param methodReplacerBeanName the bean name of the {@link MethodReplacer}
* @param typeIdentifiers a list of type identifiers for parameter types
* @since 6.2.9
*/
public ReplaceOverride(String methodName, String methodReplacerBeanName, List<String> typeIdentifiers) {
super(methodName);
Assert.notNull(methodReplacerBeanName, "Method replacer bean name must not be null");
this.methodReplacerBeanName = methodReplacerBeanName;
this.typeIdentifiers.addAll(typeIdentifiers);
}
/**
* Return the name of the bean implementing MethodReplacer.
*/
public String getMethodReplacerBeanName() {
return this.methodReplacerBeanName;
}
/**
* Add a fragment of a | ReplaceOverride |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest-client/deployment/src/test/java/io/quarkus/rest/client/reactive/provider/ProviderTest.java | {
"start": 769,
"end": 3019
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(HelloResource.class, HelloClient2.class, HelloClient.class, GlobalRequestFilter.class,
GlobalResponseFilter.class, GlobalRequestFilterConstrainedToServer.class,
GlobalFeature.class)
.addAsResource(
new StringAsset(setUrlForClass(HelloClient.class)
+ setUrlForClass(HelloClient2.class)),
"application.properties"));
@RestClient
HelloClient helloClient;
@TestHTTPResource
URI baseUri;
@AfterEach
public void cleanUp() {
GlobalRequestFilter.abort = false;
GlobalFeature.called = false;
}
@Test
void shouldNotRegisterFeatureAutomatically() {
Response response = helloClient.echo("Michał");
assertThat(response.getStatus()).isEqualTo(GlobalResponseFilter.STATUS);
assertThat(GlobalFeature.called).isFalse();
}
@Test
void shouldUseGlobalRequestFilterForInjectedClient() {
GlobalRequestFilter.abort = true;
Response response = helloClient.echo("Michał");
assertThat(response.getStatus()).isEqualTo(GlobalRequestFilter.STATUS);
}
@Test
void shouldUseGlobalResponseFilterForInjectedClient() {
Response response = helloClient.echo("Michał");
assertThat(response.getStatus()).isEqualTo(GlobalResponseFilter.STATUS);
}
@Test
void shouldUseGlobalRequestFilterForBuiltClient() {
GlobalRequestFilter.abort = true;
Response response = helloClient().echo("Michał");
assertThat(response.getStatus()).isEqualTo(GlobalRequestFilter.STATUS);
}
@Test
void shouldUseGlobalResponseFilterForBuiltClient() {
Response response = helloClient().echo("Michał");
assertThat(response.getStatus()).isEqualTo(GlobalResponseFilter.STATUS);
}
private HelloClient helloClient() {
return RestClientBuilder.newBuilder()
.baseUri(baseUri)
.build(HelloClient.class);
}
}
| ProviderTest |
java | bumptech__glide | integration/volley/src/main/java/com/bumptech/glide/integration/volley/VolleyRequestFactory.java | {
"start": 343,
"end": 689
} | interface ____ {
/**
* Returns a Volley request for the given image url. The given future should be put as a listener
* or called when the request completes.
*/
Request<byte[]> create(
String url,
DataCallback<? super InputStream> callback,
Priority priority,
Map<String, String> headers);
}
| VolleyRequestFactory |
java | elastic__elasticsearch | x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldBlockLoaderTests.java | {
"start": 1273,
"end": 3868
} | class ____ extends BlockLoaderTestCase {
public ShapeFieldBlockLoaderTests(Params params) {
super("shape", List.of(new ShapeDataSourceHandler()), params);
}
@Override
public void testBlockLoaderOfMultiField() throws IOException {
// Multi fields are noop for shape.
}
@Override
@SuppressWarnings("unchecked")
protected Object expected(Map<String, Object> fieldMapping, Object value, TestContext testContext) {
if (value instanceof List<?> == false) {
return convert(value);
}
// TODO FieldExtractPreference.EXTRACT_SPATIAL_BOUNDS is currently not covered, it needs special logic
// As a result we always load from source (stored or fallback synthetic) and they should work the same.
var resultList = ((List<Object>) value).stream().map(this::convert).filter(Objects::nonNull).toList();
return maybeFoldList(resultList);
}
private Object convert(Object value) {
if (value instanceof String s) {
return toWKB(fromWKT(s));
}
if (value instanceof Map<?, ?> m) {
return toWKB(fromGeoJson(m));
}
// Malformed values are excluded
return null;
}
private Geometry fromWKT(String s) {
try {
return WellKnownText.fromWKT(StandardValidator.instance(true), false, s);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
@SuppressWarnings("unchecked")
private Geometry fromGeoJson(Map<?, ?> map) {
try {
var parser = new MapXContentParser(
xContentRegistry(),
LoggingDeprecationHandler.INSTANCE,
(Map<String, Object>) map,
XContentType.JSON
);
parser.nextToken();
return GeoJson.fromXContent(StandardValidator.instance(true), false, true, parser);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
private BytesRef toWKB(Geometry geometry) {
return new BytesRef(WellKnownBinary.toWKB(geometry, ByteOrder.LITTLE_ENDIAN));
}
@Override
protected Collection<? extends Plugin> getPlugins() {
var plugin = new LocalStateSpatialPlugin();
plugin.loadExtensions(new ExtensiblePlugin.ExtensionLoader() {
@Override
public <T> List<T> loadExtensions(Class<T> extensionPointType) {
return List.of();
}
});
return Collections.singletonList(plugin);
}
}
| ShapeFieldBlockLoaderTests |
java | junit-team__junit5 | jupiter-tests/src/test/java/org/junit/jupiter/engine/extension/ParameterResolverTests.java | {
"start": 17055,
"end": 17160
} | class ____", testInfo.getDisplayName());
}
}
@SuppressWarnings("JUnitMalformedDeclaration")
static | name |
java | mockito__mockito | mockito-core/src/test/java/org/mockitousage/bugs/ConfusedSignatureTest.java | {
"start": 322,
"end": 918
} | class ____ {
@Test
public void
should_mock_method_which_has_generic_return_type_in_superclass_and_concrete_one_in_interface() {
Sub mock = mock(Sub.class);
// The following line resulted in
// org.mockito.exceptions.misusing.MissingMethodInvocationException:
// when() requires an argument which has to be 'a method call on a mock'.
// Presumably confused by the interface/superclass signatures.
when(mock.getFoo()).thenReturn("Hello");
assertThat(mock.getFoo()).isEqualTo("Hello");
}
public | ConfusedSignatureTest |
java | apache__camel | core/camel-support/src/main/java/org/apache/camel/support/processor/state/FileStateRepository.java | {
"start": 1725,
"end": 1865
} | class ____ a file-based implementation of a {@link StateRepository}.
*/
@ManagedResource(description = "File based state repository")
public | is |
java | elastic__elasticsearch | x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/CliFetchSizeIT.java | {
"start": 463,
"end": 721
} | class ____ extends FetchSizeTestCase {
@ClassRule
public static final ElasticsearchCluster cluster = SqlTestCluster.getCluster();
@Override
protected String getTestRestCluster() {
return cluster.getHttpAddresses();
}
}
| CliFetchSizeIT |
java | hibernate__hibernate-orm | hibernate-envers/src/test/java/org/hibernate/orm/test/envers/integration/components/mappedsuperclass/AuditedEmbeddableWithNoDeclaredDataTest.java | {
"start": 936,
"end": 2397
} | class ____ {
private long id;
@BeforeClassTemplate
public void initData(EntityManagerFactoryScope scope) {
this.id = scope.fromTransaction( entityManager -> {
final EntityWithAuditedEmbeddableWithNoDeclaredData entity = new EntityWithAuditedEmbeddableWithNoDeclaredData();
entity.setName( "Entity 1" );
entity.setValue( new AuditedEmbeddableWithNoDeclaredData( 42 ) );
entityManager.persist( entity );
return entity.getId();
} );
}
@Test
public void testEmbeddableThatExtendsAuditedMappedSuperclass(EntityManagerFactoryScope scope) {
scope.inEntityManager( entityManager -> {
final EntityWithAuditedEmbeddableWithNoDeclaredData entity = entityManager.find(
EntityWithAuditedEmbeddableWithNoDeclaredData.class,
id
);
final AuditReader auditReader = AuditReaderFactory.get( entityManager );
final List<Number> revisions = auditReader.getRevisions( EntityWithAuditedEmbeddableWithNoDeclaredData.class, id );
assertThat( revisions ).hasSize( 1 );
final EntityWithAuditedEmbeddableWithNoDeclaredData entityRevision1 = auditReader.find(
EntityWithAuditedEmbeddableWithNoDeclaredData.class,
id,
revisions.get( 0 )
);
assertThat( entityRevision1.getName() ).isEqualTo( entity.getName() );
// All fields should be audited because the mapped superclass is annotated
assertThat( entityRevision1.getValue().getCode() ).isEqualTo( 42 );
} );
}
}
| AuditedEmbeddableWithNoDeclaredDataTest |
java | netty__netty | example/src/main/java/io/netty/example/http2/helloworld/client/Http2Client.java | {
"start": 2842,
"end": 7607
} | class ____ {
static final boolean SSL = System.getProperty("ssl") != null;
static final String HOST = System.getProperty("host", "127.0.0.1");
static final int PORT = Integer.parseInt(System.getProperty("port", SSL? "8443" : "8080"));
static final String URL = System.getProperty("url", "/whatever");
static final String URL2 = System.getProperty("url2");
static final String URL2DATA = System.getProperty("url2data", "test data!");
public static void main(String[] args) throws Exception {
// Configure SSL.
final SslContext sslCtx;
if (SSL) {
SslProvider provider = OpenSsl.isAlpnSupported() ? SslProvider.OPENSSL : SslProvider.JDK;
sslCtx = SslContextBuilder.forClient()
.sslProvider(provider)
/* NOTE: the cipher filter may not include all ciphers required by the HTTP/2 specification.
* Please refer to the HTTP/2 specification for cipher requirements. */
.ciphers(Http2SecurityUtil.CIPHERS, SupportedCipherSuiteFilter.INSTANCE)
.trustManager(InsecureTrustManagerFactory.INSTANCE)
.applicationProtocolConfig(new ApplicationProtocolConfig(
Protocol.ALPN,
// NO_ADVERTISE is currently the only mode supported by both OpenSsl and JDK providers.
SelectorFailureBehavior.NO_ADVERTISE,
// ACCEPT is currently the only mode supported by both OpenSsl and JDK providers.
SelectedListenerFailureBehavior.ACCEPT,
ApplicationProtocolNames.HTTP_2,
ApplicationProtocolNames.HTTP_1_1))
.build();
} else {
sslCtx = null;
}
EventLoopGroup group = new MultiThreadIoEventLoopGroup(NioIoHandler.newFactory());
Http2ClientInitializer initializer = new Http2ClientInitializer(sslCtx, Integer.MAX_VALUE);
try {
// Configure the client.
Bootstrap b = new Bootstrap();
b.group(group);
b.channel(NioSocketChannel.class);
b.option(ChannelOption.SO_KEEPALIVE, true);
b.remoteAddress(HOST, PORT);
b.handler(initializer);
// Start the client.
Channel channel = b.connect().syncUninterruptibly().channel();
System.out.println("Connected to [" + HOST + ':' + PORT + ']');
// Wait for the HTTP/2 upgrade to occur.
Http2SettingsHandler http2SettingsHandler = initializer.settingsHandler();
http2SettingsHandler.awaitSettings(5, TimeUnit.SECONDS);
HttpResponseHandler responseHandler = initializer.responseHandler();
int streamId = 3;
HttpScheme scheme = SSL ? HttpScheme.HTTPS : HttpScheme.HTTP;
AsciiString hostName = new AsciiString(HOST + ':' + PORT);
System.err.println("Sending request(s)...");
if (URL != null) {
// Create a simple GET request.
FullHttpRequest request = new DefaultFullHttpRequest(HTTP_1_1, GET, URL, Unpooled.EMPTY_BUFFER);
request.headers().add(HttpHeaderNames.HOST, hostName);
request.headers().add(HttpConversionUtil.ExtensionHeaderNames.SCHEME.text(), scheme.name());
request.headers().add(HttpHeaderNames.ACCEPT_ENCODING, HttpHeaderValues.GZIP);
request.headers().add(HttpHeaderNames.ACCEPT_ENCODING, HttpHeaderValues.DEFLATE);
responseHandler.put(streamId, channel.write(request), channel.newPromise());
streamId += 2;
}
if (URL2 != null) {
// Create a simple POST request with a body.
FullHttpRequest request = new DefaultFullHttpRequest(HTTP_1_1, POST, URL2,
wrappedBuffer(URL2DATA.getBytes(CharsetUtil.UTF_8)));
request.headers().add(HttpHeaderNames.HOST, hostName);
request.headers().add(HttpConversionUtil.ExtensionHeaderNames.SCHEME.text(), scheme.name());
request.headers().add(HttpHeaderNames.ACCEPT_ENCODING, HttpHeaderValues.GZIP);
request.headers().add(HttpHeaderNames.ACCEPT_ENCODING, HttpHeaderValues.DEFLATE);
responseHandler.put(streamId, channel.write(request), channel.newPromise());
}
channel.flush();
responseHandler.awaitResponses(5, TimeUnit.SECONDS);
System.out.println("Finished HTTP/2 request(s)");
// Wait until the connection is closed.
channel.close().syncUninterruptibly();
} finally {
group.shutdownGracefully();
}
}
}
| Http2Client |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/resource/transaction/jta/JtaPlatformInaccessibleImpl.java | {
"start": 642,
"end": 1766
} | class ____ implements JtaPlatform {
private final boolean preferExceptions;
public JtaPlatformInaccessibleImpl(boolean preferExceptions) {
this.preferExceptions = preferExceptions;
}
@Override
public @Nullable TransactionManager retrieveTransactionManager() {
if ( preferExceptions ) {
throw new JtaPlatformInaccessibleException();
}
return null;
}
@Override
public @Nullable UserTransaction retrieveUserTransaction() {
if ( preferExceptions ) {
throw new JtaPlatformInaccessibleException();
}
return null;
}
@Override
public @Nullable Object getTransactionIdentifier(Transaction transaction) {
if ( preferExceptions ) {
throw new JtaPlatformInaccessibleException();
}
return null;
}
@Override
public boolean canRegisterSynchronization() {
return false;
}
@Override
public void registerSynchronization(Synchronization synchronization) {
if ( preferExceptions ) {
throw new JtaPlatformInaccessibleException();
}
}
@Override
public int getCurrentStatus() throws SystemException {
return Status.STATUS_NO_TRANSACTION;
}
public static | JtaPlatformInaccessibleImpl |
java | grpc__grpc-java | core/src/main/java/io/grpc/internal/ClientTransportFactory.java | {
"start": 1213,
"end": 3499
} | interface ____ extends Closeable {
/**
* Creates an unstarted transport for exclusive use. Ownership of {@code options} is passed to the
* callee; the caller should not reuse or read from the options after this method is called.
*
* @param serverAddress the address that the transport is connected to
* @param options additional configuration
* @param channelLogger logger for the transport.
*/
ConnectionClientTransport newClientTransport(
SocketAddress serverAddress,
ClientTransportOptions options,
ChannelLogger channelLogger);
/**
* Returns an executor for scheduling provided by the transport. The service should be configured
* to allow cancelled scheduled runnables to be GCed.
*
* <p>The executor should not be used after the factory has been closed. The caller should ensure
* any outstanding tasks are cancelled before the factory is closed. However, it is a
* <a href="https://github.com/grpc/grpc-java/issues/1981">known issue</a> that ClientCallImpl may
* use this executor after close, so implementations should not go out of their way to prevent
* usage.
*/
ScheduledExecutorService getScheduledExecutorService();
/**
* Swaps to a new ChannelCredentials with all other settings unchanged. Returns null if the
* ChannelCredentials is not supported by the current ClientTransportFactory settings.
*/
@CheckReturnValue
@Nullable
SwapChannelCredentialsResult swapChannelCredentials(ChannelCredentials channelCreds);
/**
* Releases any resources.
*
* <p>After this method has been called, it's no longer valid to call
* {@link #newClientTransport}. No guarantees about thread-safety are made.
*/
@Override
void close();
/**
* Returns the {@link SocketAddress} types this transport supports.
*/
Collection<Class<? extends SocketAddress>> getSupportedSocketAddressTypes();
/**
* Options passed to {@link #newClientTransport}. Although it is safe to save this object if
* received, it is generally expected that the useful fields are copied and then the options
* object is discarded. This allows using {@code final} for those fields as well as avoids
* retaining unused objects contained in the options.
*/
final | ClientTransportFactory |
java | apache__flink | flink-table/flink-table-common/src/main/java/org/apache/flink/table/types/utils/LegacyTypeInfoDataTypeConverter.java | {
"start": 5664,
"end": 21178
} | class ____ {
private static final Map<TypeInformation<?>, DataType> typeInfoDataTypeMap = new HashMap<>();
private static final Map<DataType, TypeInformation<?>> dataTypeTypeInfoMap = new HashMap<>();
static {
addMapping(Types.STRING, DataTypes.STRING().bridgedTo(String.class));
addMapping(Types.BOOLEAN, DataTypes.BOOLEAN().bridgedTo(Boolean.class));
addMapping(Types.BYTE, DataTypes.TINYINT().bridgedTo(Byte.class));
addMapping(Types.SHORT, DataTypes.SMALLINT().bridgedTo(Short.class));
addMapping(Types.INT, DataTypes.INT().bridgedTo(Integer.class));
addMapping(Types.LONG, DataTypes.BIGINT().bridgedTo(Long.class));
addMapping(Types.FLOAT, DataTypes.FLOAT().bridgedTo(Float.class));
addMapping(Types.DOUBLE, DataTypes.DOUBLE().bridgedTo(Double.class));
addMapping(Types.BIG_DEC, createLegacyType(DECIMAL, Types.BIG_DEC));
addMapping(Types.LOCAL_DATE, DataTypes.DATE().bridgedTo(LocalDate.class));
addMapping(Types.LOCAL_TIME, DataTypes.TIME(0).bridgedTo(LocalTime.class));
addMapping(Types.LOCAL_DATE_TIME, DataTypes.TIMESTAMP(3).bridgedTo(LocalDateTime.class));
addMapping(
Types.INSTANT,
DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE(3).bridgedTo(Instant.class));
addMapping(Types.SQL_DATE, DataTypes.DATE().bridgedTo(java.sql.Date.class));
addMapping(Types.SQL_TIME, DataTypes.TIME(0).bridgedTo(java.sql.Time.class));
addMapping(Types.SQL_TIMESTAMP, DataTypes.TIMESTAMP(3).bridgedTo(java.sql.Timestamp.class));
addMapping(
TimeIntervalTypeInfo.INTERVAL_MONTHS,
DataTypes.INTERVAL(DataTypes.MONTH()).bridgedTo(Integer.class));
addMapping(
TimeIntervalTypeInfo.INTERVAL_MILLIS,
DataTypes.INTERVAL(DataTypes.SECOND(3)).bridgedTo(Long.class));
addMapping(
PrimitiveArrayTypeInfo.BOOLEAN_PRIMITIVE_ARRAY_TYPE_INFO,
DataTypes.ARRAY(DataTypes.BOOLEAN().notNull().bridgedTo(boolean.class))
.bridgedTo(boolean[].class));
addMapping(
PrimitiveArrayTypeInfo.BYTE_PRIMITIVE_ARRAY_TYPE_INFO,
DataTypes.BYTES().bridgedTo(byte[].class));
addMapping(
PrimitiveArrayTypeInfo.SHORT_PRIMITIVE_ARRAY_TYPE_INFO,
DataTypes.ARRAY(DataTypes.SMALLINT().notNull().bridgedTo(short.class))
.bridgedTo(short[].class));
addMapping(
PrimitiveArrayTypeInfo.INT_PRIMITIVE_ARRAY_TYPE_INFO,
DataTypes.ARRAY(DataTypes.INT().notNull().bridgedTo(int.class))
.bridgedTo(int[].class));
addMapping(
PrimitiveArrayTypeInfo.LONG_PRIMITIVE_ARRAY_TYPE_INFO,
DataTypes.ARRAY(DataTypes.BIGINT().notNull().bridgedTo(long.class))
.bridgedTo(long[].class));
addMapping(
PrimitiveArrayTypeInfo.FLOAT_PRIMITIVE_ARRAY_TYPE_INFO,
DataTypes.ARRAY(DataTypes.FLOAT().notNull().bridgedTo(float.class))
.bridgedTo(float[].class));
addMapping(
PrimitiveArrayTypeInfo.DOUBLE_PRIMITIVE_ARRAY_TYPE_INFO,
DataTypes.ARRAY(DataTypes.DOUBLE().notNull().bridgedTo(double.class))
.bridgedTo(double[].class));
}
private static void addMapping(TypeInformation<?> typeInfo, DataType dataType) {
Preconditions.checkArgument(!typeInfoDataTypeMap.containsKey(typeInfo));
typeInfoDataTypeMap.put(typeInfo, dataType);
dataTypeTypeInfoMap.put(dataType, typeInfo);
}
public static DataType toDataType(TypeInformation<?> typeInfo) {
// time indicators first as their hashCode/equals is shared with those of regular timestamps
if (typeInfo instanceof TimeIndicatorTypeInfo) {
return convertToTimeAttributeType((TimeIndicatorTypeInfo) typeInfo);
}
final DataType foundDataType = typeInfoDataTypeMap.get(typeInfo);
if (foundDataType != null) {
return foundDataType;
}
if (typeInfo instanceof RowTypeInfo) {
return convertToRowType((RowTypeInfo) typeInfo);
} else if (typeInfo instanceof ObjectArrayTypeInfo) {
return convertToArrayType(
typeInfo.getTypeClass(),
((ObjectArrayTypeInfo<?, ?>) typeInfo).getComponentInfo());
} else if (typeInfo instanceof BasicArrayTypeInfo) {
return createLegacyType(ARRAY, typeInfo);
} else if (typeInfo instanceof MultisetTypeInfo) {
return convertToMultisetType(((MultisetTypeInfo<?>) typeInfo).getElementTypeInfo());
} else if (typeInfo instanceof MapTypeInfo) {
return convertToMapType((MapTypeInfo<?, ?>) typeInfo);
} else if (typeInfo instanceof CompositeType || isRowData(typeInfo)) {
return createLegacyType(STRUCTURED_TYPE, typeInfo);
}
return createLegacyType(RAW, typeInfo);
}
public static TypeInformation<?> toLegacyTypeInfo(DataType dataType) {
// time indicators first as their hashCode/equals is shared with those of regular timestamps
if (canConvertToTimeAttributeTypeInfo(dataType)) {
return convertToTimeAttributeTypeInfo(dataType.getLogicalType());
}
// check in the map but relax the nullability constraint as every not null data type can be
// stored in the corresponding nullable type information
final TypeInformation<?> foundTypeInfo =
dataTypeTypeInfoMap.get(
dataType.nullable()
.bridgedTo(primitiveToWrapper(dataType.getConversionClass())));
if (foundTypeInfo != null) {
return foundTypeInfo;
}
// we are relaxing the constraint for DECIMAL, CHAR, VARCHAR, TIMESTAMP_WITHOUT_TIME_ZONE to
// support value literals in legacy planner
LogicalType logicalType = dataType.getLogicalType();
if (logicalType.is(DECIMAL)) {
return Types.BIG_DEC;
} else if (logicalType.is(CHAR)) {
return Types.STRING;
} else if (logicalType.is(VARCHAR)) {
return Types.STRING;
} else if (logicalType.is(VARIANT)) {
return Types.VARIANT;
}
// relax the precision constraint as Timestamp can store the highest precision
else if (logicalType.is(TIMESTAMP_WITHOUT_TIME_ZONE)
&& dataType.getConversionClass() == Timestamp.class) {
return Types.SQL_TIMESTAMP;
}
// relax the precision constraint as LocalDateTime can store the highest precision
else if (logicalType.is(TIMESTAMP_WITHOUT_TIME_ZONE)
&& dataType.getConversionClass() == LocalDateTime.class) {
return Types.LOCAL_DATE_TIME;
}
// convert proctime back
else if (logicalType.is(TIMESTAMP_WITH_LOCAL_TIME_ZONE)
&& dataType.getConversionClass() == Timestamp.class) {
return Types.SQL_TIMESTAMP;
}
// relax the precision constraint as LocalTime can store the highest precision
else if (logicalType.is(TIME_WITHOUT_TIME_ZONE)
&& dataType.getConversionClass() == LocalTime.class) {
return Types.LOCAL_TIME;
} else if (canConvertToLegacyTypeInfo(dataType)) {
return convertToLegacyTypeInfo(dataType);
} else if (canConvertToRowTypeInfo(dataType)) {
return convertToRowTypeInfo((FieldsDataType) dataType);
}
// this could also match for basic array type info but this is covered by legacy type info
else if (canConvertToObjectArrayTypeInfo(dataType)) {
return convertToObjectArrayTypeInfo((CollectionDataType) dataType);
} else if (canConvertToMultisetTypeInfo(dataType)) {
return convertToMultisetTypeInfo((CollectionDataType) dataType);
} else if (canConvertToMapTypeInfo(dataType)) {
return convertToMapTypeInfo((KeyValueDataType) dataType);
}
// makes the raw type accessible in the legacy planner
else if (canConvertToRawTypeInfo(dataType)) {
return convertToRawTypeInfo(dataType);
}
throw new TableException(
String.format(
"Unsupported conversion from data type '%s' (conversion class: %s) to type information. Only data types "
+ "that originated from type information fully support a reverse conversion.",
dataType, dataType.getConversionClass().getName()));
}
private static DataType createLegacyType(
LogicalTypeRoot typeRoot, TypeInformation<?> typeInfo) {
return new AtomicDataType(new LegacyTypeInformationType<>(typeRoot, typeInfo))
.bridgedTo(typeInfo.getTypeClass());
}
private static DataType convertToTimeAttributeType(
TimeIndicatorTypeInfo timeIndicatorTypeInfo) {
if (timeIndicatorTypeInfo.isEventTime()) {
return new AtomicDataType(new TimestampType(true, TimestampKind.ROWTIME, 3))
.bridgedTo(java.sql.Timestamp.class);
} else {
return new AtomicDataType(new LocalZonedTimestampType(true, TimestampKind.PROCTIME, 3))
.bridgedTo(java.time.Instant.class);
}
}
private static boolean canConvertToTimeAttributeTypeInfo(DataType dataType) {
if (dataType.getLogicalType().is(TIMESTAMP_WITHOUT_TIME_ZONE)) {
return ((TimestampType) dataType.getLogicalType()).getKind() != TimestampKind.REGULAR;
} else if (dataType.getLogicalType().is(TIMESTAMP_WITH_LOCAL_TIME_ZONE)) {
return ((LocalZonedTimestampType) dataType.getLogicalType()).getKind()
!= TimestampKind.REGULAR;
} else {
return false;
}
}
private static TypeInformation<?> convertToTimeAttributeTypeInfo(LogicalType type) {
if (isRowtimeAttribute(type)) {
return TimeIndicatorTypeInfo.ROWTIME_INDICATOR;
} else {
return TimeIndicatorTypeInfo.PROCTIME_INDICATOR;
}
}
private static DataType convertToRowType(RowTypeInfo rowTypeInfo) {
final String[] fieldNames = rowTypeInfo.getFieldNames();
final DataTypes.Field[] fields =
IntStream.range(0, rowTypeInfo.getArity())
.mapToObj(
i -> {
DataType fieldType = toDataType(rowTypeInfo.getTypeAt(i));
return DataTypes.FIELD(fieldNames[i], fieldType);
})
.toArray(DataTypes.Field[]::new);
return DataTypes.ROW(fields).bridgedTo(Row.class);
}
private static boolean canConvertToRowTypeInfo(DataType dataType) {
return dataType.getLogicalType().is(ROW)
&& dataType.getConversionClass().equals(Row.class)
&& ((RowType) dataType.getLogicalType())
.getFields().stream().noneMatch(f -> f.getDescription().isPresent());
}
private static TypeInformation<?> convertToRowTypeInfo(FieldsDataType fieldsDataType) {
final RowType rowType = (RowType) fieldsDataType.getLogicalType();
final String[] fieldNames =
rowType.getFields().stream().map(RowType.RowField::getName).toArray(String[]::new);
final TypeInformation<?>[] fieldTypes =
fieldsDataType.getChildren().stream()
.map(LegacyTypeInfoDataTypeConverter::toLegacyTypeInfo)
.toArray(TypeInformation[]::new);
return Types.ROW_NAMED(fieldNames, fieldTypes);
}
private static DataType convertToArrayType(
Class<?> arrayClass, TypeInformation<?> elementTypeInfo) {
return DataTypes.ARRAY(toDataType(elementTypeInfo)).bridgedTo(arrayClass);
}
private static boolean canConvertToObjectArrayTypeInfo(DataType dataType) {
return dataType.getLogicalType().is(ARRAY) && dataType.getConversionClass().isArray();
}
private static TypeInformation<?> convertToObjectArrayTypeInfo(
CollectionDataType collectionDataType) {
// Types.OBJECT_ARRAY would return a basic type info for strings
return ObjectArrayTypeInfo.getInfoFor(
toLegacyTypeInfo(collectionDataType.getElementDataType()));
}
private static DataType convertToMultisetType(TypeInformation<?> elementTypeInfo) {
return DataTypes.MULTISET(toDataType(elementTypeInfo)).bridgedTo(Map.class);
}
private static boolean canConvertToMultisetTypeInfo(DataType dataType) {
return dataType.getLogicalType().is(MULTISET) && dataType.getConversionClass() == Map.class;
}
private static TypeInformation<?> convertToMultisetTypeInfo(
CollectionDataType collectionDataType) {
return new MultisetTypeInfo<>(toLegacyTypeInfo(collectionDataType.getElementDataType()));
}
private static DataType convertToMapType(MapTypeInfo<?, ?> typeInfo) {
return DataTypes.MAP(
toDataType(typeInfo.getKeyTypeInfo()),
toDataType(typeInfo.getValueTypeInfo()))
.bridgedTo(Map.class);
}
private static boolean canConvertToMapTypeInfo(DataType dataType) {
return dataType.getLogicalType().is(MAP) && dataType.getConversionClass() == Map.class;
}
private static TypeInformation<?> convertToMapTypeInfo(KeyValueDataType dataType) {
return Types.MAP(
toLegacyTypeInfo(dataType.getKeyDataType()),
toLegacyTypeInfo(dataType.getValueDataType()));
}
private static boolean canConvertToLegacyTypeInfo(DataType dataType) {
return dataType.getLogicalType() instanceof LegacyTypeInformationType;
}
private static TypeInformation<?> convertToLegacyTypeInfo(DataType dataType) {
return ((LegacyTypeInformationType<?>) dataType.getLogicalType()).getTypeInformation();
}
private static boolean canConvertToRawTypeInfo(DataType dataType) {
final LogicalType type = dataType.getLogicalType();
return type.is(RAW) && dataType.getConversionClass() == type.getDefaultConversion();
}
private static TypeInformation<?> convertToRawTypeInfo(DataType dataType) {
final LogicalType type = dataType.getLogicalType();
if (type instanceof TypeInformationRawType) {
return ((TypeInformationRawType<?>) dataType.getLogicalType()).getTypeInformation();
}
return new GenericTypeInfo<>(type.getDefaultConversion());
}
/**
* Temporary solution to enable tests with type information and internal data structures until
* we drop all legacy types.
*/
private static boolean isRowData(TypeInformation<?> typeInfo) {
if (!(typeInfo instanceof DataTypeQueryable)) {
return false;
}
final DataType dataType = ((DataTypeQueryable) typeInfo).getDataType();
return dataType.getConversionClass() == RowData.class;
}
private LegacyTypeInfoDataTypeConverter() {
// no instantiation
}
}
| LegacyTypeInfoDataTypeConverter |
java | spring-projects__spring-boot | core/spring-boot/src/main/java/org/springframework/boot/admin/SpringApplicationAdminMXBean.java | {
"start": 896,
"end": 1952
} | interface ____ {
/**
* Specify if the application has fully started and is now ready.
* @return {@code true} if the application is ready
* @see org.springframework.boot.context.event.ApplicationReadyEvent
*/
boolean isReady();
/**
* Specify if the application runs in an embedded web container. Return {@code false}
* on a web application that hasn't fully started yet, so it is preferable to wait for
* the application to be {@link #isReady() ready}.
* @return {@code true} if the application runs in an embedded web container
* @see #isReady()
*/
boolean isEmbeddedWebApplication();
/**
* Return the value of the specified key from the application
* {@link org.springframework.core.env.Environment Environment}.
* @param key the property key
* @return the property value or {@code null} if it does not exist
*/
@Nullable String getProperty(String key);
/**
* Shutdown the application.
* @see org.springframework.context.ConfigurableApplicationContext#close()
*/
void shutdown();
}
| SpringApplicationAdminMXBean |
java | apache__camel | components/camel-huawei/camel-huaweicloud-imagerecognition/src/main/java/org/apache/camel/component/huaweicloud/image/ImageRecognitionEndpoint.java | {
"start": 1758,
"end": 10291
} | class ____ extends DefaultEndpoint {
@UriPath(
description = "Name of Image Recognition operation to perform, including celebrityRecognition and tagRecognition",
displayName = "Operation name", label = "producer")
@Metadata(required = true)
private String operation;
@UriParam(description = "Configuration object for cloud service authentication",
displayName = "Service Configuration", secret = true)
@Metadata(required = false)
private ServiceKeys serviceKeys;
@UriParam(description = "Access key for the cloud user", displayName = "Account access key (AK)", secret = true)
@Metadata(required = true)
private String accessKey;
// --- URI-configurable options (exposed to users via @UriParam) ---

// Cloud credentials and target project. Marked secret so values are masked in logs/JMX.
@UriParam(description = "Secret key for the cloud user", displayName = "Account secret key (SK)", secret = true)
@Metadata(required = true)
private String secretKey;
@UriParam(description = "Cloud project ID", displayName = "Project ID")
@Metadata(required = true)
private String projectId;
// Optional HTTP proxy settings, used when the service must be reached through a proxy.
@UriParam(description = "Proxy server ip/hostname", displayName = "Proxy server host")
@Metadata(required = false)
private String proxyHost;
@UriParam(description = "Proxy server port", displayName = "Proxy server port")
@Metadata(required = false)
private int proxyPort;
@UriParam(description = "Proxy authentication user", displayName = "Proxy user", secret = true)
@Metadata(required = false)
private String proxyUser;
@UriParam(description = "Proxy authentication password", displayName = "Proxy password", secret = true)
@Metadata(required = false)
private String proxyPassword;
// Skips TLS certificate validation when true; defaults to false (see defaultValue below).
@UriParam(description = "Ignore SSL verification", displayName = "SSL Verification Ignored",
defaultValue = "false", label = "security")
@Metadata(required = false)
private boolean ignoreSslVerification;
// Service location: 'endpoint' (full URL) takes precedence over 'region' per the descriptions below.
@UriParam(
description = "Image Recognition service region. Currently only cn-north-1 and cn-north-4 are supported. This is lower precedence than endpoint based configuration.",
displayName = "Service region")
@Metadata(required = true)
private String region;
@UriParam(
description = "Fully qualified Image Recognition service url. Carries higher precedence than region based configuration.",
displayName = "Service endpoint")
@Metadata(required = false)
private String endpoint;
// Image input: exactly one of imageContent (Base64) or imageUrl is expected;
// imageContent wins when both are set, per the option descriptions.
@UriParam(
description = "Indicates the Base64 character string converted from the image. The size cannot exceed 10 MB. The image resolution of the narrow sides must be greater than 15 pixels, and that of the wide sides cannot exceed 4096 pixels."
+ "The supported image formats include JPG, PNG, and BMP. \n"
+ "Configure either this parameter or imageUrl, and this one carries higher precedence than imageUrl.",
displayName = "Image in Base64")
@Metadata(required = false)
private String imageContent;
@UriParam(description = "Indicates the URL of an image. The options are as follows:\n"
+ "HTTP/HTTPS URLs on the public network\n"
+ "OBS URLs. To use OBS data, authorization is required, including service authorization, temporary authorization, and anonymous public authorization. For details, see Configuring the Access Permission of OBS. \n"
+ "Configure either this parameter or imageContent, and this one carries lower precedence than imageContent.",
displayName = "Image Url")
@Metadata(required = false)
private String imageUrl;
@UriParam(
description = "Indicates the language of the returned tags when the operation is tagRecognition, including zh and en.",
displayName = "Tag Language", defaultValue = "zh")
@Metadata(required = false)
private String tagLanguage = ImageRecognitionConstants.TAG_LANGUAGE_ZH;
// -1 acts as an "unset" sentinel; presumably the per-operation service default
// (60 for tagRecognition, 0.48 for celebrityRecognition) applies then — confirm in the producer.
@UriParam(description = "Indicates the threshold of confidence.\n"
+ "When the operation is tagRecognition, this parameter ranges from 0 to 100. Tags whose confidence score is lower than the threshold will not be returned. The default value is 60.\n"
+ "When the operation is celebrityRecognition, this parameter ranges from 0 to 1. Labels whose confidence score is lower than the threshold will not be returned. The default value is 0.48.",
displayName = "Threshold of confidence")
@Metadata(required = false)
private float threshold = -1;
@UriParam(description = "Indicates the maximum number of the returned tags when the operation is tagRecognition.",
displayName = "Tag Limit", defaultValue = "50")
@Metadata(required = false)
private int tagLimit = ImageRecognitionConstants.DEFAULT_TAG_LIMIT;
// SDK client handle; not a URI parameter. Presumably used directly when set instead of
// building a client from the options above — confirm against the producer.
private ImageClient imageClient;
/** No-arg constructor. */
public ImageRecognitionEndpoint() {
}

/**
 * Creates an endpoint bound to its owning component.
 *
 * @param uri       the endpoint URI this instance was created from
 * @param operation the image-recognition operation this endpoint performs
 * @param component the component that created this endpoint
 */
public ImageRecognitionEndpoint(String uri, String operation, ImageRecognitionComponent component) {
super(uri, component);
this.operation = operation;
}
/**
 * Creates the producer that carries out the configured image-recognition operation.
 */
@Override
public Producer createProducer() throws Exception {
return new ImageRecognitionProducer(this);
}
/**
 * This endpoint is producer-only; consuming from it is not supported.
 *
 * @throws UnsupportedOperationException always
 */
@Override
public Consumer createConsumer(Processor processor) throws Exception {
throw new UnsupportedOperationException("consumer endpoint is not supported");
}
// Operation name selecting which Image Recognition API call the producer performs.
public String getOperation() {
return operation;
}
public void setOperation(String operation) {
this.operation = operation;
}
// Bundled access/secret key pair; presumably an alternative to the individual
// accessKey/secretKey options — confirm precedence in the producer/client setup.
public ServiceKeys getServiceKeys() {
return serviceKeys;
}
public void setServiceKeys(ServiceKeys serviceKeys) {
this.serviceKeys = serviceKeys;
}
// Cloud credential (AK) and matching secret (SK); both are marked secret in the option metadata.
public String getAccessKey() {
return accessKey;
}
public void setAccessKey(String accessKey) {
this.accessKey = accessKey;
}
public String getSecretKey() {
return secretKey;
}
public void setSecretKey(String secretKey) {
this.secretKey = secretKey;
}
// Cloud project the recognition requests are billed/scoped to.
public String getProjectId() {
return projectId;
}
public void setProjectId(String projectId) {
this.projectId = projectId;
}
// Optional HTTP proxy configuration (host/port plus optional credentials).
public String getProxyHost() {
return proxyHost;
}
public void setProxyHost(String proxyHost) {
this.proxyHost = proxyHost;
}
public int getProxyPort() {
return proxyPort;
}
public void setProxyPort(int proxyPort) {
this.proxyPort = proxyPort;
}
public String getProxyUser() {
return proxyUser;
}
public void setProxyUser(String proxyUser) {
this.proxyUser = proxyUser;
}
public String getProxyPassword() {
return proxyPassword;
}
public void setProxyPassword(String proxyPassword) {
this.proxyPassword = proxyPassword;
}
// Service region (cn-north-1 / cn-north-4); overridden by an explicit endpoint URL when set.
public String getRegion() {
return region;
}
public void setRegion(String region) {
this.region = region;
}
// Fully qualified service URL; takes precedence over the region-based configuration.
public String getEndpoint() {
return endpoint;
}
public void setEndpoint(String endpoint) {
this.endpoint = endpoint;
}
// Base64-encoded image payload; wins over imageUrl when both are configured.
public String getImageContent() {
return imageContent;
}
public void setImageContent(String imageContent) {
this.imageContent = imageContent;
}
// HTTP/HTTPS or OBS URL of the image; used only when imageContent is not set.
public String getImageUrl() {
return imageUrl;
}
public void setImageUrl(String imageUrl) {
this.imageUrl = imageUrl;
}
// Language of returned tags for tagRecognition ("zh" or "en"); defaults to zh.
public String getTagLanguage() {
return tagLanguage;
}
public void setTagLanguage(String tagLanguage) {
this.tagLanguage = tagLanguage;
}
// Confidence threshold; -1 means "not configured" (see field comment for per-operation ranges).
public float getThreshold() {
return threshold;
}
public void setThreshold(float threshold) {
this.threshold = threshold;
}
// Maximum number of tags returned by tagRecognition; defaults to ImageRecognitionConstants.DEFAULT_TAG_LIMIT.
public int getTagLimit() {
return tagLimit;
}
public void setTagLimit(int tagLimit) {
this.tagLimit = tagLimit;
}
// SDK client handle; not exposed as a URI parameter (no @UriParam on the field).
public ImageClient getImageClient() {
return imageClient;
}
public void setImageClient(ImageClient imageClient) {
this.imageClient = imageClient;
}
// When true, TLS certificate verification is skipped; intended for test setups, not production.
public boolean isIgnoreSslVerification() {
return ignoreSslVerification;
}
public void setIgnoreSslVerification(boolean ignoreSslVerification) {
this.ignoreSslVerification = ignoreSslVerification;
}
}
| ImageRecognitionEndpoint |
java | apache__kafka | streams/src/test/java/org/apache/kafka/streams/tests/StreamsEosTest.java | {
"start": 1040,
"end": 3514
} | class ____ {
/**
* args ::= kafka propFileName command
* command := "run" | "process" | "verify"
*/
public static void main(final String[] args) throws IOException {
if (args.length < 2) {
System.err.println("StreamsEosTest are expecting two parameters: propFile, command; but only see " + args.length + " parameter");
Exit.exit(1);
}
final String propFileName = args[0];
final String command = args[1];
final Properties streamsProperties = Utils.loadProps(propFileName);
final String kafka = streamsProperties.getProperty(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG);
final String processingGuarantee = streamsProperties.getProperty(StreamsConfig.PROCESSING_GUARANTEE_CONFIG);
if (kafka == null) {
System.err.println("No bootstrap kafka servers specified in " + StreamsConfig.BOOTSTRAP_SERVERS_CONFIG);
Exit.exit(1);
}
if ("process".equals(command) || "process-complex".equals(command)) {
if (!StreamsConfig.EXACTLY_ONCE_V2.equals(processingGuarantee)) {
System.err.println("processingGuarantee must be " + StreamsConfig.EXACTLY_ONCE_V2);
Exit.exit(1);
}
}
System.out.println("StreamsTest instance started");
System.out.println("kafka=" + kafka);
System.out.println("props=" + streamsProperties);
System.out.println("command=" + command);
System.out.flush();
if (command == null || propFileName == null) {
Exit.exit(-1);
}
switch (command) {
case "run":
EosTestDriver.generate(kafka);
break;
case "process":
new EosTestClient(streamsProperties, false).start();
break;
case "process-complex":
new EosTestClient(streamsProperties, true).start();
break;
case "verify":
EosTestDriver.verify(kafka, false, streamsProperties.getProperty("group.protocol"));
break;
case "verify-complex":
EosTestDriver.verify(kafka, true, streamsProperties.getProperty("group.protocol"));
break;
default:
System.out.println("unknown command: " + command);
System.out.flush();
Exit.exit(-1);
}
}
}
| StreamsEosTest |
java | spring-projects__spring-boot | module/spring-boot-data-mongodb/src/test/java/org/springframework/boot/data/mongodb/autoconfigure/DataMongoReactiveAndBlockingRepositoriesAutoConfigurationTests.java | {
"start": 3093,
"end": 3660
} | class ____ implements ImportSelector {
@Override
public String[] selectImports(AnnotationMetadata importingClassMetadata) {
List<String> names = new ArrayList<>();
for (Class<?> type : new Class<?>[] { MongoAutoConfiguration.class, MongoReactiveAutoConfiguration.class,
DataMongoAutoConfiguration.class, DataMongoRepositoriesAutoConfiguration.class,
DataMongoReactiveAutoConfiguration.class, DataMongoReactiveRepositoriesAutoConfiguration.class }) {
names.add(type.getName());
}
return StringUtils.toStringArray(names);
}
}
}
| Registrar |
java | apache__kafka | storage/src/main/java/org/apache/kafka/storage/internals/log/StorageAction.java | {
"start": 860,
"end": 1046
} | interface ____ used to execute any storage related operations.
*
* @param <T> return type for execute operation
* @param <E> Exception type to be thrown
*/
@FunctionalInterface
public | is |
java | google__dagger | javatests/artifacts/dagger-ksp/java-app/src/main/java/app/AssistedInjectClasses.java | {
"start": 1135,
"end": 1186
} | class ____ {
@Inject
Bar() {}
}
static | Bar |
java | netty__netty | codec-http2/src/main/java/io/netty/handler/codec/http2/UniformStreamByteDistributor.java | {
"start": 1348,
"end": 1592
} | class ____ a minimum chunk size that will be allocated to each stream. While
* fewer streams may be written to in each call to {@link #distribute(int, Writer)}, doing this
* should improve the goodput on each written stream.
*/
public final | uses |
java | elastic__elasticsearch | x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/IncreaseSerializationTests.java | {
"start": 563,
"end": 1467
} | class ____ extends AbstractExpressionSerializationTests<Increase> {
@Override
protected Increase createTestInstance() {
Source source = randomSource();
Expression field = randomChild();
Expression timestamp = randomChild();
return new Increase(source, field, timestamp);
}
@Override
protected Increase mutateInstance(Increase instance) throws IOException {
Source source = randomSource();
Expression field = instance.field();
Expression timestamp = instance.timestamp();
switch (between(0, 1)) {
case 0 -> field = randomValueOtherThan(field, AbstractExpressionSerializationTests::randomChild);
case 1 -> timestamp = randomValueOtherThan(timestamp, AbstractExpressionSerializationTests::randomChild);
}
return new Increase(source, field, timestamp);
}
}
| IncreaseSerializationTests |
java | alibaba__nacos | api/src/test/java/com/alibaba/nacos/api/naming/remote/request/NamingFuzzyWatchSyncRequestTest.java | {
"start": 1306,
"end": 4836
} | class ____ {
private static final String GROUP_KEY_PATTERN = "groupKeyPattern";
private static final String SYNC_TYPE = "syncType";
private static final String SERVICE_KEY = "serviceKey";
private static final String CHANGED_TYPE = "changedType";
private static ObjectMapper mapper;
@BeforeAll
static void setUp() throws Exception {
mapper = new ObjectMapper();
mapper.disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES);
mapper.setSerializationInclusion(JsonInclude.Include.NON_NULL);
}
@Test
void testSerialize() throws JsonProcessingException {
Set<Context> contexts = new HashSet<>();
Context context = Context.build(SERVICE_KEY, CHANGED_TYPE);
contexts.add(context);
NamingFuzzyWatchSyncRequest request = new NamingFuzzyWatchSyncRequest(GROUP_KEY_PATTERN, SYNC_TYPE, contexts);
request.setTotalBatch(2);
request.setCurrentBatch(1);
String json = mapper.writeValueAsString(request);
assertTrue(json.contains("\"groupKeyPattern\":\"" + GROUP_KEY_PATTERN + "\""));
assertTrue(json.contains("\"syncType\":\"" + SYNC_TYPE + "\""));
assertTrue(json.contains("\"contexts\":[{"));
assertTrue(json.contains("\"serviceKey\":\"" + SERVICE_KEY + "\""));
assertTrue(json.contains("\"changedType\":\"" + CHANGED_TYPE + "\""));
assertTrue(json.contains("\"totalBatch\":2"));
assertTrue(json.contains("\"currentBatch\":1"));
assertTrue(json.contains("\"module\":\"" + NAMING_MODULE + "\""));
}
@Test
void testDeserialize() throws JsonProcessingException {
String json = "{\"headers\":{},\"groupKeyPattern\":\"groupKeyPattern\",\"contexts\":[{\"serviceKey\":\"serviceKey\","
+ "\"changedType\":\"changedType\"}],\"totalBatch\":2,\"currentBatch\":1,\"syncType\":\"syncType\",\"module\":\"naming\"}";
NamingFuzzyWatchSyncRequest actual = mapper.readValue(json, NamingFuzzyWatchSyncRequest.class);
assertEquals(GROUP_KEY_PATTERN, actual.getGroupKeyPattern());
assertEquals(SYNC_TYPE, actual.getSyncType());
assertEquals(2, actual.getTotalBatch());
assertEquals(1, actual.getCurrentBatch());
assertEquals(NAMING_MODULE, actual.getModule());
assertEquals(1, actual.getContexts().size());
Context context = actual.getContexts().iterator().next();
assertEquals(SERVICE_KEY, context.getServiceKey());
assertEquals(CHANGED_TYPE, context.getChangedType());
}
@Test
void testBuildSyncNotifyRequest() {
Set<Context> contexts = new HashSet<>();
Context context = Context.build(SERVICE_KEY, CHANGED_TYPE);
contexts.add(context);
NamingFuzzyWatchSyncRequest request = NamingFuzzyWatchSyncRequest.buildSyncNotifyRequest(
GROUP_KEY_PATTERN, SYNC_TYPE, contexts, 3, 2);
assertEquals(GROUP_KEY_PATTERN, request.getGroupKeyPattern());
assertEquals(SYNC_TYPE, request.getSyncType());
assertEquals(3, request.getTotalBatch());
assertEquals(2, request.getCurrentBatch());
assertEquals(1, request.getContexts().size());
Context actualContext = request.getContexts().iterator().next();
assertEquals(SERVICE_KEY, actualContext.getServiceKey());
assertEquals(CHANGED_TYPE, actualContext.getChangedType());
}
} | NamingFuzzyWatchSyncRequestTest |
java | apache__logging-log4j2 | log4j-core-test/src/test/java/org/apache/logging/log4j/core/pattern/RootThrowablePatternConverterTest.java | {
"start": 2042,
"end": 4442
} | class ____ extends AbstractPropertyTest {
PropertyTest() {
super("%rEx", THROWING_METHOD);
}
}
private static final List<String> EXPECTED_FULL_STACK_TRACE_LINES = asList(
"[CIRCULAR REFERENCE: foo.TestFriendlyException: r_c [localized]]",
"Wrapped by: foo.TestFriendlyException: r_c_c [localized]",
" at foo.TestFriendlyException.create(TestFriendlyException.java:0)",
" at foo.TestFriendlyException.create(TestFriendlyException.java:0)",
" ... 3 more",
"Wrapped by: foo.TestFriendlyException: r_c [localized]",
" at foo.TestFriendlyException.create(TestFriendlyException.java:0)",
" at foo.TestFriendlyException.create(TestFriendlyException.java:0)",
" ... 2 more",
" Suppressed: [CIRCULAR REFERENCE: foo.TestFriendlyException: r_c [localized]]",
" Wrapped by: foo.TestFriendlyException: r_c_s [localized]",
" at foo.TestFriendlyException.create(TestFriendlyException.java:0)",
" at foo.TestFriendlyException.create(TestFriendlyException.java:0)",
" ... 3 more",
"Wrapped by: foo.TestFriendlyException: r [localized]",
" at " + TestFriendlyException.NAMED_MODULE_STACK_TRACE_ELEMENT,
" at foo.TestFriendlyException.create(TestFriendlyException.java:0)",
" at foo.TestFriendlyException.<clinit>(TestFriendlyException.java:0)",
" at " + TestFriendlyException.ORG_APACHE_REPLACEMENT_STACK_TRACE_ELEMENT,
" Suppressed: foo.TestFriendlyException: r_s_c [localized]",
" at foo.TestFriendlyException.create(TestFriendlyException.java:0)",
" at foo.TestFriendlyException.create(TestFriendlyException.java:0)",
" ... 3 more",
" Wrapped by: foo.TestFriendlyException: r_s [localized]",
" at foo.TestFriendlyException.create(TestFriendlyException.java:0)",
" at foo.TestFriendlyException.create(TestFriendlyException.java:0)",
" ... 2 more",
" Suppressed: foo.TestFriendlyException: r_s_s [localized]",
" at foo.TestFriendlyException.create(TestFriendlyException.java:0)",
" at foo.TestFriendlyException.create(TestFriendlyException.java:0)",
" ... 3 more");
@Nested
| PropertyTest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.