language stringclasses 1
value | repo stringclasses 60
values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | apache__logging-log4j2 | log4j-core-test/src/test/java/org/apache/logging/log4j/core/message/ExtendedThreadInformationTest.java | {
"start": 1426,
"end": 3250
} | class ____ {
@Test
void testMessage() {
final ThreadDumpMessage msg = new ThreadDumpMessage("Testing");
final String message = msg.getFormattedMessage();
// System.out.print(message);
assertTrue(message.contains(" Id="), "No header");
}
@ParameterizedTest
@EnumSource(Thread.State.class)
void testMessageWithNullStackTrace(final Thread.State state) {
obtainMessageWithMissingStackTrace(state, null);
}
@ParameterizedTest
@EnumSource(Thread.State.class)
void testMessageWithEmptyStackTrace(final Thread.State state) {
obtainMessageWithMissingStackTrace(state, new StackTraceElement[0]);
}
private void obtainMessageWithMissingStackTrace(final Thread.State state, final StackTraceElement[] stackTrace) {
// setup
final String threadName = "the thread name";
final long threadId = 23523L;
final ThreadInfo threadInfo = mock(ThreadInfo.class);
when(threadInfo.getStackTrace()).thenReturn(stackTrace);
when(threadInfo.getThreadName()).thenReturn(threadName);
when(threadInfo.getThreadId()).thenReturn(threadId);
when(threadInfo.isSuspended()).thenReturn(true);
when(threadInfo.isInNative()).thenReturn(true);
when(threadInfo.getThreadState()).thenReturn(state);
// given
final ExtendedThreadInformation sut = new ExtendedThreadInformation(threadInfo);
// when
final StringBuilder result = new StringBuilder();
sut.printThreadInfo(result);
// then
assertThat(result.toString(), containsString(threadName));
assertThat(result.toString(), containsString(state.name()));
assertThat(result.toString(), containsString(String.valueOf(threadId)));
}
}
| ExtendedThreadInformationTest |
java | apache__camel | components/camel-huawei/camel-huaweicloud-dms/src/main/java/org/apache/camel/component/huaweicloud/dms/models/UpdateInstanceRequestBody.java | {
"start": 1025,
"end": 4637
} | class ____ {
@JsonInclude(JsonInclude.Include.NON_NULL)
@JsonProperty(value = "name")
private String name;
@JsonInclude(JsonInclude.Include.NON_NULL)
@JsonProperty(value = "description")
private String description;
@JsonInclude(JsonInclude.Include.NON_NULL)
@JsonProperty(value = "maintain_begin")
private String maintainBegin;
@JsonInclude(JsonInclude.Include.NON_NULL)
@JsonProperty(value = "maintain_end")
private String maintainEnd;
@JsonInclude(JsonInclude.Include.NON_NULL)
@JsonProperty(value = "security_group_id")
private String securityGroupId;
@JsonInclude(JsonInclude.Include.NON_NULL)
@JsonProperty(value = "enable_publicip")
private Boolean enablePublicip;
@JsonInclude(JsonInclude.Include.NON_NULL)
@JsonProperty(value = "publicip_id")
private String publicipId;
public UpdateInstanceRequestBody withName(String name) {
this.name = name;
return this;
}
public UpdateInstanceRequestBody withDescription(String description) {
this.description = description;
return this;
}
public UpdateInstanceRequestBody withMaintainBegin(String maintainBegin) {
this.maintainBegin = maintainBegin;
return this;
}
public UpdateInstanceRequestBody withMaintainEnd(String maintainEnd) {
this.maintainEnd = maintainEnd;
return this;
}
public UpdateInstanceRequestBody withSecurityGroupId(String securityGroupId) {
this.securityGroupId = securityGroupId;
return this;
}
public UpdateInstanceRequestBody withEnablePublicip(Boolean enablePublicip) {
this.enablePublicip = enablePublicip;
return this;
}
public UpdateInstanceRequestBody withPublicipId(String publicipId) {
this.publicipId = publicipId;
return this;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
public String getMaintainBegin() {
return maintainBegin;
}
public void setMaintainBegin(String maintainBegin) {
this.maintainBegin = maintainBegin;
}
public String getMaintainEnd() {
return maintainEnd;
}
public void setMaintainEnd(String maintainEnd) {
this.maintainEnd = maintainEnd;
}
public String getSecurityGroupId() {
return securityGroupId;
}
public void setSecurityGroupId(String securityGroupId) {
this.securityGroupId = securityGroupId;
}
public Boolean getEnablePublicip() {
return enablePublicip;
}
public void setEnablePublicip(Boolean enablePublicip) {
this.enablePublicip = enablePublicip;
}
public String getPublicipId() {
return publicipId;
}
public void setPublicipId(String publicipId) {
this.publicipId = publicipId;
}
@Override
public String toString() {
return "UpdateInstanceRequestBody{" +
"name='" + name + '\'' +
", description='" + description + '\'' +
", maintainBegin='" + maintainBegin + '\'' +
", maintainEnd='" + maintainEnd + '\'' +
", securityGroupId='" + securityGroupId + '\'' +
", enablePublicip=" + enablePublicip +
", publicipId='" + publicipId + '\'' +
'}';
}
}
| UpdateInstanceRequestBody |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/bytecode/enhancement/lazy/proxy/inlinedirtychecking/User.java | {
"start": 588,
"end": 1332
} | class ____ extends BaseEntity {
@Column(unique = true, nullable = false)
@NotNull
private String email;
private String name;
@ManyToMany(fetch = FetchType.LAZY)
@JoinTable(name = "user_in_role", joinColumns = @JoinColumn(name = "userid"), inverseJoinColumns = @JoinColumn(name = "roleid"))
public Set<Role> roles = new HashSet<>();
public String getEmail() {
return email;
}
public void setEmail(String email) {
this.email = email;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public Set<Role> getRoles() {
return roles;
}
public void setRoles(Set<Role> roles) {
this.roles = roles;
}
public void addRole(Role role) {
this.roles.add( role );
}
}
| User |
java | apache__kafka | clients/src/test/java/org/apache/kafka/common/security/oauthbearer/BrokerJwtValidatorTest.java | {
"start": 1468,
"end": 4222
} | class ____ extends JwtValidatorTest {
@Override
protected JwtValidator createJwtValidator(AccessTokenBuilder builder) {
CloseableVerificationKeyResolver resolver = (jws, nestingContext) -> builder.jwk().getKey();
return new BrokerJwtValidator(resolver);
}
@Test
public void testRsaEncryptionAlgorithm() throws Exception {
PublicJsonWebKey jwk = createRsaJwk();
testEncryptionAlgorithm(jwk, AlgorithmIdentifiers.RSA_USING_SHA256);
}
@Test
public void testEcdsaEncryptionAlgorithm() throws Exception {
PublicJsonWebKey jwk = createEcJwk();
testEncryptionAlgorithm(jwk, AlgorithmIdentifiers.ECDSA_USING_P256_CURVE_AND_SHA256);
}
@Test
public void testInvalidEncryptionAlgorithm() throws Exception {
PublicJsonWebKey jwk = createRsaJwk();
assertThrowsWithMessage(InvalidAlgorithmException.class,
() -> testEncryptionAlgorithm(jwk, "fake"),
"fake is an unknown, unsupported or unavailable alg algorithm");
}
@Test
public void testMissingSubShouldBeValid() throws Exception {
String subClaimName = "client_id";
String subject = "otherSub";
PublicJsonWebKey jwk = createRsaJwk();
AccessTokenBuilder tokenBuilder = new AccessTokenBuilder()
.jwk(jwk)
.alg(AlgorithmIdentifiers.RSA_USING_SHA256)
.addCustomClaim(subClaimName, subject)
.subjectClaimName(subClaimName)
.subject(null);
JwtValidator validator = createJwtValidator(tokenBuilder);
Map<String, ?> saslConfigs = getSaslConfigs(SaslConfigs.SASL_OAUTHBEARER_SUB_CLAIM_NAME, subClaimName);
validator.configure(saslConfigs, OAUTHBEARER_MECHANISM, getJaasConfigEntries());
// Validation should succeed (e.g. signature verification) even if sub claim is missing
OAuthBearerToken token = validator.validate(tokenBuilder.build());
assertEquals(subject, token.principalName());
}
private void testEncryptionAlgorithm(PublicJsonWebKey jwk, String alg) throws Exception {
AccessTokenBuilder builder = new AccessTokenBuilder().jwk(jwk).alg(alg);
JwtValidator validator = createJwtValidator(builder);
validator.configure(getSaslConfigs(), OAUTHBEARER_MECHANISM, getJaasConfigEntries());
String accessToken = builder.build();
OAuthBearerToken token = validator.validate(accessToken);
assertEquals(builder.subject(), token.principalName());
assertEquals(builder.issuedAtSeconds() * 1000, token.startTimeMs());
assertEquals(builder.expirationSeconds() * 1000, token.lifetimeMs());
assertEquals(1, token.scope().size());
}
}
| BrokerJwtValidatorTest |
java | elastic__elasticsearch | x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/ttest/TTestStatsBuilder.java | {
"start": 612,
"end": 2966
} | class ____ implements Releasable {
private LongArray counts;
private DoubleArray sums;
private DoubleArray compensations;
private DoubleArray sumOfSqrs;
private DoubleArray sumOfSqrCompensations;
TTestStatsBuilder(BigArrays bigArrays) {
counts = bigArrays.newLongArray(1, true);
boolean success = false;
try {
sums = bigArrays.newDoubleArray(1, true);
compensations = bigArrays.newDoubleArray(1, true);
sumOfSqrs = bigArrays.newDoubleArray(1, true);
sumOfSqrCompensations = bigArrays.newDoubleArray(1, true);
success = true;
} finally {
if (success == false) {
close();
}
}
}
public TTestStats get(long bucket) {
return new TTestStats(counts.get(bucket), sums.get(bucket), sumOfSqrs.get(bucket));
}
public long getSize() {
return counts.size();
}
public void grow(BigArrays bigArrays, long buckets) {
if (buckets >= counts.size()) {
long overSize = BigArrays.overSize(buckets);
counts = bigArrays.resize(counts, overSize);
sums = bigArrays.resize(sums, overSize);
compensations = bigArrays.resize(compensations, overSize);
sumOfSqrs = bigArrays.resize(sumOfSqrs, overSize);
sumOfSqrCompensations = bigArrays.resize(sumOfSqrCompensations, overSize);
}
}
public void addValue(CompensatedSum compSum, CompensatedSum compSumOfSqr, long bucket, double val) {
counts.increment(bucket, 1);
double sum = sums.get(bucket);
double compensation = compensations.get(bucket);
compSum.reset(sum, compensation);
double sumOfSqr = sumOfSqrs.get(bucket);
double sumOfSqrCompensation = sumOfSqrCompensations.get(bucket);
compSumOfSqr.reset(sumOfSqr, sumOfSqrCompensation);
compSum.add(val);
compSumOfSqr.add(val * val);
sums.set(bucket, compSum.value());
compensations.set(bucket, compSum.delta());
sumOfSqrs.set(bucket, compSumOfSqr.value());
sumOfSqrCompensations.set(bucket, compSumOfSqr.delta());
}
@Override
public void close() {
Releasables.close(counts, sums, compensations, sumOfSqrs, sumOfSqrCompensations);
}
}
| TTestStatsBuilder |
java | quarkusio__quarkus | integration-tests/test-extension/tests/src/test/resources-filtered/projects/project-using-test-template-from-extension-with-bytecode-changes/src/test/java/org/acme/TemplatedNormalTest.java | {
"start": 1035,
"end": 1999
} | class ____ the test execution"
+ Arrays.toString(myAnnotations) + " vs " + Arrays.toString(
contextAnnotations));
}
@TestTemplate
@ExtendWith(MyContextProvider.class)
void classloaderIntrospectionTestTemplate(ExtensionContext context) {
ClassLoader loader = this.getClass()
.getClassLoader();
ClassLoader contextLoader = context.getRequiredTestClass()
.getClassLoader();
Assertions.assertEquals(loader, contextLoader,
"The test template is using a different classloader to the actual test.");
}
@TestTemplate
@ExtendWith(MyContextProvider.class)
void contextAnnotationCheckingTestTemplate(ExtensionContext context) {
// We don't expect to see the annotations because we don't have a @QuarkusTest annotation, but the basic test should work
Assertions.assertEquals(true, true);
}
}
| than |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/JavaSandboxLinuxContainerRuntime.java | {
"start": 15869,
"end": 22557
} | class ____{
static final String POLICY_FILE = "java.policy";
static final String SECURITY_DEBUG = " -Djava.security.debug=all";
static final String SECURITY_FLAG = "-Djava.security.manager";
static final String POLICY_APPEND_FLAG = "-Djava.security.policy=";
static final String POLICY_FLAG = POLICY_APPEND_FLAG + "=";
static final String JAVA_CMD = "/bin/java ";
static final String JVM_SECURITY_CMD =
JAVA_CMD + SECURITY_FLAG + " " + POLICY_FLAG;
static final String STRIP_POLICY_FLAG = POLICY_APPEND_FLAG + "[^ ]+";
static final String CONTAINS_JAVA_CMD = "\\$" + JAVA_HOME + JAVA_CMD + ".*";
static final String MULTI_COMMAND_REGEX =
"(?s).*(" + //command read as single line
"(&[^>]|&&)|(\\|{1,2})|(\\|&)|" + //Matches '&','&&','|','||' and '|&'
"(`[^`]+`)|(\\$\\([^)]+\\))|" + //Matches occurrences of $() or ``
"(;)" + //Matches end of statement ';'
").*";
static final String CLEAN_CMD_REGEX =
"(" + SECURITY_FLAG + ")|" +
"(" + STRIP_POLICY_FLAG + ")";
static final String FILE_PERMISSION_FORMAT = " permission "
+ FilePermission.class.getCanonicalName()
+ " \"%1$s" + SEPARATOR + "-\", \"%2$s\";%n";
static final String HADOOP_HOME_PERMISSION = "%ngrant codeBase \"file:"
+ Paths.get(System.getProperty(SYSPROP_HADOOP_HOME_DIR))
+ SEPARATOR + "-\" {%n" +
" permission " + AllPermission.class.getCanonicalName() + ";%n};%n";
static final Logger LOG =
LoggerFactory.getLogger(NMContainerPolicyUtils.class);
/**
* Write new policy file to policyOutStream which will include read access
* to localize resources. Optionally a default policyFilePath can be
* specified to append a custom policy implementation to the new policy file
* @param policyOutStream OutputStream pointing to java.policy file
* @param localDirs Container local directories
* @param resources List of local container resources
* @param conf YARN configuration
* @throws IOException - If policy file generation is unable to read the
* base policy file or if it is unable to create a new policy file.
*/
static void generatePolicyFile(OutputStream policyOutStream,
List<String> localDirs, List<String> groupPolicyPaths,
Map<org.apache.hadoop.fs.Path, List<String>> resources,
Configuration conf)
throws IOException {
String policyFilePath =
conf.get(YarnConfiguration.YARN_CONTAINER_SANDBOX_POLICY);
String filePermissions =
conf.get(YarnConfiguration.YARN_CONTAINER_SANDBOX_FILE_PERMISSIONS,
YarnConfiguration.DEFAULT_YARN_CONTAINER_SANDBOX_FILE_PERMISSIONS);
Set<String> cacheDirs = new HashSet<>();
for(org.apache.hadoop.fs.Path path : resources.keySet()) {
cacheDirs.add(path.getParent().toString());
}
if (groupPolicyPaths != null) {
for(String policyPath : groupPolicyPaths) {
Files.copy(Paths.get(policyPath), policyOutStream);
}
} else if (policyFilePath == null) {
IOUtils.copyBytes(
NMContainerPolicyUtils.class.getResourceAsStream("/" + POLICY_FILE),
policyOutStream, conf, false);
} else {
Files.copy(Paths.get(policyFilePath), policyOutStream);
}
Formatter filePermissionFormat = new Formatter(policyOutStream,
StandardCharsets.UTF_8.name());
filePermissionFormat.format(HADOOP_HOME_PERMISSION);
filePermissionFormat.format("grant {%n");
for(String localDir : localDirs) {
filePermissionFormat.format(
FILE_PERMISSION_FORMAT, localDir, filePermissions);
}
for(String cacheDir : cacheDirs) {
filePermissionFormat.format(
FILE_PERMISSION_FORMAT, cacheDir, filePermissions);
}
filePermissionFormat.format("};%n");
filePermissionFormat.flush();
}
/**
* Modify command to enable the Java Security Manager and specify
* java.policy file. Will modify the passed commands to strip any
* existing java security configurations. Expects a java command to be the
* first and only executable provided in enforcing mode. In passive mode
* any commands with '||' or '&&' will not be modified.
* @param commands List of container commands
* @param env Container environment variables
* @param policyPath Path to the container specific policy file
* @param sandboxMode (enforcing, permissive, disabled) Determines
* whether non-java containers will be launched
* @throws ContainerExecutionException - Exception thrown if
* JVM Sandbox enabled in 'enforcing' mode and a non-java command is
* provided in the list of commands
*/
static void appendSecurityFlags(List<String> commands,
Map<String, String> env, Path policyPath, SandboxMode sandboxMode)
throws ContainerExecutionException {
for(int i = 0; i < commands.size(); i++){
String command = commands.get(i);
if(validateJavaHome(env.get(JAVA_HOME.name()))
&& command.matches(CONTAINS_JAVA_CMD)
&& !command.matches(MULTI_COMMAND_REGEX)){
command = command.replaceAll(CLEAN_CMD_REGEX, "");
String securityString = JVM_SECURITY_CMD + policyPath + " ";
if(LOG.isDebugEnabled()) {
securityString += SECURITY_DEBUG;
}
commands.set(i, command.replaceFirst(JAVA_CMD, securityString));
} else if (sandboxMode == SandboxMode.enforcing){
throw new ContainerExecutionException(
"Only JVM containers permitted in YARN sandbox mode (enforcing). "
+ "The following command can not be executed securely: " + command);
} else if (sandboxMode == SandboxMode.permissive){
LOG.warn("The container will run without the java security manager"
+ " due to an unsupported container command. The command"
+ " will be permitted to run in Sandbox permissive mode: "
+ command);
}
}
}
private static boolean validateJavaHome(String containerJavaHome)
throws ContainerExecutionException{
if (System.getenv(JAVA_HOME.name()) == null) {
throw new ContainerExecutionException(
"JAVA_HOME is not set for NodeManager");
}
if (containerJavaHome == null) {
throw new ContainerExecutionException(
"JAVA_HOME is not set for container");
}
return System.getenv(JAVA_HOME.name()).equals(containerJavaHome);
}
}
}
| NMContainerPolicyUtils |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/consumer/RemoteInputChannelTest.java | {
"start": 92075,
"end": 92869
} | class ____
implements PartitionProducerStateProvider {
private boolean isInvoked;
private final ResultPartitionID partitionId;
TestPartitionProducerStateProvider(ResultPartitionID partitionId) {
this.partitionId = checkNotNull(partitionId);
}
@Override
public void requestPartitionProducerState(
IntermediateDataSetID intermediateDataSetId,
ResultPartitionID resultPartitionId,
Consumer<? super ResponseHandle> responseConsumer) {
assertThat(resultPartitionId).isEqualTo(partitionId);
isInvoked = true;
}
boolean isInvoked() {
return isInvoked;
}
}
private static final | TestPartitionProducerStateProvider |
java | dropwizard__dropwizard | dropwizard-jersey/src/test/java/io/dropwizard/jersey/guava/OptionalFormParamResourceTest.java | {
"start": 724,
"end": 3839
} | class ____ extends AbstractJerseyTest {
@Override
protected Application configure() {
return DropwizardResourceConfig.forTesting()
.register(OptionalFormParamResource.class)
.register(MyMessageParamConverterProvider.class);
}
@Test
void shouldReturnDefaultMessageWhenMessageIsNotPresent() {
final String defaultMessage = "Default Message";
final Response response = target("/optional/message").request().post(Entity.form(new MultivaluedStringMap()));
assertThat(response.readEntity(String.class)).isEqualTo(defaultMessage);
}
@Test
void shouldReturnMessageWhenMessageBlank() {
final Form form = new Form("message", "");
final Response response = target("/optional/message").request().post(Entity.form(form));
assertThat(response.readEntity(String.class)).isEmpty();
}
@Test
void shouldReturnMessageWhenMessageIsPresent() {
final String customMessage = "Custom Message";
final Form form = new Form("message", customMessage);
final Response response = target("/optional/message").request().post(Entity.form(form));
assertThat(response.readEntity(String.class)).isEqualTo(customMessage);
}
@Test
void shouldReturnDefaultMessageWhenMyMessageIsNotPresent() {
final String defaultMessage = "My Default Message";
final Response response = target("/optional/my-message").request().post(Entity.form(new MultivaluedStringMap()));
assertThat(response.readEntity(String.class)).isEqualTo(defaultMessage);
}
@Test
void shouldReturnMyMessageWhenMyMessageIsPresent() {
final String myMessage = "My Message";
final Form form = new Form("mymessage", myMessage);
final Response response = target("/optional/my-message").request().post(Entity.form(form));
assertThat(response.readEntity(String.class)).isEqualTo(myMessage);
}
@Test
void shouldThrowBadRequestExceptionWhenInvalidUUIDIsPresent() {
final String invalidUUID = "invalid-uuid";
final Form form = new Form("uuid", invalidUUID);
final Response response = target("/optional/uuid").request().post(Entity.form(form));
assertThat(response.getStatus()).isEqualTo(Response.Status.BAD_REQUEST.getStatusCode());
}
@Test
void shouldReturnDefaultUUIDWhenUUIDIsNotPresent() {
final String defaultUUID = "d5672fa8-326b-40f6-bf71-d9dacf44bcdc";
final Response response = target("/optional/uuid").request().post(Entity.form(new MultivaluedStringMap()));
assertThat(response.readEntity(String.class)).isEqualTo(defaultUUID);
}
@Test
void shouldReturnUUIDWhenValidUUIDIsPresent() {
final String uuid = "fd94b00d-bd50-46b3-b42f-905a9c9e7d78";
final Form form = new Form("uuid", uuid);
final Response response = target("/optional/uuid").request().post(Entity.form(form));
assertThat(response.readEntity(String.class)).isEqualTo(uuid);
}
@Path("/optional")
public static | OptionalFormParamResourceTest |
java | apache__camel | components/camel-xpath/src/main/java/org/apache/camel/language/xpath/XPathBuilder.java | {
"start": 3828,
"end": 13196
} | class ____ extends ServiceSupport
implements CamelContextAware, Expression, Predicate,
NamespaceAware, ExpressionResultTypeAware {
private static final Logger LOG = LoggerFactory.getLogger(XPathBuilder.class);
private static final String SAXON_OBJECT_MODEL_URI = "http://saxon.sf.net/jaxp/xpath/om";
private static final String SAXON_FACTORY_CLASS_NAME = "net.sf.saxon.xpath.XPathFactoryImpl";
private static final String OBTAIN_ALL_NS_XPATH = "//*/namespace::*";
private static volatile XPathFactory defaultXPathFactory;
private CamelContext camelContext;
private final Lock lock = new ReentrantLock();
private final Queue<XPathExpression> pool = new ConcurrentLinkedQueue<>();
private final Queue<XPathExpression> poolLogNamespaces = new ConcurrentLinkedQueue<>();
private final String text;
private final ThreadLocal<Exchange> exchange = new ThreadLocal<>();
private final MessageVariableResolver variableResolver = new MessageVariableResolver(exchange);
private final Map<String, String> namespaces = new ConcurrentHashMap<>();
private boolean preCompile = true;
private boolean threadSafety;
private volatile XPathFactory xpathFactory;
private volatile Class<?> documentType = Document.class;
// For some reason the default expression of "a/b" on a document such as
// <a><b>1</b><b>2</b></a>
// will evaluate as just "1" by default which is bizarre. So by default
// let's assume XPath expressions result in nodesets.
private volatile Class<?> resultType;
private volatile QName resultQName = XPathConstants.NODESET;
private volatile boolean useSaxon;
private volatile String objectModelUri;
private volatile String factoryClassName;
private volatile DefaultNamespaceContext namespaceContext;
private volatile boolean logNamespaces;
private volatile XPathFunctionResolver functionResolver;
private volatile XPathFunction bodyFunction;
private volatile XPathFunction headerFunction;
private volatile XPathFunction outBodyFunction;
private volatile XPathFunction outHeaderFunction;
private volatile XPathFunction propertiesFunction;
private volatile XPathFunction simpleFunction;
private volatile Expression source;
/**
* @param text The XPath expression
*/
public XPathBuilder(String text) {
this.text = text;
}
/**
* @param text The XPath expression
* @return A new XPathBuilder object
*/
public static XPathBuilder xpath(String text) {
return new XPathBuilder(text);
}
/**
* @param text The XPath expression
* @param resultType The result type that the XPath expression will return.
* @return A new XPathBuilder object
*/
public static XPathBuilder xpath(String text, Class<?> resultType) {
XPathBuilder builder = new XPathBuilder(text);
if (resultType != null) {
builder.setResultType(resultType);
}
return builder;
}
@Override
public void init(CamelContext context) {
if (preCompile && pool.isEmpty()) {
LOG.trace("PreCompiling new XPathExpression and adding to pool during initialization");
try {
XPathExpression xpathExpression = createXPathExpression();
pool.add(xpathExpression);
} catch (XPathExpressionException e) {
throw RuntimeCamelException.wrapRuntimeException(e);
}
}
}
@Override
public String toString() {
return "XPath: " + text;
}
@Override
public CamelContext getCamelContext() {
return camelContext;
}
@Override
public void setCamelContext(CamelContext camelContext) {
this.camelContext = camelContext;
}
@Override
public boolean matches(Exchange exchange) {
try {
Object booleanResult = evaluateAs(exchange, XPathConstants.BOOLEAN);
return exchange.getContext().getTypeConverter().convertTo(Boolean.class, booleanResult);
} finally {
// remove the thread local after usage
this.exchange.remove();
}
}
@Override
public <T> T evaluate(Exchange exchange, Class<T> type) {
try {
Object result = evaluate(exchange);
return exchange.getContext().getTypeConverter().convertTo(type, exchange, result);
} finally {
// remove the thread local after usage
this.exchange.remove();
}
}
/**
* Matches the given xpath using the provided body.
*
* @param context the camel context
* @param body the body
* @return <tt>true</tt> if matches, <tt>false</tt> otherwise
*/
public boolean matches(CamelContext context, Object body) {
ObjectHelper.notNull(context, "CamelContext");
// create a dummy Exchange to use during matching
Exchange dummy = new DefaultExchange(context);
dummy.getIn().setBody(body);
try {
return matches(dummy);
} finally {
// remove the thread local after usage
exchange.remove();
}
}
/**
* Evaluates the given xpath using the provided body.
* <p/>
* The evaluation uses by default {@link javax.xml.xpath.XPathConstants#NODESET} as the type used during xpath
* evaluation. The output from xpath is then afterwards type converted using Camel's type converter to the given
* type.
* <p/>
* If you want to evaluate xpath using a different type, then call {@link #setResultType(Class)} prior to calling
* this evaluate method.
*
* @param context the camel context
* @param body the body
* @param type the type to return
* @return result of the evaluation
*/
public <T> T evaluate(CamelContext context, Object body, Class<T> type) {
ObjectHelper.notNull(context, "CamelContext");
// create a dummy Exchange to use during evaluation
Exchange dummy = new DefaultExchange(context);
dummy.getIn().setBody(body);
try {
return evaluate(dummy, type);
} finally {
// remove the thread local after usage
exchange.remove();
}
}
/**
* Evaluates the given xpath using the provided body as a String return type.
*
* @param context the camel context
* @param body the body
* @return result of the evaluation
*/
public String evaluate(CamelContext context, Object body) {
ObjectHelper.notNull(context, "CamelContext");
// create a dummy Exchange to use during evaluation
Exchange dummy = new DefaultExchange(context);
dummy.getIn().setBody(body);
setResultQName(XPathConstants.STRING);
setResultType(String.class);
try {
return evaluate(dummy, String.class);
} finally {
// remove the thread local after usage
this.exchange.remove();
}
}
// Builder methods
// -------------------------------------------------------------------------
/**
* Sets the expression result type to {@link XPathConstants#BOOLEAN}
*
* @return the current builder
*/
public XPathBuilder booleanResult() {
resultQName = XPathConstants.BOOLEAN;
return this;
}
/**
* Sets the expression result type to {@link XPathConstants#NODE}
*
* @return the current builder
*/
public XPathBuilder nodeResult() {
resultQName = XPathConstants.NODE;
return this;
}
/**
* Sets the expression result type to {@link XPathConstants#NODESET}
*
* @return the current builder
*/
public XPathBuilder nodeSetResult() {
resultQName = XPathConstants.NODESET;
return this;
}
/**
* Sets the expression result type to {@link XPathConstants#NUMBER}
*
* @return the current builder
*/
public XPathBuilder numberResult() {
resultQName = XPathConstants.NUMBER;
return this;
}
/**
* Sets the expression result type to {@link XPathConstants#STRING}
*
* @return the current builder
*/
public XPathBuilder stringResult() {
resultQName = XPathConstants.STRING;
return this;
}
/**
* Sets the expression result type to the given {@code resultType}
*
* @return the current builder
*/
public XPathBuilder resultType(Class<?> resultType) {
setResultType(resultType);
return this;
}
/**
* Sets the object model URI to use
*
* @return the current builder
*/
public XPathBuilder objectModel(String uri) {
// Careful! Setting the Object Model URI this way will set the *Default*
// XPath Factory, which since is a static field,
// will set the XPath Factory system-wide. Decide what to do, as
// changing this behaviour can break compatibility. Provided the
// setObjectModel which changes
// this instance's XPath Factory rather than the static field
this.objectModelUri = uri;
return this;
}
/**
* Sets the factory | XPathBuilder |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/bean/override/mockito/MockitoSpyBeanForFactoryBeanIntegrationTests.java | {
"start": 2738,
"end": 2850
} | class ____ implements TestBean {
@Override
public String hello() {
return "hi";
}
}
static | TestBeanImpl |
java | spring-projects__spring-boot | module/spring-boot-health/src/main/java/org/springframework/boot/health/autoconfigure/actuate/endpoint/AvailabilityProbesHealthEndpointGroups.java | {
"start": 1680,
"end": 5433
} | class ____ implements HealthEndpointGroups, AdditionalPathsMapper {
private final HealthEndpointGroups groups;
private final Map<String, HealthEndpointGroup> probeGroups;
private final Set<String> names;
private static final String LIVENESS = "liveness";
private static final String READINESS = "readiness";
AvailabilityProbesHealthEndpointGroups(HealthEndpointGroups groups, boolean addAdditionalPaths) {
Assert.notNull(groups, "'groups' must not be null");
this.groups = groups;
this.probeGroups = createProbeGroups(addAdditionalPaths);
Set<String> names = new LinkedHashSet<>(groups.getNames());
names.addAll(this.probeGroups.keySet());
this.names = Collections.unmodifiableSet(names);
}
private Map<String, HealthEndpointGroup> createProbeGroups(boolean addAdditionalPaths) {
Map<String, HealthEndpointGroup> probeGroups = new LinkedHashMap<>();
probeGroups.put(LIVENESS, getOrCreateProbeGroup(addAdditionalPaths, LIVENESS, "/livez", "livenessState"));
probeGroups.put(READINESS, getOrCreateProbeGroup(addAdditionalPaths, READINESS, "/readyz", "readinessState"));
return Collections.unmodifiableMap(probeGroups);
}
private HealthEndpointGroup getOrCreateProbeGroup(boolean addAdditionalPath, String name, String path,
String members) {
HealthEndpointGroup group = this.groups.get(name);
if (group != null) {
return determineAdditionalPathForExistingGroup(addAdditionalPath, path, group);
}
AdditionalHealthEndpointPath additionalPath = (!addAdditionalPath) ? null
: AdditionalHealthEndpointPath.of(WebServerNamespace.SERVER, path);
return new AvailabilityProbesHealthEndpointGroup(additionalPath, members);
}
private HealthEndpointGroup determineAdditionalPathForExistingGroup(boolean addAdditionalPath, String path,
HealthEndpointGroup group) {
if (addAdditionalPath && group.getAdditionalPath() == null) {
AdditionalHealthEndpointPath additionalPath = AdditionalHealthEndpointPath.of(WebServerNamespace.SERVER,
path);
return new DelegatingAvailabilityProbesHealthEndpointGroup(group, additionalPath);
}
return group;
}
@Override
public HealthEndpointGroup getPrimary() {
return this.groups.getPrimary();
}
@Override
public Set<String> getNames() {
return this.names;
}
@Override
public @Nullable HealthEndpointGroup get(String name) {
HealthEndpointGroup group = this.groups.get(name);
if (group == null || isProbeGroup(name)) {
group = this.probeGroups.get(name);
}
return group;
}
private boolean isProbeGroup(String name) {
return name.equals(LIVENESS) || name.equals(READINESS);
}
@Override
public @Nullable List<String> getAdditionalPaths(EndpointId endpointId, WebServerNamespace webServerNamespace) {
if (!HealthEndpoint.ID.equals(endpointId)) {
return null;
}
List<String> additionalPaths = new ArrayList<>();
if (this.groups instanceof AdditionalPathsMapper additionalPathsMapper) {
List<String> mappedAdditionalPaths = getAdditionalPaths(endpointId, webServerNamespace,
additionalPathsMapper);
if (mappedAdditionalPaths != null) {
additionalPaths.addAll(mappedAdditionalPaths);
}
}
additionalPaths.addAll(this.probeGroups.values()
.stream()
.map(HealthEndpointGroup::getAdditionalPath)
.filter(Objects::nonNull)
.filter((additionalPath) -> additionalPath.hasNamespace(webServerNamespace))
.map(AdditionalHealthEndpointPath::getValue)
.toList());
return additionalPaths;
}
private static @Nullable List<String> getAdditionalPaths(EndpointId endpointId,
WebServerNamespace webServerNamespace, AdditionalPathsMapper additionalPathsMapper) {
return additionalPathsMapper.getAdditionalPaths(endpointId, webServerNamespace);
}
}
| AvailabilityProbesHealthEndpointGroups |
java | spring-projects__spring-boot | loader/spring-boot-jarmode-tools/src/main/java/org/springframework/boot/jarmode/tools/IndexedLayers.java | {
"start": 1408,
"end": 3982
} | class ____ implements Layers {
private final Map<String, List<String>> layers = new LinkedHashMap<>();
private final String indexFileLocation;
IndexedLayers(String indexFile, String indexFileLocation) {
this.indexFileLocation = indexFileLocation;
String[] lines = Arrays.stream(indexFile.split("\n"))
.map((line) -> line.replace("\r", ""))
.filter(StringUtils::hasText)
.toArray(String[]::new);
List<String> contents = null;
for (String line : lines) {
if (line.startsWith("- ")) {
contents = new ArrayList<>();
this.layers.put(line.substring(3, line.length() - 2), contents);
}
else if (line.startsWith(" - ")) {
Assert.state(contents != null, "Contents must not be null. Check if the index file is malformed!");
contents.add(line.substring(5, line.length() - 1));
}
else {
throw new IllegalStateException("Layer index file is malformed");
}
}
Assert.state(!this.layers.isEmpty(), "Empty layer index file loaded");
}
@Override
public String getApplicationLayerName() {
return getLayer(this.indexFileLocation);
}
@Override
public Iterator<String> iterator() {
return this.layers.keySet().iterator();
}
@Override
public String getLayer(String name) {
for (Map.Entry<String, List<String>> entry : this.layers.entrySet()) {
for (String candidate : entry.getValue()) {
if (candidate.equals(name) || (candidate.endsWith("/") && name.startsWith(candidate))) {
return entry.getKey();
}
}
}
throw new IllegalStateException("No layer defined in index for file '" + name + "'");
}
/**
* Get an {@link IndexedLayers} instance of possible.
* @param context the context
* @return an {@link IndexedLayers} instance or {@code null} if this not a layered
* jar.
*/
static @Nullable IndexedLayers get(Context context) {
try (JarFile jarFile = new JarFile(context.getArchiveFile())) {
Manifest manifest = jarFile.getManifest();
if (manifest == null) {
return null;
}
String indexFileLocation = manifest.getMainAttributes().getValue("Spring-Boot-Layers-Index");
if (indexFileLocation == null) {
return null;
}
ZipEntry entry = jarFile.getEntry(indexFileLocation);
if (entry == null) {
return null;
}
String indexFile = StreamUtils.copyToString(jarFile.getInputStream(entry), StandardCharsets.UTF_8);
return new IndexedLayers(indexFile, indexFileLocation);
}
catch (FileNotFoundException | NoSuchFileException ex) {
return null;
}
catch (IOException ex) {
throw new IllegalStateException(ex);
}
}
}
| IndexedLayers |
java | elastic__elasticsearch | qa/serverless-system-properties/src/test/java/org/elasticsearch/transport/ServerlessTransportHandshakeTests.java | {
"start": 1769,
"end": 5098
} | class ____ extends ESTestCase {
private static ThreadPool threadPool;
@BeforeClass
public static void startThreadPool() {
threadPool = new TestThreadPool(ServerlessTransportHandshakeTests.class.getSimpleName());
}
private final List<TransportService> transportServices = new ArrayList<>();
private TransportService startServices(String nodeNameAndId, Settings settings, TransportInterceptor transportInterceptor) {
TcpTransport transport = new Netty4Transport(
settings,
TransportVersion.current(),
threadPool,
new NetworkService(Collections.emptyList()),
PageCacheRecycler.NON_RECYCLING_INSTANCE,
new NamedWriteableRegistry(Collections.emptyList()),
new NoneCircuitBreakerService(),
new SharedGroupFactory(settings)
);
TransportService transportService = new MockTransportService(
settings,
transport,
threadPool,
transportInterceptor,
(boundAddress) -> DiscoveryNodeUtils.builder(nodeNameAndId)
.name(nodeNameAndId)
.address(boundAddress.publishAddress())
.roles(emptySet())
.version(VersionInformation.CURRENT)
.build(),
null,
Collections.emptySet(),
nodeNameAndId
);
transportService.start();
transportService.acceptIncomingRequests();
transportServices.add(transportService);
return transportService;
}
@After
public void tearDown() throws Exception {
for (TransportService transportService : transportServices) {
transportService.close();
}
super.tearDown();
}
@AfterClass
public static void terminateThreadPool() {
ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS);
// since static must set to null to be eligible for collection
threadPool = null;
}
public void testAcceptsMismatchedServerlessBuildHashWithoutWarning() {
assumeTrue("Current build needs to be a snapshot", Build.current().isSnapshot());
final var transportInterceptorA = new BuildHashModifyingTransportInterceptor();
final var transportInterceptorB = new BuildHashModifyingTransportInterceptor();
final Settings settings = Settings.builder()
.put("cluster.name", "a")
.put(IGNORE_DESERIALIZATION_ERRORS_SETTING.getKey(), true) // suppress assertions to test production error-handling
.build();
final TransportService transportServiceA = startServices("TS_A", settings, transportInterceptorA);
final TransportService transportServiceB = startServices("TS_B", settings, transportInterceptorB);
MockLog.assertThatLogger(() -> {
AbstractSimpleTransportTestCase.connectToNode(transportServiceA, transportServiceB.getLocalNode(), TestProfiles.LIGHT_PROFILE);
assertTrue(transportServiceA.nodeConnected(transportServiceB.getLocalNode()));
},
TransportService.class,
new MockLog.UnseenEventExpectation("incompatible wire format log", TransportService.class.getCanonicalName(), Level.WARN, "*")
);
}
}
| ServerlessTransportHandshakeTests |
java | apache__flink | flink-table/flink-table-common/src/main/java/org/apache/flink/table/types/inference/strategies/ObjectOfInputTypeStrategy.java | {
"start": 3850,
"end": 4288
} | class ____, but was %s.",
classArgumentType.asSummaryString());
throw new ValidationException(errorMessage);
}
final Optional<String> argumentValue = callContext.getArgumentValue(0, String.class);
if (argumentValue.isEmpty()) {
final String errorMessage =
"The first argument must be a non-nullable character string literal representing the | name |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/HazelcastListEndpointBuilderFactory.java | {
"start": 22544,
"end": 22876
} | interface ____
extends
AdvancedHazelcastListEndpointConsumerBuilder,
AdvancedHazelcastListEndpointProducerBuilder {
default HazelcastListEndpointBuilder basic() {
return (HazelcastListEndpointBuilder) this;
}
}
public | AdvancedHazelcastListEndpointBuilder |
java | apache__dubbo | dubbo-common/src/test/java/org/apache/dubbo/common/bytecode/ProxyTest.java | {
"start": 1268,
"end": 2481
} | class ____ {
@Test
void testMain() throws Exception {
Proxy proxy = Proxy.getProxy(ITest.class, ITest.class);
ITest instance = (ITest) proxy.newInstance((proxy1, method, args) -> {
if ("getName".equals(method.getName())) {
assertEquals(args.length, 0);
} else if ("setName".equals(method.getName())) {
assertEquals(args.length, 2);
assertEquals(args[0], "qianlei");
assertEquals(args[1], "hello");
}
return null;
});
assertNull(instance.getName());
instance.setName("qianlei", "hello");
}
@Test
void testCglibProxy() throws Exception {
ITest test = (ITest) Proxy.getProxy(ITest.class).newInstance((proxy, method, args) -> {
return null;
});
Enhancer enhancer = new Enhancer();
enhancer.setSuperclass(test.getClass());
enhancer.setCallback((MethodInterceptor) (obj, method, args, proxy) -> null);
try {
enhancer.create();
} catch (IllegalArgumentException e) {
e.printStackTrace();
Assertions.fail();
}
}
public | ProxyTest |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/bugs/_1881/VehicleDtoMapper.java | {
"start": 703,
"end": 1164
} | class ____ {
private String name;
private VehiclePropertiesDto vehicleProperties;
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public VehiclePropertiesDto getVehicleProperties() {
return vehicleProperties;
}
public void setVehicleProperties(VehiclePropertiesDto vehicleProperties) {
this.vehicleProperties = vehicleProperties;
}
}
| VehicleDto |
java | elastic__elasticsearch | libs/native/src/main/java/org/elasticsearch/nativeaccess/VectorSimilarityFunctions.java | {
"start": 863,
"end": 4289
} | interface ____ {
/**
* Produces a method handle returning the dot product of byte (unsigned int7) vectors.
*
* <p> Unsigned int7 byte vectors have values in the range of 0 to 127 (inclusive).
*
* <p> The type of the method handle will have {@code int} as return type, The type of
* its first and second arguments will be {@code MemorySegment}, whose contents is the
* vector data bytes. The third argument is the length of the vector data.
*/
MethodHandle dotProductHandle7u();
/**
* Produces a method handle which computes the dot product of several byte (unsigned
* int7) vectors. This bulk operation can be used to compute the dot product between a
* single query vector and a number of other vectors.
*
* <p> Unsigned int7 byte vectors have values in the range of 0 to 127 (inclusive).
*
* <p> The type of the method handle will have {@code void} as return type. The type of
* its first and second arguments will be {@code MemorySegment}, the former contains the
* vector data bytes for several vectors, while the latter just a single vector. The
* type of the third argument is an int, representing the dimensions of each vector. The
* type of the fourth argument is an int, representing the number of vectors in the
* first argument. The type of the final argument is a MemorySegment, into which the
* computed dot product float values will be stored.
*/
MethodHandle dotProductHandle7uBulk();
/**
* Produces a method handle returning the square distance of byte (unsigned int7) vectors.
*
* <p> Unsigned int7 byte vectors have values in the range of 0 to 127 (inclusive).
*
* <p> The type of the method handle will have {@code int} as return type, The type of
* its first and second arguments will be {@code MemorySegment}, whose contents is the
* vector data bytes. The third argument is the length of the vector data.
*/
MethodHandle squareDistanceHandle7u();
/**
* Produces a method handle returning the cosine of float32 vectors.
*
* <p> The type of the method handle will have {@code float} as return type, The type of
* its first and second arguments will be {@code MemorySegment}, whose contents is the
* vector data floats. The third argument is the length of the vector data - number of
* 4-byte float32 elements.
*/
MethodHandle cosineHandleFloat32();
/**
* Produces a method handle returning the dot product of float32 vectors.
*
* <p> The type of the method handle will have {@code float} as return type, The type of
* its first and second arguments will be {@code MemorySegment}, whose contents is the
* vector data floats. The third argument is the length of the vector data - number of
* 4-byte float32 elements.
*/
MethodHandle dotProductHandleFloat32();
/**
* Produces a method handle returning the square distance of float32 vectors.
*
* <p> The type of the method handle will have {@code float} as return type, The type of
* its first and second arguments will be {@code MemorySegment}, whose contents is the
* vector data floats. The third argument is the length of the vector data - number of
* 4-byte float32 elements.
*/
MethodHandle squareDistanceHandleFloat32();
}
| VectorSimilarityFunctions |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/dialect/function/UnnestSetReturningFunctionTypeResolver.java | {
"start": 1450,
"end": 8774
} | class ____ implements SetReturningFunctionTypeResolver {
protected final @Nullable String defaultBasicArrayColumnName;
protected final String defaultIndexSelectionExpression;
public UnnestSetReturningFunctionTypeResolver(@Nullable String defaultBasicArrayColumnName, String defaultIndexSelectionExpression) {
this.defaultBasicArrayColumnName = defaultBasicArrayColumnName;
this.defaultIndexSelectionExpression = defaultIndexSelectionExpression;
}
@Override
public AnonymousTupleType<?> resolveTupleType(List<? extends SqmTypedNode<?>> arguments, TypeConfiguration typeConfiguration) {
final SqmTypedNode<?> arrayArgument = arguments.get( 0 );
final SqmExpressible<?> expressible = arrayArgument.getExpressible();
if ( expressible == null ) {
throw new IllegalArgumentException( "Couldn't determine array type of argument to function 'unnest'" );
}
if ( !( expressible.getSqmType() instanceof BasicPluralType<?,?> pluralType ) ) {
throw new IllegalArgumentException( "Argument passed to function 'unnest' is not a BasicPluralType. Found: " + expressible );
}
final BasicType<?> elementType = pluralType.getElementType();
final SqmBindableType<?>[] componentTypes;
final String[] componentNames;
if ( elementType.getJdbcType() instanceof AggregateJdbcType aggregateJdbcType
&& aggregateJdbcType.getEmbeddableMappingType() != null ) {
final EmbeddableMappingType embeddableMappingType = aggregateJdbcType.getEmbeddableMappingType();
componentTypes = determineComponentTypes( embeddableMappingType );
componentNames = new String[componentTypes.length];
final int numberOfAttributeMappings = embeddableMappingType.getNumberOfAttributeMappings();
int index = 0;
for ( int i = 0; i < numberOfAttributeMappings; i++ ) {
final AttributeMapping attributeMapping = embeddableMappingType.getAttributeMapping( i );
if ( attributeMapping.getMappedType() instanceof SqmExpressible<?> ) {
componentNames[index++] = attributeMapping.getAttributeName();
}
}
assert index == componentNames.length - 1;
componentTypes[index] = typeConfiguration.getBasicTypeForJavaType( Long.class );
componentNames[index] = CollectionPart.Nature.INDEX.getName();
}
else {
componentTypes = new SqmBindableType<?>[]{ elementType, typeConfiguration.getBasicTypeForJavaType( Long.class ) };
componentNames = new String[]{ CollectionPart.Nature.ELEMENT.getName(), CollectionPart.Nature.INDEX.getName() };
}
return new AnonymousTupleType<>( componentTypes, componentNames );
}
@Override
public SelectableMapping[] resolveFunctionReturnType(
List<? extends SqlAstNode> arguments,
String tableIdentifierVariable,
boolean lateral,
boolean withOrdinality,
SqmToSqlAstConverter converter) {
final Expression expression = (Expression) arguments.get( 0 );
final JdbcMappingContainer expressionType = expression.getExpressionType();
if ( expressionType == null ) {
throw new IllegalArgumentException( "Couldn't determine array type of argument to function 'unnest'" );
}
if ( !( expressionType.getSingleJdbcMapping() instanceof BasicPluralType<?,?> pluralType ) ) {
throw new IllegalArgumentException( "Argument passed to function 'unnest' is not a BasicPluralType. Found: " + expressionType );
}
final SelectableMapping indexMapping = withOrdinality ? new SelectableMappingImpl(
"",
defaultIndexSelectionExpression,
new SelectablePath( CollectionPart.Nature.INDEX.getName() ),
null,
null,
null,
null,
null,
null,
null,
null,
false,
false,
false,
false,
false,
false,
converter.getCreationContext().getTypeConfiguration().getBasicTypeForJavaType( Long.class )
) : null;
final BasicType<?> elementType = pluralType.getElementType();
final SelectableMapping[] returnType;
if ( elementType.getJdbcType() instanceof AggregateJdbcType aggregateJdbcType
&& aggregateJdbcType.getEmbeddableMappingType() != null ) {
final EmbeddableMappingType embeddableMappingType = aggregateJdbcType.getEmbeddableMappingType();
final int jdbcValueCount = embeddableMappingType.getJdbcValueCount();
returnType = new SelectableMapping[jdbcValueCount + (indexMapping == null ? 0 : 1)];
for ( int i = 0; i < jdbcValueCount; i++ ) {
final SelectableMapping selectableMapping = embeddableMappingType.getJdbcValueSelectable( i );
final String selectableName = selectableMapping.getSelectableName();
returnType[i] = new SelectableMappingImpl(
selectableMapping.getContainingTableExpression(),
selectableName,
new SelectablePath( selectableName ),
null,
null,
selectableMapping.getColumnDefinition(),
selectableMapping.getLength(),
selectableMapping.getArrayLength(),
selectableMapping.getPrecision(),
selectableMapping.getScale(),
selectableMapping.getTemporalPrecision(),
selectableMapping.isLob(),
true,
false,
false,
false,
selectableMapping.isFormula(),
selectableMapping.getJdbcMapping()
);
if ( indexMapping != null ) {
returnType[jdbcValueCount] = indexMapping;
}
}
}
else {
final String elementSelectionExpression = defaultBasicArrayColumnName == null
? tableIdentifierVariable
: defaultBasicArrayColumnName;
final SelectableMapping elementMapping;
if ( expressionType instanceof SqlTypedMapping typedMapping ) {
elementMapping = new SelectableMappingImpl(
"",
elementSelectionExpression,
new SelectablePath( CollectionPart.Nature.ELEMENT.getName() ),
null,
null,
typedMapping.getColumnDefinition(),
typedMapping.getLength(),
typedMapping.getArrayLength(),
typedMapping.getPrecision(),
typedMapping.getScale(),
typedMapping.getTemporalPrecision(),
typedMapping.isLob(),
true,
false,
false,
false,
false,
elementType
);
}
else {
elementMapping = new SelectableMappingImpl(
"",
elementSelectionExpression,
new SelectablePath( CollectionPart.Nature.ELEMENT.getName() ),
null,
null,
null,
null,
null,
null,
null,
null,
false,
true,
false,
false,
false,
false,
elementType
);
}
if ( indexMapping == null ) {
returnType = new SelectableMapping[]{ elementMapping };
}
else {
returnType = new SelectableMapping[] {elementMapping, indexMapping};
}
}
return returnType;
}
private static SqmBindableType<?>[] determineComponentTypes(EmbeddableMappingType embeddableMappingType) {
final int numberOfAttributeMappings = embeddableMappingType.getNumberOfAttributeMappings();
final ArrayList<SqmBindableType<?>> expressibles = new ArrayList<>( numberOfAttributeMappings + 1 );
for ( int i = 0; i < numberOfAttributeMappings; i++ ) {
final AttributeMapping attributeMapping = embeddableMappingType.getAttributeMapping( i );
final MappingType mappedType = attributeMapping.getMappedType();
if ( mappedType instanceof SqmBindableType<?> sqmExpressible ) {
expressibles.add( sqmExpressible );
}
}
return expressibles.toArray( new SqmBindableType<?>[expressibles.size() + 1] );
}
}
| UnnestSetReturningFunctionTypeResolver |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java | {
"start": 41332,
"end": 41906
} | class ____ extends AbstractService {
StagingDirCleaningService() {
super(StagingDirCleaningService.class.getName());
}
@Override
protected void serviceStop() throws Exception {
try {
if(isLastAMRetry) {
cleanupStagingDir();
} else {
LOG.info("Skipping cleaning up the staging dir. "
+ "assuming AM will be retried.");
}
} catch (IOException io) {
LOG.error("Failed to cleanup staging dir: ", io);
}
super.serviceStop();
}
}
public | StagingDirCleaningService |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/cluster/RedisClusterPubSubAsyncCommandsImpl.java | {
"start": 2097,
"end": 3475
} | class ____<K, V> extends RedisPubSubAsyncCommandsImpl<K, V>
implements RedisClusterPubSubAsyncCommands<K, V> {
/**
* Initialize a new connection.
*
* @param connection the connection .
* @param codec Codec used to encode/decode keys and values.
*/
public RedisClusterPubSubAsyncCommandsImpl(StatefulRedisPubSubConnection<K, V> connection, RedisCodec<K, V> codec) {
super(connection, codec);
}
@Override
public StatefulRedisClusterPubSubConnectionImpl<K, V> getStatefulConnection() {
return (StatefulRedisClusterPubSubConnectionImpl<K, V>) super.getStatefulConnection();
}
@SuppressWarnings("unchecked")
@Override
public PubSubAsyncNodeSelection<K, V> nodes(Predicate<RedisClusterNode> predicate) {
PubSubAsyncNodeSelection<K, V> selection = new StaticPubSubAsyncNodeSelection<>(getStatefulConnection(), predicate);
NodeSelectionInvocationHandler h = new NodeSelectionInvocationHandler((AbstractNodeSelection<?, ?, ?, ?>) selection,
RedisPubSubAsyncCommands.class, ASYNC);
return (PubSubAsyncNodeSelection<K, V>) Proxy.newProxyInstance(NodeSelectionSupport.class.getClassLoader(),
new Class<?>[] { NodeSelectionPubSubAsyncCommands.class, PubSubAsyncNodeSelection.class }, h);
}
private static | RedisClusterPubSubAsyncCommandsImpl |
java | elastic__elasticsearch | x-pack/plugin/text-structure/src/test/java/org/elasticsearch/xpack/textstructure/structurefinder/XmlTextStructureFinderTests.java | {
"start": 437,
"end": 2254
} | class ____ extends TextStructureTestCase {
private final TextStructureFinderFactory factory = new XmlTextStructureFinderFactory();
public void testCreateConfigsGivenGoodXml() throws Exception {
assertTrue(factory.canCreateFromSample(explanation, XML_SAMPLE, 0.0));
String charset = randomFrom(POSSIBLE_CHARSETS);
Boolean hasByteOrderMarker = randomHasByteOrderMarker(charset);
TextStructureFinder structureFinder = factory.createFromSample(
explanation,
XML_SAMPLE,
charset,
hasByteOrderMarker,
TextStructureFinderManager.DEFAULT_LINE_MERGE_SIZE_LIMIT,
TextStructureOverrides.EMPTY_OVERRIDES,
NOOP_TIMEOUT_CHECKER
);
TextStructure structure = structureFinder.getStructure();
assertEquals(TextStructure.Format.XML, structure.getFormat());
assertEquals(charset, structure.getCharset());
if (hasByteOrderMarker == null) {
assertNull(structure.getHasByteOrderMarker());
} else {
assertEquals(hasByteOrderMarker, structure.getHasByteOrderMarker());
}
assertNull(structure.getExcludeLinesPattern());
assertEquals("^\\s*<log4j:event", structure.getMultilineStartPattern());
assertNull(structure.getDelimiter());
assertNull(structure.getQuote());
assertNull(structure.getHasHeaderRow());
assertNull(structure.getShouldTrimFields());
assertNull(structure.getGrokPattern());
assertEquals("timestamp", structure.getTimestampField());
assertEquals(Collections.singletonList("UNIX_MS"), structure.getJodaTimestampFormats());
assertEquals(Collections.singleton("properties"), structure.getMappings().keySet());
}
}
| XmlTextStructureFinderTests |
java | spring-cloud__spring-cloud-gateway | spring-cloud-gateway-server-webmvc/src/main/java/org/springframework/cloud/gateway/server/mvc/predicate/GatewayRequestPredicates.java | {
"start": 11445,
"end": 12696
} | class ____ implements RequestPredicate {
private final String header;
private final @Nullable Pattern pattern;
HeaderRequestPredicate(String header, @Nullable String regexp) {
this.header = header;
this.pattern = (StringUtils.hasText(regexp)) ? Pattern.compile(regexp) : null;
}
@Override
public boolean test(ServerRequest request) {
if (CorsUtils.isPreFlightRequest(request.servletRequest())) {
return true;
}
List<String> values = request.headers().header(header);
if (values.isEmpty()) {
return false;
}
// values is now guaranteed to not be empty
if (pattern != null) {
// check if a header value matches
for (String value : values) {
if (pattern.asMatchPredicate().test(value)) {
return true;
}
}
return false;
}
// there is a value and since regexp is empty, we only check existence.
return true;
}
@Override
public void accept(RequestPredicates.Visitor visitor) {
if (pattern != null) {
visitor.header(header, pattern.pattern());
}
else {
visitor.header(header, "");
}
}
@Override
public String toString() {
return String.format("Header: %s regexp=%s", header, pattern);
}
}
private static | HeaderRequestPredicate |
java | apache__dubbo | dubbo-common/src/test/java/org/apache/dubbo/common/config/PrefixedConfigurationTest.java | {
"start": 975,
"end": 2363
} | class ____ {
@Test
void testPrefixedConfiguration() {
Map<String, String> props = new LinkedHashMap<>();
props.put("dubbo.protocol.name", "dubbo");
props.put("dubbo.protocol.port", "1234");
props.put("dubbo.protocols.rest.port", "2345");
InmemoryConfiguration inmemoryConfiguration = new InmemoryConfiguration();
inmemoryConfiguration.addProperties(props);
// prefixed over InmemoryConfiguration
PrefixedConfiguration prefixedConfiguration =
new PrefixedConfiguration(inmemoryConfiguration, "dubbo.protocol");
Assertions.assertEquals("dubbo", prefixedConfiguration.getProperty("name"));
Assertions.assertEquals("1234", prefixedConfiguration.getProperty("port"));
prefixedConfiguration = new PrefixedConfiguration(inmemoryConfiguration, "dubbo.protocols.rest");
Assertions.assertEquals("2345", prefixedConfiguration.getProperty("port"));
// prefixed over composite configuration
CompositeConfiguration compositeConfiguration = new CompositeConfiguration();
compositeConfiguration.addConfiguration(inmemoryConfiguration);
prefixedConfiguration = new PrefixedConfiguration(compositeConfiguration, "dubbo.protocols.rest");
Assertions.assertEquals("2345", prefixedConfiguration.getProperty("port"));
}
}
| PrefixedConfigurationTest |
java | google__dagger | javatests/dagger/internal/codegen/ComponentProcessorTest.java | {
"start": 20235,
"end": 20753
} | interface ____ {",
" Child build();",
" }",
"}");
CompilerTests.daggerCompiler(component, module, subcomponent)
.withProcessingOptions(compilerMode.processorOptions())
.compile(
subject -> {
subject.hasErrorCount(0);
subject.generatedSource(goldenFileRule.goldenSource("test/DaggerParent"));
});
}
@Test
public void testDefaultPackage() {
Source aClass = CompilerTests.javaSource("AClass", " | Builder |
java | apache__camel | test-infra/camel-test-infra-opensearch/src/test/java/org/apache/camel/test/infra/opensearch/services/OpenSearchServiceFactory.java | {
"start": 1067,
"end": 2452
} | class ____ extends SingletonService<OpenSearchService> implements OpenSearchService {
public SingletonOpenSearchService(OpenSearchService service, String name) {
super(service, name);
}
@Override
public int getPort() {
return getService().getPort();
}
public String getOpenSearchHost() {
return getService().getOpenSearchHost();
}
@Override
public String getHttpHostAddress() {
return getService().getHttpHostAddress();
}
@Override
public String getUsername() {
return getService().getUsername();
}
@Override
public String getPassword() {
return getService().getPassword();
}
}
private OpenSearchServiceFactory() {
}
public static SimpleTestServiceBuilder<OpenSearchService> builder() {
return new SimpleTestServiceBuilder<>("opensearch");
}
public static OpenSearchService createService() {
return builder()
.addLocalMapping(OpenSearchLocalContainerService::new)
.addRemoteMapping(RemoteOpenSearchService::new)
.build();
}
public static OpenSearchService createSingletonService() {
return SingletonServiceHolder.INSTANCE;
}
private static | SingletonOpenSearchService |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/cluster/routing/TsidBuilder.java | {
"start": 11458,
"end": 11625
} | interface ____ describes how objects of a complex type are added to a TSID.
*
* @param <T> the type of the value
*/
@FunctionalInterface
public | that |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/KuduEndpointBuilderFactory.java | {
"start": 5379,
"end": 7939
} | interface ____ {
/**
* Kudu (camel-kudu)
* Interact with Apache Kudu, a free and open source column-oriented
* data store of the Apache Hadoop ecosystem.
*
* Category: database,iot,cloud
* Since: 3.0
* Maven coordinates: org.apache.camel:camel-kudu
*
* @return the dsl builder for the headers' name.
*/
default KuduHeaderNameBuilder kudu() {
return KuduHeaderNameBuilder.INSTANCE;
}
/**
* Kudu (camel-kudu)
* Interact with Apache Kudu, a free and open source column-oriented
* data store of the Apache Hadoop ecosystem.
*
* Category: database,iot,cloud
* Since: 3.0
* Maven coordinates: org.apache.camel:camel-kudu
*
* Syntax: <code>kudu:host:port/tableName</code>
*
* Path parameter: host (required)
* Host of the server to connect to
*
* Path parameter: port (required)
* Port of the server to connect to
*
* Path parameter: tableName
* Table to connect to
*
* @param path host:port/tableName
* @return the dsl builder
*/
default KuduEndpointBuilder kudu(String path) {
return KuduEndpointBuilderFactory.endpointBuilder("kudu", path);
}
/**
* Kudu (camel-kudu)
* Interact with Apache Kudu, a free and open source column-oriented
* data store of the Apache Hadoop ecosystem.
*
* Category: database,iot,cloud
* Since: 3.0
* Maven coordinates: org.apache.camel:camel-kudu
*
* Syntax: <code>kudu:host:port/tableName</code>
*
* Path parameter: host (required)
* Host of the server to connect to
*
* Path parameter: port (required)
* Port of the server to connect to
*
* Path parameter: tableName
* Table to connect to
*
* @param componentName to use a custom component name for the endpoint
* instead of the default name
* @param path host:port/tableName
* @return the dsl builder
*/
default KuduEndpointBuilder kudu(String componentName, String path) {
return KuduEndpointBuilderFactory.endpointBuilder(componentName, path);
}
}
/**
* The builder of headers' name for the Kudu component.
*/
public static | KuduBuilders |
java | apache__camel | dsl/camel-yaml-dsl/camel-yaml-dsl-deserializers/src/generated/java/org/apache/camel/dsl/yaml/deserializers/ModelDeserializers.java | {
"start": 983256,
"end": 989258
} | class ____ extends YamlDeserializerBase<SecurityDefinition> {
public SecurityDefinitionDeserializer() {
super(SecurityDefinition.class);
}
@Override
protected SecurityDefinition newInstance() {
return new SecurityDefinition();
}
@Override
protected boolean setProperty(SecurityDefinition target, String propertyKey,
String propertyName, Node node) {
propertyKey = org.apache.camel.util.StringHelper.dashToCamelCase(propertyKey);
switch(propertyKey) {
case "key": {
String val = asText(node);
target.setKey(val);
break;
}
case "scopes": {
String val = asText(node);
target.setScopes(val);
break;
}
default: {
return false;
}
}
return true;
}
}
@YamlType(
nodes = "serviceCallConfiguration",
types = org.apache.camel.model.cloud.ServiceCallConfigurationDefinition.class,
order = org.apache.camel.dsl.yaml.common.YamlDeserializerResolver.ORDER_LOWEST - 1,
displayName = "Service Call Configuration",
description = "Remote service call configuration",
deprecated = true,
properties = {
@YamlProperty(name = "blacklistServiceFilter", type = "object:org.apache.camel.model.cloud.BlacklistServiceCallServiceFilterConfiguration", oneOf = "serviceFilterConfiguration"),
@YamlProperty(name = "cachingServiceDiscovery", type = "object:org.apache.camel.model.cloud.CachingServiceCallServiceDiscoveryConfiguration", oneOf = "serviceDiscoveryConfiguration"),
@YamlProperty(name = "combinedServiceDiscovery", type = "object:org.apache.camel.model.cloud.CombinedServiceCallServiceDiscoveryConfiguration", oneOf = "serviceDiscoveryConfiguration"),
@YamlProperty(name = "combinedServiceFilter", type = "object:org.apache.camel.model.cloud.CombinedServiceCallServiceFilterConfiguration", oneOf = "serviceFilterConfiguration"),
@YamlProperty(name = "component", type = "string", defaultValue = "http", description = "The component to use.", displayName = "Component"),
@YamlProperty(name = "consulServiceDiscovery", type = "object:org.apache.camel.model.cloud.ConsulServiceCallServiceDiscoveryConfiguration", oneOf = "serviceDiscoveryConfiguration"),
@YamlProperty(name = "customServiceFilter", type = "object:org.apache.camel.model.cloud.CustomServiceCallServiceFilterConfiguration", oneOf = "serviceFilterConfiguration"),
@YamlProperty(name = "defaultLoadBalancer", type = "object:org.apache.camel.model.cloud.DefaultServiceCallServiceLoadBalancerConfiguration", oneOf = "loadBalancerConfiguration"),
@YamlProperty(name = "dnsServiceDiscovery", type = "object:org.apache.camel.model.cloud.DnsServiceCallServiceDiscoveryConfiguration", oneOf = "serviceDiscoveryConfiguration"),
@YamlProperty(name = "expression", type = "object:org.apache.camel.model.cloud.ServiceCallExpressionConfiguration", description = "Configures the Expression using the given configuration.", displayName = "Expression", oneOf = "expression"),
@YamlProperty(name = "expressionRef", type = "string", description = "Set a reference to a custom Expression to use.", displayName = "Expression Ref"),
@YamlProperty(name = "healthyServiceFilter", type = "object:org.apache.camel.model.cloud.HealthyServiceCallServiceFilterConfiguration", oneOf = "serviceFilterConfiguration"),
@YamlProperty(name = "id", type = "string", description = "The id of this node", displayName = "Id"),
@YamlProperty(name = "kubernetesServiceDiscovery", type = "object:org.apache.camel.model.cloud.KubernetesServiceCallServiceDiscoveryConfiguration", oneOf = "serviceDiscoveryConfiguration"),
@YamlProperty(name = "loadBalancerRef", type = "string", description = "Sets a reference to a custom ServiceLoadBalancer to use.", displayName = "Load Balancer Ref"),
@YamlProperty(name = "passThroughServiceFilter", type = "object:org.apache.camel.model.cloud.PassThroughServiceCallServiceFilterConfiguration", oneOf = "serviceFilterConfiguration"),
@YamlProperty(name = "pattern", type = "enum:InOnly,InOut", description = "Sets the optional ExchangePattern used to invoke this endpoint", displayName = "Pattern"),
@YamlProperty(name = "serviceChooserRef", type = "string", description = "Sets a reference to a custom ServiceChooser to use.", displayName = "Service Chooser Ref"),
@YamlProperty(name = "serviceDiscoveryRef", type = "string", description = "Sets a reference to a custom ServiceDiscovery to use.", displayName = "Service Discovery Ref"),
@YamlProperty(name = "serviceFilterRef", type = "string", description = "Sets a reference to a custom ServiceFilter to use.", displayName = "Service Filter Ref"),
@YamlProperty(name = "staticServiceDiscovery", type = "object:org.apache.camel.model.cloud.StaticServiceCallServiceDiscoveryConfiguration", oneOf = "serviceDiscoveryConfiguration"),
@YamlProperty(name = "uri", type = "string", description = "The uri of the endpoint to send to. The uri can be dynamic computed using the simple language expression.", displayName = "Uri"),
@YamlProperty(name = "zookeeperServiceDiscovery", type = "object:org.apache.camel.model.cloud.ZooKeeperServiceCallServiceDiscoveryConfiguration", oneOf = "serviceDiscoveryConfiguration")
}
)
public static | SecurityDefinitionDeserializer |
java | elastic__elasticsearch | x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/analytics/action/PostAnalyticsEventDebugResponseBWCSerializingTests.java | {
"start": 715,
"end": 2123
} | class ____ extends AbstractBWCWireSerializationTestCase<
PostAnalyticsEventAction.Response> {
@Override
protected Writeable.Reader<PostAnalyticsEventAction.Response> instanceReader() {
return PostAnalyticsEventAction.Response::readFromStreamInput;
}
@Override
protected PostAnalyticsEventAction.Response createTestInstance() {
return new PostAnalyticsEventAction.DebugResponse(randomBoolean(), randomAnalyticsEvent());
}
@Override
protected PostAnalyticsEventAction.Response mutateInstance(PostAnalyticsEventAction.Response instance) throws IOException {
PostAnalyticsEventAction.DebugResponse debugResponse = (PostAnalyticsEventAction.DebugResponse) instance;
boolean isAccepted = instance.isAccepted();
AnalyticsEvent analyticsEvent = debugResponse.analyticsEvent();
switch (between(0, 1)) {
case 0 -> isAccepted = isAccepted == false;
case 1 -> analyticsEvent = randomValueOtherThan(analyticsEvent, () -> randomAnalyticsEvent());
}
return new PostAnalyticsEventAction.DebugResponse(isAccepted, analyticsEvent);
}
@Override
protected PostAnalyticsEventAction.Response mutateInstanceForVersion(
PostAnalyticsEventAction.Response instance,
TransportVersion version
) {
return instance;
}
}
| PostAnalyticsEventDebugResponseBWCSerializingTests |
java | micronaut-projects__micronaut-core | http-client/src/main/java/io/micronaut/http/client/netty/CancellableMonoSink.java | {
"start": 1178,
"end": 5011
} | class ____<T> implements Publisher<T>, Sinks.One<T>, Subscription, PoolSink<T> {
private static final Object EMPTY = new Object();
private final ReentrantLock lock = new ReentrantLock();
@Nullable
private final BlockHint blockHint;
private T value;
private Throwable failure;
private boolean complete = false;
private boolean cancelled = false;
private Subscriber<? super T> subscriber = null;
private boolean subscriberWaiting = false;
CancellableMonoSink(@Nullable BlockHint blockHint) {
this.blockHint = blockHint;
}
@Override
@Nullable
public BlockHint getBlockHint() {
return blockHint;
}
@Override
public void subscribe(Subscriber<? super T> s) {
lock.lock();
try {
if (this.subscriber != null) {
s.onError(new IllegalStateException("Only one subscriber allowed"));
}
subscriber = s;
subscriber.onSubscribe(this);
} finally {
lock.unlock();
}
}
private void tryForward() {
if (subscriberWaiting && complete && !cancelled) {
if (failure == null) {
if (value != EMPTY) {
subscriber.onNext(value);
}
subscriber.onComplete();
} else {
subscriber.onError(failure);
}
}
}
@Override
public Sinks.@NonNull EmitResult tryEmitValue(T value) {
lock.lock();
try {
if (complete) {
return Sinks.EmitResult.FAIL_OVERFLOW;
} else {
this.value = value;
complete = true;
tryForward();
return Sinks.EmitResult.OK;
}
} finally {
lock.unlock();
}
}
@Override
public void emitValue(T value, Sinks.@NonNull EmitFailureHandler failureHandler) {
throw new UnsupportedOperationException();
}
@SuppressWarnings("unchecked")
@Override
public Sinks.@NonNull EmitResult tryEmitEmpty() {
return tryEmitValue((T) EMPTY);
}
@Override
public Sinks.@NonNull EmitResult tryEmitError(@NonNull Throwable error) {
lock.lock();
try {
if (complete) {
return Sinks.EmitResult.FAIL_OVERFLOW;
} else {
this.failure = error;
complete = true;
tryForward();
return Sinks.EmitResult.OK;
}
} finally {
lock.unlock();
}
}
@Override
public void emitEmpty(Sinks.@NonNull EmitFailureHandler failureHandler) {
throw new UnsupportedOperationException();
}
@Override
public void emitError(@NonNull Throwable error, Sinks.@NonNull EmitFailureHandler failureHandler) {
throw new UnsupportedOperationException();
}
@Override
public int currentSubscriberCount() {
lock.lock();
try {
return subscriber == null ? 0 : 1;
} finally {
lock.unlock();
}
}
@NonNull
@Override
public Mono<T> asMono() {
return Mono.from(this);
}
@Override
public Object scanUnsafe(@NonNull Attr key) {
return null;
}
@Override
public void request(long n) {
lock.lock();
try {
if (n > 0 && !subscriberWaiting) {
subscriberWaiting = true;
tryForward();
}
} finally {
lock.unlock();
}
}
@Override
public void cancel() {
lock.lock();
try {
complete = true;
cancelled = true;
} finally {
lock.unlock();
}
}
}
| CancellableMonoSink |
java | apache__camel | components/camel-beanio/src/test/java/org/apache/camel/dataformat/beanio/SpringBeanIODataFormatSimpleTest.java | {
"start": 1277,
"end": 3508
} | class ____ extends CamelSpringTestSupport {
private static final String FIXED_DATA = "Joe,Smith,Developer,75000,10012009" + Constants.LS
+ "Jane,Doe,Architect,80000,01152008" + Constants.LS
+ "Jon,Anderson,Manager,85000,03182007" + Constants.LS;
@Override
protected AbstractApplicationContext createApplicationContext() {
return new ClassPathXmlApplicationContext("org/apache/camel/dataformat/beanio/SpringBeanIODataFormatSimpleTest.xml");
}
@Test
void testMarshal() throws Exception {
List<Employee> employees = getEmployees();
MockEndpoint mock = getMockEndpoint("mock:beanio-marshal");
mock.expectedBodiesReceived(FIXED_DATA);
template.sendBody("direct:marshal", employees);
mock.assertIsSatisfied();
}
@Test
void testUnmarshal() throws Exception {
List<Employee> employees = getEmployees();
MockEndpoint mock = getMockEndpoint("mock:beanio-unmarshal");
mock.expectedBodiesReceived(employees);
template.sendBody("direct:unmarshal", FIXED_DATA);
mock.assertIsSatisfied();
}
private List<Employee> getEmployees() throws ParseException {
List<Employee> employees = new ArrayList<>();
Employee one = new Employee();
one.setFirstName("Joe");
one.setLastName("Smith");
one.setTitle("Developer");
one.setSalary(75000);
one.setHireDate(new SimpleDateFormat("MMddyyyy").parse("10012009"));
employees.add(one);
Employee two = new Employee();
two.setFirstName("Jane");
two.setLastName("Doe");
two.setTitle("Architect");
two.setSalary(80000);
two.setHireDate(new SimpleDateFormat("MMddyyyy").parse("01152008"));
employees.add(two);
Employee three = new Employee();
three.setFirstName("Jon");
three.setLastName("Anderson");
three.setTitle("Manager");
three.setSalary(85000);
three.setHireDate(new SimpleDateFormat("MMddyyyy").parse("03182007"));
employees.add(three);
return employees;
}
}
| SpringBeanIODataFormatSimpleTest |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/internal/floatarrays/FloatArrays_assertContainsAnyOf_Test.java | {
"start": 978,
"end": 1476
} | class ____ extends FloatArraysBaseTest {
private Arrays internalArrays;
@BeforeEach
@Override
public void setUp() {
super.setUp();
internalArrays = mock(Arrays.class);
setArrays(internalArrays);
}
@Test
void should_delegate_to_internal_Arrays() {
arrays.assertContainsAnyOf(someInfo(), actual, new float[] { 1, 2.0f, 3 });
verify(internalArrays).assertContainsAnyOf(someInfo(), failures, actual, new float[] { 1, 2, 3 });
}
}
| FloatArrays_assertContainsAnyOf_Test |
java | spring-projects__spring-framework | spring-test/src/main/java/org/springframework/test/web/servlet/result/PrintingResultHandler.java | {
"start": 9784,
"end": 9917
} | interface ____ {
void printHeading(String heading);
void printValue(String label, @Nullable Object value);
}
}
| ResultValuePrinter |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/BaseClientToAMTokenSecretManager.java | {
"start": 1454,
"end": 1577
} | class ____ strictly for the purpose of inherit/extend and
* register with Hadoop RPC.
*/
@Public
@Evolving
public abstract | is |
java | micronaut-projects__micronaut-core | core-processor/src/main/java/io/micronaut/inject/annotation/AbstractAnnotationMetadataBuilder.java | {
"start": 82230,
"end": 83534
} | class ____ {
@Nullable
private final T annotationType;
private final AnnotationValue<?> annotationValue;
private ProcessedAnnotation(@Nullable T annotationType,
AnnotationValue<?> annotationValue) {
this.annotationType = annotationType;
this.annotationValue = annotationValue;
}
public ProcessedAnnotation withAnnotationValue(AnnotationValue<?> annotationValue) {
return new ProcessedAnnotation(annotationType, annotationValue);
}
public ProcessedAnnotation withAnnotationType(T annotationType) {
return new ProcessedAnnotation(annotationType, annotationValue);
}
public ProcessedAnnotation mutateAnnotationValue(Function<AnnotationValueBuilder<?>, AnnotationValueBuilder<?>> fn) {
return new ProcessedAnnotation(annotationType, fn.apply(annotationValue.mutate()).build());
}
@Nullable
public T getAnnotationType() {
return annotationType;
}
public AnnotationValue<?> getAnnotationValue() {
return annotationValue;
}
}
/**
* The caching entry.
*
* @author Denis Stepanov
* @since 4.0.0
*/
public | ProcessedAnnotation |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/type/ReferenceType.java | {
"start": 353,
"end": 8046
} | class ____ extends SimpleType
{
private static final long serialVersionUID = 1L;
protected final JavaType _referencedType;
/**
* Essential type used for type ids, for example if type id is needed for
* referencing type with polymorphic handling. Typically initialized when
* a {@link SimpleType} is upgraded into reference type, but NOT changed
* if being sub-classed.
*/
protected final JavaType _anchorType;
protected ReferenceType(Class<?> cls, TypeBindings bindings,
JavaType superClass, JavaType[] superInts, JavaType refType,
JavaType anchorType,
Object valueHandler, Object typeHandler, boolean asStatic)
{
super(cls, bindings, superClass, superInts, Objects.hashCode(refType),
valueHandler, typeHandler, asStatic);
_referencedType = refType;
_anchorType = (anchorType == null) ? this : anchorType;
}
/**
* Constructor used when upgrading into this type (via {@link #upgradeFrom},
* the usual way for {@link ReferenceType}s to come into existence.
* Sets up what is considered the "base" reference type
*/
protected ReferenceType(TypeBase base, JavaType refType)
{
super(base);
_referencedType = refType;
// we'll establish this as the anchor type
_anchorType = this;
}
/**
* Factory method that can be used to "upgrade" a basic type into collection-like
* one; usually done via {@link TypeModifier}
*
* @param baseType Resolved non-reference type (usually {@link SimpleType}) that is being upgraded
* @param refdType Referenced type; usually the first and only type parameter, but not necessarily
*
* @since 2.7
*/
public static ReferenceType upgradeFrom(JavaType baseType, JavaType refdType) {
if (refdType == null) {
throw new IllegalArgumentException("Missing referencedType");
}
// 19-Oct-2015, tatu: Not sure if and how other types could be used as base;
// will cross that bridge if and when need be
if (baseType instanceof TypeBase base) {
return new ReferenceType(base, refdType);
}
throw new IllegalArgumentException("Cannot upgrade from an instance of "+baseType.getClass());
}
public static ReferenceType construct(Class<?> cls, TypeBindings bindings,
JavaType superClass, JavaType[] superInts, JavaType refType)
{
return new ReferenceType(cls, bindings, superClass, superInts,
refType, null, null, null, false);
}
@Override
public JavaType withContentType(JavaType contentType) {
if (_referencedType == contentType) {
return this;
}
return new ReferenceType(_class, _bindings, _superClass, _superInterfaces,
contentType, _anchorType, _valueHandler, _typeHandler, _asStatic);
}
@Override
public ReferenceType withTypeHandler(Object h)
{
if (h == _typeHandler) {
return this;
}
return new ReferenceType(_class, _bindings, _superClass, _superInterfaces,
_referencedType, _anchorType, _valueHandler, h, _asStatic);
}
@Override
public ReferenceType withContentTypeHandler(Object h)
{
if (h == _referencedType.getTypeHandler()) {
return this;
}
return new ReferenceType(_class, _bindings, _superClass, _superInterfaces,
_referencedType.withTypeHandler(h), _anchorType,
_valueHandler, _typeHandler, _asStatic);
}
@Override
public ReferenceType withValueHandler(Object h) {
if (h == _valueHandler) {
return this;
}
return new ReferenceType(_class, _bindings,
_superClass, _superInterfaces, _referencedType, _anchorType,
h, _typeHandler,_asStatic);
}
@Override
public ReferenceType withContentValueHandler(Object h) {
if (h == _referencedType.getValueHandler()) {
return this;
}
return new ReferenceType(_class, _bindings,
_superClass, _superInterfaces, _referencedType.withValueHandler(h),
_anchorType, _valueHandler, _typeHandler, _asStatic);
}
@Override
public ReferenceType withStaticTyping() {
if (_asStatic) {
return this;
}
return new ReferenceType(_class, _bindings, _superClass, _superInterfaces,
_referencedType.withStaticTyping(), _anchorType,
_valueHandler, _typeHandler, true);
}
@Override
public JavaType refine(Class<?> rawType, TypeBindings bindings,
JavaType superClass, JavaType[] superInterfaces) {
return new ReferenceType(rawType, _bindings,
superClass, superInterfaces, _referencedType, _anchorType,
_valueHandler, _typeHandler, _asStatic);
}
@Override
protected String buildCanonicalName()
{
StringBuilder sb = new StringBuilder();
sb.append(_class.getName());
if ((_referencedType != null) && _hasNTypeParameters(1)) {
sb.append('<');
sb.append(_referencedType.toCanonical());
sb.append('>');
}
return sb.toString();
}
/*
/**********************************************************
/* Public API overrides
/**********************************************************
*/
@Override
public JavaType getContentType() {
return _referencedType;
}
@Override
public JavaType getReferencedType() {
return _referencedType;
}
@Override
public boolean hasContentType() {
return true;
}
@Override
public boolean isReferenceType() {
return true;
}
@Override
public StringBuilder getErasedSignature(StringBuilder sb) {
return _classSignature(_class, sb, true);
}
@Override
public StringBuilder getGenericSignature(StringBuilder sb)
{
_classSignature(_class, sb, false);
sb.append('<');
sb = _referencedType.getGenericSignature(sb);
sb.append(">;");
return sb;
}
/*
/**********************************************************
/* Extended API
/**********************************************************
*/
public JavaType getAnchorType() {
return _anchorType;
}
/**
* Convenience accessor that allows checking whether this is the anchor type
* itself; if not, it must be one of supertypes that is also a {@link ReferenceType}
*/
public boolean isAnchorType() {
return (_anchorType == this);
}
/*
/**********************************************************
/* Standard methods
/**********************************************************
*/
@Override
public String toString()
{
return new StringBuilder(40)
.append("[reference type, class ")
.append(buildCanonicalName())
.append('<')
.append(_referencedType)
.append('>')
.append(']')
.toString();
}
@Override
public boolean equals(Object o)
{
if (o == this) return true;
if (o == null) return false;
if (o.getClass() != getClass()) return false;
ReferenceType other = (ReferenceType) o;
if (other._class != _class) return false;
// Otherwise actually mostly worry about referenced type
return _referencedType.equals(other._referencedType);
}
}
| ReferenceType |
java | alibaba__druid | druid-spring-boot-3-starter/src/test/java/com/alibaba/druid/spring/boot3/demo/service/UserService.java | {
"start": 117,
"end": 171
} | interface ____ {
User findById(Long id);
}
| UserService |
java | google__error-prone | core/src/main/java/com/google/errorprone/ErrorProneJavaCompiler.java | {
"start": 989,
"end": 1367
} | class ____ extends BaseErrorProneJavaCompiler {
public ErrorProneJavaCompiler() {
this(JavacTool.create());
}
ErrorProneJavaCompiler(JavaCompiler javacTool) {
super(javacTool, BuiltInCheckerSuppliers.defaultChecks());
}
public ErrorProneJavaCompiler(ScannerSupplier scannerSupplier) {
super(JavacTool.create(), scannerSupplier);
}
}
| ErrorProneJavaCompiler |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/internal/characters/Characters_assertGreaterThanOrEqualTo_Test.java | {
"start": 1451,
"end": 3998
} | class ____ extends CharactersBaseTest {
@Test
void should_fail_if_actual_is_null() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> characters.assertGreaterThanOrEqualTo(someInfo(), null, 'a'))
.withMessage(actualIsNull());
}
@Test
void should_pass_if_actual_is_greater_than_other() {
characters.assertGreaterThanOrEqualTo(someInfo(), 'b', 'a');
}
@Test
void should_pass_if_actual_is_equal_to_other() {
characters.assertGreaterThanOrEqualTo(someInfo(), 'b', 'b');
}
@Test
void should_fail_if_actual_is_less_than_other() {
AssertionInfo info = someInfo();
Throwable error = catchThrowable(() -> characters.assertGreaterThanOrEqualTo(info, 'a', 'b'));
assertThat(error).isInstanceOf(AssertionError.class);
verify(failures).failure(info, shouldBeGreaterOrEqual('a', 'b'));
}
@Test
void should_fail_if_actual_is_null_according_to_custom_comparison_strategy() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> charactersWithCaseInsensitiveComparisonStrategy.assertGreaterThanOrEqualTo(someInfo(),
null,
'a'))
.withMessage(actualIsNull());
}
@Test
void should_pass_if_actual_is_greater_than_other_according_to_custom_comparison_strategy() {
charactersWithCaseInsensitiveComparisonStrategy.assertGreaterThanOrEqualTo(someInfo(), 'B', 'a');
}
@Test
void should_pass_if_actual_is_equal_to_other_according_to_custom_comparison_strategy() {
charactersWithCaseInsensitiveComparisonStrategy.assertGreaterThanOrEqualTo(someInfo(), 'B', 'b');
}
@Test
void should_fail_if_actual_is_less_than_other_according_to_custom_comparison_strategy() {
AssertionInfo info = someInfo();
Throwable error = catchThrowable(() -> charactersWithCaseInsensitiveComparisonStrategy.assertGreaterThanOrEqualTo(info, 'a',
'B'));
assertThat(error).isInstanceOf(AssertionError.class);
verify(failures).failure(info, shouldBeGreaterOrEqual('a', 'B', caseInsensitiveComparisonStrategy));
}
}
| Characters_assertGreaterThanOrEqualTo_Test |
java | spring-projects__spring-boot | buildpack/spring-boot-buildpack-platform/src/main/java/org/springframework/boot/buildpack/platform/io/FilePermissions.java | {
"start": 1074,
"end": 3218
} | class ____ {
private FilePermissions() {
}
/**
* Return the integer representation of the file permissions for a path, where the
* integer value conforms to the
* <a href="https://en.wikipedia.org/wiki/Umask">umask</a> octal notation.
* @param path the file path
* @return the integer representation
* @throws IOException if path permissions cannot be read
*/
public static int umaskForPath(Path path) throws IOException {
Assert.notNull(path, "'path' must not be null");
PosixFileAttributeView attributeView = Files.getFileAttributeView(path, PosixFileAttributeView.class);
Assert.state(attributeView != null, "Unsupported file type for retrieving Posix attributes");
return posixPermissionsToUmask(attributeView.readAttributes().permissions());
}
/**
* Return the integer representation of a set of Posix file permissions, where the
* integer value conforms to the
* <a href="https://en.wikipedia.org/wiki/Umask">umask</a> octal notation.
* @param permissions the set of {@code PosixFilePermission}s
* @return the integer representation
*/
public static int posixPermissionsToUmask(Collection<PosixFilePermission> permissions) {
Assert.notNull(permissions, "'permissions' must not be null");
int owner = permissionToUmask(permissions, PosixFilePermission.OWNER_EXECUTE, PosixFilePermission.OWNER_WRITE,
PosixFilePermission.OWNER_READ);
int group = permissionToUmask(permissions, PosixFilePermission.GROUP_EXECUTE, PosixFilePermission.GROUP_WRITE,
PosixFilePermission.GROUP_READ);
int other = permissionToUmask(permissions, PosixFilePermission.OTHERS_EXECUTE, PosixFilePermission.OTHERS_WRITE,
PosixFilePermission.OTHERS_READ);
return Integer.parseInt("" + owner + group + other, 8);
}
private static int permissionToUmask(Collection<PosixFilePermission> permissions, PosixFilePermission execute,
PosixFilePermission write, PosixFilePermission read) {
int value = 0;
if (permissions.contains(execute)) {
value += 1;
}
if (permissions.contains(write)) {
value += 2;
}
if (permissions.contains(read)) {
value += 4;
}
return value;
}
}
| FilePermissions |
java | spring-projects__spring-boot | module/spring-boot-jdbc/src/dockerTest/java/org/springframework/boot/jdbc/docker/compose/MySqlJdbcDockerComposeConnectionDetailsFactoryIntegrationTests.java | {
"start": 1168,
"end": 1767
} | class ____ {
@DockerComposeTest(composeFile = "mysql-compose.yaml", image = TestImage.MYSQL)
void runCreatesConnectionDetails(JdbcConnectionDetails connectionDetails) {
assertConnectionDetails(connectionDetails);
}
private void assertConnectionDetails(JdbcConnectionDetails connectionDetails) {
assertThat(connectionDetails.getUsername()).isEqualTo("myuser");
assertThat(connectionDetails.getPassword()).isEqualTo("secret");
assertThat(connectionDetails.getJdbcUrl()).startsWith("jdbc:mysql://").endsWith("/mydatabase");
}
}
| MySqlJdbcDockerComposeConnectionDetailsFactoryIntegrationTests |
java | quarkusio__quarkus | extensions/hibernate-search-orm-outbox-polling/runtime/src/main/java/io/quarkus/hibernate/search/orm/outboxpolling/runtime/HibernateSearchOutboxPollingBuildTimeConfigPersistenceUnit.java | {
"start": 834,
"end": 1162
} | interface ____ {
/**
* Configuration for the "agent" entity mapping.
*/
EntityMappingAgentConfig agent();
/**
* Configuration for the "outbox event" entity mapping.
*/
EntityMappingOutboxEventConfig outboxEvent();
}
@ConfigGroup
| EntityMappingConfig |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/parser/deser/awt/RectangleDeserializerTest.java | {
"start": 293,
"end": 2471
} | class ____ extends TestCase {
public void test_0 () throws Exception {
AwtCodec.instance.getFastMatchToken();
Assert.assertNull(JSON.parseObject("null", Rectangle.class));
Assert.assertNull(JSON.parseArray("null", Rectangle.class));
Assert.assertNull(JSON.parseArray("[null]", Rectangle.class).get(0));
Assert.assertNull(JSON.parseObject("{\"value\":null}", VO.class).getValue());
}
public void test_stack_error_0() throws Exception {
Exception error = null;
try {
JSON.parseObject("[]", Rectangle.class);
} catch (JSONException ex) {
error = ex;
}
Assert.assertNotNull(error);
}
public void test_stack_error_1() throws Exception {
Exception error = null;
try {
JSON.parseObject("{33:22}", Rectangle.class);
} catch (JSONException ex) {
error = ex;
}
Assert.assertNotNull(error);
}
public void test_stack_error_2() throws Exception {
Exception error = null;
try {
JSON.parseObject("{\"name\":22}", Rectangle.class);
} catch (JSONException ex) {
error = ex;
}
Assert.assertNotNull(error);
}
public void test_stack_error_3() throws Exception {
Exception error = null;
try {
JSON.parseObject("{\"style\":true}", Rectangle.class);
} catch (JSONException ex) {
error = ex;
}
Assert.assertNotNull(error);
}
public void test_stack_error_4() throws Exception {
Exception error = null;
try {
JSON.parseObject("{\"size\":\"33\"}", Rectangle.class);
} catch (JSONException ex) {
error = ex;
}
Assert.assertNotNull(error);
}
public void test_stack_error_5() throws Exception {
Exception error = null;
try {
JSON.parseObject("{\"xxx\":22}", Font.class);
} catch (JSONException ex) {
error = ex;
}
Assert.assertNotNull(error);
}
public static | RectangleDeserializerTest |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/convert/CoerceFloatToIntTest.java | {
"start": 731,
"end": 18636
} | class ____
{
private final ObjectMapper DEFAULT_MAPPER = newJsonMapper();
private final ObjectReader READER_LEGACY_FAIL = DEFAULT_MAPPER.reader()
.without(DeserializationFeature.ACCEPT_FLOAT_AS_INT);
private final ObjectMapper MAPPER_TO_EMPTY = jsonMapperBuilder()
.withCoercionConfig(LogicalType.Integer, cfg ->
cfg.setCoercion(CoercionInputShape.Float, CoercionAction.AsEmpty))
.build();
private final ObjectMapper MAPPER_TRY_CONVERT = jsonMapperBuilder()
.withCoercionConfig(LogicalType.Integer, cfg ->
cfg.setCoercion(CoercionInputShape.Float, CoercionAction.TryConvert))
.build();
private final ObjectMapper MAPPER_TO_NULL = jsonMapperBuilder()
.withCoercionConfig(LogicalType.Integer, cfg ->
cfg.setCoercion(CoercionInputShape.Float, CoercionAction.AsNull))
.build();
private final ObjectMapper MAPPER_TO_FAIL = jsonMapperBuilder()
.withCoercionConfig(LogicalType.Integer, cfg ->
cfg.setCoercion(CoercionInputShape.Float, CoercionAction.Fail))
.build();
/*
/********************************************************
/* Test methods, defaults (legacy)
/********************************************************
*/
@Test
public void testLegacyDoubleToIntCoercion() throws Exception
{
// by default, should be ok
Integer I = DEFAULT_MAPPER.readValue(" 1.25 ", Integer.class);
assertEquals(1, I.intValue());
{
IntWrapper w = DEFAULT_MAPPER.readValue("{\"i\":-2.25 }", IntWrapper.class);
assertEquals(-2, w.i);
int[] arr = DEFAULT_MAPPER.readValue("[ 1.25 ]", int[].class);
assertEquals(1, arr[0]);
}
Long L = DEFAULT_MAPPER.readValue(" 3.33 ", Long.class);
assertEquals(3L, L.longValue());
{
LongWrapper w = DEFAULT_MAPPER.readValue("{\"l\":-2.25 }", LongWrapper.class);
assertEquals(-2L, w.l);
long[] arr = DEFAULT_MAPPER.readValue("[ 1.25 ]", long[].class);
assertEquals(1, arr[0]);
}
Short S = DEFAULT_MAPPER.readValue("42.33", Short.class);
assertEquals(42, S.intValue());
BigInteger biggie = DEFAULT_MAPPER.readValue("95.3", BigInteger.class);
assertEquals(95L, biggie.longValue());
}
// [databind#5319]
@Test
public void testLegacyDoubleToIntCoercionJsonNodeToInteger() throws Exception
{
final JsonNodeFactory nodeF = DEFAULT_MAPPER.getNodeFactory();
assertEquals(1,
DEFAULT_MAPPER.treeToValue(nodeF.numberNode(1.25), Integer.class));
assertEquals(-2,
DEFAULT_MAPPER.treeToValue(nodeF.numberNode(-2.5f), Integer.class));
assertEquals(3,
DEFAULT_MAPPER.treeToValue(nodeF.numberNode(BigDecimal.valueOf(3.75)), Integer.class));
assertEquals(1,
DEFAULT_MAPPER.treeToValue(nodeF.numberNode(1.25), Integer.TYPE));
assertEquals(-2,
DEFAULT_MAPPER.treeToValue(nodeF.numberNode(-2.5f), Integer.TYPE));
assertEquals(3,
DEFAULT_MAPPER.treeToValue(nodeF.numberNode(BigDecimal.valueOf(3.75)), Integer.TYPE));
}
// [databind#5319]
@Test
public void testLegacyDoubleToIntCoercionJsonNodeToLong() throws Exception
{
final JsonNodeFactory nodeF = DEFAULT_MAPPER.getNodeFactory();
assertEquals(1L,
DEFAULT_MAPPER.treeToValue(nodeF.numberNode(1.25), Long.class));
assertEquals(-2L,
DEFAULT_MAPPER.treeToValue(nodeF.numberNode(-2.5f), Long.class));
assertEquals(3L,
DEFAULT_MAPPER.treeToValue(nodeF.numberNode(BigDecimal.valueOf(3.75)), Long.class));
assertEquals(1L,
DEFAULT_MAPPER.treeToValue(nodeF.numberNode(1.25), Long.TYPE));
assertEquals(-2L,
DEFAULT_MAPPER.treeToValue(nodeF.numberNode(-2.5f), Long.TYPE));
assertEquals(3L,
DEFAULT_MAPPER.treeToValue(nodeF.numberNode(BigDecimal.valueOf(3.75)), Long.TYPE));
}
// [databind#5319]
@Test
public void testLegacyDoubleToIntCoercionJsonNodeToBigInteger() throws Exception
{
final JsonNodeFactory nodeF = DEFAULT_MAPPER.getNodeFactory();
assertEquals(BigInteger.valueOf(1L),
DEFAULT_MAPPER.treeToValue(nodeF.numberNode(1.25), BigInteger.class));
assertEquals(BigInteger.valueOf(-2L),
DEFAULT_MAPPER.treeToValue(nodeF.numberNode(-2.5f), BigInteger.class));
assertEquals(BigInteger.valueOf(3L),
DEFAULT_MAPPER.treeToValue(nodeF.numberNode(BigDecimal.valueOf(3.75)), BigInteger.class));
}
// [databind#5340]
@Test
public void testLegacyFPToIntCoercionJsonNodeToByte() throws Exception
{
final JsonNodeFactory nodeF = DEFAULT_MAPPER.getNodeFactory();
assertEquals((byte) 1,
DEFAULT_MAPPER.treeToValue(nodeF.numberNode(1.25), Byte.class));
assertEquals((byte) -2,
DEFAULT_MAPPER.treeToValue(nodeF.numberNode(-2.5f), Byte.class));
assertEquals((byte) 3,
DEFAULT_MAPPER.treeToValue(nodeF.numberNode(BigDecimal.valueOf(3.75)), Byte.class));
assertEquals((byte) 1,
DEFAULT_MAPPER.treeToValue(nodeF.numberNode(1.25), Byte.TYPE));
assertEquals((byte) -2,
DEFAULT_MAPPER.treeToValue(nodeF.numberNode(-2.5f), Byte.TYPE));
assertEquals((byte) 3,
DEFAULT_MAPPER.treeToValue(nodeF.numberNode(BigDecimal.valueOf(3.75)), Byte.TYPE));
}
// [databind#5340]
@Test
public void testLegacyFPToIntCoercionJsonNodeToShort() throws Exception
{
final JsonNodeFactory nodeF = DEFAULT_MAPPER.getNodeFactory();
assertEquals((short) 1,
DEFAULT_MAPPER.treeToValue(nodeF.numberNode(1.25), Short.class));
assertEquals((short) -2,
DEFAULT_MAPPER.treeToValue(nodeF.numberNode(-2.5f), Short.class));
assertEquals((short) 3,
DEFAULT_MAPPER.treeToValue(nodeF.numberNode(BigDecimal.valueOf(3.75)), Short.class));
assertEquals((short) 1,
DEFAULT_MAPPER.treeToValue(nodeF.numberNode(1.25), Short.TYPE));
assertEquals((short) -2,
DEFAULT_MAPPER.treeToValue(nodeF.numberNode(-2.5f), Short.TYPE));
assertEquals((short) 3,
DEFAULT_MAPPER.treeToValue(nodeF.numberNode(BigDecimal.valueOf(3.75)), Short.TYPE));
}
@Test
public void testLegacyFailDoubleToInt() throws Exception
{
_verifyCoerceFail(READER_LEGACY_FAIL, Integer.class, "1.5", "java.lang.Integer");
_verifyCoerceFail(READER_LEGACY_FAIL, Integer.TYPE, "1.5", "int");
_verifyCoerceFail(READER_LEGACY_FAIL, IntWrapper.class, "{\"i\":-2.25 }", "int");
_verifyCoerceFail(READER_LEGACY_FAIL, int[].class, "[ 2.5 ]", "to `int` value");
}
@Test
public void testLegacyFailDoubleToLong() throws Exception
{
_verifyCoerceFail(READER_LEGACY_FAIL, Long.class, "0.5");
_verifyCoerceFail(READER_LEGACY_FAIL, Long.TYPE, "-2.5");
_verifyCoerceFail(READER_LEGACY_FAIL, LongWrapper.class, "{\"l\": 7.7 }");
_verifyCoerceFail(READER_LEGACY_FAIL, long[].class, "[ -1.35 ]", "to `long` value");
}
@Test
public void testLegacyFailDoubleToOther() throws Exception
{
_verifyCoerceFail(READER_LEGACY_FAIL, Byte.class, "0.5");
_verifyCoerceFail(READER_LEGACY_FAIL, Byte.TYPE, "-2.5");
_verifyCoerceFail(READER_LEGACY_FAIL, byte[].class, "[ -1.35 ]", "to `byte` value");
_verifyCoerceFail(READER_LEGACY_FAIL, Short.class, "0.5");
_verifyCoerceFail(READER_LEGACY_FAIL, Short.TYPE, "-2.5");
_verifyCoerceFail(READER_LEGACY_FAIL, short[].class, "[ -1.35 ]", "to `short` value");
_verifyCoerceFail(READER_LEGACY_FAIL, BigInteger.class, "25236.256");
_verifyCoerceFail(READER_LEGACY_FAIL, AtomicLong.class, "25236.256");
}
/*
/********************************************************
/* Test methods, legacy, correct exception type
/********************************************************
*/
// [databind#2804]
@Test
public void testLegacyFail2804() throws Exception
{
_testLegacyFail2804("5.5", Integer.class);
_testLegacyFail2804("5.0", Long.class);
_testLegacyFail2804("1234567890123456789.0", BigInteger.class);
_testLegacyFail2804("[4, 5.5, 6]", "5.5",
new TypeReference<List<Integer>>() {});
_testLegacyFail2804("{\"key1\": 4, \"key2\": 5.5}", "5.5",
new TypeReference<Map<String, Integer>>() {});
}
private void _testLegacyFail2804(String value, Class<?> type) throws Exception {
_testLegacyFail2804(value, DEFAULT_MAPPER.constructType(type), value);
}
private void _testLegacyFail2804(String doc, String probValue,
TypeReference<?> type) throws Exception {
_testLegacyFail2804(doc, DEFAULT_MAPPER.constructType(type), probValue);
}
private void _testLegacyFail2804(String doc, JavaType targetType,
String probValue) throws Exception {
try {
READER_LEGACY_FAIL.forType(targetType).readValue(doc);
fail("Should not pass");
} catch (InvalidFormatException ex) {
verifyException(ex, probValue);
} catch (MismatchedInputException ex) {
fail("Should get subtype, got: "+ex);
}
}
/*
/********************************************************
/* Test methods, CoerceConfig, to null
/********************************************************
*/
@Test
public void testCoerceConfigFloatToNull() throws Exception
{
assertNull(MAPPER_TO_NULL.readValue("1.5", Integer.class));
// `null` not possible for primitives, must use empty (aka default) value
assertEquals(Integer.valueOf(0), MAPPER_TO_NULL.readValue("1.5", Integer.TYPE));
{
IntWrapper w = MAPPER_TO_NULL.readValue( "{\"i\":-2.25 }", IntWrapper.class);
assertEquals(0, w.i);
int[] ints = MAPPER_TO_NULL.readValue("[ 2.5 ]", int[].class);
assertEquals(1, ints.length);
assertEquals(0, ints[0]);
}
assertNull(MAPPER_TO_NULL.readValue("2.5", Long.class));
assertEquals(Long.valueOf(0L), MAPPER_TO_NULL.readValue("-4.25", Long.TYPE));
{
LongWrapper w = MAPPER_TO_NULL.readValue( "{\"l\":-2.25 }", LongWrapper.class);
assertEquals(0L, w.l);
long[] l = MAPPER_TO_NULL.readValue("[ 2.5 ]", long[].class);
assertEquals(1, l.length);
assertEquals(0L, l[0]);
}
assertNull(MAPPER_TO_NULL.readValue("2.5", Short.class));
assertEquals(Short.valueOf((short) 0), MAPPER_TO_NULL.readValue("-4.25", Short.TYPE));
{
short[] s = MAPPER_TO_NULL.readValue("[ 2.5 ]", short[].class);
assertEquals(1, s.length);
assertEquals((short) 0, s[0]);
}
assertNull(MAPPER_TO_NULL.readValue("2.5", Byte.class));
assertEquals(Byte.valueOf((byte) 0), MAPPER_TO_NULL.readValue("-4.25", Byte.TYPE));
{
byte[] arr = MAPPER_TO_NULL.readValue("[ 2.5 ]", byte[].class);
assertEquals(1, arr.length);
assertEquals((byte) 0, arr[0]);
}
assertNull(MAPPER_TO_NULL.readValue("2.5", BigInteger.class));
{
BigInteger[] arr = MAPPER_TO_NULL.readValue("[ 2.5 ]", BigInteger[].class);
assertEquals(1, arr.length);
assertNull(arr[0]);
}
}
/*
/********************************************************
/* Test methods, CoerceConfig, to empty
/********************************************************
*/
@Test
public void testCoerceConfigFloatToEmpty() throws Exception
{
assertEquals(Integer.valueOf(0), MAPPER_TO_EMPTY.readValue("1.2", Integer.class));
assertEquals(Integer.valueOf(0), MAPPER_TO_EMPTY.readValue("1.5", Integer.TYPE));
{
IntWrapper w = MAPPER_TO_EMPTY.readValue( "{\"i\":-2.25 }", IntWrapper.class);
assertEquals(0, w.i);
int[] ints = MAPPER_TO_EMPTY.readValue("[ 2.5 ]", int[].class);
assertEquals(1, ints.length);
assertEquals(0, ints[0]);
}
assertEquals(Long.valueOf(0), MAPPER_TO_EMPTY.readValue("1.2", Long.class));
assertEquals(Long.valueOf(0), MAPPER_TO_EMPTY.readValue("1.5", Long.TYPE));
{
LongWrapper w = MAPPER_TO_EMPTY.readValue( "{\"l\":-2.25 }", LongWrapper.class);
assertEquals(0L, w.l);
long[] l = MAPPER_TO_EMPTY.readValue("[ 2.5 ]", long[].class);
assertEquals(1, l.length);
assertEquals(0L, l[0]);
}
assertEquals(Short.valueOf((short)0), MAPPER_TO_EMPTY.readValue("1.2", Short.class));
assertEquals(Short.valueOf((short) 0), MAPPER_TO_EMPTY.readValue("1.5", Short.TYPE));
assertEquals(Byte.valueOf((byte)0), MAPPER_TO_EMPTY.readValue("1.2", Byte.class));
assertEquals(Byte.valueOf((byte) 0), MAPPER_TO_EMPTY.readValue("1.5", Byte.TYPE));
assertEquals(BigInteger.valueOf(0L), MAPPER_TO_EMPTY.readValue("124.5", BigInteger.class));
}
/*
/********************************************************
/* Test methods, CoerceConfig, coerce
/********************************************************
*/
@Test
public void testCoerceConfigFloatSuccess() throws Exception
{
assertEquals(Integer.valueOf(1), MAPPER_TRY_CONVERT.readValue("1.2", Integer.class));
assertEquals(Integer.valueOf(3), MAPPER_TRY_CONVERT.readValue("3.4", Integer.TYPE));
{
IntWrapper w = MAPPER_TRY_CONVERT.readValue( "{\"i\":-2.25 }", IntWrapper.class);
assertEquals(-2, w.i);
int[] ints = MAPPER_TRY_CONVERT.readValue("[ 22.10 ]", int[].class);
assertEquals(1, ints.length);
assertEquals(22, ints[0]);
}
assertEquals(Long.valueOf(1), MAPPER_TRY_CONVERT.readValue("1.2", Long.class));
assertEquals(Long.valueOf(1), MAPPER_TRY_CONVERT.readValue("1.5", Long.TYPE));
{
LongWrapper w = MAPPER_TRY_CONVERT.readValue( "{\"l\":-2.25 }", LongWrapper.class);
assertEquals(-2L, w.l);
long[] l = MAPPER_TRY_CONVERT.readValue("[ 2.2 ]", long[].class);
assertEquals(1, l.length);
assertEquals(2L, l[0]);
}
assertEquals(Short.valueOf((short)1), MAPPER_TRY_CONVERT.readValue("1.2", Short.class));
assertEquals(Short.valueOf((short) 19), MAPPER_TRY_CONVERT.readValue("19.2", Short.TYPE));
assertEquals(Byte.valueOf((byte)1), MAPPER_TRY_CONVERT.readValue("1.2", Byte.class));
assertEquals(Byte.valueOf((byte) 1), MAPPER_TRY_CONVERT.readValue("1.5", Byte.TYPE));
assertEquals(BigInteger.valueOf(124L), MAPPER_TRY_CONVERT.readValue("124.2", BigInteger.class));
}
/*
/********************************************************
/* Test methods, CoerceConfig, fail
/********************************************************
*/
@Test
public void testCoerceConfigFailFromFloat() throws Exception
{
_verifyCoerceFail(MAPPER_TO_FAIL, Integer.class, "1.5");
_verifyCoerceFail(MAPPER_TO_FAIL, Integer.TYPE, "1.5");
_verifyCoerceFail(MAPPER_TO_FAIL, IntWrapper.class, "{\"i\":-2.25 }", "int");
_verifyCoerceFail(MAPPER_TO_FAIL, int[].class, "[ 2.5 ]", "to `int` value");
_verifyCoerceFail(MAPPER_TO_FAIL, Long.class, "0.5");
_verifyCoerceFail(MAPPER_TO_FAIL, Long.TYPE, "-2.5");
_verifyCoerceFail(MAPPER_TO_FAIL, LongWrapper.class, "{\"l\": 7.7 }");
_verifyCoerceFail(MAPPER_TO_FAIL, long[].class, "[ -1.35 ]", "to `long` value");
_verifyCoerceFail(MAPPER_TO_FAIL, Short.class, "0.5");
_verifyCoerceFail(MAPPER_TO_FAIL, Short.TYPE, "-2.5");
_verifyCoerceFail(MAPPER_TO_FAIL, short[].class, "[ -1.35 ]", "to `short` value");
_verifyCoerceFail(MAPPER_TO_FAIL, Byte.class, "0.5");
_verifyCoerceFail(MAPPER_TO_FAIL, Byte.TYPE, "-2.5");
_verifyCoerceFail(MAPPER_TO_FAIL, byte[].class, "[ -1.35 ]", "to `byte` value");
_verifyCoerceFail(MAPPER_TO_FAIL, BigInteger.class, "25236.256");
}
/*
/********************************************************
/* Helper methods
/********************************************************
*/
private void _verifyCoerceFail(ObjectMapper m, Class<?> targetType,
String doc) throws Exception
{
_verifyCoerceFail(m.reader(), targetType, doc, targetType.getName());
}
private void _verifyCoerceFail(ObjectMapper m, Class<?> targetType,
String doc, String targetTypeDesc) throws Exception
{
_verifyCoerceFail(m.reader(), targetType, doc, targetTypeDesc);
}
private void _verifyCoerceFail(ObjectReader r, Class<?> targetType,
String doc) throws Exception
{
_verifyCoerceFail(r, targetType, doc, targetType.getName());
}
private void _verifyCoerceFail(ObjectReader r, Class<?> targetType,
String doc, String targetTypeDesc) throws Exception
{
try {
r.forType(targetType).readValue(doc);
fail("Should not accept Float for "+targetType.getName()+" by default");
} catch (MismatchedInputException e) {
verifyException(e, "Cannot coerce Floating-point");
verifyException(e, targetTypeDesc);
}
}
}
| CoerceFloatToIntTest |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/CommandLineBuilder.java | {
"start": 1204,
"end": 2625
} | class ____ {
protected final List<String> argumentList = new ArrayList<>(20);
/**
* Add an entry to the command list
* @param args arguments -these will be converted strings
*/
public void add(Object... args) {
for (Object arg : args) {
argumentList.add(arg.toString());
}
}
// Get the number of arguments
public int size() {
return argumentList.size();
}
/**
* Append the output and error files to the tail of the command
* @param stdout out
* @param stderr error. Set this to null to append into stdout
*/
public void addOutAndErrFiles(String stdout, String stderr) {
Preconditions.checkNotNull(stdout, "Null output file");
Preconditions.checkState(!stdout.isEmpty(), "output filename invalid");
// write out the path output
argumentList.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/" +
stdout);
if (stderr != null) {
argumentList.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/" +
stderr);
} else {
argumentList.add("2>&1");
}
}
/**
* This just returns the command line
* @see #build()
* @return the command line
*/
@Override
public String toString() {
return build();
}
/**
* Build the command line
* @return the command line
*/
public String build() {
return ServiceUtils.join(argumentList, " ");
}
}
| CommandLineBuilder |
java | google__gson | gson/src/test/java/com/google/gson/functional/JsonAdapterAnnotationOnFieldsTest.java | {
"start": 9853,
"end": 10069
} | class ____ {
@JsonAdapter(LongToStringTypeAdapterFactory.class)
final long part;
private GadgetWithPrimitivePart(long part) {
this.part = part;
}
}
private static final | GadgetWithPrimitivePart |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/deser/IncludeWithDeserTest.java | {
"start": 573,
"end": 652
} | class ____
{
@JsonIncludeProperties({"y", "z"})
static | IncludeWithDeserTest |
java | quarkusio__quarkus | extensions/smallrye-graphql/deployment/src/test/java/io/quarkus/smallrye/graphql/deployment/ui/DisabledTest.java | {
"start": 280,
"end": 692
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addAsResource(new StringAsset("quarkus.smallrye-graphql.ui.enabled=false"), "application.properties"));
@Test
public void shouldUseDefaultConfig() {
RestAssured.when().get("/graphql-ui").then().statusCode(404);
}
}
| DisabledTest |
java | spring-projects__spring-boot | module/spring-boot-micrometer-tracing-brave/src/main/java/org/springframework/boot/micrometer/tracing/brave/autoconfigure/BravePropagationConfigurations.java | {
"start": 3079,
"end": 6746
} | class ____ {
private final TracingProperties tracingProperties;
PropagationWithBaggage(TracingProperties tracingProperties) {
this.tracingProperties = tracingProperties;
}
@Bean
@ConditionalOnMissingBean
BaggagePropagation.FactoryBuilder propagationFactoryBuilder(
ObjectProvider<BaggagePropagationCustomizer> baggagePropagationCustomizers) {
// There's a chicken-and-egg problem here: to create a builder, we need a
// factory. But the CompositePropagationFactory needs data from the builder.
// We create a throw-away builder with a throw-away factory, and then copy the
// config to the real builder.
FactoryBuilder throwAwayBuilder = BaggagePropagation.newFactoryBuilder(createThrowAwayFactory());
baggagePropagationCustomizers.orderedStream()
.forEach((customizer) -> customizer.customize(throwAwayBuilder));
CompositePropagationFactory propagationFactory = CompositePropagationFactory.create(
this.tracingProperties.getPropagation(),
new BraveBaggageManager(this.tracingProperties.getBaggage().getTagFields(),
this.tracingProperties.getBaggage().getRemoteFields()),
LocalBaggageFields.extractFrom(throwAwayBuilder));
FactoryBuilder builder = BaggagePropagation.newFactoryBuilder(propagationFactory);
throwAwayBuilder.configs().forEach(builder::add);
return builder;
}
private Factory createThrowAwayFactory() {
return new Factory() {
@Override
public @Nullable Propagation<String> get() {
return null;
}
};
}
@Bean
BaggagePropagationCustomizer remoteFieldsBaggagePropagationCustomizer() {
return (builder) -> {
List<String> remoteFields = this.tracingProperties.getBaggage().getRemoteFields();
for (String fieldName : remoteFields) {
builder.add(BaggagePropagationConfig.SingleBaggageField.remote(BaggageField.create(fieldName)));
}
List<String> localFields = this.tracingProperties.getBaggage().getLocalFields();
for (String localFieldName : localFields) {
builder.add(BaggagePropagationConfig.SingleBaggageField.local(BaggageField.create(localFieldName)));
}
};
}
@Bean
@ConditionalOnMissingBean
@ConditionalOnEnabledTracingExport
Factory propagationFactory(BaggagePropagation.FactoryBuilder factoryBuilder) {
return factoryBuilder.build();
}
@Bean
@ConditionalOnMissingBean
CorrelationScopeDecorator.Builder mdcCorrelationScopeDecoratorBuilder(
ObjectProvider<CorrelationScopeCustomizer> correlationScopeCustomizers) {
CorrelationScopeDecorator.Builder builder = MDCScopeDecorator.newBuilder();
correlationScopeCustomizers.orderedStream().forEach((customizer) -> customizer.customize(builder));
return builder;
}
@Bean
@Order(0)
@ConditionalOnBooleanProperty(name = "management.tracing.baggage.correlation.enabled", matchIfMissing = true)
CorrelationScopeCustomizer correlationFieldsCorrelationScopeCustomizer() {
return (builder) -> {
Correlation correlationProperties = this.tracingProperties.getBaggage().getCorrelation();
for (String field : correlationProperties.getFields()) {
BaggageField baggageField = BaggageField.create(field);
SingleCorrelationField correlationField = SingleCorrelationField.newBuilder(baggageField)
.flushOnUpdate()
.build();
builder.add(correlationField);
}
};
}
@Bean
@ConditionalOnMissingBean(CorrelationScopeDecorator.class)
ScopeDecorator correlationScopeDecorator(CorrelationScopeDecorator.Builder builder) {
return builder.build();
}
}
/**
* Propagates neither traces nor baggage.
*/
@Configuration(proxyBeanMethods = false)
static | PropagationWithBaggage |
java | spring-projects__spring-framework | spring-aop/src/test/java/org/springframework/aop/framework/PrototypeTargetTests.java | {
"start": 2530,
"end": 2738
} | class ____ implements TestBean {
private static int constructionCount = 0;
public TestBeanImpl() {
constructionCount++;
}
@Override
public void doSomething() {
}
}
public static | TestBeanImpl |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/proxy/CallableStatementProxyImplTest.java | {
"start": 5279,
"end": 18600
} | class ____ extends CallableStatementProxyImpl {
private FakeCallableStatement(ConnectionProxy connection, CallableStatement statement, String sql, long id) {
super(connection, statement, sql, id);
}
@Override
public ResultSet getResultSet() throws SQLException {
return null;
}
@Override
public void cancel() throws SQLException {
}
@Override
public void registerOutParameter(int parameterIndex, int sqlType) throws SQLException {
}
@Override
public void registerOutParameter(int parameterIndex, int sqlType, int scale) throws SQLException {
}
@Override
public void registerOutParameter(String parameterIndex, int sqlType, String typeName) throws SQLException {
}
@Override
public void registerOutParameter(String parameterIndex, int sqlType) throws SQLException {
}
@Override
public void registerOutParameter(String parameterIndex, int sqlType, int scale) throws SQLException {
}
@Override
public void registerOutParameter(int parameterIndex, int sqlType, String typeName) throws SQLException {
}
@Override
public Clob getClob(int parameterIndex) throws SQLException {
return null;
}
@Override
public void setRowId(int parameterIndex, RowId x) throws SQLException {
}
@Override
public void setRef(int parameterIndex, Ref x) throws SQLException {
}
@Override
public void setObject(int parameterIndex, Object x) throws SQLException {
}
@Override
public void setNString(int parameterIndex, String x) throws SQLException {
}
@Override
public void setNCharacterStream(int parameterIndex, Reader x) throws SQLException {
}
@Override
public void setNCharacterStream(int parameterIndex, Reader x, long length) throws SQLException {
}
@Override
public void setNClob(int parameterIndex, NClob x) throws SQLException {
}
@Override
public void setNClob(int parameterIndex, Reader x) throws SQLException {
}
@Override
public void setNClob(int parameterIndex, Reader x, long length) throws SQLException {
}
@Override
public void setArray(int parameterIndex, Array x) throws SQLException {
}
@Override
public void setURL(int parameterIndex, URL x) throws SQLException {
}
@Override
public void setUnicodeStream(int parameterIndex, InputStream x, int length) throws SQLException {
}
@Override
public java.net.URL getURL(int parameterIndex) throws SQLException {
return null;
}
@Override
public void setSQLXML(int parameterIndex, SQLXML x) throws SQLException {
}
@Override
public void setURL(String parameterName, java.net.URL val) throws SQLException {
}
@Override
public void setNull(String parameterName, int sqlType) throws SQLException {
}
@Override
public void setBoolean(String parameterName, boolean x) throws SQLException {
}
@Override
public void setByte(String parameterName, byte x) throws SQLException {
}
@Override
public void setShort(String parameterName, short x) throws SQLException {
}
@Override
public void setInt(String parameterName, int x) throws SQLException {
}
@Override
public void setLong(String parameterName, long x) throws SQLException {
}
@Override
public void setFloat(String parameterName, float x) throws SQLException {
}
@Override
public void setDouble(String parameterName, double x) throws SQLException {
}
@Override
public void setBigDecimal(String parameterName, BigDecimal x) throws SQLException {
}
@Override
public void setString(String parameterName, String x) throws SQLException {
}
@Override
public void setBytes(String parameterName, byte[] x) throws SQLException {
}
@Override
public void setDate(String parameterName, java.sql.Date x) throws SQLException {
}
@Override
public void setTime(String parameterName, java.sql.Time x) throws SQLException {
}
@Override
public void setTimestamp(String parameterName, java.sql.Timestamp x) throws SQLException {
}
@Override
public void setAsciiStream(String parameterName, java.io.InputStream x, int length) throws SQLException {
}
@Override
public void setBinaryStream(String parameterName, java.io.InputStream x, int length) throws SQLException {
}
@Override
public void setObject(String parameterName, Object x, int targetSqlType, int scale) throws SQLException {
}
@Override
public void setObject(String parameterName, Object x, int targetSqlType) throws SQLException {
}
@Override
public void setObject(String parameterName, Object x) throws SQLException {
}
@Override
public void setCharacterStream(String parameterName, java.io.Reader reader, int length) throws SQLException {
}
@Override
public void setDate(String parameterName, java.sql.Date x, Calendar cal) throws SQLException {
}
@Override
public void setTime(String parameterName, java.sql.Time x, Calendar cal) throws SQLException {
}
@Override
public void setTimestamp(String parameterName, java.sql.Timestamp x, Calendar cal) throws SQLException {
}
@Override
public void setNull(String parameterName, int sqlType, String typeName) throws SQLException {
}
@Override
public String getString(String parameterName) throws SQLException {
return null;
}
@Override
public boolean getBoolean(String parameterName) throws SQLException {
return true;
}
@Override
public byte getByte(String parameterName) throws SQLException {
return 0;
}
@Override
public short getShort(String parameterName) throws SQLException {
return 0;
}
@Override
public int getInt(String parameterName) throws SQLException {
return 0;
}
@Override
public long getLong(String parameterName) throws SQLException {
return 0;
}
@Override
public float getFloat(String parameterName) throws SQLException {
return 0;
}
@Override
public double getDouble(String parameterName) throws SQLException {
return 0;
}
@Override
public byte[] getBytes(String parameterName) throws SQLException {
return null;
}
@Override
public java.sql.Date getDate(String parameterName) throws SQLException {
return null;
}
@Override
public java.sql.Time getTime(String parameterName) throws SQLException {
return null;
}
@Override
public java.sql.Timestamp getTimestamp(String parameterName) throws SQLException {
return null;
}
@Override
public Object getObject(String parameterName) throws SQLException {
return null;
}
@Override
public BigDecimal getBigDecimal(String parameterName) throws SQLException {
return null;
}
@Override
public Object getObject(String parameterName, java.util.Map<String, Class<?>> map) throws SQLException {
return null;
}
@Override
public Ref getRef(String parameterName) throws SQLException {
return null;
}
@Override
public Blob getBlob(String parameterName) throws SQLException {
return null;
}
@Override
public Clob getClob(String parameterName) throws SQLException {
return null;
}
@Override
public Array getArray(String parameterName) throws SQLException {
return null;
}
@Override
public java.sql.Date getDate(String parameterName, Calendar cal) throws SQLException {
return null;
}
@Override
public java.sql.Time getTime(String parameterName, Calendar cal) throws SQLException {
return null;
}
@Override
public java.sql.Timestamp getTimestamp(String parameterName, Calendar cal) throws SQLException {
return null;
}
@Override
public java.net.URL getURL(String parameterName) throws SQLException {
return null;
}
@Override
public RowId getRowId(int parameterIndex) throws SQLException {
return null;
}
@Override
public RowId getRowId(String parameterName) throws SQLException {
return null;
}
@Override
public void setRowId(String parameterName, RowId x) throws SQLException {
}
@Override
public void setNString(String parameterName, String value) throws SQLException {
}
@Override
public void setNCharacterStream(String parameterName, Reader value, long length) throws SQLException {
}
@Override
public void setNClob(String parameterName, NClob value) throws SQLException {
}
@Override
public void setClob(String parameterName, Reader reader, long length) throws SQLException {
}
@Override
public void setBlob(String parameterName, InputStream inputStream, long length) throws SQLException {
}
@Override
public void setNClob(String parameterName, Reader reader, long length) throws SQLException {
}
@Override
public NClob getNClob(int parameterIndex) throws SQLException {
return null;
}
@Override
public NClob getNClob(String parameterName) throws SQLException {
return null;
}
@Override
public void setSQLXML(String parameterName, SQLXML xmlObject) throws SQLException {
}
@Override
public SQLXML getSQLXML(int parameterIndex) throws SQLException {
return null;
}
@Override
public SQLXML getSQLXML(String parameterName) throws SQLException {
return null;
}
@Override
public String getNString(int parameterIndex) throws SQLException {
return null;
}
@Override
public String getNString(String parameterName) throws SQLException {
return null;
}
@Override
public java.io.Reader getNCharacterStream(int parameterIndex) throws SQLException {
return null;
}
@Override
public java.io.Reader getNCharacterStream(String parameterName) throws SQLException {
return null;
}
@Override
public java.io.Reader getCharacterStream(int parameterIndex) throws SQLException {
return null;
}
@Override
public java.io.Reader getCharacterStream(String parameterName) throws SQLException {
return null;
}
@Override
public void setBlob(String parameterName, Blob x) throws SQLException {
}
@Override
public void setClob(String parameterName, Clob x) throws SQLException {
}
@Override
public void setAsciiStream(String parameterName, java.io.InputStream x, long length) throws SQLException {
}
@Override
public void setBinaryStream(String parameterName, java.io.InputStream x, long length) throws SQLException {
}
@Override
public void setCharacterStream(String parameterName, java.io.Reader reader, long length) throws SQLException {
}
@Override
public void setAsciiStream(String parameterName, java.io.InputStream x) throws SQLException {
}
@Override
public void setBinaryStream(String parameterName, java.io.InputStream x) throws SQLException {
}
@Override
public void setCharacterStream(String parameterName, java.io.Reader reader) throws SQLException {
}
@Override
public void setNCharacterStream(String parameterName, Reader value) throws SQLException {
}
@Override
public void setClob(String parameterName, Reader reader) throws SQLException {
}
@Override
public void setBlob(String parameterName, InputStream inputStream) throws SQLException {
}
@Override
public void setNClob(String parameterName, Reader reader) throws SQLException {
}
}
}
| FakeCallableStatement |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/jpa/cascade2/Other.java | {
"start": 209,
"end": 418
} | class ____ {
private Long id;
private Parent owner;
public Long getId() {
return id;
}
public Parent getOwner() {
return owner;
}
public void setOwner(Parent owner) {
this.owner = owner;
}
}
| Other |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java | {
"start": 3566,
"end": 25438
} | class ____
implements FSNamesystemMBean, NameNodeMXBean, NameNodeStatusMXBean {
private static final Logger LOG =
LoggerFactory.getLogger(NamenodeBeanMetrics.class);
/** Instance of the Router being monitored. */
private final Router router;
/** FSNamesystem bean. */
private ObjectName fsBeanName;
/** FSNamesystemState bean. */
private ObjectName fsStateBeanName;
/** NameNodeInfo bean. */
private ObjectName nnInfoBeanName;
/** NameNodeStatus bean. */
private ObjectName nnStatusBeanName;
/** Timeout to get the DN report. */
private final long dnReportTimeOut;
/** DN type -> full DN report in JSON. */
private final LoadingCache<DatanodeReportType, String> dnCache;
public NamenodeBeanMetrics(Router router) {
this.router = router;
try {
// TODO this needs to be done with the Metrics from FSNamesystem
StandardMBean bean = new StandardMBean(this, FSNamesystemMBean.class);
this.fsBeanName = MBeans.register("NameNode", "FSNamesystem", bean);
LOG.info("Registered FSNamesystem MBean: {}", this.fsBeanName);
} catch (NotCompliantMBeanException e) {
throw new RuntimeException("Bad FSNamesystem MBean setup", e);
}
try {
StandardMBean bean = new StandardMBean(this, FSNamesystemMBean.class);
this.fsStateBeanName =
MBeans.register("NameNode", "FSNamesystemState", bean);
LOG.info("Registered FSNamesystemState MBean: {}", this.fsStateBeanName);
} catch (NotCompliantMBeanException e) {
throw new RuntimeException("Bad FSNamesystemState MBean setup", e);
}
try {
StandardMBean bean = new StandardMBean(this, NameNodeMXBean.class);
this.nnInfoBeanName = MBeans.register("NameNode", "NameNodeInfo", bean);
LOG.info("Registered NameNodeInfo MBean: {}", this.nnInfoBeanName);
} catch (NotCompliantMBeanException e) {
throw new RuntimeException("Bad NameNodeInfo MBean setup", e);
}
try {
StandardMBean bean = new StandardMBean(this, NameNodeStatusMXBean.class);
this.nnStatusBeanName =
MBeans.register("NameNode", "NameNodeStatus", bean);
LOG.info("Registered NameNodeStatus MBean: {}", this.nnStatusBeanName);
} catch (NotCompliantMBeanException e) {
throw new RuntimeException("Bad NameNodeStatus MBean setup", e);
}
// Initialize the cache for the DN reports
Configuration conf = router.getConfig();
this.dnReportTimeOut = conf.getTimeDuration(
RBFConfigKeys.DN_REPORT_TIME_OUT,
RBFConfigKeys.DN_REPORT_TIME_OUT_MS_DEFAULT, TimeUnit.MILLISECONDS);
long dnCacheExpire = conf.getTimeDuration(
RBFConfigKeys.DN_REPORT_CACHE_EXPIRE,
RBFConfigKeys.DN_REPORT_CACHE_EXPIRE_MS_DEFAULT, TimeUnit.MILLISECONDS);
this.dnCache = CacheBuilder.newBuilder()
.expireAfterWrite(dnCacheExpire, TimeUnit.MILLISECONDS)
.build(
new CacheLoader<DatanodeReportType, String>() {
@Override
public String load(DatanodeReportType type) throws Exception {
return getNodesImpl(type);
}
});
}
/**
* De-register the JMX interfaces.
*/
public void close() {
if (fsStateBeanName != null) {
MBeans.unregister(fsStateBeanName);
fsStateBeanName = null;
}
if (nnInfoBeanName != null) {
MBeans.unregister(nnInfoBeanName);
nnInfoBeanName = null;
}
// Remove the NameNode status bean
if (nnStatusBeanName != null) {
MBeans.unregister(nnStatusBeanName);
nnStatusBeanName = null;
}
}
private RBFMetrics getRBFMetrics() throws IOException {
RBFMetrics metrics = getRouter().getMetrics();
if (metrics == null) {
throw new IOException("Federated metrics is not initialized");
}
return metrics;
}
/////////////////////////////////////////////////////////
// NameNodeMXBean
/////////////////////////////////////////////////////////
@Override
public String getVersion() {
return VersionInfo.getVersion() + ", r" + VersionInfo.getRevision();
}
@Override
public String getSoftwareVersion() {
return VersionInfo.getVersion();
}
@Override
public long getUsed() {
try {
return getRBFMetrics().getUsedCapacity();
} catch (IOException e) {
LOG.debug("Failed to get the used capacity", e);
}
return 0;
}
@Override
public long getFree() {
try {
return getRBFMetrics().getRemainingCapacity();
} catch (IOException e) {
LOG.debug("Failed to get remaining capacity", e);
}
return 0;
}
@Override
public long getTotal() {
try {
return getRBFMetrics().getTotalCapacity();
} catch (IOException e) {
LOG.debug("Failed to Get total capacity", e);
}
return 0;
}
@Override
public long getProvidedCapacity() {
try {
return getRBFMetrics().getProvidedSpace();
} catch (IOException e) {
LOG.debug("Failed to get provided capacity", e);
}
return 0;
}
@Override
public String getSafemode() {
try {
return getRBFMetrics().getSafemode();
} catch (IOException e) {
return "Failed to get safemode status. Please check router"
+ "log for more detail.";
}
}
@Override
public boolean isUpgradeFinalized() {
// We assume the upgrade is always finalized in a federated biew
return true;
}
@Override
public RollingUpgradeInfo.Bean getRollingUpgradeStatus() {
return null;
}
@Override
public long getNonDfsUsedSpace() {
return 0;
}
@Override
public float getPercentUsed() {
return DFSUtilClient.getPercentUsed(getCapacityUsed(), getCapacityTotal());
}
@Override
public float getPercentRemaining() {
return DFSUtilClient.getPercentUsed(
getCapacityRemaining(), getCapacityTotal());
}
@Override
public long getCacheUsed() {
return 0;
}
@Override
public long getCacheCapacity() {
return 0;
}
@Override
public long getBlockPoolUsedSpace() {
return 0;
}
@Override
public float getPercentBlockPoolUsed() {
return 0;
}
@Override
public long getTotalBlocks() {
try {
return getRBFMetrics().getNumBlocks();
} catch (IOException e) {
LOG.debug("Failed to get number of blocks", e);
}
return 0;
}
@Override
public long getNumberOfMissingBlocks() {
try {
return getRBFMetrics().getNumOfMissingBlocks();
} catch (IOException e) {
LOG.debug("Failed to get number of missing blocks", e);
}
return 0;
}
@Override
@Deprecated
public long getPendingReplicationBlocks() {
try {
return getRBFMetrics().getNumOfBlocksPendingReplication();
} catch (IOException e) {
LOG.debug("Failed to get number of blocks pending replica", e);
}
return 0;
}
@Override
public long getPendingReconstructionBlocks() {
try {
return getRBFMetrics().getNumOfBlocksPendingReplication();
} catch (IOException e) {
LOG.debug("Failed to get number of blocks pending replica", e);
}
return 0;
}
@Override
@Deprecated
public long getUnderReplicatedBlocks() {
try {
return getRBFMetrics().getNumOfBlocksUnderReplicated();
} catch (IOException e) {
LOG.debug("Failed to get number of blocks under replicated", e);
}
return 0;
}
@Override
public long getLowRedundancyBlocks() {
try {
return getRBFMetrics().getNumOfBlocksUnderReplicated();
} catch (IOException e) {
LOG.debug("Failed to get number of blocks under replicated", e);
}
return 0;
}
@Override
public long getPendingDeletionBlocks() {
try {
return getRBFMetrics().getNumOfBlocksPendingDeletion();
} catch (IOException e) {
LOG.debug("Failed to get number of blocks pending deletion", e);
}
return 0;
}
@Override
public long getScheduledReplicationBlocks() {
try {
return getRBFMetrics().getScheduledReplicationBlocks();
} catch (IOException e) {
LOG.debug("Failed to get number of scheduled replication blocks", e);
}
return 0;
}
@Override
public long getNumberOfMissingBlocksWithReplicationFactorOne() {
try {
return getRBFMetrics().getNumberOfMissingBlocksWithReplicationFactorOne();
} catch (IOException e) {
LOG.debug("Failed to get number of missing blocks with replication "
+ "factor one.", e);
}
return 0;
}
@Override
public long getNumberOfBadlyDistributedBlocks() {
try {
return getRBFMetrics().getNumberOfBadlyDistributedBlocks();
} catch (IOException e) {
LOG.debug("Failed to get number of badly distributed blocks", e);
}
return 0;
}
@Override
public long getHighestPriorityLowRedundancyReplicatedBlocks() {
try {
return getRBFMetrics().getHighestPriorityLowRedundancyReplicatedBlocks();
} catch (IOException e) {
LOG.debug("Failed to get number of highest priority low redundancy "
+ "replicated blocks.", e);
}
return 0;
}
@Override
public long getHighestPriorityLowRedundancyECBlocks() {
try {
return getRBFMetrics().getHighestPriorityLowRedundancyECBlocks();
} catch (IOException e) {
LOG.debug("Failed to get number of highest priority low redundancy EC "
+ "blocks.", e);
}
return 0;
}
@Override
public String getCorruptFiles() {
return "N/A";
}
@Override
public int getCorruptFilesCount() {
try {
return getRBFMetrics().getCorruptFilesCount();
} catch (IOException e) {
LOG.debug("Failed to get number of corrupt files", e);
}
return 0;
}
@Override
public int getThreads() {
return ManagementFactory.getThreadMXBean().getThreadCount();
}
@Override
public String getLiveNodes() {
return this.getNodes(DatanodeReportType.LIVE);
}
@Override
public String getDeadNodes() {
return this.getNodes(DatanodeReportType.DEAD);
}
@Override
public String getDecomNodes() {
return this.getNodes(DatanodeReportType.DECOMMISSIONING);
}
/**
* Get all the nodes in the federation from a particular type. Getting this
* information is expensive and we use a cache.
* @param type Type of the datanodes to check.
* @return JSON with the nodes.
*/
private String getNodes(final DatanodeReportType type) {
try {
return this.dnCache.get(type);
} catch (ExecutionException e) {
LOG.error("Cannot get the DN storage report for {}", type, e);
}
// If we cannot get the report, return empty JSON
return "{}";
}
/**
* Get all the nodes in the federation from a particular type.
* @param type Type of the datanodes to check.
* @return JSON with the nodes.
*/
private String getNodesImpl(final DatanodeReportType type) {
final Map<String, Map<String, Object>> info = new HashMap<>();
try {
RouterClientProtocol clientProtocol =
this.router.getRpcServer().getClientProtocolModule();
DatanodeStorageReport[] datanodeStorageReports =
clientProtocol.getDatanodeStorageReport(type, false, dnReportTimeOut);
if (router.getRpcServer().isAsync()) {
datanodeStorageReports = syncReturn(DatanodeStorageReport[].class);
}
for (DatanodeStorageReport datanodeStorageReport : datanodeStorageReports) {
DatanodeInfo node = datanodeStorageReport.getDatanodeInfo();
StorageReport[] storageReports = datanodeStorageReport.getStorageReports();
Map<String, Object> innerinfo = new HashMap<>();
innerinfo.put("infoAddr", node.getInfoAddr());
innerinfo.put("infoSecureAddr", node.getInfoSecureAddr());
innerinfo.put("xferaddr", node.getXferAddr());
innerinfo.put("location", node.getNetworkLocation());
innerinfo.put("uuid", Optional.ofNullable(node.getDatanodeUuid()).orElse(""));
innerinfo.put("lastContact", getLastContact(node));
innerinfo.put("usedSpace", node.getDfsUsed());
innerinfo.put("adminState", node.getAdminState().toString());
innerinfo.put("nonDfsUsedSpace", node.getNonDfsUsed());
innerinfo.put("capacity", node.getCapacity());
innerinfo.put("numBlocks", node.getNumBlocks());
innerinfo.put("version", (node.getSoftwareVersion() == null ?
"UNKNOWN" : node.getSoftwareVersion()));
innerinfo.put("used", node.getDfsUsed());
innerinfo.put("remaining", node.getRemaining());
innerinfo.put("blockScheduled", -1); // node.getBlocksScheduled()
innerinfo.put("blockPoolUsed", node.getBlockPoolUsed());
innerinfo.put("blockPoolUsedPercent", node.getBlockPoolUsedPercent());
innerinfo.put("volfails", -1); // node.getVolumeFailures()
innerinfo.put("blockPoolUsedPercentStdDev",
Util.getBlockPoolUsedPercentStdDev(storageReports));
innerinfo.put("lastBlockReport", getLastBlockReport(node));
info.put(node.getXferAddrWithHostname(),
Collections.unmodifiableMap(innerinfo));
}
} catch (StandbyException e) {
LOG.error("Cannot get {} nodes, Router in safe mode", type);
} catch (SubClusterTimeoutException e) {
LOG.error("Cannot get {} nodes, subclusters timed out responding", type);
} catch (Exception e) {
LOG.error("Cannot get " + type + " nodes", e);
}
return JSON.toString(info);
}
@Override
public String getClusterId() {
try {
return getNamespaceInfo(FederationNamespaceInfo::getClusterId).toString();
} catch (IOException e) {
LOG.error("Cannot fetch cluster ID metrics {}", e.getMessage());
return "";
}
}
@Override
public String getBlockPoolId() {
try {
return
getNamespaceInfo(FederationNamespaceInfo::getBlockPoolId).toString();
} catch (IOException e) {
LOG.error("Cannot fetch block pool ID metrics {}", e.getMessage());
return "";
}
}
/**
* Build a set of unique values found in all namespaces.
*
* @param f Method reference of the appropriate FederationNamespaceInfo
* getter function
* @return Set of unique string values found in all discovered namespaces.
* @throws IOException if the query could not be executed.
*/
private Collection<String> getNamespaceInfo(
Function<FederationNamespaceInfo, String> f) throws IOException {
StateStoreService stateStore = router.getStateStore();
MembershipStore membershipStore =
stateStore.getRegisteredRecordStore(MembershipStore.class);
GetNamespaceInfoRequest request = GetNamespaceInfoRequest.newInstance();
GetNamespaceInfoResponse response =
membershipStore.getNamespaceInfo(request);
return response.getNamespaceInfo().stream()
.map(f)
.collect(Collectors.toSet());
}
@Override
public String getNameDirStatuses() {
return "N/A";
}
@Override
public String getNodeUsage() {
return "N/A";
}
@Override
public String getNameJournalStatus() {
return "N/A";
}
@Override
public String getJournalTransactionInfo() {
return "N/A";
}
@Override
public long getNNStartedTimeInMillis() {
try {
return getRouter().getStartTime();
} catch (IOException e) {
LOG.debug("Failed to get the router startup time", e);
}
return 0;
}
@Override
public String getCompileInfo() {
return VersionInfo.getDate() + " by " + VersionInfo.getUser() +
" from " + VersionInfo.getBranch();
}
@Override
public int getDistinctVersionCount() {
return 0;
}
@Override
public Map<String, Integer> getDistinctVersions() {
return null;
}
/////////////////////////////////////////////////////////
// FSNamesystemMBean
/////////////////////////////////////////////////////////
@Override
public String getFSState() {
// We assume is not in safe mode
return "Operational";
}
@Override
public long getBlocksTotal() {
return this.getTotalBlocks();
}
@Override
public long getCapacityTotal() {
return this.getTotal();
}
@Override
public long getCapacityRemaining() {
return this.getFree();
}
@Override
public long getCapacityUsed() {
return this.getUsed();
}
@Override
public long getProvidedCapacityTotal() {
return getProvidedCapacity();
}
@Override
public long getFilesTotal() {
try {
return getRBFMetrics().getNumFiles();
} catch (IOException e) {
LOG.debug("Failed to get number of files", e);
}
return 0;
}
@Override
public int getTotalLoad() {
return -1;
}
@Override
public int getNumLiveDataNodes() {
try {
return getRBFMetrics().getNumLiveNodes();
} catch (IOException e) {
LOG.debug("Failed to get number of live nodes", e);
}
return 0;
}
@Override
public int getNumDeadDataNodes() {
try {
return getRBFMetrics().getNumDeadNodes();
} catch (IOException e) {
LOG.debug("Failed to get number of dead nodes", e);
}
return 0;
}
@Override
public int getNumStaleDataNodes() {
try {
return getRBFMetrics().getNumStaleNodes();
} catch (IOException e) {
LOG.debug("Failed to get number of stale nodes", e);
}
return 0;
}
@Override
public int getNumDecomLiveDataNodes() {
try {
return getRBFMetrics().getNumDecomLiveNodes();
} catch (IOException e) {
LOG.debug("Failed to get the number of live decommissioned datanodes",
e);
}
return 0;
}
@Override
public int getNumDecomDeadDataNodes() {
try {
return getRBFMetrics().getNumDecomDeadNodes();
} catch (IOException e) {
LOG.debug("Failed to get the number of dead decommissioned datanodes",
e);
}
return 0;
}
@Override
public int getNumDecommissioningDataNodes() {
try {
return getRBFMetrics().getNumDecommissioningNodes();
} catch (IOException e) {
LOG.debug("Failed to get number of decommissioning nodes", e);
}
return 0;
}
@Override
public int getNumInMaintenanceLiveDataNodes() {
try {
return getRBFMetrics().getNumInMaintenanceLiveDataNodes();
} catch (IOException e) {
LOG.debug("Failed to get number of live in maintenance nodes", e);
}
return 0;
}
@Override
public int getNumInMaintenanceDeadDataNodes() {
try {
return getRBFMetrics().getNumInMaintenanceDeadDataNodes();
} catch (IOException e) {
LOG.debug("Failed to get number of dead in maintenance nodes", e);
}
return 0;
}
@Override
public int getNumEnteringMaintenanceDataNodes() {
try {
return getRBFMetrics().getNumEnteringMaintenanceDataNodes();
} catch (IOException e) {
LOG.debug("Failed to get number of entering maintenance nodes", e);
}
return 0;
}
@Override
public int getNumInServiceLiveDataNodes() {
return 0;
}
@Override
public int getVolumeFailuresTotal() {
return 0;
}
@Override
public long getEstimatedCapacityLostTotal() {
return 0;
}
@Override
public String getSnapshotStats() {
return null;
}
@Override
public long getMaxObjects() {
return 0;
}
@Override
public long getBlockDeletionStartTime() {
return -1;
}
@Override
public int getNumStaleStorages() {
return -1;
}
@Override
public String getTopUserOpCounts() {
return "N/A";
}
@Override
public int getFsLockQueueLength() {
return 0;
}
@Override
public long getTotalSyncCount() {
return 0;
}
@Override
public String getTotalSyncTimes() {
return "";
}
private long getLastContact(DatanodeInfo node) {
return (now() - node.getLastUpdate()) / 1000;
}
private long getLastBlockReport(DatanodeInfo node) {
return (now() - node.getLastBlockReportTime()) / 60000;
}
/////////////////////////////////////////////////////////
// NameNodeStatusMXBean
/////////////////////////////////////////////////////////
@Override
public String getNNRole() {
return NamenodeRole.NAMENODE.toString();
}
@Override
public String getState() {
return HAServiceState.ACTIVE.toString();
}
@Override
public String getHostAndPort() {
return NetUtils.getHostPortString(router.getRpcServerAddress());
}
@Override
public boolean isSecurityEnabled() {
try {
return getRBFMetrics().isSecurityEnabled();
} catch (IOException e) {
LOG.debug("Failed to get security status", e);
}
return false;
}
@Override
public long getLastHATransitionTime() {
return 0;
}
@Override
public long getBytesWithFutureGenerationStamps() {
return 0;
}
@Override
public String getSlowPeersReport() {
return "N/A";
}
@Override
public String getSlowDisksReport() {
return "N/A";
}
@Override
public long getNumberOfSnapshottableDirs() {
return 0;
}
@Override
public String getEnteringMaintenanceNodes() {
return "{}";
}
@Override
public String getNameDirSize() {
return "N/A";
}
@Override
public int getNumEncryptionZones() {
return 0;
}
@Override
public String getVerifyECWithTopologyResult() {
return null;
}
@Override
public long getCurrentTokensCount() {
return 0;
}
@Override
public int getPendingSPSPaths() {
try {
return getRBFMetrics().getPendingSPSPaths();
} catch (IOException e) {
LOG.debug("Failed to get number of paths to be processed by sps", e);
}
return 0;
}
@Override
public float getReconstructionQueuesInitProgress() {
return 0;
}
private Router getRouter() throws IOException {
if (this.router == null) {
throw new IOException("Router is not initialized");
}
return this.router;
}
}
| NamenodeBeanMetrics |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/flush/AutoFlushBeforeLoadTest.java | {
"start": 2763,
"end": 3541
} | class ____ {
@Id
private Long id;
@NaturalId
private String name;
@OneToOne(mappedBy = "owner", fetch = FetchType.LAZY)
private DriversLicense driversLicense;
@OneToMany(mappedBy = "person", cascade = CascadeType.ALL)
private List<Phone> phones = new ArrayList<>();
public Person() {
}
public Person(Long id, String name) {
this.id = id;
this.name = name;
}
public Long getId() {
return id;
}
public String getName() {
return name;
}
public List<Phone> getPhones() {
return phones;
}
public DriversLicense getDriversLicense() {
return driversLicense;
}
public void setDriversLicense(DriversLicense driversLicense) {
this.driversLicense = driversLicense;
}
}
@Entity(name = "Phone")
public static | Person |
java | dropwizard__dropwizard | dropwizard-servlets/src/main/java/io/dropwizard/servlets/assets/ResourceURL.java | {
"start": 462,
"end": 5455
} | class ____ {
private ResourceURL() { /* singleton */ }
/**
* Returns true if the URL passed to it corresponds to a directory. This is slightly tricky due to some quirks
* of the {@link JarFile} API. Only jar:// and file:// URLs are supported.
*
* @param resourceURL the URL to check
* @return true if resource is a directory
*/
public static boolean isDirectory(URL resourceURL) throws URISyntaxException {
final String protocol = resourceURL.getProtocol();
switch (protocol) {
case "jar":
try {
final JarURLConnection jarConnection = (JarURLConnection) resourceURL.openConnection();
final JarEntry entry = jarConnection.getJarEntry();
if (entry.isDirectory()) {
return true;
}
// WARNING! Heuristics ahead.
// It turns out that JarEntry#isDirectory() really just tests whether the filename ends in a '/'.
// If you try to open the same URL without a trailing '/', it'll succeed — but the result won't be
// what you want. We try to get around this by calling getInputStream() on the file inside the jar.
// This seems to return null for directories (though that behavior is undocumented as far as I
// can tell). If you have a better idea, please improve this.
final String relativeFilePath = entry.getName();
final JarFile jarFile = jarConnection.getJarFile();
final ZipEntry zipEntry = jarFile.getEntry(relativeFilePath);
final InputStream inputStream = jarFile.getInputStream(zipEntry);
return inputStream == null;
} catch (IOException e) {
throw new ResourceNotFoundException(e);
}
case "file":
return new File(resourceURL.toURI()).isDirectory();
default:
throw new IllegalArgumentException("Unsupported protocol " + resourceURL.getProtocol() +
" for resource " + resourceURL);
}
}
/**
* Appends a trailing '/' to a {@link URL} object. Does not append a slash if one is already present.
*
* @param originalURL The URL to append a slash to
* @return a new URL object that ends in a slash
*/
public static URL appendTrailingSlash(URL originalURL) {
try {
return originalURL.getPath().endsWith("/") ? originalURL :
new URL(originalURL.getProtocol(),
originalURL.getHost(),
originalURL.getPort(),
originalURL.getFile() + '/');
} catch (MalformedURLException ignored) { // shouldn't happen
throw new IllegalArgumentException("Invalid resource URL: " + originalURL);
}
}
/**
* Returns the last modified time for file:// and jar:// URLs. This is slightly tricky for a couple of reasons:
* 1) calling getConnection on a {@link URLConnection} to a file opens an {@link InputStream} to that file that
* must then be closed — though this is not true for {@code URLConnection}s to jar resources
* 2) calling getLastModified on {@link JarURLConnection}s returns the last modified time of the jar file, rather
* than the file within
*
* @param resourceURL the URL to return the last modified time for
* @return the last modified time of the resource, expressed as the number of milliseconds since the epoch, or 0
* if there was a problem
*/
public static long getLastModified(URL resourceURL) {
final String protocol = resourceURL.getProtocol();
switch (protocol) {
case "jar":
try {
final JarURLConnection jarConnection = (JarURLConnection) resourceURL.openConnection();
final JarEntry entry = jarConnection.getJarEntry();
return entry.getTime();
} catch (IOException ignored) {
}
return 0;
case "file":
URLConnection connection = null;
try {
connection = resourceURL.openConnection();
return connection.getLastModified();
} catch (IOException ignored) {
} finally {
if (connection != null) {
try {
connection.getInputStream().close();
} catch (IOException ignored) {
}
}
}
return 0;
default:
throw new IllegalArgumentException("Unsupported protocol " + protocol + " for resource " + resourceURL);
}
}
}
| ResourceURL |
java | ReactiveX__RxJava | src/test/java/io/reactivex/rxjava3/observers/SerializedObserverTest.java | {
"start": 18688,
"end": 20357
} | class ____ implements Runnable {
private final CountDownLatch latch;
private final Observer<String> observer;
private final int numStringsToSend;
final AtomicInteger produced;
private final CountDownLatch running;
OnNextThread(Observer<String> observer, int numStringsToSend, CountDownLatch latch, CountDownLatch running) {
this(observer, numStringsToSend, new AtomicInteger(), latch, running);
}
OnNextThread(Observer<String> observer, int numStringsToSend, AtomicInteger produced) {
this(observer, numStringsToSend, produced, null, null);
}
OnNextThread(Observer<String> observer, int numStringsToSend, AtomicInteger produced, CountDownLatch latch, CountDownLatch running) {
this.observer = observer;
this.numStringsToSend = numStringsToSend;
this.produced = produced;
this.latch = latch;
this.running = running;
}
OnNextThread(Observer<String> observer, int numStringsToSend) {
this(observer, numStringsToSend, new AtomicInteger());
}
@Override
public void run() {
if (running != null) {
running.countDown();
}
for (int i = 0; i < numStringsToSend; i++) {
observer.onNext(Thread.currentThread().getId() + "-" + i);
if (latch != null) {
latch.countDown();
}
produced.incrementAndGet();
}
}
}
/**
* A thread that will call onError or onNext.
*/
public static | OnNextThread |
java | apache__dubbo | dubbo-remoting/dubbo-remoting-api/src/test/java/org/apache/dubbo/remoting/transport/DecodeHandlerTest.java | {
"start": 1226,
"end": 2237
} | class ____ {
@Test
void test() throws Exception {
ChannelHandler handler = Mockito.mock(ChannelHandler.class);
Channel channel = Mockito.mock(Channel.class);
DecodeHandler decodeHandler = new DecodeHandler(handler);
MockData mockData = new MockData();
decodeHandler.received(channel, mockData);
Assertions.assertTrue(mockData.isDecoded());
MockData mockRequestData = new MockData();
Request request = new Request(1);
request.setData(mockRequestData);
decodeHandler.received(channel, request);
Assertions.assertTrue(mockRequestData.isDecoded());
MockData mockResponseData = new MockData();
Response response = new Response(1);
response.setResult(mockResponseData);
decodeHandler.received(channel, response);
Assertions.assertTrue(mockResponseData.isDecoded());
mockData.setThrowEx(true);
decodeHandler.received(channel, mockData);
}
| DecodeHandlerTest |
java | spring-projects__spring-framework | spring-test/src/main/java/org/springframework/test/context/junit/jupiter/DisabledIfCondition.java | {
"start": 1568,
"end": 1897
} | class ____ test method and the configured expression evaluates
* to {@code true}.
*/
@Override
public ConditionEvaluationResult evaluateExecutionCondition(ExtensionContext context) {
return evaluateAnnotation(DisabledIf.class, DisabledIf::expression, DisabledIf::reason,
DisabledIf::loadContext, false, context);
}
}
| or |
java | apache__camel | components/camel-salesforce/camel-salesforce-component/src/main/java/org/apache/camel/component/salesforce/internal/client/AbstractClientBase.java | {
"start": 7033,
"end": 18394
} | interface ____ {
void onResponse(InputStream response, Map<String, String> headers, SalesforceException ex);
}
protected void doHttpRequest(final Request request, final ClientResponseCallback callback) {
// Highly memory inefficient,
// but buffer the request content to allow it to be replayed for
// authentication retries
final Request.Content content = request.getBody();
if (content instanceof InputStreamRequestContent) {
InputStreamRequestContent inputStreamRequestContent = (InputStreamRequestContent) content;
final List<ByteBuffer> buffers = new ArrayList<>();
while (true) {
Content.Chunk chunk = inputStreamRequestContent.read();
if (chunk.isLast()) {
break;
} else {
buffers.add(chunk.getByteBuffer());
}
}
request.body(new ByteBufferRequestContent(buffers.toArray(new ByteBuffer[0])));
buffers.clear();
}
inflightRequests.register();
// execute the request
request.send(new BufferingResponseListener(httpClient.getMaxContentLength()) {
@Override
public void onComplete(Result result) {
try {
Response response = result.getResponse();
final Map<String, String> headers = determineHeadersFrom(response);
if (result.isFailed()) {
// Failure!!!
// including Salesforce errors reported as exception
// from SalesforceSecurityHandler
Throwable failure = result.getFailure();
if (failure instanceof SalesforceException) {
httpClient.getWorkerPool()
.execute(() -> callback.onResponse(null, headers, (SalesforceException) failure));
} else {
final String msg = String.format("Unexpected error {%s:%s} executing {%s:%s}", response.getStatus(),
response.getReason(), request.getMethod(),
request.getURI());
httpClient.getWorkerPool().execute(() -> callback.onResponse(null, headers,
new SalesforceException(msg, response.getStatus(), failure)));
}
} else {
// HTTP error status
final int status = response.getStatus();
HttpRequest request
= (HttpRequest) ((HttpRequest) result.getRequest()).getConversation()
.getAttribute(SalesforceSecurityHandler.AUTHENTICATION_REQUEST_ATTRIBUTE);
if (status == HttpStatus.BAD_REQUEST_400 && request != null) {
// parse login error
ContentResponse contentResponse
= new HttpContentResponse(response, getContent(), getMediaType(), getEncoding());
try {
session.parseLoginResponse(contentResponse, getContentAsString());
final String msg = String.format("Unexpected Error {%s:%s} executing {%s:%s}", status,
response.getReason(), request.getMethod(), request.getURI());
httpClient.getWorkerPool()
.execute(() -> callback.onResponse(null, headers, new SalesforceException(msg, null)));
} catch (SalesforceException e) {
final String msg = String.format("Error {%s:%s} executing {%s:%s}", status,
response.getReason(), request.getMethod(), request.getURI());
httpClient.getWorkerPool().execute(() -> callback.onResponse(null, headers,
new SalesforceException(msg, response.getStatus(), e)));
}
} else if (status < HttpStatus.OK_200 || status >= HttpStatus.MULTIPLE_CHOICES_300) {
// Salesforce HTTP failure!
final SalesforceException exception = createRestException(response, getContentAsInputStream());
// for APIs that return body on status 400, such as
// Composite API we need content as well
httpClient.getWorkerPool()
.execute(() -> callback.onResponse(getContentAsInputStream(), headers, exception));
} else {
// Success!!!
httpClient.getWorkerPool()
.execute(() -> callback.onResponse(getContentAsInputStream(), headers, null));
}
}
} finally {
inflightRequests.arriveAndDeregister();
}
}
@Override
public InputStream getContentAsInputStream() {
if (getContent().length == 0) {
return null;
}
return super.getContentAsInputStream();
}
});
}
public void setAccessToken(String accessToken) {
this.accessToken = accessToken;
}
public void setInstanceUrl(String instanceUrl) {
this.instanceUrl = instanceUrl;
}
@Override
public HttpClient getHttpClient() {
return httpClient;
}
final List<RestError> readErrorsFrom(
final InputStream responseContent, final ObjectMapper objectMapper)
throws IOException {
final List<RestError> restErrors;
restErrors = objectMapper.readValue(responseContent, TypeReferences.REST_ERROR_LIST_TYPE);
return restErrors;
}
protected abstract void setAccessToken(Request request);
protected SalesforceException createRestException(Response response, InputStream responseContent) {
// get status code and reason phrase
final int statusCode = response.getStatus();
String reason = response.getReason();
if (reason == null || reason.isEmpty()) {
reason = HttpStatus.getMessage(statusCode);
}
try {
if (responseContent != null && responseContent.available() > 0) {
final List<String> choices;
// return list of choices as error message for 300
if (statusCode == HttpStatus.MULTIPLE_CHOICES_300) {
choices = objectMapper.readValue(responseContent, TypeReferences.STRING_LIST_TYPE);
return new SalesforceMultipleChoicesException(reason, statusCode, choices);
} else {
List<RestError> restErrors = null;
String body = null;
try {
restErrors = readErrorsFrom(responseContent, objectMapper);
} catch (IOException ignored) {
// ok. could be a custom response
}
try {
responseContent.reset();
body = IOUtils.toString(responseContent, StandardCharsets.UTF_8);
responseContent.reset();
} catch (Exception t) {
log.warn("Unable to reset HTTP response content input stream.");
}
if (statusCode == HttpStatus.NOT_FOUND_404) {
return new NoSuchSObjectException(restErrors);
}
return new SalesforceException(
restErrors, statusCode,
"Unexpected error: " + reason + ". See exception `errors` property for detail. " + body,
responseContent);
}
}
} catch (IOException | RuntimeException e) {
// log and ignore
String msg = "Unexpected Error parsing error response body + [" + responseContent + "] : "
+ e.getMessage();
log.warn(msg, e);
}
// just report HTTP status info
return new SalesforceException("Unexpected error: " + reason + ", with content: " + responseContent, statusCode);
}
static Map<String, String> determineHeadersFrom(final Response response) {
final HttpFields headers = response.getHeaders();
final Map<String, String> answer = new LinkedHashMap<>();
for (final HttpField header : headers) {
final String headerName = header.getName();
if (headerName.startsWith("Sforce")) {
answer.put(headerName, header.getValue());
}
}
// don't set the response code to "0" and the response text to null if there's a response timeout
if (response.getStatus() != 0) {
answer.put(Exchange.HTTP_RESPONSE_CODE, String.valueOf(response.getStatus()));
answer.put(Exchange.HTTP_RESPONSE_TEXT, response.getReason());
}
return answer;
}
private static void addHeadersTo(final Request request, final Map<String, List<String>> headers) {
if (headers == null || headers.isEmpty()) {
return;
}
request.headers(requestHeaders -> {
for (Entry<String, List<String>> header : headers.entrySet()) {
requestHeaders.put(header.getKey(), header.getValue());
}
});
}
static Map<String, List<String>> determineHeaders(final Exchange exchange) {
final Message inboundMessage = exchange.getIn();
final Map<String, Object> headers = inboundMessage.getHeaders();
final Map<String, List<String>> answer = new HashMap<>();
for (final String headerName : headers.keySet()) {
final String headerNameLowercase = headerName.toLowerCase(Locale.US);
if (headerNameLowercase.startsWith("sforce") || headerNameLowercase.startsWith("x-sfdc")) {
final Object headerValue = inboundMessage.getHeader(headerName);
if (headerValue instanceof String) {
answer.put(headerName, Collections.singletonList((String) headerValue));
} else if (headerValue instanceof String[]) {
answer.put(headerName, Arrays.asList((String[]) headerValue));
} else if (headerValue instanceof Collection) {
answer.put(headerName,
((Collection<?>) headerValue).stream().map(String::valueOf).collect(Collectors.<String> toList()));
} else {
throw new IllegalArgumentException(
"Given value for header `" + headerName + "`, is not String, String array or a Collection");
}
}
}
return answer;
}
}
| ClientResponseCallback |
java | apache__camel | core/camel-base-engine/src/main/java/org/apache/camel/impl/engine/CamelInternalProcessor.java | {
"start": 44910,
"end": 48596
} | class ____ implements CamelInternalProcessorAdvice<StopWatch> {
private final TraceAdviceEventNotifier notifier;
private final Tracer tracer;
private final NamedNode processorDefinition;
private final NamedRoute routeDefinition;
private final Synchronization tracingAfterRoute;
private final boolean skip;
public TracingAdvice(CamelContext camelContext, Tracer tracer, NamedNode processorDefinition,
NamedRoute routeDefinition, boolean first) {
this.tracer = tracer;
this.processorDefinition = processorDefinition;
this.routeDefinition = routeDefinition;
this.tracingAfterRoute
= routeDefinition != null
? new TracingAfterRoute(tracer, routeDefinition.getRouteId(), routeDefinition) : null;
boolean rest;
boolean template;
if (routeDefinition != null) {
rest = routeDefinition.isCreatedFromRest();
template = routeDefinition.isCreatedFromTemplate();
} else {
rest = false;
template = false;
}
// optimize whether to skip this route or not
if (rest && !tracer.isTraceRests()) {
this.skip = true;
} else if (template && !tracer.isTraceTemplates()) {
this.skip = true;
} else {
this.skip = false;
}
this.notifier = getOrCreateEventNotifier(camelContext);
}
private TraceAdviceEventNotifier getOrCreateEventNotifier(CamelContext camelContext) {
// use a single instance of this event notifier
for (EventNotifier en : camelContext.getManagementStrategy().getEventNotifiers()) {
if (en instanceof TraceAdviceEventNotifier traceAdviceEventNotifier) {
return traceAdviceEventNotifier;
}
}
TraceAdviceEventNotifier answer = new TraceAdviceEventNotifier();
camelContext.getManagementStrategy().addEventNotifier(answer);
return answer;
}
@Override
public StopWatch before(Exchange exchange) throws Exception {
if (!skip && tracer.isEnabled()) {
// to capture if the exchange was sent to an endpoint during this event
notifier.before(exchange);
if (tracingAfterRoute != null) {
// add before route and after route tracing but only once per route, so check if there is already an existing
boolean contains = exchange.getUnitOfWork().containsSynchronization(tracingAfterRoute);
if (!contains) {
tracer.traceBeforeRoute(routeDefinition, exchange);
exchange.getExchangeExtension().addOnCompletion(tracingAfterRoute);
}
}
tracer.traceBeforeNode(processorDefinition, exchange);
return new StopWatch();
}
return null;
}
@Override
public void after(Exchange exchange, StopWatch data) throws Exception {
if (data != null) {
Endpoint endpoint = notifier.after(exchange);
long elapsed = data.taken();
if (endpoint != null) {
tracer.traceSentNode(processorDefinition, exchange, endpoint, elapsed);
}
tracer.traceAfterNode(processorDefinition, exchange);
}
}
private static final | TracingAdvice |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/state/ManagedSnapshotContext.java | {
"start": 1218,
"end": 1898
} | interface ____ {
/**
* Returns the ID of the checkpoint for which the snapshot is taken.
*
* <p>The checkpoint ID is guaranteed to be strictly monotonously increasing across checkpoints.
* For two completed checkpoints <i>A</i> and <i>B</i>, {@code ID_B > ID_A} means that
* checkpoint <i>B</i> subsumes checkpoint <i>A</i>, i.e., checkpoint <i>B</i> contains a later
* state than checkpoint <i>A</i>.
*/
long getCheckpointId();
/**
* Returns timestamp (wall clock time) when the master node triggered the checkpoint for which
* the state snapshot is taken.
*/
long getCheckpointTimestamp();
}
| ManagedSnapshotContext |
java | apache__camel | components/camel-kafka/src/test/java/org/apache/camel/component/kafka/integration/KafkaConsumerAuthIT.java | {
"start": 3151,
"end": 8316
} | class ____ {
public static final String TOPIC = "test-auth-full";
@Order(1)
@RegisterExtension
public static ContainerLocalAuthKafkaService service = new ContainerLocalAuthKafkaService("/kafka-jaas.config");
@Order(2)
@RegisterExtension
public static CamelContextExtension contextExtension = new DefaultCamelContextExtension();
protected static AdminClient kafkaAdminClient;
private static final Logger LOG = LoggerFactory.getLogger(KafkaConsumerAuthIT.class);
private org.apache.kafka.clients.producer.KafkaProducer<String, String> producer;
@BeforeEach
public void before() {
Properties props = KafkaTestUtil.getDefaultProperties(service);
props.put(SaslConfigs.SASL_JAAS_CONFIG,
ContainerLocalAuthKafkaService.generateSimpleSaslJaasConfig("camel", "camel-secret"));
props.put("security.protocol", "SASL_PLAINTEXT");
props.put(SaslConfigs.SASL_MECHANISM, "PLAIN");
try {
producer = new org.apache.kafka.clients.producer.KafkaProducer<>(props);
} catch (Exception e) {
fail(e.getMessage());
}
MockConsumerInterceptor.recordsCaptured.clear();
}
@BeforeEach
public void setKafkaAdminClient() {
if (kafkaAdminClient == null) {
kafkaAdminClient = KafkaAdminUtil.createAdminClient(service);
}
}
@AfterEach
public void after() {
if (producer != null) {
producer.close();
}
// clean all test topics
kafkaAdminClient.deleteTopics(Collections.singletonList(TOPIC)).all();
}
@RouteFixture
public void createRouteBuilder(CamelContext context) throws Exception {
context.addRoutes(createRouteBuilder());
}
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
final String simpleSaslJaasConfig
= ContainerLocalAuthKafkaService.generateSimpleSaslJaasConfig("camel", "camel-secret");
fromF("kafka:%s"
+ "?brokers=%s&groupId=%s&autoOffsetReset=earliest&keyDeserializer=org.apache.kafka.common.serialization.StringDeserializer"
+ "&valueDeserializer=org.apache.kafka.common.serialization.StringDeserializer&clientId=camel-kafka-auth-test"
+ "&autoCommitIntervalMs=1000&pollTimeoutMs=1000&autoCommitEnable=true&interceptorClasses=%s"
+ "&saslMechanism=PLAIN&securityProtocol=SASL_PLAINTEXT&saslJaasConfig=%s", TOPIC,
service.getBootstrapServers(),
"KafkaConsumerAuthIT", "org.apache.camel.component.kafka.MockConsumerInterceptor", simpleSaslJaasConfig)
.process(
exchange -> LOG.trace("Captured on the processor: {}", exchange.getMessage().getBody()))
.routeId("full-it").to(KafkaTestUtil.MOCK_RESULT);
}
};
}
@DisplayName("Tests that Camel can adequately connect and consume from an authenticated Kafka instance")
@Timeout(30)
@Order(1)
@Test
public void kafkaMessageIsConsumedByCamel() throws InterruptedException {
MockEndpoint to = contextExtension.getMockEndpoint(KafkaTestUtil.MOCK_RESULT);
String propagatedHeaderKey = "PropagatedCustomHeader";
byte[] propagatedHeaderValue = "propagated header value".getBytes();
String skippedHeaderKey = "CamelSkippedHeader";
to.expectedMessageCount(5);
to.expectedBodiesReceivedInAnyOrder("message-0", "message-1", "message-2", "message-3", "message-4");
// The LAST_RECORD_BEFORE_COMMIT header should not be configured on any
// exchange because autoCommitEnable=true
to.expectedHeaderValuesReceivedInAnyOrder(KafkaConstants.LAST_RECORD_BEFORE_COMMIT, null, null, null, null, null);
to.expectedHeaderReceived(propagatedHeaderKey, propagatedHeaderValue);
populateKafkaTopic(propagatedHeaderKey, propagatedHeaderValue);
to.assertIsSatisfied(3000);
assertEquals(5, MockConsumerInterceptor.recordsCaptured.stream()
.flatMap(i -> StreamSupport.stream(i.records(TOPIC).spliterator(), false)).count());
Map<String, Object> headers = to.getExchanges().get(0).getIn().getHeaders();
assertFalse(headers.containsKey(skippedHeaderKey), "Should not receive skipped header");
assertTrue(headers.containsKey(propagatedHeaderKey), "Should receive propagated header");
}
private void populateKafkaTopic(String propagatedHeaderKey, byte[] propagatedHeaderValue) {
for (int k = 0; k < 5; k++) {
String msg = "message-" + k;
ProducerRecord<String, String> data = new ProducerRecord<>(TOPIC, "1", msg);
data.headers().add(new RecordHeader("CamelSkippedHeader", "skipped header value".getBytes()));
data.headers().add(new RecordHeader(propagatedHeaderKey, propagatedHeaderValue));
producer.send(data);
}
}
}
| KafkaConsumerAuthIT |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java | {
"start": 1961,
"end": 2749
} | class ____ ALSO related protocol buffer
* wire protocol definition in InterDatanodeProtocol.proto.
*
* For more details on protocol buffer wire protocol, please see
* .../org/apache/hadoop/hdfs/protocolPB/overview.html
*/
public static final long versionID = 6L;
/**
* Initialize a replica recovery.
*
* @return actual state of the replica on this data-node or
* null if data-node does not have the replica.
*/
ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock)
throws IOException;
/**
* Update replica with the new generation stamp and length.
*/
String updateReplicaUnderRecovery(ExtendedBlock oldBlock, long recoveryId,
long newBlockId, long newLength)
throws IOException;
}
| and |
java | apache__flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/functions/sql/BuiltInSqlOperator.java | {
"start": 1708,
"end": 3591
} | interface ____ {
/**
* @see BuiltInFunctionDefinition#getVersion()
*/
Optional<Integer> getVersion();
/**
* @see BuiltInFunctionDefinition#isInternal()
*/
boolean isInternal();
/**
* @see BuiltInFunctionDefinition#getQualifiedName()
*/
String getQualifiedName();
// --------------------------------------------------------------------------------------------
static Optional<Integer> unwrapVersion(SqlOperator operator) {
if (operator instanceof BuiltInSqlOperator) {
final BuiltInSqlOperator builtInSqlOperator = (BuiltInSqlOperator) operator;
return builtInSqlOperator.isInternal()
? Optional.empty()
: builtInSqlOperator.getVersion();
}
return Optional.of(DEFAULT_VERSION);
}
static boolean unwrapIsInternal(SqlOperator operator) {
if (operator instanceof BuiltInSqlOperator) {
return ((BuiltInSqlOperator) operator).isInternal();
}
return false;
}
static String toQualifiedName(SqlOperator operator) {
if (operator instanceof BuiltInSqlOperator) {
final BuiltInSqlOperator builtInSqlOperator = (BuiltInSqlOperator) operator;
return builtInSqlOperator.getQualifiedName();
}
return qualifyFunctionName(operator.getName(), DEFAULT_VERSION);
}
static String extractNameFromQualifiedName(String qualifiedName) {
// supports all various kinds of qualified names
// $FUNC$1 => FUNC
// $IS NULL$1 => IS NULL
// $$CALCITE_INTERNAL$1 => $CALCITE_INTERNAL
int versionPos = qualifiedName.length() - 1;
while (Character.isDigit(qualifiedName.charAt(versionPos))) {
versionPos--;
}
return qualifiedName.substring(1, versionPos);
}
}
| BuiltInSqlOperator |
java | resilience4j__resilience4j | resilience4j-circuitbreaker/src/main/java/io/github/resilience4j/circuitbreaker/IllegalStateTransitionException.java | {
"start": 179,
"end": 994
} | class ____ extends RuntimeException {
private final String name;
private final CircuitBreaker.State fromState;
private final CircuitBreaker.State toState;
IllegalStateTransitionException(String name, CircuitBreaker.State fromState,
CircuitBreaker.State toState) {
super(String
.format("CircuitBreaker '%s' tried an illegal state transition from %s to %s", name,
fromState.toString(), toState.toString()));
this.name = name;
this.fromState = fromState;
this.toState = toState;
}
public CircuitBreaker.State getFromState() {
return fromState;
}
public CircuitBreaker.State getToState() {
return toState;
}
public String getName() {
return name;
}
}
| IllegalStateTransitionException |
java | apache__kafka | connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginsRecommenders.java | {
"start": 4938,
"end": 5499
} | class ____ implements ConfigDef.Recommender {
@Override
public List<Object> validValues(String name, Map<String, Object> parsedConfig) {
if (plugins == null) {
return List.of();
}
return plugins.converters().stream()
.map(PluginDesc::pluginClass).distinct().collect(Collectors.toList());
}
@Override
public boolean visible(String name, Map<String, Object> parsedConfig) {
return true;
}
}
public | ConverterPluginRecommender |
java | quarkusio__quarkus | integration-tests/hibernate-search-standalone-elasticsearch/src/main/java/io/quarkus/it/hibernate/search/standalone/elasticsearch/search/stub/DatastoreStub.java | {
"start": 213,
"end": 419
} | class ____ {
final Map<Class<?>, Map<Long, Object>> entities = new LinkedHashMap<>();
public DatastoreConnectionStub connect() {
return new DatastoreConnectionStub(this);
}
}
| DatastoreStub |
java | quarkusio__quarkus | extensions/opentelemetry/runtime/src/main/java/io/quarkus/opentelemetry/runtime/tracing/cdi/WithSpanInterceptor.java | {
"start": 8013,
"end": 9684
} | class ____ implements AttributesExtractor<MethodRequest, Void> {
private static final ClassMethodNameAttributesExtractor INSTANCE = new ClassMethodNameAttributesExtractor();
private ClassMethodNameAttributesExtractor() {
//no-op
}
@Override
public void onStart(AttributesBuilder attributesBuilder, Context context, MethodRequest methodRequest) {
attributesBuilder.put(CODE_FUNCTION_NAME,
methodRequest.getMethod().getDeclaringClass().getName() + "." +
methodRequest.getMethod().getName());
}
@Override
public void onEnd(AttributesBuilder attributesBuilder, Context context, MethodRequest methodRequest, Void unused,
Throwable throwable) {
// no-op
}
}
private static boolean isUni(Class<?> clazz) {
return Uni.class.isAssignableFrom(clazz);
}
private static boolean isMulti(Class<?> clazz) {
return Multi.class.isAssignableFrom(clazz);
}
private static boolean isCompletionStage(Class<?> clazz) {
return CompletionStage.class.isAssignableFrom(clazz);
}
private static SpanKind spanKindFromMethod(Set<Annotation> annotations) {
SpanKind spanKind = null;
for (Annotation annotation : annotations) {
if (annotation instanceof WithSpan) {
spanKind = ((WithSpan) annotation).kind();
break;
}
}
if (spanKind == null) {
return SpanKind.INTERNAL;
}
return spanKind;
}
private static final | ClassMethodNameAttributesExtractor |
java | hibernate__hibernate-orm | tooling/metamodel-generator/src/main/java/org/hibernate/processor/util/TypeUtils.java | {
"start": 24480,
"end": 26835
} | class ____ as string or {@code null} if no targetEntity is here or if equals to void
*/
public static @Nullable String getTargetEntity(List<? extends AnnotationMirror> annotations) {
for ( AnnotationMirror mirror : annotations ) {
if ( isAnnotationMirrorOfType( mirror, ELEMENT_COLLECTION ) ) {
return getFullyQualifiedClassNameOfTargetEntity( mirror, "targetClass" );
}
else if ( isAnnotationMirrorOfType( mirror, ONE_TO_MANY )
|| isAnnotationMirrorOfType( mirror, MANY_TO_MANY )
|| isAnnotationMirrorOfType( mirror, MANY_TO_ONE )
|| isAnnotationMirrorOfType( mirror, ONE_TO_ONE ) ) {
return getFullyQualifiedClassNameOfTargetEntity( mirror, "targetEntity" );
}
else if ( isAnnotationMirrorOfType( mirror, "org.hibernate.annotations.TargetEmbeddable") ) {
return getFullyQualifiedClassNameOfTargetEntity( mirror, "value" );
}
}
return null;
}
public static String propertyName(Element element) {
switch ( element.getKind() ) {
case FIELD:
return element.getSimpleName().toString();
case METHOD:
final Name name = element.getSimpleName();
if ( name.length() > 3 && name.subSequence( 0, 3 ).equals( "get" ) ) {
return decapitalize( name.subSequence( 3, name.length() ).toString() );
}
else if ( name.length() > 2 && name.subSequence( 0, 2 ).equals( "is" ) ) {
return decapitalize( name.subSequence( 2, name.length() ).toString() );
}
else {
return decapitalize( name.toString() );
}
default:
return element.getSimpleName() + "/* " + element.getKind() + " */";
}
}
public static @Nullable Element findMappedSuperElement(Metamodel entity, Context context) {
final Element element = entity.getElement();
if ( element instanceof TypeElement typeElement ) {
TypeMirror superClass = typeElement.getSuperclass();
//superclass of Object is of NoType which returns some other kind
while ( superClass.getKind() == TypeKind.DECLARED ) {
final DeclaredType declaredType = (DeclaredType) superClass;
final TypeElement superClassElement = (TypeElement) declaredType.asElement();
if ( extendsSuperMetaModel( superClassElement, entity.isMetaComplete(), context ) ) {
return superClassElement;
}
superClass = superClassElement.getSuperclass();
}
}
return null;
}
/**
* Checks whether this metamodel | name |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/builder/BuilderTest3.java | {
"start": 588,
"end": 883
} | class ____ {
private int id;
private String name;
public int getId() {
return id;
}
public String getName() {
return name;
}
}
@JSONPOJOBuilder(withPrefix="kk", buildMethod="mmm")
public static | VO |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/feature/FeaturesTest5.java | {
"start": 304,
"end": 907
} | class ____ extends TestCase {
public void test_0() throws Exception {
SerializeConfig config = new SerializeConfig();
config.setAsmEnable(false);
String text = JSON.toJSONString(new Entity(), config);
Assert.assertEquals("{\"value\":false}", text);
}
public void test_1() throws Exception {
SerializeConfig config = new SerializeConfig();
config.setAsmEnable(true);
String text = JSON.toJSONString(new Entity(), config);
Assert.assertEquals("{\"value\":false}", text);
}
public static | FeaturesTest5 |
java | apache__flink | flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/plan/utils/ExecNodeMetadataUtilTest.java | {
"start": 15152,
"end": 16351
} | class ____ extends AbstractDummyNode {
@JsonCreator
protected DummyNode(
ExecNodeContext context,
ReadableConfig persistedConfig,
List<InputProperty> properties,
LogicalType outputType,
String description) {
super(context, persistedConfig, properties, outputType, description);
}
}
@ExecNodeMetadata(
name = "dummy-node-multiple-annotations",
version = 1,
consumedOptions = {"option1", "option2"},
minPlanVersion = FlinkVersion.v1_13,
minStateVersion = FlinkVersion.v1_13)
@ExecNodeMetadata(
name = "dummy-node-multiple-annotations",
version = 2,
consumedOptions = {"option11", "option22"},
minPlanVersion = FlinkVersion.v1_14,
minStateVersion = FlinkVersion.v1_14)
@ExecNodeMetadata(
name = "dummy-node-multiple-annotations",
version = 3,
consumedOptions = {"option111", "option222"},
minPlanVersion = FlinkVersion.v1_15,
minStateVersion = FlinkVersion.v1_15)
private static | DummyNode |
java | quarkusio__quarkus | core/runtime/src/main/java/io/quarkus/runtime/configuration/SystemOnlySourcesConfigBuilder.java | {
"start": 100,
"end": 426
} | class ____ implements ConfigBuilder {
@Override
public SmallRyeConfigBuilder configBuilder(final SmallRyeConfigBuilder builder) {
return builder.setAddDefaultSources(false).addSystemSources();
}
@Override
public int priority() {
return Integer.MAX_VALUE;
}
}
| SystemOnlySourcesConfigBuilder |
java | google__guava | android/guava-testlib/src/com/google/common/collect/testing/features/FeatureUtil.java | {
"start": 8507,
"end": 9255
} | class ____
* method.
* @throws ConflictingRequirementsException if the requirements are mutually inconsistent.
*/
public static TesterRequirements buildDeclaredTesterRequirements(AnnotatedElement classOrMethod)
throws ConflictingRequirementsException {
TesterRequirements requirements = new TesterRequirements();
Iterable<Annotation> testerAnnotations = getTesterAnnotations(classOrMethod);
for (Annotation testerAnnotation : testerAnnotations) {
TesterRequirements moreRequirements = buildTesterRequirements(testerAnnotation);
incorporateRequirements(requirements, moreRequirements, testerAnnotation);
}
return requirements;
}
/**
* Find all the tester annotations declared on a tester | or |
java | elastic__elasticsearch | x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetTrainedModelsStatsActionResponseTests.java | {
"start": 1426,
"end": 38544
} | class ____ extends AbstractBWCWireSerializationTestCase<Response> {
@Override
protected Response createTestInstance() {
return createInstance();
}
@Override
protected Response mutateInstance(Response instance) {
return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929
}
public static Response createInstance() {
int listSize = randomInt(10);
List<Response.TrainedModelStats> trainedModelStats = Stream.generate(() -> randomAlphaOfLength(10))
.limit(listSize)
.map(
id -> new Response.TrainedModelStats(
id,
randomBoolean() ? TrainedModelSizeStatsTests.createRandom() : null,
randomBoolean() ? randomIngestStats() : null,
randomIntBetween(0, 10),
randomBoolean() ? InferenceStatsTests.createTestInstance(id, null) : null,
randomBoolean() ? AssignmentStatsTests.randomDeploymentStats() : null
)
)
.collect(Collectors.toList());
return new Response(new QueryPage<>(trainedModelStats, randomLongBetween(listSize, 1000), RESULTS_FIELD));
}
public static IngestStats randomIngestStats() {
List<String> pipelineIds = Stream.generate(() -> randomAlphaOfLength(10)).limit(randomIntBetween(0, 10)).toList();
return new IngestStats(
new IngestStats.Stats(randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong()),
pipelineIds.stream()
.map(id -> new IngestStats.PipelineStat(ProjectId.DEFAULT, id, randomStats(), randomByteStats()))
.collect(Collectors.toList()),
pipelineIds.isEmpty()
? Map.of()
: Map.of(
ProjectId.DEFAULT,
pipelineIds.stream().collect(Collectors.toMap(Function.identity(), v -> randomProcessorStats()))
)
);
}
private static IngestStats.Stats randomStats() {
return new IngestStats.Stats(randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong());
}
private static IngestStats.ByteStats randomByteStats() {
return new IngestStats.ByteStats(randomNonNegativeLong(), randomNonNegativeLong());
}
private static List<IngestStats.ProcessorStat> randomProcessorStats() {
return Stream.generate(() -> randomAlphaOfLength(10))
.limit(randomIntBetween(0, 10))
.map(name -> new IngestStats.ProcessorStat(name, "inference", randomStats()))
.collect(Collectors.toList());
}
@Override
protected Writeable.Reader<Response> instanceReader() {
return Response::new;
}
@Override
protected Response mutateInstanceForVersion(Response instance, TransportVersion version) {
if (version.before(TransportVersions.V_8_0_0)) {
return new Response(
new QueryPage<>(
instance.getResources()
.results()
.stream()
.map(
stats -> new Response.TrainedModelStats(
stats.getModelId(),
null,
new IngestStats(
stats.getIngestStats().totalStats(),
stats.getIngestStats()
.pipelineStats()
.stream()
.map(
pipelineStat -> new IngestStats.PipelineStat(
ProjectId.DEFAULT,
pipelineStat.pipelineId(),
pipelineStat.stats(),
IngestStats.ByteStats.IDENTITY
)
)
.toList(),
stats.getIngestStats().processorStats()
),
stats.getPipelineCount(),
stats.getInferenceStats(),
null
)
)
.toList(),
instance.getResources().count(),
RESULTS_FIELD
)
);
} else if (version.before(TransportVersions.V_8_1_0)) {
return new Response(
new QueryPage<>(
instance.getResources()
.results()
.stream()
.map(
stats -> new Response.TrainedModelStats(
stats.getModelId(),
stats.getModelSizeStats(),
new IngestStats(
stats.getIngestStats().totalStats(),
stats.getIngestStats()
.pipelineStats()
.stream()
.map(
pipelineStat -> new IngestStats.PipelineStat(
ProjectId.DEFAULT,
pipelineStat.pipelineId(),
pipelineStat.stats(),
IngestStats.ByteStats.IDENTITY
)
)
.toList(),
stats.getIngestStats().processorStats()
),
stats.getPipelineCount(),
stats.getInferenceStats(),
stats.getDeploymentStats() == null
? null
: new AssignmentStats(
stats.getDeploymentStats().getModelId(),
stats.getDeploymentStats().getModelId(),
stats.getDeploymentStats().getThreadsPerAllocation(),
stats.getDeploymentStats().getNumberOfAllocations(),
null,
stats.getDeploymentStats().getQueueCapacity(),
null,
stats.getDeploymentStats().getStartTime(),
stats.getDeploymentStats()
.getNodeStats()
.stream()
.map(
nodeStats -> new AssignmentStats.NodeStats(
nodeStats.getNode(),
nodeStats.getInferenceCount().orElse(null),
nodeStats.getAvgInferenceTime().orElse(null),
null,
nodeStats.getLastAccess(),
nodeStats.getPendingCount(),
0,
null,
0,
0,
nodeStats.getRoutingState(),
nodeStats.getStartTime(),
null,
null,
0L,
0L,
null,
null
)
)
.toList(),
Priority.NORMAL
)
)
)
.toList(),
instance.getResources().count(),
RESULTS_FIELD
)
);
} else if (version.before(TransportVersions.V_8_2_0)) {
return new Response(
new QueryPage<>(
instance.getResources()
.results()
.stream()
.map(
stats -> new Response.TrainedModelStats(
stats.getModelId(),
stats.getModelSizeStats(),
new IngestStats(
stats.getIngestStats().totalStats(),
stats.getIngestStats()
.pipelineStats()
.stream()
.map(
pipelineStat -> new IngestStats.PipelineStat(
ProjectId.DEFAULT,
pipelineStat.pipelineId(),
pipelineStat.stats(),
IngestStats.ByteStats.IDENTITY
)
)
.toList(),
stats.getIngestStats().processorStats()
),
stats.getPipelineCount(),
stats.getInferenceStats(),
stats.getDeploymentStats() == null
? null
: new AssignmentStats(
stats.getDeploymentStats().getModelId(),
stats.getDeploymentStats().getModelId(),
stats.getDeploymentStats().getThreadsPerAllocation(),
stats.getDeploymentStats().getNumberOfAllocations(),
null,
stats.getDeploymentStats().getQueueCapacity(),
null,
stats.getDeploymentStats().getStartTime(),
stats.getDeploymentStats()
.getNodeStats()
.stream()
.map(
nodeStats -> new AssignmentStats.NodeStats(
nodeStats.getNode(),
nodeStats.getInferenceCount().orElse(null),
nodeStats.getAvgInferenceTime().orElse(null),
null,
nodeStats.getLastAccess(),
nodeStats.getPendingCount(),
nodeStats.getErrorCount(),
null,
nodeStats.getRejectedExecutionCount(),
nodeStats.getTimeoutCount(),
nodeStats.getRoutingState(),
nodeStats.getStartTime(),
nodeStats.getThreadsPerAllocation(),
nodeStats.getNumberOfAllocations(),
0L,
0L,
null,
null
)
)
.toList(),
Priority.NORMAL
)
)
)
.toList(),
instance.getResources().count(),
RESULTS_FIELD
)
);
} else if (version.before(TransportVersions.V_8_4_0)) {
return new Response(
new QueryPage<>(
instance.getResources()
.results()
.stream()
.map(
stats -> new Response.TrainedModelStats(
stats.getModelId(),
stats.getModelSizeStats(),
new IngestStats(
stats.getIngestStats().totalStats(),
stats.getIngestStats()
.pipelineStats()
.stream()
.map(
pipelineStat -> new IngestStats.PipelineStat(
ProjectId.DEFAULT,
pipelineStat.pipelineId(),
pipelineStat.stats(),
IngestStats.ByteStats.IDENTITY
)
)
.toList(),
stats.getIngestStats().processorStats()
),
stats.getPipelineCount(),
stats.getInferenceStats(),
stats.getDeploymentStats() == null
? null
: new AssignmentStats(
stats.getDeploymentStats().getModelId(),
stats.getDeploymentStats().getModelId(),
stats.getDeploymentStats().getThreadsPerAllocation(),
stats.getDeploymentStats().getNumberOfAllocations(),
null,
stats.getDeploymentStats().getQueueCapacity(),
null,
stats.getDeploymentStats().getStartTime(),
stats.getDeploymentStats()
.getNodeStats()
.stream()
.map(
nodeStats -> new AssignmentStats.NodeStats(
nodeStats.getNode(),
nodeStats.getInferenceCount().orElse(null),
nodeStats.getAvgInferenceTime().orElse(null),
null,
nodeStats.getLastAccess(),
nodeStats.getPendingCount(),
nodeStats.getErrorCount(),
null,
nodeStats.getRejectedExecutionCount(),
nodeStats.getTimeoutCount(),
nodeStats.getRoutingState(),
nodeStats.getStartTime(),
nodeStats.getThreadsPerAllocation(),
nodeStats.getNumberOfAllocations(),
nodeStats.getPeakThroughput(),
nodeStats.getThroughputLastPeriod(),
nodeStats.getAvgInferenceTimeLastPeriod(),
null
)
)
.toList(),
Priority.NORMAL
)
)
)
.toList(),
instance.getResources().count(),
RESULTS_FIELD
)
);
} else if (version.before(TransportVersions.V_8_5_0)) {
return new Response(
new QueryPage<>(
instance.getResources()
.results()
.stream()
.map(
stats -> new Response.TrainedModelStats(
stats.getModelId(),
stats.getModelSizeStats(),
new IngestStats(
stats.getIngestStats().totalStats(),
stats.getIngestStats()
.pipelineStats()
.stream()
.map(
pipelineStat -> new IngestStats.PipelineStat(
ProjectId.DEFAULT,
pipelineStat.pipelineId(),
pipelineStat.stats(),
IngestStats.ByteStats.IDENTITY
)
)
.toList(),
stats.getIngestStats().processorStats()
),
stats.getPipelineCount(),
stats.getInferenceStats(),
stats.getDeploymentStats() == null
? null
: new AssignmentStats(
stats.getDeploymentStats().getModelId(),
stats.getDeploymentStats().getModelId(),
stats.getDeploymentStats().getThreadsPerAllocation(),
stats.getDeploymentStats().getNumberOfAllocations(),
null,
stats.getDeploymentStats().getQueueCapacity(),
stats.getDeploymentStats().getCacheSize(),
stats.getDeploymentStats().getStartTime(),
stats.getDeploymentStats()
.getNodeStats()
.stream()
.map(
nodeStats -> new AssignmentStats.NodeStats(
nodeStats.getNode(),
nodeStats.getInferenceCount().orElse(null),
nodeStats.getAvgInferenceTime().orElse(null),
null,
nodeStats.getLastAccess(),
nodeStats.getPendingCount(),
nodeStats.getErrorCount(),
nodeStats.getCacheHitCount().orElse(null),
nodeStats.getRejectedExecutionCount(),
nodeStats.getTimeoutCount(),
nodeStats.getRoutingState(),
nodeStats.getStartTime(),
nodeStats.getThreadsPerAllocation(),
nodeStats.getNumberOfAllocations(),
nodeStats.getPeakThroughput(),
nodeStats.getThroughputLastPeriod(),
nodeStats.getAvgInferenceTimeLastPeriod(),
nodeStats.getCacheHitCountLastPeriod().orElse(null)
)
)
.toList(),
Priority.NORMAL
)
)
)
.toList(),
instance.getResources().count(),
RESULTS_FIELD
)
);
} else if (version.before(TransportVersions.V_8_6_0)) {
// priority added
return new Response(
new QueryPage<>(
instance.getResources()
.results()
.stream()
.map(
stats -> new Response.TrainedModelStats(
stats.getModelId(),
stats.getModelSizeStats(),
new IngestStats(
stats.getIngestStats().totalStats(),
stats.getIngestStats()
.pipelineStats()
.stream()
.map(
pipelineStat -> new IngestStats.PipelineStat(
ProjectId.DEFAULT,
pipelineStat.pipelineId(),
pipelineStat.stats(),
IngestStats.ByteStats.IDENTITY
)
)
.toList(),
stats.getIngestStats().processorStats()
),
stats.getPipelineCount(),
stats.getInferenceStats(),
stats.getDeploymentStats() == null
? null
: new AssignmentStats(
stats.getDeploymentStats().getModelId(),
stats.getDeploymentStats().getModelId(),
stats.getDeploymentStats().getThreadsPerAllocation(),
stats.getDeploymentStats().getNumberOfAllocations(),
null,
stats.getDeploymentStats().getQueueCapacity(),
stats.getDeploymentStats().getCacheSize(),
stats.getDeploymentStats().getStartTime(),
stats.getDeploymentStats()
.getNodeStats()
.stream()
.map(
nodeStats -> new AssignmentStats.NodeStats(
nodeStats.getNode(),
nodeStats.getInferenceCount().orElse(null),
nodeStats.getAvgInferenceTime().orElse(null),
nodeStats.getAvgInferenceTimeExcludingCacheHit().orElse(null),
nodeStats.getLastAccess(),
nodeStats.getPendingCount(),
nodeStats.getErrorCount(),
nodeStats.getCacheHitCount().orElse(null),
nodeStats.getRejectedExecutionCount(),
nodeStats.getTimeoutCount(),
nodeStats.getRoutingState(),
nodeStats.getStartTime(),
nodeStats.getThreadsPerAllocation(),
nodeStats.getNumberOfAllocations(),
nodeStats.getPeakThroughput(),
nodeStats.getThroughputLastPeriod(),
nodeStats.getAvgInferenceTimeLastPeriod(),
nodeStats.getCacheHitCountLastPeriod().orElse(null)
)
)
.toList(),
Priority.NORMAL
)
)
)
.toList(),
instance.getResources().count(),
RESULTS_FIELD
)
);
} else if (version.before(TransportVersions.V_8_8_0)) {
// deployment_id added
return new Response(
new QueryPage<>(
instance.getResources()
.results()
.stream()
.map(
stats -> new Response.TrainedModelStats(
stats.getModelId(),
stats.getModelSizeStats(),
new IngestStats(
stats.getIngestStats().totalStats(),
stats.getIngestStats()
.pipelineStats()
.stream()
.map(
pipelineStat -> new IngestStats.PipelineStat(
ProjectId.DEFAULT,
pipelineStat.pipelineId(),
pipelineStat.stats(),
IngestStats.ByteStats.IDENTITY
)
)
.toList(),
stats.getIngestStats().processorStats()
),
stats.getPipelineCount(),
stats.getInferenceStats(),
stats.getDeploymentStats() == null
? null
: new AssignmentStats(
stats.getDeploymentStats().getModelId(),
stats.getDeploymentStats().getModelId(),
stats.getDeploymentStats().getThreadsPerAllocation(),
stats.getDeploymentStats().getNumberOfAllocations(),
null,
stats.getDeploymentStats().getQueueCapacity(),
stats.getDeploymentStats().getCacheSize(),
stats.getDeploymentStats().getStartTime(),
stats.getDeploymentStats()
.getNodeStats()
.stream()
.map(
nodeStats -> new AssignmentStats.NodeStats(
nodeStats.getNode(),
nodeStats.getInferenceCount().orElse(null),
nodeStats.getAvgInferenceTime().orElse(null),
nodeStats.getAvgInferenceTimeExcludingCacheHit().orElse(null),
nodeStats.getLastAccess(),
nodeStats.getPendingCount(),
nodeStats.getErrorCount(),
nodeStats.getCacheHitCount().orElse(null),
nodeStats.getRejectedExecutionCount(),
nodeStats.getTimeoutCount(),
nodeStats.getRoutingState(),
nodeStats.getStartTime(),
nodeStats.getThreadsPerAllocation(),
nodeStats.getNumberOfAllocations(),
nodeStats.getPeakThroughput(),
nodeStats.getThroughputLastPeriod(),
nodeStats.getAvgInferenceTimeLastPeriod(),
nodeStats.getCacheHitCountLastPeriod().orElse(null)
)
)
.toList(),
stats.getDeploymentStats().getPriority()
)
)
)
.toList(),
instance.getResources().count(),
RESULTS_FIELD
)
);
} else if (version.before(TransportVersions.V_8_15_0)) {
// added ByteStats to IngestStats.PipelineStat
return new Response(
new QueryPage<>(
instance.getResources()
.results()
.stream()
.map(
stats -> new Response.TrainedModelStats(
stats.getModelId(),
stats.getModelSizeStats(),
new IngestStats(
stats.getIngestStats().totalStats(),
stats.getIngestStats()
.pipelineStats()
.stream()
.map(
pipelineStat -> new IngestStats.PipelineStat(
ProjectId.DEFAULT,
pipelineStat.pipelineId(),
pipelineStat.stats(),
IngestStats.ByteStats.IDENTITY
)
)
.toList(),
stats.getIngestStats().processorStats()
),
stats.getPipelineCount(),
stats.getInferenceStats(),
stats.getDeploymentStats() == null
? null
: new AssignmentStats(
stats.getDeploymentStats().getDeploymentId(),
stats.getDeploymentStats().getModelId(),
stats.getDeploymentStats().getThreadsPerAllocation(),
stats.getDeploymentStats().getNumberOfAllocations(),
null,
stats.getDeploymentStats().getQueueCapacity(),
stats.getDeploymentStats().getCacheSize(),
stats.getDeploymentStats().getStartTime(),
stats.getDeploymentStats()
.getNodeStats()
.stream()
.map(
nodeStats -> new AssignmentStats.NodeStats(
nodeStats.getNode(),
nodeStats.getInferenceCount().orElse(null),
nodeStats.getAvgInferenceTime().orElse(null),
nodeStats.getAvgInferenceTimeExcludingCacheHit().orElse(null),
nodeStats.getLastAccess(),
nodeStats.getPendingCount(),
nodeStats.getErrorCount(),
nodeStats.getCacheHitCount().orElse(null),
nodeStats.getRejectedExecutionCount(),
nodeStats.getTimeoutCount(),
nodeStats.getRoutingState(),
nodeStats.getStartTime(),
nodeStats.getThreadsPerAllocation(),
nodeStats.getNumberOfAllocations(),
nodeStats.getPeakThroughput(),
nodeStats.getThroughputLastPeriod(),
nodeStats.getAvgInferenceTimeLastPeriod(),
nodeStats.getCacheHitCountLastPeriod().orElse(null)
)
)
.toList(),
stats.getDeploymentStats().getPriority()
)
)
)
.toList(),
instance.getResources().count(),
RESULTS_FIELD
)
);
}
return instance;
}
}
| GetTrainedModelsStatsActionResponseTests |
java | elastic__elasticsearch | modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SReturn.java | {
"start": 662,
"end": 1340
} | class ____ extends AStatement {
private final AExpression valueNode;
public SReturn(int identifier, Location location, AExpression valueNode) {
super(identifier, location);
this.valueNode = valueNode;
}
public AExpression getValueNode() {
return valueNode;
}
@Override
public <Scope> void visit(UserTreeVisitor<Scope> userTreeVisitor, Scope scope) {
userTreeVisitor.visitReturn(this, scope);
}
@Override
public <Scope> void visitChildren(UserTreeVisitor<Scope> userTreeVisitor, Scope scope) {
if (valueNode != null) {
valueNode.visit(userTreeVisitor, scope);
}
}
}
| SReturn |
java | spring-projects__spring-boot | configuration-metadata/spring-boot-configuration-processor/src/test/java/org/springframework/boot/configurationprocessor/fieldvalues/AbstractFieldValuesProcessorTests.java | {
"start": 1484,
"end": 1601
} | class ____ {@link FieldValuesParser} tests.
*
* @author Phillip Webb
* @author Stephane Nicoll
*/
public abstract | for |
java | assertj__assertj-core | assertj-core/src/main/java/org/assertj/core/error/ShouldBePrimitive.java | {
"start": 880,
"end": 1382
} | class ____ extends BasicErrorMessageFactory {
/**
* Creates a new instance of <code>{@link ShouldBePrimitive }</code>.
*
* @param actual the actual value in the failed assertion.
* @return the created {@code ErrorMessageFactory}.
*/
public static BasicErrorMessageFactory shouldBePrimitive(Class<?> actual) {
return new ShouldBePrimitive(actual);
}
private ShouldBePrimitive(Class<?> actual) {
super("%nExpecting %s to be a primitive type", actual);
}
}
| ShouldBePrimitive |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/io/network/api/serialization/EventSerializerTest.java | {
"start": 2641,
"end": 9763
} | class ____ {
private final AbstractEvent[] events = {
EndOfPartitionEvent.INSTANCE,
EndOfSuperstepEvent.INSTANCE,
new EndOfData(StopMode.DRAIN),
new EndOfData(StopMode.NO_DRAIN),
new CheckpointBarrier(
1678L,
4623784L,
new CheckpointOptions(
CheckpointType.CHECKPOINT,
CheckpointStorageLocationReference.getDefault())),
new CheckpointBarrier(
1678L,
4623784L,
new CheckpointOptions(
CheckpointType.FULL_CHECKPOINT,
CheckpointStorageLocationReference.getDefault())),
new CheckpointBarrier(
1678L,
4623784L,
new CheckpointOptions(
SavepointType.savepoint(SavepointFormatType.CANONICAL),
CheckpointStorageLocationReference.getDefault())),
new CheckpointBarrier(
1678L,
4623784L,
new CheckpointOptions(
SavepointType.suspend(SavepointFormatType.CANONICAL),
CheckpointStorageLocationReference.getDefault())),
new CheckpointBarrier(
1678L,
4623784L,
new CheckpointOptions(
SavepointType.terminate(SavepointFormatType.CANONICAL),
CheckpointStorageLocationReference.getDefault())),
new CheckpointBarrier(
1678L,
4623784L,
new CheckpointOptions(
SavepointType.savepoint(SavepointFormatType.NATIVE),
CheckpointStorageLocationReference.getDefault())),
new CheckpointBarrier(
1678L,
4623784L,
new CheckpointOptions(
SavepointType.suspend(SavepointFormatType.NATIVE),
CheckpointStorageLocationReference.getDefault())),
new CheckpointBarrier(
1678L,
4623784L,
new CheckpointOptions(
SavepointType.terminate(SavepointFormatType.NATIVE),
CheckpointStorageLocationReference.getDefault())),
new TestTaskEvent(Math.random(), 12361231273L),
new CancelCheckpointMarker(287087987329842L),
new EventAnnouncement(
new CheckpointBarrier(
42L,
1337L,
CheckpointOptions.alignedWithTimeout(
CheckpointType.CHECKPOINT,
CheckpointStorageLocationReference.getDefault(),
10)),
44),
new SubtaskConnectionDescriptor(23, 42),
EndOfSegmentEvent.INSTANCE,
new RecoveryMetadata(3),
new WatermarkEvent(new LongWatermark(42L, "test"), false),
new WatermarkEvent(new BoolWatermark(true, "test"), true),
new WatermarkEvent(new BoolWatermark(true, "test"), true),
EndOfInputChannelStateEvent.INSTANCE,
EndOfOutputChannelStateEvent.INSTANCE,
};
@Test
void testSerializeDeserializeEvent() throws Exception {
for (AbstractEvent evt : events) {
ByteBuffer serializedEvent = EventSerializer.toSerializedEvent(evt);
assertThat(serializedEvent.hasRemaining()).isTrue();
AbstractEvent deserialized =
EventSerializer.fromSerializedEvent(
serializedEvent, getClass().getClassLoader());
assertThat(deserialized).isNotNull().isEqualTo(evt);
}
}
@Test
void testToBufferConsumer() throws IOException {
for (AbstractEvent evt : events) {
BufferConsumer bufferConsumer = EventSerializer.toBufferConsumer(evt, false);
assertThat(bufferConsumer.isBuffer()).isFalse();
assertThat(bufferConsumer.isFinished()).isTrue();
assertThat(bufferConsumer.isDataAvailable()).isTrue();
assertThat(bufferConsumer.isRecycled()).isFalse();
if (evt instanceof CheckpointBarrier) {
assertThat(bufferConsumer.build().getDataType().isBlockingUpstream()).isTrue();
} else if (evt instanceof EndOfData) {
assertThat(bufferConsumer.build().getDataType())
.isEqualTo(Buffer.DataType.END_OF_DATA);
} else if (evt instanceof EndOfPartitionEvent) {
assertThat(bufferConsumer.build().getDataType())
.isEqualTo(Buffer.DataType.END_OF_PARTITION);
} else if (evt instanceof WatermarkEvent) {
if (((WatermarkEvent) evt).isAligned()) {
assertThat(bufferConsumer.build().getDataType())
.isEqualTo(Buffer.DataType.ALIGNED_WATERMARK_EVENT);
} else {
assertThat(bufferConsumer.build().getDataType())
.isEqualTo(Buffer.DataType.UNALIGNED_WATERMARK_EVENT);
}
} else if (evt instanceof EndOfOutputChannelStateEvent) {
assertThat(bufferConsumer.build().getDataType())
.isEqualTo(Buffer.DataType.RECOVERY_COMPLETION);
} else {
assertThat(bufferConsumer.build().getDataType())
.isEqualTo(Buffer.DataType.EVENT_BUFFER);
}
}
}
@Test
void testToBuffer() throws IOException {
for (AbstractEvent evt : events) {
Buffer buffer = EventSerializer.toBuffer(evt, false);
assertThat(buffer.isBuffer()).isFalse();
assertThat(buffer.readableBytes()).isGreaterThan(0);
assertThat(buffer.isRecycled()).isFalse();
if (evt instanceof CheckpointBarrier) {
assertThat(buffer.getDataType().isBlockingUpstream()).isTrue();
} else if (evt instanceof EndOfData) {
assertThat(buffer.getDataType()).isEqualTo(Buffer.DataType.END_OF_DATA);
} else if (evt instanceof EndOfPartitionEvent) {
assertThat(buffer.getDataType()).isEqualTo(Buffer.DataType.END_OF_PARTITION);
} else if (evt instanceof WatermarkEvent) {
if (((WatermarkEvent) evt).isAligned()) {
assertThat(buffer.getDataType())
.isEqualTo(Buffer.DataType.ALIGNED_WATERMARK_EVENT);
} else {
assertThat(buffer.getDataType())
.isEqualTo(Buffer.DataType.UNALIGNED_WATERMARK_EVENT);
}
} else if (evt instanceof EndOfOutputChannelStateEvent) {
assertThat(buffer.getDataType()).isEqualTo(Buffer.DataType.RECOVERY_COMPLETION);
} else {
assertThat(buffer.getDataType()).isEqualTo(Buffer.DataType.EVENT_BUFFER);
}
}
}
}
| EventSerializerTest |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/operators/testutils/RandomIntPairGenerator.java | {
"start": 1015,
"end": 2065
} | class ____ implements MutableObjectIterator<IntPair> {
private final long seed;
private final long numRecords;
private Random rnd;
private long count;
public RandomIntPairGenerator(long seed) {
this(seed, Long.MAX_VALUE);
}
public RandomIntPairGenerator(long seed, long numRecords) {
this.seed = seed;
this.numRecords = numRecords;
this.rnd = new Random(seed);
}
@Override
public IntPair next(IntPair reuse) {
if (this.count++ < this.numRecords) {
reuse.setKey(this.rnd.nextInt());
reuse.setValue(this.rnd.nextInt());
return reuse;
} else {
return null;
}
}
@Override
public IntPair next() {
if (this.count++ < this.numRecords) {
return new IntPair(this.rnd.nextInt(), this.rnd.nextInt());
} else {
return null;
}
}
public void reset() {
this.rnd = new Random(this.seed);
this.count = 0;
}
}
| RandomIntPairGenerator |
java | quarkusio__quarkus | integration-tests/spring-data-rest/src/test/java/io/quarkus/it/spring/data/rest/SpringDataRestIT.java | {
"start": 119,
"end": 173
} | class ____ extends SpringDataRestTest {
}
| SpringDataRestIT |
java | apache__maven | impl/maven-core/src/main/java/org/apache/maven/lifecycle/internal/ReactorBuildStatus.java | {
"start": 1217,
"end": 2383
} | class ____ {
private final ProjectDependencyGraph projectDependencyGraph;
private final Collection<String> blackListedProjects = Collections.synchronizedSet(new HashSet<>());
private volatile boolean halted = false;
public ReactorBuildStatus(ProjectDependencyGraph projectDependencyGraph) {
this.projectDependencyGraph = projectDependencyGraph;
}
public boolean isBlackListed(MavenProject project) {
return blackListedProjects.contains(BuilderCommon.getKey(project));
}
public void blackList(MavenProject project) {
if (blackListedProjects.add(BuilderCommon.getKey(project)) && projectDependencyGraph != null) {
for (MavenProject downstreamProject : projectDependencyGraph.getDownstreamProjects(project, true)) {
blackListedProjects.add(BuilderCommon.getKey(downstreamProject));
}
}
}
public void halt() {
halted = true;
}
public boolean isHalted() {
return halted;
}
public boolean isHaltedOrBlacklisted(MavenProject mavenProject) {
return isBlackListed(mavenProject) || isHalted();
}
}
| ReactorBuildStatus |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/junit4/SpringJUnit4ClassRunnerTests.java | {
"start": 2978,
"end": 3087
} | interface ____ {
@AliasFor(annotation = Timed.class)
long millis() default 1000;
}
}
| MetaTimedWithOverride |
java | elastic__elasticsearch | x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/llama/completion/LlamaChatCompletionModel.java | {
"start": 1155,
"end": 5467
} | class ____ extends LlamaModel {
/**
* Constructor for creating a LlamaChatCompletionModel with specified parameters.
* @param inferenceEntityId the unique identifier for the inference entity
* @param taskType the type of task this model is designed for
* @param service the name of the inference service
* @param serviceSettings the settings for the inference service, specific to chat completion
* @param secrets the secret settings for the model, such as API keys or tokens
* @param context the context for parsing configuration settings
*/
public LlamaChatCompletionModel(
String inferenceEntityId,
TaskType taskType,
String service,
Map<String, Object> serviceSettings,
Map<String, Object> secrets,
ConfigurationParseContext context
) {
this(
inferenceEntityId,
taskType,
service,
LlamaChatCompletionServiceSettings.fromMap(serviceSettings, context),
retrieveSecretSettings(secrets)
);
}
/**
* Constructor for creating a LlamaChatCompletionModel with specified parameters.
* @param inferenceEntityId the unique identifier for the inference entity
* @param taskType the type of task this model is designed for
* @param service the name of the inference service
* @param serviceSettings the settings for the inference service, specific to chat completion
* @param secrets the secret settings for the model, such as API keys or tokens
*/
public LlamaChatCompletionModel(
String inferenceEntityId,
TaskType taskType,
String service,
LlamaChatCompletionServiceSettings serviceSettings,
SecretSettings secrets
) {
super(
new ModelConfigurations(inferenceEntityId, taskType, service, serviceSettings, EmptyTaskSettings.INSTANCE),
new ModelSecrets(secrets)
);
setPropertiesFromServiceSettings(serviceSettings);
}
/**
* Factory method to create a LlamaChatCompletionModel with overridden model settings based on the request.
* If the request does not specify a model, the original model is returned.
*
* @param model the original LlamaChatCompletionModel
* @param request the UnifiedCompletionRequest containing potential overrides
* @return a new LlamaChatCompletionModel with overridden settings or the original model if no overrides are specified
*/
public static LlamaChatCompletionModel of(LlamaChatCompletionModel model, UnifiedCompletionRequest request) {
if (request.model() == null) {
// If no model id is specified in the request, return the original model
return model;
}
var originalModelServiceSettings = model.getServiceSettings();
var overriddenServiceSettings = new LlamaChatCompletionServiceSettings(
request.model(),
originalModelServiceSettings.uri(),
originalModelServiceSettings.rateLimitSettings()
);
return new LlamaChatCompletionModel(
model.getInferenceEntityId(),
model.getTaskType(),
model.getConfigurations().getService(),
overriddenServiceSettings,
model.getSecretSettings()
);
}
private void setPropertiesFromServiceSettings(LlamaChatCompletionServiceSettings serviceSettings) {
this.uri = serviceSettings.uri();
this.rateLimitSettings = serviceSettings.rateLimitSettings();
}
/**
* Returns the service settings specific to Llama chat completion.
*
* @return the LlamaChatCompletionServiceSettings associated with this model
*/
@Override
public LlamaChatCompletionServiceSettings getServiceSettings() {
return (LlamaChatCompletionServiceSettings) super.getServiceSettings();
}
/**
* Accepts a visitor that creates an executable action for this Llama chat completion model.
*
* @param creator the visitor that creates the executable action
* @return an ExecutableAction representing this model
*/
@Override
public ExecutableAction accept(LlamaActionVisitor creator) {
return creator.create(this);
}
}
| LlamaChatCompletionModel |
java | elastic__elasticsearch | modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java | {
"start": 6002,
"end": 75084
} | class ____ implements ClusterStateListener, Closeable, SchedulerEngine.Listener {
public static final String DATA_STREAM_LIFECYCLE_POLL_INTERVAL = "data_streams.lifecycle.poll_interval";
public static final Setting<TimeValue> DATA_STREAM_LIFECYCLE_POLL_INTERVAL_SETTING = Setting.timeSetting(
DATA_STREAM_LIFECYCLE_POLL_INTERVAL,
TimeValue.timeValueMinutes(5),
TimeValue.timeValueSeconds(1),
Setting.Property.Dynamic,
Setting.Property.NodeScope
);
public static final ByteSizeValue ONE_HUNDRED_MB = ByteSizeValue.ofMb(100);
public static final int TARGET_MERGE_FACTOR_VALUE = 16;
public static final Setting<Integer> DATA_STREAM_MERGE_POLICY_TARGET_FACTOR_SETTING = Setting.intSetting(
"data_streams.lifecycle.target.merge.policy.merge_factor",
TARGET_MERGE_FACTOR_VALUE,
2,
Setting.Property.Dynamic,
Setting.Property.NodeScope
);
public static final Setting<ByteSizeValue> DATA_STREAM_MERGE_POLICY_TARGET_FLOOR_SEGMENT_SETTING = Setting.byteSizeSetting(
"data_streams.lifecycle.target.merge.policy.floor_segment",
ONE_HUNDRED_MB,
Setting.Property.Dynamic,
Setting.Property.NodeScope
);
/**
* This setting controls how often we signal that an index is in the error state when it comes to its data stream lifecycle
* progression.
* The signalling is currently logging at the `error` level but in the future it can signify other types of signalling.
*/
public static final Setting<Integer> DATA_STREAM_SIGNALLING_ERROR_RETRY_INTERVAL_SETTING = Setting.intSetting(
"data_streams.lifecycle.signalling.error_retry_interval",
10,
1,
Setting.Property.Dynamic,
Setting.Property.NodeScope
);
public static final String DOWNSAMPLED_INDEX_PREFIX = "downsample-";
private static final Logger logger = LogManager.getLogger(DataStreamLifecycleService.class);
/**
* Name constant for the job that schedules the data stream lifecycle
*/
private static final String LIFECYCLE_JOB_NAME = "data_stream_lifecycle";
/*
* This is the key for data stream lifecycle related custom index metadata.
*/
public static final String FORCE_MERGE_COMPLETED_TIMESTAMP_METADATA_KEY = "force_merge_completed_timestamp";
private final Settings settings;
private final Client client;
private final ClusterService clusterService;
private final ThreadPool threadPool;
final ResultDeduplicator<Tuple<ProjectId, TransportRequest>, Void> transportActionsDeduplicator;
final ResultDeduplicator<Tuple<ProjectId, String>, Void> clusterStateChangesDeduplicator;
private final DataStreamLifecycleHealthInfoPublisher dslHealthInfoPublisher;
private final DataStreamGlobalRetentionSettings globalRetentionSettings;
private LongSupplier nowSupplier;
private final Clock clock;
private final DataStreamLifecycleErrorStore errorStore;
private volatile boolean isMaster = false;
private volatile TimeValue pollInterval;
private volatile RolloverConfiguration rolloverConfiguration;
private SchedulerEngine.Job scheduledJob;
private final SetOnce<SchedulerEngine> scheduler = new SetOnce<>();
private final MasterServiceTaskQueue<UpdateForceMergeCompleteTask> forceMergeClusterStateUpdateTaskQueue;
private final MasterServiceTaskQueue<DeleteSourceAndAddDownsampleToDS> swapSourceWithDownsampleIndexQueue;
private volatile ByteSizeValue targetMergePolicyFloorSegment;
private volatile int targetMergePolicyFactor;
/**
* The number of retries for a particular index and error after which DSL will emmit a signal (e.g. log statement)
*/
private volatile int signallingErrorRetryInterval;
/**
* The following stats are tracking how the data stream lifecycle runs are performing time wise
*/
private volatile Long lastRunStartedAt = null;
private volatile Long lastRunDuration = null;
private volatile Long timeBetweenStarts = null;
private static final SimpleBatchedExecutor<UpdateForceMergeCompleteTask, Void> FORCE_MERGE_STATE_UPDATE_TASK_EXECUTOR =
new SimpleBatchedExecutor<>() {
@Override
public Tuple<ClusterState, Void> executeTask(UpdateForceMergeCompleteTask task, ClusterState clusterState) throws Exception {
return Tuple.tuple(task.execute(clusterState), null);
}
@Override
public void taskSucceeded(UpdateForceMergeCompleteTask task, Void unused) {
logger.trace("Updated cluster state for force merge of index [{}]", task.targetIndex);
task.listener.onResponse(null);
}
};
public DataStreamLifecycleService(
Settings settings,
Client client,
ClusterService clusterService,
Clock clock,
ThreadPool threadPool,
LongSupplier nowSupplier,
DataStreamLifecycleErrorStore errorStore,
AllocationService allocationService,
DataStreamLifecycleHealthInfoPublisher dataStreamLifecycleHealthInfoPublisher,
DataStreamGlobalRetentionSettings globalRetentionSettings
) {
this.settings = settings;
this.client = client;
this.clusterService = clusterService;
this.clock = clock;
this.threadPool = threadPool;
this.transportActionsDeduplicator = new ResultDeduplicator<>(threadPool.getThreadContext());
this.clusterStateChangesDeduplicator = new ResultDeduplicator<>(threadPool.getThreadContext());
this.nowSupplier = nowSupplier;
this.errorStore = errorStore;
this.globalRetentionSettings = globalRetentionSettings;
this.scheduledJob = null;
this.pollInterval = DATA_STREAM_LIFECYCLE_POLL_INTERVAL_SETTING.get(settings);
this.targetMergePolicyFloorSegment = DATA_STREAM_MERGE_POLICY_TARGET_FLOOR_SEGMENT_SETTING.get(settings);
this.targetMergePolicyFactor = DATA_STREAM_MERGE_POLICY_TARGET_FACTOR_SETTING.get(settings);
this.signallingErrorRetryInterval = DATA_STREAM_SIGNALLING_ERROR_RETRY_INTERVAL_SETTING.get(settings);
this.rolloverConfiguration = clusterService.getClusterSettings()
.get(DataStreamLifecycle.CLUSTER_LIFECYCLE_DEFAULT_ROLLOVER_SETTING);
this.forceMergeClusterStateUpdateTaskQueue = clusterService.createTaskQueue(
"data-stream-lifecycle-forcemerge-state-update",
Priority.LOW,
FORCE_MERGE_STATE_UPDATE_TASK_EXECUTOR
);
this.swapSourceWithDownsampleIndexQueue = clusterService.createTaskQueue(
"data-stream-lifecycle-swap-source-with-downsample",
Priority.URGENT, // urgent priority as this deletes indices
new DeleteSourceAndAddDownsampleIndexExecutor(allocationService)
);
this.dslHealthInfoPublisher = dataStreamLifecycleHealthInfoPublisher;
}
/**
* Initializer method to avoid the publication of a self reference in the constructor.
*/
public void init() {
clusterService.addListener(this);
clusterService.getClusterSettings()
.addSettingsUpdateConsumer(DATA_STREAM_LIFECYCLE_POLL_INTERVAL_SETTING, this::updatePollInterval);
clusterService.getClusterSettings()
.addSettingsUpdateConsumer(DataStreamLifecycle.CLUSTER_LIFECYCLE_DEFAULT_ROLLOVER_SETTING, this::updateRolloverConfiguration);
clusterService.getClusterSettings()
.addSettingsUpdateConsumer(DATA_STREAM_MERGE_POLICY_TARGET_FACTOR_SETTING, this::updateMergePolicyFactor);
clusterService.getClusterSettings()
.addSettingsUpdateConsumer(DATA_STREAM_MERGE_POLICY_TARGET_FLOOR_SEGMENT_SETTING, this::updateMergePolicyFloorSegment);
clusterService.getClusterSettings()
.addSettingsUpdateConsumer(DATA_STREAM_SIGNALLING_ERROR_RETRY_INTERVAL_SETTING, this::updateSignallingRetryThreshold);
}
@Override
public void clusterChanged(ClusterChangedEvent event) {
// wait for the cluster state to be recovered
if (event.state().blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) {
return;
}
final boolean prevIsMaster = this.isMaster;
if (prevIsMaster != event.localNodeMaster()) {
this.isMaster = event.localNodeMaster();
if (this.isMaster) {
// we weren't the master, and now we are
maybeScheduleJob();
} else {
// we were the master, and now we aren't
cancelJob();
// clear the deduplicator on master failover so we could re-send the requests in case we're re-elected
transportActionsDeduplicator.clear();
logger.trace("Clearing the error store as we are not the elected master anymore");
errorStore.clearStore();
}
}
}
@Override
public void close() {
SchedulerEngine engine = scheduler.get();
if (engine != null) {
engine.stop();
}
logger.trace("Clearing the error store as we are closing");
errorStore.clearStore();
}
@Override
public void triggered(SchedulerEngine.Event event) {
if (event.jobName().equals(LIFECYCLE_JOB_NAME)) {
if (this.isMaster) {
logger.trace(
"Data stream lifecycle job triggered: {}, {}, {}",
event.jobName(),
event.scheduledTime(),
event.triggeredTime()
);
run(clusterService.state());
dslHealthInfoPublisher.publishDslErrorEntries(new ActionListener<>() {
@Override
public void onResponse(AcknowledgedResponse acknowledgedResponse) {
assert acknowledgedResponse.isAcknowledged() : "updating the health info is always acknowledged";
}
@Override
public void onFailure(Exception e) {
logger.debug(
String.format(
Locale.ROOT,
"unable to update the health cache with DSL errors related information "
+ "due to [%s]. Will retry on the next DSL run",
e.getMessage()
),
e
);
}
});
}
}
}
/**
* Iterates over the data stream lifecycle managed data streams and executes the needed operations
* to satisfy the configured {@link DataStreamLifecycle}.
*/
// default visibility for testing purposes
void run(ClusterState state) {
long startTime = nowSupplier.getAsLong();
if (lastRunStartedAt != null) {
timeBetweenStarts = startTime - lastRunStartedAt;
}
lastRunStartedAt = startTime;
for (var projectId : state.metadata().projects().keySet()) {
// We catch inside the loop to avoid one broken project preventing DLM to run on other projects.
try {
run(state.projectState(projectId));
} catch (Exception e) {
logger.error(Strings.format("Data stream lifecycle failed to run on project [%s]", projectId), e);
}
}
}
private void run(ProjectState projectState) {
final var project = projectState.metadata();
int affectedIndices = 0;
int affectedDataStreams = 0;
for (DataStream dataStream : project.dataStreams().values()) {
clearErrorStoreForUnmanagedIndices(project, dataStream);
var dataLifecycleEnabled = dataStream.getDataLifecycle() != null && dataStream.getDataLifecycle().enabled();
var failureLifecycle = dataStream.getFailuresLifecycle();
var failuresLifecycleEnabled = failureLifecycle != null && failureLifecycle.enabled();
if (dataLifecycleEnabled == false && failuresLifecycleEnabled == false) {
continue;
}
// Retrieve the effective retention to ensure the same retention is used for this data stream
// through all operations.
var dataRetention = getEffectiveRetention(dataStream, globalRetentionSettings, false);
var failuresRetention = getEffectiveRetention(dataStream, globalRetentionSettings, true);
// the following indices should not be considered for the remainder of this service run, for various reasons.
Set<Index> indicesToExcludeForRemainingRun = new HashSet<>();
// These are the pre-rollover write indices. They may or may not be the write index after maybeExecuteRollover has executed,
// depending on rollover criteria, for this reason we exclude them for the remaining run.
indicesToExcludeForRemainingRun.add(maybeExecuteRollover(project, dataStream, dataRetention, false));
Index failureStoreWriteIndex = maybeExecuteRollover(project, dataStream, failuresRetention, true);
if (failureStoreWriteIndex != null) {
indicesToExcludeForRemainingRun.add(failureStoreWriteIndex);
}
// tsds indices that are still within their time bounds (i.e. now < time_series.end_time) - we don't want these indices to be
// deleted, forcemerged, or downsampled as they're still expected to receive large amounts of writes
indicesToExcludeForRemainingRun.addAll(
timeSeriesIndicesStillWithinTimeBounds(
project,
getTargetIndices(dataStream, indicesToExcludeForRemainingRun, project::index, false),
nowSupplier
)
);
try {
indicesToExcludeForRemainingRun.addAll(
maybeExecuteRetention(project, dataStream, dataRetention, failuresRetention, indicesToExcludeForRemainingRun)
);
} catch (Exception e) {
// individual index errors would be reported via the API action listener for every delete call
// we could potentially record errors at a data stream level and expose it via the _data_stream API?
logger.error(
() -> String.format(
Locale.ROOT,
"Data stream lifecycle failed to execute retention for data stream [%s]",
dataStream.getName()
),
e
);
}
try {
indicesToExcludeForRemainingRun.addAll(
maybeExecuteForceMerge(project, getTargetIndices(dataStream, indicesToExcludeForRemainingRun, project::index, true))
);
} catch (Exception e) {
logger.error(
() -> String.format(
Locale.ROOT,
"Data stream lifecycle failed to execute force merge for data stream [%s]",
dataStream.getName()
),
e
);
}
try {
indicesToExcludeForRemainingRun.addAll(
maybeExecuteDownsampling(
projectState,
dataStream,
getTargetIndices(dataStream, indicesToExcludeForRemainingRun, project::index, false)
)
);
} catch (Exception e) {
logger.error(
() -> String.format(
Locale.ROOT,
"Data stream lifecycle failed to execute downsampling for data stream [%s]",
dataStream.getName()
),
e
);
}
affectedIndices += indicesToExcludeForRemainingRun.size();
affectedDataStreams++;
}
lastRunDuration = nowSupplier.getAsLong() - lastRunStartedAt;
logger.trace(
"Data stream lifecycle service ran for {} and performed operations on [{}] indices, part of [{}] data streams, in project [{}]",
TimeValue.timeValueMillis(lastRunDuration).toHumanReadableString(2),
affectedIndices,
affectedDataStreams,
project.id()
);
}
// visible for testing
static Set<Index> timeSeriesIndicesStillWithinTimeBounds(ProjectMetadata project, List<Index> targetIndices, LongSupplier nowSupplier) {
Set<Index> tsIndicesWithinBounds = new HashSet<>();
for (Index index : targetIndices) {
IndexMetadata backingIndex = project.index(index);
assert backingIndex != null : "the data stream backing indices must exist";
if (IndexSettings.MODE.get(backingIndex.getSettings()) == IndexMode.TIME_SERIES) {
Instant configuredEndTime = IndexSettings.TIME_SERIES_END_TIME.get(backingIndex.getSettings());
assert configuredEndTime != null
: "a time series index must have an end time configured but [" + index.getName() + "] does not";
if (nowSupplier.getAsLong() <= configuredEndTime.toEpochMilli()) {
logger.trace(
"Data stream lifecycle will not perform any operations in this run on time series index [{}] because "
+ "its configured [{}] end time has not lapsed",
index.getName(),
configuredEndTime
);
tsIndicesWithinBounds.add(index);
}
}
}
return tsIndicesWithinBounds;
}
/**
* Data stream lifecycle supports configuring multiple rounds of downsampling for each managed index. When attempting to execute
* downsampling we iterate through the ordered rounds of downsampling that match an index (ordered ascending according to the `after`
* configuration) and try to figure out:
* - if we started downsampling for an earlier round and is in progress, in which case we need to wait for it to complete
* - if we started downsampling for an earlier round and it's finished but the downsampling index is not part of the data stream, in
* which case we need to replace the backing index with the downsampling index and delete the backing index
* - if we don't have any early rounds started or to add to the data stream, start downsampling the last matching round
*
* Note that the first time an index has a matching downsampling round we first mark it as read-only.
*
* Returns a set of indices that now have in-flight operations triggered by downsampling (it could be marking them as read-only,
* replacing an index in the data stream, deleting a source index, or downsampling itself) so these indices can be skipped in case
* there are other operations to be executed by the data stream lifecycle after downsampling.
*/
    Set<Index> maybeExecuteDownsampling(ProjectState projectState, DataStream dataStream, List<Index> targetIndices) {
        Set<Index> affectedIndices = new HashSet<>();
        final var project = projectState.metadata();
        for (Index index : targetIndices) {
            IndexMetadata backingIndexMeta = project.index(index);
            assert backingIndexMeta != null : "the data stream backing indices must exist";
            // the downsampling rounds that currently apply to this index; an empty list means there is nothing to do here
            List<DataStreamLifecycle.DownsamplingRound> downsamplingRounds = dataStream.getDownsamplingRoundsFor(
                index,
                project::index,
                nowSupplier
            );
            if (downsamplingRounds.isEmpty()) {
                continue;
            }

            String indexName = index.getName();
            // non-empty when this index is itself the product of a previous downsampling operation
            String downsamplingSourceIndex = IndexMetadata.INDEX_DOWNSAMPLE_SOURCE_NAME.get(backingIndexMeta.getSettings());

            // if the current index is not a downsample we want to mark the index as read-only before proceeding with downsampling
            if (org.elasticsearch.common.Strings.hasText(downsamplingSourceIndex) == false
                && projectState.blocks().indexBlocked(project.id(), ClusterBlockLevel.WRITE, indexName) == false) {
                affectedIndices.add(index);
                addIndexBlockOnce(project.id(), indexName);
            } else {
                // we're not performing any operation for this index which means that it:
                // - has matching downsample rounds
                // - is read-only
                // So let's wait for an in-progress downsampling operation to succeed or trigger the last matching round
                var downsamplingMethod = dataStream.getDataLifecycle().downsamplingMethod();
                affectedIndices.addAll(
                    waitForInProgressOrTriggerDownsampling(dataStream, backingIndexMeta, downsamplingRounds, downsamplingMethod, project)
                );
            }
        }

        return affectedIndices;
    }
/**
* Iterate over the matching downsampling rounds for the backing index (if any) and either wait for an early round to complete,
* add an early completed downsampling round to the data stream, or otherwise trigger the last matching downsampling round.
*
* Returns the indices for which we triggered an action/operation.
*/
private Set<Index> waitForInProgressOrTriggerDownsampling(
DataStream dataStream,
IndexMetadata backingIndex,
List<DataStreamLifecycle.DownsamplingRound> downsamplingRounds,
DownsampleConfig.SamplingMethod downsamplingMethod,
ProjectMetadata project
) {
assert dataStream.getIndices().contains(backingIndex.getIndex())
: "the provided backing index must be part of data stream:" + dataStream.getName();
assert downsamplingRounds.isEmpty() == false : "the index should be managed and have matching downsampling rounds";
Set<Index> affectedIndices = new HashSet<>();
DataStreamLifecycle.DownsamplingRound lastRound = downsamplingRounds.get(downsamplingRounds.size() - 1);
Index index = backingIndex.getIndex();
String indexName = index.getName();
for (DataStreamLifecycle.DownsamplingRound round : downsamplingRounds) {
// the downsample index name for each round is deterministic
String downsampleIndexName = DownsampleConfig.generateDownsampleIndexName(
DOWNSAMPLED_INDEX_PREFIX,
backingIndex,
round.fixedInterval()
);
IndexMetadata targetDownsampleIndexMeta = project.index(downsampleIndexName);
boolean targetDownsampleIndexExists = targetDownsampleIndexMeta != null;
if (targetDownsampleIndexExists) {
Set<Index> downsamplingNotComplete = evaluateDownsampleStatus(
project.id(),
dataStream,
INDEX_DOWNSAMPLE_STATUS.get(targetDownsampleIndexMeta.getSettings()),
round,
lastRound,
downsamplingMethod,
backingIndex,
targetDownsampleIndexMeta.getIndex()
);
if (downsamplingNotComplete.isEmpty() == false) {
affectedIndices.addAll(downsamplingNotComplete);
break;
}
} else {
if (round.equals(lastRound)) {
// no maintenance needed for previously started downsampling actions and we are on the last matching round so it's time
// to kick off downsampling
affectedIndices.add(index);
downsampleIndexOnce(round, downsamplingMethod, project.id(), backingIndex, downsampleIndexName);
}
}
}
return affectedIndices;
}
    /**
     * Issues a request to downsample the source index to the downsample index for the specified round.
     * The request is routed through the transport action deduplicator, keyed on (project, request), so repeated
     * lifecycle runs do not start a second identical downsample operation while one is already in flight.
     */
    private void downsampleIndexOnce(
        DataStreamLifecycle.DownsamplingRound round,
        DownsampleConfig.SamplingMethod requestedDownsamplingMethod,
        ProjectId projectId,
        IndexMetadata sourceIndexMetadata,
        String downsampleIndexName
    ) {
        // When an index is already downsampled with a method, we require all later downsampling rounds to use the same method.
        // This is necessary to preserve the relation of the downsampled index to the raw data. For example, if an index is already
        // downsampled and downsampled it again to 1 hour; we know that a document represents either the aggregated raw data of an hour
        // or the last value of the raw data within this hour. If we mix the methods, we cannot derive any meaning from them.
        // Furthermore, data stream lifecycle is configured on the data stream level and not on the individual index level, meaning that
        // when a user changes downsampling method, some indices would not be able to be downsampled anymore.
        // For this reason, when we encounter an already downsampled index, we use the source downsampling method which might be different
        // from the requested one.
        var sourceIndexSamplingMethod = DownsampleConfig.SamplingMethod.fromIndexMetadata(sourceIndexMetadata);
        String sourceIndex = sourceIndexMetadata.getIndex().getName();
        DownsampleAction.Request request = new DownsampleAction.Request(
            TimeValue.THIRTY_SECONDS /* TODO should this be longer/configurable? */,
            sourceIndex,
            downsampleIndexName,
            null,
            new DownsampleConfig(
                round.fixedInterval(),
                // a null source method means the source index is not itself a downsample, so honour the requested method
                sourceIndexSamplingMethod == null ? requestedDownsamplingMethod : sourceIndexSamplingMethod
            )
        );
        transportActionsDeduplicator.executeOnce(
            Tuple.tuple(projectId, request),
            new ErrorRecordingActionListener(
                DownsampleAction.NAME,
                projectId,
                sourceIndex,
                errorStore,
                Strings.format(
                    "Data stream lifecycle encountered an error trying to downsample index [%s]. Data stream lifecycle will "
                        + "attempt to downsample the index on its next run.",
                    sourceIndex
                ),
                signallingErrorRetryInterval
            ),
            (req, reqListener) -> downsampleIndex(projectId, request, reqListener)
        );
    }
/**
* Checks the status of the downsampling operations for the provided backing index and its corresponding downsample index.
* Depending on the status, we'll either error (if it's UNKNOWN and we've reached the last round), wait for it to complete (if it's
* STARTED), or replace the backing index with the downsample index in the data stream (if the status is SUCCESS).
*/
private Set<Index> evaluateDownsampleStatus(
ProjectId projectId,
DataStream dataStream,
IndexMetadata.DownsampleTaskStatus downsampleStatus,
DataStreamLifecycle.DownsamplingRound currentRound,
DataStreamLifecycle.DownsamplingRound lastRound,
DownsampleConfig.SamplingMethod downsamplingMethod,
IndexMetadata backingIndex,
Index downsampleIndex
) {
Set<Index> affectedIndices = new HashSet<>();
String indexName = backingIndex.getIndex().getName();
String downsampleIndexName = downsampleIndex.getName();
return switch (downsampleStatus) {
case UNKNOWN -> {
if (currentRound.equals(lastRound)) {
// target downsampling index exists and is not a downsampling index (name clash?)
// we fail now but perhaps we should just randomise the name?
recordAndLogError(
projectId,
indexName,
errorStore,
new ResourceAlreadyExistsException(downsampleIndexName),
String.format(
Locale.ROOT,
"Data stream lifecycle service is unable to downsample backing index [%s] for data "
+ "stream [%s] and donwsampling round [%s] because the target downsample index [%s] already exists",
indexName,
dataStream.getName(),
currentRound,
downsampleIndexName
),
signallingErrorRetryInterval
);
}
yield affectedIndices;
}
case STARTED -> {
// we'll wait for this round to complete
// TODO add support for cancelling a current in-progress operation if another, later, round matches
logger.trace(
"Data stream lifecycle service waits for index [{}] to be downsampled. Current status is [{}] and the "
+ "downsample index name is [{}]",
indexName,
STARTED,
downsampleIndexName
);
// this request here might seem weird, but hear me out:
// if we triggered a downsample operation, and then had a master failover (so DSL starts from scratch)
// we can't really find out if the downsampling persistent task failed (if it was successful, no worries, the next case
// SUCCESS branch will catch it and we will cruise forward)
// if the downsampling persistent task failed, we will find out only via re-issuing the downsample request (and we will
// continue to re-issue the request until we get SUCCESS)
// NOTE that the downsample request is made through the deduplicator so it will only really be executed if
// there isn't one already in-flight. This can happen if a previous request timed-out, failed, or there was a
// master failover and data stream lifecycle needed to restart
downsampleIndexOnce(currentRound, downsamplingMethod, projectId, backingIndex, downsampleIndexName);
affectedIndices.add(backingIndex.getIndex());
yield affectedIndices;
}
case SUCCESS -> {
if (dataStream.getIndices().contains(downsampleIndex) == false) {
// at this point the source index is part of the data stream and the downsample index is complete but not
// part of the data stream. we need to replace the source index with the downsample index in the data stream
affectedIndices.add(backingIndex.getIndex());
replaceBackingIndexWithDownsampleIndexOnce(projectId, dataStream, indexName, downsampleIndexName);
}
yield affectedIndices;
}
};
}
/**
* Issues a request to replace the backing index with the downsample index through the cluster state changes deduplicator.
*/
    private void replaceBackingIndexWithDownsampleIndexOnce(
        ProjectId projectId,
        DataStream dataStream,
        String backingIndexName,
        String downsampleIndexName
    ) {
        // deduplication key: unique per (data stream, source index, downsample index) swap
        String requestName = "dsl-replace-" + dataStream.getName() + "-" + backingIndexName + "-" + downsampleIndexName;
        clusterStateChangesDeduplicator.executeOnce(
            // we use a String key here as otherwise it's ... awkward as we have to create the DeleteSourceAndAddDownsampleToDS as the
            // key _without_ a listener (passing in null) and then below we create it again with the `reqListener`. We're using a String
            // as it seems to be clearer.
            Tuple.tuple(projectId, requestName),
            new ErrorRecordingActionListener(
                requestName,
                projectId,
                backingIndexName,
                errorStore,
                Strings.format(
                    "Data stream lifecycle encountered an error trying to replace index [%s] with index [%s] in data stream [%s]",
                    backingIndexName,
                    downsampleIndexName,
                    dataStream
                ),
                signallingErrorRetryInterval
            ),
            (req, reqListener) -> {
                logger.trace(
                    "Data stream lifecycle issues request to replace index [{}] with index [{}] in data stream [{}]",
                    backingIndexName,
                    downsampleIndexName,
                    dataStream
                );
                swapSourceWithDownsampleIndexQueue.submitTask(
                    "data-stream-lifecycle-delete-source[" + backingIndexName + "]-add-to-datastream-[" + downsampleIndexName + "]",
                    new DeleteSourceAndAddDownsampleToDS(
                        settings,
                        projectId,
                        dataStream.getName(),
                        backingIndexName,
                        downsampleIndexName,
                        reqListener
                    ),
                    // NOTE(review): null appears to mean "no timeout" for this cluster state update task — confirm
                    null
                );
            }
        );
    }
/**
* Issues a request to delete the provided index through the transport action deduplicator.
*/
private void deleteIndexOnce(ProjectId projectId, String indexName, String reason) {
DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(indexName).masterNodeTimeout(TimeValue.MAX_VALUE);
transportActionsDeduplicator.executeOnce(
Tuple.tuple(projectId, deleteIndexRequest),
new ErrorRecordingActionListener(
TransportDeleteIndexAction.TYPE.name(),
projectId,
indexName,
errorStore,
Strings.format("Data stream lifecycle encountered an error trying to delete index [%s]", indexName),
signallingErrorRetryInterval
),
(req, reqListener) -> deleteIndex(projectId, deleteIndexRequest, reason, reqListener)
);
}
/**
* Issues a request to add a WRITE index block for the provided index through the transport action deduplicator.
*/
private void addIndexBlockOnce(ProjectId projectId, String indexName) {
AddIndexBlockRequest addIndexBlockRequest = new AddIndexBlockRequest(WRITE, indexName).masterNodeTimeout(TimeValue.MAX_VALUE);
transportActionsDeduplicator.executeOnce(
Tuple.tuple(projectId, addIndexBlockRequest),
new ErrorRecordingActionListener(
TransportAddIndexBlockAction.TYPE.name(),
projectId,
indexName,
errorStore,
Strings.format("Data stream lifecycle service encountered an error trying to mark index [%s] as readonly", indexName),
signallingErrorRetryInterval
),
(req, reqListener) -> addIndexBlock(projectId, addIndexBlockRequest, reqListener)
);
}
/**
* Returns the data stream lifecycle managed indices that are not part of the set of indices to exclude.
*/
// For testing
static List<Index> getTargetIndices(
DataStream dataStream,
Set<Index> indicesToExcludeForRemainingRun,
Function<String, IndexMetadata> indexMetadataSupplier,
boolean withFailureStore
) {
List<Index> targetIndices = new ArrayList<>();
for (Index index : dataStream.getIndices()) {
if (dataStream.isIndexManagedByDataStreamLifecycle(index, indexMetadataSupplier)
&& indicesToExcludeForRemainingRun.contains(index) == false) {
targetIndices.add(index);
}
}
if (withFailureStore && dataStream.getFailureIndices().isEmpty() == false) {
for (Index index : dataStream.getFailureIndices()) {
if (dataStream.isIndexManagedByDataStreamLifecycle(index, indexMetadataSupplier)
&& indicesToExcludeForRemainingRun.contains(index) == false) {
targetIndices.add(index);
}
}
}
return targetIndices;
}
/**
* This clears the error store for the case where a data stream or some backing indices were managed by data stream lifecycle, failed in
* their lifecycle execution, and then they were not managed by the data stream lifecycle (maybe they were switched to ILM).
*/
private void clearErrorStoreForUnmanagedIndices(ProjectMetadata project, DataStream dataStream) {
for (String indexName : errorStore.getAllIndices(project.id())) {
IndexAbstraction indexAbstraction = project.getIndicesLookup().get(indexName);
DataStream parentDataStream = indexAbstraction != null ? indexAbstraction.getParentDataStream() : null;
if (indexAbstraction == null || parentDataStream == null) {
logger.trace(
"Clearing recorded error for index [{}] because the index doesn't exist or is not a data stream backing index anymore",
indexName
);
errorStore.clearRecordedError(project.id(), indexName);
} else if (parentDataStream.getName().equals(dataStream.getName())) {
// we're only verifying the indices that pertain to this data stream
IndexMetadata indexMeta = project.index(indexName);
if (dataStream.isIndexManagedByDataStreamLifecycle(indexMeta.getIndex(), project::index) == false) {
logger.trace("Clearing recorded error for index [{}] because the index is not managed by DSL anymore", indexName);
errorStore.clearRecordedError(project.id(), indexName);
}
}
}
}
@Nullable
private Index maybeExecuteRollover(
ProjectMetadata project,
DataStream dataStream,
TimeValue effectiveRetention,
boolean rolloverFailureStore
) {
Index currentRunWriteIndex = rolloverFailureStore ? dataStream.getWriteFailureIndex() : dataStream.getWriteIndex();
if (currentRunWriteIndex == null) {
return null;
}
try {
if (dataStream.isIndexManagedByDataStreamLifecycle(currentRunWriteIndex, project::index)) {
DataStreamLifecycle lifecycle = rolloverFailureStore ? dataStream.getFailuresLifecycle() : dataStream.getDataLifecycle();
RolloverRequest rolloverRequest = getDefaultRolloverRequest(
rolloverConfiguration,
dataStream.getName(),
effectiveRetention,
rolloverFailureStore
);
transportActionsDeduplicator.executeOnce(
Tuple.tuple(project.id(), rolloverRequest),
new ErrorRecordingActionListener(
RolloverAction.NAME,
project.id(),
currentRunWriteIndex.getName(),
errorStore,
Strings.format(
"Data stream lifecycle encountered an error trying to roll over%s data stream [%s]",
rolloverFailureStore ? " the failure store of " : "",
dataStream.getName()
),
signallingErrorRetryInterval
),
(req, reqListener) -> rolloverDataStream(project.id(), currentRunWriteIndex.getName(), rolloverRequest, reqListener)
);
}
} catch (Exception e) {
logger.error(
() -> String.format(
Locale.ROOT,
"Data stream lifecycle encountered an error trying to roll over%s data stream [%s]",
rolloverFailureStore ? " the failure store of " : "",
dataStream.getName()
),
e
);
ProjectMetadata latestProject = clusterService.state().metadata().projects().get(project.id());
DataStream latestDataStream = latestProject == null ? null : latestProject.dataStreams().get(dataStream.getName());
if (latestDataStream != null) {
if (latestDataStream.getWriteIndex().getName().equals(currentRunWriteIndex.getName())) {
// data stream has not been rolled over in the meantime so record the error against the write index we
// attempted the rollover
errorStore.recordError(project.id(), currentRunWriteIndex.getName(), e);
}
}
}
return currentRunWriteIndex;
}
    /**
     * This method sends requests to delete any indices in the datastream that exceed its retention policy. It returns the set of indices
     * it has sent delete requests for.
     *
     * @param project The project metadata from which to get index metadata
     * @param dataStream The data stream
     * @param dataRetention The effective retention period for the backing indices, or null if none is configured
     * @param failureRetention The effective retention period for the failure store indices, or null if none is configured
     * @param indicesToExcludeForRemainingRun Indices to exclude from retention even if it would be time for them to be deleted
     * @return The set of indices that delete requests have been sent for
     */
    Set<Index> maybeExecuteRetention(
        ProjectMetadata project,
        DataStream dataStream,
        TimeValue dataRetention,
        TimeValue failureRetention,
        Set<Index> indicesToExcludeForRemainingRun
    ) {
        // no retention configured for either the backing or the failure indices: nothing can be deleted
        if (dataRetention == null && failureRetention == null) {
            return Set.of();
        }
        List<Index> backingIndicesOlderThanRetention = dataStream.getIndicesPastRetention(
            project::index,
            nowSupplier,
            dataRetention,
            false
        );
        List<Index> failureIndicesOlderThanRetention = dataStream.getIndicesPastRetention(
            project::index,
            nowSupplier,
            failureRetention,
            true
        );
        if (backingIndicesOlderThanRetention.isEmpty() && failureIndicesOlderThanRetention.isEmpty()) {
            return Set.of();
        }
        Set<Index> indicesToBeRemoved = new HashSet<>();
        if (backingIndicesOlderThanRetention.isEmpty() == false) {
            assert dataStream.getDataLifecycle() != null : "data stream should have data lifecycle if we have 'old' indices";
            for (Index index : backingIndicesOlderThanRetention) {
                if (indicesToExcludeForRemainingRun.contains(index) == false) {
                    IndexMetadata backingIndex = project.index(index);
                    assert backingIndex != null : "the data stream backing indices must exist";

                    IndexMetadata.DownsampleTaskStatus downsampleStatus = INDEX_DOWNSAMPLE_STATUS.get(backingIndex.getSettings());
                    // we don't want to delete the source index if they have an in-progress downsampling operation because the
                    // target downsample index will remain in the system as a standalone index
                    if (downsampleStatus == STARTED) {
                        // there's an opportunity here to cancel downsampling and delete the source index now
                        logger.trace(
                            "Data stream lifecycle skips deleting index [{}] even though its retention period [{}] has lapsed "
                                + "because there's a downsampling operation currently in progress for this index. Current downsampling "
                                + "status is [{}]. When downsampling completes, DSL will delete this index.",
                            index.getName(),
                            dataRetention,
                            downsampleStatus
                        );
                    } else {
                        // UNKNOWN is the default value, and has no real use. So index should be deleted
                        // SUCCESS meaning downsampling completed successfully and there is nothing in progress, so we can also delete
                        indicesToBeRemoved.add(index);

                        // there's an opportunity here to batch the delete requests (i.e. delete 100 indices / request)
                        // let's start simple and reevaluate
                        String indexName = backingIndex.getIndex().getName();
                        deleteIndexOnce(project.id(), indexName, "the lapsed [" + dataRetention + "] retention period");
                    }
                }
            }
        }
        if (failureIndicesOlderThanRetention.isEmpty() == false) {
            assert dataStream.getFailuresLifecycle() != null : "data stream should have failures lifecycle if we have 'old' indices";
            for (Index index : failureIndicesOlderThanRetention) {
                if (indicesToExcludeForRemainingRun.contains(index) == false) {
                    IndexMetadata failureIndex = project.index(index);
                    assert failureIndex != null : "the data stream failure indices must exist";
                    // NOTE(review): unlike the backing-indices branch there is no downsampling-status check here —
                    // presumably failure indices are never downsampled; confirm
                    indicesToBeRemoved.add(index);
                    // there's an opportunity here to batch the delete requests (i.e. delete 100 indices / request)
                    // let's start simple and reevaluate
                    String indexName = failureIndex.getIndex().getName();
                    deleteIndexOnce(project.id(), indexName, "the lapsed [" + failureRetention + "] retention period");
                }
            }
        }
        return indicesToBeRemoved;
    }
/*
* This method force merges the given indices in the datastream. It writes a timestamp in the cluster state upon completion of the
* force merge.
*/
private Set<Index> maybeExecuteForceMerge(ProjectMetadata project, List<Index> indices) {
Set<Index> affectedIndices = new HashSet<>();
for (Index index : indices) {
IndexMetadata backingIndex = project.index(index);
assert backingIndex != null : "the data stream backing indices must exist";
String indexName = index.getName();
boolean alreadyForceMerged = isForceMergeComplete(backingIndex);
if (alreadyForceMerged) {
logger.trace("Already force merged {}", indexName);
continue;
}
ByteSizeValue configuredFloorSegmentMerge = MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING.get(
backingIndex.getSettings()
);
Integer configuredMergeFactor = MergePolicyConfig.INDEX_MERGE_POLICY_MERGE_FACTOR_SETTING.get(backingIndex.getSettings());
if ((configuredFloorSegmentMerge == null || configuredFloorSegmentMerge.equals(targetMergePolicyFloorSegment) == false)
|| (configuredMergeFactor == null || configuredMergeFactor.equals(targetMergePolicyFactor) == false)) {
UpdateSettingsRequest updateMergePolicySettingsRequest = new UpdateSettingsRequest();
updateMergePolicySettingsRequest.indices(indexName);
updateMergePolicySettingsRequest.settings(
Settings.builder()
.put(MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING.getKey(), targetMergePolicyFloorSegment)
.put(MergePolicyConfig.INDEX_MERGE_POLICY_MERGE_FACTOR_SETTING.getKey(), targetMergePolicyFactor)
);
updateMergePolicySettingsRequest.masterNodeTimeout(TimeValue.MAX_VALUE);
affectedIndices.add(index);
transportActionsDeduplicator.executeOnce(
Tuple.tuple(project.id(), updateMergePolicySettingsRequest),
new ErrorRecordingActionListener(
TransportUpdateSettingsAction.TYPE.name(),
project.id(),
indexName,
errorStore,
Strings.format(
"Data stream lifecycle encountered an error trying to to update settings [%s] for index [%s]",
updateMergePolicySettingsRequest.settings().keySet(),
indexName
),
signallingErrorRetryInterval
),
(req, reqListener) -> updateIndexSetting(project.id(), updateMergePolicySettingsRequest, reqListener)
);
} else {
affectedIndices.add(index);
ForceMergeRequest forceMergeRequest = new ForceMergeRequest(indexName);
// time to force merge the index
transportActionsDeduplicator.executeOnce(
Tuple.tuple(project.id(), new ForceMergeRequestWrapper(forceMergeRequest)),
new ErrorRecordingActionListener(
ForceMergeAction.NAME,
project.id(),
indexName,
errorStore,
Strings.format(
"Data stream lifecycle encountered an error trying to force merge index [%s]. Data stream lifecycle will "
+ "attempt to force merge the index on its next run.",
indexName
),
signallingErrorRetryInterval
),
(req, reqListener) -> forceMergeIndex(project.id(), forceMergeRequest, reqListener)
);
}
}
return affectedIndices;
}
    /**
     * Executes the provided rollover request and notifies the listener. On failure the error is only propagated (and
     * thus recorded against the write index) when the data stream still has the same write index we attempted to roll
     * over; otherwise the failure is swallowed because a newer write index already exists.
     */
    private void rolloverDataStream(
        ProjectId projectId,
        String writeIndexName,
        RolloverRequest rolloverRequest,
        ActionListener<Void> listener
    ) {
        // "saving" the rollover target name here so we don't capture the entire request
        ResolvedExpression resolvedRolloverTarget = SelectorResolver.parseExpression(
            rolloverRequest.getRolloverTarget(),
            rolloverRequest.indicesOptions()
        );
        logger.trace("Data stream lifecycle issues rollover request for data stream [{}]", rolloverRequest.getRolloverTarget());
        client.projectClient(projectId).admin().indices().rolloverIndex(rolloverRequest, new ActionListener<>() {
            @Override
            public void onResponse(RolloverResponse rolloverResponse) {
                // Log only when the conditions were met and the index was rolled over.
                if (rolloverResponse.isRolledOver()) {
                    List<String> metConditions = rolloverResponse.getConditionStatus()
                        .entrySet()
                        .stream()
                        .filter(Map.Entry::getValue)
                        .map(Map.Entry::getKey)
                        .toList();
                    logger.info(
                        "Data stream lifecycle successfully rolled over datastream [{}] due to the following met rollover "
                            + "conditions {}. The new index is [{}]",
                        rolloverRequest.getRolloverTarget(),
                        metConditions,
                        rolloverResponse.getNewIndex()
                    );
                }
                listener.onResponse(null);
            }

            @Override
            public void onFailure(Exception e) {
                // re-read the latest cluster state to see whether the write index we attempted to roll over is still current
                ProjectMetadata latestProject = clusterService.state().metadata().projects().get(projectId);
                DataStream dataStream = latestProject == null ? null : latestProject.dataStreams().get(resolvedRolloverTarget.resource());
                boolean targetsFailureStore = IndexComponentSelector.FAILURES == resolvedRolloverTarget.selector();
                if (dataStream == null || Objects.equals(getWriteIndexName(dataStream, targetsFailureStore), writeIndexName) == false) {
                    // the data stream has another write index so no point in recording an error for the previous write index we were
                    // attempting to roll over
                    // if there are persistent issues with rolling over this data stream, the next data stream lifecycle run will attempt to
                    // rollover the _current_ write index and the error problem should surface then
                    listener.onResponse(null);
                } else {
                    // the data stream has NOT been rolled over since we issued our rollover request, so let's record the
                    // error against the data stream's write index.
                    listener.onFailure(e);
                }
            }
        });
    }
@Nullable
private String getWriteIndexName(DataStream dataStream, boolean failureStore) {
if (dataStream == null) {
return null;
}
if (failureStore) {
return dataStream.getWriteFailureIndex() == null ? null : dataStream.getWriteFailureIndex().getName();
}
return dataStream.getWriteIndex().getName();
}
private void updateIndexSetting(ProjectId projectId, UpdateSettingsRequest updateSettingsRequest, ActionListener<Void> listener) {
assert updateSettingsRequest.indices() != null && updateSettingsRequest.indices().length == 1
: "Data stream lifecycle service updates the settings for one index at a time";
// "saving" the index name here so we don't capture the entire request
String targetIndex = updateSettingsRequest.indices()[0];
logger.trace(
"Data stream lifecycle service issues request to update settings [{}] for index [{}]",
updateSettingsRequest.settings().keySet(),
targetIndex
);
client.projectClient(projectId).admin().indices().updateSettings(updateSettingsRequest, new ActionListener<>() {
@Override
public void onResponse(AcknowledgedResponse acknowledgedResponse) {
logger.info(
"Data stream lifecycle service successfully updated settings [{}] for index index [{}]",
updateSettingsRequest.settings().keySet(),
targetIndex
);
listener.onResponse(null);
}
@Override
public void onFailure(Exception e) {
if (e instanceof IndexNotFoundException) {
// index was already deleted, treat this as a success
logger.trace("Clearing recorded error for index [{}] because the index was deleted", targetIndex);
errorStore.clearRecordedError(projectId, targetIndex);
listener.onResponse(null);
return;
}
listener.onFailure(e);
}
});
}
private void addIndexBlock(ProjectId projectId, AddIndexBlockRequest addIndexBlockRequest, ActionListener<Void> listener) {
assert addIndexBlockRequest.indices() != null && addIndexBlockRequest.indices().length == 1
: "Data stream lifecycle service updates the index block for one index at a time";
// "saving" the index name here so we don't capture the entire request
String targetIndex = addIndexBlockRequest.indices()[0];
logger.trace(
"Data stream lifecycle service issues request to add block [{}] for index [{}]",
addIndexBlockRequest.getBlock(),
targetIndex
);
client.projectClient(projectId).admin().indices().addBlock(addIndexBlockRequest, new ActionListener<>() {
@Override
public void onResponse(AddIndexBlockResponse addIndexBlockResponse) {
if (addIndexBlockResponse.isAcknowledged()) {
logger.info(
"Data stream lifecycle service successfully added block [{}] for index index [{}]",
addIndexBlockRequest.getBlock(),
targetIndex
);
listener.onResponse(null);
} else {
Optional<AddIndexBlockResponse.AddBlockResult> resultForTargetIndex = addIndexBlockResponse.getIndices()
.stream()
.filter(blockResult -> blockResult.getIndex().getName().equals(targetIndex))
.findAny();
if (resultForTargetIndex.isEmpty()) {
// blimey
// this is weird, we don't have a result for our index, so let's treat this as a success and the next DSL run will
// check if we need to retry adding the block for this index
logger.trace(
"Data stream lifecycle service received an unacknowledged response when attempting to add the "
+ "read-only block to index [{}], but the response didn't contain an explicit result for the index.",
targetIndex
);
listener.onFailure(
new ElasticsearchException("request to mark index [" + targetIndex + "] as read-only was not acknowledged")
);
} else if (resultForTargetIndex.get().hasFailures()) {
AddIndexBlockResponse.AddBlockResult blockResult = resultForTargetIndex.get();
if (blockResult.getException() != null) {
listener.onFailure(blockResult.getException());
} else {
List<AddIndexBlockResponse.AddBlockShardResult.Failure> shardFailures = new ArrayList<>(
blockResult.getShards().length
);
for (AddIndexBlockResponse.AddBlockShardResult shard : blockResult.getShards()) {
if (shard.hasFailures()) {
shardFailures.addAll(Arrays.asList(shard.getFailures()));
}
}
assert shardFailures.isEmpty() == false
: "The block response must have shard failures as the global "
+ "exception is null. The block result is: "
+ blockResult;
String errorMessage = org.elasticsearch.common.Strings.collectionToDelimitedString(
shardFailures.stream().map(org.elasticsearch.common.Strings::toString).collect(Collectors.toList()),
","
);
listener.onFailure(new ElasticsearchException(errorMessage));
}
} else {
listener.onFailure(
new ElasticsearchException("request to mark index [" + targetIndex + "] as read-only was not acknowledged")
);
}
}
}
@Override
public void onFailure(Exception e) {
if (e instanceof IndexNotFoundException) {
// index was already deleted, treat this as a success
logger.trace("Clearing recorded error for index [{}] because the index was deleted", targetIndex);
errorStore.clearRecordedError(projectId, targetIndex);
listener.onResponse(null);
return;
}
listener.onFailure(e);
}
});
}
    /**
     * Executes the provided delete-index request and notifies the listener. An {@link IndexNotFoundException} is treated
     * as a success (the index is already gone) and additionally clears any error previously recorded for that index.
     */
    private void deleteIndex(ProjectId projectId, DeleteIndexRequest deleteIndexRequest, String reason, ActionListener<Void> listener) {
        assert deleteIndexRequest.indices() != null && deleteIndexRequest.indices().length == 1
            : "Data stream lifecycle deletes one index at a time";
        // "saving" the index name here so we don't capture the entire request
        String targetIndex = deleteIndexRequest.indices()[0];
        logger.trace("Data stream lifecycle issues request to delete index [{}]", targetIndex);
        client.projectClient(projectId).admin().indices().delete(deleteIndexRequest, new ActionListener<>() {
            @Override
            public void onResponse(AcknowledgedResponse acknowledgedResponse) {
                if (acknowledgedResponse.isAcknowledged()) {
                    logger.info("Data stream lifecycle successfully deleted index [{}] due to {}", targetIndex, reason);
                } else {
                    // an unacknowledged delete is not treated as an error; the next run retries if the index still exists
                    logger.trace(
                        "The delete request for index [{}] was not acknowledged. Data stream lifecycle service will retry on the"
                            + " next run if the index still exists",
                        targetIndex
                    );
                }
                listener.onResponse(null);
            }

            @Override
            public void onFailure(Exception e) {
                if (e instanceof IndexNotFoundException) {
                    logger.trace("Data stream lifecycle did not delete index [{}] as it was already deleted", targetIndex);
                    // index was already deleted, treat this as a success
                    errorStore.clearRecordedError(projectId, targetIndex);
                    listener.onResponse(null);
                    return;
                }
                // a snapshot-in-progress failure is still propagated so it gets recorded; the delete is retried next run
                if (e instanceof SnapshotInProgressException) {
                    logger.info(
                        "Data stream lifecycle was unable to delete index [{}] because it's currently being snapshot. Retrying on "
                            + "the next data stream lifecycle run",
                        targetIndex
                    );
                }
                listener.onFailure(e);
            }
        });
    }
private void downsampleIndex(ProjectId projectId, DownsampleAction.Request request, ActionListener<Void> listener) {
String sourceIndex = request.getSourceIndex();
String downsampleIndex = request.getTargetIndex();
logger.info("Data stream lifecycle issuing request to downsample index [{}] to index [{}]", sourceIndex, downsampleIndex);
client.projectClient(projectId).execute(DownsampleAction.INSTANCE, request, new ActionListener<>() {
@Override
public void onResponse(AcknowledgedResponse acknowledgedResponse) {
assert acknowledgedResponse.isAcknowledged() : "the downsample response is always acknowledged";
logger.info("Data stream lifecycle successfully downsampled index [{}] to index [{}]", sourceIndex, downsampleIndex);
listener.onResponse(null);
}
@Override
public void onFailure(Exception e) {
listener.onFailure(e);
}
});
}
/*
* This method executes the given force merge request. Once the request has completed successfully it writes a timestamp as custom
* metadata in the cluster state indicating when the force merge has completed. The listener is notified after the cluster state
     * update has been made, or when the forcemerge fails or the write to the cluster state fails.
*/
private void forceMergeIndex(ProjectId projectId, ForceMergeRequest forceMergeRequest, ActionListener<Void> listener) {
assert forceMergeRequest.indices() != null && forceMergeRequest.indices().length == 1
: "Data stream lifecycle force merges one index at a time";
final String targetIndex = forceMergeRequest.indices()[0];
logger.info("Data stream lifecycle is issuing a request to force merge index [{}]", targetIndex);
client.projectClient(projectId).admin().indices().forceMerge(forceMergeRequest, new ActionListener<>() {
@Override
public void onResponse(BroadcastResponse forceMergeResponse) {
if (forceMergeResponse.getFailedShards() > 0) {
DefaultShardOperationFailedException[] failures = forceMergeResponse.getShardFailures();
String message = Strings.format(
"Data stream lifecycle failed to forcemerge %d shards for index [%s] due to failures [%s]",
forceMergeResponse.getFailedShards(),
targetIndex,
failures == null
? "unknown"
: Arrays.stream(failures).map(DefaultShardOperationFailedException::toString).collect(Collectors.joining(","))
);
onFailure(new ElasticsearchException(message));
} else if (forceMergeResponse.getTotalShards() != forceMergeResponse.getSuccessfulShards()) {
String message = Strings.format(
"Force merge request only had %d successful shards out of a total of %d",
forceMergeResponse.getSuccessfulShards(),
forceMergeResponse.getTotalShards()
);
onFailure(new ElasticsearchException(message));
} else {
logger.info("Data stream lifecycle successfully force merged index [{}]", targetIndex);
setForceMergeCompletedTimestamp(projectId, targetIndex, listener);
}
}
@Override
public void onFailure(Exception e) {
listener.onFailure(e);
}
});
}
/*
* This method sets the value of the custom index metadata field "force_merge_completed_timestamp" within the field
* "data_stream_lifecycle" to value. The method returns immediately, but the update happens asynchronously and listener is notified on
* success or failure.
*/
private void setForceMergeCompletedTimestamp(ProjectId projectId, String targetIndex, ActionListener<Void> listener) {
forceMergeClusterStateUpdateTaskQueue.submitTask(
Strings.format("Adding force merge complete marker to cluster state for [%s]", targetIndex),
new UpdateForceMergeCompleteTask(listener, projectId, targetIndex, threadPool),
null
);
}
/*
* Returns true if a value has been set for the custom index metadata field "force_merge_completed_timestamp" within the field
* "data_stream_lifecycle".
*/
private static boolean isForceMergeComplete(IndexMetadata backingIndex) {
Map<String, String> customMetadata = backingIndex.getCustomData(LIFECYCLE_CUSTOM_INDEX_METADATA_KEY);
return customMetadata != null && customMetadata.containsKey(FORCE_MERGE_COMPLETED_TIMESTAMP_METADATA_KEY);
}
@Nullable
private static TimeValue getEffectiveRetention(
DataStream dataStream,
DataStreamGlobalRetentionSettings globalRetentionSettings,
boolean failureStore
) {
DataStreamLifecycle lifecycle = failureStore ? dataStream.getFailuresLifecycle() : dataStream.getDataLifecycle();
return lifecycle == null || lifecycle.enabled() == false
? null
: lifecycle.getEffectiveDataRetention(globalRetentionSettings.get(failureStore), dataStream.isInternal());
}
/**
* @return the duration of the last run in millis or null if the service hasn't completed a run yet.
*/
@Nullable
public Long getLastRunDuration() {
return lastRunDuration;
}
/**
* @return the time passed between the start times of the last two consecutive runs or null if the service hasn't started twice yet.
*/
@Nullable
public Long getTimeBetweenStarts() {
return timeBetweenStarts;
}
/**
* Action listener that records the encountered failure using the provided recordError callback for the
* provided target index. If the listener is notified of success it will clear the recorded entry for the provided
* target index using the clearErrorRecord callback.
*/
static | DataStreamLifecycleService |
java | apache__flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/processor/ForwardHashExchangeProcessor.java | {
"start": 3554,
"end": 14896
} | class ____ implements ExecNodeGraphProcessor {
@Override
public ExecNodeGraph process(ExecNodeGraph execGraph, ProcessorContext context) {
if (execGraph.getRootNodes().get(0) instanceof StreamExecNode) {
throw new TableException("StreamExecNode is not supported yet");
}
JobManagerOptions.SchedulerType schedulerType =
context.getPlanner()
.getExecEnv()
.getConfig()
.getSchedulerType()
.orElse(JobManagerOptions.SchedulerType.AdaptiveBatch);
if (schedulerType != JobManagerOptions.SchedulerType.AdaptiveBatch) {
return execGraph;
}
ReadableConfig tableConfig = context.getPlanner().getTableConfig();
ExecNodeVisitor visitor =
new AbstractExecNodeExactlyOnceVisitor() {
@Override
protected void visitNode(ExecNode<?> node) {
visitInputs(node);
if (node instanceof CommonExecExchange) {
return;
}
boolean changed = false;
List<ExecEdge> newEdges = new ArrayList<>(node.getInputEdges());
for (int i = 0; i < node.getInputProperties().size(); ++i) {
InputProperty inputProperty = node.getInputProperties().get(i);
RequiredDistribution requiredDistribution =
inputProperty.getRequiredDistribution();
ExecEdge edge = node.getInputEdges().get(i);
if (requiredDistribution.getType() != DistributionType.HASH) {
boolean visitChild =
requiredDistribution.getType()
== DistributionType.SINGLETON;
if (!hasExchangeInput(edge)
&& hasSortInputForInputSortedNode(node)) {
ExecEdge newEdge =
addExchangeAndReconnectEdge(
tableConfig,
edge,
inputProperty,
true,
visitChild);
newEdges.set(i, newEdge);
changed = true;
}
continue;
}
if (!hasExchangeInput(edge)) {
ExecEdge newEdge;
if (isInputSortedNode(node)) {
if (hasSortInputForInputSortedNode(node)) {
// add Exchange with keep_input_as_is distribution as the
// input of Sort
ExecNode<?> sort = edge.getSource();
ExecEdge newEdgeOfSort =
addExchangeAndReconnectEdge(
tableConfig,
sort.getInputEdges().get(0),
inputProperty,
false,
true);
sort.setInputEdges(
Collections.singletonList(newEdgeOfSort));
}
// if operation chaining is disabled, this could mark sure the
// sort node and its output can also be connected by
// ForwardPartitioner
newEdge =
addExchangeAndReconnectEdge(
tableConfig, edge, inputProperty, true, true);
} else {
// add Exchange with keep_input_as_is distribution as the input
// of the node
newEdge =
addExchangeAndReconnectEdge(
tableConfig, edge, inputProperty, false, true);
updateOriginalEdgeInMultipleInput(
node, i, (BatchExecExchange) newEdge.getSource());
}
// update the edge
newEdges.set(i, newEdge);
changed = true;
} else if (hasSortInputForInputSortedNode(node)) {
// if operation chaining is disabled, this could mark sure the sort
// node and its output can also be connected by ForwardPartitioner
ExecEdge newEdge =
addExchangeAndReconnectEdge(
tableConfig, edge, inputProperty, true, true);
newEdges.set(i, newEdge);
changed = true;
}
}
if (changed) {
node.setInputEdges(newEdges);
}
}
};
execGraph.getRootNodes().forEach(s -> s.accept(visitor));
return execGraph;
}
// TODO This implementation should be updated once FLINK-21224 is finished.
private ExecEdge addExchangeAndReconnectEdge(
ReadableConfig tableConfig,
ExecEdge edge,
InputProperty inputProperty,
boolean strict,
boolean visitChild) {
ExecNode<?> target = edge.getTarget();
ExecNode<?> source = edge.getSource();
if (source instanceof CommonExecExchange) {
return edge;
}
// only Calc, Correlate and Sort can propagate sort property and distribution property
if (visitChild
&& (source instanceof BatchExecCalc
|| source instanceof BatchExecPythonCalc
|| source instanceof BatchExecSort
|| source instanceof BatchExecCorrelate
|| source instanceof BatchExecPythonCorrelate)) {
ExecEdge newEdge =
addExchangeAndReconnectEdge(
tableConfig,
source.getInputEdges().get(0),
inputProperty,
strict,
true);
source.setInputEdges(Collections.singletonList(newEdge));
}
BatchExecExchange exchange =
createExchangeWithKeepInputAsIsDistribution(
tableConfig, inputProperty, strict, (RowType) edge.getOutputType());
ExecEdge newEdge =
new ExecEdge(source, exchange, edge.getShuffle(), edge.getExchangeMode());
exchange.setInputEdges(Collections.singletonList(newEdge));
return new ExecEdge(exchange, target, edge.getShuffle(), edge.getExchangeMode());
}
private BatchExecExchange createExchangeWithKeepInputAsIsDistribution(
ReadableConfig tableConfig,
InputProperty inputProperty,
boolean strict,
RowType outputRowType) {
InputProperty newInputProperty =
InputProperty.builder()
.requiredDistribution(
InputProperty.keepInputAsIsDistribution(
inputProperty.getRequiredDistribution(), strict))
.damBehavior(inputProperty.getDamBehavior())
.priority(inputProperty.getPriority())
.build();
return new BatchExecExchange(
tableConfig, newInputProperty, outputRowType, newInputProperty.toString());
}
private boolean hasExchangeInput(ExecEdge edge) {
ExecNode<?> input = edge.getSource();
if (hasSortInputForInputSortedNode(edge.getTarget())) {
// skip Sort node
input = input.getInputEdges().get(0).getSource();
}
return input instanceof CommonExecExchange;
}
private boolean hasSortInputForInputSortedNode(ExecNode<?> node) {
return isInputSortedNode(node)
&& node.getInputEdges().get(0).getSource() instanceof BatchExecSort;
}
private boolean isInputSortedNode(ExecNode<?> node) {
return node instanceof InputSortedExecNode;
}
private void updateOriginalEdgeInMultipleInput(
ExecNode<?> node, int edgeIdx, BatchExecExchange newExchange) {
if (node instanceof BatchExecMultipleInput) {
updateOriginalEdgeInMultipleInput((BatchExecMultipleInput) node, edgeIdx, newExchange);
}
}
/**
* Add new exchange node between the input node and the target node for the given edge, and
* reconnect the edges. So that the transformations can be connected correctly.
*/
private void updateOriginalEdgeInMultipleInput(
BatchExecMultipleInput multipleInput, int edgeIdx, BatchExecExchange newExchange) {
ExecEdge originalEdge = multipleInput.getOriginalEdges().get(edgeIdx);
ExecNode<?> inputNode = originalEdge.getSource();
ExecNode<?> targetNode = originalEdge.getTarget();
int edgeIdxInTargetNode = targetNode.getInputEdges().indexOf(originalEdge);
checkArgument(edgeIdxInTargetNode >= 0);
List<ExecEdge> newEdges = new ArrayList<>(targetNode.getInputEdges());
// connect input node to new exchange node
ExecEdge newEdge1 =
new ExecEdge(
inputNode,
newExchange,
originalEdge.getShuffle(),
originalEdge.getExchangeMode());
newExchange.setInputEdges(Collections.singletonList(newEdge1));
// connect new exchange node to target node
ExecEdge newEdge2 =
new ExecEdge(
newExchange,
targetNode,
originalEdge.getShuffle(),
originalEdge.getExchangeMode());
newEdges.set(edgeIdxInTargetNode, newEdge2);
targetNode.setInputEdges(newEdges);
// update the originalEdge in MultipleInput, this is need for multiple operator fusion
// codegen
multipleInput.getOriginalEdges().set(edgeIdx, newEdge2);
}
}
| ForwardHashExchangeProcessor |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/slotpool/AllocatedSlotPool.java | {
"start": 5085,
"end": 5527
} | interface ____ {
SlotInfo asSlotInfo();
/**
* Returns since when this slot is free.
*
* @return the time since when the slot is free
*/
long getFreeSince();
default AllocationID getAllocationId() {
return asSlotInfo().getAllocationId();
}
}
/** A collection of {@link AllocatedSlot AllocatedSlots} and their reservation status. */
| FreeSlotInfo |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/deser/dos/DeepJsonParsingTest.java | {
"start": 500,
"end": 3378
} | class ____ extends DatabindTestUtil
{
private final static int DEFAULT_MAX_DEPTH = StreamReadConstraints.DEFAULT_MAX_DEPTH;
private final static int TOO_DEEP_NESTING = DEFAULT_MAX_DEPTH + 1;
private final JsonFactory unconstrainedFactory = JsonFactory.builder()
.streamReadConstraints(StreamReadConstraints.builder().maxNestingDepth(Integer.MAX_VALUE).build())
.build();
private final ObjectMapper unconstrainedMapper = JsonMapper.builder(unconstrainedFactory).build();
private final ObjectMapper defaultMapper = JsonMapper.builder().build();
public void testParseWithArrayWithDefaultConfig() throws Exception
{
final String doc = _nestedDoc(TOO_DEEP_NESTING, "[ ", "] ");
try (JsonParser jp = defaultMapper.createParser(doc)) {
while (jp.nextToken() != null) { }
fail("expected StreamConstraintsException");
} catch (StreamConstraintsException e) {
assertThat(e.getMessage())
.startsWith("Document nesting depth ("+(DEFAULT_MAX_DEPTH+1)
+") exceeds the maximum allowed ("+DEFAULT_MAX_DEPTH);
}
}
public void testParseWithObjectWithDefaultConfig() throws Exception
{
final String doc = "{"+_nestedDoc(TOO_DEEP_NESTING, "\"x\":{", "} ") + "}";
try (JsonParser jp = defaultMapper.createParser(doc)) {
while (jp.nextToken() != null) { }
fail("expected StreamConstraintsException");
} catch (StreamConstraintsException e) {
assertThat(e.getMessage())
.startsWith("Document nesting depth ("+(DEFAULT_MAX_DEPTH+1)
+") exceeds the maximum allowed ("+DEFAULT_MAX_DEPTH);
}
}
public void testParseWithArrayWithUnconstrainedConfig() throws Exception
{
final String doc = _nestedDoc(TOO_DEEP_NESTING, "[ ", "] ");
try (JsonParser jp = unconstrainedMapper.createParser(doc)) {
while (jp.nextToken() != null) { }
}
}
public void testParseWithObjectWithUnconstrainedConfig() throws Exception
{
final String doc = "{"+_nestedDoc(TOO_DEEP_NESTING, "\"x\":{", "} ") + "}";
try (JsonParser jp = unconstrainedMapper.createParser(doc)) {
while (jp.nextToken() != null) { }
}
}
private String _nestedDoc(int nesting, String open, String close) {
StringBuilder sb = new StringBuilder(nesting * (open.length() + close.length()));
for (int i = 0; i < nesting; ++i) {
sb.append(open);
if ((i & 31) == 0) {
sb.append("\n");
}
}
for (int i = 0; i < nesting; ++i) {
sb.append(close);
if ((i & 31) == 0) {
sb.append("\n");
}
}
return sb.toString();
}
}
| DeepJsonParsingTest |
java | apache__camel | components/camel-spring-parent/camel-spring-main/src/test/java/org/apache/camel/spring/MyVetoLifecycle.java | {
"start": 997,
"end": 1269
} | class ____ extends LifecycleStrategySupport {
@Override
public void onContextStarting(CamelContext context) throws VetoCamelContextStartException {
throw new VetoCamelContextStartException("We do not like this route", context, false);
}
}
| MyVetoLifecycle |
java | bumptech__glide | annotation/compiler/test/src/test/resources/GlideExtensionOptionsTest/SkipStaticMethod/GlideOptions.java | {
"start": 1010,
"end": 15154
} | class ____ extends RequestOptions implements Cloneable {
private static GlideOptions fitCenterTransform0;
private static GlideOptions centerInsideTransform1;
private static GlideOptions centerCropTransform2;
private static GlideOptions circleCropTransform3;
private static GlideOptions noTransformation4;
private static GlideOptions noAnimation5;
/**
* @see RequestOptions#sizeMultiplierOf(float)
*/
@CheckResult
@NonNull
public static GlideOptions sizeMultiplierOf(@FloatRange(from = 0.0, to = 1.0) float value) {
return new GlideOptions().sizeMultiplier(value);
}
/**
* @see RequestOptions#diskCacheStrategyOf(DiskCacheStrategy)
*/
@CheckResult
@NonNull
public static GlideOptions diskCacheStrategyOf(@NonNull DiskCacheStrategy strategy) {
return new GlideOptions().diskCacheStrategy(strategy);
}
/**
* @see RequestOptions#priorityOf(Priority)
*/
@CheckResult
@NonNull
public static GlideOptions priorityOf(@NonNull Priority priority) {
return new GlideOptions().priority(priority);
}
/**
* @see RequestOptions#placeholderOf(Drawable)
*/
@CheckResult
@NonNull
public static GlideOptions placeholderOf(@Nullable Drawable drawable) {
return new GlideOptions().placeholder(drawable);
}
/**
* @see RequestOptions#placeholderOf(int)
*/
@CheckResult
@NonNull
public static GlideOptions placeholderOf(@DrawableRes int id) {
return new GlideOptions().placeholder(id);
}
/**
* @see RequestOptions#errorOf(Drawable)
*/
@CheckResult
@NonNull
public static GlideOptions errorOf(@Nullable Drawable drawable) {
return new GlideOptions().error(drawable);
}
/**
* @see RequestOptions#errorOf(int)
*/
@CheckResult
@NonNull
public static GlideOptions errorOf(@DrawableRes int id) {
return new GlideOptions().error(id);
}
/**
* @see RequestOptions#skipMemoryCacheOf(boolean)
*/
@CheckResult
@NonNull
public static GlideOptions skipMemoryCacheOf(boolean skipMemoryCache) {
return new GlideOptions().skipMemoryCache(skipMemoryCache);
}
/**
* @see RequestOptions#overrideOf(int, int)
*/
@CheckResult
@NonNull
public static GlideOptions overrideOf(int width, int height) {
return new GlideOptions().override(width, height);
}
/**
* @see RequestOptions#overrideOf(int)
*/
@CheckResult
@NonNull
public static GlideOptions overrideOf(int size) {
return new GlideOptions().override(size);
}
/**
* @see RequestOptions#signatureOf(Key)
*/
@CheckResult
@NonNull
public static GlideOptions signatureOf(@NonNull Key key) {
return new GlideOptions().signature(key);
}
/**
* @see RequestOptions#fitCenterTransform()
*/
@CheckResult
@NonNull
public static GlideOptions fitCenterTransform() {
if (GlideOptions.fitCenterTransform0 == null) {
GlideOptions.fitCenterTransform0 =
new GlideOptions().fitCenter().autoClone();
}
return GlideOptions.fitCenterTransform0;
}
/**
* @see RequestOptions#centerInsideTransform()
*/
@CheckResult
@NonNull
public static GlideOptions centerInsideTransform() {
if (GlideOptions.centerInsideTransform1 == null) {
GlideOptions.centerInsideTransform1 =
new GlideOptions().centerInside().autoClone();
}
return GlideOptions.centerInsideTransform1;
}
/**
* @see RequestOptions#centerCropTransform()
*/
@CheckResult
@NonNull
public static GlideOptions centerCropTransform() {
if (GlideOptions.centerCropTransform2 == null) {
GlideOptions.centerCropTransform2 =
new GlideOptions().centerCrop().autoClone();
}
return GlideOptions.centerCropTransform2;
}
/**
* @see RequestOptions#circleCropTransform()
*/
@CheckResult
@NonNull
public static GlideOptions circleCropTransform() {
if (GlideOptions.circleCropTransform3 == null) {
GlideOptions.circleCropTransform3 =
new GlideOptions().circleCrop().autoClone();
}
return GlideOptions.circleCropTransform3;
}
/**
* @see RequestOptions#bitmapTransform(Transformation)
*/
@CheckResult
@NonNull
public static GlideOptions bitmapTransform(@NonNull Transformation<Bitmap> transformation) {
return new GlideOptions().transform(transformation);
}
/**
* @see RequestOptions#noTransformation()
*/
@CheckResult
@NonNull
public static GlideOptions noTransformation() {
if (GlideOptions.noTransformation4 == null) {
GlideOptions.noTransformation4 =
new GlideOptions().dontTransform().autoClone();
}
return GlideOptions.noTransformation4;
}
/**
* @see RequestOptions#option(Option, T)
*/
@CheckResult
@NonNull
public static <T> GlideOptions option(@NonNull Option<T> option, @NonNull T t) {
return new GlideOptions().set(option, t);
}
/**
* @see RequestOptions#decodeTypeOf(Class)
*/
@CheckResult
@NonNull
public static GlideOptions decodeTypeOf(@NonNull Class<?> clazz) {
return new GlideOptions().decode(clazz);
}
/**
* @see RequestOptions#formatOf(DecodeFormat)
*/
@CheckResult
@NonNull
public static GlideOptions formatOf(@NonNull DecodeFormat format) {
return new GlideOptions().format(format);
}
/**
* @see RequestOptions#frameOf(long)
*/
@CheckResult
@NonNull
public static GlideOptions frameOf(@IntRange(from = 0) long value) {
return new GlideOptions().frame(value);
}
/**
* @see RequestOptions#downsampleOf(DownsampleStrategy)
*/
@CheckResult
@NonNull
public static GlideOptions downsampleOf(@NonNull DownsampleStrategy strategy) {
return new GlideOptions().downsample(strategy);
}
/**
* @see RequestOptions#timeoutOf(int)
*/
@CheckResult
@NonNull
public static GlideOptions timeoutOf(@IntRange(from = 0) int value) {
return new GlideOptions().timeout(value);
}
/**
* @see RequestOptions#encodeQualityOf(int)
*/
@CheckResult
@NonNull
public static GlideOptions encodeQualityOf(@IntRange(from = 0, to = 100) int value) {
return new GlideOptions().encodeQuality(value);
}
/**
* @see RequestOptions#encodeFormatOf(CompressFormat)
*/
@CheckResult
@NonNull
public static GlideOptions encodeFormatOf(@NonNull Bitmap.CompressFormat format) {
return new GlideOptions().encodeFormat(format);
}
/**
* @see RequestOptions#noAnimation()
*/
@CheckResult
@NonNull
public static GlideOptions noAnimation() {
if (GlideOptions.noAnimation5 == null) {
GlideOptions.noAnimation5 =
new GlideOptions().dontAnimate().autoClone();
}
return GlideOptions.noAnimation5;
}
@Override
@NonNull
@CheckResult
public GlideOptions sizeMultiplier(@FloatRange(from = 0.0, to = 1.0) float value) {
return (GlideOptions) super.sizeMultiplier(value);
}
@Override
@NonNull
@CheckResult
public GlideOptions useUnlimitedSourceGeneratorsPool(boolean flag) {
return (GlideOptions) super.useUnlimitedSourceGeneratorsPool(flag);
}
@Override
@NonNull
@CheckResult
public GlideOptions useAnimationPool(boolean flag) {
return (GlideOptions) super.useAnimationPool(flag);
}
@Override
@NonNull
@CheckResult
public GlideOptions onlyRetrieveFromCache(boolean flag) {
return (GlideOptions) super.onlyRetrieveFromCache(flag);
}
@Override
@NonNull
@CheckResult
public GlideOptions diskCacheStrategy(@NonNull DiskCacheStrategy strategy) {
return (GlideOptions) super.diskCacheStrategy(strategy);
}
@Override
@NonNull
@CheckResult
public GlideOptions priority(@NonNull Priority priority) {
return (GlideOptions) super.priority(priority);
}
@Override
@NonNull
@CheckResult
public GlideOptions placeholder(@Nullable Drawable drawable) {
return (GlideOptions) super.placeholder(drawable);
}
@Override
@NonNull
@CheckResult
public GlideOptions placeholder(@DrawableRes int id) {
return (GlideOptions) super.placeholder(id);
}
@Override
@NonNull
@CheckResult
public GlideOptions fallback(@Nullable Drawable drawable) {
return (GlideOptions) super.fallback(drawable);
}
@Override
@NonNull
@CheckResult
public GlideOptions fallback(@DrawableRes int id) {
return (GlideOptions) super.fallback(id);
}
@Override
@NonNull
@CheckResult
public GlideOptions error(@Nullable Drawable drawable) {
return (GlideOptions) super.error(drawable);
}
@Override
@NonNull
@CheckResult
public GlideOptions error(@DrawableRes int id) {
return (GlideOptions) super.error(id);
}
@Override
@NonNull
@CheckResult
public GlideOptions theme(@Nullable Resources.Theme theme) {
return (GlideOptions) super.theme(theme);
}
@Override
@NonNull
@CheckResult
public GlideOptions skipMemoryCache(boolean skip) {
return (GlideOptions) super.skipMemoryCache(skip);
}
@Override
@NonNull
@CheckResult
public GlideOptions override(int width, int height) {
return (GlideOptions) super.override(width, height);
}
@Override
@NonNull
@CheckResult
public GlideOptions override(int size) {
return (GlideOptions) super.override(size);
}
@Override
@NonNull
@CheckResult
public GlideOptions signature(@NonNull Key key) {
return (GlideOptions) super.signature(key);
}
@Override
@CheckResult
public GlideOptions clone() {
return (GlideOptions) super.clone();
}
@Override
@NonNull
@CheckResult
public <Y> GlideOptions set(@NonNull Option<Y> option, @NonNull Y y) {
return (GlideOptions) super.set(option, y);
}
@Override
@NonNull
@CheckResult
public GlideOptions decode(@NonNull Class<?> clazz) {
return (GlideOptions) super.decode(clazz);
}
@Override
@NonNull
@CheckResult
public GlideOptions encodeFormat(@NonNull Bitmap.CompressFormat format) {
return (GlideOptions) super.encodeFormat(format);
}
@Override
@NonNull
@CheckResult
public GlideOptions encodeQuality(@IntRange(from = 0, to = 100) int value) {
return (GlideOptions) super.encodeQuality(value);
}
@Override
@NonNull
@CheckResult
public GlideOptions frame(@IntRange(from = 0) long value) {
return (GlideOptions) super.frame(value);
}
@Override
@NonNull
@CheckResult
public GlideOptions format(@NonNull DecodeFormat format) {
return (GlideOptions) super.format(format);
}
@Override
@NonNull
@CheckResult
public GlideOptions disallowHardwareConfig() {
return (GlideOptions) super.disallowHardwareConfig();
}
@Override
@NonNull
@CheckResult
public GlideOptions downsample(@NonNull DownsampleStrategy strategy) {
return (GlideOptions) super.downsample(strategy);
}
@Override
@NonNull
@CheckResult
public GlideOptions timeout(@IntRange(from = 0) int value) {
return (GlideOptions) super.timeout(value);
}
@Override
@NonNull
@CheckResult
public GlideOptions optionalCenterCrop() {
return (GlideOptions) super.optionalCenterCrop();
}
@Override
@NonNull
@CheckResult
public GlideOptions centerCrop() {
return (GlideOptions) super.centerCrop();
}
@Override
@NonNull
@CheckResult
public GlideOptions optionalFitCenter() {
return (GlideOptions) super.optionalFitCenter();
}
@Override
@NonNull
@CheckResult
public GlideOptions fitCenter() {
return (GlideOptions) super.fitCenter();
}
@Override
@NonNull
@CheckResult
public GlideOptions optionalCenterInside() {
return (GlideOptions) super.optionalCenterInside();
}
@Override
@NonNull
@CheckResult
public GlideOptions centerInside() {
return (GlideOptions) super.centerInside();
}
@Override
@NonNull
@CheckResult
public GlideOptions optionalCircleCrop() {
return (GlideOptions) super.optionalCircleCrop();
}
@Override
@NonNull
@CheckResult
public GlideOptions circleCrop() {
return (GlideOptions) super.circleCrop();
}
@Override
@NonNull
@CheckResult
public GlideOptions transform(@NonNull Transformation<Bitmap> transformation) {
return (GlideOptions) super.transform(transformation);
}
@Override
@SafeVarargs
@SuppressWarnings("varargs")
@NonNull
@CheckResult
public final GlideOptions transform(@NonNull Transformation<Bitmap>... transformations) {
return (GlideOptions) super.transform(transformations);
}
@Override
@SafeVarargs
@SuppressWarnings("varargs")
@Deprecated
@NonNull
@CheckResult
public final GlideOptions transforms(@NonNull Transformation<Bitmap>... transformations) {
return (GlideOptions) super.transforms(transformations);
}
@Override
@NonNull
@CheckResult
public GlideOptions optionalTransform(@NonNull Transformation<Bitmap> transformation) {
return (GlideOptions) super.optionalTransform(transformation);
}
@Override
@NonNull
@CheckResult
public <Y> GlideOptions optionalTransform(@NonNull Class<Y> clazz,
@NonNull Transformation<Y> transformation) {
return (GlideOptions) super.optionalTransform(clazz, transformation);
}
@Override
@NonNull
@CheckResult
public <Y> GlideOptions transform(@NonNull Class<Y> clazz,
@NonNull Transformation<Y> transformation) {
return (GlideOptions) super.transform(clazz, transformation);
}
@Override
@NonNull
@CheckResult
public GlideOptions dontTransform() {
return (GlideOptions) super.dontTransform();
}
@Override
@NonNull
@CheckResult
public GlideOptions dontAnimate() {
return (GlideOptions) super.dontAnimate();
}
@Override
@NonNull
@CheckResult
public GlideOptions apply(@NonNull BaseRequestOptions<?> options) {
return (GlideOptions) super.apply(options);
}
@Override
@NonNull
public GlideOptions lock() {
return (GlideOptions) super.lock();
}
@Override
@NonNull
public GlideOptions autoClone() {
return (GlideOptions) super.autoClone();
}
/**
* @see Extension#test(BaseRequestOptions)
*/
@SuppressWarnings("unchecked")
@CheckResult
@NonNull
public GlideOptions test() {
return (GlideOptions) Extension.test(this);
}
}
| GlideOptions |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/StatementSwitchToExpressionSwitchTest.java | {
"start": 12498,
"end": 13101
} | class ____ {
public void foo(Suit suit) {
for (; ; ) {
switch (suit) {
case HEART:
System.out.println("heart");
break;
case DIAMOND:
continue;
case SPADE:
return;
case CLUB:
throw new AssertionError();
}
}
}
}
""")
.addOutputLines(
"Test.java",
"""
| Test |
java | alibaba__nacos | core/src/test/java/com/alibaba/nacos/core/controller/v3/NacosClusterControllerV3Test.java | {
"start": 1965,
"end": 4601
} | class ____ {
private final MockEnvironment mockEnvironment = new MockEnvironment();
@InjectMocks
private NacosClusterControllerV3 nacosClusterControllerV3;
@Mock
private NacosClusterOperationService nacosClusterOperationService;
@BeforeEach
void setUp() {
EnvUtil.setEnvironment(mockEnvironment);
}
@Test
void testSelf() {
Member self = new Member();
when(nacosClusterOperationService.self()).thenReturn(self);
Result<Member> result = nacosClusterControllerV3.self();
assertEquals(ErrorCode.SUCCESS.getCode(), result.getCode());
assertEquals(self, result.getData());
}
@Test
void testListNodes() throws NacosException {
Member member1 = new Member();
member1.setIp("1.1.1.1");
member1.setPort(8848);
member1.setState(NodeState.DOWN);
Member member2 = new Member();
member2.setIp("2.2.2.2");
member2.setPort(8848);
List<Member> members = Arrays.asList(member1, member2);
Mockito.when(nacosClusterOperationService.listNodes(any(), any())).thenReturn(members);
Result<Collection<Member>> result = nacosClusterControllerV3.listNodes("1.1.1.1", null);
assertEquals(ErrorCode.SUCCESS.getCode(), result.getCode());
assertTrue(result.getData().stream().findFirst().isPresent());
assertEquals("1.1.1.1:8848", result.getData().stream().findFirst().get().getAddress());
}
@Test
void testUpdateNodes() throws NacosApiException {
Member member = new Member();
member.setIp("1.1.1.1");
member.setPort(8848);
member.setAddress("test");
when(nacosClusterOperationService.updateNodes(any())).thenReturn(true);
Result<Boolean> result = nacosClusterControllerV3.updateNodes(Collections.singletonList(member));
verify(nacosClusterOperationService).updateNodes(any());
assertEquals(ErrorCode.SUCCESS.getCode(), result.getCode());
assertTrue(result.getData());
}
@Test
void testUpdateLookup() throws NacosException {
LookupUpdateRequest request = new LookupUpdateRequest();
request.setType("test");
when(nacosClusterOperationService.updateLookup(any())).thenReturn(true);
Result<Boolean> result = nacosClusterControllerV3.updateLookup(request);
verify(nacosClusterOperationService).updateLookup(any());
assertEquals(ErrorCode.SUCCESS.getCode(), result.getCode());
assertTrue(result.getData());
}
}
| NacosClusterControllerV3Test |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/io/network/api/CancelCheckpointMarker.java | {
"start": 1336,
"end": 2740
} | class ____ extends RuntimeEvent {
/** The id of the checkpoint to be canceled. */
private final long checkpointId;
public CancelCheckpointMarker(long checkpointId) {
this.checkpointId = checkpointId;
}
public long getCheckpointId() {
return checkpointId;
}
// ------------------------------------------------------------------------
// These known and common event go through special code paths, rather than
// through generic serialization.
@Override
public void write(DataOutputView out) throws IOException {
throw new UnsupportedOperationException("this method should never be called");
}
@Override
public void read(DataInputView in) throws IOException {
throw new UnsupportedOperationException("this method should never be called");
}
// ------------------------------------------------------------------------
@Override
public int hashCode() {
return (int) (checkpointId ^ (checkpointId >>> 32));
}
@Override
public boolean equals(Object other) {
return other != null
&& other.getClass() == CancelCheckpointMarker.class
&& this.checkpointId == ((CancelCheckpointMarker) other).checkpointId;
}
@Override
public String toString() {
return "CancelCheckpointMarker " + checkpointId;
}
}
| CancelCheckpointMarker |
java | apache__avro | lang/java/avro/src/main/java/org/apache/avro/reflect/Stringable.java | {
"start": 1066,
"end": 1393
} | class ____ field should be represented by an Avro string. It's
* {@link Object#toString()} method will be used to convert it to a string, and
* its single String parameter constructor will be used to create instances.
*/
@Retention(RetentionPolicy.RUNTIME)
@Target({ ElementType.TYPE, ElementType.FIELD })
@Documented
public @ | or |
java | grpc__grpc-java | api/src/main/java/io/grpc/DecompressorRegistry.java | {
"start": 1213,
"end": 4995
} | class ____ {
static final Joiner ACCEPT_ENCODING_JOINER = Joiner.on(',');
public static DecompressorRegistry emptyInstance() {
return new DecompressorRegistry();
}
private static final DecompressorRegistry DEFAULT_INSTANCE =
emptyInstance()
.with(new Codec.Gzip(), true)
.with(Codec.Identity.NONE, false);
public static DecompressorRegistry getDefaultInstance() {
return DEFAULT_INSTANCE;
}
private final Map<String, DecompressorInfo> decompressors;
private final byte[] advertisedDecompressors;
/**
* Registers a decompressor for both decompression and message encoding negotiation. Returns a
* new registry.
*
* @param d The decompressor to register
* @param advertised If true, the message encoding will be listed in the Accept-Encoding header.
*/
public DecompressorRegistry with(Decompressor d, boolean advertised) {
return new DecompressorRegistry(d, advertised, this);
}
private DecompressorRegistry(Decompressor d, boolean advertised, DecompressorRegistry parent) {
String encoding = d.getMessageEncoding();
checkArgument(!encoding.contains(","), "Comma is currently not allowed in message encoding");
int newSize = parent.decompressors.size();
if (!parent.decompressors.containsKey(d.getMessageEncoding())) {
newSize++;
}
Map<String, DecompressorInfo> newDecompressors =
new LinkedHashMap<>(newSize);
for (DecompressorInfo di : parent.decompressors.values()) {
String previousEncoding = di.decompressor.getMessageEncoding();
if (!previousEncoding.equals(encoding)) {
newDecompressors.put(
previousEncoding, new DecompressorInfo(di.decompressor, di.advertised));
}
}
newDecompressors.put(encoding, new DecompressorInfo(d, advertised));
decompressors = Collections.unmodifiableMap(newDecompressors);
advertisedDecompressors = ACCEPT_ENCODING_JOINER.join(getAdvertisedMessageEncodings())
.getBytes(Charset.forName("US-ASCII"));
}
private DecompressorRegistry() {
decompressors = new LinkedHashMap<>(0);
advertisedDecompressors = new byte[0];
}
/**
* Provides a list of all message encodings that have decompressors available.
*/
public Set<String> getKnownMessageEncodings() {
return decompressors.keySet();
}
byte[] getRawAdvertisedMessageEncodings() {
return advertisedDecompressors;
}
/**
* Provides a list of all message encodings that have decompressors available and should be
* advertised.
*
* <p>The specification doesn't say anything about ordering, or preference, so the returned codes
* can be arbitrary.
*/
@ExperimentalApi("https://github.com/grpc/grpc-java/issues/1704")
public Set<String> getAdvertisedMessageEncodings() {
Set<String> advertisedDecompressors = new HashSet<>(decompressors.size());
for (Map.Entry<String, DecompressorInfo> entry : decompressors.entrySet()) {
if (entry.getValue().advertised) {
advertisedDecompressors.add(entry.getKey());
}
}
return Collections.unmodifiableSet(advertisedDecompressors);
}
/**
* Returns a decompressor for the given message encoding, or {@code null} if none has been
* registered.
*
* <p>This ignores whether the compressor is advertised. According to the spec, if we know how
* to process this encoding, we attempt to, regardless of whether or not it is part of the
* encodings sent to the remote host.
*/
@Nullable
public Decompressor lookupDecompressor(String messageEncoding) {
DecompressorInfo info = decompressors.get(messageEncoding);
return info != null ? info.decompressor : null;
}
/**
* Information about a decompressor.
*/
private static final | DecompressorRegistry |
java | spring-projects__spring-boot | buildpack/spring-boot-buildpack-platform/src/main/java/org/springframework/boot/buildpack/platform/docker/type/ImageArchive.java | {
"start": 2091,
"end": 8528
} | class ____ implements TarArchive {
private static final Instant WINDOWS_EPOCH_PLUS_SECOND = OffsetDateTime.of(1980, 1, 1, 0, 0, 1, 0, ZoneOffset.UTC)
.toInstant();
private static final DateTimeFormatter DATE_FORMATTER = DateTimeFormatter.ISO_ZONED_DATE_TIME
.withZone(ZoneOffset.UTC);
private static final String EMPTY_LAYER_NAME_PREFIX = "blank_";
private static final IOConsumer<Update> NO_UPDATES = (update) -> {
};
private final JsonMapper jsonMapper;
private final ImageConfig imageConfig;
private final Instant createDate;
private final @Nullable ImageReference tag;
private final String os;
private final @Nullable String architecture;
private final @Nullable String variant;
private final List<LayerId> existingLayers;
private final List<Layer> newLayers;
ImageArchive(JsonMapper jsonMapper, ImageConfig imageConfig, Instant createDate, @Nullable ImageReference tag,
String os, @Nullable String architecture, @Nullable String variant, List<LayerId> existingLayers,
List<Layer> newLayers) {
this.jsonMapper = jsonMapper;
this.imageConfig = imageConfig;
this.createDate = createDate;
this.tag = tag;
this.os = os;
this.architecture = architecture;
this.variant = variant;
this.existingLayers = existingLayers;
this.newLayers = newLayers;
}
/**
* Return the image config for the archive.
* @return the image config
*/
public ImageConfig getImageConfig() {
return this.imageConfig;
}
/**
* Return the create date of the archive.
* @return the create date
*/
public Instant getCreateDate() {
return this.createDate;
}
/**
* Return the tag of the archive.
* @return the tag
*/
public @Nullable ImageReference getTag() {
return this.tag;
}
@Override
public void writeTo(OutputStream outputStream) throws IOException {
TarArchive.of(this::write).writeTo(outputStream);
}
private void write(Layout writer) throws IOException {
List<LayerId> writtenLayers = writeLayers(writer);
String config = writeConfig(writer, writtenLayers);
writeManifest(writer, config, writtenLayers);
}
private List<LayerId> writeLayers(Layout writer) throws IOException {
for (int i = 0; i < this.existingLayers.size(); i++) {
writeEmptyLayer(writer, EMPTY_LAYER_NAME_PREFIX + i);
}
List<LayerId> writtenLayers = new ArrayList<>();
for (Layer layer : this.newLayers) {
writtenLayers.add(writeLayer(writer, layer));
}
return Collections.unmodifiableList(writtenLayers);
}
private void writeEmptyLayer(Layout writer, String name) throws IOException {
writer.file(name, Owner.ROOT, Content.of(""));
}
private LayerId writeLayer(Layout writer, Layer layer) throws IOException {
LayerId id = layer.getId();
writer.file(id.getHash() + ".tar", Owner.ROOT, layer);
return id;
}
private String writeConfig(Layout writer, List<LayerId> writtenLayers) throws IOException {
try {
ObjectNode config = createConfig(writtenLayers);
String json = this.jsonMapper.writeValueAsString(config).replace("\r\n", "\n");
MessageDigest digest = MessageDigest.getInstance("SHA-256");
InspectedContent content = InspectedContent.of(Content.of(json), digest::update);
String name = LayerId.ofSha256Digest(digest.digest()).getHash() + ".json";
writer.file(name, Owner.ROOT, content);
return name;
}
catch (NoSuchAlgorithmException ex) {
throw new IllegalStateException(ex);
}
}
private ObjectNode createConfig(List<LayerId> writtenLayers) {
ObjectNode config = this.jsonMapper.createObjectNode();
config.set("Config", this.imageConfig.getNodeCopy());
config.set("Created", config.stringNode(getCreatedDate()));
config.set("History", createHistory(writtenLayers));
config.set("Os", config.stringNode(this.os));
config.set("Architecture", config.stringNode(this.architecture));
config.set("Variant", config.stringNode(this.variant));
config.set("RootFS", createRootFs(writtenLayers));
return config;
}
private String getCreatedDate() {
return DATE_FORMATTER.format(this.createDate);
}
private JsonNode createHistory(List<LayerId> writtenLayers) {
ArrayNode history = this.jsonMapper.createArrayNode();
int size = this.existingLayers.size() + writtenLayers.size();
for (int i = 0; i < size; i++) {
history.addObject();
}
return history;
}
private JsonNode createRootFs(List<LayerId> writtenLayers) {
ObjectNode rootFs = this.jsonMapper.createObjectNode();
ArrayNode diffIds = rootFs.putArray("diff_ids");
this.existingLayers.stream().map(Object::toString).forEach(diffIds::add);
writtenLayers.stream().map(Object::toString).forEach(diffIds::add);
return rootFs;
}
private void writeManifest(Layout writer, String config, List<LayerId> writtenLayers) throws IOException {
ArrayNode manifest = createManifest(config, writtenLayers);
String manifestJson = this.jsonMapper.writeValueAsString(manifest);
writer.file("manifest.json", Owner.ROOT, Content.of(manifestJson));
}
private ArrayNode createManifest(String config, List<LayerId> writtenLayers) {
ArrayNode manifest = this.jsonMapper.createArrayNode();
ObjectNode entry = manifest.addObject();
entry.set("Config", entry.stringNode(config));
entry.set("Layers", getManifestLayers(writtenLayers));
if (this.tag != null) {
entry.set("RepoTags", entry.arrayNode().add(this.tag.toString()));
}
return manifest;
}
private ArrayNode getManifestLayers(List<LayerId> writtenLayers) {
ArrayNode layers = this.jsonMapper.createArrayNode();
for (int i = 0; i < this.existingLayers.size(); i++) {
layers.add(EMPTY_LAYER_NAME_PREFIX + i);
}
writtenLayers.stream().map((id) -> id.getHash() + ".tar").forEach(layers::add);
return layers;
}
/**
* Create a new {@link ImageArchive} based on an existing {@link Image}.
* @param image the image that this archive is based on
* @return the new image archive.
* @throws IOException on IO error
*/
public static ImageArchive from(Image image) throws IOException {
return from(image, NO_UPDATES);
}
/**
* Create a new {@link ImageArchive} based on an existing {@link Image}.
* @param image the image that this archive is based on
* @param update consumer to apply updates
* @return the new image archive.
* @throws IOException on IO error
*/
public static ImageArchive from(Image image, IOConsumer<Update> update) throws IOException {
return new Update(image).applyTo(update);
}
/**
* Update | ImageArchive |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/Mountd.java | {
"start": 1394,
"end": 1846
} | class ____ extends MountdBase {
public Mountd(NfsConfiguration config, DatagramSocket registrationSocket,
boolean allowInsecurePorts) throws IOException {
super(new RpcProgramMountd(config, registrationSocket, allowInsecurePorts));
}
public static void main(String[] args) throws IOException {
NfsConfiguration config = new NfsConfiguration();
Mountd mountd = new Mountd(config, null, true);
mountd.start(true);
}
}
| Mountd |
java | google__error-prone | core/src/main/java/com/google/errorprone/bugpatterns/AbstractJUnit4InitMethodNotRun.java | {
"start": 2033,
"end": 2439
} | class ____ extends BugChecker implements MethodTreeMatcher {
private static final String JUNIT_TEST = "org.junit.Test";
/**
* Returns a matcher that selects which methods this matcher applies to (e.g. public void setUp()
* without @Before/@BeforeClass annotation)
*/
protected abstract Matcher<MethodTree> methodMatcher();
/**
* Returns the fully qualified | AbstractJUnit4InitMethodNotRun |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.