language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | spring-projects__spring-security | oauth2/oauth2-core/src/main/java/org/springframework/security/oauth2/core/oidc/OidcIdToken.java | {
"start": 5158,
"end": 5282
} | class ____ in the resulting
* {@link OidcIdToken}
* @param authenticationContextClass The authentication context | reference |
java | quarkusio__quarkus | extensions/arc/deployment/src/test/java/io/quarkus/arc/test/lookup/LookupConditionOnProducersTest.java | {
"start": 1123,
"end": 1201
} | interface ____ {
String ping();
}
@Singleton
static | Service |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/UnnecessarilyFullyQualifiedTest.java | {
"start": 4718,
"end": 4894
} | class ____ {}
}
""")
.expectUnchanged()
.addInputLines(
"Test.java",
"""
package b;
| Builder |
java | alibaba__fastjson | src/test/java/com/alibaba/fastjson/deserializer/issues3796/bean/ObjectH.java | {
"start": 95,
"end": 1902
} | class ____ {
private int a;
private long b;
private int c;
private int d;
private int e;
private int f;
private int g;
private int h;
private int i;
private int j;
private int k;
private int l;
private List<ObjectH_A> m;
private int n;
private int o;
private boolean p = false;
private boolean q = false;
public int getA() {
return a;
}
public void setA(int a) {
this.a = a;
}
public long getB() {
return b;
}
public void setB(long b) {
this.b = b;
}
public int getC() {
return c;
}
public void setC(int c) {
this.c = c;
}
public int getD() {
return d;
}
public void setD(int d) {
this.d = d;
}
public int getE() {
return e;
}
public void setE(int e) {
this.e = e;
}
public int getF() {
return f;
}
public void setF(int f) {
this.f = f;
}
public int getG() {
return g;
}
public void setG(int g) {
this.g = g;
}
public int getH() {
return h;
}
public void setH(int h) {
this.h = h;
}
public int getI() {
return i;
}
public void setI(int i) {
this.i = i;
}
public int getJ() {
return j;
}
public void setJ(int j) {
this.j = j;
}
public int getK() {
return k;
}
public void setK(int k) {
this.k = k;
}
public int getL() {
return l;
}
public void setL(int l) {
this.l = l;
}
public List<ObjectH_A> getM() {
return m;
}
public void setM(List<ObjectH_A> m) {
this.m = m;
}
public int getN() {
return n;
}
public void setN(int n) {
this.n = n;
}
public int getO() {
return o;
}
public void setO(int o) {
this.o = o;
}
public boolean isP() {
return p;
}
public void setP(boolean p) {
this.p = p;
}
public boolean isQ() {
return q;
}
public void setQ(boolean q) {
this.q = q;
}
}
| ObjectH |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/rest/RestServerEndpointITCase.java | {
"start": 39968,
"end": 40524
} | class ____ extends MessagePathParameter<JobID> {
FaultyJobIDPathParameter() {
super(JOB_ID_KEY);
}
@Override
protected JobID convertFromString(String value) throws ConversionException {
return JobID.fromHexString(value);
}
@Override
protected String convertToString(JobID value) {
return "foobar";
}
@Override
public String getDescription() {
return "faulty JobID parameter";
}
}
static | FaultyJobIDPathParameter |
java | spring-projects__spring-framework | spring-webflux/src/main/java/org/springframework/web/reactive/result/method/RequestMappingInfoHandlerMapping.java | {
"start": 13709,
"end": 14610
} | class ____ {
private final HttpHeaders headers = new HttpHeaders();
public HttpOptionsHandler(Set<HttpMethod> declaredMethods, Set<MediaType> acceptPatch) {
this.headers.setAllow(initAllowedHttpMethods(declaredMethods));
this.headers.setAcceptPatch(new ArrayList<>(acceptPatch));
}
private static Set<HttpMethod> initAllowedHttpMethods(Set<HttpMethod> declaredMethods) {
if (declaredMethods.isEmpty()) {
return Stream.of(HttpMethod.values())
.filter(method -> !HttpMethod.TRACE.equals(method))
.collect(Collectors.toSet());
}
else {
Set<HttpMethod> result = new LinkedHashSet<>(declaredMethods);
if (result.contains(HttpMethod.GET)) {
result.add(HttpMethod.HEAD);
}
result.add(HttpMethod.OPTIONS);
return result;
}
}
@SuppressWarnings("unused")
public HttpHeaders handle() {
return this.headers;
}
}
}
| HttpOptionsHandler |
java | apache__camel | components/camel-avro-rpc/camel-avro-rpc-component/src/test/java/org/apache/camel/component/avro/AvroNettySpringConsumerTest.java | {
"start": 1237,
"end": 2152
} | class ____ extends AvroNettyConsumerTest {
private AbstractApplicationContext applicationContext;
@Override
public void doPostSetup() {
keyValue = (KeyValueProtocolImpl) applicationContext.getBean("keyValue");
testReflection = (TestReflectionImpl) applicationContext.getBean("testReflection");
}
@Override
public void doPostTearDown() {
IOHelper.close(applicationContext);
}
@Override
protected CamelContext createCamelContext() throws Exception {
String xmlPath = "org/apache/camel/component/avro/netty-consumer/";
applicationContext = new ClassPathXmlApplicationContext(xmlPath + "base.xml", xmlPath + getRouteType().name() + ".xml");
return SpringCamelContext.springCamelContext(applicationContext, true);
}
@Override
public boolean isUseRouteBuilder() {
return false;
}
}
| AvroNettySpringConsumerTest |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/injection/guice/internal/Annotations.java | {
"start": 1052,
"end": 2559
} | class ____ {
/**
* Returns true if the given annotation is retained at runtime.
*/
public static boolean isRetainedAtRuntime(Class<? extends Annotation> annotationType) {
Retention retention = annotationType.getAnnotation(Retention.class);
return retention != null && retention.value() == RetentionPolicy.RUNTIME;
}
/**
* Gets a key for the given type, member and annotations.
*/
public static Key<?> getKey(TypeLiteral<?> type, Member member, Annotation[] annotations, Errors errors) throws ErrorsException {
int numErrorsBefore = errors.size();
Annotation found = findBindingAnnotation(errors, member, annotations);
errors.throwIfNewErrors(numErrorsBefore);
return found == null ? Key.get(type) : Key.get(type, found);
}
/**
* Returns the binding annotation on {@code member}, or null if there isn't one.
*/
public static Annotation findBindingAnnotation(Errors errors, Member member, Annotation[] annotations) {
Annotation found = null;
for (Annotation annotation : annotations) {
if (annotation.annotationType().getAnnotation(BindingAnnotation.class) != null) {
if (found != null) {
errors.duplicateBindingAnnotations(member, found.annotationType(), annotation.annotationType());
} else {
found = annotation;
}
}
}
return found;
}
}
| Annotations |
java | spring-projects__spring-framework | spring-core/src/test/java/org/springframework/core/type/InheritedAnnotationsAnnotationMetadataTests.java | {
"start": 6133,
"end": 6220
} | interface ____ {
}
@MetaAnnotation
@Retention(RetentionPolicy.RUNTIME)
@ | MetaAnnotation |
java | alibaba__nacos | plugin-default-impl/nacos-default-auth-plugin/src/main/java/com/alibaba/nacos/plugin/auth/impl/users/NacosUserServiceDirectImpl.java | {
"start": 1277,
"end": 3413
} | class ____ extends AbstractCachedUserService implements NacosUserService {
private final UserPersistService userPersistService;
private final AuthConfigs authConfigs;
public NacosUserServiceDirectImpl(AuthConfigs authConfigs, UserPersistService userPersistService) {
super();
this.userPersistService = userPersistService;
this.authConfigs = authConfigs;
}
@Override
public UserDetails loadUserByUsername(String username) throws UsernameNotFoundException {
User user = getCachedUserMap().get(username);
if (!authConfigs.isCachingEnabled()) {
user = getUser(username);
}
if (user == null) {
throw new UsernameNotFoundException(String.format("User %s not found", username));
}
return new NacosUserDetails(user);
}
@Override
public void updateUserPassword(String username, String password) {
userPersistService.updateUserPassword(username, PasswordEncoderUtil.encode(password));
}
@Override
public Page<User> getUsers(int pageNo, int pageSize, String username) {
return userPersistService.getUsers(pageNo, pageSize, username);
}
@Override
public User getUser(String username) {
return userPersistService.findUserByUsername(username);
}
@Override
public List<String> findUserNames(String username) {
return userPersistService.findUserLikeUsername(username);
}
@Override
public void createUser(String username, String password, boolean encode) {
validateUserCredentials(username, password);
if (encode) {
password = PasswordEncoderUtil.encode(password);
}
userPersistService.createUser(username, password);
}
@Override
public void deleteUser(String username) {
userPersistService.deleteUser(username);
}
@Override
public Page<User> findUsers(String username, int pageNo, int pageSize) {
return userPersistService.findUsersLike4Page(username, pageNo, pageSize);
}
}
| NacosUserServiceDirectImpl |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/oracle/OracleSampleClauseTest.java | {
"start": 882,
"end": 1865
} | class ____ extends TestCase {
public void test_0() throws Exception {
String sql = "SELECT COUNT(*) * 10 FROM orders SAMPLE (10);";
String expected = "SELECT COUNT(*) * 10\n" + "FROM orders SAMPLE (10);";
OracleStatementParser parser = new OracleStatementParser(sql);
SQLSelectStatement stmt = (SQLSelectStatement) parser.parseStatementList().get(0);
String text = TestUtils.outputOracle(stmt);
assertEquals(expected, text);
}
public void test_1() throws Exception {
String sql = "SELECT COUNT(*) * 10 FROM orders SAMPLE (10) SEED (1);";
String expected = "SELECT COUNT(*) * 10\n" + "FROM orders SAMPLE (10) SEED (1);";
OracleStatementParser parser = new OracleStatementParser(sql);
SQLSelectStatement stmt = (SQLSelectStatement) parser.parseStatementList().get(0);
String text = TestUtils.outputOracle(stmt);
assertEquals(expected, text);
}
}
| OracleSampleClauseTest |
java | spring-projects__spring-framework | spring-webflux/src/main/java/org/springframework/web/reactive/result/method/annotation/AbstractMessageReaderArgumentResolver.java | {
"start": 2758,
"end": 3268
} | class ____ argument resolvers that resolve method arguments
* by reading the request body with an {@link HttpMessageReader}.
*
* <p>Applies validation if the method argument is annotated with any
* {@linkplain org.springframework.validation.annotation.ValidationAnnotationUtils#determineValidationHints
* annotations that trigger validation}. Validation failure results in a
* {@link ServerWebInputException}.
*
* @author Rossen Stoyanchev
* @author Sebastien Deleuze
* @since 5.0
*/
public abstract | for |
java | quarkusio__quarkus | test-framework/junit5/src/main/java/io/quarkus/test/junit/ClassCoercingTestProfile.java | {
"start": 243,
"end": 3416
} | class ____ implements QuarkusTestProfile {
private final QuarkusTestProfile profile;
private final Object uncast;
public ClassCoercingTestProfile(Object uncast) {
this.uncast = uncast;
if (uncast instanceof QuarkusTestProfile) {
this.profile = (QuarkusTestProfile) uncast;
} else {
this.profile = null;
}
}
private Object invokeReflectively(String methodName) {
// TODO instance variable these?
try {
Method method = uncast.getClass().getMethod(methodName);
return method.invoke(uncast);
} catch (InvocationTargetException | NoSuchMethodException | IllegalAccessException e) {
throw new RuntimeException(e);
}
}
@Override
public Set<Class<?>> getEnabledAlternatives() {
if (profile != null) {
return profile.getEnabledAlternatives();
} else {
return (Set<Class<?>>) invokeReflectively("getEnabledAlternatives");
}
}
@Override
public Map<String, String> getConfigOverrides() {
if (profile != null) {
return profile.getConfigOverrides();
} else {
return (Map<String, String>) invokeReflectively("getConfigOverrides");
}
}
@Override
public String getConfigProfile() {
if (profile != null) {
return profile.getConfigProfile();
} else {
return (String) invokeReflectively("getConfigProfile");
}
}
@Override
public List<TestResourceEntry> testResources() {
// TODO this is not safe, because testResources will be in the wrong class, so we would need to wrap them as well
if (profile != null) {
return profile.testResources();
} else {
return (List<TestResourceEntry>) invokeReflectively("testResources");
}
}
@Override
public boolean disableGlobalTestResources() {
if (profile != null) {
return profile.disableGlobalTestResources();
} else {
return (boolean) invokeReflectively("disableGlobalTestResources");
}
}
@Override
public Set<String> tags() {
if (profile != null) {
return profile.tags();
} else {
return (Set<String>) invokeReflectively("tags");
}
}
@Override
public String[] commandLineParameters() {
if (profile != null) {
return profile.commandLineParameters();
} else {
return (String[]) invokeReflectively("commandLineParameters");
}
}
@Override
public boolean runMainMethod() {
if (profile != null) {
return profile.runMainMethod();
} else {
return (boolean) invokeReflectively("runMainMethod");
}
}
@Override
public boolean disableApplicationLifecycleObservers() {
if (profile != null) {
return profile.disableApplicationLifecycleObservers();
} else {
return (boolean) invokeReflectively("disableApplicationLifecycleObservers");
}
}
}
| ClassCoercingTestProfile |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/ReferenceCounted.java | {
"start": 1195,
"end": 1309
} | class ____ designed to be high-performance, lock-free and
* thread-safe.
*/
@Internal
@ThreadSafe
public abstract | is |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/issue_1400/Issue1429.java | {
"start": 1288,
"end": 1387
} | class ____ {
public int id;
public int age;
public String name;
}
}
| Student |
java | apache__camel | components/camel-box/camel-box-component/src/main/java/org/apache/camel/component/box/internal/BoxConnectionHelper.java | {
"start": 2045,
"end": 12934
} | class ____
}
public static BoxAPIConnection createConnection(final BoxConfiguration configuration) {
if (configuration.getAuthenticationType() == null) {
throw new RuntimeCamelException(
"Box API connection failed: Authentication type not specified in configuration");
}
switch (configuration.getAuthenticationType()) {
case BoxConfiguration.APP_ENTERPRISE_AUTHENTICATION:
return createAppEnterpriseAuthenticatedConnection(configuration);
case BoxConfiguration.APP_USER_AUTHENTICATION:
return createAppUserAuthenticatedConnection(configuration);
case BoxConfiguration.STANDARD_AUTHENTICATION:
return createStandardAuthenticatedConnection(configuration);
default:
throw new RuntimeCamelException(
String.format("Box API connection failed: Invalid authentication type '%s'",
configuration.getAuthenticationType()));
}
}
public static BoxAPIConnection createStandardAuthenticatedConnection(BoxConfiguration configuration) {
// authorize application on user's behalf
try {
//prepare proxy parameter
final Proxy proxy;
final Map<String, Object> httpParams = configuration.getHttpParams();
if (httpParams != null && httpParams.get("http.route.default-proxy") != null) {
final HttpHost proxyHost = (HttpHost) httpParams.get("http.route.default-proxy");
final Boolean socksProxy = (Boolean) httpParams.get("http.route.socks-proxy");
SocketAddress proxyAddr = new InetSocketAddress(proxyHost.getHostName(), proxyHost.getPort());
if (socksProxy != null && socksProxy) {
proxy = new Proxy(Proxy.Type.SOCKS, proxyAddr);
} else {
proxy = new Proxy(Proxy.Type.HTTP, proxyAddr);
}
} else {
proxy = null;
}
// generate anti-forgery token to prevent/detect CSRF attack
final String csrfToken = String.valueOf(new SecureRandom().nextLong());
final String authorizeUrl = authorizationUrl(configuration.getClientId(), csrfToken);
//load loginPage
final Connection.Response loginPageResponse
= addProxy(Jsoup.connect(authorizeUrl), proxy).method(Connection.Method.GET).execute();
final Document loginPage = loginPageResponse.parse();
validatePage(loginPage);
//fill login form
final FormElement loginForm = (FormElement) loginPage.select("form[name=login_form]").first();
final Element loginField = loginForm.select("input[name=login]").first();
loginField.val(configuration.getUserName());
final Element passwordField = loginForm.select("input[name=password]").first();
passwordField.val(configuration.getUserPassword());
//submit loginPage
final Map<String, String> cookies = new HashMap<>(loginPageResponse.cookies());
Connection.Response response = addProxy(loginForm.submit(), proxy)
.cookies(cookies)
.execute();
cookies.putAll(response.cookies());
final Document consentPage = response.parse();
//possible invalid credentials error
validatePage(consentPage);
final FormElement consentForm = (FormElement) consentPage.select("form[name=consent_form]").first();
//remove reject input
consentForm.elements().removeIf(e -> e.attr("name").equals("consent_reject"));
//parse request_token from javascript from head, it is the first script in the header
final String requestTokenScript = consentPage.select("script").first().html();
final Matcher m = Pattern.compile("var\\s+request_token\\s+=\\s+'([^'].+)'.*").matcher(requestTokenScript);
String requestToken = "";
if (m.find()) {
requestToken = m.group(1);
}
response = addProxy(consentForm.submit(), proxy)
.data("request_token", requestToken)
.followRedirects(false)
.cookies(cookies)
.execute();
final String location = response.header("Location");
final Map<String, String> params = new HashMap<>();
final Matcher matcher = QUERY_PARAM_PATTERN.matcher(URI.create(location).toURL().getQuery());
while (matcher.find()) {
params.put(matcher.group(1), matcher.group(2));
}
final String state = params.get("state");
if (!csrfToken.equals(state)) {
throw new SecurityException("Invalid CSRF code!");
} else {
// get authorization code
final String authorizationCode = params.get("code");
return new BoxAPIConnection(
configuration.getClientId(), configuration.getClientSecret(),
authorizationCode);
}
} catch (BoxAPIException e) {
throw new RuntimeCamelException(
String.format("Box API connection failed: API returned the error code %d%n%n%s",
e.getResponseCode(), e.getResponse()),
e);
} catch (Exception e) {
throw new RuntimeCamelException(String.format("Box API connection failed: %s", e.getMessage()), e);
}
}
/**
* Validation of page: - detects CAPTCHA test - detects 2-step verification - detects invalid credentials error -
* detects wrong clientId error
*/
private static void validatePage(Document page) {
// CAPTCHA
Elements captchaDivs = page.select("div[class*=g-recaptcha]");
if (!captchaDivs.isEmpty()) {
throw new IllegalArgumentException(
"Authentication requires CAPTCHA test. First you need to authenticate the account manually via web to unlock CAPTCHA.");
}
// 2-step verification
Elements twoStepDivs = page.select("div[data-module=two-factor-enroll-form]");
if (!twoStepDivs.isEmpty()) {
throw new IllegalArgumentException(
"2-step verification is enabled on the Box account. Turn it off for camel-box to proceed the standard authentication.");
}
// login failures
Elements errorDivs = page.select("div[class*=error_message]");
String errorMessage = null;
if (!errorDivs.isEmpty()) {
errorMessage = errorDivs.first().text().replaceAll("\\s+", " ")
.replace(" Show Error Details", ":").trim();
} else {
errorDivs = page.select("div[class*=message]");
if (!errorDivs.isEmpty()) {
errorMessage = errorDivs.first().text();
}
}
if (!errorDivs.isEmpty()) {
throw new IllegalArgumentException("Error authorizing application: " + errorMessage);
}
}
/**
* Helper method to add proxy into JSoup connection
*/
private static Connection addProxy(Connection connection, Proxy proxy) {
if (proxy != null) {
return connection.proxy(proxy);
}
return connection;
}
public static BoxAPIConnection createAppUserAuthenticatedConnection(BoxConfiguration configuration) {
// Create Encryption Preferences
JWTEncryptionPreferences encryptionPref = new JWTEncryptionPreferences();
encryptionPref.setPublicKeyID(configuration.getPublicKeyId());
try {
encryptionPref.setPrivateKey(new String(Files.readAllBytes(Paths.get(configuration.getPrivateKeyFile()))));
} catch (Exception e) {
throw new RuntimeCamelException("Box API connection failed: could not read privateKeyFile", e);
}
encryptionPref.setPrivateKeyPassword(configuration.getPrivateKeyPassword());
encryptionPref.setEncryptionAlgorithm(configuration.getEncryptionAlgorithm());
IAccessTokenCache accessTokenCache = configuration.getAccessTokenCache();
if (accessTokenCache == null) {
accessTokenCache = new InMemoryLRUAccessTokenCache(configuration.getMaxCacheEntries());
}
try {
return BoxDeveloperEditionAPIConnection.getUserConnection(configuration.getUserId(),
configuration.getClientId(), configuration.getClientSecret(), encryptionPref, accessTokenCache);
} catch (BoxAPIException e) {
throw new RuntimeCamelException(
String.format("Box API connection failed: API returned the error code %d%n%n%s",
e.getResponseCode(), e.getResponse()),
e);
}
}
public static BoxAPIConnection createAppEnterpriseAuthenticatedConnection(BoxConfiguration configuration) {
// Create Encryption Preferences
JWTEncryptionPreferences encryptionPref = new JWTEncryptionPreferences();
encryptionPref.setPublicKeyID(configuration.getPublicKeyId());
try {
encryptionPref.setPrivateKey(new String(Files.readAllBytes(Paths.get(configuration.getPrivateKeyFile()))));
} catch (Exception e) {
throw new RuntimeCamelException("Box API connection failed: could not read privateKeyFile", e);
}
encryptionPref.setPrivateKeyPassword(configuration.getPrivateKeyPassword());
encryptionPref.setEncryptionAlgorithm(configuration.getEncryptionAlgorithm());
IAccessTokenCache accessTokenCache = configuration.getAccessTokenCache();
if (accessTokenCache == null) {
accessTokenCache = new InMemoryLRUAccessTokenCache(configuration.getMaxCacheEntries());
}
try {
return BoxDeveloperEditionAPIConnection.getAppEnterpriseConnection(configuration.getEnterpriseId(),
configuration.getClientId(), configuration.getClientSecret(), encryptionPref, accessTokenCache);
} catch (BoxAPIException e) {
throw new RuntimeCamelException(
String.format("Box API connection failed: API returned the error code %d%n%n%s",
e.getResponseCode(), e.getResponse()),
e);
}
}
public static String authorizationUrl(String clientId, String stateToken) {
return "https://account.box.com/api/oauth2/authorize?response_type=code&redirect_url=https%3A%2F%2Flocalhost%2F&client_id="
+ clientId + "&state=" + stateToken;
}
}
| constructor |
java | elastic__elasticsearch | x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/PolicyStepsRegistry.java | {
"start": 2243,
"end": 21021
} | class ____ {
private static final Logger logger = LogManager.getLogger(PolicyStepsRegistry.class);
private final NamedXContentRegistry xContentRegistry;
private final Client client;
private final XPackLicenseState licenseState;
// keeps track of existing policies in the cluster state
private final SortedMap<String, LifecyclePolicyMetadata> lifecyclePolicyMap;
// keeps track of what the first step in a policy is, the key is policy name
private final Map<String, Step> firstStepMap;
// keeps track of a mapping from policy/step-name to respective Step, the key is policy name
private final Map<String, Map<Step.StepKey, Step>> stepMap;
// tracks an index->step cache, where the indexmetadata is also tracked for cache invalidation/eviction purposes.
// for a given index, the step can be cached as long as the indexmetadata (and the policy!) hasn't changed. since
// policies change infrequently, the entire cache is cleared on policy change.
private final Map<Index, Tuple<IndexMetadata, Step>> cachedSteps = new ConcurrentHashMap<>();
public PolicyStepsRegistry(NamedXContentRegistry xContentRegistry, Client client, XPackLicenseState licenseState) {
this(new TreeMap<>(), new HashMap<>(), new HashMap<>(), xContentRegistry, client, licenseState);
}
PolicyStepsRegistry(
SortedMap<String, LifecyclePolicyMetadata> lifecyclePolicyMap,
Map<String, Step> firstStepMap,
Map<String, Map<Step.StepKey, Step>> stepMap,
NamedXContentRegistry xContentRegistry,
Client client,
XPackLicenseState licenseState
) {
this.lifecyclePolicyMap = lifecyclePolicyMap;
this.firstStepMap = firstStepMap;
this.stepMap = stepMap;
this.xContentRegistry = xContentRegistry;
this.client = client;
this.licenseState = licenseState;
}
SortedMap<String, LifecyclePolicyMetadata> getLifecyclePolicyMap() {
return lifecyclePolicyMap;
}
Map<String, Step> getFirstStepMap() {
return firstStepMap;
}
Map<String, Map<Step.StepKey, Step>> getStepMap() {
return stepMap;
}
public void update(IndexLifecycleMetadata meta) {
assert meta != null : "IndexLifecycleMetadata cannot be null when updating the policy steps registry";
DiffableUtils.MapDiff<String, LifecyclePolicyMetadata, Map<String, LifecyclePolicyMetadata>> mapDiff = DiffableUtils.diff(
lifecyclePolicyMap,
meta.getPolicyMetadatas(),
DiffableUtils.getStringKeySerializer(),
// Use a non-diffable value serializer. Otherwise actions in the same
// action and phase that are changed show up as diffs instead of upserts.
// We want to treat any change in the policy as an upsert so the map is
// correctly rebuilt
new DiffableUtils.NonDiffableValueSerializer<>() {
@Override
public void write(LifecyclePolicyMetadata value, StreamOutput out) {
// This is never called
throw new UnsupportedOperationException("should never be called");
}
@Override
public LifecyclePolicyMetadata read(StreamInput in, String key) {
// This is never called
throw new UnsupportedOperationException("should never be called");
}
}
);
for (String deletedPolicyName : mapDiff.getDeletes()) {
lifecyclePolicyMap.remove(deletedPolicyName);
firstStepMap.remove(deletedPolicyName);
stepMap.remove(deletedPolicyName);
}
if (mapDiff.getUpserts().isEmpty() == false) {
for (var entry : mapDiff.getUpserts()) {
LifecyclePolicyMetadata policyMetadata = entry.getValue();
LifecyclePolicySecurityClient policyClient = new LifecyclePolicySecurityClient(
client,
ClientHelper.INDEX_LIFECYCLE_ORIGIN,
policyMetadata.getHeaders()
);
lifecyclePolicyMap.put(policyMetadata.getName(), policyMetadata);
List<Step> policyAsSteps = policyMetadata.getPolicy().toSteps(policyClient, licenseState);
if (policyAsSteps.isEmpty() == false) {
firstStepMap.put(policyMetadata.getName(), policyAsSteps.get(0));
final Map<Step.StepKey, Step> stepMapForPolicy = new LinkedHashMap<>();
for (Step step : policyAsSteps) {
assert ErrorStep.NAME.equals(step.getKey().name()) == false : "unexpected error step in policy";
stepMapForPolicy.put(step.getKey(), step);
}
logger.trace(
"updating cached steps for [{}] policy, new steps: {}",
policyMetadata.getName(),
stepMapForPolicy.keySet()
);
stepMap.put(policyMetadata.getName(), stepMapForPolicy);
}
}
}
// Since the policies (may have) changed, the whole steps cache needs to be thrown out.
// We do this after we update `lifecyclePolicyMap` to ensure `cachedSteps` does not contain outdated data.
// This means we may clear up-to-date data, but that's a lot better than the cache containing outdated entries indefinitely.
cachedSteps.clear();
}
/**
* Remove the entry for an index from the index->step cache.
*
* We clear the map entirely when the master of the cluster changes, and when any
* policy changes, but in a long-lived cluster that doesn't happen to experience
* either of those events (and where indices are removed regularly) we still want
* the cache to trim deleted indices.
*
* n.b. even with this, there's still a pretty small chance that a given index
* could leak, if we're right in the middle of populating the cache for that
* index (in getStep) when we process the delete here, then we'll end up with an
* entry that doesn't get deleted until the master changes or a policy changes
* -- it's harmless enough
*/
public void delete(Index deleted) {
cachedSteps.remove(deleted);
}
/**
* Clear internal maps that were populated by update (and others).
*/
public void clear() {
// this is potentially large, so it's important to clear it
cachedSteps.clear();
// these are relatively small, but there's no harm in clearing them
lifecyclePolicyMap.clear();
firstStepMap.clear();
stepMap.clear();
}
/**
* Return all ordered steps for the current policy for the index. Does not
* resolve steps using the phase caching, but only for the currently existing policy.
*/
private List<Step> getAllStepsForIndex(ProjectMetadata project, Index index) {
if (project.hasIndex(index) == false) {
throw new IllegalArgumentException("index " + index + " does not exist in the current cluster state");
}
final IndexMetadata indexMetadata = project.index(index);
final String policyName = indexMetadata.getLifecyclePolicyName();
final LifecyclePolicyMetadata policyMetadata = lifecyclePolicyMap.get(policyName);
if (policyMetadata == null) {
throw new IllegalArgumentException("the policy [" + policyName + "] for index" + index + " does not exist");
}
final LifecyclePolicySecurityClient policyClient = new LifecyclePolicySecurityClient(
client,
ClientHelper.INDEX_LIFECYCLE_ORIGIN,
policyMetadata.getHeaders()
);
return policyMetadata.getPolicy().toSteps(policyClient, licenseState);
}
/**
* Given an index and a phase name, return the {@link Step.StepKey} for the
* first step in that phase, if it exists, or null otherwise.
*/
@Nullable
public Step.StepKey getFirstStepForPhase(ProjectMetadata project, Index index, String phase) {
return getAllStepsForIndex(project, index).stream()
.map(Step::getKey)
.filter(stepKey -> phase.equals(stepKey.phase()))
.findFirst()
.orElse(null);
}
/**
* Given an index, phase name, and action name, return the {@link Step.StepKey}
* for the first step in that phase, if it exists, or null otherwise.
*/
@Nullable
public Step.StepKey getFirstStepForPhaseAndAction(ProjectMetadata project, Index index, String phase, String action) {
return getAllStepsForIndex(project, index).stream()
.map(Step::getKey)
.filter(stepKey -> phase.equals(stepKey.phase()))
.filter(stepKey -> action.equals(stepKey.action()))
.findFirst()
.orElse(null);
}
/*
* Parses the step keys from the {@code phaseDef} for the given phase.
* ILM makes use of some implicit steps that belong to actions that we automatically inject
* (eg. unfollow and migrate) or special purpose steps like the phase `complete` step.
*
* The {@code phaseDef} is *mostly* a valid json we store in the lifecycle execution state. However,
* we have a few of exceptional cases:
* - null is treated as the `new` phase (see {@code InitializePolicyContextStep})
* - the `new` phase is not stored as json but ... "new"
* - there's a legacy step, the {@code TerminalPolicyStep} which is also not stored as json but as "completed"
* (note: this step exists only for BWC reasons as these days we move to the {@code PhaseCompleteStep} when reaching
* the end of the phase)
*
* This method returns **all** the steps that are part of the phase definition including the implicit steps.
*
* Returns null if there's a parsing error.
*/
@Nullable
public Set<Step.StepKey> parseStepKeysFromPhase(String policy, String currentPhase, String phaseDef) {
try {
String phaseDefNonNull = Objects.requireNonNullElse(phaseDef, InitializePolicyContextStep.INITIALIZATION_PHASE);
return parseStepsFromPhase(policy, currentPhase, phaseDefNonNull).stream().map(Step::getKey).collect(Collectors.toSet());
} catch (IOException e) {
logger.trace(
() -> Strings.format(
"unable to parse steps for policy [%s], phase [%s], and phase definition [%s]",
policy,
currentPhase,
phaseDef
),
e
);
return null;
}
}
/**
* The {@code phaseDef} is *mostly* a valid json we store in the lifecycle execution state. However,
* we have a few of exceptional cases:
* - null is treated as the `new` phase (see {@code InitializePolicyContextStep})
* - the `new` phase is not stored as json but ... "new"
* - there's a legacy step, the {@code TerminalPolicyStep} which is also not stored as json but as "completed"
* (note: this step exists only for BWC reasons as these days we move to the {@code PhaseCompleteStep} when reaching
* the end of the phase)
*/
private List<Step> parseStepsFromPhase(String policy, String currentPhase, String phaseDef) throws IOException {
    final PhaseExecutionInfo phaseExecutionInfo;
    final LifecyclePolicyMetadata policyMetadata = lifecyclePolicyMap.get(policy);
    if (policyMetadata == null) {
        throw new IllegalStateException("unable to parse steps for policy [" + policy + "] as it doesn't exist");
    }
    LifecyclePolicy currentPolicy = policyMetadata.getPolicy();
    final LifecyclePolicy policyToExecute;
    if (InitializePolicyContextStep.INITIALIZATION_PHASE.equals(phaseDef) || TerminalPolicyStep.COMPLETED_PHASE.equals(phaseDef)) {
        // It is ok to re-use potentially modified policy here since we are in an initialization or completed phase
        policyToExecute = currentPolicy;
    } else {
        // if the current phase definition describes an internal step/phase, do not parse
        try (
            XContentParser parser = JsonXContent.jsonXContent.createParser(
                XContentParserConfiguration.EMPTY.withRegistry(xContentRegistry),
                phaseDef
            )
        ) {
            phaseExecutionInfo = PhaseExecutionInfo.parse(parser, currentPhase);
        }
        // Overlay the frozen phase definition on top of the current policy so the steps we build
        // match what the index was actually executing, not a possibly-updated policy.
        Map<String, Phase> phaseMap = new HashMap<>(currentPolicy.getPhases());
        if (phaseExecutionInfo.getPhase() != null) {
            phaseMap.put(currentPhase, phaseExecutionInfo.getPhase());
        }
        policyToExecute = new LifecyclePolicy(currentPolicy.getType(), currentPolicy.getName(), phaseMap, currentPolicy.getMetadata());
    }
    // Reuse the metadata we already looked up (and null-checked) instead of querying the map a
    // second time; a second read could return null if the policy were removed concurrently.
    LifecyclePolicySecurityClient policyClient = new LifecyclePolicySecurityClient(
        client,
        ClientHelper.INDEX_LIFECYCLE_ORIGIN,
        policyMetadata.getHeaders()
    );
    final List<Step> steps = policyToExecute.toSteps(policyClient, licenseState);
    // Build a list of steps that correspond with the phase the index is currently in
    final List<Step> phaseSteps;
    if (steps == null) {
        phaseSteps = List.of();
    } else {
        phaseSteps = steps.stream().filter(e -> e.getKey().phase().equals(currentPhase)).toList();
    }
    logger.trace(
        "parsed steps for policy [{}] in phase [{}], definition: [{}], steps: [{}]",
        policy,
        currentPhase,
        phaseDef,
        phaseSteps
    );
    return phaseSteps;
}
/**
 * Read-only lookup used by {@code getStep}: returns the cached step for the given index metadata
 * and step key, or null when no matching entry is cached.
 */
@Nullable
private Step getCachedStep(final IndexMetadata indexMetadata, final Step.StepKey stepKey) {
    final Tuple<IndexMetadata, Step> entry = cachedSteps.get(indexMetadata.getIndex());
    if (entry == null) {
        return null;
    }
    // Deliberate instance (==) comparison rather than equals: it is fast, and any change to the
    // index metadata yields a new IndexMetadata instance, so a stale entry simply misses here and
    // the caller repopulates the cache for the index in question.
    if (entry.v1() != indexMetadata) {
        return null;
    }
    final Step cached = entry.v2();
    assert cached != null : "null steps should never be cached in the policy step registry";
    if (cached != null && cached.getKey().equals(stepKey)) {
        return cached;
    }
    return null;
}
/**
 * Resolves the {@link Step} for the given index and step key.
 *
 * Checks the per-index cache first, synthesizes {@code ErrorStep}s directly, and otherwise parses
 * the steps from the phase definition stored in the index's lifecycle execution state (falling
 * back to the initialization phase when no definition is present). A successfully resolved step
 * is cached keyed on the exact {@code IndexMetadata} instance.
 *
 * @return the matching step, or null if the parsed phase contains no step with the given key
 * @throws IllegalArgumentException if the index has no lifecycle policy
 * @throws ElasticsearchException if the cached phase definition cannot be read
 * @throws XContentParseException if the cached phase definition cannot be parsed
 */
@Nullable
public Step getStep(final IndexMetadata indexMetadata, final Step.StepKey stepKey) {
    final Step cachedStep = getCachedStep(indexMetadata, stepKey);
    if (cachedStep != null) {
        return cachedStep;
    }
    // Error steps are not part of any phase definition; build one on demand.
    if (ErrorStep.NAME.equals(stepKey.name())) {
        return new ErrorStep(new Step.StepKey(stepKey.phase(), stepKey.action(), ErrorStep.NAME));
    }
    final String phase = stepKey.phase();
    final String policyName = indexMetadata.getLifecyclePolicyName();
    final Index index = indexMetadata.getIndex();
    if (policyName == null) {
        throw new IllegalArgumentException("failed to retrieve step " + stepKey + " as index [" + index.getName() + "] has no policy");
    }
    // parse phase steps from the phase definition in the index settings
    final String phaseJson = Objects.requireNonNullElse(
        indexMetadata.getLifecycleExecutionState().phaseDefinition(),
        InitializePolicyContextStep.INITIALIZATION_PHASE
    );
    final List<Step> phaseSteps;
    try {
        phaseSteps = parseStepsFromPhase(policyName, phase, phaseJson);
    } catch (IOException e) {
        throw new ElasticsearchException("failed to load cached steps for " + stepKey, e);
    } catch (XContentParseException parseErr) {
        throw new XContentParseException(
            parseErr.getLocation(),
            "failed to load steps for " + stepKey + " from [" + phaseJson + "]",
            parseErr
        );
    }
    assert phaseSteps.stream().allMatch(step -> step.getKey().phase().equals(phase))
        : "expected phase steps loaded from phase definition for ["
            + index.getName()
            + "] to be in phase ["
            + phase
            + "] but they were not, steps: "
            + phaseSteps;
    // Return the step that matches the given stepKey or else null if we couldn't find it
    final Step s = phaseSteps.stream().filter(step -> step.getKey().equals(stepKey)).findFirst().orElse(null);
    if (s != null) {
        // Reuse the already-resolved index instead of calling indexMetadata.getIndex() again.
        cachedSteps.put(index, Tuple.tuple(indexMetadata, s));
    }
    return s;
}
/**
 * Given a policy and stepkey, return true if a step exists, false otherwise
 */
public boolean stepExists(final String policy, final Step.StepKey stepKey) {
    final Map<Step.StepKey, Step> steps = stepMap.get(policy);
    // Unknown policies have no steps at all.
    return steps != null && steps.containsKey(stepKey);
}
/** Returns true if a lifecycle policy with the given name is registered. */
public boolean policyExists(final String policy) {
return lifecyclePolicyMap.containsKey(policy);
}
/** Returns the first step of the given policy, or null if the policy is not registered. */
public Step getFirstStep(String policy) {
return firstStepMap.get(policy);
}
/**
 * Returns the minimum age configured for the given phase of the given policy.
 * The built-in initialization/completed phases and any phase not present in the policy
 * yield {@code TimeValue.ZERO}.
 *
 * @throws IllegalArgumentException if no policy with the given name is registered
 */
public TimeValue getIndexAgeForPhase(final String policy, final String phase) {
    // These built in phases should never wait
    if (InitializePolicyContextStep.INITIALIZATION_PHASE.equals(phase) || TerminalPolicyStep.COMPLETED_PHASE.equals(phase)) {
        return TimeValue.ZERO;
    }
    final LifecyclePolicyMetadata meta = lifecyclePolicyMap.get(policy);
    if (meta == null) {
        throw new IllegalArgumentException("no policy found with name \"" + policy + "\"");
    }
    final Phase retrievedPhase = meta.getPolicy().getPhases().get(phase);
    // We don't have that phase registered, proceed right through it.
    return retrievedPhase == null ? TimeValue.ZERO : retrievedPhase.getMinimumAge();
}
}
| PolicyStepsRegistry |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestSignalContainer.java | {
"start": 1918,
"end": 4351
} | class ____ {
private static final Logger LOG = LoggerFactory
.getLogger(TestSignalContainer.class);
@Test
public void testSignalRequestDeliveryToNM() throws Exception {
GenericTestUtils.setRootLogLevel(Level.DEBUG);
MockRM rm = new MockRM();
FairScheduler fs = null;
if (rm.getResourceScheduler().getClass() == FairScheduler.class) {
fs = (FairScheduler)rm.getResourceScheduler();
}
rm.start();
MockNM nm1 = rm.registerNode("h1:1234", 5000);
RMApp app = MockRMAppSubmitter.submitWithMemory(2000, rm);
//kick the scheduling
nm1.nodeHeartbeat(true);
RMAppAttempt attempt = app.getCurrentAppAttempt();
MockAM am = rm.sendAMLaunched(attempt.getAppAttemptId());
am.registerAppAttempt();
//request for containers
final int request = 2;
am.allocate("h1" , 1000, request, new ArrayList<ContainerId>());
//kick the scheduler
nm1.nodeHeartbeat(true);
List<Container> conts = new ArrayList<>(request);
int waitCount = 0;
while (conts.size() < request && waitCount++ < 200) {
LOG.info("Got " + conts.size() + " containers. Waiting to get "
+ request);
Thread.sleep(100);
List<Container> allocation = am.allocate(new ArrayList<ResourceRequest>(),
new ArrayList<ContainerId>()).getAllocatedContainers();
conts.addAll(allocation);
if (fs != null) {
nm1.nodeHeartbeat(true);
}
}
assertEquals(request, conts.size());
for(Container container : conts) {
rm.signalToContainer(container.getId(),
SignalContainerCommand.OUTPUT_THREAD_DUMP);
}
NodeHeartbeatResponse resp;
List<SignalContainerRequest> contsToSignal;
int signaledConts = 0;
waitCount = 0;
while ( signaledConts < request && waitCount++ < 200) {
LOG.info("Waiting to get signalcontainer events.. signaledConts: "
+ signaledConts);
resp = nm1.nodeHeartbeat(true);
contsToSignal = resp.getContainersToSignalList();
signaledConts += contsToSignal.size();
Thread.sleep(100);
}
// Verify NM receives the expected number of signal container requests.
assertEquals(request, signaledConts);
am.unregisterAppAttempt();
nm1.nodeHeartbeat(attempt.getAppAttemptId(), 1, ContainerState.COMPLETE);
rm.waitForState(am.getApplicationAttemptId(), RMAppAttemptState.FINISHED);
rm.stop();
}
}
| TestSignalContainer |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/snapshots/ConcurrentSnapshotExecutionException.java | {
"start": 747,
"end": 1343
} | class ____ extends SnapshotException {
public ConcurrentSnapshotExecutionException(final String repositoryName, final String snapshotName, final String msg) {
super(repositoryName, snapshotName, msg);
}
public ConcurrentSnapshotExecutionException(final Snapshot snapshot, final String msg) {
super(snapshot, msg);
}
public ConcurrentSnapshotExecutionException(StreamInput in) throws IOException {
super(in);
}
@Override
public RestStatus status() {
return RestStatus.SERVICE_UNAVAILABLE;
}
}
| ConcurrentSnapshotExecutionException |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest-common/spi-deployment/src/main/java/io/quarkus/resteasy/reactive/spi/WriterInterceptorBuildItem.java | {
"start": 279,
"end": 589
} | class ____ extends AbstractInterceptorBuildItem.Builder<WriterInterceptorBuildItem, Builder> {
public Builder(String className) {
super(className);
}
public WriterInterceptorBuildItem build() {
return new WriterInterceptorBuildItem(this);
}
}
}
| Builder |
java | spring-projects__spring-framework | spring-jdbc/src/test/java/org/springframework/jdbc/datasource/init/AbstractDatabaseInitializationTests.java | {
"start": 1421,
"end": 2613
} | class ____ {
private final ClassRelativeResourceLoader resourceLoader = new ClassRelativeResourceLoader(getClass());
EmbeddedDatabase db;
JdbcTemplate jdbcTemplate;
@BeforeEach
void setUp() {
db = new EmbeddedDatabaseBuilder().setType(getEmbeddedDatabaseType()).build();
jdbcTemplate = new JdbcTemplate(db);
}
@AfterEach
void shutDown() {
if (TransactionSynchronizationManager.isSynchronizationActive()) {
TransactionSynchronizationManager.clear();
TransactionSynchronizationManager.unbindResource(db);
}
db.shutdown();
}
abstract EmbeddedDatabaseType getEmbeddedDatabaseType();
Resource resource(String path) {
return resourceLoader.getResource(path);
}
Resource defaultSchema() {
return resource("db-schema.sql");
}
Resource usersSchema() {
return resource("users-schema.sql");
}
void assertUsersDatabaseCreated(String... lastNames) {
for (String lastName : lastNames) {
String sql = "select count(0) from users where last_name = ?";
Integer result = jdbcTemplate.queryForObject(sql, Integer.class, lastName);
assertThat(result).as("user with last name [" + lastName + "]").isEqualTo(1);
}
}
}
| AbstractDatabaseInitializationTests |
java | apache__hadoop | hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java | {
"start": 3578,
"end": 8603
} | class ____ implements Closeable {
private static final int ONDISK_VERSION = 1;
private static final int LAYOUT_VERSION =
NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION;
private final Path outdir;
private final FileSystem outfs;
private final File dirsTmp;
private final OutputStream dirs;
private final File inodesTmp;
private final OutputStream inodes;
private final MessageDigest digest;
private final FSImageCompression compress;
private final long startBlock;
private final long startInode;
private final UGIResolver ugis;
private final BlockAliasMap.Writer<FileRegion> blocks;
private final BlockResolver blockIds;
private final Map<Long, DirEntry.Builder> dircache;
private final TrackedOutputStream<DigestOutputStream> raw;
private boolean closed = false;
private long curSec;
private long curBlock;
private final AtomicLong curInode;
private final FileSummary.Builder summary = FileSummary.newBuilder()
.setOndiskVersion(ONDISK_VERSION)
.setLayoutVersion(LAYOUT_VERSION);
private final String blockPoolID;
public static Options defaults() {
return new Options();
}
@SuppressWarnings("unchecked")
public ImageWriter(Options opts) throws IOException {
final OutputStream out;
if (null == opts.outStream) {
FileSystem fs = opts.outdir.getFileSystem(opts.getConf());
outfs = (fs instanceof LocalFileSystem)
? ((LocalFileSystem)fs).getRaw()
: fs;
Path tmp = opts.outdir;
if (!outfs.mkdirs(tmp)) {
throw new IOException("Failed to create output dir: " + tmp);
}
try (NNStorage stor = new NNStorage(opts.getConf(),
Arrays.asList(tmp.toUri()), Arrays.asList(tmp.toUri()))) {
NamespaceInfo info = NNStorage.newNamespaceInfo();
if (info.getLayoutVersion() != LAYOUT_VERSION) {
throw new IllegalStateException("Incompatible layout " +
info.getLayoutVersion() + " (expected " + LAYOUT_VERSION + ")");
}
// set the cluster id, if given
if (opts.clusterID.length() > 0) {
info.setClusterID(opts.clusterID);
}
// if block pool id is given
if (opts.blockPoolID.length() > 0) {
info.setBlockPoolID(opts.blockPoolID);
}
stor.format(info);
blockPoolID = info.getBlockPoolID();
}
outdir = new Path(tmp, "current");
out = outfs.create(new Path(outdir, "fsimage_0000000000000000000"));
} else {
outdir = null;
outfs = null;
out = opts.outStream;
blockPoolID = "";
}
digest = MD5Hash.getDigester();
raw = new TrackedOutputStream<>(new DigestOutputStream(
new BufferedOutputStream(out), digest));
compress = opts.compress;
CompressionCodec codec = compress.getImageCodec();
if (codec != null) {
summary.setCodec(codec.getClass().getCanonicalName());
}
startBlock = opts.startBlock;
curBlock = startBlock;
startInode = opts.startInode;
curInode = new AtomicLong(startInode);
dircache = Collections.synchronizedMap(new DirEntryCache(opts.maxdircache));
ugis = null == opts.ugis
? ReflectionUtils.newInstance(opts.ugisClass, opts.getConf())
: opts.ugis;
BlockAliasMap<FileRegion> fmt = null == opts.blocks
? ReflectionUtils.newInstance(opts.aliasMap, opts.getConf())
: opts.blocks;
blocks = fmt.getWriter(null, blockPoolID);
blockIds = null == opts.blockIds
? ReflectionUtils.newInstance(opts.blockIdsClass, opts.getConf())
: opts.blockIds;
// create directory and inode sections as side-files.
// The details are written to files to avoid keeping them in memory.
FileOutputStream dirsTmpStream = null;
try {
dirsTmp = File.createTempFile("fsimg_dir", null);
dirsTmp.deleteOnExit();
dirsTmpStream = new FileOutputStream(dirsTmp);
dirs = beginSection(dirsTmpStream);
} catch (IOException e) {
IOUtils.cleanupWithLogger(null, raw, dirsTmpStream);
throw e;
}
try {
inodesTmp = File.createTempFile("fsimg_inode", null);
inodesTmp.deleteOnExit();
inodes = new FileOutputStream(inodesTmp);
} catch (IOException e) {
IOUtils.cleanupWithLogger(null, raw, dirsTmpStream, dirs);
throw e;
}
raw.write(MAGIC_HEADER);
curSec = raw.pos;
assert raw.pos == MAGIC_HEADER.length;
}
public void accept(TreePath e) throws IOException {
assert e.getParentId() < curInode.get();
// allocate ID
long id = curInode.getAndIncrement();
e.accept(id);
assert e.getId() < curInode.get();
INode n = e.toINode(ugis, blockIds, blocks);
writeInode(n);
if (e.getParentId() > 0) {
// add DirEntry to map, which may page out entries
DirEntry.Builder de = DirEntry.newBuilder()
.setParent(e.getParentId())
.addChildren(e.getId());
dircache.put(e.getParentId(), de);
}
}
@SuppressWarnings("serial")
| ImageWriter |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/oracle/createTable/OracleCreateTableTest49.java | {
"start": 1026,
"end": 14527
} | class ____ extends OracleTest {
public void test_types() throws Exception {
String sql = //
" CREATE TABLE \"SC_001\".\"TB_001\" \n" +
" ( \"ID\" NUMBER NOT NULL ENABLE, \n" +
" \"GMT_CREATE\" DATE, \n" +
" \"GMT_MODIFIED\" DATE, \n" +
" \"POSTING_TYPE\" VARCHAR2(20), \n" +
" \"POSTING_ID\" NUMBER, \n" +
" \"SYS_FRAUD\" NUMBER, \n" +
" \"CONFIRM_FRAUD\" NUMBER, \n" +
" \"FRAUD_RESON\" VARCHAR2(128), \n" +
" \"USER_ID\" NUMBER, \n" +
" \"IDENTIFIER_ID\" NUMBER, \n" +
" \"POSTING_CREATE\" DATE, \n" +
" \"POSTING_MODIFIER\" DATE, \n" +
" \"MEMBER_ID\" VARCHAR2(32), \n" +
" \"MEMBER_LEVEL\" VARCHAR2(16), \n" +
" \"SERVICE_VALUE\" VARCHAR2(32), \n" +
" \"ADDRESS\" VARCHAR2(256), \n" +
" \"COUNTRY\" VARCHAR2(64), \n" +
" \"PROVINCE\" VARCHAR2(128), \n" +
" \"CITY\" VARCHAR2(128), \n" +
" \"ZIP\" VARCHAR2(32), \n" +
" \"FIRST_NAME\" VARCHAR2(128), \n" +
" \"LAST_NAME\" VARCHAR2(128), \n" +
" \"PHONE_COUNTRY\" VARCHAR2(8), \n" +
" \"PHONE_AREA\" VARCHAR2(8), \n" +
" \"PHONE_NUMBER\" VARCHAR2(128), \n" +
" \"FAX_COUNTRY\" VARCHAR2(8), \n" +
" \"FAX_AREA\" VARCHAR2(8), \n" +
" \"FAX_NUMBER\" VARCHAR2(128), \n" +
" \"IP_COUNTRY\" VARCHAR2(128), \n" +
" \"MOBILE_NO\" VARCHAR2(128), \n" +
" \"EMAIL\" VARCHAR2(128), \n" +
" \"ALT_EMAIL\" VARCHAR2(128), \n" +
" \"COMPANY\" VARCHAR2(128), \n" +
" \"HOMEPAGE_URL\" VARCHAR2(128), \n" +
" \"CATEGORY_ID_1\" NUMBER, \n" +
" \"CATEGORY_ID_2\" NUMBER, \n" +
" \"CATEGORY_ID_3\" NUMBER, \n" +
" \"CATEGORY_ID_4\" NUMBER, \n" +
" \"CATEGORY_ID_5\" NUMBER, \n" +
" \"SUBJECT\" VARCHAR2(256), \n" +
" \"KEYWORDS\" VARCHAR2(512), \n" +
" \"DETAIL\" CLOB, \n" +
" \"POSTING_STATUS\" VARCHAR2(16), \n" +
" \"OFFER_TYPE\" VARCHAR2(32)\n" +
" ) PCTFREE 10 PCTUSED 40 INITRANS 1 MAXTRANS 255 \n" +
" STORAGE(\n" +
" BUFFER_POOL DEFAULT)\n" +
" TABLESPACE \"APP_DATA1K\" \n" +
" LOB (\"DETAIL\") STORE AS (\n" +
" ENABLE STORAGE IN ROW CHUNK 8192 PCTVERSION 10\n" +
" NOCACHE LOGGING \n" +
" STORAGE(\n" +
" BUFFER_POOL DEFAULT)) \n" +
" PARTITION BY RANGE (\"GMT_CREATE\") \n" +
" (PARTITION \"P2008\" VALUES LESS THAN (TO_DATE(' 2009-01-01 00:00:00', 'SYYYY-MM-DD HH24:MI:SS', 'NLS_CALENDAR=GREGORIAN')) \n" +
" PCTFREE 10 PCTUSED 40 INITRANS 1 MAXTRANS 255 \n" +
" STORAGE(INITIAL 65536 NEXT 1048576 MINEXTENTS 1 MAXEXTENTS 2147483645\n" +
" PCTINCREASE 0 FREELISTS 1 FREELIST GROUPS 1 BUFFER_POOL DEFAULT)\n" +
" TABLESPACE \"APP_DATA1K\" \n" +
" LOB (\"DETAIL\") STORE AS (\n" +
" TABLESPACE \"APP_DATA1K\" ENABLE STORAGE IN ROW CHUNK 8192 PCTVERSION 10\n" +
" NOCACHE LOGGING \n" +
" STORAGE(INITIAL 65536 NEXT 1048576 MINEXTENTS 1 MAXEXTENTS 2147483645\n" +
" PCTINCREASE 0 FREELISTS 1 FREELIST GROUPS 1 BUFFER_POOL DEFAULT)) NOCOMPRESS , \n" +
" PARTITION \"P2009\" VALUES LESS THAN (TO_DATE(' 2010-01-01 00:00:00', 'SYYYY-MM-DD HH24:MI:SS', 'NLS_CALENDAR=GREGORIAN')) \n" +
" PCTFREE 10 PCTUSED 40 INITRANS 1 MAXTRANS 255 \n" +
" STORAGE(INITIAL 65536 NEXT 1048576 MINEXTENTS 1 MAXEXTENTS 2147483645\n" +
" PCTINCREASE 0 FREELISTS 1 FREELIST GROUPS 1 BUFFER_POOL DEFAULT)\n" +
" TABLESPACE \"APP_DATA1K\" \n" +
" LOB (\"DETAIL\") STORE AS (\n" +
" TABLESPACE \"APP_DATA1K\" ENABLE STORAGE IN ROW CHUNK 8192 PCTVERSION 10\n" +
" NOCACHE LOGGING \n" +
" STORAGE(INITIAL 65536 NEXT 1048576 MINEXTENTS 1 MAXEXTENTS 2147483645\n" +
" PCTINCREASE 0 FREELISTS 1 FREELIST GROUPS 1 BUFFER_POOL DEFAULT)) NOCOMPRESS , \n" +
" PARTITION \"P2010\" VALUES LESS THAN (TO_DATE(' 2011-01-01 00:00:00', 'SYYYY-MM-DD HH24:MI:SS', 'NLS_CALENDAR=GREGORIAN')) \n" +
" PCTFREE 10 PCTUSED 40 INITRANS 1 MAXTRANS 255 \n" +
" STORAGE(INITIAL 65536 NEXT 1048576 MINEXTENTS 1 MAXEXTENTS 2147483645\n" +
" PCTINCREASE 0 FREELISTS 1 FREELIST GROUPS 1 BUFFER_POOL DEFAULT)\n" +
" TABLESPACE \"APPDATA1M\" \n" +
" LOB (\"DETAIL\") STORE AS (\n" +
" TABLESPACE \"BOPSDATATS\" ENABLE STORAGE IN ROW CHUNK 8192 PCTVERSION 10\n" +
" NOCACHE LOGGING \n" +
" STORAGE(INITIAL 65536 NEXT 1048576 MINEXTENTS 1 MAXEXTENTS 2147483645\n" +
" PCTINCREASE 0 FREELISTS 1 FREELIST GROUPS 1 BUFFER_POOL DEFAULT)) NOCOMPRESS ) ";
OracleStatementParser parser = new OracleStatementParser(sql);
List<SQLStatement> statementList = parser.parseStatementList();
SQLStatement stmt = statementList.get(0);
print(statementList);
assertEquals(1, statementList.size());
assertEquals("CREATE TABLE \"SC_001\".\"TB_001\" (\n" +
"\t\"ID\" NUMBER NOT NULL ENABLE,\n" +
"\t\"GMT_CREATE\" DATE,\n" +
"\t\"GMT_MODIFIED\" DATE,\n" +
"\t\"POSTING_TYPE\" VARCHAR2(20),\n" +
"\t\"POSTING_ID\" NUMBER,\n" +
"\t\"SYS_FRAUD\" NUMBER,\n" +
"\t\"CONFIRM_FRAUD\" NUMBER,\n" +
"\t\"FRAUD_RESON\" VARCHAR2(128),\n" +
"\t\"USER_ID\" NUMBER,\n" +
"\t\"IDENTIFIER_ID\" NUMBER,\n" +
"\t\"POSTING_CREATE\" DATE,\n" +
"\t\"POSTING_MODIFIER\" DATE,\n" +
"\t\"MEMBER_ID\" VARCHAR2(32),\n" +
"\t\"MEMBER_LEVEL\" VARCHAR2(16),\n" +
"\t\"SERVICE_VALUE\" VARCHAR2(32),\n" +
"\t\"ADDRESS\" VARCHAR2(256),\n" +
"\t\"COUNTRY\" VARCHAR2(64),\n" +
"\t\"PROVINCE\" VARCHAR2(128),\n" +
"\t\"CITY\" VARCHAR2(128),\n" +
"\t\"ZIP\" VARCHAR2(32),\n" +
"\t\"FIRST_NAME\" VARCHAR2(128),\n" +
"\t\"LAST_NAME\" VARCHAR2(128),\n" +
"\t\"PHONE_COUNTRY\" VARCHAR2(8),\n" +
"\t\"PHONE_AREA\" VARCHAR2(8),\n" +
"\t\"PHONE_NUMBER\" VARCHAR2(128),\n" +
"\t\"FAX_COUNTRY\" VARCHAR2(8),\n" +
"\t\"FAX_AREA\" VARCHAR2(8),\n" +
"\t\"FAX_NUMBER\" VARCHAR2(128),\n" +
"\t\"IP_COUNTRY\" VARCHAR2(128),\n" +
"\t\"MOBILE_NO\" VARCHAR2(128),\n" +
"\t\"EMAIL\" VARCHAR2(128),\n" +
"\t\"ALT_EMAIL\" VARCHAR2(128),\n" +
"\t\"COMPANY\" VARCHAR2(128),\n" +
"\t\"HOMEPAGE_URL\" VARCHAR2(128),\n" +
"\t\"CATEGORY_ID_1\" NUMBER,\n" +
"\t\"CATEGORY_ID_2\" NUMBER,\n" +
"\t\"CATEGORY_ID_3\" NUMBER,\n" +
"\t\"CATEGORY_ID_4\" NUMBER,\n" +
"\t\"CATEGORY_ID_5\" NUMBER,\n" +
"\t\"SUBJECT\" VARCHAR2(256),\n" +
"\t\"KEYWORDS\" VARCHAR2(512),\n" +
"\t\"DETAIL\" CLOB,\n" +
"\t\"POSTING_STATUS\" VARCHAR2(16),\n" +
"\t\"OFFER_TYPE\" VARCHAR2(32)\n" +
")\n" +
"PCTFREE 10\n" +
"PCTUSED 40\n" +
"INITRANS 1\n" +
"MAXTRANS 255\n" +
"TABLESPACE \"APP_DATA1K\"\n" +
"STORAGE (\n" +
"\tBUFFER_POOL DEFAULT\n" +
")\n" +
"LOB (\"DETAIL\") STORE AS (\n" +
"\tLOGGING\n" +
"\tSTORAGE (\n" +
"\t\tBUFFER_POOL DEFAULT\n" +
"\t)\n" +
"\tENABLE STORAGE IN ROW\n" +
"\tCHUNK 8192\n" +
"\tNOCACHE\n" +
")\n" +
"PARTITION BY RANGE (\"GMT_CREATE\") (\n" +
"\tPARTITION \"P2008\" VALUES LESS THAN (TO_DATE(' 2009-01-01 00:00:00', 'SYYYY-MM-DD HH24:MI:SS', 'NLS_CALENDAR=GREGORIAN'))\n" +
"\t\tPCTFREE 10\n" +
"\t\tPCTUSED 40\n" +
"\t\tINITRANS 1\n" +
"\t\tMAXTRANS 255\n" +
"\t\tTABLESPACE \"APP_DATA1K\"\n" +
"\t\tSTORAGE (\n" +
"\t\t\tINITIAL 65536\n" +
"\t\t\tNEXT 1048576\n" +
"\t\t\tMINEXTENTS 1\n" +
"\t\t\tMAXEXTENTS 2147483645\n" +
"\t\t\tPCTINCREASE 0\n" +
"\t\t\tFREELISTS 1\n" +
"\t\t\tFREELIST GROUPS 1\n" +
"\t\t\tBUFFER_POOL DEFAULT\n" +
"\t\t)\n" +
"\t\tNOCOMPRESS,\n" +
"\tPARTITION \"P2009\" VALUES LESS THAN (TO_DATE(' 2010-01-01 00:00:00', 'SYYYY-MM-DD HH24:MI:SS', 'NLS_CALENDAR=GREGORIAN'))\n" +
"\t\tPCTFREE 10\n" +
"\t\tPCTUSED 40\n" +
"\t\tINITRANS 1\n" +
"\t\tMAXTRANS 255\n" +
"\t\tTABLESPACE \"APP_DATA1K\"\n" +
"\t\tSTORAGE (\n" +
"\t\t\tINITIAL 65536\n" +
"\t\t\tNEXT 1048576\n" +
"\t\t\tMINEXTENTS 1\n" +
"\t\t\tMAXEXTENTS 2147483645\n" +
"\t\t\tPCTINCREASE 0\n" +
"\t\t\tFREELISTS 1\n" +
"\t\t\tFREELIST GROUPS 1\n" +
"\t\t\tBUFFER_POOL DEFAULT\n" +
"\t\t)\n" +
"\t\tNOCOMPRESS,\n" +
"\tPARTITION \"P2010\" VALUES LESS THAN (TO_DATE(' 2011-01-01 00:00:00', 'SYYYY-MM-DD HH24:MI:SS', 'NLS_CALENDAR=GREGORIAN'))\n" +
"\t\tPCTFREE 10\n" +
"\t\tPCTUSED 40\n" +
"\t\tINITRANS 1\n" +
"\t\tMAXTRANS 255\n" +
"\t\tTABLESPACE \"APPDATA1M\"\n" +
"\t\tSTORAGE (\n" +
"\t\t\tINITIAL 65536\n" +
"\t\t\tNEXT 1048576\n" +
"\t\t\tMINEXTENTS 1\n" +
"\t\t\tMAXEXTENTS 2147483645\n" +
"\t\t\tPCTINCREASE 0\n" +
"\t\t\tFREELISTS 1\n" +
"\t\t\tFREELIST GROUPS 1\n" +
"\t\t\tBUFFER_POOL DEFAULT\n" +
"\t\t)\n" +
"\t\tNOCOMPRESS\n" +
")",
SQLUtils.toSQLString(stmt, JdbcConstants.ORACLE));
OracleSchemaStatVisitor visitor = new OracleSchemaStatVisitor();
stmt.accept(visitor);
System.out.println("Tables : " + visitor.getTables());
System.out.println("fields : " + visitor.getColumns());
System.out.println("coditions : " + visitor.getConditions());
System.out.println("relationships : " + visitor.getRelationships());
System.out.println("orderBy : " + visitor.getOrderByColumns());
assertEquals(1, visitor.getTables().size());
assertEquals(44, visitor.getColumns().size());
assertTrue(visitor.containsColumn("SC_001.TB_001", "ID"));
}
}
| OracleCreateTableTest49 |
java | spring-projects__spring-security | buildSrc/src/main/java/s101/S101Plugin.java | {
"start": 1074,
"end": 6333
} | class ____ implements Plugin<Project> {
@Override
public void apply(Project project) {
project.getExtensions().add("s101", new S101PluginExtension(project));
project.getTasks().register("s101Install", S101Install.class, this::configure);
project.getTasks().register("s101Configure", S101Configure.class, this::configure);
project.getTasks().register("s101", JavaExec.class, this::configure);
}
private void configure(S101Install install) {
install.setDescription("Installs Structure101 to your filesystem");
}
private void configure(S101Configure configure) {
configure.setDescription("Applies a default Structure101 configuration to the project");
}
private void configure(JavaExec exec) {
exec.setDescription("Runs Structure101 headless analysis, installing and configuring if necessary");
exec.dependsOn("assemble");
Project project = exec.getProject();
S101PluginExtension extension = project.getExtensions().getByType(S101PluginExtension.class);
exec
.workingDir(extension.getInstallationDirectory())
.classpath(new File(extension.getInstallationDirectory().get(), "structure101-java-build.jar"))
.args(new File(new File(project.getBuildDir(), "s101"), "config.xml"))
.systemProperty("s101.label", computeLabel(extension).get())
.doFirst((task) -> {
installAndConfigureIfNeeded(project);
copyConfigurationToBuildDirectory(extension, project);
})
.doLast((task) -> {
copyResultsBackToConfigurationDirectory(extension, project);
});
}
private Property<String> computeLabel(S101PluginExtension extension) {
boolean hasBaseline = extension.getConfigurationDirectory().get().toPath()
.resolve("repository").resolve("snapshots").resolve("baseline").toFile().exists();
if (!hasBaseline) {
return extension.getLabel().convention("baseline");
}
return extension.getLabel().convention("recent");
}
private void installAndConfigureIfNeeded(Project project) {
S101Configurer configurer = new S101Configurer(project);
S101PluginExtension extension = project.getExtensions().getByType(S101PluginExtension.class);
String licenseId = extension.getLicenseId().getOrNull();
if (licenseId != null) {
configurer.license(licenseId);
}
File installationDirectory = extension.getInstallationDirectory().get();
File configurationDirectory = extension.getConfigurationDirectory().get();
if (!installationDirectory.exists()) {
configurer.install(installationDirectory, configurationDirectory);
}
if (!configurationDirectory.exists()) {
configurer.configure(installationDirectory, configurationDirectory);
}
}
private void copyConfigurationToBuildDirectory(S101PluginExtension extension, Project project) {
Path configurationDirectory = extension.getConfigurationDirectory().get().toPath();
Path buildDirectory = project.getBuildDir().toPath();
copyDirectory(project, configurationDirectory, buildDirectory);
}
private void copyResultsBackToConfigurationDirectory(S101PluginExtension extension, Project project) {
Path buildConfigurationDirectory = project.getBuildDir().toPath().resolve("s101");
String label = extension.getLabel().get();
if ("baseline".equals(label)) { // a new baseline was created
copyDirectory(project, buildConfigurationDirectory.resolve("repository").resolve("snapshots"),
extension.getConfigurationDirectory().get().toPath().resolve("repository"));
copyDirectory(project, buildConfigurationDirectory.resolve("repository"),
extension.getConfigurationDirectory().get().toPath());
}
}
private void copyDirectory(Project project, Path source, Path destination) {
try {
Files.walk(source)
.forEach(each -> {
Path relativeToSource = source.getParent().relativize(each);
Path resolvedDestination = destination.resolve(relativeToSource);
if (each.toFile().isDirectory()) {
resolvedDestination.toFile().mkdirs();
return;
}
InputStream input;
if ("project.java.hsp".equals(each.toFile().getName())) {
Path relativeTo = project.getBuildDir().toPath().resolve("s101").relativize(project.getProjectDir().toPath());
String value = "const(THIS_FILE)/" + relativeTo;
input = replace(each, "<property name=\"relative-to\" value=\"(.*)\" />", "<property name=\"relative-to\" value=\"" + value + "\" />");
} else if (each.toFile().toString().endsWith(".xml")) {
input = replace(each, "\\r\\n", "\n");
} else {
input = input(each);
}
try {
Files.copy(input, resolvedDestination, StandardCopyOption.REPLACE_EXISTING);
} catch (IOException e) {
throw new RuntimeException(e);
}
});
} catch (IOException e) {
throw new RuntimeException(e);
}
}
private InputStream replace(Path file, String search, String replace) {
try {
byte[] b = Files.readAllBytes(file);
String contents = new String(b).replaceAll(search, replace);
return new ByteArrayInputStream(contents.getBytes(StandardCharsets.UTF_8));
} catch (IOException e) {
throw new RuntimeException(e);
}
}
private InputStream input(Path file) {
try {
return new FileInputStream(file.toFile());
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
| S101Plugin |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/InsecureCipherModeTest.java | {
"start": 880,
"end": 1578
} | class ____ {
private final CompilationTestHelper compilationHelper =
CompilationTestHelper.newInstance(InsecureCipherMode.class, getClass());
@Test
public void positiveCase() {
compilationHelper
.addSourceLines(
"InsecureCipherModePositiveCases.java",
"""
package com.google.errorprone.bugpatterns.testdata;
import java.security.KeyFactory;
import java.security.KeyPairGenerator;
import java.security.NoSuchAlgorithmException;
import java.security.NoSuchProviderException;
import javax.crypto.Cipher;
import javax.crypto.KeyAgreement;
import javax.crypto.NoSuchPaddingException;
/**
* @author avenet@google.com (Arnaud J. Venet)
*/
public | InsecureCipherModeTest |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/observers/DisposableCompletableObserver.java | {
"start": 2183,
"end": 2902
} | class ____ implements CompletableObserver, Disposable {
final AtomicReference<Disposable> upstream = new AtomicReference<>();
@Override
public final void onSubscribe(@NonNull Disposable d) {
if (EndConsumerHelper.setOnce(this.upstream, d, getClass())) {
onStart();
}
}
/**
* Called once the single upstream {@link Disposable} is set via {@link #onSubscribe(Disposable)}.
*/
protected void onStart() {
}
@Override
public final boolean isDisposed() {
return upstream.get() == DisposableHelper.DISPOSED;
}
@Override
public final void dispose() {
DisposableHelper.dispose(upstream);
}
}
| DisposableCompletableObserver |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/notfound/OptionalEagerRefNonPKNotFoundTest.java | {
"start": 16992,
"end": 17593
} | class ____ extends Person {
@Id
private Long id;
@OneToOne(cascade = CascadeType.PERSIST)
@MapsId
@Fetch(FetchMode.SELECT)
@JoinColumn(
name = "cityName",
referencedColumnName = "name",
foreignKey = @ForeignKey(ConstraintMode.NO_CONSTRAINT)
)
private City city;
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public City getCity() {
return city;
}
@Override
public void setCity(City city) {
this.city = city;
}
}
@Entity
@Table(name = "PersonMapsIdSelectIgnore")
public static | PersonMapsIdSelectException |
java | apache__flink | flink-core/src/main/java/org/apache/flink/api/common/io/InputStreamFSInputWrapper.java | {
"start": 1266,
"end": 2735
} | class ____ extends FSDataInputStream {
private final InputStream inStream;
private long pos = 0;
public InputStreamFSInputWrapper(InputStream inStream) {
this.inStream = inStream;
}
@Override
public void close() throws IOException {
this.inStream.close();
}
/**
 * Advances the wrapped stream to the given absolute position by skipping forward.
 * Backward seeks are unsupported because a plain InputStream cannot rewind.
 *
 * @param desired target absolute position, must be >= the current position
 * @throws IllegalArgumentException if {@code desired} is behind the current position
 * @throws EOFException if the stream ends before the target position is reached
 */
@Override
public void seek(long desired) throws IOException {
    if (desired < this.pos) {
        throw new IllegalArgumentException("Wrapped InputStream: cannot search backwards.");
    }
    while (this.pos < desired) {
        long numSkipped = this.inStream.skip(desired - this.pos);
        if (numSkipped > 0) {
            this.pos += numSkipped;
        } else if (this.inStream.read() == -1) {
            // InputStream.skip() returns 0 (never -1) when nothing can be skipped, e.g. at EOF;
            // the original == -1 check was dead and the loop could spin forever. A one-byte read
            // disambiguates end-of-stream from a transient zero-length skip.
            throw new EOFException("Unexpected EOF during forward seek.");
        } else {
            // read() consumed one byte, so account for it.
            this.pos++;
        }
    }
}
@Override
public long getPos() throws IOException {
return this.pos;
}
@Override
public int read() throws IOException {
int read = inStream.read();
if (read != -1) {
this.pos++;
}
return read;
}
@Override
public int read(byte[] b, int off, int len) throws IOException {
int numReadBytes = inStream.read(b, off, len);
if (numReadBytes != -1) {
this.pos += numReadBytes;
}
return numReadBytes;
}
@Override
public int read(byte[] b) throws IOException {
return read(b, 0, b.length);
}
}
| InputStreamFSInputWrapper |
java | apache__maven | its/core-it-suite/src/test/java/org/apache/maven/it/MavenITmng4345DefaultPluginExecutionOrderTest.java | {
"start": 1150,
"end": 2109
} | class ____ extends AbstractMavenIntegrationTestCase {
/**
* Test that plugin executions contributed by default lifecycle mappings always execute first in the targeted
* lifecycle phase regardless of other plugin executions bound to the same phase and regardless of the POM
* order of plugin declarations.
*
* @throws Exception in case of failure
*/
@Test
public void testit() throws Exception {
File testDir = extractResources("/mng-4345");
Verifier verifier = newVerifier(testDir.getAbsolutePath());
verifier.setAutoclean(false);
verifier.deleteDirectory("target");
verifier.addCliArgument("process-resources");
verifier.execute();
verifier.verifyErrorFreeLog();
List<String> lines = verifier.loadLines("target/log.txt");
assertEquals(Arrays.asList(new String[] {"first", "second"}), lines);
}
}
| MavenITmng4345DefaultPluginExecutionOrderTest |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/AnnotationPositionTest.java | {
"start": 1517,
"end": 1925
} | interface ____ {
String value() default "";
}
""")
.expectUnchanged()
.addInputLines(
"EitherUse.java",
"""
import java.lang.annotation.ElementType;
import java.lang.annotation.Target;
@Target({ElementType.TYPE_USE, ElementType.METHOD, ElementType.TYPE})
@ | TypeUse |
java | apache__camel | components/camel-servicenow/camel-servicenow-component/src/test/java/org/apache/camel/component/servicenow/ServiceNowServiceCatalogIT.java | {
"start": 1466,
"end": 4026
} | class ____ extends ServiceNowITSupport {
@Produce("direct:servicenow")
ProducerTemplate template;
@Test
public void testRetrieveServiceCatalogsAndCategories() {
List<Map<?, ?>> result1 = template.requestBodyAndHeaders(
"direct:servicenow",
null,
kvBuilder()
.put(ServiceNowConstants.RESOURCE, ServiceNowConstants.RESOURCE_SERVICE_CATALOG)
.put(ServiceNowConstants.ACTION, ServiceNowConstants.ACTION_RETRIEVE)
.build(),
List.class);
assertFalse(result1.isEmpty());
List<Map<?, ?>> result2 = template.requestBodyAndHeaders(
"direct:servicenow",
null,
kvBuilder()
.put(ServiceNowConstants.RESOURCE, ServiceNowConstants.RESOURCE_SERVICE_CATALOG)
.put(ServiceNowConstants.ACTION, ServiceNowConstants.ACTION_RETRIEVE)
.put(ServiceNowConstants.ACTION_SUBJECT, ServiceNowConstants.ACTION_SUBJECT_CATEGORIES)
.put(ServiceNowParams.PARAM_SYS_ID, result1.get(0).get("sys_id"))
.build(),
List.class);
assertFalse(result2.isEmpty());
}
@Test
public void testWrongSubject() {
final Map<String, Object> invalid = kvBuilder()
.put(ServiceNowConstants.RESOURCE, ServiceNowConstants.RESOURCE_SERVICE_CATALOG)
.put(ServiceNowConstants.ACTION, ServiceNowConstants.ACTION_RETRIEVE)
.put(ServiceNowConstants.ACTION_SUBJECT, "Invalid")
.build();
assertThrows(CamelExecutionException.class,
() -> template.requestBodyAndHeaders(
"direct:servicenow",
null,
invalid,
List.class));
}
// *************************************************************************
//
// *************************************************************************
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
from("direct:servicenow")
.to("servicenow:{{env:SERVICENOW_INSTANCE}}")
.to("log:org.apache.camel.component.servicenow?level=INFO&showAll=true")
.to("mock:servicenow");
}
};
}
}
| ServiceNowServiceCatalogIT |
java | spring-projects__spring-framework | spring-beans/src/test/java/org/springframework/beans/ExtendedBeanInfoTests.java | {
"start": 2808,
"end": 3329
} | class ____ {
public C setFoo(String foo) { return this; }
}
BeanInfo bi = Introspector.getBeanInfo(C.class);
ExtendedBeanInfo ebi = new ExtendedBeanInfo(bi);
assertThat(hasReadMethodForProperty(bi, "foo")).isFalse();
assertThat(hasWriteMethodForProperty(bi, "foo")).isFalse();
assertThat(hasReadMethodForProperty(ebi, "foo")).isFalse();
assertThat(hasWriteMethodForProperty(ebi, "foo")).isTrue();
}
@Test
void standardReadAndNonStandardWriteMethods() throws Exception {
@SuppressWarnings("unused") | C |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/event/collection/detached/MultipleCollectionListeners.java | {
"start": 3185,
"end": 3537
} | class ____ extends AbstractListener
implements PostCollectionRecreateEventListener {
private PostCollectionRecreateListener(
MultipleCollectionListeners listeners) {
super(listeners);
}
public void onPostRecreateCollection(PostCollectionRecreateEvent event) {
addEvent(event, this);
}
}
public static | PostCollectionRecreateListener |
java | bumptech__glide | library/test/src/test/java/com/bumptech/glide/manager/DefaultConnectivityMonitorTest.java | {
"start": 7892,
"end": 8877
} | class ____ extends ShadowConnectivityManager {
private boolean isNetworkPermissionGranted = true;
private boolean isConnected;
@Implementation
@Override
public Network getActiveNetwork() {
if (isConnected) {
return ShadowNetwork.newInstance(1);
} else {
return null;
}
}
@Implementation
@Override
protected void registerDefaultNetworkCallback(NetworkCallback networkCallback) {
if (!isNetworkPermissionGranted) {
throw new SecurityException();
}
super.registerDefaultNetworkCallback(networkCallback);
if (isConnected) {
networkCallback.onAvailable(null);
} else {
networkCallback.onLost(null);
}
}
@Implementation
@Override
public NetworkInfo getActiveNetworkInfo() {
if (!isNetworkPermissionGranted) {
throw new SecurityException();
}
return super.getActiveNetworkInfo();
}
}
}
| PermissionConnectivityManager |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/floatarray/FloatArrayAssert_hasSameSizeAs_with_Iterable_Test.java | {
"start": 999,
"end": 1425
} | class ____ extends FloatArrayAssertBaseTest {
private final List<String> other = newArrayList("Yoda", "Luke");
@Override
protected FloatArrayAssert invoke_api_method() {
return assertions.hasSameSizeAs(other);
}
@Override
protected void verify_internal_effects() {
verify(arrays).assertHasSameSizeAs(getInfo(assertions), getActual(assertions), other);
}
}
| FloatArrayAssert_hasSameSizeAs_with_Iterable_Test |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/action/search/SearchProgressListener.java | {
"start": 1085,
"end": 9667
} | class ____ {
private static final Logger logger = LogManager.getLogger(SearchProgressListener.class);
public static final SearchProgressListener NOOP = new SearchProgressListener() {
};
private List<SearchShard> shards;
/**
* Executed when shards are ready to be queried.
*
* @param shards The list of shards to query.
* @param skippedShards The list of skipped shards.
* @param clusters The statistics for remote clusters included in the search.
* @param fetchPhase <code>true</code> if the search needs a fetch phase, <code>false</code> otherwise.
* @param timeProvider absolute and relative time provider for this search
**/
protected void onListShards(
List<SearchShard> shards,
List<SearchShard> skippedShards,
Clusters clusters,
boolean fetchPhase,
TransportSearchAction.SearchTimeProvider timeProvider
) {}
/**
* Executed when a shard returns a query result.
*
* @param shardIndex The index of the shard in the list provided by {@link SearchProgressListener#onListShards} )}.
* @param queryResult
*/
protected void onQueryResult(int shardIndex, QuerySearchResult queryResult) {}
/**
* Executed when a shard reports a query failure.
*
* @param shardIndex The index of the shard in the list provided by {@link SearchProgressListener#onListShards})}.
* @param shardTarget The last shard target that thrown an exception.
* @param exc The cause of the failure.
*/
protected void onQueryFailure(int shardIndex, SearchShardTarget shardTarget, Exception exc) {}
/**
* Executed when a partial reduce is created. The number of partial reduce can be controlled via
* {@link SearchRequest#setBatchedReduceSize(int)}.
*
* @param shards The list of shards that are part of this reduce.
* @param totalHits The total number of hits in this reduce.
* @param aggs The partial result for aggregations.
* @param reducePhase The version number for this reduce.
*/
protected void onPartialReduce(List<SearchShard> shards, TotalHits totalHits, InternalAggregations aggs, int reducePhase) {}
/**
* Executed once when the final reduce is created.
*
* @param shards The list of shards that are part of this reduce.
* @param totalHits The total number of hits in this reduce.
* @param aggs The final result for aggregations.
* @param reducePhase The version number for this reduce.
*/
protected void onFinalReduce(List<SearchShard> shards, TotalHits totalHits, InternalAggregations aggs, int reducePhase) {}
/**
* Executed when a shard returns a rank feature result.
*
* @param shardIndex The index of the shard in the list provided by {@link SearchProgressListener#onListShards})}.
*/
protected void onRankFeatureResult(int shardIndex) {}
/**
* Executed when a shard reports a rank feature failure.
*
* @param shardIndex The index of the shard in the list provided by {@link SearchProgressListener#onListShards})}.
* @param shardTarget The last shard target that thrown an exception.
* @param exc The cause of the failure.
*/
protected void onRankFeatureFailure(int shardIndex, SearchShardTarget shardTarget, Exception exc) {}
/**
* Executed when a shard returns a fetch result.
*
* @param shardIndex The index of the shard in the list provided by {@link SearchProgressListener#onListShards})}.
*/
protected void onFetchResult(int shardIndex) {}
/**
* Executed when a shard reports a fetch failure.
*
* @param shardIndex The index of the shard in the list provided by {@link SearchProgressListener#onListShards})}.
* @param shardTarget The last shard target that thrown an exception.
* @param exc The cause of the failure.
*/
protected void onFetchFailure(int shardIndex, SearchShardTarget shardTarget, Exception exc) {}
/**
* Indicates that a cluster has finished a search operation. Used for CCS minimize_roundtrips=true only.
*
* @param clusterAlias alias of cluster that has finished a search operation and returned a SearchResponse.
* The cluster alias for the local cluster is RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY.
* @param searchResponse SearchResponse from cluster 'clusterAlias'
*/
protected void onClusterResponseMinimizeRoundtrips(String clusterAlias, SearchResponse searchResponse) {}
final void notifyListShards(
List<SearchShard> shards,
List<SearchShard> skippedShards,
Clusters clusters,
boolean fetchPhase,
TransportSearchAction.SearchTimeProvider timeProvider
) {
this.shards = shards;
try {
onListShards(shards, skippedShards, clusters, fetchPhase, timeProvider);
} catch (Exception e) {
logger.warn("Failed to execute progress listener on list shards", e);
}
}
final void notifyQueryResult(int shardIndex, QuerySearchResult queryResult) {
try {
onQueryResult(shardIndex, queryResult);
} catch (Exception e) {
logger.warn(() -> "[" + shards.get(shardIndex) + "] Failed to execute progress listener on query result", e);
}
}
final void notifyQueryFailure(int shardIndex, SearchShardTarget shardTarget, Exception exc) {
try {
onQueryFailure(shardIndex, shardTarget, exc);
} catch (Exception e) {
logger.warn(() -> "[" + shards.get(shardIndex) + "] Failed to execute progress listener on query failure", e);
}
}
final void notifyPartialReduce(List<SearchShard> shards, TotalHits totalHits, InternalAggregations aggs, int reducePhase) {
try {
onPartialReduce(shards, totalHits, aggs, reducePhase);
} catch (Exception e) {
logger.warn("Failed to execute progress listener on partial reduce", e);
}
}
protected final void notifyFinalReduce(List<SearchShard> shards, TotalHits totalHits, InternalAggregations aggs, int reducePhase) {
try {
onFinalReduce(shards, totalHits, aggs, reducePhase);
} catch (Exception e) {
logger.warn("Failed to execute progress listener on reduce", e);
}
}
final void notifyRankFeatureResult(int shardIndex) {
try {
onRankFeatureResult(shardIndex);
} catch (Exception e) {
logger.warn(() -> "[" + shards.get(shardIndex) + "] Failed to execute progress listener on rank-feature result", e);
}
}
final void notifyRankFeatureFailure(int shardIndex, SearchShardTarget shardTarget, Exception exc) {
try {
onRankFeatureFailure(shardIndex, shardTarget, exc);
} catch (Exception e) {
logger.warn(() -> "[" + shards.get(shardIndex) + "] Failed to execute progress listener on rank-feature failure", e);
}
}
final void notifyFetchResult(int shardIndex) {
try {
onFetchResult(shardIndex);
} catch (Exception e) {
logger.warn(() -> "[" + shards.get(shardIndex) + "] Failed to execute progress listener on fetch result", e);
}
}
final void notifyFetchFailure(int shardIndex, SearchShardTarget shardTarget, Exception exc) {
try {
onFetchFailure(shardIndex, shardTarget, exc);
} catch (Exception e) {
logger.warn(() -> "[" + shards.get(shardIndex) + "] Failed to execute progress listener on fetch failure", e);
}
}
final void notifyClusterResponseMinimizeRoundtrips(String clusterAlias, SearchResponse searchResponse) {
try {
onClusterResponseMinimizeRoundtrips(clusterAlias, searchResponse);
} catch (Exception e) {
logger.warn(() -> "[" + clusterAlias + "] Failed to execute progress listener onResponseMinimizeRoundtrips", e);
}
}
static List<SearchShard> buildSearchShards(List<? extends SearchPhaseResult> results) {
return results.stream()
.filter(Objects::nonNull)
.map(SearchPhaseResult::getSearchShardTarget)
.map(e -> new SearchShard(e.getClusterAlias(), e.getShardId()))
.toList();
}
static List<SearchShard> buildSearchShardsFromIter(List<SearchShardIterator> its) {
return its.stream().map(e -> new SearchShard(e.getClusterAlias(), e.shardId())).toList();
}
}
| SearchProgressListener |
java | apache__dubbo | dubbo-common/src/test/java/org/apache/dubbo/common/utils/MemberUtilsTest.java | {
"start": 1252,
"end": 1767
} | class ____ {
@Test
void test() throws NoSuchMethodException {
assertFalse(isStatic(getClass().getMethod("noStatic")));
assertTrue(isStatic(getClass().getMethod("staticMethod")));
assertTrue(isPrivate(getClass().getDeclaredMethod("privateMethod")));
assertTrue(isPublic(getClass().getMethod("publicMethod")));
}
public void noStatic() {}
public static void staticMethod() {}
private void privateMethod() {}
public void publicMethod() {}
}
| MemberUtilsTest |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/processor/MulticastOnPrepareTest.java | {
"start": 2208,
"end": 2587
} | class ____ implements Processor {
@Override
public void process(Exchange exchange) {
Animal body = exchange.getIn().getBody(Animal.class);
assertEquals(1, body.getId());
assertEquals("Tiger", body.getName());
// adjust the name
body.setName("Tony the Tiger");
}
}
public static | ProcessorA |
java | alibaba__druid | core/src/main/java/com/alibaba/druid/pool/ha/node/ZookeeperNodeRegister.java | {
"start": 1291,
"end": 5792
} | class ____ {
private static final Log LOG = LogFactory.getLog(ZookeeperNodeRegister.class);
private final Lock lock = new ReentrantLock();
private String zkConnectString;
private String path = "/ha-druid-datasources";
private CuratorFramework client;
private GroupMember member;
private boolean privateZkClient; // Should I close the client?
/**
* Init a CuratorFramework if there's no CuratorFramework provided.
*/
public void init() {
if (client == null) {
client = CuratorFrameworkFactory.builder()
.connectionTimeoutMs(5000)
.connectString(zkConnectString)
.retryPolicy(new RetryForever(10000))
.sessionTimeoutMs(30000)
.build();
client.start();
privateZkClient = true;
}
}
/**
* Register a Node which has a Properties as the payload.
* <pre>
* CAUTION: only one node can be registered,
* if you want to register another one,
* call deregister first
* </pre>
*
* @param payload The information used to generate the payload Properties
* @return true, register successfully; false, skip the registeration
*/
public boolean register(String nodeId, List<ZookeeperNodeInfo> payload) {
if (payload == null || payload.isEmpty()) {
return false;
}
lock.lock();
try {
createPathIfNotExisted();
if (member != null) {
LOG.warn("GroupMember has already registered. Please deregister first.");
return false;
}
String payloadString = getPropertiesString(payload);
member = new GroupMember(client, path, nodeId, payloadString.getBytes());
member.start();
LOG.info("Register Node[" + nodeId + "] in path[" + path + "].");
return true;
} finally {
lock.unlock();
}
}
/**
* Close the current GroupMember.
*/
public void deregister() {
if (member != null) {
member.close();
member = null;
}
if (client != null && privateZkClient) {
client.close();
}
}
/**
* @see #deregister()
*/
public void destroy() {
deregister();
}
private void createPathIfNotExisted() {
try {
if (client.checkExists().forPath(path) == null) {
LOG.info("Path[" + path + "] is NOT existed, create it.");
client.create().creatingParentsIfNeeded().forPath(path);
}
} catch (Exception e) {
LOG.error("Can NOT check the path.", e);
}
}
private String getPropertiesString(List<ZookeeperNodeInfo> payload) {
Properties properties = new Properties();
for (ZookeeperNodeInfo n : payload) {
if (n.getHost() != null) {
properties.setProperty(n.getPrefix() + "host", n.getHost());
}
if (n.getPort() != null) {
properties.setProperty(n.getPrefix() + "port", n.getPort().toString());
}
if (n.getDatabase() != null) {
properties.setProperty(n.getPrefix() + "database", n.getDatabase());
}
if (n.getUsername() != null) {
properties.setProperty(n.getPrefix() + "username", n.getUsername());
}
if (n.getPassword() != null) {
properties.setProperty(n.getPrefix() + "password", n.getPassword());
}
}
StringWriter sw = new StringWriter();
try {
properties.store(sw, "");
} catch (IOException e) {
LOG.error("Why Properties.store goes wrong?", e);
}
return sw.toString();
}
public void setClient(CuratorFramework client) {
if (client != null) {
this.client = client;
privateZkClient = false;
}
}
public CuratorFramework getClient() {
return client;
}
public String getZkConnectString() {
return zkConnectString;
}
public void setZkConnectString(String zkConnectString) {
this.zkConnectString = zkConnectString;
}
public String getPath() {
return path;
}
public void setPath(String path) {
this.path = path;
}
}
| ZookeeperNodeRegister |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/ClientOptions.java | {
"start": 29952,
"end": 32021
} | enum ____ {
/**
* This is the default behavior. The client will fetch current credentials from the underlying
* {@link RedisCredentialsProvider} only when the driver needs to, e.g. when the connection is first established or when
* it is re-established after a disconnect.
* <p/>
* <p>
* No re-authentication is performed when new credentials are emitted by a {@link RedisCredentialsProvider} that
* supports streaming. The client does not subscribe to or react to any updates in the credential stream provided by
* {@link RedisCredentialsProvider#credentials()}.
* </p>
*/
DEFAULT,
/**
* Automatically triggers re-authentication whenever new credentials are emitted by a {@link RedisCredentialsProvider}
* that supports streaming, as indicated by {@link RedisCredentialsProvider#supportsStreaming()}.
*
* <p>
* When this behavior is enabled, the client subscribes to the credential stream provided by
* {@link RedisCredentialsProvider#credentials()} and issues an {@code AUTH} command to the Redis server each time new
* credentials are received. This behavior supports dynamic credential scenarios, such as token-based authentication, or
* credential rotation where credentials are refreshed periodically to maintain access.
* </p>
*
* <p>
* Note: {@code AUTH} commands issued as part of this behavior may interleave with user-submitted commands, as the
* client performs re-authentication independently of user command flow.
* </p>
*/
ON_NEW_CREDENTIALS
}
/**
* Whether we should use hash indexed queue, which provides O(1) remove(Object)
*
* @return if hash indexed queue should be used
*/
public boolean isUseHashIndexedQueue() {
return useHashIndexedQueue;
}
/**
* Behavior of connections in disconnected state.
*/
public | ReauthenticateBehavior |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/BedrockAgentRuntimeEndpointBuilderFactory.java | {
"start": 17576,
"end": 21481
} | interface ____
extends
EndpointProducerBuilder {
default BedrockAgentRuntimeEndpointBuilder basic() {
return (BedrockAgentRuntimeEndpointBuilder) this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default AdvancedBedrockAgentRuntimeEndpointBuilder lazyStartProducer(boolean lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default AdvancedBedrockAgentRuntimeEndpointBuilder lazyStartProducer(String lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* To use an existing configured AWS Bedrock Agent Runtime client.
*
* The option is a:
* <code>software.amazon.awssdk.services.bedrockagentruntime.BedrockAgentRuntimeClient</code> type.
*
* Group: advanced
*
* @param bedrockAgentRuntimeClient the value to set
* @return the dsl builder
*/
default AdvancedBedrockAgentRuntimeEndpointBuilder bedrockAgentRuntimeClient(software.amazon.awssdk.services.bedrockagentruntime.BedrockAgentRuntimeClient bedrockAgentRuntimeClient) {
doSetProperty("bedrockAgentRuntimeClient", bedrockAgentRuntimeClient);
return this;
}
/**
* To use an existing configured AWS Bedrock Agent Runtime client.
*
* The option will be converted to a
* <code>software.amazon.awssdk.services.bedrockagentruntime.BedrockAgentRuntimeClient</code> type.
*
* Group: advanced
*
* @param bedrockAgentRuntimeClient the value to set
* @return the dsl builder
*/
default AdvancedBedrockAgentRuntimeEndpointBuilder bedrockAgentRuntimeClient(String bedrockAgentRuntimeClient) {
doSetProperty("bedrockAgentRuntimeClient", bedrockAgentRuntimeClient);
return this;
}
}
public | AdvancedBedrockAgentRuntimeEndpointBuilder |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/FieldCanBeFinalTest.java | {
"start": 7307,
"end": 7534
} | class ____ {
int x;
A(B b) {
x = 42;
b.x = 42;
}
}
""")
.addSourceLines(
"B.java",
"""
| A |
java | quarkusio__quarkus | extensions/panache/hibernate-reactive-panache/deployment/src/main/java/io/quarkus/hibernate/reactive/panache/deployment/PanacheHibernateResourceProcessor.java | {
"start": 2460,
"end": 9605
} | class ____ {
static final DotName DOTNAME_PANACHE_REPOSITORY_BASE = DotName.createSimple(PanacheRepositoryBase.class.getName());
private static final DotName DOTNAME_PANACHE_REPOSITORY = DotName.createSimple(PanacheRepository.class.getName());
static final DotName DOTNAME_PANACHE_ENTITY_BASE = DotName.createSimple(PanacheEntityBase.class.getName());
private static final DotName DOTNAME_PANACHE_ENTITY = DotName.createSimple(PanacheEntity.class.getName());
private static final DotName DOTNAME_REACTIVE_SESSION = DotName.createSimple(Mutiny.Session.class.getName());
private static final DotName DOTNAME_ID = DotName.createSimple(Id.class.getName());
private static final DotName DOTNAME_UNI = DotName.createSimple(Uni.class.getName());
private static final DotName DOTNAME_MULTI = DotName.createSimple(Multi.class.getName());
@BuildStep
FeatureBuildItem featureBuildItem() {
return new FeatureBuildItem(Feature.HIBERNATE_REACTIVE_PANACHE);
}
@BuildStep
AdditionalJpaModelBuildItem produceModel() {
// only useful for the index resolution: hibernate will register it to be transformed, but BuildMojo
// only transforms classes from the application jar, so we do our own transforming
return new AdditionalJpaModelBuildItem("io.quarkus.hibernate.reactive.panache.PanacheEntity",
// Only added to persistence units actually using this class, using Jandex-based discovery,
// so we pass empty sets of PUs.
// The build items tell the Hibernate extension to process the classes at build time:
// add to Jandex index, bytecode enhancement, proxy generation, ...
Set.of());
}
@BuildStep
UnremovableBeanBuildItem ensureBeanLookupAvailable() {
return UnremovableBeanBuildItem.beanTypes(DOTNAME_REACTIVE_SESSION);
}
@BuildStep
void collectEntityClasses(CombinedIndexBuildItem index, BuildProducer<PanacheEntityClassBuildItem> entityClasses) {
// NOTE: we don't skip abstract/generic entities because they still need accessors
for (ClassInfo panacheEntityBaseSubclass : index.getIndex().getAllKnownSubclasses(DOTNAME_PANACHE_ENTITY_BASE)) {
// FIXME: should we really skip PanacheEntity or all MappedSuperClass?
if (!panacheEntityBaseSubclass.name().equals(DOTNAME_PANACHE_ENTITY)) {
entityClasses.produce(new PanacheEntityClassBuildItem(panacheEntityBaseSubclass));
}
}
}
@BuildStep
@Consume(HibernateEnhancersRegisteredBuildItem.class)
@Consume(InterceptedStaticMethodsTransformersRegisteredBuildItem.class)
void build(CombinedIndexBuildItem index,
BuildProducer<BytecodeTransformerBuildItem> transformers,
List<PanacheEntityClassBuildItem> entityClasses,
Optional<JpaModelPersistenceUnitMappingBuildItem> jpaModelPersistenceUnitMapping,
List<PanacheMethodCustomizerBuildItem> methodCustomizersBuildItems,
BuildProducer<EntityToPersistenceUnitBuildItem> entityToPersistenceUnit) throws Exception {
List<PanacheMethodCustomizer> methodCustomizers = methodCustomizersBuildItems.stream()
.map(bi -> bi.getMethodCustomizer()).collect(Collectors.toList());
PanacheJpaRepositoryEnhancer daoEnhancer = new PanacheJpaRepositoryEnhancer(index.getIndex());
Set<String> daoClasses = new HashSet<>();
Set<String> panacheEntities = new HashSet<>();
for (ClassInfo classInfo : index.getIndex().getAllKnownImplementors(DOTNAME_PANACHE_REPOSITORY_BASE)) {
// Skip PanacheRepository
if (classInfo.name().equals(DOTNAME_PANACHE_REPOSITORY))
continue;
if (daoEnhancer.skipRepository(classInfo))
continue;
List<org.jboss.jandex.Type> typeParameters = JandexUtil
.resolveTypeParameters(classInfo.name(), DOTNAME_PANACHE_REPOSITORY_BASE, index.getIndex());
var entityTypeName = typeParameters.get(0).name();
panacheEntities.add(entityTypeName.toString());
// Also add subclasses, so that they get resolved to a persistence unit.
for (var subclass : index.getIndex().getAllKnownSubclasses(entityTypeName)) {
panacheEntities.add(subclass.name().toString());
}
daoClasses.add(classInfo.name().toString());
}
for (ClassInfo classInfo : index.getIndex().getAllKnownImplementors(DOTNAME_PANACHE_REPOSITORY)) {
if (daoEnhancer.skipRepository(classInfo))
continue;
daoClasses.add(classInfo.name().toString());
}
for (String daoClass : daoClasses) {
transformers.produce(new BytecodeTransformerBuildItem(daoClass, daoEnhancer));
}
PanacheJpaEntityOperationsEnhancer entityOperationsEnhancer = new PanacheJpaEntityOperationsEnhancer(index.getIndex(),
methodCustomizers,
ReactiveJavaJpaTypeBundle.BUNDLE);
Set<String> modelClasses = new HashSet<>();
for (PanacheEntityClassBuildItem entityClass : entityClasses) {
String entityClassName = entityClass.get().name().toString();
modelClasses.add(entityClassName);
transformers.produce(new BytecodeTransformerBuildItem(entityClassName, entityOperationsEnhancer));
}
panacheEntities.addAll(modelClasses);
determineEntityPersistenceUnits(jpaModelPersistenceUnitMapping, panacheEntities, "Panache")
.forEach((e, pu) -> {
entityToPersistenceUnit.produce(new EntityToPersistenceUnitBuildItem(e, pu));
});
}
@BuildStep
@Record(ExecutionTime.STATIC_INIT)
void recordEntityToPersistenceUnit(List<EntityToPersistenceUnitBuildItem> items,
PanacheHibernateReactiveRecorder recorder) {
Map<String, String> map = new HashMap<>();
for (EntityToPersistenceUnitBuildItem item : items) {
map.put(item.getEntityClass(), item.getPersistenceUnitName());
}
// This is called even if there are no entity types, so that Panache gets properly initialized.
recorder.addEntityTypesToPersistenceUnit(map);
}
@BuildStep
ValidationPhaseBuildItem.ValidationErrorBuildItem validate(ValidationPhaseBuildItem validationPhase,
CombinedIndexBuildItem index) throws BuildException {
// we verify that no ID fields are defined (via @Id) when extending PanacheEntity
for (AnnotationInstance annotationInstance : index.getIndex().getAnnotations(DOTNAME_ID)) {
ClassInfo info = JandexUtil.getEnclosingClass(annotationInstance);
if (JandexUtil.isSubclassOf(index.getIndex(), info, DOTNAME_PANACHE_ENTITY)) {
BuildException be = new BuildException("You provide a JPA identifier via @Id inside '" + info.name() +
"' but one is already provided by PanacheEntity, " +
"your | PanacheHibernateResourceProcessor |
java | quarkusio__quarkus | independent-projects/resteasy-reactive/server/vertx/src/test/java/org/jboss/resteasy/reactive/server/vertx/test/mediatype/MessageBodyWriteTest.java | {
"start": 1696,
"end": 1923
} | class ____ {
@GET
public Response response() {
return Response.ok(Map.of("key", "value")).build();
}
}
@Provider
@Produces(MediaType.APPLICATION_JSON)
public static | TestResource |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Preconditions.java | {
"start": 1070,
"end": 1574
} | class ____ {@code guava.Preconditions} which provides helpers
* to validate the following conditions:
* <ul>
* <li>An invalid {@code null} obj causes a {@link NullPointerException}.</li>
* <li>An invalid argument causes an {@link IllegalArgumentException}.</li>
* <li>An invalid state causes an {@link IllegalStateException}.</li>
* <li>An invalid index causes an {@link IndexOutOfBoundsException}.</li>
* </ul>
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public final | replaces |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/issue_1700/issue1763_2/TypeReferenceBug1763_2.java | {
"start": 725,
"end": 2911
} | class ____ type
* parameter.
*
* <p>Clients create an empty anonymous subclass. Doing so embeds the type
* parameter in the anonymous class's type hierarchy so we can reconstitute it
* at runtime despite erasure.
*/
protected TypeReferenceBug1763_2(){
Type superClass = getClass().getGenericSuperclass();
Type type = ((ParameterizedType) superClass).getActualTypeArguments()[0];
Type cachedType = classTypeCache.get(type);
if (cachedType == null) {
classTypeCache.putIfAbsent(type, type);
cachedType = classTypeCache.get(type);
}
this.type = cachedType;
}
/**
* @since 1.2.9
* @param actualTypeArguments
*/
protected TypeReferenceBug1763_2(Type... actualTypeArguments){
Class<?> thisClass = this.getClass();
Type superClass = thisClass.getGenericSuperclass();
ParameterizedType argType = (ParameterizedType) ((ParameterizedType) superClass).getActualTypeArguments()[0];
Type rawType = argType.getRawType();
Type[] argTypes = argType.getActualTypeArguments();
int actualIndex = 0;
for (int i = 0; i < argTypes.length; ++i) {
if (argTypes[i] instanceof TypeVariable &&
actualIndex < actualTypeArguments.length) {
argTypes[i] = actualTypeArguments[actualIndex++];
}
// fix for openjdk and android env
if (argTypes[i] instanceof GenericArrayType) {
argTypes[i] = TypeUtils.checkPrimitiveArray(
(GenericArrayType) argTypes[i]);
}
}
Type key = new ParameterizedTypeImpl(argTypes, thisClass, rawType);
Type cachedType = classTypeCache.get(key);
if (cachedType == null) {
classTypeCache.putIfAbsent(key, key);
cachedType = classTypeCache.get(key);
}
type = cachedType;
}
/**
* Gets underlying {@code Type} instance.
*/
public Type getType() {
return type;
}
public final static Type LIST_STRING = new TypeReference<List<String>>() {}.getType();
}
| from |
java | elastic__elasticsearch | x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/FileRealm.java | {
"start": 850,
"end": 2725
} | class ____ extends CachingUsernamePasswordRealm {
private final FileUserPasswdStore userPasswdStore;
private final FileUserRolesStore userRolesStore;
public FileRealm(RealmConfig config, ResourceWatcherService watcherService, ThreadPool threadPool) {
this(config, new FileUserPasswdStore(config, watcherService), new FileUserRolesStore(config, watcherService), threadPool);
}
// pkg private for testing
FileRealm(RealmConfig config, FileUserPasswdStore userPasswdStore, FileUserRolesStore userRolesStore, ThreadPool threadPool) {
super(config, threadPool);
this.userPasswdStore = userPasswdStore;
userPasswdStore.addListener(this::expireAll);
this.userRolesStore = userRolesStore;
userRolesStore.addListener(this::expireAll);
}
@Override
protected void doAuthenticate(UsernamePasswordToken token, ActionListener<AuthenticationResult<User>> listener) {
final AuthenticationResult<User> result = userPasswdStore.verifyPassword(token.principal(), token.credentials(), () -> {
String[] roles = userRolesStore.roles(token.principal());
return new User(token.principal(), roles);
});
listener.onResponse(result);
}
@Override
protected void doLookupUser(String username, ActionListener<User> listener) {
if (userPasswdStore.userExists(username)) {
String[] roles = userRolesStore.roles(username);
listener.onResponse(new User(username, roles));
} else {
listener.onResponse(null);
}
}
@Override
public void usageStats(ActionListener<Map<String, Object>> listener) {
super.usageStats(listener.delegateFailureAndWrap((l, stats) -> {
stats.put("size", userPasswdStore.usersCount());
l.onResponse(stats);
}));
}
}
| FileRealm |
java | apache__rocketmq | remoting/src/main/java/org/apache/rocketmq/remoting/protocol/body/SubscriptionGroupWrapper.java | {
"start": 1162,
"end": 2321
} | class ____ extends RemotingSerializable {
private ConcurrentMap<String, SubscriptionGroupConfig> subscriptionGroupTable =
new ConcurrentHashMap<>(1024);
private ConcurrentMap<String, ConcurrentMap<String, Integer>> forbiddenTable =
new ConcurrentHashMap<>(1024);
private DataVersion dataVersion = new DataVersion();
public ConcurrentMap<String, SubscriptionGroupConfig> getSubscriptionGroupTable() {
return subscriptionGroupTable;
}
public void setSubscriptionGroupTable(
ConcurrentMap<String, SubscriptionGroupConfig> subscriptionGroupTable) {
this.subscriptionGroupTable = subscriptionGroupTable;
}
public ConcurrentMap<String, ConcurrentMap<String, Integer>> getForbiddenTable() {
return forbiddenTable;
}
public void setForbiddenTable(ConcurrentMap<String, ConcurrentMap<String, Integer>> forbiddenTable) {
this.forbiddenTable = forbiddenTable;
}
public DataVersion getDataVersion() {
return dataVersion;
}
public void setDataVersion(DataVersion dataVersion) {
this.dataVersion = dataVersion;
}
}
| SubscriptionGroupWrapper |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/type/java/BooleanJavaTypeDescriptorTest.java | {
"start": 4708,
"end": 5591
} | class ____ implements AttributeConverter<Boolean, Integer>, BasicValueConverter<Boolean, Integer> {
@Override
public Integer convertToDatabaseColumn(Boolean attribute) {
return attribute != null && attribute ? 1 : null;
}
@Override
public Boolean convertToEntityAttribute(Integer dbData) {
return dbData != null && dbData == 1;
}
@Override
public @Nullable Boolean toDomainValue(@Nullable Integer relationalForm) {
return convertToEntityAttribute(relationalForm);
}
@Override
public @Nullable Integer toRelationalValue(@Nullable Boolean domainForm) {
return convertToDatabaseColumn(domainForm);
}
@Override
public JavaType<Boolean> getDomainJavaType() {
return BooleanJavaType.INSTANCE;
}
@Override
public JavaType<Integer> getRelationalJavaType() {
return IntegerJavaType.INSTANCE;
}
}
private static | OneNullBooleanConverter |
java | netty__netty | codec-base/src/main/java/io/netty/handler/codec/DecoderException.java | {
"start": 743,
"end": 1346
} | class ____ extends CodecException {
private static final long serialVersionUID = 6926716840699621852L;
/**
* Creates a new instance.
*/
public DecoderException() {
}
/**
* Creates a new instance.
*/
public DecoderException(String message, Throwable cause) {
super(message, cause);
}
/**
* Creates a new instance.
*/
public DecoderException(String message) {
super(message);
}
/**
* Creates a new instance.
*/
public DecoderException(Throwable cause) {
super(cause);
}
}
| DecoderException |
java | apache__kafka | clients/src/main/java/org/apache/kafka/clients/admin/ListStreamsGroupOffsetsSpec.java | {
"start": 1255,
"end": 1828
} | class ____ {
private Collection<TopicPartition> topicPartitions;
/**
* Set the topic partitions whose offsets are to be listed for a streams group.
*/
public ListStreamsGroupOffsetsSpec topicPartitions(Collection<TopicPartition> topicPartitions) {
this.topicPartitions = topicPartitions;
return this;
}
/**
* Returns the topic partitions whose offsets are to be listed for a streams group.
*/
public Collection<TopicPartition> topicPartitions() {
return topicPartitions;
}
}
| ListStreamsGroupOffsetsSpec |
java | alibaba__nacos | naming/src/main/java/com/alibaba/nacos/naming/core/v2/ServiceManager.java | {
"start": 1090,
"end": 3871
} | class ____ {
private static final ServiceManager INSTANCE = new ServiceManager();
private final ConcurrentHashMap<Service, Service> singletonRepository;
private final ConcurrentHashMap<String, Set<Service>> namespaceSingletonMaps;
private ServiceManager() {
singletonRepository = new ConcurrentHashMap<>(1 << 10);
namespaceSingletonMaps = new ConcurrentHashMap<>(1 << 2);
}
public static ServiceManager getInstance() {
return INSTANCE;
}
public Set<Service> getSingletons(String namespace) {
return namespaceSingletonMaps.getOrDefault(namespace, new HashSet<>(1));
}
/**
* Get singleton service. Put to manager if no singleton.
*
* @param service new service
* @return if service is exist, return exist service, otherwise return new service
*/
public Service getSingleton(Service service) {
Service result = singletonRepository.computeIfAbsent(service, key -> {
NotifyCenter.publishEvent(new MetadataEvent.ServiceMetadataEvent(service, false));
return service;
});
namespaceSingletonMaps.computeIfAbsent(result.getNamespace(), namespace -> new ConcurrentHashSet<>()).add(result);
return result;
}
/**
* Get singleton service if Exist.
*
* @param namespace namespace of service
* @param group group of service
* @param name name of service
* @return singleton service if exist, otherwise null optional
*/
public Optional<Service> getSingletonIfExist(String namespace, String group, String name) {
return getSingletonIfExist(Service.newService(namespace, group, name));
}
/**
* Get singleton service if Exist.
*
* @param service service template
* @return singleton service if exist, otherwise null optional
*/
public Optional<Service> getSingletonIfExist(Service service) {
return Optional.ofNullable(singletonRepository.get(service));
}
public Set<String> getAllNamespaces() {
return namespaceSingletonMaps.keySet();
}
/**
* Remove singleton service.
*
* @param service service need to remove
* @return removed service
*/
public Service removeSingleton(Service service) {
if (namespaceSingletonMaps.containsKey(service.getNamespace())) {
namespaceSingletonMaps.get(service.getNamespace()).remove(service);
}
return singletonRepository.remove(service);
}
public boolean containSingleton(Service service) {
return singletonRepository.containsKey(service);
}
public int size() {
return singletonRepository.size();
}
}
| ServiceManager |
java | apache__flink | flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStReducingState.java | {
"start": 2001,
"end": 7002
} | class ____<K, N, V> extends AbstractReducingState<K, N, V>
implements ForStInnerTable<K, N, V> {
/** The column family which this internal value state belongs to. */
private final ColumnFamilyHandle columnFamilyHandle;
/** The serialized key builder which should be thread-safe. */
private final ThreadLocal<SerializedCompositeKeyBuilder<K>> serializedKeyBuilder;
/** The default namespace if not set. * */
private final N defaultNamespace;
/** The serializer for namespace. * */
private final ThreadLocal<TypeSerializer<N>> namespaceSerializer;
/** The data outputStream used for value serializer, which should be thread-safe. */
private final ThreadLocal<DataOutputSerializer> valueSerializerView;
/** The data inputStream used for value deserializer, which should be thread-safe. */
private final ThreadLocal<DataInputDeserializer> valueDeserializerView;
/** Whether to enable the reuse of serialized key(and namespace). */
private final boolean enableKeyReuse;
public ForStReducingState(
StateRequestHandler stateRequestHandler,
ColumnFamilyHandle columnFamily,
ReduceFunction<V> reduceFunction,
TypeSerializer<V> valueSerializer,
Supplier<SerializedCompositeKeyBuilder<K>> serializedKeyBuilderInitializer,
N defaultNamespace,
Supplier<TypeSerializer<N>> namespaceSerializerInitializer,
Supplier<DataOutputSerializer> valueSerializerViewInitializer,
Supplier<DataInputDeserializer> valueDeserializerViewInitializer) {
super(stateRequestHandler, reduceFunction, valueSerializer);
this.columnFamilyHandle = columnFamily;
this.serializedKeyBuilder = ThreadLocal.withInitial(serializedKeyBuilderInitializer);
this.defaultNamespace = defaultNamespace;
this.namespaceSerializer = ThreadLocal.withInitial(namespaceSerializerInitializer);
this.valueSerializerView = ThreadLocal.withInitial(valueSerializerViewInitializer);
this.valueDeserializerView = ThreadLocal.withInitial(valueDeserializerViewInitializer);
// We only enable key reuse for the most common namespace across all states.
this.enableKeyReuse =
(defaultNamespace instanceof VoidNamespace)
&& (namespaceSerializerInitializer.get()
instanceof VoidNamespaceSerializer);
}
@Override
public ColumnFamilyHandle getColumnFamilyHandle() {
return columnFamilyHandle;
}
@Override
public byte[] serializeKey(ContextKey<K, N> contextKey) throws IOException {
return ForStSerializerUtils.serializeKeyAndNamespace(
contextKey,
serializedKeyBuilder.get(),
defaultNamespace,
namespaceSerializer.get(),
enableKeyReuse);
}
@Override
public byte[] serializeValue(V value) throws IOException {
DataOutputSerializer outputView = valueSerializerView.get();
outputView.clear();
getValueSerializer().serialize(value, outputView);
return outputView.getCopyOfBuffer();
}
@Override
public V deserializeValue(byte[] valueBytes) throws IOException {
DataInputDeserializer inputView = valueDeserializerView.get();
inputView.setBuffer(valueBytes);
return getValueSerializer().deserialize(inputView);
}
@SuppressWarnings("unchecked")
@Override
public ForStDBGetRequest<K, N, V, V> buildDBGetRequest(StateRequest<?, ?, ?, ?> stateRequest) {
Preconditions.checkArgument(stateRequest.getRequestType() == StateRequestType.REDUCING_GET);
ContextKey<K, N> contextKey =
new ContextKey<>(
(RecordContext<K>) stateRequest.getRecordContext(),
(N) stateRequest.getNamespace());
return new ForStDBSingleGetRequest<>(
contextKey, this, (InternalAsyncFuture<V>) stateRequest.getFuture());
}
@SuppressWarnings("unchecked")
@Override
public ForStDBPutRequest<K, N, V> buildDBPutRequest(StateRequest<?, ?, ?, ?> stateRequest) {
Preconditions.checkArgument(
stateRequest.getRequestType() == StateRequestType.REDUCING_ADD
|| stateRequest.getRequestType() == StateRequestType.CLEAR);
ContextKey<K, N> contextKey =
new ContextKey<>(
(RecordContext<K>) stateRequest.getRecordContext(),
(N) stateRequest.getNamespace());
V value =
stateRequest.getRequestType() == StateRequestType.CLEAR
? null // "Delete(key)" is equivalent to "Put(key, null)"
: (V) stateRequest.getPayload();
return ForStDBPutRequest.of(
contextKey, value, this, (InternalAsyncFuture<Void>) stateRequest.getFuture());
}
}
| ForStReducingState |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/ServiceLaunchException.java | {
"start": 1439,
"end": 3277
} | class ____ extends ExitUtil.ExitException
implements ExitCodeProvider, LauncherExitCodes {
/**
* Create an exception with the specific exit code.
* @param exitCode exit code
* @param cause cause of the exception
*/
public ServiceLaunchException(int exitCode, Throwable cause) {
super(exitCode, cause);
}
/**
* Create an exception with the specific exit code and text.
* @param exitCode exit code
* @param message message to use in exception
*/
public ServiceLaunchException(int exitCode, String message) {
super(exitCode, message);
}
/**
* Create a formatted exception.
* <p>
* This uses {@link String#format(String, Object...)}
* to build the formatted exception in the ENGLISH locale.
* <p>
* If the last argument is a throwable, it becomes the cause of the exception.
* It will also be used as a parameter for the format.
* @param exitCode exit code
* @param format format for message to use in exception
* @param args list of arguments
*/
public ServiceLaunchException(int exitCode, String format, Object... args) {
super(exitCode, String.format(Locale.ENGLISH, format, args));
if (args.length > 0 && (args[args.length - 1] instanceof Throwable)) {
initCause((Throwable) args[args.length - 1]);
}
}
/**
* Create a formatted exception.
* <p>
* This uses {@link String#format(String, Object...)}
* to build the formatted exception in the ENGLISH locale.
* @param exitCode exit code
* @param cause inner cause
* @param format format for message to use in exception
* @param args list of arguments
*/
public ServiceLaunchException(int exitCode, Throwable cause,
String format, Object... args) {
super(exitCode, String.format(Locale.ENGLISH, format, args), cause);
}
}
| ServiceLaunchException |
java | elastic__elasticsearch | x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rest/RestInferenceActionProxyTests.java | {
"start": 930,
"end": 1951
} | class ____ extends RestActionTestCase {
@Before
public void setUpAction() {
controller().registerHandler(new RestInferenceAction());
}
public void testStreamIsFalse() {
SetOnce<Boolean> executeCalled = new SetOnce<>();
verifyingClient.setExecuteVerifier(((actionType, actionRequest) -> {
assertThat(actionRequest, instanceOf(InferenceActionProxy.Request.class));
var request = (InferenceActionProxy.Request) actionRequest;
assertThat(request.isStreaming(), is(false));
executeCalled.set(true);
return createResponse();
}));
RestRequest inferenceRequest = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.POST)
.withPath("_inference/test")
.withContent(new BytesArray("{}"), XContentType.JSON)
.build();
dispatchRequest(inferenceRequest);
assertThat(executeCalled.get(), equalTo(true));
}
}
| RestInferenceActionProxyTests |
java | hibernate__hibernate-orm | hibernate-envers/src/test/java/org/hibernate/orm/test/envers/integration/jta/DeleteCollectionJtaSessionClosedBeforeCommitTest.java | {
"start": 1612,
"end": 3431
} | class ____ {
private static final int ENTITY_ID = 1;
private static final int OTHER_ENTITY_ID = 2;
@BeforeClassTemplate
public void initData(EntityManagerFactoryScope scope) throws Exception {
TestingJtaPlatformImpl.INSTANCE.getTransactionManager().begin();
var entityManager = scope.getEntityManagerFactory().createEntityManager();
try {
TestEntity entity = new TestEntity( ENTITY_ID, "Fab" );
entityManager.persist( entity );
OtherTestEntity other = new OtherTestEntity( OTHER_ENTITY_ID, "other" );
entity.addOther( other );
entityManager.persist( entity );
entityManager.persist( other );
entityManager.flush();
}
finally {
entityManager.close();
TestingJtaPlatformImpl.tryCommit();
}
TestingJtaPlatformImpl.INSTANCE.getTransactionManager().begin();
entityManager = scope.getEntityManagerFactory().createEntityManager();
try {
TestEntity entity = entityManager.find( TestEntity.class, ENTITY_ID );
OtherTestEntity other = entityManager.find( OtherTestEntity.class, OTHER_ENTITY_ID );
entityManager.remove( entity );
entityManager.remove( other );
}
finally {
TestingJtaPlatformImpl.tryCommit();
entityManager.close();
}
}
@Test
public void testRevisionCounts(EntityManagerFactoryScope scope) {
scope.inEntityManager( entityManager -> assertEquals(
Arrays.asList( 1, 2 ),
AuditReaderFactory.get( entityManager ).getRevisions( TestEntity.class, ENTITY_ID )
) );
}
@Test
public void testRevisionHistory(EntityManagerFactoryScope scope) {
scope.inEntityManager( entityManager -> assertEquals(
new TestEntity( 1, "Fab" ),
AuditReaderFactory.get( entityManager ).find( TestEntity.class, ENTITY_ID, 1 )
) );
}
@Audited
@Entity
@Table(name = "ENTITY")
public static | DeleteCollectionJtaSessionClosedBeforeCommitTest |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/issue_1300/Issue1330_decimal.java | {
"start": 1383,
"end": 1438
} | class ____ {
public BigDecimal value;
}
}
| Model |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/gateway/ClusterStateUpdaters.java | {
"start": 1437,
"end": 8437
} | class ____ {
private static final Logger logger = LogManager.getLogger(ClusterStateUpdaters.class);
public static ClusterState setLocalNode(
ClusterState clusterState,
DiscoveryNode localNode,
CompatibilityVersions compatibilityVersions
) {
return ClusterState.builder(clusterState)
.nodes(DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).build())
.putCompatibilityVersions(localNode.getId(), compatibilityVersions)
.build();
}
public static ClusterState upgradeAndArchiveUnknownOrInvalidSettings(
final ClusterState clusterState,
final ClusterSettings clusterSettings
) {
final Metadata.Builder metadataBuilder = Metadata.builder(clusterState.metadata());
metadataBuilder.persistentSettings(
clusterSettings.archiveUnknownOrInvalidSettings(
metadataBuilder.persistentSettings(),
e -> logUnknownSetting("persistent", e),
(e, ex) -> logInvalidSetting("persistent", e, ex)
)
);
metadataBuilder.transientSettings(
clusterSettings.archiveUnknownOrInvalidSettings(
metadataBuilder.transientSettings(),
e -> logUnknownSetting("transient", e),
(e, ex) -> logInvalidSetting("transient", e, ex)
)
);
return ClusterState.builder(clusterState).metadata(metadataBuilder).build();
}
private static void logUnknownSetting(final String settingType, final Map.Entry<String, String> e) {
logger.warn("ignoring unknown {} setting: [{}] with value [{}]; archiving", settingType, e.getKey(), e.getValue());
}
private static void logInvalidSetting(final String settingType, final Map.Entry<String, String> e, final IllegalArgumentException ex) {
logger.warn(
() -> format("ignoring invalid %s setting: [%s] with value [%s]; archiving", settingType, e.getKey(), e.getValue()),
ex
);
}
public static ClusterState recoverClusterBlocks(final ClusterState state) {
final ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(state.blocks());
if (Metadata.SETTING_READ_ONLY_SETTING.get(state.metadata().settings())) {
blocks.addGlobalBlock(Metadata.CLUSTER_READ_ONLY_BLOCK);
}
if (Metadata.SETTING_READ_ONLY_ALLOW_DELETE_SETTING.get(state.metadata().settings())) {
blocks.addGlobalBlock(Metadata.CLUSTER_READ_ONLY_ALLOW_DELETE_BLOCK);
}
state.forEachProject(projectState -> {
for (final IndexMetadata indexMetadata : projectState.metadata()) {
blocks.addBlocks(projectState.projectId(), indexMetadata);
}
});
return ClusterState.builder(state).blocks(blocks).build();
}
static ClusterState updateRoutingTable(final ClusterState state, ShardRoutingRoleStrategy shardRoutingRoleStrategy) {
// initialize all index routing tables as empty
final GlobalRoutingTable.Builder globalRoutingTableBuilder = GlobalRoutingTable.builder(state.globalRoutingTable());
for (var projectMetadata : state.metadata().projects().values()) {
final RoutingTable.Builder routingTableBuilder = RoutingTable.builder(
shardRoutingRoleStrategy,
state.routingTable(projectMetadata.id())
);
for (final IndexMetadata indexMetadata : projectMetadata) {
routingTableBuilder.addAsRecovery(indexMetadata);
}
globalRoutingTableBuilder.put(projectMetadata.id(), routingTableBuilder);
}
return ClusterState.builder(state).routingTable(globalRoutingTableBuilder.build()).build();
}
static ClusterState removeStateNotRecoveredBlock(final ClusterState state) {
return ClusterState.builder(state)
.blocks(ClusterBlocks.builder().blocks(state.blocks()).removeGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK).build())
.build();
}
public static ClusterState addStateNotRecoveredBlock(ClusterState state) {
return ClusterState.builder(state)
.blocks(ClusterBlocks.builder().blocks(state.blocks()).addGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK).build())
.build();
}
static ClusterState mixCurrentStateAndRecoveredState(final ClusterState currentState, final ClusterState recoveredState) {
assert currentState.metadata().getTotalNumberOfIndices() == 0;
final ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks()).blocks(recoveredState.blocks());
final Metadata.Builder metadataBuilder = Metadata.builder(recoveredState.metadata());
// automatically generate a UID for the metadata if we need to
metadataBuilder.generateClusterUuidIfNeeded();
for (final ProjectMetadata projectMetadata : recoveredState.metadata().projects().values()) {
for (final IndexMetadata indexMetadata : projectMetadata) {
metadataBuilder.getProject(projectMetadata.id()).put(indexMetadata, false);
}
}
return ClusterState.builder(currentState).blocks(blocks).metadata(metadataBuilder).build();
}
public static ClusterState hideStateIfNotRecovered(ClusterState state) {
if (state.blocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK)) {
final ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(state.blocks());
blocks.removeGlobalBlock(Metadata.CLUSTER_READ_ONLY_BLOCK);
blocks.removeGlobalBlock(Metadata.CLUSTER_READ_ONLY_ALLOW_DELETE_BLOCK);
state.forEachProject(projectState -> {
for (IndexMetadata indexMetadata : projectState.metadata()) {
blocks.removeIndexBlocks(projectState.projectId(), indexMetadata.getIndex().getName());
}
});
final Metadata metadata = Metadata.builder()
.clusterUUID(state.metadata().clusterUUID())
.coordinationMetadata(state.metadata().coordinationMetadata())
.build();
assert state.globalRoutingTable().hasIndices() == false
: "routing table is not empty: " + state.globalRoutingTable().routingTables();
// metadata has been rebuilt from scratch, so clear the routing table
final GlobalRoutingTable.Builder globalRoutingTableBuilder = GlobalRoutingTable.builder();
metadata.projects().keySet().forEach(projectId -> globalRoutingTableBuilder.put(projectId, RoutingTable.EMPTY_ROUTING_TABLE));
return ClusterState.builder(state)
.routingTable(globalRoutingTableBuilder.build())
.metadata(metadata)
.blocks(blocks.build())
.build();
}
return state;
}
}
| ClusterStateUpdaters |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/cache/spi/StandardCacheTransactionSynchronization.java | {
"start": 169,
"end": 375
} | class ____ extends AbstractCacheTransactionSynchronization {
public StandardCacheTransactionSynchronization(RegionFactory regionFactory) {
super( regionFactory );
}
}
| StandardCacheTransactionSynchronization |
java | apache__camel | core/camel-base-engine/src/main/java/org/apache/camel/impl/engine/DefaultCompileStrategy.java | {
"start": 939,
"end": 1204
} | class ____ implements CompileStrategy {
private String workDir;
@Override
public String getWorkDir() {
return workDir;
}
@Override
public void setWorkDir(String workDir) {
this.workDir = workDir;
}
}
| DefaultCompileStrategy |
java | spring-projects__spring-framework | spring-aspects/src/test/java/org/springframework/transaction/aspectj/TransactionAspectTests.java | {
"start": 5928,
"end": 6132
} | class ____ implements ITransactional {
@Override
public Object echo(Throwable t) throws Throwable {
if (t != null) {
throw t;
}
return t;
}
}
public static | ImplementsAnnotatedInterface |
java | apache__commons-lang | src/test/java/org/apache/commons/lang3/ValidateTest.java | {
"start": 33046,
"end": 33543
} | class ____ {
@Test
void shouldNotThrowForTrueExpression() {
Validate.isTrue(true);
}
@Test
void shouldThrowExceptionWithDefaultMessageForFalseExpression() {
final IllegalArgumentException ex = assertIllegalArgumentException(() -> Validate.isTrue(false));
assertEquals("The validated expression is false", ex.getMessage());
}
}
}
@Nested
final | WithoutMessage |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java | {
"start": 29098,
"end": 33308
} | class ____ {
protected Path path;
protected short ancestorPermission;
protected short parentPermission;
private short permission;
protected short requiredAncestorPermission;
protected short requiredParentPermission;
protected short requiredPermission;
final static protected short opAncestorPermission = SEARCH_MASK;
protected short opParentPermission;
protected short opPermission;
protected UserGroupInformation ugi;
/* initialize */
protected void set(Path path, short ancestorPermission,
short parentPermission, short permission) {
this.path = path;
this.ancestorPermission = ancestorPermission;
this.parentPermission = parentPermission;
this.permission = permission;
setOpPermission();
this.ugi = null;
}
/* Perform an operation and verify if the permission checking is correct */
void verifyPermission(UserGroupInformation ugi) throws IOException {
if (this.ugi != ugi) {
setRequiredPermissions(ugi);
this.ugi = ugi;
}
try {
try {
call();
assertFalse(expectPermissionDeny());
} catch(AccessControlException e) {
assertTrue(expectPermissionDeny());
}
} catch (AssertionError ae) {
logPermissions();
throw ae;
}
}
/** Log the permissions and required permissions */
protected void logPermissions() {
LOG.info("required ancestor permission:"
+ Integer.toOctalString(requiredAncestorPermission));
LOG.info("ancestor permission: "
+ Integer.toOctalString(ancestorPermission));
LOG.info("required parent permission:"
+ Integer.toOctalString(requiredParentPermission));
LOG.info("parent permission: " + Integer.toOctalString(parentPermission));
LOG.info("required permission:"
+ Integer.toOctalString(requiredPermission));
LOG.info("permission: " + Integer.toOctalString(permission));
}
/* Return true if an AccessControlException is expected */
protected boolean expectPermissionDeny() {
return (requiredPermission & permission) != requiredPermission
|| (requiredParentPermission & parentPermission) !=
requiredParentPermission
|| (requiredAncestorPermission & ancestorPermission) !=
requiredAncestorPermission;
}
/* Set the permissions required to pass the permission checking */
protected void setRequiredPermissions(UserGroupInformation ugi) {
if (SUPERUSER.equals(ugi)) {
requiredAncestorPermission = SUPER_MASK;
requiredParentPermission = SUPER_MASK;
requiredPermission = SUPER_MASK;
} else if (USER1.equals(ugi)) {
requiredAncestorPermission = (short)(opAncestorPermission & OWNER_MASK);
requiredParentPermission = (short)(opParentPermission & OWNER_MASK);
requiredPermission = (short)(opPermission & OWNER_MASK);
} else if (USER2.equals(ugi)) {
requiredAncestorPermission = (short)(opAncestorPermission & GROUP_MASK);
requiredParentPermission = (short)(opParentPermission & GROUP_MASK);
requiredPermission = (short)(opPermission & GROUP_MASK);
} else if (USER3.equals(ugi)) {
requiredAncestorPermission = (short)(opAncestorPermission & OTHER_MASK);
requiredParentPermission = (short)(opParentPermission & OTHER_MASK);
requiredPermission = (short)(opPermission & OTHER_MASK);
} else {
throw new IllegalArgumentException("Non-supported user: " + ugi);
}
}
/* Set the rwx permissions required for the operation */
abstract void setOpPermission();
/* Perform the operation */
abstract void call() throws IOException;
}
final static private short SUPER_MASK = 0;
final static private short READ_MASK = 0444;
final static private short WRITE_MASK = 0222;
final static private short SEARCH_MASK = 0111;
final static private short NULL_MASK = 0;
final static private short OWNER_MASK = 0700;
final static private short GROUP_MASK = 0070;
final static private short OTHER_MASK = 0007;
/* A | PermissionVerifier |
java | google__truth | core/src/main/java/com/google/common/truth/SimpleSubjectBuilder.java | {
"start": 1221,
"end": 1981
} | class ____<SubjectT extends Subject, ActualT> {
private final FailureMetadata metadata;
private final Subject.Factory<SubjectT, ActualT> subjectFactory;
private SimpleSubjectBuilder(
FailureMetadata metadata, Subject.Factory<SubjectT, ActualT> subjectFactory) {
this.metadata = checkNotNull(metadata);
this.subjectFactory = checkNotNull(subjectFactory);
}
public SubjectT that(@Nullable ActualT actual) {
return subjectFactory.createSubject(metadata, actual);
}
static <SubjectT extends Subject, ActualT> SimpleSubjectBuilder<SubjectT, ActualT> create(
FailureMetadata metadata, Subject.Factory<SubjectT, ActualT> subjectFactory) {
return new SimpleSubjectBuilder<>(metadata, subjectFactory);
}
}
| SimpleSubjectBuilder |
java | spring-projects__spring-security | core/src/main/java/org/springframework/security/authentication/event/AuthenticationFailureDisabledEvent.java | {
"start": 978,
"end": 1296
} | class ____ extends AbstractAuthenticationFailureEvent {
@Serial
private static final long serialVersionUID = 8037552364666766279L;
public AuthenticationFailureDisabledEvent(Authentication authentication, AuthenticationException exception) {
super(authentication, exception);
}
}
| AuthenticationFailureDisabledEvent |
java | grpc__grpc-java | netty/src/main/java/io/grpc/netty/InternalNettyServerBuilder.java | {
"start": 993,
"end": 2548
} | class ____ {
public static void setStatsEnabled(NettyServerBuilder builder, boolean value) {
builder.setStatsEnabled(value);
}
public static void setStatsRecordStartedRpcs(NettyServerBuilder builder, boolean value) {
builder.setStatsRecordStartedRpcs(value);
}
public static void setStatsRecordRealTimeMetrics(NettyServerBuilder builder, boolean value) {
builder.setStatsRecordRealTimeMetrics(value);
}
public static void setTracingEnabled(NettyServerBuilder builder, boolean value) {
builder.setTracingEnabled(value);
}
public static void setForceHeapBuffer(NettyServerBuilder builder, boolean value) {
builder.setForceHeapBuffer(value);
}
/**
* Sets {@link io.grpc.Channel} and {@link io.netty.channel.EventLoopGroup}s to Nio. A major
* benefit over using existing setters is gRPC will manage the life cycle of {@link
* io.netty.channel.EventLoopGroup}s.
*/
public static void useNioTransport(NettyServerBuilder builder) {
builder.channelType(NioServerSocketChannel.class);
builder
.bossEventLoopGroupPool(SharedResourcePool.forResource(Utils.NIO_BOSS_EVENT_LOOP_GROUP));
builder
.workerEventLoopGroupPool(
SharedResourcePool.forResource(Utils.NIO_WORKER_EVENT_LOOP_GROUP));
}
/** Sets the EAG attributes available to protocol negotiators. */
public static void eagAttributes(NettyServerBuilder builder, Attributes eagAttributes) {
builder.eagAttributes(eagAttributes);
}
private InternalNettyServerBuilder() {}
}
| InternalNettyServerBuilder |
java | spring-projects__spring-boot | core/spring-boot-test/src/test/java/org/springframework/boot/test/json/ObjectContentAssertTests.java | {
"start": 1013,
"end": 2374
} | class ____ {
private static final ExampleObject SOURCE = new ExampleObject();
private static final ExampleObject DIFFERENT;
static {
DIFFERENT = new ExampleObject();
DIFFERENT.setAge(123);
}
@Test
void isEqualToWhenObjectsAreEqualShouldPass() {
assertThat(forObject(SOURCE)).isEqualTo(SOURCE);
}
@Test
void isEqualToWhenObjectsAreDifferentShouldFail() {
assertThatExceptionOfType(AssertionError.class)
.isThrownBy(() -> assertThat(forObject(SOURCE)).isEqualTo(DIFFERENT));
}
@Test
void asArrayForArrayShouldReturnObjectArrayAssert() {
ExampleObject[] source = new ExampleObject[] { SOURCE };
assertThat(forObject(source)).asArray().containsExactly(SOURCE);
}
@Test
void asArrayForNonArrayShouldFail() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> assertThat(forObject(SOURCE)).asArray());
}
@Test
void asMapForMapShouldReturnMapAssert() {
Map<String, ExampleObject> source = Collections.singletonMap("a", SOURCE);
assertThat(forObject(source)).asMap().containsEntry("a", SOURCE);
}
@Test
void asMapForNonMapShouldFail() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> assertThat(forObject(SOURCE)).asMap());
}
private AssertProvider<ObjectContentAssert<Object>> forObject(Object source) {
return () -> new ObjectContentAssert<>(source);
}
}
| ObjectContentAssertTests |
java | processing__processing4 | app/src/processing/app/syntax/PdeInputHandler.java | {
"start": 1428,
"end": 10565
} | class ____ extends DefaultInputHandler {
/**
* Need the Editor object for Input Method changes, plus most subclasses
* will want a local copy anyway. Changed after Processing 3.1.2, need to
* see if this breaks any other Modes before releasing.
*/
protected Editor editor;
/**
* Recommended constructor.
* @since 3.2
*/
public PdeInputHandler(Editor editor) {
// Make sure the default constructor is called to set up the basics
this();
this.editor = editor;
}
/**
* Not recommended, but included for API compatibility.
*/
public PdeInputHandler() {
// Use option on macOS for many text edit controls that are ctrl on Windows/Linux.
// (i.e. ctrl-left/right on Windows/Linux is option-left/right on macOS)
String altOrCtrl = Platform.isMacOS() ? "A" : "C";
// right now, ctrl-up/down is select up/down, but mod should be
// used instead, because the mac expects it to be option(alt)
addKeyBinding("BACK_SPACE", InputHandler.BACKSPACE);
// for 0122, shift-backspace is delete, for 0176, it's now a preference,
// to prevent holy warriors from attacking me for it.
if (Preferences.getBoolean("editor.keys.shift_backspace_is_delete")) {
addKeyBinding("S+BACK_SPACE", InputHandler.DELETE);
} else {
// Made the default for 0215, deemed better for our audience.
addKeyBinding("S+BACK_SPACE", InputHandler.BACKSPACE);
}
addKeyBinding("DELETE", InputHandler.DELETE);
addKeyBinding("S+DELETE", InputHandler.DELETE);
// the following two were changed for 0122 for better mac/pc compatibility
addKeyBinding(altOrCtrl + "+BACK_SPACE", InputHandler.BACKSPACE_WORD); // 0122
addKeyBinding(altOrCtrl + "S+BACK_SPACE", InputHandler.BACKSPACE_WORD); // 0215
addKeyBinding(altOrCtrl + "+DELETE", InputHandler.DELETE_WORD); // 0122
addKeyBinding(altOrCtrl + "S+DELETE", InputHandler.DELETE_WORD); // 0215
// handled by listener, don't bother here
//addKeyBinding("ENTER", InputHandler.INSERT_BREAK);
//addKeyBinding("TAB", InputHandler.INSERT_TAB);
addKeyBinding("INSERT", InputHandler.OVERWRITE);
// https://processing.org/bugs/bugzilla/162.html
// added for 0176, though the bindings do not appear relevant for osx
if (Preferences.getBoolean("editor.keys.alternative_cut_copy_paste")) {
addKeyBinding("C+INSERT", InputHandler.CLIPBOARD_COPY);
addKeyBinding("S+INSERT", InputHandler.CLIPBOARD_PASTE);
addKeyBinding("S+DELETE", InputHandler.CLIPBOARD_CUT);
}
// disabling for 0122, not sure what this does
//addKeyBinding("C+\\", InputHandler.TOGGLE_RECT);
// for 0122, these have been changed for better compatibility
// HOME and END now mean the beginning/end of the document
// for 0176 changed this to a preference so that the Mac OS X people
// can get the "normal" behavior as well if they prefer.
if (Preferences.getBoolean("editor.keys.home_and_end_travel_far")) {
addKeyBinding("HOME", InputHandler.DOCUMENT_HOME);
addKeyBinding("END", InputHandler.DOCUMENT_END);
addKeyBinding("S+HOME", InputHandler.SELECT_DOC_HOME);
addKeyBinding("S+END", InputHandler.SELECT_DOC_END);
} else {
// for 0123 added the proper windows defaults
addKeyBinding("HOME", InputHandler.HOME);
addKeyBinding("END", InputHandler.END);
addKeyBinding("S+HOME", InputHandler.SELECT_HOME);
addKeyBinding("S+END", InputHandler.SELECT_END);
addKeyBinding("C+HOME", InputHandler.DOCUMENT_HOME);
addKeyBinding("C+END", InputHandler.DOCUMENT_END);
addKeyBinding("CS+HOME", InputHandler.SELECT_DOC_HOME);
addKeyBinding("CS+END", InputHandler.SELECT_DOC_END);
}
if (Platform.isMacOS()) {
// Additional OS X key bindings added for 0215.
// Also note that two more are added above and marked 0215.
// https://github.com/processing/processing/issues/1392
// "Mac keyboard shortcuts" document from Apple:
// https://support.apple.com/en-us/HT201236
// control-A move to start of current paragraph
addKeyBinding("C+A", InputHandler.HOME);
addKeyBinding("CS+A", InputHandler.SELECT_HOME);
// control-E move to end of current paragraph
addKeyBinding("C+E", InputHandler.END);
addKeyBinding("CS+E", InputHandler.SELECT_END);
// control-D forward delete
addKeyBinding("C+D", InputHandler.DELETE);
// control-B move left one character
addKeyBinding("C+B", InputHandler.PREV_CHAR);
addKeyBinding("CS+B", InputHandler.SELECT_PREV_CHAR);
// control-F move right one character
addKeyBinding("C+F", InputHandler.NEXT_CHAR);
addKeyBinding("CS+F", InputHandler.SELECT_NEXT_CHAR);
// control-H delete (just ASCII for backspace)
addKeyBinding("C+H", InputHandler.BACKSPACE);
// control-N move down one line
addKeyBinding("C+N", InputHandler.NEXT_LINE);
addKeyBinding("CS+N", InputHandler.SELECT_NEXT_LINE);
// control-P move up one line
addKeyBinding("C+P", InputHandler.PREV_LINE);
addKeyBinding("CS+P", InputHandler.SELECT_PREV_LINE);
// might be nice, but no handlers currently available
// control-O insert new line after cursor
// control-T transpose (swap) two surrounding character
// control-V move to end, then left one character
// control-K delete remainder of current paragraph
// control-Y paste text previously deleted with control-K
}
String metaOrCtrl = Platform.isMacOS() ? "M" : "C";
addKeyBinding(metaOrCtrl + "+LEFT", InputHandler.HOME);
addKeyBinding(metaOrCtrl + "+RIGHT", InputHandler.END);
addKeyBinding(metaOrCtrl + "S+LEFT", InputHandler.SELECT_HOME); // 0122
addKeyBinding(metaOrCtrl + "S+RIGHT", InputHandler.SELECT_END); // 0122
addKeyBinding(metaOrCtrl + "+UP", InputHandler.DOCUMENT_HOME); // 1276
addKeyBinding(metaOrCtrl + "+DOWN", InputHandler.DOCUMENT_END); // 1276
addKeyBinding(metaOrCtrl + "S+UP", InputHandler.SELECT_DOC_HOME);
addKeyBinding(metaOrCtrl + "S+DOWN", InputHandler.SELECT_DOC_END);
//
addKeyBinding("PAGE_UP", InputHandler.PREV_PAGE);
addKeyBinding("PAGE_DOWN", InputHandler.NEXT_PAGE);
addKeyBinding("S+PAGE_UP", InputHandler.SELECT_PREV_PAGE);
addKeyBinding("S+PAGE_DOWN", InputHandler.SELECT_NEXT_PAGE);
addKeyBinding("LEFT", InputHandler.PREV_CHAR);
addKeyBinding("S+LEFT", InputHandler.SELECT_PREV_CHAR);
addKeyBinding(altOrCtrl + "+LEFT", InputHandler.PREV_WORD);
addKeyBinding(altOrCtrl + "S+LEFT", InputHandler.SELECT_PREV_WORD);
addKeyBinding("RIGHT", InputHandler.NEXT_CHAR);
addKeyBinding("S+RIGHT", InputHandler.SELECT_NEXT_CHAR);
addKeyBinding(altOrCtrl + "+RIGHT", InputHandler.NEXT_WORD);
addKeyBinding(altOrCtrl + "S+RIGHT", InputHandler.SELECT_NEXT_WORD);
addKeyBinding("UP", InputHandler.PREV_LINE);
addKeyBinding(altOrCtrl + "+UP", InputHandler.PREV_LINE); // p5
addKeyBinding("S+UP", InputHandler.SELECT_PREV_LINE);
addKeyBinding("DOWN", InputHandler.NEXT_LINE);
addKeyBinding(altOrCtrl + "+DOWN", InputHandler.NEXT_LINE); // p5
addKeyBinding("S+DOWN", InputHandler.SELECT_NEXT_LINE);
addKeyBinding(altOrCtrl + "+ENTER", InputHandler.REPEAT);
}
protected boolean isMnemonic(KeyEvent event) {
// Don't do this on OS X, because alt (the option key) is used for
// non-ASCII chars, and there are no menu mnemonics to speak of
if (!Platform.isMacOS()) {
if (event.isAltDown() && !event.isControlDown() &&
event.getKeyChar() != KeyEvent.VK_UNDEFINED) {
// This is probably a menu mnemonic, don't pass it through.
// If it's an alt-NNNN sequence, those only work on the keypad
// and pass through UNDEFINED as the keyChar.
return true;
}
}
return false;
}
public void keyPressed(KeyEvent event) {
// don't pass the ctrl-, through to the editor
// https://github.com/processing/processing/issues/3074
if (event.isControlDown() && event.getKeyChar() == ',') {
return;
}
// don't pass menu mnemonics (alt-f for file, etc) to the editor
if (isMnemonic(event)) {
return;
}
if (!handlePressed(event)) {
super.keyPressed(event);
}
}
public void keyTyped(KeyEvent event) {
if (isMnemonic(event)) {
return;
}
if (!handleTyped(event)) {
super.keyTyped(event);
}
}
// we don't need keyReleased(), so that's passed through automatically
/**
* Override this function in your InputHandler to do any gymnastics.
* @return true if key has been handled (no further handling should be done)
*/
public boolean handlePressed(KeyEvent event) {
return false;
}
/**
* Override this instead of keyPressed/keyTyped
* @return true if key has been handled (no further handling should be done)
*/
public boolean handleTyped(KeyEvent event) {
return false;
}
public void handleInputMethodCommit() {
editor.getSketch().setModified(true);
}
} | PdeInputHandler |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/streaming/runtime/tasks/mailbox/MailboxProcessor.java | {
"start": 2396,
"end": 3237
} | class ____ the {@link
* MailboxController} to communicate control flow changes to the mailbox loop, e.g. that invocations
* of the default action are temporarily or permanently exhausted.
*
* <p>The design of {@link #runMailboxLoop()} is centered around the idea of keeping the expected
* hot path (default action, no mail) as fast as possible. This means that all checking of mail and
* other control flags (mailboxLoopRunning, suspendedDefaultAction) are always connected to #hasMail
* indicating true. This means that control flag changes in the mailbox thread can be done directly,
* but we must ensure that there is at least one action in the mailbox so that the change is picked
* up. For control flag changes by all other threads, that must happen through mailbox actions, this
* is automatically the case.
*
* <p>This | through |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/jsontype/jdk/EnumTypingTest.java | {
"start": 952,
"end": 1005
} | class ____ extends ArrayList<Tag> { }
static | TagList |
java | junit-team__junit5 | junit-platform-launcher/src/main/java/org/junit/platform/launcher/Launcher.java | {
"start": 2277,
"end": 7206
} | interface ____ {
/**
* Register one or more listeners for test discovery.
*
* @param listeners the listeners to be notified of test discovery events;
* never {@code null} or empty
*/
@API(status = STABLE, since = "1.10")
void registerLauncherDiscoveryListeners(LauncherDiscoveryListener... listeners);
/**
* Register one or more listeners for test execution.
*
* @param listeners the listeners to be notified of test execution events;
* never {@code null} or empty
*/
void registerTestExecutionListeners(TestExecutionListener... listeners);
/**
* Discover tests and build a {@link TestPlan} according to the supplied
* {@link LauncherDiscoveryRequest} by querying all registered engines and
* collecting their results.
*
* @apiNote This method may be called to generate a preview of the test
* tree. The resulting {@link TestPlan} is unmodifiable and may be passed to
* {@link #execute(TestPlan, TestExecutionListener...)} for execution at
* most once.
*
* @param discoveryRequest the launcher discovery request; never {@code null}
* @return an unmodifiable {@code TestPlan} that contains all resolved
* {@linkplain TestIdentifier identifiers} from all registered engines
*/
TestPlan discover(LauncherDiscoveryRequest discoveryRequest);
/**
* Execute a {@link TestPlan} which is built according to the supplied
* {@link LauncherDiscoveryRequest} by querying all registered engines and
* collecting their results, and notify
* {@linkplain #registerTestExecutionListeners registered listeners} about
* the progress and results of the execution.
*
* <p>Supplied test execution listeners are registered in addition to already
* registered listeners but only for the supplied launcher discovery request.
*
* @apiNote Calling this method will cause test discovery to be executed for
* all registered engines. If the same {@link LauncherDiscoveryRequest} was
* previously passed to {@link #discover(LauncherDiscoveryRequest)}, you
* should instead call {@link #execute(TestPlan, TestExecutionListener...)}
* and pass the already acquired {@link TestPlan} to avoid the potential
* performance degradation (e.g., classpath scanning) of running test
* discovery twice.
*
* @param discoveryRequest the launcher discovery request; never {@code null}
* @param listeners additional test execution listeners; never {@code null}
* @see #execute(TestPlan, TestExecutionListener...)
* @see #execute(LauncherExecutionRequest)
*/
void execute(LauncherDiscoveryRequest discoveryRequest, TestExecutionListener... listeners);
/**
* Execute the supplied {@link TestPlan} and notify
* {@linkplain #registerTestExecutionListeners registered listeners} about
* the progress and results of the execution.
*
* <p>Supplied test execution listeners are registered in addition to
* already registered listeners but only for the execution of the supplied
* test plan.
*
* @apiNote The supplied {@link TestPlan} must not have been executed
* previously.
*
* @param testPlan the test plan to execute; never {@code null}
* @param listeners additional test execution listeners; never {@code null}
* @since 1.4
* @see #execute(LauncherDiscoveryRequest, TestExecutionListener...)
* @see #execute(LauncherExecutionRequest)
*/
@API(status = STABLE, since = "1.4")
void execute(TestPlan testPlan, TestExecutionListener... listeners);
/**
* Execute tests according to the supplied {@link LauncherExecutionRequest} and
* notify {@linkplain #registerTestExecutionListeners registered listeners} about
* the progress and results of the execution.
*
* <p>Test execution listeners supplied
* {@linkplain LauncherExecutionRequest#getAdditionalTestExecutionListeners()
* as part of the request} are registered in addition to already registered
* listeners but only for the supplied execution request.
*
* @apiNote If the execution request contains a {@link TestPlan} rather than
* a {@link LauncherDiscoveryRequest}, it must not have been executed
* previously.
*
* <p>If the execution request contains a {@link LauncherDiscoveryRequest},
* calling this method will cause test discovery to be executed for all
* registered engines. If the same {@link LauncherDiscoveryRequest} was
* previously passed to {@link #discover(LauncherDiscoveryRequest)}, you
* should instead provide the resulting {@link TestPlan} as part of the
* supplied execution request to avoid the potential performance degradation
* (e.g., classpath scanning) of running test discovery twice.
*
* @param executionRequest the launcher execution request; never {@code null}
* @since 6.0
* @see #execute(LauncherDiscoveryRequest, TestExecutionListener...)
* @see #execute(TestPlan, TestExecutionListener...)
*/
@API(status = MAINTAINED, since = "6.0")
void execute(LauncherExecutionRequest executionRequest);
}
| Launcher |
java | google__guice | extensions/servlet/test/com/google/inject/servlet/FilterDispatchIntegrationTest.java | {
"start": 14236,
"end": 14493
} | class ____ extends HttpServlet {
@Override
protected void service(HttpServletRequest req, HttpServletResponse resp)
throws ServletException {
throw new ServletException("failure!");
}
}
@Singleton
private static | ThrowingServlet |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/cascade/RefreshTest.java | {
"start": 3196,
"end": 3662
} | class ____ {
@Id @GeneratedValue
Long id;
@Column( nullable = false )
@Temporal( TemporalType.TIMESTAMP )
Date batchDate;
@OneToMany( mappedBy = "batch", fetch = FetchType.LAZY, cascade = CascadeType.ALL )
@Fetch( FetchMode.SELECT )
Set<Job> jobs = new HashSet<>();
JobBatch() {}
JobBatch(Date batchDate) {
this.batchDate = batchDate;
}
Job createJob() {
Job job = new Job( this );
jobs.add( job );
return job;
}
}
}
| JobBatch |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java | {
"start": 18274,
"end": 21133
} | class ____ implements Task.Status {
public static final String NAME = "resync";
private final String phase;
private final int totalOperations;
private final int resyncedOperations;
private final int skippedOperations;
public Status(StreamInput in) throws IOException {
phase = in.readString();
totalOperations = in.readVInt();
resyncedOperations = in.readVInt();
skippedOperations = in.readVInt();
}
public Status(String phase, int totalOperations, int resyncedOperations, int skippedOperations) {
this.phase = requireNonNull(phase, "Phase cannot be null");
this.totalOperations = totalOperations;
this.resyncedOperations = resyncedOperations;
this.skippedOperations = skippedOperations;
}
@Override
public String getWriteableName() {
return NAME;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field("phase", phase);
builder.field("totalOperations", totalOperations);
builder.field("resyncedOperations", resyncedOperations);
builder.field("skippedOperations", skippedOperations);
builder.endObject();
return builder;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(phase);
out.writeVLong(totalOperations);
out.writeVLong(resyncedOperations);
out.writeVLong(skippedOperations);
}
@Override
public String toString() {
return Strings.toString(this);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Status status = (Status) o;
if (totalOperations != status.totalOperations) return false;
if (resyncedOperations != status.resyncedOperations) return false;
if (skippedOperations != status.skippedOperations) return false;
return phase.equals(status.phase);
}
@Override
public int hashCode() {
int result = phase.hashCode();
result = 31 * result + totalOperations;
result = 31 * result + resyncedOperations;
result = 31 * result + skippedOperations;
return result;
}
}
}
}
| Status |
java | apache__camel | components/camel-debezium/camel-debezium-db2/src/generated/java/org/apache/camel/component/debezium/db2/configuration/Db2ConnectorEmbeddedDebeziumConfiguration.java | {
"start": 34932,
"end": 42631
} | class ____ returns SourceInfo
* schema and struct.
*/
public void setSourceinfoStructMaker(String sourceinfoStructMaker) {
this.sourceinfoStructMaker = sourceinfoStructMaker;
}
public String getSourceinfoStructMaker() {
return sourceinfoStructMaker;
}
/**
* The Kafka bootstrap server address used as input/output namespace/
*/
public void setOpenlineageIntegrationDatasetKafkaBootstrapServers(
String openlineageIntegrationDatasetKafkaBootstrapServers) {
this.openlineageIntegrationDatasetKafkaBootstrapServers = openlineageIntegrationDatasetKafkaBootstrapServers;
}
public String getOpenlineageIntegrationDatasetKafkaBootstrapServers() {
return openlineageIntegrationDatasetKafkaBootstrapServers;
}
/**
* The name of the schema where CDC control structures are located; defaults
* to 'ASNCDC'
*/
public void setCdcControlSchema(String cdcControlSchema) {
this.cdcControlSchema = cdcControlSchema;
}
public String getCdcControlSchema() {
return cdcControlSchema;
}
/**
* Flag specifying whether built-in tables should be ignored.
*/
public void setTableIgnoreBuiltin(boolean tableIgnoreBuiltin) {
this.tableIgnoreBuiltin = tableIgnoreBuiltin;
}
public boolean isTableIgnoreBuiltin() {
return tableIgnoreBuiltin;
}
/**
* Enable Debezium to emit data lineage metadata through OpenLineage API
*/
public void setOpenlineageIntegrationEnabled(
boolean openlineageIntegrationEnabled) {
this.openlineageIntegrationEnabled = openlineageIntegrationEnabled;
}
public boolean isOpenlineageIntegrationEnabled() {
return openlineageIntegrationEnabled;
}
/**
* This setting must be set to specify a list of tables/collections whose
* snapshot must be taken on creating or restarting the connector.
*/
public void setSnapshotIncludeCollectionList(
String snapshotIncludeCollectionList) {
this.snapshotIncludeCollectionList = snapshotIncludeCollectionList;
}
public String getSnapshotIncludeCollectionList() {
return snapshotIncludeCollectionList;
}
/**
* When 'snapshot.mode' is set as configuration_based, this setting permits
* to specify whenever the stream should start or not after snapshot.
*/
public void setSnapshotModeConfigurationBasedStartStream(
boolean snapshotModeConfigurationBasedStartStream) {
this.snapshotModeConfigurationBasedStartStream = snapshotModeConfigurationBasedStartStream;
}
public boolean isSnapshotModeConfigurationBasedStartStream() {
return snapshotModeConfigurationBasedStartStream;
}
/**
* Maximum size of the queue in bytes for change events read from the
* database log but not yet recorded or forwarded. Defaults to 0. Mean the
* feature is not enabled
*/
public void setMaxQueueSizeInBytes(long maxQueueSizeInBytes) {
this.maxQueueSizeInBytes = maxQueueSizeInBytes;
}
public long getMaxQueueSizeInBytes() {
return maxQueueSizeInBytes;
}
/**
* When 'snapshot.mode' is set as configuration_based, this setting permits
* to specify whenever the schema should be snapshotted or not.
*/
public void setSnapshotModeConfigurationBasedSnapshotSchema(
boolean snapshotModeConfigurationBasedSnapshotSchema) {
this.snapshotModeConfigurationBasedSnapshotSchema = snapshotModeConfigurationBasedSnapshotSchema;
}
public boolean isSnapshotModeConfigurationBasedSnapshotSchema() {
return snapshotModeConfigurationBasedSnapshotSchema;
}
/**
* Time, date, and timestamps can be represented with different kinds of
* precisions, including: 'adaptive' (the default) bases the precision of
* time, date, and timestamp values on the database column's precision;
* 'adaptive_time_microseconds' like 'adaptive' mode, but TIME fields always
* use microseconds precision; 'connect' always represents time, date, and
* timestamp values using Kafka Connect's built-in representations for Time,
* Date, and Timestamp, which uses millisecond precision regardless of the
* database columns' precision.
*/
public void setTimePrecisionMode(String timePrecisionMode) {
this.timePrecisionMode = timePrecisionMode;
}
public String getTimePrecisionMode() {
return timePrecisionMode;
}
/**
* Interval for looking for new signals in registered channels, given in
* milliseconds. Defaults to 5 seconds.
*/
public void setSignalPollIntervalMs(long signalPollIntervalMs) {
this.signalPollIntervalMs = signalPollIntervalMs;
}
public long getSignalPollIntervalMs() {
return signalPollIntervalMs;
}
/**
* Optional list of post processors. The processors are defined using
* '<post.processor.prefix>.type' config option and configured using options
* '<post.processor.prefix.<option>'
*/
public void setPostProcessors(String postProcessors) {
this.postProcessors = postProcessors;
}
public String getPostProcessors() {
return postProcessors;
}
/**
* List of notification channels names that are enabled.
*/
public void setNotificationEnabledChannels(
String notificationEnabledChannels) {
this.notificationEnabledChannels = notificationEnabledChannels;
}
public String getNotificationEnabledChannels() {
return notificationEnabledChannels;
}
/**
* Specify how failures during processing of events (i.e. when encountering
* a corrupted event) should be handled, including: 'fail' (the default) an
* exception indicating the problematic event and its position is raised,
* causing the connector to be stopped; 'warn' the problematic event and its
* position will be logged and the event will be skipped; 'ignore' the
* problematic event will be skipped.
*/
public void setEventProcessingFailureHandlingMode(
String eventProcessingFailureHandlingMode) {
this.eventProcessingFailureHandlingMode = eventProcessingFailureHandlingMode;
}
public String getEventProcessingFailureHandlingMode() {
return eventProcessingFailureHandlingMode;
}
/**
* Port of the database server.
*/
public void setDatabasePort(int databasePort) {
this.databasePort = databasePort;
}
public int getDatabasePort() {
return databasePort;
}
/**
* The name of the topic for the notifications. This is required in case
* 'sink' is in the list of enabled channels
*/
public void setNotificationSinkTopicName(String notificationSinkTopicName) {
this.notificationSinkTopicName = notificationSinkTopicName;
}
public String getNotificationSinkTopicName() {
return notificationSinkTopicName;
}
/**
* When 'snapshot.mode' is set as custom, this setting must be set to
* specify a the name of the custom implementation provided in the 'name()'
* method. The implementations must implement the 'Snapshotter' interface
* and is called on each app boot to determine whether to do a snapshot.
*/
public void setSnapshotModeCustomName(String snapshotModeCustomName) {
this.snapshotModeCustomName = snapshotModeCustomName;
}
public String getSnapshotModeCustomName() {
return snapshotModeCustomName;
}
/**
* The name of the SchemaHistory | that |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/atomic/referencearray/AtomicReferenceArrayAssert_areAtLeastOne_Test.java | {
"start": 916,
"end": 1363
} | class ____ extends AtomicReferenceArrayAssertBaseTest {
private static final Condition<Object> condition = new TestCondition<>();
@Override
protected AtomicReferenceArrayAssert<Object> invoke_api_method() {
return assertions.areAtLeastOne(condition);
}
@Override
protected void verify_internal_effects() {
verify(arrays).assertAreAtLeast(info(), internalArray(), 1, condition);
}
}
| AtomicReferenceArrayAssert_areAtLeastOne_Test |
java | spring-projects__spring-framework | spring-webmvc/src/test/java/org/springframework/web/servlet/ComplexWebApplicationContext.java | {
"start": 17463,
"end": 17902
} | class ____ implements ViewResolver, Ordered {
@Override
public int getOrder() {
return 0;
}
@Override
public @Nullable View resolveViewName(String viewName, Locale locale) throws Exception {
if (viewName.equalsIgnoreCase("form")) {
InternalResourceView view = new InternalResourceView("myform.jsp");
view.setRequestContextAttribute("rc");
return view;
}
return null;
}
}
public static | TestViewResolver |
java | apache__kafka | metadata/src/main/java/org/apache/kafka/image/publisher/SnapshotGenerator.java | {
"start": 3748,
"end": 10104
} | interface ____ {
/**
* Emit a snapshot for the given image.
*
* Note: if a snapshot has already been emitted for the given offset and epoch pair, this
* function will not recreate it.
*
* @param image The metadata image to emit.
*/
void maybeEmit(MetadataImage image);
}
/**
* The node ID.
*/
private final int nodeId;
/**
* The clock to use.
*/
private final Time time;
/**
* The emitter callback, which actually generates the snapshot.
*/
private final Emitter emitter;
/**
* The slf4j logger to use.
*/
private final Logger log;
/**
* The fault handler to use.
*/
private final FaultHandler faultHandler;
/**
* The maximum number of bytes we will wait to see before triggering a new snapshot.
*/
private final long maxBytesSinceLastSnapshot;
/**
* The maximum amount of time we will wait before triggering a snapshot, or 0 to disable
* time-based snapshotting.
*/
private final long maxTimeSinceLastSnapshotNs;
/**
* If non-null, the reason why snapshots have been disabled.
*/
private final AtomicReference<String> disabledReason;
/**
* The event queue used to schedule emitting snapshots.
*/
private final EventQueue eventQueue;
/**
* The log bytes that we have read since the last snapshot.
*/
private long bytesSinceLastSnapshot;
/**
* The time at which we created the last snapshot.
*/
private long lastSnapshotTimeNs;
private SnapshotGenerator(
int nodeId,
Time time,
Emitter emitter,
FaultHandler faultHandler,
long maxBytesSinceLastSnapshot,
long maxTimeSinceLastSnapshotNs,
AtomicReference<String> disabledReason,
String threadNamePrefix
) {
this.nodeId = nodeId;
this.time = time;
this.emitter = emitter;
this.faultHandler = faultHandler;
this.maxBytesSinceLastSnapshot = maxBytesSinceLastSnapshot;
this.maxTimeSinceLastSnapshotNs = maxTimeSinceLastSnapshotNs;
LogContext logContext = new LogContext("[SnapshotGenerator id=" + nodeId + "] ");
this.log = logContext.logger(SnapshotGenerator.class);
this.disabledReason = disabledReason;
this.eventQueue = new KafkaEventQueue(time, logContext, threadNamePrefix + "snapshot-generator-");
resetSnapshotCounters();
log.debug("Starting SnapshotGenerator.");
}
@Override
public String name() {
return "SnapshotGenerator";
}
void resetSnapshotCounters() {
this.bytesSinceLastSnapshot = 0L;
this.lastSnapshotTimeNs = time.nanoseconds();
}
@Override
public void onMetadataUpdate(
MetadataDelta delta,
MetadataImage newImage,
LoaderManifest manifest
) {
switch (manifest.type()) {
case LOG_DELTA:
publishLogDelta(newImage, (LogDeltaManifest) manifest);
break;
case SNAPSHOT:
publishSnapshot(newImage);
break;
}
}
void publishSnapshot(MetadataImage newImage) {
log.debug("Resetting the snapshot counters because we just read {}.", newImage.provenance().snapshotName());
resetSnapshotCounters();
}
void publishLogDelta(MetadataImage newImage, LogDeltaManifest manifest) {
bytesSinceLastSnapshot += manifest.numBytes();
if (bytesSinceLastSnapshot >= maxBytesSinceLastSnapshot) {
if (eventQueue.isEmpty()) {
maybeScheduleEmit("we have replayed at least " + maxBytesSinceLastSnapshot +
" bytes", newImage, manifest.provenance().isOffsetBatchAligned());
} else if (log.isTraceEnabled()) {
log.trace("Not scheduling bytes-based snapshot because event queue is not empty yet.");
}
} else if (maxTimeSinceLastSnapshotNs != 0 &&
(time.nanoseconds() - lastSnapshotTimeNs >= maxTimeSinceLastSnapshotNs)) {
if (eventQueue.isEmpty()) {
maybeScheduleEmit("we have waited at least " +
TimeUnit.NANOSECONDS.toMinutes(maxTimeSinceLastSnapshotNs) +
" minute(s)", newImage, manifest.provenance().isOffsetBatchAligned());
} else if (log.isTraceEnabled()) {
log.trace("Not scheduling time-based snapshot because event queue is not empty yet.");
}
} else if (log.isTraceEnabled()) {
log.trace("Neither time-based nor bytes-based criteria are met; not scheduling snapshot.");
}
}
void maybeScheduleEmit(
String reason,
MetadataImage image,
boolean isOffsetBatchAligned
) {
String currentDisabledReason = disabledReason.get();
if (currentDisabledReason != null) {
log.error("Not emitting {} despite the fact that {} because snapshots are " +
"disabled; {}", image.provenance().snapshotName(), reason, currentDisabledReason);
} else if (!isOffsetBatchAligned) {
log.debug("Not emitting {} despite the fact that {} because snapshots are " +
"disabled; {}", image.provenance().snapshotName(), reason, "metadata image is not batch aligned");
} else {
eventQueue.append(() -> {
resetSnapshotCounters();
log.info("Creating new KRaft snapshot file {} because {}.",
image.provenance().snapshotName(), reason);
try {
emitter.maybeEmit(image);
} catch (Throwable e) {
faultHandler.handleFault("KRaft snapshot file generation error", e);
}
});
}
}
public void beginShutdown() {
log.debug("Beginning shutdown of SnapshotGenerator.");
this.disabledReason.compareAndSet(null, "we are shutting down");
eventQueue.beginShutdown("beginShutdown");
}
@Override
public void close() throws InterruptedException {
eventQueue.beginShutdown("close");
log.debug("Closing SnapshotGenerator.");
eventQueue.close();
}
}
| Emitter |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/search/vectors/TestQueryVectorBuilderPlugin.java | {
"start": 1220,
"end": 4084
} | class ____ implements QueryVectorBuilder {
private static final String NAME = "test_query_vector_builder";
private static final ParseField QUERY_VECTOR = new ParseField("query_vector");
@SuppressWarnings("unchecked")
static final ConstructingObjectParser<TestQueryVectorBuilder, Void> PARSER = new ConstructingObjectParser<>(
NAME + "_parser",
true,
a -> new TestQueryVectorBuilder((List<Float>) a[0])
);
static {
PARSER.declareFloatArray(ConstructingObjectParser.constructorArg(), QUERY_VECTOR);
}
private final List<Float> vectorToBuild;
public TestQueryVectorBuilder(List<Float> vectorToBuild) {
this.vectorToBuild = vectorToBuild;
}
public TestQueryVectorBuilder(float[] expected) {
this.vectorToBuild = new ArrayList<>(expected.length);
for (float f : expected) {
vectorToBuild.add(f);
}
}
TestQueryVectorBuilder(StreamInput in) throws IOException {
this.vectorToBuild = in.readCollectionAsList(StreamInput::readFloat);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
return builder.startObject().field(QUERY_VECTOR.getPreferredName(), vectorToBuild).endObject();
}
@Override
public String getWriteableName() {
return NAME;
}
@Override
public TransportVersion getMinimalSupportedVersion() {
return TransportVersion.current();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeCollection(vectorToBuild, StreamOutput::writeFloat);
}
@Override
public void buildVector(Client client, ActionListener<float[]> listener) {
float[] response = new float[vectorToBuild.size()];
int i = 0;
for (Float f : vectorToBuild) {
response[i++] = f;
}
listener.onResponse(response);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
TestQueryVectorBuilder that = (TestQueryVectorBuilder) o;
return Objects.equals(vectorToBuild, that.vectorToBuild);
}
@Override
public int hashCode() {
return Objects.hash(vectorToBuild);
}
}
@Override
public List<QueryVectorBuilderSpec<?>> getQueryVectorBuilders() {
return List.of(
new QueryVectorBuilderSpec<>(TestQueryVectorBuilder.NAME, TestQueryVectorBuilder::new, TestQueryVectorBuilder.PARSER)
);
}
}
| TestQueryVectorBuilder |
java | google__dagger | hilt-compiler/main/java/dagger/hilt/processor/internal/root/ComponentTree.java | {
"start": 1318,
"end": 4387
} | class ____ {
private final ImmutableGraph<ComponentDescriptor> graph;
private final ComponentDescriptor root;
/** Creates a new tree from a set of descriptors. */
static ComponentTree from(Set<ComponentDescriptor> descriptors, ComponentDescriptor root) {
MutableGraph<ComponentDescriptor> graph =
GraphBuilder.directed().allowsSelfLoops(false).build();
descriptors.forEach(
descriptor -> {
// Only add components that have builders (besides the root component) since if
// we didn't find any builder class, then we don't need to generate the component
// since it would be inaccessible.
if (descriptor.creator().isPresent() || descriptor.isRoot()) {
graph.addNode(descriptor);
descriptor.parent().ifPresent(parent -> graph.putEdge(parent, descriptor));
}
});
// Only include nodes that are reachable from the given root. Also, the graph may still
// have nodes that are children of components that don't have builders that need to
// be removed as well.
return new ComponentTree(ImmutableGraph.copyOf(
Graphs.inducedSubgraph(graph, Graphs.reachableNodes(graph, root))));
}
private ComponentTree(ImmutableGraph<ComponentDescriptor> graph) {
this.graph = Preconditions.checkNotNull(graph);
Preconditions.checkState(
!Graphs.hasCycle(graph),
"Component graph has cycles: %s",
graph.nodes());
// Check that each component has a unique descriptor
Map<ClassName, ComponentDescriptor> descriptors = new HashMap<>();
for (ComponentDescriptor descriptor : graph.nodes()) {
if (descriptors.containsKey(descriptor.component())) {
ComponentDescriptor prevDescriptor = descriptors.get(descriptor.component());
Preconditions.checkState(
// TODO(b/144939893): Use "==" instead of ".equals()"?
descriptor.equals(prevDescriptor),
"%s has mismatching descriptors:\n"
+ " %s\n\n"
+ " %s\n\n",
prevDescriptor.component(),
prevDescriptor,
descriptor);
}
descriptors.put(descriptor.component(), descriptor);
}
ImmutableList<ComponentDescriptor> roots =
graph.nodes().stream()
.filter(node -> graph.inDegree(node) == 0)
.collect(toImmutableList());
Preconditions.checkState(
roots.size() == 1,
"Component graph must have exactly 1 root. Found: %s",
roots.stream().map(ComponentDescriptor::component).collect(toImmutableList()));
root = Iterables.getOnlyElement(roots);
}
ImmutableSet<ComponentDescriptor> getComponentDescriptors() {
return ImmutableSet.copyOf(graph.nodes());
}
ImmutableSet<ComponentDescriptor> childrenOf(ComponentDescriptor componentDescriptor) {
return ImmutableSet.copyOf(graph.successors(componentDescriptor));
}
ImmutableGraph<ComponentDescriptor> graph() {
return graph;
}
ComponentDescriptor root() {
return root;
}
}
| ComponentTree |
java | elastic__elasticsearch | x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/ShowTables.java | {
"start": 992,
"end": 3808
} | class ____ extends Command {
private final LikePattern catalogPattern;
private final String catalog;
private final String index;
private final LikePattern pattern;
private final boolean includeFrozen;
public ShowTables(Source source, LikePattern catalogPattern, String catalog, String index, LikePattern pattern, boolean includeFrozen) {
super(source);
this.catalogPattern = catalogPattern;
this.catalog = catalog;
this.index = index;
this.pattern = pattern;
this.includeFrozen = includeFrozen;
}
@Override
protected NodeInfo<ShowTables> info() {
return NodeInfo.create(this, ShowTables::new, catalogPattern, catalog, index, pattern, includeFrozen);
}
public String index() {
return index;
}
public LikePattern pattern() {
return pattern;
}
@Override
public List<Attribute> output() {
return asList(keyword("catalog"), keyword("name"), keyword("type"), keyword("kind"));
}
@Override
public final void execute(SqlSession session, ActionListener<Page> listener) {
String cat = catalogPattern != null ? catalogPattern.asIndexNameWildcard() : catalog;
cat = hasText(cat) ? cat : session.configuration().catalog();
String idx = index != null ? index : (pattern != null ? pattern.asIndexNameWildcard() : "*");
String regex = pattern != null ? pattern.asJavaRegex() : null;
boolean withFrozen = session.configuration().includeFrozen() || includeFrozen;
// to avoid redundancy, indicate whether frozen fields are required by specifying the type
EnumSet<IndexType> indexType = withFrozen ? IndexType.VALID_INCLUDE_FROZEN : IndexType.VALID_REGULAR;
session.indexResolver()
.resolveNames(
cat,
idx,
regex,
indexType,
listener.delegateFailureAndWrap(
(l, result) -> l.onResponse(
of(
session,
result.stream().map(t -> asList(t.cluster(), t.name(), t.type().toSql(), t.type().toNative())).collect(toList())
)
)
)
);
}
@Override
public int hashCode() {
return Objects.hash(index, pattern, includeFrozen);
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
ShowTables other = (ShowTables) obj;
return Objects.equals(index, other.index) && Objects.equals(pattern, other.pattern) && includeFrozen == other.includeFrozen;
}
}
| ShowTables |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMSecretManagerService.java | {
"start": 1667,
"end": 5733
} | class ____ extends AbstractService {
AMRMTokenSecretManager amRmTokenSecretManager;
NMTokenSecretManagerInRM nmTokenSecretManager;
ClientToAMTokenSecretManagerInRM clientToAMSecretManager;
RMContainerTokenSecretManager containerTokenSecretManager;
RMDelegationTokenSecretManager rmDTSecretManager;
RMContextImpl rmContext;
/**
* Construct the service.
* @param conf Configuration.
* @param rmContext RMContext.
*/
public RMSecretManagerService(Configuration conf, RMContextImpl rmContext) {
super(RMSecretManagerService.class.getName());
this.rmContext = rmContext;
// To initialize correctly, these managers should be created before
// being called serviceInit().
nmTokenSecretManager = createNMTokenSecretManager(conf);
rmContext.setNMTokenSecretManager(nmTokenSecretManager);
containerTokenSecretManager = createContainerTokenSecretManager(conf);
rmContext.setContainerTokenSecretManager(containerTokenSecretManager);
clientToAMSecretManager = createClientToAMTokenSecretManager();
rmContext.setClientToAMTokenSecretManager(clientToAMSecretManager);
amRmTokenSecretManager = createAMRMTokenSecretManager(conf, this.rmContext);
rmContext.setAMRMTokenSecretManager(amRmTokenSecretManager);
rmDTSecretManager =
createRMDelegationTokenSecretManager(conf, rmContext);
rmContext.setRMDelegationTokenSecretManager(rmDTSecretManager);
}
@Override
public void serviceInit(Configuration conf) throws Exception {
super.serviceInit(conf);
}
@Override
public void serviceStart() throws Exception {
amRmTokenSecretManager.start();
containerTokenSecretManager.start();
nmTokenSecretManager.start();
try {
rmDTSecretManager.startThreads();
} catch(IOException ie) {
throw new YarnRuntimeException("Failed to start secret manager threads", ie);
}
super.serviceStart();
}
@Override
public void serviceStop() throws Exception {
if (rmDTSecretManager != null) {
rmDTSecretManager.stopThreads();
}
if (amRmTokenSecretManager != null) {
amRmTokenSecretManager.stop();
}
if (containerTokenSecretManager != null) {
containerTokenSecretManager.stop();
}
if(nmTokenSecretManager != null) {
nmTokenSecretManager.stop();
}
super.serviceStop();
}
protected RMContainerTokenSecretManager createContainerTokenSecretManager(
Configuration conf) {
return new RMContainerTokenSecretManager(conf);
}
protected NMTokenSecretManagerInRM createNMTokenSecretManager(
Configuration conf) {
return new NMTokenSecretManagerInRM(conf);
}
protected AMRMTokenSecretManager createAMRMTokenSecretManager(
Configuration conf, RMContext rmContext) {
return new AMRMTokenSecretManager(conf, rmContext);
}
protected ClientToAMTokenSecretManagerInRM createClientToAMTokenSecretManager() {
return new ClientToAMTokenSecretManagerInRM();
}
@VisibleForTesting
protected RMDelegationTokenSecretManager createRMDelegationTokenSecretManager(
Configuration conf, RMContext rmContext) {
long secretKeyInterval =
conf.getLong(YarnConfiguration.RM_DELEGATION_KEY_UPDATE_INTERVAL_KEY,
YarnConfiguration.RM_DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT);
long tokenMaxLifetime =
conf.getLong(YarnConfiguration.RM_DELEGATION_TOKEN_MAX_LIFETIME_KEY,
YarnConfiguration.RM_DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT);
long tokenRenewInterval =
conf.getLong(YarnConfiguration.RM_DELEGATION_TOKEN_RENEW_INTERVAL_KEY,
YarnConfiguration.RM_DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT);
long removeScanInterval =
conf.getTimeDuration(YarnConfiguration.RM_DELEGATION_TOKEN_REMOVE_SCAN_INTERVAL_KEY,
YarnConfiguration.RM_DELEGATION_TOKEN_REMOVE_SCAN_INTERVAL_DEFAULT,
TimeUnit.MILLISECONDS);
return new RMDelegationTokenSecretManager(secretKeyInterval,
tokenMaxLifetime, tokenRenewInterval, removeScanInterval, rmContext);
}
}
| RMSecretManagerService |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/selection/generics/WildCardSuperWrapper.java | {
"start": 231,
"end": 511
} | class ____<T> {
private T wrapped;
public WildCardSuperWrapper(T wrapped) {
this.wrapped = wrapped;
}
public T getWrapped() {
return wrapped;
}
public void setWrapped(T wrapped) {
this.wrapped = wrapped;
}
}
| WildCardSuperWrapper |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/throughput/BufferSizeEMA.java | {
"start": 991,
"end": 3698
} | class ____ {
private final int maxBufferSize;
private final int minBufferSize;
/** EMA algorithm specific constant which responsible for speed of reaction. */
private final double alpha;
private double lastBufferSize;
public BufferSizeEMA(int maxBufferSize, int minBufferSize, long numberOfSamples) {
this(maxBufferSize, maxBufferSize, minBufferSize, numberOfSamples);
}
public BufferSizeEMA(
int startingBufferSize, int maxBufferSize, int minBufferSize, long numberOfSamples) {
this.maxBufferSize = maxBufferSize;
this.minBufferSize = minBufferSize;
alpha = 2.0 / (numberOfSamples + 1);
this.lastBufferSize = startingBufferSize;
}
/**
* Calculating the buffer size over total possible buffers size and number of buffers in use.
*
* @param totalBufferSizeInBytes Total buffers size.
* @param totalBuffers Total number of buffers in use.
* @return Throughput calculated according to implemented algorithm.
*/
public int calculateBufferSize(long totalBufferSizeInBytes, int totalBuffers) {
checkArgument(totalBufferSizeInBytes >= 0, "Size of buffer should be non negative");
checkArgument(totalBuffers > 0, "Number of buffers should be positive");
// Since the result value is always limited by max buffer size while the instant value is
// potentially unlimited. It can lead to an instant change from min to max value in case
// when the instant value is significantly larger than the possible max value.
// The solution is to limit the instant buffer size by twice of current buffer size in order
// to have the same growth and shrink speeds. for example if the instant value is equal to 0
// and the current value is 16000 we can decrease it at maximum by 1600(suppose alfa=0.1) .
// The idea is to allow increase and decrease size by the same number. So if the instant
// value would be large(for example 100000) it will be possible to increase the current
// value by 1600(the same as decreasing) because the limit will be 2 * currentValue = 32000.
// Example of change speed:
// growing = 32768, 29647, 26823, 24268, 21956, 19864
// shrinking = 19864, 21755, 23826, 26095, 28580, 31301, 32768
double desirableBufferSize =
Math.min(((double) totalBufferSizeInBytes) / totalBuffers, 2L * lastBufferSize);
lastBufferSize += alpha * (desirableBufferSize - lastBufferSize);
lastBufferSize = Math.max(minBufferSize, Math.min(lastBufferSize, maxBufferSize));
return (int) Math.round(lastBufferSize);
}
}
| BufferSizeEMA |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/aop/framework/AbstractAopProxyTests.java | {
"start": 41379,
"end": 46456
} | class ____ implements MethodInterceptor {
private final Map<String, String> expectedValues;
private final Map<String, String> valuesToAdd;
public MapAwareMethodInterceptor(Map<String, String> expectedValues, Map<String, String> valuesToAdd) {
this.expectedValues = expectedValues;
this.valuesToAdd = valuesToAdd;
}
@Override
public Object invoke(MethodInvocation invocation) throws Throwable {
ReflectiveMethodInvocation rmi = (ReflectiveMethodInvocation) invocation;
for (Object key : rmi.getUserAttributes().keySet()) {
assertThat(rmi.getUserAttributes().get(key)).isEqualTo(expectedValues.get(key));
}
rmi.getUserAttributes().putAll(valuesToAdd);
return invocation.proceed();
}
}
AdvisedSupport pc = new AdvisedSupport(ITestBean.class);
MapAwareMethodInterceptor mami1 = new MapAwareMethodInterceptor(new HashMap<>(), new HashMap<>());
Map<String, String> firstValuesToAdd = new HashMap<>();
firstValuesToAdd.put("test", "");
MapAwareMethodInterceptor mami2 = new MapAwareMethodInterceptor(new HashMap<>(), firstValuesToAdd);
MapAwareMethodInterceptor mami3 = new MapAwareMethodInterceptor(firstValuesToAdd, new HashMap<>());
MapAwareMethodInterceptor mami4 = new MapAwareMethodInterceptor(firstValuesToAdd, new HashMap<>());
Map<String, String> secondValuesToAdd = new HashMap<>();
secondValuesToAdd.put("foo", "bar");
secondValuesToAdd.put("cat", "dog");
MapAwareMethodInterceptor mami5 = new MapAwareMethodInterceptor(firstValuesToAdd, secondValuesToAdd);
Map<String, String> finalExpected = new HashMap<>(firstValuesToAdd);
finalExpected.putAll(secondValuesToAdd);
MapAwareMethodInterceptor mami6 = new MapAwareMethodInterceptor(finalExpected, secondValuesToAdd);
pc.addAdvice(mami1);
pc.addAdvice(mami2);
pc.addAdvice(mami3);
pc.addAdvice(mami4);
pc.addAdvice(mami5);
pc.addAdvice(mami6);
// We don't care about the object
pc.setTarget(new TestBean());
AopProxy aop = createAopProxy(pc);
ITestBean tb = (ITestBean) aop.getProxy();
String newName = "foo";
tb.setName(newName);
assertThat(tb.getName()).isEqualTo(newName);
}
@Test
void multiAdvice() {
CountingMultiAdvice cca = new CountingMultiAdvice();
@SuppressWarnings("serial")
Advisor matchesNoArgs = new StaticMethodMatcherPointcutAdvisor(cca) {
@Override
public boolean matches(Method m, @Nullable Class<?> targetClass) {
return m.getParameterCount() == 0 || "exceptional".equals(m.getName());
}
};
TestBean target = new TestBean();
target.setAge(80);
ProxyFactory pf = new ProxyFactory(target);
pf.addAdvice(new NopInterceptor());
pf.addAdvisor(matchesNoArgs);
assertThat(pf.getAdvisors()[1]).as("Advisor was added").isEqualTo(matchesNoArgs);
ITestBean proxied = (ITestBean) createProxy(pf);
assertThat(cca.getCalls()).isEqualTo(0);
assertThat(cca.getCalls("getAge")).isEqualTo(0);
assertThat(proxied.getAge()).isEqualTo(target.getAge());
assertThat(cca.getCalls()).isEqualTo(2);
assertThat(cca.getCalls("getAge")).isEqualTo(2);
assertThat(cca.getCalls("setAge")).isEqualTo(0);
// Won't be advised
proxied.setAge(26);
assertThat(cca.getCalls()).isEqualTo(2);
assertThat(proxied.getAge()).isEqualTo(26);
assertThat(cca.getCalls()).isEqualTo(4);
assertThatExceptionOfType(SpecializedUncheckedException.class).as("Should have thrown CannotGetJdbcConnectionException")
.isThrownBy(() -> proxied.exceptional(new SpecializedUncheckedException("foo", (SQLException)null)));
assertThat(cca.getCalls()).isEqualTo(6);
}
@Test
void beforeAdviceThrowsException() {
final RuntimeException rex = new RuntimeException();
@SuppressWarnings("serial")
CountingBeforeAdvice ba = new CountingBeforeAdvice() {
@Override
public void before(Method m, Object[] args, Object target) throws Throwable {
super.before(m, args, target);
if (m.getName().startsWith("set")) {
throw rex;
}
}
};
TestBean target = new TestBean();
target.setAge(80);
NopInterceptor nop1 = new NopInterceptor();
NopInterceptor nop2 = new NopInterceptor();
ProxyFactory pf = new ProxyFactory(target);
pf.addAdvice(nop1);
pf.addAdvice(ba);
pf.addAdvice(nop2);
ITestBean proxied = (ITestBean) createProxy(pf);
// Won't throw an exception
assertThat(proxied.getAge()).isEqualTo(target.getAge());
assertThat(ba.getCalls()).isEqualTo(1);
assertThat(ba.getCalls("getAge")).isEqualTo(1);
assertThat(nop1.getCount()).isEqualTo(1);
assertThat(nop2.getCount()).isEqualTo(1);
// Will fail, after invoking Nop1
assertThatRuntimeException().as("before advice should have ended chain")
.isThrownBy(() -> proxied.setAge(26))
.matches(rex::equals);
assertThat(ba.getCalls()).isEqualTo(2);
assertThat(nop1.getCount()).isEqualTo(2);
// Nop2 didn't get invoked when the exception was thrown
assertThat(nop2.getCount()).isEqualTo(1);
// Shouldn't have changed value in joinpoint
assertThat(proxied.getAge()).isEqualTo(target.getAge());
}
@Test
void afterReturningAdvisorIsInvoked() {
| MapAwareMethodInterceptor |
java | micronaut-projects__micronaut-core | http/src/main/java/io/micronaut/http/filter/HttpFilterResolver.java | {
"start": 1923,
"end": 3949
} | interface ____ extends AnnotationMetadataProvider {
/**
* @return The filter
*/
@NonNull
GenericHttpFilter getFilter();
/**
* @return The filter methods.
*/
@NonNull
Set<HttpMethod> getFilterMethods();
/**
* @return The filter patterns
*/
@NonNull String[] getPatterns();
/**
* @return The filter patterns
*/
default FilterPatternStyle getPatternStyle() {
return FilterPatternStyle.defaultStyle();
}
/**
* @return Does the entry define any methods.
*/
default boolean hasMethods() {
return CollectionUtils.isNotEmpty(getFilterMethods());
}
/**
* @return Are any patterns defined
*/
default boolean hasPatterns() {
return ArrayUtils.isNotEmpty(getPatterns());
}
/**
* Creates a filter entry for the given arguments.
* @param filter The filter
* @param annotationMetadata The annotation metadata
* @param methods The methods
* @param patternStyle the pattern style
* @param patterns The patterns
* @return The filter entry
*/
static FilterEntry of(
@NonNull HttpFilter filter,
@Nullable AnnotationMetadata annotationMetadata,
@Nullable Set<HttpMethod> methods,
@NonNull FilterPatternStyle patternStyle, String... patterns) {
return new DefaultFilterEntry(
GenericHttpFilter.createLegacyFilter(
Objects.requireNonNull(filter, "Filter cannot be null"),
new FilterOrder.Dynamic(OrderUtil.getOrder(annotationMetadata))),
annotationMetadata != null ? annotationMetadata : AnnotationMetadata.EMPTY_METADATA,
methods,
patternStyle,
patterns
);
}
}
}
| FilterEntry |
java | spring-projects__spring-security | oauth2/oauth2-jose/src/main/java/org/springframework/security/oauth2/jwt/JwtClaimValidator.java | {
"start": 1274,
"end": 2463
} | class ____<T> implements OAuth2TokenValidator<Jwt> {
private final Log logger = LogFactory.getLog(getClass());
private final String claim;
private final Predicate<T> test;
private final OAuth2Error error;
/**
* Constructs a {@link JwtClaimValidator} using the provided parameters
* @param claim - is the name of the claim in {@link Jwt} to validate.
* @param test - is the predicate function for the claim to test against.
*/
public JwtClaimValidator(String claim, Predicate<T> test) {
Assert.notNull(claim, "claim can not be null");
Assert.notNull(test, "test can not be null");
this.claim = claim;
this.test = test;
this.error = new OAuth2Error(OAuth2ErrorCodes.INVALID_TOKEN, "The " + this.claim + " claim is not valid",
"https://tools.ietf.org/html/rfc6750#section-3.1");
}
@Override
public OAuth2TokenValidatorResult validate(Jwt token) {
Assert.notNull(token, "token cannot be null");
T claimValue = token.getClaim(this.claim);
if (this.test.test(claimValue)) {
return OAuth2TokenValidatorResult.success();
}
this.logger.debug(this.error.getDescription());
return OAuth2TokenValidatorResult.failure(this.error);
}
}
| JwtClaimValidator |
java | quarkusio__quarkus | integration-tests/mailer/src/main/java/io/quarkus/it/mailer/MailResource.java | {
"start": 2122,
"end": 7701
} | class ____ {
public static native MailTemplate.MailTemplateInstance hello(String name);
}
/**
* Send a simple text email.
*/
@GET
@Path("/text")
public String sendSimpleTextEmail() {
return sendSimpleTextEmailWithCustomMailer(null);
}
/**
* Send a simple text email.
*/
@GET
@Path("/text/{name}")
public String sendSimpleTextEmailWithCustomMailer(@PathParam("name") String name) {
Mailer mailer = select(name);
String subject = "simple test email";
if (name != null) {
subject += " " + name;
}
mailer.send(Mail.withText("nobody@quarkus.io",
subject,
"This is a simple test email.\nRegards,\nRoger the robot"));
return "ok";
}
/**
* Send a simple text email with a text attachment (not inlined)
*/
@GET
@Path("/text-with-attachment")
public String sendSimpleTextEmailWithASingleAttachment() {
Buffer lorem = vertx.fileSystem().readFile("META-INF/resources/lorem.txt").await().atMost(Duration.ofSeconds(1));
defaultMailer.send(Mail.withText("nobody@quarkus.io",
"simple test email with an attachment",
"This is a simple test email.\nRegards,\nRoger the robot")
.addAttachment("lorem.txt", lorem.getBytes(), "text/plain"));
return "ok";
}
/**
* Send a simple text email with non-ascii characters.
*/
@GET
@Path("/text-non-ascii")
public String sendSimpleTextEmailWithNonAsciiCharacters() {
defaultMailer.send(Mail.withText("nobody@quarkus.io",
"Příliš žluťoučký kůň úpěl ďábelské ódy na 各分野最高のライブラリと標準で構成された、",
"This is a simple test email with non-ascii characters.\n" +
"Non-ascii characters: Příliš žluťoučký kůň úpěl ďábelské ódy na 各分野最高のライブラリと標準で構成された、\n" +
"Regards,\nRoger the robot"));
return "ok";
}
/**
* Send a simple text email with human friendly address prefix.
*/
@GET
@Path("/human-friendly-address")
public String sendEmailWithHumanFriendlyAddressPrefix() {
defaultMailer.send(Mail.withText("Mr. Nobody <nobody@quarkus.io>",
"Simple test email",
"This is a simple test email.\nRegards,\nRoger the robot")
.setFrom("Roger the robot <roger-the-robot@quarkus.io>"));
return "ok";
}
/**
* Send a simple HTML email.
*/
@GET
@Path("/html")
public String sendSimpleHtmlEmail() {
defaultMailer.send(Mail.withHtml("nobody@quarkus.io",
"html test email",
"<h3>Hello!</h3><p>This is a simple test email.</p><p>Regards,</p><p>Roger the robot</p>"));
return "ok";
}
/**
* Send a simple HTML email with inline attachment.
*/
@GET
@Path("/html-inline-attachment")
public String sendSimpleHtmlEmailWithAttachmentInline() throws IOException {
Buffer logo = vertx.fileSystem().readFile("META-INF/resources/logo.png").await().atMost(Duration.ofSeconds(1));
defaultMailer.send(Mail.withHtml("nobody@quarkus.io",
"HTML test email",
"<h3>Hello!</h3><p>This is a simple test email.</p>" +
"<p>Here is a file for you: <img src=\"cid:my-file@quarkus.io\"/></p>" +
"<p>Regards,</p><p>Roger the robot</p>")
.addInlineAttachment("quarkus-logo.png", logo.getBytes(), "image/png",
"<my-file@quarkus.io>"));
return "ok";
}
/**
* Send a simple text email to multiple recipients.
*/
@GET
@Path("/multiple-recipients")
public String sendSimpleTextEmailToMultipleRecipients() {
defaultMailer.send(Mail.withText("nobody@quarkus.io",
"simple test email",
"This is a simple test email.\nRegards,\nRoger the robot")
.addTo("nobody@example.com"));
return "ok";
}
/**
* Send a simple text email with DKIM signature.
*/
@GET
@Path("/dkim")
public String sendSimpleEmailWithDkimSignature() {
defaultMailer.send(Mail.withText("nobody@quarkus.io",
"simple test email",
"This is a simple test email.\nRegards,\nRoger the robot")
.addTo("nobody@example.com"));
return System.getProperty("vertx.mail.attachment.cache.file");
}
/**
* Confirm attachment caching has been enabled successfully.
*/
@GET
@Path("/attachments/cache")
public String checkAttachmentCache() {
return System.getProperty("vertx.mail.attachment.cache.file", "false");
}
/**
* Send a text email from template.
*/
@GET
@Path("/text-from-template")
public String sendEmailFromTemplate() {
Templates.hello("John")
.subject("template mail")
.to("nobody@quarkus.io")
.send()
.await().atMost(Duration.ofSeconds(10));
return "ok";
}
/**
* Send a simple text email with custom header.
*/
@GET
@Path("/text-with-headers")
public String sendSimpleTextEmailWithCustomHeader() {
defaultMailer.send(Mail.withText("nobody@quarkus.io",
"simple test email",
"This is a simple test email.\nRegards,\nRoger the robot")
.addHeader("Accept", "http"));
return "ok";
}
}
| Templates |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java | {
"start": 10208,
"end": 14315
} | class ____ extends InputStream {
private int pos;
private byte[] buffer;
private final ByteArrayOutputStream output;
private final FileReader<?> fileReader;
private final DatumWriter<Object> writer;
private final JsonEncoder encoder;
private final byte[] finalSeparator;
public AvroFileInputStream(FileStatus status) throws IOException {
pos = 0;
buffer = new byte[0];
GenericDatumReader<Object> reader = new GenericDatumReader<Object>();
FileContext fc = FileContext.getFileContext(new Configuration());
fileReader =
DataFileReader.openReader(new AvroFSInput(fc, status.getPath()),reader);
Schema schema = fileReader.getSchema();
writer = new GenericDatumWriter<Object>(schema);
output = new ByteArrayOutputStream();
encoder = EncoderFactory.get().jsonEncoder(schema, output);
finalSeparator = System.getProperty("line.separator").getBytes(StandardCharsets.UTF_8);
}
/**
* Read a single byte from the stream.
*/
@Override
public int read() throws IOException {
if (buffer == null) {
return -1;
}
if (pos < buffer.length) {
return buffer[pos++];
}
if (!fileReader.hasNext()) {
// Unset buffer to signal EOF on future calls.
buffer = null;
return -1;
}
writer.write(fileReader.next(), encoder);
encoder.flush();
if (!fileReader.hasNext()) {
if (buffer.length > 0) {
// Write a new line after the last Avro record.
output.write(finalSeparator);
output.flush();
}
}
swapBuffer();
return read();
}
@Override
public int read(byte[] dest, int destPos, int destLen) throws IOException {
validateInputStreamReadArguments(dest, destPos, destLen);
if (destLen == 0) {
return 0;
}
if (buffer == null) {
return -1;
}
int bytesRead = 0;
while (destLen > 0 && buffer != null) {
if (pos < buffer.length) {
// We have buffered data available, either from the Avro file or the final separator.
int copyLen = Math.min(buffer.length - pos, destLen);
System.arraycopy(buffer, pos, dest, destPos, copyLen);
pos += copyLen;
bytesRead += copyLen;
destPos += copyLen;
destLen -= copyLen;
} else if (buffer == finalSeparator) {
// There is no buffered data, and the last buffer processed was the final separator.
// Unset buffer to signal EOF on future calls.
buffer = null;
} else if (!fileReader.hasNext()) {
if (buffer.length > 0) {
// There is no data remaining in the file. Get ready to write the final separator on
// the next iteration.
buffer = finalSeparator;
pos = 0;
} else {
// We never read data into the buffer. This must be an empty file.
// Immediate EOF, no separator needed.
buffer = null;
return -1;
}
} else {
// Read the next data from the file into the buffer.
writer.write(fileReader.next(), encoder);
encoder.flush();
swapBuffer();
}
}
return bytesRead;
}
private void swapBuffer() {
pos = 0;
buffer = output.toByteArray();
output.reset();
}
/**
* Close the stream.
*/
@Override
public void close() throws IOException {
fileReader.close();
output.close();
super.close();
}
}
private static void validateInputStreamReadArguments(byte[] dest, int destPos, int destLen)
throws IOException {
if (dest == null) {
throw new NullPointerException("null destination buffer");
} else if (destPos < 0 || destLen < 0 || destLen > dest.length - destPos) {
throw new IndexOutOfBoundsException(String.format(
"invalid destination buffer range: destPos = %d, destLen = %d", destPos, destLen));
}
}
}
| AvroFileInputStream |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/generated/temporals/GeneratedInstantTests.java | {
"start": 1116,
"end": 3262
} | class ____ {
@Test
public void test(SessionFactoryScope scope) {
final GeneratedInstantEntity created = scope.fromTransaction( (session) -> {
final GeneratedInstantEntity entity = new GeneratedInstantEntity( 1, "tsifr" );
session.persist( entity );
return entity;
} );
assertThat( created.createdAt ).isNotNull();
assertThat( created.updatedAt ).isNotNull();
assertThat( created.createdAt ).isEqualTo( created.updatedAt );
// assertThat( created.createdAt2 ).isNotNull();
// assertThat( created.updatedAt2 ).isNotNull();
// assertThat( created.createdAt2 ).isEqualTo( created.updatedAt2 );
created.name = "first";
//We need to wait a little to make sure the timestamps produced are different
waitALittle();
// then changing
final GeneratedInstantEntity merged = scope.fromTransaction( (session) -> {
return (GeneratedInstantEntity) session.merge( created );
} );
assertThat( merged ).isNotNull();
assertThat( merged.createdAt ).isNotNull();
assertThat( merged.updatedAt ).isNotNull();
assertThat( merged.createdAt ).isEqualTo( created.createdAt );
assertThat( merged.updatedAt ).isNotEqualTo( created.updatedAt );
assertThat( merged ).isNotNull();
// assertThat( merged.createdAt2 ).isNotNull();
// assertThat( merged.updatedAt2 ).isNotNull();
// assertThat( merged.createdAt2 ).isEqualTo( created.createdAt2 );
// assertThat( merged.updatedAt2 ).isNotEqualTo( created.updatedAt2 );
//We need to wait a little to make sure the timestamps produced are different
waitALittle();
// lastly, make sure we can load it..
final GeneratedInstantEntity loaded = scope.fromTransaction( (session) -> {
return session.get( GeneratedInstantEntity.class, 1 );
} );
assertThat( loaded ).isNotNull();
assertThat( loaded.createdAt ).isEqualTo( merged.createdAt );
assertThat( loaded.updatedAt ).isEqualTo( merged.updatedAt );
// assertThat( loaded.createdAt2 ).isEqualTo( merged.createdAt2 );
// assertThat( loaded.updatedAt2 ).isEqualTo( merged.updatedAt2 );
}
@Entity( name = "GeneratedInstantEntity" )
@Table( name = "gen_ann_instant" )
public static | GeneratedInstantTests |
java | micronaut-projects__micronaut-core | test-suite/src/test/java/io/micronaut/docs/ioc/mappers/ChristmasMappers.java | {
"start": 338,
"end": 662
} | interface ____ {
@Mapping(from = "packaging.color", to = "packagingColor")
@Mapping(from = "#{packaging.weight + present.weight}", to = "weight")
@Mapping(from = "#{'Merry christmas'}", to = "greetingCard")
ChristmasPresent merge(PresentPackaging packaging, Present present);
}
//end::mapper[]
| ChristmasMappers |
java | micronaut-projects__micronaut-core | http-server/src/main/java/io/micronaut/http/server/exceptions/InternalServerException.java | {
"start": 755,
"end": 1145
} | class ____ extends HttpServerException {
/**
* @param message The message
*/
public InternalServerException(String message) {
super(message);
}
/**
* @param message The message
* @param cause The throwable
*/
public InternalServerException(String message, Throwable cause) {
super(message, cause);
}
}
| InternalServerException |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/compositefk/OneToOneEmbeddedIdWithGenericAttributeTest.java | {
"start": 4287,
"end": 5015
} | class ____ extends DomainEntityModel<String> {
private Integer code;
private String name;
@OneToOne(mappedBy = "customer")
private Invoice invoice;
public Customer(Integer code, String name) {
this();
this.code = code;
this.name = name;
}
protected Customer() {
super( new CustomerId( "customer" ) );
}
public Integer getCode() {
return code;
}
public void setCode(Integer code) {
this.code = code;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public Invoice getInvoice() {
return invoice;
}
public void setInvoice(Invoice invoice) {
this.invoice = invoice;
}
}
@Embeddable
public static | Customer |
java | elastic__elasticsearch | qa/vector/src/main/java/org/elasticsearch/test/knn/CmdLineArgs.java | {
"start": 10907,
"end": 18859
} | class ____ {
private List<Path> docVectors;
private Path queryVectors;
private int numDocs = 1000;
private int numQueries = 100;
private KnnIndexTester.IndexType indexType = KnnIndexTester.IndexType.HNSW;
private int numCandidates = 1000;
private int k = 10;
private double[] visitPercentages = new double[] { 1.0 };
private int ivfClusterSize = 1000;
private float overSamplingFactor = 0;
private int hnswM = 16;
private int hnswEfConstruction = 200;
private int searchThreads = 1;
private int numSearchers = 1;
private int indexThreads = 1;
private boolean reindex = false;
private boolean forceMerge = false;
private int forceMergeMaxNumSegments = 1;
private VectorSimilarityFunction vectorSpace = VectorSimilarityFunction.EUCLIDEAN;
private int quantizeBits = 8;
private VectorEncoding vectorEncoding = VectorEncoding.FLOAT32;
private int dimensions;
private boolean earlyTermination;
private float filterSelectivity = 1f;
private long seed = 1751900822751L;
private KnnIndexTester.MergePolicyType mergePolicy = null;
private double writerBufferSizeInMb = DEFAULT_WRITER_BUFFER_MB;
private boolean onDiskRescore = false;
private boolean filterCached = true;
/**
* Elasticsearch does not set this explicitly, and in Lucene this setting is
* disabled by default (writer flushes by RAM usage).
*/
private int writerMaxBufferedDocs = IndexWriterConfig.DISABLE_AUTO_FLUSH;
public Builder setDocVectors(List<String> docVectors) {
if (docVectors == null || docVectors.isEmpty()) {
throw new IllegalArgumentException("Document vectors path must be provided");
}
// Convert list of strings to list of Paths
this.docVectors = docVectors.stream().map(PathUtils::get).toList();
return this;
}
public Builder setQueryVectors(String queryVectors) {
this.queryVectors = PathUtils.get(queryVectors);
return this;
}
public Builder setNumDocs(int numDocs) {
this.numDocs = numDocs;
return this;
}
public Builder setNumQueries(int numQueries) {
this.numQueries = numQueries;
return this;
}
public Builder setIndexType(String indexType) {
this.indexType = KnnIndexTester.IndexType.valueOf(indexType.toUpperCase(Locale.ROOT));
return this;
}
public Builder setNumCandidates(int numCandidates) {
this.numCandidates = numCandidates;
return this;
}
public Builder setK(int k) {
this.k = k;
return this;
}
public Builder setVisitPercentages(List<Double> visitPercentages) {
this.visitPercentages = visitPercentages.stream().mapToDouble(Double::doubleValue).toArray();
return this;
}
public Builder setIvfClusterSize(int ivfClusterSize) {
this.ivfClusterSize = ivfClusterSize;
return this;
}
public Builder setOverSamplingFactor(float overSamplingFactor) {
this.overSamplingFactor = overSamplingFactor;
return this;
}
public Builder setHnswM(int hnswM) {
this.hnswM = hnswM;
return this;
}
public Builder setHnswEfConstruction(int hnswEfConstruction) {
this.hnswEfConstruction = hnswEfConstruction;
return this;
}
public Builder setSearchThreads(int searchThreads) {
this.searchThreads = searchThreads;
return this;
}
public Builder setNumSearchers(int numSearchers) {
this.numSearchers = numSearchers;
return this;
}
public Builder setIndexThreads(int indexThreads) {
this.indexThreads = indexThreads;
return this;
}
public Builder setReindex(boolean reindex) {
this.reindex = reindex;
return this;
}
public Builder setForceMerge(boolean forceMerge) {
this.forceMerge = forceMerge;
return this;
}
public Builder setVectorSpace(String vectorSpace) {
this.vectorSpace = VectorSimilarityFunction.valueOf(vectorSpace.toUpperCase(Locale.ROOT));
return this;
}
public Builder setQuantizeBits(int quantizeBits) {
this.quantizeBits = quantizeBits;
return this;
}
public Builder setVectorEncoding(String vectorEncoding) {
this.vectorEncoding = VectorEncoding.valueOf(vectorEncoding.toUpperCase(Locale.ROOT));
return this;
}
public Builder setDimensions(int dimensions) {
this.dimensions = dimensions;
return this;
}
public Builder setEarlyTermination(Boolean patience) {
this.earlyTermination = patience;
return this;
}
public Builder setFilterSelectivity(float filterSelectivity) {
this.filterSelectivity = filterSelectivity;
return this;
}
public Builder setSeed(long seed) {
this.seed = seed;
return this;
}
public Builder setMergePolicy(String mergePolicy) {
this.mergePolicy = KnnIndexTester.MergePolicyType.valueOf(mergePolicy.toUpperCase(Locale.ROOT));
return this;
}
public Builder setWriterBufferMb(double writerBufferSizeInMb) {
this.writerBufferSizeInMb = writerBufferSizeInMb;
return this;
}
public Builder setWriterMaxBufferedDocs(int writerMaxBufferedDocs) {
this.writerMaxBufferedDocs = writerMaxBufferedDocs;
return this;
}
public Builder setForceMergeMaxNumSegments(int forceMergeMaxNumSegments) {
this.forceMergeMaxNumSegments = forceMergeMaxNumSegments;
return this;
}
public Builder setOnDiskRescore(boolean onDiskRescore) {
this.onDiskRescore = onDiskRescore;
return this;
}
public Builder setFilterCached(boolean filterCached) {
this.filterCached = filterCached;
return this;
}
public CmdLineArgs build() {
if (docVectors == null) {
throw new IllegalArgumentException("Document vectors path must be provided");
}
if (dimensions <= 0 && dimensions != -1) {
throw new IllegalArgumentException(
"dimensions must be a positive integer or -1 for when dimension is available in the vector file"
);
}
return new CmdLineArgs(
docVectors,
queryVectors,
numDocs,
numQueries,
indexType,
numCandidates,
k,
visitPercentages,
ivfClusterSize,
overSamplingFactor,
hnswM,
hnswEfConstruction,
searchThreads,
numSearchers,
indexThreads,
reindex,
forceMerge,
filterSelectivity,
seed,
vectorSpace,
quantizeBits,
vectorEncoding,
dimensions,
earlyTermination,
mergePolicy,
writerBufferSizeInMb,
writerMaxBufferedDocs,
forceMergeMaxNumSegments,
onDiskRescore,
filterCached
);
}
}
}
| Builder |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java | {
"start": 2366,
"end": 3249
} | class ____ provides useful static methods for lease recovery.
*
* Lease Recovery Algorithm
* 1) Namenode retrieves lease information
* 2) For each file f in the lease, consider the last block b of f
* 2.1) Get the datanodes which contains b
* 2.2) Assign one of the datanodes as the primary datanode p
* 2.3) p obtains a new generation stamp from the namenode
* 2.4) p gets the block info from each datanode
* 2.5) p computes the minimum block length
* 2.6) p updates the datanodes, which have a valid generation stamp,
* with the new generation stamp and the minimum block length
* 2.7) p acknowledges the namenode the update results
* 2.8) Namenode updates the BlockInfo
* 2.9) Namenode removes f from the lease
* and removes the lease once all files have been removed
* 2.10) Namenode commit changes to edit log
*/
@InterfaceAudience.Private
public | also |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestTimelineEntityGroupId.java | {
"start": 1265,
"end": 2395
} | class ____ {
@Test
void testTimelineEntityGroupId() {
ApplicationId appId1 = ApplicationId.newInstance(1234, 1);
ApplicationId appId2 = ApplicationId.newInstance(1234, 2);
TimelineEntityGroupId group1 = TimelineEntityGroupId.newInstance(appId1, "1");
TimelineEntityGroupId group2 = TimelineEntityGroupId.newInstance(appId1, "2");
TimelineEntityGroupId group3 = TimelineEntityGroupId.newInstance(appId2, "1");
TimelineEntityGroupId group4 = TimelineEntityGroupId.newInstance(appId1, "1");
assertEquals(group1, group4);
assertNotEquals(group1, group2);
assertNotEquals(group1, group3);
assertTrue(group1.compareTo(group4) == 0);
assertTrue(group1.compareTo(group2) < 0);
assertTrue(group1.compareTo(group3) < 0);
assertTrue(group1.hashCode() == group4.hashCode());
assertFalse(group1.hashCode() == group2.hashCode());
assertFalse(group1.hashCode() == group3.hashCode());
assertEquals("timelineEntityGroupId_1234_1_1", group1.toString());
assertEquals(TimelineEntityGroupId.fromString("timelineEntityGroupId_1234_1_1"), group1);
}
}
| TestTimelineEntityGroupId |
java | alibaba__nacos | common/src/main/java/com/alibaba/nacos/common/packagescan/resource/PathMatchingResourcePatternResolver.java | {
"start": 9908,
"end": 10529
} | class ____.
*
* @param resourceLoader the ResourceLoader to load root directories and
* actual resources with
*/
public PathMatchingResourcePatternResolver(ResourceLoader resourceLoader) {
AbstractAssert.notNull(resourceLoader, "ResourceLoader must not be null");
this.resourceLoader = resourceLoader;
}
/**
* Create a new PathMatchingResourcePatternResolver with a DefaultResourceLoader.
*
* @param classLoader the ClassLoader to load classpath resources with,
* or {@code null} for using the thread context | loader |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestion.java | {
"start": 2470,
"end": 4143
} | class ____ implements Comparator<Suggestion.Entry.Option> {
@Override
public int compare(Suggestion.Entry.Option first, Suggestion.Entry.Option second) {
// first criteria: the popularity
int cmp = ((TermSuggestion.Entry.Option) second).getFreq() - ((TermSuggestion.Entry.Option) first).getFreq();
if (cmp != 0) {
return cmp;
}
// second criteria (if first criteria is equal): the distance
cmp = Float.compare(second.getScore(), first.getScore());
if (cmp != 0) {
return cmp;
}
// third criteria: term text
return first.getText().compareTo(second.getText());
}
}
@Override
protected Comparator<Option> sortComparator() {
return switch (sort) {
case SCORE -> SCORE;
case FREQUENCY -> FREQUENCY;
};
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
sort.writeTo(out);
}
@Override
public String getWriteableName() {
return TermSuggestionBuilder.SUGGESTION_NAME;
}
@Override
protected Entry newEntry(StreamInput in) throws IOException {
return new Entry(in);
}
@Override
public boolean equals(Object other) {
return super.equals(other) && Objects.equals(sort, ((TermSuggestion) other).sort);
}
@Override
public int hashCode() {
return Objects.hash(super.hashCode(), sort);
}
/**
* Represents a part from the suggest text with suggested options.
*/
public static | Frequency |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/jpa/criteria/Animal.java | {
"start": 342,
"end": 908
} | class ____ {
private Long id;
private Animal mother;
private Animal father;
private String name;
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
@Id
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
@ManyToOne
public Animal getMother() {
return mother;
}
public void setMother(Animal mother) {
this.mother = mother;
}
@ManyToOne
public Animal getFather() {
return father;
}
public void setFather(Animal father) {
this.father = father;
}
}
| Animal |
java | google__guava | android/guava/src/com/google/common/hash/MacHashFunction.java | {
"start": 2785,
"end": 3688
} | class ____ extends AbstractByteHasher {
private final Mac mac;
private boolean done;
private MacHasher(Mac mac) {
this.mac = mac;
}
@Override
protected void update(byte b) {
checkNotDone();
mac.update(b);
}
@Override
protected void update(byte[] b) {
checkNotDone();
mac.update(b);
}
@Override
protected void update(byte[] b, int off, int len) {
checkNotDone();
mac.update(b, off, len);
}
@Override
protected void update(ByteBuffer bytes) {
checkNotDone();
checkNotNull(bytes);
mac.update(bytes);
}
private void checkNotDone() {
checkState(!done, "Cannot re-use a Hasher after calling hash() on it");
}
@Override
public HashCode hash() {
checkNotDone();
done = true;
return HashCode.fromBytesNoCopy(mac.doFinal());
}
}
}
| MacHasher |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.