language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
quarkusio__quarkus
|
integration-tests/openapi/src/test/java/io/quarkus/it/openapi/AbstractByteArrayTest.java
|
{
"start": 307,
"end": 2010
}
|
class ____ extends AbstractTest {
protected static final String APPLICATION_OCTET_STREAM = "application/octet-stream";
protected void testServiceByteArrayRequest(String path, String expectedContentType) throws IOException {
byte[] b = Files.readAllBytes(tempFile().toPath());
byte[] responseFile = RestAssured
.with().body(b)
.and()
.with().contentType(APPLICATION_OCTET_STREAM)
.when()
.post(path)
.then()
.header("Content-Type", Matchers.startsWith(APPLICATION_OCTET_STREAM))
.extract().asByteArray();
Assertions.assertEquals(b.length, responseFile.length);
}
protected void testServiceByteArrayResponse(String path, String expectedResponseType)
throws UnsupportedEncodingException, IOException {
// Service
File f = tempFile();
byte[] b = Files.readAllBytes(f.toPath());
String filename = URLEncoder.encode(f.getAbsoluteFile().toString(), "UTF-8");
byte[] responseFile = RestAssured
.when()
.get(path + "/" + filename)
.then()
.header("Content-Type", Matchers.startsWith(expectedResponseType))
.and()
.extract().asByteArray();
Assertions.assertEquals(b.length, responseFile.length);
}
private File tempFile() {
try {
java.nio.file.Path createTempFile = Files.createTempFile("", "");
return createTempFile.toFile();
} catch (IOException ex) {
throw new RuntimeException(ex);
}
}
}
|
AbstractByteArrayTest
|
java
|
alibaba__druid
|
core/src/main/java/com/alibaba/druid/sql/dialect/odps/parser/OdpsSelectParser.java
|
{
"start": 1105,
"end": 10033
}
|
class ____ extends SQLSelectParser {
public OdpsSelectParser(SQLExprParser exprParser) {
super(exprParser.getLexer());
this.exprParser = exprParser;
}
public OdpsSelectParser(SQLExprParser exprParser, SQLSelectListCache selectListCache) {
super(exprParser.getLexer());
this.exprParser = exprParser;
this.selectListCache = selectListCache;
}
protected SQLSelectQueryBlock createSelectQueryBlock() {
return new OdpsSelectQueryBlock();
}
@Override
public SQLSelectQuery query(SQLObject parent, boolean acceptUnion) {
if (lexer.token() == Token.LPAREN) {
lexer.nextToken();
SQLSelectQuery select = query();
accept(Token.RPAREN);
return queryRest(select, acceptUnion);
}
OdpsSelectQueryBlock queryBlock = new OdpsSelectQueryBlock();
if (lexer.hasComment() && lexer.isKeepComments()) {
queryBlock.addBeforeComment(lexer.readAndResetComments());
}
if (lexer.token() == Token.FROM) {
parseFrom(queryBlock);
parseWhere(queryBlock);
parseGroupBy(queryBlock);
if (lexer.token() == Token.SELECT) {
lexer.nextToken();
if (lexer.token() == Token.HINT) {
this.exprParser.parseHints(queryBlock.getHints());
}
if (lexer.token() == Token.COMMENT) {
lexer.nextToken();
}
if (lexer.token() == Token.DISTINCT) {
queryBlock.setDistionOption(SQLSetQuantifier.DISTINCT);
lexer.nextToken();
} else if (lexer.token() == Token.UNIQUE) {
Lexer.SavePoint mark = lexer.mark();
lexer.nextToken();
if (lexer.token() == Token.DOT) {
lexer.reset(mark);
} else {
queryBlock.setDistionOption(SQLSetQuantifier.UNIQUE);
}
} else if (lexer.token() == Token.ALL) {
String str = lexer.stringVal();
lexer.nextToken();
queryBlock.setDistionOption(SQLSetQuantifier.ALL);
}
parseSelectList(queryBlock);
}
if (queryBlock.getWhere() == null && lexer.token() == Token.WHERE) {
parseWhere(queryBlock);
}
} else {
accept(Token.SELECT);
if (lexer.token() == Token.HINT) {
this.exprParser.parseHints(queryBlock.getHints());
}
if (lexer.token() == Token.COMMENT) {
Lexer.SavePoint mark = lexer.mark();
String tokenStr = lexer.stringVal();
lexer.nextToken();
if (lexer.token() == Token.COMMA) {
SQLIdentifierExpr expr = new SQLIdentifierExpr(tokenStr);
queryBlock.addSelectItem(expr);
lexer.nextToken();
} else {
lexer.reset(mark);
}
}
if (queryBlock.getSelectList().isEmpty()) {
if (lexer.token() == Token.DISTINCT) {
queryBlock.setDistionOption(SQLSetQuantifier.DISTINCT);
lexer.nextToken();
} else if (lexer.token() == Token.UNIQUE) {
Lexer.SavePoint mark = lexer.mark();
lexer.nextToken();
if (lexer.token() == Token.DOT || lexer.token() == Token.COMMA) {
lexer.reset(mark);
} else {
queryBlock.setDistionOption(SQLSetQuantifier.UNIQUE);
}
} else if (lexer.token() == Token.ALL) {
Lexer.SavePoint mark = lexer.mark();
lexer.nextToken();
switch (lexer.token()) {
case DOT:
case COMMA:
case SUB:
case PLUS:
case SLASH:
case GT:
case GTEQ:
case EQ:
case LT:
case LTEQ:
lexer.reset(mark);
break;
default:
queryBlock.setDistionOption(SQLSetQuantifier.ALL);
break;
}
}
}
parseSelectList(queryBlock);
parseFrom(queryBlock);
if (queryBlock.getFrom() == null && lexer.token() == Token.LATERAL) {
lexer.nextToken();
SQLTableSource tableSource = this.parseLateralView(null);
queryBlock.setFrom(tableSource);
}
parseWhere(queryBlock);
parseGroupBy(queryBlock);
}
parseGroupBy(queryBlock);
if (lexer.identifierEquals(FnvHash.Constants.WINDOW)) {
parseWindow(queryBlock);
}
if (lexer.token() == Token.QUALIFY) {
lexer.nextToken();
SQLExpr qualify = this.exprParser.expr();
queryBlock.setQualify(qualify);
}
queryBlock.setOrderBy(this.exprParser.parseOrderBy());
queryBlock.setZOrderBy(this.exprParser.parseZOrderBy());
if (lexer.token() == Token.DISTRIBUTE) {
lexer.nextToken();
accept(Token.BY);
for (; ; ) {
SQLSelectOrderByItem distributeByItem = this.exprParser.parseSelectOrderByItem();
queryBlock.addDistributeBy(distributeByItem);
if (lexer.token() == Token.COMMA) {
lexer.nextToken();
} else {
break;
}
}
}
if (lexer.identifierEquals(FnvHash.Constants.ZORDER)) {
queryBlock.setZOrderBy(this.exprParser.parseZOrderBy());
}
if (lexer.identifierEquals(FnvHash.Constants.SORT)) {
lexer.nextToken();
accept(Token.BY);
for (; ; ) {
SQLSelectOrderByItem sortByItem = this.exprParser.parseSelectOrderByItem();
queryBlock.addSortBy(sortByItem);
if (lexer.token() == Token.COMMA) {
lexer.nextToken();
} else {
break;
}
}
}
if (lexer.identifierEquals(FnvHash.Constants.CLUSTER)) {
lexer.nextToken();
accept(Token.BY);
for (; ; ) {
SQLSelectOrderByItem clusterByItem = this.exprParser.parseSelectOrderByItem();
queryBlock.addClusterBy(clusterByItem);
if (lexer.token() == Token.COMMA) {
lexer.nextToken();
} else {
break;
}
}
}
if (lexer.token() == Token.LIMIT) {
SQLLimit limit = exprParser.parseLimit();
queryBlock.setLimit(limit);
}
return queryRest(queryBlock, acceptUnion);
}
public SQLTableSource parseTableSource() {
if (lexer.token() == Token.NULL) {
String str = lexer.stringVal();
lexer.nextToken();
return new SQLExprTableSource(new SQLIdentifierExpr(str));
}
SQLTableSource tableSource = super.parseTableSource();
if (lexer.token() == Token.HINT) {
this.exprParser.parseHints(tableSource.getHints());
}
if (lexer.token() == Token.TABLE && tableSource.getAlias() == null) {
tableSource.setAlias(
lexer.stringVal()
);
lexer.nextToken();
if (tableSource instanceof SQLLateralViewTableSource) {
if (lexer.token() == Token.AS) {
parseLateralViewAs((SQLLateralViewTableSource) tableSource);
}
}
tableSource = parseTableSourceRest(tableSource);
}
return tableSource;
}
protected SQLTableSource primaryTableSourceRest(SQLTableSource tableSource) {
if (lexer.identifierEquals(FnvHash.Constants.LATERAL) || lexer.token() == Token.LATERAL) {
Lexer.SavePoint mark = lexer.mark();
lexer.nextToken();
if (lexer.token() == Token.VIEW) {
tableSource = parseLateralView(tableSource);
} else {
lexer.reset(mark);
}
}
return tableSource;
}
public void parseTableSourceSample(SQLTableSource tableSource) {
parseTableSourceSampleHive(tableSource);
}
}
|
OdpsSelectParser
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/serializer/IgnoreNonFieldGetterTest2.java
|
{
"start": 202,
"end": 529
}
|
class ____ extends TestCase {
public void test_int() throws Exception {
VO vo = new VO();
vo.setId(123);
String text = JSON.toJSONString(vo, SerializerFeature.IgnoreNonFieldGetter);
Assert.assertEquals("{\"id\":123}", text);
}
private static
|
IgnoreNonFieldGetterTest2
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/TypeParameterShadowingTest.java
|
{
"start": 1512,
"end": 1856
}
|
class ____<T> {
// BUG: Diagnostic contains: T declared in Test
<T> void something() {}
}
""")
.doTest();
}
@Test
public void staticNotFlagged() {
compilationHelper
.addSourceLines(
"Test.java",
"""
package foo.bar;
|
Test
|
java
|
elastic__elasticsearch
|
modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3BasicCredentialsRestIT.java
|
{
"start": 1249,
"end": 2899
}
|
class ____ extends AbstractRepositoryS3RestTestCase {
private static final String PREFIX = getIdentifierPrefix("RepositoryS3BasicCredentialsRestIT");
private static final String BUCKET = PREFIX + "bucket";
private static final String BASE_PATH = PREFIX + "base_path";
private static final String ACCESS_KEY = PREFIX + "access-key";
private static final String SECRET_KEY = PREFIX + "secret-key";
private static final String CLIENT = "basic_credentials_client";
private static final Supplier<String> regionSupplier = new DynamicRegionSupplier();
private static final S3HttpFixture s3Fixture = new S3HttpFixture(
true,
BUCKET,
BASE_PATH,
fixedAccessKey(ACCESS_KEY, regionSupplier, "s3")
);
public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
.module("repository-s3")
.systemProperty("aws.region", regionSupplier)
.keystore("s3.client." + CLIENT + ".access_key", ACCESS_KEY)
.keystore("s3.client." + CLIENT + ".secret_key", SECRET_KEY)
.setting("s3.client." + CLIENT + ".endpoint", s3Fixture::getAddress)
.build();
@ClassRule
public static TestRule ruleChain = RuleChain.outerRule(s3Fixture).around(cluster);
@Override
protected String getTestRestCluster() {
return cluster.getHttpAddresses();
}
@Override
protected String getBucketName() {
return BUCKET;
}
@Override
protected String getBasePath() {
return BASE_PATH;
}
@Override
protected String getClientName() {
return CLIENT;
}
}
|
RepositoryS3BasicCredentialsRestIT
|
java
|
apache__logging-log4j2
|
log4j-core/src/main/java/org/apache/logging/log4j/core/osgi/Activator.java
|
{
"start": 2226,
"end": 6084
}
|
class ____ extends ProviderActivator implements SynchronousBundleListener {
private static final Logger LOGGER = StatusLogger.getLogger();
private final AtomicReference<BundleContext> contextRef = new AtomicReference<>();
private ServiceRegistration<ContextDataProvider> contextDataRegistration = null;
public Activator() {
super(new Log4jProvider());
}
@Override
public void start(final BundleContext context) throws Exception {
super.start(context);
final ContextDataProvider threadContextProvider = new ThreadContextDataProvider();
contextDataRegistration = context.registerService(ContextDataProvider.class, threadContextProvider, null);
loadContextProviders(context);
// allow the user to override the default ContextSelector (e.g., by using BasicContextSelector for a global cfg)
if (PropertiesUtil.getProperties().getStringProperty(Constants.LOG4J_CONTEXT_SELECTOR) == null) {
System.setProperty(Constants.LOG4J_CONTEXT_SELECTOR, BundleContextSelector.class.getName());
}
if (this.contextRef.compareAndSet(null, context)) {
context.addBundleListener(this);
// done after the BundleListener as to not miss any new bundle installs in the interim
scanInstalledBundlesForPlugins(context);
}
}
private static void scanInstalledBundlesForPlugins(final BundleContext context) {
final Bundle[] bundles = context.getBundles();
for (final Bundle bundle : bundles) {
// TODO: bundle state can change during this
scanBundleForPlugins(bundle);
}
}
private static void scanBundleForPlugins(final Bundle bundle) {
final long bundleId = bundle.getBundleId();
// LOG4J2-920: don't scan system bundle for plugins
if (bundle.getState() == Bundle.ACTIVE && bundleId != 0) {
LOGGER.trace("Scanning bundle [{}, id=%d] for plugins.", bundle.getSymbolicName(), bundleId);
PluginRegistry.getInstance()
.loadFromBundle(bundleId, bundle.adapt(BundleWiring.class).getClassLoader());
}
}
private static void loadContextProviders(final BundleContext bundleContext) {
try {
final Collection<ServiceReference<ContextDataProvider>> serviceReferences =
bundleContext.getServiceReferences(ContextDataProvider.class, null);
for (final ServiceReference<ContextDataProvider> serviceReference : serviceReferences) {
final ContextDataProvider provider = bundleContext.getService(serviceReference);
ThreadContextDataInjector.contextDataProviders.add(provider);
}
} catch (final InvalidSyntaxException ex) {
LOGGER.error("Error accessing context data provider", ex);
}
}
private static void stopBundlePlugins(final Bundle bundle) {
LOGGER.trace("Stopping bundle [{}] plugins.", bundle.getSymbolicName());
// TODO: plugin lifecycle code
PluginRegistry.getInstance().clearBundlePlugins(bundle.getBundleId());
}
@Override
public void stop(final BundleContext context) throws Exception {
contextDataRegistration.unregister();
this.contextRef.compareAndSet(context, null);
LogManager.shutdown();
super.stop(context);
}
@Override
public void bundleChanged(final BundleEvent event) {
switch (event.getType()) {
// FIXME: STARTING instead of STARTED?
case BundleEvent.STARTED:
scanBundleForPlugins(event.getBundle());
break;
case BundleEvent.STOPPING:
stopBundlePlugins(event.getBundle());
break;
default:
break;
}
}
}
|
Activator
|
java
|
elastic__elasticsearch
|
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/InternalRealms.java
|
{
"start": 3029,
"end": 9686
}
|
class ____ {
static final String RESERVED_TYPE = ReservedRealm.TYPE;
static final String NATIVE_TYPE = NativeRealmSettings.TYPE;
static final String FILE_TYPE = FileRealmSettings.TYPE;
static final String LDAP_TYPE = LdapRealmSettings.LDAP_TYPE;
static final String AD_TYPE = LdapRealmSettings.AD_TYPE;
static final String PKI_TYPE = PkiRealmSettings.TYPE;
static final String SAML_TYPE = SingleSpSamlRealmSettings.TYPE;
static final String OIDC_TYPE = OpenIdConnectRealmSettings.TYPE;
static final String JWT_TYPE = JwtRealmSettings.TYPE;
static final String KERBEROS_TYPE = KerberosRealmSettings.TYPE;
private static final Set<String> BUILTIN_TYPES = Set.of(NATIVE_TYPE, FILE_TYPE);
/**
* The map of all <em>licensed</em> internal realm types to their licensed feature
*/
private static final Map<String, LicensedFeature.Persistent> LICENSED_REALMS;
static {
Map<String, LicensedFeature.Persistent> realms = new HashMap<>();
realms.put(AD_TYPE, Security.AD_REALM_FEATURE);
realms.put(LDAP_TYPE, Security.LDAP_REALM_FEATURE);
realms.put(PKI_TYPE, Security.PKI_REALM_FEATURE);
realms.put(SAML_TYPE, Security.SAML_REALM_FEATURE);
realms.put(KERBEROS_TYPE, Security.KERBEROS_REALM_FEATURE);
realms.put(OIDC_TYPE, Security.OIDC_REALM_FEATURE);
realms.put(JWT_TYPE, Security.JWT_REALM_FEATURE);
LICENSED_REALMS = Map.copyOf(realms);
}
/**
* The set of all <em>internal</em> realm types, excluding {@link ReservedRealm#TYPE}
* @deprecated Use of this method (other than in tests) is discouraged.
*/
@Deprecated
public static Collection<String> getConfigurableRealmsTypes() {
return Set.copyOf(Sets.union(BUILTIN_TYPES, LICENSED_REALMS.keySet()));
}
static boolean isInternalRealm(String type) {
return RESERVED_TYPE.equals(type) || BUILTIN_TYPES.contains(type) || LICENSED_REALMS.containsKey(type);
}
static boolean isBuiltinRealm(String type) {
return BUILTIN_TYPES.contains(type);
}
/**
* @return The licensed feature for the given realm type, or {@code null} if the realm does not require a specific license type
* @throws IllegalArgumentException if the provided type is not an {@link #isInternalRealm(String) internal realm}
*/
@Nullable
static LicensedFeature.Persistent getLicensedFeature(String type) {
if (Strings.isNullOrEmpty(type)) {
throw new IllegalArgumentException("Empty realm type [" + type + "]");
}
if (type.equals(RESERVED_TYPE) || isBuiltinRealm(type)) {
return null;
}
final LicensedFeature.Persistent feature = LICENSED_REALMS.get(type);
if (feature == null) {
throw new IllegalArgumentException("Unsupported realm type [" + type + "]");
}
return feature;
}
/**
* Creates {@link Realm.Factory factories} for each <em>internal</em> realm type.
* This excludes the {@link ReservedRealm}, as it cannot be created dynamically.
*
* @return A map from <em>realm-type</em> to <code>Factory</code>
*/
public static Map<String, Realm.Factory> getFactories(
ThreadPool threadPool,
Settings settings,
ResourceWatcherService resourceWatcherService,
SSLService sslService,
NativeUsersStore nativeUsersStore,
UserRoleMapper userRoleMapper,
SecurityIndexManager securityIndex
) {
return Map.of(
// file realm
FileRealmSettings.TYPE,
config -> new FileRealm(config, resourceWatcherService, threadPool),
// native realm
NativeRealmSettings.TYPE,
config -> buildNativeRealm(threadPool, settings, nativeUsersStore, securityIndex, config),
// active directory realm
LdapRealmSettings.AD_TYPE,
config -> new LdapRealm(config, sslService, resourceWatcherService, userRoleMapper, threadPool),
// LDAP realm
LdapRealmSettings.LDAP_TYPE,
config -> new LdapRealm(config, sslService, resourceWatcherService, userRoleMapper, threadPool),
// PKI realm
PkiRealmSettings.TYPE,
config -> new PkiRealm(config, resourceWatcherService, userRoleMapper),
// SAML realm
SingleSpSamlRealmSettings.TYPE,
config -> SamlRealm.create(
config,
sslService,
resourceWatcherService,
userRoleMapper,
SingleSamlSpConfiguration.create(config)
),
// Kerberos realm
KerberosRealmSettings.TYPE,
config -> new KerberosRealm(config, userRoleMapper, threadPool),
// OpenID Connect realm
OpenIdConnectRealmSettings.TYPE,
config -> new OpenIdConnectRealm(config, sslService, userRoleMapper, resourceWatcherService),
// JWT realm
JwtRealmSettings.TYPE,
config -> new JwtRealm(config, sslService, userRoleMapper, threadPool)
);
}
private static NativeRealm buildNativeRealm(
ThreadPool threadPool,
Settings settings,
NativeUsersStore nativeUsersStore,
SecurityIndexManager securityIndex,
RealmConfig config
) {
if (settings.getAsBoolean(NativeRealmSettings.NATIVE_USERS_ENABLED, true) == false) {
throw new IllegalArgumentException(
"Cannot configure a ["
+ NativeRealmSettings.TYPE
+ "] realm when ["
+ NativeRealmSettings.NATIVE_USERS_ENABLED
+ "] is false"
);
}
final NativeRealm nativeRealm = new NativeRealm(config, nativeUsersStore, threadPool);
securityIndex.addStateListener(nativeRealm::onSecurityIndexStateChange);
return nativeRealm;
}
private InternalRealms() {}
public static List<BootstrapCheck> getBootstrapChecks(final Settings globalSettings, final Environment env) {
final Set<String> realmTypes = Sets.newHashSet(LdapRealmSettings.AD_TYPE, LdapRealmSettings.LDAP_TYPE, PkiRealmSettings.TYPE);
return RealmSettings.getRealmSettings(globalSettings)
.keySet()
.stream()
.filter(id -> realmTypes.contains(id.getType()))
.map(id -> new RealmConfig(id, globalSettings, env, null))
.map(RoleMappingFileBootstrapCheck::create)
.filter(Objects::nonNull)
.toList();
}
}
|
InternalRealms
|
java
|
apache__camel
|
components/camel-elytron/src/main/java/org/apache/camel/component/elytron/ElytronSecurityProvider.java
|
{
"start": 2577,
"end": 6971
}
|
class ____ implements UndertowSecurityProvider {
/**
* Name of the header which contains associated security identity if request is authenticated.
*/
public static final String SECURITY_IDENTITY_HEADER = "securityIdentity";
private SecurityDomain securityDomain;
private WildFlyElytronBaseProvider elytronProvider;
private String mechanismName;
/**
* Provider adds header `securityIdentity` with value of type `SecurityIdentity` after successful authentication.
*/
@Override
public void addHeader(BiConsumer<String, Object> consumer, HttpServerExchange httpExchange) throws Exception {
SecurityIdentity securityIdentity = this.securityDomain.getCurrentSecurityIdentity();
//add security principal to headers
consumer.accept(SECURITY_IDENTITY_HEADER, securityIdentity);
}
/**
* Authentication is verified by securityDomain from configuration.
*/
@Override
public int authenticate(HttpServerExchange httpExchange, List<String> allowedRoles) throws Exception {
SecurityIdentity identity = this.securityDomain.getCurrentSecurityIdentity();
if (identity != null) {
//already authenticated
Set<String> roles = new HashSet<>();
Roles identityRoles = identity.getRoles();
if (identityRoles != null) {
for (String roleName : identityRoles) {
roles.add(roleName);
}
}
if (isAllowed(roles, allowedRoles)) {
return StatusCodes.OK;
}
}
return StatusCodes.FORBIDDEN;
}
@Override
public boolean acceptConfiguration(Object configuration, String endpointUri) throws Exception {
if (configuration instanceof ElytronSercurityConfiguration) {
ElytronSercurityConfiguration conf = (ElytronSercurityConfiguration) configuration;
this.securityDomain = conf.getDomainBuilder().build();
this.mechanismName = conf.getMechanismName();
this.elytronProvider = conf.getElytronProvider();
return true;
}
return false;
}
/**
* Elytron hook into undertow is by creation of wrapping httpHandler.
*/
@Override
public HttpHandler wrapHttpHandler(HttpHandler httpHandler) throws Exception {
HttpAuthenticationFactory httpAuthenticationFactory = createHttpAuthenticationFactory(securityDomain);
HttpHandler rootHandler = new ElytronRunAsHandler(httpHandler);
rootHandler = new AuthenticationCallHandler(rootHandler);
rootHandler = new AuthenticationConstraintHandler(rootHandler);
return ElytronContextAssociationHandler.builder()
.setNext(rootHandler)
.setMechanismSupplier(() -> {
try {
return Collections.singletonList(httpAuthenticationFactory.createMechanism(mechanismName));
} catch (HttpAuthenticationException e) {
throw new RuntimeCamelException(e);
}
}).build();
}
private HttpAuthenticationFactory createHttpAuthenticationFactory(final SecurityDomain securityDomain) {
HttpServerAuthenticationMechanismFactory providerFactory
= new SecurityProviderServerMechanismFactory(() -> new Provider[] { this.elytronProvider });
HttpServerAuthenticationMechanismFactory httpServerMechanismFactory
= new FilterServerMechanismFactory(providerFactory, true, this.mechanismName);
return HttpAuthenticationFactory.builder()
.setSecurityDomain(securityDomain)
.setMechanismConfigurationSelector(MechanismConfigurationSelector.constantSelector(
MechanismConfiguration.builder()
.addMechanismRealm(MechanismRealmConfiguration.builder().setRealmName("Elytron Realm").build())
.build()))
.setFactory(httpServerMechanismFactory)
.build();
}
public boolean isAllowed(Set<String> roles, List<String> allowedRoles) {
for (String role : allowedRoles) {
if (roles.contains(role)) {
return true;
}
}
return false;
}
}
|
ElytronSecurityProvider
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/PhaseTests.java
|
{
"start": 956,
"end": 3509
}
|
class ____ extends AbstractXContentSerializingTestCase<Phase> {
private String phaseName;
@Before
public void setup() {
phaseName = randomAlphaOfLength(20);
}
@Override
protected Phase createTestInstance() {
return randomTestPhase(phaseName);
}
static Phase randomTestPhase(String phaseName) {
TimeValue after = null;
if (randomBoolean()) {
after = randomTimeValue(0, 1_000_000_000, TimeUnit.SECONDS, TimeUnit.MINUTES, TimeUnit.HOURS, TimeUnit.DAYS);
}
Map<String, LifecycleAction> actions = Map.of();
if (randomBoolean()) {
actions = Map.of(MockAction.NAME, new MockAction());
}
return new Phase(phaseName, after, actions);
}
@Override
protected Phase doParseInstance(XContentParser parser) throws IOException {
return Phase.parse(parser, phaseName);
}
@Override
protected Reader<Phase> instanceReader() {
return Phase::new;
}
protected NamedWriteableRegistry getNamedWriteableRegistry() {
return new NamedWriteableRegistry(
List.of(new NamedWriteableRegistry.Entry(LifecycleAction.class, MockAction.NAME, MockAction::new))
);
}
@Override
protected NamedXContentRegistry xContentRegistry() {
return new NamedXContentRegistry(
CollectionUtils.appendToCopy(
ClusterModule.getNamedXWriteables(),
new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(MockAction.NAME), MockAction::parse)
)
);
}
@Override
protected Phase mutateInstance(Phase instance) {
String name = instance.getName();
TimeValue after = instance.getMinimumAge();
Map<String, LifecycleAction> actions = instance.getActions();
switch (between(0, 2)) {
case 0 -> name = name + randomAlphaOfLengthBetween(1, 5);
case 1 -> after = TimeValue.timeValueSeconds(after.getSeconds() + randomIntBetween(1, 1000));
case 2 -> {
actions = new HashMap<>(actions);
actions.put(MockAction.NAME + "another", new MockAction(List.of()));
}
default -> throw new AssertionError("Illegal randomisation branch");
}
return new Phase(name, after, actions);
}
public void testDefaultAfter() {
Phase phase = new Phase(randomAlphaOfLength(20), null, Map.of());
assertEquals(TimeValue.ZERO, phase.getMinimumAge());
}
}
|
PhaseTests
|
java
|
assertj__assertj-core
|
assertj-core/src/main/java/org/assertj/core/api/Boolean2DArrayAssert.java
|
{
"start": 1400,
"end": 14110
}
|
class ____ extends Abstract2DArrayAssert<Boolean2DArrayAssert, boolean[][], Boolean> {
private final Failures failures = Failures.instance();
// TODO reduce the visibility of the fields annotated with @VisibleForTesting
protected Boolean2DArrays boolean2dArrays = Boolean2DArrays.instance();
public Boolean2DArrayAssert(boolean[][] actual) {
super(actual, Boolean2DArrayAssert.class);
}
/**
* Verifies that the actual {@code boolean[][]} is <b>deeply</b> equal to the given one.
* <p>
* Two arrays are considered deeply equal if both are {@code null}
* or if they refer to arrays that contain the same number of elements and
* all corresponding pairs of elements in the two arrays are deeply equal.
* <p>
* Example:
* <pre><code class='java'> // assertion will pass
* assertThat(new boolean[][] {{true, false}, {false, true}}).isDeepEqualTo(new boolean[][] {{true, false}, {false, true}});
*
* // assertions will fail
* assertThat(new boolean[][] {{true, false}, {false, true}}).isDeepEqualTo(new boolean[][] {{true, false}, {true, true}});
* assertThat(new boolean[][] {{true, false}, {false, true}}).isDeepEqualTo(new boolean[][] {{true}, {false, false, true}});</code></pre>
*
* @param expected the given value to compare the actual value to.
* @return {@code this} assertion object.
* @throws AssertionError if the actual value is not deeply equal to the given one.
*/
@Override
public Boolean2DArrayAssert isDeepEqualTo(boolean[][] expected) {
// boolean[][] actual = new boolean[][] { { true, false }, { false, true } };
if (actual == expected) return myself;
isNotNull();
if (expected.length != actual.length) {
throw failures.failure(info, shouldHaveSameSizeAs(actual, expected, actual.length, expected.length));
}
for (int i = 0; i < actual.length; i++) {
boolean[] actualSubArray = actual[i];
boolean[] expectedSubArray = expected[i];
if (actualSubArray == expectedSubArray) continue;
if (actualSubArray == null) throw failures.failure(info, shouldNotBeNull("actual[" + i + "]"));
if (expectedSubArray.length != actualSubArray.length) {
throw failures.failure(info, subarraysShouldHaveSameSize(actual, expected, actualSubArray, actualSubArray.length,
expectedSubArray, expectedSubArray.length, i),
info.representation().toStringOf(actual), info.representation().toStringOf(expected));
}
for (int j = 0; j < actualSubArray.length; j++) {
if (actualSubArray[j] != expectedSubArray[j]) {
throw failures.failure(info, elementShouldBeEqual(actualSubArray[j], expectedSubArray[j], i, j),
info.representation().toStringOf(actual), info.representation().toStringOf(expected));
}
}
}
return myself;
}
/**
* Verifies that the actual {@code boolean[][]} is equal to the given one.
* <p>
* <b>WARNING!</b> This method will use {@code equals} to compare (it will compare arrays references only).<br>
* Unless you specify a comparator with {@link #usingComparator(Comparator)}, it is advised to use
* {@link #isDeepEqualTo(boolean[][])} instead.
* <p>
* Example:
* <pre><code class='java'> boolean[][] array = {{true, true}, {false, false}};
*
* // assertion will pass
* assertThat(array).isEqualTo(array);
*
* // assertion will fail as isEqualTo calls equals which compares arrays references only.
* assertThat(array).isEqualTo(new boolean[][] {{true, true}, {false, false}});</code></pre>
*
* @param expected the given value to compare the actual {@code boolean[][]} to.
* @return {@code this} assertion object.
* @throws AssertionError if the actual {@code boolean[][]} is not equal to the given one.
*/
@Override
public Boolean2DArrayAssert isEqualTo(Object expected) {
return super.isEqualTo(expected);
}
/**
* Verifies that the actual {@code boolean[][]}is {@code null} or empty, empty means the array has no elements,
* said otherwise it can have any number of rows but all rows must be empty.
* <p>
* Example:
* <pre><code class='java'> // assertions will pass
* boolean[][] array = null;
* assertThat(array).isNullOrEmpty();
* assertThat(new boolean[][] { }).isNullOrEmpty();
* assertThat(new boolean[][] {{ }}).isNullOrEmpty();
* // this is considered empty as there are no elements in the 2d array which is comprised of 3 empty rows.
* assertThat(new boolean[][] {{ }, { }, { }}).isNullOrEmpty();
*
* // assertion will fail
* assertThat(new String[][] {{"a"}, {"b"}}).isNullOrEmpty();</code></pre>
*
* @throws AssertionError if the actual {@code boolean[][]}is not {@code null} or not empty.
*/
@Override
public void isNullOrEmpty() {
boolean2dArrays.assertNullOrEmpty(info, actual);
}
/**
* Verifies that the actual {@code boolean[][]}is empty, empty means the array has no elements,
* said otherwise it can have any number of rows but all rows must be empty.
* <p>
* Example:
* <pre><code class='java'> // assertions will pass
* assertThat(new boolean[][] {{ }}).isEmpty();
* // this is considered empty as there are no elements in the 2d array which is comprised of 3 empty rows.
* assertThat(new boolean[][] {{ }, { }, { }}).isEmpty();
*
* // assertions will fail
* assertThat(new boolean[][] {{ true }, { false }}).isEmpty();
* boolean[][] array = null;
* assertThat(array).isEmpty();</code></pre>
*
* @throws AssertionError if the actual {@code boolean[][]}is not empty.
*/
@Override
public void isEmpty() {
boolean2dArrays.assertEmpty(info, actual);
}
/**
* Verifies that the actual {@code boolean[][]}is not empty, not empty means the array has at least one element.
* <p>
* Example:
* <pre><code class='java'> // assertions will pass
* assertThat(new boolean[][] {{ true }, { false }}).isNotEmpty();
* assertThat(new boolean[][] {{ }, { false }}).isNotEmpty();
*
* // assertions will fail
* assertThat(new boolean[][] { }).isNotEmpty();
* assertThat(new boolean[][] {{ }}).isNotEmpty();
* // this is considered empty as there are no elements in the 2d array which is comprised of 3 empty rows.
* assertThat(new boolean[][] {{ }, { }, { }}).isNotEmpty();
* boolean[][] array = null;
* assertThat(array).isNotEmpty();</code></pre>
*
* @return {@code this} assertion object.
* @throws AssertionError if the actual {@code boolean[][]}is empty or null.
*/
@Override
public Boolean2DArrayAssert isNotEmpty() {
boolean2dArrays.assertNotEmpty(info, actual);
return myself;
}
/**
* Verifies that the actual 2D array has the given dimensions.
* <p>
* Example:
* <pre><code class='java'> // assertion will pass
* assertThat(new boolean[][] {{true, true, true}, {false, false, false}}).hasDimensions(2, 3);
*
* // assertions will fail
* assertThat(new boolean[][] { }).hasSize(1, 1);
* assertThat(new boolean[][] {{true, true, true}, {false, false, false}}).hasDimensions(3, 2);
* assertThat(new boolean[][] {{true, true, true}, {false, false, false, false}}).hasDimensions(2, 3); </code></pre>
*
* @param expectedFirstDimension the expected number of values in first dimension of the actual array.
* @param expectedSecondDimension the expected number of values in second dimension of the actual array.
* @return {@code this} assertion object.
* @throws AssertionError if the actual array's dimensions are not equal to the given ones.
*/
@Override
public Boolean2DArrayAssert hasDimensions(int expectedFirstDimension, int expectedSecondDimension) {
boolean2dArrays.assertHasDimensions(info, actual, expectedFirstDimension, expectedSecondDimension);
return myself;
}
/**
* Verifies that the actual two-dimensional array has the given number of rows.
* <p>
* Example:
* <pre><code class='java'> // assertion will pass
* assertThat(new boolean[][] {{true, true, true}, {false, false, false}}).hasNumberOfRows(2);
* assertThat(new boolean[][] {{true}, {true, false}, {true, false, false}}).hasNumberOfRows(3);
*
* // assertions will fail
* assertThat(new boolean[][] { }).hasNumberOfRows(1);
* assertThat(new boolean[][] {{true, true, true}, {false, false, false}}).hasNumberOfRows(3);
* assertThat(new boolean[][] {{true, true, true}, {false, false, false, false}}).hasNumberOfRows(1); </code></pre>
*
* @param expected the expected number of rows of the two-dimensional array.
* @return {@code this} assertion object.
* @throws AssertionError if the actual number of rows are not equal to the given one.
*/
@Override
public Boolean2DArrayAssert hasNumberOfRows(int expected) {
boolean2dArrays.assertNumberOfRows(info, actual, expected);
return myself;
}
/**
* Verifies that the actual {@code boolean[][]} has the same dimensions as the given array.
* <p>
* Parameter is declared as Object to accept both Object and primitive arrays.
* </p>
* Example:
* <pre><code class='java'> boolean[][] booleanArray = {{true, true, false}, {false, false, true}};
* char[][] charArray = {{'a', 'b', 'c'}, {'d', 'e', 'f'}};
*
* // assertion will pass
* assertThat(booleanArray).hasSameDimensionsAs(charArray);
*
* // assertions will fail
* assertThat(booleanArray).hasSameDimensionsAs(new char[][] {{'a', 'b'}, {'c', 'd'}, {'e', 'f'}});
* assertThat(booleanArray).hasSameDimensionsAs(new char[][] {{'a', 'b'}, {'c', 'd', 'e'}});
* assertThat(booleanArray).hasSameDimensionsAs(new char[][] {{'a', 'b', 'c'}, {'d', 'e'}});</code></pre>
*
* @param array the array to compare dimensions with actual {@code boolean[][]}.
* @return {@code this} assertion object.
* @throws AssertionError if the actual {@code boolean[][]} is {@code null}.
* @throws AssertionError if the array parameter is {@code null} or is not a true array.
* @throws AssertionError if actual {@code boolean[][]} and given array don't have the same dimensions.
*/
@Override
public Boolean2DArrayAssert hasSameDimensionsAs(Object array) {
boolean2dArrays.assertHasSameDimensionsAs(info, actual, array);
return myself;
}
/**
* Verifies that the actual {@code boolean[][]}contains the given boolean[] at the given index.
* <p>
* Example:
* <pre><code class='java'> // assertion will pass
* assertThat(new boolean[][] {{true, false}, {false, true}}).contains(new boolean[] {true, false}, info);
*
* // assertion will fail
* assertThat(new boolean[][] {{true, false}, {false, true}}).contains(new boolean[] {true, false}, atIndex(1));</code></pre>
*
* @param value the value to look for.
* @param index the index where the value should be stored in the actual array.
* @return myself assertion object.
* @throws AssertionError if the actual {@code boolean[][]}is {@code null} or empty.
* @throws NullPointerException if the given {@code Index} is {@code null}.
* @throws IndexOutOfBoundsException if the value of the given {@code Index} is equal to or greater than the size of
* the actual array.
* @throws AssertionError if the actual {@code boolean[][]}does not contain the given value at the given index.
*/
public Boolean2DArrayAssert contains(boolean[] value, Index index) {
boolean2dArrays.assertContains(info, actual, value, index);
return myself;
}
/**
* Verifies that the actual {@code boolean[][]}does not contain the given boolean[] at the given index.
* <p>
* Example:
* <pre><code class='java'> // assertion will pass
* assertThat(new boolean[][] {{true, false}, {false, true}}).doesNotContain(new boolean[] {true, false}, atIndex(1));
*
* // assertion will fail
* assertThat(new boolean[][] {{true, false}, {false, true}}).doesNotContain(new boolean[] {true, false}, atIndex(0));</code></pre>
*
* @param value the value to look for.
* @param index the index where the value should be stored in the actual array.
* @return myself assertion object.
* @throws AssertionError if the actual {@code boolean[][]}is {@code null}.
* @throws NullPointerException if the given {@code Index} is {@code null}.
* @throws AssertionError if the actual {@code boolean[][]}contains the given value at the given index.
*/
public Boolean2DArrayAssert doesNotContain(boolean[] value, Index index) {
boolean2dArrays.assertDoesNotContain(info, actual, value, index);
return myself;
}
}
|
Boolean2DArrayAssert
|
java
|
junit-team__junit5
|
junit-jupiter-params/src/main/java/org/junit/jupiter/params/ParameterizedTestMethodParameterResolver.java
|
{
"start": 501,
"end": 1153
}
|
class ____ extends ParameterizedInvocationParameterResolver {
private final Method testTemplateMethod;
ParameterizedTestMethodParameterResolver(ParameterizedTestContext methodContext, EvaluatedArgumentSet arguments,
int invocationIndex) {
super(methodContext.getResolverFacade(), arguments, invocationIndex, ResolutionCache.DISABLED);
this.testTemplateMethod = methodContext.getAnnotatedElement();
}
@Override
protected boolean isSupportedOnConstructorOrMethod(Executable declaringExecutable,
ExtensionContext extensionContext) {
return this.testTemplateMethod.equals(declaringExecutable);
}
}
|
ParameterizedTestMethodParameterResolver
|
java
|
apache__kafka
|
streams/src/main/java/org/apache/kafka/streams/state/ReadOnlyWindowStore.java
|
{
"start": 1619,
"end": 11026
}
|
interface ____<K, V> {
/**
* Get the value of key from a window.
*
* @param key the key to fetch
* @param time start timestamp (inclusive) of the window
* @return The value or {@code null} if no value is found in the window
* @throws InvalidStateStoreException if the store is not initialized
* @throws NullPointerException if {@code null} is used for any key.
*/
V fetch(K key, long time);
/**
* Get all the key-value pairs with the given key and the time range from all the existing windows.
* <p>
* This iterator must be closed after use.
* <p>
* The time range is inclusive and applies to the starting timestamp of the window.
* For example, if we have the following windows:
* <pre>
* +-------------------------------+
* | key | start time | end time |
* +-------+------------+----------+
* | A | 10 | 20 |
* +-------+------------+----------+
* | A | 15 | 25 |
* +-------+------------+----------+
* | A | 20 | 30 |
* +-------+------------+----------+
* | A | 25 | 35 |
* +--------------------------------
* </pre>
* And we call {@code store.fetch("A", Instant.ofEpochMilli(10), Instant.ofEpochMilli(20))} then the results will contain the first
* three windows from the table above, i.e., all those where 10 <= start time <= 20.
* <p>
* For each key, the iterator guarantees ordering of windows, starting from the oldest/earliest
* available window to the newest/latest window.
*
* @param key the key to fetch
* @param timeFrom time range start (inclusive), where iteration starts.
* @param timeTo time range end (inclusive), where iteration ends.
* @return an iterator over key-value pairs {@code <timestamp, value>}, from beginning to end of time.
* @throws InvalidStateStoreException if the store is not initialized
* @throws NullPointerException if {@code null} is used for key.
* @throws IllegalArgumentException if duration is negative or can't be represented as {@code long milliseconds}
*/
WindowStoreIterator<V> fetch(K key, Instant timeFrom, Instant timeTo) throws IllegalArgumentException;
/**
* Get all the key-value pairs with the given key and the time range from all the existing windows
* in backward order with respect to time (from end to beginning of time).
* <p>
* This iterator must be closed after use.
* <p>
* The time range is inclusive and applies to the starting timestamp of the window.
* For example, if we have the following windows:
* <pre>
* +-------------------------------+
* | key | start time | end time |
* +-------+------------+----------+
* | A | 10 | 20 |
* +-------+------------+----------+
* | A | 15 | 25 |
* +-------+------------+----------+
* | A | 20 | 30 |
* +-------+------------+----------+
* | A | 25 | 35 |
* +--------------------------------
* </pre>
* And we call {@code store.backwardFetch("A", Instant.ofEpochMilli(10), Instant.ofEpochMilli(20))} then the
* results will contain the first three windows from the table above in backward order,
* i.e., all those where 10 <= start time <= 20.
* <p>
* For each key, the iterator guarantees ordering of windows, starting from the newest/latest
* available window to the oldest/earliest window.
*
* @param key the key to fetch
* @param timeFrom time range start (inclusive), where iteration ends.
* @param timeTo time range end (inclusive), where iteration starts.
* @return an iterator over key-value pairs {@code <timestamp, value>}, from end to beginning of time.
* @throws InvalidStateStoreException if the store is not initialized
* @throws NullPointerException if {@code null} is used for key.
* @throws IllegalArgumentException if duration is negative or can't be represented as {@code long milliseconds}
*/
default WindowStoreIterator<V> backwardFetch(K key, Instant timeFrom, Instant timeTo) throws IllegalArgumentException {
throw new UnsupportedOperationException();
}
/**
* Get all the key-value pairs in the given key range and time range from all the existing windows.
* <p>
* This iterator must be closed after use.
*
* @param keyFrom the first key in the range
* A null value indicates a starting position from the first element in the store.
* @param keyTo the last key in the range
* A null value indicates that the range ends with the last element in the store.
* @param timeFrom time range start (inclusive), where iteration starts.
* @param timeTo time range end (inclusive), where iteration ends.
* @return an iterator over windowed key-value pairs {@code <Windowed<K>, value>}, from beginning to end of time.
* @throws InvalidStateStoreException if the store is not initialized
* @throws IllegalArgumentException if duration is negative or can't be represented as {@code long milliseconds}
*/
KeyValueIterator<Windowed<K>, V> fetch(K keyFrom, K keyTo, Instant timeFrom, Instant timeTo)
throws IllegalArgumentException;
/**
* Get all the key-value pairs in the given key range and time range from all the existing windows
* in backward order with respect to time (from end to beginning of time).
* <p>
* This iterator must be closed after use.
*
* @param keyFrom the first key in the range
* A null value indicates a starting position from the first element in the store.
* @param keyTo the last key in the range
* A null value indicates that the range ends with the last element in the store.
* @param timeFrom time range start (inclusive), where iteration ends.
* @param timeTo time range end (inclusive), where iteration starts.
* @return an iterator over windowed key-value pairs {@code <Windowed<K>, value>}, from end to beginning of time.
* @throws InvalidStateStoreException if the store is not initialized
* @throws IllegalArgumentException if duration is negative or can't be represented as {@code long milliseconds}
*/
default KeyValueIterator<Windowed<K>, V> backwardFetch(K keyFrom, K keyTo, Instant timeFrom, Instant timeTo)
throws IllegalArgumentException {
throw new UnsupportedOperationException();
}
/**
* Gets all the key-value pairs in the existing windows.
*
* @return an iterator over windowed key-value pairs {@code <Windowed<K>, value>}, from beginning to end of time.
* @throws InvalidStateStoreException if the store is not initialized
*/
KeyValueIterator<Windowed<K>, V> all();
/**
* Gets all the key-value pairs in the existing windows in backward order
* with respect to time (from end to beginning of time).
*
* @return a backward iterator over windowed key-value pairs {@code <Windowed<K>, value>}, from the end to beginning of time.
* @throws InvalidStateStoreException if the store is not initialized
*/
default KeyValueIterator<Windowed<K>, V> backwardAll() {
throw new UnsupportedOperationException();
}
/**
* Gets all the key-value pairs that belong to the windows within in the given time range.
*
* @param timeFrom the beginning of the time slot from which to search (inclusive), where iteration starts.
* @param timeTo the end of the time slot from which to search (inclusive), where iteration ends.
* @return an iterator over windowed key-value pairs {@code <Windowed<K>, value>}, from beginning to end of time.
* @throws InvalidStateStoreException if the store is not initialized
* @throws NullPointerException if {@code null} is used for any key
* @throws IllegalArgumentException if duration is negative or can't be represented as {@code long milliseconds}
*/
KeyValueIterator<Windowed<K>, V> fetchAll(Instant timeFrom, Instant timeTo) throws IllegalArgumentException;
/**
* Gets all the key-value pairs that belong to the windows within in the given time range in backward order
* with respect to time (from end to beginning of time).
*
* @param timeFrom the beginning of the time slot from which to search (inclusive), where iteration ends.
* @param timeTo the end of the time slot from which to search (inclusive), where iteration starts.
* @return a backward iterator over windowed key-value pairs {@code <Windowed<K>, value>}, from end to beginning of time.
* @throws InvalidStateStoreException if the store is not initialized
* @throws NullPointerException if {@code null} is used for any key
* @throws IllegalArgumentException if duration is negative or can't be represented as {@code long milliseconds}
*/
default KeyValueIterator<Windowed<K>, V> backwardFetchAll(Instant timeFrom, Instant timeTo) throws IllegalArgumentException {
throw new UnsupportedOperationException();
}
}
|
ReadOnlyWindowStore
|
java
|
apache__camel
|
components/camel-vertx/camel-vertx/src/main/java/org/apache/camel/component/vertx/VertxProducer.java
|
{
"start": 3110,
"end": 4000
}
|
class ____ implements Handler<AsyncResult<Message<Object>>> {
private final Exchange exchange;
private final AsyncCallback callback;
private CamelReplyHandler(Exchange exchange, AsyncCallback callback) {
this.exchange = exchange;
this.callback = callback;
}
@Override
public void handle(AsyncResult<Message<Object>> event) {
try {
// preserve headers
MessageHelper.copyHeaders(exchange.getIn(), exchange.getOut(), false);
Throwable e = event.cause();
if (e != null) {
exchange.setException(e);
} else {
exchange.getMessage().setBody(event.result().body());
}
} finally {
callback.done(false);
}
}
}
}
|
CamelReplyHandler
|
java
|
apache__maven
|
impl/maven-impl/src/main/java/org/apache/maven/impl/resolver/MavenSnapshotMetadata.java
|
{
"start": 1070,
"end": 2562
}
|
class ____ extends MavenMetadata {
static final String SNAPSHOT = "SNAPSHOT";
protected final Collection<Artifact> artifacts = new ArrayList<>();
protected MavenSnapshotMetadata(Metadata metadata, Path path, Instant timestamp) {
super(metadata, path, timestamp);
}
protected static Metadata createRepositoryMetadata(Artifact artifact) {
return Metadata.newBuilder()
.modelVersion("1.1.0")
.groupId(artifact.getGroupId())
.artifactId(artifact.getArtifactId())
.version(artifact.getBaseVersion())
.build();
}
public void bind(Artifact artifact) {
artifacts.add(artifact);
}
public Object getKey() {
return getGroupId() + ':' + getArtifactId() + ':' + getVersion();
}
public static Object getKey(Artifact artifact) {
return artifact.getGroupId() + ':' + artifact.getArtifactId() + ':' + artifact.getBaseVersion();
}
protected String getKey(String classifier, String extension) {
return classifier + ':' + extension;
}
@Override
public String getGroupId() {
return metadata.getGroupId();
}
@Override
public String getArtifactId() {
return metadata.getArtifactId();
}
@Override
public String getVersion() {
return metadata.getVersion();
}
@Override
public Nature getNature() {
return Nature.SNAPSHOT;
}
}
|
MavenSnapshotMetadata
|
java
|
apache__flink
|
flink-core/src/test/java/org/apache/flink/testutils/ClassLoaderUtils.java
|
{
"start": 9321,
"end": 9999
}
|
class ____.
*
* <p>NOTE: Even though this method may throw IOExceptions, we do not declare those and rather
* wrap them in Runtime Exceptions. While this is generally discouraged, we do this here because
* it is merely a test utility and not production code, and it makes it easier to use this
* method during the initialization of variables and especially static variables.
*/
public static ObjectAndClassLoader<Serializable> createSerializableObjectFromNewClassLoader() {
final String classSource =
"import java.io.Serializable;"
+ "import java.util.Random;"
+ "public
|
path
|
java
|
spring-projects__spring-security
|
config/src/test/java/org/springframework/security/config/annotation/web/configurers/oauth2/client/OAuth2LoginConfigurerTests.java
|
{
"start": 58269,
"end": 58990
}
|
class ____ {
@Bean
JwtDecoderFactory<ClientRegistration> jwtDecoderFactory() {
return (clientRegistration) -> getJwtDecoder();
}
private static JwtDecoder getJwtDecoder() {
Map<String, Object> claims = new HashMap<>();
claims.put(IdTokenClaimNames.SUB, "sub123");
claims.put(IdTokenClaimNames.ISS, "http://localhost/iss");
claims.put(IdTokenClaimNames.AUD, Arrays.asList("clientId", "a", "u", "d"));
claims.put(IdTokenClaimNames.AZP, "clientId");
Jwt jwt = TestJwts.jwt().claims((c) -> c.putAll(claims)).build();
JwtDecoder jwtDecoder = mock(JwtDecoder.class);
given(jwtDecoder.decode(any())).willReturn(jwt);
return jwtDecoder;
}
}
@Configuration
static
|
JwtDecoderFactoryConfig
|
java
|
quarkusio__quarkus
|
independent-projects/resteasy-reactive/client/runtime/src/main/java/org/jboss/resteasy/reactive/client/handlers/VertxClientInputStream.java
|
{
"start": 2718,
"end": 8142
}
|
class ____ implements Handler<Buffer> {
protected final HttpClientResponse request;
protected Buffer input1;
protected Deque<Buffer> inputOverflow;
protected boolean waiting = false;
protected boolean eof = false;
protected Throwable readException;
private final long timeout;
public VertxBlockingInput(HttpClientResponse response, long timeout) {
this.request = response;
this.timeout = timeout;
response.pause();
response.handler(this);
try {
response.endHandler(new Handler<Void>() {
@Override
public void handle(Void event) {
synchronized (VertxBlockingInput.this) {
eof = true;
if (waiting) {
VertxBlockingInput.this.notify();
}
}
}
});
response.exceptionHandler(new Handler<Throwable>() {
@Override
public void handle(Throwable event) {
synchronized (VertxBlockingInput.this) {
readException = new IOException(event);
if (input1 != null) {
input1.getByteBuf().release();
input1 = null;
}
if (inputOverflow != null) {
Buffer d = inputOverflow.poll();
while (d != null) {
d.getByteBuf().release();
d = inputOverflow.poll();
}
}
if (waiting) {
VertxBlockingInput.this.notify();
}
}
}
});
response.fetch(1);
} catch (IllegalStateException e) {
//already ended
eof = true;
}
}
protected ByteBuf readBlocking() throws IOException {
long expire = System.currentTimeMillis() + timeout;
synchronized (VertxBlockingInput.this) {
while (input1 == null && !eof && readException == null) {
long rem = expire - System.currentTimeMillis();
if (rem <= 0) {
//everything is broken, if read has timed out we can assume that the underling connection
//is wrecked, so just close it
request.netSocket().close();
IOException throwable = new IOException("Read timed out");
readException = throwable;
throw throwable;
}
try {
if (Context.isOnEventLoopThread()) {
throw new BlockingNotAllowedException("Attempting a blocking read on io thread");
}
waiting = true;
VertxBlockingInput.this.wait(rem);
} catch (InterruptedException e) {
throw new InterruptedIOException(e.getMessage());
} finally {
waiting = false;
}
}
if (readException != null) {
throw new IOException(readException);
}
Buffer ret = input1;
input1 = null;
if (inputOverflow != null) {
input1 = inputOverflow.poll();
if (input1 == null) {
request.fetch(1);
}
} else if (!eof) {
request.fetch(1);
}
return ret == null ? null : ret.getByteBuf();
}
}
@Override
public void handle(Buffer event) {
synchronized (VertxBlockingInput.this) {
if (input1 == null) {
input1 = event;
} else {
if (inputOverflow == null) {
inputOverflow = new ArrayDeque<>();
}
inputOverflow.add(event);
}
if (waiting) {
VertxBlockingInput.this.notifyAll();
}
}
}
public int readBytesAvailable() {
if (input1 != null) {
return input1.getByteBuf().readableBytes();
}
String length = request.getHeader(HttpHeaders.CONTENT_LENGTH);
if (length == null) {
return 0;
}
try {
return Integer.parseInt(length);
} catch (NumberFormatException e) {
Long.parseLong(length); // ignore the value as can only return an int anyway
return Integer.MAX_VALUE;
}
}
public void discard() {
request.pause().handler(null).exceptionHandler(null).endHandler(null).resume();
}
}
}
|
VertxBlockingInput
|
java
|
spring-projects__spring-framework
|
spring-webflux/src/main/java/org/springframework/web/reactive/function/client/ExchangeStrategies.java
|
{
"start": 1190,
"end": 2676
}
|
interface ____ {
/**
* Return {@link HttpMessageReader HttpMessageReaders} to read and decode the response body with.
* @return the message readers
*/
List<HttpMessageReader<?>> messageReaders();
/**
* Return {@link HttpMessageWriter HttpMessageWriters} to write and encode the request body with.
* @return the message writers
*/
List<HttpMessageWriter<?>> messageWriters();
/**
* Return a builder to create a new {@link ExchangeStrategies} instance
* replicated from the current instance.
* @since 5.1.12
*/
default Builder mutate() {
throw new UnsupportedOperationException();
}
// Static builder methods
/**
* Return an {@code ExchangeStrategies} instance with default configuration
* provided by {@link ClientCodecConfigurer}.
*/
static ExchangeStrategies withDefaults() {
return DefaultExchangeStrategiesBuilder.DEFAULT_EXCHANGE_STRATEGIES;
}
/**
* Return a builder pre-configured with default configuration to start.
* This is the same as {@link #withDefaults()} but returns a mutable builder
* for further customizations.
*/
static Builder builder() {
DefaultExchangeStrategiesBuilder builder = new DefaultExchangeStrategiesBuilder();
builder.defaultConfiguration();
return builder;
}
/**
* Return a builder with empty configuration to start.
*/
static Builder empty() {
return new DefaultExchangeStrategiesBuilder();
}
/**
* A mutable builder for an {@link ExchangeStrategies}.
*/
|
ExchangeStrategies
|
java
|
apache__dubbo
|
dubbo-remoting/dubbo-remoting-netty4/src/main/java/org/apache/dubbo/remoting/transport/netty4/ssl/SslClientTlsHandler.java
|
{
"start": 1485,
"end": 3299
}
|
class ____ extends ChannelInboundHandlerAdapter {
private static final ErrorTypeAwareLogger logger = LoggerFactory.getErrorTypeAwareLogger(SslClientTlsHandler.class);
private static final AttributeKey<SSLSession> SSL_SESSION_KEY = AttributeKey.valueOf(Constants.SSL_SESSION_KEY);
private final SslContext sslContext;
public SslClientTlsHandler(URL url) {
this(SslContexts.buildClientSslContext(url));
}
public SslClientTlsHandler(SslContext sslContext) {
this.sslContext = sslContext;
}
@Override
public void handlerAdded(ChannelHandlerContext ctx) {
SSLEngine sslEngine = sslContext.newEngine(ctx.alloc());
ctx.pipeline().addAfter(ctx.name(), null, new SslHandler(sslEngine, false));
}
@Override
public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
if (evt instanceof SslHandshakeCompletionEvent) {
SslHandshakeCompletionEvent handshakeEvent = (SslHandshakeCompletionEvent) evt;
if (handshakeEvent.isSuccess()) {
SSLSession session =
ctx.pipeline().get(SslHandler.class).engine().getSession();
logger.info("TLS negotiation succeed with: " + session.getPeerHost());
ctx.pipeline().remove(this);
ctx.channel().attr(SSL_SESSION_KEY).set(session);
} else {
logger.error(
INTERNAL_ERROR,
"unknown error in remoting module",
"",
"TLS negotiation failed when trying to accept new connection.",
handshakeEvent.cause());
ctx.fireExceptionCaught(handshakeEvent.cause());
}
}
}
}
|
SslClientTlsHandler
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestQueueManagerRealScheduler.java
|
{
"start": 1454,
"end": 4743
}
|
class ____ extends FairSchedulerTestBase {
private final static File ALLOC_FILE = new File(TEST_DIR, "test-queue-mgr");
@BeforeEach
public void setup() throws IOException {
createConfiguration();
writeAllocFile(30);
conf.set(FairSchedulerConfiguration.ALLOCATION_FILE,
ALLOC_FILE.getAbsolutePath());
resourceManager = new MockRM(conf);
resourceManager.start();
scheduler = (FairScheduler) resourceManager.getResourceScheduler();
}
@AfterEach
public void teardown() {
ALLOC_FILE.deleteOnExit();
if (resourceManager != null) {
resourceManager.stop();
resourceManager = null;
}
}
private void writeAllocFile(int defaultFairShareTimeout) {
AllocationFileWriter.create()
.addQueue(new AllocationFileQueue.Builder("default")
.build())
.addQueue(new AllocationFileQueue.Builder("queueA").build())
.addQueue(new AllocationFileQueue.Builder("queueB")
.subQueue(new AllocationFileQueue.Builder("queueB1")
.minSharePreemptionTimeout(5).build())
.subQueue(new AllocationFileQueue.Builder("queueB2").build())
.build())
.addQueue(new AllocationFileQueue.Builder("queueC").build())
.defaultMinSharePreemptionTimeout(15)
.defaultFairSharePreemptionTimeout(defaultFairShareTimeout)
.writeToFile(ALLOC_FILE.getAbsolutePath());
}
@Test
public void testBackwardsCompatiblePreemptionConfiguration()
throws IOException {
// Check the min/fair share preemption timeout for each queue
QueueManager queueMgr = scheduler.getQueueManager();
assertEquals(30000, queueMgr.getQueue("root")
.getFairSharePreemptionTimeout());
assertEquals(30000, queueMgr.getQueue("default")
.getFairSharePreemptionTimeout());
assertEquals(30000, queueMgr.getQueue("queueA")
.getFairSharePreemptionTimeout());
assertEquals(30000, queueMgr.getQueue("queueB")
.getFairSharePreemptionTimeout());
assertEquals(30000, queueMgr.getQueue("queueB.queueB1")
.getFairSharePreemptionTimeout());
assertEquals(30000, queueMgr.getQueue("queueB.queueB2")
.getFairSharePreemptionTimeout());
assertEquals(30000, queueMgr.getQueue("queueC")
.getFairSharePreemptionTimeout());
assertEquals(15000, queueMgr.getQueue("root")
.getMinSharePreemptionTimeout());
assertEquals(15000, queueMgr.getQueue("default")
.getMinSharePreemptionTimeout());
assertEquals(15000, queueMgr.getQueue("queueA")
.getMinSharePreemptionTimeout());
assertEquals(15000, queueMgr.getQueue("queueB")
.getMinSharePreemptionTimeout());
assertEquals(5000, queueMgr.getQueue("queueB.queueB1")
.getMinSharePreemptionTimeout());
assertEquals(15000, queueMgr.getQueue("queueB.queueB2")
.getMinSharePreemptionTimeout());
assertEquals(15000, queueMgr.getQueue("queueC")
.getMinSharePreemptionTimeout());
// Lower the fairshare preemption timeouts and verify it is picked
// correctly.
writeAllocFile(25);
scheduler.reinitialize(conf, resourceManager.getRMContext());
assertEquals(25000, queueMgr.getQueue("root")
.getFairSharePreemptionTimeout());
}
}
|
TestQueueManagerRealScheduler
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/TemporaryAWSCredentialsProvider.java
|
{
"start": 1931,
"end": 3875
}
|
class ____ extends AbstractSessionCredentialsProvider {
public static final String NAME
= "org.apache.hadoop.fs.s3a.TemporaryAWSCredentialsProvider";
public static final String COMPONENT
= "Session credentials in Hadoop configuration";
/**
* Construct from just a configuration.
* @param conf configuration.
* @throws IOException on any failure to load the credentials.
*/
public TemporaryAWSCredentialsProvider(final Configuration conf)
throws IOException {
this(null, conf);
}
/**
* Constructor: the URI will be null if the provider is inited unbonded
* to a filesystem.
* @param uri binding to a filesystem URI.
* @param conf configuration.
* @throws IOException on any failure to load the credentials.
*/
public TemporaryAWSCredentialsProvider(
@Nullable final URI uri,
final Configuration conf)
throws IOException {
super(uri, conf);
}
/**
* The credentials here must include a session token, else this operation
* will raise an exception.
* @param config the configuration
* @return temporary credentials.
* @throws IOException on any failure to load the credentials.
* @throws NoAuthWithAWSException validation failure
* @throws NoAwsCredentialsException the credentials are actually empty.
*/
@Override
protected AwsCredentials createCredentials(Configuration config)
throws IOException {
MarshalledCredentials creds = MarshalledCredentialBinding.fromFileSystem(
getUri(), config);
MarshalledCredentials.CredentialTypeRequired sessionOnly
= MarshalledCredentials.CredentialTypeRequired.SessionOnly;
// treat only having non-session creds as empty.
if (!creds.isValid(sessionOnly)) {
throw new NoAwsCredentialsException(COMPONENT);
}
return MarshalledCredentialBinding.toAWSCredentials(creds,
sessionOnly, COMPONENT);
}
}
|
TemporaryAWSCredentialsProvider
|
java
|
apache__flink
|
flink-table/flink-table-runtime/src/test/java/org/apache/flink/table/data/util/DataFormatTestUtil.java
|
{
"start": 1433,
"end": 5423
}
|
class ____ {
/** Stringify the given {@link RowData}. */
public static String rowDataToString(RowData row, RowType type) {
checkArgument(type.getFieldCount() == row.getArity());
StringBuilder build = new StringBuilder();
build.append(row.getRowKind().shortString()).append("(");
for (int i = 0; i < row.getArity(); i++) {
build.append(',');
if (row.isNullAt(i)) {
build.append("null");
} else {
RowData.FieldGetter fieldGetter = RowData.createFieldGetter(type.getTypeAt(i), i);
build.append(fieldGetter.getFieldOrNull(row));
}
}
build.append(')');
return build.toString();
}
/** Get a binary row of 24 bytes long. */
public static BinaryRowData get24BytesBinaryRow() {
// header (8 bytes) + 2 * string in fixed-length part (8 bytes each)
BinaryRowData row = new BinaryRowData(2);
BinaryRowWriter writer = new BinaryRowWriter(row);
writer.writeString(0, StringData.fromString(RandomStringUtils.randomNumeric(2)));
writer.writeString(1, StringData.fromString(RandomStringUtils.randomNumeric(2)));
writer.complete();
return row;
}
/** Get a binary row of 160 bytes long. */
public static BinaryRowData get160BytesBinaryRow() {
// header (8 bytes) +
// 72 byte length string (8 bytes in fixed-length, 72 bytes in variable-length) +
// 64 byte length string (8 bytes in fixed-length, 64 bytes in variable-length)
BinaryRowData row = new BinaryRowData(2);
BinaryRowWriter writer = new BinaryRowWriter(row);
writer.writeString(0, StringData.fromString(RandomStringUtils.randomNumeric(72)));
writer.writeString(1, StringData.fromString(RandomStringUtils.randomNumeric(64)));
writer.complete();
return row;
}
/**
* Get a binary row consisting of 6 segments. The bytes of the returned row is the same with the
* given input binary row.
*/
public static BinaryRowData getMultiSeg160BytesBinaryRow(BinaryRowData row160) {
BinaryRowData multiSegRow160 = new BinaryRowData(2);
MemorySegment[] segments = new MemorySegment[6];
int baseOffset = 8;
int posInSeg = baseOffset;
int remainSize = 160;
for (int i = 0; i < segments.length; i++) {
segments[i] = MemorySegmentFactory.wrap(new byte[32]);
int copy = Math.min(32 - posInSeg, remainSize);
row160.getSegments()[0].copyTo(160 - remainSize, segments[i], posInSeg, copy);
remainSize -= copy;
posInSeg = 0;
}
multiSegRow160.pointTo(segments, baseOffset, 160);
assertThat(multiSegRow160).isEqualTo(row160);
return multiSegRow160;
}
/**
* Get a binary row consisting of 2 segments. Its first segment is the same with the given input
* binary row, while its second segment is empty.
*/
public static BinaryRowData getMultiSeg160BytesInOneSegRow(BinaryRowData row160) {
MemorySegment[] segments = new MemorySegment[2];
segments[0] = row160.getSegments()[0];
segments[1] = MemorySegmentFactory.wrap(new byte[row160.getSegments()[0].size()]);
row160.pointTo(segments, 0, row160.getSizeInBytes());
return row160;
}
/** Split the given byte array into two memory segments. */
public static MemorySegment[] splitBytes(byte[] bytes, int baseOffset) {
int newSize = (bytes.length + 1) / 2 + baseOffset;
MemorySegment[] ret = new MemorySegment[2];
ret[0] = MemorySegmentFactory.wrap(new byte[newSize]);
ret[1] = MemorySegmentFactory.wrap(new byte[newSize]);
ret[0].put(baseOffset, bytes, 0, newSize - baseOffset);
ret[1].put(0, bytes, newSize - baseOffset, bytes.length - (newSize - baseOffset));
return ret;
}
/** A simple
|
DataFormatTestUtil
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/component/empty/ComponentEmptyEmbeddedOwner.java
|
{
"start": 336,
"end": 720
}
|
class ____ {
@Id
@GeneratedValue
private Integer id;
private ComponentEmptyEmbedded embedded;
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public ComponentEmptyEmbedded getEmbedded() {
return embedded;
}
public void setEmbedded(ComponentEmptyEmbedded embedded) {
this.embedded = embedded;
}
}
|
ComponentEmptyEmbeddedOwner
|
java
|
spring-cloud__spring-cloud-gateway
|
spring-cloud-gateway-server-webflux/src/main/java/org/springframework/cloud/gateway/config/LocalResponseCacheAutoConfiguration.java
|
{
"start": 4745,
"end": 4919
}
|
class ____ {
}
@ConditionalOnProperty(value = GatewayProperties.PREFIX + ".filter.local-response-cache.enabled",
havingValue = "true")
static
|
OnGatewayPropertyEnabled
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/EmptyStorageStatistics.java
|
{
"start": 990,
"end": 1409
}
|
class ____ extends StorageStatistics {
EmptyStorageStatistics(String name) {
super(name);
}
@Override
public Iterator<LongStatistic> getLongStatistics() {
return Collections.emptyIterator();
}
@Override
public Long getLong(String key) {
return null;
}
@Override
public boolean isTracked(String key) {
return false;
}
@Override
public void reset() {
}
}
|
EmptyStorageStatistics
|
java
|
bumptech__glide
|
mocks/src/main/java/com/bumptech/glide/mocks/MockGlideBuilders.java
|
{
"start": 548,
"end": 930
}
|
class ____ {
private MockGlideBuilders() {}
/** Creates a new {@link RequestBuilder} instance with a matching resource type. */
@SuppressWarnings("unchecked")
public static <T> RequestBuilder<T> mockRequestBuilder() {
return (RequestBuilder<T>) mockGlideRequest(RequestBuilder.class);
}
/** Creates a new instance of a generated {@code GlideRequest}
|
MockGlideBuilders
|
java
|
apache__dubbo
|
dubbo-serialization/dubbo-serialization-fastjson2/src/main/java/org/apache/dubbo/common/serialize/fastjson2/Fastjson2CreatorManager.java
|
{
"start": 1411,
"end": 3132
}
|
class ____ implements ScopeClassLoaderListener<FrameworkModel> {
/**
* An empty classLoader used when classLoader is system classLoader. Prevent the NPE.
*/
private static final ClassLoader SYSTEM_CLASSLOADER_KEY = new ClassLoader() {};
private final ConcurrentHashMap<ClassLoader, ObjectReaderCreator> readerMap = new ConcurrentHashMap<>();
private final ConcurrentHashMap<ClassLoader, ObjectWriterCreator> writerMap = new ConcurrentHashMap<>();
public Fastjson2CreatorManager(FrameworkModel frameworkModel) {
frameworkModel.addClassLoaderListener(this);
}
public void setCreator(ClassLoader classLoader) {
if (classLoader == null) {
classLoader = SYSTEM_CLASSLOADER_KEY;
}
if (NativeDetector.inNativeImage()) {
JSONFactory.setContextReaderCreator(readerMap.putIfAbsent(classLoader, ObjectReaderCreator.INSTANCE));
JSONFactory.setContextWriterCreator(writerMap.putIfAbsent(classLoader, ObjectWriterCreator.INSTANCE));
} else {
JSONFactory.setContextReaderCreator(
ConcurrentHashMapUtils.computeIfAbsent(readerMap, classLoader, ObjectReaderCreatorASM::new));
JSONFactory.setContextWriterCreator(
ConcurrentHashMapUtils.computeIfAbsent(writerMap, classLoader, ObjectWriterCreatorASM::new));
}
}
@Override
public void onAddClassLoader(FrameworkModel scopeModel, ClassLoader classLoader) {
// nop
}
@Override
public void onRemoveClassLoader(FrameworkModel scopeModel, ClassLoader classLoader) {
readerMap.remove(classLoader);
writerMap.remove(classLoader);
}
}
|
Fastjson2CreatorManager
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/bug/Bug_for_Issue_534.java
|
{
"start": 652,
"end": 869
}
|
class ____ {
private Long aLong;
public Long getaLong() {
return aLong;
}
public void setaLong(Long aLong) {
this.aLong = aLong;
}
}
}
|
Value
|
java
|
elastic__elasticsearch
|
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/MaxmindIpDataLookups.java
|
{
"start": 15357,
"end": 16982
}
|
class ____ extends AbstractBase<ConnectionTypeResponse, ConnectionTypeResponse> {
ConnectionType(final Set<Database.Property> properties) {
super(
properties,
ConnectionTypeResponse.class,
(response, ipAddress, network, locales) -> new ConnectionTypeResponse(response, ipAddress, network)
);
}
@Override
protected ConnectionTypeResponse cacheableRecord(ConnectionTypeResponse response) {
return response;
}
@Override
protected Map<String, Object> transform(final ConnectionTypeResponse response) {
ConnectionTypeResponse.ConnectionType connectionType = response.getConnectionType();
Map<String, Object> data = new HashMap<>();
for (Database.Property property : this.properties) {
switch (property) {
case IP -> data.put("ip", response.getIpAddress());
case CONNECTION_TYPE -> {
if (connectionType != null) {
data.put("connection_type", connectionType.toString());
}
}
}
}
return data;
}
}
record CacheableCountryResponse(
Boolean isInEuropeanUnion,
String countryIsoCode,
String countryName,
String continentCode,
String continentName,
Boolean registeredCountryIsInEuropeanUnion,
String registeredCountryIsoCode,
String registeredCountryName
) {}
static
|
ConnectionType
|
java
|
spring-projects__spring-framework
|
spring-r2dbc/src/main/java/org/springframework/r2dbc/connection/TransactionAwareConnectionFactoryProxy.java
|
{
"start": 5037,
"end": 6797
}
|
class ____ implements InvocationHandler {
private final Connection connection;
private final ConnectionFactory targetConnectionFactory;
private boolean closed = false;
TransactionAwareInvocationHandler(Connection connection, ConnectionFactory targetConnectionFactory) {
this.connection = connection;
this.targetConnectionFactory = targetConnectionFactory;
}
@Override
public @Nullable Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
if (ReflectionUtils.isObjectMethod(method)) {
if (ReflectionUtils.isToStringMethod(method)) {
return proxyToString(proxy);
}
if (ReflectionUtils.isEqualsMethod(method)) {
return (proxy == args[0]);
}
if (ReflectionUtils.isHashCodeMethod(method)) {
return System.identityHashCode(proxy);
}
}
return switch (method.getName()) {
case "unwrap" -> this.connection;
// Handle close method: only close if not within a transaction.
case "close" -> ConnectionFactoryUtils.doReleaseConnection(this.connection, this.targetConnectionFactory)
.doOnSubscribe(n -> this.closed = true);
case "isClosed" -> this.closed;
default -> {
if (this.closed) {
throw new IllegalStateException("Connection handle already closed");
}
try {
// Invoke method on target Connection.
yield method.invoke(this.connection, args);
}
catch (InvocationTargetException ex) {
throw ex.getTargetException();
}
}
};
}
private String proxyToString(@Nullable Object proxy) {
// Allow for differentiating between the proxy and the raw Connection.
return "Transaction-aware proxy for target Connection [" + this.connection + "]";
}
}
}
|
TransactionAwareInvocationHandler
|
java
|
apache__hadoop
|
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/PipesNonJavaInputFormat.java
|
{
"start": 1678,
"end": 2813
}
|
class ____
implements InputFormat<FloatWritable, NullWritable> {
public RecordReader<FloatWritable, NullWritable> getRecordReader(
InputSplit genericSplit, JobConf job, Reporter reporter)
throws IOException {
return new PipesDummyRecordReader(job, genericSplit);
}
public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
// Delegate the generation of input splits to the 'original' InputFormat
return ReflectionUtils.newInstance(
job.getClass(Submitter.INPUT_FORMAT,
TextInputFormat.class,
InputFormat.class), job).getSplits(job, numSplits);
}
/**
* A dummy {@link org.apache.hadoop.mapred.RecordReader} to help track the
* progress of Hadoop Pipes' applications when they are using a non-Java
* <code>RecordReader</code>.
*
* The <code>PipesDummyRecordReader</code> is informed of the 'progress' of
* the task by the {@link OutputHandler#progress(float)} which calls the
* {@link #next(FloatWritable, NullWritable)} with the progress as the
* <code>key</code>.
*/
static
|
PipesNonJavaInputFormat
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/action/search/SearchPhase.java
|
{
"start": 908,
"end": 4414
}
|
class ____ {
private final String name;
protected SearchPhase(String name) {
this.name = Objects.requireNonNull(name, "name must not be null");
}
protected abstract void run();
/**
* Returns the phases name.
*/
public String getName() {
return name;
}
private static String makeMissingShardsError(StringBuilder missingShards) {
return "Search rejected due to missing shards ["
+ missingShards
+ "]. Consider using `allow_partial_search_results` setting to bypass this error.";
}
protected static void doCheckNoMissingShards(String phaseName, SearchRequest request, List<SearchShardIterator> shardsIts) {
assert request.allowPartialSearchResults() != null : "SearchRequest missing setting for allowPartialSearchResults";
if (request.allowPartialSearchResults() == false) {
final StringBuilder missingShards = new StringBuilder();
// Fail-fast verification of all shards being available
for (int index = 0; index < shardsIts.size(); index++) {
final SearchShardIterator shardRoutings = shardsIts.get(index);
if (shardRoutings.size() == 0) {
if (missingShards.isEmpty() == false) {
missingShards.append(", ");
}
missingShards.append(shardRoutings.shardId());
}
}
if (missingShards.isEmpty() == false) {
// Status red - shard is missing all copies and would produce partial results for an index search
final String msg = makeMissingShardsError(missingShards);
throw new SearchPhaseExecutionException(phaseName, msg, null, ShardSearchFailure.EMPTY_ARRAY);
}
}
}
/**
* Releases shard targets that are not used in the docsIdsToLoad.
*/
protected static void releaseIrrelevantSearchContext(SearchPhaseResult searchPhaseResult, AbstractSearchAsyncAction<?> context) {
// we only release search context that we did not fetch from, if we are not scrolling
// or using a PIT and if it has at least one hit that didn't make it to the global topDocs
if (searchPhaseResult == null) {
return;
}
// phaseResult.getContextId() is the same for query & rank feature results
SearchPhaseResult phaseResult = searchPhaseResult.queryResult() != null
? searchPhaseResult.queryResult()
: searchPhaseResult.rankFeatureResult();
if (phaseResult != null
&& (phaseResult.hasSearchContext()
|| (phaseResult instanceof QuerySearchResult q && q.isPartiallyReduced() && q.getContextId() != null))
&& context.getRequest().scroll() == null
&& (context.isPartOfPointInTime(phaseResult.getContextId()) == false)) {
try {
context.getLogger().trace("trying to release search context [{}]", phaseResult.getContextId());
SearchShardTarget shardTarget = phaseResult.getSearchShardTarget();
Transport.Connection connection = context.getConnection(shardTarget.getClusterAlias(), shardTarget.getNodeId());
context.sendReleaseSearchContext(phaseResult.getContextId(), connection);
} catch (Exception e) {
context.getLogger().trace("failed to release context", e);
}
}
}
}
|
SearchPhase
|
java
|
quarkusio__quarkus
|
core/runtime/src/main/java/io/quarkus/runtime/Shutdown.java
|
{
"start": 1366,
"end": 1562
}
|
class ____ {
*
* @Shutdown
* void shutdown() {
* // place the logic here
* }
* }
* </pre>
*
* @see ShutdownEvent
*/
@Target(METHOD)
@Retention(RUNTIME)
public @
|
Bean2
|
java
|
apache__spark
|
sql/catalyst/src/main/java/org/apache/spark/sql/connector/write/SupportsDelta.java
|
{
"start": 1161,
"end": 1746
}
|
interface ____ extends RowLevelOperation {
@Override
DeltaWriteBuilder newWriteBuilder(LogicalWriteInfo info);
/**
* Returns the row ID column references that should be used for row equality.
*/
NamedReference[] rowId();
/**
* Controls whether to represent updates as deletes and inserts.
* <p>
* Data sources may choose to split updates into deletes and inserts to either better cluster
* and order the incoming delta of rows or to simplify the write process.
*/
default boolean representUpdateAsDeleteAndInsert() {
return false;
}
}
|
SupportsDelta
|
java
|
google__guava
|
android/guava/src/com/google/common/base/FinalizableReferenceQueue.java
|
{
"start": 14448,
"end": 15032
}
|
class ____ implements FinalizerLoader {
@Override
public Class<?> loadFinalizer() {
try {
return Class.forName(FINALIZER_CLASS_NAME);
} catch (ClassNotFoundException e) {
throw new AssertionError(e);
}
}
}
/** Looks up Finalizer.startFinalizer(). */
static Method getStartFinalizer(Class<?> finalizer) {
try {
return finalizer.getMethod(
"startFinalizer", Class.class, ReferenceQueue.class, PhantomReference.class);
} catch (NoSuchMethodException e) {
throw new AssertionError(e);
}
}
}
|
DirectLoader
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/runtime/io/disk/ChannelViewsTest.java
|
{
"start": 2276,
"end": 16674
}
|
class ____ {
private static final long SEED = 649180756312423613L;
private static final int KEY_MAX = Integer.MAX_VALUE;
private static final int VALUE_SHORT_LENGTH = 114;
private static final int VALUE_LONG_LENGTH = 112 * 1024;
private static final int NUM_PAIRS_SHORT = 1000000;
private static final int NUM_PAIRS_LONG = 3000;
private static final int MEMORY_SIZE = 1024 * 1024;
private static final int MEMORY_PAGE_SIZE = 64 * 1024;
private static final int NUM_MEMORY_SEGMENTS = 3;
private final AbstractInvokable parentTask = new DummyInvokable();
private IOManager ioManager;
private MemoryManager memoryManager;
// --------------------------------------------------------------------------------------------
@BeforeEach
void beforeTest() {
this.memoryManager =
MemoryManagerBuilder.newBuilder()
.setMemorySize(MEMORY_SIZE)
.setPageSize(MEMORY_PAGE_SIZE)
.build();
this.ioManager = new IOManagerAsync();
}
@AfterEach
void afterTest() throws Exception {
this.ioManager.close();
if (memoryManager != null) {
assertThat(this.memoryManager.verifyEmpty())
.withFailMessage(
"Memory leak: not all segments have been returned to the memory manager.")
.isTrue();
this.memoryManager.shutdown();
this.memoryManager = null;
}
}
// --------------------------------------------------------------------------------------------
@Test
void testWriteReadSmallRecords() throws Exception {
final TestData.TupleGenerator generator =
new TestData.TupleGenerator(
SEED, KEY_MAX, VALUE_SHORT_LENGTH, KeyMode.RANDOM, ValueMode.RANDOM_LENGTH);
final FileIOChannel.ID channel = this.ioManager.createChannel();
final TypeSerializer<Tuple2<Integer, String>> serializer =
TestData.getIntStringTupleSerializer();
// create the writer output view
List<MemorySegment> memory =
this.memoryManager.allocatePages(this.parentTask, NUM_MEMORY_SEGMENTS);
final BlockChannelWriter<MemorySegment> writer =
this.ioManager.createBlockChannelWriter(channel);
final ChannelWriterOutputView outView =
new ChannelWriterOutputView(writer, memory, MEMORY_PAGE_SIZE);
// write a number of pairs
final Tuple2<Integer, String> rec = new Tuple2<>();
for (int i = 0; i < NUM_PAIRS_SHORT; i++) {
generator.next(rec);
serializer.serialize(rec, outView);
}
this.memoryManager.release(outView.close());
// create the reader input view
memory = this.memoryManager.allocatePages(this.parentTask, NUM_MEMORY_SEGMENTS);
final BlockChannelReader<MemorySegment> reader =
this.ioManager.createBlockChannelReader(channel);
final ChannelReaderInputView inView =
new ChannelReaderInputView(reader, memory, outView.getBlockCount(), true);
generator.reset();
// read and re-generate all records and compare them
final Tuple2<Integer, String> readRec = new Tuple2<>();
for (int i = 0; i < NUM_PAIRS_SHORT; i++) {
generator.next(rec);
serializer.deserialize(readRec, inView);
assertReadRecordMatchRegenerated(readRec, rec);
}
this.memoryManager.release(inView.close());
reader.deleteChannel();
}
@Test
void testWriteAndReadLongRecords() throws Exception {
final TestData.TupleGenerator generator =
new TestData.TupleGenerator(
SEED, KEY_MAX, VALUE_LONG_LENGTH, KeyMode.RANDOM, ValueMode.RANDOM_LENGTH);
final FileIOChannel.ID channel = this.ioManager.createChannel();
final TypeSerializer<Tuple2<Integer, String>> serializer =
TestData.getIntStringTupleSerializer();
// create the writer output view
List<MemorySegment> memory =
this.memoryManager.allocatePages(this.parentTask, NUM_MEMORY_SEGMENTS);
final BlockChannelWriter<MemorySegment> writer =
this.ioManager.createBlockChannelWriter(channel);
final ChannelWriterOutputView outView =
new ChannelWriterOutputView(writer, memory, MEMORY_PAGE_SIZE);
// write a number of pairs
final Tuple2<Integer, String> rec = new Tuple2<>();
for (int i = 0; i < NUM_PAIRS_LONG; i++) {
generator.next(rec);
serializer.serialize(rec, outView);
}
this.memoryManager.release(outView.close());
// create the reader input view
memory = this.memoryManager.allocatePages(this.parentTask, NUM_MEMORY_SEGMENTS);
final BlockChannelReader<MemorySegment> reader =
this.ioManager.createBlockChannelReader(channel);
final ChannelReaderInputView inView =
new ChannelReaderInputView(reader, memory, outView.getBlockCount(), true);
generator.reset();
// read and re-generate all records and compare them
final Tuple2<Integer, String> readRec = new Tuple2<>();
for (int i = 0; i < NUM_PAIRS_LONG; i++) {
generator.next(rec);
serializer.deserialize(readRec, inView);
assertReadRecordMatchRegenerated(readRec, rec);
}
this.memoryManager.release(inView.close());
reader.deleteChannel();
}
@Test
void testReadTooMany() throws Exception {
final TestData.TupleGenerator generator =
new TestData.TupleGenerator(
SEED, KEY_MAX, VALUE_SHORT_LENGTH, KeyMode.RANDOM, ValueMode.RANDOM_LENGTH);
final FileIOChannel.ID channel = this.ioManager.createChannel();
final TypeSerializer<Tuple2<Integer, String>> serializer =
TestData.getIntStringTupleSerializer();
// create the writer output view
List<MemorySegment> memory =
this.memoryManager.allocatePages(this.parentTask, NUM_MEMORY_SEGMENTS);
final BlockChannelWriter<MemorySegment> writer =
this.ioManager.createBlockChannelWriter(channel);
final ChannelWriterOutputView outView =
new ChannelWriterOutputView(writer, memory, MEMORY_PAGE_SIZE);
// write a number of pairs
final Tuple2<Integer, String> rec = new Tuple2<>();
for (int i = 0; i < NUM_PAIRS_SHORT; i++) {
generator.next(rec);
serializer.serialize(rec, outView);
}
this.memoryManager.release(outView.close());
// create the reader input view
memory = this.memoryManager.allocatePages(this.parentTask, NUM_MEMORY_SEGMENTS);
final BlockChannelReader<MemorySegment> reader =
this.ioManager.createBlockChannelReader(channel);
final ChannelReaderInputView inView =
new ChannelReaderInputView(reader, memory, outView.getBlockCount(), true);
generator.reset();
// read and re-generate all records and compare them
final Tuple2<Integer, String> readRec = new Tuple2<>();
for (int i = 0; i < NUM_PAIRS_SHORT; i++) {
generator.next(rec);
serializer.deserialize(readRec, inView);
assertReadRecordMatchRegenerated(readRec, rec);
}
generator.next(rec);
assertThatThrownBy(() -> serializer.deserialize(readRec, inView))
.withFailMessage("Expected an EOFException which did not occur.")
.isInstanceOf(EOFException.class);
this.memoryManager.release(inView.close());
reader.deleteChannel();
}
@Test
void testReadWithoutKnownBlockCount() throws Exception {
final TestData.TupleGenerator generator =
new TestData.TupleGenerator(
SEED, KEY_MAX, VALUE_SHORT_LENGTH, KeyMode.RANDOM, ValueMode.RANDOM_LENGTH);
final FileIOChannel.ID channel = this.ioManager.createChannel();
final TypeSerializer<Tuple2<Integer, String>> serializer =
TestData.getIntStringTupleSerializer();
// create the writer output view
List<MemorySegment> memory =
this.memoryManager.allocatePages(this.parentTask, NUM_MEMORY_SEGMENTS);
final BlockChannelWriter<MemorySegment> writer =
this.ioManager.createBlockChannelWriter(channel);
final ChannelWriterOutputView outView =
new ChannelWriterOutputView(writer, memory, MEMORY_PAGE_SIZE);
// write a number of pairs
final Tuple2<Integer, String> rec = new Tuple2<>();
for (int i = 0; i < NUM_PAIRS_SHORT; i++) {
generator.next(rec);
serializer.serialize(rec, outView);
}
this.memoryManager.release(outView.close());
// create the reader input view
memory = this.memoryManager.allocatePages(this.parentTask, NUM_MEMORY_SEGMENTS);
final BlockChannelReader<MemorySegment> reader =
this.ioManager.createBlockChannelReader(channel);
final ChannelReaderInputView inView = new ChannelReaderInputView(reader, memory, true);
generator.reset();
// read and re-generate all records and compare them
final Tuple2<Integer, String> readRec = new Tuple2<>();
for (int i = 0; i < NUM_PAIRS_SHORT; i++) {
generator.next(rec);
serializer.deserialize(readRec, inView);
assertReadRecordMatchRegenerated(readRec, rec);
}
this.memoryManager.release(inView.close());
reader.deleteChannel();
}
@Test
void testWriteReadOneBufferOnly() throws Exception {
final TestData.TupleGenerator generator =
new TestData.TupleGenerator(
SEED, KEY_MAX, VALUE_SHORT_LENGTH, KeyMode.RANDOM, ValueMode.RANDOM_LENGTH);
final FileIOChannel.ID channel = this.ioManager.createChannel();
final TypeSerializer<Tuple2<Integer, String>> serializer =
TestData.getIntStringTupleSerializer();
// create the writer output view
List<MemorySegment> memory = this.memoryManager.allocatePages(this.parentTask, 1);
final BlockChannelWriter<MemorySegment> writer =
this.ioManager.createBlockChannelWriter(channel);
final ChannelWriterOutputView outView =
new ChannelWriterOutputView(writer, memory, MEMORY_PAGE_SIZE);
// write a number of pairs
final Tuple2<Integer, String> rec = new Tuple2<>();
for (int i = 0; i < NUM_PAIRS_SHORT; i++) {
generator.next(rec);
serializer.serialize(rec, outView);
}
this.memoryManager.release(outView.close());
// create the reader input view
memory = this.memoryManager.allocatePages(this.parentTask, 1);
final BlockChannelReader<MemorySegment> reader =
this.ioManager.createBlockChannelReader(channel);
final ChannelReaderInputView inView =
new ChannelReaderInputView(reader, memory, outView.getBlockCount(), true);
generator.reset();
// read and re-generate all records and compare them
final Tuple2<Integer, String> readRec = new Tuple2<>();
for (int i = 0; i < NUM_PAIRS_SHORT; i++) {
generator.next(rec);
serializer.deserialize(readRec, inView);
assertReadRecordMatchRegenerated(readRec, rec);
}
this.memoryManager.release(inView.close());
reader.deleteChannel();
}
@Test
void testWriteReadNotAll() throws Exception {
final TestData.TupleGenerator generator =
new TestData.TupleGenerator(
SEED, KEY_MAX, VALUE_SHORT_LENGTH, KeyMode.RANDOM, ValueMode.RANDOM_LENGTH);
final FileIOChannel.ID channel = this.ioManager.createChannel();
final TypeSerializer<Tuple2<Integer, String>> serializer =
TestData.getIntStringTupleSerializer();
// create the writer output view
List<MemorySegment> memory =
this.memoryManager.allocatePages(this.parentTask, NUM_MEMORY_SEGMENTS);
final BlockChannelWriter<MemorySegment> writer =
this.ioManager.createBlockChannelWriter(channel);
final ChannelWriterOutputView outView =
new ChannelWriterOutputView(writer, memory, MEMORY_PAGE_SIZE);
// write a number of pairs
final Tuple2<Integer, String> rec = new Tuple2<>();
for (int i = 0; i < NUM_PAIRS_SHORT; i++) {
generator.next(rec);
serializer.serialize(rec, outView);
}
this.memoryManager.release(outView.close());
// create the reader input view
memory = this.memoryManager.allocatePages(this.parentTask, NUM_MEMORY_SEGMENTS);
final BlockChannelReader<MemorySegment> reader =
this.ioManager.createBlockChannelReader(channel);
final ChannelReaderInputView inView =
new ChannelReaderInputView(reader, memory, outView.getBlockCount(), true);
generator.reset();
// read and re-generate all records and compare them
final Tuple2<Integer, String> readRec = new Tuple2<>();
for (int i = 0; i < NUM_PAIRS_SHORT / 2; i++) {
generator.next(rec);
serializer.deserialize(readRec, inView);
assertReadRecordMatchRegenerated(readRec, rec);
}
this.memoryManager.release(inView.close());
reader.deleteChannel();
}
private static void assertReadRecordMatchRegenerated(
Tuple2<Integer, String> readRec, Tuple2<Integer, String> rec) {
int k1 = rec.f0;
String v1 = rec.f1;
int k2 = readRec.f0;
String v2 = readRec.f1;
assertThat(k2)
.withFailMessage("The re-generated and the read record do not match.")
.isEqualTo(k1);
assertThat(v2)
.withFailMessage("The re-generated and the read record do not match.")
.isEqualTo(v1);
}
}
|
ChannelViewsTest
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/service/internal/AbstractServiceRegistryImpl.java
|
{
"start": 1635,
"end": 16813
}
|
class ____
implements ServiceRegistryImplementor, ServiceBinding.ServiceLifecycleOwner {
public static final String ALLOW_CRAWLING = "hibernate.service.allow_crawling";
private volatile @Nullable ServiceRegistryImplementor parent;
private final boolean allowCrawling;
private final ConcurrentMap<Class<?>,ServiceBinding<?>> serviceBindingMap = new ConcurrentHashMap<>();
private final ConcurrentMap<Class<?>,Class<?>> roleXref = new ConcurrentHashMap<>();
// The services stored in initializedServiceByRole are completely initialized
// (i.e., configured, dependencies injected, and started)
private final ConcurrentMap<Class<?>,Service> initializedServiceByRole = new ConcurrentHashMap<>();
// IMPL NOTE: the list used for ordered destruction. Cannot use the map above,
// because we need to iterate it in reverse order, which is only available
// through ListIterator.
// Assume 20 services for initial sizing.
// All access guarded by synchronization on the serviceBindingList itself.
private final List<ServiceBinding<?>> serviceBindingList = CollectionHelper.arrayList( 20 );
// Guarded by synchronization on this.
private final boolean autoCloseRegistry;
// Guarded by synchronization on this.
private Set<ServiceRegistryImplementor> childRegistries;
private final AtomicBoolean active = new AtomicBoolean( true );
protected AbstractServiceRegistryImpl(@Nullable ServiceRegistryImplementor parent) {
this( parent, true );
}
protected AbstractServiceRegistryImpl(
@Nullable ServiceRegistryImplementor parent,
boolean autoCloseRegistry) {
this.parent = parent;
this.allowCrawling = getBoolean( ALLOW_CRAWLING, Environment.getProperties(), true );
this.autoCloseRegistry = autoCloseRegistry;
}
public AbstractServiceRegistryImpl(BootstrapServiceRegistry bootstrapServiceRegistry) {
this( bootstrapServiceRegistry, true );
}
public AbstractServiceRegistryImpl(
BootstrapServiceRegistry bootstrapServiceRegistry,
boolean autoCloseRegistry) {
this.autoCloseRegistry = autoCloseRegistry;
if ( !(bootstrapServiceRegistry instanceof ServiceRegistryImplementor) ) {
throw new IllegalArgumentException( "ServiceRegistry parent needs to implement ServiceRegistryImplementor" );
}
this.parent = (ServiceRegistryImplementor) bootstrapServiceRegistry;
this.allowCrawling = getBoolean( ALLOW_CRAWLING, Environment.getProperties(), true );
}
// For nullness checking purposes
protected void initialize() {
if ( parent != null ) {
parent.registerChild( this );
}
}
protected <R extends Service> void createServiceBinding(ServiceInitiator<R> initiator) {
serviceBindingMap.put( initiator.getServiceInitiated(),
new ServiceBinding<>( this, initiator ) );
}
protected <R extends Service> void createServiceBinding(ProvidedService<R> providedService) {
var binding = locateServiceBinding( providedService.serviceRole(), false );
if ( binding == null ) {
binding = new ServiceBinding<>( this, providedService.serviceRole(), providedService.service() );
serviceBindingMap.put( providedService.serviceRole(), binding );
}
registerService( binding, providedService.service() );
}
protected void visitServiceBindings(Consumer<ServiceBinding<?>> action) {
serviceBindingList.forEach( action );
}
@Override
public @Nullable ServiceRegistry getParentServiceRegistry() {
return parent;
}
@Override
public <R extends Service> @Nullable ServiceBinding<R> locateServiceBinding(Class<R> serviceRole) {
return locateServiceBinding( serviceRole, true );
}
@SuppressWarnings("unchecked")
protected <R extends Service> @Nullable ServiceBinding<R> locateServiceBinding(Class<R> serviceRole, boolean checkParent) {
var serviceBinding = (ServiceBinding<R>) serviceBindingMap.get( serviceRole );
if ( serviceBinding == null && checkParent && parent != null ) {
// look in parent
serviceBinding = parent.locateServiceBinding( serviceRole );
}
if ( serviceBinding != null ) {
return serviceBinding;
}
if ( !allowCrawling ) {
return null;
}
// look for a previously resolved alternate registration
final Class<?> alternative = roleXref.get( serviceRole );
if ( alternative != null ) {
return (ServiceBinding<R>) serviceBindingMap.get( alternative );
}
// perform a crawl looking for an alternate registration
for ( var binding : serviceBindingMap.values() ) {
final Class<?> bindingServiceRole = binding.getServiceRole();
if ( serviceRole.isAssignableFrom( bindingServiceRole ) ) {
// we found an alternate...
SERVICE_LOGGER.alternateServiceRole( serviceRole.getName(), bindingServiceRole.getName() );
registerAlternate( serviceRole, bindingServiceRole );
return (ServiceBinding<R>) binding;
}
else {
final var bindingService = binding.getService();
if ( serviceRole.isInstance( bindingService ) ) {
// we found an alternate...
SERVICE_LOGGER.alternateServiceRole( serviceRole.getName(), bindingServiceRole.getName() );
registerAlternate( serviceRole, bindingServiceRole );
return (ServiceBinding<R>) binding;
}
}
}
return null;
}
private void registerAlternate(Class<?> alternate, Class<?> target) {
roleXref.put( alternate, target );
}
@Override
public <R extends Service> @Nullable R getService(Class<R> serviceRole) {
// Fast-path for ClassLoaderService as it's extremely hot during bootstrap
// (and after bootstrap service loading performance is less interesting as it's
// ideally being cached by long-term consumers)
if ( ClassLoaderService.class == serviceRole && parent != null ) {
return parent.getService( serviceRole );
}
// TODO: should an exception be thrown if active == false???
R service = serviceRole.cast( initializedServiceByRole.get( serviceRole ) );
if ( service != null ) {
return service;
}
//Any service initialization needs synchronization
synchronized ( this ) {
// Check again after having acquired the lock:
service = serviceRole.cast( initializedServiceByRole.get( serviceRole ) );
if ( service != null ) {
return service;
}
final ServiceBinding<R> serviceBinding = locateServiceBinding( serviceRole );
if ( serviceBinding == null ) {
throw new UnknownServiceException( serviceRole );
}
service = serviceBinding.getService();
if ( service == null ) {
service = initializeService( serviceBinding );
}
if ( service != null ) {
// add the service only after it is completely initialized
initializedServiceByRole.put( serviceRole, service );
}
return service;
}
}
protected <R extends Service> void registerService(ServiceBinding<R> serviceBinding, R service) {
serviceBinding.setService( service );
synchronized ( serviceBindingList ) {
serviceBindingList.add( serviceBinding );
}
}
/**
 * Drives the full service lifecycle for a binding: create, inject dependencies,
 * configure, then start. Returns {@code null} when the initiator produced no service.
 */
private <R extends Service> @Nullable R initializeService(ServiceBinding<R> serviceBinding) {
	if ( SERVICE_LOGGER.isTraceEnabled() ) {
		SERVICE_LOGGER.initializingService( serviceBinding.getServiceRole().getName() );
	}
	// PHASE 1: create service
	final R service = createService( serviceBinding );
	if ( service == null ) {
		return null;
	}
	// PHASE 2: inject service (***potentially recursive***)
	serviceBinding.getLifecycleOwner().injectDependencies( serviceBinding );
	// PHASE 3: configure service
	serviceBinding.getLifecycleOwner().configureService( serviceBinding );
	// PHASE 4: Start service
	serviceBinding.getLifecycleOwner().startService( serviceBinding );
	return service;
}
/**
 * Instantiates the service via its initiator and immediately registers it on the binding.
 * ServiceException is rethrown as-is; any other failure is wrapped with the role name.
 */
protected <R extends Service> @Nullable R createService(ServiceBinding<R> serviceBinding) {
	final var serviceInitiator = serviceBinding.getServiceInitiator();
	if ( serviceInitiator == null ) {
		// this condition should never ever occur
		throw new UnknownServiceException( serviceBinding.getServiceRole() );
	}
	try {
		final R service = serviceBinding.getLifecycleOwner().initiateService( serviceInitiator );
		// IMPL NOTE: the register call here is important to avoid potential stack overflow issues
		// from recursive calls through #configureService
		if ( service != null ) {
			registerService( serviceBinding, service );
		}
		return service;
	}
	catch ( ServiceException e ) {
		throw e;
	}
	catch ( Exception e ) {
		throw new ServiceException( "Unable to create requested service ["
				+ serviceBinding.getServiceRole().getName() + "] due to: " + e.getMessage(), e );
	}
}
/**
 * Injects dependencies into a freshly created service: first via @InjectService-annotated
 * setters, then via the ServiceRegistryAwareService callback when implemented.
 */
@Override
public <R extends Service> void injectDependencies(ServiceBinding<R> serviceBinding) {
	final R service = serviceBinding.getService();
	applyInjections( service );
	if ( service instanceof ServiceRegistryAwareService serviceRegistryAwareService ) {
		serviceRegistryAwareService.injectServices( this );
	}
}
// Scans the service's public methods for @InjectService and performs each injection.
private <R extends Service> void applyInjections(R service) {
	try {
		for ( var method : service.getClass().getMethods() ) {
			final var injectService = method.getAnnotation( InjectService.class );
			if ( injectService != null ) {
				processInjection( service, method, injectService );
			}
		}
	}
	catch (NullPointerException e) {
		// NOTE(review): swallowing NPE here hides the stack trace and continues with a
		// partially injected service — presumably a defensive guard; confirm whether
		// this should log the exception object or rethrow.
		SERVICE_LOGGER.error( "NPE injecting service dependencies: " + service.getClass().getName() );
	}
}
// Resolves the dependency role for one @InjectService method and performs the injection.
private <T extends Service> void processInjection(T service, Method injectionMethod, InjectService injectService) {
	injectDependentService( service, injectionMethod, injectService,
			dependentServiceRole( injectionMethod, injectService ) );
}
/**
 * Looks up the dependent service and invokes the injection setter with it.
 * A missing dependency is an error only when the injection is marked required.
 */
private <T extends Service> void injectDependentService(T service, Method injectionMethod, InjectService injectService, Class<? extends Service> dependentServiceRole) {
	// todo : because of the use of proxies, this is no longer returning null here...
	final var dependantService = getService( dependentServiceRole );
	if ( dependantService == null ) {
		if ( injectService.required() ) {
			throw new ServiceDependencyException(
					"Dependency [" + dependentServiceRole + "] declared by service [" + service + "] not found"
			);
		}
		// optional dependency absent: silently skip the injection
	}
	else {
		try {
			injectionMethod.invoke( service, dependantService );
		}
		catch ( Exception e ) {
			throw new ServiceDependencyException( "Cannot inject dependency service", e );
		}
	}
}
/**
 * Determines which service role an {@code @InjectService} method depends on.
 * The explicit {@code serviceRole()} attribute wins; when it is absent (null, or one
 * of the historical default values {@code Void.class} / {@code Service.class}), the
 * role is inferred from the declared type of the method's single parameter.
 *
 * @throws ServiceDependencyException when the method does not take exactly one parameter
 */
@SuppressWarnings("unchecked") // by @InjectService contract the single parameter is a Service subtype
private static Class<? extends Service> dependentServiceRole(Method injectionMethod, InjectService injectService) {
	// Validate arity before reflecting on parameter types.
	if ( injectionMethod.getParameterCount() != 1 ) {
		throw new ServiceDependencyException(
				"Encountered @InjectService on method with unexpected number of parameters"
		);
	}
	final var dependentServiceRole = injectService.serviceRole();
	if ( dependentServiceRole == null
			|| Void.class.equals( dependentServiceRole ) // old default value
			|| Service.class.equals( dependentServiceRole ) ) { // new default value
		// No explicit role: infer it from the injection method's single parameter.
		return (Class<? extends Service>) injectionMethod.getParameterTypes()[0];
	}
	else {
		return dependentServiceRole;
	}
}
// Starts the bound service if it opted into lifecycle start via Startable; no-op otherwise.
@Override
public <R extends Service> void startService(ServiceBinding<R> serviceBinding) {
	if ( serviceBinding.getService() instanceof Startable startable ) {
		startable.start();
	}
}
// Reports whether the registry is still usable; flips to false permanently in destroy().
@Override
public boolean isActive() {
	return active.get();
}
/**
 * Shuts the registry down exactly once: disables the lock-free lookup cache first,
 * stops services in reverse registration order, clears all bindings, and finally
 * de-registers from the parent registry. Idempotent via the `active` CAS.
 */
@Override
public synchronized void destroy() {
	if ( active.compareAndSet( true, false ) ) {
		try {
			//First thing, make sure that the fast path read is disabled so that
			//threads not owning the synchronization lock can't get an invalid Service:
			initializedServiceByRole.clear();
			synchronized (serviceBindingList) {
				// Iterate backwards so services stop in reverse of their start order.
				final var serviceBindingsIterator =
						serviceBindingList.listIterator( serviceBindingList.size() );
				while ( serviceBindingsIterator.hasPrevious() ) {
					final var serviceBinding = serviceBindingsIterator.previous();
					serviceBinding.getLifecycleOwner().stopService( serviceBinding );
				}
				serviceBindingList.clear();
			}
			serviceBindingMap.clear();
		}
		finally {
			// Always detach from the parent, even if stopping a service threw.
			if ( parent != null ) {
				parent.deRegisterChild( this );
			}
		}
	}
}
// Stops a Stoppable service, logging (not propagating) failures so shutdown can continue.
@Override
public synchronized <R extends Service> void stopService(ServiceBinding<R> binding) {
	final var service = binding.getService();
	if ( service instanceof Stoppable stoppable ) {
		try {
			stoppable.stop();
		}
		catch ( Exception e ) {
			SERVICE_LOGGER.unableToStopService( binding.getServiceRole().getName(), e );
		}
	}
}
// Tracks a child registry; the child set is created lazily. Duplicate registration is
// only warned about because it indicates a caller bug that will surface later.
@Override
public synchronized void registerChild(ServiceRegistryImplementor child) {
	if ( childRegistries == null ) {
		childRegistries = new HashSet<>();
	}
	if ( !childRegistries.add( child ) ) {
		SERVICE_LOGGER.warnf( "Child ServiceRegistry [%s] was already registered; this will end badly later", child );
	}
}
/**
 * Removes a child registry; when the last child goes away and auto-close is enabled,
 * this registry destroys itself.
 */
@Override
public synchronized void deRegisterChild(ServiceRegistryImplementor child) {
	if ( childRegistries == null ) {
		throw new IllegalStateException( "No child ServiceRegistry registrations found" );
	}
	childRegistries.remove( child );
	if ( childRegistries.isEmpty() ) {
		if ( autoCloseRegistry ) {
			SERVICE_LOGGER.trace( "Automatically destroying ServiceRegistry after deregistration of every child ServiceRegistry" );
			destroy();
		}
		else {
			SERVICE_LOGGER.trace( "Skipping destroying ServiceRegistry after deregistration of every child ServiceRegistry" );
		}
	}
}
/**
 * Not intended for general use. We need the ability to stop and "reactivate" a registry to allow
 * experimentation with technologies such as GraalVM, Quarkus and Cri-O.
 *
 * <p>Detaches from the current parent (if any), then attaches to {@code newParent},
 * which must implement {@link ServiceRegistryImplementor}.
 */
public synchronized void resetParent(@Nullable BootstrapServiceRegistry newParent) {
	if ( parent != null ) {
		parent.deRegisterChild( this );
	}
	if ( newParent == null ) {
		parent = null;
	}
	// Pattern-matching instanceof, consistent with the rest of this class.
	else if ( newParent instanceof ServiceRegistryImplementor implementor ) {
		parent = implementor;
		parent.registerChild( this );
	}
	else {
		throw new IllegalArgumentException( "ServiceRegistry parent needs to implement ServiceRegistryImplementor" );
	}
}
// Convenience overload: searches this registry first, then its registered children.
@Override
public <T extends Service> @Nullable T fromRegistryOrChildren(Class<T> serviceRole) {
	return fromRegistryOrChildren( serviceRole, this, childRegistries );
}
/**
 * Resolves {@code serviceRole} from {@code serviceRegistry} itself when possible,
 * otherwise from the first child registry that can provide it.
 *
 * @return the service, or {@code null} when no registry in the family provides it
 */
public static <T extends Service> @Nullable T fromRegistryOrChildren(
		Class<T> serviceRole,
		ServiceRegistryImplementor serviceRegistry,
		@Nullable Set<ServiceRegistryImplementor> childRegistries) {
	// The owning registry always takes precedence over its children.
	final T ownService = serviceRegistry.getService( serviceRole );
	if ( ownService != null ) {
		return ownService;
	}
	if ( childRegistries == null ) {
		return null;
	}
	for ( var child : childRegistries ) {
		final T fromChild = child.getService( serviceRole );
		if ( fromChild != null ) {
			return fromChild;
		}
	}
	return null;
}
/**
 * Not intended for general use. We need the ability to stop and "reactivate" a registry
 * to allow experimentation with technologies such as GraalVM, Quarkus and Cri-O.
 *
 * @throws IllegalStateException when the registry was not previously destroyed
 */
@Internal
public synchronized void reactivate() {
	// CAS guarantees we only transition inactive -> active exactly once.
	if ( !active.compareAndSet( false, true ) ) {
		throw new IllegalStateException( "Was not inactive, could not reactivate" );
	}
}
}
|
AbstractServiceRegistryImpl
|
java
|
apache__dubbo
|
dubbo-common/src/main/java/org/apache/dubbo/common/constants/CommonConstants.java
|
{
"start": 16401,
"end": 17432
}
|
// Names of standard JVM system properties, as passed to System.getProperty.
interface ____ {
    // User environment
    String USER_HOME = "user.home";
    // JVM / platform identification
    String SYSTEM_JAVA_VERSION = "java.version";
    String SYSTEM_JAVA_IO_TMPDIR = "java.io.tmpdir";
    String SYSTEM_LINE_SEPARATOR = "line.separator";
    // Dubbo-specific toggles read via system properties
    String SERIALIZATION_SECURITY_CHECK_KEY = "serialization.security.check";
    String SYSTEM_BYTE_ACCESSOR_KEY = "byte.accessor";
    // Operating system details
    String SYSTEM_OS_NAME = "os.name";
    String SYSTEM_OS_VERSION = "os.version";
    // Java runtime / VM details
    String JAVA_RUNTIME_NAME = "java.runtime.name";
    String JAVA_RUNTIME_VERSION = "java.runtime.version";
    String JAVA_VM_NAME = "java.vm.name";
    String JAVA_VM_VERSION = "java.vm.version";
    String JAVA_VM_INFO = "java.vm.info";
    String JAVA_HOME = "java.home";
    String OS_ARCH = "os.arch";
    String SYSTEM_FILE_ENCODING = "file.encoding";
    // RMI transport tuning knob
    String SYSTEM_TCP_RESPONSE_TIMEOUT = "sun.rmi.transport.tcp.responseTimeout";
}
/**
* Third-party-related VM properties
*/
|
SystemProperty
|
java
|
apache__camel
|
components/camel-spring-parent/camel-spring-xml/src/test/java/org/apache/camel/spring/produce/ProduceTemplateTest.java
|
{
"start": 1227,
"end": 1659
}
|
// Verifies that a Spring-injected ProducerTemplate delivers a body to the expected mock endpoint.
class ____ extends SpringRunWithTestSupport {
    // Injected by the Spring test context.
    @Autowired
    protected ProducerTemplate producer;
    // Mock endpoint used to assert what was delivered.
    @EndpointInject("mock:result")
    protected MockEndpoint result;
    @Test
    public void testProducerTemplate() throws Exception {
        result.expectedBodiesReceived("hello");
        // lets send a message
        producer.sendBody("direct:start", "hello");
        result.assertIsSatisfied();
    }
}
|
ProduceTemplateTest
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/streaming/runtime/watermarkstatus/HeapPriorityQueueTest.java
|
{
"start": 1228,
"end": 10050
}
|
class ____ {
    // Orders test elements by ascending priority value (min-heap semantics).
    private static final HeapPriorityQueue.PriorityComparator<TestElement>
            TEST_ELEMENT_PRIORITY_COMPARATOR =
                    (left, right) -> Long.compare(left.getPriority(), right.getPriority());
    // Inserts random elements and verifies peek/poll drain them in non-decreasing
    // priority order while size bookkeeping stays consistent.
    @Test
    void testPeekPollOrder() {
        final int initialCapacity = 4;
        final int testSize = 1000;
        final Comparator<Long> comparator = getTestElementPriorityComparator();
        HeapPriorityQueue<TestElement> priorityQueue = newPriorityQueue(initialCapacity);
        HashSet<TestElement> checkSet = new HashSet<>(testSize);
        insertRandomElements(priorityQueue, checkSet, testSize);
        long lastPriorityValue = getHighestPriorityValueForComparator();
        int lastSize = priorityQueue.size();
        assertThat(testSize).isEqualTo(lastSize);
        TestElement testElement;
        while ((testElement = priorityQueue.peek()) != null) {
            assertThat(priorityQueue.isEmpty()).isFalse();
            assertThat(lastSize).isEqualTo(priorityQueue.size());
            // peek must agree with the element poll subsequently removes
            assertThat(testElement).isEqualTo(priorityQueue.poll());
            assertThat(checkSet.remove(testElement)).isTrue();
            // priorities must be monotonically non-decreasing
            assertThat(comparator.compare(testElement.getPriority(), lastPriorityValue) >= 0)
                    .isTrue();
            lastPriorityValue = testElement.getPriority();
            --lastSize;
        }
        assertThat(priorityQueue.isEmpty()).isTrue();
        assertThat(priorityQueue.size()).isZero();
        assertThat(checkSet).isEmpty();
    }
    // Randomly interleaves removals, bulk polls, and re-insertions, asserting the
    // queue still yields elements in non-decreasing priority order throughout.
    @Test
    void testRemoveInsertMixKeepsOrder() {
        HeapPriorityQueue<TestElement> priorityQueue = newPriorityQueue(3);
        final Comparator<Long> comparator = getTestElementPriorityComparator();
        final ThreadLocalRandom random = ThreadLocalRandom.current();
        final int testSize = 300;
        final int addCounterMax = testSize / 4;
        int iterationsTillNextAdds = random.nextInt(addCounterMax);
        HashSet<TestElement> checkSet = new HashSet<>(testSize);
        insertRandomElements(priorityQueue, checkSet, testSize);
        // check that the whole set is still in order
        while (!checkSet.isEmpty()) {
            final long highestPrioValue = getHighestPriorityValueForComparator();
            Iterator<TestElement> iterator = checkSet.iterator();
            TestElement element = iterator.next();
            iterator.remove();
            final boolean removesHead = element.equals(priorityQueue.peek());
            if (removesHead) {
                assertThat(priorityQueue.remove(element)).isTrue();
            } else {
                priorityQueue.remove(element);
            }
            long currentPriorityWatermark;
            // test some bulk polling from time to time
            if (removesHead) {
                currentPriorityWatermark = element.getPriority();
            } else {
                currentPriorityWatermark = highestPrioValue;
            }
            while ((element = priorityQueue.poll()) != null) {
                assertThat(comparator.compare(element.getPriority(), currentPriorityWatermark) >= 0)
                        .isTrue();
                currentPriorityWatermark = element.getPriority();
                if (--iterationsTillNextAdds == 0) {
                    // some random adds
                    iterationsTillNextAdds = random.nextInt(addCounterMax);
                    insertRandomElements(
                            priorityQueue, new HashSet<>(checkSet), 1 + random.nextInt(3));
                    currentPriorityWatermark = priorityQueue.peek().getPriority();
                }
            }
            assertThat(priorityQueue.isEmpty()).isTrue();
            // refill the queue from the remaining reference set for the next round
            checkSet.forEach(priorityQueue::add);
        }
    }
    // Verifies poll() returns null on empty, drains all inserted elements in
    // non-decreasing priority order, and returns null again once exhausted.
    @Test
    void testPoll() {
        HeapPriorityQueue<TestElement> priorityQueue = newPriorityQueue(3);
        final Comparator<Long> comparator = getTestElementPriorityComparator();
        assertThat(priorityQueue.poll()).isNull();
        final int testSize = 345;
        HashSet<TestElement> checkSet = new HashSet<>(testSize);
        insertRandomElements(priorityQueue, checkSet, testSize);
        long lastPriorityValue = getHighestPriorityValueForComparator();
        while (!priorityQueue.isEmpty()) {
            TestElement removed = priorityQueue.poll();
            assertThat(removed).isNotNull();
            assertThat(checkSet.remove(removed)).isTrue();
            assertThat(comparator.compare(removed.getPriority(), lastPriorityValue) >= 0).isTrue();
            lastPriorityValue = removed.getPriority();
        }
        assertThat(checkSet).isEmpty();
        assertThat(priorityQueue.poll()).isNull();
    }
@Test
void testIsEmpty() {
HeapPriorityQueue<TestElement> priorityQueue = newPriorityQueue(1);
assertThat(priorityQueue.isEmpty()).isTrue();
assertThat(priorityQueue.add(new TestElement(4711L, 42L))).isTrue();
assertThat(priorityQueue.isEmpty()).isFalse();
priorityQueue.poll();
assertThat(priorityQueue.isEmpty()).isTrue();
}
    // Adds two elements and verifies size tracking plus poll order matching the comparator.
    @Test
    void testAdd() {
        HeapPriorityQueue<TestElement> priorityQueue = newPriorityQueue(1);
        final List<TestElement> testElements =
                Arrays.asList(new TestElement(4711L, 42L), new TestElement(815L, 23L));
        // sort descending so the element expected to poll first ends up last in the list
        testElements.sort(
                (l, r) -> getTestElementPriorityComparator().compare(r.priority, l.priority));
        assertThat(priorityQueue.add(testElements.get(0))).isTrue();
        assertThat(priorityQueue.size()).isEqualTo(1);
        assertThat(priorityQueue.add(testElements.get(1))).isTrue();
        assertThat(priorityQueue.size()).isEqualTo(2);
        assertThat(priorityQueue.poll()).isEqualTo(testElements.get(1));
        assertThat(priorityQueue.size()).isEqualTo(1);
        assertThat(priorityQueue.poll()).isEqualTo(testElements.get(0));
        assertThat(priorityQueue.size()).isZero();
    }
@Test
void testRemove() {
HeapPriorityQueue<TestElement> priorityQueue = newPriorityQueue(1);
final long key = 4711L;
final long priorityValue = 42L;
final TestElement testElement = new TestElement(key, priorityValue);
assertThat(priorityQueue.add(testElement)).isTrue();
assertThat(priorityQueue.remove(testElement)).isTrue();
assertThat(priorityQueue.isEmpty()).isTrue();
}
    // clear() must drop every element regardless of how many were inserted.
    @Test
    void testClear() {
        HeapPriorityQueue<TestElement> priorityQueueSet = newPriorityQueue(1);
        int count = 10;
        HashSet<TestElement> checkSet = new HashSet<>(count);
        insertRandomElements(priorityQueueSet, checkSet, count);
        assertThat(priorityQueueSet.size()).isEqualTo(count);
        priorityQueueSet.clear();
        assertThat(priorityQueueSet.size()).isZero();
    }
    // Factory for the queue under test, wired with the shared priority comparator.
    private HeapPriorityQueue<TestElement> newPriorityQueue(int initialCapacity) {
        return new HeapPriorityQueue<>(TEST_ELEMENT_PRIORITY_COMPARATOR, initialCapacity);
    }
    // Reference comparator over raw priority values, matching TEST_ELEMENT_PRIORITY_COMPARATOR.
    private Comparator<Long> getTestElementPriorityComparator() {
        return Long::compareTo;
    }
    // Sentinel watermark: the value every element compares >= against under the
    // test comparator (MIN_VALUE for ascending order, MAX_VALUE for descending).
    private long getHighestPriorityValueForComparator() {
        return getTestElementPriorityComparator().compare(-1L, 1L) > 0
                ? Long.MAX_VALUE
                : Long.MIN_VALUE;
    }
    /**
     * Inserts {@code count} unique random elements into the queue, mirroring them in
     * {@code checkSet}. Occasionally reuses a priority to exercise duplicate-priority
     * handling, and checks add()'s head-change indication against peek().
     */
    private static void insertRandomElements(
            HeapPriorityQueue<TestElement> priorityQueue, Set<TestElement> checkSet, int count) {
        ThreadLocalRandom localRandom = ThreadLocalRandom.current();
        // few keys relative to count forces key collisions while elements stay unique
        final int numUniqueKeys = Math.max(count / 4, 64);
        long duplicatePriority = Long.MIN_VALUE;
        final boolean checkEndSizes = priorityQueue.isEmpty();
        for (int i = 0; i < count; ++i) {
            TestElement element;
            do {
                long elementPriority;
                if (duplicatePriority == Long.MIN_VALUE) {
                    elementPriority = localRandom.nextLong();
                } else {
                    // reuse the previously flagged priority once, then reset the flag
                    elementPriority = duplicatePriority;
                    duplicatePriority = Long.MIN_VALUE;
                }
                element = new TestElement(localRandom.nextInt(numUniqueKeys), elementPriority);
            } while (!checkSet.add(element));
            if (localRandom.nextInt(10) == 0) {
                duplicatePriority = element.getPriority();
            }
            final boolean headChangedIndicated = priorityQueue.add(element);
            // add() must report true whenever the new element became the head
            if (element.equals(priorityQueue.peek())) {
                assertThat(headChangedIndicated).isTrue();
            }
        }
        if (checkEndSizes) {
            assertThat(count).isEqualTo(priorityQueue.size());
        }
    }
/** Payload for usage in the test. */
private static
|
HeapPriorityQueueTest
|
java
|
micronaut-projects__micronaut-core
|
http-client/src/main/java/io/micronaut/http/client/netty/ssl/NettyClientSslBuilder.java
|
{
"start": 2458,
"end": 8245
}
|
// Builds Netty client-side SslContext instances (TLS, ALPN/HTTP2, and QUIC/HTTP3)
// from Micronaut SslConfiguration. Security-sensitive: note the insecure-trust-all path.
class ____ extends SslBuilder<SslContext> implements ClientSslBuilder {
    private static final Logger LOG = LoggerFactory.getLogger(NettyClientSslBuilder.class);
    /**
     * @param resourceResolver The resource resolver
     */
    public NettyClientSslBuilder(ResourceResolver resourceResolver) {
        super(resourceResolver);
    }
    @SuppressWarnings("Duplicates")
    @Override
    public final Optional<SslContext> build(SslConfiguration ssl) {
        // Default to HTTP/1.1 when no version is specified.
        return build(ssl, HttpVersion.HTTP_1_1);
    }
    @Override
    public final Optional<SslContext> build(SslConfiguration ssl, HttpVersion httpVersion) {
        // Disabled SSL yields an empty result rather than an error.
        if (!ssl.isEnabled()) {
            return Optional.empty();
        }
        return Optional.of(build(ssl, HttpVersionSelection.forLegacyVersion(httpVersion)));
    }
    @NonNull
    @Override
    public final SslContext build(SslConfiguration ssl, HttpVersionSelection versionSelection) {
        try {
            return createSslContextBuilder(ssl, versionSelection).build();
        } catch (SSLException ex) {
            throw new SslConfigurationException("An error occurred while setting up SSL", ex);
        }
    }
    /**
     * Create a new client context builder.
     *
     * @param ssl The ssl configuration
     * @param versionSelection The allowed HTTP versions
     * @return The builder
     */
    protected SslContextBuilder createSslContextBuilder(SslConfiguration ssl, HttpVersionSelection versionSelection) {
        SslContextBuilder sslBuilder = SslContextBuilder
            .forClient()
            .keyManager(getKeyManagerFactory(ssl))
            .trustManager(getTrustManagerFactory(ssl))
            .sslProvider(NettyTlsUtils.sslProvider(ssl));
        // Explicitly configured protocols take effect only when present.
        Optional<String[]> protocols = ssl.getProtocols();
        if (protocols.isPresent()) {
            sslBuilder.protocols(protocols.get());
        }
        Optional<String[]> ciphers = ssl.getCiphers();
        if (ciphers.isPresent()) {
            sslBuilder = sslBuilder.ciphers(Arrays.asList(ciphers.get()));
        } else if (versionSelection.isHttp2CipherSuites()) {
            // No explicit ciphers: fall back to the HTTP/2 blocklist-compliant suites.
            sslBuilder.ciphers(Http2SecurityUtil.CIPHERS, SupportedCipherSuiteFilter.INSTANCE);
        }
        Optional<ClientAuthentication> clientAuthentication = ssl.getClientAuthentication();
        if (clientAuthentication.isPresent()) {
            ClientAuthentication clientAuth = clientAuthentication.get();
            if (clientAuth == ClientAuthentication.NEED) {
                sslBuilder = sslBuilder.clientAuth(ClientAuth.REQUIRE);
            } else if (clientAuth == ClientAuthentication.WANT) {
                sslBuilder = sslBuilder.clientAuth(ClientAuth.OPTIONAL);
            }
        }
        if (versionSelection.isAlpn()) {
            // ALPN: prefer OpenSSL when available and permitted, otherwise the JDK provider.
            SslProvider provider = ssl.isPreferOpenssl() && SslProvider.isAlpnSupported(SslProvider.OPENSSL) ? SslProvider.OPENSSL : SslProvider.JDK;
            sslBuilder.sslProvider(provider);
            sslBuilder.applicationProtocolConfig(new ApplicationProtocolConfig(
                ApplicationProtocolConfig.Protocol.ALPN,
                ApplicationProtocolConfig.SelectorFailureBehavior.NO_ADVERTISE,
                ApplicationProtocolConfig.SelectedListenerFailureBehavior.ACCEPT,
                versionSelection.getAlpnSupportedProtocols()
            ));
        }
        return sslBuilder;
    }
    @Override
    public final QuicSslContext buildHttp3(SslConfiguration ssl) {
        // QUIC variant for HTTP/3; mirrors the TLS client-auth handling above.
        QuicSslContextBuilder sslBuilder = QuicSslContextBuilder.forClient()
            .keyManager(getKeyManagerFactory(ssl), ssl.getKeyStore().getPassword().orElse(null))
            .trustManager(getTrustManagerFactory(ssl))
            .applicationProtocols(Http3.supportedApplicationProtocols());
        Optional<ClientAuthentication> clientAuthentication = ssl.getClientAuthentication();
        if (clientAuthentication.isPresent()) {
            ClientAuthentication clientAuth = clientAuthentication.get();
            if (clientAuth == ClientAuthentication.NEED) {
                sslBuilder.clientAuth(ClientAuth.REQUIRE);
            } else if (clientAuth == ClientAuthentication.WANT) {
                sslBuilder.clientAuth(ClientAuth.OPTIONAL);
            }
        }
        return sslBuilder.build();
    }
    @Override
    protected KeyManagerFactory getKeyManagerFactory(SslConfiguration ssl) {
        try {
            Optional<KeyStore> ks = this.getKeyStore(ssl);
            if (ks.isPresent()) {
                return NettyTlsUtils.storeToFactory(ssl, ks.orElse(null));
            } else {
                // null lets Netty proceed without a client key manager
                return null;
            }
        } catch (Exception ex) {
            throw new SslConfigurationException(ex);
        }
    }
    @Override
    protected TrustManagerFactory getTrustManagerFactory(SslConfiguration ssl) {
        try {
            Optional<KeyStore> trustStore = getTrustStore(ssl);
            if (trustStore.isPresent()) {
                return super.getTrustManagerFactory(trustStore.get());
            } else {
                if (ssl instanceof AbstractClientSslConfiguration configuration && configuration.isInsecureTrustAllCertificates()) {
                    // SECURITY: explicitly opted-in trust-all mode; loudly warned against for production.
                    if (LOG.isWarnEnabled()) {
                        LOG.warn("HTTP Client is configured to trust all certificates ('insecure-trust-all-certificates' is set to true). Trusting all certificates is not secure and should not be used in production.");
                    }
                    return InsecureTrustManagerFactory.INSTANCE;
                } else {
                    // netty will use the JDK trust store
                    return null;
                }
            }
        } catch (Exception ex) {
            throw new SslConfigurationException(ex);
        }
    }
}
|
NettyClientSslBuilder
|
java
|
apache__flink
|
flink-core/src/main/java/org/apache/flink/api/common/typeutils/base/MapSerializer.java
|
{
"start": 1822,
"end": 7251
}
|
// Flink TypeSerializer for Map<K, V>. Wire format: int size, then per entry the
// serialized key followed by a null-marker boolean and (when non-null) the value.
// Keys must not be null; values may be.
class ____<K, V> extends TypeSerializer<Map<K, V>> {
    private static final long serialVersionUID = -6885593032367050078L;
    /** The serializer for the keys in the map */
    private final TypeSerializer<K> keySerializer;
    /** The serializer for the values in the map */
    private final TypeSerializer<V> valueSerializer;
    /**
     * Creates a map serializer that uses the given serializers to serialize the key-value pairs in
     * the map.
     *
     * @param keySerializer The serializer for the keys in the map
     * @param valueSerializer The serializer for the values in the map
     */
    public MapSerializer(TypeSerializer<K> keySerializer, TypeSerializer<V> valueSerializer) {
        this.keySerializer =
                Preconditions.checkNotNull(keySerializer, "The key serializer cannot be null");
        this.valueSerializer =
                Preconditions.checkNotNull(valueSerializer, "The value serializer cannot be null.");
    }
    // ------------------------------------------------------------------------
    //  MapSerializer specific properties
    // ------------------------------------------------------------------------
    public TypeSerializer<K> getKeySerializer() {
        return keySerializer;
    }
    public TypeSerializer<V> getValueSerializer() {
        return valueSerializer;
    }
    // ------------------------------------------------------------------------
    //  Type Serializer implementation
    // ------------------------------------------------------------------------
    @Override
    public boolean isImmutableType() {
        return false;
    }
    @Override
    public TypeSerializer<Map<K, V>> duplicate() {
        // Only allocate a new instance when at least one nested serializer is stateful
        // (i.e. its duplicate() returned a distinct object).
        TypeSerializer<K> duplicateKeySerializer = keySerializer.duplicate();
        TypeSerializer<V> duplicateValueSerializer = valueSerializer.duplicate();
        return (duplicateKeySerializer == keySerializer)
                        && (duplicateValueSerializer == valueSerializer)
                ? this
                : new MapSerializer<>(duplicateKeySerializer, duplicateValueSerializer);
    }
    @Override
    public Map<K, V> createInstance() {
        return new HashMap<>();
    }
    @Override
    public Map<K, V> copy(Map<K, V> from) {
        // Deep copy: keys and non-null values are copied through their serializers.
        Map<K, V> newMap = CollectionUtil.newHashMapWithExpectedSize(from.size());
        for (Map.Entry<K, V> entry : from.entrySet()) {
            K newKey = keySerializer.copy(entry.getKey());
            V newValue = entry.getValue() == null ? null : valueSerializer.copy(entry.getValue());
            newMap.put(newKey, newValue);
        }
        return newMap;
    }
    @Override
    public Map<K, V> copy(Map<K, V> from, Map<K, V> reuse) {
        // The reuse instance is ignored; a fresh map is always produced.
        return copy(from);
    }
    @Override
    public int getLength() {
        return -1; // var length
    }
    @Override
    public void serialize(Map<K, V> map, DataOutputView target) throws IOException {
        final int size = map.size();
        target.writeInt(size);
        for (Map.Entry<K, V> entry : map.entrySet()) {
            keySerializer.serialize(entry.getKey(), target);
            // boolean null-marker precedes each value
            if (entry.getValue() == null) {
                target.writeBoolean(true);
            } else {
                target.writeBoolean(false);
                valueSerializer.serialize(entry.getValue(), target);
            }
        }
    }
    @Override
    public Map<K, V> deserialize(DataInputView source) throws IOException {
        final int size = source.readInt();
        final Map<K, V> map = CollectionUtil.newHashMapWithExpectedSize(size);
        for (int i = 0; i < size; ++i) {
            K key = keySerializer.deserialize(source);
            boolean isNull = source.readBoolean();
            V value = isNull ? null : valueSerializer.deserialize(source);
            map.put(key, value);
        }
        return map;
    }
    @Override
    public Map<K, V> deserialize(Map<K, V> reuse, DataInputView source) throws IOException {
        // The reuse instance is ignored; deserialization always builds a fresh map.
        return deserialize(source);
    }
    @Override
    public void copy(DataInputView source, DataOutputView target) throws IOException {
        // Streams records from source to target without materializing the map.
        final int size = source.readInt();
        target.writeInt(size);
        for (int i = 0; i < size; ++i) {
            keySerializer.copy(source, target);
            boolean isNull = source.readBoolean();
            target.writeBoolean(isNull);
            if (!isNull) {
                valueSerializer.copy(source, target);
            }
        }
    }
    @Override
    public boolean equals(Object obj) {
        return obj == this
                || (obj != null
                        && obj.getClass() == getClass()
                        && keySerializer.equals(((MapSerializer<?, ?>) obj).getKeySerializer())
                        && valueSerializer.equals(
                                ((MapSerializer<?, ?>) obj).getValueSerializer()));
    }
    @Override
    public int hashCode() {
        return keySerializer.hashCode() * 31 + valueSerializer.hashCode();
    }
    // --------------------------------------------------------------------------------------------
    // Serializer configuration snapshotting
    // --------------------------------------------------------------------------------------------
    @Override
    public TypeSerializerSnapshot<Map<K, V>> snapshotConfiguration() {
        return new MapSerializerSnapshot<>(this);
    }
}
|
MapSerializer
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/inject/dagger/AndroidInjectionBeforeSuperTest.java
|
{
"start": 5310,
"end": 5966
}
|
class ____ extends Service {
@Override
public void onCreate() {
super.onCreate();
// BUG: Diagnostic contains: AndroidInjectionBeforeSuper
AndroidInjection.inject(this);
}
@Override
public IBinder onBind(Intent intent) {
return null;
}
}
}\
""")
.addSourceLines(
"AndroidInjection.java",
"""
package dagger.android;
import android.app.Activity;
import android.app.Fragment;
import android.app.Service;
/**
* Stub
|
WrongOrderService
|
java
|
elastic__elasticsearch
|
distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/ProxyUtilsTests.java
|
{
"start": 935,
"end": 2269
}
|
// Tests for ProxyUtils.buildProxy: valid "host:port" strings, the null pass-through
// case, and rejection of malformed host/port combinations.
class ____ extends ESTestCase {
    /**
     * Check that building a proxy with just a hostname and port succeeds.
     */
    public void testBuildProxy_withHostPort() throws Exception {
        assertThat(buildProxy("host:1234"), matchesProxy(Type.HTTP, "host", 1234));
    }
    /**
     * Check that building a proxy with a null value succeeds, returning a pass-through (direct) proxy.
     */
    public void testBuildProxy_withNullValue() throws Exception {
        assertThat(buildProxy(null), is(nullValue()));
    }
    /**
     * Check that building a proxy with a missing host is rejected.
     */
    public void testBuildProxy_withMissingHost() {
        UserException e = expectThrows(UserException.class, () -> buildProxy(":1234"));
        assertThat(e.getMessage(), equalTo("Malformed [proxy], expected [host:port]"));
    }
    /**
     * Check that building a proxy with a missing or invalid port is rejected.
     */
    public void testBuildProxy_withInvalidPort() {
        // Covers empty, negative, non-numeric, templated, and absent port segments.
        Stream.of("host:", "host.domain:-1", "host.domain:$PORT", "host.domain:{{port}}", "host.domain").forEach(testCase -> {
            UserException e = expectThrows(UserException.class, () -> buildProxy(testCase));
            assertThat(e.getMessage(), equalTo("Malformed [proxy], expected [host:port]"));
        });
    }
}
|
ProxyUtilsTests
|
java
|
apache__commons-lang
|
src/main/java/org/apache/commons/lang3/builder/ToStringStyle.java
|
{
"start": 16236,
"end": 18767
}
|
// ToStringStyle that emits only the field values: no class name, no identity hash,
// no field names, and no surrounding brackets.
class ____ extends ToStringStyle {
    private static final long serialVersionUID = 1L;
    /**
     * Constructs a new instance.
     *
     * <p>
     * Use the static constant rather than instantiating.
     * </p>
     */
    SimpleToStringStyle() {
        setUseClassName(false);
        setUseIdentityHashCode(false);
        setUseFieldNames(false);
        setContentStart(StringUtils.EMPTY);
        setContentEnd(StringUtils.EMPTY);
    }
    /**
     * Ensure {@code Singleton} after serialization.
     *
     * @return the singleton
     */
    private Object readResolve() {
        return SIMPLE_STYLE;
    }
}
/**
* Serialization version ID.
*/
private static final long serialVersionUID = -2587890625525655916L;
/**
* The default toString style. Using the {@code Person} example from {@link ToStringBuilder}, the output would look like this:
*
* <pre>
* Person@182f0db[name=John Doe,age=33,smoker=false]
* </pre>
*/
public static final ToStringStyle DEFAULT_STYLE = new DefaultToStringStyle();
/**
* The multi line toString style. Using the {@code Person} example from {@link ToStringBuilder}, the output would look like this:
*
* <pre>
* Person@182f0db[
* name=John Doe
* age=33
* smoker=false
* ]
* </pre>
*/
public static final ToStringStyle MULTI_LINE_STYLE = new MultiLineToStringStyle();
/**
* The no field names toString style. Using the {@code Person} example from {@link ToStringBuilder}, the output would look like this:
*
* <pre>
* Person@182f0db[John Doe,33,false]
* </pre>
*/
public static final ToStringStyle NO_FIELD_NAMES_STYLE = new NoFieldNameToStringStyle();
/**
* The short prefix toString style. Using the {@code Person} example from {@link ToStringBuilder}, the output would look like this:
*
* <pre>
* Person[name=John Doe,age=33,smoker=false]
* </pre>
*
* @since 2.1
*/
public static final ToStringStyle SHORT_PREFIX_STYLE = new ShortPrefixToStringStyle();
/**
* The simple toString style. Using the {@code Person} example from {@link ToStringBuilder}, the output would look like this:
*
* <pre>
* John Doe,33,false
* </pre>
*/
public static final ToStringStyle SIMPLE_STYLE = new SimpleToStringStyle();
/**
* The no
|
SimpleToStringStyle
|
java
|
quarkusio__quarkus
|
independent-projects/resteasy-reactive/common/runtime/src/test/java/org/jboss/resteasy/reactive/common/util/URLUtilsTest.java
|
{
"start": 252,
"end": 1969
}
|
// Tests URLUtils.decode for malformed percent-encoding rejection, lenient handling
// of invalid UTF-8 byte sequences, and correct decoding of valid inputs.
class ____ {
    @Test
    void decodeInvalidPercentEncoding() {
        // Truncated escape and non-hex digits must both be rejected.
        String incomplete = "invalid%2";
        String invalidHex = "invalid%zz";
        assertThrows(IllegalArgumentException.class,
                () -> URLUtils.decode(incomplete, StandardCharsets.UTF_8, true, new StringBuilder()));
        assertThrows(IllegalArgumentException.class,
                () -> URLUtils.decode(invalidHex, StandardCharsets.UTF_8, true, new StringBuilder()));
    }
    @Test
    void decodeGrayAreaInvalidUtf8() {
        String invalidUtf8 = "invalid%80";
        // This is a gray area: %80 is not valid in UTF-8 as a standalone byte,
        // but Java's default decoding behavior does not throw an exception.
        // Instead, it replaces it with a special character (�).
        //
        // To enforce strict decoding, CharsetDecoder with CodingErrorAction.REPORT
        // should be used inside URLUtils.decode.
        String decoded = URLUtils.decode(invalidUtf8, StandardCharsets.UTF_8, true, new StringBuilder());
        assertEquals("invalid�", decoded); // Note: This may vary depending on the JVM.
    }
    @Test
    void decodeValidValues() {
        String path = "test%20path";
        String formEncoded = "test+path";
        String japanese = "%E3%83%86%E3%82%B9%E3%83%88"; // テスト
        assertEquals("test path",
                URLUtils.decode(path, StandardCharsets.UTF_8, true, new StringBuilder()));
        // '+' decodes to space only when form-decoding is enabled (extra boolean flag).
        assertEquals("test path",
                URLUtils.decode(formEncoded, StandardCharsets.UTF_8, true, true, new StringBuilder()));
        assertEquals("テスト",
                URLUtils.decode(japanese, StandardCharsets.UTF_8, true, new StringBuilder()));
    }
}
|
URLUtilsTest
|
java
|
quarkusio__quarkus
|
integration-tests/oidc-mtls/src/test/java/io/quarkus/it/oidc/OidcMtlsBasicAuthTest.java
|
{
"start": 1154,
"end": 6079
}
|
class ____ {
KeycloakTestClient client = new KeycloakTestClient(
new Tls("target/certificates/oidc-client-keystore.p12",
"target/certificates/oidc-client-truststore.p12"));
@TestHTTPResource(tls = true)
URL url;
@Inject
Vertx vertx;
    // LAX inclusive-auth: a valid bearer token yields 200, an invalid token 401, and
    // no token still yields 200 because mTLS alone satisfies the lax policy.
    @Test
    public void testMtlsJwtLax() throws Exception {
        // verifies that in LAX mode, it is permitted that not all the mechanisms need to create the identity
        WebClientOptions options = createWebClientOptions(url);
        WebClient webClient = WebClient.create(new io.vertx.mutiny.core.Vertx(vertx), options);
        try {
            // HTTP 200
            HttpResponse<io.vertx.mutiny.core.buffer.Buffer> resp = webClient.get("/multiple-auth-mechanisms/mtls-jwt-lax")
                    .putHeader("Authorization",
                            OidcConstants.BEARER_SCHEME + " " + getAccessToken("backend-service", null, "alice"))
                    .send().await()
                    .indefinitely();
            assertEquals(200, resp.statusCode());
            // HTTP 401, invalid token
            resp = webClient.get("/multiple-auth-mechanisms/mtls-jwt-lax")
                    .putHeader("Authorization", OidcConstants.BEARER_SCHEME + " " + "123")
                    .send().await()
                    .indefinitely();
            assertEquals(401, resp.statusCode());
            // HTTP 200, no token and inclusive authentication in the lax mode, therefore 200
            resp = webClient.get("/multiple-auth-mechanisms/mtls-jwt-lax").send().await().indefinitely();
            assertEquals(200, resp.statusCode());
        } finally {
            // always release the client's connections
            webClient.close();
        }
    }
@Test
public void testMtlsJwt() throws Exception {
// verifies that in LAX mode, it is permitted that not all the mechanisms need to create the identity
WebClientOptions options = createWebClientOptions(url);
WebClient webClient = WebClient.create(new io.vertx.mutiny.core.Vertx(vertx), options);
try {
// HTTP 200
HttpResponse<io.vertx.mutiny.core.buffer.Buffer> resp = webClient.get("/multiple-auth-mechanisms/mtls-jwt")
.putHeader("Authorization",
OidcConstants.BEARER_SCHEME + " " + getAccessToken("backend-service", null, "alice"))
.send().await()
.indefinitely();
assertEquals(200, resp.statusCode());
// HTTP 401, invalid token
resp = webClient.get("/multiple-auth-mechanisms/mtls-jwt")
.putHeader("Authorization", OidcConstants.BEARER_SCHEME + " " + "123")
.send().await()
.indefinitely();
assertEquals(401, resp.statusCode());
// HTTP 403, no token and inclusive authentication in the lax mode,
// but permission checker requires both JWT and mTLS
resp = webClient.get("/multiple-auth-mechanisms/mtls-jwt").send().await().indefinitely();
assertEquals(403, resp.statusCode());
} finally {
webClient.close();
}
}
@Test
public void testMtlsBasic() throws Exception {
WebClientOptions options = createWebClientOptions(url);
WebClient webClient = WebClient.create(new io.vertx.mutiny.core.Vertx(vertx), options);
try {
// HTTP 403, basic & mTLS are expected and basic is missing
HttpResponse<io.vertx.mutiny.core.buffer.Buffer> resp = webClient.get("/multiple-auth-mechanisms/mtls-basic")
.putHeader("Authorization",
OidcConstants.BEARER_SCHEME + " " + getAccessToken("backend-service", null, "alice"))
.send().await()
.indefinitely();
assertEquals(403, resp.statusCode());
// HTTP 200, basic & mTLS are expected
resp = webClient.get("/multiple-auth-mechanisms/mtls-basic")
.putHeader("Authorization",
new UsernamePasswordCredentials("Gaston", "Gaston").applyHttpChallenge(null).toHttpAuthorization())
.send().await()
.indefinitely();
assertEquals(200, resp.statusCode());
// HTTP 403, only basic but mTLS & basic are required
RestAssured
.given()
.auth().preemptive().basic("Gaston", "Gaston")
.get("/multiple-auth-mechanisms/mtls-basic")
.then()
.statusCode(403);
} finally {
webClient.close();
}
}
private String getAccessToken(String clientName, String clientSecret, String userName) {
return client.getAccessToken(userName, userName, clientName, clientSecret);
}
public static
|
OidcMtlsBasicAuthTest
|
java
|
quarkusio__quarkus
|
extensions/grpc/deployment/src/test/java/io/quarkus/grpc/deployment/GrpcServerProcessorTest.java
|
{
"start": 6941,
"end": 7321
}
|
class ____ extends NoClassAnnotationsRoot {
static final Set<String> EXPECTED = ImmutableSet.of("nonBlocking", "blocking", "transactional", "noAnnotation");
void nonBlocking() {
}
void blocking() {
}
void transactional() {
}
void noAnnotation() {
}
}
@NonBlocking
static
|
ClassAnnotationsBlocking
|
java
|
apache__flink
|
flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/JoinToMultiJoinForReorderRule.java
|
{
"start": 4254,
"end": 4445
}
|
class ____ {@link Join}, not just {@link
* LogicalJoin}.
*
* @see FilterMultiJoinMergeRule
* @see ProjectMultiJoinMergeRule
* @see CoreRules#JOIN_TO_MULTI_JOIN
*/
@Value.Enclosing
public
|
of
|
java
|
google__auto
|
value/src/main/java/com/google/auto/value/processor/AutoValueishProcessor.java
|
{
"start": 19167,
"end": 19422
}
|
class ____ have a non-private no-arg constructor",
simpleAnnotationName,
simpleAnnotationName);
}
if (type.getModifiers().contains(Modifier.FINAL)) {
errorReporter.abortWithError(
type,
"[%sFinal] @%s
|
must
|
java
|
mockito__mockito
|
mockito-core/src/main/java/org/mockito/Answers.java
|
{
"start": 1113,
"end": 3239
}
|
enum ____ implements Answer<Object> {
/**
* The default configured answer of every mock.
*
* <p>Please see the {@link org.mockito.Mockito#RETURNS_DEFAULTS} documentation for more details.</p>
*
* @see org.mockito.Mockito#RETURNS_DEFAULTS
*/
RETURNS_DEFAULTS(new GloballyConfiguredAnswer()),
/**
* An answer that returns smart-nulls.
*
* <p>Please see the {@link org.mockito.Mockito#RETURNS_SMART_NULLS} documentation for more details.</p>
*
* @see org.mockito.Mockito#RETURNS_SMART_NULLS
*/
RETURNS_SMART_NULLS(new ReturnsSmartNulls()),
/**
* An answer that returns <strong>mocks</strong> (not stubs).
*
* <p>Please see the {@link org.mockito.Mockito#RETURNS_MOCKS} documentation for more details.</p>
*
* @see org.mockito.Mockito#RETURNS_MOCKS
*/
RETURNS_MOCKS(new ReturnsMocks()),
/**
* An answer that returns <strong>deep stubs</strong> (not mocks).
*
* <p>Please see the {@link org.mockito.Mockito#RETURNS_DEEP_STUBS} documentation for more details.</p>
*
* @see org.mockito.Mockito#RETURNS_DEEP_STUBS
*/
RETURNS_DEEP_STUBS(new ReturnsDeepStubs()),
/**
* An answer that calls the real methods (used for partial mocks).
*
* <p>Please see the {@link org.mockito.Mockito#CALLS_REAL_METHODS} documentation for more details.</p>
*
* @see org.mockito.Mockito#CALLS_REAL_METHODS
*/
CALLS_REAL_METHODS(new CallsRealMethods()),
/**
* An answer that tries to return itself. This is useful for mocking {@code Builders}.
*
* <p>Please see the {@link org.mockito.Mockito#RETURNS_SELF} documentation for more details.</p>
*
* @see org.mockito.Mockito#RETURNS_SELF
*/
RETURNS_SELF(new TriesToReturnSelf());
private final Answer<Object> implementation;
Answers(Answer<Object> implementation) {
this.implementation = implementation;
}
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
return implementation.answer(invocation);
}
}
|
Answers
|
java
|
micronaut-projects__micronaut-core
|
core-processor/src/main/java/io/micronaut/inject/ast/MethodElement.java
|
{
"start": 1817,
"end": 2089
}
|
interface ____ extends MemberElement {
/**
* Returns the method annotations.
* The method will only return annotations defined on a method or inherited from the super methods,
* while {@link #getAnnotationMetadata()} for a method combines the
|
MethodElement
|
java
|
spring-projects__spring-framework
|
spring-core/src/main/java/org/springframework/cglib/SpringCglibInfo.java
|
{
"start": 675,
"end": 980
}
|
class ____ to ensure that the {@code org.springframework.cglib}
* package is processed during javadoc generation.
*
* <p>See <a href="package-summary.html">package-level javadocs</a> for more
* information on {@code org.springframework.cglib}.
*
* @author Chris Beams
* @since 3.2
*/
public final
|
used
|
java
|
grpc__grpc-java
|
core/src/test/java/io/grpc/internal/AutoConfiguredLoadBalancerFactoryTest.java
|
{
"start": 3073,
"end": 27565
}
|
class ____ {
private static final LoadBalancerRegistry defaultRegistry =
LoadBalancerRegistry.getDefaultRegistry();
private final AutoConfiguredLoadBalancerFactory lbf =
new AutoConfiguredLoadBalancerFactory(GrpcUtil.DEFAULT_LB_POLICY);
private final ChannelLogger channelLogger = mock(ChannelLogger.class);
private final LoadBalancer testLbBalancer = mock(LoadBalancer.class);
private final LoadBalancer testLbBalancer2 = mock(LoadBalancer.class);
private final AtomicReference<ConfigOrError> nextParsedConfigOrError =
new AtomicReference<>(ConfigOrError.fromConfig("default"));
private final AtomicReference<ConfigOrError> nextParsedConfigOrError2 =
new AtomicReference<>(ConfigOrError.fromConfig("default2"));
private final FakeLoadBalancerProvider testLbBalancerProvider =
mock(FakeLoadBalancerProvider.class,
delegatesTo(
new FakeLoadBalancerProvider("test_lb", testLbBalancer, nextParsedConfigOrError)));
private final FakeLoadBalancerProvider testLbBalancerProvider2 =
mock(FakeLoadBalancerProvider.class,
delegatesTo(
new FakeLoadBalancerProvider("test_lb2", testLbBalancer2, nextParsedConfigOrError2)));
private final Class<? extends LoadBalancer> pfLbClass =
PickFirstLoadBalancerProvider.isEnabledNewPickFirst()
? PickFirstLeafLoadBalancer.class
: PickFirstLoadBalancer.class;
@Before
public void setUp() {
when(testLbBalancer.acceptResolvedAddresses(isA(ResolvedAddresses.class))).thenReturn(
Status.OK);
when(testLbBalancer2.acceptResolvedAddresses(isA(ResolvedAddresses.class))).thenReturn(
Status.OK);
defaultRegistry.register(testLbBalancerProvider);
defaultRegistry.register(testLbBalancerProvider2);
}
@After
public void tearDown() {
defaultRegistry.deregister(testLbBalancerProvider);
defaultRegistry.deregister(testLbBalancerProvider2);
}
@Test
public void newLoadBalancer_isAuto() {
AutoConfiguredLoadBalancer lb = lbf.newLoadBalancer(new TestHelper());
assertThat(lb).isInstanceOf(AutoConfiguredLoadBalancer.class);
}
@Test
public void defaultIsPickFirst() {
AutoConfiguredLoadBalancer lb = lbf.newLoadBalancer(new TestHelper());
assertThat(lb.getDelegateProvider()).isInstanceOf(PickFirstLoadBalancerProvider.class);
assertThat(lb.getDelegate().getClass().getName()).contains("PickFirst");
}
@Test
public void defaultIsConfigurable() {
AutoConfiguredLoadBalancer lb = new AutoConfiguredLoadBalancerFactory("test_lb")
.newLoadBalancer(new TestHelper());
assertThat(lb.getDelegateProvider()).isSameInstanceAs(testLbBalancerProvider);
assertThat(lb.getDelegate()).isSameInstanceAs(testLbBalancer);
}
@SuppressWarnings("deprecation")
@Test
public void forwardsCalls() {
AutoConfiguredLoadBalancer lb = lbf.newLoadBalancer(new TestHelper());
final AtomicInteger calls = new AtomicInteger();
TestLoadBalancer testlb = new TestLoadBalancer() {
@Override
public void handleNameResolutionError(Status error) {
calls.getAndSet(1);
}
@Override
public void handleSubchannelState(Subchannel subchannel, ConnectivityStateInfo stateInfo) {
calls.getAndSet(2);
}
@Override
public void shutdown() {
calls.getAndSet(3);
}
};
lb.setDelegate(testlb);
lb.handleNameResolutionError(Status.RESOURCE_EXHAUSTED);
assertThat(calls.getAndSet(0)).isEqualTo(1);
lb.handleSubchannelState(null, null);
assertThat(calls.getAndSet(0)).isEqualTo(2);
lb.shutdown();
assertThat(calls.getAndSet(0)).isEqualTo(3);
}
@Test
public void acceptResolvedAddresses_keepOldBalancer() {
final List<EquivalentAddressGroup> servers =
Collections.singletonList(new EquivalentAddressGroup(new SocketAddress(){}));
Helper helper = new TestHelper() {
@Override
public Subchannel createSubchannel(CreateSubchannelArgs args) {
assertThat(args.getAddresses()).isEqualTo(servers);
return new TestSubchannel(args);
}
};
AutoConfiguredLoadBalancer lb = lbf.newLoadBalancer(helper);
LoadBalancer oldDelegate = lb.getDelegate();
Status addressAcceptanceStatus = lb.tryAcceptResolvedAddresses(
ResolvedAddresses.newBuilder()
.setAddresses(servers)
.setAttributes(Attributes.EMPTY)
.setLoadBalancingPolicyConfig(null)
.build());
assertThat(addressAcceptanceStatus.isOk()).isTrue();
assertThat(lb.getDelegate()).isSameInstanceAs(oldDelegate);
}
@Test
public void acceptResolvedAddresses_shutsDownOldBalancer() throws Exception {
Map<String, ?> serviceConfig =
parseConfig("{\"loadBalancingConfig\": [ {\"round_robin\": { } } ] }");
ConfigOrError lbConfigs = lbf.parseLoadBalancerPolicy(serviceConfig);
final List<EquivalentAddressGroup> servers =
Collections.singletonList(new EquivalentAddressGroup(new SocketAddress(){}));
Helper helper = new TestHelper() {
@Override
public Subchannel createSubchannel(CreateSubchannelArgs args) {
assertThat(args.getAddresses()).isEqualTo(servers);
return new TestSubchannel(args);
}
};
AutoConfiguredLoadBalancer lb = lbf.newLoadBalancer(helper);
final AtomicBoolean shutdown = new AtomicBoolean();
TestLoadBalancer testlb = new TestLoadBalancer() {
@Override
public void handleNameResolutionError(Status error) {
// noop
}
@Override
public void shutdown() {
shutdown.set(true);
}
};
lb.setDelegate(testlb);
Status addressesAcceptanceStatus = lb.tryAcceptResolvedAddresses(
ResolvedAddresses.newBuilder()
.setAddresses(servers)
.setLoadBalancingPolicyConfig(lbConfigs.getConfig())
.build());
assertThat(addressesAcceptanceStatus.isOk()).isTrue();
assertThat(lb.getDelegateProvider().getClass().getName()).isEqualTo(
"io.grpc.util.SecretRoundRobinLoadBalancerProvider$Provider");
assertTrue(shutdown.get());
}
@Test
@SuppressWarnings("unchecked")
public void acceptResolvedAddresses_propagateLbConfigToDelegate() throws Exception {
Map<String, ?> rawServiceConfig =
parseConfig("{\"loadBalancingConfig\": [ {\"test_lb\": { \"setting1\": \"high\" } } ] }");
ConfigOrError lbConfigs = lbf.parseLoadBalancerPolicy(rawServiceConfig);
assertThat(lbConfigs.getConfig()).isNotNull();
final List<EquivalentAddressGroup> servers =
Collections.singletonList(new EquivalentAddressGroup(new SocketAddress(){}));
Helper helper = new TestHelper();
AutoConfiguredLoadBalancer lb = lbf.newLoadBalancer(helper);
Status addressesAcceptanceStatus = lb.tryAcceptResolvedAddresses(
ResolvedAddresses.newBuilder()
.setAddresses(servers)
.setLoadBalancingPolicyConfig(lbConfigs.getConfig())
.build());
verify(testLbBalancerProvider).newLoadBalancer(same(helper));
assertThat(addressesAcceptanceStatus.isOk()).isTrue();
assertThat(lb.getDelegate()).isSameInstanceAs(testLbBalancer);
ArgumentCaptor<ResolvedAddresses> resultCaptor =
ArgumentCaptor.forClass(ResolvedAddresses.class);
verify(testLbBalancer).acceptResolvedAddresses(resultCaptor.capture());
assertThat(resultCaptor.getValue().getAddresses()).containsExactlyElementsIn(servers).inOrder();
ArgumentCaptor<Map<String, ?>> lbConfigCaptor = ArgumentCaptor.forClass(Map.class);
verify(testLbBalancerProvider).parseLoadBalancingPolicyConfig(lbConfigCaptor.capture());
assertThat(lbConfigCaptor.getValue()).containsExactly("setting1", "high");
verifyNoMoreInteractions(testLbBalancer);
rawServiceConfig =
parseConfig("{\"loadBalancingConfig\": [ {\"test_lb\": { \"setting1\": \"low\" } } ] }");
lbConfigs = lbf.parseLoadBalancerPolicy(rawServiceConfig);
addressesAcceptanceStatus = lb.tryAcceptResolvedAddresses(
ResolvedAddresses.newBuilder()
.setAddresses(servers)
.setLoadBalancingPolicyConfig(lbConfigs.getConfig())
.build());
resultCaptor =
ArgumentCaptor.forClass(ResolvedAddresses.class);
verify(testLbBalancer, times(2)).acceptResolvedAddresses(resultCaptor.capture());
assertThat(addressesAcceptanceStatus.isOk()).isTrue();
assertThat(resultCaptor.getValue().getAddresses()).containsExactlyElementsIn(servers).inOrder();
verify(testLbBalancerProvider, times(2))
.parseLoadBalancingPolicyConfig(lbConfigCaptor.capture());
assertThat(lbConfigCaptor.getValue()).containsExactly("setting1", "low");
// Service config didn't change policy, thus the delegateLb is not swapped
verifyNoMoreInteractions(testLbBalancer);
verify(testLbBalancerProvider).newLoadBalancer(any(Helper.class));
}
@Test
public void acceptResolvedAddresses_propagateAddrsToDelegate() throws Exception {
Map<String, ?> rawServiceConfig =
parseConfig("{\"loadBalancingConfig\": [ {\"test_lb\": { \"setting1\": \"high\" } } ] }");
ConfigOrError lbConfigs = lbf.parseLoadBalancerPolicy(rawServiceConfig);
assertThat(lbConfigs.getConfig()).isNotNull();
Helper helper = new TestHelper();
AutoConfiguredLoadBalancer lb = lbf.newLoadBalancer(helper);
List<EquivalentAddressGroup> servers =
Collections.singletonList(new EquivalentAddressGroup(new InetSocketAddress(8080){}));
Status addressesAcceptanceStatus = lb.tryAcceptResolvedAddresses(
ResolvedAddresses.newBuilder()
.setAddresses(servers)
.setLoadBalancingPolicyConfig(lbConfigs.getConfig())
.build());
verify(testLbBalancerProvider).newLoadBalancer(same(helper));
assertThat(addressesAcceptanceStatus.isOk()).isTrue();
assertThat(lb.getDelegate()).isSameInstanceAs(testLbBalancer);
ArgumentCaptor<ResolvedAddresses> resultCaptor =
ArgumentCaptor.forClass(ResolvedAddresses.class);
verify(testLbBalancer).acceptResolvedAddresses(resultCaptor.capture());
assertThat(resultCaptor.getValue().getAddresses()).containsExactlyElementsIn(servers).inOrder();
servers =
Collections.singletonList(new EquivalentAddressGroup(new InetSocketAddress(9090){}));
addressesAcceptanceStatus = lb.tryAcceptResolvedAddresses(
ResolvedAddresses.newBuilder()
.setAddresses(servers)
.setLoadBalancingPolicyConfig(lbConfigs.getConfig())
.build());
assertThat(addressesAcceptanceStatus.isOk()).isTrue();
verify(testLbBalancer, times(2)).acceptResolvedAddresses(resultCaptor.capture());
assertThat(resultCaptor.getValue().getAddresses()).containsExactlyElementsIn(servers).inOrder();
}
@Test
public void acceptResolvedAddresses_delegateDoNotAcceptEmptyAddressList_nothing()
throws Exception {
// The test LB will NOT accept the addresses we give them.
when(testLbBalancer.acceptResolvedAddresses(isA(ResolvedAddresses.class))).thenReturn(
Status.UNAVAILABLE);
Helper helper = new TestHelper();
AutoConfiguredLoadBalancer lb = lbf.newLoadBalancer(helper);
Map<String, ?> serviceConfig =
parseConfig("{\"loadBalancingConfig\": [ {\"test_lb\": { \"setting1\": \"high\" } } ] }");
ConfigOrError lbConfig = lbf.parseLoadBalancerPolicy(serviceConfig);
Status addressesAcceptanceStatus = lb.tryAcceptResolvedAddresses(
ResolvedAddresses.newBuilder()
.setAddresses(Collections.<EquivalentAddressGroup>emptyList())
.setLoadBalancingPolicyConfig(lbConfig.getConfig())
.build());
assertThat(addressesAcceptanceStatus.isOk()).isFalse();
assertThat(lb.getDelegate()).isSameInstanceAs(testLbBalancer);
}
@Test
public void acceptResolvedAddresses_delegateAcceptsEmptyAddressList()
throws Exception {
Helper helper = new TestHelper();
AutoConfiguredLoadBalancer lb = lbf.newLoadBalancer(helper);
Map<String, ?> rawServiceConfig =
parseConfig("{\"loadBalancingConfig\": [ {\"test_lb2\": { \"setting1\": \"high\" } } ] }");
ConfigOrError lbConfigs =
lbf.parseLoadBalancerPolicy(rawServiceConfig);
Status addressesAcceptanceStatus = lb.tryAcceptResolvedAddresses(
ResolvedAddresses.newBuilder()
.setAddresses(Collections.<EquivalentAddressGroup>emptyList())
.setLoadBalancingPolicyConfig(lbConfigs.getConfig())
.build());
assertThat(addressesAcceptanceStatus.isOk()).isTrue();
assertThat(lb.getDelegate()).isSameInstanceAs(testLbBalancer2);
ArgumentCaptor<ResolvedAddresses> resultCaptor =
ArgumentCaptor.forClass(ResolvedAddresses.class);
verify(testLbBalancer2).acceptResolvedAddresses(resultCaptor.capture());
assertThat(resultCaptor.getValue().getAddresses()).isEmpty();
assertThat(resultCaptor.getValue().getLoadBalancingPolicyConfig())
.isEqualTo(nextParsedConfigOrError2.get().getConfig());
}
@Test
public void acceptResolvedAddresses_useSelectedLbPolicy() throws Exception {
Map<String, ?> rawServiceConfig =
parseConfig("{\"loadBalancingConfig\": [{\"round_robin\": {}}]}");
ConfigOrError lbConfigs = lbf.parseLoadBalancerPolicy(rawServiceConfig);
assertThat(lbConfigs.getConfig()).isNotNull();
assertThat(((PolicySelection) lbConfigs.getConfig()).provider.getClass().getName())
.isEqualTo("io.grpc.util.SecretRoundRobinLoadBalancerProvider$Provider");
final List<EquivalentAddressGroup> servers =
Collections.singletonList(new EquivalentAddressGroup(new SocketAddress(){}));
Helper helper = new TestHelper() {
@Override
public Subchannel createSubchannel(CreateSubchannelArgs args) {
assertThat(args.getAddresses()).isEqualTo(servers);
return new TestSubchannel(args);
}
};
AutoConfiguredLoadBalancer lb = lbf.newLoadBalancer(helper);
Status addressesAcceptanceStatus = lb.tryAcceptResolvedAddresses(
ResolvedAddresses.newBuilder()
.setAddresses(servers)
.setLoadBalancingPolicyConfig(lbConfigs.getConfig())
.build());
assertThat(addressesAcceptanceStatus.isOk()).isTrue();
assertThat(lb.getDelegate().getClass().getName())
.isEqualTo("io.grpc.util.RoundRobinLoadBalancer");
}
@Test
public void acceptResolvedAddresses_noLbPolicySelected_defaultToPickFirst() {
final List<EquivalentAddressGroup> servers =
Collections.singletonList(new EquivalentAddressGroup(new SocketAddress(){}));
Helper helper = new TestHelper() {
@Override
public Subchannel createSubchannel(CreateSubchannelArgs args) {
assertThat(args.getAddresses()).isEqualTo(servers);
return new TestSubchannel(args);
}
};
AutoConfiguredLoadBalancer lb = lbf.newLoadBalancer(helper);
Status addressesAcceptanceStatus = lb.tryAcceptResolvedAddresses(
ResolvedAddresses.newBuilder()
.setAddresses(servers)
.setLoadBalancingPolicyConfig(null)
.build());
assertThat(addressesAcceptanceStatus.isOk()).isTrue();
assertThat(lb.getDelegate()).isInstanceOf(pfLbClass);
}
@Test
public void acceptResolvedAddresses_noLbPolicySelected_defaultToCustomDefault() {
AutoConfiguredLoadBalancer lb = new AutoConfiguredLoadBalancerFactory("test_lb")
.newLoadBalancer(new TestHelper());
List<EquivalentAddressGroup> servers =
Collections.singletonList(new EquivalentAddressGroup(new SocketAddress(){}));
Status addressesAcceptanceStatus = lb.tryAcceptResolvedAddresses(
ResolvedAddresses.newBuilder()
.setAddresses(servers)
.setLoadBalancingPolicyConfig(null)
.build());
assertThat(addressesAcceptanceStatus.isOk()).isTrue();
assertThat(lb.getDelegate()).isSameInstanceAs(testLbBalancer);
}
@Test
public void channelTracing_lbPolicyChanged() throws Exception {
List<EquivalentAddressGroup> servers =
Collections.singletonList(new EquivalentAddressGroup(new SocketAddress(){}));
Helper helper = new TestHelper() {
@Override
public Subchannel createSubchannel(CreateSubchannelArgs args) {
return new TestSubchannel(args);
}
};
AutoConfiguredLoadBalancer lb =
new AutoConfiguredLoadBalancerFactory(GrpcUtil.DEFAULT_LB_POLICY).newLoadBalancer(helper);
Status addressesAcceptanceStatus = lb.tryAcceptResolvedAddresses(
ResolvedAddresses.newBuilder()
.setAddresses(servers)
.setAttributes(Attributes.EMPTY)
.build());
assertThat(addressesAcceptanceStatus.isOk()).isTrue();
verifyNoMoreInteractions(channelLogger);
ConfigOrError testLbParsedConfig = ConfigOrError.fromConfig("foo");
nextParsedConfigOrError.set(testLbParsedConfig);
Map<String, ?> serviceConfig =
parseConfig("{\"loadBalancingConfig\": [ {\"test_lb\": { } } ] }");
ConfigOrError lbConfigs = lbf.parseLoadBalancerPolicy(serviceConfig);
addressesAcceptanceStatus = lb.tryAcceptResolvedAddresses(
ResolvedAddresses.newBuilder()
.setAddresses(servers)
.setLoadBalancingPolicyConfig(lbConfigs.getConfig())
.build());
assertThat(addressesAcceptanceStatus.isOk()).isTrue();
verify(channelLogger).log(
eq(ChannelLogLevel.INFO),
eq("Load balancer changed from {0} to {1}"),
eq(pfLbClass.getSimpleName()),
eq(testLbBalancer.getClass().getSimpleName()));
verify(channelLogger).log(
eq(ChannelLogLevel.DEBUG),
eq("Load-balancing config: {0}"),
eq(testLbParsedConfig.getConfig()));
verifyNoMoreInteractions(channelLogger);
testLbParsedConfig = ConfigOrError.fromConfig("bar");
nextParsedConfigOrError.set(testLbParsedConfig);
serviceConfig = parseConfig("{\"loadBalancingConfig\": [ {\"test_lb\": { } } ] }");
lbConfigs = lbf.parseLoadBalancerPolicy(serviceConfig);
addressesAcceptanceStatus = lb.tryAcceptResolvedAddresses(
ResolvedAddresses.newBuilder()
.setAddresses(servers)
.setLoadBalancingPolicyConfig(lbConfigs.getConfig())
.build());
assertThat(addressesAcceptanceStatus.isOk()).isTrue();
verify(channelLogger).log(
eq(ChannelLogLevel.DEBUG),
eq("Load-balancing config: {0}"),
eq(testLbParsedConfig.getConfig()));
verifyNoMoreInteractions(channelLogger);
}
@Test
public void parseLoadBalancerConfig_failedOnUnknown() throws Exception {
Map<String, ?> serviceConfig =
parseConfig("{\"loadBalancingConfig\": [ {\"magic_balancer\": {} } ] }");
ConfigOrError parsed = lbf.parseLoadBalancerPolicy(serviceConfig);
assertThat(parsed.getError()).isNotNull();
assertThat(parsed.getError().getDescription())
.isEqualTo("None of [magic_balancer] specified by Service Config are available.");
}
@Test
public void parseLoadBalancerPolicy_failedOnUnknown() throws Exception {
Map<String, ?> serviceConfig =
parseConfig("{\"loadBalancingPolicy\": \"magic_balancer\"}");
ConfigOrError parsed = lbf.parseLoadBalancerPolicy(serviceConfig);
assertThat(parsed.getError()).isNotNull();
assertThat(parsed.getError().getDescription())
.isEqualTo("None of [magic_balancer] specified by Service Config are available.");
}
@Test
public void parseLoadBalancerConfig_multipleValidPolicies() throws Exception {
Map<String, ?> serviceConfig =
parseConfig(
"{\"loadBalancingConfig\": ["
+ "{\"round_robin\": {}},"
+ "{\"test_lb\": {} } ] }");
ConfigOrError parsed = lbf.parseLoadBalancerPolicy(serviceConfig);
assertThat(parsed).isNotNull();
assertThat(parsed.getError()).isNull();
assertThat(parsed.getConfig()).isInstanceOf(PolicySelection.class);
assertThat(((PolicySelection) parsed.getConfig()).provider.getClass().getName())
.isEqualTo("io.grpc.util.SecretRoundRobinLoadBalancerProvider$Provider");
}
@Test
public void parseLoadBalancerConfig_policyShouldBeIgnoredIfConfigExists() throws Exception {
Map<String, ?> serviceConfig =
parseConfig(
"{\"loadBalancingConfig\": [{\"round_robin\": {} } ],"
+ "\"loadBalancingPolicy\": \"pick_first\" }");
ConfigOrError parsed = lbf.parseLoadBalancerPolicy(serviceConfig);
assertThat(parsed).isNotNull();
assertThat(parsed.getError()).isNull();
assertThat(parsed.getConfig()).isInstanceOf(PolicySelection.class);
assertThat(((PolicySelection) parsed.getConfig()).provider.getClass().getName())
.isEqualTo("io.grpc.util.SecretRoundRobinLoadBalancerProvider$Provider");
}
@Test
public void parseLoadBalancerConfig_policyShouldBeIgnoredEvenIfUnknownPolicyExists()
throws Exception {
Map<String, ?> serviceConfig =
parseConfig(
"{\"loadBalancingConfig\": [{\"magic_balancer\": {} } ],"
+ "\"loadBalancingPolicy\": \"round_robin\" }");
ConfigOrError parsed = lbf.parseLoadBalancerPolicy(serviceConfig);
assertThat(parsed.getError()).isNotNull();
assertThat(parsed.getError().getDescription())
.isEqualTo("None of [magic_balancer] specified by Service Config are available.");
}
@Test
@SuppressWarnings("unchecked")
public void parseLoadBalancerConfig_firstInvalidPolicy() throws Exception {
when(testLbBalancerProvider.parseLoadBalancingPolicyConfig(any(Map.class)))
.thenReturn(ConfigOrError.fromError(Status.UNKNOWN));
Map<String, ?> serviceConfig =
parseConfig(
"{\"loadBalancingConfig\": ["
+ "{\"test_lb\": {}},"
+ "{\"round_robin\": {} } ] }");
ConfigOrError parsed = lbf.parseLoadBalancerPolicy(serviceConfig);
assertThat(parsed).isNotNull();
assertThat(parsed.getConfig()).isNull();
assertThat(parsed.getError()).isEqualTo(Status.UNKNOWN);
}
@Test
@SuppressWarnings("unchecked")
public void parseLoadBalancerConfig_firstValidSecondInvalidPolicy() throws Exception {
when(testLbBalancerProvider.parseLoadBalancingPolicyConfig(any(Map.class)))
.thenReturn(ConfigOrError.fromError(Status.UNKNOWN));
Map<String, ?> serviceConfig =
parseConfig(
"{\"loadBalancingConfig\": ["
+ "{\"round_robin\": {}},"
+ "{\"test_lb\": {} } ] }");
ConfigOrError parsed = lbf.parseLoadBalancerPolicy(serviceConfig);
assertThat(parsed).isNotNull();
assertThat(parsed.getConfig()).isNotNull();
assertThat(((PolicySelection) parsed.getConfig()).config).isNotNull();
}
@Test
public void parseLoadBalancerConfig_someProvidesAreNotAvailable() throws Exception {
Map<String, ?> serviceConfig =
parseConfig("{\"loadBalancingConfig\": [ "
+ "{\"magic_balancer\": {} },"
+ "{\"round_robin\": {}} ] }");
ConfigOrError parsed = lbf.parseLoadBalancerPolicy(serviceConfig);
assertThat(parsed).isNotNull();
assertThat(parsed.getConfig()).isNotNull();
assertThat(((PolicySelection) parsed.getConfig()).config).isNotNull();
}
@Test
public void parseLoadBalancerConfig_lbConfigPropagated() throws Exception {
Map<String, ?> rawServiceConfig =
parseConfig(
"{\"loadBalancingConfig\": ["
+ "{\"pick_first\": {\"shuffleAddressList\": true } }"
+ "] }");
ConfigOrError parsed = lbf.parseLoadBalancerPolicy(rawServiceConfig);
assertThat(parsed).isNotNull();
assertThat(parsed.getConfig()).isNotNull();
PolicySelection policySelection = (PolicySelection) parsed.getConfig();
assertThat(policySelection.provider).isInstanceOf(PickFirstLoadBalancerProvider.class);
if (PickFirstLoadBalancerProvider.isEnabledNewPickFirst()) {
assertThat(policySelection.config).isInstanceOf(PickFirstLeafLoadBalancerConfig.class);
assertThat(((PickFirstLeafLoadBalancerConfig) policySelection.config).shuffleAddressList)
.isTrue();
} else {
assertThat(policySelection.config).isInstanceOf(PickFirstLoadBalancerConfig.class);
assertThat(((PickFirstLoadBalancerConfig) policySelection.config).shuffleAddressList)
.isTrue();
}
verifyNoInteractions(channelLogger);
}
public static
|
AutoConfiguredLoadBalancerFactoryTest
|
java
|
google__dagger
|
javatests/dagger/internal/codegen/MapBindingComponentProcessorTest.java
|
{
"start": 5975,
"end": 6518
}
|
interface ____ {}",
"}");
Source moduleFile =
CompilerTests.javaSource(
"mapkeys.MapModule",
"package mapkeys;",
"",
"import dagger.Binds;",
"import dagger.Module;",
"import dagger.Provides;",
"import dagger.multibindings.ClassKey;",
"import dagger.multibindings.IntoMap;",
"import java.util.Map;",
"import javax.inject.Provider;",
"",
"@Module",
"public
|
Inaccessible
|
java
|
hibernate__hibernate-orm
|
hibernate-jfr/src/test/java/org/hibernate/event/jfr/cache/CacheGetEventTests.java
|
{
"start": 1226,
"end": 3182
}
|
class ____ {
public JfrEvents jfrEvents = new JfrEvents();
@BeforeAll
public void setUp(SessionFactoryScope scope) {
scope.inTransaction(
session -> {
TestEntity entity = new TestEntity( 1, "name_1" );
session.persist( entity );
}
);
}
@Test
@EnableEvent(JdbcBatchExecutionEvent.NAME)
public void testCacheGetEvent(SessionFactoryScope scope) {
jfrEvents.reset();
scope.inTransaction(
session -> {
TestEntity testEntity = session.find( TestEntity.class, 1 );
List<RecordedEvent> events = jfrEvents.events()
.filter(
recordedEvent ->
{
String eventName = recordedEvent.getEventType().getName();
return eventName.equals( CacheGetEvent.NAME );
}
).toList();
assertThat( events ).hasSize( 1 );
RecordedEvent event = events.get( 0 );
assertThat( event.getEventType().getName() )
.isEqualTo( CacheGetEvent.NAME );
assertThat( event.getDuration() ).isPositive();
assertThat( event.getString( "sessionIdentifier" ) )
.isEqualTo( session.getSessionIdentifier().toString() );
assertThat( event.getString( "entityName" ) )
.isEqualTo( TestEntity.class.getName() );
assertThat( event.getBoolean( "isNaturalId" ) ).isFalse();
assertThat( event.getBoolean( "hit" ) ).isTrue();
assertThat( event.getString( "regionName" ) ).isNotNull();
}
);
}
@Test
@EnableEvent(JdbcBatchExecutionEvent.NAME)
public void testCacheGetEventNoFired(SessionFactoryScope scope) {
jfrEvents.reset();
scope.inTransaction(
session -> {
}
);
List<RecordedEvent> events = jfrEvents.events()
.filter(
recordedEvent ->
{
String eventName = recordedEvent.getEventType().getName();
return eventName.equals( CacheGetEvent.NAME );
}
).toList();
assertThat( events ).hasSize( 0 );
}
@Entity(name = "TestEntity")
@Cacheable
public static
|
CacheGetEventTests
|
java
|
quarkusio__quarkus
|
extensions/vertx-http/deployment/src/test/java/io/quarkus/vertx/http/shutdown/ShutdownTimeoutDefaultExecutorTest.java
|
{
"start": 1256,
"end": 2983
}
|
class ____ {
protected static final int HANDLER_WAIT_TIME = 50000;
@RegisterExtension
static QuarkusUnitTest test = new QuarkusUnitTest()
.setAllowTestClassOutsideDeployment(true)
.setArchiveProducer(new Supplier<>() {
@Override
public JavaArchive get() {
return ShrinkWrap.create(JavaArchive.class)
.addClasses(ShutdownTimeoutDefaultExecutorTest.class)
.addAsResource(new StringAsset(
"quarkus.shutdown.timeout=PT0.1S\nquarkus.thread-pool.shutdown-check-interval=PT0.2S"),
"application.properties");
}
})
.setAfterUndeployListener(new Runnable() {
@Override
public void run() {
try {
ShutdownTimer.socket.close();
} catch (IOException e) {
e.printStackTrace();
}
Assertions.assertTrue(System.currentTimeMillis() - ShutdownTimer.requestStarted < HANDLER_WAIT_TIME);
}
});
@TestHTTPResource
URL url;
@Test
public void testShutdownBehaviour() throws Exception {
ShutdownTimer.requestStarted = System.currentTimeMillis();
ShutdownTimer.socket = new Socket(url.getHost(), url.getPort());
ShutdownTimer.socket.getOutputStream()
.write("GET /shutdown HTTP/1.1\r\nHost: localhost\r\n\r\n".getBytes(StandardCharsets.UTF_8));
Thread.sleep(1000);
}
@ApplicationScoped
public static
|
ShutdownTimeoutDefaultExecutorTest
|
java
|
google__guava
|
android/guava/src/com/google/common/util/concurrent/FuturesGetChecked.java
|
{
"start": 10254,
"end": 10441
}
|
class ____ an accessible "
+ "constructor whose parameters (if any) must be of type String and/or Throwable",
exceptionClass);
}
private FuturesGetChecked() {}
}
|
with
|
java
|
apache__kafka
|
clients/src/main/java/org/apache/kafka/clients/admin/internals/AdminApiDriver.java
|
{
"start": 17638,
"end": 18631
}
|
class ____ {
private Optional<RequestSpec<K>> inflightRequest = Optional.empty();
private int tries = 0;
private long nextAllowedRetryMs = 0;
boolean hasInflight() {
return inflightRequest.isPresent();
}
public void clearInflight(long currentTimeMs) {
this.inflightRequest = Optional.empty();
this.nextAllowedRetryMs = currentTimeMs;
}
public void clearInflightAndBackoff(long currentTimeMs) {
clearInflight(currentTimeMs + retryBackoff.backoff(tries >= 1 ? tries - 1 : 0));
}
public void setInflight(RequestSpec<K> spec) {
this.inflightRequest = Optional.of(spec);
this.tries++;
}
}
/**
* Completion of the Lookup stage results in a destination broker to send the
* fulfillment request to. Each destination broker in the Fulfillment stage
* gets its own request scope.
*/
private static
|
RequestState
|
java
|
apache__hadoop
|
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/SimpleEntityWriterV1.java
|
{
"start": 1547,
"end": 3986
}
|
class ____
extends org.apache.hadoop.mapreduce.Mapper
<IntWritable, IntWritable, Writable, Writable>
implements SimpleEntityWriterConstants {
private static final Logger LOG =
LoggerFactory.getLogger(SimpleEntityWriterV1.class);
public void map(IntWritable key, IntWritable val, Context context)
throws IOException {
TimelineClient tlc = TimelineClient.createTimelineClient();
Configuration conf = context.getConfiguration();
final int kbs = conf.getInt(KBS_SENT, KBS_SENT_DEFAULT);
long totalTime = 0;
final int testtimes = conf.getInt(TEST_TIMES, TEST_TIMES_DEFAULT);
final Random rand = new Random();
final TaskAttemptID taskAttemptId = context.getTaskAttemptID();
final char[] payLoad = new char[kbs * 1024];
for (int i = 0; i < testtimes; i++) {
// Generate a fixed length random payload
for (int xx = 0; xx < kbs * 1024; xx++) {
int alphaNumIdx =
rand.nextInt(ALPHA_NUMS.length);
payLoad[xx] = ALPHA_NUMS[alphaNumIdx];
}
String entId = taskAttemptId + "_" + Integer.toString(i);
final TimelineEntity entity = new TimelineEntity();
entity.setEntityId(entId);
entity.setEntityType("FOO_ATTEMPT");
entity.addOtherInfo("PERF_TEST", payLoad);
// add an event
TimelineEvent event = new TimelineEvent();
event.setTimestamp(System.currentTimeMillis());
event.setEventType("foo_event");
entity.addEvent(event);
// use the current user for this purpose
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
long startWrite = System.nanoTime();
try {
tlc.putEntities(entity);
} catch (Exception e) {
context.getCounter(PerfCounters.TIMELINE_SERVICE_WRITE_FAILURES).
increment(1);
LOG.error("writing to the timeline service failed", e);
}
long endWrite = System.nanoTime();
totalTime += TimeUnit.NANOSECONDS.toMillis(endWrite-startWrite);
}
LOG.info("wrote " + testtimes + " entities (" + kbs*testtimes +
" kB) in " + totalTime + " ms");
context.getCounter(PerfCounters.TIMELINE_SERVICE_WRITE_TIME).
increment(totalTime);
context.getCounter(PerfCounters.TIMELINE_SERVICE_WRITE_COUNTER).
increment(testtimes);
context.getCounter(PerfCounters.TIMELINE_SERVICE_WRITE_KBS).
increment(kbs*testtimes);
}
}
|
SimpleEntityWriterV1
|
java
|
spring-projects__spring-framework
|
spring-webmvc/src/main/java/org/springframework/web/servlet/tags/form/SelectedValueComparator.java
|
{
"start": 1002,
"end": 2657
}
|
class ____ testing whether a candidate value matches a {@link BindStatus#getValue data bound value}.
* Eagerly attempts to prove a comparison through a number of avenues to deal with issues such as instance
* inequality, logical (String-representation-based) equality and {@link PropertyEditor}-based comparison.
*
* <p>Full support is provided for comparing arrays, {@link Collection Collections} and {@link Map Maps}.
*
* <p><h1><a name="equality-contract">Equality Contract</a></h1>
* For single-valued objects equality is first tested using standard {@link Object#equals Java equality}. As
* such, user code should endeavor to implement {@link Object#equals} to speed up the comparison process. If
* {@link Object#equals} returns {@code false} then an attempt is made at an
* {@link #exhaustiveCompare exhaustive comparison} with the aim being to <strong>prove</strong> equality rather
* than disprove it.
*
* <p>Next, an attempt is made to compare the {@code String} representations of both the candidate and bound
* values. This may result in {@code true} in a number of cases due to the fact both values will be represented
* as {@code Strings} when shown to the user.
*
* <p>Next, if the candidate value is a {@code String}, an attempt is made to compare the bound value to
* result of applying the corresponding {@link PropertyEditor} to the candidate. This comparison may be
* executed twice, once against the direct {@code String} instances, and then against the {@code String}
* representations if the first comparison results in {@code false}.
*
* @author Rob Harrop
* @author Juergen Hoeller
* @since 2.0
*/
abstract
|
for
|
java
|
spring-projects__spring-framework
|
spring-context/src/main/java/org/springframework/context/annotation/ConfigurationClassParser.java
|
{
"start": 36492,
"end": 37012
}
|
class ____ {
private final ConfigurationClass configurationClass;
private final DeferredImportSelector importSelector;
DeferredImportSelectorHolder(ConfigurationClass configClass, DeferredImportSelector selector) {
this.configurationClass = configClass;
this.importSelector = selector;
}
ConfigurationClass getConfigurationClass() {
return this.configurationClass;
}
DeferredImportSelector getImportSelector() {
return this.importSelector;
}
}
private static
|
DeferredImportSelectorHolder
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-gcp/src/test/java/org/apache/hadoop/fs/gs/contract/ITestGoogleContractContentSummary.java
|
{
"start": 1032,
"end": 1240
}
|
class ____ extends AbstractContractContentSummaryTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new GoogleContract(conf);
}
}
|
ITestGoogleContractContentSummary
|
java
|
apache__camel
|
components/camel-aws/camel-aws-xray/src/main/java/org/apache/camel/component/aws/xray/decorators/internal/DisruptorSegmentDecorator.java
|
{
"start": 876,
"end": 1037
}
|
class ____ extends AbstractInternalSegmentDecorator {
@Override
public String getComponent() {
return "disruptor";
}
}
|
DisruptorSegmentDecorator
|
java
|
square__retrofit
|
retrofit-adapters/rxjava3/src/test/java/retrofit2/adapter/rxjava3/MaybeThrowingTest.java
|
{
"start": 1700,
"end": 8583
}
|
interface ____ {
@GET("/")
Maybe<String> body();
@GET("/")
Maybe<Response<String>> response();
@GET("/")
Maybe<Result<String>> result();
}
private Service service;
@Before
public void setUp() {
Retrofit retrofit =
new Retrofit.Builder()
.baseUrl(server.url("/"))
.addConverterFactory(new StringConverterFactory())
.addCallAdapterFactory(RxJava3CallAdapterFactory.createSynchronous())
.build();
service = retrofit.create(Service.class);
}
@Test
public void bodyThrowingInOnSuccessDeliveredToPlugin() {
server.enqueue(new MockResponse());
final AtomicReference<Throwable> throwableRef = new AtomicReference<>();
RxJavaPlugins.setErrorHandler(
throwable -> {
if (!throwableRef.compareAndSet(null, throwable)) {
throw Exceptions.propagate(throwable);
}
});
RecordingMaybeObserver<String> observer = subscriberRule.create();
final RuntimeException e = new RuntimeException();
service
.body()
.subscribe(
new ForwardingObserver<String>(observer) {
@Override
public void onSuccess(String value) {
throw e;
}
});
assertThat(throwableRef.get()).hasCauseThat().isSameInstanceAs(e);
}
@Test
public void bodyThrowingInOnErrorDeliveredToPlugin() {
server.enqueue(new MockResponse().setResponseCode(404));
final AtomicReference<Throwable> throwableRef = new AtomicReference<>();
RxJavaPlugins.setErrorHandler(
throwable -> {
if (!throwableRef.compareAndSet(null, throwable)) {
throw Exceptions.propagate(throwable);
}
});
RecordingMaybeObserver<String> observer = subscriberRule.create();
final AtomicReference<Throwable> errorRef = new AtomicReference<>();
final RuntimeException e = new RuntimeException();
service
.body()
.subscribe(
new ForwardingObserver<String>(observer) {
@Override
public void onError(Throwable throwable) {
if (!errorRef.compareAndSet(null, throwable)) {
throw Exceptions.propagate(throwable);
}
throw e;
}
});
//noinspection ThrowableResultOfMethodCallIgnored
CompositeException composite = (CompositeException) throwableRef.get();
assertThat(composite.getExceptions()).containsExactly(errorRef.get(), e);
}
@Test
public void responseThrowingInOnSuccessDeliveredToPlugin() {
server.enqueue(new MockResponse());
final AtomicReference<Throwable> throwableRef = new AtomicReference<>();
RxJavaPlugins.setErrorHandler(
throwable -> {
if (!throwableRef.compareAndSet(null, throwable)) {
throw Exceptions.propagate(throwable);
}
});
RecordingMaybeObserver<Response<String>> observer = subscriberRule.create();
final RuntimeException e = new RuntimeException();
service
.response()
.subscribe(
new ForwardingObserver<Response<String>>(observer) {
@Override
public void onSuccess(Response<String> value) {
throw e;
}
});
assertThat(throwableRef.get()).hasCauseThat().isSameInstanceAs(e);
}
@Test
public void responseThrowingInOnErrorDeliveredToPlugin() {
server.enqueue(new MockResponse().setSocketPolicy(DISCONNECT_AFTER_REQUEST));
final AtomicReference<Throwable> throwableRef = new AtomicReference<>();
RxJavaPlugins.setErrorHandler(
throwable -> {
if (!throwableRef.compareAndSet(null, throwable)) {
throw Exceptions.propagate(throwable);
}
});
RecordingMaybeObserver<Response<String>> observer = subscriberRule.create();
final AtomicReference<Throwable> errorRef = new AtomicReference<>();
final RuntimeException e = new RuntimeException();
service
.response()
.subscribe(
new ForwardingObserver<Response<String>>(observer) {
@Override
public void onError(Throwable throwable) {
if (!errorRef.compareAndSet(null, throwable)) {
throw Exceptions.propagate(throwable);
}
throw e;
}
});
//noinspection ThrowableResultOfMethodCallIgnored
CompositeException composite = (CompositeException) throwableRef.get();
assertThat(composite.getExceptions()).containsExactly(errorRef.get(), e);
}
@Test
public void resultThrowingInOnSuccessDeliveredToPlugin() {
server.enqueue(new MockResponse());
final AtomicReference<Throwable> throwableRef = new AtomicReference<>();
RxJavaPlugins.setErrorHandler(
throwable -> {
if (!throwableRef.compareAndSet(null, throwable)) {
throw Exceptions.propagate(throwable);
}
});
RecordingMaybeObserver<Result<String>> observer = subscriberRule.create();
final RuntimeException e = new RuntimeException();
service
.result()
.subscribe(
new ForwardingObserver<Result<String>>(observer) {
@Override
public void onSuccess(Result<String> value) {
throw e;
}
});
assertThat(throwableRef.get()).hasCauseThat().isSameInstanceAs(e);
}
@Ignore("Single's contract is onNext|onError so we have no way of triggering this case")
@Test
public void resultThrowingInOnErrorDeliveredToPlugin() {
server.enqueue(new MockResponse());
final AtomicReference<Throwable> throwableRef = new AtomicReference<>();
RxJavaPlugins.setErrorHandler(
throwable -> {
if (!throwableRef.compareAndSet(null, throwable)) {
throw Exceptions.propagate(throwable);
}
});
RecordingMaybeObserver<Result<String>> observer = subscriberRule.create();
final RuntimeException first = new RuntimeException();
final RuntimeException second = new RuntimeException();
service
.result()
.subscribe(
new ForwardingObserver<Result<String>>(observer) {
@Override
public void onSuccess(Result<String> value) {
// The only way to trigger onError for Result is if onSuccess throws.
throw first;
}
@Override
public void onError(Throwable throwable) {
throw second;
}
});
//noinspection ThrowableResultOfMethodCallIgnored
CompositeException composite = (CompositeException) throwableRef.get();
assertThat(composite.getExceptions()).containsExactly(first, second);
}
private abstract static
|
Service
|
java
|
google__auto
|
value/src/main/java/com/google/auto/value/processor/BuilderMethodClassifier.java
|
{
"start": 15274,
"end": 25903
}
|
class ____ a property called {@code foos} with a type whose builder can be made with an
* argument of the given type.
* </ul>
*/
private void classifyMethodOneArg(ExecutableElement method) {
if (classifyPropertyBuilderOneArg(method)) {
return;
}
String methodName = method.getSimpleName().toString();
ImmutableMap<String, E> propertyElements = propertyElements();
String propertyName = null;
E propertyElement = propertyElements.get(methodName);
Multimap<String, PropertySetter> propertyNameToSetters = null;
if (propertyElement != null) {
propertyNameToSetters = propertyNameToUnprefixedSetters;
propertyName = methodName;
} else if (methodName.startsWith("set") && methodName.length() > 3) {
propertyNameToSetters = propertyNameToPrefixedSetters;
propertyName = PropertyNames.decapitalizeLikeJavaBeans(methodName.substring(3));
propertyElement = propertyElements.get(propertyName);
if (propertyElement == null) {
// If our property is defined by a getter called getOAuth() then it is called "OAuth"
// because of JavaBeans rules. Therefore we want JavaBeans rules to be used for the setter
// too, so that you can write setOAuth(x). Meanwhile if the property is defined by a getter
// called oAuth() then it is called "oAuth", but you would still expect to be able to set it
// using setOAuth(x). Hence the second try using a decapitalize method without the quirky
// two-leading-capitals rule.
propertyName = PropertyNames.decapitalizeNormally(methodName.substring(3));
propertyElement = propertyElements.get(propertyName);
}
} else {
// We might also have an unprefixed setter, so the getter is called OAuth() or getOAuth() and
// the setter is called oAuth(x), where again JavaBeans rules imply that it should be called
// OAuth(x). Iterating over the properties here is a bit clunky but this case should be
// unusual.
propertyNameToSetters = propertyNameToUnprefixedSetters;
for (Map.Entry<String, E> entry : propertyElements.entrySet()) {
if (methodName.equals(PropertyNames.decapitalizeNormally(entry.getKey()))) {
propertyName = entry.getKey();
propertyElement = entry.getValue();
break;
}
}
}
if (propertyElement == null || propertyNameToSetters == null) {
// The second disjunct isn't needed but convinces control-flow checkers that
// propertyNameToSetters can't be null when we call put on it below.
errorReporter.reportError(
method,
"[%sBuilderWhatProp] Method %s does not correspond to %s",
autoWhat(),
methodName,
getterMustMatch());
checkForFailedJavaBean(method);
return;
}
Optional<Copier> function = getSetterFunction(propertyElement, method);
if (function.isPresent()) {
MethodSignature methodSignature = MethodSignature.asMemberOf(typeUtils, builderType, method);
TypeMirror returnType = methodSignature.returnType().getType();
if (typeUtils.isSubtype(builderType.asType(), returnType)
&& !MoreTypes.isTypeOf(Object.class, returnType)) {
if (nullableAnnotationFor(method, returnType).isPresent()) {
errorReporter.
reportWarning(
method,
"[%sBuilderSetterNullable] Setter methods always return the Builder so @Nullable"
+ " is not appropriate",
autoWhat());
}
// We allow the return type to be a supertype (other than Object), to support step builders.
AnnotatedTypeMirror parameterType =
Iterables.getOnlyElement(methodSignature.parameterTypes());
propertyNameToSetters.put(
propertyName, new PropertySetter(method, parameterType, function.get()));
} else {
errorReporter.reportError(
method,
"[%sBuilderRet] Setter methods must return %s or a supertype",
autoWhat(),
builderType.asType());
}
}
}
/**
* Classifies a method given that it has one argument and is a property builder with a parameter,
* like {@code ImmutableSortedSet.Builder<String> foosBuilder(Comparator<String>)}.
*
* @param method A method to classify
* @return true if method has been classified successfully
*/
private boolean classifyPropertyBuilderOneArg(ExecutableElement method) {
String methodName = method.getSimpleName().toString();
if (!methodName.endsWith("Builder")) {
return false;
}
String property = methodName.substring(0, methodName.length() - "Builder".length());
if (!rewrittenPropertyTypes.containsKey(property)) {
return false;
}
PropertyBuilderClassifier propertyBuilderClassifier =
new PropertyBuilderClassifier(
errorReporter,
typeUtils,
elementUtils,
this,
this::propertyIsNullable,
rewrittenPropertyTypes,
nullables);
Optional<PropertyBuilder> maybePropertyBuilder =
propertyBuilderClassifier.makePropertyBuilder(method, property);
maybePropertyBuilder.ifPresent(
propertyBuilder -> propertyNameToPropertyBuilder.put(property, propertyBuilder));
return maybePropertyBuilder.isPresent();
}
/**
* Returns an {@code Optional} describing how to convert a value from the setter's parameter type
* to the getter's return type, or {@code Optional.empty()} if the conversion isn't possible. An
* error will have been reported in the latter case. We can convert if they are already the same
* type, when the returned function will be the identity; or if the setter type can be copied
* using a method like {@code ImmutableList.copyOf} or {@code Optional.of}, when the returned
* function will be something like {@code s -> "Optional.of(" + s + ")"}.
*/
private Optional<Copier> getSetterFunction(E propertyElement, ExecutableElement setter) {
VariableElement parameterElement = Iterables.getOnlyElement(setter.getParameters());
boolean nullableParameter =
nullableAnnotationFor(parameterElement, parameterElement.asType()).isPresent();
String property = propertyElements().inverse().get(propertyElement);
TypeMirror targetType = rewrittenPropertyTypes.get(property).getType();
TypeMirror parameterType =
MethodSignature.asMemberOf(typeUtils, builderType, setter)
.parameterTypes()
.get(0)
.getType();
// Two types are assignable to each other if they are the same type, or if one is primitive and
// the other is the corresponding boxed type. There might be other cases where this is true, but
// we're likely to want to accept those too.
if (typeUtils.isAssignable(parameterType, targetType)
&& typeUtils.isAssignable(targetType, parameterType)) {
if (nullableParameter) {
boolean nullableProperty =
nullableAnnotationFor(propertyElement, originalPropertyType(propertyElement))
.isPresent();
if (!nullableProperty) {
errorReporter.reportError(
setter,
"[%sNullNotNull] Parameter of setter method is @Nullable but %s is not",
autoWhat(),
propertyString(propertyElement));
return Optional.empty();
}
}
if (!parameterElement.asType().getKind().isPrimitive()
&& originalPropertyType(propertyElement).getKind().isPrimitive()) {
errorReporter
.reportWarning(
setter,
"[%sUnnecessaryBoxing] %s is primitive but parameter of setter method is not",
autoWhat(),
propertyString(propertyElement));
}
return Optional.of(Copier.IDENTITY);
}
// Parameter type is not equal to property type, but might be convertible with copyOf.
ImmutableList<ExecutableElement> copyOfMethods = copyOfMethods(targetType, nullableParameter);
if (!copyOfMethods.isEmpty()) {
return getConvertingSetterFunction(copyOfMethods, propertyElement, setter, parameterType);
}
errorReporter.reportError(
setter,
"[%sGetVsSet] Parameter type %s of setter method should be %s to match %s",
autoWhat(),
parameterType,
targetType,
propertyString(propertyElement));
return Optional.empty();
}
/**
* Returns an {@code Optional} describing how to convert a value from the setter's parameter type
* to the getter's return type using one of the given methods, or {@code Optional.empty()} if the
* conversion isn't possible. An error will have been reported in the latter case.
*/
private Optional<Copier> getConvertingSetterFunction(
ImmutableList<ExecutableElement> copyOfMethods,
E propertyElement,
ExecutableElement setter,
TypeMirror parameterType) {
String property = propertyElements().inverse().get(propertyElement);
DeclaredType targetType = MoreTypes.asDeclared(rewrittenPropertyTypes.get(property).getType());
for (ExecutableElement copyOfMethod : copyOfMethods) {
Optional<Copier> function =
getConvertingSetterFunction(copyOfMethod, targetType, parameterType);
if (function.isPresent()) {
return function;
}
}
String targetTypeSimpleName = targetType.asElement().getSimpleName().toString();
errorReporter.reportError(
setter,
"[%sGetVsSetOrConvert] Parameter type %s of setter method should be %s to match %s, or it"
+ " should be a type that can be passed to %s.%s to produce %s",
autoWhat(),
parameterType,
targetType,
propertyString(propertyElement),
targetTypeSimpleName,
copyOfMethods.get(0).getSimpleName(),
targetType);
return Optional.empty();
}
/**
* Returns an {@code Optional} containing a function to use {@code copyOfMethod} to copy the
* {@code parameterType} to the {@code targetType}, or {@code Optional.empty()} if the method
* can't be used. For example, we might have a property of type {@code ImmutableSet<T>} and our
* setter has a parameter of type {@code Set<? extends T>}. Can we use {@code ImmutableSet<E>
* ImmutableSet.copyOf(Collection<? extends E>)} to set the property? What about {@code
* ImmutableSet<E> ImmutableSet.copyOf(E[])}?
*
* <p>The example here is deliberately complicated, in that it has a type parameter of its own,
* presumably because the {@code @AutoValue}
|
has
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/SshEndpointBuilderFactory.java
|
{
"start": 1562,
"end": 29688
}
|
interface ____
extends
EndpointConsumerBuilder {
default AdvancedSshEndpointConsumerBuilder advanced() {
return (AdvancedSshEndpointConsumerBuilder) this;
}
/**
* Specifies whether a connection to an unknown host should fail or not.
* This value is only checked when the property knownHosts is set.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: common
*
* @param failOnUnknownHost the value to set
* @return the dsl builder
*/
default SshEndpointConsumerBuilder failOnUnknownHost(boolean failOnUnknownHost) {
doSetProperty("failOnUnknownHost", failOnUnknownHost);
return this;
}
/**
* Specifies whether a connection to an unknown host should fail or not.
* This value is only checked when the property knownHosts is set.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: common
*
* @param failOnUnknownHost the value to set
* @return the dsl builder
*/
default SshEndpointConsumerBuilder failOnUnknownHost(String failOnUnknownHost) {
doSetProperty("failOnUnknownHost", failOnUnknownHost);
return this;
}
/**
* Sets the resource path for a known_hosts file.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param knownHostsResource the value to set
* @return the dsl builder
*/
default SshEndpointConsumerBuilder knownHostsResource(String knownHostsResource) {
doSetProperty("knownHostsResource", knownHostsResource);
return this;
}
/**
* Sets the timeout in milliseconds to wait in establishing the remote
* SSH server connection. Defaults to 30000 milliseconds.
*
* The option is a: <code>long</code> type.
*
* Default: 30000
* Group: common
*
* @param timeout the value to set
* @return the dsl builder
*/
default SshEndpointConsumerBuilder timeout(long timeout) {
doSetProperty("timeout", timeout);
return this;
}
/**
* Sets the timeout in milliseconds to wait in establishing the remote
* SSH server connection. Defaults to 30000 milliseconds.
*
* The option will be converted to a <code>long</code> type.
*
* Default: 30000
* Group: common
*
* @param timeout the value to set
* @return the dsl builder
*/
default SshEndpointConsumerBuilder timeout(String timeout) {
doSetProperty("timeout", timeout);
return this;
}
/**
* Sets the command string to send to the remote SSH server during every
* poll cycle. Only works with camel-ssh component being used as a
* consumer, i.e. from(ssh://...) You may need to end your command with
* a newline, and that must be URL encoded %0A.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: consumer
*
* @param pollCommand the value to set
* @return the dsl builder
*/
default SshEndpointConsumerBuilder pollCommand(String pollCommand) {
doSetProperty("pollCommand", pollCommand);
return this;
}
/**
* If the polling consumer did not poll any files, you can enable this
* option to send an empty message (no body) instead.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer
*
* @param sendEmptyMessageWhenIdle the value to set
* @return the dsl builder
*/
default SshEndpointConsumerBuilder sendEmptyMessageWhenIdle(boolean sendEmptyMessageWhenIdle) {
doSetProperty("sendEmptyMessageWhenIdle", sendEmptyMessageWhenIdle);
return this;
}
/**
* If the polling consumer did not poll any files, you can enable this
* option to send an empty message (no body) instead.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: consumer
*
* @param sendEmptyMessageWhenIdle the value to set
* @return the dsl builder
*/
default SshEndpointConsumerBuilder sendEmptyMessageWhenIdle(String sendEmptyMessageWhenIdle) {
doSetProperty("sendEmptyMessageWhenIdle", sendEmptyMessageWhenIdle);
return this;
}
/**
* The number of subsequent error polls (failed due some error) that
* should happen before the backoffMultipler should kick-in.
*
* The option is a: <code>int</code> type.
*
* Group: scheduler
*
* @param backoffErrorThreshold the value to set
* @return the dsl builder
*/
default SshEndpointConsumerBuilder backoffErrorThreshold(int backoffErrorThreshold) {
doSetProperty("backoffErrorThreshold", backoffErrorThreshold);
return this;
}
/**
* The number of subsequent error polls (failed due some error) that
* should happen before the backoffMultipler should kick-in.
*
* The option will be converted to a <code>int</code> type.
*
* Group: scheduler
*
* @param backoffErrorThreshold the value to set
* @return the dsl builder
*/
default SshEndpointConsumerBuilder backoffErrorThreshold(String backoffErrorThreshold) {
doSetProperty("backoffErrorThreshold", backoffErrorThreshold);
return this;
}
/**
* The number of subsequent idle polls that should happen before the
* backoffMultipler should kick-in.
*
* The option is a: <code>int</code> type.
*
* Group: scheduler
*
* @param backoffIdleThreshold the value to set
* @return the dsl builder
*/
default SshEndpointConsumerBuilder backoffIdleThreshold(int backoffIdleThreshold) {
doSetProperty("backoffIdleThreshold", backoffIdleThreshold);
return this;
}
/**
* The number of subsequent idle polls that should happen before the
* backoffMultipler should kick-in.
*
* The option will be converted to a <code>int</code> type.
*
* Group: scheduler
*
* @param backoffIdleThreshold the value to set
* @return the dsl builder
*/
default SshEndpointConsumerBuilder backoffIdleThreshold(String backoffIdleThreshold) {
doSetProperty("backoffIdleThreshold", backoffIdleThreshold);
return this;
}
/**
* To let the scheduled polling consumer backoff if there has been a
* number of subsequent idles/errors in a row. The multiplier is then
* the number of polls that will be skipped before the next actual
* attempt is happening again. When this option is in use then
* backoffIdleThreshold and/or backoffErrorThreshold must also be
* configured.
*
* The option is a: <code>int</code> type.
*
* Group: scheduler
*
* @param backoffMultiplier the value to set
* @return the dsl builder
*/
default SshEndpointConsumerBuilder backoffMultiplier(int backoffMultiplier) {
doSetProperty("backoffMultiplier", backoffMultiplier);
return this;
}
/**
* To let the scheduled polling consumer backoff if there has been a
* number of subsequent idles/errors in a row. The multiplier is then
* the number of polls that will be skipped before the next actual
* attempt is happening again. When this option is in use then
* backoffIdleThreshold and/or backoffErrorThreshold must also be
* configured.
*
* The option will be converted to a <code>int</code> type.
*
* Group: scheduler
*
* @param backoffMultiplier the value to set
* @return the dsl builder
*/
default SshEndpointConsumerBuilder backoffMultiplier(String backoffMultiplier) {
doSetProperty("backoffMultiplier", backoffMultiplier);
return this;
}
/**
* Milliseconds before the next poll.
*
* The option is a: <code>long</code> type.
*
* Default: 500
* Group: scheduler
*
* @param delay the value to set
* @return the dsl builder
*/
default SshEndpointConsumerBuilder delay(long delay) {
doSetProperty("delay", delay);
return this;
}
/**
* Milliseconds before the next poll.
*
* The option will be converted to a <code>long</code> type.
*
* Default: 500
* Group: scheduler
*
* @param delay the value to set
* @return the dsl builder
*/
default SshEndpointConsumerBuilder delay(String delay) {
doSetProperty("delay", delay);
return this;
}
/**
* If greedy is enabled, then the ScheduledPollConsumer will run
* immediately again, if the previous run polled 1 or more messages.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: scheduler
*
* @param greedy the value to set
* @return the dsl builder
*/
default SshEndpointConsumerBuilder greedy(boolean greedy) {
doSetProperty("greedy", greedy);
return this;
}
/**
* If greedy is enabled, then the ScheduledPollConsumer will run
* immediately again, if the previous run polled 1 or more messages.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: scheduler
*
* @param greedy the value to set
* @return the dsl builder
*/
default SshEndpointConsumerBuilder greedy(String greedy) {
doSetProperty("greedy", greedy);
return this;
}
/**
* Milliseconds before the first poll starts.
*
* The option is a: <code>long</code> type.
*
* Default: 1000
* Group: scheduler
*
* @param initialDelay the value to set
* @return the dsl builder
*/
default SshEndpointConsumerBuilder initialDelay(long initialDelay) {
doSetProperty("initialDelay", initialDelay);
return this;
}
/**
* Milliseconds before the first poll starts.
*
* The option will be converted to a <code>long</code> type.
*
* Default: 1000
* Group: scheduler
*
* @param initialDelay the value to set
* @return the dsl builder
*/
default SshEndpointConsumerBuilder initialDelay(String initialDelay) {
doSetProperty("initialDelay", initialDelay);
return this;
}
/**
* Specifies a maximum limit of number of fires. So if you set it to 1,
* the scheduler will only fire once. If you set it to 5, it will only
* fire five times. A value of zero or negative means fire forever.
*
* The option is a: <code>long</code> type.
*
* Default: 0
* Group: scheduler
*
* @param repeatCount the value to set
* @return the dsl builder
*/
default SshEndpointConsumerBuilder repeatCount(long repeatCount) {
doSetProperty("repeatCount", repeatCount);
return this;
}
/**
* Specifies a maximum limit of number of fires. So if you set it to 1,
* the scheduler will only fire once. If you set it to 5, it will only
* fire five times. A value of zero or negative means fire forever.
*
* The option will be converted to a <code>long</code> type.
*
* Default: 0
* Group: scheduler
*
* @param repeatCount the value to set
* @return the dsl builder
*/
default SshEndpointConsumerBuilder repeatCount(String repeatCount) {
doSetProperty("repeatCount", repeatCount);
return this;
}
/**
* The consumer logs a start/complete log line when it polls. This
* option allows you to configure the logging level for that.
*
* The option is a: <code>org.apache.camel.LoggingLevel</code> type.
*
* Default: TRACE
* Group: scheduler
*
* @param runLoggingLevel the value to set
* @return the dsl builder
*/
default SshEndpointConsumerBuilder runLoggingLevel(org.apache.camel.LoggingLevel runLoggingLevel) {
doSetProperty("runLoggingLevel", runLoggingLevel);
return this;
}
/**
* The consumer logs a start/complete log line when it polls. This
* option allows you to configure the logging level for that.
*
* The option will be converted to a
* <code>org.apache.camel.LoggingLevel</code> type.
*
* Default: TRACE
* Group: scheduler
*
* @param runLoggingLevel the value to set
* @return the dsl builder
*/
default SshEndpointConsumerBuilder runLoggingLevel(String runLoggingLevel) {
doSetProperty("runLoggingLevel", runLoggingLevel);
return this;
}
/**
* Allows for configuring a custom/shared thread pool to use for the
* consumer. By default each consumer has its own single threaded thread
* pool.
*
* The option is a:
* <code>java.util.concurrent.ScheduledExecutorService</code> type.
*
* Group: scheduler
*
* @param scheduledExecutorService the value to set
* @return the dsl builder
*/
default SshEndpointConsumerBuilder scheduledExecutorService(ScheduledExecutorService scheduledExecutorService) {
doSetProperty("scheduledExecutorService", scheduledExecutorService);
return this;
}
/**
* Allows for configuring a custom/shared thread pool to use for the
* consumer. By default each consumer has its own single threaded thread
* pool.
*
* The option will be converted to a
* <code>java.util.concurrent.ScheduledExecutorService</code> type.
*
* Group: scheduler
*
* @param scheduledExecutorService the value to set
* @return the dsl builder
*/
default SshEndpointConsumerBuilder scheduledExecutorService(String scheduledExecutorService) {
doSetProperty("scheduledExecutorService", scheduledExecutorService);
return this;
}
/**
* To use a cron scheduler from either camel-spring or camel-quartz
* component. Use value spring or quartz for built in scheduler.
*
* The option is a: <code>java.lang.Object</code> type.
*
* Default: none
* Group: scheduler
*
* @param scheduler the value to set
* @return the dsl builder
*/
default SshEndpointConsumerBuilder scheduler(Object scheduler) {
doSetProperty("scheduler", scheduler);
return this;
}
/**
* To use a cron scheduler from either camel-spring or camel-quartz
* component. Use value spring or quartz for built in scheduler.
*
* The option will be converted to a <code>java.lang.Object</code> type.
*
* Default: none
* Group: scheduler
*
* @param scheduler the value to set
* @return the dsl builder
*/
default SshEndpointConsumerBuilder scheduler(String scheduler) {
doSetProperty("scheduler", scheduler);
return this;
}
/**
* To configure additional properties when using a custom scheduler or
* any of the Quartz, Spring based scheduler. This is a multi-value
* option with prefix: scheduler.
*
* The option is a: <code>java.util.Map<java.lang.String,
* java.lang.Object></code> type.
* The option is multivalued, and you can use the
* schedulerProperties(String, Object) method to add a value (call the
* method multiple times to set more values).
*
* Group: scheduler
*
* @param key the option key
* @param value the option value
* @return the dsl builder
*/
default SshEndpointConsumerBuilder schedulerProperties(String key, Object value) {
doSetMultiValueProperty("schedulerProperties", "scheduler." + key, value);
return this;
}
/**
* To configure additional properties when using a custom scheduler or
* any of the Quartz, Spring based scheduler. This is a multi-value
* option with prefix: scheduler.
*
* The option is a: <code>java.util.Map<java.lang.String,
* java.lang.Object></code> type.
* The option is multivalued, and you can use the
* schedulerProperties(String, Object) method to add a value (call the
* method multiple times to set more values).
*
* Group: scheduler
*
* @param values the values
* @return the dsl builder
*/
default SshEndpointConsumerBuilder schedulerProperties(Map values) {
doSetMultiValueProperties("schedulerProperties", "scheduler.", values);
return this;
}
/**
* Whether the scheduler should be auto started.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: scheduler
*
* @param startScheduler the value to set
* @return the dsl builder
*/
default SshEndpointConsumerBuilder startScheduler(boolean startScheduler) {
doSetProperty("startScheduler", startScheduler);
return this;
}
/**
* Whether the scheduler should be auto started.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: scheduler
*
* @param startScheduler the value to set
* @return the dsl builder
*/
default SshEndpointConsumerBuilder startScheduler(String startScheduler) {
doSetProperty("startScheduler", startScheduler);
return this;
}
/**
* Time unit for initialDelay and delay options.
*
* The option is a: <code>java.util.concurrent.TimeUnit</code> type.
*
* Default: MILLISECONDS
* Group: scheduler
*
* @param timeUnit the value to set
* @return the dsl builder
*/
default SshEndpointConsumerBuilder timeUnit(TimeUnit timeUnit) {
doSetProperty("timeUnit", timeUnit);
return this;
}
/**
* Time unit for initialDelay and delay options.
*
* The option will be converted to a
* <code>java.util.concurrent.TimeUnit</code> type.
*
* Default: MILLISECONDS
* Group: scheduler
*
* @param timeUnit the value to set
* @return the dsl builder
*/
default SshEndpointConsumerBuilder timeUnit(String timeUnit) {
doSetProperty("timeUnit", timeUnit);
return this;
}
/**
* Controls if fixed delay or fixed rate is used. See
* ScheduledExecutorService in JDK for details.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: scheduler
*
* @param useFixedDelay the value to set
* @return the dsl builder
*/
default SshEndpointConsumerBuilder useFixedDelay(boolean useFixedDelay) {
doSetProperty("useFixedDelay", useFixedDelay);
return this;
}
/**
* Controls if fixed delay or fixed rate is used. See
* ScheduledExecutorService in JDK for details.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: scheduler
*
* @param useFixedDelay the value to set
* @return the dsl builder
*/
default SshEndpointConsumerBuilder useFixedDelay(String useFixedDelay) {
doSetProperty("useFixedDelay", useFixedDelay);
return this;
}
/**
* Sets the resource path of the certificate to use for Authentication.
* Will use ResourceHelperKeyPairProvider to resolve file based
* certificate, and depends on keyType setting.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param certResource the value to set
* @return the dsl builder
*/
default SshEndpointConsumerBuilder certResource(String certResource) {
doSetProperty("certResource", certResource);
return this;
}
/**
* Sets the password to use in loading certResource, if certResource is
* an encrypted key.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param certResourcePassword the value to set
* @return the dsl builder
*/
default SshEndpointConsumerBuilder certResourcePassword(String certResourcePassword) {
doSetProperty("certResourcePassword", certResourcePassword);
return this;
}
/**
* Comma-separated list of allowed/supported ciphers in their order of
* preference.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param ciphers the value to set
* @return the dsl builder
*/
default SshEndpointConsumerBuilder ciphers(String ciphers) {
doSetProperty("ciphers", ciphers);
return this;
}
/**
* Comma-separated list of allowed/supported key exchange algorithms in
* their order of preference.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param kex the value to set
* @return the dsl builder
*/
default SshEndpointConsumerBuilder kex(String kex) {
doSetProperty("kex", kex);
return this;
}
/**
* Sets the KeyPairProvider reference to use when connecting using
* Certificates to the remote SSH Server.
*
* The option is a:
* <code>org.apache.sshd.common.keyprovider.KeyPairProvider</code> type.
*
* Group: security
*
* @param keyPairProvider the value to set
* @return the dsl builder
*/
default SshEndpointConsumerBuilder keyPairProvider(org.apache.sshd.common.keyprovider.KeyPairProvider keyPairProvider) {
doSetProperty("keyPairProvider", keyPairProvider);
return this;
}
/**
* Sets the KeyPairProvider reference to use when connecting using
* Certificates to the remote SSH Server.
*
* The option will be converted to a
* <code>org.apache.sshd.common.keyprovider.KeyPairProvider</code> type.
*
* Group: security
*
* @param keyPairProvider the value to set
* @return the dsl builder
*/
default SshEndpointConsumerBuilder keyPairProvider(String keyPairProvider) {
doSetProperty("keyPairProvider", keyPairProvider);
return this;
}
/**
* Sets the key type to pass to the KeyPairProvider as part of
* authentication. KeyPairProvider.loadKey(...) will be passed this
* value. From Camel 3.0.0 / 2.25.0, by default Camel will select the
* first available KeyPair that is loaded. Prior to this, a KeyType of
* 'ssh-rsa' was enforced by default.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param keyType the value to set
* @return the dsl builder
*/
default SshEndpointConsumerBuilder keyType(String keyType) {
doSetProperty("keyType", keyType);
return this;
}
/**
* Comma-separated list of allowed/supported message authentication code
* algorithms in their order of preference. The MAC algorithm is used
* for data integrity protection.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param macs the value to set
* @return the dsl builder
*/
default SshEndpointConsumerBuilder macs(String macs) {
doSetProperty("macs", macs);
return this;
}
/**
* Sets the password to use in connecting to remote SSH server. Requires
* keyPairProvider to be set to null.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param password the value to set
* @return the dsl builder
*/
default SshEndpointConsumerBuilder password(String password) {
doSetProperty("password", password);
return this;
}
/**
* Comma-separated list of allowed/supported signature algorithms in
* their order of preference.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param signatures the value to set
* @return the dsl builder
*/
default SshEndpointConsumerBuilder signatures(String signatures) {
doSetProperty("signatures", signatures);
return this;
}
/**
* Sets the username to use in logging into the remote SSH server.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param username the value to set
* @return the dsl builder
*/
default SshEndpointConsumerBuilder username(String username) {
doSetProperty("username", username);
return this;
}
}
/**
* Advanced builder for endpoint consumers for the SSH component.
*/
public
|
SshEndpointConsumerBuilder
|
java
|
apache__flink
|
flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/operations/converters/SqlShowCreateModelConverter.java
|
{
"start": 1398,
"end": 2380
}
|
class ____ implements SqlNodeConverter<SqlShowCreateModel> {
@Override
public Operation convertSqlNode(SqlShowCreateModel showCreateModel, ConvertContext context) {
UnresolvedIdentifier unresolvedIdentifier =
UnresolvedIdentifier.of(showCreateModel.getFullModelName());
ObjectIdentifier identifier =
context.getCatalogManager().qualifyIdentifier(unresolvedIdentifier);
Optional<ContextResolvedModel> model = context.getCatalogManager().getModel(identifier);
if (model.isEmpty()) {
throw new ValidationException(
String.format(
"Could not execute SHOW CREATE MODEL. Model with identifier %s does not exist.",
identifier.asSerializableString()));
}
return new ShowCreateModelOperation(
identifier, model.get().getResolvedModel(), model.get().isTemporary());
}
}
|
SqlShowCreateModelConverter
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/search/SearchServiceSingleNodeTests.java
|
{
"start": 142965,
"end": 145201
}
|
class ____ extends AbstractQueryBuilder<TestRewriteCounterQueryBuilder> {
final int asyncRewriteCount;
final Supplier<Boolean> fetched;
TestRewriteCounterQueryBuilder() {
asyncRewriteCount = 0;
fetched = null;
}
private TestRewriteCounterQueryBuilder(int asyncRewriteCount, Supplier<Boolean> fetched) {
this.asyncRewriteCount = asyncRewriteCount;
this.fetched = fetched;
}
@Override
public String getWriteableName() {
return "test_query";
}
@Override
public TransportVersion getMinimalSupportedVersion() {
return TransportVersion.zero();
}
@Override
protected void doWriteTo(StreamOutput out) throws IOException {}
@Override
protected void doXContent(XContentBuilder builder, Params params) throws IOException {}
@Override
protected Query doToQuery(SearchExecutionContext context) throws IOException {
return new MatchAllDocsQuery();
}
@Override
protected boolean doEquals(TestRewriteCounterQueryBuilder other) {
return true;
}
@Override
protected int doHashCode() {
return 42;
}
@Override
protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws IOException {
if (asyncRewriteCount > 0) {
return this;
}
if (fetched != null) {
if (fetched.get() == null) {
return this;
}
assert fetched.get();
return new TestRewriteCounterQueryBuilder(1, null);
}
if (queryRewriteContext.convertToDataRewriteContext() != null) {
SetOnce<Boolean> awaitingFetch = new SetOnce<>();
queryRewriteContext.registerAsyncAction((c, l) -> {
awaitingFetch.set(true);
l.onResponse(null);
});
return new TestRewriteCounterQueryBuilder(0, awaitingFetch::get);
}
return this;
}
}
}
|
TestRewriteCounterQueryBuilder
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/boot/models/xml/internal/FilterProcessing.java
|
{
"start": 677,
"end": 1852
}
|
class ____ {
private static final SqlFragmentAlias[] NO_ALIASES = new SqlFragmentAlias[0];
public static SqlFragmentAlias[] collectSqlFragmentAliases(
List<JaxbFilterImpl.JaxbAliasesImpl> jaxbAliases,
XmlDocumentContext xmlDocumentContext) {
if ( CollectionHelper.isEmpty( jaxbAliases ) ) {
return NO_ALIASES;
}
final SqlFragmentAlias[] result = new SqlFragmentAlias[jaxbAliases.size()];
for ( int i = 0; i < jaxbAliases.size(); i++ ) {
final SqlFragmentAliasAnnotation alias = HibernateAnnotations.SQL_FRAGMENT_ALIAS.createUsage(
xmlDocumentContext.getModelBuildingContext()
);
result[i] = alias;
final JaxbFilterImpl.JaxbAliasesImpl jaxbAlias = jaxbAliases.get( i );
alias.alias( jaxbAlias.getAlias() );
if ( StringHelper.isNotEmpty( jaxbAlias.getTable() ) ) {
alias.table( jaxbAlias.getTable() );
}
if ( StringHelper.isNotEmpty( jaxbAlias.getEntity() ) ) {
final ClassDetails classDetails = xmlDocumentContext.getModelBuildingContext()
.getClassDetailsRegistry()
.resolveClassDetails( jaxbAlias.getEntity() );
alias.entity( classDetails.toJavaClass() );
}
}
return result;
}
}
|
FilterProcessing
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/persister/entity/JoinedSubclassEntityPersister.java
|
{
"start": 5116,
"end": 5290
}
|
class
____ final int[] subclassPropertyTableNumberClosure;
// the closure of all columns used by the entire hierarchy including
// subclasses and superclasses of this
|
private
|
java
|
spring-projects__spring-boot
|
module/spring-boot-micrometer-metrics/src/main/java/org/springframework/boot/micrometer/metrics/ValidationFailureAnalyzer.java
|
{
"start": 1112,
"end": 1753
}
|
class ____ extends AbstractFailureAnalyzer<ValidationException> {
@Override
protected FailureAnalysis analyze(Throwable rootFailure, ValidationException cause) {
StringBuilder description = new StringBuilder(String.format("Invalid Micrometer configuration detected:%n"));
for (Invalid<?> failure : cause.getValidation().failures()) {
description.append(String.format("%n - %s was '%s' but it %s", failure.getProperty(), failure.getValue(),
failure.getMessage()));
}
return new FailureAnalysis(description.toString(),
"Update your application to correct the invalid configuration.", cause);
}
}
|
ValidationFailureAnalyzer
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/MockMemoryResourceCalculatorProcessTree.java
|
{
"start": 964,
"end": 1016
}
|
class ____ obtain resource usage (Memory).
*/
public
|
to
|
java
|
apache__camel
|
core/camel-api/src/main/java/org/apache/camel/AsyncCallback.java
|
{
"start": 850,
"end": 1303
}
|
interface ____ an {@link AsyncProcessor} so that it can notify you when an {@link Exchange} is done.
* <p/>
* For example a {@link AsyncProcessor} should invoke the done method when the {@link Exchange} is ready to be continued
* routed. This allows to implement asynchronous {@link Producer} which can continue routing {@link Exchange} when all
* the data has been gathered. This allows to build non blocking request/reply communication.
*/
public
|
for
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/StateMap.java
|
{
"start": 1375,
"end": 6722
}
|
interface ____ of StateMap -------------------------------------------------------
/**
* Returns whether this {@link StateMap} is empty.
*
* @return {@code true} if this {@link StateMap} has no elements, {@code false} otherwise.
* @see #size()
*/
public boolean isEmpty() {
return size() == 0;
}
/**
* Returns the total number of entries in this {@link StateMap}.
*
* @return the number of entries in this {@link StateMap}.
*/
public abstract int size();
/**
* Returns the state for the composite of active key and given namespace.
*
* @param key the key. Not null.
* @param namespace the namespace. Not null.
* @return the state of the mapping with the specified key/namespace composite key, or {@code
* null} if no mapping for the specified key is found.
*/
public abstract S get(K key, N namespace);
/**
* Returns whether this map contains the specified key/namespace composite key.
*
* @param key the key in the composite key to search for. Not null.
* @param namespace the namespace in the composite key to search for. Not null.
* @return {@code true} if this map contains the specified key/namespace composite key, {@code
* false} otherwise.
*/
public abstract boolean containsKey(K key, N namespace);
/**
* Maps the specified key/namespace composite key to the specified value. This method should be
* preferred over {@link #putAndGetOld(K, N, S)} (key, Namespace, State) when the caller is not
* interested in the old state.
*
* @param key the key. Not null.
* @param namespace the namespace. Not null.
* @param state the state. Can be null.
*/
public abstract void put(K key, N namespace, S state);
/**
* Maps the composite of active key and given namespace to the specified state. Returns the
* previous state that was registered under the composite key.
*
* @param key the key. Not null.
* @param namespace the namespace. Not null.
* @param state the state. Can be null.
* @return the state of any previous mapping with the specified key or {@code null} if there was
* no such mapping.
*/
public abstract S putAndGetOld(K key, N namespace, S state);
/**
* Removes the mapping for the composite of active key and given namespace. This method should
* be preferred over {@link #removeAndGetOld(K, N)} when the caller is not interested in the old
* state.
*
* @param key the key of the mapping to remove. Not null.
* @param namespace the namespace of the mapping to remove. Not null.
*/
public abstract void remove(K key, N namespace);
/**
* Removes the mapping for the composite of active key and given namespace, returning the state
* that was found under the entry.
*
* @param key the key of the mapping to remove. Not null.
* @param namespace the namespace of the mapping to remove. Not null.
* @return the state of the removed mapping or {@code null} if no mapping for the specified key
* was found.
*/
public abstract S removeAndGetOld(K key, N namespace);
/**
* Applies the given {@link StateTransformationFunction} to the state (1st input argument),
* using the given value as second input argument. The result of {@link
* StateTransformationFunction#apply(Object, Object)} is then stored as the new state. This
* function is basically an optimization for get-update-put pattern.
*
* @param key the key. Not null.
* @param namespace the namespace. Not null.
* @param value the value to use in transforming the state. Can be null.
* @param transformation the transformation function.
* @throws Exception if some exception happens in the transformation function.
*/
public abstract <T> void transform(
K key, N namespace, T value, StateTransformationFunction<S, T> transformation)
throws Exception;
// For queryable state ------------------------------------------------------------------------
public abstract Stream<K> getKeys(N namespace);
public abstract InternalKvState.StateIncrementalVisitor<K, N, S> getStateIncrementalVisitor(
int recommendedMaxNumberOfReturnedRecords);
/**
* Creates a snapshot of this {@link StateMap}, to be written in checkpointing. Users should
* call {@link #releaseSnapshot(StateMapSnapshot)} after using the returned object.
*
* @return a snapshot from this {@link StateMap}, for checkpointing.
*/
@Nonnull
public abstract StateMapSnapshot<K, N, S, ? extends StateMap<K, N, S>> stateSnapshot();
/**
* Releases a snapshot for this {@link StateMap}. This method should be called once a snapshot
* is no more needed.
*
* @param snapshotToRelease the snapshot to release, which was previously created by this state
* map.
*/
public void releaseSnapshot(
StateMapSnapshot<K, N, S, ? extends StateMap<K, N, S>> snapshotToRelease) {}
// For testing --------------------------------------------------------------------------------
@VisibleForTesting
public abstract int sizeOfNamespace(Object namespace);
}
|
methods
|
java
|
apache__dubbo
|
dubbo-rpc/dubbo-rpc-api/src/main/java/org/apache/dubbo/rpc/AsyncContext.java
|
{
"start": 3358,
"end": 3947
}
|
class ____ implements AsyncService {
* public CompletableFuture sayHello(String name) {
* CompletableFuture future = new CompletableFuture();
* final AsyncContext asyncContext = RpcContext.startAsync();
* new Thread(() -> {
* // the right place to use this method
* asyncContext.signalContextSwitch();
* // some operations...
* future.complete();
* // only reset after future.complete()
* asyncContext.resetContext();
* }).start();
* return future;
* }
* }
* </code>
*/
void resetContext();
}
|
AsyncServiceImpl
|
java
|
spring-projects__spring-boot
|
module/spring-boot-http-converter/src/test/java/org/springframework/boot/http/converter/autoconfigure/HttpMessageConvertersAutoConfigurationTests.java
|
{
"start": 19819,
"end": 20060
}
|
class ____ {
@Bean
JsonMapper jsonMapper() {
return new JsonMapper();
}
@Bean
JsonMapper.Builder builder() {
return JsonMapper.builder();
}
}
@Configuration(proxyBeanMethods = false)
static
|
JacksonJsonMapperBuilderConfig
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/cluster/ClusterStateAckListener.java
|
{
"start": 1083,
"end": 4565
}
|
interface ____ {
/**
* Called to determine the nodes from which an acknowledgement is expected.
* <p>
* This method will be called multiple times to determine the set of acking nodes, so it is crucial for it to return consistent results:
* Given the same listener instance and the same node parameter, the method implementation should return the same result.
*
* @return {@code true} if and only if this task will wait for an ack from the given node.
*/
boolean mustAck(DiscoveryNode discoveryNode);
/**
* Called once all the selected nodes have acknowledged the cluster state update request. Must be very lightweight execution, since it
* is executed on the cluster service thread.
*/
void onAllNodesAcked();
/**
* Called after all the selected nodes have acknowledged the cluster state update request but at least one of them failed. Must be very
* lightweight execution, since it is executed on the cluster service thread.
*
* @param e exception representing the failure.
*/
void onAckFailure(Exception e);
/**
* Called if the acknowledgement timeout defined by {@link #ackTimeout()} expires while still waiting for an acknowledgement from one
* or more of the selected nodes.
*/
void onAckTimeout();
/**
* @return acknowledgement timeout, i.e. the maximum time interval to wait for a full set of acknowledgements. This time interval is
* measured from the start of the publication (which is after computing the new cluster state and serializing it as a transport
* message). If the cluster state is committed (i.e. a quorum of master-eligible nodes have accepted the new state) and then the
* timeout elapses then the corresponding listener is completed via {@link
* org.elasticsearch.cluster.ClusterStateAckListener#onAckTimeout()}. Although the time interval is measured from the start of
* the publication, it does not have any effect until the cluster state is committed:
* <ul>
* <li>
* If the cluster state update fails before committing then the failure is always reported via {@link
* org.elasticsearch.cluster.ClusterStateAckListener#onAckFailure(Exception)} rather than {@link
* org.elasticsearch.cluster.ClusterStateAckListener#onAckTimeout()}, and this may therefore happen some time after the
* timeout period elapses.
* </li>
* <li>
* If the cluster state update is eventually committed, but takes longer than {@code ackTimeout} to do so, then the
* corresponding listener will be completed via {@link org.elasticsearch.cluster.ClusterStateAckListener#onAckTimeout()}
* when it is committed, and this may therefore happen some time after the timeout period elapses.
* </li>
* </ul>
* <p>
* A timeout of {@link TimeValue#MINUS_ONE} means that the master should wait indefinitely for acknowledgements.
* <p>
* A timeout of {@link TimeValue#ZERO} means that the master will complete this listener (via {@link #onAckTimeout()}) as soon
* as the state is committed, before any nodes have applied the new state.
*/
TimeValue ackTimeout();
}
|
ClusterStateAckListener
|
java
|
spring-projects__spring-security
|
core/src/test/java/org/springframework/security/scheduling/ExplicitSecurityContextSchedulingTaskExecutorTests.java
|
{
"start": 962,
"end": 1372
}
|
class ____
extends AbstractSecurityContextSchedulingTaskExecutorTests {
@BeforeEach
public void setUp() throws Exception {
explicitSecurityContextSetup();
}
@Override
protected DelegatingSecurityContextSchedulingTaskExecutor create() {
return new DelegatingSecurityContextSchedulingTaskExecutor(this.taskExecutorDelegate, this.securityContext);
}
}
|
ExplicitSecurityContextSchedulingTaskExecutorTests
|
java
|
spring-projects__spring-boot
|
module/spring-boot-session-jdbc/src/main/java/org/springframework/boot/session/jdbc/autoconfigure/JdbcSessionDataSourceScriptDatabaseInitializer.java
|
{
"start": 1269,
"end": 1832
}
|
class ____
extends PropertiesBasedDataSourceScriptDatabaseInitializer<JdbcSessionProperties> {
/**
* Create a new {@link JdbcSessionDataSourceScriptDatabaseInitializer} instance.
* @param dataSource the Spring Session JDBC data source
* @param properties the Spring Session JDBC properties
* @see #getSettings
*/
public JdbcSessionDataSourceScriptDatabaseInitializer(DataSource dataSource, JdbcSessionProperties properties) {
super(dataSource, properties, Map.of(DatabaseDriver.MARIADB, "mysql"));
}
}
|
JdbcSessionDataSourceScriptDatabaseInitializer
|
java
|
ReactiveX__RxJava
|
src/main/java/io/reactivex/rxjava3/internal/operators/flowable/FlowableOnErrorComplete.java
|
{
"start": 1072,
"end": 1571
}
|
class ____<T> extends AbstractFlowableWithUpstream<T, T> {
final Predicate<? super Throwable> predicate;
public FlowableOnErrorComplete(Flowable<T> source,
Predicate<? super Throwable> predicate) {
super(source);
this.predicate = predicate;
}
@Override
protected void subscribeActual(Subscriber<? super T> observer) {
source.subscribe(new OnErrorCompleteSubscriber<>(observer, predicate));
}
public static final
|
FlowableOnErrorComplete
|
java
|
quarkusio__quarkus
|
core/deployment/src/main/java/io/quarkus/deployment/dev/testing/TestClassResult.java
|
{
"start": 227,
"end": 1759
}
|
class ____ implements TestClassResultInterface {
final String className;
final List<TestResult> passing;
final List<TestResult> failing;
final List<TestResult> skipped;
final long latestRunId;
final long time;
public TestClassResult(
String className,
List<TestResult> passing,
List<TestResult> failing,
List<TestResult> skipped,
long time) {
this.className = className;
this.passing = passing;
this.failing = failing;
this.skipped = skipped;
this.time = time;
long runId = 0;
for (TestResultInterface i : passing) {
runId = Math.max(i.getRunId(), runId);
}
for (TestResultInterface i : failing) {
runId = Math.max(i.getRunId(), runId);
}
latestRunId = runId;
}
@Override
public String getClassName() {
return className;
}
public List<TestResult> getPassing() {
return passing;
}
public List<TestResult> getFailing() {
return failing;
}
public List<TestResult> getSkipped() {
return skipped;
}
public long getLatestRunId() {
return latestRunId;
}
public long getTime() {
return time;
}
@Override
public List<TestResult> getResults() {
List<TestResult> ret = new ArrayList<>();
ret.addAll(passing);
ret.addAll(failing);
ret.addAll(skipped);
return ret;
}
}
|
TestClassResult
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowInfoAction.java
|
{
"start": 4518,
"end": 8733
}
|
/**
 * Per-follower-index descriptor: the follower and leader index names, the
 * remote cluster alias, the follow status and (optionally) the follow
 * parameters. Serializable over the transport wire and renderable as X-Content.
 */
class ____ implements Writeable, ToXContentObject {
// X-Content field names used by toXContent below.
public static final ParseField FOLLOWER_INDEX_FIELD = new ParseField("follower_index");
public static final ParseField REMOTE_CLUSTER_FIELD = new ParseField("remote_cluster");
public static final ParseField LEADER_INDEX_FIELD = new ParseField("leader_index");
public static final ParseField STATUS_FIELD = new ParseField("status");
public static final ParseField PARAMETERS_FIELD = new ParseField("parameters");
private final String followerIndex;
private final String remoteCluster;
private final String leaderIndex;
private final Status status;
// May be null; serialized as an optional writeable and omitted from X-Content when null.
private final FollowParameters parameters;
public FollowerInfo(
String followerIndex,
String remoteCluster,
String leaderIndex,
Status status,
FollowParameters parameters
) {
this.followerIndex = followerIndex;
this.remoteCluster = remoteCluster;
this.leaderIndex = leaderIndex;
this.status = status;
this.parameters = parameters;
}
public String getFollowerIndex() {
return followerIndex;
}
public String getRemoteCluster() {
return remoteCluster;
}
public String getLeaderIndex() {
return leaderIndex;
}
public Status getStatus() {
return status;
}
public FollowParameters getParameters() {
return parameters;
}
// Wire deserialization; field order must mirror writeTo exactly.
FollowerInfo(StreamInput in) throws IOException {
followerIndex = in.readString();
remoteCluster = in.readString();
leaderIndex = in.readString();
status = Status.fromString(in.readString());
parameters = in.readOptionalWriteable(FollowParameters::new);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
// Order must mirror the StreamInput constructor above.
out.writeString(followerIndex);
out.writeString(remoteCluster);
out.writeString(leaderIndex);
out.writeString(status.name);
out.writeOptionalWriteable(parameters);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(FOLLOWER_INDEX_FIELD.getPreferredName(), followerIndex);
builder.field(REMOTE_CLUSTER_FIELD.getPreferredName(), remoteCluster);
builder.field(LEADER_INDEX_FIELD.getPreferredName(), leaderIndex);
builder.field(STATUS_FIELD.getPreferredName(), status.name);
if (parameters != null) {
// Parameters are rendered as a nested object only when present.
builder.startObject(PARAMETERS_FIELD.getPreferredName());
{
parameters.toXContentFragment(builder);
}
builder.endObject();
}
builder.endObject();
return builder;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
FollowerInfo that = (FollowerInfo) o;
return Objects.equals(followerIndex, that.followerIndex)
&& Objects.equals(remoteCluster, that.remoteCluster)
&& Objects.equals(leaderIndex, that.leaderIndex)
&& status == that.status
&& Objects.equals(parameters, that.parameters);
}
@Override
public int hashCode() {
return Objects.hash(followerIndex, remoteCluster, leaderIndex, status, parameters);
}
public String toString() {
return Strings.toString(this);
}
}
public
|
FollowerInfo
|
java
|
mybatis__mybatis-3
|
src/main/java/org/apache/ibatis/scripting/xmltags/ForEachSqlNode.java
|
{
"start": 4103,
"end": 5165
}
|
/**
 * Decorator around a {@code DynamicContext} that lazily writes {@code prefix}
 * exactly once, immediately before the first non-blank SQL fragment is
 * appended. Every other operation is forwarded to the wrapped context.
 */
class ____ extends DynamicContext {

    /** Wrapped context that actually accumulates SQL and parameter mappings. */
    private final DynamicContext target;
    /** Text emitted once, ahead of the first non-blank fragment. */
    private final String prefix;
    /** True once {@link #prefix} has been written to the wrapped context. */
    private boolean prefixApplied;

    public PrefixedContext(DynamicContext delegate, String prefix) {
        super(configuration, delegate.getParameterObject(), delegate.getParameterType(), delegate.getParamNameResolver(),
            delegate.isParamExists());
        this.target = delegate;
        this.prefix = prefix;
        this.prefixApplied = false;
        this.bindings.putAll(delegate.getBindings());
    }

    public boolean isPrefixApplied() {
        return prefixApplied;
    }

    @Override
    public void appendSql(String sql) {
        // Only a non-null, non-blank fragment triggers the one-time prefix.
        boolean meaningful = sql != null && !sql.trim().isEmpty();
        if (meaningful && !prefixApplied) {
            target.appendSql(prefix);
            prefixApplied = true;
        }
        target.appendSql(sql);
    }

    @Override
    public String getSql() {
        return target.getSql();
    }

    @Override
    public List<ParameterMapping> getParameterMappings() {
        return target.getParameterMappings();
    }
}
}
|
PrefixedContext
|
java
|
spring-projects__spring-boot
|
core/spring-boot-testcontainers/src/test/java/org/springframework/boot/testcontainers/service/connection/FieldOriginTests.java
|
{
"start": 1168,
"end": 2048
}
|
class ____ {
@Test
@SuppressWarnings("NullAway") // Test null check
void createWhenFieldIsNullThrowsException() {
// The constructor must reject a null field with a descriptive message.
assertThatIllegalArgumentException().isThrownBy(() -> new FieldOrigin(null))
.withMessage("'field' must not be null");
}
@Test
void equalsAndHashCode() {
// Two origins for the same field are equal (and hash alike); a different field is not.
Origin o1 = new FieldOrigin(findField("one"));
Origin o2 = new FieldOrigin(findField("one"));
Origin o3 = new FieldOrigin(findField("two"));
assertThat(o1).isEqualTo(o1).isEqualTo(o2).isNotEqualTo(o3);
assertThat(o1).hasSameHashCodeAs(o2);
}
@Test
void toStringReturnsSensibleString() {
Origin origin = new FieldOrigin(findField("one"));
assertThat(origin).hasToString("FieldOriginTests.Fields.one");
}
// Looks up a declared field on the Fields fixture class, failing the test if absent.
private Field findField(String name) {
Field field = ReflectionUtils.findField(Fields.class, name);
assertThat(field).isNotNull();
return field;
}
static
|
FieldOriginTests
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/HazelcastMapEndpointBuilderFactory.java
|
{
"start": 11605,
"end": 15755
}
|
/**
 * Fluent builder for producer endpoints of the Hazelcast Map component.
 * NOTE(review): generated endpoint-DSL code — each setter stores an endpoint
 * option via doSetProperty and returns this builder for chaining.
 */
interface ____
extends
EndpointProducerBuilder {
/** Switches to the advanced-options view of this builder. */
default AdvancedHazelcastMapEndpointProducerBuilder advanced() {
return (AdvancedHazelcastMapEndpointProducerBuilder) this;
}
/**
 * Default operation to use when no operation header has been provided.
 * Option type: org.apache.camel.component.hazelcast.HazelcastOperation. Group: common.
 *
 * @param defaultOperation the value to set
 * @return the dsl builder
 */
default HazelcastMapEndpointProducerBuilder defaultOperation(org.apache.camel.component.hazelcast.HazelcastOperation defaultOperation) {
doSetProperty("defaultOperation", defaultOperation);
return this;
}
/**
 * Default operation to use when no operation header has been provided.
 * String variant: converted to HazelcastOperation at runtime. Group: common.
 *
 * @param defaultOperation the value to set
 * @return the dsl builder
 */
default HazelcastMapEndpointProducerBuilder defaultOperation(String defaultOperation) {
doSetProperty("defaultOperation", defaultOperation);
return this;
}
/**
 * Hazelcast configuration file. May also be loaded from an existing file
 * by prefixing with file: or classpath: followed by the file location.
 * Option type: java.lang.String. Group: common.
 *
 * @param hazelcastConfigUri the value to set
 * @return the dsl builder
 */
default HazelcastMapEndpointProducerBuilder hazelcastConfigUri(String hazelcastConfigUri) {
doSetProperty("hazelcastConfigUri", hazelcastConfigUri);
return this;
}
/**
 * The hazelcast instance reference to use for this endpoint.
 * Option type: com.hazelcast.core.HazelcastInstance. Group: common.
 *
 * @param hazelcastInstance the value to set
 * @return the dsl builder
 */
default HazelcastMapEndpointProducerBuilder hazelcastInstance(com.hazelcast.core.HazelcastInstance hazelcastInstance) {
doSetProperty("hazelcastInstance", hazelcastInstance);
return this;
}
/**
 * The hazelcast instance reference to use for this endpoint.
 * String variant: converted to HazelcastInstance at runtime. Group: common.
 *
 * @param hazelcastInstance the value to set
 * @return the dsl builder
 */
default HazelcastMapEndpointProducerBuilder hazelcastInstance(String hazelcastInstance) {
doSetProperty("hazelcastInstance", hazelcastInstance);
return this;
}
/**
 * The hazelcast instance reference name for this endpoint. When absent,
 * Camel uses the default hazelcast instance from the camel-hazelcast instance.
 * Option type: java.lang.String. Group: common.
 *
 * @param hazelcastInstanceName the value to set
 * @return the dsl builder
 */
default HazelcastMapEndpointProducerBuilder hazelcastInstanceName(String hazelcastInstanceName) {
doSetProperty("hazelcastInstanceName", hazelcastInstanceName);
return this;
}
}
/**
* Advanced builder for endpoint producers for the Hazelcast Map component.
*/
public
|
HazelcastMapEndpointProducerBuilder
|
java
|
mapstruct__mapstruct
|
processor/src/test/java/org/mapstruct/ap/test/bugs/_1714/Issue1714Mapper.java
|
{
"start": 822,
"end": 1094
}
|
/**
 * Simple mutable bean holding an offer's season number
 * (fixture for MapStruct issue #1714).
 */
class ____ {

    /** Season identifier; read and written via the standard accessor pair. */
    private String seasonNumber;

    public String getSeasonNumber() {
        return this.seasonNumber;
    }

    public void setSeasonNumber(String seasonNumber) {
        this.seasonNumber = seasonNumber;
    }
}
|
OfferEntity
|
java
|
junit-team__junit5
|
junit-platform-commons/src/main/java/org/junit/platform/commons/util/ReflectionUtils.java
|
{
"start": 27653,
"end": 27696
}
|
class ____ method.
*
* <p>Note that the
|
and
|
java
|
eclipse-vertx__vert.x
|
vertx-core/src/main/java/io/vertx/core/internal/tls/SslContextManager.java
|
{
"start": 10586,
"end": 11821
}
|
/**
 * Immutable cache key identifying an SSL context configuration: key/cert
 * options, trust options and CRL values. Equality and hashing cover all
 * three components so equal configurations map to the same cached context.
 */
class ____ {
private final KeyCertOptions keyCertOptions;
private final TrustOptions trustOptions;
// Defensive copy taken at construction; may be null when no CRLs are configured.
private final List<Buffer> crlValues;
public ConfigKey(SSLOptions options) {
this(options.getKeyCertOptions(), trustOptionsOf(options), options.getCrlValues());
}
public ConfigKey(KeyCertOptions keyCertOptions, TrustOptions trustOptions, List<Buffer> crlValues) {
this.keyCertOptions = keyCertOptions;
this.trustOptions = trustOptions;
this.crlValues = crlValues != null ? new ArrayList<>(crlValues) : null;
}
@Override
public boolean equals(Object obj) {
if (obj == this) {
return true;
}
if (obj instanceof ConfigKey) {
ConfigKey that = (ConfigKey) obj;
return Objects.equals(keyCertOptions, that.keyCertOptions) && Objects.equals(trustOptions, that.trustOptions) && Objects.equals(crlValues, that.crlValues);
}
return false;
}
@Override
public int hashCode() {
// Hand-rolled 31-multiplier combination over the three null-safe component hashes.
int hashCode = Objects.hashCode(keyCertOptions);
hashCode = 31 * hashCode + Objects.hashCode(trustOptions);
hashCode = 31 * hashCode + Objects.hashCode(crlValues);
return hashCode;
}
}
private final static
|
ConfigKey
|
java
|
apache__camel
|
components/camel-salesforce/camel-salesforce-component/src/test/java/org/apache/camel/component/salesforce/CompositeApiTreeManualIT.java
|
{
"start": 1538,
"end": 4327
}
|
/**
 * Manual integration test: submits an SObject tree through the Salesforce
 * composite-tree endpoint in both JSON and XML formats and verifies that
 * every record in the tree receives a server-assigned Id.
 */
class ____ extends AbstractSalesforceTestBase {
// Serialization format under test ("JSON" or "XML"), injected per parameterized run.
@Parameter
private String format;
@Test
public void shouldSubmitTreeUsingCompositeApi() {
// First account with two child contacts.
final Account simpleAccount = new Account();
final Contact smith = new Contact();
final Contact evans = new Contact();
// Second account with no children.
final Account simpleAccount2 = new Account();
simpleAccount.setName("SampleAccount");
simpleAccount.setPhone("1234567890");
simpleAccount.setWebsite("www.salesforce.com");
simpleAccount.setNumberOfEmployees(100);
simpleAccount.setIndustry(Account_IndustryEnum.BANKING);
smith.setLastName("Smith");
smith.setTitle("President");
smith.setEmail("sample@salesforce.com");
evans.setLastName("Evans");
evans.setTitle("Vice President");
evans.setEmail("sample@salesforce.com");
simpleAccount2.setName("SampleAccount2");
simpleAccount2.setPhone("1234567890");
simpleAccount2.setWebsite("www.salesforce2.com");
simpleAccount2.setNumberOfEmployees(100);
simpleAccount2.setIndustry(Account_IndustryEnum.BANKING);
final SObjectTree tree = new SObjectTree();
tree.addObject(simpleAccount).addChildren("Contacts", smith, evans);
tree.addObject(simpleAccount2);
// Third account exercising a deeper chain: account -> contact -> asset.
final Account simpleAccount3 = new Account();
simpleAccount3.setName("SimpleAccount3");
final Contact contact = new Contact();
contact.setFirstName("Simple");
contact.setLastName("Contact");
final Asset asset = new Asset();
asset.setName("Asset Name");
asset.setDescription("Simple asset");
tree.addObject(simpleAccount3).addChild("Contacts", contact).addChild("Assets", asset);
final SObjectTree response
= template.requestBody("salesforce:composite-tree?format=" + format, tree, SObjectTree.class);
assertNotNull(response, "Response should be provided");
// Ids are asserted on the original objects, i.e. they are written back in place.
assertNotNull(simpleAccount.getId(), "First account should have Id set");
assertNotNull(smith.getId(), "President of the first account should have Id set");
assertNotNull(evans.getId(), "Vice president of the first account should have Id set");
assertNotNull(simpleAccount2.getId(), "Second account should have Id set");
assertNotNull(simpleAccount3.getId(), "Third account should have Id set");
assertNotNull(contact.getId(), "Simple contact on third account should have Id set");
assertNotNull(asset.getId(), "Simple asset on the contact of the third account should have Id set");
}
// Runs the test once per serialization format.
@Parameters(name = "format = {0}")
public static Iterable<String> formats() {
return Arrays.asList("JSON", "XML");
}
}
|
CompositeApiTreeManualIT
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/mapping/embeddable/NestedStructEmbeddableTest.java
|
{
"start": 23674,
"end": 26665
}
|
class ____ purpose to verify Dialect#resolveSqlTypeDescriptor works
: Object.class
)
.getResultList();
assertEquals( 1, resultList.size() );
assertInstanceOf( TheStruct.class, resultList.get( 0 ) );
TheStruct theStruct = (TheStruct) resultList.get( 0 );
assertEquals( "XYZ", theStruct.stringField );
assertEquals( 10, theStruct.simpleEmbeddable.integerField );
assertEquals( "String \"<abc>A&B</abc>\"", theStruct.simpleEmbeddable.doubleNested.theNested.theLeaf.stringField );
assertStructEquals( EmbeddableAggregate.createAggregate1(), theStruct.nested );
}
);
}
@Test
public void testFunction(SessionFactoryScope scope) {
scope.inTransaction(
session -> {
ProcedureCall structFunction = session.createStoredProcedureCall( "structFunction" )
.markAsFunctionCall( TheStruct.class );
//noinspection unchecked
final List<Object> resultList = structFunction.getResultList();
assertEquals( 1, resultList.size() );
assertInstanceOf( TheStruct.class, resultList.get( 0 ) );
TheStruct result = (TheStruct) resultList.get( 0 );
EmbeddableAggregate struct = EmbeddableAggregate.createAggregate3();
assertStructEquals( struct, result.nested );
}
);
}
@Test
@SkipForDialect(dialectClass = PostgreSQLDialect.class, majorVersion = 10, reason = "Procedures were only introduced in version 11")
@SkipForDialect(dialectClass = PostgresPlusDialect.class, majorVersion = 10, reason = "Procedures were only introduced in version 11")
@SkipForDialect(dialectClass = DB2Dialect.class, reason = "DB2 does not support struct types in procedures")
public void testProcedure(SessionFactoryScope scope) {
scope.inTransaction(
session -> {
final Dialect dialect = session.getJdbcServices().getDialect();
final ParameterMode parameterMode;
if ( dialect instanceof PostgreSQLDialect ) {
parameterMode = ParameterMode.INOUT;
}
else {
parameterMode = ParameterMode.OUT;
}
ProcedureCall structFunction = session.createStoredProcedureCall( "structProcedure" );
ProcedureParameter<TheStruct> resultParameter = structFunction.registerParameter(
"result",
TheStruct.class,
parameterMode
);
structFunction.setParameter( resultParameter, null );
TheStruct result = structFunction.getOutputs().getOutputParameterValue( resultParameter );
EmbeddableAggregate struct = EmbeddableAggregate.createAggregate3();
assertStructEquals( struct, result.nested );
}
);
}
private static void assertStructEquals(EmbeddableAggregate struct, EmbeddableAggregate struct2) {
assertArrayEquals( struct.getTheBinary(), struct2.getTheBinary() );
assertEquals( struct.getTheString(), struct2.getTheString() );
assertEquals( struct.getTheLocalDateTime(), struct2.getTheLocalDateTime() );
assertEquals( struct.getTheUuid(), struct2.getTheUuid() );
}
@Entity(name = "StructHolder")
public static
|
on
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/common/logging/JULBridge.java
|
{
"start": 967,
"end": 3352
}
|
/**
 * Bridges java.util.logging (JUL) records into the logging API used here:
 * installed as the sole handler on the JUL root logger, it translates each
 * record's level and formatted message and republishes it.
 */
class ____ extends Handler {
// Direct JUL -> internal level translations for the well-known JUL levels.
private static final Map<java.util.logging.Level, Level> levelMap = Map.of(
java.util.logging.Level.OFF,
Level.OFF,
java.util.logging.Level.SEVERE,
Level.ERROR,
java.util.logging.Level.WARNING,
Level.WARN,
java.util.logging.Level.INFO,
Level.INFO,
java.util.logging.Level.FINE,
Level.DEBUG,
java.util.logging.Level.FINEST,
Level.TRACE,
java.util.logging.Level.ALL,
Level.ALL
);
// Same mapping keyed by the JUL level's int value, sorted so that custom
// levels can be resolved to the nearest known level below them.
private static final NavigableMap<Integer, Level> sortedLevelMap = levelMap.entrySet()
.stream()
.collect(Maps.toUnmodifiableSortedMap(e -> e.getKey().intValue(), Map.Entry::getValue));
/** Replaces every handler on the JUL root logger with a single bridge instance. */
public static void install() {
var rootJulLogger = java.util.logging.LogManager.getLogManager().getLogger("");
// clear out any other handlers, so eg we don't also print to stdout
for (var existingHandler : rootJulLogger.getHandlers()) {
rootJulLogger.removeHandler(existingHandler);
}
rootJulLogger.addHandler(new JULBridge());
}
private JULBridge() {}
@Override
public void publish(LogRecord record) {
Logger logger = LogManager.getLogger(record.getLoggerName());
Level level = translateJulLevel(record.getLevel());
Throwable thrown = record.getThrown();
String rawMessage = record.getMessage();
final String message;
if (rawMessage == null) {
message = "<null message>";
} else {
// JUL messages may contain {0}-style placeholders; expand them with the
// record's parameters, using a locale-independent format.
message = new MessageFormat(rawMessage, Locale.ROOT).format(record.getParameters());
}
if (thrown == null) {
logger.log(level, message);
} else {
logger.log(level, () -> message, thrown);
}
}
// Maps a JUL level to the internal level, falling back to the closest known
// level strictly below the given int value for custom levels.
private static Level translateJulLevel(java.util.logging.Level julLevel) {
Level esLevel = levelMap.get(julLevel);
if (esLevel != null) {
return esLevel;
}
// no matching known level, so find the closest level by int value
var closestEntry = sortedLevelMap.lowerEntry(julLevel.intValue());
assert closestEntry != null; // not possible since ALL is min int
return closestEntry.getValue();
}
@Override
public void flush() {}
@Override
public void close() {}
}
|
JULBridge
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/misc/CaseInsensitiveDeserTest.java
|
{
"start": 2700,
"end": 2843
}
|
/** Test fixture: container exposing a role value as a public field (no accessors). */
class ____ {
public CaseInsensitiveRole role;
}
// [databind#1886]: ... but also overrides
static
|
CaseInsensitiveRoleContainer
|
java
|
elastic__elasticsearch
|
x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/predicate/operator/comparison/NullEquals.java
|
{
"start": 757,
"end": 1686
}
|
/**
 * Null-safe equality comparison (NULLEQ): its result is never null (see
 * {@link #nullable()}), it is commutative, and it is its own reverse.
 */
class ____ extends BinaryComparison {
public NullEquals(Source source, Expression left, Expression right, ZoneId zoneId) {
super(source, left, right, BinaryComparisonOperation.NULLEQ, zoneId);
}
@Override
protected NodeInfo<NullEquals> info() {
return NodeInfo.create(this, NullEquals::new, left(), right(), zoneId());
}
@Override
protected NullEquals replaceChildren(Expression newLeft, Expression newRight) {
return new NullEquals(source(), newLeft, newRight, zoneId());
}
@Override
public NullEquals swapLeftAndRight() {
// Null-safe equality is symmetric, so the operands can simply be exchanged.
return new NullEquals(source(), right(), left(), zoneId());
}
@Override
public Nullability nullable() {
// NULLEQ always yields a definite boolean, never null.
return Nullability.FALSE;
}
@Override
public BinaryComparison reverse() {
// The comparison reversed is itself.
return this;
}
@Override
protected boolean isCommutative() {
return true;
}
}
|
NullEquals
|
java
|
elastic__elasticsearch
|
server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java
|
{
"start": 1964,
"end": 18339
}
|
class ____ extends ESIntegTestCase {
// Runs the given node command against a mock terminal, feeding a confirmation
// answer. When abort is true, a non-"y" answer is supplied so the command aborts.
private MockTerminal executeCommand(ElasticsearchNodeCommand command, Environment environment, boolean abort) throws Exception {
final MockTerminal terminal = MockTerminal.create();
final OptionSet options = command.getParser().parse();
final ProcessInfo processInfo = new ProcessInfo(Map.of(), Map.of(), createTempDir());
final String input;
if (abort) {
input = randomValueOtherThanMany(c -> c.equalsIgnoreCase("y"), () -> randomAlphaOfLength(1));
} else {
input = randomBoolean() ? "y" : "Y";
}
terminal.addTextInput(input);
try {
command.execute(terminal, options, environment, processInfo);
} finally {
// The stop warning must appear in the output regardless of the command's outcome.
assertThat(terminal.getOutput(), containsString(ElasticsearchNodeCommand.STOP_WARNING_MSG));
}
return terminal;
}
// Runs unsafe-bootstrap and asserts its confirmation and success messages.
private MockTerminal unsafeBootstrap(Environment environment, boolean abort) throws Exception {
final MockTerminal terminal = executeCommand(new UnsafeBootstrapMasterCommand(), environment, abort);
assertThat(terminal.getOutput(), containsString(UnsafeBootstrapMasterCommand.CONFIRMATION_MSG));
assertThat(terminal.getOutput(), containsString(UnsafeBootstrapMasterCommand.MASTER_NODE_BOOTSTRAPPED_MSG));
return terminal;
}
// Runs detach-cluster and asserts its confirmation and success messages.
private MockTerminal detachCluster(Environment environment, boolean abort) throws Exception {
final MockTerminal terminal = executeCommand(new DetachClusterCommand(), environment, abort);
assertThat(terminal.getOutput(), containsString(DetachClusterCommand.CONFIRMATION_MSG));
assertThat(terminal.getOutput(), containsString(DetachClusterCommand.NODE_DETACHED_MSG));
return terminal;
}
// Convenience overload: non-aborting unsafe-bootstrap.
private MockTerminal unsafeBootstrap(Environment environment) throws Exception {
return unsafeBootstrap(environment, false);
}
// Convenience overload: non-aborting detach-cluster.
private MockTerminal detachCluster(Environment environment) throws Exception {
return detachCluster(environment, false);
}
// Asserts that running the action throws an ElasticsearchException whose
// message contains the given fragment.
private void expectThrows(ThrowingRunnable runnable, String message) {
ElasticsearchException ex = expectThrows(ElasticsearchException.class, runnable);
assertThat(ex.getMessage(), containsString(message));
}
public void testBootstrapNotMasterEligible() {
final Environment environment = TestEnvironment.newEnvironment(
Settings.builder().put(nonMasterNode(internalCluster().getDefaultSettings())).build()
);
expectThrows(() -> unsafeBootstrap(environment), UnsafeBootstrapMasterCommand.NOT_MASTER_NODE_MSG);
}
public void testBootstrapNoDataFolder() {
final Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings());
expectThrows(() -> unsafeBootstrap(environment), ElasticsearchNodeCommand.NO_NODE_FOLDER_FOUND_MSG);
}
public void testDetachNoDataFolder() {
final Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings());
expectThrows(() -> detachCluster(environment), ElasticsearchNodeCommand.NO_NODE_FOLDER_FOUND_MSG);
}
public void testBootstrapNodeLocked() throws IOException {
Settings envSettings = buildEnvSettings(Settings.EMPTY);
Environment environment = TestEnvironment.newEnvironment(envSettings);
try (NodeEnvironment ignored = new NodeEnvironment(envSettings, environment)) {
expectThrows(() -> unsafeBootstrap(environment), ElasticsearchNodeCommand.FAILED_TO_OBTAIN_NODE_LOCK_MSG);
}
}
public void testDetachNodeLocked() throws IOException {
Settings envSettings = buildEnvSettings(Settings.EMPTY);
Environment environment = TestEnvironment.newEnvironment(envSettings);
try (NodeEnvironment ignored = new NodeEnvironment(envSettings, environment)) {
expectThrows(() -> detachCluster(environment), ElasticsearchNodeCommand.FAILED_TO_OBTAIN_NODE_LOCK_MSG);
}
}
public void testBootstrapNoNodeMetadata() {
Settings envSettings = buildEnvSettings(Settings.EMPTY);
Environment environment = TestEnvironment.newEnvironment(envSettings);
expectThrows(() -> unsafeBootstrap(environment), ElasticsearchNodeCommand.NO_NODE_METADATA_FOUND_MSG);
}
public void testBootstrapNotBootstrappedCluster() throws Exception {
internalCluster().setBootstrapMasterNodeIndex(InternalTestCluster.BOOTSTRAP_MASTER_NODE_INDEX_DONE); // explicitly skip bootstrap
String node = internalCluster().startNode(
Settings.builder()
.put(Node.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s") // to ensure quick node startup
.build()
);
awaitClusterState(node, state -> state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID));
Settings dataPathSettings = internalCluster().dataPathSettings(node);
internalCluster().stopRandomDataNode();
Environment environment = TestEnvironment.newEnvironment(
Settings.builder().put(internalCluster().getDefaultSettings()).put(dataPathSettings).build()
);
expectThrows(() -> unsafeBootstrap(environment), UnsafeBootstrapMasterCommand.EMPTY_LAST_COMMITTED_VOTING_CONFIG_MSG);
}
public void testBootstrapNoClusterState() throws IOException {
internalCluster().setBootstrapMasterNodeIndex(0);
String node = internalCluster().startNode();
Settings dataPathSettings = internalCluster().dataPathSettings(node);
ensureStableCluster(1);
NodeEnvironment nodeEnvironment = internalCluster().getAnyMasterNodeInstance(NodeEnvironment.class);
internalCluster().stopRandomDataNode();
Environment environment = TestEnvironment.newEnvironment(
Settings.builder().put(internalCluster().getDefaultSettings()).put(dataPathSettings).build()
);
PersistedClusterStateService.deleteAll(nodeEnvironment.nodeDataPaths());
expectThrows(() -> unsafeBootstrap(environment), ElasticsearchNodeCommand.NO_NODE_METADATA_FOUND_MSG);
}
public void testDetachNoClusterState() throws IOException {
internalCluster().setBootstrapMasterNodeIndex(0);
String node = internalCluster().startNode();
Settings dataPathSettings = internalCluster().dataPathSettings(node);
ensureStableCluster(1);
NodeEnvironment nodeEnvironment = internalCluster().getAnyMasterNodeInstance(NodeEnvironment.class);
internalCluster().stopRandomDataNode();
Environment environment = TestEnvironment.newEnvironment(
Settings.builder().put(internalCluster().getDefaultSettings()).put(dataPathSettings).build()
);
PersistedClusterStateService.deleteAll(nodeEnvironment.nodeDataPaths());
expectThrows(() -> detachCluster(environment), ElasticsearchNodeCommand.NO_NODE_METADATA_FOUND_MSG);
}
public void testBootstrapAbortedByUser() throws IOException {
internalCluster().setBootstrapMasterNodeIndex(0);
String node = internalCluster().startNode();
Settings dataPathSettings = internalCluster().dataPathSettings(node);
ensureStableCluster(1);
internalCluster().stopRandomDataNode();
Environment environment = TestEnvironment.newEnvironment(
Settings.builder().put(internalCluster().getDefaultSettings()).put(dataPathSettings).build()
);
expectThrows(() -> unsafeBootstrap(environment, true), ElasticsearchNodeCommand.ABORTED_BY_USER_MSG);
}
public void testDetachAbortedByUser() throws IOException {
internalCluster().setBootstrapMasterNodeIndex(0);
String node = internalCluster().startNode();
Settings dataPathSettings = internalCluster().dataPathSettings(node);
ensureStableCluster(1);
internalCluster().stopRandomDataNode();
Environment environment = TestEnvironment.newEnvironment(
Settings.builder().put(internalCluster().getDefaultSettings()).put(dataPathSettings).build()
);
expectThrows(() -> detachCluster(environment, true), ElasticsearchNodeCommand.ABORTED_BY_USER_MSG);
}
public void test3MasterNodes2Failed() throws Exception {
internalCluster().setBootstrapMasterNodeIndex(2);
List<String> masterNodes = new ArrayList<>();
logger.info("--> start 1st master-eligible node");
masterNodes.add(
internalCluster().startMasterOnlyNode(Settings.builder().put(Node.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s").build())
); // node ordinal 0
logger.info("--> start one data-only node");
String dataNode = internalCluster().startDataOnlyNode(
Settings.builder().put(Node.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s").build()
); // node ordinal 1
logger.info("--> start 2nd and 3rd master-eligible nodes and bootstrap");
masterNodes.addAll(internalCluster().startMasterOnlyNodes(2)); // node ordinals 2 and 3
logger.info("--> wait for all nodes to join the cluster");
ensureStableCluster(4);
logger.info("--> create index test");
createIndex("test");
ensureGreen("test");
Settings master1DataPathSettings = internalCluster().dataPathSettings(masterNodes.get(0));
Settings master2DataPathSettings = internalCluster().dataPathSettings(masterNodes.get(1));
Settings master3DataPathSettings = internalCluster().dataPathSettings(masterNodes.get(2));
Settings dataNodeDataPathSettings = internalCluster().dataPathSettings(dataNode);
logger.info("--> stop 2nd and 3d master eligible node");
internalCluster().stopNode(masterNodes.get(1));
internalCluster().stopNode(masterNodes.get(2));
logger.info("--> ensure NO_MASTER_BLOCK on data-only node");
awaitClusterState(dataNode, state -> state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID));
logger.info("--> try to unsafely bootstrap 1st master-eligible node, while node lock is held");
Environment environmentMaster1 = TestEnvironment.newEnvironment(
Settings.builder().put(internalCluster().getDefaultSettings()).put(master1DataPathSettings).build()
);
expectThrows(() -> unsafeBootstrap(environmentMaster1), UnsafeBootstrapMasterCommand.FAILED_TO_OBTAIN_NODE_LOCK_MSG);
logger.info("--> stop 1st master-eligible node and data-only node");
NodeEnvironment nodeEnvironment = internalCluster().getAnyMasterNodeInstance(NodeEnvironment.class);
internalCluster().stopNode(masterNodes.get(0));
assertBusy(() -> internalCluster().getInstance(GatewayMetaState.class, dataNode).allPendingAsyncStatesWritten());
internalCluster().stopRandomDataNode();
logger.info("--> unsafely-bootstrap 1st master-eligible node");
MockTerminal terminal = unsafeBootstrap(environmentMaster1);
Metadata metadata = ElasticsearchNodeCommand.createPersistedClusterStateService(Settings.EMPTY, nodeEnvironment.nodeDataPaths())
.loadBestOnDiskState().metadata;
assertThat(
terminal.getOutput(),
containsString(
String.format(
Locale.ROOT,
UnsafeBootstrapMasterCommand.CLUSTER_STATE_TERM_VERSION_MSG_FORMAT,
metadata.coordinationMetadata().term(),
metadata.version()
)
)
);
logger.info("--> start 1st master-eligible node");
internalCluster().startMasterOnlyNode(master1DataPathSettings);
logger.info("--> detach-cluster on data-only node");
Environment environmentData = TestEnvironment.newEnvironment(
Settings.builder().put(internalCluster().getDefaultSettings()).put(dataNodeDataPathSettings).build()
);
detachCluster(environmentData, false);
logger.info("--> start data-only node");
String dataNode2 = internalCluster().startDataOnlyNode(dataNodeDataPathSettings);
logger.info("--> ensure there is no NO_MASTER_BLOCK and unsafe-bootstrap is reflected in cluster state");
awaitClusterState(
dataNode2,
state -> state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID) == false
&& state.metadata().persistentSettings().getAsBoolean(UnsafeBootstrapMasterCommand.UNSAFE_BOOTSTRAP.getKey(), false)
);
logger.info("--> ensure index test is green");
ensureGreen("test");
IndexMetadata indexMetadata = clusterService().state().metadata().getProject().index("test");
assertThat(indexMetadata.getSettings().get(IndexMetadata.SETTING_HISTORY_UUID), notNullValue());
logger.info("--> detach-cluster on 2nd and 3rd master-eligible nodes");
Environment environmentMaster2 = TestEnvironment.newEnvironment(
Settings.builder().put(internalCluster().getDefaultSettings()).put(master2DataPathSettings).build()
);
detachCluster(environmentMaster2, false);
Environment environmentMaster3 = TestEnvironment.newEnvironment(
Settings.builder().put(internalCluster().getDefaultSettings()).put(master3DataPathSettings).build()
);
detachCluster(environmentMaster3, false);
logger.info("--> start 2nd and 3rd master-eligible nodes and ensure 4 nodes stable cluster");
internalCluster().startMasterOnlyNode(master2DataPathSettings);
internalCluster().startMasterOnlyNode(master3DataPathSettings);
ensureStableCluster(4);
}
public void testNoInitialBootstrapAfterDetach() throws Exception {
internalCluster().setBootstrapMasterNodeIndex(0);
String masterNode = internalCluster().startMasterOnlyNode();
Settings masterNodeDataPathSettings = internalCluster().dataPathSettings(masterNode);
internalCluster().stopCurrentMasterNode();
final Environment environment = TestEnvironment.newEnvironment(
Settings.builder().put(internalCluster().getDefaultSettings()).put(masterNodeDataPathSettings).build()
);
detachCluster(environment);
String node = internalCluster().startMasterOnlyNode(
Settings.builder()
// give the cluster 2 seconds to elect the master (it should not)
.put(Node.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "2s")
.put(masterNodeDataPathSettings)
.build()
);
ClusterState state = internalCluster().client().admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState();
assertTrue(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID));
internalCluster().stopNode(node);
}
public void testCanRunUnsafeBootstrapAfterErroneousDetachWithoutLoosingMetadata() throws Exception {
internalCluster().setBootstrapMasterNodeIndex(0);
String masterNode = internalCluster().startMasterOnlyNode();
Settings masterNodeDataPathSettings = internalCluster().dataPathSettings(masterNode);
updateClusterSettings(Settings.builder().put(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), "1234kb"));
ClusterState state = internalCluster().client().admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState();
assertThat(state.metadata().persistentSettings().get(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()), equalTo("1234kb"));
internalCluster().stopCurrentMasterNode();
final Environment environment = TestEnvironment.newEnvironment(
Settings.builder().put(internalCluster().getDefaultSettings()).put(masterNodeDataPathSettings).build()
);
detachCluster(environment);
unsafeBootstrap(environment);
internalCluster().startMasterOnlyNode(masterNodeDataPathSettings);
ensureGreen();
state = internalCluster().client().admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState();
assertThat(state.metadata().settings().get(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()), equalTo("1234kb"));
}
}
|
UnsafeBootstrapAndDetachCommandIT
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java
|
{
"start": 8833,
"end": 13113
}
|
/**
 * A single concrete index in a resolve-index response: its name plus aliases, attributes
 * (string flags — exact semantics not visible here), the data stream it backs (if any),
 * and its index mode. Serializable over the transport wire and renderable as XContent.
 */
class ____ extends ResolvedIndexAbstraction implements Writeable, ToXContentObject {
static final ParseField ALIASES_FIELD = new ParseField("aliases");
static final ParseField ATTRIBUTES_FIELD = new ParseField("attributes");
static final ParseField DATA_STREAM_FIELD = new ParseField("data_stream");
static final ParseField MODE_FIELD = new ParseField("mode");
private final String[] aliases;
private final String[] attributes;
// Name of the data stream this index belongs to, or null for a standalone index.
private final String dataStream;
// Index mode; null when read from a sender that predates RESOLVE_INDEX_MODE_ADDED.
private final IndexMode mode;
// Wire deserialization; the read order here must mirror writeTo(StreamOutput) exactly.
ResolvedIndex(StreamInput in) throws IOException {
setName(in.readString());
this.aliases = in.readStringArray();
this.attributes = in.readStringArray();
this.dataStream = in.readOptionalString();
if (in.getTransportVersion().supports(RESOLVE_INDEX_MODE_ADDED)) {
this.mode = IndexMode.readFrom(in);
} else {
// Older senders do not put the mode on the wire.
this.mode = null;
}
}
ResolvedIndex(String name, String[] aliases, String[] attributes, @Nullable String dataStream, IndexMode mode) {
super(name);
this.aliases = aliases;
this.attributes = attributes;
this.dataStream = dataStream;
this.mode = mode;
}
// Shallow copy under a new name; the alias/attribute arrays are shared, not cloned.
public ResolvedIndex copy(String newName) {
return new ResolvedIndex(newName, aliases, attributes, dataStream, mode);
}
public String[] getAliases() {
return aliases;
}
public String[] getAttributes() {
return attributes;
}
public String getDataStream() {
return dataStream;
}
public IndexMode getMode() {
return mode;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(getName());
out.writeStringArray(aliases);
out.writeStringArray(attributes);
out.writeOptionalString(dataStream);
// Only version-gated receivers understand the trailing mode field.
if (out.getTransportVersion().supports(RESOLVE_INDEX_MODE_ADDED)) {
IndexMode.writeTo(mode, out);
}
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(NAME_FIELD.getPreferredName(), getName());
// "aliases" is omitted entirely when empty; "data_stream" and "mode" are omitted when absent.
if (aliases.length > 0) {
builder.array(ALIASES_FIELD.getPreferredName(), aliases);
}
builder.array(ATTRIBUTES_FIELD.getPreferredName(), attributes);
if (Strings.isNullOrEmpty(dataStream) == false) {
builder.field(DATA_STREAM_FIELD.getPreferredName(), dataStream);
}
if (mode != null) {
builder.field(MODE_FIELD.getPreferredName(), mode.toString());
}
builder.endObject();
return builder;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ResolvedIndex index = (ResolvedIndex) o;
// Arrays are compared element-wise, consistent with Arrays.hashCode below.
return getName().equals(index.getName())
&& Objects.equals(dataStream, index.dataStream)
&& Arrays.equals(aliases, index.aliases)
&& Arrays.equals(attributes, index.attributes)
&& Objects.equals(mode, index.mode);
}
@Override
public int hashCode() {
int result = Objects.hash(getName(), dataStream);
result = 31 * result + Objects.hashCode(mode);
result = 31 * result + Arrays.hashCode(aliases);
result = 31 * result + Arrays.hashCode(attributes);
return result;
}
@Override
public String toString() {
return String.format(
Locale.ROOT,
"ResolvedIndex{name=%s, aliases=%s, attributes=%s, dataStream=%s, mode=%s}",
getName(),
Arrays.toString(aliases),
Arrays.toString(attributes),
dataStream,
mode
);
}
}
public static
|
ResolvedIndex
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractManagedParentQueue.java
|
{
"start": 1476,
"end": 1517
}
|
class ____ ParentQueue
*/
public abstract
|
of
|
java
|
google__dagger
|
dagger-compiler/main/java/dagger/internal/codegen/binding/DependencyRequestFormatter.java
|
{
"start": 2586,
"end": 5569
}
|
/**
 * Formats a {@link DependencyRequest} (or a dependency edge of a binding graph) into the
 * indented, human-readable trace lines used in diagnostics, e.g.
 * "<key> is requested at\n    [component] <element>".
 */
class ____ extends Formatter<DependencyRequest> {
  private final XProcessingEnv processingEnv;

  @Inject
  DependencyRequestFormatter(XProcessingEnv processingEnv) {
    this.processingEnv = processingEnv;
  }

  /** Formats every edge on its own line, omitting edges that format to an empty string. */
  public String formatEdges(ImmutableCollection<DependencyEdge> edges, BindingGraph graph) {
    return edges.stream()
        .map(e -> formatEdge(e, graph))
        .filter(formatted -> !formatted.isEmpty())
        .collect(joining("\n"));
  }

  /** Formats one edge, attributing the request to the component owning the edge's source node. */
  public String formatEdge(DependencyEdge edge, BindingGraph graph) {
    Node source = graph.network().incidentNodes(edge).source();
    XTypeElement owningComponent = source.componentPath().currentComponent().xprocessing();
    return format(Optional.of(owningComponent), edge.dependencyRequest());
  }

  @Override
  public String format(DependencyRequest request) {
    return format(Optional.empty(), request);
  }

  private String format(Optional<XTypeElement> optionalComponent, DependencyRequest request) {
    // Synthetic requests carry no element and contribute nothing to the trace.
    if (!request.requestElement().isPresent()) {
      return "";
    }
    XElement element = request.requestElement().get().xprocessing();
    String componentReference =
        optionalComponent.isPresent()
            ? String.format("[%s] ", optionalComponent.get().getQualifiedName())
            : "";
    if (isMethod(element)) {
      // Component method: "<key> is <verb> at\n    [component] <method>".
      return INDENT
          + request.key()
          + " is "
          + componentMethodRequestVerb(request)
          + " at\n"
          + DOUBLE_INDENT
          + componentReference
          + elementToString(element);
    }
    if (isVariableElement(element)) {
      // Injection site: "<qualifier><requested type> is injected at\n    [component] <site>".
      String requestedType =
          XTypes.toStableString(
              requestType(request.kind(), request.key().type().xprocessing(), processingEnv));
      return INDENT
          + formatQualifier(request.key().qualifier())
          + requestedType
          + " is injected at\n"
          + DOUBLE_INDENT
          + componentReference
          + elementToString(element);
    }
    if (isTypeElement(element)) {
      return ""; // types by themselves provide no useful information.
    }
    throw new IllegalStateException("Invalid request element " + element);
  }

  /** Returns "<qualifier> " (trailing space) when present, else the empty string. */
  private static String formatQualifier(Optional<DaggerAnnotation> maybeQualifier) {
    return maybeQualifier.isPresent() ? maybeQualifier.get() + " " : "";
  }

  /**
   * Returns the verb for a component method dependency request: "injected" for members
   * injection, "requested" for every value-producing kind; anything else is a bug.
   */
  private static String componentMethodRequestVerb(DependencyRequest request) {
    switch (request.kind()) {
      case MEMBERS_INJECTION:
        return "injected";
      case FUTURE:
      case PRODUCER:
      case INSTANCE:
      case LAZY:
      case PROVIDER:
      case PROVIDER_OF_LAZY:
        return "requested";
      default:
        throw new AssertionError("illegal request kind for method: " + request);
    }
  }
}
|
DependencyRequestFormatter
|
java
|
eclipse-vertx__vert.x
|
vertx-core/src/main/java/io/vertx/core/streams/Pipe.java
|
{
"start": 1939,
"end": 3572
}
|
/**
 * A pipe transfers elements from a source {@code ReadStream} to a destination
 * {@code WriteStream}. The {@code endOn*} methods configure whether the destination is
 * ended when the source fails, succeeds, or either; {@link #to(WriteStream)} starts the
 * transfer and {@link #close()} tears it down.
 *
 * @param <T> the type of element flowing through the pipe
 */
interface ____<T> {
/**
 * Set to {@code true} to call {@link WriteStream#end()} when the source {@code ReadStream} fails, {@code false} otherwise.
 *
 * @param end {@code true} to end the stream on a source {@code ReadStream} failure
 * @return a reference to this, so the API can be used fluently
 */
@Fluent
Pipe<T> endOnFailure(boolean end);
/**
 * Set to {@code true} to call {@link WriteStream#end()} when the source {@code ReadStream} succeeds, {@code false} otherwise.
 *
 * @param end {@code true} to end the stream on a source {@code ReadStream} success
 * @return a reference to this, so the API can be used fluently
 */
@Fluent
Pipe<T> endOnSuccess(boolean end);
/**
 * Set to {@code true} to call {@link WriteStream#end()} when the source {@code ReadStream} completes, {@code false} otherwise.
 * <p>
 * Calling this overwrites {@link #endOnFailure} and {@link #endOnSuccess}.
 *
 * @param end {@code true} to end the stream on a source {@code ReadStream} completion
 * @return a reference to this, so the API can be used fluently
 */
@Fluent
Pipe<T> endOnComplete(boolean end);
/**
 * Start to pipe the elements to the destination {@code WriteStream}.
 * <p>
 * When the operation fails with a write error, the source stream is resumed.
 *
 * @param dst the destination write stream
 * @return a future notified when the pipe operation completes
 */
Future<Void> to(WriteStream<T> dst);
/**
 * Close the pipe.
 * <p>
 * The streams handlers will be unset and the read stream resumed unless it is already ended.
 */
void close();
}
|
Pipe
|
java
|
alibaba__nacos
|
api/src/main/java/com/alibaba/nacos/api/config/model/ConfigListenerInfo.java
|
{
"start": 770,
"end": 1410
}
|
/**
 * Result of a config-listener status query: the query type that was used
 * ({@link #QUERY_TYPE_CONFIG} or {@link #QUERY_TYPE_IP}) together with a map of
 * listener statuses.
 */
class ____ {

    /** Query type constant for querying by config. */
    public static final String QUERY_TYPE_CONFIG = "config";

    /** Query type constant for querying by ip. */
    public static final String QUERY_TYPE_IP = "ip";

    // Which of the two query type constants produced this result.
    private String queryType;

    // Listener status entries; key/value semantics depend on the query type (not visible here).
    private Map<String, String> listenersStatus;

    public Map<String, String> getListenersStatus() {
        return this.listenersStatus;
    }

    public void setListenersStatus(Map<String, String> listenersStatus) {
        this.listenersStatus = listenersStatus;
    }

    public String getQueryType() {
        return this.queryType;
    }

    public void setQueryType(String queryType) {
        this.queryType = queryType;
    }
}
|
ConfigListenerInfo
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.