language stringclasses 1
value | repo stringclasses 60
values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | grpc__grpc-java | binder/src/androidTest/java/io/grpc/binder/internal/LeakSafeOneWayBinderTest.java | {
"start": 1015,
"end": 1155
} | class ____ {
private LeakSafeOneWayBinder binder;
private final FakeHandler handler = new FakeHandler();
static | LeakSafeOneWayBinderTest |
java | elastic__elasticsearch | modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/TlsHandshakeThrottleManager.java | {
"start": 6930,
"end": 10040
} | class ____ {
// volatile for metrics
private volatile int inProgressHandshakesCount = 0;
// actions to run (or fail) to release a throttled handshake
private final ArrayDeque<AbstractRunnable> delayedHandshakes = new ArrayDeque<>();
// delayedHandshakes.size() but tracked separately for metrics
private volatile int delayedHandshakesCount = 0;
// for metrics
private volatile long totalDelayedHandshakesCount = 0;
private volatile long droppedHandshakesCount = 0;
private AbstractRunnable takeFirstDelayedHandshake() {
final var result = delayedHandshakes.removeFirst();
delayedHandshakesCount = delayedHandshakes.size();
return result;
}
private AbstractRunnable takeLastDelayedHandshake() {
final var result = delayedHandshakes.removeLast();
delayedHandshakesCount = delayedHandshakes.size();
return result;
}
private void addDelayedHandshake(AbstractRunnable abstractRunnable) {
delayedHandshakes.addFirst(abstractRunnable);
// noinspection NonAtomicOperationOnVolatileField all writes are on this thread
totalDelayedHandshakesCount += 1;
delayedHandshakesCount = delayedHandshakes.size();
}
void close() {
while (delayedHandshakes.isEmpty() == false) {
takeFirstDelayedHandshake().onFailure(new NodeClosedException((DiscoveryNode) null));
}
}
ChannelHandler newHandshakeThrottleHandler(SubscribableListener<Void> handshakeCompletePromise) {
return new HandshakeThrottleHandler(handshakeCompletePromise);
}
ChannelHandler newHandshakeCompletionWatcher(SubscribableListener<Void> handshakeCompletePromise) {
return new HandshakeCompletionWatcher(handshakeCompletePromise);
}
void handleHandshakeCompletion() {
if (delayedHandshakes.isEmpty()) {
// noinspection NonAtomicOperationOnVolatileField all writes are on this thread
inProgressHandshakesCount -= 1;
} else {
takeFirstDelayedHandshake().run();
}
}
public int getInProgressHandshakesCount() {
return inProgressHandshakesCount;
}
public int getCurrentDelayedHandshakesCount() {
return delayedHandshakesCount;
}
public long getTotalDelayedHandshakesCount() {
return totalDelayedHandshakesCount;
}
public long getDroppedHandshakesCount() {
return droppedHandshakesCount;
}
/**
* A Netty pipeline handler that aggregates inbound messages until it receives a full TLS {@code ClientHello} and then either
* passes all the received messages on down the pipeline (if not throttled) or else delays that work until another TLS handshake
* completes (if too many such handshakes are already in flight).
*/
private | TlsHandshakeThrottle |
java | apache__camel | test-infra/camel-test-infra-milvus/src/main/java/org/apache/camel/test/infra/milvus/services/MilvusInfraService.java | {
"start": 980,
"end": 1279
} | interface ____ extends InfrastructureService {
String getMilvusEndpointUrl();
@Deprecated
default String getMilvusHost() {
return host();
}
@Deprecated
default int getMilvusPort() {
return port();
}
String host();
int port();
}
| MilvusInfraService |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/impl/health/HealthCheckResultStrategyTest.java | {
"start": 1329,
"end": 2838
} | class ____ extends ContextTestSupport {
@Override
public boolean isUseRouteBuilder() {
return false;
}
@Test
public void testMyFoo() {
context.getRegistry().bind("myStrategy", new MyResultStrategy());
context.start();
HealthCheck hc = PluginHelper.getHealthCheckResolver(context).resolveHealthCheck("myfoo");
Assertions.assertNotNull(hc);
Assertions.assertEquals("acme", hc.getGroup());
Assertions.assertEquals("myfoo", hc.getId());
HealthCheck.Result r = hc.call();
Assertions.assertEquals(HealthCheck.State.UP, r.getState());
Assertions.assertEquals("I changed this", r.getMessage().get());
}
@Test
public void testAddToRegistry() {
context.getRegistry().bind("myStrategy", new MyResultStrategy());
context.start();
HealthCheck hc = PluginHelper.getHealthCheckResolver(context).resolveHealthCheck("myfoo");
Assertions.assertNotNull(hc);
HealthCheckRegistry hcr = context.getCamelContextExtension().getContextPlugin(HealthCheckRegistry.class);
hcr.register(hc);
Collection<HealthCheck.Result> col = HealthCheckHelper.invoke(context);
Assertions.assertEquals(1, col.size());
HealthCheck.Result r = col.iterator().next();
Assertions.assertEquals(HealthCheck.State.UP, r.getState());
Assertions.assertEquals("I changed this", r.getMessage().get());
}
private static | HealthCheckResultStrategyTest |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderMultiFieldTests.java | {
"start": 1171,
"end": 7501
} | class ____ extends MapperServiceTestCase {
public void testDefaultField() throws Exception {
MapperService mapperService = createMapperService("""
{ "_doc" : { "properties" : {
"f_text1" : { "type" : "text" },
"f_text2" : { "type" : "text" },
"f_keyword1" : { "type" : "keyword" },
"f_keyword2" : { "type" : "keyword" },
"f_date" : { "type" : "date" }
}}}
""");
ParsedDocument doc = mapperService.documentMapper().parse(source("""
{ "f_text1" : "foo", "f_text2" : "bar", "f_keyword1" : "baz", "f_keyword2" : "buz", "f_date" : "2021-12-20T00:00:00" }
"""));
withLuceneIndex(mapperService, iw -> iw.addDocument(doc.rootDoc()), ir -> {
IndexSearcher searcher = newSearcher(ir);
{
// default value 'index.query.default_field = *' sets leniency to true
SearchExecutionContext context = createSearchExecutionContext(mapperService, searcher);
Query query = new MultiMatchQueryBuilder("hello").toQuery(context);
Query expected = new DisjunctionMaxQuery(
List.of(
new TermQuery(new Term("f_text1", "hello")),
new TermQuery(new Term("f_text2", "hello")),
new TermQuery(new Term("f_keyword1", "hello")),
new TermQuery(new Term("f_keyword2", "hello")),
new MatchNoDocsQuery()
),
0f
);
assertThat(query, equalTo(expected));
}
{
// default field list contains '*' sets leniency to true
Settings settings = Settings.builder().putList("index.query.default_field", "f_text1", "*").build();
SearchExecutionContext context = createSearchExecutionContext(mapperService, searcher, settings);
Query query = new MultiMatchQueryBuilder("hello").toQuery(context);
Query expected = new DisjunctionMaxQuery(
List.of(
new TermQuery(new Term("f_text1", "hello")),
new TermQuery(new Term("f_text2", "hello")),
new TermQuery(new Term("f_keyword1", "hello")),
new TermQuery(new Term("f_keyword2", "hello")),
new MatchNoDocsQuery()
),
0f
);
assertThat(query, equalTo(expected));
}
{
// default field list contains no wildcards, leniency = false
Settings settings = Settings.builder().putList("index.query.default_field", "f_text1", "f_date").build();
SearchExecutionContext context = createSearchExecutionContext(mapperService, searcher, settings);
Exception e = expectThrows(Exception.class, () -> new MultiMatchQueryBuilder("hello").toQuery(context));
assertThat(e.getMessage(), containsString("failed to parse date field [hello]"));
}
{
// default field list contains boost
Settings settings = Settings.builder().putList("index.query.default_field", "f_text1", "f_text2^4").build();
SearchExecutionContext context = createSearchExecutionContext(mapperService, searcher, settings);
Query query = new MultiMatchQueryBuilder("hello").toQuery(context);
Query expected = new DisjunctionMaxQuery(
List.of(new TermQuery(new Term("f_text1", "hello")), new BoostQuery(new TermQuery(new Term("f_text2", "hello")), 4f)),
0f
);
assertThat(query, equalTo(expected));
}
{
// set tiebreaker
SearchExecutionContext context = createSearchExecutionContext(mapperService, searcher);
Query query = new MultiMatchQueryBuilder("hello").tieBreaker(0.5f).toQuery(context);
Query expected = new DisjunctionMaxQuery(
List.of(
new TermQuery(new Term("f_text1", "hello")),
new TermQuery(new Term("f_text2", "hello")),
new TermQuery(new Term("f_keyword1", "hello")),
new TermQuery(new Term("f_keyword2", "hello")),
new MatchNoDocsQuery()
),
0.5f
);
assertThat(query, equalTo(expected));
}
});
}
public void testFieldListIncludesWildcard() throws Exception {
MapperService mapperService = createMapperService("""
{ "_doc" : { "properties" : {
"f_text1" : { "type" : "text" },
"f_text2" : { "type" : "text" },
"f_keyword1" : { "type" : "keyword" },
"f_keyword2" : { "type" : "keyword" },
"f_date" : { "type" : "date" }
}}}
""");
ParsedDocument doc = mapperService.documentMapper().parse(source("""
{ "f_text1" : "foo", "f_text2" : "bar", "f_keyword1" : "baz", "f_keyword2" : "buz", "f_date" : "2021-12-20T00:00:00" }
"""));
withLuceneIndex(mapperService, iw -> iw.addDocument(doc.rootDoc()), ir -> {
SearchExecutionContext context = createSearchExecutionContext(mapperService, newSearcher(ir));
Query expected = new DisjunctionMaxQuery(
List.of(
new TermQuery(new Term("f_text1", "hello")),
new TermQuery(new Term("f_text2", "hello")),
new TermQuery(new Term("f_keyword1", "hello")),
new TermQuery(new Term("f_keyword2", "hello")),
new MatchNoDocsQuery()
),
0f
);
assertEquals(expected, new MultiMatchQueryBuilder("hello").field("*").toQuery(context));
assertEquals(expected, new MultiMatchQueryBuilder("hello").field("f_text1").field("*").toQuery(context));
});
}
}
| MultiMatchQueryBuilderMultiFieldTests |
java | google__dagger | javatests/dagger/functional/cycle/Cycles.java | {
"start": 1201,
"end": 1342
} | class ____ {
public final B b;
public final E e;
@Inject
A(E e, B b) {
this.e = e;
this.b = b;
}
}
static | A |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/identifier/SimpleEntityTest.java | {
"start": 917,
"end": 4152
} | class ____ {
@BeforeEach
public void init(EntityManagerFactoryScope scope) {
scope.inTransaction( entityManager -> {
Library library = new Library();
library.setId(1L);
library.setName("Amazon");
entityManager.persist(library);
Book book = new Book();
book.setId(1L);
book.setTitle("High-Performance Java Persistence");
book.setAuthor("Vlad Mihalcea");
entityManager.persist(book);
});
}
@AfterEach
public void tearDown(EntityManagerFactoryScope scope) {
scope.inTransaction( entityManager -> {
scope.getEntityManagerFactory().getSchemaManager().truncate();
} );
}
@Test
public void testIdentityScope(EntityManagerFactoryScope scope) {
scope.inTransaction( entityManager -> {
//tag::entity-pojo-identity-scope-example[]
Book book1 = entityManager.find(Book.class, 1L);
Book book2 = entityManager.find(Book.class, 1L);
assertTrue(book1 == book2);
//end::entity-pojo-identity-scope-example[]
});
}
@Test
public void testSetIdentityScope(EntityManagerFactoryScope scope) {
scope.inTransaction( entityManager -> {
//tag::entity-pojo-set-identity-scope-example[]
Library library = entityManager.find(Library.class, 1L);
Book book1 = entityManager.find(Book.class, 1L);
Book book2 = entityManager.find(Book.class, 1L);
library.getBooks().add(book1);
library.getBooks().add(book2);
assertEquals(1, library.getBooks().size());
//end::entity-pojo-set-identity-scope-example[]
});
}
@Test
public void testMultiSessionIdentityScope(EntityManagerFactoryScope scope) {
//tag::entity-pojo-multi-session-identity-scope-example[]
Book book1 = scope.fromTransaction( entityManager -> entityManager.find(Book.class, 1L) );
Book book2 = scope.fromTransaction( entityManager -> entityManager.find(Book.class, 1L) );
assertFalse(book1 == book2);
//end::entity-pojo-multi-session-identity-scope-example[]
}
@Test
public void testMultiSessionSetIdentityScope(EntityManagerFactoryScope scope) {
Book book1 = scope.fromTransaction( entityManager -> entityManager.find(Book.class, 1L) );
Book book2 = scope.fromTransaction( entityManager -> entityManager.find(Book.class, 1L) );
//tag::entity-pojo-multi-session-set-identity-scope-example[]
scope.inTransaction( entityManager -> {
Set<Book> books = new HashSet<>();
books.add(book1);
books.add(book2);
assertEquals(2, books.size());
});
//end::entity-pojo-multi-session-set-identity-scope-example[]
}
@Test
public void testTransientSetIdentityScope(EntityManagerFactoryScope scope) {
scope.inTransaction( entityManager -> {
//tag::entity-pojo-transient-set-identity-scope-example[]
Library library = entityManager.find(Library.class, 1L);
Book book1 = new Book();
book1.setId(100L);
book1.setTitle("High-Performance Java Persistence");
Book book2 = new Book();
book2.setId(101L);
book2.setTitle("Java Persistence with Hibernate");
library.getBooks().add(book1);
library.getBooks().add(book2);
assertEquals(2, library.getBooks().size());
//end::entity-pojo-transient-set-identity-scope-example[]
});
}
//tag::entity-pojo-set-mapping-example[]
@Entity(name = "MyLibrary")
public static | SimpleEntityTest |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/aop/aspectj/ThisAndTargetSelectionOnlyPointcutsAtAspectJTests.java | {
"start": 1172,
"end": 3188
} | class ____ {
private ClassPathXmlApplicationContext ctx;
private TestInterface testBean;
private TestInterface testAnnotatedClassBean;
private TestInterface testAnnotatedMethodBean;
private Counter counter;
@BeforeEach
void setup() {
this.ctx = new ClassPathXmlApplicationContext(getClass().getSimpleName() + ".xml", getClass());
testBean = (TestInterface) ctx.getBean("testBean");
testAnnotatedClassBean = (TestInterface) ctx.getBean("testAnnotatedClassBean");
testAnnotatedMethodBean = (TestInterface) ctx.getBean("testAnnotatedMethodBean");
counter = (Counter) ctx.getBean("counter");
counter.reset();
}
@AfterEach
void tearDown() {
this.ctx.close();
}
@Test
void thisAsClassDoesNotMatch() {
testBean.doIt();
assertThat(counter.thisAsClassCounter).isEqualTo(0);
}
@Test
void thisAsInterfaceMatch() {
testBean.doIt();
assertThat(counter.thisAsInterfaceCounter).isEqualTo(1);
}
@Test
void targetAsClassDoesMatch() {
testBean.doIt();
assertThat(counter.targetAsClassCounter).isEqualTo(1);
}
@Test
void targetAsInterfaceMatch() {
testBean.doIt();
assertThat(counter.targetAsInterfaceCounter).isEqualTo(1);
}
@Test
void thisAsClassAndTargetAsClassCounterNotMatch() {
testBean.doIt();
assertThat(counter.thisAsClassAndTargetAsClassCounter).isEqualTo(0);
}
@Test
void thisAsInterfaceAndTargetAsInterfaceCounterMatch() {
testBean.doIt();
assertThat(counter.thisAsInterfaceAndTargetAsInterfaceCounter).isEqualTo(1);
}
@Test
void thisAsInterfaceAndTargetAsClassCounterMatch() {
testBean.doIt();
assertThat(counter.thisAsInterfaceAndTargetAsInterfaceCounter).isEqualTo(1);
}
@Test
void atTargetClassAnnotationMatch() {
testAnnotatedClassBean.doIt();
assertThat(counter.atTargetClassAnnotationCounter).isEqualTo(1);
}
@Test
void atAnnotationMethodAnnotationMatch() {
testAnnotatedMethodBean.doIt();
assertThat(counter.atAnnotationMethodAnnotationCounter).isEqualTo(1);
}
| ThisAndTargetSelectionOnlyPointcutsAtAspectJTests |
java | alibaba__nacos | naming/src/test/java/com/alibaba/nacos/naming/healthcheck/v2/processor/HealthCheckCommonV2Test.java | {
"start": 1570,
"end": 4657
} | class ____ {
@Mock
private SwitchDomain.HealthParams healthParams;
@Mock
private HealthCheckTaskV2 healthCheckTaskV2;
@Mock
private Service service;
@Mock
private IpPortBasedClient ipPortBasedClient;
@Mock
private HealthCheckInstancePublishInfo healthCheckInstancePublishInfo;
private HealthCheckCommonV2 healthCheckCommonV2;
@BeforeEach
void setUp() {
healthCheckCommonV2 = new HealthCheckCommonV2();
when(healthCheckTaskV2.getClient()).thenReturn(ipPortBasedClient);
when(ipPortBasedClient.getInstancePublishInfo(service)).thenReturn(healthCheckInstancePublishInfo);
when(healthCheckInstancePublishInfo.getFailCount()).thenReturn(new AtomicInteger());
}
@Test
void testReEvaluateCheckRt() {
healthCheckCommonV2.reEvaluateCheckRt(1, healthCheckTaskV2, healthParams);
verify(healthParams, times(2)).getMax();
verify(healthParams, times(1)).getMin();
verify(healthParams, times(2)).getFactor();
verify(healthCheckTaskV2).getCheckRtWorst();
verify(healthCheckTaskV2).getCheckRtBest();
verify(healthCheckTaskV2).getCheckRtNormalized();
}
@Test
void testCheckOk() {
healthCheckCommonV2.checkOk(healthCheckTaskV2, service, "test checkOk");
verify(healthCheckTaskV2).getClient();
verify(service).getGroupedServiceName();
verify(ipPortBasedClient).getInstancePublishInfo(service);
verify(healthCheckInstancePublishInfo).isHealthy();
verify(healthCheckInstancePublishInfo).getCluster();
verify(healthCheckInstancePublishInfo).resetFailCount();
verify(healthCheckInstancePublishInfo).finishCheck();
}
@Test
void testCheckFail() {
when(healthCheckInstancePublishInfo.isHealthy()).thenReturn(true);
healthCheckCommonV2.checkFail(healthCheckTaskV2, service, "test checkFail");
verify(healthCheckTaskV2).getClient();
verify(service).getGroupedServiceName();
verify(ipPortBasedClient).getInstancePublishInfo(service);
verify(healthCheckInstancePublishInfo).isHealthy();
verify(healthCheckInstancePublishInfo).getCluster();
verify(healthCheckInstancePublishInfo).resetOkCount();
verify(healthCheckInstancePublishInfo).finishCheck();
}
@Test
void testCheckFailNow() {
when(healthCheckInstancePublishInfo.isHealthy()).thenReturn(true);
healthCheckCommonV2.checkFailNow(healthCheckTaskV2, service, "test checkFailNow");
verify(healthCheckTaskV2).getClient();
verify(service).getGroupedServiceName();
verify(ipPortBasedClient).getInstancePublishInfo(service);
verify(healthCheckInstancePublishInfo).isHealthy();
verify(healthCheckInstancePublishInfo).getCluster();
verify(healthCheckInstancePublishInfo).resetOkCount();
verify(healthCheckInstancePublishInfo).finishCheck();
}
}
| HealthCheckCommonV2Test |
java | apache__camel | components/camel-ssh/src/main/java/org/apache/camel/component/ssh/SshShellOutputStringHelper.java | {
"start": 965,
"end": 3652
} | class ____ {
private SshShellOutputStringHelper() {
// empty const
}
/**
* Returns the string before the given token If this token is repeating, than return all text before the last token
*
* @param text the text
* @param before the token which is expected to be repeated
* @return the text before the last token, or <tt>null</tt> if text does not contain the token
*/
public static String beforeLast(String text, String before) {
if (!text.contains(before)) {
return null;
}
return text.substring(0, text.lastIndexOf(before));
}
/**
* Returns an object before the given last token
*
* @param text the text
* @param beforeLast the last token
* @param mapper a mapping function to convert the string before the token to type T
* @return an Optional describing the result of applying a mapping function to the text before the token.
*/
public static <T> Optional<T> beforeLast(String text, String beforeLast, Function<String, T> mapper) {
String result = beforeLast(text, beforeLast);
if (result == null) {
return Optional.empty();
} else {
return Optional.ofNullable(mapper.apply(result));
}
}
/**
* Returns the string between the given tokens
*
* @param text the text
* @param after is the starting token to skip the text before that.
* @param beforeLast the last token
* @return the text between the tokens, or <tt>null</tt> if text does not contain the tokens
*/
public static String betweenBeforeLast(String text, String after, String beforeLast) {
text = StringHelper.after(text, after);
if (text == null) {
return null;
}
return beforeLast(text, beforeLast);
}
/**
* Returns an object between the given token
*
* @param text the text
* @param after the before last token
* @param before the after token
* @param mapper a mapping function to convert the string between the token to type T
* @return an Optional describing the result of applying a mapping function to the text between the token.
*/
public static <T> Optional<T> betweenBeforeLast(String text, String after, String before, Function<String, T> mapper) {
String result = betweenBeforeLast(text, after, before);
if (result == null) {
return Optional.empty();
} else {
return Optional.ofNullable(mapper.apply(result));
}
}
}
| SshShellOutputStringHelper |
java | micronaut-projects__micronaut-core | inject-java/src/test/groovy/io/micronaut/inject/injectionpoint/SomeBean.java | {
"start": 654,
"end": 824
} | class ____ {
private String name;
public SomeBean(String name) {
this.name = name;
}
public String getName() {
return name;
}
}
| SomeBean |
java | apache__camel | components/camel-github/src/main/java/org/apache/camel/component/github/consumer/PullRequestConsumer.java | {
"start": 1333,
"end": 3883
} | class ____ extends AbstractGitHubConsumer {
private static final transient Logger LOG = LoggerFactory.getLogger(PullRequestConsumer.class);
private PullRequestService pullRequestService;
private int lastOpenPullRequest;
public PullRequestConsumer(GitHubEndpoint endpoint, Processor processor) throws Exception {
super(endpoint, processor);
Registry registry = endpoint.getCamelContext().getRegistry();
Object service = registry.lookupByName(GitHubConstants.GITHUB_PULL_REQUEST_SERVICE);
if (service != null) {
LOG.debug("Using PullRequestService found in registry {}", service.getClass().getCanonicalName());
pullRequestService = (PullRequestService) service;
} else {
pullRequestService = new PullRequestService();
}
initService(pullRequestService);
LOG.info("GitHub PullRequestConsumer: Indexing current pull requests...");
List<PullRequest> pullRequests = pullRequestService.getPullRequests(getRepository(), "open");
if (!pullRequests.isEmpty()) {
lastOpenPullRequest = pullRequests.get(0).getNumber();
}
}
@Override
protected int poll() throws Exception {
List<PullRequest> openPullRequests = pullRequestService.getPullRequests(getRepository(), "open");
// In the end, we want PRs oldest to newest.
ArrayDeque<PullRequest> newPullRequests = new ArrayDeque<>();
for (PullRequest pullRequest : openPullRequests) {
if (pullRequest.getNumber() > lastOpenPullRequest) {
newPullRequests.push(pullRequest);
} else {
break;
}
}
if (!newPullRequests.isEmpty()) {
lastOpenPullRequest = openPullRequests.get(0).getNumber();
}
Queue<Object> exchanges = new ArrayDeque<>();
while (!newPullRequests.isEmpty()) {
PullRequest newPullRequest = newPullRequests.pop();
Exchange e = createExchange(true);
e.getIn().setBody(newPullRequest);
// Required by the producers. Set it here for convenience.
e.getIn().setHeader(GitHubConstants.GITHUB_PULLREQUEST, newPullRequest.getNumber());
if (newPullRequest.getHead() != null) {
e.getIn().setHeader(GitHubConstants.GITHUB_PULLREQUEST_HEAD_COMMIT_SHA, newPullRequest.getHead().getSha());
}
exchanges.add(e);
}
return processBatch(exchanges);
}
}
| PullRequestConsumer |
java | quarkusio__quarkus | extensions/cache/runtime/src/main/java/io/quarkus/cache/DefaultCacheKey.java | {
"start": 293,
"end": 1145
} | class ____ {
private final String cacheName;
/**
* Constructor.
*
* @param cacheName cache name
* @throws NullPointerException if the cache name is {@code null}
*/
public DefaultCacheKey(String cacheName) {
this.cacheName = Objects.requireNonNull(cacheName);
}
@Override
public int hashCode() {
return Objects.hash(cacheName);
}
@Override
public boolean equals(Object obj) {
if (obj == this) {
return true;
}
if (obj instanceof DefaultCacheKey) {
DefaultCacheKey other = (DefaultCacheKey) obj;
return Objects.equals(cacheName, other.cacheName);
}
return false;
}
@Override
public String toString() {
return "DefaultCacheKey[cacheName=" + cacheName + "]";
}
}
| DefaultCacheKey |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/InconsistentCapitalizationTest.java | {
"start": 3582,
"end": 3734
} | class ____ {
Object aB;
Nested2(Object aa) {
DoesntConflictWithNested.this.aa = aa;
}
}
}
}
static | Nested2 |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/state/changelog/SequenceNumberRange.java | {
"start": 1819,
"end": 2943
} | class ____ implements SequenceNumberRange {
private final GenericSequenceNumber from;
private final GenericSequenceNumber to;
private final long size;
public GenericSequenceNumberRange(GenericSequenceNumber from, GenericSequenceNumber to) {
this.from = from;
this.to = to;
this.size = to.number - from.number;
Preconditions.checkArgument(size >= 0);
}
@Override
public SequenceNumber from() {
return from;
}
@Override
public SequenceNumber to() {
return to;
}
@Override
public long size() {
return size;
}
@Override
public boolean contains(SequenceNumber sqn) {
return from.compareTo(sqn) <= 0 && sqn.compareTo(to) < 0;
}
@Override
public boolean isEmpty() {
return size == 0;
}
@Override
public String toString() {
return String.format("from=%s, to=%s, size=%d", from, to, size);
}
}
}
| GenericSequenceNumberRange |
java | quarkusio__quarkus | extensions/smallrye-health/runtime/src/main/java/io/quarkus/smallrye/health/runtime/ShutdownReadinessCheck.java | {
"start": 271,
"end": 732
} | class ____ implements HealthCheck {
protected static final String GRACEFUL_SHUTDOWN = "Graceful Shutdown";
private volatile boolean shuttingDown;
public void shutdown() {
shuttingDown = true;
}
@Override
public HealthCheckResponse call() {
if (shuttingDown) {
return HealthCheckResponse.down(GRACEFUL_SHUTDOWN);
}
return HealthCheckResponse.up(GRACEFUL_SHUTDOWN);
}
}
| ShutdownReadinessCheck |
java | spring-projects__spring-framework | spring-web/src/test/java/org/springframework/web/jsf/DelegatingPhaseListenerTests.java | {
"start": 2859,
"end": 3223
} | class ____ implements PhaseListener {
boolean beforeCalled = false;
boolean afterCalled = false;
@Override
public PhaseId getPhaseId() {
return PhaseId.ANY_PHASE;
}
@Override
public void beforePhase(PhaseEvent arg0) {
beforeCalled = true;
}
@Override
public void afterPhase(PhaseEvent arg0) {
afterCalled = true;
}
}
}
| TestListener |
java | apache__spark | sql/catalyst/src/main/java/org/apache/spark/sql/connector/catalog/NamespaceChange.java | {
"start": 1303,
"end": 2249
} | interface ____ {
/**
* Create a NamespaceChange for setting a namespace property.
* <p>
* If the property already exists, it will be replaced with the new value.
*
* @param property the property name
* @param value the new property value
* @return a NamespaceChange for the addition
*/
static NamespaceChange setProperty(String property, String value) {
return new SetProperty(property, value);
}
/**
* Create a NamespaceChange for removing a namespace property.
* <p>
* If the property does not exist, the change will succeed.
*
* @param property the property name
* @return a NamespaceChange for the addition
*/
static NamespaceChange removeProperty(String property) {
return new RemoveProperty(property);
}
/**
* A NamespaceChange to set a namespace property.
* <p>
* If the property already exists, it must be replaced with the new value.
*/
final | NamespaceChange |
java | spring-projects__spring-boot | module/spring-boot-actuator-autoconfigure/src/main/java/org/springframework/boot/actuate/autoconfigure/startup/StartupEndpointAutoConfiguration.java | {
"start": 2580,
"end": 3471
} | class ____ extends SpringBootCondition {
@Override
public ConditionOutcome getMatchOutcome(ConditionContext context, AnnotatedTypeMetadata metadata) {
ConditionMessage.Builder message = ConditionMessage.forCondition("ApplicationStartup");
ConfigurableListableBeanFactory beanFactory = context.getBeanFactory();
Assert.state(beanFactory != null, "'beanFactory' must not be null");
ApplicationStartup applicationStartup = beanFactory.getApplicationStartup();
if (applicationStartup instanceof BufferingApplicationStartup) {
return ConditionOutcome
.match(message.because("configured applicationStartup is of type BufferingApplicationStartup."));
}
return ConditionOutcome.noMatch(message.because("configured applicationStartup is of type "
+ applicationStartup.getClass() + ", expected BufferingApplicationStartup."));
}
}
}
| ApplicationStartupCondition |
java | apache__camel | components/camel-spring-parent/camel-spring-xml/src/test/java/org/apache/camel/component/rest/SpringFromRestGetOnExceptionTest.java | {
"start": 983,
"end": 1269
} | class ____ extends FromRestGetOnExceptionTest {
@Override
protected CamelContext createCamelContext() throws Exception {
return createSpringCamelContext(this, "org/apache/camel/component/rest/SpringFromRestGetOnExceptionTest.xml");
}
}
| SpringFromRestGetOnExceptionTest |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/SearchExtBuilder.java | {
"start": 1825,
"end": 2141
} | class ____ implements VersionedNamedWriteable, ToXContentFragment {
@Override
public abstract int hashCode();
@Override
public abstract boolean equals(Object obj);
@Override
public TransportVersion getMinimalSupportedVersion() {
return TransportVersion.zero();
}
}
| SearchExtBuilder |
java | bumptech__glide | library/test/src/test/java/com/bumptech/glide/signature/EmptySignatureTest.java | {
"start": 302,
"end": 652
} | class ____ {
@Rule public final KeyTester keyTester = new KeyTester();
@Test
public void testEquals() {
keyTester
.addEquivalenceGroup(EmptySignature.obtain(), EmptySignature.obtain())
.addEquivalenceGroup(mock(Key.class))
.addEmptyDigestRegressionTest(EmptySignature.obtain())
.test();
}
}
| EmptySignatureTest |
java | alibaba__nacos | core/src/main/java/com/alibaba/nacos/core/controller/ServerLoaderController.java | {
"start": 16821,
"end": 18367
} | class ____ {
String address;
Map<String, String> metric = new HashMap<>();
/**
* Getter method for property <tt>address</tt>.
*
* @return property value of address
*/
public String getAddress() {
return address;
}
/**
* Setter method for property <tt>address</tt>.
*
* @param address value to be assigned to property address
*/
public void setAddress(String address) {
this.address = address;
}
/**
* Getter method for property <tt>metric</tt>.
*
* @return property value of metric
*/
public Map<String, String> getMetric() {
return metric;
}
/**
* Setter method for property <tt>metric</tt>.
*
* @param metric value to be assigned to property metric
*/
public void setMetric(Map<String, String> metric) {
this.metric = metric;
}
}
private static String getRemoteIp(HttpServletRequest request) {
String xForwardedFor = request.getHeader(X_FORWARDED_FOR);
if (!StringUtils.isBlank(xForwardedFor)) {
return xForwardedFor.split(X_FORWARDED_FOR_SPLIT_SYMBOL)[0].trim();
}
String nginxHeader = request.getHeader(X_REAL_IP);
return StringUtils.isBlank(nginxHeader) ? request.getRemoteAddr() : nginxHeader;
}
}
| ServerLoaderMetrics |
java | google__guava | android/guava-tests/test/com/google/common/cache/LocalCacheTest.java | {
"start": 118320,
"end": 118657
} | class ____<K, V> implements Weigher<K, V>, Serializable {
@Override
public int weigh(K key, V value) {
return 42;
}
@Override
public int hashCode() {
return 42;
}
@Override
public boolean equals(@Nullable Object o) {
return o instanceof SerializableWeigher;
}
}
}
| SerializableWeigher |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Authentication.java | {
"start": 7386,
"end": 57206
} | class ____ implements ToXContentObject {
    private static final Logger logger = LogManager.getLogger(Authentication.class);
    // Wire version from which the authentication type and metadata are serialized explicitly.
    private static final TransportVersion VERSION_AUTHENTICATION_TYPE = TransportVersion.fromId(6_07_00_99);
    public static final TransportVersion VERSION_API_KEY_ROLES_AS_BYTES = TransportVersions.V_7_9_0;
    public static final TransportVersion VERSION_REALM_DOMAINS = TransportVersions.V_8_2_0;
    // From this version on, some metadata values are serialized as typed objects rather than generic values.
    public static final TransportVersion VERSION_METADATA_BEYOND_GENERIC_MAP = TransportVersions.V_8_8_0;
    private static final TransportVersion SECURITY_CLOUD_API_KEY_REALM_AND_TYPE = TransportVersion.fromName(
        "security_cloud_api_key_realm_and_type"
    );
    // How the credentials were verified (realm, token, api_key, anonymous, internal).
    private final AuthenticationType type;
    // The subject that presented the credentials.
    private final Subject authenticatingSubject;
    // The subject the authentication acts as; differs from authenticatingSubject only for run-as.
    private final Subject effectiveSubject;
    /**
     * Builds an authentication where the authenticating subject is also the effective subject (no run-as).
     */
    private Authentication(Subject subject, AuthenticationType type) {
        this(subject, subject, type);
    }
    /**
     * Builds an authentication, possibly with distinct effective and authenticating subjects (run-as).
     *
     * @param effectiveSubject the subject the authentication acts as
     * @param authenticatingSubject the subject that presented the credentials
     * @param type how the credentials were verified
     */
    private Authentication(Subject effectiveSubject, Subject authenticatingSubject, AuthenticationType type) {
        this.effectiveSubject = Objects.requireNonNull(effectiveSubject, "effective subject cannot be null");
        this.authenticatingSubject = Objects.requireNonNull(authenticatingSubject, "authenticating subject cannot be null");
        this.type = Objects.requireNonNull(type, "authentication type cannot be null");
        if (Assertions.ENABLED) {
            // Validate internal invariants eagerly when assertions are enabled (tests).
            checkConsistency();
        }
    }
    /**
     * Reads an authentication from the wire. The layout is: effective user (plus an optional inner
     * authenticating user for run-as), the authenticating realm, an optional lookup realm, and — since
     * {@code VERSION_AUTHENTICATION_TYPE} — the authentication type and metadata. The read order must
     * mirror the write order in {@code doWriteTo}.
     */
    public Authentication(StreamInput in) throws IOException {
        // Read the user(s)
        final User outerUser = AuthenticationSerializationHelper.readUserWithoutTrailingBoolean(in);
        final boolean hasInnerUser;
        if (outerUser instanceof InternalUser) {
            // Internal users never participate in run-as, so no inner-user flag is serialized for them.
            hasInnerUser = false;
        } else {
            hasInnerUser = in.readBoolean();
        }
        final User innerUser;
        if (hasInnerUser) {
            innerUser = AuthenticationSerializationHelper.readUserFrom(in);
            assert false == innerUser instanceof InternalUser : "internal users cannot participate in run-as [" + innerUser + "]";
        } else {
            innerUser = null;
        }
        final RealmRef authenticatedBy = new RealmRef(in);
        final RealmRef lookedUpBy;
        if (in.readBoolean()) {
            lookedUpBy = new RealmRef(in);
        } else {
            lookedUpBy = null;
        }
        // The valid combinations for innerUser and lookedUpBy are:
        // 1. InnerUser == null -> no run-as -> lookedUpBy must be null as well
        // 2. innerUser != null -> lookedUp by can be either null (failed run-as lookup) or non-null (successful lookup)
        // 3. lookedUpBy == null -> innerUser can be either null (no run-as) or non-null (failed run-as lookup)
        // 4. lookedUpBy != null -> successful run-as -> innerUser must be NOT null
        assert innerUser != null || lookedUpBy == null : "Authentication has no inner-user, but looked-up-by is [" + lookedUpBy + "]";
        final TransportVersion version = in.getTransportVersion();
        final Map<String, Object> metadata;
        if (version.onOrAfter(VERSION_AUTHENTICATION_TYPE)) {
            type = AuthenticationType.values()[in.readVInt()];
            metadata = readMetadata(in);
        } else {
            // Older nodes do not serialize type/metadata; realm authentication with empty metadata is implied.
            type = AuthenticationType.REALM;
            metadata = Map.of();
        }
        if (innerUser != null) {
            authenticatingSubject = new Subject(
                copyUserWithRolesRemovedForLegacyApiKeys(version, innerUser),
                authenticatedBy,
                version,
                metadata
            );
            // The lookup user for run-as currently doesn't have authentication metadata associated with them because
            // lookupUser only returns the User object. The lookup user for authorization delegation does have
            // authentication metadata, but the realm does not expose this difference between authenticatingUser and
            // delegateUser so effectively this is handled together with the authenticatingSubject not effectiveSubject.
            // Note: we do not call copyUserWithRolesRemovedForLegacyApiKeys here because an API key is never the target of run-as
            effectiveSubject = new Subject(outerUser, lookedUpBy, version, Map.of());
        } else {
            authenticatingSubject = effectiveSubject = new Subject(
                copyUserWithRolesRemovedForLegacyApiKeys(version, outerUser),
                authenticatedBy,
                version,
                metadata
            );
        }
        if (Assertions.ENABLED) {
            checkConsistency();
        }
    }
private User copyUserWithRolesRemovedForLegacyApiKeys(TransportVersion version, User user) {
// API keys prior to 7.8 had synthetic role names. Strip these out to maintain the invariant that API keys don't have role names
if (type == AuthenticationType.API_KEY && version.onOrBefore(TransportVersions.V_7_8_0) && user.roles().length > 0) {
logger.debug(
"Stripping [{}] roles from API key user [{}] for legacy version [{}]",
user.roles().length,
user.principal(),
version
);
return new User(user.principal(), EMPTY_ARRAY, user.fullName(), user.email(), user.metadata(), user.enabled());
} else {
return user;
}
}
    /**
     * Get the {@link Subject} that performs the actual authentication. This normally means it provided the credentials.
     */
    public Subject getAuthenticatingSubject() {
        return authenticatingSubject;
    }
    /**
     * Get the {@link Subject} that the authentication effectively represents. It may not be the authenticating subject
     * because the authentication subject can run-as another subject.
     */
    public Subject getEffectiveSubject() {
        return effectiveSubject;
    }
    /**
     * How the credentials were verified (realm, token, API key, anonymous or internal).
     */
    public AuthenticationType getAuthenticationType() {
        return type;
    }
    /**
     * Whether the authentication contains a subject run-as another subject. That is, the authentication subject
     * is different from the effective subject.
     */
    public boolean isRunAs() {
        return authenticatingSubject != effectiveSubject;
    }
    /**
     * Whether this is a run-as whose target user could not be looked up (signalled by a null lookup realm).
     * Such an authentication is rejected at authorization time.
     */
    public boolean isFailedRunAs() {
        return isRunAs() && effectiveSubject.getRealm() == null;
    }
    /**
     * Returns a new {@code Authentication}, like this one, but which is compatible with older version nodes.
     * This is commonly employed when the {@code Authentication} is serialized across cluster nodes with mixed versions.
     *
     * @throws IllegalArgumentException if this authentication cannot be represented at the older version
     *         (cross cluster access or cloud API key authentications that predate their wire support)
     */
    public Authentication maybeRewriteForOlderVersion(TransportVersion olderVersion) {
        // TODO how can this not be true
        // assert olderVersion.onOrBefore(getVersion());
        // cross cluster access introduced a new synthetic realm and subject type; these cannot be parsed by older versions, so rewriting is
        // not possible
        if (isCrossClusterAccess() && olderVersion.before(TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY)) {
            throw new IllegalArgumentException(
                "versions of Elasticsearch before ["
                    + TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY.toReleaseVersion()
                    + "] can't handle cross cluster access authentication and attempted to rewrite for ["
                    + olderVersion.toReleaseVersion()
                    + "]"
            );
        }
        if (isCloudApiKey() && olderVersion.supports(SECURITY_CLOUD_API_KEY_REALM_AND_TYPE) == false) {
            throw new IllegalArgumentException(
                "versions of Elasticsearch before ["
                    + SECURITY_CLOUD_API_KEY_REALM_AND_TYPE.toReleaseVersion()
                    + "] can't handle cloud API key authentication and attempted to rewrite for ["
                    + olderVersion.toReleaseVersion()
                    + "]"
            );
        }
        // Metadata may embed role descriptors or a nested authentication that themselves need rewriting.
        final Map<String, Object> newMetadata = maybeRewriteMetadata(olderVersion, this);
        final Authentication newAuthentication;
        if (isRunAs()) {
            // The lookup user for run-as currently doesn't have authentication metadata associated with them because
            // lookupUser only returns the User object. The lookup user for authorization delegation does have
            // authentication metadata, but the realm does not expose this difference between authenticatingUser and
            // delegateUser so effectively this is handled together with the authenticatingSubject not effectiveSubject.
            newAuthentication = new Authentication(
                new Subject(
                    effectiveSubject.getUser(),
                    maybeRewriteRealmRef(olderVersion, effectiveSubject.getRealm()),
                    olderVersion,
                    effectiveSubject.getMetadata()
                ),
                new Subject(
                    authenticatingSubject.getUser(),
                    maybeRewriteRealmRef(olderVersion, authenticatingSubject.getRealm()),
                    olderVersion,
                    newMetadata
                ),
                type
            );
        } else {
            newAuthentication = new Authentication(
                new Subject(
                    authenticatingSubject.getUser(),
                    maybeRewriteRealmRef(olderVersion, authenticatingSubject.getRealm()),
                    olderVersion,
                    newMetadata
                ),
                type
            );
        }
        return newAuthentication;
    }
    // Rewrites authentication metadata (API key role descriptors, embedded cross-cluster authentication)
    // into a shape the given older transport version can deserialize; other metadata passes through unchanged.
    private static Map<String, Object> maybeRewriteMetadata(TransportVersion olderVersion, Authentication authentication) {
        try {
            if (authentication.isAuthenticatedAsApiKey()) {
                return maybeRewriteMetadataForApiKeyRoleDescriptors(olderVersion, authentication);
            } else if (authentication.isCrossClusterAccess()) {
                return maybeRewriteMetadataForCrossClusterAccessAuthentication(olderVersion, authentication);
            } else {
                return authentication.getAuthenticatingSubject().getMetadata();
            }
        } catch (Exception e) {
            // CCS workflows may swallow the exception message making this difficult to troubleshoot, so we explicitly log and re-throw
            // here. It may result in duplicate logs, so we only log the message at warn level.
            if (logger.isDebugEnabled()) {
                logger.debug("Un-expected exception thrown while rewriting metadata. This is likely a bug.", e);
            } else {
                logger.warn("Un-expected exception thrown while rewriting metadata. This is likely a bug [" + e.getMessage() + "]");
            }
            throw e;
        }
    }
/**
* Creates a copy of this Authentication instance, but only with metadata entries specified by `fieldsToKeep`.
* All other entries are removed from the copy's metadata.
*/
public Authentication copyWithFilteredMetadataFields(final Set<String> fieldsToKeep) {
Objects.requireNonNull(fieldsToKeep);
if (fieldsToKeep.isEmpty()) {
return copyWithEmptyMetadata();
}
final Map<String, Object> metadataCopy = new HashMap<>(authenticatingSubject.getMetadata());
final boolean metadataChanged = metadataCopy.keySet().retainAll(fieldsToKeep);
if (logger.isTraceEnabled() && metadataChanged) {
logger.trace(
"Authentication metadata [{}] for subject [{}] contains fields other than [{}]. These will be removed in the copy.",
authenticatingSubject.getMetadata().keySet(),
authenticatingSubject.getUser().principal(),
fieldsToKeep
);
}
return copyWithMetadata(Collections.unmodifiableMap(metadataCopy));
}
    /**
     * Returns a copy of this authentication whose authenticating subject carries no metadata at all.
     */
    public Authentication copyWithEmptyMetadata() {
        if (logger.isTraceEnabled() && false == authenticatingSubject.getMetadata().isEmpty()) {
            logger.trace(
                "Authentication metadata [{}] for subject [{}] is not empty. All fields will be removed in the copy.",
                authenticatingSubject.getMetadata().keySet(),
                authenticatingSubject.getUser().principal()
            );
        }
        return copyWithMetadata(Collections.emptyMap());
    }
private Authentication copyWithMetadata(final Map<String, Object> newMetadata) {
Objects.requireNonNull(newMetadata);
return isRunAs()
? new Authentication(
effectiveSubject,
new Subject(
authenticatingSubject.getUser(),
authenticatingSubject.getRealm(),
authenticatingSubject.getTransportVersion(),
newMetadata
),
type
)
: new Authentication(
new Subject(
authenticatingSubject.getUser(),
authenticatingSubject.getRealm(),
authenticatingSubject.getTransportVersion(),
newMetadata
),
type
);
}
    /**
     * Returns a new {@code Authentication} that reflects a "run as another user" action under the current {@code Authentication}.
     * The security {@code RealmRef#Domain} of the resulting {@code Authentication} is that of the run-as user's realm.
     *
     * @param runAs The user to be impersonated
     * @param lookupRealmRef The realm where the impersonated user is looked up from. It can be null if the user does
     *                       not exist. The null lookup realm is used to indicate the lookup failure which will be rejected
     *                       at authorization time.
     */
    public Authentication runAs(User runAs, @Nullable RealmRef lookupRealmRef) {
        // Preconditions: run-as must be permitted for this authentication, must not target the anonymous
        // user, and the lookup realm must be a real (non-synthetic) realm.
        assert supportsRunAs(null);
        assert false == runAs instanceof AnonymousUser;
        assert false == hasSyntheticRealmNameOrType(lookupRealmRef) : "should not use synthetic realm name/type for lookup realms";
        Objects.requireNonNull(runAs);
        return new Authentication(
            new Subject(runAs, lookupRealmRef, getEffectiveSubject().getTransportVersion(), Map.of()),
            authenticatingSubject,
            type
        );
    }
/** Returns a new {@code Authentication} for tokens created by the current {@code Authentication}, which is used when
* authenticating using the token credential.
*/
public Authentication token() {
assert false == isAuthenticatedInternally();
assert false == isServiceAccount();
assert false == isCrossClusterAccess();
final Authentication newTokenAuthentication = new Authentication(effectiveSubject, authenticatingSubject, AuthenticationType.TOKEN);
return newTokenAuthentication;
}
    /**
     * The final list of roles a user has should include all roles granted to the anonymous user when
     * 1. Anonymous access is enabled
     * 2. The user itself is not the anonymous user
     * 3. The authentication is not an API key or service account
     *
     * Depending on whether the above criteria is satisfied, the method may either return a new
     * authentication object incorporating anonymous roles or the same authentication object (if anonymous
     * roles are not applicable)
     *
     * NOTE this method is an artifact of how anonymous roles are resolved today on each node as opposed to
     * just on the coordinating node. Whether this behaviour should be changed is an ongoing discussion.
     * Therefore, using this method in more places other than its current usage requires careful consideration.
     */
    public Authentication maybeAddAnonymousRoles(@Nullable AnonymousUser anonymousUser) {
        final boolean shouldAddAnonymousRoleNames = anonymousUser != null
            && anonymousUser.enabled()
            && false == anonymousUser.equals(getEffectiveSubject().getUser())
            && false == getEffectiveSubject().getUser() instanceof InternalUser
            && false == isApiKey()
            && false == isCrossClusterAccess()
            && false == isServiceAccount();
        if (false == shouldAddAnonymousRoleNames) {
            return this;
        }
        // TODO: should we validate enable status and length of role names on instantiation time of anonymousUser?
        if (anonymousUser.roles().length == 0) {
            throw new IllegalStateException("anonymous is only enabled when the anonymous user has roles");
        }
        // Append the anonymous roles to the effective user's own roles; the rest of the subject is unchanged.
        final String[] allRoleNames = ArrayUtils.concat(getEffectiveSubject().getUser().roles(), anonymousUser.roles());
        if (isRunAs()) {
            final User user = effectiveSubject.getUser();
            return new Authentication(
                new Subject(
                    new User(user.principal(), allRoleNames, user.fullName(), user.email(), user.metadata(), user.enabled()),
                    effectiveSubject.getRealm(),
                    effectiveSubject.getTransportVersion(),
                    effectiveSubject.getMetadata()
                ),
                authenticatingSubject,
                type
            );
        } else {
            final User user = authenticatingSubject.getUser();
            return new Authentication(
                new Subject(
                    new User(user.principal(), allRoleNames, user.fullName(), user.email(), user.metadata(), user.enabled()),
                    authenticatingSubject.getRealm(),
                    authenticatingSubject.getTransportVersion(),
                    authenticatingSubject.getMetadata()
                ),
                type
            );
        }
    }
    // Package private for tests
    /**
     * Returns {@code true} if the effective user belongs to a realm under a domain.
     */
    boolean isAssignedToDomain() {
        return getDomain() != null;
    }
    // Package private for tests
    /**
     * Returns the {@link RealmDomain} that the effective user belongs to.
     * A user belongs to a realm which in turn belongs to a domain.
     *
     * The same username can be authenticated by different realms (e.g. with different credential types),
     * but resources created across realms cannot be accessed unless the realms are also part of the same domain.
     */
    @Nullable
    RealmDomain getDomain() {
        if (isFailedRunAs()) {
            // A failed run-as has no effective realm, hence no domain.
            return null;
        }
        return getEffectiveSubject().getRealm().getDomain();
    }
    /**
     * Whether the authenticating user is an API key, including a simple API key or a token created by an API key.
     */
    public boolean isAuthenticatedAsApiKey() {
        return authenticatingSubject.getType() == Subject.Type.API_KEY;
    }
    // TODO: this is not entirely accurate if anonymous user can create a token
    /** Whether the credentials were verified via the anonymous access mechanism. */
    private boolean isAuthenticatedAnonymously() {
        return AuthenticationType.ANONYMOUS.equals(getAuthenticationType());
    }
    /** Whether this authentication was produced internally (node-to-node, not by user credentials). */
    private boolean isAuthenticatedInternally() {
        return AuthenticationType.INTERNAL.equals(getAuthenticationType());
    }
    /**
     * Authenticate with a service account and no run-as
     */
    public boolean isServiceAccount() {
        return effectiveSubject.getType() == Subject.Type.SERVICE_ACCOUNT;
    }
    /**
     * Whether the effective user is an API key, this including a simple API key authentication
     * or a token created by the API key.
     */
    public boolean isApiKey() {
        return effectiveSubject.getType() == Subject.Type.API_KEY;
    }
    /** Whether the effective subject is a cloud API key. */
    public boolean isCloudApiKey() {
        return effectiveSubject.getType() == Subject.Type.CLOUD_API_KEY;
    }
    /** Whether the effective subject is a cross-cluster access subject (RCS 2.0). */
    public boolean isCrossClusterAccess() {
        return effectiveSubject.getType() == Subject.Type.CROSS_CLUSTER_ACCESS;
    }
    /**
     * Whether the authentication can run-as another user
     *
     * @param anonymousUser the configured anonymous user, or null; an anonymous (or anonymous-equal)
     *                      effective user is never allowed to run-as
     */
    public boolean supportsRunAs(@Nullable AnonymousUser anonymousUser) {
        // Chained run-as not allowed
        if (isRunAs()) {
            return false;
        }
        // We may allow service account to run-as in the future, but for now no service account requires it
        if (isServiceAccount()) {
            return false;
        }
        // Real run-as for cross cluster access could happen on the querying cluster side, but not on the fulfilling cluster. Since the
        // authentication instance corresponds to the fulfilling-cluster-side view, run-as is not supported
        if (isCrossClusterAccess()) {
            return false;
        }
        // We may allow cloud API keys to run-as in the future, but for now there is no requirement
        if (isCloudApiKey()) {
            return false;
        }
        // There is no reason for internal users to run-as. This check prevents either internal user itself
        // or a token created for it (though no such thing in current code) to run-as.
        if (getEffectiveSubject().getUser() instanceof InternalUser) {
            return false;
        }
        // Anonymous user or its token cannot run-as
        // There is no perfect way to determine an anonymous user if we take custom realms into consideration
        // 1. A custom realm can return a user object that can pass `equals(anonymousUser)` check
        // (this is the existing check used elsewhere)
        // 2. A custom realm can declare its type and name to be __anonymous
        //
        // This problem is at least partly due to we don't have special serialisation for the AnonymousUser class.
        // As a result, it is serialised just as a normal user. At deserializing time, it is impossible to reliably
        // tell the difference. This is what happens when AnonymousUser creates a token.
        // Also, if anonymous access is disabled or anonymous username, roles are changed after the token is created.
        // Should we still consider the token being created by an anonymous user which is now different from the new
        // anonymous user?
        if (getEffectiveSubject().getUser().equals(anonymousUser)) {
            assert ANONYMOUS_REALM_TYPE.equals(getAuthenticatingSubject().getRealm().getType())
                && ANONYMOUS_REALM_NAME.equals(getAuthenticatingSubject().getRealm().getName());
            return false;
        }
        // Run-as is supported for authentication with realm, api_key or token.
        if (AuthenticationType.REALM == getAuthenticationType()
            || AuthenticationType.API_KEY == getAuthenticationType()
            || AuthenticationType.TOKEN == getAuthenticationType()) {
            return true;
        }
        return false;
    }
    /**
     * Writes the authentication to the context. There must not be an existing authentication in the context and if there is an
     * exception will be thrown.
     * NOTE(review): this javadoc previously said {@code IllegalStateException}, while the signature declares
     * {@code IllegalArgumentException} — confirm which type the serializer actually throws.
     */
    public void writeToContext(ThreadContext ctx) throws IOException, IllegalArgumentException {
        new AuthenticationContextSerializer().writeToContext(this, ctx);
    }
    /** Serializes this authentication to a base64 string, e.g. for storage in a thread-context header. */
    public String encode() throws IOException {
        return doEncode(effectiveSubject, authenticatingSubject, type);
    }
    // Package private for testing
    /** Serializes the given subjects/type at the effective subject's transport version, then base64-encodes. */
    static String doEncode(Subject effectiveSubject, Subject authenticatingSubject, AuthenticationType type) throws IOException {
        BytesStreamOutput output = new BytesStreamOutput();
        output.setTransportVersion(effectiveSubject.getTransportVersion());
        // The version is written as a prefix so the decoder knows how to read the rest.
        TransportVersion.writeVersion(effectiveSubject.getTransportVersion(), output);
        doWriteTo(effectiveSubject, authenticatingSubject, type, output);
        return Base64.getEncoder().encodeToString(BytesReference.toBytes(output.bytes()));
    }
    /** Writes this authentication to the given stream (see {@code doWriteTo} for the layout). */
    public void writeTo(StreamOutput out) throws IOException {
        doWriteTo(effectiveSubject, authenticatingSubject, type, out);
    }
    // Serializes an authentication to the wire. The write order must mirror the read order in the
    // Authentication(StreamInput) constructor: user(s), authenticating realm, optional lookup realm,
    // then (for new-enough versions) type and metadata.
    private static void doWriteTo(Subject effectiveSubject, Subject authenticatingSubject, AuthenticationType type, StreamOutput out)
        throws IOException {
        // cross cluster access introduced a new synthetic realm and subject type; these cannot be parsed by older versions, so
        // we should not send them across the wire to older nodes
        final boolean isCrossClusterAccess = effectiveSubject.getType() == Subject.Type.CROSS_CLUSTER_ACCESS;
        if (isCrossClusterAccess && out.getTransportVersion().before(TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY)) {
            throw new IllegalArgumentException(
                "versions of Elasticsearch before ["
                    + TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY.toReleaseVersion()
                    + "] can't handle cross cluster access authentication and attempted to send to ["
                    + out.getTransportVersion().toReleaseVersion()
                    + "]"
            );
        }
        if (effectiveSubject.getType() == Subject.Type.CLOUD_API_KEY
            && out.getTransportVersion().supports(SECURITY_CLOUD_API_KEY_REALM_AND_TYPE) == false) {
            throw new IllegalArgumentException(
                "versions of Elasticsearch before ["
                    + SECURITY_CLOUD_API_KEY_REALM_AND_TYPE.toReleaseVersion()
                    + "] can't handle cloud API key authentication and attempted to send to ["
                    + out.getTransportVersion().toReleaseVersion()
                    + "]"
            );
        }
        final boolean isRunAs = authenticatingSubject != effectiveSubject;
        if (isRunAs) {
            final User outerUser = effectiveSubject.getUser();
            final User innerUser = authenticatingSubject.getUser();
            assert false == outerUser instanceof InternalUser && false == innerUser instanceof InternalUser
                : "internal users cannot participate in run-as (outer=[" + outerUser + "] inner=[" + innerUser + "])";
            // Effective (outer) user first, then a true flag and the authenticating (inner) user,
            // then a false flag terminating the user list.
            User.writeUser(outerUser, out);
            out.writeBoolean(true);
            User.writeUser(innerUser, out);
            out.writeBoolean(false);
        } else {
            final User user = effectiveSubject.getUser();
            AuthenticationSerializationHelper.writeUserTo(user, out);
        }
        authenticatingSubject.getRealm().writeTo(out);
        final RealmRef lookedUpBy = isRunAs ? effectiveSubject.getRealm() : null;
        if (lookedUpBy != null) {
            out.writeBoolean(true);
            lookedUpBy.writeTo(out);
        } else {
            out.writeBoolean(false);
        }
        final Map<String, Object> metadata = authenticatingSubject.getMetadata();
        if (out.getTransportVersion().onOrAfter(VERSION_AUTHENTICATION_TYPE)) {
            out.writeVInt(type.ordinal());
            writeMetadata(out, metadata);
        } else {
            assert type == AuthenticationType.REALM && metadata.isEmpty()
                : Strings.format(
                    "authentication with version [%s] must have authentication type %s and empty metadata, but got [%s] and [%s]",
                    out.getTransportVersion(),
                    AuthenticationType.REALM,
                    type,
                    metadata
                );
        }
    }
    /**
     * Checks whether the current authentication, which can be for a user or for an API Key, can access the resources
     * (e.g. search scrolls and async search results) created (owned) by the passed in authentication.
     *
     * The rules are as follows:
     * * a resource created by an API Key can only be accessed by the exact same key; the creator user, its tokens,
     * or any of its other keys cannot access it.
     * * a resource created by a user authenticated by a realm, or any of its tokens, can be accessed by the same
     * username authenticated by the same realm or by other realms from the same security domain (at the time of the
     * access), or any of its tokens; realms are considered the same if they have the same type and name (except for
     * file and native realms, for which only the type is considered, the name is irrelevant), see also
     * <a href="https://www.elastic.co/guide/en/elasticsearch/reference/master/security-limitations.html">
     * security limitations</a>
     */
    public boolean canAccessResourcesOf(Authentication resourceCreatorAuthentication) {
        // if we introduce new authentication types in the future, it is likely that we'll need to revisit this method
        assert EnumSet.of(
            Authentication.AuthenticationType.REALM,
            Authentication.AuthenticationType.API_KEY,
            Authentication.AuthenticationType.TOKEN,
            Authentication.AuthenticationType.ANONYMOUS,
            Authentication.AuthenticationType.INTERNAL
        ).containsAll(EnumSet.of(getAuthenticationType(), resourceCreatorAuthentication.getAuthenticationType()))
            : "cross AuthenticationType comparison for canAccessResourcesOf is not applicable for: "
                + EnumSet.of(getAuthenticationType(), resourceCreatorAuthentication.getAuthenticationType());
        // Ownership is decided purely on the effective subjects (run-as target, not the authenticating user).
        final Subject mySubject = getEffectiveSubject();
        final Subject creatorSubject = resourceCreatorAuthentication.getEffectiveSubject();
        return mySubject.canAccessResourcesOf(creatorSubject);
    }
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Authentication that = (Authentication) o;
return type == that.type
&& authenticatingSubject.equals(that.authenticatingSubject)
&& effectiveSubject.equals(that.effectiveSubject);
}
    @Override
    public int hashCode() {
        // Consistent with equals: type plus both subjects.
        return Objects.hash(type, authenticatingSubject, effectiveSubject);
    }
    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        // Wraps the fragment rendering in a single JSON object.
        builder.startObject();
        toXContentFragment(builder);
        return builder.endObject();
    }
    /**
     * Generates XContent without the start/end object. Renders the effective user's details, the
     * authentication and lookup realms, the authentication type, and (for API keys) an api_key object.
     */
    public void toXContentFragment(XContentBuilder builder) throws IOException {
        final User user = effectiveSubject.getUser();
        final Map<String, Object> metadata = getAuthenticatingSubject().getMetadata();
        builder.field(User.Fields.USERNAME.getPreferredName(), user.principal());
        builder.array(User.Fields.ROLES.getPreferredName(), user.roles());
        builder.field(User.Fields.FULL_NAME.getPreferredName(), user.fullName());
        builder.field(User.Fields.EMAIL.getPreferredName(), user.email());
        if (isServiceAccount()) {
            final String tokenName = (String) metadata.get(ServiceAccountSettings.TOKEN_NAME_FIELD);
            assert tokenName != null : "token name cannot be null";
            final String tokenSource = (String) metadata.get(ServiceAccountSettings.TOKEN_SOURCE_FIELD);
            assert tokenSource != null : "token source cannot be null";
            builder.field(
                User.Fields.TOKEN.getPreferredName(),
                Map.of("name", tokenName, "type", ServiceAccountSettings.REALM_TYPE + "_" + tokenSource)
            );
        }
        builder.field(User.Fields.METADATA.getPreferredName(), user.metadata());
        builder.field(User.Fields.ENABLED.getPreferredName(), user.enabled());
        builder.startObject(User.Fields.AUTHENTICATION_REALM.getPreferredName());
        builder.field(User.Fields.REALM_NAME.getPreferredName(), getAuthenticatingSubject().getRealm().getName());
        builder.field(User.Fields.REALM_TYPE.getPreferredName(), getAuthenticatingSubject().getRealm().getType());
        // domain name is generally ambiguous, because it can change during the lifetime of the authentication,
        // but it is good enough for display purposes (including auditing)
        if (getAuthenticatingSubject().getRealm().getDomain() != null) {
            builder.field(User.Fields.REALM_DOMAIN.getPreferredName(), getAuthenticatingSubject().getRealm().getDomain().name());
        }
        builder.endObject();
        builder.startObject(User.Fields.LOOKUP_REALM.getPreferredName());
        // Without run-as, the lookup realm section repeats the authenticating realm.
        final RealmRef lookedUpBy = isRunAs() ? getEffectiveSubject().getRealm() : null;
        if (lookedUpBy != null) {
            builder.field(User.Fields.REALM_NAME.getPreferredName(), lookedUpBy.getName());
            builder.field(User.Fields.REALM_TYPE.getPreferredName(), lookedUpBy.getType());
            if (lookedUpBy.getDomain() != null) {
                builder.field(User.Fields.REALM_DOMAIN.getPreferredName(), lookedUpBy.getDomain().name());
            }
        } else {
            builder.field(User.Fields.REALM_NAME.getPreferredName(), getAuthenticatingSubject().getRealm().getName());
            builder.field(User.Fields.REALM_TYPE.getPreferredName(), getAuthenticatingSubject().getRealm().getType());
            if (getAuthenticatingSubject().getRealm().getDomain() != null) {
                builder.field(User.Fields.REALM_DOMAIN.getPreferredName(), getAuthenticatingSubject().getRealm().getDomain().name());
            }
        }
        builder.endObject();
        builder.field(User.Fields.AUTHENTICATION_TYPE.getPreferredName(), getAuthenticationType().name().toLowerCase(Locale.ROOT));
        if (isApiKey() || isCrossClusterAccess()) {
            final String apiKeyId = (String) metadata.get(AuthenticationField.API_KEY_ID_KEY);
            final String apiKeyName = (String) metadata.get(AuthenticationField.API_KEY_NAME_KEY);
            final Map<String, Object> apiKeyField = new HashMap<>();
            apiKeyField.put("id", apiKeyId);
            if (apiKeyName != null) {
                apiKeyField.put("name", apiKeyName);
            }
            apiKeyField.put("managed_by", CredentialManagedBy.ELASTICSEARCH.getDisplayName());
            builder.field("api_key", Collections.unmodifiableMap(apiKeyField));
        } else if (isCloudApiKey()) {
            // Cloud API keys keep their details on the user's metadata rather than the authentication metadata.
            final String apiKeyId = user.principal();
            final String apiKeyName = (String) user.metadata().get(AuthenticationField.API_KEY_NAME_KEY);
            final boolean internal = (boolean) user.metadata().get(AuthenticationField.API_KEY_INTERNAL_KEY);
            final Map<String, Object> apiKeyField = new HashMap<>();
            apiKeyField.put("id", apiKeyId);
            if (apiKeyName != null) {
                apiKeyField.put("name", apiKeyName);
            }
            apiKeyField.put("internal", internal);
            apiKeyField.put("managed_by", CredentialManagedBy.CLOUD.getDisplayName());
            builder.field("api_key", Collections.unmodifiableMap(apiKeyField));
        }
    }
public static Authentication getAuthenticationFromCrossClusterAccessMetadata(Authentication authentication) {
if (authentication.isCrossClusterAccess()) {
return (Authentication) authentication.getAuthenticatingSubject().getMetadata().get(CROSS_CLUSTER_ACCESS_AUTHENTICATION_KEY);
} else {
String message = "authentication is not cross_cluster_access";
assert false : message;
throw new IllegalArgumentException(message);
}
}
    // Typed readers for metadata values that are serialized as objects (not generic values) since
    // VERSION_METADATA_BEYOND_GENERIC_MAP; must stay in sync with METADATA_VALUE_WRITER.
    private static final Map<String, CheckedFunction<StreamInput, Object, IOException>> METADATA_VALUE_READER = Map.of(
        CROSS_CLUSTER_ACCESS_AUTHENTICATION_KEY,
        Authentication::new,
        CROSS_CLUSTER_ACCESS_ROLE_DESCRIPTORS_KEY,
        in -> in.readCollectionAsList(RoleDescriptorsBytes::new)
    );
    // Reads authentication metadata: typed readers for known keys, generic reads for everything else;
    // older versions fall back to a plain generic map.
    private static Map<String, Object> readMetadata(StreamInput in) throws IOException {
        if (in.getTransportVersion().onOrAfter(VERSION_METADATA_BEYOND_GENERIC_MAP)) {
            final int size = in.readVInt();
            final Map<String, Object> metadata = Maps.newHashMapWithExpectedSize(size);
            for (int i = 0; i < size; i++) {
                final String key = in.readString();
                final Object value = METADATA_VALUE_READER.getOrDefault(key, StreamInput::readGenericValue).apply(in);
                metadata.put(key, value);
            }
            return metadata;
        } else {
            return in.readGenericMap();
        }
    }
    // Typed writers mirroring METADATA_VALUE_READER; any other key is written as a generic value.
    private static final Map<String, Writeable.Writer<?>> METADATA_VALUE_WRITER = Map.of(
        CROSS_CLUSTER_ACCESS_AUTHENTICATION_KEY,
        (out, v) -> ((Authentication) v).writeTo(out),
        CROSS_CLUSTER_ACCESS_ROLE_DESCRIPTORS_KEY,
        (out, v) -> {
            @SuppressWarnings("unchecked")
            final List<RoleDescriptorsBytes> roleDescriptorsBytesList = (List<RoleDescriptorsBytes>) v;
            out.writeCollection(roleDescriptorsBytesList);
        }
    );
    // Writes authentication metadata; the format must match readMetadata for the target version.
    private static void writeMetadata(StreamOutput out, Map<String, Object> metadata) throws IOException {
        if (out.getTransportVersion().onOrAfter(VERSION_METADATA_BEYOND_GENERIC_MAP)) {
            out.writeVInt(metadata.size());
            for (Map.Entry<String, Object> entry : metadata.entrySet()) {
                out.writeString(entry.getKey());
                @SuppressWarnings("unchecked")
                final var valueWriter = (Writeable.Writer<Object>) METADATA_VALUE_WRITER.getOrDefault(
                    entry.getKey(),
                    StreamOutput::writeGenericValue
                );
                valueWriter.write(out, entry.getValue());
            }
        } else {
            out.writeGenericMap(metadata);
        }
    }
    /**
     * An Authentication object has internal constraints between its fields, e.g. if it is internal authentication,
     * it must have an internal user. This logic is upheld when authentication is built as a result of successful
     * authentication. Hence, this method mostly runs in test (where assertion is enabled).
     * However, for RCS cross cluster access, FC receives an authentication object as part of the request. There is
     * no guarantee that this authentication object also maintains the internal logic. Therefore, this method
     * is called explicitly in production when handling cross cluster access requests.
     *
     * @throws IllegalArgumentException if any per-type invariant is violated
     */
    public void checkConsistency() {
        // isRunAs logic consistency
        if (isRunAs()) {
            assert authenticatingSubject != effectiveSubject : "isRunAs logic does not hold";
        } else {
            assert authenticatingSubject == effectiveSubject : "isRunAs logic does not hold";
        }
        // check consistency for each authentication type
        switch (getAuthenticationType()) {
            case ANONYMOUS -> checkConsistencyForAnonymousAuthenticationType();
            case INTERNAL -> checkConsistencyForInternalAuthenticationType();
            case API_KEY -> checkConsistencyForApiKeyAuthenticationType();
            case REALM -> checkConsistencyForRealmAuthenticationType();
            case TOKEN -> checkConsistencyForTokenAuthenticationType();
            default -> {
                assert false : "unknown authentication type " + type;
            }
        }
    }
    // Anonymous authentications must come from the anonymous realm, with no domain, no internal user and no run-as.
    private void checkConsistencyForAnonymousAuthenticationType() {
        final RealmRef authenticatingRealm = authenticatingSubject.getRealm();
        if (false == authenticatingRealm.isAnonymousRealm()) {
            throw new IllegalArgumentException(
                Strings.format("Anonymous authentication cannot have realm type [%s]", authenticatingRealm.type)
            );
        }
        checkNoDomain(authenticatingRealm, "Anonymous");
        checkNoInternalUser(authenticatingSubject, "Anonymous");
        checkNoRunAs(this, "Anonymous");
    }
    // Internal authentications must come from the fallback or attach realm, carry an internal user,
    // have no domain and no run-as.
    private void checkConsistencyForInternalAuthenticationType() {
        final RealmRef authenticatingRealm = authenticatingSubject.getRealm();
        if (false == authenticatingRealm.isFallbackRealm() && false == authenticatingRealm.isAttachRealm()) {
            throw new IllegalArgumentException(
                Strings.format("Internal authentication cannot have realm type [%s]", authenticatingRealm.type)
            );
        }
        checkNoDomain(authenticatingRealm, "Internal");
        if (false == authenticatingSubject.getUser() instanceof InternalUser) {
            throw new IllegalArgumentException("Internal authentication must have internal user");
        }
        checkNoRunAs(this, "Internal");
    }
private void checkConsistencyForApiKeyAuthenticationType() {
final RealmRef authenticatingRealm = authenticatingSubject.getRealm();
if (false == authenticatingRealm.usesApiKeys()) {
throw new IllegalArgumentException(
Strings.format("API key authentication cannot have realm type [%s]", authenticatingRealm.type)
);
}
if (authenticatingSubject.getType() == Subject.Type.CLOUD_API_KEY) {
checkConsistencyForCloudApiKeyAuthenticatingSubject("Cloud API key");
return;
}
checkConsistencyForApiKeyAuthenticatingSubject("API key");
if (Subject.Type.CROSS_CLUSTER_ACCESS == authenticatingSubject.getType()) {
if (authenticatingSubject.getMetadata().get(CROSS_CLUSTER_ACCESS_AUTHENTICATION_KEY) == null) {
throw new IllegalArgumentException(
"Cross cluster access authentication requires metadata to contain "
+ "a non-null serialized cross cluster access authentication field"
);
}
final Authentication innerAuthentication = (Authentication) authenticatingSubject.getMetadata()
.get(CROSS_CLUSTER_ACCESS_AUTHENTICATION_KEY);
if (innerAuthentication.isCrossClusterAccess()) {
throw new IllegalArgumentException(
"Cross cluster access authentication cannot contain another cross cluster access authentication in its metadata"
);
}
if (authenticatingSubject.getMetadata().get(CROSS_CLUSTER_ACCESS_ROLE_DESCRIPTORS_KEY) == null) {
throw new IllegalArgumentException(
"Cross cluster access authentication requires metadata to contain "
+ "a non-null serialized cross cluster access role descriptors field"
);
}
checkNoRunAs(this, "Cross cluster access");
} else {
if (isRunAs()) {
checkRunAsConsistency(effectiveSubject, authenticatingSubject);
}
}
}
private void checkConsistencyForRealmAuthenticationType() {
if (Subject.Type.USER != authenticatingSubject.getType()) {
throw new IllegalArgumentException("Realm authentication must have subject type of user");
}
if (isRunAs()) {
checkRunAsConsistency(effectiveSubject, authenticatingSubject);
}
}
private void checkConsistencyForTokenAuthenticationType() {
final RealmRef authenticatingRealm = authenticatingSubject.getRealm();
// The below assertion does not hold for custom realms. That's why it is an assertion instead of runtime error.
// Custom realms with synthetic realm names are likely fail in other places. But we don't fail in name/type checks
// for mostly historical reasons.
assert false == authenticatingRealm.isAttachRealm()
&& false == authenticatingRealm.isFallbackRealm()
&& false == authenticatingRealm.isCrossClusterAccessRealm()
: "Token authentication cannot have authenticating realm " + authenticatingRealm;
checkNoInternalUser(authenticatingSubject, "Token");
if (Subject.Type.SERVICE_ACCOUNT == authenticatingSubject.getType()) {
checkNoDomain(authenticatingRealm, "Service account");
checkNoRole(authenticatingSubject, "Service account");
checkNoRunAs(this, "Service account");
} else {
if (Subject.Type.API_KEY == authenticatingSubject.getType()) {
checkConsistencyForApiKeyAuthenticatingSubject("API key token");
}
if (isRunAs()) {
checkRunAsConsistency(effectiveSubject, authenticatingSubject);
}
}
}
private static void checkRunAsConsistency(Subject effectiveSubject, Subject authenticatingSubject) {
if (false == effectiveSubject.getTransportVersion().equals(authenticatingSubject.getTransportVersion())) {
throw new IllegalArgumentException(
Strings.format(
"inconsistent versions between effective subject [%s] and authenticating subject [%s]",
effectiveSubject.getTransportVersion(),
authenticatingSubject.getTransportVersion()
)
);
}
if (Subject.Type.USER != effectiveSubject.getType()) {
throw new IllegalArgumentException(Strings.format("Run-as subject type cannot be [%s]", effectiveSubject.getType()));
}
if (false == effectiveSubject.getMetadata().isEmpty()) {
throw new IllegalArgumentException("Run-as subject must have empty metadata");
}
// assert here because it does not hold for custom realm
assert false == hasSyntheticRealmNameOrType(effectiveSubject.getRealm()) : "run-as subject cannot be from a synthetic realm";
}
private void checkConsistencyForApiKeyAuthenticatingSubject(String prefixMessage) {
final RealmRef authenticatingRealm = authenticatingSubject.getRealm();
checkNoDomain(authenticatingRealm, prefixMessage);
checkNoInternalUser(authenticatingSubject, prefixMessage);
checkNoRole(authenticatingSubject, prefixMessage);
if (authenticatingSubject.getMetadata().get(AuthenticationField.API_KEY_ID_KEY) == null) {
throw new IllegalArgumentException(prefixMessage + " authentication requires metadata to contain a non-null API key ID");
}
}
private void checkConsistencyForCloudApiKeyAuthenticatingSubject(String prefixMessage) {
final RealmRef authenticatingRealm = authenticatingSubject.getRealm();
checkNoDomain(authenticatingRealm, prefixMessage);
checkNoInternalUser(authenticatingSubject, prefixMessage);
checkNoRunAs(this, prefixMessage);
if (authenticatingSubject.getMetadata().get(CROSS_CLUSTER_ACCESS_ROLE_DESCRIPTORS_KEY) != null
|| authenticatingSubject.getMetadata().get(API_KEY_ROLE_DESCRIPTORS_KEY) != null
|| authenticatingSubject.getMetadata().get(API_KEY_LIMITED_ROLE_DESCRIPTORS_KEY) != null) {
throw new IllegalArgumentException(prefixMessage + " authentication cannot contain a role descriptors metadata field");
}
}
private static void checkNoInternalUser(Subject subject, String prefixMessage) {
if (subject.getUser() instanceof InternalUser) {
throw new IllegalArgumentException(
Strings.format(prefixMessage + " authentication cannot have internal user [%s]", subject.getUser().principal())
);
}
}
private static void checkNoDomain(RealmRef realm, String prefixMessage) {
if (realm.getDomain() != null) {
throw new IllegalArgumentException(prefixMessage + " authentication cannot have domain");
}
}
private static void checkNoRole(Subject subject, String prefixMessage) {
if (subject.getUser().roles().length != 0) {
throw new IllegalArgumentException(prefixMessage + " authentication user must have no role");
}
}
private static void checkNoRunAs(Authentication authentication, String prefixMessage) {
if (authentication.isRunAs()) {
throw new IllegalArgumentException(prefixMessage + " authentication cannot run-as other user");
}
}
private static boolean hasSyntheticRealmNameOrType(@Nullable RealmRef realmRef) {
if (realmRef == null) {
return false;
}
if (List.of(
API_KEY_REALM_NAME,
ServiceAccountSettings.REALM_NAME,
ANONYMOUS_REALM_NAME,
FALLBACK_REALM_NAME,
ATTACH_REALM_NAME,
CROSS_CLUSTER_ACCESS_REALM_NAME,
CLOUD_API_KEY_REALM_NAME
).contains(realmRef.getName())) {
return true;
}
if (List.of(
API_KEY_REALM_TYPE,
ServiceAccountSettings.REALM_TYPE,
ANONYMOUS_REALM_TYPE,
FALLBACK_REALM_TYPE,
ATTACH_REALM_TYPE,
CROSS_CLUSTER_ACCESS_REALM_TYPE,
CLOUD_API_KEY_REALM_TYPE
).contains(realmRef.getType())) {
return true;
}
return false;
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder("Authentication[effectiveSubject=").append(effectiveSubject);
if (isRunAs()) {
builder.append(",authenticatingSubject=").append(authenticatingSubject);
}
builder.append(",type=").append(type);
builder.append("]");
return builder.toString();
}
/**
* {@link RealmRef} expresses the grouping of realms, identified with {@link RealmIdentifier}s, under {@link RealmDomain}s.
* A domain groups different realms, such that any username, authenticated by different realms from the <b>same domain</b>,
* is to be associated to a single {@link Profile}.
*/
public static | Authentication |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/metrics/ReporterSetupBuilder.java | {
"start": 8554,
"end": 8855
} | class ____ bundles all the information for a reporter type together so that we can
* handle all types in generic code.
*
* @param <REPORTER> type of the reporter.
* @param <SETUP> type of the setup.
* @param <REPORTER_FACTORY> type of the reporter factory.
*/
static | that |
java | quarkusio__quarkus | integration-tests/opentelemetry-grpc/src/main/java/io/quarkus/it/opentelemetry/grpc/HelloResource.java | {
"start": 218,
"end": 500
} | class ____ {
@GrpcClient
Greeter hello;
@GET
@Path("/{name}")
public Uni<String> hello(@PathParam("name") String name) {
return hello.sayHello(HelloRequest.newBuilder().setName(name).build()).onItem().transform(HelloReply::getMessage);
}
}
| HelloResource |
java | apache__dubbo | dubbo-spring-boot-project/dubbo-spring-boot/src/main/java/org/apache/dubbo/spring/boot/context/event/DubboOpenAPIExportListener.java | {
"start": 1251,
"end": 1382
} | class ____ OpenAPI specifications for Dubbo services
* when the Spring Boot application is fully started and ready.
*/
public | exports |
java | quarkusio__quarkus | integration-tests/narayana-jta/src/test/java/io/quarkus/narayana/jta/TransactionScopeQuarkusTransactionBeginCommitTest.java | {
"start": 428,
"end": 3730
} | class ____ {
@Inject
TransactionManager tm;
@Inject
TransactionScopedBean beanTransactional;
@Inject
TransactionBeanWithEvents beanEvents;
@Test
void transactionScopedInTransaction() throws Exception {
TransactionScopedBean.resetCounters();
QuarkusTransaction.begin();
beanTransactional.setValue(42);
assertEquals(1, TransactionScopedBean.getInitializedCount(), "Expected @PostConstruct to be invoked");
assertEquals(42, beanTransactional.getValue(), "Transaction scope did not save the value");
Transaction suspendedTransaction = tm.suspend();
assertThrows(ContextNotActiveException.class, () -> {
beanTransactional.getValue();
}, "Not expecting to have available TransactionScoped bean outside of the transaction");
QuarkusTransaction.begin();
beanTransactional.setValue(1);
assertEquals(2, TransactionScopedBean.getInitializedCount(), "Expected @PostConstruct to be invoked");
assertEquals(1, beanTransactional.getValue(), "Transaction scope did not save the value");
QuarkusTransaction.commit();
assertEquals(1, TransactionScopedBean.getPreDestroyCount(), "Expected @PreDestroy to be invoked");
assertThrows(ContextNotActiveException.class, () -> {
beanTransactional.getValue();
}, "Not expecting to have available TransactionScoped bean outside of the transaction");
tm.resume(suspendedTransaction);
assertEquals(42, beanTransactional.getValue(), "Transaction scope did not resumed correctly");
QuarkusTransaction.rollback();
assertEquals(2, TransactionScopedBean.getPreDestroyCount(), "Expected @PreDestroy to be invoked");
}
@Test
void scopeEventsAreEmitted() {
TransactionBeanWithEvents.cleanCounts();
QuarkusTransaction.begin();
beanEvents.listenToCommitRollback();
QuarkusTransaction.commit();
assertEquals(1, TransactionBeanWithEvents.getInitialized(), "Expected @Initialized to be observed");
assertEquals(1, TransactionBeanWithEvents.getBeforeDestroyed(), "Expected @BeforeDestroyed to be observed");
assertEquals(1, TransactionBeanWithEvents.getDestroyed(), "Expected @Destroyed to be observed");
assertEquals(1, TransactionBeanWithEvents.getCommited(), "Expected commit to be called once");
assertEquals(0, TransactionBeanWithEvents.getRolledBack(), "Expected no rollback");
TransactionBeanWithEvents.cleanCounts();
QuarkusTransaction.begin();
beanEvents.listenToCommitRollback();
QuarkusTransaction.rollback();
assertEquals(1, TransactionBeanWithEvents.getInitialized(), "Expected @Initialized to be observed");
assertEquals(1, TransactionBeanWithEvents.getBeforeDestroyed(), "Expected @BeforeDestroyed to be observed");
assertEquals(1, TransactionBeanWithEvents.getDestroyed(), "Expected @Destroyed to be observed");
assertEquals(0, TransactionBeanWithEvents.getCommited(), "Expected no commit");
assertEquals(1, TransactionBeanWithEvents.getRolledBack(), "Expected rollback to be called once");
TransactionBeanWithEvents.cleanCounts();
}
}
| TransactionScopeQuarkusTransactionBeginCommitTest |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/support/AbstractContextConfigurationUtilsTests.java | {
"start": 8374,
"end": 8474
} | class ____ {
}
@ContextConfiguration(classes = BarConfig.class)
| NestedTestCaseWithInheritedConfig |
java | apache__flink | flink-table/flink-sql-parser/src/main/java/org/apache/flink/sql/parser/ddl/SqlTableColumn.java | {
"start": 2950,
"end": 4522
} | class ____ extends SqlTableColumn {
private SqlDataTypeSpec type;
private final @Nullable SqlTableConstraint constraint;
public SqlRegularColumn(
SqlParserPos pos,
SqlIdentifier name,
@Nullable SqlCharStringLiteral comment,
SqlDataTypeSpec type,
@Nullable SqlTableConstraint constraint) {
super(pos, name, comment);
this.type = requireNonNull(type, "Column type should not be null");
this.constraint = constraint;
}
public SqlDataTypeSpec getType() {
return type;
}
public void setType(SqlDataTypeSpec type) {
this.type = type;
}
public Optional<SqlTableConstraint> getConstraint() {
return Optional.ofNullable(constraint);
}
@Override
protected void unparseColumn(SqlWriter writer, int leftPrec, int rightPrec) {
type.unparse(writer, leftPrec, rightPrec);
if (this.type.getNullable() != null && !this.type.getNullable()) {
// Default is nullable.
writer.keyword("NOT NULL");
}
if (constraint != null) {
constraint.unparse(writer, leftPrec, rightPrec);
}
}
@Override
public @Nonnull List<SqlNode> getOperandList() {
return ImmutableNullableList.of(name, type, constraint, comment);
}
}
/** A column derived from metadata. */
public static | SqlRegularColumn |
java | apache__flink | flink-streaming-java/src/main/java/org/apache/flink/streaming/util/retryable/AsyncRetryStrategies.java | {
"start": 7791,
"end": 9329
} | class ____<OUT> {
private final int maxAttempts;
private final long initialDelay;
private final long maxRetryDelay;
private final double multiplier;
private Predicate<Collection<OUT>> resultPredicate;
private Predicate<Throwable> exceptionPredicate;
public ExponentialBackoffDelayRetryStrategyBuilder(
int maxAttempts, long initialDelay, long maxRetryDelay, double multiplier) {
this.maxAttempts = maxAttempts;
this.initialDelay = initialDelay;
this.maxRetryDelay = maxRetryDelay;
this.multiplier = multiplier;
}
public ExponentialBackoffDelayRetryStrategyBuilder<OUT> ifResult(
@Nonnull Predicate<Collection<OUT>> resultRetryPredicate) {
this.resultPredicate = resultRetryPredicate;
return this;
}
public ExponentialBackoffDelayRetryStrategyBuilder<OUT> ifException(
@Nonnull Predicate<Throwable> exceptionRetryPredicate) {
this.exceptionPredicate = exceptionRetryPredicate;
return this;
}
public ExponentialBackoffDelayRetryStrategy<OUT> build() {
return new ExponentialBackoffDelayRetryStrategy<OUT>(
maxAttempts,
initialDelay,
maxRetryDelay,
multiplier,
resultPredicate,
exceptionPredicate);
}
}
}
| ExponentialBackoffDelayRetryStrategyBuilder |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/component/bean/issues/MyMessage.java | {
"start": 859,
"end": 1132
} | class ____ {
private final String message;
public MyMessage(String message) {
this.message = message;
}
public String getMessage() {
return message;
}
@Override
public String toString() {
return message;
}
}
| MyMessage |
java | apache__flink | flink-table/flink-sql-gateway/src/main/java/org/apache/flink/table/gateway/service/session/SessionManagerImpl.java | {
"start": 2603,
"end": 8989
} | class ____ implements SessionManager {
private static final Logger LOG = LoggerFactory.getLogger(SessionManagerImpl.class);
private static final String OPERATION_POOL_NAME = "sql-gateway-operation-pool";
private final DefaultContext defaultContext;
private final long idleTimeout;
private final long checkInterval;
private final int maxSessionCount;
private final Map<SessionHandle, Session> sessions;
private ExecutorService operationExecutorService;
private @Nullable ScheduledExecutorService cleanupService;
private @Nullable ScheduledFuture<?> timeoutCheckerFuture;
public SessionManagerImpl(DefaultContext defaultContext) {
this.defaultContext = defaultContext;
ReadableConfig conf = defaultContext.getFlinkConfig();
this.idleTimeout = conf.get(SQL_GATEWAY_SESSION_IDLE_TIMEOUT).toMillis();
this.checkInterval = conf.get(SQL_GATEWAY_SESSION_CHECK_INTERVAL).toMillis();
this.maxSessionCount = conf.get(SQL_GATEWAY_SESSION_MAX_NUM);
this.sessions = new ConcurrentHashMap<>();
}
@Override
public void start() {
if (checkInterval > 0 && idleTimeout > 0) {
cleanupService = Executors.newSingleThreadScheduledExecutor();
timeoutCheckerFuture =
cleanupService.scheduleAtFixedRate(
() -> {
LOG.debug(
"Start to cleanup expired sessions, current session count: {}",
sessions.size());
for (Map.Entry<SessionHandle, Session> entry :
sessions.entrySet()) {
SessionHandle sessionId = entry.getKey();
Session session = entry.getValue();
if (isSessionExpired(session)) {
LOG.info("Session {} is expired, closing it...", sessionId);
closeSession(session);
}
}
LOG.debug(
"Removing expired session finished, current session count: {}",
sessions.size());
},
checkInterval,
checkInterval,
TimeUnit.MILLISECONDS);
}
ReadableConfig conf = defaultContext.getFlinkConfig();
operationExecutorService =
ThreadUtils.newThreadPool(
conf.get(SQL_GATEWAY_WORKER_THREADS_MIN),
conf.get(SQL_GATEWAY_WORKER_THREADS_MAX),
conf.get(SQL_GATEWAY_WORKER_KEEPALIVE_TIME).toMillis(),
OPERATION_POOL_NAME);
}
@Override
public void stop() {
if (cleanupService != null) {
timeoutCheckerFuture.cancel(true);
cleanupService.shutdown();
}
if (operationExecutorService != null) {
operationExecutorService.shutdown();
}
LOG.info("SessionManager is stopped.");
}
@Override
public Session getSession(SessionHandle sessionHandle) throws SqlGatewayException {
Session session = sessions.get(sessionHandle);
if (session == null) {
String msg = String.format("Session '%s' does not exist.", sessionHandle);
LOG.warn(msg);
throw new SqlGatewayException(msg);
}
session.touch();
return session;
}
@Override
public synchronized Session openSession(SessionEnvironment environment)
throws SqlGatewayException {
// check session limit
checkSessionCount();
Session session = null;
SessionHandle sessionId = null;
do {
sessionId = SessionHandle.create();
} while (sessions.containsKey(sessionId));
SessionContext sessionContext =
SessionContext.create(
defaultContext, sessionId, environment, operationExecutorService);
session = new Session(sessionContext);
session.open();
sessions.put(sessionId, session);
LOG.info(
"Session {} is opened, and the number of current sessions is {}.",
session.getSessionHandle(),
sessions.size());
return session;
}
public void closeSession(SessionHandle sessionHandle) throws SqlGatewayException {
Session session = getSession(sessionHandle);
closeSession(session);
}
// ------------------------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------------------------
private void checkSessionCount() throws SqlGatewayException {
if (maxSessionCount <= 0) {
return;
}
if (sessions.size() >= maxSessionCount) {
String msg =
String.format(
"Failed to create session, the count of active sessions exceeds the max count: %s",
maxSessionCount);
LOG.warn(msg);
throw new SqlGatewayException(msg);
}
}
private boolean isSessionExpired(Session session) {
if (idleTimeout > 0) {
return (System.currentTimeMillis() - session.getLastAccessTime()) > idleTimeout;
} else {
return false;
}
}
private void closeSession(Session session) {
SessionHandle sessionId = session.getSessionHandle();
sessions.remove(sessionId);
session.close();
LOG.info("Session: {} is closed.", sessionId);
}
@VisibleForTesting
public boolean isSessionAlive(SessionHandle sessionId) {
return sessions.containsKey(sessionId);
}
@VisibleForTesting
public int currentSessionCount() {
return sessions.size();
}
@VisibleForTesting
public int getOperationCount(SessionHandle sessionHandle) {
return getSession(sessionHandle).getOperationManager().getOperationCount();
}
}
| SessionManagerImpl |
java | apache__camel | components/camel-arangodb/src/generated/java/org/apache/camel/component/arangodb/ArangoDbComponentConfigurer.java | {
"start": 735,
"end": 6479
} | class ____ extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
private org.apache.camel.component.arangodb.ArangoDbConfiguration getOrCreateConfiguration(ArangoDbComponent target) {
if (target.getConfiguration() == null) {
target.setConfiguration(new org.apache.camel.component.arangodb.ArangoDbConfiguration());
}
return target.getConfiguration();
}
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
ArangoDbComponent target = (ArangoDbComponent) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "arangodb":
case "arangoDB": target.setArangoDB(property(camelContext, com.arangodb.ArangoDB.class, value)); return true;
case "autowiredenabled":
case "autowiredEnabled": target.setAutowiredEnabled(property(camelContext, boolean.class, value)); return true;
case "configuration": target.setConfiguration(property(camelContext, org.apache.camel.component.arangodb.ArangoDbConfiguration.class, value)); return true;
case "documentcollection":
case "documentCollection": getOrCreateConfiguration(target).setDocumentCollection(property(camelContext, java.lang.String.class, value)); return true;
case "edgecollection":
case "edgeCollection": getOrCreateConfiguration(target).setEdgeCollection(property(camelContext, java.lang.String.class, value)); return true;
case "graph": getOrCreateConfiguration(target).setGraph(property(camelContext, java.lang.String.class, value)); return true;
case "host": getOrCreateConfiguration(target).setHost(property(camelContext, java.lang.String.class, value)); return true;
case "lazystartproducer":
case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
case "operation": getOrCreateConfiguration(target).setOperation(property(camelContext, org.apache.camel.component.arangodb.ArangoDbOperation.class, value)); return true;
case "password": getOrCreateConfiguration(target).setPassword(property(camelContext, java.lang.String.class, value)); return true;
case "port": getOrCreateConfiguration(target).setPort(property(camelContext, int.class, value)); return true;
case "user": getOrCreateConfiguration(target).setUser(property(camelContext, java.lang.String.class, value)); return true;
case "vertexcollection":
case "vertexCollection": getOrCreateConfiguration(target).setVertexCollection(property(camelContext, java.lang.String.class, value)); return true;
case "vertx": target.setVertx(property(camelContext, io.vertx.core.Vertx.class, value)); return true;
default: return false;
}
}
@Override
public String[] getAutowiredNames() {
return new String[]{"arangoDB", "vertx"};
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "arangodb":
case "arangoDB": return com.arangodb.ArangoDB.class;
case "autowiredenabled":
case "autowiredEnabled": return boolean.class;
case "configuration": return org.apache.camel.component.arangodb.ArangoDbConfiguration.class;
case "documentcollection":
case "documentCollection": return java.lang.String.class;
case "edgecollection":
case "edgeCollection": return java.lang.String.class;
case "graph": return java.lang.String.class;
case "host": return java.lang.String.class;
case "lazystartproducer":
case "lazyStartProducer": return boolean.class;
case "operation": return org.apache.camel.component.arangodb.ArangoDbOperation.class;
case "password": return java.lang.String.class;
case "port": return int.class;
case "user": return java.lang.String.class;
case "vertexcollection":
case "vertexCollection": return java.lang.String.class;
case "vertx": return io.vertx.core.Vertx.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
ArangoDbComponent target = (ArangoDbComponent) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "arangodb":
case "arangoDB": return target.getArangoDB();
case "autowiredenabled":
case "autowiredEnabled": return target.isAutowiredEnabled();
case "configuration": return target.getConfiguration();
case "documentcollection":
case "documentCollection": return getOrCreateConfiguration(target).getDocumentCollection();
case "edgecollection":
case "edgeCollection": return getOrCreateConfiguration(target).getEdgeCollection();
case "graph": return getOrCreateConfiguration(target).getGraph();
case "host": return getOrCreateConfiguration(target).getHost();
case "lazystartproducer":
case "lazyStartProducer": return target.isLazyStartProducer();
case "operation": return getOrCreateConfiguration(target).getOperation();
case "password": return getOrCreateConfiguration(target).getPassword();
case "port": return getOrCreateConfiguration(target).getPort();
case "user": return getOrCreateConfiguration(target).getUser();
case "vertexcollection":
case "vertexCollection": return getOrCreateConfiguration(target).getVertexCollection();
case "vertx": return target.getVertx();
default: return null;
}
}
}
| ArangoDbComponentConfigurer |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java | {
"start": 1100,
"end": 1170
} | interface ____ a replica in Pipeline that's being written to
*/
public | of |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/lucene/search/uhighlight/BoundedBreakIteratorScannerTests.java | {
"start": 869,
"end": 6446
} | class ____ extends ESTestCase {
private static final String[] WORD_BOUNDARIES = new String[] { " ", " ", "\t", "#", "\n" };
private static final String[] SENTENCE_BOUNDARIES = new String[] { "! ", "? ", ". ", ".\n", ".\n\n" };
private void testRandomAsciiTextCase(BreakIterator bi, int maxLen) {
// Generate a random set of unique terms with ascii character
int maxSize = randomIntBetween(5, 100);
String[] vocabulary = new String[maxSize];
for (int i = 0; i < maxSize; i++) {
if (rarely()) {
vocabulary[i] = randomAlphaOfLengthBetween(50, 200);
} else {
vocabulary[i] = randomAlphaOfLengthBetween(1, 30);
}
}
// Generate a random text made of random terms separated with word-boundaries
// and sentence-boundaries.
StringBuilder text = new StringBuilder();
List<Integer> offsetList = new ArrayList<>();
List<Integer> sizeList = new ArrayList<>();
// the number of sentences to generate
int numSentences = randomIntBetween(10, 100);
int maxTermLen = 0;
for (int i = 0; i < numSentences; i++) {
// the number of terms in the sentence
int numTerms = randomIntBetween(5, 10);
for (int j = 0; j < numTerms; j++) {
int termId = randomIntBetween(0, vocabulary.length - 1);
String term = vocabulary[termId].toLowerCase(Locale.ROOT);
if (j == 0) {
// capitalize the first letter of the first term in the sentence
term = term.substring(0, 1).toUpperCase(Locale.ROOT) + term.substring(1);
} else {
String sep = randomFrom(WORD_BOUNDARIES);
text.append(sep);
}
maxTermLen = Math.max(term.length(), maxTermLen);
offsetList.add(text.length());
sizeList.add(term.length());
text.append(term);
}
String boundary = randomFrom(SENTENCE_BOUNDARIES);
text.append(boundary);
}
int[] sizes = sizeList.stream().mapToInt(i -> i).toArray();
int[] offsets = offsetList.stream().mapToInt(i -> i).toArray();
bi.setText(text.toString());
int currentPos = randomIntBetween(0, 20);
int lastEnd = -1;
int maxPassageLen = maxLen + (maxTermLen * 2);
while (currentPos < offsets.length) {
// find the passage that contains the current term
int nextOffset = offsets[currentPos];
int start = bi.preceding(nextOffset + 1);
int end = bi.following(nextOffset);
// check that the passage is valid
assertThat(start, greaterThanOrEqualTo(lastEnd));
assertThat(end, greaterThan(start));
assertThat(start, lessThanOrEqualTo(nextOffset));
assertThat(end, greaterThanOrEqualTo(nextOffset));
int passageLen = end - start;
assertThat(passageLen, lessThanOrEqualTo(maxPassageLen));
// checks that the start and end of the passage are on word boundaries.
int startPos = Arrays.binarySearch(offsets, start);
int endPos = Arrays.binarySearch(offsets, end);
if (startPos < 0) {
int lastWordEnd = offsets[Math.abs(startPos) - 2] + sizes[Math.abs(startPos) - 2];
assertThat(start, greaterThanOrEqualTo(lastWordEnd));
}
if (endPos < 0) {
if (Math.abs(endPos) - 2 < offsets.length) {
int lastWordEnd = offsets[Math.abs(endPos) - 2] + sizes[Math.abs(endPos) - 2];
assertThat(end, greaterThanOrEqualTo(lastWordEnd));
}
// advance the position to the end of the current passage
currentPos = (Math.abs(endPos) - 1);
} else {
// advance the position to the end of the current passage
currentPos = endPos;
}
// randomly advance to the next term to highlight
currentPos += randomIntBetween(0, 20);
lastEnd = end;
}
}
public void testBoundedSentence() {
for (int i = 0; i < 20; i++) {
int maxLen = randomIntBetween(10, 500);
testRandomAsciiTextCase(BoundedBreakIteratorScanner.getSentence(Locale.ROOT, maxLen), maxLen);
}
}
public void testTextThatEndsBeforeMaxLen() {
BreakIterator bi = BoundedBreakIteratorScanner.getSentence(Locale.ROOT, 1000);
final String text = "This is the first test sentence. Here is the second one.";
int offset = text.indexOf("first");
bi.setText(text);
assertEquals(0, bi.preceding(offset));
assertEquals(text.length(), bi.following(offset - 1));
offset = text.indexOf("second");
bi.setText(text);
assertEquals(33, bi.preceding(offset));
assertEquals(text.length(), bi.following(offset - 1));
}
public void testFragmentSizeThatIsTooBig() {
final int fragmentSize = Integer.MAX_VALUE;
BreakIterator bi = BoundedBreakIteratorScanner.getSentence(Locale.ROOT, fragmentSize);
final String text = "Any sentence";
final int offset = 0; // find at beggining of text
bi.setText(text);
assertEquals(0, bi.preceding(offset));
assertEquals(text.length(), bi.following(offset - 1));
}
}
| BoundedBreakIteratorScannerTests |
java | spring-projects__spring-data-jpa | spring-data-jpa/src/test/java/org/springframework/data/jpa/repository/query/JpaQueryCreatorTests.java | {
"start": 30315,
"end": 31756
} | class ____ {
QueryCreatorBuilder builder;
Lazy<String> jpql;
private QueryCreatorTester(QueryCreatorBuilder builder) {
this.builder = builder;
this.jpql = Lazy.of(builder::render);
}
static QueryCreatorTester create(QueryCreatorBuilder builder) {
return new QueryCreatorTester(builder);
}
QueryCreatorTester expectJpql(String jpql, Object... args) {
assertThat(this.jpql.get()).isEqualTo(jpql, args);
return this;
}
QueryCreatorTester expectPlaceholderValue(String placeholder, Object value) {
return expectBindingAt(builder.bindingIndexFor(placeholder), value);
}
QueryCreatorTester expectBindingAt(int position, Object value) {
Object current = builder.bindableParameters().getBindableValue(position - 1);
assertThat(current).isEqualTo(value);
return this;
}
QueryCreatorTester validateQuery() {
if (builder instanceof DefaultCreatorBuilder dcb && dcb.metamodel instanceof TestMetaModel tmm) {
return validateQuery(tmm.entityManager());
}
throw new IllegalStateException("No EntityManager found, please provide one via [verify(EntityManager)]");
}
QueryCreatorTester validateQuery(EntityManager entityManager) {
if (builder instanceof DefaultCreatorBuilder dcb) {
entityManager.createQuery(this.jpql.get(), dcb.returnedType.getReturnedType());
} else {
entityManager.createQuery(this.jpql.get());
}
return this;
}
}
| QueryCreatorTester |
java | micronaut-projects__micronaut-core | http/src/main/java/io/micronaut/http/uri/UriMatchInfo.java | {
"start": 810,
"end": 1246
} | interface ____ {
/**
* @return The matched URI
*/
String getUri();
/**
* @return The variable values following a successful match
*/
Map<String, Object> getVariableValues();
/**
* @return The list of template variables
*/
List<UriMatchVariable> getVariables();
/**
* @return A map of the variables.
*/
Map<String, UriMatchVariable> getVariableMap();
}
| UriMatchInfo |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/webmonitor/retriever/LeaderGatewayRetrieverTest.java | {
"start": 2871,
"end": 3774
} | class ____ extends LeaderGatewayRetriever<RpcGateway> {
private final RpcGateway rpcGateway;
private int retrievalAttempt = 0;
private TestingLeaderGatewayRetriever(RpcGateway rpcGateway) {
this.rpcGateway = rpcGateway;
}
@Override
protected CompletableFuture<RpcGateway> createGateway(
CompletableFuture<Tuple2<String, UUID>> leaderFuture) {
CompletableFuture<RpcGateway> result;
if (retrievalAttempt < 2) {
result =
FutureUtils.completedExceptionally(
new FlinkException("Could not resolve the leader gateway."));
} else {
result = CompletableFuture.completedFuture(rpcGateway);
}
retrievalAttempt++;
return result;
}
}
}
| TestingLeaderGatewayRetriever |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/TraditionalSwitchExpressionTest.java | {
"start": 825,
"end": 1134
} | class ____ {
private final CompilationTestHelper testHelper =
CompilationTestHelper.newInstance(TraditionalSwitchExpression.class, getClass());
@Test
public void positive() {
testHelper
.addSourceLines(
"Test.java",
"""
| TraditionalSwitchExpressionTest |
java | elastic__elasticsearch | build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ValidateJsonNoKeywordsTask.java | {
"start": 1917,
"end": 7921
} | class ____ extends DefaultTask {
private File jsonKeywords;
private File report;
private FileCollection inputFiles;
@Incremental
@InputFiles
public FileCollection getInputFiles() {
return inputFiles;
}
public void setInputFiles(FileCollection inputFiles) {
this.inputFiles = inputFiles;
}
@InputFile
public File getJsonKeywords() {
return jsonKeywords;
}
public void setJsonKeywords(File jsonKeywords) {
this.jsonKeywords = jsonKeywords;
}
public void setReport(File report) {
this.report = report;
}
@OutputFile
public File getReport() {
return report;
}
@TaskAction
public void validate(InputChanges inputChanges) {
final ObjectMapper mapper = new ObjectMapper().configure(JsonParser.Feature.ALLOW_COMMENTS, true);
final Map<File, Set<String>> errors = new LinkedHashMap<>();
getLogger().debug("Loading keywords from {}", jsonKeywords.getName());
final Map<String, Set<String>> languagesByKeyword = loadKeywords(mapper);
// incrementally evaluate input files
StreamSupport.stream(inputChanges.getFileChanges(getInputFiles()).spliterator(), false)
.filter(f -> f.getChangeType() != ChangeType.REMOVED)
.forEach(fileChange -> {
File file = fileChange.getFile();
if (file.isDirectory()) {
return;
}
getLogger().debug("Checking {}", file.getName());
try {
final JsonNode jsonNode = mapper.readTree(file);
if (jsonNode.isObject() == false) {
errors.put(file, Set.of("Expected an object, but found: " + jsonNode.getNodeType()));
return;
}
final ObjectNode rootNode = (ObjectNode) jsonNode;
if (rootNode.size() != 1) {
errors.put(file, Set.of("Expected an object with exactly 1 key, but found " + rootNode.size() + " keys"));
return;
}
final String apiName = rootNode.fieldNames().next();
for (String component : apiName.split("\\.")) {
if (languagesByKeyword.containsKey(component)) {
final Set<String> errorsForFile = errors.computeIfAbsent(file, _file -> new HashSet<>());
errorsForFile.add(
component + " is a reserved keyword in these languages: " + languagesByKeyword.get(component)
);
}
}
} catch (IOException e) {
errors.put(file, Set.of("Failed to load file: " + e.getMessage()));
}
});
if (errors.isEmpty()) {
return;
}
try {
try (PrintWriter pw = new PrintWriter(getReport())) {
pw.println("---------- Validation Report -----------");
pw.println("Some API names were found that, when client code is generated for these APIS,");
pw.println("could conflict with the reserved words in some programming languages. It may");
pw.println("still be possible to use these API names, but you will need to verify whether");
pw.println("the API name (and its components) can be used as method names, and update the");
pw.println("list of keywords below. The safest action is to rename the API to avoid conflicts.");
pw.println();
pw.printf("Keywords source: %s%n", getJsonKeywords());
pw.println();
pw.println("---------- Validation Errors -----------");
pw.println();
errors.forEach((file, errorsForFile) -> {
pw.printf("File: %s%n", file);
errorsForFile.forEach(err -> pw.printf("\t%s%n", err));
pw.println();
});
}
} catch (FileNotFoundException e) {
throw new GradleException("Failed to write keywords report", e);
}
String message = String.format(
Locale.ROOT,
"Error validating JSON. See the report at: %s%s%s",
getReport().toURI().toASCIIString(),
System.lineSeparator(),
String.format("Verification failed: %d files contained %d violations", errors.keySet().size(), errors.values().size())
);
throw new GradleException(message);
}
/**
* Loads the known keywords. Although the JSON on disk maps from language to keywords, this method
* inverts this to map from keyword to languages. This is because the same keywords are found in
* multiple languages, so it is easier and more useful to have a single map of keywords.
*
* @return a mapping from keyword to languages.
*/
private Map<String, Set<String>> loadKeywords(ObjectMapper mapper) {
Map<String, Set<String>> languagesByKeyword = new HashMap<>();
try {
final ObjectNode keywordsNode = ((ObjectNode) mapper.readTree(this.jsonKeywords));
keywordsNode.fieldNames().forEachRemaining(eachLanguage -> {
keywordsNode.get(eachLanguage).elements().forEachRemaining(e -> {
final String eachKeyword = e.textValue();
final Set<String> languages = languagesByKeyword.computeIfAbsent(eachKeyword, _keyword -> new HashSet<>());
languages.add(eachLanguage);
});
});
} catch (IOException e) {
throw new GradleException("Failed to load keywords JSON from " + jsonKeywords.getName() + " - " + e.getMessage(), e);
}
return languagesByKeyword;
}
}
| ValidateJsonNoKeywordsTask |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/component/language/LanguageResourceLoadScriptFromClasspathTest.java | {
"start": 903,
"end": 1529
} | class ____ extends LanguageLoadScriptFromClasspathTest {
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
// START SNIPPET: e1
from("direct:start")
// load the script from the classpath
.to("language:simple:resource:classpath:org/apache/camel/component/language/mysimplescript.txt")
.to("mock:result");
// END SNIPPET: e1
}
};
}
}
| LanguageResourceLoadScriptFromClasspathTest |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/OperatorPrecedenceTest.java | {
"start": 5147,
"end": 5400
} | class ____ {
void f(boolean a, boolean b, boolean c, boolean d) {
boolean g = a || (b && c) && d;
}
}
""")
.addOutputLines(
"Test.java",
"""
| Test |
java | apache__camel | components/camel-aws/camel-aws-bedrock/src/main/java/org/apache/camel/component/aws2/bedrock/agentruntime/client/impl/BedrockAgentRuntimeClientSessionTokenImpl.java | {
"start": 2022,
"end": 5579
} | class ____ implements BedrockAgentRuntimeInternalClient {
private static final Logger LOG = LoggerFactory.getLogger(BedrockAgentRuntimeClientSessionTokenImpl.class);
private BedrockAgentRuntimeConfiguration configuration;
/**
* Constructor that uses the config file.
*/
public BedrockAgentRuntimeClientSessionTokenImpl(BedrockAgentRuntimeConfiguration configuration) {
LOG.trace("Creating an AWS Bedrock Agent Runtime manager using static credentials.");
this.configuration = configuration;
}
/**
* Getting the Bedrock Agent Runtime AWS client that is used.
*
* @return BedrockAgentRuntimeClient Client.
*/
@Override
public BedrockAgentRuntimeClient getBedrockAgentRuntimeClient() {
BedrockAgentRuntimeClient client = null;
BedrockAgentRuntimeClientBuilder clientBuilder = BedrockAgentRuntimeClient.builder();
ProxyConfiguration.Builder proxyConfig = null;
ApacheHttpClient.Builder httpClientBuilder = null;
boolean isClientConfigFound = false;
if (ObjectHelper.isNotEmpty(configuration.getProxyHost()) && ObjectHelper.isNotEmpty(configuration.getProxyPort())) {
proxyConfig = ProxyConfiguration.builder();
URI proxyEndpoint = URI.create(configuration.getProxyProtocol() + "://" + configuration.getProxyHost() + ":"
+ configuration.getProxyPort());
proxyConfig.endpoint(proxyEndpoint);
httpClientBuilder = ApacheHttpClient.builder().proxyConfiguration(proxyConfig.build());
isClientConfigFound = true;
}
if (configuration.getAccessKey() != null && configuration.getSecretKey() != null
&& configuration.getSessionToken() != null) {
AwsSessionCredentials cred = AwsSessionCredentials.create(configuration.getAccessKey(),
configuration.getSecretKey(), configuration.getSessionToken());
if (isClientConfigFound) {
clientBuilder = clientBuilder.httpClientBuilder(httpClientBuilder)
.credentialsProvider(StaticCredentialsProvider.create(cred));
} else {
clientBuilder = clientBuilder.credentialsProvider(StaticCredentialsProvider.create(cred));
}
} else {
if (!isClientConfigFound) {
clientBuilder = clientBuilder.httpClientBuilder(httpClientBuilder);
}
}
if (ObjectHelper.isNotEmpty(configuration.getRegion())) {
clientBuilder = clientBuilder.region(Region.of(configuration.getRegion()));
}
if (configuration.isOverrideEndpoint()) {
clientBuilder.endpointOverride(URI.create(configuration.getUriEndpointOverride()));
}
if (configuration.isTrustAllCertificates()) {
if (httpClientBuilder == null) {
httpClientBuilder = ApacheHttpClient.builder();
}
SdkHttpClient ahc = httpClientBuilder.buildWithDefaults(AttributeMap
.builder()
.put(
SdkHttpConfigurationOption.TRUST_ALL_CERTIFICATES,
Boolean.TRUE)
.build());
// set created http client to use instead of builder
clientBuilder.httpClient(ahc);
clientBuilder.httpClientBuilder(null);
}
client = clientBuilder.build();
return client;
}
}
| BedrockAgentRuntimeClientSessionTokenImpl |
java | netty__netty | codec-memcache/src/main/java/io/netty/handler/codec/memcache/FullMemcacheMessage.java | {
"start": 923,
"end": 1444
} | interface ____ extends MemcacheMessage, LastMemcacheContent {
@Override
FullMemcacheMessage copy();
@Override
FullMemcacheMessage duplicate();
@Override
FullMemcacheMessage retainedDuplicate();
@Override
FullMemcacheMessage replace(ByteBuf content);
@Override
FullMemcacheMessage retain(int increment);
@Override
FullMemcacheMessage retain();
@Override
FullMemcacheMessage touch();
@Override
FullMemcacheMessage touch(Object hint);
}
| FullMemcacheMessage |
java | apache__dubbo | dubbo-common/src/test/java/org/apache/dubbo/common/threadpool/support/eager/EagerThreadPoolTest.java | {
"start": 1959,
"end": 3813
} | class ____ {
@Test
void getExecutor1() throws Exception {
URL url = URL.valueOf("dubbo://10.20.130.230:20880/context/path?" + THREAD_NAME_KEY
+ "=demo&" + CORE_THREADS_KEY
+ "=1&" + THREADS_KEY
+ "=2&" + ALIVE_KEY
+ "=1000&" + QUEUES_KEY
+ "=0");
ThreadPool threadPool = new EagerThreadPool();
ThreadPoolExecutor executor = (ThreadPoolExecutor) threadPool.getExecutor(url);
assertThat(executor, instanceOf(EagerThreadPoolExecutor.class));
assertThat(executor.getCorePoolSize(), is(1));
assertThat(executor.getMaximumPoolSize(), is(2));
assertThat(executor.getKeepAliveTime(TimeUnit.MILLISECONDS), is(1000L));
assertThat(executor.getQueue().remainingCapacity(), is(1));
assertThat(executor.getQueue(), Matchers.<BlockingQueue<Runnable>>instanceOf(TaskQueue.class));
assertThat(
executor.getRejectedExecutionHandler(),
Matchers.<RejectedExecutionHandler>instanceOf(AbortPolicyWithReport.class));
final CountDownLatch latch = new CountDownLatch(1);
executor.execute(() -> {
Thread thread = Thread.currentThread();
assertThat(thread, instanceOf(InternalThread.class));
assertThat(thread.getName(), startsWith("demo"));
latch.countDown();
});
latch.await();
assertThat(latch.getCount(), is(0L));
}
@Test
void getExecutor2() {
URL url = URL.valueOf("dubbo://10.20.130.230:20880/context/path?" + QUEUES_KEY + "=2");
ThreadPool threadPool = new EagerThreadPool();
ThreadPoolExecutor executor = (ThreadPoolExecutor) threadPool.getExecutor(url);
assertThat(executor.getQueue().remainingCapacity(), is(2));
}
}
| EagerThreadPoolTest |
java | spring-projects__spring-framework | spring-web/src/test/java/org/springframework/web/client/IntrospectingClientHttpResponseTests.java | {
"start": 1399,
"end": 3180
} | class ____ {
@ParameterizedTest
@MethodSource("noBodyHttpStatus")
void noMessageBodyWhenStatus(HttpStatus status) throws Exception {
var response = new MockClientHttpResponse(new byte[0], status);
var wrapped = new IntrospectingClientHttpResponse(response);
assertThat(wrapped.hasMessageBody()).isFalse();
}
static Stream<HttpStatusCode> noBodyHttpStatus() {
return Stream.of(HttpStatus.NO_CONTENT, HttpStatus.EARLY_HINTS, HttpStatus.NOT_MODIFIED);
}
@Test
void noMessageBodyWhenContentLength0() throws Exception {
var response = new MockClientHttpResponse(new byte[0], HttpStatus.OK);
response.getHeaders().setContentLength(0);
var wrapped = new IntrospectingClientHttpResponse(response);
assertThat(wrapped.hasMessageBody()).isFalse();
}
@Test
void emptyMessageWhenNullInputStream() throws Exception {
ClientHttpResponse mockResponse = mock();
given(mockResponse.getBody()).willReturn(null);
var wrappedMock = new IntrospectingClientHttpResponse(mockResponse);
assertThat(wrappedMock.hasEmptyMessageBody()).isTrue();
}
@Test
void messageBodyExists() throws Exception {
var stream = new ByteArrayInputStream("content".getBytes());
var response = new MockClientHttpResponse(stream, HttpStatus.OK);
var wrapped = new IntrospectingClientHttpResponse(response);
assertThat(wrapped.hasEmptyMessageBody()).isFalse();
}
@Test
void emptyMessageWhenEOFException() throws Exception {
ClientHttpResponse mockResponse = mock();
InputStream stream = mock();
given(mockResponse.getBody()).willReturn(stream);
given(stream.read()).willThrow(new EOFException());
var wrappedMock = new IntrospectingClientHttpResponse(mockResponse);
assertThat(wrappedMock.hasEmptyMessageBody()).isTrue();
}
}
| IntrospectingClientHttpResponseTests |
java | apache__camel | components/camel-ibm/camel-ibm-watson-speech-to-text/src/main/java/org/apache/camel/component/ibm/watson/stt/WatsonSpeechToTextConstants.java | {
"start": 969,
"end": 2549
} | interface ____ {
@Metadata(description = "The operation to perform", javaType = "String")
String OPERATION = "CamelIBMWatsonSTTOperation";
// Recognition headers
@Metadata(description = "The audio file to transcribe", javaType = "java.io.File")
String AUDIO_FILE = "CamelIBMWatsonSTTAudioFile";
@Metadata(description = "The language model to use for recognition", javaType = "String")
String MODEL = "CamelIBMWatsonSTTModel";
@Metadata(description = "The audio format (e.g., audio/wav, audio/mp3, audio/flac)", javaType = "String")
String CONTENT_TYPE = "CamelIBMWatsonSTTContentType";
@Metadata(description = "Whether to include timestamps in the transcription", javaType = "Boolean")
String TIMESTAMPS = "CamelIBMWatsonSTTTimestamps";
@Metadata(description = "Whether to include word confidence scores", javaType = "Boolean")
String WORD_CONFIDENCE = "CamelIBMWatsonSTTWordConfidence";
@Metadata(description = "Whether to identify different speakers", javaType = "Boolean")
String SPEAKER_LABELS = "CamelIBMWatsonSTTSpeakerLabels";
// Model headers
@Metadata(description = "The name of the model to retrieve", javaType = "String")
String MODEL_NAME = "CamelIBMWatsonSTTModelName";
@Metadata(description = "The language for filtering models", javaType = "String")
String LANGUAGE = "CamelIBMWatsonSTTLanguage";
// Output headers
@Metadata(description = "The transcription result text", javaType = "String")
String TRANSCRIPT = "CamelIBMWatsonSTTTranscript";
}
| WatsonSpeechToTextConstants |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/IOStatisticsStoreBuilderImpl.java | {
"start": 1303,
"end": 3363
} | class ____ implements
IOStatisticsStoreBuilder {
private final List<String> counters = new ArrayList<>();
private final List<String> gauges = new ArrayList<>();
private final List<String> minimums = new ArrayList<>();
private final List<String> maximums = new ArrayList<>();
private final List<String> meanStatistics = new ArrayList<>();
@Override
public IOStatisticsStoreBuilderImpl withCounters(final String... keys) {
counters.addAll(Arrays.asList(keys));
return this;
}
@Override
public IOStatisticsStoreBuilderImpl withGauges(final String... keys) {
gauges.addAll(Arrays.asList(keys));
return this;
}
@Override
public IOStatisticsStoreBuilderImpl withMaximums(final String... keys) {
maximums.addAll(Arrays.asList(keys));
return this;
}
@Override
public IOStatisticsStoreBuilderImpl withMinimums(final String... keys) {
minimums.addAll(Arrays.asList(keys));
return this;
}
@Override
public IOStatisticsStoreBuilderImpl withMeanStatistics(
final String... keys) {
meanStatistics.addAll(Arrays.asList(keys));
return this;
}
@Override
public IOStatisticsStoreBuilderImpl withDurationTracking(
final String... prefixes) {
for (String p : prefixes) {
withCounters(p, p + SUFFIX_FAILURES);
withMinimums(
p + SUFFIX_MIN,
p + SUFFIX_FAILURES + SUFFIX_MIN);
withMaximums(
p + SUFFIX_MAX,
p + SUFFIX_FAILURES + SUFFIX_MAX);
withMeanStatistics(
p + SUFFIX_MEAN,
p + SUFFIX_FAILURES + SUFFIX_MEAN);
}
return this;
}
@Override
public IOStatisticsStoreBuilderImpl withSampleTracking(
final String... prefixes) {
for (String p : prefixes) {
withCounters(p);
withMinimums(p);
withMaximums(p);
withMeanStatistics(p);
}
return this;
}
@Override
public IOStatisticsStore build() {
return new IOStatisticsStoreImpl(counters, gauges, minimums,
maximums, meanStatistics);
}
}
| IOStatisticsStoreBuilderImpl |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/MissingBracesTest.java | {
"start": 1661,
"end": 2437
} | class ____ {
void f(boolean x, List<Integer> is) {
if (x) {
throw new AssertionError();
} else {
x = !x;
}
while (x) {
g();
}
do {
g();
} while (x);
for (; x; ) {
g();
}
for (int i : is) {
g();
}
}
void g() {}
}
""")
.doTest();
}
@Test
public void negative() {
CompilationTestHelper.newInstance(MissingBraces.class, getClass())
.addSourceLines(
"Test.java",
"""
| Test |
java | elastic__elasticsearch | qa/system-indices/src/javaRestTest/java/org/elasticsearch/system/indices/SystemAliasIT.java | {
"start": 884,
"end": 10067
} | class ____ extends AbstractSystemIndicesIT {
@After
public void resetFeatures() throws Exception {
performPostFeaturesReset(client());
}
public void testCreatingSystemIndexWithAlias() throws Exception {
{
Request request = new Request("PUT", "/.internal-unmanaged-index-8");
request.setJsonEntity("{\"aliases\": {\".internal-unmanaged-alias\": {}}}");
request.setOptions(RequestOptions.DEFAULT.toBuilder().addHeader("X-elastic-product-origin", "elastic"));
Response response = client().performRequest(request);
assertThat(response.getStatusLine().getStatusCode(), is(200));
}
assertAliasIsHiddenInIndexResponse(".internal-unmanaged-index-8", ".internal-unmanaged-alias", true);
assertAliasIsHiddenInAliasesEndpoint(".internal-unmanaged-index-8", ".internal-unmanaged-alias", true);
}
public void testCreatingSystemIndexWithLegacyAlias() throws Exception {
{
Request request = new Request("PUT", "/_template/system_template");
request.setJsonEntity(
"{"
+ " \"index_patterns\": [\".internal-unmanaged-*\"],"
+ " \"aliases\": {"
+ " \".internal-unmanaged-alias\": {}"
+ " }"
+ "}"
);
request.setOptions(expectWarnings("Legacy index templates are deprecated in favor of composable templates."));
Response response = client().performRequest(request);
assertThat(response.getStatusLine().getStatusCode(), is(200));
}
{
Request request = new Request("PUT", "/.internal-unmanaged-index-8");
request.setOptions(RequestOptions.DEFAULT.toBuilder().addHeader("X-elastic-product-origin", "elastic"));
Response response = client().performRequest(request);
assertThat(response.getStatusLine().getStatusCode(), is(200));
}
assertAliasIsHiddenInIndexResponse(".internal-unmanaged-index-8", ".internal-unmanaged-alias", false);
assertAliasIsHiddenInAliasesEndpoint(".internal-unmanaged-index-8", ".internal-unmanaged-alias", false);
}
public void testCreatingSystemIndexWithIndexAliasEndpoint() throws Exception {
{
Request request = new Request("PUT", "/.internal-unmanaged-index-8");
request.setOptions(RequestOptions.DEFAULT.toBuilder().addHeader("X-elastic-product-origin", "elastic"));
Response response = client().performRequest(request);
assertThat(response.getStatusLine().getStatusCode(), is(200));
}
{
Request request = new Request("PUT", "/.internal-unmanaged-index-8/_alias/.internal-unmanaged-alias");
request.setOptions(
expectWarnings(
"this request accesses system indices: [.internal-unmanaged-index-8], "
+ "but in a future major version, direct access to system indices will be prevented by default"
)
);
Response response = client().performRequest(request);
assertThat(response.getStatusLine().getStatusCode(), is(200));
}
assertAliasIsHiddenInIndexResponse(".internal-unmanaged-index-8", ".internal-unmanaged-alias", true);
assertAliasIsHiddenInAliasesEndpoint(".internal-unmanaged-index-8", ".internal-unmanaged-alias", true);
}
public void testCreatingSystemIndexWithAliasEndpoint() throws Exception {
{
Request request = new Request("PUT", "/.internal-unmanaged-index-8");
request.setOptions(RequestOptions.DEFAULT.toBuilder().addHeader("X-elastic-product-origin", "elastic"));
Response response = client().performRequest(request);
assertThat(response.getStatusLine().getStatusCode(), is(200));
}
{
Request request = new Request("PUT", "/_alias/.internal-unmanaged-alias");
request.setJsonEntity("{\"index\": \".internal-unmanaged-index-8\"}");
request.setOptions(
expectWarnings(
"this request accesses system indices: [.internal-unmanaged-index-8], "
+ "but in a future major version, direct access to system indices will be prevented by default"
)
);
Response response = client().performRequest(request);
assertThat(response.getStatusLine().getStatusCode(), is(200));
}
assertAliasIsHiddenInIndexResponse(".internal-unmanaged-index-8", ".internal-unmanaged-alias", true);
assertAliasIsHiddenInAliasesEndpoint(".internal-unmanaged-index-8", ".internal-unmanaged-alias", true);
}
public void testCreatingSystemIndexWithAliasesEndpoint() throws Exception {
{
Request request = new Request("PUT", "/.internal-unmanaged-index-8");
request.setOptions(RequestOptions.DEFAULT.toBuilder().addHeader("X-elastic-product-origin", "elastic"));
Response response = client().performRequest(request);
assertThat(response.getStatusLine().getStatusCode(), is(200));
}
{
Request request = new Request("POST", "/_aliases");
request.setJsonEntity(
"{"
+ " \"actions\": ["
+ " {"
+ " \"add\": {"
+ " \"index\": \".internal-unmanaged-index-8\","
+ " \"alias\": \".internal-unmanaged-alias\""
+ " }"
+ " }"
+ " ]"
+ "}"
);
request.setOptions(
expectWarnings(
"this request accesses system indices: [.internal-unmanaged-index-8], "
+ "but in a future major version, direct access to system indices will be prevented by default"
)
);
Response response = client().performRequest(request);
assertThat(response.getStatusLine().getStatusCode(), is(200));
}
assertAliasIsHiddenInIndexResponse(".internal-unmanaged-index-8", ".internal-unmanaged-alias", true);
assertAliasIsHiddenInAliasesEndpoint(".internal-unmanaged-index-8", ".internal-unmanaged-alias", true);
}
@SuppressWarnings("unchecked")
private void assertAliasIsHiddenInIndexResponse(String indexName, String aliasName, boolean expectMatch) throws IOException {
Request request = new Request("GET", "/" + indexName);
request.setOptions(
expectWarnings(
"this request accesses system indices: ["
+ indexName
+ "], "
+ "but in a future major version, direct access to system indices will be prevented by default"
)
);
Response response = client().performRequest(request);
Map<String, Object> responseMap = responseAsMap(response);
Map<String, Object> indexMap = (Map<String, Object>) responseMap.get(indexName);
Map<String, Object> settingsMap = (Map<String, Object>) indexMap.get("settings");
Map<String, Object> indexSettingsMap = (Map<String, Object>) settingsMap.get("index");
assertThat(indexSettingsMap.get("hidden"), equalTo("true"));
Map<String, Object> aliasesMap = (Map<String, Object>) indexMap.get("aliases");
if (expectMatch == false) {
assertTrue(aliasesMap.keySet().isEmpty());
} else {
assertThat(aliasesMap.keySet(), equalTo(Set.of(aliasName)));
Map<String, Object> aliasMap = (Map<String, Object>) aliasesMap.get(aliasName);
assertThat(aliasMap.get("is_hidden"), notNullValue());
assertThat(aliasMap.get("is_hidden"), equalTo(true));
}
}
@SuppressWarnings("unchecked")
private void assertAliasIsHiddenInAliasesEndpoint(String indexName, String aliasName, boolean expectMatch) throws IOException {
Request request = new Request("GET", "/_aliases");
request.setOptions(
expectWarnings(
"this request accesses system indices: ["
+ indexName
+ "], "
+ "but in a future major version, direct access to system indices will be prevented by default"
)
);
Response response = client().performRequest(request);
Map<String, Object> responseMap = responseAsMap(response);
Map<String, Object> indexAliasMap = (Map<String, Object>) responseMap.get(indexName);
Map<String, Object> aliasesMap = (Map<String, Object>) indexAliasMap.get("aliases");
Map<String, Object> aliasMap = (Map<String, Object>) aliasesMap.get(aliasName);
if (expectMatch == false) {
assertNull(aliasMap);
} else {
assertThat(aliasMap.get("is_hidden"), notNullValue());
assertThat(aliasMap.get("is_hidden"), equalTo(true));
}
}
}
| SystemAliasIT |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/masterreplica/SentinelTopologyRefreshEvent.java | {
"start": 201,
"end": 736
} | class ____ implements Event {
private final String source;
private final String message;
private final long delayMs;
public SentinelTopologyRefreshEvent(String source, String message, long delayMs) {
this.source = source;
this.message = message;
this.delayMs = delayMs;
}
public String getSource() {
return source;
}
public String getMessage() {
return message;
}
public long getDelayMs() {
return delayMs;
}
}
| SentinelTopologyRefreshEvent |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java | {
"start": 19240,
"end": 22621
} | class ____ implements TransportRequestHandler<NodeRequest> {
@Override
public void messageReceived(final NodeRequest request, TransportChannel channel, Task task) throws Exception {
executeAsDataNode(
task,
request.getIndicesLevelRequest(),
request.getShards(),
request.getNodeId(),
new ChannelActionListener<>(channel)
);
}
}
private void executeAsDataNode(
Task task,
Request request,
List<ShardRouting> shards,
String nodeId,
ActionListener<NodeResponse> listener
) {
assert Transports.assertNotTransportThread("O(#shards) work must always fork to an appropriate executor");
logger.trace("[{}] executing operation on [{}] shards", actionName, shards.size());
final NodeContext nodeContext = createNodeContext();
new CancellableFanOut<ShardRouting, ShardOperationResult, NodeResponse>() {
final ArrayList<ShardOperationResult> results = new ArrayList<>(shards.size());
final ArrayList<BroadcastShardOperationFailedException> exceptions = new ArrayList<>(0);
@Override
protected void sendItemRequest(ShardRouting shardRouting, ActionListener<ShardOperationResult> listener) {
logger.trace(() -> format("[%s] executing operation for shard [%s]", actionName, shardRouting.shortSummary()));
ActionRunnable.wrap(listener, l -> shardOperation(request, shardRouting, task, nodeContext, l)).run();
}
@Override
protected void onItemResponse(ShardRouting shardRouting, ShardOperationResult shardOperationResult) {
assert Transports.assertNotTransportThread("O(#shards) work must always fork to an appropriate executor");
synchronized (results) {
results.add(shardOperationResult);
}
}
@Override
protected void onItemFailure(ShardRouting shardRouting, Exception e) {
assert Transports.assertNotTransportThread("O(#shards) work must always fork to an appropriate executor");
logger.log(
TransportActions.isShardNotAvailableException(e) ? Level.TRACE : Level.DEBUG,
() -> format("[%s] failed to execute operation for shard [%s]", actionName, shardRouting.shortSummary()),
e
);
if (TransportActions.isShardNotAvailableException(e) == false) {
synchronized (exceptions) {
exceptions.add(
new BroadcastShardOperationFailedException(shardRouting.shardId(), "operation " + actionName + " failed", e)
);
}
}
}
@Override
protected NodeResponse onCompletion() {
// ref releases all happen-before here so no need to be synchronized
return new NodeResponse(nodeId, shards.size(), results, exceptions);
}
@Override
public String toString() {
return transportNodeBroadcastAction;
}
}.run(task, shards.iterator(), listener);
}
| BroadcastByNodeTransportRequestHandler |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/tool/schema/extract/spi/NameSpaceIndexesInformation.java | {
"start": 405,
"end": 1251
} | class ____ {
private final IdentifierHelper identifierHelper;
private final Map<String, List<IndexInformation>> indexes = new HashMap<>();
public NameSpaceIndexesInformation(IdentifierHelper identifierHelper) {
this.identifierHelper = identifierHelper;
}
public void addIndexInformation(TableInformation tableInformation, IndexInformation indexInformation) {
indexes.computeIfAbsent( tableInformation.getName().getTableName().getText(), k -> new ArrayList<>() )
.add( indexInformation );
}
public @Nullable List<IndexInformation> getIndexesInformation(Table table) {
return indexes.get( identifierHelper.toMetaDataObjectName( table.getQualifiedTableName().getTableName() ) );
}
public @Nullable List<IndexInformation> getIndexesInformation(String tableName) {
return indexes.get( tableName );
}
}
| NameSpaceIndexesInformation |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java | {
"start": 9052,
"end": 27420
} | class ____ {
private VolumeScanner scanner;
public void setup(VolumeScanner scanner) {
LOG.trace("Starting VolumeScanner {}",
scanner.volume);
this.scanner = scanner;
}
public void handle(ExtendedBlock block, IOException e) {
FsVolumeSpi volume = scanner.volume;
if (e == null) {
LOG.trace("Successfully scanned {} on {}", block, volume);
return;
}
// If the block does not exist anymore, then it's not an error.
if (!volume.getDataset().contains(block)) {
LOG.debug("Volume {}: block {} is no longer in the dataset.",
volume, block);
return;
}
// If the block exists, the exception may due to a race with write:
// The BlockSender got an old block path in rbw. BlockReceiver removed
// the rbw block from rbw to finalized but BlockSender tried to open the
// file before BlockReceiver updated the VolumeMap. The state of the
// block can be changed again now, so ignore this error here. If there
// is a block really deleted by mistake, DirectoryScan should catch it.
if (e instanceof FileNotFoundException ) {
LOG.info("Volume {}: verification failed for {} because of " +
"FileNotFoundException. This may be due to a race with write.",
volume, block);
return;
}
LOG.warn("Reporting bad {} on {}", block, volume, e);
scanner.datanode.handleBadBlock(block, e, true);
}
}
VolumeScanner(Conf conf, DataNode datanode, FsVolumeReference ref) {
this.conf = conf;
this.datanode = datanode;
this.metrics = datanode.getMetrics();
this.ref = ref;
this.volume = ref.getVolume();
ScanResultHandler handler;
try {
handler = conf.resultHandler.newInstance();
} catch (Throwable e) {
LOG.error("unable to instantiate {}", conf.resultHandler, e);
handler = new ScanResultHandler();
}
this.resultHandler = handler;
setName("VolumeScannerThread(" + volume + ")");
setDaemon(true);
}
private void saveBlockIterator(BlockIterator iter) {
try {
iter.save();
} catch (IOException e) {
LOG.warn("{}: error saving {}.", this, iter, e);
}
}
private void expireOldScannedBytesRecords(long monotonicMs) {
long newMinute =
TimeUnit.MINUTES.convert(monotonicMs, TimeUnit.MILLISECONDS);
if (curMinute == newMinute) {
return;
}
// If a minute or more has gone past since we last updated the scannedBytes
// array, zero out the slots corresponding to those minutes.
for (long m = curMinute + 1; m <= newMinute; m++) {
int slotIdx = (int)(m % MINUTES_PER_HOUR);
LOG.trace("{}: updateScannedBytes is zeroing out slotIdx {}. " +
"curMinute = {}; newMinute = {}", this, slotIdx,
curMinute, newMinute);
scannedBytesSum -= scannedBytes[slotIdx];
scannedBytes[slotIdx] = 0;
}
curMinute = newMinute;
}
/**
* Find a usable block iterator.<p/>
*
* We will consider available block iterators in order. This property is
* important so that we don't keep rescanning the same block pool id over
* and over, while other block pools stay unscanned.<p/>
*
* A block pool is always ready to scan if the iterator is not at EOF. If
* the iterator is at EOF, the block pool will be ready to scan when
* conf.scanPeriodMs milliseconds have elapsed since the iterator was last
* rewound.<p/>
*
* @return 0 if we found a usable block iterator; the
* length of time we should delay before
* checking again otherwise.
*/
private synchronized long findNextUsableBlockIter() {
int numBlockIters = blockIters.size();
if (numBlockIters == 0) {
LOG.debug("{}: no block pools are registered.", this);
return Long.MAX_VALUE;
}
int curIdx;
if (curBlockIter == null) {
curIdx = 0;
} else {
curIdx = blockIters.indexOf(curBlockIter);
Preconditions.checkState(curIdx >= 0);
}
// Note that this has to be wall-clock time, not monotonic time. This is
// because the time saved in the cursor file is a wall-clock time. We do
// not want to save a monotonic time in the cursor file, because it resets
// every time the machine reboots (on most platforms).
long nowMs = Time.now();
long minTimeoutMs = Long.MAX_VALUE;
for (int i = 0; i < numBlockIters; i++) {
int idx = (curIdx + i + 1) % numBlockIters;
BlockIterator iter = blockIters.get(idx);
if (!iter.atEnd()) {
LOG.info("Now scanning bpid {} on volume {}",
iter.getBlockPoolId(), volume);
curBlockIter = iter;
return 0L;
}
long iterStartMs = iter.getIterStartMs();
long waitMs = (iterStartMs + conf.scanPeriodMs) - nowMs;
if (waitMs <= 0) {
iter.rewind();
LOG.info("Now rescanning bpid {} on volume {}, after more than " +
"{} hour(s)", iter.getBlockPoolId(), volume,
TimeUnit.HOURS.convert(conf.scanPeriodMs, TimeUnit.MILLISECONDS));
curBlockIter = iter;
return 0L;
}
minTimeoutMs = Math.min(minTimeoutMs, waitMs);
}
LOG.info("{}: no suitable block pools found to scan. Waiting {} ms.",
this, minTimeoutMs);
return minTimeoutMs;
}
/**
* Scan a block.
*
* @param cblock The block to scan.
* @param bytesPerSec The bytes per second to scan at.
*
* @return The length of the block that was scanned, or
* -1 if the block could not be scanned.
*/
private long scanBlock(ExtendedBlock cblock, long bytesPerSec) {
// 'cblock' has a valid blockId and block pool id, but we don't yet know the
// genstamp the block is supposed to have. Ask the FsDatasetImpl for this
// information.
ExtendedBlock block = null;
try {
Block b = volume.getDataset().getStoredBlock(
cblock.getBlockPoolId(), cblock.getBlockId());
if (b == null) {
LOG.info("Replica {} was not found in the VolumeMap for volume {}",
cblock, volume);
} else {
block = new ExtendedBlock(cblock.getBlockPoolId(), b);
}
} catch (FileNotFoundException e) {
LOG.info("FileNotFoundException while finding block {} on volume {}",
cblock, volume);
} catch (IOException e) {
LOG.warn("I/O error while finding block {} on volume {}",
cblock, volume);
}
if (block == null) {
return -1; // block not found.
}
LOG.debug("start scanning block {}", block);
BlockSender blockSender = null;
try {
blockSender = new BlockSender(block, 0, -1,
false, true, true, datanode, null,
CachingStrategy.newDropBehind());
throttler.setBandwidth(bytesPerSec);
long bytesRead = blockSender.sendBlock(nullStream, null, throttler);
resultHandler.handle(block, null);
metrics.incrBlocksVerified();
return bytesRead;
} catch (IOException e) {
resultHandler.handle(block, e);
} finally {
IOUtils.cleanupWithLogger(null, blockSender);
}
metrics.incrBlockVerificationFailures();
return -1;
}
@VisibleForTesting
static boolean calculateShouldScan(String storageId, long targetBytesPerSec,
long scannedBytesSum, long startMinute, long curMinute) {
long runMinutes = curMinute - startMinute;
long effectiveBytesPerSec;
if (runMinutes <= 0) {
// avoid division by zero
effectiveBytesPerSec = scannedBytesSum;
} else {
if (runMinutes > MINUTES_PER_HOUR) {
// we only keep an hour's worth of rate information
runMinutes = MINUTES_PER_HOUR;
}
effectiveBytesPerSec = scannedBytesSum /
(SECONDS_PER_MINUTE * runMinutes);
}
boolean shouldScan = effectiveBytesPerSec <= targetBytesPerSec;
LOG.trace("{}: calculateShouldScan: effectiveBytesPerSec = {}, and " +
"targetBytesPerSec = {}. startMinute = {}, curMinute = {}, " +
"shouldScan = {}",
storageId, effectiveBytesPerSec, targetBytesPerSec,
startMinute, curMinute, shouldScan);
return shouldScan;
}
/**
* Get next block and check if it's needed to scan.
*
* @return the candidate block.
*/
ExtendedBlock getNextBlockToScan() {
ExtendedBlock block;
try {
block = curBlockIter.nextBlock();
} catch (IOException e) {
// There was an error listing the next block in the volume. This is a
// serious issue.
LOG.warn("{}: nextBlock error on {}", this, curBlockIter);
// On the next loop iteration, curBlockIter#eof will be set to true, and
// we will pick a different block iterator.
return null;
}
if (block == null) {
// The BlockIterator is at EOF.
LOG.info("{}: finished scanning block pool {}",
this, curBlockIter.getBlockPoolId());
saveBlockIterator(curBlockIter);
return null;
} else if (conf.skipRecentAccessed) {
// Check the access time of block file to avoid scanning recently
// changed blocks, reducing disk IO.
try {
BlockLocalPathInfo blockLocalPathInfo =
volume.getDataset().getBlockLocalPathInfo(block);
BasicFileAttributes attr = Files.readAttributes(
new File(blockLocalPathInfo.getBlockPath()).toPath(),
BasicFileAttributes.class);
if (System.currentTimeMillis() - attr.lastAccessTime().
to(TimeUnit.MILLISECONDS) < conf.scanPeriodMs) {
return null;
}
} catch (IOException ioe) {
LOG.debug("Failed to get access time of block {}",
block, ioe);
}
}
return block;
}
/**
* Run an iteration of the VolumeScanner loop.
*
* @param suspectBlock A suspect block which we should scan, or null to
* scan the next regularly scheduled block.
*
* @return The number of milliseconds to delay before running the loop
* again, or 0 to re-run the loop immediately.
*/
private long runLoop(ExtendedBlock suspectBlock) {
long bytesScanned = -1;
boolean scanError = false;
ExtendedBlock block = null;
try {
long monotonicMs = Time.monotonicNow();
expireOldScannedBytesRecords(monotonicMs);
if (!calculateShouldScan(volume.getStorageID(), conf.targetBytesPerSec,
scannedBytesSum, startMinute, curMinute)) {
// If neededBytesPerSec is too low, then wait few seconds for some old
// scannedBytes records to expire.
return 30000L;
}
if (suspectBlock != null) {
block = suspectBlock;
} else {
// Find a usable block pool to scan.
if ((curBlockIter == null) || curBlockIter.atEnd()) {
long timeout = findNextUsableBlockIter();
if (timeout > 0) {
LOG.trace("{}: no block pools are ready to scan yet. Waiting " +
"{} ms.", this, timeout);
synchronized (stats) {
stats.nextBlockPoolScanStartMs = Time.monotonicNow() + timeout;
}
return timeout;
}
synchronized (stats) {
stats.scansSinceRestart++;
stats.blocksScannedInCurrentPeriod = 0;
stats.nextBlockPoolScanStartMs = -1;
}
return 0L;
}
block = getNextBlockToScan();
if (block == null) {
return 0L;
}
}
if (curBlockIter != null) {
long saveDelta = monotonicMs - curBlockIter.getLastSavedMs();
if (saveDelta >= conf.cursorSaveMs) {
LOG.debug("{}: saving block iterator {} after {} ms.",
this, curBlockIter, saveDelta);
saveBlockIterator(curBlockIter);
}
}
bytesScanned = scanBlock(block, conf.targetBytesPerSec);
if (bytesScanned >= 0) {
scannedBytesSum += bytesScanned;
scannedBytes[(int)(curMinute % MINUTES_PER_HOUR)] += bytesScanned;
} else {
scanError = true;
}
return 0L;
} finally {
synchronized (stats) {
stats.bytesScannedInPastHour = scannedBytesSum;
if (bytesScanned > 0) {
stats.blocksScannedInCurrentPeriod++;
stats.blocksScannedSinceRestart++;
}
if (scanError) {
stats.scanErrorsSinceRestart++;
}
if (block != null) {
stats.lastBlockScanned = block;
}
if (curBlockIter == null) {
stats.eof = true;
stats.blockPoolPeriodEndsMs = -1;
} else {
stats.eof = curBlockIter.atEnd();
stats.blockPoolPeriodEndsMs =
curBlockIter.getIterStartMs() + conf.scanPeriodMs;
}
}
}
}
/**
* If there are elements in the suspectBlocks list, removes
* and returns the first one. Otherwise, returns null.
*/
private synchronized ExtendedBlock popNextSuspectBlock() {
Iterator<ExtendedBlock> iter = suspectBlocks.iterator();
if (!iter.hasNext()) {
return null;
}
ExtendedBlock block = iter.next();
iter.remove();
return block;
}
@Override
public void work() {
// Record the minute on which the scanner started.
this.startMinute =
TimeUnit.MINUTES.convert(Time.monotonicNow(), TimeUnit.MILLISECONDS);
this.curMinute = startMinute;
try {
LOG.trace("{}: thread starting.", this);
resultHandler.setup(this);
try {
long timeout = 0;
while (true) {
ExtendedBlock suspectBlock = null;
// Take the lock to check if we should stop, and access the
// suspect block list.
synchronized (this) {
if (stopping) {
break;
}
if (timeout > 0) {
LOG.debug("{}: wait for {} milliseconds", this, timeout);
wait(timeout);
if (stopping) {
break;
}
}
suspectBlock = popNextSuspectBlock();
}
timeout = runLoop(suspectBlock);
}
} catch (InterruptedException e) {
// We are exiting because of an InterruptedException,
// probably sent by VolumeScanner#shutdown.
LOG.trace("{} exiting because of InterruptedException.", this);
} catch (Throwable e) {
LOG.error("{} exiting because of exception ", this, e);
}
LOG.info("{} exiting.", this);
VolumeScannerCBInjector.get().preSavingBlockIteratorTask(this);
// Save the current position of all block iterators and close them.
for (BlockIterator iter : blockIters) {
saveBlockIterator(iter);
IOUtils.cleanupWithLogger(null, iter);
}
} finally {
VolumeScannerCBInjector.get().terminationCallBack(this);
// When the VolumeScanner exits, release the reference we were holding
// on the volume. This will allow the volume to be removed later.
IOUtils.cleanupWithLogger(null, ref);
}
}
@Override
public String toString() {
return "VolumeScanner(" + volume +
", " + volume.getStorageID() + ")";
}
/**
* Shut down this scanner.
*/
public synchronized void shutdown() {
stopping = true;
notify();
this.interrupt();
VolumeScannerCBInjector.get().shutdownCallBack(this);
}
public synchronized void markSuspectBlock(ExtendedBlock block) {
if (stopping) {
LOG.debug("{}: Not scheduling suspect block {} for " +
"rescanning, because this volume scanner is stopping.", this, block);
return;
}
Boolean recent = recentSuspectBlocks.getIfPresent(block);
if (recent != null) {
LOG.debug("{}: Not scheduling suspect block {} for " +
"rescanning, because we rescanned it recently.", this, block);
return;
}
if (suspectBlocks.contains(block)) {
LOG.debug("{}: suspect block {} is already queued for " +
"rescanning.", this, block);
return;
}
suspectBlocks.add(block);
recentSuspectBlocks.put(block, true);
LOG.debug("{}: Scheduling suspect block {} for rescanning.", this, block);
notify(); // wake scanner thread.
}
/**
* Allow the scanner to scan the given block pool.
*
* @param bpid The block pool id.
*/
public synchronized void enableBlockPoolId(String bpid) {
for (BlockIterator iter : blockIters) {
if (iter.getBlockPoolId().equals(bpid)) {
LOG.warn("{}: already enabled scanning on block pool {}", this, bpid);
return;
}
}
BlockIterator iter = null;
try {
// Load a block iterator for the next block pool on the volume.
iter = volume.loadBlockIterator(bpid, BLOCK_ITERATOR_NAME);
LOG.trace("{}: loaded block iterator for {}.", this, bpid);
} catch (FileNotFoundException e) {
LOG.debug("{}: failed to load block iterator: " + e.getMessage(), this);
} catch (IOException e) {
LOG.warn("{}: failed to load block iterator.", this, e);
}
if (iter == null) {
iter = volume.newBlockIterator(bpid, BLOCK_ITERATOR_NAME);
LOG.trace("{}: created new block iterator for {}.", this, bpid);
}
iter.setMaxStalenessMs(conf.maxStalenessMs);
blockIters.add(iter);
notify();
}
/**
* Disallow the scanner from scanning the given block pool.
*
* @param bpid The block pool id.
*/
public synchronized void disableBlockPoolId(String bpid) {
Iterator<BlockIterator> i = blockIters.iterator();
while (i.hasNext()) {
BlockIterator iter = i.next();
if (iter.getBlockPoolId().equals(bpid)) {
LOG.trace("{}: disabling scanning on block pool {}", this, bpid);
i.remove();
IOUtils.cleanupWithLogger(null, iter);
if (curBlockIter == iter) {
curBlockIter = null;
}
notify();
return;
}
}
LOG.warn("{}: can't remove block pool {}, because it was never " +
"added.", this, bpid);
}
@VisibleForTesting
Statistics getStatistics() {
synchronized (stats) {
return new Statistics(stats);
}
}
}
| ScanResultHandler |
java | apache__flink | flink-core/src/main/java/org/apache/flink/core/memory/SeekableDataInputView.java | {
"start": 1048,
"end": 1269
} | interface ____ extends DataInputView {
/**
* Sets the read pointer to the given position.
*
* @param position The new read position.
*/
void setReadPosition(long position);
}
| SeekableDataInputView |
java | apache__flink | flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/kryo/KryoSerializerSnapshotData.java | {
"start": 13970,
"end": 14224
} | class ____ class "
+ registeredClassname
+ " in classpath; using a dummy Kryo serializer that should be replaced as soon as"
+ " a new Kryo serializer for the | for |
java | hibernate__hibernate-orm | hibernate-testing/src/main/java/org/hibernate/testing/orm/junit/DialectFeatureChecks.java | {
"start": 42259,
"end": 42454
} | class ____ implements DialectFeatureCheck {
public boolean apply(Dialect dialect) {
return dialect.getPreferredSqlTypeCodeForArray() == SqlTypes.ARRAY;
}
}
public static | SupportsTypedArrays |
java | mybatis__mybatis-3 | src/test/java/org/apache/ibatis/submitted/refid_resolution/ExternalRefidResolutionTest.java | {
"start": 1083,
"end": 1600
} | class ____ {
@Test
void externalRefAfterSelectKey() {
assertDoesNotThrow(() -> {
String resource = "org/apache/ibatis/submitted/refid_resolution/ExternalMapperConfig.xml";
try (Reader reader = Resources.getResourceAsReader(resource)) {
SqlSessionFactoryBuilder builder = new SqlSessionFactoryBuilder();
SqlSessionFactory sqlSessionFactory = builder.build(reader);
sqlSessionFactory.getConfiguration().getMappedStatementNames();
}
});
}
}
| ExternalRefidResolutionTest |
java | apache__kafka | connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginUtils.java | {
"start": 7692,
"end": 10004
} | class ____, otherwise false.
*/
public static boolean isClassFile(Path path) {
return path.toString().toLowerCase(Locale.ROOT).endsWith(".class");
}
public static Set<Path> pluginLocations(String pluginPath, boolean failFast) {
if (pluginPath == null) {
return Set.of();
}
String[] pluginPathElements = COMMA_WITH_WHITESPACE.split(pluginPath.trim(), -1);
Set<Path> pluginLocations = new LinkedHashSet<>();
for (String path : pluginPathElements) {
try {
Path pluginPathElement = Paths.get(path).toAbsolutePath();
if (pluginPath.isEmpty()) {
log.warn("Plugin path element is empty, evaluating to {}.", pluginPathElement);
}
if (!Files.exists(pluginPathElement)) {
throw new FileNotFoundException(pluginPathElement.toString());
}
// Currently 'plugin.paths' property is a list of top-level directories
// containing plugins
if (Files.isDirectory(pluginPathElement)) {
pluginLocations.addAll(pluginLocations(pluginPathElement));
} else if (isArchive(pluginPathElement)) {
pluginLocations.add(pluginPathElement);
}
} catch (InvalidPathException | IOException e) {
if (failFast) {
throw new RuntimeException(e);
}
log.error("Could not get listing for plugin path: {}. Ignoring.", path, e);
}
}
return pluginLocations;
}
private static List<Path> pluginLocations(Path pluginPathElement) throws IOException {
List<Path> locations = new ArrayList<>();
try (
DirectoryStream<Path> listing = Files.newDirectoryStream(
pluginPathElement,
PLUGIN_PATH_FILTER
)
) {
for (Path dir : listing) {
locations.add(dir);
}
}
return locations;
}
/**
* Given a top path in the filesystem, return a list of paths to archives (JAR or ZIP
* files) contained under this top path. If the top path contains only java | file |
java | quarkusio__quarkus | extensions/websockets-next/deployment/src/test/java/io/quarkus/websockets/next/test/client/BroadCastOnClientTest.java | {
"start": 492,
"end": 939
} | class ____ {
@RegisterExtension
public static final QuarkusUnitTest test = new QuarkusUnitTest()
.withApplicationRoot(root -> {
root.addClasses(ServerEndpoint.class, ClientEndpoint.class);
})
.setExpectedException(WebSocketClientException.class, true);
@Test
void testInvalidBroadcast() {
fail();
}
@WebSocket(path = "/end")
public static | BroadCastOnClientTest |
java | google__dagger | dagger-compiler/main/java/dagger/internal/codegen/model/BindingGraph.java | {
"start": 4262,
"end": 13294
} | class ____ {
/** Returns the graph in its {@link Network} representation. */
public abstract ImmutableNetwork<Node, Edge> network();
@Override
public String toString() {
return network().toString();
}
/**
* Returns {@code true} if this graph was constructed from a module for full binding graph
* validation.
*
* @deprecated use {@link #isFullBindingGraph()} to tell if this is a full binding graph, or
* {@link ComponentNode#isRealComponent() rootComponentNode().isRealComponent()} to tell if
* the root component node is really a component or derived from a module. Dagger can generate
* full binding graphs for components and subcomponents as well as modules.
*/
@Deprecated
public boolean isModuleBindingGraph() {
return !rootComponentNode().isRealComponent();
}
/**
* Returns {@code true} if this is a full binding graph, which contains all bindings installed in
* the component, or {@code false} if it is a reachable binding graph, which contains only
* bindings that are reachable from at least one {@linkplain #entryPointEdges() entry point}.
*
* @see <a href="https://dagger.dev/compiler-options#full-binding-graph-validation">Full binding
* graph validation</a>
*/
public abstract boolean isFullBindingGraph();
/**
* Returns {@code true} if the {@link #rootComponentNode()} is a subcomponent. This occurs in
* when {@code -Adagger.fullBindingGraphValidation} is used in a compilation with a subcomponent.
*
* @deprecated use {@link ComponentNode#isSubcomponent() rootComponentNode().isSubcomponent()}
* instead
*/
@Deprecated
public boolean isPartialBindingGraph() {
return rootComponentNode().isSubcomponent();
}
/** Returns the bindings. */
public ImmutableSet<Binding> bindings() {
return nodes(Binding.class);
}
/** Returns the bindings for a key. */
public ImmutableSet<Binding> bindings(Key key) {
return nodes(Binding.class).stream()
.filter(binding -> binding.key().equals(key))
.collect(toImmutableSet());
}
/** Returns the nodes that represent missing bindings. */
public ImmutableSet<MissingBinding> missingBindings() {
return nodes(MissingBinding.class);
}
/** Returns the component nodes. */
public ImmutableSet<ComponentNode> componentNodes() {
return nodes(ComponentNode.class);
}
/** Returns the component node for a component. */
public Optional<ComponentNode> componentNode(ComponentPath component) {
return componentNodes().stream()
.filter(node -> node.componentPath().equals(component))
.findFirst();
}
/** Returns the component nodes for a component. */
public ImmutableSet<ComponentNode> componentNodes(DaggerTypeElement component) {
return componentNodes().stream()
.filter(node -> node.componentPath().currentComponent().equals(component))
.collect(toImmutableSet());
}
/** Returns the component node for the root component. */
public ComponentNode rootComponentNode() {
return componentNodes().stream()
.filter(node -> node.componentPath().atRoot())
.findFirst()
.get();
}
/** Returns the dependency edges. */
public ImmutableSet<DependencyEdge> dependencyEdges() {
return dependencyEdgeStream().collect(toImmutableSet());
}
/**
* Returns the dependency edges for the dependencies of a binding. For valid graphs, each {@link
* DependencyRequest} will map to a single {@link DependencyEdge}. When conflicting bindings exist
* for a key, the multimap will have several edges for that {@link DependencyRequest}. Graphs that
* have no binding for a key will have an edge whose {@linkplain EndpointPair#target() target
* node} is a {@link MissingBinding}.
*/
public ImmutableSetMultimap<DependencyRequest, DependencyEdge> dependencyEdges(
Binding binding) {
return dependencyEdgeStream(binding)
.collect(toImmutableSetMultimap(DependencyEdge::dependencyRequest, edge -> edge));
}
/** Returns the dependency edges for a dependency request. */
public ImmutableSet<DependencyEdge> dependencyEdges(DependencyRequest dependencyRequest) {
return dependencyEdgeStream()
.filter(edge -> edge.dependencyRequest().equals(dependencyRequest))
.collect(toImmutableSet());
}
/**
* Returns the dependency edges for the entry points of a given {@code component}. Each edge's
* source node is that component's component node.
*/
public ImmutableSet<DependencyEdge> entryPointEdges(ComponentPath component) {
return dependencyEdgeStream(componentNode(component).get()).collect(toImmutableSet());
}
private Stream<DependencyEdge> dependencyEdgeStream(Node node) {
return network().outEdges(node).stream().flatMap(instancesOf(DependencyEdge.class));
}
/**
* Returns the dependency edges for all entry points for all components and subcomponents. Each
* edge's source node is a component node.
*/
public ImmutableSet<DependencyEdge> entryPointEdges() {
return entryPointEdgeStream().collect(toImmutableSet());
}
/** Returns the binding or missing binding nodes that directly satisfy entry points. */
public ImmutableSet<MaybeBinding> entryPointBindings() {
return entryPointEdgeStream()
.map(edge -> (MaybeBinding) network().incidentNodes(edge).target())
.collect(toImmutableSet());
}
/**
* Returns the edges for entry points that transitively depend on a binding or missing binding for
* a key.
*/
public ImmutableSet<DependencyEdge> entryPointEdgesDependingOnBinding(
MaybeBinding binding) {
ImmutableNetwork<Node, DependencyEdge> dependencyGraph = dependencyGraph();
Network<Node, DependencyEdge> subgraphDependingOnBinding =
inducedSubgraph(
dependencyGraph, reachableNodes(transpose(dependencyGraph).asGraph(), binding));
return intersection(entryPointEdges(), subgraphDependingOnBinding.edges()).immutableCopy();
}
/** Returns the bindings that directly request a given binding as a dependency. */
public ImmutableSet<Binding> requestingBindings(MaybeBinding binding) {
return network().predecessors(binding).stream()
.flatMap(instancesOf(Binding.class))
.collect(toImmutableSet());
}
/**
* Returns the bindings that a given binding directly requests as a dependency. Does not include
* any {@link MissingBinding}s.
*
* @see #requestedMaybeMissingBindings(Binding)
*/
public ImmutableSet<Binding> requestedBindings(Binding binding) {
return network().successors(binding).stream()
.flatMap(instancesOf(Binding.class))
.collect(toImmutableSet());
}
/**
* Returns the bindings or missing bindings that a given binding directly requests as a
* dependency.
*
* @see #requestedBindings(Binding)
*/
public ImmutableSet<MaybeBinding> requestedMaybeMissingBindings(Binding binding) {
return network().successors(binding).stream()
.flatMap(instancesOf(MaybeBinding.class))
.collect(toImmutableSet());
}
/** Returns a subnetwork that contains all nodes but only {@link DependencyEdge}s. */
protected ImmutableNetwork<Node, DependencyEdge> dependencyGraph() {
MutableNetwork<Node, DependencyEdge> dependencyGraph =
NetworkBuilder.from(network())
.expectedNodeCount(network().nodes().size())
.expectedEdgeCount((int) dependencyEdgeStream().count())
.build();
network().nodes().forEach(dependencyGraph::addNode); // include disconnected nodes
dependencyEdgeStream()
.forEach(
edge -> {
EndpointPair<Node> endpoints = network().incidentNodes(edge);
dependencyGraph.addEdge(endpoints.source(), endpoints.target(), edge);
});
return ImmutableNetwork.copyOf(dependencyGraph);
}
@SuppressWarnings({"rawtypes", "unchecked"})
private <N extends Node> ImmutableSet<N> nodes(Class<N> clazz) {
return (ImmutableSet) nodesByClass().get(clazz);
}
private static final ImmutableSet<Class<? extends Node>> NODE_TYPES =
ImmutableSet.of(Binding.class, MissingBinding.class, ComponentNode.class);
protected ImmutableSetMultimap<Class<? extends Node>, ? extends Node> nodesByClass() {
return network().nodes().stream()
.collect(
toImmutableSetMultimap(
node ->
NODE_TYPES.stream().filter(clazz -> clazz.isInstance(node)).findFirst().get(),
node -> node));
}
private Stream<DependencyEdge> dependencyEdgeStream() {
return network().edges().stream().flatMap(instancesOf(DependencyEdge.class));
}
private Stream<DependencyEdge> entryPointEdgeStream() {
return dependencyEdgeStream().filter(DependencyEdge::isEntryPoint);
}
/**
* An edge in the binding graph. Either a {@link DependencyEdge}, a {@link
* ChildFactoryMethodEdge}, or a {@link SubcomponentCreatorBindingEdge}.
*/
public | BindingGraph |
java | spring-projects__spring-framework | spring-core/src/test/java/org/springframework/util/xml/StaxEventXMLReaderTests.java | {
"start": 1161,
"end": 2293
} | class ____ extends AbstractStaxXMLReaderTests {
public static final String CONTENT = "<root xmlns='http://springframework.org/spring-ws'><child/></root>";
@Override
protected AbstractStaxXMLReader createStaxXmlReader(InputStream inputStream) throws XMLStreamException {
return new StaxEventXMLReader(inputFactory.createXMLEventReader(inputStream));
}
@Test
void partial() throws Exception {
XMLInputFactory inputFactory = XMLInputFactory.newInstance();
XMLEventReader eventReader = inputFactory.createXMLEventReader(new StringReader(CONTENT));
eventReader.nextTag(); // skip to root
StaxEventXMLReader xmlReader = new StaxEventXMLReader(eventReader);
ContentHandler contentHandler = mock();
xmlReader.setContentHandler(contentHandler);
xmlReader.parse(new InputSource());
verify(contentHandler).startDocument();
verify(contentHandler).startElement(eq("http://springframework.org/spring-ws"), eq("child"), eq("child"), any(Attributes.class));
verify(contentHandler).endElement("http://springframework.org/spring-ws", "child", "child");
verify(contentHandler).endDocument();
}
}
| StaxEventXMLReaderTests |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/bugs/_1742/Target.java | {
"start": 232,
"end": 437
} | class ____ {
private NestedTarget nested;
public NestedTarget getNested() {
return nested;
}
public void setNested(NestedTarget nested) {
this.nested = nested;
}
}
| Target |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/context/groovy/GroovyBeanDefinitionReaderTests.java | {
"start": 28123,
"end": 28958
} | class ____ {
int age;
Bean1 bean1;
Bean3 bean3;
String person;
Bean1 parent;
List<Bean1> children = new ArrayList<>();
public int getAge() {
return age;
}
public void setAge(int age) {
this.age = age;
}
public String getPerson() {
return person;
}
public void setPerson(String person) {
this.person = person;
}
public Bean1 getParent() {
return parent;
}
public void setParent(Bean1 parent) {
this.parent = parent;
}
public Bean1 getBean1() {
return bean1;
}
public void setBean1(Bean1 bean1) {
this.bean1 = bean1;
}
public Bean3 getBean3() {
return bean3;
}
public void setBean3(Bean3 bean3) {
this.bean3 = bean3;
}
public List<Bean1> getChildren() {
return children;
}
public void setChildren(List<Bean1> children) {
this.children = children;
}
}
// bean with constructor args
| Bean2 |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/observers/async/AsyncObserverTest.java | {
"start": 2876,
"end": 3177
} | class ____ {
@Inject
Event<String> event;
void produce(String value) {
event.fire(value);
}
CompletionStage<String> produceAsync(String value) {
return event.fireAsync(value);
}
}
@RequestScoped
static | StringProducer |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/metrics/TraceReporterSetup.java | {
"start": 1267,
"end": 1986
} | class ____ extends AbstractReporterSetup<TraceReporter, SpanBuilder> {
public TraceReporterSetup(
final String name,
final MetricConfig configuration,
TraceReporter reporter,
ReporterFilter<SpanBuilder> spanFilter,
final Map<String, String> additionalVariables) {
super(name, configuration, reporter, spanFilter, additionalVariables);
}
@Override
protected ConfigOption<String> getDelimiterConfigOption() {
return TraceOptions.REPORTER_SCOPE_DELIMITER;
}
@Override
protected ConfigOption<String> getExcludedVariablesConfigOption() {
return TraceOptions.REPORTER_EXCLUDED_VARIABLES;
}
}
| TraceReporterSetup |
java | spring-projects__spring-boot | module/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/endpoint/web/annotation/WebEndpointDiscoverer.java | {
"start": 2583,
"end": 5334
} | class ____ extends EndpointDiscoverer<ExposableWebEndpoint, WebOperation>
implements WebEndpointsSupplier {
private final List<PathMapper> endpointPathMappers;
private final List<AdditionalPathsMapper> additionalPathsMappers;
private final RequestPredicateFactory requestPredicateFactory;
/**
* Create a new {@link WebEndpointDiscoverer} instance.
* @param applicationContext the source application context
* @param parameterValueMapper the parameter value mapper
* @param endpointMediaTypes the endpoint media types
* @param endpointPathMappers the endpoint path mappers
* @param additionalPathsMappers the
* @param invokerAdvisors invoker advisors to apply
* @param endpointFilters endpoint filters to apply
* @param operationFilters operation filters to apply
* @since 3.4.0
*/
public WebEndpointDiscoverer(ApplicationContext applicationContext, ParameterValueMapper parameterValueMapper,
EndpointMediaTypes endpointMediaTypes, @Nullable List<PathMapper> endpointPathMappers,
@Nullable List<AdditionalPathsMapper> additionalPathsMappers,
Collection<OperationInvokerAdvisor> invokerAdvisors,
Collection<EndpointFilter<ExposableWebEndpoint>> endpointFilters,
Collection<OperationFilter<WebOperation>> operationFilters) {
super(applicationContext, parameterValueMapper, invokerAdvisors, endpointFilters, operationFilters);
this.endpointPathMappers = (endpointPathMappers != null) ? endpointPathMappers : Collections.emptyList();
this.additionalPathsMappers = (additionalPathsMappers != null) ? additionalPathsMappers
: Collections.emptyList();
this.requestPredicateFactory = new RequestPredicateFactory(endpointMediaTypes);
}
@Override
protected ExposableWebEndpoint createEndpoint(Object endpointBean, EndpointId id, Access defaultAccess,
Collection<WebOperation> operations) {
String rootPath = PathMapper.getRootPath(this.endpointPathMappers, id);
return new DiscoveredWebEndpoint(this, endpointBean, id, rootPath, defaultAccess, operations,
this.additionalPathsMappers);
}
@Override
protected WebOperation createOperation(EndpointId endpointId, DiscoveredOperationMethod operationMethod,
OperationInvoker invoker) {
String rootPath = PathMapper.getRootPath(this.endpointPathMappers, endpointId);
WebOperationRequestPredicate requestPredicate = this.requestPredicateFactory.getRequestPredicate(rootPath,
operationMethod);
return new DiscoveredWebOperation(endpointId, operationMethod, invoker, requestPredicate);
}
@Override
protected OperationKey createOperationKey(WebOperation operation) {
return new OperationKey(operation.getRequestPredicate(),
() -> "web request predicate " + operation.getRequestPredicate());
}
static | WebEndpointDiscoverer |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/validation/beanvalidation/MethodValidationProxyTests.java | {
"start": 9411,
"end": 9753
} | class ____ {
@Bean
public static MethodValidationPostProcessor methodValidationPostProcessor(@Lazy Validator validator) {
MethodValidationPostProcessor postProcessor = new MethodValidationPostProcessor();
postProcessor.setValidator(validator);
return postProcessor;
}
}
@Configuration
public static | LazyMethodValidationConfig |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestXAttrConfigFlag.java | {
"start": 1731,
"end": 4887
} | class ____ {
private static final Path PATH = new Path("/path");
private MiniDFSCluster cluster;
private DistributedFileSystem fs;
@AfterEach
public void shutdown() throws Exception {
IOUtils.cleanupWithLogger(null, fs);
if (cluster != null) {
cluster.shutdown();
cluster = null;
}
}
@Test
public void testSetXAttr() throws Exception {
initCluster(true, false);
fs.mkdirs(PATH);
expectException(() -> fs.setXAttr(PATH, "user.foo", null));
}
@Test
public void testGetXAttrs() throws Exception {
initCluster(true, false);
fs.mkdirs(PATH);
expectException(() -> fs.getXAttrs(PATH));
}
@Test
public void testRemoveXAttr() throws Exception {
initCluster(true, false);
fs.mkdirs(PATH);
expectException(() -> fs.removeXAttr(PATH, "user.foo"));
}
@Test
public void testEditLog() throws Exception {
// With XAttrs enabled, set an XAttr.
initCluster(true, true);
fs.mkdirs(PATH);
fs.setXAttr(PATH, "user.foo", null);
// Restart with XAttrs disabled. Expect successful restart.
restart(false, false);
}
@Test
public void testFsImage() throws Exception {
// With XAttrs enabled, set an XAttr.
initCluster(true, true);
fs.mkdirs(PATH);
fs.setXAttr(PATH, "user.foo", null);
// Save a new checkpoint and restart with XAttrs still enabled.
restart(true, true);
// Restart with XAttrs disabled. Expect successful restart.
restart(false, false);
}
/**
* We expect an IOException, and we want the exception text to state the
* configuration key that controls XAttr support.
*/
private void expectException(Executable exec) {
IOException ex = assertThrows(IOException.class, exec);
assertTrue(ex.getMessage().contains(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY));
}
/**
* Initialize the cluster, wait for it to become active, and get FileSystem.
*
* @param format if true, format the NameNode and DataNodes before starting up
* @param xattrsEnabled if true, XAttr support is enabled
* @throws Exception if any step fails
*/
private void initCluster(boolean format, boolean xattrsEnabled)
throws Exception {
Configuration conf = new Configuration();
// not explicitly setting to false, should be false by default
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, xattrsEnabled);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(format)
.build();
cluster.waitActive();
fs = cluster.getFileSystem();
}
/**
* Restart the cluster, optionally saving a new checkpoint.
*
* @param checkpoint boolean true to save a new checkpoint
* @param xattrsEnabled if true, XAttr support is enabled
* @throws Exception if restart fails
*/
private void restart(boolean checkpoint, boolean xattrsEnabled)
throws Exception {
NameNode nameNode = cluster.getNameNode();
if (checkpoint) {
NameNodeAdapter.enterSafeMode(nameNode, false);
NameNodeAdapter.saveNamespace(nameNode);
}
shutdown();
initCluster(false, xattrsEnabled);
}
}
| TestXAttrConfigFlag |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/CatchFailTest.java | {
"start": 4352,
"end": 4735
} | class ____ {
public void test() {
try {
System.err.println();
} catch (Exception expected) {
org.junit.Assert.fail();
} catch (Error e) {
}
}
}
""")
.addOutputLines(
"out/Test.java",
"""
| Test |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/joinedsubclass/Person.java | {
"start": 180,
"end": 1606
} | class ____ {
private long id;
private String name;
private char sex;
private int version;
private double heightInches;
private Address address = new Address();
/**
* @return Returns the address.
*/
public Address getAddress() {
return address;
}
public void setAddress(String string) {
this.address.address = string;
}
public void setZip(String string) {
this.address.zip = string;
}
public void setCountry(String string) {
this.address.country = string;
}
/**
* @return Returns the sex.
*/
public char getSex() {
return sex;
}
/**
* @param sex The sex to set.
*/
public void setSex(char sex) {
this.sex = sex;
}
/**
* @return Returns the id.
*/
public long getId() {
return id;
}
/**
* @param id The id to set.
*/
public void setId(long id) {
this.id = id;
}
/**
* @return Returns the identity.
*/
public String getName() {
return name;
}
/**
* @param identity The identity to set.
*/
public void setName(String identity) {
this.name = identity;
}
/**
* @return Returns the height in inches.
*/
public double getHeightInches() {
return heightInches;
}
/**
* @param heightInches The height in inches to set.
*/
public void setHeightInches(double heightInches) {
this.heightInches = heightInches;
}
public int getVersion() {
return version;
}
public void setVersion(int version) {
this.version = version;
}
}
| Person |
java | apache__kafka | streams/src/main/java/org/apache/kafka/streams/kstream/Windows.java | {
"start": 1693,
"end": 3142
} | class ____<W extends Window> {
/**
* By default grace period is 24 hours for all windows in other words we allow out-of-order data for up to a day
* This behavior is now deprecated and additional details are available in the motivation for the KIP
* Check out <a href="https://cwiki.apache.org/confluence/x/Ho2NCg">KIP-633</a> for more details
*/
protected static final long DEPRECATED_DEFAULT_24_HR_GRACE_PERIOD = 24 * 60 * 60 * 1000L;
/**
* This constant is used as the specified grace period where we do not have any grace periods instead of magic constants
*/
protected static final long NO_GRACE_PERIOD = 0L;
protected Windows() {}
/**
* Create all windows that contain the provided timestamp, indexed by non-negative window start timestamps.
*
* @param timestamp the timestamp window should get created for
* @return a map of {@code windowStartTimestamp -> Window} entries
*/
public abstract Map<Long, W> windowsFor(final long timestamp);
/**
* Return the size of the specified windows in milliseconds.
*
* @return the size of the specified windows
*/
public abstract long size();
/**
* Return the window grace period (the time to admit
* out-of-order events after the end of the window.)
*
* Delay is defined as (stream_time - record_timestamp).
*/
public abstract long gracePeriodMs();
}
| Windows |
java | grpc__grpc-java | xds/src/test/java/io/grpc/xds/GcpAuthenticationFilterTest.java | {
"start": 3425,
"end": 25684
} | class ____ {
private static final GcpAuthenticationFilter.Provider FILTER_PROVIDER =
new GcpAuthenticationFilter.Provider();
private static final LdsUpdate ldsUpdate = getLdsUpdate();
private static final EdsUpdate edsUpdate = getEdsUpdate();
private static final RdsUpdate rdsUpdate = getRdsUpdate();
private static final CdsUpdate cdsUpdate = getCdsUpdate();
@Before
public void setUp() {
System.setProperty("GRPC_EXPERIMENTAL_XDS_GCP_AUTHENTICATION_FILTER", "true");
}
@Test
public void testNewFilterInstancesPerFilterName() {
assertThat(new GcpAuthenticationFilter("FILTER_INSTANCE_NAME1", 10))
.isNotEqualTo(new GcpAuthenticationFilter("FILTER_INSTANCE_NAME1", 10));
}
@Test
public void filterType_clientOnly() {
assertThat(FILTER_PROVIDER.isClientFilter()).isTrue();
assertThat(FILTER_PROVIDER.isServerFilter()).isFalse();
}
@Test
public void testParseFilterConfig_withValidConfig() {
GcpAuthnFilterConfig config = GcpAuthnFilterConfig.newBuilder()
.setCacheConfig(TokenCacheConfig.newBuilder().setCacheSize(UInt64Value.of(20)))
.build();
Any anyMessage = Any.pack(config);
ConfigOrError<GcpAuthenticationConfig> result = FILTER_PROVIDER.parseFilterConfig(anyMessage);
assertNotNull(result.config);
assertNull(result.errorDetail);
assertEquals(20L, result.config.getCacheSize());
}
@Test
public void testParseFilterConfig_withZeroCacheSize() {
GcpAuthnFilterConfig config = GcpAuthnFilterConfig.newBuilder()
.setCacheConfig(TokenCacheConfig.newBuilder().setCacheSize(UInt64Value.of(0)))
.build();
Any anyMessage = Any.pack(config);
ConfigOrError<GcpAuthenticationConfig> result = FILTER_PROVIDER.parseFilterConfig(anyMessage);
assertNull(result.config);
assertNotNull(result.errorDetail);
assertTrue(result.errorDetail.contains("cache_config.cache_size must be greater than zero"));
}
@Test
public void testParseFilterConfig_withInvalidMessageType() {
Message invalidMessage = Empty.getDefaultInstance();
ConfigOrError<GcpAuthenticationConfig> result =
FILTER_PROVIDER.parseFilterConfig(invalidMessage);
assertNull(result.config);
assertThat(result.errorDetail).contains("Invalid config type");
}
@Test
public void testClientInterceptor_success() throws IOException, ResourceInvalidException {
XdsConfig.XdsClusterConfig clusterConfig = new XdsConfig.XdsClusterConfig(
CLUSTER_NAME,
cdsUpdate,
new EndpointConfig(StatusOr.fromValue(edsUpdate)));
XdsConfig defaultXdsConfig = new XdsConfig.XdsConfigBuilder()
.setListener(ldsUpdate)
.setRoute(rdsUpdate)
.setVirtualHost(rdsUpdate.virtualHosts.get(0))
.addCluster(CLUSTER_NAME, StatusOr.fromValue(clusterConfig)).build();
CallOptions callOptionsWithXds = CallOptions.DEFAULT
.withOption(CLUSTER_SELECTION_KEY, "cluster:cluster0")
.withOption(XDS_CONFIG_CALL_OPTION_KEY, defaultXdsConfig);
GcpAuthenticationConfig config = new GcpAuthenticationConfig(10);
GcpAuthenticationFilter filter = new GcpAuthenticationFilter("FILTER_INSTANCE_NAME", 10);
ClientInterceptor interceptor = filter.buildClientInterceptor(config, null, null);
MethodDescriptor<Void, Void> methodDescriptor = TestMethodDescriptors.voidMethod();
Channel mockChannel = Mockito.mock(Channel.class);
ArgumentCaptor<CallOptions> callOptionsCaptor = ArgumentCaptor.forClass(CallOptions.class);
interceptor.interceptCall(methodDescriptor, callOptionsWithXds, mockChannel);
verify(mockChannel).newCall(eq(methodDescriptor), callOptionsCaptor.capture());
CallOptions capturedOptions = callOptionsCaptor.getAllValues().get(0);
assertNotNull(capturedOptions.getCredentials());
}
@Test
public void testClientInterceptor_createsAndReusesCachedCredentials()
throws IOException, ResourceInvalidException {
XdsConfig.XdsClusterConfig clusterConfig = new XdsConfig.XdsClusterConfig(
CLUSTER_NAME,
cdsUpdate,
new EndpointConfig(StatusOr.fromValue(edsUpdate)));
XdsConfig defaultXdsConfig = new XdsConfig.XdsConfigBuilder()
.setListener(ldsUpdate)
.setRoute(rdsUpdate)
.setVirtualHost(rdsUpdate.virtualHosts.get(0))
.addCluster(CLUSTER_NAME, StatusOr.fromValue(clusterConfig)).build();
CallOptions callOptionsWithXds = CallOptions.DEFAULT
.withOption(CLUSTER_SELECTION_KEY, "cluster:cluster0")
.withOption(XDS_CONFIG_CALL_OPTION_KEY, defaultXdsConfig);
GcpAuthenticationConfig config = new GcpAuthenticationConfig(10);
GcpAuthenticationFilter filter = new GcpAuthenticationFilter("FILTER_INSTANCE_NAME", 10);
ClientInterceptor interceptor = filter.buildClientInterceptor(config, null, null);
MethodDescriptor<Void, Void> methodDescriptor = TestMethodDescriptors.voidMethod();
Channel mockChannel = Mockito.mock(Channel.class);
ArgumentCaptor<CallOptions> callOptionsCaptor = ArgumentCaptor.forClass(CallOptions.class);
interceptor.interceptCall(methodDescriptor, callOptionsWithXds, mockChannel);
interceptor.interceptCall(methodDescriptor, callOptionsWithXds, mockChannel);
verify(mockChannel, times(2))
.newCall(eq(methodDescriptor), callOptionsCaptor.capture());
CallOptions firstCapturedOptions = callOptionsCaptor.getAllValues().get(0);
CallOptions secondCapturedOptions = callOptionsCaptor.getAllValues().get(1);
assertNotNull(firstCapturedOptions.getCredentials());
assertNotNull(secondCapturedOptions.getCredentials());
assertSame(firstCapturedOptions.getCredentials(), secondCapturedOptions.getCredentials());
}
@Test
public void testClientInterceptor_withoutClusterSelectionKey() throws Exception {
GcpAuthenticationConfig config = new GcpAuthenticationConfig(10);
GcpAuthenticationFilter filter = new GcpAuthenticationFilter("FILTER_INSTANCE_NAME", 10);
ClientInterceptor interceptor = filter.buildClientInterceptor(config, null, null);
MethodDescriptor<Void, Void> methodDescriptor = TestMethodDescriptors.voidMethod();
Channel mockChannel = mock(Channel.class);
CallOptions callOptionsWithXds = CallOptions.DEFAULT;
ClientCall<Void, Void> call = interceptor.interceptCall(
methodDescriptor, callOptionsWithXds, mockChannel);
assertTrue(call instanceof FailingClientCall);
FailingClientCall<Void, Void> clientCall = (FailingClientCall<Void, Void>) call;
assertThat(clientCall.error.getDescription()).contains("does not contain cluster resource");
}
@Test
public void testClientInterceptor_clusterSelectionKeyWithoutPrefix() throws Exception {
XdsConfig.XdsClusterConfig clusterConfig = new XdsConfig.XdsClusterConfig(
CLUSTER_NAME,
cdsUpdate,
new EndpointConfig(StatusOr.fromValue(edsUpdate)));
XdsConfig defaultXdsConfig = new XdsConfig.XdsConfigBuilder()
.setListener(ldsUpdate)
.setRoute(rdsUpdate)
.setVirtualHost(rdsUpdate.virtualHosts.get(0))
.addCluster(CLUSTER_NAME, StatusOr.fromValue(clusterConfig)).build();
CallOptions callOptionsWithXds = CallOptions.DEFAULT
.withOption(CLUSTER_SELECTION_KEY, "cluster0")
.withOption(XDS_CONFIG_CALL_OPTION_KEY, defaultXdsConfig);
Channel mockChannel = mock(Channel.class);
GcpAuthenticationConfig config = new GcpAuthenticationConfig(10);
GcpAuthenticationFilter filter = new GcpAuthenticationFilter("FILTER_INSTANCE_NAME", 10);
ClientInterceptor interceptor = filter.buildClientInterceptor(config, null, null);
MethodDescriptor<Void, Void> methodDescriptor = TestMethodDescriptors.voidMethod();
interceptor.interceptCall(methodDescriptor, callOptionsWithXds, mockChannel);
verify(mockChannel).newCall(methodDescriptor, callOptionsWithXds);
}
@Test
public void testClientInterceptor_xdsConfigDoesNotExist() throws Exception {
GcpAuthenticationConfig config = new GcpAuthenticationConfig(10);
GcpAuthenticationFilter filter = new GcpAuthenticationFilter("FILTER_INSTANCE_NAME", 10);
ClientInterceptor interceptor = filter.buildClientInterceptor(config, null, null);
MethodDescriptor<Void, Void> methodDescriptor = TestMethodDescriptors.voidMethod();
Channel mockChannel = mock(Channel.class);
CallOptions callOptionsWithXds = CallOptions.DEFAULT
.withOption(CLUSTER_SELECTION_KEY, "cluster:cluster0");
ClientCall<Void, Void> call =
interceptor.interceptCall(methodDescriptor, callOptionsWithXds, mockChannel);
assertTrue(call instanceof FailingClientCall);
FailingClientCall<Void, Void> clientCall = (FailingClientCall<Void, Void>) call;
assertThat(clientCall.error.getDescription()).contains("does not contain xds configuration");
}
@Test
public void testClientInterceptor_incorrectClusterName() throws Exception {
XdsConfig.XdsClusterConfig clusterConfig = new XdsConfig.XdsClusterConfig(
CLUSTER_NAME,
cdsUpdate,
new EndpointConfig(StatusOr.fromValue(edsUpdate)));
XdsConfig defaultXdsConfig = new XdsConfig.XdsConfigBuilder()
.setListener(ldsUpdate)
.setRoute(rdsUpdate)
.setVirtualHost(rdsUpdate.virtualHosts.get(0))
.addCluster("custer0", StatusOr.fromValue(clusterConfig)).build();
CallOptions callOptionsWithXds = CallOptions.DEFAULT
.withOption(CLUSTER_SELECTION_KEY, "cluster:cluster")
.withOption(XDS_CONFIG_CALL_OPTION_KEY, defaultXdsConfig);
GcpAuthenticationConfig config = new GcpAuthenticationConfig(10);
GcpAuthenticationFilter filter = new GcpAuthenticationFilter("FILTER_INSTANCE_NAME", 10);
ClientInterceptor interceptor = filter.buildClientInterceptor(config, null, null);
MethodDescriptor<Void, Void> methodDescriptor = TestMethodDescriptors.voidMethod();
Channel mockChannel = mock(Channel.class);
ClientCall<Void, Void> call =
interceptor.interceptCall(methodDescriptor, callOptionsWithXds, mockChannel);
assertTrue(call instanceof FailingClientCall);
FailingClientCall<Void, Void> clientCall = (FailingClientCall<Void, Void>) call;
assertThat(clientCall.error.getDescription()).contains("does not contain xds cluster");
}
@Test
public void testClientInterceptor_statusOrError() throws Exception {
StatusOr<XdsClusterConfig> errorCluster =
StatusOr.fromStatus(Status.NOT_FOUND.withDescription("Cluster resource not found"));
XdsConfig defaultXdsConfig = new XdsConfig.XdsConfigBuilder()
.setListener(ldsUpdate)
.setRoute(rdsUpdate)
.setVirtualHost(rdsUpdate.virtualHosts.get(0))
.addCluster(CLUSTER_NAME, errorCluster).build();
CallOptions callOptionsWithXds = CallOptions.DEFAULT
.withOption(CLUSTER_SELECTION_KEY, "cluster:cluster0")
.withOption(XDS_CONFIG_CALL_OPTION_KEY, defaultXdsConfig);
GcpAuthenticationConfig config = new GcpAuthenticationConfig(10);
GcpAuthenticationFilter filter = new GcpAuthenticationFilter("FILTER_INSTANCE_NAME", 10);
ClientInterceptor interceptor = filter.buildClientInterceptor(config, null, null);
MethodDescriptor<Void, Void> methodDescriptor = TestMethodDescriptors.voidMethod();
Channel mockChannel = mock(Channel.class);
ClientCall<Void, Void> call =
interceptor.interceptCall(methodDescriptor, callOptionsWithXds, mockChannel);
assertTrue(call instanceof FailingClientCall);
FailingClientCall<Void, Void> clientCall = (FailingClientCall<Void, Void>) call;
assertThat(clientCall.error.getDescription()).contains("Cluster resource not found");
}
@Test
public void testClientInterceptor_notAudienceWrapper()
throws IOException, ResourceInvalidException {
XdsConfig.XdsClusterConfig clusterConfig = new XdsConfig.XdsClusterConfig(
CLUSTER_NAME,
getCdsUpdateWithIncorrectAudienceWrapper(),
new EndpointConfig(StatusOr.fromValue(edsUpdate)));
XdsConfig defaultXdsConfig = new XdsConfig.XdsConfigBuilder()
.setListener(ldsUpdate)
.setRoute(rdsUpdate)
.setVirtualHost(rdsUpdate.virtualHosts.get(0))
.addCluster(CLUSTER_NAME, StatusOr.fromValue(clusterConfig)).build();
CallOptions callOptionsWithXds = CallOptions.DEFAULT
.withOption(CLUSTER_SELECTION_KEY, "cluster:cluster0")
.withOption(XDS_CONFIG_CALL_OPTION_KEY, defaultXdsConfig);
GcpAuthenticationConfig config = new GcpAuthenticationConfig(10);
GcpAuthenticationFilter filter = new GcpAuthenticationFilter("FILTER_INSTANCE_NAME", 10);
ClientInterceptor interceptor = filter.buildClientInterceptor(config, null, null);
MethodDescriptor<Void, Void> methodDescriptor = TestMethodDescriptors.voidMethod();
Channel mockChannel = Mockito.mock(Channel.class);
ClientCall<Void, Void> call =
interceptor.interceptCall(methodDescriptor, callOptionsWithXds, mockChannel);
assertTrue(call instanceof FailingClientCall);
FailingClientCall<Void, Void> clientCall = (FailingClientCall<Void, Void>) call;
assertThat(clientCall.error.getDescription()).contains("GCP Authn found wrong type");
}
@Test
public void testLruCacheAcrossInterceptors() throws IOException, ResourceInvalidException {
XdsConfig.XdsClusterConfig clusterConfig = new XdsConfig.XdsClusterConfig(
CLUSTER_NAME, cdsUpdate, new EndpointConfig(StatusOr.fromValue(edsUpdate)));
XdsConfig defaultXdsConfig = new XdsConfig.XdsConfigBuilder()
.setListener(ldsUpdate)
.setRoute(rdsUpdate)
.setVirtualHost(rdsUpdate.virtualHosts.get(0))
.addCluster(CLUSTER_NAME, StatusOr.fromValue(clusterConfig)).build();
CallOptions callOptionsWithXds = CallOptions.DEFAULT
.withOption(CLUSTER_SELECTION_KEY, "cluster:cluster0")
.withOption(XDS_CONFIG_CALL_OPTION_KEY, defaultXdsConfig);
GcpAuthenticationFilter filter = new GcpAuthenticationFilter("FILTER_INSTANCE_NAME", 2);
ClientInterceptor interceptor1
= filter.buildClientInterceptor(new GcpAuthenticationConfig(2), null, null);
MethodDescriptor<Void, Void> methodDescriptor = TestMethodDescriptors.voidMethod();
Channel mockChannel = Mockito.mock(Channel.class);
ArgumentCaptor<CallOptions> callOptionsCaptor = ArgumentCaptor.forClass(CallOptions.class);
interceptor1.interceptCall(methodDescriptor, callOptionsWithXds, mockChannel);
verify(mockChannel).newCall(eq(methodDescriptor), callOptionsCaptor.capture());
CallOptions capturedOptions1 = callOptionsCaptor.getAllValues().get(0);
assertNotNull(capturedOptions1.getCredentials());
ClientInterceptor interceptor2
= filter.buildClientInterceptor(new GcpAuthenticationConfig(1), null, null);
interceptor2.interceptCall(methodDescriptor, callOptionsWithXds, mockChannel);
verify(mockChannel, times(2))
.newCall(eq(methodDescriptor), callOptionsCaptor.capture());
CallOptions capturedOptions2 = callOptionsCaptor.getAllValues().get(1);
assertNotNull(capturedOptions2.getCredentials());
assertSame(capturedOptions1.getCredentials(), capturedOptions2.getCredentials());
}
@Test
public void testLruCacheEvictionOnResize() throws IOException, ResourceInvalidException {
XdsConfig.XdsClusterConfig clusterConfig = new XdsConfig.XdsClusterConfig(
CLUSTER_NAME, cdsUpdate, new EndpointConfig(StatusOr.fromValue(edsUpdate)));
XdsConfig defaultXdsConfig = new XdsConfig.XdsConfigBuilder()
.setListener(ldsUpdate)
.setRoute(rdsUpdate)
.setVirtualHost(rdsUpdate.virtualHosts.get(0))
.addCluster(CLUSTER_NAME, StatusOr.fromValue(clusterConfig)).build();
CallOptions callOptionsWithXds = CallOptions.DEFAULT
.withOption(CLUSTER_SELECTION_KEY, "cluster:cluster0")
.withOption(XDS_CONFIG_CALL_OPTION_KEY, defaultXdsConfig);
GcpAuthenticationFilter filter = new GcpAuthenticationFilter("FILTER_INSTANCE_NAME", 2);
MethodDescriptor<Void, Void> methodDescriptor = TestMethodDescriptors.voidMethod();
ClientInterceptor interceptor1 =
filter.buildClientInterceptor(new GcpAuthenticationConfig(2), null, null);
Channel mockChannel1 = Mockito.mock(Channel.class);
ArgumentCaptor<CallOptions> captor = ArgumentCaptor.forClass(CallOptions.class);
interceptor1.interceptCall(methodDescriptor, callOptionsWithXds, mockChannel1);
verify(mockChannel1).newCall(eq(methodDescriptor), captor.capture());
CallOptions options1 = captor.getValue();
// This will recreate the cache with max size of 1 and copy the credential for audience1.
ClientInterceptor interceptor2 =
filter.buildClientInterceptor(new GcpAuthenticationConfig(1), null, null);
Channel mockChannel2 = Mockito.mock(Channel.class);
interceptor2.interceptCall(methodDescriptor, callOptionsWithXds, mockChannel2);
verify(mockChannel2).newCall(eq(methodDescriptor), captor.capture());
CallOptions options2 = captor.getValue();
assertSame(options1.getCredentials(), options2.getCredentials());
clusterConfig = new XdsConfig.XdsClusterConfig(
CLUSTER_NAME, getCdsUpdate2(), new EndpointConfig(StatusOr.fromValue(edsUpdate)));
defaultXdsConfig = new XdsConfig.XdsConfigBuilder()
.setListener(ldsUpdate)
.setRoute(rdsUpdate)
.setVirtualHost(rdsUpdate.virtualHosts.get(0))
.addCluster(CLUSTER_NAME, StatusOr.fromValue(clusterConfig)).build();
callOptionsWithXds = CallOptions.DEFAULT
.withOption(CLUSTER_SELECTION_KEY, "cluster:cluster0")
.withOption(XDS_CONFIG_CALL_OPTION_KEY, defaultXdsConfig);
// This will evict the credential for audience1 and add new credential for audience2
ClientInterceptor interceptor3 =
filter.buildClientInterceptor(new GcpAuthenticationConfig(1), null, null);
Channel mockChannel3 = Mockito.mock(Channel.class);
interceptor3.interceptCall(methodDescriptor, callOptionsWithXds, mockChannel3);
verify(mockChannel3).newCall(eq(methodDescriptor), captor.capture());
CallOptions options3 = captor.getValue();
assertNotSame(options1.getCredentials(), options3.getCredentials());
clusterConfig = new XdsConfig.XdsClusterConfig(
CLUSTER_NAME, cdsUpdate, new EndpointConfig(StatusOr.fromValue(edsUpdate)));
defaultXdsConfig = new XdsConfig.XdsConfigBuilder()
.setListener(ldsUpdate)
.setRoute(rdsUpdate)
.setVirtualHost(rdsUpdate.virtualHosts.get(0))
.addCluster(CLUSTER_NAME, StatusOr.fromValue(clusterConfig)).build();
callOptionsWithXds = CallOptions.DEFAULT
.withOption(CLUSTER_SELECTION_KEY, "cluster:cluster0")
.withOption(XDS_CONFIG_CALL_OPTION_KEY, defaultXdsConfig);
// This will create new credential for audience1 because it has been evicted
ClientInterceptor interceptor4 =
filter.buildClientInterceptor(new GcpAuthenticationConfig(1), null, null);
Channel mockChannel4 = Mockito.mock(Channel.class);
interceptor4.interceptCall(methodDescriptor, callOptionsWithXds, mockChannel4);
verify(mockChannel4).newCall(eq(methodDescriptor), captor.capture());
CallOptions options4 = captor.getValue();
assertNotSame(options1.getCredentials(), options4.getCredentials());
}
private static LdsUpdate getLdsUpdate() {
Filter.NamedFilterConfig routerFilterConfig = new Filter.NamedFilterConfig(
"router", RouterFilter.ROUTER_CONFIG);
HttpConnectionManager httpConnectionManager = HttpConnectionManager.forRdsName(
0L, RDS_NAME, Collections.singletonList(routerFilterConfig));
return XdsListenerResource.LdsUpdate.forApiListener(httpConnectionManager);
}
private static RdsUpdate getRdsUpdate() {
RouteConfiguration routeConfiguration =
buildRouteConfiguration("my-server", RDS_NAME, CLUSTER_NAME);
XdsResourceType.Args args = new XdsResourceType.Args(
XdsTestUtils.EMPTY_BOOTSTRAPPER_SERVER_INFO, "0", "0", null, null, null);
try {
return XdsRouteConfigureResource.getInstance().doParse(args, routeConfiguration);
} catch (ResourceInvalidException ex) {
return null;
}
}
private static EdsUpdate getEdsUpdate() {
Map<Locality, LocalityLbEndpoints> lbEndpointsMap = new HashMap<>();
LbEndpoint lbEndpoint = LbEndpoint.create(
"127.0.0.5", ENDPOINT_PORT, 0, true, ENDPOINT_HOSTNAME, ImmutableMap.of());
lbEndpointsMap.put(
Locality.create("", "", ""),
LocalityLbEndpoints.create(ImmutableList.of(lbEndpoint), 10, 0, ImmutableMap.of()));
return new XdsEndpointResource.EdsUpdate(EDS_NAME, lbEndpointsMap, Collections.emptyList());
}
private static CdsUpdate getCdsUpdate() {
ImmutableMap.Builder<String, Object> parsedMetadata = ImmutableMap.builder();
parsedMetadata.put("FILTER_INSTANCE_NAME", new AudienceWrapper("TEST_AUDIENCE"));
try {
CdsUpdate.Builder cdsUpdate = CdsUpdate.forEds(
CLUSTER_NAME, EDS_NAME, null, null, null, null, false, null)
.lbPolicyConfig(getWrrLbConfigAsMap());
return cdsUpdate.parsedMetadata(parsedMetadata.build()).build();
} catch (IOException ex) {
return null;
}
}
private static CdsUpdate getCdsUpdate2() {
ImmutableMap.Builder<String, Object> parsedMetadata = ImmutableMap.builder();
parsedMetadata.put("FILTER_INSTANCE_NAME", new AudienceWrapper("NEW_TEST_AUDIENCE"));
try {
CdsUpdate.Builder cdsUpdate = CdsUpdate.forEds(
CLUSTER_NAME, EDS_NAME, null, null, null, null, false, null)
.lbPolicyConfig(getWrrLbConfigAsMap());
return cdsUpdate.parsedMetadata(parsedMetadata.build()).build();
} catch (IOException ex) {
return null;
}
}
private static CdsUpdate getCdsUpdateWithIncorrectAudienceWrapper() throws IOException {
ImmutableMap.Builder<String, Object> parsedMetadata = ImmutableMap.builder();
parsedMetadata.put("FILTER_INSTANCE_NAME", "TEST_AUDIENCE");
CdsUpdate.Builder cdsUpdate = CdsUpdate.forEds(
CLUSTER_NAME, EDS_NAME, null, null, null, null, false, null)
.lbPolicyConfig(getWrrLbConfigAsMap());
return cdsUpdate.parsedMetadata(parsedMetadata.build()).build();
}
}
| GcpAuthenticationFilterTest |
java | spring-projects__spring-boot | module/spring-boot-r2dbc/src/test/java/org/springframework/boot/r2dbc/autoconfigure/R2dbcAutoConfigurationTests.java | {
"start": 17934,
"end": 18541
} | class ____ extends URLClassLoader {
DisableEmbeddedDatabaseClassLoader() {
super(new URL[0], DisableEmbeddedDatabaseClassLoader.class.getClassLoader());
}
@Override
protected Class<?> loadClass(String name, boolean resolve) throws ClassNotFoundException {
for (EmbeddedDatabaseConnection candidate : EmbeddedDatabaseConnection.values()) {
if (name.equals(candidate.getDriverClassName())) {
throw new ClassNotFoundException();
}
}
return super.loadClass(name, resolve);
}
}
@Configuration(proxyBeanMethods = false)
private static final | DisableEmbeddedDatabaseClassLoader |
java | spring-projects__spring-security | messaging/src/test/java/org/springframework/security/messaging/handler/invocation/reactive/CurrentSecurityContextArgumentResolverTests.java | {
"start": 9415,
"end": 9503
} | interface ____ {
String property() default "";
}
static | CurrentAuthenticationProperty |
java | google__auto | common/src/test/java/com/google/auto/common/BasicAnnotationProcessorTest.java | {
"start": 3042,
"end": 3130
} | class ____ by {@link GeneratesCode}'s processor is present.
*/
private static | generated |
java | FasterXML__jackson-core | src/main/java/tools/jackson/core/io/DataOutputAsStream.java | {
"start": 197,
"end": 761
} | class ____ extends OutputStream
{
protected final DataOutput _output;
public DataOutputAsStream(DataOutput out) {
super();
_output = out;
}
@Override
public void write(int b) throws IOException {
_output.write(b);
}
@Override
public void write(byte b[]) throws IOException {
_output.write(b, 0, b.length);
}
@Override
public void write(byte b[], int offset, int length) throws IOException {
_output.write(b, offset, length);
}
// These are no-ops, base | DataOutputAsStream |
java | spring-projects__spring-boot | module/spring-boot-artemis/src/main/java/org/springframework/boot/artemis/autoconfigure/ArtemisNoOpBindingRegistry.java | {
"start": 938,
"end": 1236
} | class ____ implements BindingRegistry {
@Override
public @Nullable Object lookup(String s) {
return null;
}
@Override
public boolean bind(String s, Object o) {
return false;
}
@Override
public void unbind(String s) {
}
@Override
public void close() {
}
}
| ArtemisNoOpBindingRegistry |
java | spring-projects__spring-framework | spring-web/src/main/java/org/springframework/http/RequestEntity.java | {
"start": 18125,
"end": 21828
} | class ____ implements BodyBuilder {
private final HttpMethod method;
private final HttpHeaders headers = new HttpHeaders();
private final @Nullable URI uri;
private final @Nullable String uriTemplate;
private final @Nullable Object @Nullable [] uriVarsArray;
private final @Nullable Map<String, ? extends @Nullable Object> uriVarsMap;
DefaultBodyBuilder(HttpMethod method, URI url) {
this.method = method;
this.uri = url;
this.uriTemplate = null;
this.uriVarsArray = null;
this.uriVarsMap = null;
}
DefaultBodyBuilder(HttpMethod method, String uriTemplate, @Nullable Object... uriVars) {
this.method = method;
this.uri = null;
this.uriTemplate = uriTemplate;
this.uriVarsArray = uriVars;
this.uriVarsMap = null;
}
DefaultBodyBuilder(HttpMethod method, String uriTemplate, Map<String, ? extends @Nullable Object> uriVars) {
this.method = method;
this.uri = null;
this.uriTemplate = uriTemplate;
this.uriVarsArray = null;
this.uriVarsMap = uriVars;
}
@Override
public BodyBuilder header(String headerName, String... headerValues) {
for (String headerValue : headerValues) {
this.headers.add(headerName, headerValue);
}
return this;
}
@Override
public BodyBuilder headers(@Nullable HttpHeaders headers) {
if (headers != null) {
this.headers.putAll(headers);
}
return this;
}
@Override
public BodyBuilder headers(Consumer<HttpHeaders> headersConsumer) {
headersConsumer.accept(this.headers);
return this;
}
@Override
public BodyBuilder accept(MediaType... acceptableMediaTypes) {
this.headers.setAccept(Arrays.asList(acceptableMediaTypes));
return this;
}
@Override
public BodyBuilder acceptCharset(Charset... acceptableCharsets) {
this.headers.setAcceptCharset(Arrays.asList(acceptableCharsets));
return this;
}
@Override
public BodyBuilder contentLength(long contentLength) {
this.headers.setContentLength(contentLength);
return this;
}
@Override
public BodyBuilder contentType(MediaType contentType) {
this.headers.setContentType(contentType);
return this;
}
@Override
public BodyBuilder ifModifiedSince(ZonedDateTime ifModifiedSince) {
this.headers.setIfModifiedSince(ifModifiedSince);
return this;
}
@Override
public BodyBuilder ifModifiedSince(Instant ifModifiedSince) {
this.headers.setIfModifiedSince(ifModifiedSince);
return this;
}
@Override
public BodyBuilder ifModifiedSince(long ifModifiedSince) {
this.headers.setIfModifiedSince(ifModifiedSince);
return this;
}
@Override
public BodyBuilder ifNoneMatch(String... ifNoneMatches) {
this.headers.setIfNoneMatch(Arrays.asList(ifNoneMatches));
return this;
}
@Override
public RequestEntity<Void> build() {
return buildInternal(null, null);
}
@Override
public <T> RequestEntity<T> body(T body) {
return buildInternal(body, null);
}
@Override
public <T> RequestEntity<T> body(T body, Type type) {
return buildInternal(body, type);
}
private <T> RequestEntity<T> buildInternal(@Nullable T body, @Nullable Type type) {
if (this.uri != null) {
return new RequestEntity<>(body, this.headers, this.method, this.uri, type);
}
else if (this.uriTemplate != null){
return new UriTemplateRequestEntity<>(body, this.headers, this.method, type,
this.uriTemplate, this.uriVarsArray, this.uriVarsMap);
}
else {
throw new IllegalStateException("Neither URI nor URI template");
}
}
}
/**
* RequestEntity initialized with a URI template and variables instead of a {@link URI}.
* @since 5.3
* @param <T> the body type
*/
public static | DefaultBodyBuilder |
java | micronaut-projects__micronaut-core | benchmarks/src/jmh/java/io/micronaut/core/CopyOnWriteMapBenchmark.java | {
"start": 2367,
"end": 2413
} | enum ____ {
CHM,
COW,
}
}
| Type |
java | apache__flink | flink-core/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractionUtils.java | {
"start": 4017,
"end": 6794
} | class ____ a lambda function
if (serialVersion != null
&& serialVersion.getClass() == SerializedLambda.class) {
serializedLambda = (SerializedLambda) serialVersion;
break;
}
} catch (NoSuchMethodException e) {
// thrown if the method is not there. fall through the loop
}
}
// not a lambda method -> return null
if (serializedLambda == null) {
return null;
}
// find lambda method
String className = serializedLambda.getImplClass();
String methodName = serializedLambda.getImplMethodName();
String methodSig = serializedLambda.getImplMethodSignature();
Class<?> implClass =
Class.forName(
className.replace('/', '.'),
true,
Thread.currentThread().getContextClassLoader());
// find constructor
if (methodName.equals("<init>")) {
Constructor<?>[] constructors = implClass.getDeclaredConstructors();
for (Constructor<?> constructor : constructors) {
if (getConstructorDescriptor(constructor).equals(methodSig)) {
return new LambdaExecutable(constructor);
}
}
}
// find method
else {
List<Method> methods = getAllDeclaredMethods(implClass);
for (Method method : methods) {
if (method.getName().equals(methodName)
&& getMethodDescriptor(method).equals(methodSig)) {
return new LambdaExecutable(method);
}
}
}
throw new TypeExtractionException("No lambda method found.");
} catch (Exception e) {
throw new TypeExtractionException(
"Could not extract lambda method out of function: "
+ e.getClass().getSimpleName()
+ " - "
+ e.getMessage(),
e);
}
}
/**
* Extracts type from given index from lambda. It supports nested types.
*
* @param baseClass SAM function that the lambda implements
* @param exec lambda function to extract the type from
* @param lambdaTypeArgumentIndices position of type to extract in type hierarchy
* @param paramLen count of total parameters of the lambda (including closure parameters)
* @param baseParametersLen count of lambda | is |
java | spring-projects__spring-framework | spring-web/src/main/java/org/springframework/http/RequestEntity.java | {
"start": 2218,
"end": 6072
} | class ____<T> extends HttpEntity<T> {
private final @Nullable HttpMethod method;
private final @Nullable URI url;
private final @Nullable Type type;
/**
* Constructor with method and URL but without body nor headers.
* @param method the method
* @param url the URL
*/
public RequestEntity(HttpMethod method, URI url) {
this(null, (HttpHeaders) null, method, url, null);
}
/**
* Constructor with method, URL and body but without headers.
* @param body the body
* @param method the method
* @param url the URL
*/
public RequestEntity(@Nullable T body, HttpMethod method, URI url) {
this(body, (HttpHeaders) null, method, url, null);
}
/**
* Constructor with method, URL, body and type but without headers.
* @param body the body
* @param method the method
* @param url the URL
* @param type the type used for generic type resolution
* @since 4.3
*/
public RequestEntity(@Nullable T body, HttpMethod method, URI url, Type type) {
this(body, (HttpHeaders) null, method, url, type);
}
/**
* Constructor with method, URL and headers but without body.
* @param headers the headers
* @param method the method
* @param url the URL
* @since 7.0
*/
public RequestEntity(HttpHeaders headers, HttpMethod method, URI url) {
this(null, headers, method, url, null);
}
/**
* Constructor with method, URL, headers and body.
* @param body the body
* @param headers the headers
* @param method the method
* @param url the URL
* @since 7.0
*/
public RequestEntity(@Nullable T body, @Nullable HttpHeaders headers,
@Nullable HttpMethod method, URI url) {
this(body, headers, method, url, null);
}
/**
* Constructor with method, URL, headers, body and type.
* @param body the body
* @param headers the headers
* @param method the method
* @param url the URL
* @param type the type used for generic type resolution
* @since 7.0
*/
public RequestEntity(@Nullable T body, @Nullable HttpHeaders headers,
@Nullable HttpMethod method, @Nullable URI url, @Nullable Type type) {
super(body, headers);
this.method = method;
this.url = url;
this.type = type;
}
/**
* Constructor with method, URL and headers but without body.
* @param headers the headers
* @param method the method
* @param url the URL
* @deprecated in favor of {@link #RequestEntity(HttpHeaders, HttpMethod, URI)}
*/
@Deprecated(since = "7.0", forRemoval = true)
public RequestEntity(MultiValueMap<String, String> headers, HttpMethod method, URI url) {
this(null, headers, method, url, null);
}
/**
* Constructor with method, URL, headers and body.
* @param body the body
* @param headers the headers
* @param method the method
* @param url the URL
* @deprecated in favor of {@link #RequestEntity(Object, HttpHeaders, HttpMethod, URI)}
*/
@Deprecated(since = "7.0", forRemoval = true)
public RequestEntity(
@Nullable T body, @Nullable MultiValueMap<String, String> headers,
@Nullable HttpMethod method, URI url) {
this(body, headers, method, url, null);
}
/**
* Constructor with method, URL, headers, body and type.
* @param body the body
* @param headers the headers
* @param method the method
* @param url the URL
* @param type the type used for generic type resolution
* @since 4.3
* @deprecated in favor of {@link #RequestEntity(Object, HttpHeaders, HttpMethod, URI, Type)}
*/
@SuppressWarnings("removal")
@Deprecated(since = "7.0", forRemoval = true)
public RequestEntity(@Nullable T body, @Nullable MultiValueMap<String, String> headers,
@Nullable HttpMethod method, @Nullable URI url, @Nullable Type type) {
super(body, headers);
this.method = method;
this.url = url;
this.type = type;
}
/**
* Return the HTTP method of the request.
* @return the HTTP method as an {@code HttpMethod} | RequestEntity |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/mysql/create/MySqlCreateFullTextDictTest.java | {
"start": 891,
"end": 2647
} | class ____ extends MysqlTest {
@Test
public void test_one() throws Exception {
String sql = "create fulltext dictionary test1 ("
+ " word varchar comment 'comment1' "
+ ") comment 'comment1'";
List<SQLStatement> stmtList = SQLUtils.toStatementList(sql, JdbcConstants.MYSQL);
SQLStatement stmt = stmtList.get(0);
String output = SQLUtils.toMySqlString(stmt);
assertEquals("CREATE FULLTEXT DICTIONARY test1(\n"
+ "word varchar COMMENT 'comment1'\n"
+ ")\n" +
"COMMENT 'comment1'", output);
}
@Test
public void test_1() throws Exception {
String sql = "show fulltext dictionaries";
List<SQLStatement> stmtList = SQLUtils.toStatementList(sql, JdbcConstants.MYSQL);
SQLStatement stmt = stmtList.get(0);
String output = SQLUtils.toMySqlString(stmt);
assertEquals("SHOW FULLTEXT DICTIONARIES", output);
}
@Test
public void test_2() throws Exception {
String sql = "show fulltext dictionaries";
List<SQLStatement> stmtList = SQLUtils.toStatementList(sql, JdbcConstants.MYSQL);
SQLStatement stmt = stmtList.get(0);
String output = SQLUtils.toMySqlString(stmt);
assertEquals("SHOW FULLTEXT DICTIONARIES", output);
}
@Test
public void test_3() throws Exception {
String sql = "drop fulltext dictionary dic_name";
List<SQLStatement> stmtList = SQLUtils.toStatementList(sql, JdbcConstants.MYSQL);
SQLStatement stmt = stmtList.get(0);
String output = SQLUtils.toMySqlString(stmt);
assertEquals("DROP FULLTEXT DICTIONARY dic_name", output);
}
}
| MySqlCreateFullTextDictTest |
java | spring-projects__spring-security | oauth2/oauth2-core/src/main/java/org/springframework/security/oauth2/core/endpoint/OAuth2AuthorizationRequest.java | {
"start": 8296,
"end": 8802
} | class ____ extends AbstractBuilder<OAuth2AuthorizationRequest, Builder> {
/**
* Builds a new {@link OAuth2AuthorizationRequest}.
* @return a {@link OAuth2AuthorizationRequest}
*/
@Override
public OAuth2AuthorizationRequest build() {
return new OAuth2AuthorizationRequest(this);
}
}
/**
* A builder for subclasses of {@link OAuth2AuthorizationRequest}.
*
* @param <T> the type of authorization request
* @param <B> the type of the builder
*/
protected abstract static | Builder |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/common/VersionId.java | {
"start": 523,
"end": 582
} | class ____ represents a version id of some kind
*/
public | that |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/util/NettyShuffleDescriptorBuilder.java | {
"start": 1772,
"end": 4410
} | class ____ {
private ResourceID producerLocation = ResourceID.generate();
private ResultPartitionID id = new ResultPartitionID();
private InetAddress address = InetAddress.getLoopbackAddress();
private int dataPort;
private int connectionIndex;
public NettyShuffleDescriptorBuilder setProducerLocation(ResourceID producerLocation) {
this.producerLocation = producerLocation;
return this;
}
public NettyShuffleDescriptorBuilder setId(ResultPartitionID id) {
this.id = id;
return this;
}
public NettyShuffleDescriptorBuilder setAddress(InetAddress address) {
this.address = address;
return this;
}
public NettyShuffleDescriptorBuilder setDataPort(int dataPort) {
this.dataPort = dataPort;
return this;
}
public NettyShuffleDescriptorBuilder setProducerInfoFromTaskManagerLocation(
TaskManagerLocation producerTaskManagerLocation) {
return setProducerLocation(producerTaskManagerLocation.getResourceID())
.setAddress(producerTaskManagerLocation.address())
.setDataPort(producerTaskManagerLocation.dataPort());
}
public NettyShuffleDescriptorBuilder setConnectionIndex(int connectionIndex) {
this.connectionIndex = connectionIndex;
return this;
}
public NettyShuffleDescriptor buildRemote() {
return new NettyShuffleDescriptor(
producerLocation,
new NetworkPartitionConnectionInfo(
new InetSocketAddress(address, dataPort), connectionIndex),
id);
}
public NettyShuffleDescriptor buildLocal() {
List<TierShuffleDescriptor> tierShuffleDescriptors = new ArrayList<>();
tierShuffleDescriptors.add(NoOpTierShuffleDescriptor.INSTANCE);
tierShuffleDescriptors.add(NoOpTierShuffleDescriptor.INSTANCE);
return new NettyShuffleDescriptor(
producerLocation,
LocalExecutionPartitionConnectionInfo.INSTANCE,
id,
tierShuffleDescriptors);
}
public static NettyShuffleDescriptorBuilder newBuilder() {
return new NettyShuffleDescriptorBuilder();
}
public static NettyShuffleDescriptor createRemoteWithIdAndLocation(
IntermediateResultPartitionID partitionId, ResourceID producerLocation) {
return newBuilder()
.setId(new ResultPartitionID(partitionId, createExecutionAttemptId()))
.setProducerLocation(producerLocation)
.buildRemote();
}
}
| NettyShuffleDescriptorBuilder |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java | {
"start": 1511,
"end": 4502
} | class ____ extends ValuesSourceAggregatorFactory {
private final int precision;
private final int requiredSize;
private final int shardSize;
private final GeoBoundingBox geoBoundingBox;
GeoTileGridAggregatorFactory(
String name,
ValuesSourceConfig config,
int precision,
int requiredSize,
int shardSize,
GeoBoundingBox geoBoundingBox,
AggregationContext context,
AggregatorFactory parent,
AggregatorFactories.Builder subFactoriesBuilder,
Map<String, Object> metadata
) throws IOException {
super(name, config, context, parent, subFactoriesBuilder, metadata);
this.precision = precision;
this.requiredSize = requiredSize;
this.shardSize = shardSize;
this.geoBoundingBox = geoBoundingBox;
}
@Override
protected Aggregator createUnmapped(Aggregator parent, Map<String, Object> metadata) throws IOException {
final InternalAggregation aggregation = new InternalGeoTileGrid(name, requiredSize, Collections.emptyList(), metadata);
return new NonCollectingAggregator(name, context, parent, factories, metadata) {
@Override
public InternalAggregation buildEmptyAggregation() {
return aggregation;
}
};
}
@Override
protected Aggregator doCreateInternal(Aggregator parent, CardinalityUpperBound cardinality, Map<String, Object> metadata)
throws IOException {
return context.getValuesSourceRegistry()
.getAggregator(GeoTileGridAggregationBuilder.REGISTRY_KEY, config)
.build(
name,
factories,
config.getValuesSource(),
precision,
geoBoundingBox,
requiredSize,
shardSize,
context,
parent,
cardinality,
metadata
);
}
static void registerAggregators(ValuesSourceRegistry.Builder builder) {
builder.register(
GeoTileGridAggregationBuilder.REGISTRY_KEY,
CoreValuesSourceType.GEOPOINT,
(
name,
factories,
valuesSource,
precision,
geoBoundingBox,
requiredSize,
shardSize,
aggregationContext,
parent,
cardinality,
metadata) -> new GeoTileGridAggregator(
name,
factories,
cb -> new GeoTileCellIdSource((ValuesSource.GeoPoint) valuesSource, precision, geoBoundingBox, cb),
requiredSize,
shardSize,
aggregationContext,
parent,
cardinality,
metadata
),
true
);
}
}
| GeoTileGridAggregatorFactory |
java | google__auto | factory/src/main/java/com/google/auto/factory/processor/PackageAndClass.java | {
"start": 833,
"end": 969
} | class ____. For {@code java.util.Map.Entry}, it would be {@code
* java.util}.
*/
abstract String packageName();
/**
* The | name |
java | apache__camel | components/camel-hashicorp-vault/src/test/java/org/apache/camel/component/hashicorp/vault/integration/operations/HashicorpProducerCreateSecretPOJOIT.java | {
"start": 1353,
"end": 3523
} | class ____ extends HashicorpVaultBase {
@EndpointInject("mock:result-write")
private MockEndpoint mockWrite;
@EndpointInject("mock:result-read")
private MockEndpoint mockRead;
@Test
public void createSecretTest() throws InterruptedException {
mockWrite.expectedMessageCount(1);
mockRead.expectedMessageCount(1);
Exchange exchange = template.request("direct:createSecret", new Processor() {
@Override
public void process(Exchange exchange) {
Secrets sec = new Secrets();
sec.username = "admin";
sec.password = "password";
exchange.getIn().setBody(sec);
}
});
exchange = template.request("direct:readSecret", new Processor() {
@Override
public void process(Exchange exchange) {
exchange.getMessage().setHeader(HashicorpVaultConstants.SECRET_PATH, "test");
}
});
MockEndpoint.assertIsSatisfied(context);
Exchange ret = mockRead.getExchanges().get(0);
assertNotNull(ret);
assertEquals("admin", ((Map) ret.getMessage().getBody(Map.class).get("data")).get("username"));
assertEquals("password", ((Map) ret.getMessage().getBody(Map.class).get("data")).get("password"));
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:createSecret")
.toF("hashicorp-vault://secret?operation=createSecret&token=RAW(%s)&host=%s&port=%s&scheme=http&secretPath=test",
service.token(), service.host(), service.port())
.to("mock:result-write");
from("direct:readSecret")
.toF("hashicorp-vault://secret?operation=getSecret&token=RAW(%s)&host=%s&port=%s&scheme=http",
service.token(), service.host(), service.port())
.to("mock:result-read");
}
};
}
| HashicorpProducerCreateSecretPOJOIT |
java | apache__dubbo | dubbo-common/src/main/java/org/apache/dubbo/common/utils/DefaultSerializeClassChecker.java | {
"start": 1449,
"end": 3940
} | class ____ implements AllowClassNotifyListener {
private static final long MAGIC_HASH_CODE = 0xcbf29ce484222325L;
private static final long MAGIC_PRIME = 0x100000001b3L;
private static final ErrorTypeAwareLogger logger =
LoggerFactory.getErrorTypeAwareLogger(DefaultSerializeClassChecker.class);
private volatile SerializeCheckStatus checkStatus = AllowClassNotifyListener.DEFAULT_STATUS;
private volatile boolean checkSerializable = true;
private final SerializeSecurityManager serializeSecurityManager;
private final ClassHolder classHolder;
private volatile long[] allowPrefixes = new long[0];
private volatile long[] disAllowPrefixes = new long[0];
public DefaultSerializeClassChecker(FrameworkModel frameworkModel) {
serializeSecurityManager = frameworkModel.getBeanFactory().getOrRegisterBean(SerializeSecurityManager.class);
serializeSecurityManager.registerListener(this);
classHolder =
NativeDetector.inNativeImage() ? frameworkModel.getBeanFactory().getBean(ClassHolder.class) : null;
}
@Override
public synchronized void notifyPrefix(Set<String> allowedList, Set<String> disAllowedList) {
this.allowPrefixes = loadPrefix(allowedList);
this.disAllowPrefixes = loadPrefix(disAllowedList);
}
@Override
public synchronized void notifyCheckStatus(SerializeCheckStatus status) {
this.checkStatus = status;
}
@Override
public synchronized void notifyCheckSerializable(boolean checkSerializable) {
this.checkSerializable = checkSerializable;
}
private static long[] loadPrefix(Set<String> allowedList) {
long[] array = new long[allowedList.size()];
int index = 0;
for (String name : allowedList) {
if (name == null || name.isEmpty()) {
continue;
}
long hashCode = MAGIC_HASH_CODE;
for (int j = 0; j < name.length(); ++j) {
char ch = name.charAt(j);
if (ch == '$') {
ch = '.';
}
hashCode ^= ch;
hashCode *= MAGIC_PRIME;
}
array[index++] = hashCode;
}
if (index != array.length) {
array = Arrays.copyOf(array, index);
}
Arrays.sort(array);
return array;
}
/**
* Try load class
*
* @param className | DefaultSerializeClassChecker |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/builder/RouteTemplateLocalBeanTest.java | {
"start": 30106,
"end": 30541
} | class ____ implements Processor {
private String prefix = "";
public BuilderTwoProcessor() {
}
public BuilderTwoProcessor(String prefix) {
this.prefix = prefix;
}
@Override
public void process(Exchange exchange) {
exchange.getMessage().setBody(prefix + "Builder2 " + exchange.getMessage().getBody());
}
}
public static | BuilderTwoProcessor |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/android/WakelockReleasedDangerouslyTest.java | {
"start": 985,
"end": 1392
} | class ____ {
private final BugCheckerRefactoringTestHelper refactoringHelper =
BugCheckerRefactoringTestHelper.newInstance(WakelockReleasedDangerously.class, getClass())
.setArgs(ImmutableList.of("-XDandroidCompatible=true"))
.addInputLines(
"PowerManager.java",
"""
package android.os;
public | WakelockReleasedDangerouslyTest |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/objectarray/ObjectArrayAssert_hasSizeGreaterThan_Test.java | {
"start": 806,
"end": 1168
} | class ____ extends ObjectArrayAssertBaseTest {
@Override
protected ObjectArrayAssert<Object> invoke_api_method() {
return assertions.hasSizeGreaterThan(6);
}
@Override
protected void verify_internal_effects() {
verify(arrays).assertHasSizeGreaterThan(getInfo(assertions), getActual(assertions), 6);
}
}
| ObjectArrayAssert_hasSizeGreaterThan_Test |
java | google__dagger | javatests/dagger/internal/codegen/InjectConstructorFactoryGeneratorTest.java | {
"start": 57305,
"end": 58174
} | interface ____ {}");
daggerCompiler(
someBinding,
fieldQualifier,
constructorParameterQualifier,
methodParameterQualifier,
misplacedQualifier,
nonQualifier)
.compile(
subject -> {
subject.hasErrorCount(0);
assertSourceMatchesGolden(subject, "test/SomeBinding_Factory");
subject.generatedSource(
goldenFileRule.goldenSource("test/SomeBinding_MembersInjector"));
});
}
@Test
public void testComplexQualifierMetadata() throws Exception {
Source someBinding =
CompilerTests.javaSource(
"test.SomeBinding",
"package test;",
"",
"import javax.inject.Inject;",
"import javax.inject.Inject;",
"",
" | NonQualifier |
java | apache__hadoop | hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/SharedInstanceCredentialProvider.java | {
"start": 1407,
"end": 1774
} | class ____ IAM and container
* authentication.
* <p>
* When it fails to authenticate, it raises a
* {@link NoAwsCredentialsException} which can be recognized by retry handlers
* as a non-recoverable failure.
* <p>
* It is implicitly public; marked evolving as we can change its semantics.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public final | for |
java | elastic__elasticsearch | x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/expression/function/scalar/string/StartsWithFunctionProcessorTests.java | {
"start": 333,
"end": 473
} | class ____ extends
org.elasticsearch.xpack.ql.expression.function.scalar.string.StartsWithProcessorTests {}
| StartsWithFunctionProcessorTests |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/action/admin/indices/resolve/ResolvedIndexSerializingTests.java | {
"start": 744,
"end": 2735
} | class ____ extends AbstractWireSerializingTestCase<ResolveIndexAction.ResolvedIndex> {
@Override
protected Writeable.Reader<ResolveIndexAction.ResolvedIndex> instanceReader() {
return ResolveIndexAction.ResolvedIndex::new;
}
@Override
protected ResolveIndexAction.ResolvedIndex mutateInstance(ResolveIndexAction.ResolvedIndex instance) {
String name = instance.getName();
String[] aliases = instance.getAliases();
String[] attributes = instance.getAttributes();
String dataStream = instance.getDataStream();
IndexMode mode = instance.getMode();
mode = randomValueOtherThan(mode, () -> randomFrom(IndexMode.values()));
return new ResolveIndexAction.ResolvedIndex(name, aliases, attributes, dataStream, mode);
}
@Override
protected ResolveIndexAction.ResolvedIndex createTestInstance() {
return createTestItem();
}
private static ResolveIndexAction.ResolvedIndex createTestItem() {
// Random index name
final String name = randomAlphaOfLengthBetween(5, 20);
// Random aliases (possibly empty)
final String[] aliases = randomBoolean()
? new String[0]
: randomArray(0, 4, String[]::new, () -> randomAlphaOfLengthBetween(3, 15));
// Attributes: always one of "open"/"closed", plus optional flags
final List<String> attrs = new ArrayList<>();
attrs.add(randomBoolean() ? "open" : "closed");
if (randomBoolean()) attrs.add("hidden");
if (randomBoolean()) attrs.add("system");
if (randomBoolean()) attrs.add("frozen");
final String[] attributes = attrs.toArray(new String[0]);
final String dataStream = randomBoolean() ? randomAlphaOfLengthBetween(3, 15) : null;
final IndexMode mode = randomFrom(IndexMode.values());
return new ResolveIndexAction.ResolvedIndex(name, aliases, attributes, dataStream, mode);
}
}
| ResolvedIndexSerializingTests |
java | elastic__elasticsearch | build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/MutedTestsBuildService.java | {
"start": 1304,
"end": 4240
} | class ____ implements BuildService<MutedTestsBuildService.Params> {
private final Set<String> excludePatterns = new LinkedHashSet<>();
private final ObjectMapper objectMapper = new ObjectMapper(new YAMLFactory());
public MutedTestsBuildService() {
File infoPath = getParameters().getInfoPath().get().getAsFile();
File mutedTestsFile = new File(infoPath, "muted-tests.yml");
excludePatterns.addAll(buildExcludePatterns(mutedTestsFile));
for (RegularFile regularFile : getParameters().getAdditionalFiles().get()) {
excludePatterns.addAll(buildExcludePatterns(regularFile.getAsFile()));
}
}
public Set<String> getExcludePatterns() {
return excludePatterns;
}
private Set<String> buildExcludePatterns(File file) {
List<MutedTest> mutedTests;
try (InputStream is = new BufferedInputStream(new FileInputStream(file))) {
mutedTests = objectMapper.readValue(is, MutedTests.class).getTests();
if (mutedTests == null) {
return Collections.emptySet();
}
} catch (IOException e) {
throw new UncheckedIOException(e);
}
Set<String> excludes = new LinkedHashSet<>();
if (mutedTests.isEmpty() == false) {
for (MutedTestsBuildService.MutedTest mutedTest : mutedTests) {
if (mutedTest.getClassName() != null && mutedTest.getMethods().isEmpty() == false) {
for (String method : mutedTest.getMethods()) {
// Tests that use the randomized runner and parameters end up looking like this:
// test {yaml=analysis-common/30_tokenizers/letter}
// We need to detect this and handle them a little bit different than non-parameterized tests, because of some
// quirks in the randomized runner
int index = method.indexOf(" {");
String methodWithoutParams = index >= 0 ? method.substring(0, index) : method;
String paramString = index >= 0 ? method.substring(index) : null;
excludes.add(mutedTest.getClassName() + "." + method);
if (paramString != null) {
// Because of randomized runner quirks, we need skip the test method by itself whenever we want to skip a test
// that has parameters
// This is because the runner has *two* separate checks that can cause the test to end up getting executed, so
// we need filters that cover both checks
excludes.add(mutedTest.getClassName() + "." + methodWithoutParams);
} else {
// We need to add the following, in case we're skipping an entire | MutedTestsBuildService |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.