language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
quarkusio__quarkus
|
independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/interceptors/synthbean/SynthBeanWithParameterizedConstructorsAndInterceptionTest.java
|
{
"start": 4855,
"end": 5651
}
|
class ____ {
private final int i;
private final int j;
MyNonbean() {
this.i = 0;
this.j = 0;
}
MyNonbean(int i) {
this.i = i;
this.j = 0;
}
MyNonbean(int i, int j) {
this.i = i;
this.j = j;
}
MyNonbean(byte i, short j) {
this.i = i;
this.j = j;
}
@MyBinding2
String hello1() {
return "hello1_" + i + "_" + j;
}
@NoClassInterceptors
@MyBinding2
String hello2(int k) {
return "hello2_" + i + "_" + j + "_" + k;
}
@NoClassInterceptors
String hello3() {
return "hello3_" + i + "_" + j;
}
}
}
|
MyNonbean
|
java
|
alibaba__nacos
|
ai/src/test/java/com/alibaba/nacos/ai/utils/McpRequestUtilsTest.java
|
{
"start": 876,
"end": 1305
}
|
class ____ {
@Test
void fillNamespaceId() {
QueryMcpServerRequest request = new QueryMcpServerRequest();
McpRequestUtil.fillNamespaceId(request);
assertEquals(AiConstants.Mcp.MCP_DEFAULT_NAMESPACE, request.getNamespaceId());
request.setNamespaceId("test");
McpRequestUtil.fillNamespaceId(request);
assertEquals("test", request.getNamespaceId());
}
}
|
McpRequestUtilsTest
|
java
|
spring-projects__spring-security
|
test/src/test/java/org/springframework/security/test/web/servlet/request/SecurityMockMvcRequestPostProcessorsOAuth2LoginTests.java
|
{
"start": 3675,
"end": 6749
}
|
class ____ {
@Autowired
WebApplicationContext context;
MockMvc mvc;
@BeforeEach
public void setup() {
// @formatter:off
this.mvc = MockMvcBuilders
.webAppContextSetup(this.context)
.apply(springSecurity())
.build();
// @formatter:on
}
@Test
public void oauth2LoginWhenUsingDefaultsThenProducesDefaultAuthentication() throws Exception {
this.mvc.perform(get("/name").with(oauth2Login())).andExpect(content().string("user"));
this.mvc.perform(get("/admin/id-token/name").with(oauth2Login())).andExpect(status().isForbidden());
}
@Test
public void oauth2LoginWhenUsingDefaultsThenProducesDefaultAuthorizedClient() throws Exception {
this.mvc.perform(get("/client-id").with(oauth2Login())).andExpect(content().string("test-client"));
}
@Test
public void oauth2LoginWhenAuthoritiesSpecifiedThenGrantsAccess() throws Exception {
this.mvc
.perform(get("/admin/scopes").with(oauth2Login().authorities(new SimpleGrantedAuthority("SCOPE_admin"))))
.andExpect(content().string("[\"SCOPE_admin\"]"));
}
@Test
public void oauth2LoginWhenAttributeSpecifiedThenUserHasAttribute() throws Exception {
this.mvc
.perform(get("/attributes/iss")
.with(oauth2Login().attributes((a) -> a.put("iss", "https://idp.example.org"))))
.andExpect(content().string("https://idp.example.org"));
}
@Test
public void oauth2LoginWhenNameSpecifiedThenUserHasName() throws Exception {
OAuth2User oauth2User = new DefaultOAuth2User(AuthorityUtils.commaSeparatedStringToAuthorityList("SCOPE_read"),
Collections.singletonMap("custom-attribute", "test-subject"), "custom-attribute");
this.mvc.perform(get("/attributes/custom-attribute").with(oauth2Login().oauth2User(oauth2User)))
.andExpect(content().string("test-subject"));
this.mvc.perform(get("/name").with(oauth2Login().oauth2User(oauth2User)))
.andExpect(content().string("test-subject"));
this.mvc.perform(get("/client-name").with(oauth2Login().oauth2User(oauth2User)))
.andExpect(content().string("test-subject"));
}
@Test
public void oauth2LoginWhenClientRegistrationSpecifiedThenUses() throws Exception {
this.mvc
.perform(get("/client-id")
.with(oauth2Login().clientRegistration(TestClientRegistrations.clientRegistration().build())))
.andExpect(content().string("client-id"));
}
@Test
public void oauth2LoginWhenOAuth2UserSpecifiedThenLastCalledTakesPrecedence() throws Exception {
OAuth2User oauth2User = new DefaultOAuth2User(AuthorityUtils.createAuthorityList("SCOPE_read"),
Collections.singletonMap("username", "user"), "username");
this.mvc
.perform(get("/attributes/sub")
.with(oauth2Login().attributes((a) -> a.put("sub", "bar")).oauth2User(oauth2User)))
.andExpect(status().isOk())
.andExpect(content().string("no-attribute"));
this.mvc
.perform(get("/attributes/sub")
.with(oauth2Login().oauth2User(oauth2User).attributes((a) -> a.put("sub", "bar"))))
.andExpect(content().string("bar"));
}
@Configuration
@EnableWebSecurity
@EnableWebMvc
static
|
SecurityMockMvcRequestPostProcessorsOAuth2LoginTests
|
java
|
apache__camel
|
dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/KubernetesCronjobComponentBuilderFactory.java
|
{
"start": 1921,
"end": 4644
}
|
interface ____ extends ComponentBuilder<KubernetesCronJobComponent> {
/**
* To use an existing kubernetes client.
*
* The option is a:
* <code>io.fabric8.kubernetes.client.KubernetesClient</code> type.
*
* Group: producer
*
* @param kubernetesClient the value to set
* @return the dsl builder
*/
default KubernetesCronjobComponentBuilder kubernetesClient(io.fabric8.kubernetes.client.KubernetesClient kubernetesClient) {
doSetProperty("kubernetesClient", kubernetesClient);
return this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default KubernetesCronjobComponentBuilder lazyStartProducer(boolean lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* Whether autowiring is enabled. This is used for automatic autowiring
* options (the option must be marked as autowired) by looking up in the
* registry to find if there is a single instance of matching type,
* which then gets configured on the component. This can be used for
* automatic configuring JDBC data sources, JMS connection factories,
* AWS Clients, etc.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: advanced
*
* @param autowiredEnabled the value to set
* @return the dsl builder
*/
default KubernetesCronjobComponentBuilder autowiredEnabled(boolean autowiredEnabled) {
doSetProperty("autowiredEnabled", autowiredEnabled);
return this;
}
}
|
KubernetesCronjobComponentBuilder
|
java
|
google__dagger
|
dagger-producers/main/java/dagger/producers/monitoring/internal/Monitors.java
|
{
"start": 6348,
"end": 7539
}
|
class ____
extends ProductionComponentMonitor {
private final ImmutableList<ProductionComponentMonitor> delegates;
DelegatingProductionComponentMonitor(ImmutableList<ProductionComponentMonitor> delegates) {
this.delegates = delegates;
}
@Override
public ProducerMonitor producerMonitorFor(ProducerToken token) {
ImmutableList.Builder<ProducerMonitor> monitorsBuilder = ImmutableList.builder();
for (ProductionComponentMonitor delegate : delegates) {
try {
ProducerMonitor monitor = delegate.producerMonitorFor(token);
if (monitor != null) {
monitorsBuilder.add(monitor);
}
} catch (RuntimeException e) {
logProducerMonitorForException(e, delegate, token);
}
}
ImmutableList<ProducerMonitor> monitors = monitorsBuilder.build();
if (monitors.isEmpty()) {
return ProducerMonitor.noOp();
} else if (monitors.size() == 1) {
return new NonThrowingProducerMonitor(Iterables.getOnlyElement(monitors));
} else {
return new DelegatingProducerMonitor(monitors);
}
}
static final
|
DelegatingProductionComponentMonitor
|
java
|
spring-projects__spring-framework
|
spring-core/src/test/java/org/springframework/aot/hint/annotation/ReflectiveRuntimeHintsRegistrarTests.java
|
{
"start": 7195,
"end": 7325
}
|
class ____ {
@Reflective
String managed;
String notManaged;
}
@SuppressWarnings("unused")
static
|
SampleFieldAnnotatedBean
|
java
|
google__error-prone
|
annotations/src/main/java/com/google/errorprone/annotations/RequiredModifiers.java
|
{
"start": 1063,
"end": 1433
}
|
interface ____ {}
* }</pre>
*
* <p>will be considered illegal when used on non-public elements such as:
*
* <pre>{@code
* @MyAnnotation void foo() {}
* }</pre>
*
* @author benyu@google.com (Jige Yu)
*/
@Documented
@Retention(RetentionPolicy.CLASS) // Element's source might not be available during analysis
@Target(ElementType.ANNOTATION_TYPE)
public @
|
MyAnnotation
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/query/criteria/JpaCriteriaQuery.java
|
{
"start": 799,
"end": 4625
}
|
interface ____<T> extends CriteriaQuery<T>, JpaQueryableCriteria<T>, JpaSelectCriteria<T>, JpaCriteriaSelect<T> {
/**
* A query that returns the number of results of this query.
*
* @since 6.4
*
* @see org.hibernate.query.SelectionQuery#getResultCount()
*/
JpaCriteriaQuery<Long> createCountQuery();
/**
* A query that returns {@code true} if this query has any results.
*
* @since 7.1
*/
@Incubating
JpaCriteriaQuery<Boolean> createExistsQuery();
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Limit/Offset/Fetch clause
@Nullable JpaExpression<Number> getOffset();
JpaCriteriaQuery<T> offset(@Nullable JpaExpression<? extends Number> offset);
JpaCriteriaQuery<T> offset(@Nullable Number offset);
@Nullable JpaExpression<Number> getFetch();
JpaCriteriaQuery<T> fetch(@Nullable JpaExpression<? extends Number> fetch);
JpaCriteriaQuery<T> fetch(JpaExpression<? extends Number> fetch, FetchClauseType fetchClauseType);
JpaCriteriaQuery<T> fetch(@Nullable Number fetch);
JpaCriteriaQuery<T> fetch(Number fetch, FetchClauseType fetchClauseType);
FetchClauseType getFetchClauseType();
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Accessors
/**
* Return the {@linkplain #getRoots() roots} as a list.
*/
List<? extends JpaRoot<?>> getRootList();
/**
* Get a {@linkplain Root query root} element at the given position
* with the given type.
*
* @param position the position of this root element
* @param type the type of the root entity
*
* @throws IllegalArgumentException if the root entity at the given
* position is not of the given type, or if there are not
* enough root entities in the query
*/
<E> JpaRoot<? extends E> getRoot(int position, Class<E> type);
/**
* Get a {@linkplain Root query root} element with the given alias
* and the given type.
*
* @param alias the identification variable of the root element
* @param type the type of the root entity
*
* @throws IllegalArgumentException if the root entity with the
* given alias is not of the given type, or if there is
* no root entities with the given alias
*/
<E> JpaRoot<? extends E> getRoot(String alias, Class<E> type);
/**
* {@inheritDoc}
*
* @apiNote Warning! This actually walks the criteria tree looking
* for parameters nodes.
*/
@Override
Set<ParameterExpression<?>> getParameters();
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Mutators
@Override
<X> JpaRoot<X> from(Class<X> entityClass);
@Override
<X> JpaRoot<X> from(EntityType<X> entity);
@Override
JpaCriteriaQuery<T> distinct(boolean distinct);
@Override
JpaCriteriaQuery<T> select(Selection<? extends T> selection);
@Override @Deprecated
JpaCriteriaQuery<T> multiselect(Selection<?>... selections);
@Override @Deprecated
JpaCriteriaQuery<T> multiselect(List<Selection<?>> selectionList);
@Override
JpaCriteriaQuery<T> where(@Nullable Expression<Boolean> restriction);
@Override
JpaCriteriaQuery<T> where(Predicate @Nullable... restrictions);
@Override
JpaCriteriaQuery<T> where(List<Predicate> restrictions);
@Override
JpaCriteriaQuery<T> groupBy(Expression<?>... grouping);
@Override
JpaCriteriaQuery<T> groupBy(List<Expression<?>> grouping);
@Override
JpaCriteriaQuery<T> having(@Nullable Expression<Boolean> restriction);
@Override
JpaCriteriaQuery<T> having(Predicate @Nullable... restrictions);
@Override
JpaCriteriaQuery<T> having(List<Predicate> restrictions);
@Override
JpaCriteriaQuery<T> orderBy(Order... o);
@Override
JpaCriteriaQuery<T> orderBy(List<Order> o);
@Override
<U> JpaSubQuery<U> subquery(EntityType<U> type);
HibernateCriteriaBuilder getCriteriaBuilder();
}
|
JpaCriteriaQuery
|
java
|
quarkusio__quarkus
|
extensions/resteasy-reactive/rest/deployment/src/main/java/io/quarkus/resteasy/reactive/server/deployment/CustomResourceProducersGenerator.java
|
{
"start": 2171,
"end": 2726
}
|
class ____ {
*
* private final String queryParamValue;
* private final UriInfo uriInfo;
*
* public QueryParamResource(@QueryParam("p1") String headerValue, @Context UriInfo uriInfo) {
* this.headerValue = headerValue;
* }
*
* @GET
* public String get() {
* // DO something
* }
* }
*
* </pre></code>
*
*
* then the generated producer would look like this:
*
* <code><pre>
*
* @Singleton
* public
|
QueryParamResource
|
java
|
google__guice
|
core/test/com/googlecode/guice/JakartaTest.java
|
{
"start": 12688,
"end": 13131
}
|
class ____ {
final Provider<B> bProvider;
@Inject Provider<C> cProvider;
Provider<D> dProvider;
Provider<E> eProvider;
@Inject
G(@Named("jodie") Provider<B> bProvider) {
this.bProvider = bProvider;
}
@Inject
void injectD(@Red Provider<D> dProvider, Provider<E> eProvider) {
this.dProvider = dProvider;
this.eProvider = eProvider;
}
}
@jakarta.inject.Scope
@Retention(RUNTIME)
@
|
G
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/deser/MergePolymorphicTest.java
|
{
"start": 874,
"end": 949
}
|
class ____ extends Child {
public String name;
}
static
|
ChildA
|
java
|
spring-projects__spring-boot
|
module/spring-boot-data-r2dbc-test/src/test/java/org/springframework/boot/data/r2dbc/test/autoconfigure/DataR2dbcTestPropertiesIntegrationTests.java
|
{
"start": 1373,
"end": 1728
}
|
class ____ {
@Autowired
private Environment innerEnvironment;
@Test
void propertiesFromEnclosingClassAffectNestedTests() {
assertThat(DataR2dbcTestPropertiesIntegrationTests.this.environment.getActiveProfiles())
.containsExactly("test");
assertThat(this.innerEnvironment.getActiveProfiles()).containsExactly("test");
}
}
}
|
NestedTests
|
java
|
apache__hadoop
|
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
|
{
"start": 4700,
"end": 40538
}
|
class ____ extends RMContainerRequestor
implements ContainerAllocator {
static final Logger LOG = LoggerFactory.getLogger(RMContainerAllocator.class);
public static final
float DEFAULT_COMPLETED_MAPS_PERCENT_FOR_REDUCE_SLOWSTART = 0.05f;
static final Priority PRIORITY_FAST_FAIL_MAP;
static final Priority PRIORITY_REDUCE;
static final Priority PRIORITY_MAP;
static final Priority PRIORITY_OPPORTUNISTIC_MAP;
@VisibleForTesting
public static final String RAMPDOWN_DIAGNOSTIC = "Reducer preempted "
+ "to make room for pending map attempts";
private SubjectInheritingThread eventHandlingThread;
private final AtomicBoolean stopped;
static {
PRIORITY_FAST_FAIL_MAP = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(Priority.class);
PRIORITY_FAST_FAIL_MAP.setPriority(5);
PRIORITY_REDUCE = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(Priority.class);
PRIORITY_REDUCE.setPriority(10);
PRIORITY_MAP = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(Priority.class);
PRIORITY_MAP.setPriority(20);
PRIORITY_OPPORTUNISTIC_MAP =
RecordFactoryProvider.getRecordFactory(null).newRecordInstance(
Priority.class);
PRIORITY_OPPORTUNISTIC_MAP.setPriority(19);
}
/*
Vocabulary Used:
pending -> requests which are NOT yet sent to RM
scheduled -> requests which are sent to RM but not yet assigned
assigned -> requests which are assigned to a container
completed -> request corresponding to which container has completed
Lifecycle of map
scheduled->assigned->completed
Lifecycle of reduce
pending->scheduled->assigned->completed
Maps are scheduled as soon as their requests are received. Reduces are
added to the pending and are ramped up (added to scheduled) based
on completed maps and current availability in the cluster.
*/
//reduces which are not yet scheduled
private final LinkedList<ContainerRequest> pendingReduces =
new LinkedList<ContainerRequest>();
//holds information about the assigned containers to task attempts
private final AssignedRequests assignedRequests;
//holds scheduled requests to be fulfilled by RM
private final ScheduledRequests scheduledRequests = new ScheduledRequests();
private int containersAllocated = 0;
private int containersReleased = 0;
private int hostLocalAssigned = 0;
private int rackLocalAssigned = 0;
private int lastCompletedTasks = 0;
private boolean recalculateReduceSchedule = false;
private Resource mapResourceRequest = Resources.none();
private Resource reduceResourceRequest = Resources.none();
private boolean reduceStarted = false;
private float maxReduceRampupLimit = 0;
private float maxReducePreemptionLimit = 0;
// Mapper allocation timeout, after which a reducer is forcibly preempted
private long reducerUnconditionalPreemptionDelayMs;
// Duration to wait before preempting a reducer when there is NO room
private long reducerNoHeadroomPreemptionDelayMs = 0;
private float reduceSlowStart = 0;
private int maxRunningMaps = 0;
private int maxRunningReduces = 0;
private long retryInterval;
private long retrystartTime;
private Clock clock;
private final AMPreemptionPolicy preemptionPolicy;
@VisibleForTesting
protected BlockingQueue<ContainerAllocatorEvent> eventQueue
= new LinkedBlockingQueue<ContainerAllocatorEvent>();
private ScheduleStats scheduleStats = new ScheduleStats();
private String mapNodeLabelExpression;
private String reduceNodeLabelExpression;
public RMContainerAllocator(ClientService clientService, AppContext context,
AMPreemptionPolicy preemptionPolicy) {
super(clientService, context);
this.preemptionPolicy = preemptionPolicy;
this.stopped = new AtomicBoolean(false);
this.clock = context.getClock();
this.assignedRequests = createAssignedRequests();
}
protected AssignedRequests createAssignedRequests() {
return new AssignedRequests();
}
@Override
protected void serviceInit(Configuration conf) throws Exception {
super.serviceInit(conf);
reduceSlowStart = conf.getFloat(
MRJobConfig.COMPLETED_MAPS_FOR_REDUCE_SLOWSTART,
DEFAULT_COMPLETED_MAPS_PERCENT_FOR_REDUCE_SLOWSTART);
maxReduceRampupLimit = conf.getFloat(
MRJobConfig.MR_AM_JOB_REDUCE_RAMPUP_UP_LIMIT,
MRJobConfig.DEFAULT_MR_AM_JOB_REDUCE_RAMP_UP_LIMIT);
maxReducePreemptionLimit = conf.getFloat(
MRJobConfig.MR_AM_JOB_REDUCE_PREEMPTION_LIMIT,
MRJobConfig.DEFAULT_MR_AM_JOB_REDUCE_PREEMPTION_LIMIT);
reducerUnconditionalPreemptionDelayMs = 1000 * conf.getInt(
MRJobConfig.MR_JOB_REDUCER_UNCONDITIONAL_PREEMPT_DELAY_SEC,
MRJobConfig.DEFAULT_MR_JOB_REDUCER_UNCONDITIONAL_PREEMPT_DELAY_SEC);
reducerNoHeadroomPreemptionDelayMs = conf.getInt(
MRJobConfig.MR_JOB_REDUCER_PREEMPT_DELAY_SEC,
MRJobConfig.DEFAULT_MR_JOB_REDUCER_PREEMPT_DELAY_SEC) * 1000;//sec -> ms
maxRunningMaps = conf.getInt(MRJobConfig.JOB_RUNNING_MAP_LIMIT,
MRJobConfig.DEFAULT_JOB_RUNNING_MAP_LIMIT);
maxRunningReduces = conf.getInt(MRJobConfig.JOB_RUNNING_REDUCE_LIMIT,
MRJobConfig.DEFAULT_JOB_RUNNING_REDUCE_LIMIT);
RackResolver.init(conf);
retryInterval = getConfig().getLong(MRJobConfig.MR_AM_TO_RM_WAIT_INTERVAL_MS,
MRJobConfig.DEFAULT_MR_AM_TO_RM_WAIT_INTERVAL_MS);
mapNodeLabelExpression = conf.get(MRJobConfig.MAP_NODE_LABEL_EXP);
reduceNodeLabelExpression = conf.get(MRJobConfig.REDUCE_NODE_LABEL_EXP);
// Init startTime to current time. If all goes well, it will be reset after
// first attempt to contact RM.
retrystartTime = System.currentTimeMillis();
this.scheduledRequests.setNumOpportunisticMapsPercent(
conf.getInt(MRJobConfig.MR_NUM_OPPORTUNISTIC_MAPS_PERCENT,
MRJobConfig.DEFAULT_MR_NUM_OPPORTUNISTIC_MAPS_PERCENT));
LOG.info(this.scheduledRequests.getNumOpportunisticMapsPercent() +
"% of the mappers will be scheduled using OPPORTUNISTIC containers");
}
@Override
protected void serviceStart() throws Exception {
this.eventHandlingThread = new SubjectInheritingThread() {
@SuppressWarnings("unchecked")
@Override
public void work() {
ContainerAllocatorEvent event;
while (!stopped.get() && !Thread.currentThread().isInterrupted()) {
try {
event = RMContainerAllocator.this.eventQueue.take();
} catch (InterruptedException e) {
if (!stopped.get()) {
LOG.error("Returning, interrupted : " + e);
}
return;
}
try {
handleEvent(event);
} catch (Throwable t) {
LOG.error("Error in handling event type " + event.getType()
+ " to the ContainreAllocator", t);
// Kill the AM
eventHandler.handle(new JobEvent(getJob().getID(),
JobEventType.INTERNAL_ERROR));
return;
}
}
}
};
this.eventHandlingThread.start();
super.serviceStart();
}
@Override
protected synchronized void heartbeat() throws Exception {
scheduleStats.updateAndLogIfChanged("Before Scheduling: ");
List<Container> allocatedContainers = getResources();
if (allocatedContainers != null && allocatedContainers.size() > 0) {
scheduledRequests.assign(allocatedContainers);
}
int completedMaps = getJob().getCompletedMaps();
int completedTasks = completedMaps + getJob().getCompletedReduces();
if ((lastCompletedTasks != completedTasks) ||
(scheduledRequests.maps.size() > 0)) {
lastCompletedTasks = completedTasks;
recalculateReduceSchedule = true;
}
if (recalculateReduceSchedule) {
boolean reducerPreempted = preemptReducesIfNeeded();
if (!reducerPreempted) {
// Only schedule new reducers if no reducer preemption happens for
// this heartbeat
scheduleReduces(getJob().getTotalMaps(), completedMaps,
scheduledRequests.maps.size(), scheduledRequests.reduces.size(),
assignedRequests.maps.size(), assignedRequests.reduces.size(),
mapResourceRequest, reduceResourceRequest, pendingReduces.size(),
maxReduceRampupLimit, reduceSlowStart);
}
recalculateReduceSchedule = false;
}
scheduleStats.updateAndLogIfChanged("After Scheduling: ");
}
@Override
protected void serviceStop() throws Exception {
if (stopped.getAndSet(true)) {
// return if already stopped
return;
}
if (eventHandlingThread != null) {
eventHandlingThread.interrupt();
}
super.serviceStop();
scheduleStats.log("Final Stats: ");
}
@Private
@VisibleForTesting
AssignedRequests getAssignedRequests() {
return assignedRequests;
}
@Private
@VisibleForTesting
ScheduledRequests getScheduledRequests() {
return scheduledRequests;
}
@Private
@VisibleForTesting
int getNumOfPendingReduces() {
return pendingReduces.size();
}
public boolean getIsReduceStarted() {
return reduceStarted;
}
public void setIsReduceStarted(boolean reduceStarted) {
this.reduceStarted = reduceStarted;
}
@Override
public void handle(ContainerAllocatorEvent event) {
int qSize = eventQueue.size();
if (qSize != 0 && qSize % 1000 == 0) {
LOG.info("Size of event-queue in RMContainerAllocator is " + qSize);
}
int remCapacity = eventQueue.remainingCapacity();
if (remCapacity < 1000) {
LOG.warn("Very low remaining capacity in the event-queue "
+ "of RMContainerAllocator: " + remCapacity);
}
try {
eventQueue.put(event);
} catch (InterruptedException e) {
throw new YarnRuntimeException(e);
}
}
protected synchronized void handleEvent(ContainerAllocatorEvent event) {
recalculateReduceSchedule = true;
if (event.getType() == ContainerAllocator.EventType.CONTAINER_REQ) {
ContainerRequestEvent reqEvent = (ContainerRequestEvent) event;
boolean isMap = reqEvent.getAttemptID().getTaskId().getTaskType().
equals(TaskType.MAP);
if (isMap) {
handleMapContainerRequest(reqEvent);
} else {
handleReduceContainerRequest(reqEvent);
}
} else if (
event.getType() == ContainerAllocator.EventType.CONTAINER_DEALLOCATE) {
LOG.info("Processing the event " + event.toString());
TaskAttemptId aId = event.getAttemptID();
boolean removed = scheduledRequests.remove(aId);
if (!removed) {
ContainerId containerId = assignedRequests.get(aId);
if (containerId != null) {
removed = true;
assignedRequests.remove(aId);
containersReleased++;
pendingRelease.add(containerId);
release(containerId);
}
}
if (!removed) {
LOG.error("Could not deallocate container for task attemptId " +
aId);
}
preemptionPolicy.handleCompletedContainer(event.getAttemptID());
} else if (
event.getType() == ContainerAllocator.EventType.CONTAINER_FAILED) {
ContainerFailedEvent fEv = (ContainerFailedEvent) event;
String host = getHost(fEv.getContMgrAddress());
containerFailedOnHost(host);
// propagate failures to preemption policy to discard checkpoints for
// failed tasks
preemptionPolicy.handleFailedContainer(event.getAttemptID());
}
}
@SuppressWarnings({ "unchecked" })
private void handleReduceContainerRequest(ContainerRequestEvent reqEvent) {
assert(reqEvent.getAttemptID().getTaskId().getTaskType().equals(
TaskType.REDUCE));
Resource supportedMaxContainerCapability = getMaxContainerCapability();
JobId jobId = getJob().getID();
if (reduceResourceRequest.equals(Resources.none())) {
reduceResourceRequest = reqEvent.getCapability();
eventHandler.handle(new JobHistoryEvent(jobId,
new NormalizedResourceEvent(
org.apache.hadoop.mapreduce.TaskType.REDUCE,
reduceResourceRequest.getMemorySize())));
LOG.info("reduceResourceRequest:" + reduceResourceRequest);
}
boolean reduceContainerRequestAccepted = true;
if (reduceResourceRequest.getMemorySize() >
supportedMaxContainerCapability.getMemorySize()
||
reduceResourceRequest.getVirtualCores() >
supportedMaxContainerCapability.getVirtualCores()) {
reduceContainerRequestAccepted = false;
}
if (reduceContainerRequestAccepted) {
// set the resources
reqEvent.getCapability().setVirtualCores(
reduceResourceRequest.getVirtualCores());
reqEvent.getCapability().setMemorySize(
reduceResourceRequest.getMemorySize());
if (reqEvent.getEarlierAttemptFailed()) {
//previously failed reducers are added to the front for fail fast
pendingReduces.addFirst(new ContainerRequest(reqEvent,
PRIORITY_REDUCE, reduceNodeLabelExpression));
} else {
//reduces are added to pending queue and are slowly ramped up
pendingReduces.add(new ContainerRequest(reqEvent,
PRIORITY_REDUCE, reduceNodeLabelExpression));
}
} else {
String diagMsg = "REDUCE capability required is more than the " +
"supported max container capability in the cluster. Killing" +
" the Job. reduceResourceRequest: " + reduceResourceRequest +
" maxContainerCapability:" + supportedMaxContainerCapability;
LOG.info(diagMsg);
eventHandler.handle(new JobDiagnosticsUpdateEvent(jobId, diagMsg));
eventHandler.handle(new JobEvent(jobId, JobEventType.JOB_KILL));
}
}
@SuppressWarnings({ "unchecked" })
private void handleMapContainerRequest(ContainerRequestEvent reqEvent) {
assert(reqEvent.getAttemptID().getTaskId().getTaskType().equals(
TaskType.MAP));
Resource supportedMaxContainerCapability = getMaxContainerCapability();
JobId jobId = getJob().getID();
if (mapResourceRequest.equals(Resources.none())) {
mapResourceRequest = reqEvent.getCapability();
eventHandler.handle(new JobHistoryEvent(jobId,
new NormalizedResourceEvent(
org.apache.hadoop.mapreduce.TaskType.MAP,
mapResourceRequest.getMemorySize())));
LOG.info("mapResourceRequest:" + mapResourceRequest);
}
boolean mapContainerRequestAccepted = true;
if (mapResourceRequest.getMemorySize() >
supportedMaxContainerCapability.getMemorySize()
||
mapResourceRequest.getVirtualCores() >
supportedMaxContainerCapability.getVirtualCores()) {
mapContainerRequestAccepted = false;
}
if(mapContainerRequestAccepted) {
// set the resources
reqEvent.getCapability().setMemorySize(
mapResourceRequest.getMemorySize());
reqEvent.getCapability().setVirtualCores(
mapResourceRequest.getVirtualCores());
scheduledRequests.addMap(reqEvent); //maps are immediately scheduled
} else {
String diagMsg = "The required MAP capability is more than the " +
"supported max container capability in the cluster. Killing" +
" the Job. mapResourceRequest: " + mapResourceRequest +
" maxContainerCapability:" + supportedMaxContainerCapability;
LOG.info(diagMsg);
eventHandler.handle(new JobDiagnosticsUpdateEvent(jobId, diagMsg));
eventHandler.handle(new JobEvent(jobId, JobEventType.JOB_KILL));
}
}
private static String getHost(String contMgrAddress) {
String host = contMgrAddress;
String[] hostport = host.split(":");
if (hostport.length == 2) {
host = hostport[0];
}
return host;
}
@Private
@VisibleForTesting
synchronized void setReduceResourceRequest(Resource res) {
this.reduceResourceRequest = res;
}
@Private
@VisibleForTesting
synchronized void setMapResourceRequest(Resource res) {
this.mapResourceRequest = res;
}
@Private
@VisibleForTesting
boolean preemptReducesIfNeeded() {
if (reduceResourceRequest.equals(Resources.none())) {
return false; // no reduces
}
if (assignedRequests.maps.size() > 0) {
// there are assigned mappers
return false;
}
if (scheduledRequests.maps.size() <= 0) {
// there are no pending requests for mappers
return false;
}
// At this point:
// we have pending mappers and all assigned resources are taken by reducers
if (reducerUnconditionalPreemptionDelayMs >= 0) {
// Unconditional preemption is enabled.
// If mappers are pending for longer than the configured threshold,
// preempt reducers irrespective of what the headroom is.
if (preemptReducersForHangingMapRequests(
reducerUnconditionalPreemptionDelayMs)) {
return true;
}
}
// The pending mappers haven't been waiting for too long. Let us see if
// there are enough resources for a mapper to run. This is calculated by
// excluding scheduled reducers from headroom and comparing it against
// resources required to run one mapper.
Resource scheduledReducesResource = Resources.multiply(
reduceResourceRequest, scheduledRequests.reduces.size());
Resource availableResourceForMap =
Resources.subtract(getAvailableResources(), scheduledReducesResource);
if (ResourceCalculatorUtils.computeAvailableContainers(availableResourceForMap,
mapResourceRequest, getSchedulerResourceTypes()) > 0) {
// Enough room to run a mapper
return false;
}
// Available resources are not enough to run mapper. See if we should hold
// off before preempting reducers and preempt if okay.
return preemptReducersForHangingMapRequests(reducerNoHeadroomPreemptionDelayMs);
}
private boolean preemptReducersForHangingMapRequests(long pendingThreshold) {
int hangingMapRequests = getNumHangingRequests(
pendingThreshold, scheduledRequests.maps);
if (hangingMapRequests > 0) {
preemptReducer(hangingMapRequests);
return true;
}
return false;
}
private void clearAllPendingReduceRequests() {
rampDownReduces(Integer.MAX_VALUE);
}
private void preemptReducer(int hangingMapRequests) {
clearAllPendingReduceRequests();
// preempt for making space for at least one map
int preemptionReduceNumForOneMap =
ResourceCalculatorUtils.divideAndCeilContainers(mapResourceRequest,
reduceResourceRequest, getSchedulerResourceTypes());
int preemptionReduceNumForPreemptionLimit =
ResourceCalculatorUtils.divideAndCeilContainers(
Resources.multiply(getResourceLimit(), maxReducePreemptionLimit),
reduceResourceRequest, getSchedulerResourceTypes());
int preemptionReduceNumForAllMaps =
ResourceCalculatorUtils.divideAndCeilContainers(
Resources.multiply(mapResourceRequest, hangingMapRequests),
reduceResourceRequest, getSchedulerResourceTypes());
int toPreempt =
Math.min(Math.max(preemptionReduceNumForOneMap,
preemptionReduceNumForPreemptionLimit),
preemptionReduceNumForAllMaps);
LOG.info("Going to preempt " + toPreempt
+ " due to lack of space for maps");
assignedRequests.preemptReduce(toPreempt);
}
/**
 * Counts the container requests in {@code requestMap} that have been
 * pending longer than {@code allocationDelayThresholdMs}.
 *
 * @param allocationDelayThresholdMs delay (ms) after which a request is
 *     considered hanging; non-positive means all requests count
 * @param requestMap pending requests keyed by task attempt id
 * @return number of hanging requests
 */
private int getNumHangingRequests(long allocationDelayThresholdMs,
    Map<TaskAttemptId, ContainerRequest> requestMap) {
  // A non-positive threshold treats every pending request as hanging.
  if (allocationDelayThresholdMs <= 0) {
    return requestMap.size();
  }
  final long now = clock.getTime();
  int hanging = 0;
  for (ContainerRequest req : requestMap.values()) {
    if (now - req.requestTimeMs > allocationDelayThresholdMs) {
      hanging++;
    }
  }
  return hanging;
}
/**
 * Recomputes how many reduces should be scheduled right now, ramping
 * reduces up or down based on map completion progress and the job's
 * current resource headroom.
 *
 * Ordering matters: slow-start gating first, then the all-maps-assigned
 * fast path, then the proportional ramp-up/ramp-down computation.
 *
 * @param totalMaps total maps in the job (may be 0)
 * @param completedMaps maps finished so far
 * @param scheduledMaps / scheduledReduces requests not yet assigned
 * @param assignedMaps / assignedReduces requests with live containers
 * @param mapResourceReqt / reduceResourceReqt per-container resource asks
 * @param numPendingReduces reduces not yet scheduled
 * @param maxReduceRampupLimit max fraction of resources reduces may take
 *     while maps are still running
 * @param reduceSlowStart fraction of maps that must complete before any
 *     reduce is scheduled
 */
@Private
public void scheduleReduces(
    int totalMaps, int completedMaps,
    int scheduledMaps, int scheduledReduces,
    int assignedMaps, int assignedReduces,
    Resource mapResourceReqt, Resource reduceResourceReqt,
    int numPendingReduces,
    float maxReduceRampupLimit, float reduceSlowStart) {
  if (numPendingReduces == 0) {
    return;
  }
  // get available resources for this job
  Resource headRoom = getAvailableResources();
  LOG.info("Recalculating schedule, headroom=" + headRoom);
  // check for slow start: no reduces until enough maps have completed
  if (!getIsReduceStarted()) {//not set yet
    int completedMapsForReduceSlowstart = (int)Math.ceil(reduceSlowStart *
        totalMaps);
    if(completedMaps < completedMapsForReduceSlowstart) {
      LOG.info("Reduce slow start threshold not met. " +
            "completedMapsForReduceSlowstart " +
          completedMapsForReduceSlowstart);
      return;
    } else {
      LOG.info("Reduce slow start threshold reached. Scheduling reduces.");
      setIsReduceStarted(true);
    }
  }
  //if all maps are assigned, then ramp up all reduces irrespective of the
  //headroom
  if (scheduledMaps == 0 && numPendingReduces > 0) {
    LOG.info("All maps assigned. " +
        "Ramping up all remaining reduces:" + numPendingReduces);
    scheduleAllReduces();
    return;
  }
  float completedMapPercent = 0f;
  if (totalMaps != 0) {//support for 0 maps: treat as fully complete
    completedMapPercent = (float)completedMaps/totalMaps;
  } else {
    completedMapPercent = 1;
  }
  // resources currently committed to each task type (scheduled + assigned)
  Resource netScheduledMapResource =
      Resources.multiply(mapResourceReqt, (scheduledMaps + assignedMaps));
  Resource netScheduledReduceResource =
      Resources.multiply(reduceResourceReqt,
          (scheduledReduces + assignedReduces));
  Resource finalMapResourceLimit;
  Resource finalReduceResourceLimit;
  // ramp up the reduces based on completed map percentage
  Resource totalResourceLimit = getResourceLimit();
  Resource idealReduceResourceLimit =
      Resources.multiply(totalResourceLimit,
          Math.min(completedMapPercent, maxReduceRampupLimit));
  Resource ideaMapResourceLimit =
      Resources.subtract(totalResourceLimit, idealReduceResourceLimit);
  // check if there aren't enough maps scheduled, give the free map capacity
  // to reduce.
  // Even when container number equals, there may be unused resources in one
  // dimension
  if (ResourceCalculatorUtils.computeAvailableContainers(ideaMapResourceLimit,
      mapResourceReqt, getSchedulerResourceTypes()) >= (scheduledMaps + assignedMaps)) {
    // enough resource given to maps, given the remaining to reduces
    Resource unusedMapResourceLimit =
        Resources.subtract(ideaMapResourceLimit, netScheduledMapResource);
    finalReduceResourceLimit =
        Resources.add(idealReduceResourceLimit, unusedMapResourceLimit);
    finalMapResourceLimit =
        Resources.subtract(totalResourceLimit, finalReduceResourceLimit);
  } else {
    finalMapResourceLimit = ideaMapResourceLimit;
    finalReduceResourceLimit = idealReduceResourceLimit;
  }
  LOG.info("completedMapPercent " + completedMapPercent
      + " totalResourceLimit:" + totalResourceLimit
      + " finalMapResourceLimit:" + finalMapResourceLimit
      + " finalReduceResourceLimit:" + finalReduceResourceLimit
      + " netScheduledMapResource:" + netScheduledMapResource
      + " netScheduledReduceResource:" + netScheduledReduceResource);
  // positive rampUp: more reduce containers fit under the reduce limit;
  // negative: we have overshot and must give some back
  int rampUp =
      ResourceCalculatorUtils.computeAvailableContainers(Resources.subtract(
          finalReduceResourceLimit, netScheduledReduceResource),
          reduceResourceReqt, getSchedulerResourceTypes());
  if (rampUp > 0) {
    rampUp = Math.min(rampUp, numPendingReduces);
    LOG.info("Ramping up " + rampUp);
    rampUpReduces(rampUp);
  } else if (rampUp < 0) {
    int rampDown = -1 * rampUp;
    rampDown = Math.min(rampDown, scheduledReduces);
    LOG.info("Ramping down " + rampDown);
    rampDownReduces(rampDown);
  }
}
/**
 * Moves every pending reduce request onto the scheduled list and empties
 * the pending list.
 */
@Private
public void scheduleAllReduces() {
  pendingReduces.forEach(scheduledRequests::addReduce);
  pendingReduces.clear();
}
/**
 * Promotes {@code rampUp} reduce requests from pending to scheduled,
 * taking them in FIFO order from the head of the pending list.
 *
 * @param rampUp number of reduces to schedule; callers must not exceed
 *     the pending count
 */
@Private
public void rampUpReduces(int rampUp) {
  int remaining = rampUp;
  while (remaining > 0) {
    scheduledRequests.addReduce(pendingReduces.removeFirst());
    remaining--;
  }
}
/**
 * Demotes up to {@code rampDown} scheduled reduce requests back to the
 * pending list, stopping early once the scheduler has no more reduces to
 * give back.
 *
 * @param rampDown maximum number of reduces to un-schedule
 */
@Private
public void rampDownReduces(int rampDown) {
  for (int i = 0; i < rampDown; i++) {
    ContainerRequest request = scheduledRequests.removeReduce();
    if (request == null) {
      // Nothing left to ramp down.
      return;
    }
    pendingReduces.add(request);
  }
}
/**
 * Performs one allocate heartbeat against the ResourceManager and returns
 * the newly allocated containers.
 *
 * Also processes everything piggybacked on the allocate response: NM/AMRM
 * tokens, completed containers, preemption messages, node updates, job
 * priority changes, and the timeline collector address.
 *
 * Error protocol: an unknown attempt id aborts the AM; an unregistered AM
 * triggers re-register + request resync (returns null for this round); an
 * invalid node label kills the job; any other failure is retried until
 * MR_AM_TO_RM_WAIT_INTERVAL_MS elapses, then the AM reboots.
 *
 * @return newly allocated containers, or null when the AM had to
 *     re-register with a restarted RM this round
 */
@SuppressWarnings("unchecked")
private List<Container> getResources() throws Exception {
  applyConcurrentTaskLimits();
  // will be null the first time; snapshot so we can detect headroom changes
  Resource headRoom = Resources.clone(getAvailableResources());
  AllocateResponse response;
  /*
   * If contact with RM is lost, the AM will wait MR_AM_TO_RM_WAIT_INTERVAL_MS
   * milliseconds before aborting. During this interval, AM will still try
   * to contact the RM.
   */
  try {
    response = makeRemoteRequest();
    // Reset retry count if no exception occurred.
    retrystartTime = System.currentTimeMillis();
  } catch (ApplicationAttemptNotFoundException e ) {
    // This can happen if the RM has been restarted. If it is in that state,
    // this application must clean itself up.
    eventHandler.handle(new JobEvent(this.getJob().getID(),
      JobEventType.JOB_AM_REBOOT));
    throw new RMContainerAllocationException(
      "Resource Manager doesn't recognize AttemptId: "
          + this.getContext().getApplicationAttemptId(), e);
  } catch (ApplicationMasterNotRegisteredException e) {
    LOG.info("ApplicationMaster is out of sync with ResourceManager,"
        + " hence resync and send outstanding requests.");
    // RM may have restarted, re-register with RM.
    lastResponseID = 0;
    register();
    addOutstandingRequestOnResync();
    return null;
  } catch (InvalidLabelResourceRequestException e) {
    // If Invalid label exception is received means the requested label doesnt
    // have access so killing job in this case.
    String diagMsg = "Requested node-label-expression is invalid: "
        + StringUtils.stringifyException(e);
    LOG.info(diagMsg);
    JobId jobId = this.getJob().getID();
    eventHandler.handle(new JobDiagnosticsUpdateEvent(jobId, diagMsg));
    eventHandler.handle(new JobEvent(jobId, JobEventType.JOB_KILL));
    throw e;
  } catch (Exception e) {
    // This can happen when the connection to the RM has gone down. Keep
    // re-trying until the retryInterval has expired.
    if (System.currentTimeMillis() - retrystartTime >= retryInterval) {
      LOG.error("Could not contact RM after " + retryInterval + " milliseconds.");
      eventHandler.handle(new JobEvent(this.getJob().getID(),
                                       JobEventType.JOB_AM_REBOOT));
      throw new RMContainerAllocationException("Could not contact RM after " +
                                      retryInterval + " milliseconds.");
    }
    // Throw this up to the caller, which may decide to ignore it and
    // continue to attempt to contact the RM.
    throw e;
  }
  Resource newHeadRoom = getAvailableResources();
  List<Container> newContainers = response.getAllocatedContainers();
  // Setting NMTokens
  if (response.getNMTokens() != null) {
    for (NMToken nmToken : response.getNMTokens()) {
      NMTokenCache.setNMToken(nmToken.getNodeId().toString(),
          nmToken.getToken());
    }
  }
  // Setting AMRMToken (RM may roll the master key at any time)
  if (response.getAMRMToken() != null) {
    updateAMRMToken(response.getAMRMToken());
  }
  List<ContainerStatus> finishedContainers =
      response.getCompletedContainersStatuses();
  // propagate preemption requests
  final PreemptionMessage preemptReq = response.getPreemptionMessage();
  if (preemptReq != null) {
    preemptionPolicy.preempt(
        new PreemptionContext(assignedRequests), preemptReq);
  }
  if (newContainers.size() + finishedContainers.size() > 0
      || !headRoom.equals(newHeadRoom)) {
    //something changed, so the reduce schedule must be recomputed
    recalculateReduceSchedule = true;
    if (LOG.isDebugEnabled() && !headRoom.equals(newHeadRoom)) {
      LOG.debug("headroom=" + newHeadRoom);
    }
  }
  if (LOG.isDebugEnabled()) {
    for (Container cont : newContainers) {
      LOG.debug("Received new Container :" + cont);
    }
  }
  //Called on each allocation. Will know about newly blacklisted/added hosts.
  computeIgnoreBlacklisting();
  handleUpdatedNodes(response);
  handleJobPriorityChange(response);
  // Handle receiving the timeline collector address and token for this app.
  MRAppMaster.RunningAppContext appContext =
      (MRAppMaster.RunningAppContext)this.getContext();
  if (appContext.getTimelineV2Client() != null) {
    appContext.getTimelineV2Client().
        setTimelineCollectorInfo(response.getCollectorInfo());
  }
  for (ContainerStatus cont : finishedContainers) {
    processFinishedContainer(cont);
  }
  return newContainers;
}
/**
 * Handles a completed-container status from the RM: releases internal
 * bookkeeping for the attempt, forwards the container diagnostics, and
 * notifies the task attempt and the preemption policy.
 *
 * A container with no known attempt (e.g. already released) is logged and
 * ignored.
 *
 * @param container completed-container status from the allocate response
 */
@SuppressWarnings("unchecked")
@VisibleForTesting
void processFinishedContainer(ContainerStatus container) {
  LOG.info("Received completed container " + container.getContainerId());
  TaskAttemptId attemptID = assignedRequests.get(container.getContainerId());
  if (attemptID == null) {
    LOG.error("Container complete event for unknown container "
        + container.getContainerId());
  } else {
    pendingRelease.remove(container.getContainerId());
    assignedRequests.remove(attemptID);
    // Send the diagnostics; intern since many containers share messages
    String diagnostic = StringInterner.weakIntern(container.getDiagnostics());
    eventHandler.handle(new TaskAttemptDiagnosticsUpdateEvent(attemptID,
        diagnostic));
    // send the container completed event to Task attempt
    eventHandler.handle(createContainerFinishedEvent(container, attemptID));
    preemptionPolicy.handleCompletedContainer(attemptID);
  }
}
/**
 * Caps the number of outstanding map and reduce container requests so the
 * job never runs more than the configured concurrent task limits
 * (mapreduce.job.running.{map,reduce}.limit semantics).
 *
 * Fast-fail (earlier-failed) maps are budgeted first, then normal maps.
 * A non-positive limit disables the cap for that task type.
 */
private void applyConcurrentTaskLimits() {
  int numScheduledMaps = scheduledRequests.maps.size();
  if (maxRunningMaps > 0 && numScheduledMaps > 0 &&
      getJob().getTotalMaps() > maxRunningMaps) {
    // budget left after currently-assigned maps are accounted for
    int maxRequestedMaps = Math.max(0,
        maxRunningMaps - assignedRequests.maps.size());
    int numScheduledFailMaps = scheduledRequests.earlierFailedMaps.size();
    // fast-fail maps get first claim on the budget
    int failedMapRequestLimit = Math.min(maxRequestedMaps,
        numScheduledFailMaps);
    int normalMapRequestLimit = Math.min(
        maxRequestedMaps - failedMapRequestLimit,
        numScheduledMaps - numScheduledFailMaps);
    setRequestLimit(PRIORITY_FAST_FAIL_MAP, mapResourceRequest,
        failedMapRequestLimit);
    setRequestLimit(PRIORITY_MAP, mapResourceRequest, normalMapRequestLimit);
    setRequestLimit(PRIORITY_OPPORTUNISTIC_MAP, mapResourceRequest,
        normalMapRequestLimit);
  }
  int numScheduledReduces = scheduledRequests.reduces.size();
  if (maxRunningReduces > 0 && numScheduledReduces > 0 &&
      getJob().getTotalReduces() > maxRunningReduces) {
    int maxRequestedReduces = Math.max(0,
        maxRunningReduces - assignedRequests.reduces.size());
    int reduceRequestLimit = Math.min(maxRequestedReduces,
        numScheduledReduces);
    setRequestLimit(PRIORITY_REDUCE, reduceResourceRequest,
        reduceRequestLimit);
  }
}
/**
 * @return true if another map may be assigned without exceeding the
 *     concurrent-map limit (a non-positive limit disables the cap)
 */
private boolean canAssignMaps() {
  if (maxRunningMaps <= 0) {
    return true;
  }
  return assignedRequests.maps.size() < maxRunningMaps;
}
/**
 * @return true if another reduce may be assigned without exceeding the
 *     concurrent-reduce limit (a non-positive limit disables the cap)
 */
private boolean canAssignReduces() {
  if (maxRunningReduces <= 0) {
    return true;
  }
  return assignedRequests.reduces.size() < maxRunningReduces;
}
/**
 * Installs a rolled-over AMRM token (received from the RM in an allocate
 * response) into the current user's credentials so subsequent RPCs
 * authenticate with the new key.
 *
 * @param token protobuf token from the allocate response
 * @throws IOException if the current UGI cannot be obtained
 */
private void updateAMRMToken(Token token) throws IOException {
  // Convert the protobuf token into a security Token<AMRMTokenIdentifier>.
  org.apache.hadoop.security.token.Token<AMRMTokenIdentifier> amrmToken =
      new org.apache.hadoop.security.token.Token<AMRMTokenIdentifier>(token
        .getIdentifier().array(), token.getPassword().array(), new Text(
        token.getKind()), new Text(token.getService()));
  UserGroupInformation currentUGI = UserGroupInformation.getCurrentUser();
  currentUGI.addToken(amrmToken);
  // Point the token at the AMRM service address from the current config.
  amrmToken.setService(ClientRMProxy.getAMRMTokenService(getConfig()));
}
/**
 * Maps a completed container's exit status to the task-attempt event that
 * should be delivered for it.
 *
 * Containers torn down by YARN itself (aborted, preempted, or killed by
 * the container scheduler) become TA_KILL so the attempt is not blamed;
 * everything else is a normal container completion.
 *
 * @param cont completed container status
 * @param attemptId attempt that ran in the container
 * @return the event to deliver to the task attempt
 */
@VisibleForTesting
public TaskAttemptEvent createContainerFinishedEvent(ContainerStatus cont,
    TaskAttemptId attemptId) {
  final int exitStatus = cont.getExitStatus();
  final TaskAttemptEventType eventType;
  if (exitStatus == ContainerExitStatus.ABORTED
      || exitStatus == ContainerExitStatus.PREEMPTED
      || exitStatus == ContainerExitStatus.KILLED_BY_CONTAINER_SCHEDULER) {
    // killed by YARN
    eventType = TaskAttemptEventType.TA_KILL;
  } else {
    eventType = TaskAttemptEventType.TA_CONTAINER_COMPLETED;
  }
  return new TaskAttemptEvent(attemptId, eventType);
}
/**
 * Reacts to node state changes reported in an allocate response: notifies
 * the job (so completed work that lived on now-unusable nodes can be
 * re-run) and kills any task attempts still running on those nodes.
 *
 * Fix: replaces the redundant {@code (i == 0) ? true : false} with the
 * boolean expression itself; also flattens the original single-branch
 * wrapper into an early return. Behavior is otherwise unchanged.
 *
 * @param response allocate response possibly carrying node reports
 */
@SuppressWarnings("unchecked")
private void handleUpdatedNodes(AllocateResponse response) {
  List<NodeReport> updatedNodes = response.getUpdatedNodes();
  if (updatedNodes.isEmpty()) {
    return;
  }
  // send event to the job to act upon completed tasks
  eventHandler.handle(new JobUpdatedNodesEvent(getJob().getID(),
      updatedNodes));
  // collect the nodes that can no longer run containers
  HashSet<NodeId> unusableNodes = new HashSet<NodeId>();
  for (NodeReport nr : updatedNodes) {
    if (nr.getNodeState().isUnusable()) {
      unusableNodes.add(nr.getNodeId());
    }
  }
  // pass 0 walks assigned maps, pass 1 assigned reduces
  for (int i = 0; i < 2; ++i) {
    HashMap<TaskAttemptId, Container> taskSet = i == 0 ? assignedRequests.maps
        : assignedRequests.reduces;
    // kill running containers
    for (Map.Entry<TaskAttemptId, Container> entry : taskSet.entrySet()) {
      TaskAttemptId tid = entry.getKey();
      NodeId taskAttemptNodeId = entry.getValue().getNodeId();
      if (unusableNodes.contains(taskAttemptNodeId)) {
        LOG.info("Killing taskAttempt:" + tid
            + " because it is running on unusable node:"
            + taskAttemptNodeId);
        // If map, reschedule next task attempt; reduces are not rescheduled.
        boolean rescheduleNextAttempt = (i == 0);
        eventHandler.handle(new TaskAttemptKillEvent(tid,
            "TaskAttempt killed because it ran on unusable node"
                + taskAttemptNodeId, rescheduleNextAttempt));
      }
    }
  }
}
/**
 * Mirrors a runtime application-priority change from the RM onto the job.
 * A null priority in the response means no change.
 *
 * @param response allocate response possibly carrying a new priority
 */
void handleJobPriorityChange(AllocateResponse response) {
  Priority applicationPriority = response.getApplicationPriority();
  if (applicationPriority == null) {
    return;
  }
  // Copy the value into a fresh Priority and update the job directly.
  getJob().setJobPriority(
      Priority.newInstance(applicationPriority.getPriority()));
}
/**
 * Computes the job's total usable resources: current headroom reported by
 * the scheduler plus everything already consumed by assigned map and
 * reduce containers.
 *
 * @return headroom + assigned map resources + assigned reduce resources
 */
@Private
public Resource getResourceLimit() {
  Resource headRoom = getAvailableResources();
  Resource assignedMapResource =
      Resources.multiply(mapResourceRequest, assignedRequests.maps.size());
  Resource assignedReduceResource =
      Resources.multiply(reduceResourceRequest,
          assignedRequests.reduces.size());
  return Resources.add(headRoom,
      Resources.add(assignedMapResource, assignedReduceResource));
}
@Private
@VisibleForTesting
|
RMContainerAllocator
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ConstEnumCounters.java
|
{
"start": 874,
"end": 1046
}
|
enum ____.
*
* It's the const version of EnumCounters. Any modification ends with a
* ConstEnumException.
*
* @see org.apache.hadoop.hdfs.util.EnumCounters
*/
public
|
type
|
java
|
apache__flink
|
flink-formats/flink-json/src/main/java/org/apache/flink/formats/json/RowDataToJsonConverters.java
|
{
"start": 4047,
"end": 16525
}
|
interface ____ extends Serializable {
JsonNode convert(ObjectMapper mapper, JsonNode reuse, Object value);
}
/** Creates a runtime converter which is null safe. */
public RowDataToJsonConverter createConverter(LogicalType type) {
return wrapIntoNullableConverter(createNotNullConverter(type));
}
/** Creates a runtime converter which assuming input object is not null. */
private RowDataToJsonConverter createNotNullConverter(LogicalType type) {
switch (type.getTypeRoot()) {
case NULL:
return (mapper, reuse, value) -> mapper.getNodeFactory().nullNode();
case BOOLEAN:
return (mapper, reuse, value) ->
mapper.getNodeFactory().booleanNode((boolean) value);
case TINYINT:
return (mapper, reuse, value) -> mapper.getNodeFactory().numberNode((byte) value);
case SMALLINT:
return (mapper, reuse, value) -> mapper.getNodeFactory().numberNode((short) value);
case INTEGER:
case INTERVAL_YEAR_MONTH:
return (mapper, reuse, value) -> mapper.getNodeFactory().numberNode((int) value);
case BIGINT:
case INTERVAL_DAY_TIME:
return (mapper, reuse, value) -> mapper.getNodeFactory().numberNode((long) value);
case FLOAT:
return (mapper, reuse, value) -> mapper.getNodeFactory().numberNode((float) value);
case DOUBLE:
return (mapper, reuse, value) -> mapper.getNodeFactory().numberNode((double) value);
case CHAR:
case VARCHAR:
// value is BinaryString
return (mapper, reuse, value) -> mapper.getNodeFactory().textNode(value.toString());
case BINARY:
case VARBINARY:
return (mapper, reuse, value) -> mapper.getNodeFactory().binaryNode((byte[]) value);
case DATE:
return createDateConverter();
case TIME_WITHOUT_TIME_ZONE:
return createTimeConverter();
case TIMESTAMP_WITHOUT_TIME_ZONE:
return createTimestampConverter();
case TIMESTAMP_WITH_LOCAL_TIME_ZONE:
return createTimestampWithLocalZone();
case DECIMAL:
return createDecimalConverter();
case ARRAY:
return createArrayConverter((ArrayType) type);
case MAP:
MapType mapType = (MapType) type;
return createMapConverter(
mapType.asSummaryString(), mapType.getKeyType(), mapType.getValueType());
case MULTISET:
MultisetType multisetType = (MultisetType) type;
return createMapConverter(
multisetType.asSummaryString(),
multisetType.getElementType(),
new IntType());
case ROW:
return createRowConverter((RowType) type);
case RAW:
default:
throw new UnsupportedOperationException("Not support to parse type: " + type);
}
}
private RowDataToJsonConverter createDecimalConverter() {
return (mapper, reuse, value) -> {
BigDecimal bd = ((DecimalData) value).toBigDecimal();
return mapper.getNodeFactory()
.numberNode(
mapper.isEnabled(WRITE_BIGDECIMAL_AS_PLAIN)
? bd
: bd.stripTrailingZeros());
};
}
private RowDataToJsonConverter createDateConverter() {
return (mapper, reuse, value) -> {
int days = (int) value;
LocalDate date = LocalDate.ofEpochDay(days);
return mapper.getNodeFactory().textNode(ISO_LOCAL_DATE.format(date));
};
}
private RowDataToJsonConverter createTimeConverter() {
return (mapper, reuse, value) -> {
int millisecond = (int) value;
LocalTime time = LocalTime.ofSecondOfDay(millisecond / 1000L);
return mapper.getNodeFactory().textNode(SQL_TIME_FORMAT.format(time));
};
}
private RowDataToJsonConverter createTimestampConverter() {
switch (timestampFormat) {
case ISO_8601:
return (mapper, reuse, value) -> {
TimestampData timestamp = (TimestampData) value;
return mapper.getNodeFactory()
.textNode(ISO8601_TIMESTAMP_FORMAT.format(timestamp.toLocalDateTime()));
};
case SQL:
return (mapper, reuse, value) -> {
TimestampData timestamp = (TimestampData) value;
return mapper.getNodeFactory()
.textNode(SQL_TIMESTAMP_FORMAT.format(timestamp.toLocalDateTime()));
};
default:
throw new TableException(
"Unsupported timestamp format. Validator should have checked that.");
}
}
private RowDataToJsonConverter createTimestampWithLocalZone() {
switch (timestampFormat) {
case ISO_8601:
return (mapper, reuse, value) -> {
TimestampData timestampWithLocalZone = (TimestampData) value;
return mapper.getNodeFactory()
.textNode(
ISO8601_TIMESTAMP_WITH_LOCAL_TIMEZONE_FORMAT.format(
timestampWithLocalZone
.toInstant()
.atOffset(ZoneOffset.UTC)));
};
case SQL:
return (mapper, reuse, value) -> {
TimestampData timestampWithLocalZone = (TimestampData) value;
return mapper.getNodeFactory()
.textNode(
SQL_TIMESTAMP_WITH_LOCAL_TIMEZONE_FORMAT.format(
timestampWithLocalZone
.toInstant()
.atOffset(ZoneOffset.UTC)));
};
default:
throw new TableException(
"Unsupported timestamp format. Validator should have checked that.");
}
}
private RowDataToJsonConverter createArrayConverter(ArrayType type) {
final LogicalType elementType = type.getElementType();
final RowDataToJsonConverter elementConverter = createConverter(elementType);
final ArrayData.ElementGetter elementGetter = ArrayData.createElementGetter(elementType);
return (mapper, reuse, value) -> {
ArrayNode node;
// reuse could be a NullNode if last record is null.
if (reuse == null || reuse.isNull()) {
node = mapper.createArrayNode();
} else {
node = (ArrayNode) reuse;
node.removeAll();
}
ArrayData array = (ArrayData) value;
int numElements = array.size();
for (int i = 0; i < numElements; i++) {
Object element = elementGetter.getElementOrNull(array, i);
node.add(elementConverter.convert(mapper, null, element));
}
return node;
};
}
private RowDataToJsonConverter createMapConverter(
String typeSummary, LogicalType keyType, LogicalType valueType) {
if (!keyType.is(LogicalTypeFamily.CHARACTER_STRING)) {
throw new UnsupportedOperationException(
"JSON format doesn't support non-string as key type of map. "
+ "The type is: "
+ typeSummary);
}
final RowDataToJsonConverter valueConverter = createConverter(valueType);
final ArrayData.ElementGetter valueGetter = ArrayData.createElementGetter(valueType);
return (mapper, reuse, object) -> {
ObjectNode node;
// reuse could be a NullNode if last record is null.
if (reuse == null || reuse.isNull()) {
node = mapper.createObjectNode();
} else {
node = (ObjectNode) reuse;
node.removeAll();
}
MapData map = (MapData) object;
ArrayData keyArray = map.keyArray();
ArrayData valueArray = map.valueArray();
int numElements = map.size();
for (int i = 0; i < numElements; i++) {
String fieldName = null;
if (keyArray.isNullAt(i)) {
// when map key is null
switch (mapNullKeyMode) {
case LITERAL:
fieldName = mapNullKeyLiteral;
break;
case DROP:
continue;
case FAIL:
throw new RuntimeException(
String.format(
"JSON format doesn't support to serialize map data with null keys. "
+ "You can drop null key entries or encode null in literals by specifying %s option.",
JsonFormatOptions.MAP_NULL_KEY_MODE.key()));
default:
throw new RuntimeException(
"Unsupported map null key mode. Validator should have checked that.");
}
} else {
fieldName = keyArray.getString(i).toString();
}
Object value = valueGetter.getElementOrNull(valueArray, i);
node.set(fieldName, valueConverter.convert(mapper, node.get(fieldName), value));
}
return node;
};
}
private RowDataToJsonConverter createRowConverter(RowType type) {
final String[] fieldNames = type.getFieldNames().toArray(new String[0]);
final LogicalType[] fieldTypes =
type.getFields().stream()
.map(RowType.RowField::getType)
.toArray(LogicalType[]::new);
final RowDataToJsonConverter[] fieldConverters =
Arrays.stream(fieldTypes)
.map(this::createConverter)
.toArray(RowDataToJsonConverter[]::new);
final int fieldCount = type.getFieldCount();
final RowData.FieldGetter[] fieldGetters = new RowData.FieldGetter[fieldTypes.length];
for (int i = 0; i < fieldCount; i++) {
fieldGetters[i] = RowData.createFieldGetter(fieldTypes[i], i);
}
return (mapper, reuse, value) -> {
ObjectNode node;
// reuse could be a NullNode if last record is null.
if (reuse == null || reuse.isNull()) {
node = mapper.createObjectNode();
} else {
node = (ObjectNode) reuse;
}
RowData row = (RowData) value;
for (int i = 0; i < fieldCount; i++) {
String fieldName = fieldNames[i];
try {
Object field = fieldGetters[i].getFieldOrNull(row);
if (field != null || !ignoreNullFields) {
node.set(
fieldName,
fieldConverters[i].convert(mapper, node.get(fieldName), field));
}
} catch (Throwable t) {
throw new RuntimeException(
String.format("Fail to serialize at field: %s.", fieldName), t);
}
}
return node;
};
}
private RowDataToJsonConverter wrapIntoNullableConverter(RowDataToJsonConverter converter) {
return (mapper, reuse, object) -> {
if (object == null) {
return mapper.getNodeFactory().nullNode();
}
return converter.convert(mapper, reuse, object);
};
}
}
|
RowDataToJsonConverter
|
java
|
apache__kafka
|
connect/transforms/src/main/java/org/apache/kafka/connect/transforms/HoistField.java
|
{
"start": 4544,
"end": 5130
}
|
class ____<R extends ConnectRecord<R>> extends HoistField<R> {
@Override
protected Schema operatingSchema(R record) {
return record.valueSchema();
}
@Override
protected Object operatingValue(R record) {
return record.value();
}
@Override
protected R newRecord(R record, Schema updatedSchema, Object updatedValue) {
return record.newRecord(record.topic(), record.kafkaPartition(), record.keySchema(), record.key(), updatedSchema, updatedValue, record.timestamp());
}
}
}
|
Value
|
java
|
mybatis__mybatis-3
|
src/test/java/org/apache/ibatis/reflection/ReflectorTest.java
|
{
"start": 3419,
"end": 5762
}
|
class ____ extends AbstractEntity implements Entity<Long> {
}
@Test
void shouldResolveSetterParam() {
ReflectorFactory reflectorFactory = new DefaultReflectorFactory();
Reflector reflector = reflectorFactory.findForClass(Child.class);
assertEquals(String.class, reflector.getSetterType("id"));
}
@Test
void shouldResolveParameterizedSetterParam() {
ReflectorFactory reflectorFactory = new DefaultReflectorFactory();
Reflector reflector = reflectorFactory.findForClass(Child.class);
assertEquals(List.class, reflector.getSetterType("list"));
}
@Test
void shouldResolveArraySetterParam() {
ReflectorFactory reflectorFactory = new DefaultReflectorFactory();
Reflector reflector = reflectorFactory.findForClass(Child.class);
Class<?> clazz = reflector.getSetterType("array");
assertTrue(clazz.isArray());
assertEquals(String.class, clazz.getComponentType());
}
@Test
void shouldResolveGetterType() {
ReflectorFactory reflectorFactory = new DefaultReflectorFactory();
Reflector reflector = reflectorFactory.findForClass(Child.class);
assertEquals(String.class, reflector.getGetterType("id"));
}
@Test
void shouldResolveSetterTypeFromPrivateField() {
ReflectorFactory reflectorFactory = new DefaultReflectorFactory();
Reflector reflector = reflectorFactory.findForClass(Child.class);
assertEquals(String.class, reflector.getSetterType("fld"));
}
@Test
void shouldResolveGetterTypeFromPublicField() {
ReflectorFactory reflectorFactory = new DefaultReflectorFactory();
Reflector reflector = reflectorFactory.findForClass(Child.class);
assertEquals(String.class, reflector.getGetterType("pubFld"));
}
@Test
void shouldResolveParameterizedGetterType() {
ReflectorFactory reflectorFactory = new DefaultReflectorFactory();
Reflector reflector = reflectorFactory.findForClass(Child.class);
assertEquals(List.class, reflector.getGetterType("list"));
}
@Test
void shouldResolveArrayGetterType() {
ReflectorFactory reflectorFactory = new DefaultReflectorFactory();
Reflector reflector = reflectorFactory.findForClass(Child.class);
Class<?> clazz = reflector.getGetterType("array");
assertTrue(clazz.isArray());
assertEquals(String.class, clazz.getComponentType());
}
abstract static
|
Section
|
java
|
apache__camel
|
dsl/camel-jbang/camel-jbang-it/src/test/java/org/apache/camel/dsl/jbang/it/RouteFromDirITCase.java
|
{
"start": 978,
"end": 1284
}
|
class ____ extends JBangTestSupport {
@Test
public void runFromDirTest() throws IOException {
copyResourceInDataFolder(TestResources.DIR_ROUTE);
executeBackground(String.format("run --source-dir=%s", mountPoint()));
checkLogContains("Hello world!");
}
}
|
RouteFromDirITCase
|
java
|
google__guice
|
core/test/com/google/inject/InjectorTest.java
|
{
"start": 1263,
"end": 1329
}
|
interface ____ {}
@Retention(RUNTIME)
@BindingAnnotation
@
|
Other
|
java
|
apache__camel
|
test-infra/camel-test-infra-kafka/src/main/java/org/apache/camel/test/infra/kafka/services/RedpandaInfraService.java
|
{
"start": 1549,
"end": 2537
}
|
class ____ implements KafkaInfraService, ContainerService<RedpandaContainer> {
private static final Logger LOG = LoggerFactory.getLogger(RedpandaInfraService.class);
private final RedpandaContainer redpandaContainer;
public RedpandaInfraService() {
this("redpanda-" + TestUtils.randomWithRange(1, 100));
}
public RedpandaInfraService(String redpandaInstanceName) {
Network network = Network.newNetwork();
redpandaContainer = initRedpandaContainer(network, redpandaInstanceName);
String name = ContainerEnvironmentUtil.containerName(this.getClass());
if (name != null) {
redpandaContainer.withCreateContainerCmdModifier(cmd -> cmd.withName(name));
}
}
public RedpandaInfraService(RedpandaContainer redpandaContainer) {
this.redpandaContainer = redpandaContainer;
}
protected RedpandaContainer initRedpandaContainer(Network network, String instanceName) {
|
RedpandaInfraService
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/sql/ast/tree/select/QueryGroup.java
|
{
"start": 365,
"end": 1614
}
|
class ____ extends QueryPart {
private final SetOperator setOperator;
private final List<QueryPart> queryParts;
public QueryGroup(boolean isRoot, SetOperator setOperator, List<QueryPart> queryParts) {
super( isRoot );
this.setOperator = setOperator;
this.queryParts = queryParts;
}
@Override
public QuerySpec getFirstQuerySpec() {
return queryParts.get( 0 ).getFirstQuerySpec();
}
@Override
public QuerySpec getLastQuerySpec() {
return queryParts.get( queryParts.size() - 1 ).getLastQuerySpec();
}
@Override
public void visitQuerySpecs(Consumer<QuerySpec> querySpecConsumer) {
for ( int i = 0; i < queryParts.size(); i++ ) {
queryParts.get( i ).visitQuerySpecs( querySpecConsumer );
}
}
@Override
public <T> T queryQuerySpecs(Function<QuerySpec, T> querySpecConsumer) {
for ( int i = 0; i < queryParts.size(); i++ ) {
T result = queryParts.get( i ).queryQuerySpecs( querySpecConsumer );
if ( result != null ) {
return result;
}
}
return null;
}
public SetOperator getSetOperator() {
return setOperator;
}
public List<QueryPart> getQueryParts() {
return queryParts;
}
@Override
public void accept(SqlAstWalker sqlTreeWalker) {
sqlTreeWalker.visitQueryGroup( this );
}
}
|
QueryGroup
|
java
|
apache__camel
|
core/camel-support/src/main/java/org/apache/camel/support/component/PropertiesInterceptor.java
|
{
"start": 943,
"end": 1264
}
|
interface ____ {
/**
* Intercept method invocation arguments used to find and invoke API method. Can be overridden to add custom/hidden
* method arguments.
*
* @param properties method invocation arguments.
*/
void interceptProperties(Map<String, Object> properties);
}
|
PropertiesInterceptor
|
java
|
elastic__elasticsearch
|
server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/tasks/PendingTasksBlocksIT.java
|
{
"start": 1309,
"end": 3602
}
|
class ____ extends ESIntegTestCase {
public void testPendingTasksWithIndexBlocks() {
createIndex("test");
ensureGreen("test");
// This test checks that the Pending Cluster Tasks operation is never blocked, even if an index is read only or whatever.
for (String blockSetting : Arrays.asList(
SETTING_BLOCKS_READ,
SETTING_BLOCKS_WRITE,
SETTING_READ_ONLY,
SETTING_BLOCKS_METADATA,
SETTING_READ_ONLY_ALLOW_DELETE
)) {
try {
enableIndexBlock("test", blockSetting);
PendingClusterTasksResponse response = getClusterPendingTasks();
assertNotNull(response.pendingTasks());
} finally {
disableIndexBlock("test", blockSetting);
}
}
}
public void testPendingTasksWithClusterReadOnlyBlock() {
if (randomBoolean()) {
createIndex("test");
ensureGreen("test");
}
try {
setClusterReadOnly(true);
PendingClusterTasksResponse response = getClusterPendingTasks();
assertNotNull(response.pendingTasks());
} finally {
setClusterReadOnly(false);
}
}
public void testPendingTasksWithClusterNotRecoveredBlock() throws Exception {
if (randomBoolean()) {
createIndex("test");
ensureGreen("test");
}
// restart the cluster but prevent it from performing state recovery
final int nodeCount = clusterAdmin().prepareNodesInfo("data:true").get().getNodes().size();
internalCluster().fullRestart(new InternalTestCluster.RestartCallback() {
@Override
public Settings onNodeStopped(String nodeName) {
return Settings.builder().put(GatewayService.RECOVER_AFTER_DATA_NODES_SETTING.getKey(), nodeCount + 1).build();
}
@Override
public boolean validateClusterForming() {
return false;
}
});
assertNotNull(getClusterPendingTasks().pendingTasks());
// starting one more node allows the cluster to recover
internalCluster().startNode();
ensureGreen();
}
}
|
PendingTasksBlocksIT
|
java
|
micronaut-projects__micronaut-core
|
inject-java/src/test/groovy/io/micronaut/inject/lifecycle/proxytargetbeanprototypewithpredestroy/Root.java
|
{
"start": 733,
"end": 886
}
|
class ____ {
public final B b;
public Root(B b) {
this.b = b;
}
void triggerProxyInitializeForB() {
b.getA();
}
}
|
Root
|
java
|
google__auto
|
value/src/test/java/com/google/auto/value/processor/ExtensionTest.java
|
{
"start": 48505,
"end": 50485
}
|
class ____ {",
" abstract Builder setString(String x);",
" abstract Baz oddBuild();",
" Baz build(int butNotReallyBecauseOfThisParameter) {",
" return null;",
" }",
" }",
"}");
ContextChecker checker =
context -> {
assertThat(context.builder()).isPresent();
BuilderContext builderContext = context.builder().get();
assertThat(builderContext.builderMethods()).isEmpty();
assertThat(builderContext.toBuilderMethods()).isEmpty();
assertThat(builderContext.buildMethod()).isEmpty();
assertThat(builderContext.autoBuildMethod().getSimpleName().toString())
.isEqualTo("oddBuild");
Map<String, Set<ExecutableElement>> setters = builderContext.setters();
assertThat(setters.keySet()).containsExactly("string");
Set<ExecutableElement> thingSetters = setters.get("string");
assertThat(thingSetters).hasSize(1);
ExecutableElement thingSetter = Iterables.getOnlyElement(thingSetters);
assertThat(thingSetter.getSimpleName().toString()).isEqualTo("setString");
assertThat(builderContext.propertyBuilders()).isEmpty();
};
ContextCheckingExtension extension = new ContextCheckingExtension(checker);
Compilation compilation =
javac()
.withProcessors(new AutoValueProcessor(ImmutableList.of(extension)))
.compile(autoValueClass);
assertThat(compilation).succeededWithoutWarnings();
}
// https://github.com/google/auto/issues/809
@Test
public void propertyErrorShouldNotCrash() {
JavaFileObject autoValueClass =
JavaFileObjects.forSourceLines(
"test.Test",
"package test;",
"import com.google.auto.value.AutoValue;",
"import java.util.List;",
"",
"@AutoValue",
"public abstract
|
Builder
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/capabilities/PostPhysicalOptimizationVerificationAware.java
|
{
"start": 467,
"end": 730
}
|
interface ____ {
/**
* Validates the implementing expression - discovered failures are reported to the given
* {@link Failures} class.
*/
void postPhysicalOptimizationVerification(Failures failures);
}
|
PostPhysicalOptimizationVerificationAware
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/issues/ChangeHeaderCaseIssueTest.java
|
{
"start": 1262,
"end": 2622
}
|
class ____ extends ContextTestSupport {
@Test
public void testChangeHeaderCaseIssue() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedBodiesReceived("Hello World");
mock.expectedHeaderReceived("SoapAction", "cool");
template.sendBodyAndHeader("direct:start", "Hello World", "SOAPAction", "cool");
assertMockEndpointsSatisfied();
// only the changed case header should exist
Map<String, Object> headers = new HashMap<>(mock.getReceivedExchanges().get(0).getIn().getHeaders());
assertEquals("cool", headers.get("SoapAction"));
assertNull(headers.get("SOAPAction"));
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
interceptSendToEndpoint("mock:result").process(new Processor() {
public void process(Exchange exchange) {
// change the case of the header
Object value = exchange.getIn().removeHeader("SOAPAction");
exchange.getIn().setHeader("SoapAction", value);
}
});
from("direct:start").to("mock:result");
}
};
}
}
|
ChangeHeaderCaseIssueTest
|
java
|
google__dagger
|
javatests/dagger/internal/codegen/ComponentValidationTest.java
|
{
"start": 13102,
"end": 13422
}
|
interface ____ {",
"}");
Source mediumLifetime =
CompilerTests.javaSource(
"test.ComponentMedium",
"package test;",
"",
"import dagger.Component;",
"",
"@Component(dependencies = ComponentLong.class)",
"
|
ComponentLong
|
java
|
apache__flink
|
flink-filesystems/flink-s3-fs-base/src/main/java/com/amazonaws/services/s3/model/transform/XmlResponsesSaxParser.java
|
{
"start": 37755,
"end": 40064
}
|
class ____ extends AbstractHandler {
private final List<Bucket> buckets = new ArrayList<Bucket>();
private Owner bucketsOwner = null;
private Bucket currentBucket = null;
/**
* @return the buckets listed in the document.
*/
public List<Bucket> getBuckets() {
return buckets;
}
/**
* @return the owner of the buckets.
*/
public Owner getOwner() {
return bucketsOwner;
}
@Override
protected void doStartElement(String uri, String name, String qName, Attributes attrs) {
if (in("ListAllMyBucketsResult")) {
if (name.equals("Owner")) {
bucketsOwner = new Owner();
}
} else if (in("ListAllMyBucketsResult", "Buckets")) {
if (name.equals("Bucket")) {
currentBucket = new Bucket();
currentBucket.setOwner(bucketsOwner);
}
}
}
@Override
protected void doEndElement(String uri, String name, String qName) {
if (in("ListAllMyBucketsResult", "Owner")) {
if (name.equals("ID")) {
bucketsOwner.setId(getText());
} else if (name.equals("DisplayName")) {
bucketsOwner.setDisplayName(getText());
}
} else if (in("ListAllMyBucketsResult", "Buckets")) {
if (name.equals("Bucket")) {
buckets.add(currentBucket);
currentBucket = null;
}
} else if (in("ListAllMyBucketsResult", "Buckets", "Bucket")) {
if (name.equals("Name")) {
currentBucket.setName(getText());
} else if (name.equals("CreationDate")) {
Date creationDate = DateUtils.parseISO8601Date(getText());
currentBucket.setCreationDate(creationDate);
}
}
}
}
/**
* Handler for AccessControlList response XML documents. The document is parsed into an {@link
* AccessControlList} object available via the {@link #getAccessControlList()} method.
*/
public static
|
ListAllMyBucketsHandler
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpRequestLog.java
|
{
"start": 1179,
"end": 2031
}
|
class ____ {
public static final Logger LOG =
LoggerFactory.getLogger(HttpRequestLog.class);
private static final Map<String, String> serverToComponent;
static {
Map<String, String > map = new HashMap<String, String>();
map.put("cluster", "resourcemanager");
map.put("hdfs", "namenode");
map.put("node", "nodemanager");
serverToComponent = Collections.unmodifiableMap(map);
}
public static RequestLog getRequestLog(String name) {
String lookup = serverToComponent.get(name);
if (lookup != null) {
name = lookup;
}
String loggerName = "http.requests." + name;
Slf4jRequestLogWriter writer = new Slf4jRequestLogWriter();
writer.setLoggerName(loggerName);
return new CustomRequestLog(writer, CustomRequestLog.EXTENDED_NCSA_FORMAT);
}
private HttpRequestLog() {
}
}
|
HttpRequestLog
|
java
|
apache__rocketmq
|
client/src/test/java/org/apache/rocketmq/client/impl/consumer/DefaultLitePullConsumerImplTest.java
|
{
"start": 1198,
"end": 3845
}
|
class ____ {
private final DefaultLitePullConsumerImpl consumer = new DefaultLitePullConsumerImpl(new DefaultLitePullConsumer(), null);
private static Method isSetEqualMethod;
@BeforeClass
public static void initReflectionMethod() throws NoSuchMethodException {
Class<DefaultLitePullConsumerImpl> consumerClass = DefaultLitePullConsumerImpl.class;
Method testMethod = consumerClass.getDeclaredMethod("isSetEqual", Set.class, Set.class);
testMethod.setAccessible(true);
isSetEqualMethod = testMethod;
}
/**
* The two empty sets should be equal
*/
@Test
public void testIsSetEqual1() throws InvocationTargetException, IllegalAccessException {
Set<MessageQueue> set1 = new HashSet<>();
Set<MessageQueue> set2 = new HashSet<>();
boolean equalResult = (boolean) isSetEqualMethod.invoke(consumer, set1, set2);
Assert.assertTrue(equalResult);
}
/**
* When a set has elements and one does not, the two sets are not equal
*/
@Test
public void testIsSetEqual2() throws InvocationTargetException, IllegalAccessException {
Set<MessageQueue> set1 = new HashSet<>();
set1.add(new MessageQueue("testTopic","testBroker",111));
Set<MessageQueue> set2 = new HashSet<>();
boolean equalResult = (boolean) isSetEqualMethod.invoke(consumer, set1, set2);
Assert.assertFalse(equalResult);
}
/**
* The two null sets should be equal
*/
@Test
public void testIsSetEqual3() throws InvocationTargetException, IllegalAccessException {
Set<MessageQueue> set1 = null;
Set<MessageQueue> set2 = null;
boolean equalResult = (boolean) isSetEqualMethod.invoke(consumer, set1, set2);
Assert.assertTrue(equalResult);
}
@Test
public void testIsSetEqual4() throws InvocationTargetException, IllegalAccessException {
Set<MessageQueue> set1 = null;
Set<MessageQueue> set2 = new HashSet<>();
boolean equalResult = (boolean) isSetEqualMethod.invoke(consumer, set1, set2);
Assert.assertFalse(equalResult);
}
@Test
public void testIsSetEqual5() throws InvocationTargetException, IllegalAccessException {
Set<MessageQueue> set1 = new HashSet<>();
set1.add(new MessageQueue("testTopic","testBroker",111));
Set<MessageQueue> set2 = new HashSet<>();
set2.add(new MessageQueue("testTopic","testBroker",111));
boolean equalResult = (boolean) isSetEqualMethod.invoke(consumer, set1, set2);
Assert.assertTrue(equalResult);
}
}
|
DefaultLitePullConsumerImplTest
|
java
|
quarkusio__quarkus
|
extensions/micrometer/deployment/src/test/java/io/quarkus/micrometer/deployment/export/JsonRegistryEnabledTest.java
|
{
"start": 515,
"end": 2243
}
|
class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withConfigurationResource("test-logging.properties")
.overrideConfigKey("quarkus.http.root-path", "/app")
.overrideConfigKey("quarkus.http.non-application-root-path", "relative")
.overrideConfigKey("quarkus.micrometer.binder-enabled-default", "false")
.overrideConfigKey("quarkus.micrometer.export.json.enabled", "true")
.overrideConfigKey("quarkus.micrometer.registry-enabled-default", "false")
.overrideConfigKey("quarkus.redis.devservices.enabled", "false")
.withEmptyApplication();
@Inject
MeterRegistry registry;
@Inject
JsonMeterRegistry jsonMeterRegistry;
@Test
public void testMeterRegistryPresent() {
// Prometheus is enabled (only registry)
Assertions.assertNotNull(registry, "A registry should be configured");
Set<MeterRegistry> subRegistries = ((CompositeMeterRegistry) registry).getRegistries();
JsonMeterRegistry subPromRegistry = (JsonMeterRegistry) subRegistries.iterator().next();
Assertions.assertEquals(JsonMeterRegistry.class, subPromRegistry.getClass(), "Should be JsonMeterRegistry");
Assertions.assertEquals(subPromRegistry, jsonMeterRegistry,
"The only MeterRegistry should be the same bean as the JsonMeterRegistry");
}
@Test
public void metricsEndpoint() {
// RestAssured prepends /app for us
RestAssured.given()
.accept("application/json")
.get("/relative/metrics")
.then()
.statusCode(200);
}
}
|
JsonRegistryEnabledTest
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtractConstantMillisEvaluator.java
|
{
"start": 4307,
"end": 5196
}
|
class ____ implements EvalOperator.ExpressionEvaluator.Factory {
private final Source source;
private final EvalOperator.ExpressionEvaluator.Factory value;
private final ChronoField chronoField;
private final ZoneId zone;
public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory value,
ChronoField chronoField, ZoneId zone) {
this.source = source;
this.value = value;
this.chronoField = chronoField;
this.zone = zone;
}
@Override
public DateExtractConstantMillisEvaluator get(DriverContext context) {
return new DateExtractConstantMillisEvaluator(source, value.get(context), chronoField, zone, context);
}
@Override
public String toString() {
return "DateExtractConstantMillisEvaluator[" + "value=" + value + ", chronoField=" + chronoField + ", zone=" + zone + "]";
}
}
}
|
Factory
|
java
|
quarkusio__quarkus
|
extensions/resteasy-reactive/rest-client/runtime/src/main/java/io/quarkus/rest/client/reactive/runtime/spi/JsonMissingMessageBodyReaderErrorMessageContextualizer.java
|
{
"start": 198,
"end": 667
}
|
class ____ implements
MissingMessageBodyReaderErrorMessageContextualizer {
@Override
public String provideContextMessage(Input input) {
if ((input.mediaType() != null) && input.mediaType().isCompatible(MediaType.APPLICATION_JSON_TYPE)) {
return "Consider adding one the 'quarkus-rest-client-jackson' or 'quarkus-rest-client-jsonb' extensions";
}
return null;
}
}
|
JsonMissingMessageBodyReaderErrorMessageContextualizer
|
java
|
apache__camel
|
components/camel-sjms/src/generated/java/org/apache/camel/component/sjms/SjmsEndpointUriFactory.java
|
{
"start": 514,
"end": 4027
}
|
class ____ extends org.apache.camel.support.component.EndpointUriFactorySupport implements EndpointUriFactory {
private static final String BASE = ":destinationType:destinationName";
private static final Set<String> PROPERTY_NAMES;
private static final Set<String> SECRET_PROPERTY_NAMES;
private static final Map<String, String> MULTI_VALUE_PREFIXES;
static {
Set<String> props = new HashSet<>(47);
props.add("acknowledgementMode");
props.add("allowNullBody");
props.add("asyncConsumer");
props.add("asyncStartListener");
props.add("asyncStopListener");
props.add("autoStartup");
props.add("bridgeErrorHandler");
props.add("clientId");
props.add("concurrentConsumers");
props.add("connectionFactory");
props.add("deliveryMode");
props.add("deliveryPersistent");
props.add("destinationCreationStrategy");
props.add("destinationName");
props.add("destinationType");
props.add("disableReplyTo");
props.add("disableTimeToLive");
props.add("durableSubscriptionName");
props.add("eagerLoadingOfProperties");
props.add("eagerPoisonBody");
props.add("exceptionHandler");
props.add("exceptionListener");
props.add("exchangePattern");
props.add("explicitQosEnabled");
props.add("headerFilterStrategy");
props.add("includeAllJMSXProperties");
props.add("jmsKeyFormatStrategy");
props.add("jmsMessageType");
props.add("lazyStartProducer");
props.add("mapJmsMessage");
props.add("messageCreatedStrategy");
props.add("messageSelector");
props.add("preserveMessageQos");
props.add("priority");
props.add("recoveryInterval");
props.add("replyTo");
props.add("replyToConcurrentConsumers");
props.add("replyToDeliveryPersistent");
props.add("replyToOverride");
props.add("replyToSameDestinationAllowed");
props.add("replyToType");
props.add("requestTimeout");
props.add("synchronous");
props.add("testConnectionOnStartup");
props.add("timeToLive");
props.add("transacted");
props.add("transferException");
PROPERTY_NAMES = Collections.unmodifiableSet(props);
SECRET_PROPERTY_NAMES = Collections.emptySet();
MULTI_VALUE_PREFIXES = Collections.emptyMap();
}
@Override
public boolean isEnabled(String scheme) {
return "sjms".equals(scheme);
}
@Override
public String buildUri(String scheme, Map<String, Object> properties, boolean encode) throws URISyntaxException {
String syntax = scheme + BASE;
String uri = syntax;
Map<String, Object> copy = new HashMap<>(properties);
uri = buildPathParameter(syntax, uri, "destinationType", "queue", false, copy);
uri = buildPathParameter(syntax, uri, "destinationName", null, true, copy);
uri = buildQueryParameters(uri, copy, encode);
return uri;
}
@Override
public Set<String> propertyNames() {
return PROPERTY_NAMES;
}
@Override
public Set<String> secretPropertyNames() {
return SECRET_PROPERTY_NAMES;
}
@Override
public Map<String, String> multiValuePrefixes() {
return MULTI_VALUE_PREFIXES;
}
@Override
public boolean isLenientProperties() {
return false;
}
}
|
SjmsEndpointUriFactory
|
java
|
mapstruct__mapstruct
|
processor/src/test/java/org/mapstruct/ap/test/bugs/_1685/ContactDataDTO.java
|
{
"start": 222,
"end": 1158
}
|
class ____ {
private String email;
private String phone;
private String address;
private List<String> preferences;
private String[] settings;
public String getEmail() {
return email;
}
public void setEmail(String email) {
this.email = email;
}
public String getPhone() {
return phone;
}
public void setPhone(String phone) {
this.phone = phone;
}
public String getAddress() {
return address;
}
public void setAddress(String address) {
this.address = address;
}
public List<String> getPreferences() {
return preferences;
}
public void setPreferences(List<String> preferences) {
this.preferences = preferences;
}
public String[] getSettings() {
return settings;
}
public void setSettings(String[] settings) {
this.settings = settings;
}
}
|
ContactDataDTO
|
java
|
apache__logging-log4j2
|
log4j-api/src/main/java/org/apache/logging/log4j/util/InternalException.java
|
{
"start": 1034,
"end": 1896
}
|
class ____ extends RuntimeException {
private static final long serialVersionUID = 6366395965071580537L;
/**
* Construct an exception with a message.
*
* @param message The reason for the exception
*/
public InternalException(final String message) {
super(message);
}
/**
* Construct an exception with a message and underlying cause.
*
* @param message The reason for the exception
* @param cause The underlying cause of the exception
*/
public InternalException(final String message, final Throwable cause) {
super(message, cause);
}
/**
* Construct an exception with an underlying cause.
*
* @param cause The underlying cause of the exception
*/
public InternalException(final Throwable cause) {
super(cause);
}
}
|
InternalException
|
java
|
apache__maven
|
compat/maven-plugin-api/src/main/java/org/apache/maven/plugin/MojoFailureException.java
|
{
"start": 1033,
"end": 2603
}
|
class ____ extends AbstractMojoExecutionException {
/**
* Construct a new <code>MojoFailureException</code> exception providing the source and a short and long message:
* these messages are used to improve the message written at the end of Maven build.
*
* @param source
* @param shortMessage
* @param longMessage
*/
public MojoFailureException(Object source, String shortMessage, String longMessage) {
super(shortMessage);
this.source = source;
this.longMessage = longMessage;
}
/**
* Construct a new <code>MojoFailureException</code> exception providing a message.
*
* @param message
*/
public MojoFailureException(String message) {
super(message);
}
/**
* Construct a new <code>MojoFailureException</code> exception wrapping an underlying <code>Throwable</code>
* and providing a <code>message</code>.
*
* @param message
* @param cause
* @since 2.0.9
*/
public MojoFailureException(String message, Throwable cause) {
super(message, cause);
}
/**
* Constructs a new {@code MojoFailureException} exception wrapping an underlying {@code Throwable}.
*
* @param cause the cause which is saved for later retrieval by the {@link #getCause()} method.
* A {@code null} value is permitted, and indicates that the cause is nonexistent or unknown.
* @since 3.8.3
*/
public MojoFailureException(Throwable cause) {
super(cause);
}
}
|
MojoFailureException
|
java
|
apache__rocketmq
|
remoting/src/main/java/org/apache/rocketmq/remoting/protocol/filter/FilterAPI.java
|
{
"start": 1066,
"end": 3061
}
|
class ____ {
public static SubscriptionData buildSubscriptionData(String topic, String subString) throws Exception {
final SubscriptionData subscriptionData = new SubscriptionData();
subscriptionData.setTopic(topic);
subscriptionData.setSubString(subString);
if (StringUtils.isEmpty(subString) || subString.equals(SubscriptionData.SUB_ALL)) {
subscriptionData.setSubString(SubscriptionData.SUB_ALL);
return subscriptionData;
}
String[] tags = subString.split("\\|\\|");
if (tags.length > 0) {
Arrays.stream(tags).map(String::trim).filter(tag -> !tag.isEmpty()).forEach(tag -> {
subscriptionData.getTagsSet().add(tag);
subscriptionData.getCodeSet().add(tag.hashCode());
});
} else {
throw new Exception("subString split error");
}
return subscriptionData;
}
public static SubscriptionData buildSubscriptionData(String topic, String subString, String expressionType) throws Exception {
final SubscriptionData subscriptionData = buildSubscriptionData(topic, subString);
if (StringUtils.isNotBlank(expressionType)) {
subscriptionData.setExpressionType(expressionType);
}
return subscriptionData;
}
public static SubscriptionData build(final String topic, final String subString,
final String type) throws Exception {
if (ExpressionType.TAG.equals(type) || type == null) {
return buildSubscriptionData(topic, subString);
}
if (StringUtils.isEmpty(subString)) {
throw new IllegalArgumentException("Expression can't be null! " + type);
}
SubscriptionData subscriptionData = new SubscriptionData();
subscriptionData.setTopic(topic);
subscriptionData.setSubString(subString);
subscriptionData.setExpressionType(type);
return subscriptionData;
}
}
|
FilterAPI
|
java
|
apache__rocketmq
|
proxy/src/test/java/org/apache/rocketmq/proxy/grpc/v2/client/ClientActivityTest.java
|
{
"start": 3527,
"end": 19084
}
|
class ____ extends BaseActivityTest {
private static final String TOPIC = "topic";
private static final String CONSUMER_GROUP = "consumerGroup";
private ClientActivity clientActivity;
@Mock
private GrpcChannelManager grpcChannelManagerMock;
@Mock
private CompletableFuture<ProxyRelayResult<ConsumerRunningInfo>> runningInfoFutureMock;
@Captor
ArgumentCaptor<ProxyRelayResult<ConsumerRunningInfo>> runningInfoArgumentCaptor;
@Mock
private CompletableFuture<ProxyRelayResult<ConsumeMessageDirectlyResult>> resultFutureMock;
@Captor
ArgumentCaptor<ProxyRelayResult<ConsumeMessageDirectlyResult>> resultArgumentCaptor;
@Before
public void before() throws Throwable {
super.before();
this.clientActivity = new ClientActivity(this.messagingProcessor, this.grpcClientSettingsManager, grpcChannelManager);
}
protected TelemetryCommand sendProducerTelemetry(ProxyContext context) throws Throwable {
return this.sendClientTelemetry(
context,
Settings.newBuilder()
.setClientType(ClientType.PRODUCER)
.setPublishing(Publishing.newBuilder()
.addTopics(Resource.newBuilder().setName(TOPIC).build())
.build())
.build()).get();
}
protected HeartbeatResponse sendProducerHeartbeat(ProxyContext context) throws Throwable {
return this.clientActivity.heartbeat(context, HeartbeatRequest.newBuilder()
.setClientType(ClientType.PRODUCER)
.build()).get();
}
@Test
public void testProducerHeartbeat() throws Throwable {
ProxyContext context = createContext();
this.sendProducerTelemetry(context);
ArgumentCaptor<String> registerProducerGroupArgumentCaptor = ArgumentCaptor.forClass(String.class);
ArgumentCaptor<ClientChannelInfo> channelInfoArgumentCaptor = ArgumentCaptor.forClass(ClientChannelInfo.class);
doNothing().when(this.messagingProcessor).registerProducer(any(),
registerProducerGroupArgumentCaptor.capture(),
channelInfoArgumentCaptor.capture());
ArgumentCaptor<String> txProducerGroupArgumentCaptor = ArgumentCaptor.forClass(String.class);
ArgumentCaptor<String> txProducerTopicArgumentCaptor = ArgumentCaptor.forClass(String.class);
doNothing().when(this.messagingProcessor).addTransactionSubscription(any(),
txProducerGroupArgumentCaptor.capture(),
txProducerTopicArgumentCaptor.capture()
);
when(this.metadataService.getTopicMessageType(any(), anyString())).thenReturn(TopicMessageType.TRANSACTION);
HeartbeatResponse response = this.sendProducerHeartbeat(context);
assertEquals(Code.OK, response.getStatus().getCode());
assertEquals(Lists.newArrayList(TOPIC), registerProducerGroupArgumentCaptor.getAllValues());
ClientChannelInfo clientChannelInfo = channelInfoArgumentCaptor.getValue();
assertClientChannelInfo(clientChannelInfo, TOPIC);
assertEquals(Lists.newArrayList(TOPIC), txProducerGroupArgumentCaptor.getAllValues());
assertEquals(Lists.newArrayList(TOPIC), txProducerTopicArgumentCaptor.getAllValues());
}
protected TelemetryCommand sendConsumerTelemetry(ProxyContext context) throws Throwable {
return this.sendClientTelemetry(
context,
Settings.newBuilder()
.setClientType(ClientType.PUSH_CONSUMER)
.setSubscription(Subscription.newBuilder()
.setGroup(Resource.newBuilder().setName("Group").build())
.addSubscriptions(SubscriptionEntry.newBuilder()
.setExpression(FilterExpression.newBuilder()
.setExpression("tag")
.setType(FilterType.TAG)
.build())
.setTopic(Resource.newBuilder().setName(TOPIC).build())
.build())
.build())
.build()).get();
}
protected HeartbeatResponse sendConsumerHeartbeat(ProxyContext context) throws Throwable {
return this.clientActivity.heartbeat(context, HeartbeatRequest.newBuilder()
.setClientType(ClientType.PUSH_CONSUMER)
.setGroup(Resource.newBuilder().setName(CONSUMER_GROUP).build())
.build()).get();
}
@Test
public void testConsumerHeartbeat() throws Throwable {
ProxyContext context = createContext();
this.sendConsumerTelemetry(context);
ArgumentCaptor<Set<SubscriptionData>> subscriptionDatasArgumentCaptor = ArgumentCaptor.forClass(Set.class);
ArgumentCaptor<ClientChannelInfo> channelInfoArgumentCaptor = ArgumentCaptor.forClass(ClientChannelInfo.class);
doNothing().when(this.messagingProcessor).registerConsumer(any(),
anyString(),
channelInfoArgumentCaptor.capture(),
any(),
any(),
any(),
subscriptionDatasArgumentCaptor.capture(),
anyBoolean()
);
HeartbeatResponse response = this.sendConsumerHeartbeat(context);
assertEquals(Code.OK, response.getStatus().getCode());
ClientChannelInfo clientChannelInfo = channelInfoArgumentCaptor.getValue();
assertClientChannelInfo(clientChannelInfo, CONSUMER_GROUP);
SubscriptionData data = subscriptionDatasArgumentCaptor.getValue().stream().findAny().get();
assertEquals("TAG", data.getExpressionType());
assertEquals("tag", data.getSubString());
}
protected void assertClientChannelInfo(ClientChannelInfo clientChannelInfo, String group) {
assertEquals(LanguageCode.JAVA, clientChannelInfo.getLanguage());
assertEquals(CLIENT_ID, clientChannelInfo.getClientId());
assertTrue(clientChannelInfo.getChannel() instanceof GrpcClientChannel);
GrpcClientChannel channel = (GrpcClientChannel) clientChannelInfo.getChannel();
assertEquals(REMOTE_ADDR, channel.getRemoteAddress());
assertEquals(LOCAL_ADDR, channel.getLocalAddress());
}
@Test
public void testProducerNotifyClientTermination() throws Throwable {
ProxyContext context = createContext();
when(this.grpcClientSettingsManager.removeAndGetClientSettings(any())).thenReturn(Settings.newBuilder()
.setClientType(ClientType.PRODUCER)
.setPublishing(Publishing.newBuilder()
.addTopics(Resource.newBuilder().setName(TOPIC).build())
.build())
.build());
ArgumentCaptor<ClientChannelInfo> channelInfoArgumentCaptor = ArgumentCaptor.forClass(ClientChannelInfo.class);
doNothing().when(this.messagingProcessor).unRegisterProducer(any(), anyString(), channelInfoArgumentCaptor.capture());
when(this.metadataService.getTopicMessageType(any(), anyString())).thenReturn(TopicMessageType.NORMAL);
this.sendProducerTelemetry(context);
this.sendProducerHeartbeat(context);
NotifyClientTerminationResponse response = this.clientActivity.notifyClientTermination(
context,
NotifyClientTerminationRequest.newBuilder()
.build()
).get();
assertEquals(Code.OK, response.getStatus().getCode());
ClientChannelInfo clientChannelInfo = channelInfoArgumentCaptor.getValue();
assertClientChannelInfo(clientChannelInfo, TOPIC);
}
@Test
public void testConsumerNotifyClientTermination() throws Throwable {
ProxyContext context = createContext();
when(this.grpcClientSettingsManager.removeAndGetClientSettings(any())).thenReturn(Settings.newBuilder()
.setClientType(ClientType.PUSH_CONSUMER)
.build());
ArgumentCaptor<ClientChannelInfo> channelInfoArgumentCaptor = ArgumentCaptor.forClass(ClientChannelInfo.class);
doNothing().when(this.messagingProcessor).unRegisterConsumer(any(), anyString(), channelInfoArgumentCaptor.capture());
this.sendConsumerTelemetry(context);
this.sendConsumerHeartbeat(context);
NotifyClientTerminationResponse response = this.clientActivity.notifyClientTermination(
context,
NotifyClientTerminationRequest.newBuilder()
.setGroup(Resource.newBuilder().setName(CONSUMER_GROUP).build())
.build()
).get();
assertEquals(Code.OK, response.getStatus().getCode());
ClientChannelInfo clientChannelInfo = channelInfoArgumentCaptor.getValue();
assertClientChannelInfo(clientChannelInfo, CONSUMER_GROUP);
}
@Test
public void testErrorConsumerGroupName() throws Throwable {
ProxyContext context = createContext();
try {
this.sendClientTelemetry(
context,
Settings.newBuilder()
.setClientType(ClientType.PUSH_CONSUMER)
.setSubscription(Subscription.newBuilder()
.addSubscriptions(SubscriptionEntry.newBuilder()
.setExpression(FilterExpression.newBuilder()
.setExpression("tag")
.setType(FilterType.TAG)
.build())
.setTopic(Resource.newBuilder().setName(TOPIC).build())
.build())
.build())
.build()).get();
fail();
} catch (ExecutionException e) {
StatusRuntimeException exception = (StatusRuntimeException) e.getCause();
assertEquals(Status.Code.INVALID_ARGUMENT, exception.getStatus().getCode());
}
}
@Test
public void testErrorProducerConfig() throws Throwable {
ProxyContext context = createContext();
try {
this.sendClientTelemetry(
context,
Settings.newBuilder()
.setClientType(ClientType.PRODUCER)
.setPublishing(Publishing.newBuilder()
.addTopics(Resource.newBuilder().setName("()").build())
.build())
.build()).get();
fail();
} catch (ExecutionException e) {
StatusRuntimeException exception = (StatusRuntimeException) e.getCause();
assertEquals(Status.Code.INVALID_ARGUMENT, exception.getStatus().getCode());
}
}
@Test
public void testEmptySettings() throws Throwable {
ProxyContext context = createContext();
try {
this.sendClientTelemetry(
context,
Settings.getDefaultInstance()).get();
fail();
} catch (ExecutionException e) {
StatusRuntimeException exception = (StatusRuntimeException) e.getCause();
assertEquals(Status.Code.INVALID_ARGUMENT, exception.getStatus().getCode());
}
}
@Test
public void testEmptyProducerSettings() throws Throwable {
ProxyContext context = createContext();
TelemetryCommand command = this.sendClientTelemetry(
context,
Settings.newBuilder()
.setClientType(ClientType.PRODUCER)
.setPublishing(Publishing.getDefaultInstance())
.build()).get();
assertTrue(command.hasSettings());
assertTrue(command.getSettings().hasPublishing());
}
@Test
public void testReportThreadStackTrace() {
this.clientActivity = new ClientActivity(this.messagingProcessor, this.grpcClientSettingsManager, grpcChannelManagerMock);
String jstack = "jstack";
String nonce = "123";
when(grpcChannelManagerMock.getAndRemoveResponseFuture(anyString())).thenReturn((CompletableFuture) runningInfoFutureMock);
ProxyContext context = createContext();
ContextStreamObserver<TelemetryCommand> streamObserver = clientActivity.telemetry(new StreamObserver<TelemetryCommand>() {
@Override
public void onNext(TelemetryCommand value) {
}
@Override
public void onError(Throwable t) {
}
@Override
public void onCompleted() {
}
});
streamObserver.onNext(context, TelemetryCommand.newBuilder()
.setThreadStackTrace(ThreadStackTrace.newBuilder()
.setThreadStackTrace(jstack)
.setNonce(nonce)
.build())
.setStatus(ResponseBuilder.getInstance().buildStatus(Code.OK, Code.OK.name()))
.build());
verify(runningInfoFutureMock, times(1)).complete(runningInfoArgumentCaptor.capture());
ProxyRelayResult<ConsumerRunningInfo> result = runningInfoArgumentCaptor.getValue();
assertThat(result.getCode()).isEqualTo(ResponseCode.SUCCESS);
assertThat(result.getResult().getJstack()).isEqualTo(jstack);
}
@Test
public void testReportVerifyMessageResult() {
this.clientActivity = new ClientActivity(this.messagingProcessor, this.grpcClientSettingsManager, grpcChannelManagerMock);
String nonce = "123";
when(grpcChannelManagerMock.getAndRemoveResponseFuture(anyString())).thenReturn((CompletableFuture) resultFutureMock);
ProxyContext context = createContext();
ContextStreamObserver<TelemetryCommand> streamObserver = clientActivity.telemetry(new StreamObserver<TelemetryCommand>() {
@Override
public void onNext(TelemetryCommand value) {
}
@Override
public void onError(Throwable t) {
}
@Override
public void onCompleted() {
}
});
streamObserver.onNext(context, TelemetryCommand.newBuilder()
.setVerifyMessageResult(VerifyMessageResult.newBuilder()
.setNonce(nonce)
.build())
.setStatus(ResponseBuilder.getInstance().buildStatus(Code.OK, Code.OK.name()))
.build());
verify(resultFutureMock, times(1)).complete(resultArgumentCaptor.capture());
ProxyRelayResult<ConsumeMessageDirectlyResult> result = resultArgumentCaptor.getValue();
assertThat(result.getCode()).isEqualTo(ResponseCode.SUCCESS);
assertThat(result.getResult().getConsumeResult()).isEqualTo(CMResult.CR_SUCCESS);
}
protected CompletableFuture<TelemetryCommand> sendClientTelemetry(ProxyContext ctx, Settings settings) {
when(grpcClientSettingsManager.getClientSettings(any())).thenReturn(settings);
CompletableFuture<TelemetryCommand> future = new CompletableFuture<>();
StreamObserver<TelemetryCommand> responseObserver = new StreamObserver<TelemetryCommand>() {
@Override
public void onNext(TelemetryCommand value) {
future.complete(value);
}
@Override
public void onError(Throwable t) {
future.completeExceptionally(t);
}
@Override
public void onCompleted() {
}
};
ContextStreamObserver<TelemetryCommand> requestObserver = this.clientActivity.telemetry(responseObserver);
requestObserver.onNext(ctx, TelemetryCommand.newBuilder()
.setSettings(settings)
.build());
return future;
}
}
|
ClientActivityTest
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/TopIntLongAggregator.java
|
{
"start": 1185,
"end": 1415
}
|
class ____ generated. Edit `X-TopAggregator.java.st` to edit this file.
* </p>
*/
@Aggregator({ @IntermediateState(name = "top", type = "INT_BLOCK"), @IntermediateState(name = "output", type = "LONG_BLOCK") })
@GroupingAggregator
|
is
|
java
|
apache__camel
|
components/camel-azure/camel-azure-cosmosdb/src/main/java/org/apache/camel/component/azure/cosmosdb/CosmosDbProducer.java
|
{
"start": 2160,
"end": 21050
}
|
class ____ extends DefaultAsyncProducer {
private static final Logger LOG = LoggerFactory.getLogger(CosmosDbProducer.class);
private CosmosAsyncClientWrapper clientWrapper;
private CosmosDbConfigurationOptionsProxy configurationOptionsProxy;
private final Map<CosmosDbOperationsDefinition, BiConsumer<Exchange, AsyncCallback>> operations
= new EnumMap<>(CosmosDbOperationsDefinition.class);
{
bind(CosmosDbOperationsDefinition.listDatabases, listDatabases());
bind(CosmosDbOperationsDefinition.createDatabase, createDatabase());
bind(CosmosDbOperationsDefinition.queryDatabases, queryDatabases());
bind(CosmosDbOperationsDefinition.deleteDatabase, deleteDatabase());
bind(CosmosDbOperationsDefinition.createContainer, createContainer());
bind(CosmosDbOperationsDefinition.listContainers, listContainers());
bind(CosmosDbOperationsDefinition.queryContainers, queryContainers());
bind(CosmosDbOperationsDefinition.replaceDatabaseThroughput, replaceDatabaseThroughput());
bind(CosmosDbOperationsDefinition.deleteContainer, deleteContainer());
bind(CosmosDbOperationsDefinition.replaceContainerThroughput, replaceContainerThroughput());
bind(CosmosDbOperationsDefinition.createItem, createItem());
bind(CosmosDbOperationsDefinition.upsertItem, upsertItem());
bind(CosmosDbOperationsDefinition.deleteItem, deleteItem());
bind(CosmosDbOperationsDefinition.replaceItem, replaceItem());
bind(CosmosDbOperationsDefinition.readItem, readItem());
bind(CosmosDbOperationsDefinition.readAllItems, readAllItems());
bind(CosmosDbOperationsDefinition.queryItems, queryItems());
}
public CosmosDbProducer(final Endpoint endpoint) {
super(endpoint);
}
@Override
protected void doInit() throws Exception {
super.doInit();
this.clientWrapper = new CosmosAsyncClientWrapper(getEndpoint().getCosmosAsyncClient());
this.configurationOptionsProxy = new CosmosDbConfigurationOptionsProxy(getConfiguration());
}
@Override
public boolean process(Exchange exchange, AsyncCallback callback) {
try {
invokeOperation(configurationOptionsProxy.getOperation(exchange), exchange, callback);
return false;
} catch (Exception e) {
exchange.setException(e);
callback.done(true);
return true;
}
}
@Override
public CosmosDbEndpoint getEndpoint() {
return (CosmosDbEndpoint) super.getEndpoint();
}
private void bind(CosmosDbOperationsDefinition operation, BiConsumer<Exchange, AsyncCallback> fn) {
operations.put(operation, fn);
}
/**
* Entry method that selects the appropriate CosmosDbOperations operation and executes it
*/
private void invokeOperation(
final CosmosDbOperationsDefinition operation, final Exchange exchange, final AsyncCallback callback) {
final CosmosDbOperationsDefinition operationsToInvoke;
// we put listDatabases operation as default in case no operation has been selected
if (ObjectHelper.isEmpty(operation)) {
operationsToInvoke = CosmosDbOperationsDefinition.listDatabases;
} else {
operationsToInvoke = operation;
}
final BiConsumer<Exchange, AsyncCallback> fnToInvoke = operations.get(operationsToInvoke);
if (fnToInvoke != null) {
fnToInvoke.accept(exchange, callback);
} else {
throw new RuntimeCamelException("Operation not supported. Value: " + operationsToInvoke);
}
}
private CosmosDbConfiguration getConfiguration() {
return getEndpoint().getConfiguration();
}
private BiConsumer<Exchange, AsyncCallback> listDatabases() {
return (exchange, callback) -> {
final Mono<List<CosmosDatabaseProperties>> operation = CosmosDbClientOperations.withClient(clientWrapper)
.readAllDatabases()
.collectList();
subscribeToMono(operation, exchange, results -> setMessageBody(exchange, results), callback);
};
}
private BiConsumer<Exchange, AsyncCallback> createDatabase() {
return (exchange, callback) -> {
final Mono<CosmosDatabaseResponse> operation = CosmosDbClientOperations.withClient(clientWrapper)
.createDatabase(configurationOptionsProxy.getDatabaseName(exchange),
configurationOptionsProxy.getThroughputProperties(exchange));
subscribeToMono(operation, exchange, setCosmosDatabaseResponseOnExchange(exchange), callback);
};
}
private BiConsumer<Exchange, AsyncCallback> queryDatabases() {
return (exchange, callback) -> {
final Mono<List<CosmosDatabaseProperties>> operation = CosmosDbClientOperations.withClient(clientWrapper)
.queryDatabases(configurationOptionsProxy.getQuery(exchange),
configurationOptionsProxy.getQueryRequestOptions(exchange))
.collectList();
subscribeToMono(operation, exchange, results -> setMessageBody(exchange, results), callback);
};
}
private BiConsumer<Exchange, AsyncCallback> deleteDatabase() {
return (exchange, callback) -> {
final Mono<CosmosDatabaseResponse> operation = CosmosDbClientOperations.withClient(clientWrapper)
.getDatabaseOperations(configurationOptionsProxy.getDatabaseName(exchange))
.deleteDatabase(configurationOptionsProxy.getCosmosDatabaseRequestOptions(exchange));
subscribeToMono(operation, exchange, setCosmosDatabaseResponseOnExchange(exchange), callback);
};
}
private BiConsumer<Exchange, AsyncCallback> createContainer() {
return (exchange, callback) -> {
final Mono<CosmosContainerResponse> operation = getDatabaseOperations(exchange)
.createContainer(configurationOptionsProxy.getContainerName(exchange),
configurationOptionsProxy.getContainerPartitionKeyPath(exchange),
configurationOptionsProxy.getThroughputProperties(exchange),
configurationOptionsProxy.getIndexingPolicy(exchange));
subscribeToMono(operation, exchange, setCosmosContainerResponseOnExchange(exchange), callback);
};
}
private BiConsumer<Exchange, AsyncCallback> replaceDatabaseThroughput() {
return (exchange, callback) -> {
final Mono<ThroughputResponse> operation = getDatabaseOperations(exchange)
.replaceDatabaseThroughput(configurationOptionsProxy.getThroughputProperties(exchange));
subscribeToMono(operation, exchange, setThroughputResponseOnExchange(exchange), callback);
};
}
private BiConsumer<Exchange, AsyncCallback> listContainers() {
return (exchange, callback) -> {
final Mono<List<CosmosContainerProperties>> operation = getDatabaseOperations(exchange)
.readAllContainers(configurationOptionsProxy.getQueryRequestOptions(exchange))
.collectList();
subscribeToMono(operation, exchange, results -> setMessageBody(exchange, results), callback);
};
}
private BiConsumer<Exchange, AsyncCallback> queryContainers() {
return (exchange, callback) -> {
final Mono<List<CosmosContainerProperties>> operation = getDatabaseOperations(exchange)
.queryContainers(configurationOptionsProxy.getQuery(exchange),
configurationOptionsProxy.getQueryRequestOptions(exchange))
.collectList();
subscribeToMono(operation, exchange, results -> setMessageBody(exchange, results), callback);
};
}
private BiConsumer<Exchange, AsyncCallback> deleteContainer() {
return (exchange, callback) -> {
final Mono<CosmosContainerResponse> operation = CosmosDbClientOperations.withClient(clientWrapper)
.getDatabaseOperations(configurationOptionsProxy.getDatabaseName(exchange))
.getContainerOperations(configurationOptionsProxy.getContainerName(exchange))
.deleteContainer(configurationOptionsProxy.getContainerRequestOptions(exchange));
subscribeToMono(operation, exchange, setCosmosContainerResponseOnExchange(exchange), callback);
};
}
private BiConsumer<Exchange, AsyncCallback> replaceContainerThroughput() {
return (exchange, callback) -> {
final Mono<ThroughputResponse> operation = getContainerOperations(exchange)
.replaceContainerThroughput(configurationOptionsProxy.getThroughputProperties(exchange));
subscribeToMono(operation, exchange, setThroughputResponseOnExchange(exchange), callback);
};
}
private BiConsumer<Exchange, AsyncCallback> createItem() {
return (exchange, callback) -> {
final Mono<CosmosItemResponse<Object>> operation = getContainerOperations(exchange)
.createItem(configurationOptionsProxy.getItem(exchange),
configurationOptionsProxy.getItemPartitionKey(exchange),
configurationOptionsProxy.getItemRequestOptions(exchange));
subscribeToMono(operation, exchange, setCosmosItemResponseOnExchange(exchange), callback);
};
}
private BiConsumer<Exchange, AsyncCallback> upsertItem() {
return (exchange, callback) -> {
final Mono<CosmosItemResponse<Object>> operation = getContainerOperations(exchange)
.upsertItem(configurationOptionsProxy.getItem(exchange),
configurationOptionsProxy.getItemPartitionKey(exchange),
configurationOptionsProxy.getItemRequestOptions(exchange));
subscribeToMono(operation, exchange, setCosmosItemResponseOnExchange(exchange), callback);
};
}
private BiConsumer<Exchange, AsyncCallback> deleteItem() {
return (exchange, callback) -> {
final Mono<CosmosItemResponse<Object>> operation = getDatabaseOperations(exchange)
.getContainerOperations(configurationOptionsProxy.getContainerName(exchange))
.deleteItem(configurationOptionsProxy.getItemId(exchange),
configurationOptionsProxy.getItemPartitionKey(exchange),
configurationOptionsProxy.getItemRequestOptions(exchange));
subscribeToMono(operation, exchange, setCosmosItemResponseOnExchange(exchange), callback);
};
}
private BiConsumer<Exchange, AsyncCallback> replaceItem() {
return (exchange, callback) -> {
final Mono<CosmosItemResponse<Object>> operation = getContainerOperations(exchange)
.replaceItem(configurationOptionsProxy.getItem(exchange),
configurationOptionsProxy.getItemId(exchange),
configurationOptionsProxy.getItemPartitionKey(exchange),
configurationOptionsProxy.getItemRequestOptions(exchange));
subscribeToMono(operation, exchange, setCosmosItemResponseOnExchange(exchange), callback);
};
}
private BiConsumer<Exchange, AsyncCallback> readItem() {
return (exchange, callback) -> {
final Mono<CosmosItemResponse<Object>> operation = getContainerOperations(exchange)
.readItem(configurationOptionsProxy.getItemId(exchange),
configurationOptionsProxy.getItemPartitionKey(exchange),
configurationOptionsProxy.getItemRequestOptions(exchange),
Object.class);
subscribeToMono(operation, exchange, setCosmosItemResponseOnExchange(exchange), callback);
};
}
private BiConsumer<Exchange, AsyncCallback> readAllItems() {
return (exchange, callback) -> {
final Mono<List<Object>> operation = getContainerOperations(exchange)
.readAllItems(configurationOptionsProxy.getItemPartitionKey(exchange),
configurationOptionsProxy.getQueryRequestOptions(exchange),
Object.class)
.collectList();
subscribeToMono(operation, exchange, results -> setMessageBody(exchange, results), callback);
};
}
private BiConsumer<Exchange, AsyncCallback> queryItems() {
return (exchange, callback) -> {
final Mono<List<Object>> operation = getContainerOperations(exchange)
.queryItems(configurationOptionsProxy.getQuery(exchange),
configurationOptionsProxy.getQueryRequestOptions(exchange),
Object.class)
.collectList();
subscribeToMono(operation, exchange, results -> setMessageBody(exchange, results), callback);
};
}
private <T> void subscribeToMono(
final Mono<T> inputMono, final Exchange exchange, final Consumer<T> resultsCallback, final AsyncCallback callback) {
inputMono
.subscribe(resultsCallback, error -> {
// error but we continue
if (LOG.isDebugEnabled()) {
LOG.debug("Error processing async exchange with error: {}", error.getMessage());
}
exchange.setException(error);
callback.done(false);
}, () -> {
// we are done from everything, so mark it as sync done
LOG.trace("All events with exchange have been sent successfully.");
callback.done(false);
});
}
private CosmosDbContainerOperations getContainerOperations(final Exchange exchange) {
return CosmosDbOperationsBuilder.withClient(clientWrapper)
.withDatabaseName(configurationOptionsProxy.getDatabaseName(exchange))
.withCreateDatabaseIfNotExist(configurationOptionsProxy.isCreateDatabaseIfNotExist(exchange))
.withThroughputProperties(configurationOptionsProxy.getThroughputProperties(exchange))
.withContainerName(configurationOptionsProxy.getContainerName(exchange))
.withContainerPartitionKeyPath(configurationOptionsProxy.getContainerPartitionKeyPath(exchange))
.withCreateContainerIfNotExist(configurationOptionsProxy.isCreateContainerIfNotExist(exchange))
.withIndexingPolicy(configurationOptionsProxy.getIndexingPolicy(exchange))
.buildContainerOperations();
}
private CosmosDbDatabaseOperations getDatabaseOperations(final Exchange exchange) {
return CosmosDbOperationsBuilder.withClient(clientWrapper)
.withDatabaseName(configurationOptionsProxy.getDatabaseName(exchange))
.withCreateDatabaseIfNotExist(configurationOptionsProxy.isCreateDatabaseIfNotExist(exchange))
.withThroughputProperties(configurationOptionsProxy.getThroughputProperties(exchange))
.buildDatabaseOperations();
}
private Consumer<CosmosDatabaseResponse> setCosmosDatabaseResponseOnExchange(final Exchange exchange) {
return response -> {
if (ObjectHelper.isNotEmpty(response.getProperties())) {
setMessageHeader(exchange, CosmosDbConstants.RESOURCE_ID, response.getProperties().getResourceId());
setMessageHeader(exchange, CosmosDbConstants.E_TAG, response.getProperties().getETag());
setMessageHeader(exchange, CosmosDbConstants.TIMESTAMP, response.getProperties().getTimestamp());
}
setCommonResponseOnExchange(exchange, response);
};
}
private Consumer<CosmosContainerResponse> setCosmosContainerResponseOnExchange(final Exchange exchange) {
return response -> {
if (ObjectHelper.isNotEmpty(response.getProperties())) {
setMessageHeader(exchange, CosmosDbConstants.RESOURCE_ID, response.getProperties().getResourceId());
setMessageHeader(exchange, CosmosDbConstants.E_TAG, response.getProperties().getETag());
setMessageHeader(exchange, CosmosDbConstants.TIMESTAMP, response.getProperties().getTimestamp());
setMessageHeader(exchange, CosmosDbConstants.DEFAULT_TIME_TO_LIVE_SECONDS,
response.getProperties().getDefaultTimeToLiveInSeconds());
}
setCommonResponseOnExchange(exchange, response);
};
}
private Consumer<ThroughputResponse> setThroughputResponseOnExchange(final Exchange exchange) {
return response -> {
if (ObjectHelper.isNotEmpty(response.getProperties())) {
setMessageHeader(exchange, CosmosDbConstants.AUTOSCALE_MAX_THROUGHPUT,
response.getProperties().getAutoscaleMaxThroughput());
setMessageHeader(exchange, CosmosDbConstants.MANUAL_THROUGHPUT, response.getProperties().getManualThroughput());
setMessageHeader(exchange, CosmosDbConstants.E_TAG, response.getProperties().getETag());
setMessageHeader(exchange, CosmosDbConstants.TIMESTAMP, response.getProperties().getTimestamp());
}
setCommonResponseOnExchange(exchange, response);
};
}
private <T> Consumer<CosmosItemResponse<T>> setCosmosItemResponseOnExchange(final Exchange exchange) {
return response -> {
setMessageHeader(exchange, CosmosDbConstants.E_TAG, response.getETag());
setMessageHeader(exchange, CosmosDbConstants.RESPONSE_HEADERS, response.getResponseHeaders());
setMessageHeader(exchange, CosmosDbConstants.STATUS_CODE, response.getStatusCode());
setMessageBody(exchange, response.getItem());
};
}
private <T> void setCommonResponseOnExchange(final Exchange exchange, final CosmosResponse<T> response) {
setMessageHeader(exchange, CosmosDbConstants.RESPONSE_HEADERS, response.getResponseHeaders());
setMessageHeader(exchange, CosmosDbConstants.STATUS_CODE, response.getStatusCode());
}
private void setMessageBody(final Exchange exchange, final Object body) {
exchange.getMessage().setBody(body);
}
private void setMessageHeader(final Exchange exchange, final String headerKey, final Object headerValue) {
exchange.getMessage().setHeader(headerKey, headerValue);
}
}
|
CosmosDbProducer
|
java
|
apache__flink
|
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/sink/FileSink.java
|
{
"start": 31103,
"end": 31543
}
|
class ____<IN>
extends BulkFormatBuilder<IN, DefaultBulkFormatBuilder<IN>> {
private static final long serialVersionUID = 7493169281036370228L;
private DefaultBulkFormatBuilder(
Path basePath,
BulkWriter.Factory<IN> writerFactory,
BucketAssigner<IN, String> assigner) {
super(basePath, writerFactory, assigner);
}
}
}
|
DefaultBulkFormatBuilder
|
java
|
netty__netty
|
common/src/test/java/io/netty/util/concurrent/FastThreadLocalTest.java
|
{
"start": 1795,
"end": 6861
}
|
class ____ {
@BeforeEach
public void setUp() {
FastThreadLocal.removeAll();
assertEquals(0, FastThreadLocal.size());
}
@Test
public void testGetAndSetReturnsOldValue() {
FastThreadLocal<Boolean> threadLocal = new FastThreadLocal<Boolean>() {
@Override
protected Boolean initialValue() {
return Boolean.TRUE;
}
};
assertNull(threadLocal.getAndSet(Boolean.FALSE));
assertEquals(Boolean.FALSE, threadLocal.get());
assertEquals(Boolean.FALSE, threadLocal.getAndSet(Boolean.TRUE));
assertEquals(Boolean.TRUE, threadLocal.get());
threadLocal.remove();
}
@Test
public void testGetIfExists() {
FastThreadLocal<Boolean> threadLocal = new FastThreadLocal<Boolean>() {
@Override
protected Boolean initialValue() {
return Boolean.TRUE;
}
};
assertNull(threadLocal.getIfExists());
assertTrue(threadLocal.get());
assertTrue(threadLocal.getIfExists());
FastThreadLocal.removeAll();
assertNull(threadLocal.getIfExists());
}
@Test
@Timeout(value = 10000, unit = TimeUnit.MILLISECONDS)
public void testRemoveAll() throws Exception {
final AtomicBoolean removed = new AtomicBoolean();
final FastThreadLocal<Boolean> var = new FastThreadLocal<Boolean>() {
@Override
protected void onRemoval(Boolean value) {
removed.set(true);
}
};
// Initialize a thread-local variable.
assertNull(var.get());
assertEquals(1, FastThreadLocal.size());
// And then remove it.
FastThreadLocal.removeAll();
assertTrue(removed.get());
assertEquals(0, FastThreadLocal.size());
}
@Test
@Timeout(value = 10000, unit = TimeUnit.MILLISECONDS)
public void testRemoveAllFromFTLThread() throws Throwable {
final AtomicReference<Throwable> throwable = new AtomicReference<Throwable>();
final Thread thread = new FastThreadLocalThread() {
@Override
public void run() {
try {
testRemoveAll();
} catch (Throwable t) {
throwable.set(t);
}
}
};
thread.start();
thread.join();
Throwable t = throwable.get();
if (t != null) {
throw t;
}
}
@Test
public void testMultipleSetRemove() throws Exception {
final FastThreadLocal<String> threadLocal = new FastThreadLocal<String>();
final Runnable runnable = new Runnable() {
@Override
public void run() {
threadLocal.set("1");
threadLocal.remove();
threadLocal.set("2");
threadLocal.remove();
}
};
final int sizeWhenStart = ObjectCleaner.getLiveSetCount();
Thread thread = new Thread(runnable);
thread.start();
thread.join();
assertEquals(0, ObjectCleaner.getLiveSetCount() - sizeWhenStart);
Thread thread2 = new Thread(runnable);
thread2.start();
thread2.join();
assertEquals(0, ObjectCleaner.getLiveSetCount() - sizeWhenStart);
}
@Test
public void testMultipleSetRemove_multipleThreadLocal() throws Exception {
final FastThreadLocal<String> threadLocal = new FastThreadLocal<String>();
final FastThreadLocal<String> threadLocal2 = new FastThreadLocal<String>();
final Runnable runnable = new Runnable() {
@Override
public void run() {
threadLocal.set("1");
threadLocal.remove();
threadLocal.set("2");
threadLocal.remove();
threadLocal2.set("1");
threadLocal2.remove();
threadLocal2.set("2");
threadLocal2.remove();
}
};
final int sizeWhenStart = ObjectCleaner.getLiveSetCount();
Thread thread = new Thread(runnable);
thread.start();
thread.join();
assertEquals(0, ObjectCleaner.getLiveSetCount() - sizeWhenStart);
Thread thread2 = new Thread(runnable);
thread2.start();
thread2.join();
assertEquals(0, ObjectCleaner.getLiveSetCount() - sizeWhenStart);
}
@Test
public void testWrappedProperties() {
assertFalse(FastThreadLocalThread.currentThreadWillCleanupFastThreadLocals());
assertFalse(FastThreadLocalThread.currentThreadHasFastThreadLocal());
FastThreadLocalThread.runWithFastThreadLocal(() -> {
assertTrue(FastThreadLocalThread.currentThreadWillCleanupFastThreadLocals());
assertTrue(FastThreadLocalThread.currentThreadHasFastThreadLocal());
});
}
@Test
public void testWrapMany() throws ExecutionException, InterruptedException {
|
FastThreadLocalTest
|
java
|
apache__maven
|
its/core-it-suite/src/test/java/org/apache/maven/it/MavenITmng4400RepositoryOrderTest.java
|
{
"start": 1131,
"end": 3231
}
|
class ____ extends AbstractMavenIntegrationTestCase {
/**
* Verify that repositories declared in the settings.xml are accessed in their declaration order.
*
* @throws Exception in case of failure
*/
@Test
public void testitSettingsRepos() throws Exception {
File testDir = extractResources("/mng-4400");
Verifier verifier = newVerifier(new File(testDir, "settings").getAbsolutePath());
verifier.setAutoclean(false);
verifier.deleteArtifacts("org.apache.maven.its.mng4400");
verifier.filterFile("settings-template.xml", "settings.xml");
verifier.addCliArgument("-s");
verifier.addCliArgument("settings.xml");
verifier.addCliArgument("validate");
verifier.execute();
verifier.verifyErrorFreeLog();
Properties checksums = verifier.loadProperties("target/checksum.properties");
assertChecksum("d0a4998ff37a55f8de1dffccdff826eca365400f", checksums);
}
/**
* Verify that repositories declared in the POM are accessed in their declaration order.
*
* @throws Exception in case of failure
*/
@Test
public void testitPomRepos() throws Exception {
File testDir = extractResources("/mng-4400");
Verifier verifier = newVerifier(new File(testDir, "pom").getAbsolutePath());
verifier.setAutoclean(false);
verifier.deleteArtifacts("org.apache.maven.its.mng4400");
verifier.filterFile("pom-template.xml", "pom.xml");
verifier.addCliArgument("-s");
verifier.addCliArgument("settings.xml");
verifier.addCliArgument("validate");
verifier.execute();
verifier.verifyErrorFreeLog();
Properties checksums = verifier.loadProperties("target/checksum.properties");
assertChecksum("d0a4998ff37a55f8de1dffccdff826eca365400f", checksums);
}
private void assertChecksum(String checksum, Properties checksums) {
assertEquals(checksum, checksums.getProperty("dep-0.1.jar").toLowerCase(java.util.Locale.ENGLISH));
}
}
|
MavenITmng4400RepositoryOrderTest
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/jpa/metadata/Address.java
|
{
"start": 268,
"end": 779
}
|
class ____ {
private String address1;
private String address2;
private String city;
@Basic(optional = true)
public String getAddress1() {
return address1;
}
public void setAddress1(String address1) {
this.address1 = address1;
}
@Basic(optional = false)
public String getAddress2() {
return address2;
}
public void setAddress2(String address2) {
this.address2 = address2;
}
public String getCity() {
return city;
}
public void setCity(String city) {
this.city = city;
}
}
|
Address
|
java
|
elastic__elasticsearch
|
build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/SplitPackagesAuditTask.java
|
{
"start": 4605,
"end": 11453
}
|
class ____ implements WorkAction<Parameters> {
@Override
public void execute() {
final Parameters parameters = getParameters();
final String projectPath = parameters.getProjectPath().get();
// First determine all the packages that exist in the dependencies. There might be
// split packages across the dependencies, which is "ok", in that we don't care
// about it for the purpose of this project, that split will be detected in
// the other project
Map<String, List<File>> dependencyPackages = getDependencyPackages();
// Next read each of the source directories and find if we define any package directories
// that match those in our dependencies.
Map<String, Set<String>> splitPackages = findSplitPackages(dependencyPackages.keySet());
// Then filter out any known split packages/classes that we want to ignore.
filterSplitPackages(splitPackages);
// Finally, print out (and fail) if we have any split packages
for (var entry : splitPackages.entrySet()) {
String packageName = entry.getKey();
List<File> deps = dependencyPackages.get(packageName);
List<String> msg = new ArrayList<>();
msg.add("Project " + projectPath + " defines classes in package " + packageName + " exposed by dependencies");
msg.add(" Dependencies:");
deps.forEach(f -> msg.add(" " + formatDependency(f)));
msg.add(" Classes:");
entry.getValue().forEach(c -> msg.add(" '" + c + "',"));
LOGGER.error(String.join(System.lineSeparator(), msg));
}
if (splitPackages.isEmpty() == false) {
throw new GradleException(
"Verification failed: Split packages found! See errors above for details.\n"
+ "DO NOT ADD THESE SPLIT PACKAGES TO THE IGNORE LIST! Choose a new package name for the classes added."
);
}
try {
Files.write(parameters.getMarkerFile().getAsFile().get().toPath(), new byte[] {}, StandardOpenOption.CREATE);
} catch (IOException e) {
throw new RuntimeException("Failed to create marker file", e);
}
}
private Map<String, List<File>> getDependencyPackages() {
Map<String, List<File>> packages = new HashMap<>();
for (File classpathElement : getParameters().getClasspath().getFiles()) {
for (String packageName : readPackages(classpathElement)) {
packages.computeIfAbsent(packageName, k -> new ArrayList<>()).add(classpathElement);
}
}
if (LOGGER.isInfoEnabled()) {
List<String> msg = new ArrayList<>();
msg.add("Packages from dependencies:");
packages.entrySet()
.stream()
.sorted(Map.Entry.comparingByKey())
.forEach(e -> msg.add(" -" + e.getKey() + " -> " + e.getValue()));
LOGGER.info(String.join(System.lineSeparator(), msg));
}
return packages;
}
private Map<String, Set<String>> findSplitPackages(Set<String> dependencyPackages) {
Map<String, Set<String>> splitPackages = new HashMap<>();
for (File srcDir : getParameters().getSrcDirs().get()) {
try {
walkJavaFiles(srcDir.toPath(), ".java", path -> {
String packageName = getPackageName(path);
String className = path.subpath(path.getNameCount() - 1, path.getNameCount()).toString();
className = className.substring(0, className.length() - ".java".length());
LOGGER.info(
"Inspecting "
+ path
+ System.lineSeparator()
+ " package: "
+ packageName
+ System.lineSeparator()
+ " class: "
+ className
);
if (dependencyPackages.contains(packageName)) {
splitPackages.computeIfAbsent(packageName, k -> new TreeSet<>()).add(packageName + "." + className);
}
});
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
if (LOGGER.isInfoEnabled()) {
List<String> msg = new ArrayList<>();
msg.add("Split packages:");
splitPackages.entrySet()
.stream()
.sorted(Map.Entry.comparingByKey())
.forEach(e -> msg.add(" -" + e.getKey() + " -> " + e.getValue()));
LOGGER.info(String.join(System.lineSeparator(), msg));
}
return splitPackages;
}
private void filterSplitPackages(Map<String, Set<String>> splitPackages) {
String lastPackageName = null;
Set<String> currentClasses = null;
boolean filterErrorsFound = false;
for (String fqcn : getParameters().getIgnoreClasses().get().stream().sorted().toList()) {
int lastDot = fqcn.lastIndexOf('.');
if (lastDot == -1) {
LOGGER.error("Missing package in classname in split package ignores: " + fqcn);
filterErrorsFound = true;
continue;
}
String packageName = fqcn.substring(0, lastDot);
String className = fqcn.substring(lastDot + 1);
LOGGER.info("IGNORING package: " + packageName + ", class: " + className);
if (packageName.equals(lastPackageName) == false) {
currentClasses = splitPackages.get(packageName);
lastPackageName = packageName;
}
if (currentClasses == null) {
LOGGER.error("Package is not split: " + fqcn);
filterErrorsFound = true;
} else {
if (className.equals("*")) {
currentClasses.clear();
} else if (currentClasses.remove(fqcn) == false) {
LOGGER.error("Class does not exist: " + fqcn);
filterErrorsFound = true;
}
// cleanup if we have ignored the last
|
SplitPackagesAuditAction
|
java
|
junit-team__junit5
|
platform-tests/src/test/java/org/junit/platform/commons/support/ModifierSupportTests.java
|
{
"start": 7198,
"end": 7267
}
|
class ____ {
abstract void abstractMethod();
}
static
|
AbstractClass
|
java
|
spring-projects__spring-framework
|
spring-test/src/test/java/org/springframework/test/context/jdbc/InferredDataSourceTransactionalSqlScriptsTests.java
|
{
"start": 1844,
"end": 2851
}
|
class ____ {
@Autowired
DataSource dataSource1;
@Autowired
DataSource dataSource2;
@Test
@Transactional("txMgr1")
@Sql(scripts = "data-add-dogbert.sql", config = @SqlConfig(transactionManager = "txMgr1"))
void database1() {
assertThatTransaction().isActive();
assertUsers(new JdbcTemplate(dataSource1), "Dilbert", "Dogbert");
}
@Test
@Transactional("txMgr2")
@Sql(scripts = "data-add-catbert.sql", config = @SqlConfig(transactionManager = "txMgr2"))
void database2() {
assertThatTransaction().isActive();
assertUsers(new JdbcTemplate(dataSource2), "Dilbert", "Catbert");
}
private void assertUsers(JdbcTemplate jdbcTemplate, String... users) {
List<String> expected = Arrays.asList(users);
Collections.sort(expected);
List<String> actual = jdbcTemplate.queryForList("select name from user", String.class);
Collections.sort(actual);
assertThat(actual).as("Users in database;").isEqualTo(expected);
}
@Configuration
static
|
InferredDataSourceTransactionalSqlScriptsTests
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/bvt/sql/ShardingUnwrapTest.java
|
{
"start": 180,
"end": 1521
}
|
class ____ extends TestCase {
SQLASTOutputVisitor visitor = new SQLASTOutputVisitor(new StringBuilder());
public void test_sharding_unwrap() throws Exception {
assertEquals("t_like_count", visitor.unwrapShardingTable("t_like_count0057"));
assertEquals("t_like_count", visitor.unwrapShardingTable("`t_like_count0057`"));
assertEquals("t_like_count", visitor.unwrapShardingTable("\"t_like_count0057\""));
}
public void test_sharding_unwrap_2() throws Exception {
assertEquals("t_like_count", visitor.unwrapShardingTable("t_like_count_0057"));
assertEquals("t_like_count", visitor.unwrapShardingTable("`t_like_count_0057`"));
assertEquals("t_like_count", visitor.unwrapShardingTable("\"t_like_count_0057\""));
}
public void test_sharding_unwrap_3() throws Exception {
assertEquals("fc_sms", visitor.unwrapShardingTable("fc_sms_0011_201704"));
}
public void test_sharding_unwrap_4() throws Exception {
assertEquals("ads_tb_sycm_eff_slr_itm_1d_s015_p", visitor.unwrapShardingTable("ads_tb_sycm_eff_slr_itm_1d_s015_p033"));
}
public void test_sharding_unwrap_5() throws Exception {
assertEquals("t", visitor.unwrapShardingTable("t_00"));
assertEquals("t", visitor.unwrapShardingTable("t_1"));
}
//
}
|
ShardingUnwrapTest
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueUserACLInfoPBImpl.java
|
{
"start": 1484,
"end": 5086
}
|
class ____ extends QueueUserACLInfo {
QueueUserACLInfoProto proto = QueueUserACLInfoProto.getDefaultInstance();
QueueUserACLInfoProto.Builder builder = null;
boolean viaProto = false;
List<QueueACL> userAclsList;
public QueueUserACLInfoPBImpl() {
builder = QueueUserACLInfoProto.newBuilder();
}
public QueueUserACLInfoPBImpl(QueueUserACLInfoProto proto) {
this.proto = proto;
viaProto = true;
}
@Override
public String getQueueName() {
QueueUserACLInfoProtoOrBuilder p = viaProto ? proto : builder;
return (p.hasQueueName()) ? p.getQueueName() : null;
}
@Override
public List<QueueACL> getUserAcls() {
initLocalQueueUserAclsList();
return this.userAclsList;
}
@Override
public void setQueueName(String queueName) {
maybeInitBuilder();
if (queueName == null) {
builder.clearQueueName();
return;
}
builder.setQueueName(queueName);
}
@Override
public void setUserAcls(List<QueueACL> userAclsList) {
if (userAclsList == null) {
builder.clearUserAcls();
}
this.userAclsList = userAclsList;
}
public QueueUserACLInfoProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
@Override
public int hashCode() {
return getProto().hashCode();
}
@Override
public boolean equals(Object other) {
if (other == null)
return false;
if (other.getClass().isAssignableFrom(this.getClass())) {
return this.getProto().equals(this.getClass().cast(other).getProto());
}
return false;
}
@Override
public String toString() {
return TextFormat.shortDebugString(getProto());
}
private void initLocalQueueUserAclsList() {
if (this.userAclsList != null) {
return;
}
QueueUserACLInfoProtoOrBuilder p = viaProto ? proto : builder;
List<QueueACLProto> list = p.getUserAclsList();
userAclsList = new ArrayList<QueueACL>();
for (QueueACLProto a : list) {
userAclsList.add(convertFromProtoFormat(a));
}
}
private void addQueueACLsToProto() {
maybeInitBuilder();
builder.clearUserAcls();
if (userAclsList == null)
return;
Iterable<QueueACLProto> iterable = new Iterable<QueueACLProto>() {
@Override
public Iterator<QueueACLProto> iterator() {
return new Iterator<QueueACLProto>() {
Iterator<QueueACL> iter = userAclsList.iterator();
@Override
public boolean hasNext() {
return iter.hasNext();
}
@Override
public QueueACLProto next() {
return convertToProtoFormat(iter.next());
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
};
}
};
builder.addAllUserAcls(iterable);
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = QueueUserACLInfoProto.newBuilder(proto);
}
viaProto = false;
}
private void mergeLocalToBuilder() {
if (this.userAclsList != null) {
addQueueACLsToProto();
}
}
private void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private QueueACL convertFromProtoFormat(QueueACLProto q) {
return ProtoUtils.convertFromProtoFormat(q);
}
private QueueACLProto convertToProtoFormat(QueueACL queueAcl) {
return ProtoUtils.convertToProtoFormat(queueAcl);
}
}
|
QueueUserACLInfoPBImpl
|
java
|
apache__dubbo
|
dubbo-metadata/dubbo-metadata-api/src/main/java/org/apache/dubbo/metadata/AbstractCacheManager.java
|
{
"start": 1850,
"end": 5833
}
|
class ____<V> implements Disposable {
protected final ErrorTypeAwareLogger logger = LoggerFactory.getErrorTypeAwareLogger(getClass());
private ScheduledExecutorService executorService;
protected FileCacheStore cacheStore;
protected LRUCache<String, V> cache;
protected void init(
boolean enableFileCache,
String filePath,
String fileName,
int entrySize,
long fileSize,
int interval,
ScheduledExecutorService executorService) {
this.cache = new LRUCache<>(entrySize);
try {
cacheStore = FileCacheStoreFactory.getInstance(filePath, fileName, enableFileCache);
Map<String, String> properties = cacheStore.loadCache(entrySize);
if (logger.isDebugEnabled()) {
logger.debug("Successfully loaded " + getName() + " cache from file " + fileName + ", entries "
+ properties.size());
}
for (Map.Entry<String, String> entry : properties.entrySet()) {
String key = entry.getKey();
String value = entry.getValue();
V v = toValueType(value);
put(key, v);
}
// executorService can be empty if FileCacheStore fails
if (executorService == null) {
this.executorService = Executors.newSingleThreadScheduledExecutor(
new NamedThreadFactory("Dubbo-cache-refreshing-scheduler", true));
} else {
this.executorService = executorService;
}
this.executorService.scheduleWithFixedDelay(
new CacheRefreshTask<>(this.cacheStore, this.cache, this, fileSize),
10,
interval,
TimeUnit.MINUTES);
} catch (Exception e) {
logger.error(COMMON_FAILED_LOAD_MAPPING_CACHE, "", "", "Load mapping from local cache file error ", e);
}
}
protected abstract V toValueType(String value);
protected abstract String getName();
protected boolean validate(String key, V value) {
return value != null;
}
public V get(String key) {
return cache.get(key);
}
public void put(String key, V apps) {
if (validate(key, apps)) {
cache.put(key, apps);
}
}
public V remove(String key) {
return cache.remove(key);
}
public Map<String, V> getAll() {
if (cache.isEmpty()) {
return Collections.emptyMap();
}
Map<String, V> copyMap = new HashMap<>();
cache.lock();
try {
for (Map.Entry<String, V> entry : cache.entrySet()) {
copyMap.put(entry.getKey(), entry.getValue());
}
} finally {
cache.releaseLock();
}
return Collections.unmodifiableMap(copyMap);
}
public void update(Map<String, V> newCache) {
for (Map.Entry<String, V> entry : newCache.entrySet()) {
put(entry.getKey(), entry.getValue());
}
}
public void destroy() {
if (executorService != null) {
executorService.shutdownNow();
try {
if (!executorService.awaitTermination(
ConfigurationUtils.reCalShutdownTime(DEFAULT_SERVER_SHUTDOWN_TIMEOUT), TimeUnit.MILLISECONDS)) {
logger.warn(
COMMON_UNEXPECTED_EXCEPTION, "", "", "Wait global executor service terminated timeout.");
}
} catch (InterruptedException e) {
logger.warn(COMMON_UNEXPECTED_EXCEPTION, "", "", "destroy resources failed: " + e.getMessage(), e);
}
}
if (cacheStore != null) {
cacheStore.destroy();
}
if (cache != null) {
cache.clear();
}
}
public static
|
AbstractCacheManager
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/time/JavaPeriodGetDaysTest.java
|
{
"start": 3516,
"end": 4104
}
|
class ____ {
public static void foo(Period period) {
long months = period.getMonths();
}
public static void bar(Period period) {
// BUG: Diagnostic contains: JavaPeriodGetDays
int days = period.getDays();
}
}
""")
.doTest();
}
@Test
public void getMonths() {
compilationHelper
.addSourceLines(
"test/TestCase.java",
"""
package test;
import java.time.Period;
public
|
TestCase
|
java
|
google__guava
|
android/guava/src/com/google/common/reflect/TypeResolver.java
|
{
"start": 24471,
"end": 25916
}
|
class ____ {
private final TypeVariable<?> var;
TypeVariableKey(TypeVariable<?> var) {
this.var = checkNotNull(var);
}
@Override
public int hashCode() {
return Objects.hash(var.getGenericDeclaration(), var.getName());
}
@Override
public boolean equals(@Nullable Object obj) {
if (obj instanceof TypeVariableKey) {
TypeVariableKey that = (TypeVariableKey) obj;
return equalsTypeVariable(that.var);
} else {
return false;
}
}
@Override
public String toString() {
return var.toString();
}
/** Wraps {@code t} in a {@code TypeVariableKey} if it's a type variable. */
static @Nullable TypeVariableKey forLookup(Type t) {
if (t instanceof TypeVariable) {
return new TypeVariableKey((TypeVariable<?>) t);
} else {
return null;
}
}
/**
* Returns true if {@code type} is a {@code TypeVariable} with the same name and declared by the
* same {@code GenericDeclaration}.
*/
boolean equalsType(Type type) {
if (type instanceof TypeVariable) {
return equalsTypeVariable((TypeVariable<?>) type);
} else {
return false;
}
}
private boolean equalsTypeVariable(TypeVariable<?> that) {
return var.getGenericDeclaration().equals(that.getGenericDeclaration())
&& var.getName().equals(that.getName());
}
}
}
|
TypeVariableKey
|
java
|
apache__spark
|
common/network-common/src/main/java/org/apache/spark/network/protocol/MessageDecoder.java
|
{
"start": 1341,
"end": 2862
}
|
class ____ extends MessageToMessageDecoder<ByteBuf> {
private static final SparkLogger logger = SparkLoggerFactory.getLogger(MessageDecoder.class);
public static final MessageDecoder INSTANCE = new MessageDecoder();
private MessageDecoder() {}
@Override
public void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) {
Message.Type msgType = Message.Type.decode(in);
Message decoded = decode(msgType, in);
assert decoded.type() == msgType;
logger.trace("Received message {}: {}", msgType, decoded);
out.add(decoded);
}
private Message decode(Message.Type msgType, ByteBuf in) {
return switch (msgType) {
case ChunkFetchRequest -> ChunkFetchRequest.decode(in);
case ChunkFetchSuccess -> ChunkFetchSuccess.decode(in);
case ChunkFetchFailure -> ChunkFetchFailure.decode(in);
case RpcRequest -> RpcRequest.decode(in);
case RpcResponse -> RpcResponse.decode(in);
case RpcFailure -> RpcFailure.decode(in);
case OneWayMessage -> OneWayMessage.decode(in);
case StreamRequest -> StreamRequest.decode(in);
case StreamResponse -> StreamResponse.decode(in);
case StreamFailure -> StreamFailure.decode(in);
case UploadStream -> UploadStream.decode(in);
case MergedBlockMetaRequest -> MergedBlockMetaRequest.decode(in);
case MergedBlockMetaSuccess -> MergedBlockMetaSuccess.decode(in);
default -> throw new IllegalArgumentException("Unexpected message type: " + msgType);
};
}
}
|
MessageDecoder
|
java
|
spring-projects__spring-framework
|
spring-webmvc/src/test/java/org/springframework/web/servlet/mvc/method/annotation/RequestMappingHandlerMappingTests.java
|
{
"start": 24357,
"end": 24827
}
|
class ____ {
@HttpExchange
public void defaultValuesExchange() {}
@PostExchange(url = "/custom", contentType = "application/json", accept = "text/plain;charset=UTF-8")
public void customValuesExchange(){}
@HttpExchange(method="GET", url = "/headers",
headers = {"h1=hv1", "!h2", "Accept=application/ignored"})
public String customHeadersExchange() {
return "info";
}
}
@HttpExchange("/exchange")
@ExtraHttpExchange
static
|
HttpExchangeController
|
java
|
apache__flink
|
flink-core/src/test/java/org/apache/flink/api/common/typeutils/base/VariantSerializerTest.java
|
{
"start": 1088,
"end": 2443
}
|
class ____ extends SerializerTestBase<Variant> {
@Override
protected TypeSerializer<Variant> createSerializer() {
return VariantSerializer.INSTANCE;
}
@Override
protected int getLength() {
return -1;
}
@Override
protected Class<Variant> getTypeClass() {
return Variant.class;
}
@Override
protected Variant[] getTestData() {
VariantBuilder builder = Variant.newBuilder();
return new Variant[] {
builder.of(1),
builder.object()
.add("k", builder.of(1))
.add("object", builder.object().add("k", builder.of("hello")).build())
.add(
"array",
builder.array()
.add(builder.of(1))
.add(builder.of(2))
.add(builder.object().add("kk", builder.of(1.123f)).build())
.build())
.build(),
builder.array()
.add(builder.object().add("k", builder.of(1)).build())
.add(builder.of("hello"))
.add(builder.object().add("k", builder.of(2)).build())
.build()
};
}
}
|
VariantSerializerTest
|
java
|
spring-projects__spring-boot
|
build-plugin/spring-boot-maven-plugin/src/dockerTest/projects/build-image-bad-buildpack/src/main/java/org/test/SampleApplication.java
|
{
"start": 652,
"end": 837
}
|
class ____ {
public static void main(String[] args) throws Exception {
System.out.println("Launched");
synchronized(args) {
args.wait(); // Prevent exit
}
}
}
|
SampleApplication
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/serializer/writeJSONStringToTest.java
|
{
"start": 183,
"end": 712
}
|
class ____ extends TestCase {
public void test_writeJSONStringTo() throws Exception {
Model model = new Model();
model.id = 1001;
model.name = "中文名称";
ByteArrayOutputStream os = new ByteArrayOutputStream();
JSON.writeJSONString(os, model);
os.close();
byte[] bytes = os.toByteArray();
String text = new String(bytes, "UTF-8");
Assert.assertEquals("{\"id\":1001,\"name\":\"中文名称\"}", text);
}
public static
|
writeJSONStringToTest
|
java
|
spring-projects__spring-framework
|
spring-r2dbc/src/test/java/org/springframework/r2dbc/core/R2dbcBeanPropertyRowMapperTests.java
|
{
"start": 1500,
"end": 4308
}
|
class ____ {
@Test
void mappingUnknownReadableRejected() {
BeanPropertyRowMapper<Person> mapper = new BeanPropertyRowMapper<>(Person.class);
assertThatIllegalArgumentException().isThrownBy(() -> mapper.apply(Mockito.mock(Readable.class)))
.withMessageStartingWith("Can only map Readable Row or OutParameters, got io.r2dbc.spi.Readable$MockitoMock$");
}
@Test
void mappingOutParametersAccepted() {
BeanPropertyRowMapper<Person> mapper = new BeanPropertyRowMapper<>(Person.class);
assertThatNoException().isThrownBy(() -> mapper.apply(MockOutParameters.empty()));
}
@Test
void mappingRowSimpleObject() {
BeanPropertyRowMapper<Person> mapper = new BeanPropertyRowMapper<>(Person.class);
Person result = mapper.apply(SIMPLE_PERSON_ROW);
assertThat(result.firstName).as("firstName").isEqualTo("John");
assertThat(result.lastName).as("lastName").isEqualTo("Doe");
assertThat(result.age).as("age").isEqualTo(30);
}
@Test
void mappingRowMissingAttributeAccepted() {
BeanPropertyRowMapper<ExtendedPerson> mapper = new BeanPropertyRowMapper<>(ExtendedPerson.class);
ExtendedPerson result = mapper.apply(SIMPLE_PERSON_ROW);
assertThat(result.firstName).as("firstName").isEqualTo("John");
assertThat(result.lastName).as("lastName").isEqualTo("Doe");
assertThat(result.age).as("age").isEqualTo(30);
assertThat(result.address).as("address").isNull();
}
@Test
void mappingRowWithDifferentName() {
BeanPropertyRowMapper<EmailPerson> mapper = new BeanPropertyRowMapper<>(EmailPerson.class);
EmailPerson result = mapper.apply(EMAIL_PERSON_ROW);
assertThat(result.firstName).as("firstName").isEqualTo("John");
assertThat(result.lastName).as("lastName").isEqualTo("Doe");
assertThat(result.age).as("age").isEqualTo(30);
assertThat(result.email).as("email").isEqualTo("mail@example.org");
}
@Test
void rowTypeAndMappingTypeMisaligned() {
BeanPropertyRowMapper<TypeMismatchExtendedPerson> mapper = new BeanPropertyRowMapper<>(TypeMismatchExtendedPerson.class);
assertThatExceptionOfType(TypeMismatchException.class)
.isThrownBy(() -> mapper.apply(EXTENDED_PERSON_ROW))
.withMessage("Failed to convert property value of type 'java.lang.String' to required type " +
"'java.lang.String' for property 'address'; simulating type mismatch for address");
}
@ParameterizedTest
@CsvSource({
"age, age",
"lastName, last_name",
"Name, name",
"FirstName, first_name",
"EMail, e_mail",
"URL, u_r_l", // likely undesirable, but that's the status quo
})
void underscoreName(String input, String expected) {
BeanPropertyRowMapper<?> mapper = new BeanPropertyRowMapper<>(Object.class);
assertThat(mapper.underscoreName(input)).isEqualTo(expected);
}
@SuppressWarnings("unused")
private static
|
R2dbcBeanPropertyRowMapperTests
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/RedisEndpointBuilderFactory.java
|
{
"start": 7330,
"end": 14444
}
|
interface ____
extends
EndpointConsumerBuilder {
default RedisEndpointConsumerBuilder basic() {
return (RedisEndpointConsumerBuilder) this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions (if possible) occurred while the Camel
* consumer is trying to pickup incoming messages, or the likes, will
* now be processed as a message and handled by the routing Error
* Handler. Important: This is only possible if the 3rd party component
* allows Camel to be alerted if an exception was thrown. Some
* components handle this internally only, and therefore
* bridgeErrorHandler is not possible. In other situations we may
* improve the Camel component to hook into the 3rd party component and
* make this possible for future releases. By default the consumer will
* use the org.apache.camel.spi.ExceptionHandler to deal with
* exceptions, that will be logged at WARN or ERROR level and ignored.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer (advanced)
*
* @param bridgeErrorHandler the value to set
* @return the dsl builder
*/
default AdvancedRedisEndpointConsumerBuilder bridgeErrorHandler(boolean bridgeErrorHandler) {
doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions (if possible) occurred while the Camel
* consumer is trying to pickup incoming messages, or the likes, will
* now be processed as a message and handled by the routing Error
* Handler. Important: This is only possible if the 3rd party component
* allows Camel to be alerted if an exception was thrown. Some
* components handle this internally only, and therefore
* bridgeErrorHandler is not possible. In other situations we may
* improve the Camel component to hook into the 3rd party component and
* make this possible for future releases. By default the consumer will
* use the org.apache.camel.spi.ExceptionHandler to deal with
* exceptions, that will be logged at WARN or ERROR level and ignored.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: consumer (advanced)
*
* @param bridgeErrorHandler the value to set
* @return the dsl builder
*/
default AdvancedRedisEndpointConsumerBuilder bridgeErrorHandler(String bridgeErrorHandler) {
doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* To let the consumer use a custom ExceptionHandler. Notice if the
* option bridgeErrorHandler is enabled then this option is not in use.
* By default the consumer will deal with exceptions, that will be
* logged at WARN or ERROR level and ignored.
*
* The option is a: <code>org.apache.camel.spi.ExceptionHandler</code>
* type.
*
* Group: consumer (advanced)
*
* @param exceptionHandler the value to set
* @return the dsl builder
*/
default AdvancedRedisEndpointConsumerBuilder exceptionHandler(org.apache.camel.spi.ExceptionHandler exceptionHandler) {
doSetProperty("exceptionHandler", exceptionHandler);
return this;
}
/**
* To let the consumer use a custom ExceptionHandler. Notice if the
* option bridgeErrorHandler is enabled then this option is not in use.
* By default the consumer will deal with exceptions, that will be
* logged at WARN or ERROR level and ignored.
*
* The option will be converted to a
* <code>org.apache.camel.spi.ExceptionHandler</code> type.
*
* Group: consumer (advanced)
*
* @param exceptionHandler the value to set
* @return the dsl builder
*/
default AdvancedRedisEndpointConsumerBuilder exceptionHandler(String exceptionHandler) {
doSetProperty("exceptionHandler", exceptionHandler);
return this;
}
/**
* Sets the exchange pattern when the consumer creates an exchange.
*
* The option is a: <code>org.apache.camel.ExchangePattern</code> type.
*
* Group: consumer (advanced)
*
* @param exchangePattern the value to set
* @return the dsl builder
*/
default AdvancedRedisEndpointConsumerBuilder exchangePattern(org.apache.camel.ExchangePattern exchangePattern) {
doSetProperty("exchangePattern", exchangePattern);
return this;
}
/**
* Sets the exchange pattern when the consumer creates an exchange.
*
* The option will be converted to a
* <code>org.apache.camel.ExchangePattern</code> type.
*
* Group: consumer (advanced)
*
* @param exchangePattern the value to set
* @return the dsl builder
*/
default AdvancedRedisEndpointConsumerBuilder exchangePattern(String exchangePattern) {
doSetProperty("exchangePattern", exchangePattern);
return this;
}
/**
* Reference to a pre-configured RedisMessageListenerContainer instance
* to use.
*
* The option is a:
* <code>org.springframework.data.redis.listener.RedisMessageListenerContainer</code> type.
*
* Group: consumer (advanced)
*
* @param listenerContainer the value to set
* @return the dsl builder
*/
default AdvancedRedisEndpointConsumerBuilder listenerContainer(org.springframework.data.redis.listener.RedisMessageListenerContainer listenerContainer) {
doSetProperty("listenerContainer", listenerContainer);
return this;
}
/**
* Reference to a pre-configured RedisMessageListenerContainer instance
* to use.
*
* The option will be converted to a
* <code>org.springframework.data.redis.listener.RedisMessageListenerContainer</code> type.
*
* Group: consumer (advanced)
*
* @param listenerContainer the value to set
* @return the dsl builder
*/
default AdvancedRedisEndpointConsumerBuilder listenerContainer(String listenerContainer) {
doSetProperty("listenerContainer", listenerContainer);
return this;
}
}
/**
* Builder for endpoint producers for the Spring Redis component.
*/
public
|
AdvancedRedisEndpointConsumerBuilder
|
java
|
dropwizard__dropwizard
|
dropwizard-views-mustache/src/test/java/io/dropwizard/views/mustache/MustacheViewRendererTest.java
|
{
"start": 1040,
"end": 4183
}
|
class ____ {
@GET
@Path("/absolute")
public AbsoluteView showAbsolute() {
return new AbsoluteView("yay");
}
@GET
@Path("/relative")
public RelativeView showRelative() {
return new RelativeView();
}
@GET
@Path("/bad")
public BadView showBad() {
return new BadView();
}
@GET
@Path("/error")
public ErrorView showError() {
return new ErrorView();
}
}
@Override
@BeforeEach
public void setUp() throws Exception {
super.setUp();
}
@Override
@AfterEach
public void tearDown() throws Exception {
super.tearDown();
}
@Override
protected Application configure() {
ResourceConfig config = DropwizardResourceConfig.forTesting();
final ViewRenderer renderer = new MustacheViewRenderer();
config.register(new ViewMessageBodyWriter(new MetricRegistry(), Collections.singletonList(renderer)));
config.register(new ViewRenderExceptionMapper());
config.register(new ExampleResource());
return config;
}
@Test
void rendersViewsWithAbsoluteTemplatePaths() {
assertThat(target("/test/absolute").request().get(String.class))
.isEqualTo("Woop woop. yay\n");
}
@Test
void rendersViewsWithRelativeTemplatePaths() {
assertThat(target("/test/relative").request().get(String.class))
.isEqualTo("Ok.\n");
}
@Test
void returnsA500ForViewsWithBadTemplatePaths() {
assertThatExceptionOfType(WebApplicationException.class)
.isThrownBy(() -> target("/test/bad").request().get(String.class))
.extracting(WebApplicationException::getResponse)
.satisfies(response -> assertThat(response.getStatus())
.isEqualTo(500))
.satisfies(response -> assertThat(response.readEntity(String.class))
.isEqualTo(ViewRenderExceptionMapper.TEMPLATE_ERROR_MSG));
}
@Test
void returnsA500ForViewsThatCantCompile() {
assertThatExceptionOfType(WebApplicationException.class)
.isThrownBy(() -> target("/test/error").request().get(String.class))
.extracting(WebApplicationException::getResponse)
.satisfies(response -> assertThat(response.getStatus())
.isEqualTo(500))
.satisfies(response -> assertThat(response.readEntity(String.class))
.isEqualTo(ViewRenderExceptionMapper.TEMPLATE_ERROR_MSG));
}
@Test
void cacheByDefault() {
MustacheViewRenderer mustacheViewRenderer = new MustacheViewRenderer();
mustacheViewRenderer.configure(Collections.emptyMap());
assertThat(mustacheViewRenderer.isUseCache()).isTrue();
}
@Test
void canDisableCache() {
MustacheViewRenderer mustacheViewRenderer = new MustacheViewRenderer();
mustacheViewRenderer.configure(Collections.singletonMap("cache", "false"));
assertThat(mustacheViewRenderer.isUseCache()).isFalse();
}
}
|
ExampleResource
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/mapping/inheritance/version/InheritanceVersionedParentTest.java
|
{
"start": 3129,
"end": 3632
}
|
class ____ extends VersionedFruit {
@OneToMany
private Set<Drupelet> drupelets = new HashSet<>();
private String maturity;
public Raspberry() {
}
public Raspberry(Long id, String maturity) {
super( id );
this.maturity = maturity;
}
public Set<Drupelet> getDrupelets() {
return drupelets;
}
public String getMaturity() {
return maturity;
}
public void setMaturity(String name) {
this.maturity = name;
}
}
@Entity( name = "Drupelet" )
public static
|
Raspberry
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/SingleOutputStreamOperator.java
|
{
"start": 12640,
"end": 12894
}
|
class ____ not be null.");
try {
return returns(TypeInformation.of(typeClass));
} catch (InvalidTypesException e) {
throw new InvalidTypesException(
"Cannot infer the type information from the
|
must
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/jpa/criteria/valuehandlingmode/inline/PredicateTest.java
|
{
"start": 3192,
"end": 3945
}
|
class ____ {
private String id;
private String name;
private Integer age;
public Customer() {
}
public Customer(String id, String name) {
this.id = id;
this.name = name;
}
// Used by test case for HHH-8699.
public Customer(String id, String name, String greeting, Boolean something) {
this.id = id;
this.name = name;
}
@Id
@Column(name = "ID")
public String getId() {
return id;
}
public void setId(String v) {
this.id = v;
}
@Column(name = "NAME")
public String getName() {
return name;
}
public void setName(String v) {
this.name = v;
}
@Column(name = "AGE")
public Integer getAge() {
return age;
}
public void setAge(Integer age) {
this.age = age;
}
}
}
|
Customer
|
java
|
spring-projects__spring-framework
|
spring-core/src/main/java/org/springframework/util/MethodInvoker.java
|
{
"start": 10076,
"end": 11694
}
|
class ____ would increase the weight by 1 accordingly, due to the
* superclass 1 step up the hierarchy (i.e. Number) still matching the required type Number.
* Therefore, with an arg of type Integer, a constructor (Integer) would be preferred to a
* constructor (Number) which would in turn be preferred to a constructor (Object).
* All argument weights get accumulated.
* <p>Note: This is the algorithm used by MethodInvoker itself and also the algorithm
* used for constructor and factory method selection in Spring's bean container (in case
* of lenient constructor resolution which is the default for regular bean definitions).
* @param paramTypes the parameter types to match
* @param args the arguments to match
* @return the accumulated weight for all arguments
*/
public static int getTypeDifferenceWeight(Class<?>[] paramTypes, @Nullable Object[] args) {
int result = 0;
for (int i = 0; i < paramTypes.length; i++) {
Class<?> paramType = paramTypes[i];
Object arg = args[i];
if (!ClassUtils.isAssignableValue(paramType, arg)) {
return Integer.MAX_VALUE;
}
if (arg != null) {
Class<?> superClass = arg.getClass().getSuperclass();
while (superClass != null) {
if (paramType.equals(superClass)) {
result = result + 2;
superClass = null;
}
else if (ClassUtils.isAssignable(paramType, superClass)) {
result = result + 2;
superClass = superClass.getSuperclass();
}
else {
superClass = null;
}
}
if (paramType.isInterface()) {
result = result + 1;
}
}
}
return result;
}
}
|
Integer
|
java
|
spring-projects__spring-framework
|
spring-context/src/test/java/org/springframework/context/annotation/ComponentScanAnnotationIntegrationTests.java
|
{
"start": 17013,
"end": 17291
}
|
class ____ {
}
@ComposedConfiguration(basePackages = "org.springframework.context.annotation.componentscan.simple")
@ComponentScan("example.scannable_implicitbasepackage")
@ComponentScan("example.scannable.sub")
static
|
LocalAnnotationOverridesMultipleComposedAnnotationsConfig
|
java
|
netty__netty
|
codec-classes-quic/src/main/java/io/netty/handler/codec/quic/QuicPathEvent.java
|
{
"start": 4482,
"end": 5514
}
|
class ____ extends QuicPathEvent {
/**
* The related network path between local and remote has been closed and is now unusable on this connection.
*
* @param local local address.
* @param remote remote address.
*/
public Closed(InetSocketAddress local, InetSocketAddress remote) {
super(local, remote);
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
return super.equals(o);
}
@Override
public int hashCode() {
return 31 * super.hashCode();
}
@Override
public String toString() {
return "QuicPathEvent.Closed{" +
"local=" + local() +
", remote=" + remote() +
'}';
}
}
public static final
|
Closed
|
java
|
eclipse-vertx__vert.x
|
vertx-core/src/test/java/io/vertx/it/servicehelper/FakeFactoryImplB.java
|
{
"start": 531,
"end": 737
}
|
class ____ implements FakeFactory {
@Override
public String name() {
return "B";
}
@Override
public ClassLoader classloader() {
return this.getClass().getClassLoader();
}
}
|
FakeFactoryImplB
|
java
|
micronaut-projects__micronaut-core
|
inject/src/main/java/io/micronaut/context/DefaultBeanContext.java
|
{
"start": 165862,
"end": 166258
}
|
class ____ implements ListenersSupplier<BeanCreatedEventListener> {
@Override
public Iterable<ListenerAndOrder<BeanCreatedEventListener>> get(BeanResolutionContext beanResolutionContext) {
return Collections.singletonList(new ListenerAndOrder<>(new BeanDefinitionProcessorListener(), 0));
}
}
private static final
|
BeanDefinitionProcessorListenerSupplier
|
java
|
apache__flink
|
flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/CustomSSLEngineProvider.java
|
{
"start": 1593,
"end": 4388
}
|
class ____ extends ConfigSSLEngineProvider {
private final String sslTrustStore;
private final String sslTrustStorePassword;
private final List<String> sslCertFingerprints;
private final String sslKeyStoreType;
private final String sslTrustStoreType;
public CustomSSLEngineProvider(ActorSystem system) {
super(system);
final Config securityConfig =
system.settings().config().getConfig("pekko.remote.classic.netty.ssl.security");
sslTrustStore = securityConfig.getString("trust-store");
sslTrustStorePassword = securityConfig.getString("trust-store-password");
sslCertFingerprints = securityConfig.getStringList("cert-fingerprints");
sslKeyStoreType = securityConfig.getString("key-store-type");
sslTrustStoreType = securityConfig.getString("trust-store-type");
}
@Override
public TrustManager[] trustManagers() {
try {
final TrustManagerFactory trustManagerFactory =
sslCertFingerprints.isEmpty()
? TrustManagerFactory.getInstance(
TrustManagerFactory.getDefaultAlgorithm())
: FingerprintTrustManagerFactory.builder("SHA1")
.fingerprints(sslCertFingerprints)
.build();
trustManagerFactory.init(
loadKeystore(sslTrustStore, sslTrustStorePassword, sslTrustStoreType));
return trustManagerFactory.getTrustManagers();
} catch (GeneralSecurityException | IOException e) {
// replicate exception handling from SSLEngineProvider
throw new RemoteTransportException(
"Server SSL connection could not be established because SSL context could not be constructed",
e);
}
}
@Override
public KeyStore loadKeystore(String filename, String password) {
try {
return loadKeystore(filename, password, sslKeyStoreType);
} catch (IOException | GeneralSecurityException e) {
throw new RemoteTransportException(
"Server SSL connection could not be established because key store could not be loaded",
e);
}
}
private KeyStore loadKeystore(String filename, String password, String keystoreType)
throws IOException, GeneralSecurityException {
KeyStore keyStore = KeyStore.getInstance(keystoreType);
try (InputStream fin = Files.newInputStream(Paths.get(filename))) {
char[] passwordCharArray = password.toCharArray();
keyStore.load(fin, passwordCharArray);
}
return keyStore;
}
}
|
CustomSSLEngineProvider
|
java
|
spring-projects__spring-framework
|
spring-jdbc/src/main/java/org/springframework/jdbc/datasource/init/CannotReadScriptException.java
|
{
"start": 911,
"end": 1303
}
|
class ____ extends ScriptException {
/**
* Create a new {@code CannotReadScriptException}.
* @param resource the resource that cannot be read from
* @param cause the underlying cause of the resource access failure
*/
public CannotReadScriptException(EncodedResource resource, Throwable cause) {
super("Cannot read SQL script from " + resource, cause);
}
}
|
CannotReadScriptException
|
java
|
junit-team__junit5
|
platform-tests/src/test/java/org/junit/platform/commons/util/AnnotationUtilsTests.java
|
{
"start": 28381,
"end": 28476
}
|
class ____ extends ContainerExtensionClass {
}
@ExtendWithFoo
static
|
SubContainerExtensionClass
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/lucene/spatial/CartesianShapeIndexer.java
|
{
"start": 1481,
"end": 1952
}
|
class ____ implements ShapeIndexer {
private final String name;
public CartesianShapeIndexer(String name) {
this.name = name;
}
public List<IndexableField> indexShape(Geometry shape) {
if (shape == null) {
return Collections.emptyList();
}
LuceneGeometryVisitor visitor = new LuceneGeometryVisitor(name);
shape.visit(visitor);
return visitor.fields;
}
private static
|
CartesianShapeIndexer
|
java
|
apache__camel
|
components/camel-observation/src/main/java/org/apache/camel/observation/MicrometerObservationTracer.java
|
{
"start": 1886,
"end": 8838
}
|
class ____ extends org.apache.camel.tracing.Tracer {
private static final String SPAN_DECORATOR_INTERNAL = "camel.micrometer.abstract-internal";
private static final String CAMEL_CONTEXT_NAME = "camel.component";
private Tracer tracer;
private ObservationRegistry observationRegistry;
public Tracer getTracer() {
return tracer;
}
public void setTracer(Tracer tracer) {
this.tracer = tracer;
}
public ObservationRegistry getObservationRegistry() {
return observationRegistry;
}
public void setObservationRegistry(ObservationRegistry observationRegistry) {
this.observationRegistry = observationRegistry;
}
private Observation.Context spanKindToContextOnExtract(
org.apache.camel.tracing.SpanKind kind, SpanDecorator sd, Exchange exchange) {
ExtractAdapter adapter = sd.getExtractAdapter(exchange.getIn().getHeaders(), encoding);
switch (kind) {
case PRODUCER:
throw new UnsupportedOperationException("Cannot extract when sending a message");
case SPAN_KIND_SERVER:
RequestReplyReceiverContext<Object, Message> replyReceiverContext
= new RequestReplyReceiverContext<>((carrier, key) -> {
Object val = adapter.get(key);
return val != null ? val.toString() : null;
});
replyReceiverContext.setResponse(exchange.getMessage());
replyReceiverContext.setCarrier(exchange.getIn());
return replyReceiverContext;
case CONSUMER:
case SPAN_KIND_CLIENT:
ReceiverContext<Message> receiverContext
= new ReceiverContext<>((carrier, key) -> {
Object val = adapter.get(key);
return val != null ? val.toString() : null;
});
receiverContext.setCarrier(exchange.getIn());
return receiverContext;
default:
return new Observation.Context();
}
}
private Observation.Context spanKindToContextOnInject(
org.apache.camel.tracing.SpanKind kind, InjectAdapter adapter, Exchange exchange) {
switch (kind) {
case SPAN_KIND_CLIENT:
RequestReplySenderContext<Object, Message> senderContext
= new RequestReplySenderContext<>((carrier, key, value) -> adapter.put(key, value));
senderContext.setResponse(exchange.getMessage());
senderContext.setCarrier(exchange.getIn());
return senderContext;
case PRODUCER:
SenderContext<Message> context = new SenderContext<>((carrier, key, value) -> adapter.put(key, value));
context.setCarrier(exchange.getIn());
return context;
case SPAN_KIND_SERVER:
case CONSUMER:
throw new UnsupportedOperationException("Cannot inject when receiving a message");
default:
return new Observation.Context();
}
}
@Override
protected void initTracer() {
if (tracer == null) {
tracer = CamelContextHelper.findSingleByType(getCamelContext(), Tracer.class);
}
if (observationRegistry == null) {
observationRegistry = CamelContextHelper.findSingleByType(getCamelContext(), ObservationRegistry.class);
}
if (observationRegistry == null) {
// No Observation Registry is available, so setup Noop
observationRegistry = ObservationRegistry.NOOP;
}
if (tracer == null) {
tracer = Tracer.NOOP;
}
}
@Override
protected SpanAdapter startSendingEventSpan(
String operationName, SpanKind kind, SpanAdapter parent, Exchange exchange,
InjectAdapter injectAdapter) {
Observation.Context context = spanKindToContextOnInject(kind, injectAdapter, exchange);
Observation observation = Observation.createNotStarted(CAMEL_CONTEXT_NAME, () -> context, observationRegistry);
observation.contextualName(operationName);
Observation parentObservation = getParentObservation(parent);
Tracer.SpanInScope scope = null;
try {
if (parentObservation != null && parentObservation != observationRegistry.getCurrentObservation()) {
// Because Camel allows to close scopes multiple times
TracingObservationHandler.TracingContext tracingContext
= parentObservation.getContextView().get(TracingObservationHandler.TracingContext.class);
if (tracingContext != null) {
Span parentSpan = tracingContext.getSpan();
scope = tracer.withSpan(parentSpan);
}
}
if (parentObservation != null) {
observation.parentObservation(parentObservation);
}
return new MicrometerObservationSpanAdapter(observation.start(), tracer);
} finally {
if (scope != null) {
scope.close();
}
}
}
@Override
protected void initContextPropagators() {
// noop
}
private static Observation getParentObservation(SpanAdapter parentObservation) {
if (parentObservation == null) {
return null;
}
MicrometerObservationSpanAdapter observationWrapper = (MicrometerObservationSpanAdapter) parentObservation;
return observationWrapper.getMicrometerObservation();
}
@Override
protected SpanAdapter startExchangeBeginSpan(
Exchange exchange, SpanDecorator sd, String operationName, org.apache.camel.tracing.SpanKind kind,
SpanAdapter parent) {
boolean parentPresent = parent != null;
Observation.Context context = spanKindToContextOnExtract(kind, sd, exchange);
boolean internalSpanDecorator = sd instanceof AbstractInternalSpanDecorator;
context.put(SPAN_DECORATOR_INTERNAL, internalSpanDecorator);
Observation observation = Observation.createNotStarted(operationName, () -> context, observationRegistry);
if (parentPresent) {
observation.parentObservation(getParentObservation(parent));
}
return new MicrometerObservationSpanAdapter(observation.start(), tracer);
}
@Override
protected void finishSpan(SpanAdapter span) {
MicrometerObservationSpanAdapter observationSpanAdapter = (MicrometerObservationSpanAdapter) span;
observationSpanAdapter.getMicrometerObservation().stop();
}
@Override
protected void inject(SpanAdapter span, InjectAdapter adapter) {
// Inject happens on start of an observation
}
}
|
MicrometerObservationTracer
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/tool/schema/internal/ExceptionHandlerLoggedImpl.java
|
{
"start": 335,
"end": 827
}
|
class ____ implements ExceptionHandler {
private static final Logger LOG = Logger.getLogger( ExceptionHandlerLoggedImpl.class );
/**
* Singleton access
*/
public static final ExceptionHandlerLoggedImpl INSTANCE = new ExceptionHandlerLoggedImpl();
@Override
public void handleException(CommandAcceptanceException exception) {
LOG.warnf(
exception,
"GenerationTarget encountered exception accepting command : %s",
exception.getMessage()
);
}
}
|
ExceptionHandlerLoggedImpl
|
java
|
quarkusio__quarkus
|
integration-tests/gradle/src/test/java/io/quarkus/gradle/continuoustesting/ContinuousTestingClient.java
|
{
"start": 449,
"end": 3714
}
|
class ____ {
private static final int DEFAULT_PORT = 8080;
long runtToWaitFor = 1;
final String host;
protected static String getDefaultHost(int port) {
return "http://localhost:" + port;
}
public ContinuousTestingClient() {
this(getDefaultHost(DEFAULT_PORT));
}
public ContinuousTestingClient(int port) {
this(getDefaultHost(port));
}
public ContinuousTestingClient(String host) {
this.host = host;
}
public TestStatus waitForNextCompletion() {
try {
Awaitility.waitAtMost(2, TimeUnit.MINUTES).pollInterval(200, TimeUnit.MILLISECONDS).until(() -> {
TestStatus ts = getTestStatus();
if (ts.getLastRun() > runtToWaitFor) {
throw new RuntimeException(
"Waiting for run " + runtToWaitFor + " but run " + ts.getLastRun() + " has already occurred");
}
boolean runComplete = ts.getLastRun() == runtToWaitFor;
if (runComplete && ts.getRunning() > 0) {
//there is a small chance of a race, where changes are picked up twice, due to how filesystems work
//this works around it by waiting for the next run
runtToWaitFor = ts.getRunning();
return false;
} else if (runComplete) {
runtToWaitFor++;
}
return runComplete;
});
return getTestStatus();
} catch (Exception e) {
TestStatus ts;
try {
ts = getTestStatus();
} catch (Exception ex) {
throw new RuntimeException(ex);
}
throw new ConditionTimeoutException("Failed to wait for test run " + runtToWaitFor + " " + ts, e);
}
}
private TestStatus getTestStatus() {
DevUIJsonRPCTest devUIJsonRPCTest = new DevUIJsonRPCTest("devui-continuous-testing", this.host);
try {
TypeReference<Map<String, Long>> typeRef = new TypeReference<Map<String, Long>>() {
};
Map<String, Long> testStatus = devUIJsonRPCTest.executeJsonRPCMethod(typeRef, "getStatus");
long lastRun = testStatus.getOrDefault("lastRun", -1L);
long running = testStatus.getOrDefault("running", -1L);
long testsRun = testStatus.getOrDefault("testsRun", -1L);
long testsPassed = testStatus.getOrDefault("testsPassed", -1L);
long testsFailed = testStatus.getOrDefault("testsFailed", -1L);
long testsSkipped = testStatus.getOrDefault("testsSkipped", -1L);
long totalTestsPassed = testStatus.getOrDefault("totalTestsPassed", -1L);
long totalTestsFailed = testStatus.getOrDefault("totalTestsFailed", -1L);
long totalTestsSkipped = testStatus.getOrDefault("totalTestsSkipped", -1L);
return new TestStatus(lastRun, running, testsRun, testsPassed, testsFailed, testsSkipped, totalTestsPassed,
totalTestsFailed, totalTestsSkipped);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
public static
|
ContinuousTestingClient
|
java
|
quarkusio__quarkus
|
extensions/tls-registry/deployment/src/test/java/io/quarkus/tls/MissingP12TrustStoreFromFileSystemTest.java
|
{
"start": 823,
"end": 1636
}
|
class ____ {
private static final String configuration = """
quarkus.tls.trust-store.p12.path=target/certs/missing.p12
quarkus.tls.trust-store.p12.password=password
""";
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest().setArchiveProducer(
() -> ShrinkWrap.create(JavaArchive.class)
.add(new StringAsset(configuration), "application.properties"))
.assertException(t -> {
assertThat(t.getMessage()).contains("default", "P12", "file", "missing.p12", "trust");
});
@Test
void test() throws KeyStoreException, CertificateParsingException {
fail("Should not be called as the extension should fail before.");
}
}
|
MissingP12TrustStoreFromFileSystemTest
|
java
|
spring-projects__spring-framework
|
spring-web/src/main/java/org/springframework/web/SpringServletContainerInitializer.java
|
{
"start": 1514,
"end": 3689
}
|
class ____ be loaded and instantiated and have its {@link #onStartup}
* method invoked by any Servlet-compliant container during container startup assuming
* that the {@code spring-web} module JAR is present on the classpath. This occurs through
* the JAR Services API {@link ServiceLoader#load(Class)} method detecting the
* {@code spring-web} module's {@code META-INF/services/jakarta.servlet.ServletContainerInitializer}
* service provider configuration file.
*
* <h3>In combination with {@code web.xml}</h3>
* A web application can choose to limit the amount of classpath scanning the Servlet
* container does at startup either through the {@code metadata-complete} attribute in
* {@code web.xml}, which controls scanning for Servlet annotations or through an
* {@code <absolute-ordering>} element also in {@code web.xml}, which controls which
* web fragments (i.e. jars) are allowed to perform a {@code ServletContainerInitializer}
* scan. When using this feature, the {@link SpringServletContainerInitializer}
* can be enabled by adding "spring_web" to the list of named web fragments in
* {@code web.xml} as follows:
*
* <pre class="code">
* <absolute-ordering>
* <name>some_web_fragment</name>
* <name>spring_web</name>
* </absolute-ordering>
* </pre>
*
* <h2>Relationship to Spring's {@code WebApplicationInitializer}</h2>
* Spring's {@code WebApplicationInitializer} SPI consists of just one method:
* {@link WebApplicationInitializer#onStartup(ServletContext)}. The signature is intentionally
* quite similar to {@link ServletContainerInitializer#onStartup(Set, ServletContext)}:
* simply put, {@code SpringServletContainerInitializer} is responsible for instantiating
* and delegating the {@code ServletContext} to any user-defined
* {@code WebApplicationInitializer} implementations. It is then the responsibility of
* each {@code WebApplicationInitializer} to do the actual work of initializing the
* {@code ServletContext}. The exact process of delegation is described in detail in the
* {@link #onStartup onStartup} documentation below.
*
* <h2>General Notes</h2>
* In general, this
|
will
|
java
|
apache__flink
|
flink-state-backends/flink-statebackend-changelog/src/main/java/org/apache/flink/state/changelog/ChangelogState.java
|
{
"start": 1192,
"end": 1429
}
|
interface ____ {
StateChangeApplier getChangeApplier(ChangelogApplierFactory factory);
<IS> void setDelegatedState(IS state);
/** Enable logging meta data before next writes. */
void resetWritingMetaFlag();
}
|
ChangelogState
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/PreferredInterfaceTypeTest.java
|
{
"start": 14654,
"end": 15318
}
|
class ____ {
final List<String> foo() {
if (true) {
return ImmutableList.of();
} else {
return new ArrayList<>();
}
}
}
""")
.doTest();
}
@Test
public void
returnTypeList_multipleReturnStatementsImmutableSetImmutableList_suggestsImmutableCollection() {
testHelper
.addSourceLines(
"Test.java",
"""
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import java.util.Collection;
|
Test
|
java
|
apache__camel
|
components/camel-telegram/src/test/java/org/apache/camel/component/telegram/TelegramConsumerMappingTest.java
|
{
"start": 1844,
"end": 5452
}
|
class ____ extends TelegramTestSupport {
@EndpointInject("mock:telegram")
private MockEndpoint endpoint;
@Test
public void testMessageMapping() throws Exception {
endpoint.expectedMinimumMessageCount(1);
endpoint.expectedMessageCount(1);
endpoint.assertIsSatisfied(5000);
Exchange ex = endpoint.getExchanges().get(0);
Message m = ex.getIn();
assertNotNull(m);
// checking headers
assertEquals("-45658", m.getHeader(TelegramConstants.TELEGRAM_CHAT_ID));
// checking body
assertNotNull(m.getBody());
assertTrue(m.getBody() instanceof IncomingMessage);
IncomingMessage body = (IncomingMessage) m.getBody();
assertEquals("a message", body.getText());
assertEquals(Long.valueOf(179L), body.getMessageId());
assertEquals(Instant.ofEpochSecond(1463436626L), body.getDate());
// checking from
User user = body.getFrom();
assertNotNull(user);
assertEquals("John", user.getFirstName());
assertEquals("Doe", user.getLastName());
assertEquals(Long.valueOf(1585844777), user.getId());
// checking chat
Chat chat = body.getChat();
assertNotNull(chat);
assertEquals("-45658", chat.getId());
assertEquals("A chat group", chat.getTitle());
assertEquals("group", chat.getType());
}
@Test
public void testMessageResultMapping() {
MessageResult messageResult = getJSONResource("messages/updates-sendLocation.json", MessageResult.class);
assertTrue(messageResult.isOk());
assertTrue(messageResult.isOk());
assertEquals((Long) 33L, messageResult.getMessage().getMessageId());
assertEquals(Instant.ofEpochSecond(1548091564).getEpochSecond(), messageResult.getMessage().getDate().getEpochSecond());
assertEquals((Long) 665977497L, messageResult.getMessage().getFrom().getId());
assertTrue(messageResult.getMessage().getFrom().isBot());
assertEquals("camelbot", messageResult.getMessage().getFrom().getFirstName());
assertEquals("camel_component_bot", messageResult.getMessage().getFrom().getUsername());
assertEquals("-182520913", messageResult.getMessage().getChat().getId());
assertEquals("testgroup", messageResult.getMessage().getChat().getTitle());
assertEquals("group", messageResult.getMessage().getChat().getType());
assertTrue(messageResult.getMessage().getChat().isAllMembersAreAdministrators());
assertEquals(59.9386292, messageResult.getMessage().getLocation().getLatitude(), 1.0E-07);
assertEquals(30.3141308, messageResult.getMessage().getLocation().getLongitude(), 1.0E-07);
}
@Override
protected RoutesBuilder[] createRouteBuilders() {
return new RoutesBuilder[] {
getMockRoutes(),
new RouteBuilder() {
@Override
public void configure() {
from("telegram:bots?authorizationToken=mock-token").to("mock:telegram");
}
} };
}
@Override
protected TelegramMockRoutes createMockRoutes() {
return new TelegramMockRoutes(port)
.addEndpoint(
"getUpdates",
"GET",
String.class,
TelegramTestUtil.stringResource("messages/updates-single.json"),
TelegramTestUtil.stringResource("messages/updates-empty.json"));
}
}
|
TelegramConsumerMappingTest
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponseTests.java
|
{
"start": 1959,
"end": 10436
}
|
class ____ extends AbstractWireSerializingTestCase<FieldCapabilitiesResponse> {
@Override
protected FieldCapabilitiesResponse createTestInstance() {
FieldCapabilitiesResponse randomResponse;
List<FieldCapabilitiesIndexResponse> responses = new ArrayList<>();
int numResponse = randomIntBetween(0, 10);
for (int i = 0; i < numResponse; i++) {
Map<String, IndexFieldCapabilities> fieldCaps = FieldCapabilitiesIndexResponseTests.randomFieldCaps();
var indexMode = randomFrom(IndexMode.values());
responses.add(new FieldCapabilitiesIndexResponse("index_" + i, null, fieldCaps, randomBoolean(), indexMode));
}
randomResponse = FieldCapabilitiesResponse.builder().withIndexResponses(responses).build();
return randomResponse;
}
@Override
protected Writeable.Reader<FieldCapabilitiesResponse> instanceReader() {
return FieldCapabilitiesResponse::new;
}
@Override
protected FieldCapabilitiesResponse mutateInstance(FieldCapabilitiesResponse response) {
Map<String, Map<String, FieldCapabilities>> mutatedResponses = new HashMap<>(response.get());
int mutation = response.get().isEmpty() ? 0 : randomIntBetween(0, 2);
switch (mutation) {
case 0 -> {
String toAdd = randomAlphaOfLength(10);
mutatedResponses.put(
toAdd,
Collections.singletonMap(randomAlphaOfLength(10), FieldCapabilitiesTests.randomFieldCaps(toAdd))
);
}
case 1 -> {
String toRemove = randomFrom(mutatedResponses.keySet());
mutatedResponses.remove(toRemove);
}
case 2 -> {
String toReplace = randomFrom(mutatedResponses.keySet());
mutatedResponses.put(
toReplace,
Collections.singletonMap(randomAlphaOfLength(10), FieldCapabilitiesTests.randomFieldCaps(toReplace))
);
}
}
return FieldCapabilitiesResponse.builder().withFields(mutatedResponses).build();
}
public void testFailureSerialization() throws IOException {
FieldCapabilitiesResponse randomResponse = createResponseWithFailures();
FieldCapabilitiesResponse deserialized = copyInstance(randomResponse);
assertThat(deserialized.getIndices(), Matchers.equalTo(randomResponse.getIndices()));
// only match size of failure list and indices, most exceptions don't support 'equals'
List<FieldCapabilitiesFailure> deserializedFailures = deserialized.getFailures();
assertEquals(deserializedFailures.size(), randomResponse.getFailures().size());
int i = 0;
for (FieldCapabilitiesFailure originalFailure : randomResponse.getFailures()) {
FieldCapabilitiesFailure deserializedFaliure = deserializedFailures.get(i);
assertThat(deserializedFaliure.getIndices(), Matchers.equalTo(originalFailure.getIndices()));
i++;
}
}
public void testFailureParsing() throws IOException {
FieldCapabilitiesResponse randomResponse = createResponseWithFailures();
boolean humanReadable = randomBoolean();
XContentType xContentType = randomFrom(XContentType.values());
BytesReference originalBytes = toShuffledXContent(
ChunkedToXContent.wrapAsToXContent(randomResponse),
xContentType,
ToXContent.EMPTY_PARAMS,
humanReadable
);
FieldCapabilitiesResponse parsedResponse;
try (XContentParser parser = createParser(xContentType.xContent(), originalBytes)) {
parsedResponse = FieldCapsUtils.parseFieldCapsResponse(parser);
assertNull(parser.nextToken());
}
assertNotSame(parsedResponse, randomResponse);
assertThat(parsedResponse.getIndices(), Matchers.equalTo(randomResponse.getIndices()));
// only match size of failure list and indices, most exceptions don't support 'equals'
List<FieldCapabilitiesFailure> deserializedFailures = parsedResponse.getFailures();
assertEquals(deserializedFailures.size(), randomResponse.getFailures().size());
int i = 0;
for (FieldCapabilitiesFailure originalFailure : randomResponse.getFailures()) {
FieldCapabilitiesFailure deserializedFaliure = deserializedFailures.get(i);
assertThat(deserializedFaliure.getIndices(), Matchers.equalTo(originalFailure.getIndices()));
i++;
}
}
public static FieldCapabilitiesResponse createResponseWithFailures() {
String[] indices = randomArray(randomIntBetween(1, 5), String[]::new, () -> randomAlphaOfLength(5));
List<FieldCapabilitiesFailure> failures = new ArrayList<>();
for (String index : indices) {
if (randomBoolean() || failures.size() == 0) {
failures.add(new FieldCapabilitiesFailure(new String[] { index }, ElasticsearchExceptionTests.randomExceptions().v2()));
} else {
failures.get(failures.size() - 1).addIndex(index);
}
}
return FieldCapabilitiesResponse.builder().withIndices(indices).withFailures(failures).build();
}
private static FieldCapabilitiesResponse randomCCSResponse(List<FieldCapabilitiesIndexResponse> indexResponses) {
int numFailures = between(0, 4);
List<FieldCapabilitiesFailure> failures = new ArrayList<>();
for (int i = 0; i < numFailures; i++) {
String index = "index_" + i;
failures.add(new FieldCapabilitiesFailure(new String[] { index }, ElasticsearchExceptionTests.randomExceptions().v2()));
}
return FieldCapabilitiesResponse.builder().withIndexResponses(indexResponses).withFailures(failures).build();
}
public void testSerializeCCSResponseBetweenNewClusters() throws Exception {
Map<String, List<String>> mappingHashToIndices = randomMappingHashToIndices();
List<FieldCapabilitiesIndexResponse> indexResponses = CollectionUtils.concatLists(
randomIndexResponsesWithMappingHash(mappingHashToIndices),
randomIndexResponsesWithoutMappingHash()
);
Randomness.shuffle(indexResponses);
FieldCapabilitiesResponse inResponse = randomCCSResponse(indexResponses);
final TransportVersion version = TransportVersionUtils.randomVersionBetween(
random(),
TransportVersions.V_8_2_0,
TransportVersion.current()
);
final FieldCapabilitiesResponse outResponse = copyInstance(inResponse, version);
assertThat(
outResponse.getFailures().stream().flatMap(f -> Arrays.stream(f.getIndices())).toList(),
equalTo(inResponse.getFailures().stream().flatMap(f -> Arrays.stream(f.getIndices())).toList())
);
final List<FieldCapabilitiesIndexResponse> inList = inResponse.getIndexResponses();
final List<FieldCapabilitiesIndexResponse> outList = outResponse.getIndexResponses();
assertThat(outList, hasSize(inList.size()));
assertThat(
outList.stream().sorted(Comparator.comparing(FieldCapabilitiesIndexResponse::getIndexName)).toList(),
equalTo(inList.stream().sorted(Comparator.comparing(FieldCapabilitiesIndexResponse::getIndexName)).toList())
);
Map<String, List<FieldCapabilitiesIndexResponse>> groupedResponses = outList.stream()
.filter(r -> r.canMatch() && r.getIndexMappingHash() != null)
.collect(Collectors.groupingBy(FieldCapabilitiesIndexResponse::getIndexMappingHash));
assertThat(groupedResponses.keySet(), equalTo(mappingHashToIndices.keySet()));
// Asserts responses of indices with the same mapping hash must be shared.
for (Map.Entry<String, List<FieldCapabilitiesIndexResponse>> e : groupedResponses.entrySet()) {
List<String> indices = mappingHashToIndices.get(e.getKey());
List<FieldCapabilitiesIndexResponse> rs = e.getValue();
assertThat(rs.stream().map(FieldCapabilitiesIndexResponse::getIndexName).sorted().toList(), equalTo(indices));
for (FieldCapabilitiesIndexResponse r : rs) {
assertTrue(r.canMatch());
assertSame(r.get(), rs.get(0).get());
}
}
}
}
|
FieldCapabilitiesResponseTests
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/runtime/state/changelog/inmemory/StateChangelogStorageLoaderTest.java
|
{
"start": 5340,
"end": 6152
}
|
class ____ implements StateChangelogStorageFactory {
@Override
public String getIdentifier() {
// same identifier for overlapping test.
return InMemoryStateChangelogStorageFactory.IDENTIFIER;
}
@Override
public StateChangelogStorage<?> createStorage(
JobID jobID,
Configuration configuration,
TaskManagerJobMetricGroup metricGroup,
LocalRecoveryConfig localRecoveryConfig) {
return new TestStateChangelogStorage();
}
@Override
public StateChangelogStorageView<?> createStorageView(Configuration configuration)
throws IOException {
return new TestStateChangelogStorage();
}
}
}
|
TestStateChangelogStorageFactory
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java
|
{
"start": 1773,
"end": 7825
}
|
interface ____ {
ClusterUpdateSettingsRequest create();
}
private static final ObjectParser<ClusterUpdateSettingsRequest, Factory> PARSER = ObjectParser.fromBuilder(
"cluster_update_settings_request",
Factory::create
);
static {
PARSER.declareObject((r, p) -> r.persistentSettings = p, (p, c) -> Settings.fromXContent(p), PERSISTENT);
PARSER.declareObject((r, t) -> r.transientSettings = t, (p, c) -> Settings.fromXContent(p), TRANSIENT);
}
private Settings transientSettings = Settings.EMPTY;
private Settings persistentSettings = Settings.EMPTY;
public ClusterUpdateSettingsRequest(StreamInput in) throws IOException {
super(in);
transientSettings = readSettingsFromStream(in);
persistentSettings = readSettingsFromStream(in);
}
public ClusterUpdateSettingsRequest(TimeValue masterNodeTimeout, TimeValue ackTimeout) {
super(masterNodeTimeout, ackTimeout);
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;
if (transientSettings.isEmpty() && persistentSettings.isEmpty()) {
validationException = addValidationError("no settings to update", validationException);
}
// for bwc we have to reject logger settings on the REST level instead of using a validator
for (String error : Loggers.checkRestrictedLoggers(transientSettings)) {
validationException = addValidationError(error, validationException);
}
for (String error : Loggers.checkRestrictedLoggers(persistentSettings)) {
validationException = addValidationError(error, validationException);
}
return validationException;
}
/**
* @deprecated Transient settings are in the process of being removed. Use
* persistent settings to update your cluster settings instead.
*/
@Deprecated
public Settings transientSettings() {
return transientSettings;
}
public Settings persistentSettings() {
return persistentSettings;
}
/**
* Sets the transient settings to be updated. They will not survive a full cluster restart
*
* @deprecated Transient settings are in the process of being removed. Use
* persistent settings to update your cluster settings instead.
*/
@Deprecated
public ClusterUpdateSettingsRequest transientSettings(Settings settings) {
this.transientSettings = settings;
return this;
}
/**
* Sets the transient settings to be updated. They will not survive a full cluster restart
*
* @deprecated Transient settings are in the process of being removed. Use
* persistent settings to update your cluster settings instead.
*/
@Deprecated
public ClusterUpdateSettingsRequest transientSettings(Settings.Builder settings) {
this.transientSettings = settings.build();
return this;
}
/**
* Sets the source containing the transient settings to be updated. They will not survive a full cluster restart
*
* @deprecated Transient settings are in the process of being removed. Use
* persistent settings to update your cluster settings instead.
*/
@Deprecated
public ClusterUpdateSettingsRequest transientSettings(String source, XContentType xContentType) {
this.transientSettings = Settings.builder().loadFromSource(source, xContentType).build();
return this;
}
/**
* Sets the transient settings to be updated. They will not survive a full cluster restart
*
* @deprecated Transient settings are in the process of being removed. Use
* persistent settings to update your cluster settings instead.
*/
@Deprecated
public ClusterUpdateSettingsRequest transientSettings(Map<String, ?> source) {
this.transientSettings = Settings.builder().loadFromMap(source).build();
return this;
}
/**
* Sets the persistent settings to be updated. They will get applied cross restarts
*/
public ClusterUpdateSettingsRequest persistentSettings(Settings settings) {
this.persistentSettings = settings;
return this;
}
/**
* Sets the persistent settings to be updated. They will get applied cross restarts
*/
public ClusterUpdateSettingsRequest persistentSettings(Settings.Builder settings) {
this.persistentSettings = settings.build();
return this;
}
/**
* Sets the source containing the persistent settings to be updated. They will get applied cross restarts
*/
public ClusterUpdateSettingsRequest persistentSettings(String source, XContentType xContentType) {
this.persistentSettings = Settings.builder().loadFromSource(source, xContentType).build();
return this;
}
/**
* Sets the persistent settings to be updated. They will get applied cross restarts
*/
public ClusterUpdateSettingsRequest persistentSettings(Map<String, ?> source) {
this.persistentSettings = Settings.builder().loadFromMap(source).build();
return this;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
transientSettings.writeTo(out);
persistentSettings.writeTo(out);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.startObject(PERSISTENT.getPreferredName());
persistentSettings.toXContent(builder, params);
builder.endObject();
builder.startObject(TRANSIENT.getPreferredName());
transientSettings.toXContent(builder, params);
builder.endObject();
builder.endObject();
return builder;
}
public static ClusterUpdateSettingsRequest fromXContent(Factory factory, XContentParser parser) {
return PARSER.apply(parser, factory);
}
}
|
Factory
|
java
|
spring-projects__spring-security
|
config/src/main/java/org/springframework/security/config/aot/hint/OAuth2LoginRuntimeHints.java
|
{
"start": 1013,
"end": 1328
}
|
class ____ implements RuntimeHintsRegistrar {
@Override
public void registerHints(RuntimeHints hints, ClassLoader classLoader) {
hints.reflection()
.registerTypeIfPresent(classLoader, "org.springframework.security.oauth2.jwt.JwtDecoder",
MemberCategory.INVOKE_PUBLIC_METHODS);
}
}
|
OAuth2LoginRuntimeHints
|
java
|
elastic__elasticsearch
|
qa/smoke-test-http/src/internalClusterTest/java/org/elasticsearch/http/BlockedSearcherRestCancellationTestCase.java
|
{
"start": 2509,
"end": 2835
}
|
class ____ testing that cancellation works at the REST layer for requests that need to acquire a searcher on one or more shards.
*
* It works by blocking searcher acquisition in order to catch the request mid-execution, and then to check that all the tasks are cancelled
* before they complete normally.
*/
public abstract
|
for
|
java
|
apache__flink
|
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/sink/writer/FileWriterBucketStateSerializer.java
|
{
"start": 2326,
"end": 10825
}
|
class ____
implements SimpleVersionedSerializer<FileWriterBucketState> {
private static final int MAGIC_NUMBER = 0x1e764b79;
private final SimpleVersionedSerializer<InProgressFileRecoverable>
inProgressFileRecoverableSerializer;
private final SimpleVersionedSerializer<PendingFileRecoverable>
pendingFileRecoverableSerializer;
public FileWriterBucketStateSerializer(
SimpleVersionedSerializer<InProgressFileRecoverable>
inProgressFileRecoverableSerializer,
SimpleVersionedSerializer<PendingFileRecoverable> pendingFileRecoverableSerializer) {
this.inProgressFileRecoverableSerializer =
checkNotNull(inProgressFileRecoverableSerializer);
this.pendingFileRecoverableSerializer = checkNotNull(pendingFileRecoverableSerializer);
}
@Override
public int getVersion() {
return 3;
}
@Override
public byte[] serialize(FileWriterBucketState state) throws IOException {
DataOutputSerializer out = new DataOutputSerializer(256);
out.writeInt(MAGIC_NUMBER);
serializeV3(state, out);
return out.getCopyOfBuffer();
}
@Override
public FileWriterBucketState deserialize(int version, byte[] serialized) throws IOException {
DataInputDeserializer in = new DataInputDeserializer(serialized);
switch (version) {
case 1:
validateMagicNumber(in);
return deserializeV1(in);
case 2:
validateMagicNumber(in);
return deserializeV2(in);
case 3:
validateMagicNumber(in);
return deserializeV3(in);
default:
throw new IOException("Unrecognized version or corrupt state: " + version);
}
}
private void serializeV3(FileWriterBucketState state, DataOutputView dataOutputView)
throws IOException {
SimpleVersionedSerialization.writeVersionAndSerialize(
SimpleVersionedStringSerializer.INSTANCE, state.getBucketId(), dataOutputView);
dataOutputView.writeUTF(state.getBucketPath().toString());
dataOutputView.writeLong(state.getInProgressFileCreationTime());
// put the current open part file
if (state.hasInProgressFileRecoverable()) {
InProgressFileRecoverable inProgressFileRecoverable =
state.getInProgressFileRecoverable();
dataOutputView.writeBoolean(true);
SimpleVersionedSerialization.writeVersionAndSerialize(
inProgressFileRecoverableSerializer, inProgressFileRecoverable, dataOutputView);
} else {
dataOutputView.writeBoolean(false);
}
}
private FileWriterBucketState deserializeV1(DataInputView in) throws IOException {
final SimpleVersionedSerializer<RecoverableWriter.CommitRecoverable> commitableSerializer =
getCommitableSerializer();
final SimpleVersionedSerializer<RecoverableWriter.ResumeRecoverable> resumableSerializer =
getResumableSerializer();
return internalDeserialize(
in,
dataInputView ->
new OutputStreamBasedPartFileWriter
.OutputStreamBasedInProgressFileRecoverable(
SimpleVersionedSerialization.readVersionAndDeSerialize(
resumableSerializer, dataInputView)),
(version, bytes) ->
new OutputStreamBasedPartFileWriter.OutputStreamBasedPendingFileRecoverable(
commitableSerializer.deserialize(version, bytes)));
}
private FileWriterBucketState deserializeV2(DataInputView in) throws IOException {
return internalDeserialize(
in,
dataInputView ->
SimpleVersionedSerialization.readVersionAndDeSerialize(
inProgressFileRecoverableSerializer, dataInputView),
pendingFileRecoverableSerializer::deserialize);
}
private FileWriterBucketState deserializeV3(DataInputView in) throws IOException {
return internalDeserialize(
in,
dataInputView ->
SimpleVersionedSerialization.readVersionAndDeSerialize(
inProgressFileRecoverableSerializer, dataInputView),
null);
}
private FileWriterBucketState internalDeserialize(
DataInputView dataInputView,
FunctionWithException<DataInputView, InProgressFileRecoverable, IOException>
inProgressFileParser,
@Nullable
BiFunctionWithException<Integer, byte[], PendingFileRecoverable, IOException>
pendingFileParser)
throws IOException {
String bucketId =
SimpleVersionedSerialization.readVersionAndDeSerialize(
SimpleVersionedStringSerializer.INSTANCE, dataInputView);
String bucketPathStr = dataInputView.readUTF();
long creationTime = dataInputView.readLong();
// then get the current resumable stream
InProgressFileRecoverable current = null;
if (dataInputView.readBoolean()) {
current = inProgressFileParser.apply(dataInputView);
}
HashMap<Long, List<InProgressFileWriter.PendingFileRecoverable>>
pendingFileRecoverablesPerCheckpoint = new HashMap<>();
if (pendingFileParser != null) {
final int pendingFileRecoverableSerializerVersion = dataInputView.readInt();
final int numCheckpoints = dataInputView.readInt();
for (int i = 0; i < numCheckpoints; i++) {
final long checkpointId = dataInputView.readLong();
final int numOfPendingFileRecoverables = dataInputView.readInt();
final List<InProgressFileWriter.PendingFileRecoverable> pendingFileRecoverables =
new ArrayList<>(numOfPendingFileRecoverables);
for (int j = 0; j < numOfPendingFileRecoverables; j++) {
final byte[] bytes = new byte[dataInputView.readInt()];
dataInputView.readFully(bytes);
pendingFileRecoverables.add(
pendingFileParser.apply(
pendingFileRecoverableSerializerVersion, bytes));
}
pendingFileRecoverablesPerCheckpoint.put(checkpointId, pendingFileRecoverables);
}
}
return new FileWriterBucketState(
bucketId,
new Path(bucketPathStr),
creationTime,
current,
pendingFileRecoverablesPerCheckpoint);
}
private void validateMagicNumber(DataInputView in) throws IOException {
int magicNumber = in.readInt();
if (magicNumber != MAGIC_NUMBER) {
throw new IOException(
String.format("Corrupt data: Unexpected magic number %08X", magicNumber));
}
}
private SimpleVersionedSerializer<RecoverableWriter.ResumeRecoverable>
getResumableSerializer() {
final OutputStreamBasedPartFileWriter.OutputStreamBasedInProgressFileRecoverableSerializer
outputStreamBasedInProgressFileRecoverableSerializer =
(OutputStreamBasedPartFileWriter
.OutputStreamBasedInProgressFileRecoverableSerializer)
inProgressFileRecoverableSerializer;
return outputStreamBasedInProgressFileRecoverableSerializer.getResumeSerializer();
}
private SimpleVersionedSerializer<RecoverableWriter.CommitRecoverable>
getCommitableSerializer() {
final OutputStreamBasedPartFileWriter.OutputStreamBasedPendingFileRecoverableSerializer
outputStreamBasedPendingFileRecoverableSerializer =
(OutputStreamBasedPartFileWriter
.OutputStreamBasedPendingFileRecoverableSerializer)
pendingFileRecoverableSerializer;
return outputStreamBasedPendingFileRecoverableSerializer.getCommitSerializer();
}
}
|
FileWriterBucketStateSerializer
|
java
|
quarkusio__quarkus
|
extensions/kafka-client/runtime-dev/src/main/java/io/quarkus/kafka/client/runtime/dev/ui/model/request/KafkaMessagesRequest.java
|
{
"start": 153,
"end": 1244
}
|
class ____ {
private String topicName;
private Order order;
private int pageSize;
private Integer pageNumber;
private Map<Integer, Long> partitionOffset;
public KafkaMessagesRequest() {
}
public KafkaMessagesRequest(String topicName, Order order, int pageSize, int pageNumber) {
this.topicName = topicName;
this.order = order;
this.pageSize = pageSize;
this.pageNumber = pageNumber;
}
public KafkaMessagesRequest(String topicName, Order order, int pageSize, Map<Integer, Long> partitionOffset) {
this.topicName = topicName;
this.order = order;
this.pageSize = pageSize;
this.partitionOffset = partitionOffset;
}
public String getTopicName() {
return topicName;
}
public Order getOrder() {
return order;
}
public int getPageSize() {
return pageSize;
}
public int getPageNumber() {
return pageNumber;
}
public Map<Integer, Long> getPartitionOffset() {
return partitionOffset;
}
}
|
KafkaMessagesRequest
|
java
|
dropwizard__dropwizard
|
dropwizard-logging/src/main/java/io/dropwizard/logging/common/AppenderFactory.java
|
{
"start": 1074,
"end": 2006
}
|
interface ____<E extends DeferredProcessingAware> extends Discoverable {
/**
* Given a Logback context, an application name, a layout,
* a levelFilterFactory, and an asyncAppenderFactory build a new appender.
*
* @param context the Logback context
* @param applicationName the application name
* @param layoutFactory the factory for the layout for logging
* @param levelFilterFactory the factory for the level filter
* @param asyncAppenderFactory the factory for the async appender
* @return a new, started {@link Appender}
*/
Appender<E> build(LoggerContext context,
String applicationName,
LayoutFactory<E> layoutFactory,
LevelFilterFactory<E> levelFilterFactory,
AsyncAppenderFactory<E> asyncAppenderFactory);
}
|
AppenderFactory
|
java
|
bumptech__glide
|
integration/sqljournaldiskcache/src/main/java/com/bumptech/glide/integration/sqljournaldiskcache/SizeJournal.java
|
{
"start": 4039,
"end": 4565
}
|
class ____ implements SQLiteTransactionListener, Poolable {
private long updatedSize;
void clear() {
updatedSize = 0;
}
@Override
public void onBegin() {}
@Override
public void onCommit() {}
@Override
public void onRollback() {
// Revert the increment of size on transaction failure.
size.addAndGet(-updatedSize);
}
@NonNull
@Override
public StateVerifier getVerifier() {
return StateVerifier.newInstance();
}
}
}
|
SizeSQLiteTransactionListener
|
java
|
apache__camel
|
components/camel-platform-http-vertx/src/test/java/org/apache/camel/component/platform/http/vertx/VertxPlatformHttpJacksonTest.java
|
{
"start": 1141,
"end": 2111
}
|
class ____ {
@Test
public void testJackson() throws Exception {
final CamelContext context = VertxPlatformHttpEngineTest.createCamelContext();
try {
// turn on jackson type converter
context.getGlobalOptions().put(JacksonConstants.ENABLE_TYPE_CONVERTER, "true");
context.addRoutes(new RouteBuilder() {
@Override
public void configure() {
from("platform-http:/hello")
.setBody().constant("{\"hello\": \"world\"}")
.unmarshal().json();
}
});
context.start();
given()
.when()
.get("/hello")
.then()
.statusCode(200)
.body(equalTo("{\"hello\":\"world\"}"));
} finally {
context.stop();
}
}
}
|
VertxPlatformHttpJacksonTest
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/deser/filter/ReadOrWriteOnlyTest.java
|
{
"start": 586,
"end": 987
}
|
class ____ {
@JsonProperty(access=JsonProperty.Access.READ_ONLY)
public int x = 1;
@JsonProperty(access=JsonProperty.Access.WRITE_ONLY)
public int y = 2;
public void setX(int x) {
throw new Error("Should NOT set x");
}
public int getY() {
throw new Error("Should NOT get y");
}
}
public static
|
ReadXWriteY
|
java
|
quarkusio__quarkus
|
extensions/hibernate-orm/runtime/src/main/java/io/quarkus/hibernate/orm/runtime/service/StandardHibernateORMInitiatorListProvider.java
|
{
"start": 2545,
"end": 5856
}
|
class ____ implements InitialInitiatorListProvider {
@Override
public List<StandardServiceInitiator<?>> initialInitiatorList() {
// Note to maintainers: always remember to check for consistency needs with both:
// io.quarkus.hibernate.orm.runtime.boot.registry.PreconfiguredServiceRegistryBuilder#buildQuarkusServiceInitiatorList(RecordedState)
// and ReactiveHibernateInitiatorListProvider
final ArrayList<StandardServiceInitiator<?>> serviceInitiators = new ArrayList<>();
//This one needs to be replaced after Metadata has been recorded:
serviceInitiators.add(BootstrapOnlyProxyFactoryFactoryInitiator.INSTANCE);
serviceInitiators.add(CfgXmlAccessServiceInitiator.INSTANCE);
serviceInitiators.add(ConfigurationServiceInitiator.INSTANCE);
serviceInitiators.add(PropertyAccessStrategyResolverInitiator.INSTANCE);
// Custom one!
serviceInitiators.add(QuarkusImportSqlCommandExtractorInitiator.INSTANCE);
serviceInitiators.add(SchemaManagementToolInitiator.INSTANCE);
serviceInitiators.add(JdbcEnvironmentInitiator.INSTANCE);
// Custom one!
serviceInitiators.add(QuarkusJndiServiceInitiator.INSTANCE);
serviceInitiators.add(PersisterClassResolverInitiator.INSTANCE);
serviceInitiators.add(PersisterFactoryInitiator.INSTANCE);
// Custom one!
serviceInitiators.add(QuarkusStaticInitConnectionProviderInitiator.INSTANCE);
serviceInitiators.add(MultiTenantConnectionProviderInitiator.INSTANCE);
serviceInitiators.add(DialectResolverInitiator.INSTANCE);
// Custom one!
serviceInitiators.add(QuarkusStaticInitDialectFactoryInitiator.INSTANCE);
serviceInitiators.add(BatchBuilderInitiator.INSTANCE);
serviceInitiators.add(JdbcServicesInitiator.INSTANCE);
serviceInitiators.add(RefCursorSupportInitiator.INSTANCE);
serviceInitiators.add(QuarkusJtaPlatformInitiator.INSTANCE);
serviceInitiators.add(SessionFactoryServiceRegistryFactoryInitiator.INSTANCE);
serviceInitiators.add(QuarkusRegionFactoryInitiator.INSTANCE);
serviceInitiators.add(TransactionCoordinatorBuilderInitiator.INSTANCE);
// Replaces ManagedBeanRegistryInitiator.INSTANCE
serviceInitiators.add(QuarkusManagedBeanRegistryInitiator.INSTANCE);
serviceInitiators.add(EntityCopyObserverFactoryInitiator.INSTANCE);
// Default implementation
serviceInitiators.add(JdbcValuesMappingProducerProviderInitiator.INSTANCE);
// Default implementation
serviceInitiators.add(SqmMultiTableMutationStrategyProviderInitiator.INSTANCE);
// Default implementation
serviceInitiators.add(ParameterMarkerStrategyInitiator.INSTANCE);
// Default implementation
serviceInitiators.add(BatchLoaderFactoryInitiator.INSTANCE);
// Default implementation
serviceInitiators.add(SqlStatementLoggerInitiator.INSTANCE);
// Custom Quarkus implementation: overrides the internal cache to leverage Caffeine
serviceInitiators.add(QuarkusInternalCacheFactoryInitiator.INSTANCE);
serviceInitiators.trimToSize();
return serviceInitiators;
}
}
|
StandardHibernateORMInitiatorListProvider
|
java
|
netty__netty
|
transport/src/main/java/io/netty/channel/group/ChannelMatchers.java
|
{
"start": 4859,
"end": 5181
}
|
class ____ implements ChannelMatcher {
private final Class<? extends Channel> clazz;
ClassMatcher(Class<? extends Channel> clazz) {
this.clazz = clazz;
}
@Override
public boolean matches(Channel ch) {
return clazz.isInstance(ch);
}
}
}
|
ClassMatcher
|
java
|
reactor__reactor-core
|
reactor-test/src/main/java/reactor/test/DefaultStepVerifierBuilder.java
|
{
"start": 55363,
"end": 56422
}
|
class ____<T> extends DefaultVerifySubscriber<T>
implements Fuseable.ConditionalSubscriber<T> {
final Predicate<? super T> tryOnNextPredicate;
/**
* @param tryOnNextPredicate the {@link Predicate} that drives {@link #tryOnNext(Object)} behavior
*/
DefaultConditionalVerifySubscriber(List<Event<T>> script,
MessageFormatter messageFormatter,
long initialRequest,
int requestedFusionMode,
int expectedFusionMode,
boolean debugEnabled,
@Nullable Context initialContext,
@Nullable VirtualTimeScheduler vts,
@Nullable Disposable postVerifyCleanup,
Predicate<? super T> tryOnNextPredicate) {
super(script, messageFormatter, initialRequest, requestedFusionMode, expectedFusionMode, debugEnabled, initialContext, vts, postVerifyCleanup);
this.tryOnNextPredicate = tryOnNextPredicate;
}
@Override
public boolean tryOnNext(T t) {
boolean consumed = tryOnNextPredicate.test(t);
if (consumed) {
onNext(t); //record the value
}
return consumed;
}
}
static
|
DefaultConditionalVerifySubscriber
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/index/analysis/AbstractCharFilterFactory.java
|
{
"start": 528,
"end": 784
}
|
class ____ implements CharFilterFactory {
private final String name;
public AbstractCharFilterFactory(String name) {
this.name = name;
}
@Override
public String name() {
return this.name;
}
}
|
AbstractCharFilterFactory
|
java
|
apache__flink
|
flink-table/flink-sql-parser/src/main/java/org/apache/flink/sql/parser/dql/SqlShowDatabases.java
|
{
"start": 1417,
"end": 2599
}
|
class ____ extends SqlShowCall {
public static final SqlSpecialOperator OPERATOR =
new SqlSpecialOperator("SHOW DATABASES", SqlKind.OTHER);
public SqlShowDatabases(
SqlParserPos pos,
String preposition,
SqlIdentifier catalogName,
String likeType,
SqlCharStringLiteral likeLiteral,
boolean notLike)
throws ParseException {
super(pos, preposition, catalogName, likeType, likeLiteral, notLike);
if (catalogName != null && catalogName.names.size() > 1) {
throw new ParseException(
String.format(
"Show databases from/in identifier [ %s ] format error, catalog must be a single part identifier.",
String.join(".", catalogName.names)));
}
}
public String getCatalogName() {
return getSqlIdentifierNameList().isEmpty() ? null : getSqlIdentifierNameList().get(0);
}
@Override
public SqlOperator getOperator() {
return OPERATOR;
}
@Override
String getOperationName() {
return "SHOW DATABASES";
}
}
|
SqlShowDatabases
|
java
|
junit-team__junit5
|
junit-platform-engine/src/main/java/org/junit/platform/engine/support/hierarchical/DefaultParallelExecutionConfigurationStrategy.java
|
{
"start": 8388,
"end": 9069
}
|
class ____ of the
* {@link ParallelExecutionConfigurationStrategy} to be used by the
* {@link #CUSTOM} configuration strategy.
*
* @see #CUSTOM
*/
public static final String CONFIG_CUSTOM_CLASS_PROPERTY_NAME = "custom.class";
static ParallelExecutionConfiguration toConfiguration(ConfigurationParameters configurationParameters) {
return getStrategy(configurationParameters).createConfiguration(configurationParameters);
}
static ParallelExecutionConfigurationStrategy getStrategy(ConfigurationParameters configurationParameters) {
return valueOf(
configurationParameters.get(CONFIG_STRATEGY_PROPERTY_NAME).orElse("dynamic").toUpperCase(Locale.ROOT));
}
}
|
name
|
java
|
apache__logging-log4j2
|
log4j-api/src/main/java/org/apache/logging/log4j/internal/LogManagerStatus.java
|
{
"start": 914,
"end": 1182
}
|
class ____ {
private static boolean initialized = false;
public static void setInitialized(final boolean managerStatus) {
initialized = managerStatus;
}
public static boolean isInitialized() {
return initialized;
}
}
|
LogManagerStatus
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.