language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | apache__camel | tooling/maven/camel-package-maven-plugin/src/main/java/org/apache/camel/maven/packaging/GeneratePojoBeanMojo.java | {
"start": 3106,
"end": 4731
} | class ____ {
private String name;
private String title;
private String className;
private String interfaceName;
private String description;
private boolean deprecated;
private final List<BeanPojoOptionModel> options = new ArrayList<>();
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getTitle() {
return title;
}
public void setTitle(String title) {
this.title = title;
}
public String getClassName() {
return className;
}
public void setClassName(String className) {
this.className = className;
}
public String getInterfaceName() {
return interfaceName;
}
public void setInterfaceName(String interfaceName) {
this.interfaceName = interfaceName;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
public boolean isDeprecated() {
return deprecated;
}
public void setDeprecated(boolean deprecated) {
this.deprecated = deprecated;
}
public void addOption(BeanPojoOptionModel option) {
this.options.add(option);
}
public List<BeanPojoOptionModel> getOptions() {
return options;
}
}
private static | BeanPojoModel |
java | mockito__mockito | mockito-core/src/main/java/org/mockito/exceptions/base/MockitoException.java | {
"start": 1023,
"end": 1768
} | class ____ extends RuntimeException {
private static final long serialVersionUID = 1L;
private StackTraceElement[] unfilteredStackTrace;
// TODO lazy filtered stacktrace initialization
public MockitoException(String message, Throwable t) {
super(message, t);
filterStackTrace();
}
public MockitoException(String message) {
super(message);
filterStackTrace();
}
private void filterStackTrace() {
unfilteredStackTrace = getStackTrace();
ConditionalStackTraceFilter filter = new ConditionalStackTraceFilter();
filter.filter(this);
}
public StackTraceElement[] getUnfilteredStackTrace() {
return unfilteredStackTrace;
}
}
| MockitoException |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/strategy/TestingSchedulingTopology.java | {
"start": 15567,
"end": 17374
} | class ____ {
protected final List<TestingSchedulingExecutionVertex> producers;
protected final List<TestingSchedulingExecutionVertex> consumers;
protected ResultPartitionType resultPartitionType = ResultPartitionType.BLOCKING;
protected ResultPartitionState resultPartitionState =
ResultPartitionState.ALL_DATA_PRODUCED;
protected ProducerConsumerConnectionBuilder(
final List<TestingSchedulingExecutionVertex> producers,
final List<TestingSchedulingExecutionVertex> consumers) {
this.producers = producers;
this.consumers = consumers;
}
public ProducerConsumerConnectionBuilder withResultPartitionType(
final ResultPartitionType resultPartitionType) {
this.resultPartitionType = resultPartitionType;
return this;
}
public ProducerConsumerConnectionBuilder withResultPartitionState(
final ResultPartitionState state) {
this.resultPartitionState = state;
return this;
}
public List<TestingSchedulingResultPartition> finish() {
final List<TestingSchedulingResultPartition> resultPartitions = connect();
producers.stream()
.forEach(TestingSchedulingTopology.this::updateVertexResultPartitions);
consumers.stream()
.forEach(TestingSchedulingTopology.this::updateVertexResultPartitions);
return resultPartitions;
}
protected abstract List<TestingSchedulingResultPartition> connect();
}
/**
* Builder for {@link TestingSchedulingResultPartition} of {@link
* DistributionPattern#POINTWISE}.
*/
private | ProducerConsumerConnectionBuilder |
java | apache__dubbo | dubbo-serialization/dubbo-serialization-api/src/main/java/org/apache/dubbo/common/serialize/DefaultMultipleSerialization.java | {
"start": 976,
"end": 2229
} | class ____ implements MultipleSerialization {
@Override
public void serialize(URL url, String serializeType, Class<?> clz, Object obj, OutputStream os) throws IOException {
serializeType = convertHessian(serializeType);
final Serialization serialization = url.getOrDefaultFrameworkModel()
.getExtensionLoader(Serialization.class)
.getExtension(serializeType);
final ObjectOutput serialize = serialization.serialize(null, os);
serialize.writeObject(obj);
serialize.flushBuffer();
}
@Override
public Object deserialize(URL url, String serializeType, Class<?> clz, InputStream os)
throws IOException, ClassNotFoundException {
serializeType = convertHessian(serializeType);
final Serialization serialization = url.getOrDefaultFrameworkModel()
.getExtensionLoader(Serialization.class)
.getExtension(serializeType);
final ObjectInput in = serialization.deserialize(null, os);
return in.readObject(clz);
}
private String convertHessian(String ser) {
if (ser.equals("hessian4")) {
return "hessian2";
}
return ser;
}
}
| DefaultMultipleSerialization |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutComponentTemplateAction.java | {
"start": 1731,
"end": 5319
} | class ____ extends AcknowledgedTransportMasterNodeAction<PutComponentTemplateAction.Request> {
private final MetadataIndexTemplateService indexTemplateService;
private final ProjectResolver projectResolver;
@Inject
public TransportPutComponentTemplateAction(
TransportService transportService,
ClusterService clusterService,
ThreadPool threadPool,
MetadataIndexTemplateService indexTemplateService,
ActionFilters actionFilters,
ProjectResolver projectResolver
) {
super(
PutComponentTemplateAction.NAME,
transportService,
clusterService,
threadPool,
actionFilters,
PutComponentTemplateAction.Request::new,
EsExecutors.DIRECT_EXECUTOR_SERVICE
);
this.indexTemplateService = indexTemplateService;
this.projectResolver = projectResolver;
}
@Override
protected ClusterBlockException checkBlock(PutComponentTemplateAction.Request request, ClusterState state) {
return state.blocks().globalBlockedException(projectResolver.getProjectId(), ClusterBlockLevel.METADATA_WRITE);
}
@Override
protected void masterOperation(
Task task,
final PutComponentTemplateAction.Request request,
final ClusterState state,
final ActionListener<AcknowledgedResponse> listener
) throws Exception {
final var project = projectResolver.getProjectMetadata(state);
final ComponentTemplate componentTemplate = indexTemplateService.normalizeComponentTemplate(request.componentTemplate());
final ComponentTemplate existingTemplate = project.componentTemplates().get(request.name());
if (existingTemplate != null) {
if (request.create()) {
listener.onFailure(new IllegalArgumentException("component template [" + request.name() + "] already exists"));
return;
}
// We have an early return here in case the component template already exists and is identical in content. We still need to do
// this check in the cluster state update task in case the cluster state changed since this check.
if (componentTemplate.contentEquals(existingTemplate)) {
listener.onResponse(AcknowledgedResponse.TRUE);
return;
}
}
indexTemplateService.putComponentTemplate(
request.cause(),
request.create(),
request.name(),
request.masterNodeTimeout(),
componentTemplate,
project.id(),
listener
);
}
@Override
public Optional<String> reservedStateHandlerName() {
return Optional.of(ReservedComposableIndexTemplateAction.NAME);
}
@Override
public Set<String> modifiedKeys(PutComponentTemplateAction.Request request) {
return Set.of(ReservedComposableIndexTemplateAction.reservedComponentName(request.name()));
}
@Override
@FixForMultiProject // does this need to be a more general concept?
protected void validateForReservedState(PutComponentTemplateAction.Request request, ClusterState state) {
super.validateForReservedState(request, state);
validateForReservedState(
ProjectStateRegistry.get(state).reservedStateMetadata(projectResolver.getProjectId()).values(),
reservedStateHandlerName().get(),
modifiedKeys(request),
request::toString
);
}
}
| TransportPutComponentTemplateAction |
java | elastic__elasticsearch | x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/TransformConfigAutoMigration.java | {
"start": 1673,
"end": 1789
} | class ____ more designed for serverless rolling updates to apply a smaller non-breaking subset of changes.
*/
public | is |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialEvaluatorFactory.java | {
"start": 8164,
"end": 10703
} | class ____ extends SpatialEvaluatorFactory<
EvalOperator.ExpressionEvaluator.Factory,
Component2D[]> {
SpatialEvaluatorWithConstantArrayFactory(
TriFunction<
Source,
EvalOperator.ExpressionEvaluator.Factory,
Component2D[],
EvalOperator.ExpressionEvaluator.Factory> factoryCreator
) {
super(factoryCreator);
}
@Override
public EvalOperator.ExpressionEvaluator.Factory get(SpatialSourceSupplier s, EvaluatorMapper.ToEvaluator toEvaluator) {
return factoryCreator.apply(
s.source(),
toEvaluator.apply(s.left()),
asLuceneComponent2Ds(toEvaluator.foldCtx(), s.crsType(), s.right())
);
}
}
protected record SpatialEvaluatorFieldKey(DataType dataType, boolean isConstant) {}
record SpatialEvaluatorKey(
BinarySpatialFunction.SpatialCrsType crsType,
boolean leftDocValues,
boolean rightDocValues,
SpatialEvaluatorFieldKey left,
SpatialEvaluatorFieldKey right
) {
SpatialEvaluatorKey(BinarySpatialFunction.SpatialCrsType crsType, SpatialEvaluatorFieldKey left, SpatialEvaluatorFieldKey right) {
this(crsType, false, false, left, right);
}
SpatialEvaluatorKey withLeftDocValues() {
return new SpatialEvaluatorKey(crsType, true, false, left, right);
}
SpatialEvaluatorKey swapSides() {
return new SpatialEvaluatorKey(crsType, rightDocValues, leftDocValues, right, left);
}
static SpatialEvaluatorKey fromSourceAndConstant(DataType left, DataType right) {
return new SpatialEvaluatorKey(
BinarySpatialFunction.SpatialCrsType.fromDataType(left),
new SpatialEvaluatorFieldKey(left, false),
new SpatialEvaluatorFieldKey(right, true)
);
}
static SpatialEvaluatorKey fromSources(DataType left, DataType right) {
return new SpatialEvaluatorKey(
BinarySpatialFunction.SpatialCrsType.fromDataType(left),
new SpatialEvaluatorFieldKey(left, false),
new SpatialEvaluatorFieldKey(right, false)
);
}
UnsupportedOperationException unsupported() {
return new UnsupportedOperationException("Unsupported spatial relation combination: " + this);
}
}
}
| SpatialEvaluatorWithConstantArrayFactory |
java | resilience4j__resilience4j | resilience4j-spring/src/main/java/io/github/resilience4j/bulkhead/configure/BulkheadConfiguration.java | {
"start": 2628,
"end": 9820
} | class ____ {
@Bean
@Qualifier("compositeBulkheadCustomizer")
public CompositeCustomizer<BulkheadConfigCustomizer> compositeBulkheadCustomizer(
@Autowired(required = false) List<BulkheadConfigCustomizer> customizers) {
return new CompositeCustomizer<>(customizers);
}
/**
* @param bulkheadConfigurationProperties bulk head spring configuration properties
* @param bulkheadEventConsumerRegistry the bulk head event consumer registry
* @return the BulkheadRegistry with all needed setup in place
*/
@Bean
public BulkheadRegistry bulkheadRegistry(
BulkheadConfigurationProperties bulkheadConfigurationProperties,
EventConsumerRegistry<BulkheadEvent> bulkheadEventConsumerRegistry,
RegistryEventConsumer<Bulkhead> bulkheadRegistryEventConsumer,
@Qualifier("compositeBulkheadCustomizer") CompositeCustomizer<BulkheadConfigCustomizer> compositeBulkheadCustomizer) {
BulkheadRegistry bulkheadRegistry = createBulkheadRegistry(bulkheadConfigurationProperties,
bulkheadRegistryEventConsumer, compositeBulkheadCustomizer);
registerEventConsumer(bulkheadRegistry, bulkheadEventConsumerRegistry,
bulkheadConfigurationProperties);
initBulkheadRegistry(bulkheadConfigurationProperties, compositeBulkheadCustomizer, bulkheadRegistry);
return bulkheadRegistry;
}
/**
* Initializes the Bulkhead registry with resilience4j instances.
*
* @param bulkheadRegistry The bulkhead registry.
* @param compositeBulkheadCustomizer customizers for instances and configs
*/
private void initBulkheadRegistry(BulkheadConfigurationProperties bulkheadConfigurationProperties,
CompositeCustomizer<BulkheadConfigCustomizer> compositeBulkheadCustomizer, BulkheadRegistry bulkheadRegistry) {
bulkheadConfigurationProperties.getInstances().forEach((name, properties) ->
bulkheadRegistry.bulkhead(name,
bulkheadConfigurationProperties.createBulkheadConfig(properties, compositeBulkheadCustomizer, name)));
compositeBulkheadCustomizer.instanceNames()
.stream()
.filter(name -> bulkheadRegistry.getConfiguration(name).isEmpty())
.forEach(name -> bulkheadRegistry.bulkhead(name,
bulkheadConfigurationProperties.createBulkheadConfig(null, compositeBulkheadCustomizer, name)));
}
@Bean
@Primary
public RegistryEventConsumer<Bulkhead> bulkheadRegistryEventConsumer(
Optional<List<RegistryEventConsumer<Bulkhead>>> optionalRegistryEventConsumers) {
return new CompositeRegistryEventConsumer<>(
optionalRegistryEventConsumers.orElseGet(ArrayList::new));
}
/**
* Initializes a bulkhead registry.
*
* @param bulkheadConfigurationProperties The bulkhead configuration properties.
* @param compositeBulkheadCustomizer
* @return a BulkheadRegistry
*/
private BulkheadRegistry createBulkheadRegistry(
BulkheadConfigurationProperties bulkheadConfigurationProperties,
RegistryEventConsumer<Bulkhead> bulkheadRegistryEventConsumer,
CompositeCustomizer<BulkheadConfigCustomizer> compositeBulkheadCustomizer) {
Map<String, BulkheadConfig> configs = bulkheadConfigurationProperties.getConfigs()
.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey,
entry -> bulkheadConfigurationProperties.createBulkheadConfig(entry.getValue(),
compositeBulkheadCustomizer, entry.getKey())));
return BulkheadRegistry.of(configs, bulkheadRegistryEventConsumer, Map.copyOf(bulkheadConfigurationProperties.getTags()));
}
/**
* Registers the post creation consumer function that registers the consumer events to the
* bulkheads.
*
* @param bulkheadRegistry The BulkHead registry.
* @param eventConsumerRegistry The event consumer registry.
*/
private void registerEventConsumer(BulkheadRegistry bulkheadRegistry,
EventConsumerRegistry<BulkheadEvent> eventConsumerRegistry,
BulkheadConfigurationProperties properties) {
bulkheadRegistry.getEventPublisher()
.onEntryAdded(event -> registerEventConsumer(eventConsumerRegistry, event.getAddedEntry(), properties))
.onEntryReplaced(event -> registerEventConsumer(eventConsumerRegistry, event.getNewEntry(), properties))
.onEntryRemoved(event -> unregisterEventConsumer(eventConsumerRegistry, event.getRemovedEntry()));
}
private void unregisterEventConsumer(EventConsumerRegistry<BulkheadEvent> eventConsumerRegistry, Bulkhead bulkHead) {
eventConsumerRegistry.removeEventConsumer(bulkHead.getName());
}
private void registerEventConsumer(EventConsumerRegistry<BulkheadEvent> eventConsumerRegistry,
Bulkhead bulkHead, BulkheadConfigurationProperties bulkheadConfigurationProperties) {
int eventConsumerBufferSize = Optional
.ofNullable(bulkheadConfigurationProperties.getBackendProperties(bulkHead.getName()))
.map(InstanceProperties::getEventConsumerBufferSize)
.orElse(100);
bulkHead.getEventPublisher().onEvent(
eventConsumerRegistry.createEventConsumer(bulkHead.getName(), eventConsumerBufferSize));
}
@Bean
@Conditional(value = {AspectJOnClasspathCondition.class})
public BulkheadAspect bulkheadAspect(
BulkheadConfigurationProperties bulkheadConfigurationProperties,
ThreadPoolBulkheadRegistry threadPoolBulkheadRegistry,
BulkheadRegistry bulkheadRegistry,
@Autowired(required = false) List<BulkheadAspectExt> bulkHeadAspectExtList,
FallbackExecutor fallbackExecutor,
SpelResolver spelResolver
) {
return new BulkheadAspect(bulkheadConfigurationProperties, threadPoolBulkheadRegistry,
bulkheadRegistry, bulkHeadAspectExtList, fallbackExecutor, spelResolver);
}
@Bean
@Conditional(value = {RxJava2OnClasspathCondition.class, AspectJOnClasspathCondition.class})
public RxJava2BulkheadAspectExt rxJava2BulkHeadAspectExt() {
return new RxJava2BulkheadAspectExt();
}
@Bean
@Conditional(value = {RxJava3OnClasspathCondition.class, AspectJOnClasspathCondition.class})
public RxJava3BulkheadAspectExt rxJava3BulkHeadAspectExt() {
return new RxJava3BulkheadAspectExt();
}
@Bean
@Conditional(value = {ReactorOnClasspathCondition.class, AspectJOnClasspathCondition.class})
public ReactorBulkheadAspectExt reactorBulkHeadAspectExt() {
return new ReactorBulkheadAspectExt();
}
/**
* The EventConsumerRegistry is used to manage EventConsumer instances. The
* EventConsumerRegistry is used by the BulkheadHealthIndicator to show the latest Bulkhead
* events for each Bulkhead instance.
*
* @return a default EventConsumerRegistry {@link DefaultEventConsumerRegistry}
*/
@Bean
public EventConsumerRegistry<BulkheadEvent> bulkheadEventConsumerRegistry() {
return new DefaultEventConsumerRegistry<>();
}
}
| BulkheadConfiguration |
java | google__dagger | javatests/dagger/hilt/android/testsubpackage/UsesSharedComponent1Test.java | {
"start": 2031,
"end": 2186
} | class ____ matches the simple name of {@link
* dagger.hilt.android.UsesSharedComponent1Test}. This is intentional and used to verify generated
* code | exactly |
java | apache__camel | components/camel-soap/src/main/java/org/apache/camel/dataformat/soap/SoapConstants.java | {
"start": 859,
"end": 1046
} | class ____ {
public static String SOAP_METHOD_NAME = "CamelSoapMethodName";
public static String SOAP_ACTION = "CamelSoapAction";
private SoapConstants() {
}
}
| SoapConstants |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/writer/TimelineEntitySetWriter.java | {
"start": 1653,
"end": 2738
} | class ____ implements MessageBodyWriter<Set<TimelineEntity>> {
private ObjectMapper objectMapper = new ObjectMapper();
private String timelineEntityType =
"java.util.Set<org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity>";
@Override
public boolean isWriteable(Class<?> type, Type genericType,
Annotation[] annotations, MediaType mediaType) {
return timelineEntityType.equals(genericType.getTypeName());
}
@Override
public void writeTo(Set<TimelineEntity> timelinePutResponse, Class<?> type,
Type genericType, Annotation[] annotations, MediaType mediaType,
MultivaluedMap<String, Object> httpHeaders, OutputStream entityStream)
throws IOException, WebApplicationException {
String entity = objectMapper.writeValueAsString(timelinePutResponse);
entityStream.write(entity.getBytes(StandardCharsets.UTF_8));
}
@Override
public long getSize(Set<TimelineEntity> timelineEntities, Class<?> type, Type genericType,
Annotation[] annotations, MediaType mediaType) {
return -1L;
}
}
| TimelineEntitySetWriter |
java | elastic__elasticsearch | plugins/analysis-nori/src/main/java/org/elasticsearch/plugin/analysis/nori/NoriPartOfSpeechStopFilterFactory.java | {
"start": 1011,
"end": 1882
} | class ____ extends AbstractTokenFilterFactory {
private final Set<POS.Tag> stopTags;
public NoriPartOfSpeechStopFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(name);
List<String> tagList = Analysis.getWordList(env, settings, "stoptags");
this.stopTags = tagList != null ? resolvePOSList(tagList) : KoreanPartOfSpeechStopFilter.DEFAULT_STOP_TAGS;
}
@Override
public TokenStream create(TokenStream tokenStream) {
return new KoreanPartOfSpeechStopFilter(tokenStream, stopTags);
}
static Set<POS.Tag> resolvePOSList(List<String> tagList) {
Set<POS.Tag> stopTags = EnumSet.noneOf(POS.Tag.class);
for (String tag : tagList) {
stopTags.add(POS.resolveTag(tag));
}
return stopTags;
}
}
| NoriPartOfSpeechStopFilterFactory |
java | apache__camel | components/camel-spring-parent/camel-spring-ws/src/generated/java/org/apache/camel/component/spring/ws/SpringWebserviceComponentConfigurer.java | {
"start": 736,
"end": 3158
} | class ____ extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
SpringWebserviceComponent target = (SpringWebserviceComponent) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "autowiredenabled":
case "autowiredEnabled": target.setAutowiredEnabled(property(camelContext, boolean.class, value)); return true;
case "bridgeerrorhandler":
case "bridgeErrorHandler": target.setBridgeErrorHandler(property(camelContext, boolean.class, value)); return true;
case "lazystartproducer":
case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
case "useglobalsslcontextparameters":
case "useGlobalSslContextParameters": target.setUseGlobalSslContextParameters(property(camelContext, boolean.class, value)); return true;
default: return false;
}
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "autowiredenabled":
case "autowiredEnabled": return boolean.class;
case "bridgeerrorhandler":
case "bridgeErrorHandler": return boolean.class;
case "lazystartproducer":
case "lazyStartProducer": return boolean.class;
case "useglobalsslcontextparameters":
case "useGlobalSslContextParameters": return boolean.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
SpringWebserviceComponent target = (SpringWebserviceComponent) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "autowiredenabled":
case "autowiredEnabled": return target.isAutowiredEnabled();
case "bridgeerrorhandler":
case "bridgeErrorHandler": return target.isBridgeErrorHandler();
case "lazystartproducer":
case "lazyStartProducer": return target.isLazyStartProducer();
case "useglobalsslcontextparameters":
case "useGlobalSslContextParameters": return target.isUseGlobalSslContextParameters();
default: return null;
}
}
}
| SpringWebserviceComponentConfigurer |
java | micronaut-projects__micronaut-core | http-client-core/src/main/java/io/micronaut/http/client/bind/binders/AttributeClientRequestBinder.java | {
"start": 1192,
"end": 2245
} | class ____ implements AnnotatedClientRequestBinder<RequestAttribute> {
@Override
public void bind(
@NonNull MethodInvocationContext<Object, Object> context,
@NonNull ClientRequestUriContext uriContext,
@NonNull MutableHttpRequest<?> request
) {
List<AnnotationValue<RequestAttribute>> attributeAnnotations =
context.getAnnotationValuesByType(RequestAttribute.class);
for (AnnotationValue<RequestAttribute> attributeAnnotation : attributeAnnotations) {
String attributeName = attributeAnnotation.stringValue("name").orElse(null);
Object attributeValue = attributeAnnotation.getValue(Object.class).orElse(null);
if (StringUtils.isNotEmpty(attributeName) && attributeValue != null) {
request.setAttribute(attributeName, attributeValue);
}
}
}
@Override
@NonNull
public Class<RequestAttribute> getAnnotationType() {
return RequestAttribute.class;
}
}
| AttributeClientRequestBinder |
java | google__auto | value/src/test/java/com/google/auto/value/processor/TemplateVarsTest.java | {
"start": 2420,
"end": 3049
} | class ____ extends HappyVars {
Character character;
@Override
Template parsedTemplate() {
return parsedTemplateForString(
"integer=$integer string=$string list=$list character=$character");
}
}
@Test
public void testSubSub() {
SubHappyVars vars = new SubHappyVars();
vars.integer = 23;
vars.string = "wibble";
vars.list = ImmutableList.of(5, 17, 23);
vars.character = 'ß';
String expectedText = "integer=23 string=wibble list=[5, 17, 23] character=ß";
String actualText = vars.toText();
assertThat(actualText).isEqualTo(expectedText);
}
static | SubHappyVars |
java | mybatis__mybatis-3 | src/test/java/org/apache/ibatis/submitted/language/Name.java | {
"start": 704,
"end": 1172
} | class ____ {
private int id;
private String firstName;
private String lastName;
public String getFirstName() {
return firstName;
}
public void setFirstName(String firstName) {
this.firstName = firstName;
}
public String getLastName() {
return lastName;
}
public void setLastName(String lastName) {
this.lastName = lastName;
}
public int getId() {
return id;
}
public void setId(int id) {
this.id = id;
}
}
| Name |
java | apache__logging-log4j2 | log4j-api-test/src/main/java/org/apache/logging/log4j/test/junit/ThreadContextExtension.java | {
"start": 1129,
"end": 2549
} | class ____ implements BeforeEachCallback, AfterEachCallback {
@Override
public void beforeEach(final ExtensionContext context) throws Exception {
final Class<?> testClass = context.getRequiredTestClass();
final ThreadContextHolder holder;
if (testClass.isAnnotationPresent(UsingAnyThreadContext.class)) {
holder = new ThreadContextHolder(true, true);
ThreadContext.clearAll();
} else if (testClass.isAnnotationPresent(UsingThreadContextMap.class)) {
holder = new ThreadContextHolder(true, false);
ThreadContext.clearMap();
} else if (testClass.isAnnotationPresent(UsingThreadContextStack.class)) {
holder = new ThreadContextHolder(false, true);
ThreadContext.clearStack();
} else {
return;
}
getStore(context).put(ThreadContextHolder.class, holder);
}
@Override
public void afterEach(final ExtensionContext context) throws Exception {
final ThreadContextHolder holder = getStore(context).get(ThreadContextHolder.class, ThreadContextHolder.class);
if (holder != null) {
holder.restore();
}
}
private ExtensionContext.Store getStore(final ExtensionContext context) {
return context.getStore(ExtensionContext.Namespace.create(getClass(), context.getRequiredTestInstance()));
}
}
| ThreadContextExtension |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/node/NodeClosedException.java | {
"start": 758,
"end": 1160
} | class ____ extends ElasticsearchException {
public NodeClosedException(DiscoveryNode node) {
super("node closed " + node);
}
public NodeClosedException(StreamInput in) throws IOException {
super(in);
}
@Override
public Throwable fillInStackTrace() {
return this; // this exception doesn't imply a bug, no need for a stack trace
}
}
| NodeClosedException |
java | hibernate__hibernate-orm | hibernate-testing/src/main/java/org/hibernate/testing/DialectChecks.java | {
"start": 2975,
"end": 3205
} | class ____ implements DialectCheck {
public boolean isMatch(Dialect dialect) {
return dialect.getLimitHandler().supportsLimit() && dialect.getLimitHandler().supportsLimitOffset();
}
}
public static | SupportLimitAndOffsetCheck |
java | spring-projects__spring-boot | module/spring-boot-couchbase/src/dockerTest/java/org/springframework/boot/couchbase/autoconfigure/CouchbaseAutoConfigurationIntegrationTests.java | {
"start": 1868,
"end": 3739
} | class ____ {
private static final String BUCKET_NAME = "cbbucket";
@Container
static final CouchbaseContainer couchbase = TestImage.container(CouchbaseContainer.class)
.withEnabledServices(CouchbaseService.KV)
.withCredentials("spring", "password")
.withBucket(new BucketDefinition(BUCKET_NAME).withPrimaryIndex(false));
private final ApplicationContextRunner contextRunner = new ApplicationContextRunner()
.withConfiguration(AutoConfigurations.of(CouchbaseAutoConfiguration.class))
.withPropertyValues("spring.couchbase.connection-string: " + couchbase.getConnectionString(),
"spring.couchbase.username:spring", "spring.couchbase.password:password",
"spring.couchbase.bucket.name:" + BUCKET_NAME, "spring.couchbase.env.timeouts.connect=2m",
"spring.couchbase.env.timeouts.key-value=1m");
@Test
void defaultConfiguration() {
this.contextRunner.run((context) -> {
assertThat(context).hasSingleBean(Cluster.class).hasSingleBean(ClusterEnvironment.class);
Cluster cluster = context.getBean(Cluster.class);
Bucket bucket = cluster.bucket(BUCKET_NAME);
bucket.waitUntilReady(Duration.ofMinutes(5));
DiagnosticsResult diagnostics = cluster.diagnostics();
assertThat(diagnostics.state()).isEqualTo(ClusterState.ONLINE);
});
}
@Test
void whenCouchbaseIsUsingCustomObjectMapperThenJsonCanBeRoundTripped() {
this.contextRunner.withBean(ObjectMapper.class, ObjectMapper::new).run((context) -> {
Cluster cluster = context.getBean(Cluster.class);
Bucket bucket = cluster.bucket(BUCKET_NAME);
bucket.waitUntilReady(Duration.ofMinutes(5));
Collection collection = bucket.defaultCollection();
collection.insert("test-document", JsonObject.create().put("a", "alpha"));
assertThat(collection.get("test-document").contentAsObject().get("a")).isEqualTo("alpha");
});
}
}
| CouchbaseAutoConfigurationIntegrationTests |
java | apache__camel | components/camel-disruptor/src/main/java/org/apache/camel/component/disruptor/ExchangeEvent.java | {
"start": 1001,
"end": 1834
} | class ____ {
private SynchronizedExchange synchronizedExchange;
public SynchronizedExchange getSynchronizedExchange() {
return synchronizedExchange;
}
public void setExchange(final Exchange exchange, int expectedConsumers) {
synchronizedExchange = createSynchronizedExchange(exchange, expectedConsumers);
}
private SynchronizedExchange createSynchronizedExchange(Exchange exchange, int expectedConsumers) {
if (expectedConsumers > 1) {
return new MultipleConsumerSynchronizedExchange(exchange, expectedConsumers);
} else {
return new SingleConsumerSynchronizedExchange(exchange);
}
}
@Override
public String toString() {
return "ExchangeEvent{" + "exchange=" + synchronizedExchange.getExchange() + '}';
}
}
| ExchangeEvent |
java | apache__flink | flink-filesystems/flink-hadoop-fs/src/test/java/org/apache/flink/runtime/fs/hdfs/HadoopFreeTests.java | {
"start": 1277,
"end": 1303
} | class ____.
*/
// this | loader |
java | apache__maven | impl/maven-core/src/main/java/org/apache/maven/internal/transformation/impl/PomBuilder.java | {
"start": 1319,
"end": 1507
} | interface ____ {
Model build(RepositorySystemSession session, MavenProject project, ModelSource src)
throws ModelBuilderException, IOException, XMLStreamException;
}
| PomBuilder |
java | elastic__elasticsearch | x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportDeleteAutoFollowPatternActionTests.java | {
"start": 1053,
"end": 7456
} | class ____ extends ESTestCase {
public void testInnerDelete() {
Map<String, List<String>> existingAlreadyFollowedIndexUUIDS = new HashMap<>();
Map<String, Map<String, String>> existingHeaders = new HashMap<>();
Map<String, AutoFollowPattern> existingAutoFollowPatterns = new HashMap<>();
{
List<String> existingPatterns = new ArrayList<>();
existingPatterns.add("transactions-*");
existingAutoFollowPatterns.put(
"name1",
new AutoFollowPattern(
"eu_cluster",
existingPatterns,
Collections.emptyList(),
null,
Settings.EMPTY,
true,
null,
null,
null,
null,
null,
null,
null,
null,
null,
null
)
);
List<String> existingUUIDS = new ArrayList<>();
existingUUIDS.add("_val");
existingAlreadyFollowedIndexUUIDS.put("name1", existingUUIDS);
existingHeaders.put("name1", Collections.singletonMap("key", "val"));
}
{
List<String> existingPatterns = new ArrayList<>();
existingPatterns.add("logs-*");
existingAutoFollowPatterns.put(
"name2",
new AutoFollowPattern(
"asia_cluster",
existingPatterns,
Collections.emptyList(),
null,
Settings.EMPTY,
true,
null,
null,
null,
null,
null,
null,
null,
null,
null,
null
)
);
List<String> existingUUIDS = new ArrayList<>();
existingUUIDS.add("_val");
existingAlreadyFollowedIndexUUIDS.put("name2", existingUUIDS);
existingHeaders.put("name2", Collections.singletonMap("key", "val"));
}
final var projectId = randomProjectIdOrDefault();
ClusterState clusterState = ClusterState.builder(new ClusterName("us_cluster"))
.putProjectMetadata(
ProjectMetadata.builder(projectId)
.putCustom(
AutoFollowMetadata.TYPE,
new AutoFollowMetadata(existingAutoFollowPatterns, existingAlreadyFollowedIndexUUIDS, existingHeaders)
)
)
.build();
Request request = new Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "name1");
AutoFollowMetadata result = TransportDeleteAutoFollowPatternAction.innerDelete(request, clusterState.projectState(projectId))
.getMetadata()
.getProject(projectId)
.custom(AutoFollowMetadata.TYPE);
assertThat(result.getPatterns().size(), equalTo(1));
assertThat(result.getPatterns().get("name2"), notNullValue());
assertThat(result.getPatterns().get("name2").getRemoteCluster(), equalTo("asia_cluster"));
assertThat(result.getFollowedLeaderIndexUUIDs().size(), equalTo(1));
assertThat(result.getFollowedLeaderIndexUUIDs().get("name2"), notNullValue());
assertThat(result.getHeaders().size(), equalTo(1));
assertThat(result.getHeaders().get("name2"), notNullValue());
}
public void testInnerDeleteDoesNotExist() {
Map<String, List<String>> existingAlreadyFollowedIndexUUIDS = new HashMap<>();
Map<String, AutoFollowPattern> existingAutoFollowPatterns = new HashMap<>();
Map<String, Map<String, String>> existingHeaders = new HashMap<>();
{
List<String> existingPatterns = new ArrayList<>();
existingPatterns.add("transactions-*");
existingAutoFollowPatterns.put(
"name1",
new AutoFollowPattern(
"eu_cluster",
existingPatterns,
Collections.emptyList(),
null,
Settings.EMPTY,
true,
null,
null,
null,
null,
null,
null,
null,
null,
null,
null
)
);
existingHeaders.put("key", Collections.singletonMap("key", "val"));
}
final var projectId = randomProjectIdOrDefault();
ClusterState clusterState = ClusterState.builder(new ClusterName("us_cluster"))
.putProjectMetadata(
ProjectMetadata.builder(projectId)
.putCustom(
AutoFollowMetadata.TYPE,
new AutoFollowMetadata(existingAutoFollowPatterns, existingAlreadyFollowedIndexUUIDS, existingHeaders)
)
)
.build();
Request request = new Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "name2");
Exception e = expectThrows(
ResourceNotFoundException.class,
() -> TransportDeleteAutoFollowPatternAction.innerDelete(request, clusterState.projectState(projectId))
);
assertThat(e.getMessage(), equalTo("auto-follow pattern [name2] is missing"));
}
public void testInnerDeleteNoAutoFollowMetadata() {
final var projectId = randomProjectIdOrDefault();
ClusterState clusterState = ClusterState.builder(new ClusterName("us_cluster"))
.putProjectMetadata(ProjectMetadata.builder(projectId))
.build();
Request request = new Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "name1");
Exception e = expectThrows(
ResourceNotFoundException.class,
() -> TransportDeleteAutoFollowPatternAction.innerDelete(request, clusterState.projectState(projectId))
);
assertThat(e.getMessage(), equalTo("auto-follow pattern [name1] is missing"));
}
}
| TransportDeleteAutoFollowPatternActionTests |
java | apache__camel | components/camel-cassandraql/src/main/java/org/apache/camel/processor/idempotent/cassandra/CassandraIdempotentRepository.java | {
"start": 2985,
"end": 10361
} | class ____ extends ServiceSupport implements IdempotentRepository {
private static final Logger LOGGER = LoggerFactory.getLogger(CassandraIdempotentRepository.class);
@Metadata(description = "Cassandra session", required = true)
private CassandraSessionHolder session;
@Metadata(description = "The table name for storing the data", defaultValue = "CAMEL_IDEMPOTENT")
private String table = "CAMEL_IDEMPOTENT";
@Metadata(description = "Values used as primary key prefix. Multiple values can be separated by comma.",
displayName = "Prefix Primary Key Values")
private String prefixPKValues;
@Metadata(description = "Primary key columns. Multiple values can be separated by comma.",
displayName = "Primary Key Columns",
javaType = "java.lang.String", defaultValue = "KEY")
private String pkColumns = "KEY";
@Metadata(description = "Time to live in seconds used for inserts", displayName = "Time to Live")
private Integer ttl;
@Metadata(description = "Write consistency level",
enums = "ANY,ONE,TWO,THREE,QUORUM,ALL,LOCAL_ONE,LOCAL_QUORUM,EACH_QUORUM,SERIAL,LOCAL_SERIAL")
private ConsistencyLevel writeConsistencyLevel;
@Metadata(description = "Read consistency level",
enums = "ANY,ONE,TWO,THREE,QUORUM,ALL,LOCAL_ONE,LOCAL_QUORUM,EACH_QUORUM,SERIAL,LOCAL_SERIAL")
private ConsistencyLevel readConsistencyLevel;
private PreparedStatement insertStatement;
private PreparedStatement selectStatement;
private PreparedStatement deleteStatement;
private PreparedStatement truncateStatement;
public CassandraIdempotentRepository() {
}
public CassandraIdempotentRepository(CqlSession session) {
this.session = new CassandraSessionHolder(session);
}
private boolean isKey(ResultSet resultSet) {
Row row = resultSet.one();
if (row == null) {
LOGGER.debug("No row to check key");
return false;
} else {
LOGGER.debug("Row with {} columns to check key", row.getColumnDefinitions());
int len = pkColumns.split(",").length;
return row.getColumnDefinitions().size() >= len;
}
}
protected final boolean isApplied(ResultSet resultSet) {
Row row = resultSet.one();
return row == null || row.getBoolean("[applied]");
}
protected Object[] getPKValues(String key) {
if (prefixPKValues != null) {
return append(prefixPKValues.split(","), key);
} else {
return new Object[] { key };
}
}
// -------------------------------------------------------------------------
// Lifecycle methods
@Override
protected void doStart() throws Exception {
ObjectHelper.notNull(session, "session", this);
session.start();
initInsertStatement();
initSelectStatement();
initDeleteStatement();
initClearStatement();
}
@Override
protected void doStop() throws Exception {
if (session != null) {
session.stop();
}
}
// -------------------------------------------------------------------------
// Add key to repository
protected void initInsertStatement() {
Insert insert = generateInsert(table, pkColumns.split(","), true, ttl);
SimpleStatement statement = applyConsistencyLevel(insert.build(), writeConsistencyLevel);
LOGGER.debug("Generated Insert {}", statement);
insertStatement = getSession().prepare(statement);
}
@Override
public boolean add(String key) {
Object[] idValues = getPKValues(key);
LOGGER.debug("Inserting key {}", (Object) idValues);
return isApplied(getSession().execute(insertStatement.bind(idValues)));
}
// -------------------------------------------------------------------------
// Check if key is in repository
protected void initSelectStatement() {
Select select = generateSelect(table, pkColumns.split(","), pkColumns.split(","));
SimpleStatement statement = applyConsistencyLevel(select.build(), readConsistencyLevel);
LOGGER.debug("Generated Select {}", statement);
selectStatement = getSession().prepare(statement);
}
@Override
public boolean contains(String key) {
Object[] idValues = getPKValues(key);
LOGGER.debug("Checking key {}", (Object) idValues);
return isKey(getSession().execute(selectStatement.bind(idValues)));
}
@Override
public boolean confirm(String key) {
return true;
}
// -------------------------------------------------------------------------
// Remove key from repository
protected void initDeleteStatement() {
Delete delete = generateDelete(table, pkColumns.split(","), true);
SimpleStatement statement = applyConsistencyLevel(delete.build(), writeConsistencyLevel);
LOGGER.debug("Generated Delete {}", statement);
deleteStatement = getSession().prepare(statement);
}
@Override
public boolean remove(String key) {
Object[] idValues = getPKValues(key);
LOGGER.debug("Deleting key {}", (Object) idValues);
return isApplied(getSession().execute(deleteStatement.bind(idValues)));
}
// -------------------------------------------------------------------------
// Clear the repository
protected void initClearStatement() {
Truncate truncate = generateTruncate(table);
SimpleStatement statement = applyConsistencyLevel(truncate.build(), writeConsistencyLevel);
LOGGER.debug("Generated truncate for clear operation {}", statement);
truncateStatement = getSession().prepare(statement);
}
@Override
public void clear() {
LOGGER.debug("Clear table {}", table);
getSession().execute(truncateStatement.bind());
}
// -------------------------------------------------------------------------
// Getters & Setters
public CqlSession getSession() {
return session.getSession();
}
public void setSession(CqlSession session) {
this.session = new CassandraSessionHolder(session);
}
public String getTable() {
return table;
}
public void setTable(String table) {
this.table = table;
}
public Integer getTtl() {
return ttl;
}
public void setTtl(Integer ttl) {
this.ttl = ttl;
}
public ConsistencyLevel getWriteConsistencyLevel() {
return writeConsistencyLevel;
}
public void setWriteConsistencyLevel(ConsistencyLevel writeConsistencyLevel) {
this.writeConsistencyLevel = writeConsistencyLevel;
}
public ConsistencyLevel getReadConsistencyLevel() {
return readConsistencyLevel;
}
public void setReadConsistencyLevel(ConsistencyLevel readConsistencyLevel) {
this.readConsistencyLevel = readConsistencyLevel;
}
public String getPrefixPKValues() {
return prefixPKValues;
}
public void setPrefixPKValues(String prefixPKValues) {
this.prefixPKValues = prefixPKValues;
}
public String getPkColumns() {
return pkColumns;
}
public void setPkColumns(String pkColumns) {
this.pkColumns = pkColumns;
}
}
| CassandraIdempotentRepository |
java | apache__camel | components/camel-pg-replication-slot/src/test/java/org/apache/camel/component/pg/replication/slot/integration/PgReplicationITSupport.java | {
"start": 1186,
"end": 1736
} | class ____ extends CamelTestSupport {
@RegisterExtension
static PostgresLocalContainerService service;
static {
PostgreSQLContainer container = new PostgreSQLContainer<>(
DockerImageName.parse(PostgresLocalContainerService.DEFAULT_POSTGRES_CONTAINER)
.asCompatibleSubstituteFor("postgres"))
.withDatabaseName("camel")
.withCommand("postgres -c wal_level=logical");
service = new PostgresLocalContainerService(container);
}
}
| PgReplicationITSupport |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/EdgeManager.java | {
"start": 1409,
"end": 4680
} | class ____ {
private final Map<IntermediateResultPartitionID, List<ConsumerVertexGroup>> partitionConsumers =
new HashMap<>();
private final Map<ExecutionVertexID, List<ConsumedPartitionGroup>> vertexConsumedPartitions =
new HashMap<>();
private final Map<IntermediateResultPartitionID, List<ConsumedPartitionGroup>>
consumedPartitionsById = new HashMap<>();
public void connectPartitionWithConsumerVertexGroup(
IntermediateResultPartitionID resultPartitionId,
ConsumerVertexGroup consumerVertexGroup) {
checkNotNull(consumerVertexGroup);
List<ConsumerVertexGroup> groups =
getConsumerVertexGroupsForPartitionInternal(resultPartitionId);
groups.add(consumerVertexGroup);
}
public void connectVertexWithConsumedPartitionGroup(
ExecutionVertexID executionVertexId, ConsumedPartitionGroup consumedPartitionGroup) {
checkNotNull(consumedPartitionGroup);
final List<ConsumedPartitionGroup> consumedPartitions =
getConsumedPartitionGroupsForVertexInternal(executionVertexId);
consumedPartitions.add(consumedPartitionGroup);
}
private List<ConsumerVertexGroup> getConsumerVertexGroupsForPartitionInternal(
IntermediateResultPartitionID resultPartitionId) {
return partitionConsumers.computeIfAbsent(resultPartitionId, id -> new ArrayList<>());
}
private List<ConsumedPartitionGroup> getConsumedPartitionGroupsForVertexInternal(
ExecutionVertexID executionVertexId) {
return vertexConsumedPartitions.computeIfAbsent(executionVertexId, id -> new ArrayList<>());
}
public List<ConsumerVertexGroup> getConsumerVertexGroupsForPartition(
IntermediateResultPartitionID resultPartitionId) {
return Collections.unmodifiableList(
getConsumerVertexGroupsForPartitionInternal(resultPartitionId));
}
public List<ConsumedPartitionGroup> getConsumedPartitionGroupsForVertex(
ExecutionVertexID executionVertexId) {
return Collections.unmodifiableList(
getConsumedPartitionGroupsForVertexInternal(executionVertexId));
}
public void registerConsumedPartitionGroup(ConsumedPartitionGroup group) {
for (IntermediateResultPartitionID partitionId : group) {
consumedPartitionsById
.computeIfAbsent(partitionId, ignore -> new ArrayList<>())
.add(group);
}
}
private List<ConsumedPartitionGroup> getConsumedPartitionGroupsByIdInternal(
IntermediateResultPartitionID resultPartitionId) {
return consumedPartitionsById.computeIfAbsent(resultPartitionId, id -> new ArrayList<>());
}
public List<ConsumedPartitionGroup> getConsumedPartitionGroupsById(
IntermediateResultPartitionID resultPartitionId) {
return Collections.unmodifiableList(
getConsumedPartitionGroupsByIdInternal(resultPartitionId));
}
public int getNumberOfConsumedPartitionGroupsById(
IntermediateResultPartitionID resultPartitionId) {
return getConsumedPartitionGroupsByIdInternal(resultPartitionId).size();
}
}
| EdgeManager |
java | spring-projects__spring-boot | documentation/spring-boot-docs/src/main/java/org/springframework/boot/docs/web/servlet/springmvc/CustomerRepository.java | {
"start": 772,
"end": 885
} | interface ____ extends CrudRepository<Customer, Long> {
List<Customer> findByUser(User user);
}
| CustomerRepository |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/iterable/Iterable_generics_with_varargs_Test.java | {
"start": 1038,
"end": 2168
} | class ____ {
@SuppressWarnings({ "unchecked", "rawtypes" })
@Test
void testWithoutGenerics() {
List strings = asList("a", "b", "c");
assertThat(strings).contains("a", "b");
}
@Test
void testConcreteType() {
List<String> strings = asList("a", "b", "c");
assertThat(strings).contains("a", "b");
}
@Test
@Disabled
void testListAssertWithGenerics() {
// List<? extends String> strings = asList("a", "b", "c");
// does not compile as Java 8 is stricter with generics ...
// assertThat(strings).contains("a", "b");
}
@Test
void testListAssertWithGenericsAndExtracting() {
List<? extends String> strings = asList("a", "b", "c");
Function<? super String, String> doubleFunction = new Function<String, String>() {
@Override
public String apply(String s) {
return s + s;
}
};
assertThat(strings)
.extracting(doubleFunction, doubleFunction)
.contains(
tuple("aa", "aa"),
tuple("bb", "bb"));
}
}
| Iterable_generics_with_varargs_Test |
java | apache__spark | examples/src/main/java/org/apache/spark/examples/ml/JavaVarianceThresholdSelectorExample.java | {
"start": 1414,
"end": 2851
} | class ____ {
public static void main(String[] args) {
SparkSession spark = SparkSession
.builder()
.appName("JavaVarianceThresholdSelectorExample")
.getOrCreate();
// $example on$
List<Row> data = Arrays.asList(
RowFactory.create(1, Vectors.dense(6.0, 7.0, 0.0, 7.0, 6.0, 0.0)),
RowFactory.create(2, Vectors.dense(0.0, 9.0, 6.0, 0.0, 5.0, 9.0)),
RowFactory.create(3, Vectors.dense(0.0, 9.0, 3.0, 0.0, 5.0, 5.0)),
RowFactory.create(4, Vectors.dense(0.0, 9.0, 8.0, 5.0, 6.0, 4.0)),
RowFactory.create(5, Vectors.dense(8.0, 9.0, 6.0, 5.0, 4.0, 4.0)),
RowFactory.create(6, Vectors.dense(8.0, 9.0, 6.0, 0.0, 0.0, 0.0))
);
StructType schema = new StructType(new StructField[]{
new StructField("id", DataTypes.IntegerType, false, Metadata.empty()),
new StructField("features", new VectorUDT(), false, Metadata.empty())
});
Dataset<Row> df = spark.createDataFrame(data, schema);
VarianceThresholdSelector selector = new VarianceThresholdSelector()
.setVarianceThreshold(8.0)
.setFeaturesCol("features")
.setOutputCol("selectedFeatures");
Dataset<Row> result = selector.fit(df).transform(df);
System.out.println("Output: Features with variance lower than "
+ selector.getVarianceThreshold() + " are removed.");
result.show();
// $example off$
spark.stop();
}
}
| JavaVarianceThresholdSelectorExample |
java | apache__flink | flink-libraries/flink-cep/src/test/java/org/apache/flink/cep/operator/CEPMigrationTest.java | {
"start": 28599,
"end": 28894
} | class ____ extends SimpleCondition<Event> {
private static final long serialVersionUID = 5726188262756267490L;
@Override
public boolean filter(Event value) throws Exception {
return value.getName().equals("start");
}
}
private static | StartFilter |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/value/enum2enum/EnumToEnumMappingTest.java | {
"start": 1188,
"end": 9416
} | class ____ {
@RegisterExtension
final GeneratedSource generatedSource = new GeneratedSource().addComparisonToFixtureFor(
DefaultOrderMapper.class,
OrderMapper.class,
SpecialOrderMapper.class
);
@ProcessorTest
public void shouldGenerateEnumMappingMethod() {
ExternalOrderType target = OrderMapper.INSTANCE.orderTypeToExternalOrderType( OrderType.B2B );
assertThat( target ).isEqualTo( ExternalOrderType.B2B );
target = OrderMapper.INSTANCE.orderTypeToExternalOrderType( OrderType.RETAIL );
assertThat( target ).isEqualTo( ExternalOrderType.RETAIL );
}
@ProcessorTest
public void shouldConsiderConstantMappings() {
ExternalOrderType target = OrderMapper.INSTANCE.orderTypeToExternalOrderType( OrderType.EXTRA );
assertThat( target ).isEqualTo( ExternalOrderType.SPECIAL );
target = OrderMapper.INSTANCE.orderTypeToExternalOrderType( OrderType.STANDARD );
assertThat( target ).isEqualTo( ExternalOrderType.DEFAULT );
target = OrderMapper.INSTANCE.orderTypeToExternalOrderType( OrderType.NORMAL );
assertThat( target ).isEqualTo( ExternalOrderType.DEFAULT );
}
@ProcessorTest
public void shouldInvokeEnumMappingMethodForPropertyMapping() {
OrderEntity order = new OrderEntity();
order.setOrderType( OrderType.EXTRA );
OrderDto orderDto = OrderMapper.INSTANCE.orderEntityToDto( order );
assertThat( orderDto ).isNotNull();
assertThat( orderDto.getOrderType() ).isEqualTo( ExternalOrderType.SPECIAL );
}
@ProcessorTest
public void shouldApplyReverseMappings() {
OrderType result = OrderMapper.INSTANCE.externalOrderTypeToOrderType( ExternalOrderType.SPECIAL );
assertThat( result ).isEqualTo( OrderType.EXTRA );
result = OrderMapper.INSTANCE.externalOrderTypeToOrderType( ExternalOrderType.DEFAULT );
assertThat( result ).isEqualTo( OrderType.STANDARD );
result = OrderMapper.INSTANCE.externalOrderTypeToOrderType( ExternalOrderType.RETAIL );
assertThat( result ).isEqualTo( OrderType.RETAIL );
result = OrderMapper.INSTANCE.externalOrderTypeToOrderType( ExternalOrderType.B2B );
assertThat( result ).isEqualTo( OrderType.B2B );
}
@ProcessorTest
public void shouldApplyNullMapping() {
OrderEntity order = new OrderEntity();
order.setOrderType( null );
OrderDto orderDto = SpecialOrderMapper.INSTANCE.orderEntityToDto( order );
assertThat( orderDto ).isNotNull();
assertThat( orderDto.getOrderType() ).isEqualTo( ExternalOrderType.DEFAULT );
}
@ProcessorTest
public void shouldApplyTargetIsNullMapping() {
OrderEntity order = new OrderEntity();
order.setOrderType( OrderType.STANDARD );
OrderDto orderDto = SpecialOrderMapper.INSTANCE.orderEntityToDto( order );
assertThat( orderDto ).isNotNull();
assertThat( orderDto.getOrderType() ).isNull();
}
@ProcessorTest
public void shouldApplyDefaultMappings() {
OrderEntity order = new OrderEntity();
// try all other
order.setOrderType( OrderType.B2B );
OrderDto orderDto = SpecialOrderMapper.INSTANCE.orderEntityToDto( order );
assertThat( orderDto ).isNotNull();
assertThat( orderDto.getOrderType() ).isEqualTo( ExternalOrderType.B2B );
order.setOrderType( OrderType.EXTRA );
orderDto = SpecialOrderMapper.INSTANCE.orderEntityToDto( order );
assertThat( orderDto ).isNotNull();
assertThat( orderDto.getOrderType() ).isEqualTo( ExternalOrderType.SPECIAL );
order.setOrderType( OrderType.NORMAL );
orderDto = SpecialOrderMapper.INSTANCE.orderEntityToDto( order );
assertThat( orderDto ).isNotNull();
assertThat( orderDto.getOrderType() ).isEqualTo( ExternalOrderType.SPECIAL );
order.setOrderType( OrderType.RETAIL );
orderDto = SpecialOrderMapper.INSTANCE.orderEntityToDto( order );
assertThat( orderDto ).isNotNull();
assertThat( orderDto.getOrderType() ).isEqualTo( ExternalOrderType.RETAIL );
}
@ProcessorTest
public void shouldApplyDefaultReverseMappings() {
OrderType result = SpecialOrderMapper.INSTANCE.externalOrderTypeToOrderType( ExternalOrderType.SPECIAL );
assertThat( result ).isEqualTo( OrderType.EXTRA );
result = SpecialOrderMapper.INSTANCE.externalOrderTypeToOrderType( ExternalOrderType.DEFAULT );
assertThat( result ).isNull();
result = SpecialOrderMapper.INSTANCE.externalOrderTypeToOrderType( ExternalOrderType.RETAIL );
assertThat( result ).isEqualTo( OrderType.RETAIL );
result = SpecialOrderMapper.INSTANCE.externalOrderTypeToOrderType( ExternalOrderType.B2B );
assertThat( result ).isEqualTo( OrderType.B2B );
}
@ProcessorTest
public void shouldMappAllUnmappedToDefault() {
OrderEntity order = new OrderEntity();
order.setOrderType( OrderType.RETAIL );
OrderDto orderDto = DefaultOrderMapper.INSTANCE.orderEntityToDto( order );
assertThat( orderDto ).isNotNull();
assertThat( orderDto.getOrderType() ).isEqualTo( ExternalOrderType.DEFAULT );
order.setOrderType( OrderType.B2B );
orderDto = DefaultOrderMapper.INSTANCE.orderEntityToDto( order );
assertThat( orderDto ).isNotNull();
assertThat( orderDto.getOrderType() ).isEqualTo( ExternalOrderType.DEFAULT );
order.setOrderType( OrderType.EXTRA );
orderDto = DefaultOrderMapper.INSTANCE.orderEntityToDto( order );
assertThat( orderDto ).isNotNull();
assertThat( orderDto.getOrderType() ).isEqualTo( ExternalOrderType.DEFAULT );
order.setOrderType( OrderType.STANDARD );
orderDto = DefaultOrderMapper.INSTANCE.orderEntityToDto( order );
assertThat( orderDto ).isNotNull();
assertThat( orderDto.getOrderType() ).isEqualTo( ExternalOrderType.DEFAULT );
order.setOrderType( OrderType.NORMAL );
orderDto = DefaultOrderMapper.INSTANCE.orderEntityToDto( order );
assertThat( orderDto ).isNotNull();
assertThat( orderDto.getOrderType() ).isEqualTo( ExternalOrderType.DEFAULT );
}
@IssueKey("1091")
@ProcessorTest
public void shouldMapAnyRemainingToNullCorrectly() {
ExternalOrderType externalOrderType = SpecialOrderMapper.INSTANCE.anyRemainingToNull( OrderType.RETAIL );
assertThat( externalOrderType )
.isNotNull()
.isEqualTo( ExternalOrderType.RETAIL );
externalOrderType = SpecialOrderMapper.INSTANCE.anyRemainingToNull( OrderType.B2B );
assertThat( externalOrderType )
.isNotNull()
.isEqualTo( ExternalOrderType.B2B );
externalOrderType = SpecialOrderMapper.INSTANCE.anyRemainingToNull( OrderType.EXTRA );
assertThat( externalOrderType ).isNull();
externalOrderType = SpecialOrderMapper.INSTANCE.anyRemainingToNull( OrderType.STANDARD );
assertThat( externalOrderType ).isNull();
externalOrderType = SpecialOrderMapper.INSTANCE.anyRemainingToNull( OrderType.NORMAL );
assertThat( externalOrderType ).isNull();
}
@ProcessorTest
@WithClasses(ErroneousOrderMapperMappingSameConstantTwice.class)
@ExpectedCompilationOutcome(
value = CompilationResult.FAILED,
diagnostics = {
@Diagnostic(type = ErroneousOrderMapperMappingSameConstantTwice.class,
kind = Kind.ERROR,
line = 27,
message = "Source value mapping: \"EXTRA\" cannot be mapped more than once.")
}
)
public void shouldRaiseErrorIfSameSourceEnumConstantIsMappedTwice() {
}
@ProcessorTest
@WithClasses(ErroneousOrderMapperUsingUnknownEnumConstants.class)
@ExpectedCompilationOutcome(
value = CompilationResult.FAILED,
diagnostics = {
@Diagnostic(type = ErroneousOrderMapperUsingUnknownEnumConstants.class,
kind = Kind.ERROR,
line = 26,
message = "Constant FOO doesn't exist in | EnumToEnumMappingTest |
java | apache__camel | dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/SpringLdapComponentBuilderFactory.java | {
"start": 4036,
"end": 4836
} | class ____
extends AbstractComponentBuilder<SpringLdapComponent>
implements SpringLdapComponentBuilder {
@Override
protected SpringLdapComponent buildConcreteComponent() {
return new SpringLdapComponent();
}
@Override
protected boolean setPropertyOnComponent(
Component component,
String name,
Object value) {
switch (name) {
case "lazyStartProducer": ((SpringLdapComponent) component).setLazyStartProducer((boolean) value); return true;
case "autowiredEnabled": ((SpringLdapComponent) component).setAutowiredEnabled((boolean) value); return true;
default: return false;
}
}
}
} | SpringLdapComponentBuilderImpl |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest-client/deployment/src/test/java/io/quarkus/rest/client/reactive/ClientWithWrongScopeTest.java | {
"start": 421,
"end": 1024
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest TEST = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(MyClient.class))
.withConfigurationResource("wrong-scope-test-application.properties")
.assertException(t -> assertThat(t).hasMessageContaining("Not possible to define the scope"));
@Test
public void testValidationFailed() {
// This method should not be invoked
fail();
}
@Path("/client")
@RegisterRestClient(configKey = "my-client")
public | ClientWithWrongScopeTest |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/struct/FormatFeatureUnwrapSingleTest.java | {
"start": 862,
"end": 1297
} | class ____
{
@JsonProperty("strings")
@JsonFormat(with={ JsonFormat.Feature.WRITE_SINGLE_ELEM_ARRAYS_UNWRAPPED })
public String[] _strings = new String[] {
"a"
};
@JsonFormat(without={ JsonFormat.Feature.WRITE_SINGLE_ELEM_ARRAYS_UNWRAPPED })
public int[] ints = new int[] { 1 };
public boolean[] bools = new boolean[] { true };
}
static | WrapWriteWithArrays |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/context/annotation/configuration/AutowiredConfigurationTests.java | {
"start": 10689,
"end": 10868
} | class ____ {
@Autowired
private Colour colour;
@Bean
public TestBean testBean() {
return new TestBean(colour.toString());
}
}
@Configuration
static | AutowiredConfig |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/filter/DynamicFilterTest.java | {
"start": 2466,
"end": 36837
} | class ____ {
private static final Logger log = Logger.getLogger( DynamicFilterTest.class );
public static final String CACHE_ACCESS_STRATEGY = "nonstrict-read-write";
private TestData testData;
@BeforeEach
public void createTestData(DomainModelScope modelScope, SessionFactoryScope factoryScope) {
for ( PersistentClass entityBinding : modelScope.getDomainModel().getEntityBindings() ) {
if ( !entityBinding.isInherited() ) {
entityBinding.getRootClass().setCacheConcurrencyStrategy( CACHE_ACCESS_STRATEGY );
entityBinding.setCached( true );
}
}
for ( Collection collectionBinding : modelScope.getDomainModel().getCollectionBindings() ) {
collectionBinding.setCacheConcurrencyStrategy( CACHE_ACCESS_STRATEGY );
}
testData = new TestData();
testData.prepare( factoryScope );
}
@AfterEach
public void releaseTestData(SessionFactoryScope factoryScope) {
testData.release( factoryScope );
}
@Test
@RequiresDialectFeature(feature = DialectFeatureChecks.SupportsUnionInSubquery.class)
public void testSqlSyntaxOfFiltersWithUnions(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( (session) -> {
session.enableFilter( "unioned" );
//noinspection deprecation
session.createQuery( "from Category" ).list();
} );
}
@Test
public void testSecondLevelCachedCollectionsFiltering(SessionFactoryScope factoryScope) {
var sessionFactory = factoryScope.getSessionFactory();
var persister = sessionFactory
.getRuntimeMetamodels()
.getMappingMetamodel()
.getCollectionDescriptor(Salesperson.class.getName() + ".orders");
var cache = persister.getCacheAccessStrategy();
var cachedData = factoryScope.fromSession(session -> {
// Force a collection into the second level cache, with its non-filtered elements
var sp = session.getReference( Salesperson.class, testData.steveId );
Hibernate.initialize( sp.getOrders() );
assertTrue( persister.hasCache(), "No cache for collection" );
var cacheKey = cache.generateCacheKey(
testData.steveId,
persister,
sessionFactory,
session.getTenantIdentifier()
);
var cached = (CollectionCacheEntry) cache.get(
session,
cacheKey
);
assertNotNull( cached, "collection was not in cache" );
return cached;
} );
factoryScope.inSession(session -> {
session.enableFilter( "fulfilledOrders" ).setParameter( "asOfDate", testData.lastMonth.getTime() );
//noinspection deprecation
var sp = (Salesperson) session.createQuery( "from Salesperson as s where s.id = :id" )
.setParameter( "id", testData.steveId )
.uniqueResult();
assertEquals( 1, sp.getOrders().size(), "Filtered-collection not bypassing 2L-cache" );
var cacheKey2 = cache.generateCacheKey(
testData.steveId,
persister,
sessionFactory,
session.getTenantIdentifier()
);
var cachedData2 = (CollectionCacheEntry) persister.getCacheAccessStrategy()
.get( session, cacheKey2 );
assertNotNull( cachedData2, "collection no longer in cache!" );
assertSame( cachedData, cachedData2, "Different cache values!" );
} );
factoryScope.inSession(session -> {
session.enableFilter( "fulfilledOrders" ).setParameter( "asOfDate", testData.lastMonth.getTime() );
var sp = session.getReference( Salesperson.class, testData.steveId );
assertEquals( 1, sp.getOrders().size(), "Filtered-collection not bypassing 2L-cache" );
} );
// Finally, make sure that the original cached version did not get over-written
factoryScope.inSession(session -> {
var sp = session.getReference( Salesperson.class, testData.steveId );
assertEquals( 2, sp.getOrders().size(), "Actual cached version got over-written" );
} );
}
@Test
public void testCombinedClassAndCollectionFiltersEnabled(SessionFactoryScope factoryScope) {
factoryScope.inSession(session -> {
session.enableFilter( "regionlist" ).setParameterList( "regions", new String[] { "LA", "APAC" } );
session.enableFilter( "fulfilledOrders" ).setParameter( "asOfDate", testData.lastMonth.getTime() );
// test retrieval through hql with the collection as non-eager
//noinspection removal
var salespersons = session.createQuery(
"select s from Salesperson as s",
Salesperson.class
)
.getResultList();
assertEquals( 1, salespersons.size(), "Incorrect salesperson count" );
var sp = salespersons.get( 0 );
assertEquals( 1, sp.getOrders().size(), "Incorrect order count" );
session.clear();
session.disableFilter( "regionlist" );
session.enableFilter( "regionlist" ).setParameterList(
"regions",
new String[] { "LA", "APAC", "APAC" }
);
// Second test retrieval through hql with the collection as non-eager with different region list
//noinspection removal
salespersons = session.createQuery( "select s from Salesperson as s", Salesperson.class )
.getResultList();
assertEquals( 1, salespersons.size(), "Incorrect salesperson count" );
sp = salespersons.get( 0 );
assertEquals( 1, sp.getOrders().size(), "Incorrect order count" );
session.clear();
// test retrieval through hql with the collection join fetched
//noinspection removal
salespersons = session.createQuery(
"select s from Salesperson as s left join fetch s.orders",
Salesperson.class
).getResultList();
assertEquals( 1, salespersons.size(), "Incorrect salesperson count" );
sp = salespersons.get( 0 );
assertEquals( 1, sp.getOrders().size(), "Incorrect order count" );
} );
}
@Test
public void testHqlFilters(SessionFactoryScope factoryScope) {
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// HQL test
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
log.info( "Starting HQL filter tests" );
factoryScope.inSession(session -> {
session.enableFilter( "region" ).setParameter( "region", "APAC" );
session.enableFilter( "effectiveDate" )
.setParameter( "asOfDate", testData.lastMonth.getTime() );
log.info( "HQL against Salesperson..." );
//noinspection deprecation
var results = session.createQuery( "select s from Salesperson as s left join fetch s.orders" )
.list();
assertEquals( 1, results.size(),
"Incorrect filtered HQL result count [" + results.size() + "]" );
var result = (Salesperson) results.get( 0 );
assertEquals( 1, result.getOrders().size(), "Incorrect collectionfilter count" );
log.info( "HQL against Product..." );
//noinspection deprecation
results = session.createQuery( "from Product as p where p.stockNumber = ?1" )
.setParameter( 1, 124 )
.list();
assertEquals( 1, results.size() );
} );
}
@Test
@JiraKey(value = "HHH-14567")
public void testHqlFiltersAppliedAfterQueryCreation(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( session -> {
//noinspection removal
var query = session.createQuery(
"select s from Salesperson s",
Salesperson.class
);
var list = query.list();
assertThat( list ).hasSize( 2 );
session.enableFilter( "region" ).setParameter( "region", "APAC" );
assertThat( query.list() ).hasSize( 1 );
} );
}
	@Test
	// NOTE: method name "Customer" looks like a typo for "Custom" (custom SQL read/write),
	// but renaming would change the public test name — left as-is.
	public void testFiltersWithCustomerReadAndWrite(SessionFactoryScope factoryScope) {
		//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
		// Custom SQL read/write with filter
		//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
		log.info( "Starting HQL filter with custom SQL get/set tests" );
		factoryScope.inSession(session -> {
			// filter over a property mapped with custom SQL read/write expressions;
			// only one fixture product is expected to pass the 4kg threshold
			session.enableFilter( "heavyProducts" ).setParameter( "weightKilograms", 4d );
			log.info( "HQL against Product..." );
			//noinspection removal
			var results = session.createQuery( "from Product", Product.class ).getResultList();
			assertEquals( 1, results.size() );
		} );
	}
	@Test
	/**
	 * Filters must be applied to JPA criteria queries, both on the query root
	 * and on a left-join-fetched collection.
	 */
	public void testCriteriaQueryFilters(SessionFactoryScope factoryScope) {
		//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
		// Criteria-query test
		//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
		log.info( "Starting Criteria-query filter tests" );
		factoryScope.inSession(session -> {
			// three filters active at once: entity filter + two date-based filters
			session.enableFilter( "region" ).setParameter( "region", "APAC" );
			session.enableFilter( "fulfilledOrders" )
					.setParameter( "asOfDate", testData.lastMonth.getTime() );
			session.enableFilter( "effectiveDate" )
					.setParameter( "asOfDate", testData.lastMonth.getTime() );
			log.info( "Criteria query against Salesperson..." );
			var criteriaBuilder = session.getCriteriaBuilder();
			var criteria = criteriaBuilder.createQuery( Salesperson.class );
			var from = criteria.from( Salesperson.class );
			// fetch the orders collection so the collection filter is exercised too
			from.fetch( "orders", JoinType.LEFT );
			//noinspection removal
			var salespersons = session.createQuery( criteria ).getResultList();
			assertEquals( 1, salespersons.size(), "Incorrect salesperson count" );
			assertEquals( 1, ( salespersons.get( 0 ) ).getOrders().size(), "Incorrect order count" );
			log.info( "Criteria query against Product..." );
			var productCriteria = criteriaBuilder.createQuery( Product.class );
			var productRoot = productCriteria.from( Product.class );
			productCriteria.where( criteriaBuilder.equal( productRoot.get( "stockNumber" ), 124 ) );
			//noinspection removal
			var products = session.createQuery( productCriteria ).getResultList();
			assertEquals( 1, products.size(), "Incorrect product count" );
		} );
	}
	@Test
	/**
	 * Control case: a criteria subquery built from a "detached" builder (obtained
	 * from the SessionFactory, before the session exists) is still filtered when
	 * executed inside a session with filters enabled.
	 */
	public void testCriteriaControl(SessionFactoryScope factoryScope) {
		var sessionFactory = factoryScope.getSessionFactory();
		// the subquery...
		var detachedCriteriaBuilder = sessionFactory.getCriteriaBuilder();
		var query = detachedCriteriaBuilder.createQuery( Salesperson.class );
		var subquery = query.subquery( String.class );
		var salespersonRoot = subquery.from( Salesperson.class );
		subquery.select( salespersonRoot.get( "name" ) );
		factoryScope.inTransaction(session -> {
			session.enableFilter( "fulfilledOrders" ).setParameter( "asOfDate", testData.lastMonth.getTime() );
			session.enableFilter( "regionlist" ).setParameterList( "regions", new String[] { "APAC" } );
			var criteriaBuilder = session.getCriteriaBuilder();
			var criteria = criteriaBuilder.createQuery( Order.class );
			criteria.from( Order.class );
			// "steve" IN (select sp.name from Salesperson sp) — subquery built above
			criteria.where( criteriaBuilder.in( subquery ).value( "steve" ) );
			//noinspection removal
			var result = session.createQuery( criteria ).getResultList();
			assertEquals( 1, result.size() );
		} );
	}
@Test
public void testCriteriaSubqueryWithFilters(SessionFactoryScope factoryScope) {
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Criteria-subquery test
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
log.info( "Starting Criteria-subquery filter tests" );
factoryScope.inSession(session -> {
session.enableFilter( "region" ).setParameter( "region", "APAC" );
log.info( "Criteria query against Department with a subquery on Salesperson in the APAC reqion..." );
var detachedCriteriaBuilder = session.getCriteriaBuilder();
var subquery = detachedCriteriaBuilder
.createQuery( Salesperson.class )
.subquery( Department.class );
var subqueryRoot = subquery.from( Salesperson.class );
subquery.where( detachedCriteriaBuilder.equal( subqueryRoot.get( "name" ), "steve" ) );
subquery.select( subqueryRoot.get( "department" ) );
var criteriaBuilder = session.getCriteriaBuilder();
var criteria = criteriaBuilder.createQuery( Department.class );
criteria.where( criteriaBuilder.in( criteria.from( Department.class ) ).value( subquery ) );
//noinspection removal
var departmentsQuery = session.createQuery( criteria );
var departments = departmentsQuery.list();
assertEquals( 1, departments.size(), "Incorrect department count" );
log.info( "Criteria query against Department with a subquery on Salesperson in the FooBar reqion..." );
session.enableFilter( "region" ).setParameter( "region", "Foobar" );
departments = departmentsQuery.list();
assertEquals( 0, departments.size(), "Incorrect department count" );
log.info(
"Criteria query against Order with a subquery for line items with a subquery on product and sold by a given sales person..." );
session.enableFilter( "region" ).setParameter( "region", "APAC" );
var lineItemSubquery = detachedCriteriaBuilder.createQuery()
.subquery( LineItem.class );
var itemRoot = lineItemSubquery.from( LineItem.class );
var product = itemRoot.join( "product", JoinType.INNER );
lineItemSubquery.where(
detachedCriteriaBuilder.and(
detachedCriteriaBuilder.ge( itemRoot.get( "quantity" ), 1L ),
detachedCriteriaBuilder.equal( product.get( "name" ), "Acme Hair Gel" )
)
);
lineItemSubquery.select( product.get( "id" ) );
var orderCriteria = criteriaBuilder.createQuery( Order.class );
var orderRoot = orderCriteria.from( Order.class );
orderCriteria.where(
criteriaBuilder.and(
criteriaBuilder.exists( lineItemSubquery ),
criteriaBuilder.equal( orderRoot.get( "buyer" ), "gavin" )
)
);
//noinspection removal
var orders = session.createQuery( orderCriteria ).list();
assertEquals( 1, orders.size(), "Incorrect orders count" );
log.info(
"query against Order with a subquery for line items with a subquery line items where the product name is Acme Hair Gel and the quantity is greater than 1 in a given region and the product is effective as of last month" );
session.enableFilter( "region" ).setParameter( "region", "APAC" );
session.enableFilter( "effectiveDate" ).setParameter( "asOfDate", testData.lastMonth.getTime() );
var productSubquery = detachedCriteriaBuilder.createQuery().subquery( Long.class );
var productRoot = productSubquery.from( Product.class );
productSubquery.select( productRoot.get( "id" ) );
productSubquery.where( detachedCriteriaBuilder.equal(
productRoot.get( "name" ),
"Acme Hair Gel"
) );
lineItemSubquery = detachedCriteriaBuilder.createQuery().subquery( LineItem.class );
itemRoot = lineItemSubquery.from( LineItem.class );
product = itemRoot.join( "product", JoinType.INNER );
lineItemSubquery.where(
detachedCriteriaBuilder.and(
detachedCriteriaBuilder.ge( itemRoot.get( "quantity" ), 1L ),
detachedCriteriaBuilder.in( product.get( "id" ) ).value( productSubquery )
)
);
lineItemSubquery.select( product.get( "id" ) );
orderCriteria = criteriaBuilder.createQuery( Order.class );
orderRoot = orderCriteria.from( Order.class );
orderCriteria.where(
criteriaBuilder.and(
criteriaBuilder.exists( lineItemSubquery ),
criteriaBuilder.equal( orderRoot.get( "buyer" ), "gavin" )
)
);
//noinspection removal
orders = session.createQuery( orderCriteria ).list();
assertEquals( 1, orders.size(), "Incorrect orders count" );
log.info(
"query against Order with a subquery for line items with a subquery line items where the product name is Acme Hair Gel and the quantity is greater than 1 in a given region and the product is effective as of 4 months ago" );
session.enableFilter( "region" ).setParameter( "region", "APAC" );
session.enableFilter( "effectiveDate" ).setParameter(
"asOfDate",
testData.fourMonthsAgo.getTime()
);
orderCriteria = criteriaBuilder.createQuery( Order.class );
orderRoot = orderCriteria.from( Order.class );
orderCriteria.where(
criteriaBuilder.and(
criteriaBuilder.exists( lineItemSubquery ),
criteriaBuilder.equal( orderRoot.get( "buyer" ), "gavin" )
)
);
//noinspection removal
orders = session.createQuery( orderCriteria ).list();
assertEquals( 0, orders.size(), "Incorrect orders count" );
} );
}
@Test
public void testHQLSubqueryWithFilters(SessionFactoryScope factoryScope) {
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// HQL subquery with filters test
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
log.info( "Starting HQL subquery with filters tests" );
factoryScope.inSession(session -> {
session.enableFilter( "region" ).setParameter( "region", "APAC" );
log.info( "query against Department with a subquery on Salesperson in the APAC reqion..." );
//noinspection deprecation
var departments = session.createQuery(
"select d from Department as d where d in (select s.department from Salesperson s where s.name = ?1)"
).setParameter( 1, "steve" ).list();
assertEquals( 1, departments.size(), "Incorrect department count" );
log.info( "query against Department with a subquery on Salesperson in the FooBar reqion..." );
session.enableFilter( "region" ).setParameter( "region", "Foobar" );
//noinspection deprecation
departments = session.createQuery(
"select d from Department as d where d in (select s.department from Salesperson s where s.name = ?1)" )
.setParameter( 1, "steve" )
.list();
assertEquals( 0, departments.size(), "Incorrect department count" );
log.info(
"query against Order with a subquery for line items with a subquery line items where the product name is Acme Hair Gel and the quantity is greater than 1 in a given region for a given buyer" );
session.enableFilter( "region" ).setParameter( "region", "APAC" );
//noinspection deprecation
var orders = session.createQuery(
"select o from Order as o where exists (select li.id from LineItem li, Product as p where p.id = li.product.id and li.quantity >= ?1 and p.name = ?2) and o.buyer = ?3" )
.setParameter( 1, 1L ).setParameter( 2, "Acme Hair Gel" ).setParameter( 3, "gavin" ).list();
assertEquals( 1, orders.size(), "Incorrect orders count" );
log.info(
"query against Order with a subquery for line items with a subquery line items where the product name is Acme Hair Gel and the quantity is greater than 1 in a given region and the product is effective as of last month" );
session.enableFilter( "region" ).setParameter( "region", "APAC" );
session.enableFilter( "effectiveDate" ).setParameter( "asOfDate", testData.lastMonth.getTime() );
//noinspection deprecation
orders = session.createQuery(
"select o from Order as o where exists (select li.id from LineItem li where li.quantity >= ?1 and li.product.id in (select p.id from Product p where p.name = ?2)) and o.buyer = ?3" )
.setParameter( 1, 1L ).setParameter( 2, "Acme Hair Gel" ).setParameter( 3, "gavin" ).list();
assertEquals( 1, orders.size(), "Incorrect orders count" );
log.info(
"query against Order with a subquery for line items with a subquery line items where the product name is Acme Hair Gel and the quantity is greater than 1 in a given region and the product is effective as of 4 months ago"
);
session.enableFilter( "region" ).setParameter( "region", "APAC" );
session.enableFilter( "effectiveDate" ).setParameter(
"asOfDate",
testData.fourMonthsAgo.getTime()
);
//noinspection deprecation
orders = session.createQuery(
"select o from Order as o where exists (select li.id from LineItem li where li.quantity >= ?1 and li.product in (select p from Product p where p.name = ?2)) and o.buyer = ?3" )
.setParameter( 1, 1L ).setParameter( 2, "Acme Hair Gel" ).setParameter( 3, "gavin" ).list();
assertEquals( 0, orders.size(), "Incorrect orders count" );
log.info(
"query against Order with a subquery for line items with a subquery line items where the product name is Acme Hair Gel and the quantity is greater than 1 in a given region and the product is effective as of last month with named types" );
session.enableFilter( "region" ).setParameter( "region", "APAC" );
session.enableFilter( "effectiveDate" ).setParameter( "asOfDate", testData.lastMonth.getTime() );
//noinspection deprecation
orders = session.createQuery(
"select o from Order as o where exists (select li.id from LineItem li where li.quantity >= ?1 and li.product in (select p from Product p where p.name = ?2)) and o.buyer = ?3" )
.setParameter( 1, 1L ).setParameter( 2, "Acme Hair Gel" ).setParameter( 3, "gavin" ).list();
assertEquals( 1, orders.size(), "Incorrect orders count" );
log.info(
"query against Order with a subquery for line items with a subquery line items where the product name is Acme Hair Gel and the quantity is greater than 1 in a given region and the product is effective as of last month with mixed types" );
session.enableFilter( "region" ).setParameter( "region", "APAC" );
session.enableFilter( "effectiveDate" ).setParameter( "asOfDate", testData.lastMonth.getTime() );
//noinspection deprecation
orders = session.createQuery(
"select o from Order as o where exists (select li.id from LineItem li where li.quantity >= ?1 and li.product in (select p from Product p where p.name = ?2)) and o.buyer = ?3" )
.setParameter( 1, 1L ).setParameter( 2, "Acme Hair Gel" ).setParameter( 3, "gavin" ).list();
assertEquals( 1, orders.size(), "Incorrect orders count" );
} );
}
@Test
@JiraKey(value = "HHH-5932")
public void testHqlQueryWithColons(SessionFactoryScope factoryScope) {
factoryScope.inSession(session -> {
session.enableFilter( "region" ).setParameter( "region", "PACA" );
//noinspection deprecation
session.createQuery( "from Salesperson p where p.name = ':hibernate'" ).list();
} );
}
	@Test
	/**
	 * Filters on an entity referenced only from an implicit subquery must still be
	 * applied — positional-parameter flavor. Runs the same query four times with
	 * progressively more filters enabled and checks the shrinking result counts.
	 */
	public void testFilterApplicationOnHqlQueryWithImplicitSubqueryContainingPositionalParameter(SessionFactoryScope factoryScope) {
		factoryScope.inTransaction(session -> {
			var queryString = "from Order o where ?1 in ( select sp.name from Salesperson sp )";
			// first a control-group query
			//noinspection deprecation
			var result = session.createQuery( queryString ).setParameter( 1, "steve" ).list();
			assertEquals( 2, result.size() );
			// now lets enable filters on Order...
			session.enableFilter( "fulfilledOrders" ).setParameter( "asOfDate", testData.lastMonth.getTime() );
			//noinspection deprecation
			result = session.createQuery( queryString ).setParameter( 1, "steve" ).list();
			assertEquals( 1, result.size() );
			// now, lets additionally enable filter on Salesperson. First a valid one...
			session.enableFilter( "regionlist" ).setParameterList( "regions", new String[] { "APAC" } );
			//noinspection deprecation
			result = session.createQuery( queryString ).setParameter( 1, "steve" ).list();
			assertEquals( 1, result.size() );
			// ... then a silly one...
			// a region matching no salesperson empties the subquery, so no orders match
			session.enableFilter( "regionlist" ).setParameterList(
					"regions",
					new String[] { "gamma quadrant" }
			);
			//noinspection deprecation
			result = session.createQuery( queryString ).setParameter( 1, "steve" ).list();
			assertEquals( 0, result.size() );
		} );
	}
	@Test
	/**
	 * Same scenario as the positional-parameter variant above, but using a named
	 * parameter (:salesPersonName) inside the implicit subquery.
	 */
	public void testFilterApplicationOnHqlQueryWithImplicitSubqueryContainingNamedParameter(SessionFactoryScope factoryScope) {
		factoryScope.inTransaction(session -> {
			var queryString = "from Order o where :salesPersonName in ( select sp.name from Salesperson sp )";
			// first a control-group query
			//noinspection deprecation
			var result = session.createQuery( queryString ).setParameter( "salesPersonName", "steve" ).list();
			assertEquals( 2, result.size() );
			// now lets enable filters on Order...
			session.enableFilter( "fulfilledOrders" ).setParameter( "asOfDate", testData.lastMonth.getTime() );
			//noinspection deprecation
			result = session.createQuery( queryString ).setParameter( "salesPersonName", "steve" ).list();
			assertEquals( 1, result.size() );
			// now, lets additionally enable filter on Salesperson. First a valid one...
			session.enableFilter( "regionlist" ).setParameterList( "regions", new String[] { "APAC" } );
			//noinspection deprecation
			result = session.createQuery( queryString ).setParameter( "salesPersonName", "steve" ).list();
			assertEquals( 1, result.size() );
			// ... then a silly one...
			// a region matching no salesperson empties the subquery, so no orders match
			session.enableFilter( "regionlist" ).setParameterList(
					"regions",
					new String[] { "gamma quadrant" }
			);
			//noinspection deprecation
			result = session.createQuery( queryString ).setParameter( "salesPersonName", "steve" ).list();
			assertEquals( 0, result.size() );
		} );
	}
	@Test
	@RequiresDialectFeature(feature = DialectFeatureChecks.SupportsDmlTargetColumnQualifier.class)
	/**
	 * An HQL bulk delete must honor enabled filters: with the "region" filter set
	 * to "NA", only the NA salesperson is deleted; the APAC one survives and is
	 * removed explicitly for cleanup.
	 */
	public void testFiltersOnSimpleHqlDelete(SessionFactoryScope factoryScope) {
		var sp = new Salesperson();
		var sp2 = new Salesperson();
		// seed one salesperson per region in a separate transaction
		factoryScope.inTransaction(session -> {
			sp.setName( "steve" );
			sp.setRegion( "NA" );
			session.persist( sp );
			sp2.setName( "john" );
			sp2.setRegion( "APAC" );
			session.persist( sp2 );
		} );
		factoryScope.inTransaction(session -> {
			session.enableFilter( "region" ).setParameter( "region", "NA" );
			//noinspection deprecation
			int count = session.createQuery( "delete from Salesperson" ).executeUpdate();
			// only the filtered-in (NA) row must be affected
			assertEquals( 1, count );
			session.remove( sp2 );
		} );
	}
	@Test
	@RequiresDialectFeature(feature = DialectFeatureChecks.SupportsDmlTargetColumnQualifier.class)
	/**
	 * Same contract as {@code testFiltersOnSimpleHqlDelete} for the multi-table
	 * delete strategy.
	 * NOTE(review): the body is byte-identical to the "Simple" variant — presumably
	 * the multi-table behavior comes from the entity mapping/configuration rather
	 * than the test body; confirm this test still exercises a multi-table delete.
	 */
	public void testFiltersOnMultiTableHqlDelete(SessionFactoryScope factoryScope) {
		var sp = new Salesperson();
		var sp2 = new Salesperson();
		// seed one salesperson per region in a separate transaction
		factoryScope.inTransaction(session -> {
			sp.setName( "steve" );
			sp.setRegion( "NA" );
			session.persist( sp );
			sp2.setName( "john" );
			sp2.setRegion( "APAC" );
			session.persist( sp2 );
		} );
		factoryScope.inTransaction(session -> {
			session.enableFilter( "region" ).setParameter( "region", "NA" );
			//noinspection deprecation
			int count = session.createQuery( "delete from Salesperson" ).executeUpdate();
			// only the filtered-in (NA) row must be affected
			assertEquals( 1, count );
			session.remove( sp2 );
		} );
	}
@Test
public void testFindFilters(SessionFactoryScope factoryScope) {
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Get() test
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
log.info( "Starting find() filter tests (eager assoc. fetching)." );
factoryScope.inSession(session -> {
session.enableFilter( "region" ).setParameter( "region", "APAC" );
log.info( "Performing find()..." );
var salesperson = session.find( Salesperson.class, testData.steveId );
assertNotNull( salesperson );
assertEquals( 1, salesperson.getOrders().size(), "Incorrect order count" );
} );
}
@Test
public void testOneToManyFilters(SessionFactoryScope factoryScope) {
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// one-to-many loading tests
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
log.info( "Starting one-to-many collection loader filter tests." );
factoryScope.inSession(session -> {
session.enableFilter( "seniorSalespersons" )
.setParameter( "asOfDate", testData.lastMonth.getTime() );
log.info( "Performing load of Department..." );
var department = session.getReference( Department.class, testData.deptId );
var salespersons = department.getSalespersons();
assertEquals( 1, salespersons.size(), "Incorrect salesperson count" );
} );
}
	@Test
	/**
	 * Exercises a list-valued ("IN"-style) filter parameter via setParameterList.
	 */
	public void testInStyleFilterParameter(SessionFactoryScope factoryScope) {
		//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
		// one-to-many loading tests
		//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
		log.info( "Starting one-to-many collection loader filter tests." );
		factoryScope.inSession(session -> {
			// only one fixture salesperson falls in {LA, APAC}
			session.enableFilter( "regionlist" )
					.setParameterList( "regions", new String[] { "LA", "APAC" } );
			log.debug( "Performing query of Salespersons" );
			//noinspection deprecation
			var salespersons = session.createQuery( "from Salesperson" ).list();
			assertEquals( 1, salespersons.size(), "Incorrect salesperson count" );
		} );
	}
	@Test
	/**
	 * The "effectiveDate" filter must apply to a many-to-many collection
	 * (Product.categories) when the owner is loaded via a criteria query.
	 */
	public void testManyToManyFilterOnCriteria(SessionFactoryScope factoryScope) {
		factoryScope.inSession(session -> {
			session.enableFilter( "effectiveDate" ).setParameter( "asOfDate", new Date() );
			var criteriaBuilder = session.getCriteriaBuilder();
			var criteria = criteriaBuilder.createQuery( Product.class );
			var root = criteria.from( Product.class );
			criteria.where( criteriaBuilder.equal( root.get( "id" ), testData.prod1Id ) );
			// tuple transformer unwraps the single selected entity
			//noinspection removal
			var prod = session.createQuery( criteria )
					.setTupleTransformer( (tuple, aliases) -> (Product) tuple[0] )
					.uniqueResult();
			assertNotNull( prod );
			assertEquals( 1, prod.getCategories().size(), "Incorrect Product.categories count for filter" );
		} );
	}
	@Test
	/**
	 * Filtered many-to-many must still be join-fetched on find(): statistics are
	 * used to prove no extra collection load/fetch nor entity load happens after
	 * the initial find, i.e. no proxies were handed back.
	 */
	public void testManyToManyFilterOnLoad(SessionFactoryScope factoryScope) {
		var sessionFactory = factoryScope.getSessionFactory();
		var stats = sessionFactory.getStatistics();
		factoryScope.inSession(session -> {
			session.enableFilter( "effectiveDate" ).setParameter( "asOfDate", new Date() );
			var prod = session.find( Product.class, testData.prod1Id );
			// snapshot counters AFTER the find; accessing the collection below
			// must not change them if it was fetched eagerly with the owner
			long initLoadCount = stats.getCollectionLoadCount();
			long initFetchCount = stats.getCollectionFetchCount();
			// should already have been initialized...
			int size = prod.getCategories().size();
			assertEquals( 1, size, "Incorrect filtered collection count" );
			long currLoadCount = stats.getCollectionLoadCount();
			long currFetchCount = stats.getCollectionFetchCount();
			assertTrue( (initLoadCount == currLoadCount ) && (initFetchCount == currFetchCount ),
					"load with join fetch of many-to-many did not trigger join fetch" );
			// make sure we did not get back a collection of proxies
			long initEntityLoadCount = stats.getEntityLoadCount();
			for ( Object o : prod.getCategories() ) {
				Category cat = (Category) o;
				log.debugf( " ===> %s", cat.getName() );
			}
			long currEntityLoadCount = stats.getEntityLoadCount();
			assertEquals( initEntityLoadCount, currEntityLoadCount,
					"load with join fetch of many-to-many did not trigger *complete* join fetch" );
		} );
	}
	@Test
	/**
	 * The many-to-many filter must apply when the collection is lazily
	 * initialized AFTER the owner was obtained via HQL (no join fetch).
	 */
	public void testManyToManyOnCollectionLoadAfterHQL(SessionFactoryScope factoryScope) {
		factoryScope.inSession(session -> {
			session.enableFilter( "effectiveDate" ).setParameter( "asOfDate", new Date() );
			// Force the categories to not get initialized here
			//noinspection removal
			var result = session.createQuery( "from Product as p where p.id = :id", Product.class )
					.setParameter( "id", testData.prod1Id )
					.getResultList();
			assertFalse( result.isEmpty(), "No products returned from HQL" );
			var prod = result.get( 0 );
			assertNotNull( prod );
			// lazy initialization happens here and must be filtered
			assertEquals( 1, prod.getCategories().size(),
					"Incorrect Product.categories count for filter on collection load" );
		} );
	}
	@Test
	/**
	 * The many-to-many filter must apply to an explicit HQL join fetch of the
	 * collection ("inner join fetch p.categories").
	 */
	public void testManyToManyFilterOnQuery(SessionFactoryScope factoryScope) {
		factoryScope.inSession(session -> {
			session.enableFilter( "effectiveDate" ).setParameter( "asOfDate", new Date() );
			//noinspection removal
			var result = session.createQuery(
							"from Product p inner join fetch p.categories",
							Product.class
					)
					.getResultList();
			assertFalse( result.isEmpty(), "No products returned from HQL many-to-many filter case" );
			var prod = result.get( 0 );
			assertNotNull( prod );
			assertEquals( 1, prod.getCategories().size(),
					"Incorrect Product.categories count for filter with HQL" );
		} );
	}
	@Test
	/**
	 * Baseline for the many-to-many tests: with NO filters enabled, find() must
	 * join-fetch both categories (count 2) with no extra collection or entity
	 * loads, per the statistics counters.
	 */
	public void testManyToManyBase(SessionFactoryScope factoryScope) {
		var stats = factoryScope.getSessionFactory().getStatistics();
		factoryScope.inSession(session -> {
			var prod = session.find( Product.class, testData.prod1Id );
			// snapshot counters after the find; the size() call below must not move them
			long initLoadCount = stats.getCollectionLoadCount();
			long initFetchCount = stats.getCollectionFetchCount();
			// should already have been initialized...
			int size = prod.getCategories().size();
			assertEquals( 2, size, "Incorrect non-filtered collection count" );
			long currLoadCount = stats.getCollectionLoadCount();
			long currFetchCount = stats.getCollectionFetchCount();
			assertTrue( (initLoadCount == currLoadCount ) && (initFetchCount == currFetchCount ),
					"load with join fetch of many-to-many did not trigger join fetch" );
			// make sure we did not get back a collection of proxies
			long initEntityLoadCount = stats.getEntityLoadCount();
			for ( Object o : prod.getCategories() ) {
				var cat = (Category) o;
				log.debugf( " ===> %s", cat.getName() );
			}
			long currEntityLoadCount = stats.getEntityLoadCount();
			assertEquals( initEntityLoadCount, currEntityLoadCount,
					"load with join fetch of many-to-many did not trigger *complete* join fetch" );
		} );
	}
	@Test
	/**
	 * Same baseline as {@code testManyToManyBase}, but loading the product via a
	 * criteria query with an explicit fetch of "categories".
	 */
	public void testManyToManyBaseThruCriteria(SessionFactoryScope factoryScope) {
		var stats = factoryScope.getSessionFactory().getStatistics();
		factoryScope.inSession( session -> {
			stats.clear();
			var criteriaBuilder = session.getCriteriaBuilder();
			var criteria = criteriaBuilder.createQuery( Product.class );
			var root = criteria.from( Product.class );
			root.fetch( "categories" );
			criteria.where( criteriaBuilder.equal( root.get( "id" ), testData.prod1Id ) );
			//noinspection removal
			var result = session.createQuery( criteria ).list();
			var prod = result.get( 0 );
			// snapshot counters after the query; accessing the fetched collection
			// below must not trigger further loads
			long initLoadCount = stats.getCollectionLoadCount();
			long initFetchCount = stats.getCollectionFetchCount();
			// should already have been initialized...
			int size = prod.getCategories().size();
			assertEquals( 2, size, "Incorrect non-filtered collection count" );
			long currLoadCount = stats.getCollectionLoadCount();
			long currFetchCount = stats.getCollectionFetchCount();
			assertTrue( (initLoadCount == currLoadCount ) && (initFetchCount == currFetchCount ),
					"load with join fetch of many-to-many did not trigger join fetch" );
			// make sure we did not get back a collection of proxies
			long initEntityLoadCount = stats.getEntityLoadCount();
			for ( Object o : prod.getCategories() ) {
				var cat = (Category) o;
				log.debugf( " ===> %s", cat.getName() );
			}
			long currEntityLoadCount = stats.getEntityLoadCount();
			assertEquals( initEntityLoadCount, currEntityLoadCount,
					"load with join fetch of many-to-many did not trigger *complete* join fetch" );
		} );
	}
private static | DynamicFilterTest |
java | spring-projects__spring-security | oauth2/oauth2-client/src/test/java/org/springframework/security/oauth2/client/authentication/OAuth2AuthorizationCodeAuthenticationTokenTests.java | {
"start": 1627,
"end": 5362
} | class ____ {
private ClientRegistration clientRegistration;
private OAuth2AuthorizationExchange authorizationExchange;
private OAuth2AccessToken accessToken;
@BeforeEach
public void setUp() {
this.clientRegistration = TestClientRegistrations.clientRegistration().build();
this.authorizationExchange = new OAuth2AuthorizationExchange(TestOAuth2AuthorizationRequests.request().build(),
TestOAuth2AuthorizationResponses.success().code("code").build());
this.accessToken = TestOAuth2AccessTokens.noScopes();
}
@Test
public void constructorAuthorizationRequestResponseWhenClientRegistrationIsNullThenThrowIllegalArgumentException() {
assertThatIllegalArgumentException()
.isThrownBy(() -> new OAuth2AuthorizationCodeAuthenticationToken(null, this.authorizationExchange));
}
@Test
public void constructorAuthorizationRequestResponseWhenAuthorizationExchangeIsNullThenThrowIllegalArgumentException() {
assertThatIllegalArgumentException()
.isThrownBy(() -> new OAuth2AuthorizationCodeAuthenticationToken(this.clientRegistration, null));
}
@Test
public void constructorAuthorizationRequestResponseWhenAllParametersProvidedAndValidThenCreated() {
OAuth2AuthorizationCodeAuthenticationToken authentication = new OAuth2AuthorizationCodeAuthenticationToken(
this.clientRegistration, this.authorizationExchange);
assertThat(authentication.getPrincipal()).isEqualTo(this.clientRegistration.getClientId());
assertThat(authentication.getCredentials())
.isEqualTo(this.authorizationExchange.getAuthorizationResponse().getCode());
assertThat(authentication.getAuthorities()).isEqualTo(Collections.emptyList());
assertThat(authentication.getClientRegistration()).isEqualTo(this.clientRegistration);
assertThat(authentication.getAuthorizationExchange()).isEqualTo(this.authorizationExchange);
assertThat(authentication.getAccessToken()).isNull();
assertThat(authentication.isAuthenticated()).isEqualTo(false);
}
@Test
public void constructorTokenRequestResponseWhenClientRegistrationIsNullThenThrowIllegalArgumentException() {
assertThatIllegalArgumentException().isThrownBy(() -> new OAuth2AuthorizationCodeAuthenticationToken(null,
this.authorizationExchange, this.accessToken));
}
@Test
public void constructorTokenRequestResponseWhenAuthorizationExchangeIsNullThenThrowIllegalArgumentException() {
assertThatIllegalArgumentException().isThrownBy(
() -> new OAuth2AuthorizationCodeAuthenticationToken(this.clientRegistration, null, this.accessToken));
}
@Test
public void constructorTokenRequestResponseWhenAccessTokenIsNullThenThrowIllegalArgumentException() {
assertThatIllegalArgumentException()
.isThrownBy(() -> new OAuth2AuthorizationCodeAuthenticationToken(this.clientRegistration,
this.authorizationExchange, null));
}
@Test
public void constructorTokenRequestResponseWhenAllParametersProvidedAndValidThenCreated() {
OAuth2AuthorizationCodeAuthenticationToken authentication = new OAuth2AuthorizationCodeAuthenticationToken(
this.clientRegistration, this.authorizationExchange, this.accessToken);
assertThat(authentication.getPrincipal()).isEqualTo(this.clientRegistration.getClientId());
assertThat(authentication.getCredentials()).isEqualTo(this.accessToken.getTokenValue());
assertThat(authentication.getAuthorities()).isEqualTo(Collections.emptyList());
assertThat(authentication.getClientRegistration()).isEqualTo(this.clientRegistration);
assertThat(authentication.getAuthorizationExchange()).isEqualTo(this.authorizationExchange);
assertThat(authentication.getAccessToken()).isEqualTo(this.accessToken);
assertThat(authentication.isAuthenticated()).isEqualTo(true);
}
}
| OAuth2AuthorizationCodeAuthenticationTokenTests |
java | micronaut-projects__micronaut-core | inject/src/main/java/io/micronaut/context/annotation/Mapper.java | {
"start": 2842,
"end": 4138
} | interface ____ {
String MEMBER_TO = "to";
String MEMBER_FROM = "from";
String MEMBER_CONDITION = "condition";
String MEMBER_FORMAT = "format";
String MEMBER_DEFAULT_VALUE = "defaultValue";
/**
* The property name to map to. When not specified assume the root bean is being mapped to.
*
* @return name of the property to map to.
*/
String to() default "";
/**
* Specifies the name of the property to map from. Can be an expression.
* @return Name of the property to map from.
*/
String from() default "";
/**
* @return An expression the evaluates to true if the mapping should apply.
*/
String condition() default "";
/**
* @return The default value to use.
*/
String defaultValue() default "";
/**
* @return The format to convert numbers or dates into a string.
*/
String format() default "";
}
/**
* The conflict strategy specifies the behaviour if a conflict is found.
*
* <p>A conflict could be if for the example the source input defines a property that doesn't exist in the output or the types don't match</p>
*/
| Mapping |
java | apache__flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/serde/RexWindowBoundJsonDeserializer.java | {
"start": 2567,
"end": 4326
} | class ____ extends StdDeserializer<RexWindowBound> {
RexWindowBoundJsonDeserializer() {
super(RexWindowBound.class);
}
@Override
public RexWindowBound deserialize(
JsonParser jsonParser, DeserializationContext deserializationContext)
throws IOException {
JsonNode jsonNode = jsonParser.readValueAsTree();
String kind = jsonNode.get(FIELD_NAME_KIND).asText().toUpperCase();
switch (kind) {
case KIND_CURRENT_ROW:
return RexWindowBounds.CURRENT_ROW;
case KIND_UNBOUNDED_FOLLOWING:
return RexWindowBounds.UNBOUNDED_FOLLOWING;
case KIND_UNBOUNDED_PRECEDING:
return RexWindowBounds.UNBOUNDED_PRECEDING;
case KIND_BOUNDED_WINDOW:
RexNode offset = null;
if (jsonNode.get(FIELD_NAME_OFFSET) != null) {
offset =
deserializationContext.readValue(
jsonNode.get(FIELD_NAME_OFFSET).traverse(jsonParser.getCodec()),
RexNode.class);
}
if (offset != null && jsonNode.get(FIELD_NAME_IS_FOLLOWING) != null) {
return RexWindowBounds.following(offset);
} else if (offset != null && jsonNode.get(FIELD_NAME_IS_PRECEDING) != null) {
return RexWindowBounds.preceding(offset);
} else {
throw new TableException("Unknown RexWindowBound: " + jsonNode.toString());
}
default:
throw new TableException("Unknown RexWindowBound: " + jsonNode.toString());
}
}
}
| RexWindowBoundJsonDeserializer |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/persister/collection/mutation/UpdateRowsCoordinator.java | {
"start": 314,
"end": 497
} | interface ____ extends CollectionOperationCoordinator {
void updateRows(Object key, PersistentCollection<?> collection, SharedSessionContractImplementor session);
}
| UpdateRowsCoordinator |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/UpdateTransformAction.java | {
"start": 6750,
"end": 7592
} | class ____ not implement equals, therefore we need to check timeout ourselves
return Objects.equals(update, other.update)
&& this.deferValidation == other.deferValidation
&& this.id.equals(other.id)
&& Objects.equals(config, other.config)
&& Objects.equals(authState, other.authState)
&& getTimeout().equals(other.getTimeout());
}
@Override
public boolean match(Task task) {
if (task.getDescription().startsWith(TransformField.PERSISTENT_TASK_DESCRIPTION_PREFIX)) {
String taskId = task.getDescription().substring(TransformField.PERSISTENT_TASK_DESCRIPTION_PREFIX.length());
return taskId.equals(this.id);
}
return false;
}
}
public static | does |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/processor/DetourTest.java | {
"start": 2929,
"end": 3156
} | class ____ {
private boolean detour;
public void setDetour(boolean detour) {
this.detour = detour;
}
public boolean isDetour() {
return detour;
}
}
}
| ControlBean |
java | netty__netty | codec-http3/src/main/java/io/netty/handler/codec/http3/Http3ServerPushStreamManager.java | {
"start": 2271,
"end": 13246
} | class ____ {
private static final AtomicLongFieldUpdater<Http3ServerPushStreamManager> nextIdUpdater =
newUpdater(Http3ServerPushStreamManager.class, "nextId");
private static final Object CANCELLED_STREAM = new Object();
private static final Object PUSH_ID_GENERATED = new Object();
private static final Object AWAITING_STREAM_ESTABLISHMENT = new Object();
private final QuicChannel channel;
private final ConcurrentMap<Long, Object> pushStreams;
private final ChannelInboundHandler controlStreamListener;
private volatile long nextId;
/**
* Creates a new instance.
*
* @param channel for which this manager is created.
*/
public Http3ServerPushStreamManager(QuicChannel channel) {
this(channel, 8);
}
/**
* Creates a new instance.
*
* @param channel for which this manager is created.
* @param initialPushStreamsCountHint a hint for the number of push streams that may be created.
*/
public Http3ServerPushStreamManager(QuicChannel channel, int initialPushStreamsCountHint) {
this.channel = requireNonNull(channel, "channel");
pushStreams = newConcurrentHashMap(initialPushStreamsCountHint);
controlStreamListener = new ChannelInboundHandlerAdapter() {
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) {
if (msg instanceof Http3CancelPushFrame) {
final long pushId = ((Http3CancelPushFrame) msg).id();
if (pushId >= nextId) {
connectionError(ctx, H3_ID_ERROR, "CANCEL_PUSH id greater than the last known id", true);
return;
}
pushStreams.computeIfPresent(pushId, (id, existing) -> {
if (existing == AWAITING_STREAM_ESTABLISHMENT) {
return CANCELLED_STREAM;
}
if (existing == PUSH_ID_GENERATED) {
throw new IllegalStateException("Unexpected push stream state " + existing +
" for pushId: " + id);
}
assert existing instanceof QuicStreamChannel;
((QuicStreamChannel) existing).close();
// remove the push stream from the map.
return null;
});
}
ReferenceCountUtil.release(msg);
}
};
}
/**
* Returns {@code true} if server push is allowed at this point.
*
* @return {@code true} if server push is allowed at this point.
*/
public boolean isPushAllowed() {
return isPushAllowed(maxPushIdReceived(channel));
}
/**
* Reserves a push ID to be used to create a new push stream subsequently. A push ID can only be used to create
* exactly one push stream.
*
* @return Next push ID.
* @throws IllegalStateException If it is not allowed to create any more push streams on the associated
* {@link QuicChannel}. Use {@link #isPushAllowed()} to check if server push is allowed.
*/
public long reserveNextPushId() {
final long maxPushId = maxPushIdReceived(channel);
if (isPushAllowed(maxPushId)) {
return nextPushId();
}
throw new IllegalStateException("MAX allowed push ID: " + maxPushId + ", next push ID: " + nextId);
}
/**
* Returns a new HTTP/3 push-stream that will use the given {@link ChannelHandler}
* to dispatch {@link Http3PushStreamFrame}s too. The needed HTTP/3 codecs are automatically added to the
* pipeline as well.
*
* @param pushId for the push stream. This MUST be obtained using {@link #reserveNextPushId()}.
* @param handler the {@link ChannelHandler} to add. Can be {@code null}.
* @return the {@link Future} that will be notified once the push-stream was opened.
*/
public Future<QuicStreamChannel> newPushStream(long pushId, @Nullable ChannelHandler handler) {
final Promise<QuicStreamChannel> promise = channel.eventLoop().newPromise();
newPushStream(pushId, handler, promise);
return promise;
}
/**
* Returns a new HTTP/3 push-stream that will use the given {@link ChannelHandler}
* to dispatch {@link Http3PushStreamFrame}s too. The needed HTTP/3 codecs are automatically added to the
* pipeline as well.
*
* @param pushId for the push stream. This MUST be obtained using {@link #reserveNextPushId()}.
* @param handler the {@link ChannelHandler} to add. Can be {@code null}.
* @param promise to indicate creation of the push stream.
*/
public void newPushStream(long pushId, @Nullable ChannelHandler handler, Promise<QuicStreamChannel> promise) {
validatePushId(pushId);
channel.createStream(QuicStreamType.UNIDIRECTIONAL, pushStreamInitializer(pushId, handler), promise);
setupCancelPushIfStreamCreationFails(pushId, promise, channel);
}
/**
* Returns a new HTTP/3 push-stream that will use the given {@link ChannelHandler}
* to dispatch {@link Http3PushStreamFrame}s too. The needed HTTP/3 codecs are automatically added to the
* pipeline as well.
*
* @param pushId for the push stream. This MUST be obtained using {@link #reserveNextPushId()}.
* @param handler the {@link ChannelHandler} to add. Can be {@code null}.
* @param bootstrapConfigurator {@link UnaryOperator} to configure the {@link QuicStreamChannelBootstrap} used.
* @param promise to indicate creation of the push stream.
*/
public void newPushStream(long pushId, @Nullable ChannelHandler handler,
UnaryOperator<QuicStreamChannelBootstrap> bootstrapConfigurator,
Promise<QuicStreamChannel> promise) {
validatePushId(pushId);
QuicStreamChannelBootstrap bootstrap = bootstrapConfigurator.apply(channel.newStreamBootstrap());
bootstrap.type(QuicStreamType.UNIDIRECTIONAL)
.handler(pushStreamInitializer(pushId, handler))
.create(promise);
setupCancelPushIfStreamCreationFails(pushId, promise, channel);
}
/**
* A {@link ChannelInboundHandler} to be added to the {@link QuicChannel} associated with this
* {@link Http3ServerPushStreamManager} to listen to control stream frames.
*
* @return {@link ChannelInboundHandler} to be added to the {@link QuicChannel} associated with this
* {@link Http3ServerPushStreamManager} to listen to control stream frames.
*/
public ChannelInboundHandler controlStreamListener() {
return controlStreamListener;
}
private boolean isPushAllowed(long maxPushId) {
return nextId <= maxPushId;
}
private long nextPushId() {
final long pushId = nextIdUpdater.getAndIncrement(this);
pushStreams.put(pushId, PUSH_ID_GENERATED);
return pushId;
}
private void validatePushId(long pushId) {
if (!pushStreams.replace(pushId, PUSH_ID_GENERATED, AWAITING_STREAM_ESTABLISHMENT)) {
throw new IllegalArgumentException("Unknown push ID: " + pushId);
}
}
private Http3PushStreamServerInitializer pushStreamInitializer(long pushId, @Nullable ChannelHandler handler) {
final Http3PushStreamServerInitializer initializer;
if (handler instanceof Http3PushStreamServerInitializer) {
initializer = (Http3PushStreamServerInitializer) handler;
} else {
initializer = null;
}
return new Http3PushStreamServerInitializer(pushId) {
@Override
protected void initPushStream(QuicStreamChannel ch) {
ch.pipeline().addLast(new ChannelInboundHandlerAdapter() {
private boolean stateUpdated;
@Override
public void channelActive(ChannelHandlerContext ctx) {
if (!stateUpdated) {
updatePushStreamsMap();
}
}
@Override
public void handlerAdded(ChannelHandlerContext ctx) {
if (!stateUpdated && ctx.channel().isActive()) {
updatePushStreamsMap();
}
}
private void updatePushStreamsMap() {
assert !stateUpdated;
stateUpdated = true;
pushStreams.compute(pushId, (id, existing) -> {
if (existing == AWAITING_STREAM_ESTABLISHMENT) {
return ch;
}
if (existing == CANCELLED_STREAM) {
ch.close();
return null; // remove push stream.
}
throw new IllegalStateException("Unexpected push stream state " +
existing + " for pushId: " + id);
});
}
@Override
public void userEventTriggered(ChannelHandlerContext ctx, Object evt) {
if (evt == ChannelInputShutdownReadComplete.INSTANCE) {
pushStreams.remove(pushId);
}
ctx.fireUserEventTriggered(evt);
}
});
if (initializer != null) {
initializer.initPushStream(ch);
} else if (handler != null) {
ch.pipeline().addLast(handler);
}
}
};
}
private static void setupCancelPushIfStreamCreationFails(long pushId, Future<QuicStreamChannel> future,
QuicChannel channel) {
if (future.isDone()) {
sendCancelPushIfFailed(future, pushId, channel);
} else {
future.addListener(f -> sendCancelPushIfFailed(future, pushId, channel));
}
}
private static void sendCancelPushIfFailed(Future<QuicStreamChannel> future, long pushId, QuicChannel channel) {
// https://quicwg.org/base-drafts/draft-ietf-quic-http.html#name-cancel_push
// If we can not establish the stream, we can not send the promised push response, so send a CANCEL_PUSH
if (!future.isSuccess()) {
final QuicStreamChannel localControlStream = Http3.getLocalControlStream(channel);
assert localControlStream != null;
localControlStream.writeAndFlush(new DefaultHttp3CancelPushFrame(pushId));
}
}
}
| Http3ServerPushStreamManager |
java | google__error-prone | core/src/main/java/com/google/errorprone/bugpatterns/LabelledBreakTarget.java | {
"start": 1180,
"end": 1552
} | class ____ extends BugChecker implements LabeledStatementTreeMatcher {
@Override
public Description matchLabeledStatement(LabeledStatementTree tree, VisitorState state) {
return switch (tree.getStatement().getKind()) {
case DO_WHILE_LOOP, ENHANCED_FOR_LOOP, FOR_LOOP, WHILE_LOOP -> NO_MATCH;
default -> describeMatch(tree);
};
}
}
| LabelledBreakTarget |
java | elastic__elasticsearch | x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/integrity/RestRepositoryVerifyIntegrityAction.java | {
"start": 747,
"end": 1735
} | class ____ extends BaseRestHandler {
@Override
public List<Route> routes() {
return List.of(new Route(POST, "/_snapshot/{repository}/_verify_integrity"));
}
@Override
public String getName() {
return "repository_verify_integrity";
}
@Override
public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) {
final var requestParams = new RepositoryVerifyIntegrityParams(request);
return channel -> {
final var responseStream = new RepositoryVerifyIntegrityResponseStream(channel);
new RestCancellableNodeClient(client, request.getHttpChannel()).execute(
TransportRepositoryVerifyIntegrityCoordinationAction.INSTANCE,
new TransportRepositoryVerifyIntegrityCoordinationAction.Request(requestParams, responseStream),
responseStream.getCompletionListener()
);
};
}
}
| RestRepositoryVerifyIntegrityAction |
java | apache__flink | flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/windowing/WindowWordCount.java | {
"start": 2583,
"end": 8864
} | class ____ {
// *************************************************************************
// PROGRAM
// *************************************************************************
public static void main(String[] args) throws Exception {
final CLI params = CLI.fromArgs(args);
// Create the execution environment. This is the main entrypoint
// to building a Flink application.
final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
// Apache Flink’s unified approach to stream and batch processing means that a DataStream
// application executed over bounded input will produce the same final results regardless
// of the configured execution mode. It is important to note what final means here: a job
// executing in STREAMING mode might produce incremental updates (think upserts in
// a database) while a BATCH job would only produce one final result at the end. The final
// result will be the same if interpreted correctly, but getting there can be different.
//
// The “classic” execution behavior of the DataStream API is called STREAMING execution
// mode. Applications should use streaming execution for unbounded jobs that require
// continuous incremental processing and are expected to stay online indefinitely.
//
// By enabling BATCH execution, we allow Flink to apply additional optimizations that we
// can only do when we know that our input is bounded. For example, different
// join/aggregation strategies can be used, in addition to a different shuffle
// implementation that allows more efficient task scheduling and failure recovery behavior.
//
// By setting the runtime mode to AUTOMATIC, Flink will choose BATCH if all sources
// are bounded and otherwise STREAMING.
env.setRuntimeMode(params.getExecutionMode());
// This optional step makes the input parameters
// available in the Flink UI.
env.getConfig().setGlobalJobParameters(params);
if (params.isAsyncState()) {
Configuration config = Configuration.fromMap(env.getConfiguration().toMap());
if (!config.containsKey(StateBackendOptions.STATE_BACKEND.key())) {
config.set(StateBackendOptions.STATE_BACKEND, FORST_STATE_BACKEND_NAME);
env.configure(config);
}
}
DataStream<String> text;
if (params.getInputs().isPresent()) {
// Create a new file source that will read files from a given set of directories.
// Each file will be processed as plain text and split based on newlines.
FileSource.FileSourceBuilder<String> builder =
FileSource.forRecordStreamFormat(
new TextLineInputFormat(), params.getInputs().get());
// If a discovery interval is provided, the source will
// continuously watch the given directories for new files.
params.getDiscoveryInterval().ifPresent(builder::monitorContinuously);
text = env.fromSource(builder.build(), WatermarkStrategy.noWatermarks(), "file-input");
} else {
text = env.fromData(WordCountData.WORDS).name("in-memory-input");
}
int windowSize = params.getInt("window").orElse(250);
int slideSize = params.getInt("slide").orElse(150);
KeyedStream<Tuple2<String, Integer>, String> keyedStream =
// The text lines read from the source are split into words
// using a user-defined function. The tokenizer, implemented below,
// will output each words as a (2-tuple) containing (word, 1)
text.flatMap(new WordCount.Tokenizer())
.name("tokenizer")
// keyBy groups tuples based on the "0" field, the word.
// Using a keyBy allows performing aggregations and other
// stateful transformations over data on a per-key basis.
// This is similar to a GROUP BY clause in a SQL query.
.keyBy(value -> value.f0);
if (params.isAsyncState()) {
keyedStream.enableAsyncState();
}
DataStream<Tuple2<String, Integer>> counts =
keyedStream
// create windows of windowSize records slided every slideSize records
.countWindow(windowSize, slideSize)
// For each key, we perform a simple sum of the "1" field, the count.
// If the input data set is bounded, sum will output a final count for
// each word. If it is unbounded, it will continuously output updates
// each time it sees a new instance of each word in the stream.
.sum(1)
.name("counter");
if (params.getOutput().isPresent()) {
// Given an output directory, Flink will write the results to a file
// using a simple string encoding. In a production environment, this might
// be something more structured like CSV, Avro, JSON, or Parquet.
counts.sinkTo(
FileSink.<Tuple2<String, Integer>>forRowFormat(
params.getOutput().get(), new SimpleStringEncoder<>())
.withRollingPolicy(
DefaultRollingPolicy.builder()
.withMaxPartSize(MemorySize.ofMebiBytes(1))
.withRolloverInterval(Duration.ofSeconds(10))
.build())
.build())
.name("file-sink");
} else {
counts.print().name("print-sink");
}
// Apache Flink applications are composed lazily. Calling execute
// submits the Job and begins processing.
env.execute("WindowWordCount");
}
}
| WindowWordCount |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/component/EmbeddableAndGenericExtendingSerializableMappedSuperclassTest.java | {
"start": 876,
"end": 1237
} | class ____ {
@JiraKey("HHH-17041")
@Test
public void testQueryEmbeddableFields(SessionFactoryScope scope) {
scope.inTransaction(
session -> {
session.createQuery( "select m.embedded.text, m.embedded.name from MyEntity m" ).list();
}
);
}
@MappedSuperclass
public static abstract | EmbeddableAndGenericExtendingSerializableMappedSuperclassTest |
java | quarkusio__quarkus | extensions/hibernate-orm/deployment/src/test/java/io/quarkus/hibernate/orm/offline/StartOfflineDevModeTest.java | {
"start": 611,
"end": 1455
} | class ____ {
@RegisterExtension
static QuarkusDevModeTest runner = new QuarkusDevModeTest()
.withApplicationRoot((jar) -> jar
.addClass(MyEntity.class)
.addClass(GreetingResource.class)
.addAsResource("application-start-offline.properties", "application.properties"))
.setLogRecordPredicate(record -> true);
@Test
public void testUnitSchemaManagementStrategyIsNone() {
RestAssured.when().get("/hello").then()
.statusCode(200)
.body(is("DB is offline but application is running"));
assertThat(runner.getLogRecords())
.map(l -> l.getMessage())
.doesNotContain("Failed to run post-boot validation");
}
@Path("/hello")
public static | StartOfflineDevModeTest |
java | quarkusio__quarkus | independent-projects/arc/processor/src/main/java/io/quarkus/arc/processor/AnnotationLiteralProcessor.java | {
"start": 1580,
"end": 4362
} | class ____ {
private static final String ANNOTATION_LITERAL_SUFFIX = "_ArcAnnotationLiteral";
private final ComputingCache<CacheKey, AnnotationLiteralClassInfo> cache;
private final IndexView beanArchiveIndex;
AnnotationLiteralProcessor(IndexView beanArchiveIndex, Predicate<DotName> applicationClassPredicate) {
this.cache = new ComputingCache<>(key -> new AnnotationLiteralClassInfo(
generateAnnotationLiteralClassName(key.annotationName()),
applicationClassPredicate.test(key.annotationName()),
key.annotationClass));
this.beanArchiveIndex = Objects.requireNonNull(beanArchiveIndex);
}
boolean hasLiteralsToGenerate() {
return !cache.isEmpty();
}
ComputingCache<CacheKey, AnnotationLiteralClassInfo> getCache() {
return cache;
}
/**
* Generates a bytecode sequence to create an instance of given annotation type, such that
* the annotation members have the same values as the given annotation instance.
* An implementation of the annotation type will be generated automatically.
* <p>
* It is expected that given annotation instance is runtime-retained; an exception is thrown
* if not. Further, it is expected that the annotation type is available (that is,
* {@code annotationClass != null}); an exception is thrown if not. Callers that expect
* they always deal with runtime-retained annotations whose classes are available do not
* have to check (and will get decent errors for free), but callers that can possibly deal
* with class-retained annotations or missing annotation classes must check explicitly.
* <p>
* We call the generated implementation of the annotation type an <em>annotation literal class</em>
* and the instance produced by the generated bytecode an <em>annotation literal instance</em>,
* even though the generated code doesn't use CDI's {@code AnnotationLiteral}.
*
* @param bc will receive the bytecode sequence for instantiating the annotation literal class
* as a sequence of {@link BlockCreator} method calls
* @param annotationClass the annotation type
* @param annotationInstance the annotation instance; must match the {@code annotationClass}
* @return an annotation literal instance result handle
*/
public Expr create(BlockCreator bc, ClassInfo annotationClass, AnnotationInstance annotationInstance) {
if (!annotationInstance.runtimeVisible()) {
throw new IllegalArgumentException("Annotation does not have @Retention(RUNTIME): " + annotationInstance);
}
if (annotationClass == null) {
throw new IllegalArgumentException("Annotation | AnnotationLiteralProcessor |
java | quarkusio__quarkus | extensions/qute/deployment/src/test/java/io/quarkus/qute/deployment/globals/TemplateGlobalTest.java | {
"start": 2998,
"end": 3387
} | class ____ {
// field-level annotation overrides the class-level one
@TemplateGlobal(name = "_name")
static final String NAME = user();
// this method is ignored
private static String user() {
return "Lu";
}
static Color[] colors() {
return new Color[] { Color.RED, Color.BLUE };
}
}
}
| NextGlobals |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/ser/JacksonTypesSerTest.java | {
"start": 468,
"end": 2082
} | class ____
extends DatabindTestUtil
{
private final ObjectMapper MAPPER = newJsonMapper();
@Test
public void testLocation() throws IOException
{
File f = new File("/tmp/test.json");
TokenStreamLocation loc = new TokenStreamLocation(ContentReference.rawReference(f),
-1, 100, 13);
Map<String,Object> result = writeAndMap(MAPPER, loc);
// 04-Apr-2021, tatu: Jackson 2.x used to output "sourceRef"; no longer in 3.x
// assertEquals(f.getAbsolutePath(), result.get("sourceRef"));
assertEquals(Integer.valueOf(-1), result.get("charOffset"));
assertEquals(Integer.valueOf(-1), result.get("byteOffset"));
assertEquals(Integer.valueOf(100), result.get("lineNr"));
assertEquals(Integer.valueOf(13), result.get("columnNr"));
assertEquals(4, result.size());
}
/**
* Verify that {@link TokenBuffer} can be properly serialized
* automatically, using the "standard" JSON sample document
*/
@Test
public void testTokenBuffer() throws Exception
{
// First, copy events from known good source (StringReader)
JsonParser p = createParserUsingReader(SAMPLE_DOC_JSON_SPEC);
TokenBuffer tb = TokenBuffer.forGeneration();
while (p.nextToken() != null) {
tb.copyCurrentEvent(p);
}
p.close();
// Then serialize as String
String str = MAPPER.writeValueAsString(tb);
tb.close();
// and verify it looks ok
verifyJsonSpecSampleDoc(createParserUsingReader(str), true);
}
}
| JacksonTypesSerTest |
java | dropwizard__dropwizard | dropwizard-jackson/src/main/java/io/dropwizard/jackson/DiscoverableSubtypeResolver.java | {
"start": 2411,
"end": 2674
} | class ____ lookup services
* @return the discovered services
*/
protected List<Class<?>> discoverServices(Class<?> klass) {
final List<Class<?>> serviceClasses = new ArrayList<>();
try {
// use classloader that loaded this | to |
java | apache__avro | lang/java/mapred/src/main/java/org/apache/avro/mapreduce/AvroSequenceFileOutputFormat.java | {
"start": 1805,
"end": 4605
} | class ____<K, V> extends FileOutputFormat<K, V> {
/** {@inheritDoc} */
@Override
public RecordWriter<K, V> getRecordWriter(TaskAttemptContext context) throws IOException, InterruptedException {
Configuration conf = context.getConfiguration();
// Configure compression if requested.
CompressionCodec codec = null;
CompressionType compressionType = CompressionType.NONE;
if (getCompressOutput(context)) {
// Find the kind of compression to do.
compressionType = getOutputCompressionType(conf);
// Find the right codec.
Class<?> codecClass = getOutputCompressorClass(context, DefaultCodec.class);
codec = (CompressionCodec) ReflectionUtils.newInstance(codecClass, conf);
}
// Get the path of the output file.
Path outputFile = getDefaultWorkFile(context, "");
FileSystem fs = outputFile.getFileSystem(conf);
// Configure the writer.
AvroSequenceFile.Writer.Options options = new AvroSequenceFile.Writer.Options().withFileSystem(fs)
.withConfiguration(conf).withOutputPath(outputFile).withKeyClass(context.getOutputKeyClass())
.withValueClass(context.getOutputValueClass()).withProgressable(context).withCompressionType(compressionType)
.withCompressionCodec(codec);
Schema keySchema = AvroJob.getOutputKeySchema(conf);
if (null != keySchema) {
options.withKeySchema(keySchema);
}
Schema valueSchema = AvroJob.getOutputValueSchema(conf);
if (null != valueSchema) {
options.withValueSchema(valueSchema);
}
final SequenceFile.Writer out = AvroSequenceFile.createWriter(options);
return new RecordWriter<K, V>() {
@Override
public void write(K key, V value) throws IOException {
out.append(key, value);
}
@Override
public void close(TaskAttemptContext context) throws IOException {
out.close();
}
};
}
/**
* Sets the type of compression for the output sequence file.
*
* @param job The job configuration.
* @param compressionType The compression type for the target sequence file.
*/
public static void setOutputCompressionType(Job job, CompressionType compressionType) {
setCompressOutput(job, true);
job.getConfiguration().set(FileOutputFormat.COMPRESS_TYPE, compressionType.name());
}
/**
* Gets type of compression for the output sequence file.
*
* @param conf The job configuration.
* @return The compression type.
*/
public static CompressionType getOutputCompressionType(Configuration conf) {
String typeName = conf.get(FileOutputFormat.COMPRESS_TYPE);
if (typeName != null) {
return CompressionType.valueOf(typeName);
}
return SequenceFile.getDefaultCompressionType(conf);
}
}
| AvroSequenceFileOutputFormat |
java | spring-projects__spring-boot | module/spring-boot-health/src/main/java/org/springframework/boot/health/registry/HealthContributorRegistry.java | {
"start": 992,
"end": 1716
} | interface ____ extends HealthContributors {
/**
* Register a contributor with the given {@code name}.
* @param name the name of the contributor
* @param contributor the contributor to register
* @throws IllegalStateException if the contributor cannot be registered with the
* given {@code name}.
*/
void registerContributor(String name, HealthContributor contributor);
/**
* Unregister a previously registered contributor.
* @param name the name of the contributor to unregister
* @return the unregistered indicator, or {@code null} if no indicator was found in
* the registry for the given {@code name}.
*/
@Nullable HealthContributor unregisterContributor(String name);
}
| HealthContributorRegistry |
java | apache__flink | flink-kubernetes/src/main/java/org/apache/flink/kubernetes/kubeclient/resources/KubernetesPodsWatcher.java | {
"start": 1133,
"end": 2410
} | class ____ extends AbstractKubernetesWatcher<Pod, KubernetesPod> {
public KubernetesPodsWatcher(
FlinkKubeClient.WatchCallbackHandler<KubernetesPod> callbackHandler) {
super(callbackHandler);
}
@Override
public void eventReceived(Action action, Pod pod) {
logger.debug(
"Received {} event for pod {}, details: {}{}",
action,
pod.getMetadata().getName(),
System.lineSeparator(),
KubernetesUtils.tryToGetPrettyPrintYaml(pod.getStatus()));
final List<KubernetesPod> pods = Collections.singletonList(new KubernetesPod(pod));
switch (action) {
case ADDED:
callbackHandler.onAdded(pods);
break;
case MODIFIED:
callbackHandler.onModified(pods);
break;
case ERROR:
callbackHandler.onError(pods);
break;
case DELETED:
callbackHandler.onDeleted(pods);
break;
default:
logger.debug(
"Ignore handling {} event for pod {}", action, pod.getMetadata().getName());
break;
}
}
}
| KubernetesPodsWatcher |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/annotation/authentication/configuration/AuthenticationConfigurationTests.java | {
"start": 17613,
"end": 17867
} | class ____ {
AuthenticationManager authenticationManager = mock(AuthenticationManager.class);
@Bean
AuthenticationManager authenticationManager() {
return this.authenticationManager;
}
}
@Configuration
static | AuthenticationManagerBeanConfig |
java | google__dagger | javatests/dagger/internal/codegen/MultipleRequestTest.java | {
"start": 1797,
"end": 2248
} | interface ____ {",
" ConstructorInjectsMultiple get();",
"}"))
.compile(subject -> subject.hasErrorCount(0));
}
@Test public void multipleRequests_field() {
CompilerTests.daggerCompiler(
CompilerTests.javaSource(
"test.Dep",
"package test;",
"",
"import javax.inject.Inject;",
"",
" | SimpleComponent |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/state/heap/CopyOnWriteStateMapTest.java | {
"start": 2101,
"end": 23277
} | class ____ {
/** Testing the basic map operations. */
@Test
void testPutGetRemoveContainsTransform() throws Exception {
final CopyOnWriteStateMap<Integer, Integer, ArrayList<Integer>> stateMap =
new CopyOnWriteStateMap<>(new ArrayListSerializer<>(IntSerializer.INSTANCE));
ArrayList<Integer> state11 = new ArrayList<>();
state11.add(41);
ArrayList<Integer> state21 = new ArrayList<>();
state21.add(42);
ArrayList<Integer> state12 = new ArrayList<>();
state12.add(43);
assertThat(stateMap.putAndGetOld(1, 1, state11)).isNull();
assertThat(stateMap.get(1, 1)).isEqualTo(state11);
assertThat(stateMap).hasSize(1);
assertThat(stateMap.putAndGetOld(2, 1, state21)).isNull();
assertThat(stateMap.get(2, 1)).isEqualTo(state21);
assertThat(stateMap).hasSize(2);
assertThat(stateMap.putAndGetOld(1, 2, state12)).isNull();
assertThat(stateMap.get(1, 2)).isEqualTo(state12);
assertThat(stateMap).hasSize(3);
assertThat(stateMap.containsKey(2, 1)).isTrue();
assertThat(stateMap.containsKey(3, 1)).isFalse();
assertThat(stateMap.containsKey(2, 3)).isFalse();
stateMap.put(2, 1, null);
assertThat(stateMap.containsKey(2, 1)).isTrue();
assertThat(stateMap).hasSize(3);
assertThat(stateMap.get(2, 1)).isNull();
stateMap.put(2, 1, state21);
assertThat(stateMap).hasSize(3);
assertThat(stateMap.removeAndGetOld(2, 1)).isEqualTo(state21);
assertThat(stateMap.containsKey(2, 1)).isFalse();
assertThat(stateMap).hasSize(2);
stateMap.remove(1, 2);
assertThat(stateMap.containsKey(1, 2)).isFalse();
assertThat(stateMap).hasSize(1);
assertThat(stateMap.removeAndGetOld(4, 2)).isNull();
assertThat(stateMap).hasSize(1);
StateTransformationFunction<ArrayList<Integer>, Integer> function =
(previousState, value) -> {
previousState.add(value);
return previousState;
};
final int value = 4711;
stateMap.transform(1, 1, value, function);
state11 = function.apply(state11, value);
assertThat(stateMap.get(1, 1)).isEqualTo(state11);
}
/** This test triggers incremental rehash and tests for corruptions. */
@Test
void testIncrementalRehash() {
final CopyOnWriteStateMap<Integer, Integer, ArrayList<Integer>> stateMap =
new CopyOnWriteStateMap<>(new ArrayListSerializer<>(IntSerializer.INSTANCE));
int insert = 0;
int remove = 0;
while (!stateMap.isRehashing()) {
stateMap.put(insert++, 0, new ArrayList<>());
if (insert % 8 == 0) {
stateMap.remove(remove++, 0);
}
}
assertThat(stateMap).hasSize(insert - remove);
while (stateMap.isRehashing()) {
stateMap.put(insert++, 0, new ArrayList<>());
if (insert % 8 == 0) {
stateMap.remove(remove++, 0);
}
}
assertThat(stateMap).hasSize(insert - remove);
for (int i = 0; i < insert; ++i) {
if (i < remove) {
assertThat(stateMap.containsKey(i, 0)).isFalse();
} else {
assertThat(stateMap.containsKey(i, 0)).isTrue();
}
}
}
/**
* This test does some random modifications to a state map and a reference (hash map). Then
* draws snapshots, performs more modifications and checks snapshot integrity.
*/
@Test
void testRandomModificationsAndCopyOnWriteIsolation() throws Exception {
final CopyOnWriteStateMap<Integer, Integer, ArrayList<Integer>> stateMap =
new CopyOnWriteStateMap<>(new ArrayListSerializer<>(IntSerializer.INSTANCE));
final HashMap<Tuple2<Integer, Integer>, ArrayList<Integer>> referenceMap = new HashMap<>();
final Random random = new Random(42);
// holds snapshots from the map under test
CopyOnWriteStateMap.StateMapEntry<Integer, Integer, ArrayList<Integer>>[] snapshot = null;
int snapshotSize = 0;
// holds a reference snapshot from our reference map that we compare against
Tuple3<Integer, Integer, ArrayList<Integer>>[] reference = null;
int val = 0;
int snapshotCounter = 0;
int referencedSnapshotId = 0;
final StateTransformationFunction<ArrayList<Integer>, Integer> transformationFunction =
(previousState, value) -> {
if (previousState == null) {
previousState = new ArrayList<>();
}
previousState.add(value);
// we give back the original, attempting to spot errors in to copy-on-write
return previousState;
};
StateIncrementalVisitor<Integer, Integer, ArrayList<Integer>> updatingIterator =
stateMap.getStateIncrementalVisitor(5);
// the main loop for modifications
for (int i = 0; i < 10_000_000; ++i) {
int key = random.nextInt(20);
int namespace = random.nextInt(4);
Tuple2<Integer, Integer> compositeKey = new Tuple2<>(key, namespace);
int op = random.nextInt(10);
ArrayList<Integer> state = null;
ArrayList<Integer> referenceState = null;
switch (op) {
case 0:
case 1:
{
state = stateMap.get(key, namespace);
referenceState = referenceMap.get(compositeKey);
if (null == state) {
state = new ArrayList<>();
stateMap.put(key, namespace, state);
referenceState = new ArrayList<>();
referenceMap.put(compositeKey, referenceState);
}
break;
}
case 2:
{
stateMap.put(key, namespace, new ArrayList<>());
referenceMap.put(compositeKey, new ArrayList<>());
break;
}
case 3:
{
state = stateMap.putAndGetOld(key, namespace, new ArrayList<>());
referenceState = referenceMap.put(compositeKey, new ArrayList<>());
break;
}
case 4:
{
stateMap.remove(key, namespace);
referenceMap.remove(compositeKey);
break;
}
case 5:
{
state = stateMap.removeAndGetOld(key, namespace);
referenceState = referenceMap.remove(compositeKey);
break;
}
case 6:
{
final int updateValue = random.nextInt(1000);
stateMap.transform(key, namespace, updateValue, transformationFunction);
referenceMap.put(
compositeKey,
transformationFunction.apply(
referenceMap.remove(compositeKey), updateValue));
break;
}
case 7:
case 8:
case 9:
if (!updatingIterator.hasNext()) {
updatingIterator = stateMap.getStateIncrementalVisitor(5);
if (!updatingIterator.hasNext()) {
break;
}
}
testStateIteratorWithUpdate(
updatingIterator, stateMap, referenceMap, op == 8, op == 9);
break;
default:
{
fail("Unknown op-code " + op);
}
}
assertThat(stateMap).hasSize(referenceMap.size());
if (state != null) {
assertThat(referenceState).isNotNull();
// mutate the states a bit...
if (random.nextBoolean() && !state.isEmpty()) {
state.remove(state.size() - 1);
referenceState.remove(referenceState.size() - 1);
} else {
state.add(val);
referenceState.add(val);
++val;
}
}
assertThat(state).isEqualTo(referenceState);
// snapshot triggering / comparison / release
if (i > 0 && i % 500 == 0) {
if (snapshot != null) {
// check our referenced snapshot
deepCheck(reference, convert(snapshot, snapshotSize));
if (i % 1_000 == 0) {
// draw and release some other snapshot while holding on the old snapshot
++snapshotCounter;
stateMap.snapshotMapArrays();
stateMap.releaseSnapshot(snapshotCounter);
}
// release the snapshot after some time
if (i % 5_000 == 0) {
snapshot = null;
reference = null;
snapshotSize = 0;
stateMap.releaseSnapshot(referencedSnapshotId);
}
} else {
// if there is no more referenced snapshot, we create one
++snapshotCounter;
referencedSnapshotId = snapshotCounter;
snapshot = stateMap.snapshotMapArrays();
snapshotSize = stateMap.size();
reference = manualDeepDump(referenceMap);
}
}
}
}
/**
* Test operations specific for StateIncrementalVisitor in {@code
* testRandomModificationsAndCopyOnWriteIsolation()}.
*
* <p>Check next, update and remove during global iteration of StateIncrementalVisitor.
*/
private static void testStateIteratorWithUpdate(
StateIncrementalVisitor<Integer, Integer, ArrayList<Integer>> updatingIterator,
CopyOnWriteStateMap<Integer, Integer, ArrayList<Integer>> stateMap,
HashMap<Tuple2<Integer, Integer>, ArrayList<Integer>> referenceMap,
boolean update,
boolean remove) {
for (StateEntry<Integer, Integer, ArrayList<Integer>> stateEntry :
updatingIterator.nextEntries()) {
Integer key = stateEntry.getKey();
Integer namespace = stateEntry.getNamespace();
Tuple2<Integer, Integer> compositeKey = new Tuple2<>(key, namespace);
assertThat(stateEntry.getState()).isEqualTo(referenceMap.get(compositeKey));
if (update) {
ArrayList<Integer> newState = new ArrayList<>(stateEntry.getState());
if (!newState.isEmpty()) {
newState.remove(0);
}
updatingIterator.update(stateEntry, newState);
referenceMap.put(compositeKey, new ArrayList<>(newState));
assertThat(stateMap.get(key, namespace)).isEqualTo(newState);
}
if (remove) {
updatingIterator.remove(stateEntry);
referenceMap.remove(compositeKey);
}
}
}
/**
* This tests for the copy-on-write contracts, e.g. ensures that no copy-on-write is active
* after all snapshots are released.
*/
@Test
void testCopyOnWriteContracts() {
final CopyOnWriteStateMap<Integer, Integer, ArrayList<Integer>> stateMap =
new CopyOnWriteStateMap<>(new ArrayListSerializer<>(IntSerializer.INSTANCE));
ArrayList<Integer> originalState1 = new ArrayList<>(1);
ArrayList<Integer> originalState2 = new ArrayList<>(1);
ArrayList<Integer> originalState3 = new ArrayList<>(1);
ArrayList<Integer> originalState4 = new ArrayList<>(1);
ArrayList<Integer> originalState5 = new ArrayList<>(1);
originalState1.add(1);
originalState2.add(2);
originalState3.add(3);
originalState4.add(4);
originalState5.add(5);
stateMap.put(1, 1, originalState1);
stateMap.put(2, 1, originalState2);
stateMap.put(4, 1, originalState4);
stateMap.put(5, 1, originalState5);
// no snapshot taken, we get the original back
assertThat(stateMap.get(1, 1)).isSameAs(originalState1);
CopyOnWriteStateMapSnapshot<Integer, Integer, ArrayList<Integer>> snapshot1 =
stateMap.stateSnapshot();
// after snapshot1 is taken, we get a copy...
final ArrayList<Integer> copyState = stateMap.get(1, 1);
assertThat(copyState).isNotSameAs(originalState1);
// ...and the copy is equal
assertThat(copyState).isEqualTo(originalState1);
// we make an insert AFTER snapshot1
stateMap.put(3, 1, originalState3);
// on repeated lookups, we get the same copy because no further snapshot was taken
assertThat(stateMap.get(1, 1)).isSameAs(copyState);
// we take snapshot2
CopyOnWriteStateMapSnapshot<Integer, Integer, ArrayList<Integer>> snapshot2 =
stateMap.stateSnapshot();
// after the second snapshot, copy-on-write is active again for old entries
assertThat(stateMap.get(1, 1)).isNotSameAs(copyState);
// and equality still holds
assertThat(stateMap.get(1, 1)).isEqualTo(copyState);
// after releasing snapshot2
stateMap.releaseSnapshot(snapshot2);
// we still get the original of the untouched late insert (after snapshot1)
assertThat(stateMap.get(3, 1)).isSameAs(originalState3);
// but copy-on-write is still active for older inserts (before snapshot1)
assertThat(stateMap.get(4, 1)).isNotSameAs(originalState4);
// after releasing snapshot1
stateMap.releaseSnapshot(snapshot1);
// no copy-on-write is active
assertThat(stateMap.get(5, 1)).isSameAs(originalState5);
}
@Test
void testIteratingOverSnapshot() {
ListSerializer<Integer> stateSerializer = new ListSerializer<>(IntSerializer.INSTANCE);
final CopyOnWriteStateMap<Integer, Integer, List<Integer>> stateMap =
new CopyOnWriteStateMap<>(stateSerializer);
List<Integer> originalState1 = new ArrayList<>(1);
List<Integer> originalState2 = new ArrayList<>(1);
List<Integer> originalState3 = new ArrayList<>(1);
List<Integer> originalState4 = new ArrayList<>(1);
List<Integer> originalState5 = new ArrayList<>(1);
originalState1.add(1);
originalState2.add(2);
originalState3.add(3);
originalState4.add(4);
originalState5.add(5);
stateMap.put(1, 1, originalState1);
stateMap.put(2, 1, originalState2);
stateMap.put(3, 1, originalState3);
stateMap.put(4, 1, originalState4);
stateMap.put(5, 1, originalState5);
CopyOnWriteStateMapSnapshot<Integer, Integer, List<Integer>> snapshot =
stateMap.stateSnapshot();
Iterator<StateEntry<Integer, Integer, List<Integer>>> iterator =
snapshot.getIterator(
IntSerializer.INSTANCE, IntSerializer.INSTANCE, stateSerializer, null);
assertThat(iterator)
.toIterable()
.is(
matching(
containsInAnyOrder(
entry(1, 1, originalState1),
entry(2, 1, originalState2),
entry(3, 1, originalState3),
entry(4, 1, originalState4),
entry(5, 1, originalState5))));
}
@Test
void testIteratingOverSnapshotWithTransform() {
final CopyOnWriteStateMap<Integer, Integer, Long> stateMap =
new CopyOnWriteStateMap<>(LongSerializer.INSTANCE);
stateMap.put(1, 1, 10L);
stateMap.put(2, 1, 11L);
stateMap.put(3, 1, 12L);
stateMap.put(4, 1, 13L);
stateMap.put(5, 1, 14L);
StateMapSnapshot<Integer, Integer, Long, ? extends StateMap<Integer, Integer, Long>>
snapshot = stateMap.stateSnapshot();
Iterator<StateEntry<Integer, Integer, Long>> iterator =
snapshot.getIterator(
IntSerializer.INSTANCE,
IntSerializer.INSTANCE,
LongSerializer.INSTANCE,
new StateSnapshotTransformer<Long>() {
@Nullable
@Override
public Long filterOrTransform(@Nullable Long value) {
if (value == 12L) {
return null;
} else {
return value + 2L;
}
}
});
assertThat(iterator)
.toIterable()
.is(
matching(
containsInAnyOrder(
entry(1, 1, 12L),
entry(2, 1, 13L),
entry(4, 1, 15L),
entry(5, 1, 16L))));
}
/** This tests that snapshot can be released correctly. */
@Test
void testSnapshotRelease() {
final CopyOnWriteStateMap<Integer, Integer, Integer> stateMap =
new CopyOnWriteStateMap<>(IntSerializer.INSTANCE);
for (int i = 0; i < 10; i++) {
stateMap.put(i, i, i);
}
CopyOnWriteStateMapSnapshot<Integer, Integer, Integer> snapshot = stateMap.stateSnapshot();
assertThat(snapshot.isReleased()).isFalse();
assertThat(stateMap.getSnapshotVersions()).contains(snapshot.getSnapshotVersion());
snapshot.release();
assertThat(snapshot.isReleased()).isTrue();
assertThat(stateMap.getSnapshotVersions()).isEmpty();
// verify that snapshot will release itself only once
snapshot.release();
assertThat(stateMap.getSnapshotVersions()).isEmpty();
}
@SuppressWarnings("unchecked")
private static <K, N, S> Tuple3<K, N, S>[] convert(
CopyOnWriteStateMap.StateMapEntry<K, N, S>[] snapshot, int mapSize) {
Tuple3<K, N, S>[] result = new Tuple3[mapSize];
int pos = 0;
for (CopyOnWriteStateMap.StateMapEntry<K, N, S> entry : snapshot) {
while (null != entry) {
result[pos++] =
new Tuple3<>(entry.getKey(), entry.getNamespace(), entry.getState());
entry = entry.next;
}
}
assertThat(pos).isEqualTo(mapSize);
return result;
}
@SuppressWarnings("unchecked")
private Tuple3<Integer, Integer, ArrayList<Integer>>[] manualDeepDump(
HashMap<Tuple2<Integer, Integer>, ArrayList<Integer>> map) {
Tuple3<Integer, Integer, ArrayList<Integer>>[] result = new Tuple3[map.size()];
int pos = 0;
for (Map.Entry<Tuple2<Integer, Integer>, ArrayList<Integer>> entry : map.entrySet()) {
Integer key = entry.getKey().f0;
Integer namespace = entry.getKey().f1;
result[pos++] = new Tuple3<>(key, namespace, new ArrayList<>(entry.getValue()));
}
return result;
}
private void deepCheck(
Tuple3<Integer, Integer, ArrayList<Integer>>[] a,
Tuple3<Integer, Integer, ArrayList<Integer>>[] b) {
if (a == b) {
return;
}
assertThat(a).hasSameSizeAs(b);
Comparator<Tuple3<Integer, Integer, ArrayList<Integer>>> comparator =
(o1, o2) -> {
int namespaceDiff = o1.f1 - o2.f1;
return namespaceDiff != 0 ? namespaceDiff : o1.f0 - o2.f0;
};
Arrays.sort(a, comparator);
Arrays.sort(b, comparator);
for (int i = 0; i < a.length; ++i) {
Tuple3<Integer, Integer, ArrayList<Integer>> av = a[i];
Tuple3<Integer, Integer, ArrayList<Integer>> bv = b[i];
assertThat(av.f0).isEqualTo(bv.f0);
assertThat(av.f1).isEqualTo(bv.f1);
assertThat(av.f2).isEqualTo(bv.f2);
}
}
}
| CopyOnWriteStateMapTest |
java | quarkusio__quarkus | extensions/cache/deployment/src/test/java/io/quarkus/cache/test/deployment/PrivateMethodExceptionsTest.java | {
"start": 700,
"end": 809
} | class ____ {@link DeploymentException} causes related to caching annotations on private methods.
*/
public | tests |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/state/filesystem/FsCheckpointStateToolset.java | {
"start": 1538,
"end": 3904
} | class ____ implements CheckpointStateToolset {
private final Path basePath;
private final PathsCopyingFileSystem fs;
public FsCheckpointStateToolset(Path basePath, PathsCopyingFileSystem fs) {
this.basePath = basePath;
this.fs = fs;
}
@Override
public boolean canFastDuplicate(StreamStateHandle stateHandle) throws IOException {
if (!(stateHandle instanceof FileStateHandle)) {
return false;
}
final Path srcPath = ((FileStateHandle) stateHandle).getFilePath();
final Path dst = getNewDstPath(srcPath.getName());
return fs.canCopyPaths(srcPath, dst);
}
@Override
public List<StreamStateHandle> duplicate(List<StreamStateHandle> stateHandles)
throws IOException {
final List<CopyRequest> requests = new ArrayList<>();
for (StreamStateHandle handle : stateHandles) {
if (!(handle instanceof FileStateHandle)) {
throw new IllegalArgumentException("We can duplicate only FileStateHandles.");
}
final Path srcPath = ((FileStateHandle) handle).getFilePath();
requests.add(
CopyRequest.of(
srcPath, getNewDstPath(srcPath.getName()), handle.getStateSize()));
}
fs.copyFiles(requests, new CloseableRegistry());
return IntStream.range(0, stateHandles.size())
.mapToObj(
idx -> {
final StreamStateHandle originalHandle = stateHandles.get(idx);
final Path dst = requests.get(idx).getDestination();
if (originalHandle instanceof RelativeFileStateHandle) {
return new RelativeFileStateHandle(
dst, dst.getName(), originalHandle.getStateSize());
} else {
return new FileStateHandle(dst, originalHandle.getStateSize());
}
})
.collect(Collectors.toList());
}
private Path getNewDstPath(String fileName) throws IOException {
final Path dst = new Path(basePath, fileName);
return EntropyInjector.addEntropy(dst.getFileSystem(), dst);
}
}
| FsCheckpointStateToolset |
java | spring-projects__spring-framework | spring-beans/src/main/java/org/springframework/beans/factory/config/AutowireCapableBeanFactory.java | {
"start": 8860,
"end": 9470
} | interface ____ supported here.
* Can also be invoked with {@code AUTOWIRE_NO} in order to just apply
* before-instantiation callbacks (for example, for annotation-driven injection).
* <p>Does <i>not</i> apply standard {@link BeanPostProcessor BeanPostProcessors}
* callbacks or perform any further initialization of the bean. This interface
* offers distinct, fine-grained operations for those purposes, for example
* {@link #initializeBean}. However, {@link InstantiationAwareBeanPostProcessor}
* callbacks are applied, if applicable to the construction of the instance.
* @param beanClass the | are |
java | apache__camel | components/camel-tracing/src/main/java/org/apache/camel/tracing/decorators/AzureStorageQueueSpanDecorator.java | {
"start": 1031,
"end": 4391
} | class ____ extends AbstractMessagingSpanDecorator {
static final String STORAGE_QUEUE_INSERTION_TIME = "insertionTime";
static final String STORAGE_QUEUE_EXPIRATION_TIME = "expirationTime";
static final String STORAGE_QUEUE_TIME_NEXT_VISIBLE = "timeNextVisible";
static final String STORAGE_QUEUE_DEQUEUE_COUNT = "dequeueCount";
static final String STORAGE_QUEUE_NAME = "name";
static final String STORAGE_QUEUE_VISIBILITY_TIMEOUT = "visibilityTimeout";
static final String STORAGE_QUEUE_TIME_TO_LIVE = "ttl";
/**
* Constants copied from {@link org.apache.camel.component.azure.storage.queue.QueueConstants}
*/
static final String MESSAGE_ID = "CamelAzureStorageQueueMessageId";
static final String INSERTION_TIME = "CamelAzureStorageQueueInsertionTime";
static final String EXPIRATION_TIME = "CamelAzureStorageQueueExpirationTime";
static final String TIME_NEXT_VISIBLE = "CamelAzureStorageQueueTimeNextVisible";
static final String DEQUEUE_COUNT = "CamelAzureStorageQueueDequeueCount";
static final String NAME = "CamelAzureStorageQueueName";
static final String VISIBILITY_TIMEOUT = "CamelAzureStorageQueueVisibilityTimeout";
static final String TIME_TO_LIVE = "CamelAzureStorageQueueTimeToLive";
@Override
public String getComponent() {
return "azure-storage-queue";
}
@Override
public String getComponentClassName() {
return "org.apache.camel.component.azure.storage.queue.QueueComponent";
}
@Override
public void pre(SpanAdapter span, Exchange exchange, Endpoint endpoint) {
super.pre(span, exchange, endpoint);
OffsetDateTime insertionTime = exchange.getIn().getHeader(INSERTION_TIME, OffsetDateTime.class);
if (insertionTime != null) {
span.setTag(STORAGE_QUEUE_INSERTION_TIME, insertionTime.toString());
}
OffsetDateTime expirationTime = exchange.getIn().getHeader(EXPIRATION_TIME, OffsetDateTime.class);
if (expirationTime != null) {
span.setTag(STORAGE_QUEUE_EXPIRATION_TIME, expirationTime.toString());
}
OffsetDateTime timeNextVisible = exchange.getIn().getHeader(TIME_NEXT_VISIBLE, OffsetDateTime.class);
if (timeNextVisible != null) {
span.setTag(STORAGE_QUEUE_TIME_NEXT_VISIBLE, timeNextVisible.toString());
}
Long dequeueCount = exchange.getIn().getHeader(DEQUEUE_COUNT, Long.class);
if (dequeueCount != null) {
span.setTag(STORAGE_QUEUE_DEQUEUE_COUNT, dequeueCount);
}
String name = exchange.getIn().getHeader(NAME, String.class);
if (name != null) {
span.setTag(STORAGE_QUEUE_NAME, name);
}
Duration visibilityTimeout = exchange.getIn().getHeader(VISIBILITY_TIMEOUT, Duration.class);
if (visibilityTimeout != null) {
span.setTag(STORAGE_QUEUE_VISIBILITY_TIMEOUT, visibilityTimeout.toString());
}
Duration timeToLive = exchange.getIn().getHeader(TIME_TO_LIVE, Duration.class);
if (timeToLive != null) {
span.setTag(STORAGE_QUEUE_TIME_TO_LIVE, timeToLive.toString());
}
}
@Override
protected String getMessageId(Exchange exchange) {
return exchange.getIn().getHeader(MESSAGE_ID, String.class);
}
}
| AzureStorageQueueSpanDecorator |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/GoogleBigQuerySQLEndpointBuilderFactory.java | {
"start": 6457,
"end": 8998
} | interface ____
extends
EndpointProducerBuilder {
default GoogleBigQuerySQLEndpointBuilder basic() {
return (GoogleBigQuerySQLEndpointBuilder) this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default AdvancedGoogleBigQuerySQLEndpointBuilder lazyStartProducer(boolean lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default AdvancedGoogleBigQuerySQLEndpointBuilder lazyStartProducer(String lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
}
public | AdvancedGoogleBigQuerySQLEndpointBuilder |
java | google__dagger | javatests/artifacts/hilt-android/simple/app/src/androidTest/java/dagger/hilt/android/simple/SimpleEmulatorTestRunner.java | {
"start": 866,
"end": 949
} | class ____ tests. */
@CustomTestApplication(BaseTestApplication.class)
public final | for |
java | elastic__elasticsearch | x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java | {
"start": 4208,
"end": 27475
} | class ____ extends ESIntegTestCase {
private Settings settings;
private LifecyclePolicy lifecyclePolicy;
private Phase mockPhase;
private static final ObservableAction OBSERVABLE_ACTION;
static {
List<Step> steps = new ArrayList<>();
Step.StepKey key = new Step.StepKey("mock", ObservableAction.NAME, ObservableClusterStateWaitStep.NAME);
Step.StepKey compKey = new Step.StepKey("mock", "complete", "complete");
steps.add(new ObservableClusterStateWaitStep(key, compKey));
OBSERVABLE_ACTION = new ObservableAction(steps, true);
}
@Override
protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
Settings.Builder nodeSettings = Settings.builder().put(super.nodeSettings(nodeOrdinal, otherSettings));
nodeSettings.put(XPackSettings.MACHINE_LEARNING_ENABLED.getKey(), false);
nodeSettings.put(XPackSettings.SECURITY_ENABLED.getKey(), false);
nodeSettings.put(XPackSettings.WATCHER_ENABLED.getKey(), false);
nodeSettings.put(XPackSettings.GRAPH_ENABLED.getKey(), false);
nodeSettings.put(LifecycleSettings.LIFECYCLE_POLL_INTERVAL, "1s");
// This is necessary to prevent ILM installing a lifecycle policy, these tests assume a blank slate
nodeSettings.put(LifecycleSettings.LIFECYCLE_HISTORY_INDEX_ENABLED, false);
return nodeSettings.build();
}
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return List.of(LocalStateCompositeXPackPlugin.class, IndexLifecycle.class, TestILMPlugin.class);
}
@Before
public void init() {
settings = Settings.builder()
.put(indexSettings())
.put(SETTING_NUMBER_OF_SHARDS, 1)
.put(SETTING_NUMBER_OF_REPLICAS, 0)
.put(LifecycleSettings.LIFECYCLE_NAME, "test")
.build();
List<Step> steps = new ArrayList<>();
Step.StepKey key = new Step.StepKey("mock", ObservableAction.NAME, ObservableClusterStateWaitStep.NAME);
Step.StepKey compKey = new Step.StepKey("mock", "complete", "complete");
steps.add(new ObservableClusterStateWaitStep(key, compKey));
steps.add(new PhaseCompleteStep(compKey, null));
Map<String, LifecycleAction> actions = Map.of(ObservableAction.NAME, OBSERVABLE_ACTION);
mockPhase = new Phase("mock", TimeValue.timeValueSeconds(0), actions);
Map<String, Phase> phases = Map.of("mock", mockPhase);
lifecyclePolicy = newLockableLifecyclePolicy("test", phases);
}
public void testSingleNodeCluster() throws Exception {
settings = Settings.builder().put(settings).put("index.lifecycle.test.complete", true).build();
// start master node
logger.info("Starting server1");
final String server_1 = internalCluster().startNode();
final String node1 = getLocalNodeId(server_1);
// test get-lifecycle behavior when IndexLifecycleMetadata is null
GetLifecycleAction.Response getUninitializedLifecycleResponse = client().execute(
GetLifecycleAction.INSTANCE,
new GetLifecycleAction.Request(TEST_REQUEST_TIMEOUT)
).get();
assertThat(getUninitializedLifecycleResponse.getPolicies().size(), equalTo(0));
ExecutionException exception = expectThrows(
ExecutionException.class,
() -> client().execute(GetLifecycleAction.INSTANCE, new GetLifecycleAction.Request(TEST_REQUEST_TIMEOUT, "non-existent-policy"))
.get()
);
assertThat(exception.getMessage(), containsString("Lifecycle policy not found: [non-existent-policy]"));
logger.info("Creating lifecycle [test_lifecycle]");
PutLifecycleRequest putLifecycleRequest = new PutLifecycleRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, lifecyclePolicy);
long lowerBoundModifiedDate = Instant.now().toEpochMilli();
assertAcked(client().execute(ILMActions.PUT, putLifecycleRequest).get());
long upperBoundModifiedDate = Instant.now().toEpochMilli();
// assert version and modified_date
GetLifecycleAction.Response getLifecycleResponse = client().execute(
GetLifecycleAction.INSTANCE,
new GetLifecycleAction.Request(TEST_REQUEST_TIMEOUT)
).get();
assertThat(getLifecycleResponse.getPolicies().size(), equalTo(1));
GetLifecycleAction.LifecyclePolicyResponseItem responseItem = getLifecycleResponse.getPolicies().get(0);
assertThat(responseItem.getLifecyclePolicy(), equalTo(lifecyclePolicy));
assertThat(responseItem.getVersion(), equalTo(1L));
long actualModifiedDate = Instant.from(ISO_ZONED_DATE_TIME.parse(responseItem.getModifiedDate())).toEpochMilli();
assertThat(
actualModifiedDate,
is(both(greaterThanOrEqualTo(lowerBoundModifiedDate)).and(lessThanOrEqualTo(upperBoundModifiedDate)))
);
logger.info("Creating index [test]");
CreateIndexResponse createIndexResponse = indicesAdmin().create(new CreateIndexRequest("test").settings(settings)).actionGet();
assertAcked(createIndexResponse);
ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState();
RoutingNode routingNodeEntry1 = clusterState.getRoutingNodes().node(node1);
assertThat(routingNodeEntry1.numberOfShardsWithState(STARTED), equalTo(1));
awaitIndexExists("test");
IndexLifecycleService indexLifecycleService = internalCluster().getInstance(IndexLifecycleService.class, server_1);
assertThat(indexLifecycleService.getScheduler().jobCount(), equalTo(1));
assertNotNull(indexLifecycleService.getScheduledJob());
assertBusy(() -> {
LifecycleExecutionState lifecycleState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT)
.get()
.getState()
.getMetadata()
.getProject()
.index("test")
.getLifecycleExecutionState();
assertThat(lifecycleState.step(), equalTo("complete"));
});
}
public void testNoOpPolicyUpdates() throws Exception {
internalCluster().startNode();
Map<String, Phase> phases = new HashMap<>();
phases.put("hot", new Phase("hot", TimeValue.ZERO, Map.of()));
LifecyclePolicy policy = new LifecyclePolicy("mypolicy", phases);
PutLifecycleRequest putLifecycleRequest = new PutLifecycleRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, policy);
assertAcked(client().execute(ILMActions.PUT, putLifecycleRequest).get());
GetLifecycleAction.Response getLifecycleResponse = client().execute(
GetLifecycleAction.INSTANCE,
new GetLifecycleAction.Request(TEST_REQUEST_TIMEOUT)
).get();
assertThat(getLifecycleResponse.getPolicies().size(), equalTo(1));
GetLifecycleAction.LifecyclePolicyResponseItem responseItem = getLifecycleResponse.getPolicies().get(0);
assertThat(responseItem.getLifecyclePolicy(), equalTo(policy));
assertThat(responseItem.getVersion(), equalTo(1L));
// Put the same policy in place, which should be a no-op
putLifecycleRequest = new PutLifecycleRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, policy);
assertAcked(client().execute(ILMActions.PUT, putLifecycleRequest).get());
getLifecycleResponse = client().execute(GetLifecycleAction.INSTANCE, new GetLifecycleAction.Request(TEST_REQUEST_TIMEOUT)).get();
assertThat(getLifecycleResponse.getPolicies().size(), equalTo(1));
responseItem = getLifecycleResponse.getPolicies().get(0);
assertThat(responseItem.getLifecyclePolicy(), equalTo(policy));
// Version should still be 1
assertThat(responseItem.getVersion(), equalTo(1L));
// Generate a brand new policy
Map<String, Phase> newPhases = new HashMap<>(phases);
newPhases.put("cold", new Phase("cold", TimeValue.timeValueDays(1), Map.of()));
policy = new LifecyclePolicy("mypolicy", newPhases);
putLifecycleRequest = new PutLifecycleRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, policy);
assertAcked(client().execute(ILMActions.PUT, putLifecycleRequest).get());
getLifecycleResponse = client().execute(GetLifecycleAction.INSTANCE, new GetLifecycleAction.Request(TEST_REQUEST_TIMEOUT)).get();
assertThat(getLifecycleResponse.getPolicies().size(), equalTo(1));
responseItem = getLifecycleResponse.getPolicies().get(0);
assertThat(responseItem.getLifecyclePolicy(), equalTo(policy));
// Version should now be 2
assertThat(responseItem.getVersion(), equalTo(2L));
}
public void testExplainExecution() throws Exception {
// start node
logger.info("Starting server1");
internalCluster().startNode();
logger.info("Creating lifecycle [test_lifecycle]");
PutLifecycleRequest putLifecycleRequest = new PutLifecycleRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, lifecyclePolicy);
assertAcked(client().execute(ILMActions.PUT, putLifecycleRequest).get());
GetLifecycleAction.Response getLifecycleResponse = client().execute(
GetLifecycleAction.INSTANCE,
new GetLifecycleAction.Request(TEST_REQUEST_TIMEOUT)
).get();
assertThat(getLifecycleResponse.getPolicies().size(), equalTo(1));
GetLifecycleAction.LifecyclePolicyResponseItem responseItem = getLifecycleResponse.getPolicies().get(0);
assertThat(responseItem.getLifecyclePolicy(), equalTo(lifecyclePolicy));
assertThat(responseItem.getVersion(), equalTo(1L));
long actualModifiedDate = Instant.from(ISO_ZONED_DATE_TIME.parse(responseItem.getModifiedDate())).toEpochMilli();
logger.info("Creating index [test]");
CreateIndexResponse createIndexResponse = indicesAdmin().create(new CreateIndexRequest("test").settings(settings)).actionGet();
assertAcked(createIndexResponse);
// using AtomicLong only to extract a value from a lambda rather than the more traditional atomic update use-case
AtomicLong originalLifecycleDate = new AtomicLong();
{
PhaseExecutionInfo expectedExecutionInfo = new PhaseExecutionInfo(lifecyclePolicy.getName(), mockPhase, 1L, actualModifiedDate);
assertBusy(() -> {
IndexLifecycleExplainResponse indexResponse = executeExplainRequestAndGetTestIndexResponse("test");
assertThat(indexResponse.getStep(), equalTo("observable_cluster_state_action"));
assertThat(indexResponse.getPhaseExecutionInfo(), equalTo(expectedExecutionInfo));
originalLifecycleDate.set(indexResponse.getLifecycleDate());
});
}
// set the origination date setting to an older value
updateIndexSettings(Settings.builder().put(IndexSettings.LIFECYCLE_ORIGINATION_DATE, 1000L), "test");
{
assertBusy(() -> {
IndexLifecycleExplainResponse indexResponse = executeExplainRequestAndGetTestIndexResponse("test");
assertThat("The configured origination date dictates the lifecycle date", indexResponse.getLifecycleDate(), equalTo(1000L));
});
}
// set the origination date setting to null
updateIndexSettings(Settings.builder().putNull(IndexSettings.LIFECYCLE_ORIGINATION_DATE), "test");
{
assertBusy(() -> {
IndexLifecycleExplainResponse indexResponse = executeExplainRequestAndGetTestIndexResponse("test");
assertThat(
"Without the origination date, the index create date should dictate the lifecycle date",
indexResponse.getLifecycleDate(),
equalTo(originalLifecycleDate.get())
);
});
}
// complete the step
updateIndexSettings(Settings.builder().put("index.lifecycle.test.complete", true), "test");
{
Phase phase = new Phase("mock", TimeValue.ZERO, Map.of("TEST_ACTION", OBSERVABLE_ACTION));
PhaseExecutionInfo expectedExecutionInfo = new PhaseExecutionInfo(lifecyclePolicy.getName(), phase, 1L, actualModifiedDate);
assertBusy(() -> {
IndexLifecycleExplainResponse indexResponse = executeExplainRequestAndGetTestIndexResponse("test");
assertThat("expected to be in the 'mock' phase", indexResponse.getPhase(), equalTo("mock"));
assertThat("expected to be in the mock phase complete step", indexResponse.getStep(), equalTo("complete"));
assertThat(indexResponse.getPhaseExecutionInfo(), equalTo(expectedExecutionInfo));
});
}
}
public void testExplainParseOriginationDate() throws Exception {
// start node
logger.info("Starting server1");
internalCluster().startNode();
logger.info("Starting server2");
internalCluster().startNode();
logger.info("Creating lifecycle [test_lifecycle]");
PutLifecycleRequest putLifecycleRequest = new PutLifecycleRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, lifecyclePolicy);
assertAcked(client().execute(ILMActions.PUT, putLifecycleRequest).get());
GetLifecycleAction.Response getLifecycleResponse = client().execute(
GetLifecycleAction.INSTANCE,
new GetLifecycleAction.Request(TEST_REQUEST_TIMEOUT)
).get();
assertThat(getLifecycleResponse.getPolicies().size(), equalTo(1));
GetLifecycleAction.LifecyclePolicyResponseItem responseItem = getLifecycleResponse.getPolicies().get(0);
assertThat(responseItem.getLifecyclePolicy(), equalTo(lifecyclePolicy));
assertThat(responseItem.getVersion(), equalTo(1L));
String indexName = "test-2019.09.14";
logger.info("Creating index [{}]", indexName);
CreateIndexResponse createIndexResponse = indicesAdmin().create(
new CreateIndexRequest(indexName).settings(
Settings.builder().put(settings).put(IndexSettings.LIFECYCLE_PARSE_ORIGINATION_DATE, true)
)
).actionGet();
assertAcked(createIndexResponse);
DateFormatter dateFormatter = DateFormatter.forPattern("yyyy.MM.dd");
long expectedDate = dateFormatter.parseMillis("2019.09.14");
assertBusy(() -> {
IndexLifecycleExplainResponse indexResponse = executeExplainRequestAndGetTestIndexResponse(indexName);
assertThat(indexResponse.getLifecycleDate(), is(expectedDate));
});
// disabling the lifecycle parsing would maintain the parsed value as that was set as the origination date
updateIndexSettings(Settings.builder().put(IndexSettings.LIFECYCLE_PARSE_ORIGINATION_DATE, false), indexName);
assertBusy(() -> {
IndexLifecycleExplainResponse indexResponse = executeExplainRequestAndGetTestIndexResponse(indexName);
assertThat(indexResponse.getLifecycleDate(), is(expectedDate));
});
// setting the lifecycle origination date setting to null should make the lifecyle date fallback on the index creation date
updateIndexSettings(Settings.builder().putNull(IndexSettings.LIFECYCLE_ORIGINATION_DATE), indexName);
assertBusy(() -> {
IndexLifecycleExplainResponse indexResponse = executeExplainRequestAndGetTestIndexResponse(indexName);
assertThat(indexResponse.getLifecycleDate(), is(greaterThan(expectedDate)));
});
// setting the lifecycle origination date to an explicit value overrides the date parsing
long originationDate = 42L;
updateIndexSettings(
Settings.builder()
.put(IndexSettings.LIFECYCLE_PARSE_ORIGINATION_DATE, true)
.put(IndexSettings.LIFECYCLE_ORIGINATION_DATE, originationDate),
indexName
);
assertBusy(() -> {
IndexLifecycleExplainResponse indexResponse = executeExplainRequestAndGetTestIndexResponse(indexName);
assertThat(indexResponse.getLifecycleDate(), is(originationDate));
});
}
private IndexLifecycleExplainResponse executeExplainRequestAndGetTestIndexResponse(String indexName) throws ExecutionException,
InterruptedException {
ExplainLifecycleRequest explainRequest = new ExplainLifecycleRequest(TEST_REQUEST_TIMEOUT);
ExplainLifecycleResponse explainResponse = client().execute(ExplainLifecycleAction.INSTANCE, explainRequest).get();
assertThat(explainResponse.getIndexResponses().size(), equalTo(1));
return explainResponse.getIndexResponses().get(indexName);
}
public void testMasterDedicatedDataDedicated() throws Exception {
settings = Settings.builder().put(settings).put("index.lifecycle.test.complete", true).build();
// start master node
logger.info("Starting master-only server1");
final String server_1 = internalCluster().startMasterOnlyNode();
// start data node
logger.info("Starting data-only server2");
final String server_2 = internalCluster().startDataOnlyNode();
final String node2 = getLocalNodeId(server_2);
// check that the scheduler was started on the appropriate node
{
IndexLifecycleService indexLifecycleService = internalCluster().getInstance(IndexLifecycleService.class, server_1);
assertThat(indexLifecycleService.getScheduler().jobCount(), equalTo(1));
assertNotNull(indexLifecycleService.getScheduledJob());
}
{
IndexLifecycleService indexLifecycleService = internalCluster().getInstance(IndexLifecycleService.class, server_2);
assertNull(indexLifecycleService.getScheduler());
assertNull(indexLifecycleService.getScheduledJob());
}
logger.info("Creating lifecycle [test_lifecycle]");
PutLifecycleRequest putLifecycleRequest = new PutLifecycleRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, lifecyclePolicy);
assertAcked(client().execute(ILMActions.PUT, putLifecycleRequest).get());
logger.info("Creating index [test]");
CreateIndexResponse createIndexResponse = indicesAdmin().create(new CreateIndexRequest("test").settings(settings)).actionGet();
assertAcked(createIndexResponse);
ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState();
RoutingNode routingNodeEntry1 = clusterState.getRoutingNodes().node(node2);
assertThat(routingNodeEntry1.numberOfShardsWithState(STARTED), equalTo(1));
awaitIndexExists("test");
assertBusy(() -> {
LifecycleExecutionState lifecycleState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT)
.get()
.getState()
.getMetadata()
.getProject()
.index("test")
.getLifecycleExecutionState();
assertThat(lifecycleState.step(), equalTo("complete"));
});
}
public void testCreatePolicyWhenStopped() throws Exception {
// start master node
logger.info("Starting server1");
final String server_1 = internalCluster().startNode();
final String node1 = getLocalNodeId(server_1);
assertAcked(client().execute(ILMActions.STOP, new StopILMRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT)).get());
assertBusy(() -> {
OperationMode mode = client().execute(GetStatusAction.INSTANCE, new GetStatusAction.Request(TEST_REQUEST_TIMEOUT))
.get()
.getMode();
logger.info("--> waiting for STOPPED, currently: {}", mode);
assertThat(mode, equalTo(OperationMode.STOPPED));
});
logger.info("Creating lifecycle [test_lifecycle]");
PutLifecycleRequest putLifecycleRequest = new PutLifecycleRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, lifecyclePolicy);
long lowerBoundModifiedDate = Instant.now().toEpochMilli();
assertAcked(client().execute(ILMActions.PUT, putLifecycleRequest).get());
long upperBoundModifiedDate = Instant.now().toEpochMilli();
// assert version and modified_date
GetLifecycleAction.Response getLifecycleResponse = client().execute(
GetLifecycleAction.INSTANCE,
new GetLifecycleAction.Request(TEST_REQUEST_TIMEOUT)
).get();
assertThat(getLifecycleResponse.getPolicies().size(), equalTo(1));
GetLifecycleAction.LifecyclePolicyResponseItem responseItem = getLifecycleResponse.getPolicies().get(0);
assertThat(responseItem.getLifecyclePolicy(), equalTo(lifecyclePolicy));
assertThat(responseItem.getVersion(), equalTo(1L));
long actualModifiedDate = Instant.from(ISO_ZONED_DATE_TIME.parse(responseItem.getModifiedDate())).toEpochMilli();
assertThat(
actualModifiedDate,
is(both(greaterThanOrEqualTo(lowerBoundModifiedDate)).and(lessThanOrEqualTo(upperBoundModifiedDate)))
);
// assert ILM is still stopped
GetStatusAction.Response statusResponse = client().execute(
GetStatusAction.INSTANCE,
new GetStatusAction.Request(TEST_REQUEST_TIMEOUT)
).get();
assertThat(statusResponse.getMode(), equalTo(OperationMode.STOPPED));
}
public void testPollIntervalUpdate() throws Exception {
TimeValue pollInterval = TimeValue.timeValueSeconds(randomLongBetween(1, 5));
final String server_1 = internalCluster().startMasterOnlyNode(
Settings.builder().put(LifecycleSettings.LIFECYCLE_POLL_INTERVAL, pollInterval.getStringRep()).build()
);
IndexLifecycleService indexLifecycleService = internalCluster().getInstance(IndexLifecycleService.class, server_1);
assertBusy(() -> {
assertNotNull(indexLifecycleService.getScheduler());
assertThat(indexLifecycleService.getScheduler().jobCount(), equalTo(1));
});
{
TimeValueSchedule schedule = (TimeValueSchedule) indexLifecycleService.getScheduledJob().schedule();
assertThat(schedule.getInterval(), equalTo(pollInterval));
}
// update the poll interval
TimeValue newPollInterval = TimeValue.timeValueHours(randomLongBetween(6, 1000));
updateClusterSettings(Settings.builder().put(LifecycleSettings.LIFECYCLE_POLL_INTERVAL, newPollInterval.getStringRep()));
{
TimeValueSchedule schedule = (TimeValueSchedule) indexLifecycleService.getScheduledJob().schedule();
assertThat(schedule.getInterval(), equalTo(newPollInterval));
}
}
private String getLocalNodeId(String name) {
TransportService transportService = internalCluster().getInstance(TransportService.class, name);
String nodeId = transportService.getLocalNode().getId();
assertThat(nodeId, not(nullValue()));
return nodeId;
}
public static | IndexLifecycleInitialisationTests |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/builder/RouteBuilderAddRoutesOnlyInterceptTest.java | {
"start": 925,
"end": 1590
} | class ____ extends ContextTestSupport {
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
interceptSendToEndpoint("mock:result").transform(constant("Foo was here"));
from("direct:start").to("mock:result");
}
};
}
@Test
public void testAddRoutes() throws Exception {
getMockEndpoint("mock:result").expectedBodiesReceived("Foo was here");
template.sendBody("direct:start", "Hello World");
assertMockEndpointsSatisfied();
}
}
| RouteBuilderAddRoutesOnlyInterceptTest |
java | quarkusio__quarkus | extensions/smallrye-graphql/deployment/src/test/java/io/quarkus/smallrye/graphql/deployment/InstrumentationTest.java | {
"start": 2309,
"end": 2536
} | class ____ {
@Query
@NonBlocking
public Foo foo() {
return new Foo("foo");
}
public Foo nestedFoo(@Source Foo foo) {
return new Foo("foo");
}
}
}
| FooApi |
java | spring-projects__spring-boot | core/spring-boot-autoconfigure/src/test/java/org/springframework/boot/autoconfigure/jmx/JmxAutoConfigurationTests.java | {
"start": 1827,
"end": 4955
} | class ____ {
private final ApplicationContextRunner contextRunner = new ApplicationContextRunner()
.withConfiguration(AutoConfigurations.of(JmxAutoConfiguration.class));
@Test
void testDefaultMBeanExport() {
this.contextRunner.run((context) -> {
assertThat(context).doesNotHaveBean(MBeanExporter.class);
assertThat(context).doesNotHaveBean(ObjectNamingStrategy.class);
});
}
@Test
void testDisabledMBeanExport() {
this.contextRunner.withPropertyValues("spring.jmx.enabled=false").run((context) -> {
assertThat(context).doesNotHaveBean(MBeanExporter.class);
assertThat(context).doesNotHaveBean(ObjectNamingStrategy.class);
});
}
@Test
void testEnabledMBeanExport() {
this.contextRunner.withPropertyValues("spring.jmx.enabled=true").run((context) -> {
assertThat(context).hasSingleBean(MBeanExporter.class);
assertThat(context).hasSingleBean(ParentAwareNamingStrategy.class);
MBeanExporter exporter = context.getBean(MBeanExporter.class);
assertThat(exporter).hasFieldOrPropertyWithValue("ensureUniqueRuntimeObjectNames", false);
assertThat(exporter).hasFieldOrPropertyWithValue("registrationPolicy", RegistrationPolicy.FAIL_ON_EXISTING);
MetadataNamingStrategy naming = (MetadataNamingStrategy) ReflectionTestUtils.getField(exporter,
"namingStrategy");
assertThat(naming).hasFieldOrPropertyWithValue("ensureUniqueRuntimeObjectNames", false);
});
}
@Test
void testDefaultDomainConfiguredOnMBeanExport() {
this.contextRunner
.withPropertyValues("spring.jmx.enabled=true", "spring.jmx.default-domain=my-test-domain",
"spring.jmx.unique-names=true", "spring.jmx.registration-policy=IGNORE_EXISTING")
.run((context) -> {
assertThat(context).hasSingleBean(MBeanExporter.class);
MBeanExporter exporter = context.getBean(MBeanExporter.class);
assertThat(exporter).hasFieldOrPropertyWithValue("ensureUniqueRuntimeObjectNames", true);
assertThat(exporter).hasFieldOrPropertyWithValue("registrationPolicy",
RegistrationPolicy.IGNORE_EXISTING);
MetadataNamingStrategy naming = (MetadataNamingStrategy) ReflectionTestUtils.getField(exporter,
"namingStrategy");
assertThat(naming).hasFieldOrPropertyWithValue("defaultDomain", "my-test-domain");
assertThat(naming).hasFieldOrPropertyWithValue("ensureUniqueRuntimeObjectNames", true);
});
}
@Test
void testBasicParentContext() {
try (AnnotationConfigApplicationContext parent = new AnnotationConfigApplicationContext()) {
parent.register(JmxAutoConfiguration.class);
parent.refresh();
this.contextRunner.withParent(parent).run((context) -> assertThat(context.isRunning()));
}
}
@Test
void testParentContext() {
try (AnnotationConfigApplicationContext parent = new AnnotationConfigApplicationContext()) {
parent.register(JmxAutoConfiguration.class, TestConfiguration.class);
parent.refresh();
this.contextRunner.withParent(parent)
.withConfiguration(UserConfigurations.of(TestConfiguration.class))
.run((context) -> assertThat(context.isRunning()));
}
}
@Configuration(proxyBeanMethods = false)
static | JmxAutoConfigurationTests |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/operators/CoGroupTaskExternalITCase.java | {
"start": 3938,
"end": 4996
} | class ____ extends RichCoGroupFunction<Record, Record, Record> {
private static final long serialVersionUID = 1L;
private final Record res = new Record();
@SuppressWarnings("unused")
@Override
public void coGroup(
Iterable<Record> records1, Iterable<Record> records2, Collector<Record> out) {
int val1Cnt = 0;
int val2Cnt = 0;
for (Record r : records1) {
val1Cnt++;
}
for (Record r : records2) {
val2Cnt++;
}
if (val1Cnt == 0) {
for (int i = 0; i < val2Cnt; i++) {
out.collect(this.res);
}
} else if (val2Cnt == 0) {
for (int i = 0; i < val1Cnt; i++) {
out.collect(this.res);
}
} else {
for (int i = 0; i < val2Cnt * val1Cnt; i++) {
out.collect(this.res);
}
}
}
}
}
| MockCoGroupStub |
java | google__guava | android/guava/src/com/google/common/collect/Multimaps.java | {
"start": 62320,
"end": 67402
} | class ____<
K extends @Nullable Object, V1 extends @Nullable Object, V2 extends @Nullable Object>
extends TransformedEntriesMultimap<K, V1, V2> implements ListMultimap<K, V2> {
TransformedEntriesListMultimap(
ListMultimap<K, V1> fromMultimap, EntryTransformer<? super K, ? super V1, V2> transformer) {
super(fromMultimap, transformer);
}
@Override
List<V2> transform(@ParametricNullness K key, Collection<V1> values) {
return Lists.transform((List<V1>) values, v1 -> transformer.transformEntry(key, v1));
}
@Override
public List<V2> get(@ParametricNullness K key) {
return transform(key, fromMultimap.get(key));
}
@SuppressWarnings("unchecked")
@Override
public List<V2> removeAll(@Nullable Object key) {
return transform((K) key, fromMultimap.removeAll(key));
}
@Override
public List<V2> replaceValues(@ParametricNullness K key, Iterable<? extends V2> values) {
throw new UnsupportedOperationException();
}
}
/**
* Creates an index {@code ImmutableListMultimap} that contains the results of applying a
* specified function to each item in an {@code Iterable} of values. Each value will be stored as
* a value in the resulting multimap, yielding a multimap with the same size as the input
* iterable. The key used to store that value in the multimap will be the result of calling the
* function on that value. The resulting multimap is created as an immutable snapshot. In the
* returned multimap, keys appear in the order they are first encountered, and the values
* corresponding to each key appear in the same order as they are encountered.
*
* <p>For example,
*
* {@snippet :
* List<String> badGuys =
* Arrays.asList("Inky", "Blinky", "Pinky", "Pinky", "Clyde");
* Function<String, Integer> stringLengthFunction = ...;
* Multimap<Integer, String> index =
* Multimaps.index(badGuys, stringLengthFunction);
* System.out.println(index);
* }
*
* <p>prints
*
* {@snippet :
* {4=[Inky], 6=[Blinky], 5=[Pinky, Pinky, Clyde]}
* }
*
* <p>The returned multimap is serializable if its keys and values are all serializable.
*
* @param values the values to use when constructing the {@code ImmutableListMultimap}
* @param keyFunction the function used to produce the key for each value
* @return {@code ImmutableListMultimap} mapping the result of evaluating the function {@code
* keyFunction} on each value in the input collection to that value
* @throws NullPointerException if any element of {@code values} is {@code null}, or if {@code
* keyFunction} produces {@code null} for any key
*/
public static <K, V> ImmutableListMultimap<K, V> index(
Iterable<V> values, Function<? super V, K> keyFunction) {
return index(values.iterator(), keyFunction);
}
/**
* Creates an index {@code ImmutableListMultimap} that contains the results of applying a
* specified function to each item in an {@code Iterator} of values. Each value will be stored as
* a value in the resulting multimap, yielding a multimap with the same size as the input
* iterator. The key used to store that value in the multimap will be the result of calling the
* function on that value. The resulting multimap is created as an immutable snapshot. In the
* returned multimap, keys appear in the order they are first encountered, and the values
* corresponding to each key appear in the same order as they are encountered.
*
* <p>For example,
*
* {@snippet :
* List<String> badGuys =
* Arrays.asList("Inky", "Blinky", "Pinky", "Pinky", "Clyde");
* Function<String, Integer> stringLengthFunction = ...;
* Multimap<Integer, String> index =
* Multimaps.index(badGuys.iterator(), stringLengthFunction);
* System.out.println(index);
* }
*
* <p>prints
*
* {@snippet :
* {4=[Inky], 6=[Blinky], 5=[Pinky, Pinky, Clyde]}
* }
*
* <p>The returned multimap is serializable if its keys and values are all serializable.
*
* @param values the values to use when constructing the {@code ImmutableListMultimap}
* @param keyFunction the function used to produce the key for each value
* @return {@code ImmutableListMultimap} mapping the result of evaluating the function {@code
* keyFunction} on each value in the input collection to that value
* @throws NullPointerException if any element of {@code values} is {@code null}, or if {@code
* keyFunction} produces {@code null} for any key
* @since 10.0
*/
public static <K, V> ImmutableListMultimap<K, V> index(
Iterator<V> values, Function<? super V, K> keyFunction) {
checkNotNull(keyFunction);
ImmutableListMultimap.Builder<K, V> builder = ImmutableListMultimap.builder();
while (values.hasNext()) {
V value = values.next();
checkNotNull(value, values);
builder.put(keyFunction.apply(value), value);
}
return builder.build();
}
static | TransformedEntriesListMultimap |
java | quarkusio__quarkus | independent-projects/resteasy-reactive/server/vertx/src/test/java/org/jboss/resteasy/reactive/server/vertx/test/simple/PeriodParamTest.java | {
"start": 480,
"end": 1070
} | class ____ {
@RegisterExtension
static ResteasyReactiveUnitTest test = new ResteasyReactiveUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(PeriodResource.class));
@Test
public void periodAsQueryParam() {
RestAssured.get("/period?value=P1Y2M3D")
.then().statusCode(200).body(equalTo("P1Y2M3D"));
}
@Test
public void periodAsPathParam() {
RestAssured.get("/period/P2Y")
.then().statusCode(200).body(equalTo("P2Y"));
}
@Path("period")
public static | PeriodParamTest |
java | elastic__elasticsearch | x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/mr/DelegatingCircuitBreakerServiceTests.java | {
"start": 996,
"end": 2969
} | class ____ extends ESTestCase {
/**
* the purpose of the test is not hitting the `IllegalStateException("already closed")` in
* PreallocatedCircuitBreaker#addEstimateBytesAndMaybeBreak in {@link PreallocatedCircuitBreakerService}
*/
public void testThreadedExecution() throws InterruptedException {
HierarchyCircuitBreakerService topBreaker = new HierarchyCircuitBreakerService(
CircuitBreakerMetrics.NOOP,
Settings.builder()
.put(REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "10mb")
// Disable the real memory checking because it causes other tests to interfere with this one.
.put(USE_REAL_MEMORY_USAGE_SETTING.getKey(), false)
.build(),
List.of(),
new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)
);
PreallocatedCircuitBreakerService preallocated = new PreallocatedCircuitBreakerService(
topBreaker,
CircuitBreaker.REQUEST,
10_000,
"test"
);
CircuitBreaker breaker = preallocated.getBreaker(CircuitBreaker.REQUEST);
DelegatingCircuitBreakerService delegatingCircuitBreakerService = new DelegatingCircuitBreakerService(breaker, (bytes -> {
breaker.addEstimateBytesAndMaybeBreak(bytes, "test");
}));
Thread consumerThread = new Thread(() -> {
for (int i = 0; i < 100; ++i) {
delegatingCircuitBreakerService.getBreaker("ignored").addEstimateBytesAndMaybeBreak(i % 2 == 0 ? 10 : -10, "ignored");
}
});
final Thread producerThread = new Thread(() -> {
delegatingCircuitBreakerService.disconnect();
preallocated.close();
});
consumerThread.start();
producerThread.start();
consumerThread.join();
producerThread.join();
}
}
| DelegatingCircuitBreakerServiceTests |
java | apache__maven | compat/maven-builder-support/src/main/java/org/apache/maven/building/FileSource.java | {
"start": 1175,
"end": 3115
} | class ____ implements Source {
private final Path path;
private final int hashCode;
/**
* Creates a new source backed by the specified file.
*
* @param file The file, must not be {@code null}.
* @deprecated Use {@link #FileSource(Path)} instead.
*/
@Deprecated
public FileSource(File file) {
this(Objects.requireNonNull(file, "file cannot be null").toPath());
}
/**
* Creates a new source backed by the specified file.
*
* @param path The file, must not be {@code null}.
* @since 4.0.0
*/
public FileSource(Path path) {
this.path = Objects.requireNonNull(path, "path cannot be null").toAbsolutePath();
this.hashCode = Objects.hash(path);
}
@Override
public InputStream getInputStream() throws IOException {
return Files.newInputStream(path);
}
@Override
public String getLocation() {
return path.toString();
}
/**
* Gets the file of this source.
*
* @return The underlying file, never {@code null}.
* @deprecated Use {@link #getPath()} instead.
*/
@Deprecated
public File getFile() {
return path.toFile();
}
/**
* Gets the file of this source.
*
* @return The underlying file, never {@code null}.
* @since 4.0.0
*/
public Path getPath() {
return path;
}
@Override
public String toString() {
return getLocation();
}
@Override
public int hashCode() {
return hashCode;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (!FileSource.class.equals(obj.getClass())) {
return false;
}
FileSource other = (FileSource) obj;
return this.path.equals(other.path);
}
}
| FileSource |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerClient.java | {
"start": 1897,
"end": 5239
} | class ____ {
private static final File TEST_ROOT_DIR = GenericTestUtils.getTestDir(
TestDockerClient.class.getName());
@BeforeEach
public void setup() {
TEST_ROOT_DIR.mkdirs();
}
@AfterEach
public void cleanup() {
FileUtil.fullyDelete(TEST_ROOT_DIR);
}
@Test
public void testWriteCommandToTempFile() throws Exception {
String absRoot = TEST_ROOT_DIR.getAbsolutePath();
ApplicationId appId = ApplicationId.newInstance(1, 1);
ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
ContainerId cid = ContainerId.newContainerId(attemptId, 1);
DockerCommand dockerCmd = new DockerInspectCommand(cid.toString());
Configuration conf = new Configuration();
conf.set("hadoop.tmp.dir", absRoot);
conf.set(YarnConfiguration.NM_LOCAL_DIRS, absRoot);
conf.set(YarnConfiguration.NM_LOG_DIRS, absRoot);
LocalDirsHandlerService dirsHandler = new LocalDirsHandlerService();
Context mockContext = mock(Context.class);
doReturn(conf).when(mockContext).getConf();
doReturn(dirsHandler).when(mockContext).getLocalDirsHandler();
DockerClient dockerClient = new DockerClient();
dirsHandler.init(conf);
dirsHandler.start();
String tmpPath = dockerClient.writeCommandToTempFile(dockerCmd, cid,
mockContext);
dirsHandler.stop();
File tmpFile = new File(tmpPath);
assertTrue(tmpFile.exists(), tmpFile + " was not created");
}
@Test
public void testCommandValidation() throws Exception {
String absRoot = TEST_ROOT_DIR.getAbsolutePath();
ApplicationId appId = ApplicationId.newInstance(1, 1);
ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
ContainerId cid = ContainerId.newContainerId(attemptId, 1);
Configuration conf = new Configuration();
conf.set("hadoop.tmp.dir", absRoot);
conf.set(YarnConfiguration.NM_LOCAL_DIRS, absRoot);
conf.set(YarnConfiguration.NM_LOG_DIRS, absRoot);
LocalDirsHandlerService dirsHandler = new LocalDirsHandlerService();
Context mockContext = mock(Context.class);
doReturn(conf).when(mockContext).getConf();
doReturn(dirsHandler).when(mockContext).getLocalDirsHandler();
DockerClient dockerClient = new DockerClient();
dirsHandler.init(conf);
dirsHandler.start();
try {
DockerRunCommand dockerCmd = new DockerRunCommand(cid.toString(), "user",
"image");
dockerCmd.addCommandArguments("prop=bad", "val");
dockerClient.writeCommandToTempFile(dockerCmd, cid,
mockContext);
fail("Expected exception writing command file");
} catch (ContainerExecutionException e) {
assertTrue(e.getMessage().contains("'=' found in entry for docker command file"),
"Expected key validation error");
}
try {
DockerRunCommand dockerCmd = new DockerRunCommand(cid.toString(), "user",
"image");
dockerCmd.setOverrideCommandWithArgs(Arrays.asList("sleep", "1000\n"));
dockerClient.writeCommandToTempFile(dockerCmd, cid, mockContext);
fail("Expected exception writing command file");
} catch (ContainerExecutionException e) {
assertTrue(e.getMessage().contains("'\\n' found in entry for docker command file"),
"Expected value validation error");
}
dirsHandler.stop();
}
}
| TestDockerClient |
java | mybatis__mybatis-3 | src/main/java/org/apache/ibatis/executor/statement/RoutingStatementHandler.java | {
"start": 1231,
"end": 3132
} | class ____ implements StatementHandler {
private final StatementHandler delegate;
public RoutingStatementHandler(Executor executor, MappedStatement ms, Object parameter, RowBounds rowBounds,
ResultHandler resultHandler, BoundSql boundSql) {
switch (ms.getStatementType()) {
case STATEMENT:
delegate = new SimpleStatementHandler(executor, ms, parameter, rowBounds, resultHandler, boundSql);
break;
case PREPARED:
delegate = new PreparedStatementHandler(executor, ms, parameter, rowBounds, resultHandler, boundSql);
break;
case CALLABLE:
delegate = new CallableStatementHandler(executor, ms, parameter, rowBounds, resultHandler, boundSql);
break;
default:
throw new ExecutorException("Unknown statement type: " + ms.getStatementType());
}
}
@Override
public Statement prepare(Connection connection, Integer transactionTimeout) throws SQLException {
return delegate.prepare(connection, transactionTimeout);
}
@Override
public void parameterize(Statement statement) throws SQLException {
delegate.parameterize(statement);
}
@Override
public void batch(Statement statement) throws SQLException {
delegate.batch(statement);
}
@Override
public int update(Statement statement) throws SQLException {
return delegate.update(statement);
}
@Override
public <E> List<E> query(Statement statement, ResultHandler resultHandler) throws SQLException {
return delegate.query(statement, resultHandler);
}
@Override
public <E> Cursor<E> queryCursor(Statement statement) throws SQLException {
return delegate.queryCursor(statement);
}
@Override
public BoundSql getBoundSql() {
return delegate.getBoundSql();
}
@Override
public ParameterHandler getParameterHandler() {
return delegate.getParameterHandler();
}
}
| RoutingStatementHandler |
java | quarkusio__quarkus | independent-projects/qute/core/src/main/java/io/quarkus/qute/ValueResolvers.java | {
"start": 1561,
"end": 2465
} | class ____ extends AbstractValueResolver {
private static final String RAW = "raw";
private static final String SAFE = "safe";
private static final Set<String> SUPPORTED_PROPERTIES = Set.of(RAW, SAFE);
public RawResolver() {
super(SUPPORTED_PROPERTIES, Collections.emptySet());
}
public boolean appliesTo(EvalContext context) {
if (context.getBase() == null) {
return false;
}
String name = context.getName();
return name.equals(RAW) || name.equals(SAFE);
}
@Override
public CompletionStage<Object> resolve(EvalContext context) {
return CompletedStage.of(new RawString(context.getBase().toString()));
}
}
public static ValueResolver listResolver() {
return new ListResolver();
}
public static final | RawResolver |
java | spring-projects__spring-boot | module/spring-boot-devtools/src/main/java/org/springframework/boot/devtools/autoconfigure/OptionalLiveReloadServer.java | {
"start": 1220,
"end": 2319
} | class ____ implements InitializingBean {
private static final Log logger = LogFactory.getLog(OptionalLiveReloadServer.class);
private @Nullable LiveReloadServer server;
/**
* Create a new {@link OptionalLiveReloadServer} instance.
* @param server the server to manage or {@code null}
*/
public OptionalLiveReloadServer(@Nullable LiveReloadServer server) {
this.server = server;
}
@Override
public void afterPropertiesSet() throws Exception {
startServer();
}
void startServer() {
if (this.server != null) {
try {
int port = this.server.getPort();
if (!this.server.isStarted()) {
port = this.server.start();
}
logger.info(LogMessage.format("LiveReload server is running on port %s", port));
}
catch (Exception ex) {
logger.warn("Unable to start LiveReload server");
logger.debug("Live reload start error", ex);
this.server = null;
}
}
}
/**
* Trigger LiveReload if the server is up and running.
*/
public void triggerReload() {
if (this.server != null) {
this.server.triggerReload();
}
}
}
| OptionalLiveReloadServer |
java | redisson__redisson | redisson/src/main/java/org/redisson/PubSubEntry.java | {
"start": 721,
"end": 864
} | interface ____<E> {
void acquire();
void acquire(int permits);
int release();
CompletableFuture<E> getPromise();
}
| PubSubEntry |
java | quarkusio__quarkus | extensions/kubernetes/vanilla/deployment/src/main/java/io/quarkus/kubernetes/deployment/OpenshiftDeployer.java | {
"start": 541,
"end": 1943
} | class ____ {
@BuildStep
public void checkEnvironment(Optional<SelectedKubernetesDeploymentTargetBuildItem> selectedDeploymentTarget,
List<GeneratedKubernetesResourceBuildItem> resources,
KubernetesClientBuildItem kubernetesClientBuilder,
BuildProducer<KubernetesDeploymentClusterBuildItem> deploymentCluster) {
selectedDeploymentTarget.ifPresent(target -> {
if (!KubernetesDeploy.INSTANCE.checkSilently(kubernetesClientBuilder)) {
return;
}
if (target.getEntry().getName().equals(OPENSHIFT)) {
try (var openShiftClient = kubernetesClientBuilder.buildClient().adapt(OpenShiftClient.class)) {
if (openShiftClient.hasApiGroup("openshift.io", false)) {
deploymentCluster.produce(new KubernetesDeploymentClusterBuildItem(OPENSHIFT));
} else {
throw new IllegalStateException(
"Openshift was requested as a deployment, but the target cluster is not an Openshift cluster!");
}
} catch (Exception e) {
throw new RuntimeException(
"Failed to configure OpenShift. Make sure you have the Quarkus OpenShift extension.", e);
}
}
});
}
}
| OpenshiftDeployer |
java | apache__hadoop | hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/impl/ActiveAuditManagerS3A.java | {
"start": 3890,
"end": 4433
} | class ____ implements {@link ExecutionInterceptor} and
* returns itself in {@link #createExecutionInterceptors()};
* once registered with the S3 client, the implemented methods
* will be called during different parts of an SDK request lifecycle,
* which then locate the active span and forward the request.
* If any such invocation raises an {@link AuditFailureException}
* then the IOStatistics counter for {@code AUDIT_FAILURE}
* is incremented.
*
* Uses the WeakReferenceThreadMap to store spans for threads.
* Provided a calling | also |
java | apache__camel | components/camel-elasticsearch/src/main/java/org/apache/camel/component/es/ElasticsearchOperation.java | {
"start": 1740,
"end": 2216
} | enum ____ {
Index("Index"),
Update("Update"),
Bulk("Bulk"),
GetById("GetById"),
MultiGet("MultiGet"),
MultiSearch("MultiSearch"),
Delete("Delete"),
DeleteIndex("DeleteIndex"),
Search("Search"),
Exists("Exists"),
Ping("Ping");
private final String text;
ElasticsearchOperation(final String text) {
this.text = text;
}
@Override
public String toString() {
return text;
}
}
| ElasticsearchOperation |
java | eclipse-vertx__vert.x | vertx-core/src/main/java/io/vertx/core/http/WebSocketClient.java | {
"start": 818,
"end": 4335
} | interface ____ extends Measured {
/**
* Create a WebSocket that is not yet connected to the server.
*
* @return the client WebSocket
*/
ClientWebSocket webSocket();
/**
* Connect a WebSocket to the specified port, host and relative request URI.
*
* @param port the port
* @param host the host
* @param requestURI the relative URI
* @return a future notified when the WebSocket when connected
*/
default Future<WebSocket> connect(int port, String host, String requestURI) {
return connect(new WebSocketConnectOptions().setURI(requestURI).setHost(host).setPort(port));
}
/**
* Connect a WebSocket to the default client port and specified host and relative request URI.
*
* @param host the host
* @param requestURI the relative URI
* @return a future notified when the WebSocket when connected
*/
default Future<WebSocket> connect(String host, String requestURI) {
return connect(new WebSocketConnectOptions().setURI(requestURI).setHost(host));
}
/**
* Connect a WebSocket to the default client port, default client host and specified, relative request URI.
*
* @param requestURI the relative URI
* @return a future notified when the WebSocket when connected
*/
default Future<WebSocket> connect(String requestURI) {
return connect(new WebSocketConnectOptions().setURI(requestURI));
}
/**
* Connect a WebSocket with the specified options.
*
* @param options the request options
* @return a future notified when the WebSocket when connected
*/
Future<WebSocket> connect(WebSocketConnectOptions options);
/**
* Shutdown with a 30 seconds timeout ({@code shutdown(30, TimeUnit.SECONDS)}).
*
* @return a future completed when shutdown has completed
*/
default Future<Void> shutdown() {
return shutdown(30, TimeUnit.SECONDS);
}
/**
* Close immediately ({@code shutdown(0, TimeUnit.SECONDS}).
*
* @return a future notified when the client is closed
*/
default Future<Void> close() {
return shutdown(0, TimeUnit.SECONDS);
}
/**
* Update the client with new SSL {@code options}, the update happens if the options object is valid and different
* from the existing options object.
*
* @param options the new SSL options
* @return a future signaling the update success
*/
default Future<Boolean> updateSSLOptions(ClientSSLOptions options) {
return updateSSLOptions(options, false);
}
/**
* <p>Update the client with new SSL {@code options}, the update happens if the options object is valid and different
* from the existing options object.
*
* <p>The {@code options} object is compared using its {@code equals} method against the existing options to prevent
* an update when the objects are equals since loading options can be costly, this can happen for share TCP servers.
* When object are equals, setting {@code force} to {@code true} forces the update.
*
* @param options the new SSL options
* @param force force the update when options are equals
* @return a future signaling the update success
*/
Future<Boolean> updateSSLOptions(ClientSSLOptions options, boolean force);
/**
* Initiate the client shutdown sequence.
*
* @return a future notified when the client is closed
* @param timeout the amount of time after which all resources are forcibly closed
* @param unit the of the timeout
*/
Future<Void> shutdown(long timeout, TimeUnit unit);
}
| WebSocketClient |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/deser/dos/DeepArrayWrappingForDeser3590Test.java | {
"start": 421,
"end": 3112
} | class ____
{
// 05-Sep-2022, tatu: Before fix, failed with 5000
private final static int TOO_DEEP_NESTING = 9999;
private final ObjectMapper MAPPER = jsonMapperBuilder()
.enable(DeserializationFeature.UNWRAP_SINGLE_VALUE_ARRAYS)
.build();
private final static String TOO_DEEP_DOC = _nestedDoc(TOO_DEEP_NESTING, "[ ", "] ", "123");
@Test
public void testArrayWrappingForBoolean() throws Exception
{
_testArrayWrappingFor(Boolean.class);
_testArrayWrappingFor(Boolean.TYPE);
}
@Test
public void testArrayWrappingForByte() throws Exception
{
_testArrayWrappingFor(Byte.class);
_testArrayWrappingFor(Byte.TYPE);
}
@Test
public void testArrayWrappingForShort() throws Exception
{
_testArrayWrappingFor(Short.class);
_testArrayWrappingFor(Short.TYPE);
}
@Test
public void testArrayWrappingForInt() throws Exception
{
_testArrayWrappingFor(Integer.class);
_testArrayWrappingFor(Integer.TYPE);
}
@Test
public void testArrayWrappingForLong() throws Exception
{
_testArrayWrappingFor(Long.class);
_testArrayWrappingFor(Long.TYPE);
}
@Test
public void testArrayWrappingForFloat() throws Exception
{
_testArrayWrappingFor(Float.class);
_testArrayWrappingFor(Float.TYPE);
}
@Test
public void testArrayWrappingForDouble() throws Exception
{
_testArrayWrappingFor(Double.class);
_testArrayWrappingFor(Double.TYPE);
}
@Test
public void testArrayWrappingForDate() throws Exception
{
_testArrayWrappingFor(Date.class);
}
private void _testArrayWrappingFor(Class<?> cls) throws Exception
{
try {
MAPPER.readValue(TOO_DEEP_DOC, cls);
fail("Should not pass");
} catch (MismatchedInputException e) {
verifyException(e, "Cannot deserialize");
verifyException(e, "nested Arrays not allowed");
}
}
private static String _nestedDoc(int nesting, String open, String close, String content) {
StringBuilder sb = new StringBuilder(nesting * (open.length() + close.length()));
for (int i = 0; i < nesting; ++i) {
sb.append(open);
if ((i & 31) == 0) {
sb.append("\n");
}
}
sb.append("\n").append(content).append("\n");
for (int i = 0; i < nesting; ++i) {
sb.append(close);
if ((i & 31) == 0) {
sb.append("\n");
}
}
return sb.toString();
}
}
| DeepArrayWrappingForDeser3590Test |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/value/CustomIllegalArgumentException.java | {
"start": 227,
"end": 386
} | class ____ extends RuntimeException {
public CustomIllegalArgumentException(String message) {
super( message );
}
}
| CustomIllegalArgumentException |
java | google__dagger | javatests/dagger/internal/codegen/SwitchingProviderTest.java | {
"start": 7479,
"end": 7817
} | class ____ {}");
Source component =
CompilerTests.javaSource(
"test.TestComponent",
"package test;",
"",
"import dagger.Component;",
"import dagger.MembersInjector;",
"import javax.inject.Provider;",
"",
"@Component",
" | Foo |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/issue_1700/Issue1763.java | {
"start": 1611,
"end": 1885
} | class ____ {
private String sourceId;
public String getSourceId() {
return sourceId;
}
public void setSourceId(String sourceId) {
this.sourceId = sourceId;
}
}
public static | InteractiveOrderMaterielModel |
java | elastic__elasticsearch | x-pack/plugin/sql/qa/server/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcSecurityIT.java | {
"start": 1098,
"end": 4722
} | class ____ extends SqlSecurityTestCase {
static Properties adminProperties() {
// tag::admin_properties
Properties properties = new Properties();
properties.put("user", "test_admin");
properties.put("password", "x-pack-test-password");
// end::admin_properties
addSslPropertiesIfNeeded(properties);
return properties;
}
Connection es(Properties properties) throws SQLException {
Properties props = new Properties();
props.put("timezone", randomZone().getId());
props.putAll(properties);
String scheme = SSL_ENABLED ? "https" : "http";
return DriverManager.getConnection("jdbc:es://" + scheme + "://" + elasticsearchAddress(), props);
}
static Properties userProperties(String user) {
if (user == null) {
return adminProperties();
}
Properties prop = new Properties();
prop.put("user", user);
prop.put("password", "test-user-password");
addSslPropertiesIfNeeded(prop);
return prop;
}
private static void addSslPropertiesIfNeeded(Properties properties) {
if (false == SSL_ENABLED) {
return;
}
Path keyStore;
try {
keyStore = PathUtils.get(RestSqlIT.class.getResource("/test-node.jks").toURI());
} catch (URISyntaxException e) {
throw new RuntimeException("exception while reading the store", e);
}
if (Files.exists(keyStore) == false) {
throw new IllegalStateException("Keystore file [" + keyStore + "] does not exist.");
}
String keyStoreStr = keyStore.toAbsolutePath().toString();
properties.put("ssl", "true");
properties.put("ssl.keystore.location", keyStoreStr);
properties.put("ssl.keystore.pass", "keypass");
properties.put("ssl.truststore.location", keyStoreStr);
properties.put("ssl.truststore.pass", "keypass");
}
void expectActionMatchesAdmin(
CheckedFunction<Connection, ResultSet, SQLException> adminAction,
String user,
CheckedFunction<Connection, ResultSet, SQLException> userAction
) throws Exception {
try (Connection adminConnection = es(adminProperties()); Connection userConnection = es(userProperties(user))) {
assertResultSets(adminAction.apply(adminConnection), userAction.apply(userConnection));
}
}
void expectForbidden(String user, CheckedConsumer<Connection, SQLException> action) throws Exception {
expectError(user, action, "is unauthorized for user [" + user + "]");
}
void expectUnknownIndex(String user, CheckedConsumer<Connection, SQLException> action) throws Exception {
expectError(user, action, "Unknown index");
}
void expectError(String user, CheckedConsumer<Connection, SQLException> action, String errorMessage) throws Exception {
SQLException e;
try (Connection connection = es(userProperties(user))) {
e = expectThrows(SQLException.class, () -> action.accept(connection));
}
assertThat(e.getMessage(), containsString(errorMessage));
}
void expectActionThrowsUnknownColumn(String user, CheckedConsumer<Connection, SQLException> action, String column) throws Exception {
SQLException e;
try (Connection connection = es(userProperties(user))) {
e = expectThrows(SQLException.class, () -> action.accept(connection));
}
assertThat(e.getMessage(), containsString("Unknown column [" + column + "]"));
}
private | JdbcSecurityIT |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/sql/partition/PartitionKeyAndAssociationTest.java | {
"start": 1248,
"end": 2833
} | class ____ {
@BeforeAll
public void setUp(SessionFactoryScope scope) {
scope.inTransaction( session -> {
session.persist( new SalesContact( 1L, 1L, "name_1" ) );
session.persist( new SalesContact( 2L, 2L, "name_2" ) );
session.persist( new ContactAddress( 1L, "address_1", 1L, null ) );
} );
}
@AfterAll
public void tearDown(SessionFactoryScope scope) {
scope.inTransaction( session -> session.createMutationQuery( "delete from ContactAddress" ).executeUpdate() );
scope.inTransaction( session -> session.createMutationQuery( "delete from SalesContact" ).executeUpdate() );
}
@Test
public void testUpdate(SessionFactoryScope scope) {
scope.inTransaction( session -> {
final SalesContact contact = session.find( SalesContact.class, 1L );
contact.setName( "updated_name" );
} );
scope.inTransaction( session -> assertThat( session.find(
SalesContact.class,
1L
).getName() ).isEqualTo( "updated_name" ) );
}
@Test
public void testDelete(SessionFactoryScope scope) {
scope.inTransaction( session -> {
final SalesContact contact = session.find( SalesContact.class, 2L );
session.remove( contact );
} );
scope.inTransaction( session -> assertThat( session.find(
SalesContact.class,
2L
) ).isNull() );
}
@Test
public void testNullPartitionKey(SessionFactoryScope scope) {
scope.inTransaction( session -> {
final ContactAddress address = session.find( ContactAddress.class, 1L );
assertThat( address.contact ).isNull();
} );
}
@Entity( name = "SalesContact" )
public static | PartitionKeyAndAssociationTest |
java | square__retrofit | retrofit-adapters/rxjava3/src/test/java/retrofit2/adapter/rxjava3/MaybeWithSchedulerTest.java | {
"start": 1182,
"end": 2695
} | interface ____ {
@GET("/")
Maybe<String> body();
@GET("/")
Maybe<Response<String>> response();
@GET("/")
Maybe<Result<String>> result();
}
private final TestScheduler scheduler = new TestScheduler();
private Service service;
@Before
public void setUp() {
Retrofit retrofit =
new Retrofit.Builder()
.baseUrl(server.url("/"))
.addConverterFactory(new StringConverterFactory())
.addCallAdapterFactory(RxJava3CallAdapterFactory.createWithScheduler(scheduler))
.build();
service = retrofit.create(Service.class);
}
@Test
public void bodyUsesScheduler() {
server.enqueue(new MockResponse());
RecordingMaybeObserver<Object> observer = observerRule.create();
service.body().subscribe(observer);
observer.assertNoEvents();
scheduler.triggerActions();
observer.assertAnyValue();
}
@Test
public void responseUsesScheduler() {
server.enqueue(new MockResponse());
RecordingMaybeObserver<Object> observer = observerRule.create();
service.response().subscribe(observer);
observer.assertNoEvents();
scheduler.triggerActions();
observer.assertAnyValue();
}
@Test
public void resultUsesScheduler() {
server.enqueue(new MockResponse());
RecordingMaybeObserver<Object> observer = observerRule.create();
service.result().subscribe(observer);
observer.assertNoEvents();
scheduler.triggerActions();
observer.assertAnyValue();
}
}
| Service |
java | mybatis__mybatis-3 | src/test/java/org/apache/ibatis/submitted/discriminator/Owner.java | {
"start": 709,
"end": 1270
} | class ____ {
private Integer id;
private String name;
private Vehicle vehicle;
public Owner() {
}
public Owner(Integer id, Vehicle vehicle) {
this.id = id;
this.vehicle = vehicle;
}
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public Vehicle getVehicle() {
return vehicle;
}
public void setVehicle(Vehicle vehicle) {
this.vehicle = vehicle;
}
}
| Owner |
java | google__dagger | dagger-runtime/main/java/dagger/internal/Providers.java | {
"start": 811,
"end": 1309
} | class ____ {
/** Converts a javax provider to a Dagger internal provider. */
@SuppressWarnings("unchecked")
public static <T extends @Nullable Object> Provider<T> asDaggerProvider(
final javax.inject.Provider<T> provider) {
checkNotNull(provider);
if (provider instanceof Provider) {
return (Provider) provider;
}
return new Provider<T>() {
@Override public T get() {
return provider.get();
}
};
}
private Providers() {}
}
| Providers |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/bugs/_2704/TopLevel.java | {
"start": 318,
"end": 419
} | class ____ {
public void setE(@SuppressWarnings("unused") InnerEnum e) {
}
}
}
| Target |
java | google__guava | android/guava/src/com/google/common/hash/AbstractHasher.java | {
"start": 1063,
"end": 3555
} | class ____ implements Hasher {
@Override
@CanIgnoreReturnValue
public final Hasher putBoolean(boolean b) {
return putByte(b ? (byte) 1 : (byte) 0);
}
@Override
@CanIgnoreReturnValue
public final Hasher putDouble(double d) {
return putLong(Double.doubleToRawLongBits(d));
}
@Override
@CanIgnoreReturnValue
public final Hasher putFloat(float f) {
return putInt(Float.floatToRawIntBits(f));
}
@Override
@CanIgnoreReturnValue
public Hasher putUnencodedChars(CharSequence charSequence) {
for (int i = 0, len = charSequence.length(); i < len; i++) {
putChar(charSequence.charAt(i));
}
return this;
}
@Override
@CanIgnoreReturnValue
public Hasher putString(CharSequence charSequence, Charset charset) {
return putBytes(charSequence.toString().getBytes(charset));
}
@Override
@CanIgnoreReturnValue
public Hasher putBytes(byte[] bytes) {
return putBytes(bytes, 0, bytes.length);
}
@Override
@CanIgnoreReturnValue
public Hasher putBytes(byte[] bytes, int off, int len) {
Preconditions.checkPositionIndexes(off, off + len, bytes.length);
for (int i = 0; i < len; i++) {
putByte(bytes[off + i]);
}
return this;
}
@Override
@CanIgnoreReturnValue
public Hasher putBytes(ByteBuffer b) {
if (b.hasArray()) {
putBytes(b.array(), b.arrayOffset() + b.position(), b.remaining());
Java8Compatibility.position(b, b.limit());
} else {
for (int remaining = b.remaining(); remaining > 0; remaining--) {
putByte(b.get());
}
}
return this;
}
@Override
@CanIgnoreReturnValue
public Hasher putShort(short s) {
putByte((byte) s);
putByte((byte) (s >>> 8));
return this;
}
@Override
@CanIgnoreReturnValue
public Hasher putInt(int i) {
putByte((byte) i);
putByte((byte) (i >>> 8));
putByte((byte) (i >>> 16));
putByte((byte) (i >>> 24));
return this;
}
@Override
@CanIgnoreReturnValue
public Hasher putLong(long l) {
for (int i = 0; i < 64; i += 8) {
putByte((byte) (l >>> i));
}
return this;
}
@Override
@CanIgnoreReturnValue
public Hasher putChar(char c) {
putByte((byte) c);
putByte((byte) (c >>> 8));
return this;
}
@Override
@CanIgnoreReturnValue
public <T extends @Nullable Object> Hasher putObject(
@ParametricNullness T instance, Funnel<? super T> funnel) {
funnel.funnel(instance, this);
return this;
}
}
| AbstractHasher |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/JacksonSerializable.java | {
"start": 2219,
"end": 2551
} | class ____ implements JacksonSerializable
{
/**
* Method that may be called on instance to determine if it is considered
* "empty" for purposes of serialization filtering or not.
*/
public boolean isEmpty(SerializationContext serializers) {
return false;
}
}
}
| Base |
java | spring-projects__spring-framework | spring-webmvc/src/main/java/org/springframework/web/servlet/mvc/method/annotation/AbstractMessageConverterMethodArgumentResolver.java | {
"start": 15405,
"end": 16123
} | class ____ implements HttpMessageConverter<String> {
@Override
public boolean canRead(Class<?> clazz, @Nullable MediaType mediaType) {
return false;
}
@Override
public boolean canWrite(Class<?> clazz, @Nullable MediaType mediaType) {
return false;
}
@Override
public List<MediaType> getSupportedMediaTypes() {
return Collections.emptyList();
}
@Override
public String read(Class<? extends String> clazz, HttpInputMessage inputMessage) {
throw new UnsupportedOperationException();
}
@Override
public void write(String s, @Nullable MediaType contentType, HttpOutputMessage outputMessage) {
throw new UnsupportedOperationException();
}
}
}
| NoContentTypeHttpMessageConverter |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/resilience/RetryInterceptorTests.java | {
"start": 18899,
"end": 19200
} | class ____ {
AtomicInteger counter = new AtomicInteger();
@Async
@Retryable(maxRetries = 2, delay = 10)
public CompletableFuture<Void> retryOperation() {
throw new IllegalStateException(Integer.toString(counter.incrementAndGet()));
}
}
@EnableResilientMethods
static | AsyncAnnotatedBean |
java | apache__camel | components/camel-mvel/src/main/java/org/apache/camel/language/mvel/MvelConstants.java | {
"start": 883,
"end": 1114
} | class ____ {
public static final String MVEL_RESOURCE_URI = "CamelMvelResourceUri";
public static final String MVEL_TEMPLATE = "CamelMvelTemplate";
private MvelConstants() {
// Utility class
}
}
| MvelConstants |
java | hibernate__hibernate-orm | hibernate-envers/src/test/java/org/hibernate/orm/test/envers/integration/naming/VersionsJoinTableRangeComponentNamingTest.java | {
"start": 1057,
"end": 1498
} | class ____ {@link VersionsJoinTableRangeComponentTestEntity}, to test
* various {@link org.hibernate.envers.AuditOverride} annotations.
*
* @author Erik-Berndt Scheper
*/
@EnversTest
@DomainModel(annotatedClasses = {
VersionsJoinTableRangeComponentTestEntity.class,
VersionsJoinTableRangeTestEntitySuperClass.class,
VersionsJoinTableRangeTestEntity.class,
VersionsJoinTableRangeTestAlternateEntity.class
})
@SessionFactory
public | for |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java | {
"start": 6894,
"end": 67496
} | class ____
<T extends SchedulerApplicationAttempt, N extends SchedulerNode>
extends AbstractService implements ResourceScheduler {
private static final Logger LOG =
LoggerFactory.getLogger(AbstractYarnScheduler.class);
private static final Resource ZERO_RESOURCE = Resource.newInstance(0, 0);
protected final ClusterNodeTracker<N> nodeTracker =
new ClusterNodeTracker<>();
protected Resource minimumAllocation;
protected volatile RMContext rmContext;
private volatile Priority maxClusterLevelAppPriority;
protected ActivitiesManager activitiesManager;
protected SchedulerHealth schedulerHealth = new SchedulerHealth();
protected volatile long lastNodeUpdateTime;
// timeout to join when we stop this service
protected final long THREAD_JOIN_TIMEOUT_MS = 1000;
private volatile Clock clock;
/**
* To enable the update thread, subclasses should set updateInterval to a
* positive value during {@link #serviceInit(Configuration)}.
*/
protected long updateInterval = -1L;
@VisibleForTesting
Thread updateThread;
private final Object updateThreadMonitor = new Object();
private Timer releaseCache;
private boolean autoCorrectContainerAllocation;
/*
* All schedulers which are inheriting AbstractYarnScheduler should use
* concurrent version of 'applications' map.
*/
protected ConcurrentMap<ApplicationId, SchedulerApplication<T>> applications;
protected int nmExpireInterval;
protected long nmHeartbeatInterval;
private long skipNodeInterval;
private final static List<Container> EMPTY_CONTAINER_LIST =
new ArrayList<Container>();
protected static final Allocation EMPTY_ALLOCATION = new Allocation(
EMPTY_CONTAINER_LIST, Resources.createResource(0), null, null, null);
protected final ReentrantReadWriteLock.ReadLock readLock;
/*
* Use writeLock for any of operations below:
* - queue change (hierarchy / configuration / container allocation)
* - application(add/remove/allocate-container, but not include container
* finish)
* - node (add/remove/change-resource/container-allocation, but not include
* container finish)
*/
protected final ReentrantReadWriteLock.WriteLock writeLock;
// If set to true, then ALL container updates will be automatically sent to
// the NM in the next heartbeat.
private boolean autoUpdateContainers = false;
protected SchedulingMonitorManager schedulingMonitorManager =
new SchedulingMonitorManager();
private boolean migration;
/**
* Construct the service.
*
* @param name service name
*/
public AbstractYarnScheduler(String name) {
super(name);
clock = SystemClock.getInstance();
ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
readLock = lock.readLock();
writeLock = lock.writeLock();
}
@Override
public void serviceInit(Configuration conf) throws Exception {
migration =
conf.getBoolean(FairSchedulerConfiguration.MIGRATION_MODE, false);
nmExpireInterval =
conf.getInt(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS,
YarnConfiguration.DEFAULT_RM_NM_EXPIRY_INTERVAL_MS);
nmHeartbeatInterval =
conf.getLong(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS,
YarnConfiguration.DEFAULT_RM_NM_HEARTBEAT_INTERVAL_MS);
skipNodeInterval = YarnConfiguration.getSkipNodeInterval(conf);
autoCorrectContainerAllocation =
conf.getBoolean(YarnConfiguration.RM_SCHEDULER_AUTOCORRECT_CONTAINER_ALLOCATION,
YarnConfiguration.DEFAULT_RM_SCHEDULER_AUTOCORRECT_CONTAINER_ALLOCATION);
long configuredMaximumAllocationWaitTime =
conf.getLong(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_SCHEDULING_WAIT_MS,
YarnConfiguration.DEFAULT_RM_WORK_PRESERVING_RECOVERY_SCHEDULING_WAIT_MS);
nodeTracker.setConfiguredMaxAllocationWaitTime(
configuredMaximumAllocationWaitTime);
maxClusterLevelAppPriority = getMaxPriorityFromConf(conf);
if (!migration) {
this.releaseCache = new Timer("Pending Container Clear Timer");
}
autoUpdateContainers =
conf.getBoolean(YarnConfiguration.RM_AUTO_UPDATE_CONTAINERS,
YarnConfiguration.DEFAULT_RM_AUTO_UPDATE_CONTAINERS);
if (updateInterval > 0) {
updateThread = new UpdateThread();
updateThread.setName("SchedulerUpdateThread");
updateThread.setUncaughtExceptionHandler(
new RMCriticalThreadUncaughtExceptionHandler(rmContext));
updateThread.setDaemon(true);
}
super.serviceInit(conf);
}
@Override
protected void serviceStart() throws Exception {
if (!migration) {
if (updateThread != null) {
updateThread.start();
}
schedulingMonitorManager.startAll();
createReleaseCache();
}
super.serviceStart();
}
@Override
protected void serviceStop() throws Exception {
if (updateThread != null) {
updateThread.interrupt();
updateThread.join(THREAD_JOIN_TIMEOUT_MS);
}
//Stop Timer
if (releaseCache != null) {
releaseCache.cancel();
releaseCache = null;
}
schedulingMonitorManager.stop();
super.serviceStop();
}
@VisibleForTesting
public ClusterNodeTracker<N> getNodeTracker() {
return nodeTracker;
}
@VisibleForTesting
public SchedulingMonitorManager getSchedulingMonitorManager() {
return schedulingMonitorManager;
}
/*
* YARN-3136 removed synchronized lock for this method for performance
* purposes
*/
public List<Container> getTransferredContainers(
ApplicationAttemptId currentAttempt) {
ApplicationId appId = currentAttempt.getApplicationId();
SchedulerApplication<T> app = applications.get(appId);
List<Container> containerList = new ArrayList<Container>();
if (app == null) {
return containerList;
}
Collection<RMContainer> liveContainers = app.getCurrentAppAttempt()
.pullContainersToTransfer();
ContainerId amContainerId = null;
// For UAM, amContainer would be null
if (rmContext.getRMApps().get(appId).getCurrentAppAttempt()
.getMasterContainer() != null) {
amContainerId = rmContext.getRMApps().get(appId).getCurrentAppAttempt()
.getMasterContainer().getId();
}
for (RMContainer rmContainer : liveContainers) {
if (!rmContainer.getContainerId().equals(amContainerId)) {
containerList.add(rmContainer.getContainer());
}
}
return containerList;
}
public Map<ApplicationId, SchedulerApplication<T>>
getSchedulerApplications() {
return applications;
}
/**
* Add blacklisted NodeIds to the list that is passed.
*
* @param app application attempt.
* @return blacklisted NodeIds.
*/
public List<N> getBlacklistedNodes(final SchedulerApplicationAttempt app) {
NodeFilter nodeFilter = new NodeFilter() {
@Override
public boolean accept(SchedulerNode node) {
return SchedulerAppUtils.isPlaceBlacklisted(app, node, LOG);
}
};
return nodeTracker.getNodes(nodeFilter);
}
public List<N> getNodes(final NodeFilter filter) {
return nodeTracker.getNodes(filter);
}
public boolean shouldContainersBeAutoUpdated() {
return this.autoUpdateContainers;
}
@Override
public Resource getClusterResource() {
return nodeTracker.getClusterCapacity();
}
@Override
public Resource getMinimumResourceCapability() {
return minimumAllocation;
}
@Override
public Resource getMaximumResourceCapability() {
return nodeTracker.getMaxAllowedAllocation();
}
@Override
public Resource getMaximumResourceCapability(String queueName) {
return getMaximumResourceCapability();
}
protected void initMaximumResourceCapability(Resource maximumAllocation) {
nodeTracker.setConfiguredMaxAllocation(maximumAllocation);
}
public SchedulerHealth getSchedulerHealth() {
return this.schedulerHealth;
}
protected void setLastNodeUpdateTime(long time) {
this.lastNodeUpdateTime = time;
}
public long getLastNodeUpdateTime() {
return lastNodeUpdateTime;
}
public long getSkipNodeInterval(){
return skipNodeInterval;
}
protected void containerLaunchedOnNode(
ContainerId containerId, SchedulerNode node) {
readLock.lock();
try {
// Get the application for the finished container
SchedulerApplicationAttempt application =
getCurrentAttemptForContainer(containerId);
if (application == null) {
LOG.info("Unknown application " + containerId.getApplicationAttemptId()
.getApplicationId() + " launched container " + containerId
+ " on node: " + node);
this.rmContext.getDispatcher().getEventHandler().handle(
new RMNodeCleanContainerEvent(node.getNodeID(), containerId));
return;
}
application.containerLaunchedOnNode(containerId, node.getNodeID());
node.containerStarted(containerId);
} finally {
readLock.unlock();
}
}
protected void containerIncreasedOnNode(ContainerId containerId,
SchedulerNode node, Container increasedContainerReportedByNM) {
/*
* No lock is required, as this method is protected by scheduler's writeLock
*/
// Get the application for the finished container
SchedulerApplicationAttempt application = getCurrentAttemptForContainer(
containerId);
if (application == null) {
LOG.info("Unknown application " + containerId.getApplicationAttemptId()
.getApplicationId() + " increased container " + containerId
+ " on node: " + node);
this.rmContext.getDispatcher().getEventHandler().handle(
new RMNodeCleanContainerEvent(node.getNodeID(), containerId));
return;
}
RMContainer rmContainer = getRMContainer(containerId);
if (rmContainer == null) {
// Some unknown container sneaked into the system. Kill it.
this.rmContext.getDispatcher().getEventHandler().handle(
new RMNodeCleanContainerEvent(node.getNodeID(), containerId));
return;
}
rmContainer.handle(new RMContainerNMDoneChangeResourceEvent(containerId,
increasedContainerReportedByNM.getResource()));
}
// TODO: Rename it to getCurrentApplicationAttempt
public T getApplicationAttempt(ApplicationAttemptId applicationAttemptId) {
SchedulerApplication<T> app = applications.get(
applicationAttemptId.getApplicationId());
return app == null ? null : app.getCurrentAppAttempt();
}
@Override
public SchedulerAppReport getSchedulerAppInfo(
ApplicationAttemptId appAttemptId) {
SchedulerApplicationAttempt attempt = getApplicationAttempt(appAttemptId);
if (attempt == null) {
LOG.debug("Request for appInfo of unknown attempt {}", appAttemptId);
return null;
}
return new SchedulerAppReport(attempt);
}
@Override
public ApplicationResourceUsageReport getAppResourceUsageReport(
ApplicationAttemptId appAttemptId) {
SchedulerApplicationAttempt attempt = getApplicationAttempt(appAttemptId);
if (attempt == null) {
LOG.debug("Request for appInfo of unknown attempt {}", appAttemptId);
return null;
}
return attempt.getResourceUsageReport();
}
public T getCurrentAttemptForContainer(ContainerId containerId) {
return getApplicationAttempt(containerId.getApplicationAttemptId());
}
@Override
public RMContainer getRMContainer(ContainerId containerId) {
SchedulerApplicationAttempt attempt =
getCurrentAttemptForContainer(containerId);
return (attempt == null) ? null : attempt.getRMContainer(containerId);
}
@Override
public SchedulerNodeReport getNodeReport(NodeId nodeId) {
return nodeTracker.getNodeReport(nodeId);
}
@Override
public String moveApplication(ApplicationId appId, String newQueue)
throws YarnException {
throw new YarnException(getClass().getSimpleName()
+ " does not support moving apps between queues");
}
@Override
public void preValidateMoveApplication(ApplicationId appId,
String newQueue) throws YarnException {
throw new YarnException(getClass().getSimpleName()
+ " does not support pre-validation of moving apps between queues");
}
public void removeQueue(String queueName) throws YarnException {
throw new YarnException(getClass().getSimpleName()
+ " does not support removing queues");
}
@Override
public void addQueue(Queue newQueue) throws YarnException, IOException {
throw new YarnException(getClass().getSimpleName()
+ " does not support this operation");
}
@Override
public void setEntitlement(String queue, QueueEntitlement entitlement)
throws YarnException {
throw new YarnException(getClass().getSimpleName()
+ " does not support this operation");
}
private void killOrphanContainerOnNode(RMNode node,
NMContainerStatus container) {
if (!container.getContainerState().equals(ContainerState.COMPLETE)) {
this.rmContext.getDispatcher().getEventHandler().handle(
new RMNodeCleanContainerEvent(node.getNodeID(),
container.getContainerId()));
}
}
/**
 * Recovers live containers reported by a re-registering NodeManager during
 * work-preserving RM restart. For each reported container it rebuilds the
 * RMContainer, replays it into the node, queue and attempt bookkeeping, and
 * re-marks the AM container. Containers that cannot be matched to a live
 * application/attempt are told to be cleaned up on the node.
 *
 * Fix: the guard condition previously contained a redundant
 * {@code containerReports != null} re-check inside the second disjunct.
 *
 * @param containerReports container statuses reported by the node
 * @param nm the node the containers are recovered from
 */
public void recoverContainersOnNode(List<NMContainerStatus> containerReports,
    RMNode nm) {
  writeLock.lock();
  try {
    // Nothing to do when recovery is disabled or there are no reports.
    if (!rmContext.isWorkPreservingRecoveryEnabled()
        || containerReports == null || containerReports.isEmpty()) {
      return;
    }
    for (NMContainerStatus container : containerReports) {
      ApplicationId appId =
          container.getContainerId().getApplicationAttemptId()
              .getApplicationId();
      RMApp rmApp = rmContext.getRMApps().get(appId);
      if (rmApp == null) {
        LOG.error("Skip recovering container " + container
            + " for unknown application.");
        killOrphanContainerOnNode(nm, container);
        continue;
      }
      SchedulerApplication<T> schedulerApp = applications.get(appId);
      if (schedulerApp == null) {
        LOG.info("Skip recovering container " + container
            + " for unknown SchedulerApplication. "
            + "Application current state is " + rmApp.getState());
        killOrphanContainerOnNode(nm, container);
        continue;
      }
      LOG.info("Recovering container " + container);
      SchedulerApplicationAttempt schedulerAttempt =
          schedulerApp.getCurrentAppAttempt();
      if (!rmApp.getApplicationSubmissionContext()
          .getKeepContainersAcrossApplicationAttempts()) {
        // Do not recover containers for stopped attempt or previous attempt.
        if (schedulerAttempt.isStopped() || !schedulerAttempt
            .getApplicationAttemptId().equals(
                container.getContainerId().getApplicationAttemptId())) {
          LOG.info("Skip recovering container " + container
              + " for already stopped attempt.");
          killOrphanContainerOnNode(nm, container);
          continue;
        }
      }
      Queue queue = schedulerApp.getQueue();
      // To make sure we don't face ambiguity, CS queues should be referenced
      // by their full queue names.
      String queueName = queue instanceof CSQueue ?
          ((CSQueue) queue).getQueuePath() : queue.getQueueName();
      // Create the container, then replay its recovery event.
      RMContainer rmContainer = recoverAndCreateContainer(container, nm,
          queueName);
      rmContainer.handle(
          new RMContainerRecoverEvent(container.getContainerId(), container));
      // Recover scheduler node bookkeeping.
      SchedulerNode schedulerNode = nodeTracker.getNode(nm.getNodeID());
      schedulerNode.recoverContainer(rmContainer);
      // Recover queue: update headroom etc.
      Queue queueToRecover = schedulerAttempt.getQueue();
      queueToRecover.recoverContainer(getClusterResource(), schedulerAttempt,
          rmContainer);
      // Recover scheduler attempt; count opportunistic containers in metrics.
      final boolean recovered = schedulerAttempt.recoverContainer(
          schedulerNode, rmContainer);
      if (recovered && rmContainer.getExecutionType() ==
          ExecutionType.OPPORTUNISTIC) {
        OpportunisticSchedulerMetrics.getMetrics()
            .incrAllocatedOppContainers(1);
      }
      // Mark the current running AM container based on the master container
      // id stored in the attempt.
      RMAppAttempt appAttempt = rmApp.getCurrentAppAttempt();
      if (appAttempt != null) {
        Container masterContainer = appAttempt.getMasterContainer();
        if (masterContainer != null && masterContainer.getId().equals(
            rmContainer.getContainerId())) {
          ((RMContainerImpl) rmContainer).setAMContainer(true);
        }
      }
      // If the app already asked to release this container while it was
      // unknown (pending release), honor the release now.
      if (schedulerAttempt.getPendingRelease().remove(
          container.getContainerId())) {
        rmContainer.handle(
            new RMContainerFinishedEvent(container.getContainerId(),
                SchedulerUtils
                    .createAbnormalContainerStatus(container.getContainerId(),
                        SchedulerUtils.RELEASED_CONTAINER),
                RMContainerEventType.RELEASED));
        LOG.info(container.getContainerId() + " is released by application.");
      }
    }
  } finally {
    writeLock.unlock();
  }
}
/**
 * Autocorrect container resourceRequests by decrementing the number of newly allocated containers
 * from the current container request. This also updates the newlyAllocatedContainers to be within
 * the limits of the current container resourceRequests.
 * ResourceRequests locality/resourceName is not considered while autocorrecting the container
 * request, hence when there are two types of resourceRequest which is same except for the
 * locality/resourceName, it is counted as same {@link ContainerObjectType} and the container
 * ask and number of newly allocated container is decremented accordingly.
 * For example when a client requests for 4 containers with locality/resourceName
 * as "node1", AMRMClient augments the resourceRequest into two
 * where R1(numContainer=4,locality=*) and R2(numContainer=4,locality=node1),
 * if Yarn allocated 6 containers previously, it will release 2 containers as well as
 * update the container ask to 0.
 *
 * If there is a client which directly calls Yarn (without AMRMClient) with
 * two where R1(numContainer=4,locality=*) and R2(numContainer=4,locality=node1)
 * the autocorrection may not work as expected. The use case of such client is very rare.
 *
 * <p>
 * This method is called from {@link AbstractYarnScheduler#allocate} method. It is package private
 * to be used within the scheduler package only.
 * @param resourceRequests List of resources to be allocated
 * @param application ApplicationAttempt
 */
@VisibleForTesting
protected void autoCorrectContainerAllocation(List<ResourceRequest> resourceRequests,
    SchedulerApplicationAttempt application) {
  // No-op when the feature is off, there are no container asks, or nothing
  // was newly allocated since the last heartbeat.
  if (!autoCorrectContainerAllocation || resourceRequests.isEmpty() ||
      application.newlyAllocatedContainers.isEmpty()) {
    return;
  }
  // Group the newly allocated containers by their "shape"
  // (allocationRequestId, priority, execution type, resource) so they can be
  // matched against the incoming asks regardless of locality.
  Map<ContainerObjectType, List<RMContainer>> allocatedContainerMap = new HashMap<>();
  for (RMContainer rmContainer : application.newlyAllocatedContainers) {
    Container container = rmContainer.getContainer();
    ContainerObjectType containerObjectType = new ContainerObjectType(
        container.getAllocationRequestId(), container.getPriority(),
        container.getExecutionType(), container.getResource());
    allocatedContainerMap.computeIfAbsent(containerObjectType,
        k -> new ArrayList<>()).add(rmContainer);
  }
  // Tracks how many allocated containers exceed the current ask, per shape.
  Map<ContainerObjectType, Integer> extraContainerAllocatedMap = new HashMap<>();
  // Decrement each ask by the number of containers already allocated for
  // the same shape; a negative remainder means we over-allocated.
  for (ResourceRequest request : resourceRequests) {
    ContainerObjectType containerObjectType =
        new ContainerObjectType(request.getAllocationRequestId(),
            request.getPriority(), request.getExecutionTypeRequest().getExecutionType(),
            request.getCapability());
    int numContainerAllocated = allocatedContainerMap.getOrDefault(containerObjectType,
        Collections.emptyList()).size();
    if (numContainerAllocated > 0) {
      int numContainerAsk = request.getNumContainers();
      int updatedContainerRequest = numContainerAsk - numContainerAllocated;
      if (updatedContainerRequest < 0) {
        // More containers were allocated than are now being asked for;
        // remember the surplus so those containers can be released below.
        extraContainerAllocatedMap.put(containerObjectType, Math.abs(updatedContainerRequest));
        LOG.debug("{} container of the resource type: {} will be released",
            Math.abs(updatedContainerRequest), request);
        // Clamp the remaining ask at zero.
        updatedContainerRequest = 0;
      }
      // Shrink the outstanding ask by what was already satisfied.
      LOG.debug("Updating container resourceRequests from {} to {} for the resource type: {}",
          numContainerAsk, updatedContainerRequest, request);
      request.setNumContainers(updatedContainerRequest);
    }
  }
  // Release the surplus containers: expire them and drop them from the
  // newly-allocated list so they are never handed to the AM.
  for (Map.Entry<ContainerObjectType, Integer> entry : extraContainerAllocatedMap.entrySet()) {
    ContainerObjectType containerObjectType = entry.getKey();
    int extraContainers = entry.getValue();
    List<RMContainer> allocatedContainers = allocatedContainerMap.get(containerObjectType);
    if (allocatedContainers != null) {
      for (RMContainer rmContainer : allocatedContainers) {
        if (extraContainers > 0) {
          // Move the container from ALLOCATED to EXPIRED since it is not required.
          LOG.debug("Removing extra container:{}", rmContainer.getContainer());
          completedContainer(rmContainer, SchedulerUtils.createAbnormalContainerStatus(
              rmContainer.getContainerId(), SchedulerUtils.EXPIRED_CONTAINER),
              RMContainerEventType.EXPIRE);
          application.newlyAllocatedContainers.remove(rmContainer);
          extraContainers--;
        }
      }
    }
  }
}
/**
 * Rebuilds an RMContainer from the status a NodeManager reported during
 * recovery, copying version, execution type, allocation request id, tags
 * and creation time from the report.
 *
 * @param status recovered container status from the node
 * @param node the node hosting the container
 * @param queueName full queue name to stamp on the rebuilt container
 * @return the reconstructed RMContainer (not yet transitioned/recovered)
 */
private RMContainer recoverAndCreateContainer(NMContainerStatus status,
    RMNode node, String queueName) {
  Container container =
      Container.newInstance(status.getContainerId(), node.getNodeID(),
          node.getHttpAddress(), status.getAllocatedResource(),
          status.getPriority(), null);
  container.setVersion(status.getVersion());
  container.setExecutionType(status.getExecutionType());
  container.setAllocationRequestId(status.getAllocationRequestId());
  container.setAllocationTags(status.getAllocationTags());
  ApplicationAttemptId attemptId =
      container.getId().getApplicationAttemptId();
  RMContainer rmContainer = new RMContainerImpl(container,
      SchedulerRequestKey.extractFrom(container), attemptId, node.getNodeID(),
      applications.get(attemptId.getApplicationId()).getUser(), rmContext,
      status.getCreationTime(), status.getNodeLabelExpression());
  ((RMContainerImpl) rmContainer).setQueueName(queueName);
  return rmContainer;
}
/**
 * Recover resource request back from RMContainer when a container is
 * preempted before AM pulled the same. If container is pulled by
 * AM, then RMContainer will not have resource request to recover.
 * @param rmContainer rmContainer
 */
private void recoverResourceRequestForContainer(RMContainer rmContainer) {
  ContainerRequest containerRequest = rmContainer.getContainerRequest();
  // If container state is moved to ACQUIRED, request will be empty.
  if (containerRequest == null) {
    return;
  }
  // When auto-correct container allocation is enabled, extra containers can
  // legitimately go from ALLOCATED to EXPIRED; do not re-attempt the
  // container request in that expected case.
  if (autoCorrectContainerAllocation &&
      RMContainerState.EXPIRED.equals(rmContainer.getState())) {
    return;
  }
  // Add resource request back to Scheduler ApplicationAttempt.
  // We lookup the application-attempt here again using
  // getCurrentApplicationAttempt() because there is only one app-attempt at
  // any point in the scheduler. But in corner cases, AMs can crash,
  // corresponding containers get killed and recovered to the same-attempt,
  // but because the app-attempt is extinguished right after, the recovered
  // requests don't serve any purpose, but that's okay.
  SchedulerApplicationAttempt schedulerAttempt =
      getCurrentAttemptForContainer(rmContainer.getContainerId());
  if (schedulerAttempt != null) {
    schedulerAttempt.recoverResourceRequestsForContainer(containerRequest);
  }
}
/**
 * Schedules a one-shot cleanup of the pending-release container cache to
 * run after the NM expiry interval, by which point recovering NMs have
 * either re-registered or been expired.
 */
protected void createReleaseCache() {
  TimerTask cleanupTask = new TimerTask() {
    @Override
    public void run() {
      clearPendingContainerCache();
      LOG.info("Release request cache is cleaned up");
    }
  };
  releaseCache.schedule(cleanupTask, nmExpireInterval);
}
/**
 * Drops every attempt's pending-release container set, audit-logging each
 * entry as an unauthorized/invalid release that was never matched to a
 * recovered container.
 */
@VisibleForTesting
public void clearPendingContainerCache() {
  for (SchedulerApplication<T> app : applications.values()) {
    T attempt = app.getCurrentAppAttempt();
    if (attempt == null) {
      continue;
    }
    for (ContainerId containerId : attempt.getPendingRelease()) {
      RMAuditLogger.logFailure(app.getUser(),
          AuditConstants.RELEASE_CONTAINER,
          "Unauthorized access or invalid container", "Scheduler",
          "Trying to release container not owned by app "
              + "or with invalid id.", attempt.getApplicationId(),
          containerId, null);
    }
    attempt.getPendingRelease().clear();
  }
}
/**
 * Cleans up a completed container. GUARANTEED containers go through the
 * scheduler-specific {@code completedContainerInternal} path (which also
 * completes any reserved in-flight update temp container); OPPORTUNISTIC
 * containers are finished inline here. In both cases the container's
 * outstanding resource request is recovered afterwards so the requester
 * does not have to re-ask.
 *
 * @param rmContainer the container being completed; may be null if unknown
 * @param containerStatus the final status to record
 * @param event the completion event type (FINISHED/KILL/EXPIRE/...)
 */
@VisibleForTesting
@Private
// clean up a completed container
public void completedContainer(RMContainer rmContainer,
    ContainerStatus containerStatus, RMContainerEventType event) {
  // Tolerate unknown containers: just log and return.
  if (rmContainer == null) {
    LOG.info("Container " + containerStatus.getContainerId()
        + " completed with event " + event
        + ", but corresponding RMContainer doesn't exist.");
    return;
  }
  if (rmContainer.getExecutionType() == ExecutionType.GUARANTEED) {
    completedContainerInternal(rmContainer, containerStatus, event);
    completeOustandingUpdatesWhichAreReserved(
        rmContainer, containerStatus, event);
  } else {
    // OPPORTUNISTIC path: finish the container, detach it from the attempt
    // and release it from the node directly.
    ContainerId containerId = rmContainer.getContainerId();
    // Inform the container
    rmContainer.handle(
        new RMContainerFinishedEvent(containerId, containerStatus, event));
    SchedulerApplicationAttempt schedulerAttempt =
        getCurrentAttemptForContainer(containerId);
    if (schedulerAttempt != null) {
      if (schedulerAttempt.removeRMContainer(containerId)) {
        OpportunisticSchedulerMetrics.getMetrics()
            .incrReleasedOppContainers(1);
      }
    }
    LOG.debug("Completed container: {} in state: {} event:{}",
        rmContainer.getContainerId(), rmContainer.getState(), event);
    SchedulerNode node = getSchedulerNode(rmContainer.getNodeId());
    if (node != null) {
      node.releaseContainer(rmContainer.getContainerId(), false);
    }
  }
  // If the container is getting killed in ACQUIRED state, the requester (AM
  // for regular containers and RM itself for AM container) will not know what
  // happened. Simply add the ResourceRequest back again so that requester
  // doesn't need to do anything conditionally.
  recoverResourceRequestForContainer(rmContainer);
}
/**
 * Optimization: if there is an in-flight container update reserved on the
 * completed container's node, complete the associated temp container now.
 * These temp containers are removed anyway when the app completes, but
 * releasing them when the real container completes lets the scheduler
 * reallocate those resources sooner.
 *
 * NOTE(review): method name misspells "Outstanding" as "Oustanding";
 * renaming would need a coordinated change with its caller.
 *
 * @param rmContainer the just-completed container
 * @param containerStatus its final status (state/diagnostics/exit reused
 *        for the temp container)
 * @param event the completion event type to propagate
 */
private void completeOustandingUpdatesWhichAreReserved(
    RMContainer rmContainer, ContainerStatus containerStatus,
    RMContainerEventType event) {
  N schedulerNode = getSchedulerNode(rmContainer.getNodeId());
  if (schedulerNode != null) {
    RMContainer resContainer = schedulerNode.getReservedContainer();
    if (resContainer != null && resContainer.getReservedSchedulerKey() != null) {
      ContainerId containerToUpdate = resContainer
          .getReservedSchedulerKey().getContainerToUpdate();
      // Only act when the reservation targets the container that completed.
      if (containerToUpdate != null &&
          containerToUpdate.equals(containerStatus.getContainerId())) {
        completedContainerInternal(resContainer,
            ContainerStatus.newInstance(resContainer.getContainerId(),
                containerStatus.getState(), containerStatus
                    .getDiagnostics(),
                containerStatus.getExitStatus()), event);
      }
    }
  }
}
/**
 * Scheduler-specific cleanup of a completed container (queue accounting,
 * node release, etc.); implemented by each concrete scheduler.
 *
 * @param rmContainer the container to clean up
 * @param containerStatus its final status
 * @param event the completion event type
 */
protected abstract void completedContainerInternal(RMContainer rmContainer,
    ContainerStatus containerStatus, RMContainerEventType event);
/**
 * Releases the given containers on behalf of an application attempt.
 * Unknown containers shortly after RM start are cached as pending releases
 * (they may still be recovering); unknown containers later are audit-logged
 * as invalid. Known containers are completed with RELEASED status.
 *
 * @param containers container ids the AM asked to release
 * @param attempt the releasing application attempt
 */
protected void releaseContainers(List<ContainerId> containers,
    SchedulerApplicationAttempt attempt) {
  for (ContainerId containerId : containers) {
    RMContainer rmContainer = getRMContainer(containerId);
    if (rmContainer == null) {
      if (System.currentTimeMillis() - ResourceManager.getClusterTimeStamp()
          < nmExpireInterval) {
        LOG.info(containerId + " doesn't exist. Add the container"
            + " to the release request cache as it maybe on recovery.");
        attempt.getPendingRelease().add(containerId);
      } else {
        RMAuditLogger.logFailure(attempt.getUser(),
            AuditConstants.RELEASE_CONTAINER,
            "Unauthorized access or invalid container", "Scheduler",
            "Trying to release container not owned by app or with invalid id.",
            attempt.getApplicationId(), containerId, null);
      }
    }
    // Intentionally falls through with a possibly-null rmContainer:
    // completedContainer is null-tolerant and only logs in that case.
    completedContainer(rmContainer,
        SchedulerUtils.createAbnormalContainerStatus(containerId,
            SchedulerUtils.RELEASED_CONTAINER), RMContainerEventType.RELEASED);
  }
}
/**
 * Returns the scheduler's view of the given node, or null if untracked.
 *
 * @param nodeId node to look up
 * @return the tracked scheduler node, or null
 */
@Override
public N getSchedulerNode(NodeId nodeId) {
  return nodeTracker.getNode(nodeId);
}
/**
 * Moves every pending/running application from one queue to another by
 * emitting an APP_MOVE event per application. Fails fast if the
 * destination queue cannot be resolved.
 *
 * @param sourceQueue queue whose applications are moved
 * @param destQueue target queue (must be a valid leaf queue)
 * @throws YarnException if the destination queue lookup fails
 */
@Override
public void moveAllApps(String sourceQueue, String destQueue)
    throws YarnException {
  writeLock.lock();
  try {
    // Validate the destination queue up front.
    try {
      getQueueInfo(destQueue, false, false);
    } catch (IOException e) {
      LOG.warn(e.toString());
      throw new YarnException(e);
    }
    // Fire a move event for each app currently in the source queue.
    for (ApplicationAttemptId appAttemptId : getAppsFromQueue(sourceQueue)) {
      ApplicationId appId = appAttemptId.getApplicationId();
      this.rmContext.getDispatcher().getEventHandler().handle(
          new RMAppManagerEvent(appId, destQueue,
              RMAppManagerEventType.APP_MOVE));
    }
  } finally {
    writeLock.unlock();
  }
}
/**
 * Kills every pending/running application in the given queue by emitting a
 * KILL event per application.
 *
 * @param queueName queue whose applications are killed
 * @throws YarnException if the queue's applications cannot be enumerated
 */
@Override
public void killAllAppsInQueue(String queueName)
    throws YarnException {
  writeLock.lock();
  try {
    for (ApplicationAttemptId app : getAppsFromQueue(queueName)) {
      String diagnostics =
          "Application killed due to expiry of reservation queue "
              + queueName + ".";
      this.rmContext.getDispatcher().getEventHandler().handle(
          new RMAppEvent(app.getApplicationId(), RMAppEventType.KILL,
              diagnostics));
    }
  } finally {
    writeLock.unlock();
  }
}
/**
 * Process resource update on a node: updates the node-label manager, swaps
 * the node's total resource, arms the overcommit timeout and, if requested
 * to take effect immediately, preempts/kills containers that no longer fit.
 *
 * @param nm RMNode.
 * @param resourceOption new total resource plus overcommit timeout.
 */
public void updateNodeResource(RMNode nm,
    ResourceOption resourceOption) {
  writeLock.lock();
  try {
    SchedulerNode node = getSchedulerNode(nm.getNodeID());
    if (node == null) {
      LOG.info("Node: " + nm.getNodeID() + " has already been taken out of " +
          "scheduling. Skip updating its resource");
      return;
    }
    Resource newResource = resourceOption.getResource();
    final int timeout = resourceOption.getOverCommitTimeout();
    Resource oldResource = node.getTotalResource();
    if (!oldResource.equals(newResource)) {
      // Notify NodeLabelsManager about this change
      rmContext.getNodeLabelManager().updateNodeResource(nm.getNodeID(),
          newResource);
      // Log resource change
      LOG.info("Update resource on node: {} from: {}, to: {} in {} ms",
          node.getNodeName(), oldResource, newResource, timeout);
      // Remove/re-add around the mutation so the tracker's aggregate
      // cluster resource stays consistent with the node's new total.
      nodeTracker.removeNode(nm.getNodeID());
      node.updateTotalResource(newResource);
      node.setOvercommitTimeOut(timeout);
      // timeout == 0 means "take effect immediately": kill rather than
      // merely mark for preemption.
      signalContainersIfOvercommitted(node, timeout == 0);
      nodeTracker.addNode((N) node);
    } else{
      // Log resource change
      LOG.warn("Update resource on node: " + node.getNodeName()
          + " with the same resource: " + newResource);
    }
  } finally {
    writeLock.unlock();
  }
}
/**
 * {@inheritDoc}
 *
 * <p>The base scheduler only accounts for memory; subclasses supporting
 * additional resource types override this.
 */
@Override
public EnumSet<SchedulerResourceTypes> getSchedulingResourceTypes() {
  return EnumSet.of(SchedulerResourceTypes.MEMORY);
}
/**
 * Unsupported in this base scheduler; reservation-capable schedulers
 * must override.
 *
 * @throws YarnException always, since reservations are unsupported here
 */
@Override
public Set<String> getPlanQueues() throws YarnException {
  String message = getClass().getSimpleName()
      + " does not support reservations";
  throw new YarnException(message);
}
/**
 * By default placement constraint is disabled. Schedulers which support
 * placement constraint can override this value.
 * @return enabled or not
 */
public boolean placementConstraintEnabled() {
  return false;
}
/**
 * Pushes a new configured maximum allocation into the node tracker.
 *
 * @param newMaxAlloc the new cluster-wide maximum allocation
 */
protected void refreshMaximumAllocation(Resource newMaxAlloc) {
  nodeTracker.setConfiguredMaxAllocation(newMaxAlloc);
}
/**
 * Returns all outstanding resource requests of the given attempt, or null
 * if the attempt is unknown.
 *
 * @param attemptId attempt to query
 * @return the attempt's pending resource requests, or null
 */
@Override
public List<ResourceRequest> getPendingResourceRequestsForAttempt(
    ApplicationAttemptId attemptId) {
  SchedulerApplicationAttempt attempt = getApplicationAttempt(attemptId);
  if (attempt == null) {
    return null;
  }
  return attempt.getAppSchedulingInfo().getAllResourceRequests();
}
/**
 * Returns all outstanding scheduling requests of the given attempt, or
 * null if the attempt is unknown.
 *
 * @param attemptId attempt to query
 * @return the attempt's pending scheduling requests, or null
 */
@Override
public List<SchedulingRequest> getPendingSchedulingRequestsForAttempt(
    ApplicationAttemptId attemptId) {
  SchedulerApplicationAttempt attempt = getApplicationAttempt(attemptId);
  if (attempt == null) {
    return null;
  }
  return attempt.getAppSchedulingInfo().getAllSchedulingRequests();
}
/**
 * Placeholder implementation: always resolves to priority 0. Schedulers
 * that support application priority override this.
 */
@Override
public Priority checkAndGetApplicationPriority(
    Priority priorityRequestedByApp, UserGroupInformation user,
    String queuePath, ApplicationId applicationId) throws YarnException {
  // Dummy Implementation till Application Priority changes are done in
  // specific scheduler.
  return Priority.newInstance(0);
}
/**
 * Placeholder implementation: always resolves to priority 0. Schedulers
 * that support application priority override this.
 */
@Override
public Priority updateApplicationPriority(Priority newPriority,
    ApplicationId applicationId, SettableFuture<Object> future,
    UserGroupInformation user)
    throws YarnException {
  // Dummy Implementation till Application Priority changes are done in
  // specific scheduler.
  return Priority.newInstance(0);
}
/**
 * Returns the configured cluster-level maximum application priority.
 */
@Override
public Priority getMaxClusterLevelAppPriority() {
  return maxClusterLevelAppPriority;
}
/**
 * Reads the cluster-level maximum application priority from configuration,
 * falling back to the YARN default when unset.
 *
 * @param conf configuration to read from
 * @return the configured maximum priority
 */
private Priority getMaxPriorityFromConf(Configuration conf) {
  int maxPriority = conf.getInt(
      YarnConfiguration.MAX_CLUSTER_LEVEL_APPLICATION_PRIORITY,
      YarnConfiguration.DEFAULT_CLUSTER_LEVEL_APPLICATION_PRIORITY);
  return Priority.newInstance(maxPriority);
}
/**
 * Refreshes the cluster-level maximum application priority from the given
 * configuration.
 *
 * Fix: corrects the log-message typo "cluste" to "cluster".
 *
 * @param conf configuration to read the new maximum priority from
 * @throws YarnException if the configured value cannot be parsed
 */
@Override
public void setClusterMaxPriority(Configuration conf)
    throws YarnException {
  try {
    maxClusterLevelAppPriority = getMaxPriorityFromConf(conf);
  } catch (NumberFormatException e) {
    throw new YarnException(e);
  }
  LOG.info("Updated the cluster max priority to maxClusterLevelAppPriority = "
      + maxClusterLevelAppPriority);
}
/**
 * Sanity-checks an increase/decrease request and wraps it as a
 * {@link SchedContainerChangeRequest}.
 *
 * <pre>
 * - Returns non-null value means validation succeeded
 * - Throw exception when any other error happens
 * </pre>
 *
 * @param request the container update request to validate
 * @param increase whether this is an increase (vs decrease) request
 * @return the validated change request
 * @throws YarnException if the referenced container no longer exists
 */
private SchedContainerChangeRequest createSchedContainerChangeRequest(
    UpdateContainerRequest request, boolean increase)
    throws YarnException {
  ContainerId containerId = request.getContainerId();
  RMContainer rmContainer = getRMContainer(containerId);
  if (rmContainer == null) {
    String msg =
        "Failed to get rmContainer for "
            + (increase ? "increase" : "decrease")
            + " request, with container-id=" + containerId;
    throw new InvalidResourceRequestException(msg);
  }
  SchedulerNode schedulerNode =
      getSchedulerNode(rmContainer.getAllocatedNode());
  return new SchedContainerChangeRequest(
      this.rmContext, schedulerNode, rmContainer, request.getCapability());
}
/**
 * Converts a batch of container update requests into validated
 * {@link SchedContainerChangeRequest}s. Requests that fail validation are
 * logged and skipped rather than failing the whole batch.
 *
 * Cleanups: diamond operator for the result list; removed the redundant
 * {@code sr = null} initialization and {@code continue} by adding inside
 * the try.
 *
 * @param changeRequests update requests from the AM
 * @param increase whether these are increase (vs decrease) requests
 * @return the successfully validated change requests
 */
protected List<SchedContainerChangeRequest>
    createSchedContainerChangeRequests(
        List<UpdateContainerRequest> changeRequests,
        boolean increase) {
  List<SchedContainerChangeRequest> schedulerChangeRequests =
      new ArrayList<>();
  for (UpdateContainerRequest r : changeRequests) {
    try {
      schedulerChangeRequests.add(
          createSchedContainerChangeRequest(r, increase));
    } catch (YarnException e) {
      LOG.warn("Error happens when checking increase request, Ignoring.."
          + " exception=", e);
    }
  }
  return schedulerChangeRequests;
}
/**
 * Returns the scheduler's activities manager (scheduling diagnostics).
 */
public ActivitiesManager getActivitiesManager() {
  return this.activitiesManager;
}
/**
 * Returns the clock used by the scheduler (injectable for tests).
 */
public Clock getClock() {
  return clock;
}
/**
 * Replaces the scheduler's clock; intended for tests only.
 *
 * @param clock the clock to use
 */
@VisibleForTesting
public void setClock(Clock clock) {
  this.clock = clock;
}
/**
 * Returns the tracked scheduler node for the given id, or null. Lock-free:
 * delegates to the concurrent node tracker.
 *
 * @param nodeId node to look up
 * @return the scheduler node, or null if untracked
 */
@Lock(Lock.NoLock.class)
public SchedulerNode getNode(NodeId nodeId) {
  return nodeTracker.getNode(nodeId);
}
/**
 * Get lists of new containers from NodeManager and process them:
 * newly-launched containers are marked launched on the node,
 * newly-increased containers are applied, and existing containers have
 * their exposed-ports metadata refreshed from the heartbeat.
 *
 * @param nm The RMNode corresponding to the NodeManager
 * @param schedulerNode the scheduler's view of that node (may be null if
 *        the node was decommissioned concurrently)
 * @return list of completed containers pulled from the node
 */
private List<ContainerStatus> updateNewContainerInfo(RMNode nm,
    SchedulerNode schedulerNode) {
  // Drain the node's queued container updates into three buckets.
  List<UpdatedContainerInfo> containerInfoList = nm.pullContainerUpdates();
  List<ContainerStatus> newlyLaunchedContainers =
      new ArrayList<>();
  List<ContainerStatus> completedContainers =
      new ArrayList<>();
  List<Map.Entry<ApplicationId, ContainerStatus>> updateExistContainers =
      new ArrayList<>();
  for(UpdatedContainerInfo containerInfo : containerInfoList) {
    newlyLaunchedContainers
        .addAll(containerInfo.getNewlyLaunchedContainers());
    completedContainers.addAll(containerInfo.getCompletedContainers());
    updateExistContainers.addAll(containerInfo.getUpdateContainers());
  }
  // Processing the newly launched containers
  for (ContainerStatus launchedContainer : newlyLaunchedContainers) {
    containerLaunchedOnNode(launchedContainer.getContainerId(),
        schedulerNode);
  }
  // Processing the newly increased containers
  List<Container> newlyIncreasedContainers =
      nm.pullNewlyIncreasedContainers();
  for (Container container : newlyIncreasedContainers) {
    containerIncreasedOnNode(container.getId(), schedulerNode, container);
  }
  // Processing the update exist containers: copy exposed-port info
  // (JSON from the NM) onto the RMContainer, once.
  for (Map.Entry<ApplicationId, ContainerStatus> c : updateExistContainers) {
    SchedulerApplication<T> app = applications.get(c.getKey());
    ContainerId containerId = c.getValue().getContainerId();
    if (app == null || app.getCurrentAppAttempt() == null) {
      continue;
    }
    RMContainer rmContainer
        = app.getCurrentAppAttempt().getRMContainer(containerId);
    if (rmContainer == null) {
      continue;
    }
    // Exposed ports are already set for the container, skip.
    if (rmContainer.getExposedPorts() != null &&
        rmContainer.getExposedPorts().size() > 0) {
      continue;
    }
    String strExposedPorts = c.getValue().getExposedPorts();
    if (null != strExposedPorts && !strExposedPorts.isEmpty()) {
      // Parse the NM-provided JSON mapping of port -> host bindings.
      Gson gson = new Gson();
      Map<String, List<Map<String, String>>> exposedPorts =
          gson.fromJson(strExposedPorts,
              new TypeToken<Map<String, List<Map<String, String>>>>()
              {}.getType());
      LOG.info("update exist container " + containerId.getContainerId()
          + ", strExposedPorts = " + strExposedPorts);
      rmContainer.setExposedPorts(exposedPorts);
    }
  }
  return completedContainers;
}
/**
 * Process completed container list: completes each container, releases it
 * from the node, accumulates the freed resources, and acknowledges the NM
 * for containers the RM no longer tracks.
 *
 * @param completedContainers Extracted list of completed containers
 * @param releasedResources accumulator the freed resources are added into
 *        (mutated in place)
 * @param nodeId NodeId corresponding to the NodeManager
 * @param schedulerNode the scheduler's view of the node (may be null)
 * @return The total number of released (RM-tracked) containers
 */
private int updateCompletedContainers(List<ContainerStatus> completedContainers,
    Resource releasedResources, NodeId nodeId, SchedulerNode schedulerNode) {
  int releasedContainers = 0;
  List<ContainerId> untrackedContainerIdList = new ArrayList<ContainerId>();
  for (ContainerStatus completedContainer : completedContainers) {
    ContainerId containerId = completedContainer.getContainerId();
    LOG.debug("Container FINISHED: {}", containerId);
    RMContainer container = getRMContainer(containerId);
    // completedContainer tolerates a null RMContainer.
    completedContainer(container,
        completedContainer, RMContainerEventType.FINISHED);
    if (schedulerNode != null) {
      schedulerNode.releaseContainer(containerId, true);
    }
    if (container != null) {
      releasedContainers++;
      // Count both allocated and (if any) reserved resources as freed.
      Resource ars = container.getAllocatedResource();
      if (ars != null) {
        Resources.addTo(releasedResources, ars);
      }
      Resource rrs = container.getReservedResource();
      if (rrs != null) {
        Resources.addTo(releasedResources, rrs);
      }
    } else {
      // Add containers which are untracked by RM.
      untrackedContainerIdList.add(containerId);
    }
  }
  // Acknowledge NM to remove RM-untracked-containers from NM context.
  if (!untrackedContainerIdList.isEmpty()) {
    this.rmContext.getDispatcher().getEventHandler()
        .handle(new RMNodeFinishedContainersPulledByAMEvent(nodeId,
            untrackedContainerIdList));
  }
  return releasedContainers;
}
/**
 * Update schedulerHealth information with the resources and container
 * count released during the latest node update.
 *
 * @param releasedResources Reference resource object for completed containers
 * @param releasedContainers Count of released containers
 */
protected void updateSchedulerHealthInformation(Resource releasedResources,
    int releasedContainers) {
  schedulerHealth.updateSchedulerReleaseDetails(getLastNodeUpdateTime(),
      releasedResources);
  schedulerHealth.updateSchedulerReleaseCounts(releasedContainers);
}
/**
 * Update container and node utilization information on the scheduler's
 * view of the node from the latest NodeManager heartbeat.
 *
 * @param nm The NodeManager to update from
 * @param schedulerNode the scheduler node to copy utilization onto
 */
protected void updateNodeResourceUtilization(RMNode nm,
    SchedulerNode schedulerNode) {
  // Updating node resource utilization
  schedulerNode.setAggregatedContainersUtilization(
      nm.getAggregatedContainersUtilization());
  schedulerNode.setNodeUtilization(nm.getNodeUtilization());
}
/**
 * Process a heartbeat update from a node: drains launched/completed
 * container info, notifies the node, accounts released resources, shrinks
 * a decommissioning node's total down to its allocation, refreshes health
 * and utilization, and enforces overcommit.
 *
 * Cleanup: the two consecutive identical {@code if (schedulerNode != null)}
 * blocks at the tail were merged into one.
 *
 * @param nm The RMNode corresponding to the NodeManager
 */
protected void nodeUpdate(RMNode nm) {
  LOG.debug("nodeUpdate: {} cluster capacity: {}",
      nm, getClusterResource());
  // Process new container information
  // NOTICE: it is possible to not find the NodeID as a node can be
  // decommissioned at the same time. Skip updates if node is null.
  SchedulerNode schedulerNode = getNode(nm.getNodeID());
  List<ContainerStatus> completedContainers = updateNewContainerInfo(nm,
      schedulerNode);
  // Notify Scheduler Node updated.
  if (schedulerNode != null) {
    schedulerNode.notifyNodeUpdate();
  }
  // Process completed containers
  Resource releasedResources = Resource.newInstance(0, 0);
  int releasedContainers = updateCompletedContainers(completedContainers,
      releasedResources, nm.getNodeID(), schedulerNode);
  // If the node is decommissioning, send an update to have the total
  // resource equal to the used resource, so no available resource to
  // schedule.
  if (nm.getState() == NodeState.DECOMMISSIONING && schedulerNode != null
      && schedulerNode.getTotalResource().compareTo(
          schedulerNode.getAllocatedResource()) != 0) {
    this.rmContext
        .getDispatcher()
        .getEventHandler()
        .handle(
            new RMNodeResourceUpdateEvent(nm.getNodeID(), ResourceOption
                .newInstance(schedulerNode.getAllocatedResource(), 0)));
  }
  updateSchedulerHealthInformation(releasedResources, releasedContainers);
  if (schedulerNode != null) {
    updateNodeResourceUtilization(nm, schedulerNode);
    signalContainersIfOvercommitted(schedulerNode, true);
  }
  // Now node data structures are up-to-date and ready for scheduling.
  if(LOG.isDebugEnabled()) {
    LOG.debug(
        "Node being looked for scheduling " + nm + " availableResource: " +
            (schedulerNode == null ? "unknown (decommissioned)" :
                schedulerNode.getUnallocatedResource()));
  }
}
/**
 * Check if the node is overcommitted and needs to remove containers. If
 * it is overcommitted, it will kill or preempt (notify the AM to stop them)
 * containers. It also takes into account the overcommit timeout. It only
 * notifies the application to preempt a container if the timeout hasn't
 * passed. If the timeout has passed, it tries to kill the containers. If
 * there is no timeout, it doesn't do anything and just prevents new
 * allocations.
 *
 * This action is taken when the change of resources happens (to preempt
 * containers or killing them if specified) or when the node heart beats
 * (for killing only).
 *
 * @param schedulerNode The node to check whether is overcommitted.
 * @param kill If the container should be killed or just notify the AM.
 */
private void signalContainersIfOvercommitted(
    SchedulerNode schedulerNode, boolean kill) {
  // If there is no time out, we don't do anything
  if (!schedulerNode.isOvercommitTimeOutSet()) {
    return;
  }
  SchedulerEventType eventType =
      SchedulerEventType.MARK_CONTAINER_FOR_PREEMPTION;
  if (kill) {
    eventType = SchedulerEventType.MARK_CONTAINER_FOR_KILLABLE;
    // If it hasn't timed out yet, don't kill
    if (!schedulerNode.isOvercommitTimedOut()) {
      return;
    }
  }
  // Check if the node is overcommitted (negative resources). Work on a
  // copy of the unallocated resource so it can be adjusted as containers
  // are selected.
  ResourceCalculator rc = getResourceCalculator();
  Resource unallocated = Resource.newInstance(
      schedulerNode.getUnallocatedResource());
  if (Resources.fitsIn(rc, ZERO_RESOURCE, unallocated)) {
    return;
  }
  LOG.info("{} is overcommitted ({}), preempt/kill containers",
      schedulerNode.getNodeID(), unallocated);
  // Signal containers one by one until enough resource would be freed.
  for (RMContainer container : schedulerNode.getContainersToKill()) {
    LOG.info("Send {} to {} to free up {}", eventType,
        container.getContainerId(), container.getAllocatedResource());
    ApplicationAttemptId appId = container.getApplicationAttemptId();
    ContainerPreemptEvent event =
        new ContainerPreemptEvent(appId, container, eventType);
    this.rmContext.getDispatcher().getEventHandler().handle(event);
    Resources.addTo(unallocated, container.getAllocatedResource());
    if (Resources.fitsIn(rc, ZERO_RESOURCE, unallocated)) {
      LOG.debug("Enough unallocated resources {}", unallocated);
      break;
    }
  }
}
/**
 * Normalizes a requested resource against the scheduler's configured
 * bounds: the minimum capability is used both as the lower bound and as
 * the increment to round to.
 *
 * @param requestedResource the resource as requested
 * @param maxResourceCapability the upper bound to clip to
 * @return the normalized resource
 */
@Override
public Resource getNormalizedResource(Resource requestedResource,
    Resource maxResourceCapability) {
  return SchedulerUtils.getNormalizedResource(requestedResource,
      getResourceCalculator(),
      getMinimumResourceCapability(),
      maxResourceCapability,
      getMinimumResourceCapability());
}
/**
 * Normalize a list of resource requests against the cluster-wide maximum
 * allocation (no queue-specific maximum).
 *
 * @param asks resource requests
 */
protected void normalizeResourceRequests(List<ResourceRequest> asks) {
  normalizeResourceRequests(asks, null);
}
/**
 * Normalize a list of resource requests in place, clipping each ask's
 * capability to the given queue's maximum resource allocation.
 *
 * @param asks resource requests to normalize (mutated in place)
 * @param queueName queue whose maximum allocation applies; null for the
 *        cluster-wide maximum
 */
protected void normalizeResourceRequests(List<ResourceRequest> asks,
    String queueName) {
  Resource maxAllocation = getMaximumResourceCapability(queueName);
  for (ResourceRequest ask : asks) {
    Resource normalized =
        getNormalizedResource(ask.getCapability(), maxAllocation);
    ask.setCapability(normalized);
  }
}
/**
 * Dispatches the four kinds of container update requests from an AM
 * heartbeat. Promotions/demotions are modeled as increases/decreases
 * between zero and the target resources.
 *
 * @param appAttempt the attempt issuing the updates
 * @param updates the batched promotion/increase/demotion/decrease requests
 */
protected void handleContainerUpdates(
    SchedulerApplicationAttempt appAttempt, ContainerUpdates updates) {
  List<UpdateContainerRequest> promotionRequests =
      updates.getPromotionRequests();
  if (promotionRequests != null && !promotionRequests.isEmpty()) {
    LOG.info("Promotion Update requests : " + promotionRequests);
    // Promotion is technically an increase request from
    // 0 resources to target resources.
    handleIncreaseRequests(appAttempt, promotionRequests);
  }
  List<UpdateContainerRequest> increaseRequests =
      updates.getIncreaseRequests();
  if (increaseRequests != null && !increaseRequests.isEmpty()) {
    LOG.info("Resource increase requests : " + increaseRequests);
    handleIncreaseRequests(appAttempt, increaseRequests);
  }
  List<UpdateContainerRequest> demotionRequests =
      updates.getDemotionRequests();
  if (demotionRequests != null && !demotionRequests.isEmpty()) {
    LOG.info("Demotion Update requests : " + demotionRequests);
    // Demotion is technically a decrease request from initial
    // to 0 resources
    handleDecreaseRequests(appAttempt, demotionRequests);
  }
  List<UpdateContainerRequest> decreaseRequests =
      updates.getDecreaseRequests();
  if (decreaseRequests != null && !decreaseRequests.isEmpty()) {
    LOG.info("Resource decrease requests : " + decreaseRequests);
    handleDecreaseRequests(appAttempt, decreaseRequests);
  }
}
  /**
   * Processes container promotion / resource-increase requests for one
   * application attempt. Each request is matched against the live
   * {@link RMContainer}; requests for containers that no longer exist are
   * dropped with a warning. Valid requests are recorded in the attempt's
   * update context; if a conflicting increase is already outstanding for
   * the same container, an UPDATE_OUTSTANDING_ERROR is reported back to
   * the attempt instead.
   *
   * @param applicationAttempt attempt that issued the requests
   * @param updateContainerRequests promotion or increase requests
   */
  private void handleIncreaseRequests(
      SchedulerApplicationAttempt applicationAttempt,
      List<UpdateContainerRequest> updateContainerRequests) {
    for (UpdateContainerRequest uReq : updateContainerRequests) {
      RMContainer rmContainer =
          rmContext.getScheduler().getRMContainer(uReq.getContainerId());
      // Check if this is a container update
      // And not in the middle of a Demotion
      if (rmContainer != null) {
        // Check if this is an executionType change request
        // If so, fix the rr to make it look like a normal rr
        // with relaxLocality=false and numContainers=1
        SchedulerNode schedulerNode = rmContext.getScheduler()
            .getSchedulerNode(rmContainer.getContainer().getNodeId());
        // Add only if no outstanding promote requests exist.
        if (!applicationAttempt.getUpdateContext()
            .checkAndAddToOutstandingIncreases(
                rmContainer, schedulerNode, uReq)) {
          // An increase is already pending for this container; surface
          // the conflict to the application rather than queueing another.
          applicationAttempt.addToUpdateContainerErrors(
              UpdateContainerError.newInstance(
                  RMServerUtils.UPDATE_OUTSTANDING_ERROR, uReq));
        }
      } else {
        // Container already completed or never existed; nothing to promote.
        LOG.warn("Cannot promote non-existent (or completed) Container ["
            + uReq.getContainerId() + "]");
      }
    }
  }
  /**
   * Processes container demotion / resource-decrease requests for one
   * application attempt. Each request is validated against the live
   * {@link RMContainer} and recorded as an outstanding decrease; on
   * success, a replacement container is created — an OPPORTUNISTIC one
   * for DEMOTE_EXECUTION_TYPE requests, or a decreased-resource one
   * otherwise — and handed to the attempt. Conflicting outstanding
   * decreases produce an UPDATE_OUTSTANDING_ERROR; requests for missing
   * containers are dropped with a warning.
   *
   * @param appAttempt attempt that issued the requests
   * @param demotionRequests demotion or decrease requests
   */
  private void handleDecreaseRequests(SchedulerApplicationAttempt appAttempt,
      List<UpdateContainerRequest> demotionRequests) {
    OpportunisticContainerContext oppCntxt =
        appAttempt.getOpportunisticContainerContext();
    for (UpdateContainerRequest uReq : demotionRequests) {
      RMContainer rmContainer =
          rmContext.getScheduler().getRMContainer(uReq.getContainerId());
      if (rmContainer != null) {
        SchedulerNode schedulerNode = rmContext.getScheduler()
            .getSchedulerNode(rmContainer.getContainer().getNodeId());
        // Only proceed if no other decrease is outstanding for this
        // container on this node.
        if (appAttempt.getUpdateContext()
            .checkAndAddToOutstandingDecreases(uReq, schedulerNode,
                rmContainer.getContainer())) {
          if (ContainerUpdateType.DEMOTE_EXECUTION_TYPE ==
              uReq.getContainerUpdateType()) {
            // Demotion: swap in an OPPORTUNISTIC replacement container.
            RMContainer demotedRMContainer =
                createDemotedRMContainer(appAttempt, oppCntxt, rmContainer);
            if (demotedRMContainer != null) {
              OpportunisticSchedulerMetrics.getMetrics()
                  .incrAllocatedOppContainers(1);
              appAttempt.addToNewlyDemotedContainers(
                  uReq.getContainerId(), demotedRMContainer);
            }
          } else {
            // Plain resource decrease: swap in a smaller container.
            RMContainer demotedRMContainer = createDecreasedRMContainer(
                appAttempt, uReq, rmContainer);
            appAttempt.addToNewlyDecreasedContainers(
                uReq.getContainerId(), demotedRMContainer);
          }
        } else {
          // A decrease is already pending; report the conflict back.
          appAttempt.addToUpdateContainerErrors(
              UpdateContainerError.newInstance(
                  RMServerUtils.UPDATE_OUTSTANDING_ERROR, uReq));
        }
      } else {
        // Container already completed or never existed; nothing to do.
        LOG.warn("Cannot demote/decrease non-existent (or completed) " +
            "Container [" + uReq.getContainerId() + "]");
      }
    }
  }
  /**
   * Builds a replacement {@link RMContainer} with a fresh container id and
   * zero ({@code Resources.none()}) capability on the same node as the
   * original, registers it with the attempt, and allocates it on the node.
   *
   * @param appAttempt attempt owning the container
   * @param uReq the decrease request that triggered this
   * @param rmContainer the container being decreased
   * @return the newly created decreased container
   */
  private RMContainer createDecreasedRMContainer(
      SchedulerApplicationAttempt appAttempt, UpdateContainerRequest uReq,
      RMContainer rmContainer) {
    SchedulerRequestKey sk =
        SchedulerRequestKey.extractFrom(rmContainer.getContainer());
    Container decreasedContainer = BuilderUtils.newContainer(
        ContainerId.newContainerId(appAttempt.getApplicationAttemptId(),
            appAttempt.getNewContainerId()),
        rmContainer.getContainer().getNodeId(),
        rmContainer.getContainer().getNodeHttpAddress(),
        Resources.none(),
        sk.getPriority(), null, rmContainer.getExecutionType(),
        sk.getAllocationRequestId());
    decreasedContainer.setVersion(rmContainer.getContainer().getVersion());
    RMContainer newRmContainer = new RMContainerImpl(decreasedContainer,
        sk, appAttempt.getApplicationAttemptId(),
        decreasedContainer.getNodeId(), appAttempt.getUser(), rmContext,
        rmContainer.isRemotelyAllocated());
    // NOTE(review): the attempt is handed the ORIGINAL rmContainer under the
    // new container id, while the node allocates newRmContainer. Asymmetry
    // looks suspicious — confirm whether addRMContainer should receive
    // newRmContainer instead.
    appAttempt.addRMContainer(decreasedContainer.getId(), rmContainer);
    ((AbstractYarnScheduler) rmContext.getScheduler()).getNode(
        decreasedContainer.getNodeId()).allocateContainer(newRmContainer);
    return newRmContainer;
  }
private RMContainer createDemotedRMContainer(
SchedulerApplicationAttempt appAttempt,
OpportunisticContainerContext oppCntxt,
RMContainer rmContainer) {
SchedulerRequestKey sk =
SchedulerRequestKey.extractFrom(rmContainer.getContainer());
Container demotedContainer = BuilderUtils.newContainer(
ContainerId.newContainerId(appAttempt.getApplicationAttemptId(),
oppCntxt.getContainerIdGenerator().generateContainerId()),
rmContainer.getContainer().getNodeId(),
rmContainer.getContainer().getNodeHttpAddress(),
rmContainer.getContainer().getResource(),
sk.getPriority(), null, ExecutionType.OPPORTUNISTIC,
sk.getAllocationRequestId());
demotedContainer.setVersion(rmContainer.getContainer().getVersion());
return SchedulerUtils.createOpportunisticRmContainer(
rmContext, demotedContainer, false);
}
/**
* Rollback container update after expiry.
* @param containerId ContainerId.
*/
protected void rollbackContainerUpdate(
ContainerId containerId) {
RMContainer rmContainer = getRMContainer(containerId);
if (rmContainer == null) {
LOG.info("Cannot rollback resource for container " + containerId
+ ". The container does not exist.");
return;
}
T app = getCurrentAttemptForContainer(containerId);
if (getCurrentAttemptForContainer(containerId) == null) {
LOG.info("Cannot rollback resource for container " + containerId
+ ". The application that the container "
+ "belongs to does not exist.");
return;
}
if (Resources.fitsIn(rmContainer.getLastConfirmedResource(),
rmContainer.getContainer().getResource())) {
LOG.info("Roll back resource for container " + containerId);
handleDecreaseRequests(app, Arrays.asList(
UpdateContainerRequest.newInstance(
rmContainer.getContainer().getVersion(),
rmContainer.getContainerId(),
ContainerUpdateType.DECREASE_RESOURCE,
rmContainer.getLastConfirmedResource(), null)));
}
}
  /**
   * Returns the ids of all nodes matching the given resource name, as
   * tracked by the scheduler's node tracker.
   *
   * @param resourceName resource name to look up (as indexed by the tracker)
   * @return node ids registered under that resource name
   */
  @Override
  public List<NodeId> getNodeIds(String resourceName) {
    return nodeTracker.getNodeIdsByResourceName(resourceName);
  }
/**
* To be used to release a container via a Scheduler Event rather than
* in the same thread.
* @param container Container.
*/
public void asyncContainerRelease(RMContainer container) {
this.rmContext.getDispatcher().getEventHandler().handle(
new ReleaseContainerEvent(container));
}
/*
* Get a Resource object with for the minimum allocation possible.
*
* @return a Resource object with the minimum allocation for the scheduler
*/
public Resource getMinimumAllocation() {
Resource ret = ResourceUtils.getResourceTypesMinimumAllocation();
LOG.info("Minimum allocation = " + ret);
return ret;
}
/**
* Get a Resource object with for the maximum allocation possible.
*
* @return a Resource object with the maximum allocation for the scheduler
*/
public Resource getMaximumAllocation() {
Resource ret = ResourceUtils.getResourceTypesMaximumAllocation();
LOG.info("Maximum allocation = " + ret);
return ret;
}
  /**
   * Validates and returns the effective lifetime for an application.
   * This base implementation applies no queue-level policy and simply
   * echoes the requested lifetime; subclasses may override to clamp it.
   *
   * @param queueName queue the application was submitted to (unused here)
   * @param lifetime requested application lifetime
   * @param app the application (unused here)
   * @return the lifetime to enforce; the request itself by default
   */
  @Override
  public long checkAndGetApplicationLifetime(String queueName, long lifetime,
      RMAppImpl app) {
    // Lifetime is the application lifetime by default.
    return lifetime;
  }
  /**
   * Returns the maximum application lifetime for a queue. This base
   * implementation always returns -1 — presumably meaning "no maximum
   * enforced" (TODO confirm against callers); subclasses may override.
   *
   * @param queueName queue to query (unused here)
   * @return -1 in this default implementation
   */
  @Override
  public long getMaximumApplicationLifetime(String queueName) {
    return -1;
  }
  /**
   * Kill a RMContainer. This is meant to be called in tests only to simulate
   * AM container failures; production code should not invoke it.
   *
   * @param container the container to kill
   */
  @VisibleForTesting
  public abstract void killContainer(RMContainer container);
  /**
   * Update internal state of the scheduler. This can be useful for scheduler
   * implementations that maintain some state that needs to be periodically
   * updated; for example, metrics or queue resources. It will be called by the
   * {@link UpdateThread} every {@link #updateInterval}. By default, it will
   * not run; subclasses should set {@link #updateInterval} to a
   * positive value during {@link #serviceInit(Configuration)} if they want to
   * enable the thread.
   */
  @VisibleForTesting
  public void update() {
    // do nothing by default; subclasses override to refresh periodic state
  }
/**
* Thread which calls {@link #update()} every
* <code>updateInterval</code> milliseconds.
*/
private | AbstractYarnScheduler |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.