language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/tool/schema/extract/spi/ExtractionContext.java | {
"start": 2097,
"end": 2674
} | interface ____ {
@Nullable TableInformation locateTableInformation(QualifiedTableName tableName);
SequenceInformation locateSequenceInformation(QualifiedSequenceName sequenceName);
@Nullable PrimaryKeyInformation locatePrimaryKeyInformation(QualifiedTableName tableName);
Iterable<ForeignKeyInformation> locateForeignKeyInformation(QualifiedTableName tableName);
Iterable<IndexInformation> locateIndexesInformation(QualifiedTableName tableName);
boolean isCaching();
}
DatabaseObjectAccess getDatabaseObjectAccess();
void cleanup();
abstract | DatabaseObjectAccess |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/jpa/compliance/CriteriaNoPredicateTest.java | {
"start": 2902,
"end": 3533
} | class ____ {
@Id
private Integer id;
private String name;
private Integer age;
@OneToMany
private Collection<Account> accounts;
Person() {
}
public Person(Integer id, String name, Integer age, Collection<Account> accounts) {
this.id = id;
this.name = name;
this.age = age;
this.accounts = accounts;
}
public Integer getId() {
return id;
}
public String getName() {
return name;
}
public Integer getAge() {
return age;
}
public Collection<Account> getAccounts() {
return accounts;
}
}
@Entity(name = "Account")
@Table(name = "ACCOUNT_TABLE")
public static | Person |
java | quarkusio__quarkus | integration-tests/mongodb-rest-data-panache/src/main/java/io/quarkus/it/mongodb/rest/data/panache/BookRepository.java | {
"start": 193,
"end": 270
} | class ____ implements PanacheMongoRepositoryBase<Book, String> {
}
| BookRepository |
java | alibaba__druid | druid-demo-petclinic/src/main/java/org/springframework/samples/petclinic/owner/OwnerController.java | {
"start": 1625,
"end": 5378
} | class ____ {
private static final String VIEWS_OWNER_CREATE_OR_UPDATE_FORM = "owners/createOrUpdateOwnerForm";
private final OwnerRepository owners;
public OwnerController(OwnerRepository clinicService) {
this.owners = clinicService;
}
@InitBinder
public void setAllowedFields(WebDataBinder dataBinder) {
dataBinder.setDisallowedFields("id");
}
@ModelAttribute("owner")
public Owner findOwner(@PathVariable(name = "ownerId", required = false) Integer ownerId) {
return ownerId == null ? new Owner() : this.owners.findById(ownerId);
}
@GetMapping("/owners/new")
public String initCreationForm(Map<String, Object> model) {
Owner owner = new Owner();
model.put("owner", owner);
return VIEWS_OWNER_CREATE_OR_UPDATE_FORM;
}
@PostMapping("/owners/new")
public String processCreationForm(@Valid Owner owner, BindingResult result) {
if (result.hasErrors()) {
return VIEWS_OWNER_CREATE_OR_UPDATE_FORM;
}
else {
this.owners.save(owner);
return "redirect:/owners/" + owner.getId();
}
}
@GetMapping("/owners/find")
public String initFindForm(Map<String, Object> model) {
model.put("owner", new Owner());
return "owners/findOwners";
}
@GetMapping("/owners")
public String processFindForm(@RequestParam(defaultValue = "1") int page, Owner owner, BindingResult result,
Model model) {
// allow parameterless GET request for /owners to return all records
if (owner.getLastName() == null) {
owner.setLastName(""); // empty string signifies broadest possible search
}
// find owners by last name
Page<Owner> ownersResults = findPaginatedForOwnersLastName(page, owner.getLastName());
if (ownersResults.isEmpty()) {
// no owners found
result.rejectValue("lastName", "notFound", "not found");
return "owners/findOwners";
}
else if (ownersResults.getTotalElements() == 1) {
// 1 owner found
owner = ownersResults.iterator().next();
return "redirect:/owners/" + owner.getId();
}
else {
// multiple owners found
return addPaginationModel(page, model, ownersResults);
}
}
private String addPaginationModel(int page, Model model, Page<Owner> paginated) {
model.addAttribute("listOwners", paginated);
List<Owner> listOwners = paginated.getContent();
model.addAttribute("currentPage", page);
model.addAttribute("totalPages", paginated.getTotalPages());
model.addAttribute("totalItems", paginated.getTotalElements());
model.addAttribute("listOwners", listOwners);
return "owners/ownersList";
}
private Page<Owner> findPaginatedForOwnersLastName(int page, String lastname) {
int pageSize = 5;
Pageable pageable = PageRequest.of(page - 1, pageSize);
return owners.findByLastName(lastname, pageable);
}
@GetMapping("/owners/{ownerId}/edit")
public String initUpdateOwnerForm(@PathVariable("ownerId") int ownerId, Model model) {
Owner owner = this.owners.findById(ownerId);
model.addAttribute(owner);
return VIEWS_OWNER_CREATE_OR_UPDATE_FORM;
}
@PostMapping("/owners/{ownerId}/edit")
public String processUpdateOwnerForm(@Valid Owner owner, BindingResult result,
@PathVariable("ownerId") int ownerId) {
if (result.hasErrors()) {
return VIEWS_OWNER_CREATE_OR_UPDATE_FORM;
}
else {
owner.setId(ownerId);
this.owners.save(owner);
return "redirect:/owners/{ownerId}";
}
}
/**
* Custom handler for displaying an owner.
* @param ownerId the ID of the owner to display
* @return a ModelMap with the model attributes for the view
*/
@GetMapping("/owners/{ownerId}")
public ModelAndView showOwner(@PathVariable("ownerId") int ownerId) {
ModelAndView mav = new ModelAndView("owners/ownerDetails");
Owner owner = this.owners.findById(ownerId);
mav.addObject(owner);
return mav;
}
}
| OwnerController |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/streaming/api/functions/StatefulSequenceSourceTest.java | {
"start": 1600,
"end": 6738
} | class ____ {
@Test
void testCheckpointRestore() throws Exception {
final int initElement = 0;
final int maxElement = 100;
final int maxParallelsim = 2;
final Set<Long> expectedOutput = new HashSet<>();
for (long i = initElement; i <= maxElement; i++) {
expectedOutput.add(i);
}
final ConcurrentHashMap<String, List<Long>> outputCollector = new ConcurrentHashMap<>();
final OneShotLatch latchToTrigger1 = new OneShotLatch();
final OneShotLatch latchToWait1 = new OneShotLatch();
final OneShotLatch latchToTrigger2 = new OneShotLatch();
final OneShotLatch latchToWait2 = new OneShotLatch();
final StatefulSequenceSource source1 = new StatefulSequenceSource(initElement, maxElement);
StreamSource<Long, StatefulSequenceSource> src1 = new StreamSource<>(source1);
final AbstractStreamOperatorTestHarness<Long> testHarness1 =
new AbstractStreamOperatorTestHarness<>(src1, maxParallelsim, 2, 0);
testHarness1.open();
final StatefulSequenceSource source2 = new StatefulSequenceSource(initElement, maxElement);
StreamSource<Long, StatefulSequenceSource> src2 = new StreamSource<>(source2);
final AbstractStreamOperatorTestHarness<Long> testHarness2 =
new AbstractStreamOperatorTestHarness<>(src2, maxParallelsim, 2, 1);
testHarness2.open();
// run the source asynchronously
CheckedThread runner1 =
new CheckedThread() {
@Override
public void go() throws Exception {
source1.run(
new BlockingSourceContext<>(
"1", latchToTrigger1, latchToWait1, outputCollector, 21));
}
};
// run the source asynchronously
CheckedThread runner2 =
new CheckedThread() {
@Override
public void go() throws Exception {
source2.run(
new BlockingSourceContext<>(
"2", latchToTrigger2, latchToWait2, outputCollector, 32));
}
};
runner1.start();
runner2.start();
if (!latchToTrigger1.isTriggered()) {
latchToTrigger1.await();
}
if (!latchToTrigger2.isTriggered()) {
latchToTrigger2.await();
}
OperatorSubtaskState snapshot =
AbstractStreamOperatorTestHarness.repackageState(
testHarness1.snapshot(0L, 0L), testHarness2.snapshot(0L, 0L));
final StatefulSequenceSource source3 = new StatefulSequenceSource(initElement, maxElement);
StreamSource<Long, StatefulSequenceSource> src3 = new StreamSource<>(source3);
final OperatorSubtaskState initState =
AbstractStreamOperatorTestHarness.repartitionOperatorState(
snapshot, maxParallelsim, 2, 1, 0);
final AbstractStreamOperatorTestHarness<Long> testHarness3 =
new AbstractStreamOperatorTestHarness<>(src3, maxParallelsim, 1, 0);
testHarness3.setup();
testHarness3.initializeState(initState);
testHarness3.open();
final OneShotLatch latchToTrigger3 = new OneShotLatch();
final OneShotLatch latchToWait3 = new OneShotLatch();
latchToWait3.trigger();
// run the source asynchronously
CheckedThread runner3 =
new CheckedThread() {
@Override
public void go() throws Exception {
source3.run(
new BlockingSourceContext<>(
"3", latchToTrigger3, latchToWait3, outputCollector, 3));
}
};
runner3.start();
runner3.sync();
assertThat(outputCollector).hasSize(3); // we have 3 tasks.
// test for at-most-once
Set<Long> dedupRes = new HashSet<>(Math.abs(maxElement - initElement) + 1);
for (Map.Entry<String, List<Long>> elementsPerTask : outputCollector.entrySet()) {
String key = elementsPerTask.getKey();
List<Long> elements = outputCollector.get(key);
// this tests the correctness of the latches in the test
assertThat(elements).isNotEmpty();
for (Long elem : elements) {
assertThat(dedupRes.add(elem)).as("Duplicate entry: " + elem).isTrue();
assertThat(expectedOutput.contains(elem))
.as("Unexpected element: " + elem)
.isTrue();
}
}
// test for exactly-once
assertThat(dedupRes).hasSize(Math.abs(initElement - maxElement) + 1);
latchToWait1.trigger();
latchToWait2.trigger();
// wait for everybody ot finish.
runner1.sync();
runner2.sync();
}
}
| StatefulSequenceSourceTest |
java | spring-projects__spring-security | access/src/main/java/org/springframework/security/access/prepost/PrePostInvocationAttributeFactory.java | {
"start": 1191,
"end": 1575
} | interface ____ extends AopInfrastructureBean {
PreInvocationAttribute createPreInvocationAttribute(@Nullable String preFilterAttribute,
@Nullable String filterObject, @Nullable String preAuthorizeAttribute);
PostInvocationAttribute createPostInvocationAttribute(@Nullable String postFilterAttribute,
@Nullable String postAuthorizeAttribute);
}
| PrePostInvocationAttributeFactory |
java | apache__dubbo | dubbo-config/dubbo-config-spring6/src/main/java/org/apache/dubbo/config/spring6/beans/factory/aot/ReferencedFieldValueResolver.java | {
"start": 2475,
"end": 8496
} | class ____ extends AutowiredElementResolver {
private final String fieldName;
private final boolean required;
@Nullable
private final String shortcut;
private ReferencedFieldValueResolver(String fieldName, boolean required, @Nullable String shortcut) {
Assert.hasText(fieldName, "'fieldName' must not be empty");
this.fieldName = fieldName;
this.required = required;
this.shortcut = shortcut;
}
/**
* Create a new {@link ReferencedFieldValueResolver} for the specified field
* where injection is optional.
*
* @param fieldName the field name
* @return a new {@link ReferencedFieldValueResolver} instance
*/
public static ReferencedFieldValueResolver forField(String fieldName) {
return new ReferencedFieldValueResolver(fieldName, false, null);
}
/**
* Create a new {@link ReferencedFieldValueResolver} for the specified field
* where injection is required.
*
* @param fieldName the field name
* @return a new {@link ReferencedFieldValueResolver} instance
*/
public static ReferencedFieldValueResolver forRequiredField(String fieldName) {
return new ReferencedFieldValueResolver(fieldName, true, null);
}
/**
* Return a new {@link ReferencedFieldValueResolver} instance that uses a
* direct bean name injection shortcut.
*
* @param beanName the bean name to use as a shortcut
* @return a new {@link ReferencedFieldValueResolver} instance that uses the
* shortcuts
*/
public ReferencedFieldValueResolver withShortcut(String beanName) {
return new ReferencedFieldValueResolver(this.fieldName, this.required, beanName);
}
/**
* Resolve the field for the specified registered bean and provide it to the
* given action.
*
* @param registeredBean the registered bean
* @param action the action to execute with the resolved field value
*/
public <T> void resolve(RegisteredBean registeredBean, ThrowingConsumer<T> action) {
Assert.notNull(registeredBean, "'registeredBean' must not be null");
Assert.notNull(action, "'action' must not be null");
T resolved = resolve(registeredBean);
if (resolved != null) {
action.accept(resolved);
}
}
/**
* Resolve the field value for the specified registered bean.
*
* @param registeredBean the registered bean
* @param requiredType the required type
* @return the resolved field value
*/
@Nullable
@SuppressWarnings("unchecked")
public <T> T resolve(RegisteredBean registeredBean, Class<T> requiredType) {
Object value = resolveObject(registeredBean);
Assert.isInstanceOf(requiredType, value);
return (T) value;
}
/**
* Resolve the field value for the specified registered bean.
*
* @param registeredBean the registered bean
* @return the resolved field value
*/
@Nullable
@SuppressWarnings("unchecked")
public <T> T resolve(RegisteredBean registeredBean) {
return (T) resolveObject(registeredBean);
}
/**
* Resolve the field value for the specified registered bean.
*
* @param registeredBean the registered bean
* @return the resolved field value
*/
@Nullable
public Object resolveObject(RegisteredBean registeredBean) {
Assert.notNull(registeredBean, "'registeredBean' must not be null");
return resolveValue(registeredBean, getField(registeredBean));
}
/**
* Resolve the field value for the specified registered bean and set it
* using reflection.
*
* @param registeredBean the registered bean
* @param instance the bean instance
*/
public void resolveAndSet(RegisteredBean registeredBean, Object instance) {
Assert.notNull(registeredBean, "'registeredBean' must not be null");
Assert.notNull(instance, "'instance' must not be null");
Field field = getField(registeredBean);
Object resolved = resolveValue(registeredBean, field);
if (resolved != null) {
ReflectionUtils.makeAccessible(field);
ReflectionUtils.setField(field, instance, resolved);
}
}
@Nullable
private Object resolveValue(RegisteredBean registeredBean, Field field) {
String beanName = registeredBean.getBeanName();
Class<?> beanClass = registeredBean.getBeanClass();
ConfigurableBeanFactory beanFactory = registeredBean.getBeanFactory();
DependencyDescriptor descriptor = new DependencyDescriptor(field, this.required);
descriptor.setContainingClass(beanClass);
if (this.shortcut != null) {
descriptor = new ShortcutDependencyDescriptor(descriptor, this.shortcut, field.getType());
}
Set<String> autowiredBeanNames = new LinkedHashSet<>(1);
TypeConverter typeConverter = beanFactory.getTypeConverter();
try {
Assert.isInstanceOf(AutowireCapableBeanFactory.class, beanFactory);
Object injectedObject = beanFactory.getBean(shortcut);
Object value = ((AutowireCapableBeanFactory) beanFactory)
.resolveDependency(descriptor, beanName, autowiredBeanNames, typeConverter);
registerDependentBeans(beanFactory, beanName, autowiredBeanNames);
return injectedObject;
} catch (BeansException ex) {
throw new UnsatisfiedDependencyException(null, beanName, new InjectionPoint(field), ex);
}
}
private Field getField(RegisteredBean registeredBean) {
Field field = ReflectionUtils.findField(registeredBean.getBeanClass(), this.fieldName);
Assert.notNull(
field,
() -> "No field '" + this.fieldName + "' found on "
+ registeredBean.getBeanClass().getName());
return field;
}
}
| ReferencedFieldValueResolver |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/assumptions/BDDAssumptionsTest.java | {
"start": 19063,
"end": 19522
} | class ____ {
@Test
void should_run_test_when_assumption_passes() {
thenCode(() -> givenCode(() -> { /* some code */ }).doesNotThrowAnyException()).doesNotThrowAnyException();
}
@Test
void should_ignore_test_when_assumption_fails() {
expectAssumptionNotMetException(() -> givenCode(() -> { /* some code */ }).hasMessage("Yoda time"));
}
}
@Nested
| BDDAssumptions_givenCode_no_exception_required_Test |
java | spring-projects__spring-framework | spring-aop/src/test/java/org/springframework/aop/support/AopUtilsTests.java | {
"start": 4950,
"end": 5024
} | interface ____ {
void handle(List<String> list);
}
static | ProxyInterface |
java | assertj__assertj-core | assertj-guava/src/main/java/org/assertj/guava/error/RangeSetShouldNotEnclose.java | {
"start": 799,
"end": 1495
} | class ____ extends BasicErrorMessageFactory {
public static ErrorMessageFactory shouldNotEnclose(Object actual, Object expected, Iterable<?> enclosed) {
return new RangeSetShouldNotEnclose(actual, expected, enclosed);
}
/**
* Creates a new <code>{@link BasicErrorMessageFactory}</code>.
*
* @param actual actual {@code RangeSet}.
* @param expected expected value.
* @param enclosed list of values that haven't to be enclosed, but they have.
*/
private RangeSetShouldNotEnclose(Object actual, Object expected, Object enclosed) {
super("%nExpecting:%n %s%nnot to enclose%n %s%nbut it encloses%n %s%n", actual, expected, enclosed);
}
}
| RangeSetShouldNotEnclose |
java | apache__camel | components/camel-jms/src/test/java/org/apache/camel/component/jms/JmsInOutWithNoOutBodyTest.java | {
"start": 1528,
"end": 2979
} | class ____ extends AbstractJMSTest {
@Order(2)
@RegisterExtension
public static CamelContextExtension camelContextExtension = new DefaultCamelContextExtension();
protected CamelContext context;
protected ProducerTemplate template;
protected ConsumerTemplate consumer;
@Test
public void testWithNoOutBodySet() {
String reply = template.requestBody("direct:start", "Foo", String.class);
assertEquals("Foo", reply);
}
@Override
protected String getComponentName() {
return "activemq";
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
from("direct:start")
.to("log:before")
.to("activemq:JmsInOutWithNoOutBodyTest")
.to("log:after")
.to("mock:result");
from("activemq:JmsInOutWithNoOutBodyTest")
.to("log:receivedRequest");
}
};
}
@Override
public CamelContextExtension getCamelContextExtension() {
return camelContextExtension;
}
@BeforeEach
void setUpRequirements() {
context = camelContextExtension.getContext();
template = camelContextExtension.getProducerTemplate();
consumer = camelContextExtension.getConsumerTemplate();
}
}
| JmsInOutWithNoOutBodyTest |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/deser/builder/BuilderWithTypeParametersTest.java | {
"start": 2661,
"end": 4834
} | class ____<T> {
private List<T> data;
public Builder<T> withData(List<T> d) {
data = d;
return this;
}
public MyGenericPOJOWithCreator<T> build() {
return new MyGenericPOJOWithCreator<T>(data);
}
}
}
*/
@Test
public void testWithBuilderInferringBindings() throws Exception {
final ObjectMapper mapper = jsonMapperBuilder()
.enable(MapperFeature.INFER_BUILDER_TYPE_BINDINGS)
.build();
final String json = a2q("{ 'data': [ { 'x': 'x', 'y': 'y' } ] }");
final MyGenericPOJO<MyPOJO> deserialized =
mapper.readValue(json, new TypeReference<MyGenericPOJO<MyPOJO>>() {});
assertEquals(1, deserialized.data.size());
Object ob = deserialized.data.get(0);
assertNotNull(ob);
assertEquals(MyPOJO.class, ob.getClass());
}
@Test
public void testWithBuilderWithoutInferringBindings() throws Exception {
final ObjectMapper mapper = jsonMapperBuilder()
.disable(MapperFeature.INFER_BUILDER_TYPE_BINDINGS)
.build();
final String json = a2q("{ 'data': [ { 'x': 'x', 'y': 'y' } ] }");
final MyGenericPOJO<MyPOJO> deserialized =
mapper.readValue(json, new TypeReference<MyGenericPOJO<MyPOJO>>() {});
assertEquals(1, deserialized.data.size());
Object ob = deserialized.data.get(0);
assertNotNull(ob);
assertEquals(LinkedHashMap.class, ob.getClass());
}
// 05-Sep-2020, tatu: see above for reason why this cannot work
/*
@Test
public void testWithCreator() throws Exception {
final ObjectMapper mapper = new ObjectMapper();
final String json = a2q("{ 'data': [ { 'x': 'x', 'y': 'y' } ] }");
final MyGenericPOJOWithCreator<MyPOJO> deserialized =
mapper.readValue(json,
new TypeReference<MyGenericPOJOWithCreator<MyPOJO>>() {});
assertEquals(1, deserialized.data.size());
Object ob = deserialized.data.get(0);
assertNotNull(ob);
assertEquals(MyPOJO.class, ob.getClass());
}
*/
}
| Builder |
java | apache__kafka | tools/src/main/java/org/apache/kafka/tools/consumer/ConsoleShareConsumerOptions.java | {
"start": 9729,
"end": 18057
} | class ____ use for deserializing values.")
.withRequiredArg()
.describedAs("deserializer for values")
.ofType(String.class);
groupIdOpt = parser.accepts("group", "The share group id of the consumer.")
.withRequiredArg()
.describedAs("share group id")
.ofType(String.class);
enableSystestEventsLoggingOpt = parser.accepts("enable-systest-events",
"Log lifecycle events of the share consumer in addition to logging consumed messages. (This is specific for system tests.)");
try {
options = parser.parse(args);
} catch (OptionException oe) {
CommandLineUtils.printUsageAndExit(parser, oe.getMessage());
}
CommandLineUtils.maybePrintHelpOrVersion(this, "This tool helps to read data from Kafka topics using share groups and outputs it to standard output.");
checkRequiredArgs();
if (options.has(rejectOpt) && options.has(releaseOpt)) {
CommandLineUtils.printUsageAndExit(parser, "At most one of --reject and --release may be specified.");
}
if (options.has(consumerPropertyOpt) && options.has(commandPropertyOpt)) {
CommandLineUtils.printUsageAndExit(parser, "Options --consumer-property and --command-property cannot be specified together.");
}
if (options.has(consumerConfigOpt) && options.has(commandConfigOpt)) {
CommandLineUtils.printUsageAndExit(parser, "Options --consumer-config and --command-config cannot be specified together.");
}
if (options.has(consumerPropertyOpt)) {
System.out.println("Option --consumer-property is deprecated and will be removed in a future version. Use --command-property instead.");
commandPropertyOpt = consumerPropertyOpt;
}
if (options.has(consumerConfigOpt)) {
System.out.println("Option --consumer-config is deprecated and will be removed in a future version. Use --command-config instead.");
commandConfigOpt = consumerConfigOpt;
}
Properties consumerPropsFromFile = options.has(commandConfigOpt)
? Utils.loadProps(options.valueOf(commandConfigOpt))
: new Properties();
Properties extraConsumerProps = CommandLineUtils.parseKeyValueArgs(options.valuesOf(commandPropertyOpt));
Set<String> groupIdsProvided = checkShareGroup(consumerPropsFromFile, extraConsumerProps);
consumerProps = buildConsumerProps(consumerPropsFromFile, extraConsumerProps, groupIdsProvided);
formatter = buildFormatter();
}
private void checkRequiredArgs() {
if (!options.has(topicOpt)) {
CommandLineUtils.printUsageAndExit(parser, "--topic is a required argument");
}
CommandLineUtils.checkRequiredArgs(parser, options, bootstrapServerOpt);
}
private Set<String> checkShareGroup(Properties consumerPropsFromFile, Properties extraConsumerProps) {
// if the group id is provided in more than place (through different means) all values must be the same
Set<String> groupIdsProvided = new HashSet<>();
if (options.has(groupIdOpt)) {
groupIdsProvided.add(options.valueOf(groupIdOpt));
}
if (consumerPropsFromFile.containsKey(ConsumerConfig.GROUP_ID_CONFIG)) {
groupIdsProvided.add((String) consumerPropsFromFile.get(ConsumerConfig.GROUP_ID_CONFIG));
}
if (extraConsumerProps.containsKey(ConsumerConfig.GROUP_ID_CONFIG)) {
groupIdsProvided.add(extraConsumerProps.getProperty(ConsumerConfig.GROUP_ID_CONFIG));
}
// The default value for group.id is "console-share-consumer"
if (groupIdsProvided.isEmpty()) {
groupIdsProvided.add("console-share-consumer");
} else if (groupIdsProvided.size() > 1) {
CommandLineUtils.printUsageAndExit(parser, "The group ids provided in different places (directly using '--group', "
+ "via '--consumer-property', or via '--consumer-config') do not match. "
+ "Detected group ids: "
+ groupIdsProvided.stream().map(group -> "'" + group + "'").collect(Collectors.joining(", ")));
}
return groupIdsProvided;
}
private Properties buildConsumerProps(Properties consumerPropsFromFile, Properties extraConsumerProps, Set<String> groupIdsProvided) {
Properties consumerProps = new Properties();
consumerProps.putAll(consumerPropsFromFile);
consumerProps.putAll(extraConsumerProps);
consumerProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServer());
if (consumerProps.getProperty(ConsumerConfig.CLIENT_ID_CONFIG) == null) {
consumerProps.put(ConsumerConfig.CLIENT_ID_CONFIG, "console-share-consumer");
}
consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, groupIdsProvided.iterator().next());
return consumerProps;
}
private MessageFormatter buildFormatter() {
MessageFormatter formatter = null;
try {
Class<?> messageFormatterClass = Class.forName(options.valueOf(messageFormatterOpt));
formatter = (MessageFormatter) messageFormatterClass.getDeclaredConstructor().newInstance();
if (options.has(messageFormatterArgOpt) && options.has(messageFormatterArgOptDeprecated)) {
CommandLineUtils.printUsageAndExit(parser, "Options --property and --formatter-property cannot be specified together.");
}
if (options.has(messageFormatterArgOptDeprecated)) {
System.out.println("Option --property is deprecated and will be removed in a future version. Use --formatter-property instead.");
messageFormatterArgOpt = messageFormatterArgOptDeprecated;
}
Properties formatterArgs = formatterArgs();
Map<String, String> formatterConfigs = new HashMap<>();
for (final String name : formatterArgs.stringPropertyNames()) {
formatterConfigs.put(name, formatterArgs.getProperty(name));
}
formatter.configure(formatterConfigs);
} catch (Exception e) {
CommandLineUtils.printUsageAndExit(parser, e.getMessage());
}
return formatter;
}
Properties consumerProps() {
return consumerProps;
}
boolean rejectMessageOnError() {
return options.has(rejectMessageOnErrorOpt);
}
AcknowledgeType acknowledgeType() {
if (options.has(rejectOpt)) {
return AcknowledgeType.REJECT;
} else if (options.has(releaseOpt)) {
return AcknowledgeType.RELEASE;
} else {
return AcknowledgeType.ACCEPT;
}
}
String topicArg() {
return options.valueOf(topicOpt);
}
int maxMessages() {
return options.has(maxMessagesOpt) ? options.valueOf(maxMessagesOpt) : -1;
}
int timeoutMs() {
return options.has(timeoutMsOpt) ? options.valueOf(timeoutMsOpt) : -1;
}
String bootstrapServer() {
return options.valueOf(bootstrapServerOpt);
}
Properties formatterArgs() throws IOException {
Properties formatterArgs = options.has(messageFormatterConfigOpt)
? Utils.loadProps(options.valueOf(messageFormatterConfigOpt))
: new Properties();
String keyDeserializer = options.valueOf(keyDeserializerOpt);
if (keyDeserializer != null && !keyDeserializer.isEmpty()) {
formatterArgs.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializer);
}
String valueDeserializer = options.valueOf(valueDeserializerOpt);
if (valueDeserializer != null && !valueDeserializer.isEmpty()) {
formatterArgs.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializer);
}
formatterArgs.putAll(CommandLineUtils.parseKeyValueArgs(options.valuesOf(messageFormatterArgOpt)));
return formatterArgs;
}
MessageFormatter formatter() {
return formatter;
}
boolean enableSystestEventsLogging() {
return options.has(enableSystestEventsLoggingOpt);
}
}
| to |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/leaderelection/LeaderElectionTest.java | {
"start": 6176,
"end": 8175
} | class ____ implements ServiceClass {
private TestingServer testingServer;
private CuratorFrameworkWithUnhandledErrorListener curatorFrameworkWrapper;
private DefaultLeaderElectionService leaderElectionService;
@Override
public void setup(FatalErrorHandler fatalErrorHandler) throws Exception {
try {
testingServer = ZooKeeperTestUtils.createAndStartZookeeperTestingServer();
} catch (Exception e) {
throw new RuntimeException("Could not start ZooKeeper testing cluster.", e);
}
final Configuration configuration = new Configuration();
configuration.set(
HighAvailabilityOptions.HA_ZOOKEEPER_QUORUM, testingServer.getConnectString());
configuration.set(HighAvailabilityOptions.HA_MODE, "zookeeper");
curatorFrameworkWrapper =
ZooKeeperUtils.startCuratorFramework(configuration, fatalErrorHandler);
final LeaderElectionDriverFactory driverFactory =
new ZooKeeperLeaderElectionDriverFactory(
curatorFrameworkWrapper.asCuratorFramework());
leaderElectionService = new DefaultLeaderElectionService(driverFactory);
}
@Override
public void teardown() throws Exception {
if (leaderElectionService != null) {
leaderElectionService.close();
}
if (curatorFrameworkWrapper != null) {
curatorFrameworkWrapper.close();
curatorFrameworkWrapper = null;
}
if (testingServer != null) {
testingServer.close();
testingServer = null;
}
}
@Override
public LeaderElection createLeaderElection() {
return leaderElectionService.createLeaderElection("random-component-id");
}
}
private static final | ZooKeeperServiceClass |
java | spring-projects__spring-framework | spring-beans/src/test/java/org/springframework/beans/AbstractPropertyAccessorTests.java | {
"start": 65552,
"end": 66038
} | class ____ {
private String name;
private Address address;
private Person(String name, Address address) {
this.name = name;
this.address = address;
}
public Person() {
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public Address getAddress() {
return address;
}
public void setAddress(Address address) {
this.address = address;
}
}
@SuppressWarnings("unused")
private static | Person |
java | bumptech__glide | samples/flickr/src/main/java/com/bumptech/glide/samples/flickr/api/Photo.java | {
"start": 268,
"end": 3172
} | class ____ implements Parcelable {
public static final Creator<Photo> CREATOR =
new Creator<Photo>() {
@Override
public Photo createFromParcel(Parcel parcel) {
return new Photo(parcel);
}
@Override
public Photo[] newArray(int i) {
return new Photo[i];
}
};
private final String id;
private final String owner;
private final String title;
private final String server;
private final String farm;
private final String secret;
private String partialUrl = null;
public Photo(JSONObject jsonPhoto) throws JSONException {
this.id = jsonPhoto.getString("id");
this.owner = jsonPhoto.getString("owner");
this.title = jsonPhoto.optString("title", "");
this.server = jsonPhoto.getString("server");
this.farm = jsonPhoto.getString("farm");
this.secret = jsonPhoto.getString("secret");
}
private Photo(Parcel in) {
id = in.readString();
owner = in.readString();
title = in.readString();
server = in.readString();
farm = in.readString();
secret = in.readString();
}
@Override
public void writeToParcel(Parcel parcel, int i) {
parcel.writeString(id);
parcel.writeString(owner);
parcel.writeString(title);
parcel.writeString(server);
parcel.writeString(farm);
parcel.writeString(secret);
}
public String getPartialUrl() {
if (partialUrl == null) {
partialUrl = Api.getCacheableUrl(this);
}
return partialUrl;
}
public String getId() {
return id;
}
public String getTitle() {
return title;
}
public String getServer() {
return server;
}
public String getFarm() {
return farm;
}
public String getSecret() {
return secret;
}
@Override
public String toString() {
return getPartialUrl();
}
@SuppressWarnings({"PMD.SimplifyBooleanReturns", "RedundantIfStatement"})
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
Photo photo = (Photo) o;
if (!farm.equals(photo.farm)) {
return false;
}
if (!id.equals(photo.id)) {
return false;
}
if (!owner.equals(photo.owner)) {
return false;
}
if (!secret.equals(photo.secret)) {
return false;
}
if (!server.equals(photo.server)) {
return false;
}
if (!title.equals(photo.title)) {
return false;
}
return true;
}
/**
 * Hash over the same six fields compared by {@link #equals(Object)}, preserving the
 * equals/hashCode contract; the derived partialUrl cache is excluded. Standard
 * 31-multiplier accumulation.
 */
@Override
public int hashCode() {
    int result = id.hashCode();
    result = 31 * result + owner.hashCode();
    result = 31 * result + title.hashCode();
    result = 31 * result + server.hashCode();
    result = 31 * result + farm.hashCode();
    result = 31 * result + secret.hashCode();
    return result;
}

/**
 * No special objects (e.g. file descriptors) in the marshalled representation,
 * hence 0 per the Parcelable contract.
 */
@Override
public int describeContents() {
    return 0;
}
}
| Photo |
java | apache__kafka | clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractHeartbeatRequestManager.java | {
"start": 3312,
"end": 26382
} | class ____<R extends AbstractResponse> implements RequestManager {
// Logger scoped to the concrete subclass (created via logContext.logger(getClass())).
protected final Logger logger;

/**
 * Max time allowed between invocations of poll, defined in the {@link ConsumerConfig#MAX_POLL_INTERVAL_MS_CONFIG} config.
 * This is sent to the coordinator in the first heartbeat to join a group, to be used as rebalance timeout.
 * Also, the consumer will proactively rejoin the group on a call to poll if this time has expired.
 */
protected final int maxPollIntervalMs;

/**
 * CoordinatorRequestManager manages the connection to the group coordinator
 */
protected final CoordinatorRequestManager coordinatorRequestManager;

/**
 * HeartbeatRequestState manages heartbeat request timing and retries
 */
private final HeartbeatRequestState heartbeatRequestState;

/**
 * ErrorEventHandler allows the background thread to propagate errors back to the user
 */
private final BackgroundEventHandler backgroundEventHandler;

/**
 * Timer for tracking the time since the last consumer poll. If the timer expires, the consumer will stop
 * sending heartbeat until the next poll.
 */
private final Timer pollTimer;

/**
 * Holding the heartbeat sensor to measure heartbeat timing and response latency
 */
private final HeartbeatMetricsManager metricsManager;

/**
 * User-facing message explaining that the broker cluster does not understand the new
 * CONSUMER group protocol (referenced by subclasses; usage not visible in this chunk).
 */
public static final String CONSUMER_PROTOCOL_NOT_SUPPORTED_MSG = "The cluster does not support the new CONSUMER " +
    "group protocol. Set group.protocol=classic on the consumer configs to revert to the CLASSIC protocol " +
    "until the cluster is upgraded.";
/**
 * Primary constructor: builds the heartbeat request state (exponential backoff from the
 * retry.backoff.* configs, with jitter) and the poll timer internally.
 */
AbstractHeartbeatRequestManager(
    final LogContext logContext,
    final Time time,
    final ConsumerConfig config,
    final CoordinatorRequestManager coordinatorRequestManager,
    final BackgroundEventHandler backgroundEventHandler,
    final HeartbeatMetricsManager metricsManager) {
    this.coordinatorRequestManager = coordinatorRequestManager;
    this.logger = logContext.logger(getClass());
    this.backgroundEventHandler = backgroundEventHandler;
    this.maxPollIntervalMs = config.getInt(CommonClientConfigs.MAX_POLL_INTERVAL_MS_CONFIG);
    long retryBackoffMs = config.getLong(ConsumerConfig.RETRY_BACKOFF_MS_CONFIG);
    long retryBackoffMaxMs = config.getLong(ConsumerConfig.RETRY_BACKOFF_MAX_MS_CONFIG);
    this.heartbeatRequestState = new HeartbeatRequestState(logContext, time, 0, retryBackoffMs,
        retryBackoffMaxMs, RETRY_BACKOFF_JITTER);
    this.pollTimer = time.timer(maxPollIntervalMs);
    this.metricsManager = metricsManager;
}

/**
 * Alternative constructor that injects the poll timer and heartbeat request state
 * (presumably for testing — callers not visible in this chunk).
 */
AbstractHeartbeatRequestManager(
    final LogContext logContext,
    final Timer timer,
    final ConsumerConfig config,
    final CoordinatorRequestManager coordinatorRequestManager,
    final HeartbeatRequestState heartbeatRequestState,
    final BackgroundEventHandler backgroundEventHandler,
    final HeartbeatMetricsManager metricsManager) {
    this.logger = logContext.logger(this.getClass());
    this.maxPollIntervalMs = config.getInt(CommonClientConfigs.MAX_POLL_INTERVAL_MS_CONFIG);
    this.coordinatorRequestManager = coordinatorRequestManager;
    this.heartbeatRequestState = heartbeatRequestState;
    this.backgroundEventHandler = backgroundEventHandler;
    this.pollTimer = timer;
    this.metricsManager = metricsManager;
}
/**
 * This will build a heartbeat request if one must be sent, determined based on the member
 * state. A heartbeat is sent in the following situations:
 * <ol>
 *     <li>Member is part of the consumer group or wants to join it.</li>
 *     <li>The heartbeat interval has expired, or the member is in a state that indicates
 *     that it should heartbeat without waiting for the interval.</li>
 * </ol>
 * This will also determine the maximum wait time until the next poll based on the member's
 * state.
 * <ol>
 *     <li>If the member is without a coordinator or is in a failed state, the timer is set
 *     to Long.MAX_VALUE, as there's no need to send a heartbeat.</li>
 *     <li>If the member cannot send a heartbeat due to either exponential backoff, it will
 *     return the remaining time left on the backoff timer.</li>
 *     <li>If the member's heartbeat timer has not expired, It will return the remaining time
 *     left on the heartbeat timer.</li>
 *     <li>If the member can send a heartbeat, the timer is set to the current heartbeat interval.</li>
 * </ol>
 *
 * @return {@link PollResult} that includes a heartbeat request if one must be sent, and the
 *         time to wait until the next poll.
 */
@Override
public NetworkClientDelegate.PollResult poll(long currentTimeMs) {
    // No coordinator, or member state says heartbeats should be skipped: do nothing,
    // but still surface any fatal coordinator error to the application thread.
    if (coordinatorRequestManager.coordinator().isEmpty() || membershipManager().shouldSkipHeartbeat()) {
        membershipManager().onHeartbeatRequestSkipped();
        maybePropagateCoordinatorFatalErrorEvent();
        return NetworkClientDelegate.PollResult.EMPTY;
    }
    pollTimer.update(currentTimeMs);
    // Poll timer expired: proactively leave the group (best-effort; response ignored).
    if (pollTimer.isExpired() && !membershipManager().isLeavingGroup()) {
        logger.warn("Consumer poll timeout has expired. This means the time between " +
            "subsequent calls to poll() was longer than the configured max.poll.interval.ms, " +
            "which typically implies that the poll loop is spending too much time processing " +
            "messages. You can address this either by increasing max.poll.interval.ms or by " +
            "reducing the maximum size of batches returned in poll() with max.poll.records.");
        membershipManager().transitionToSendingLeaveGroup(true);
        NetworkClientDelegate.UnsentRequest leaveHeartbeat = makeHeartbeatRequest(currentTimeMs, true);
        // We can ignore the leave response because we can join before or after receiving the response.
        heartbeatRequestState.reset();
        resetHeartbeatState();
        return new NetworkClientDelegate.PollResult(heartbeatRequestState.heartbeatIntervalMs(), Collections.singletonList(leaveHeartbeat));
    }
    // Case 1: The member state is LEAVING - if the member is a share consumer, we should immediately send leave;
    // if the member is an async consumer, this will also depend on leavingGroupOperation.
    boolean heartbeatNow = shouldSendLeaveHeartbeatNow() ||
        // Case 2: The member state indicates it should send a heartbeat without waiting for the interval,
        // and there is no heartbeat request currently in-flight
        (membershipManager().shouldHeartbeatNow() && !heartbeatRequestState.requestInFlight());
    // Respect backoff/interval unless one of the "heartbeat now" cases applies.
    if (!heartbeatRequestState.canSendRequest(currentTimeMs) && !heartbeatNow) {
        return new NetworkClientDelegate.PollResult(heartbeatRequestState.timeToNextHeartbeatMs(currentTimeMs));
    }
    NetworkClientDelegate.UnsentRequest request = makeHeartbeatRequest(currentTimeMs, false);
    return new NetworkClientDelegate.PollResult(heartbeatRequestState.heartbeatIntervalMs(), Collections.singletonList(request));
}
/**
 * Returns the {@link AbstractMembershipManager} that this request manager is using to track the state of the group.
 * This is provided so that the {@link ApplicationEventProcessor} can access the state for querying or updating.
 * Implemented by the protocol-specific subclass.
 */
public abstract AbstractMembershipManager<R> membershipManager();

/**
 * @return true if the member should send its leave-group heartbeat immediately,
 *         without waiting for the heartbeat interval; implemented per protocol.
 */
protected abstract boolean shouldSendLeaveHeartbeatNow();
/**
 * Generates one last leave-group heartbeat if the member is still leaving when the
 * consumer is being closed. Unsubscribe processing may not have finished in time
 * (the member can still be in {@link MemberState#PREPARE_LEAVING} or
 * {@link MemberState#LEAVING}, e.g. callbacks not yet run, or coordinator previously
 * unavailable), and this method runs right before the final requests are sent, so it
 * is the last chance to emit the leave request.
 *
 * @param currentTimeMs The current system time in milliseconds at which the method was called
 * @return PollResult containing the request to send
 */
@Override
public PollResult pollOnClose(long currentTimeMs) {
    // Nothing to do unless the member still needs to leave the group.
    if (!membershipManager().isLeavingGroup()) {
        return EMPTY;
    }
    NetworkClientDelegate.UnsentRequest leaveRequest = makeHeartbeatRequest(currentTimeMs, true);
    return new NetworkClientDelegate.PollResult(
        heartbeatRequestState.heartbeatIntervalMs(), Collections.singletonList(leaveRequest));
}
/**
 * Delay for which the application thread can safely block before it must become
 * responsive again. Subscription state can change when heartbeats are sent, and the
 * application thread may need to issue an {@link AsyncPollEvent} before the poll timer
 * expires, so this never exceeds half of the remaining poll-timer time nor the time to
 * the next heartbeat. Returns 0 when the poll timer has expired or a heartbeat is due
 * immediately. Even while heartbeats are being skipped, this still returns the next
 * heartbeat delay (not {@code Long.MAX_VALUE}) to keep the application thread responsive.
 */
@Override
public long maximumTimeToWait(long currentTimeMs) {
    pollTimer.update(currentTimeMs);
    // Guard 1: poll timer already expired.
    if (pollTimer.isExpired()) {
        return 0L;
    }
    // Guard 2: a heartbeat is due now and none is in flight.
    if (membershipManager().shouldHeartbeatNow() && !heartbeatRequestState.requestInFlight()) {
        return 0L;
    }
    long halfRemainingPollMs = pollTimer.remainingMs() / 2;
    return Math.min(halfRemainingPollMs, heartbeatRequestState.timeToNextHeartbeatMs(currentTimeMs));
}
/**
 * Reset the poll timer, indicating that the user has called consumer.poll(). If the member
 * is in {@link MemberState#STALE} state due to expired poll timer, this will transition the
 * member to {@link MemberState#JOINING}, so that it rejoins the group.
 *
 * @param pollMs time of the poll() call, used to update the timer before checking expiry
 */
public void resetPollTimer(final long pollMs) {
    pollTimer.update(pollMs);
    if (pollTimer.isExpired()) {
        logger.warn("Time between subsequent calls to poll() was longer than the configured " +
            "max.poll.interval.ms, exceeded approximately by {} ms. Member {} will rejoin the group now.",
            pollTimer.isExpiredBy(), membershipManager().memberId());
        membershipManager().maybeRejoinStaleMember();
    }
    pollTimer.reset(maxPollIntervalMs);
}

/**
 * If the coordinator request manager recorded a fatal error, forwards it to the
 * application thread as an {@link ErrorEvent}; the error is cleared in the process.
 */
private void maybePropagateCoordinatorFatalErrorEvent() {
    coordinatorRequestManager.getAndClearFatalError()
        .ifPresent(fatalError -> backgroundEventHandler.add(new ErrorEvent(fatalError)));
}
/**
 * Builds a heartbeat request and performs all send-side bookkeeping: records the send
 * attempt, notifies the membership manager, records the send-time metric, and restarts
 * the interval timer.
 *
 * @param currentTimeMs  current system time
 * @param ignoreResponse when true the response is only logged, not acted upon (used for
 *                       best-effort leave-group heartbeats)
 * @return the unsent request to hand to the network layer
 */
private NetworkClientDelegate.UnsentRequest makeHeartbeatRequest(final long currentTimeMs, final boolean ignoreResponse) {
    NetworkClientDelegate.UnsentRequest request = makeHeartbeatRequest(ignoreResponse);
    heartbeatRequestState.onSendAttempt(currentTimeMs);
    membershipManager().onHeartbeatRequestGenerated();
    metricsManager.recordHeartbeatSentMs(currentTimeMs);
    heartbeatRequestState.resetTimer();
    return request;
}

/**
 * Builds the protocol-specific heartbeat request and attaches the completion handler:
 * a log-only handler when the response is to be ignored, otherwise the full
 * response/failure handler. The unchecked cast assumes subclasses produce responses
 * of type {@code R}.
 */
@SuppressWarnings("unchecked")
private NetworkClientDelegate.UnsentRequest makeHeartbeatRequest(final boolean ignoreResponse) {
    NetworkClientDelegate.UnsentRequest request = buildHeartbeatRequest();
    if (ignoreResponse)
        return logResponse(request);
    else
        return request.whenComplete((response, exception) -> {
            long completionTimeMs = request.handler().completionTimeMs();
            if (response != null) {
                metricsManager.recordRequestLatency(response.requestLatencyMs());
                onResponse((R) response.responseBody(), completionTimeMs);
            } else {
                onFailure(exception, completionTimeMs);
            }
        });
}
@SuppressWarnings("unchecked")
private NetworkClientDelegate.UnsentRequest logResponse(final NetworkClientDelegate.UnsentRequest request) {
return request.whenComplete((response, exception) -> {
if (response != null) {
metricsManager.recordRequestLatency(response.requestLatencyMs());
Errors error = errorForResponse((R) response.responseBody());
if (error == Errors.NONE)
logger.debug("{} responded successfully: {}", heartbeatRequestName(), response);
else
logger.error("{} failed because of {}: {}", heartbeatRequestName(), error, response);
} else {
logger.error("{} failed because of unexpected exception.", heartbeatRequestName(), exception);
}
});
}
/**
 * Handles a heartbeat attempt that completed exceptionally (no response received).
 * Retriable exceptions trigger a coordinator-disconnect check and are retried after
 * backoff; anything else is fatal unless a subclass handles it via
 * {@link #handleSpecificFailure(Throwable)}. The membership manager is notified last,
 * after all errors have been propagated.
 */
private void onFailure(final Throwable exception, final long responseTimeMs) {
    this.heartbeatRequestState.onFailedAttempt(responseTimeMs);
    resetHeartbeatState();
    if (exception instanceof RetriableException) {
        coordinatorRequestManager.handleCoordinatorDisconnect(exception, responseTimeMs);
        // NOTE(review): message is formatted eagerly even when debug logging is off —
        // parameterized logging would avoid the String.format cost.
        String message = String.format("%s failed because of the retriable exception. Will retry in %s ms: %s",
            heartbeatRequestName(),
            heartbeatRequestState.remainingBackoffMs(responseTimeMs),
            exception.getMessage());
        logger.debug(message);
    } else if (!handleSpecificFailure(exception)) {
        logger.error("{} failed due to fatal error: {}", heartbeatRequestName(), exception.getMessage());
        handleFatalFailure(exception);
    }
    // Notify the group manager about the failure after all errors have been handled and propagated.
    membershipManager().onHeartbeatFailure(exception instanceof RetriableException);
}
/**
 * Handles a completed heartbeat response: on error, delegates to the error path;
 * on success, adopts the broker-provided interval, records the successful attempt,
 * and notifies the membership manager.
 */
private void onResponse(final R response, final long currentTimeMs) {
    if (errorForResponse(response) != Errors.NONE) {
        onErrorResponse(response, currentTimeMs);
        return;
    }
    heartbeatRequestState.updateHeartbeatIntervalMs(heartbeatIntervalForResponse(response));
    heartbeatRequestState.onSuccessfulAttempt(currentTimeMs);
    membershipManager().onHeartbeatSuccess(response);
}
/**
 * Dispatches on the error code of a failed heartbeat response. Coordinator errors mark
 * the coordinator unknown and retry; fencing errors transition the member to fenced and
 * rejoin; authorization and protocol errors are surfaced to the application (fatally,
 * except for topic authorization which keeps the member in its current state). Unknown
 * errors are offered to the subclass first, then treated as fatal. The membership
 * manager is notified last.
 */
private void onErrorResponse(final R response, final long currentTimeMs) {
    Errors error = errorForResponse(response);
    String errorMessage = errorMessageForResponse(response);
    String message;
    resetHeartbeatState();
    this.heartbeatRequestState.onFailedAttempt(currentTimeMs);
    switch (error) {
        case NOT_COORDINATOR:
            // the manager should retry immediately when the coordinator node becomes available again
            message = String.format("%s failed because the group coordinator %s is incorrect. " +
                    "Will attempt to find the coordinator again and retry",
                heartbeatRequestName(), coordinatorRequestManager.coordinator());
            logInfo(message, response, currentTimeMs);
            coordinatorRequestManager.markCoordinatorUnknown(errorMessage, currentTimeMs);
            // Skip backoff so that the next HB is sent as soon as the new coordinator is discovered
            heartbeatRequestState.reset();
            break;
        case COORDINATOR_NOT_AVAILABLE:
            message = String.format("%s failed because the group coordinator %s is not available. " +
                    "Will attempt to find the coordinator again and retry",
                heartbeatRequestName(), coordinatorRequestManager.coordinator());
            logInfo(message, response, currentTimeMs);
            coordinatorRequestManager.markCoordinatorUnknown(errorMessage, currentTimeMs);
            // Skip backoff so that the next HB is sent as soon as the new coordinator is discovered
            heartbeatRequestState.reset();
            break;
        case COORDINATOR_LOAD_IN_PROGRESS:
            // the manager will backoff and retry
            message = String.format("%s failed because the group coordinator %s is still loading. Will retry",
                heartbeatRequestName(), coordinatorRequestManager.coordinator());
            logInfo(message, response, currentTimeMs);
            break;
        case GROUP_AUTHORIZATION_FAILED:
            GroupAuthorizationException exception =
                GroupAuthorizationException.forGroupId(membershipManager().groupId());
            logger.error("{} failed due to group authorization failure: {}",
                heartbeatRequestName(), exception.getMessage());
            handleFatalFailure(error.exception(exception.getMessage()));
            break;
        case TOPIC_AUTHORIZATION_FAILED:
            logger.error("{} failed for member {} with state {} due to {}: {}", heartbeatRequestName(),
                membershipManager().memberId, membershipManager().state, error, errorMessage);
            // Propagate auth error received in HB so that it's returned on poll.
            // Member should stay in its current state so it can recover if ever the missing ACLs are added.
            backgroundEventHandler.add(new ErrorEvent(error.exception()));
            break;
        case INVALID_REQUEST:
        case GROUP_MAX_SIZE_REACHED:
        case UNSUPPORTED_ASSIGNOR:
            logger.error("{} failed due to {}: {}", heartbeatRequestName(), error, errorMessage);
            handleFatalFailure(error.exception(errorMessage));
            break;
        case FENCED_MEMBER_EPOCH:
            message = String.format("%s failed for member %s because epoch %s is fenced.",
                heartbeatRequestName(), membershipManager().memberId(), membershipManager().memberEpoch());
            logInfo(message, response, currentTimeMs);
            membershipManager().transitionToFenced();
            // Skip backoff so that a next HB to rejoin is sent as soon as the fenced member releases its assignment
            heartbeatRequestState.reset();
            break;
        case UNKNOWN_MEMBER_ID:
            message = String.format("%s failed because member %s is unknown.",
                heartbeatRequestName(), membershipManager().memberId());
            logInfo(message, response, currentTimeMs);
            membershipManager().transitionToFenced();
            // Skip backoff so that a next HB to rejoin is sent as soon as the fenced member releases its assignment
            heartbeatRequestState.reset();
            break;
        case INVALID_REGULAR_EXPRESSION:
            logger.error("{} failed due to {}: {}", heartbeatRequestName(), error, errorMessage);
            handleFatalFailure(error.exception("Invalid RE2J SubscriptionPattern provided in the call to " +
                "subscribe. " + errorMessage));
            break;
        default:
            if (!handleSpecificExceptionInResponse(response, currentTimeMs)) {
                // If the manager receives an unknown error - there could be a bug in the code or a new error code
                logger.error("{} failed due to unexpected error {}: {}", heartbeatRequestName(), error, errorMessage);
                handleFatalFailure(error.exception(errorMessage));
            }
            break;
    }
    // Notify the group manager about the failure after all errors have been handled and propagated.
    membershipManager().onHeartbeatFailure(false);
}
/**
 * Logs an error-path message at INFO level together with the remaining backoff and
 * the error message extracted from the response.
 */
protected void logInfo(final String message, final R response, final long currentTimeMs) {
    logger.info("{} in {}ms: {}",
        message,
        heartbeatRequestState.remainingBackoffMs(currentTimeMs),
        errorMessageForResponse(response));
}

/**
 * Propagates the fatal error to the application thread and transitions the member
 * to a fatal state via the membership manager.
 */
protected void handleFatalFailure(Throwable error) {
    backgroundEventHandler.add(new ErrorEvent(error));
    membershipManager().transitionToFatal();
}

/**
 * Error handling specific failure to a group type when sending the request
 * and no response has been received. Default implementation handles nothing;
 * subclasses override to intercept protocol-specific errors.
 *
 * @param exception The exception thrown building the request
 * @return true if the error was handled, else false
 */
public boolean handleSpecificFailure(Throwable exception) {
    return false;
}

/**
 * Error handling specific response exception to a group type. Default implementation
 * handles nothing; subclasses override to intercept protocol-specific error codes.
 *
 * @param response The heartbeat response
 * @param currentTimeMs Current time
 * @return true if the error was handled, else false
 */
public boolean handleSpecificExceptionInResponse(final R response, final long currentTimeMs) {
    return false;
}
/**
 * Resets the heartbeat state, so the next request is rebuilt from the member's
 * current state. Implemented by the protocol-specific subclass.
 */
public abstract void resetHeartbeatState();

/**
 * Builds a heartbeat request using the heartbeat state to follow the protocol faithfully.
 *
 * @return The heartbeat request
 */
public abstract NetworkClientDelegate.UnsentRequest buildHeartbeatRequest();

/**
 * Returns the heartbeat RPC request name to be used for logging.
 *
 * @return The heartbeat RPC request name
 */
public abstract String heartbeatRequestName();

/**
 * Returns the error for the response.
 *
 * @param response The heartbeat response
 * @return The error {@link Errors}
 */
public abstract Errors errorForResponse(R response);

/**
 * Returns the error message for the response.
 *
 * @param response The heartbeat response
 * @return The error message
 */
public abstract String errorMessageForResponse(R response);

/**
 * Returns the heartbeat interval for the response.
 *
 * @param response The heartbeat response
 * @return The heartbeat interval
 */
public abstract long heartbeatIntervalForResponse(R response);
}
| AbstractHeartbeatRequestManager |
java | hibernate__hibernate-orm | hibernate-spatial/src/main/java/org/hibernate/spatial/contributor/SpatialFunctionContributor.java | {
"start": 254,
"end": 759
} | class ____ implements FunctionContributor {
/**
 * Resolves the dialect-specific spatial contributor from the service registry and,
 * when one is available, delegates function registration to it.
 */
@Override
public void contributeFunctions(FunctionContributions functionContributions) {
    final ContributorImplementor implementor =
        ContributorResolver.resolveSpatialtypeContributorImplementor(
            functionContributions.getServiceRegistry());
    if (implementor == null) {
        // No spatial support for this dialect; nothing to contribute.
        return;
    }
    implementor.contributeFunctions(functionContributions);
}

/**
 * Ordering value (200) relative to other {@code FunctionContributor}s.
 */
@Override
public int ordinal() {
    return 200;
}
}
| SpatialFunctionContributor |
java | quarkusio__quarkus | extensions/funqy/funqy-amazon-lambda/runtime/src/main/java/io/quarkus/funqy/lambda/model/cloudevents/CloudEventDataV1.java | {
"start": 141,
"end": 589
} | class ____ implements CloudEventData {
// Raw payload bytes; UTF-8 encoded when constructed from a String. May be null.
private final byte[] data;

/**
 * Wraps a String payload, encoding it as UTF-8; a null input yields null data.
 */
public CloudEventDataV1(final String data) {
    this.data = (data == null) ? null : data.getBytes(StandardCharsets.UTF_8);
}

/**
 * Wraps an existing byte array (stored as-is, without copying).
 */
public CloudEventDataV1(final byte[] data) {
    this.data = data;
}

@Override
public byte[] toBytes() {
    return data;
}
}
| CloudEventDataV1 |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java | {
"start": 6718,
"end": 22264
} | class ____ {
private static final Logger LOG =
    LoggerFactory.getLogger(FederationStateStoreFacade.class);

// Lazily created singleton; volatile for double-checked locking (see getInstanceInternal).
private static volatile FederationStateStoreFacade facade;

// Shared PRNG seeded once at class load. NOTE(review): no usage visible in this
// chunk — presumably used elsewhere in the class for random selection; confirm.
private static Random rand = new Random(System.currentTimeMillis());

// Mutable so reinitialize() can swap them for testing.
private FederationStateStore stateStore;
private Configuration conf;
private SubClusterResolver subclusterResolver;
private FederationCache federationCache;

/**
 * Private constructor: instances are obtained only via {@link #getInstance()}.
 */
private FederationStateStoreFacade(Configuration conf) {
    initializeFacadeInternal(conf);
}
/**
 * Creates and initializes the state store client (wrapped in a retry proxy), the
 * sub-cluster resolver, and the federation cache from configuration. Any
 * YarnException during setup is rethrown as a RuntimeException since the facade
 * cannot function without these components.
 */
private void initializeFacadeInternal(Configuration config) {
    this.conf = config;
    try {
        // State store client is wrapped with the retry policy from createRetryPolicy.
        this.stateStore = (FederationStateStore) createRetryInstance(this.conf,
            YarnConfiguration.FEDERATION_STATESTORE_CLIENT_CLASS,
            YarnConfiguration.DEFAULT_FEDERATION_STATESTORE_CLIENT_CLASS,
            FederationStateStore.class, createRetryPolicy(conf));
        this.stateStore.init(conf);
        this.subclusterResolver = createInstance(conf,
            YarnConfiguration.FEDERATION_CLUSTER_RESOLVER_CLASS,
            YarnConfiguration.DEFAULT_FEDERATION_CLUSTER_RESOLVER_CLASS,
            SubClusterResolver.class);
        this.subclusterResolver.load();
        // We check the configuration of Cache,
        // if the configuration is null, set it to FederationJCache
        this.federationCache = createInstance(conf,
            YarnConfiguration.FEDERATION_FACADE_CACHE_CLASS,
            YarnConfiguration.DEFAULT_FEDERATION_FACADE_CACHE_CLASS,
            FederationCache.class);
        this.federationCache.initCache(config, stateStore);
    } catch (YarnException ex) {
        LOG.error("Failed to initialize the FederationStateStoreFacade object", ex);
        throw new RuntimeException(ex);
    }
}
/**
 * Delete and re-initialize the cache, to force it to use the given
 * configuration.
 *
 * @param store the {@link FederationStateStore} instance to reinitialize with
 * @param config the updated configuration to reinitialize with
 */
@VisibleForTesting
public synchronized void reinitialize(FederationStateStore store,
    Configuration config) {
    this.conf = config;
    this.stateStore = store;
    // Drop all cached entries before re-initializing against the new store.
    federationCache.clearCache();
    federationCache.initCache(config, stateStore);
}
/**
 * Create a RetryPolicy for {@code FederationStateStoreFacade}. In case of
 * failure, it retries for:
 * <ul>
 * <li>{@code FederationStateStoreRetriableException}</li>
 * <li>{@code CacheLoaderException}</li>
 * </ul>
 * (plus {@code PoolInitializationException}); all other exceptions fail immediately.
 *
 * @param conf the updated configuration
 * @return the RetryPolicy for FederationStateStoreFacade
 */
public static RetryPolicy createRetryPolicy(Configuration conf) {
    // Exponential backoff sized by the client failover settings.
    int maxRetries = conf.getInt(YarnConfiguration.CLIENT_FAILOVER_RETRIES, Integer.SIZE);
    long sleepBaseMs = conf.getLong(YarnConfiguration.CLIENT_FAILOVER_SLEEPTIME_BASE_MS,
        YarnConfiguration.DEFAULT_RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS);
    RetryPolicy basePolicy = RetryPolicies.exponentialBackoffRetry(
        maxRetries, sleepBaseMs, TimeUnit.MILLISECONDS);
    // Retry only the exception types known to be transient.
    Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap = new HashMap<>();
    exceptionToPolicyMap.put(FederationStateStoreRetriableException.class, basePolicy);
    exceptionToPolicyMap.put(CacheLoaderException.class, basePolicy);
    exceptionToPolicyMap.put(PoolInitializationException.class, basePolicy);
    return RetryPolicies.retryByException(
        RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);
}
/**
 * Returns the singleton instance of the FederationStateStoreFacade object.
 *
 * @return the singleton {@link FederationStateStoreFacade} instance
 */
public static FederationStateStoreFacade getInstance() {
    return getInstanceInternal(new Configuration());
}

/**
 * Returns the singleton instance of the FederationStateStoreFacade object.
 *
 * @param conf configuration.
 * @return the singleton {@link FederationStateStoreFacade} instance
 */
public static FederationStateStoreFacade getInstance(Configuration conf) {
    return getInstanceInternal(conf);
}

/**
 * Returns the singleton instance of the FederationStateStoreFacade object.
 * Note: the supplied configuration is only used when the singleton is first created;
 * subsequent calls return the existing instance regardless of conf.
 *
 * @param conf configuration.
 * @return the singleton {@link FederationStateStoreFacade} instance
 */
private static FederationStateStoreFacade getInstanceInternal(Configuration conf){
    if (facade != null) {
        return facade;
    }
    generateStateStoreFacade(conf);
    return facade;
}

/**
 * Generate the singleton instance of the FederationStateStoreFacade object.
 * Uses double-checked locking; correctness relies on the {@code facade} field
 * being declared volatile. A null conf falls back to a fresh Configuration.
 *
 * @param conf configuration.
 */
private static void generateStateStoreFacade(Configuration conf){
    if (facade == null) {
        synchronized (FederationStateStoreFacade.class) {
            if (facade == null) {
                Configuration yarnConf = new Configuration();
                if (conf != null) {
                    yarnConf = conf;
                }
                facade = new FederationStateStoreFacade(yarnConf);
            }
        }
    }
}
/**
 * Returns the {@link SubClusterInfo} for the specified {@link SubClusterId}.
 *
 * @param subClusterId the identifier of the sub-cluster
 * @return the sub cluster information, or
 *         {@code null} if there is no mapping for the subClusterId
 * @throws YarnException if the call to the state store is unsuccessful
 */
public SubClusterInfo getSubCluster(final SubClusterId subClusterId)
    throws YarnException {
    if (federationCache.isCachingEnabled()) {
        // Served from the full (inactive-included) sub-cluster cache.
        return getSubClusters(false).get(subClusterId);
    } else {
        GetSubClusterInfoResponse response = stateStore
            .getSubCluster(GetSubClusterInfoRequest.newInstance(subClusterId));
        if (response == null) {
            return null;
        } else {
            return response.getSubClusterInfo();
        }
    }
}

/**
 * Updates the cache with the central {@link FederationStateStore} and returns
 * the {@link SubClusterInfo} for the specified {@link SubClusterId}.
 *
 * @param subClusterId the identifier of the sub-cluster
 * @param flushCache flag to indicate if the cache should be flushed or not
 * @return the sub cluster information
 * @throws YarnException if the call to the state store is unsuccessful
 */
public SubClusterInfo getSubCluster(final SubClusterId subClusterId,
    final boolean flushCache) throws YarnException {
    if (flushCache && federationCache.isCachingEnabled()) {
        LOG.info("Flushing subClusters from cache and rehydrating from store,"
            + " most likely on account of RM failover.");
        // NOTE(review): getSubClusters(boolean, boolean) passes flushCache to
        // removeSubCluster, while this method hard-codes false — confirm intent.
        federationCache.removeSubCluster(false);
    }
    return getSubCluster(subClusterId);
}
/**
 * Returns the {@link SubClusterInfo} of all active sub cluster(s).
 *
 * @param filterInactiveSubClusters whether to filter out inactive
 *          sub-clusters
 * @return the information of all active sub cluster(s)
 * @throws YarnException if the call to the state store is unsuccessful
 */
public Map<SubClusterId, SubClusterInfo> getSubClusters(final boolean filterInactiveSubClusters)
    throws YarnException {
    try {
        if (federationCache.isCachingEnabled()) {
            return federationCache.getSubClusters(filterInactiveSubClusters);
        } else {
            GetSubClustersInfoRequest request =
                GetSubClustersInfoRequest.newInstance(filterInactiveSubClusters);
            return buildSubClusterInfoMap(stateStore.getSubClusters(request));
        }
    } catch (Throwable ex) {
        // Wrap any failure (including cache-layer errors) in a YarnException.
        throw new YarnException(ex);
    }
}

/**
 * Updates the cache with the central {@link FederationStateStore} and returns
 * the {@link SubClusterInfo} of all active sub cluster(s).
 *
 * @param filterInactiveSubClusters whether to filter out inactive
 *          sub-clusters
 * @param flushCache flag to indicate if the cache should be flushed or not
 * @return the sub cluster information
 * @throws YarnException if the call to the state store is unsuccessful
 */
public Map<SubClusterId, SubClusterInfo> getSubClusters(
    final boolean filterInactiveSubClusters, final boolean flushCache)
    throws YarnException {
    if (flushCache && federationCache.isCachingEnabled()) {
        LOG.info("Flushing subClusters from cache and rehydrating from store.");
        federationCache.removeSubCluster(flushCache);
    }
    return getSubClusters(filterInactiveSubClusters);
}
/**
 * Returns the {@link SubClusterPolicyConfiguration} for the specified queue.
 *
 * @param queue the queue whose policy is required
 * @return the corresponding configured policy, or {@code null} if there is no
 *         mapping for the queue
 * @throws YarnException if the call to the state store is unsuccessful
 */
public SubClusterPolicyConfiguration getPolicyConfiguration(final String queue)
    throws YarnException {
    if (federationCache.isCachingEnabled()) {
        // Served from the cached map of all queue policies.
        return getPoliciesConfigurations().get(queue);
    } else {
        GetSubClusterPolicyConfigurationRequest request =
            GetSubClusterPolicyConfigurationRequest.newInstance(queue);
        GetSubClusterPolicyConfigurationResponse response =
            stateStore.getPolicyConfiguration(request);
        if (response == null) {
            return null;
        } else {
            return response.getPolicyConfiguration();
        }
    }
}

/**
 * Set a policy configuration into the state store.
 * Note: writes go straight to the store; the cache is not updated here.
 *
 * @param policyConf the policy configuration to set
 * @throws YarnException if the request is invalid/fails
 */
public void setPolicyConfiguration(SubClusterPolicyConfiguration policyConf)
    throws YarnException {
    stateStore.setPolicyConfiguration(
        SetSubClusterPolicyConfigurationRequest.newInstance(policyConf));
}
/**
 * Get the policies that is represented as
 * {@link SubClusterPolicyConfiguration} for all currently active queues in
 * the system.
 *
 * @return the policies for all currently active queues in the system
 * @throws YarnException if the call to the state store is unsuccessful
 */
public Map<String, SubClusterPolicyConfiguration> getPoliciesConfigurations()
    throws YarnException {
    try {
        if (federationCache.isCachingEnabled()) {
            return federationCache.getPoliciesConfigurations();
        } else {
            GetSubClusterPoliciesConfigurationsRequest request =
                GetSubClusterPoliciesConfigurationsRequest.newInstance();
            return buildPolicyConfigMap(stateStore.getPoliciesConfigurations(request));
        }
    } catch (Throwable ex) {
        // Wrap any failure (including cache-layer errors) in a YarnException.
        throw new YarnException(ex);
    }
}
/**
 * Adds the home {@link SubClusterId} for the specified {@link ApplicationId}.
 *
 * @param appHomeSubCluster the mapping of the application to it's home
 *          sub-cluster
 * @return the stored Subcluster from StateStore
 * @throws YarnException if the call to the state store is unsuccessful
 */
public SubClusterId addApplicationHomeSubCluster(
    ApplicationHomeSubCluster appHomeSubCluster) throws YarnException {
    AddApplicationHomeSubClusterResponse response =
        stateStore.addApplicationHomeSubCluster(
            AddApplicationHomeSubClusterRequest.newInstance(appHomeSubCluster));
    // The store may return a different home sub-cluster than requested
    // (e.g. if a mapping already existed), so return the stored value.
    return response.getHomeSubCluster();
}

/**
 * Updates the home {@link SubClusterId} for the specified
 * {@link ApplicationId}.
 *
 * @param appHomeSubCluster the mapping of the application to it's home
 *          sub-cluster
 * @throws YarnException if the call to the state store is unsuccessful
 */
public void updateApplicationHomeSubCluster(
    ApplicationHomeSubCluster appHomeSubCluster) throws YarnException {
    stateStore.updateApplicationHomeSubCluster(
        UpdateApplicationHomeSubClusterRequest.newInstance(appHomeSubCluster));
}

/**
 * Returns the home {@link SubClusterId} for the specified
 * {@link ApplicationId}.
 *
 * @param appId the identifier of the application
 * @return the home sub cluster identifier
 * @throws YarnException if the call to the state store is unsuccessful
 */
public SubClusterId getApplicationHomeSubCluster(ApplicationId appId)
    throws YarnException {
    try {
        if (federationCache.isCachingEnabled()) {
            return federationCache.getApplicationHomeSubCluster(appId);
        } else {
            GetApplicationHomeSubClusterResponse response = stateStore.getApplicationHomeSubCluster(
                GetApplicationHomeSubClusterRequest.newInstance(appId));
            return response.getApplicationHomeSubCluster().getHomeSubCluster();
        }
    } catch (Throwable ex) {
        // Wrap any failure (including cache-layer errors) in a YarnException.
        throw new YarnException(ex);
    }
}
/**
* Get the singleton instance of SubClusterResolver.
*
* @return SubClusterResolver instance
*/
public SubClusterResolver getSubClusterResolver() {
return this.subclusterResolver;
}
/**
* Get the configuration.
*
* @return configuration object
*/
public Configuration getConf() {
return this.conf;
}
/**
* Adds the home {@link SubClusterId} for the specified {@link ReservationId}.
*
* @param appHomeSubCluster the mapping of the reservation to it's home
* sub-cluster
* @return the stored subCluster from StateStore
* @throws YarnException if the call to the state store is unsuccessful
*/
public SubClusterId addReservationHomeSubCluster(ReservationHomeSubCluster appHomeSubCluster)
throws YarnException {
AddReservationHomeSubClusterResponse response = stateStore.addReservationHomeSubCluster(
AddReservationHomeSubClusterRequest.newInstance(appHomeSubCluster));
return response.getHomeSubCluster();
}
/**
* Returns the home {@link SubClusterId} for the specified {@link ReservationId}.
*
* @param reservationId the identifier of the reservation
* @return the home subCluster identifier
* @throws YarnException if the call to the state store is unsuccessful
*/
public SubClusterId getReservationHomeSubCluster(ReservationId reservationId)
throws YarnException {
GetReservationHomeSubClusterResponse response = stateStore.getReservationHomeSubCluster(
GetReservationHomeSubClusterRequest.newInstance(reservationId));
return response.getReservationHomeSubCluster().getHomeSubCluster();
}
/**
* Updates the home {@link SubClusterId} for the specified
* {@link ReservationId}.
*
* @param appHomeSubCluster the mapping of the reservation to it's home
* sub-cluster
* @throws YarnException if the call to the state store is unsuccessful
*/
public void updateReservationHomeSubCluster(ReservationHomeSubCluster appHomeSubCluster)
throws YarnException {
UpdateReservationHomeSubClusterRequest request =
UpdateReservationHomeSubClusterRequest.newInstance(appHomeSubCluster);
stateStore.updateReservationHomeSubCluster(request);
}
/**
* Delete the home {@link SubClusterId} for the specified
* {@link ReservationId}.
*
* @param reservationId the identifier of the reservation
* @throws YarnException if the call to the state store is unsuccessful
*/
public void deleteReservationHomeSubCluster(ReservationId reservationId) throws YarnException {
DeleteReservationHomeSubClusterRequest request =
DeleteReservationHomeSubClusterRequest.newInstance(reservationId);
stateStore.deleteReservationHomeSubCluster(request);
}
/**
* Helper method to create instances of Object using the | FederationStateStoreFacade |
java | grpc__grpc-java | xds/src/main/java/io/grpc/xds/internal/security/certprovider/CertProviderServerSslContextProvider.java | {
"start": 1605,
"end": 3552
} | class ____ extends CertProviderSslContextProvider {
CertProviderServerSslContextProvider(
Node node,
@Nullable Map<String, CertificateProviderInfo> certProviders,
CommonTlsContext.CertificateProviderInstance certInstance,
CommonTlsContext.CertificateProviderInstance rootCertInstance,
CertificateValidationContext staticCertValidationContext,
DownstreamTlsContext downstreamTlsContext,
CertificateProviderStore certificateProviderStore) {
super(
node,
certProviders,
checkNotNull(certInstance, "Server SSL requires certInstance"),
rootCertInstance,
staticCertValidationContext,
downstreamTlsContext,
certificateProviderStore);
}
@Override
protected final AbstractMap.SimpleImmutableEntry<SslContextBuilder, X509TrustManager>
getSslContextBuilderAndTrustManager(
CertificateValidationContext certificateValidationContextdationContext)
throws CertStoreException, CertificateException, IOException {
SslContextBuilder sslContextBuilder = SslContextBuilder.forServer(savedKey, savedCertChain);
XdsTrustManagerFactory trustManagerFactory = null;
if (isMtls() && savedSpiffeTrustMap != null) {
trustManagerFactory = new XdsTrustManagerFactory(
savedSpiffeTrustMap,
certificateValidationContextdationContext, false);
} else if (isMtls()) {
trustManagerFactory = new XdsTrustManagerFactory(
savedTrustedRoots.toArray(new X509Certificate[0]),
certificateValidationContextdationContext, false);
}
setClientAuthValues(sslContextBuilder, trustManagerFactory);
sslContextBuilder = GrpcSslContexts.configure(sslContextBuilder);
// TrustManager in the below return value is not used on the server side, so setting it to null
return new AbstractMap.SimpleImmutableEntry<>(sslContextBuilder, null);
}
}
| CertProviderServerSslContextProvider |
java | lettuce-io__lettuce-core | src/test/java/io/lettuce/core/dynamic/RedisCommandsIntegrationTests.java | {
"start": 5753,
"end": 5834
} | interface ____ extends Commands {
String gat(String key);
}
}
| WithTypo |
java | grpc__grpc-java | examples/src/main/java/io/grpc/examples/header/CustomHeaderServer.java | {
"start": 1233,
"end": 2963
} | class ____ {
private static final Logger logger = Logger.getLogger(CustomHeaderServer.class.getName());
/* The port on which the server should run */
private static final int PORT = 50051;
private Server server;
private void start() throws IOException {
server = Grpc.newServerBuilderForPort(PORT, InsecureServerCredentials.create())
.addService(ServerInterceptors.intercept(new GreeterImpl(), new HeaderServerInterceptor()))
.build()
.start();
logger.info("Server started, listening on " + PORT);
Runtime.getRuntime().addShutdownHook(new Thread() {
@Override
public void run() {
// Use stderr here since the logger may have been reset by its JVM shutdown hook.
System.err.println("*** shutting down gRPC server since JVM is shutting down");
try {
CustomHeaderServer.this.stop();
} catch (InterruptedException e) {
e.printStackTrace(System.err);
}
System.err.println("*** server shut down");
}
});
}
private void stop() throws InterruptedException {
if (server != null) {
server.shutdown().awaitTermination(30, TimeUnit.SECONDS);
}
}
/**
* Await termination on the main thread since the grpc library uses daemon threads.
*/
private void blockUntilShutdown() throws InterruptedException {
if (server != null) {
server.awaitTermination();
}
}
/**
* Main launches the server from the command line.
*/
public static void main(String[] args) throws IOException, InterruptedException {
final CustomHeaderServer server = new CustomHeaderServer();
server.start();
server.blockUntilShutdown();
}
private static | CustomHeaderServer |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/internal/model/common/DateFormatValidatorFactoryTest.java | {
"start": 925,
"end": 6661
} | class ____ {
private static final String JAVA_LANG_STRING = "java.lang.String";
private TypeMirror voidTypeMirror = new TypeMirror() {
@Override
public List<? extends AnnotationMirror> getAnnotationMirrors() {
return null;
}
@Override
public <A extends Annotation> A getAnnotation(Class<A> annotationType) {
return null;
}
@Override
public <A extends Annotation> A[] getAnnotationsByType(Class<A> annotationType) {
return null;
}
@Override
public TypeKind getKind() {
return TypeKind.VOID;
}
@Override
public <R, P> R accept(TypeVisitor<R, P> v, P p) {
return null;
}
};
@Test
public void testUnsupportedTypes() {
Type sourceType = typeWithFQN( JAVA_LANG_STRING );
Type targetType = typeWithFQN( JAVA_LANG_STRING );
DateFormatValidator dateFormatValidator = DateFormatValidatorFactory.forTypes( sourceType, targetType );
assertThat( dateFormatValidator.validate( "XXXX" ).isValid() ).isTrue();
}
@Test
public void testJavaUtilDateValidator() {
Type sourceType = typeWithFQN( "java.util.Date" );
Type targetType = typeWithFQN( JAVA_LANG_STRING );
assertInvalidDateFormat( sourceType, targetType );
assertInvalidDateFormat( targetType, sourceType );
assertValidDateFormat( sourceType, targetType );
assertValidDateFormat( targetType, sourceType );
}
@Test
public void testJodaTimeValidator() {
Type targetType = typeWithFQN( JAVA_LANG_STRING );
Type sourceType = typeWithFQN( JodaTimeConstants.DATE_TIME_FQN );
assertInvalidDateFormat( sourceType, targetType );
assertInvalidDateFormat( targetType, sourceType );
assertValidDateFormat( sourceType, targetType );
assertValidDateFormat( targetType, sourceType );
sourceType = typeWithFQN( JodaTimeConstants.LOCAL_DATE_FQN );
assertInvalidDateFormat( sourceType, targetType );
assertInvalidDateFormat( targetType, sourceType );
assertValidDateFormat( sourceType, targetType );
assertValidDateFormat( targetType, sourceType );
sourceType = typeWithFQN( JodaTimeConstants.LOCAL_DATE_TIME_FQN );
assertInvalidDateFormat( sourceType, targetType );
assertInvalidDateFormat( targetType, sourceType );
assertValidDateFormat( sourceType, targetType );
assertValidDateFormat( targetType, sourceType );
sourceType = typeWithFQN( JodaTimeConstants.LOCAL_TIME_FQN );
assertInvalidDateFormat( sourceType, targetType );
assertInvalidDateFormat( targetType, sourceType );
assertValidDateFormat( sourceType, targetType );
assertValidDateFormat( targetType, sourceType );
}
@Test
public void testJavaTimeValidator() {
Type targetType = typeWithFQN( JAVA_LANG_STRING );
Type sourceType = typeWithFQN( ZonedDateTime.class.getCanonicalName() );
assertInvalidDateFormat( sourceType, targetType );
assertInvalidDateFormat( targetType, sourceType );
assertValidDateFormat( sourceType, targetType );
assertValidDateFormat( targetType, sourceType );
sourceType = typeWithFQN( LocalDate.class.getCanonicalName() );
assertInvalidDateFormat( sourceType, targetType );
assertInvalidDateFormat( targetType, sourceType );
assertValidDateFormat( sourceType, targetType );
assertValidDateFormat( targetType, sourceType );
sourceType = typeWithFQN( LocalDateTime.class.getCanonicalName() );
assertInvalidDateFormat( sourceType, targetType );
assertInvalidDateFormat( targetType, sourceType );
assertValidDateFormat( sourceType, targetType );
assertValidDateFormat( targetType, sourceType );
sourceType = typeWithFQN( LocalTime.class.getCanonicalName() );
assertInvalidDateFormat( sourceType, targetType );
assertInvalidDateFormat( targetType, sourceType );
assertValidDateFormat( sourceType, targetType );
assertValidDateFormat( targetType, sourceType );
}
private void assertInvalidDateFormat(Type sourceType, Type targetType) {
DateFormatValidator dateFormatValidator = DateFormatValidatorFactory.forTypes( sourceType, targetType );
DateFormatValidationResult result = dateFormatValidator.validate( "qwertz" );
assertThat( result.isValid() ).isFalse();
}
private void assertValidDateFormat(Type sourceType, Type targetType) {
DateFormatValidator dateFormatValidator = DateFormatValidatorFactory.forTypes( sourceType, targetType );
DateFormatValidationResult result = dateFormatValidator.validate( "YYYY" );
assertThat( result.isValid() ).isTrue();
}
private Type typeWithFQN(String fullQualifiedName) {
return new Type(
null,
null,
null,
null,
voidTypeMirror,
null,
null,
null,
null,
null,
null,
fullQualifiedName,
false,
false,
false,
false,
false,
false,
new HashMap<>( ),
new HashMap<>( ),
false,
false, false
);
}
}
| DateFormatValidatorFactoryTest |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/state/restore/FullSnapshotRestoreOperation.java | {
"start": 5792,
"end": 11088
} | class ____<K>
implements RestoreOperation<ThrowingIterator<SavepointRestoreResult>> {
private static final Logger LOG = LoggerFactory.getLogger(FullSnapshotRestoreOperation.class);
private final KeyGroupRange keyGroupRange;
private final ClassLoader userCodeClassLoader;
private final Collection<KeyedStateHandle> restoreStateHandles;
private final StateSerializerProvider<K> keySerializerProvider;
private boolean isKeySerializerCompatibilityChecked;
public FullSnapshotRestoreOperation(
KeyGroupRange keyGroupRange,
ClassLoader userCodeClassLoader,
Collection<KeyedStateHandle> restoreStateHandles,
StateSerializerProvider<K> keySerializerProvider) {
this.keyGroupRange = keyGroupRange;
this.userCodeClassLoader = userCodeClassLoader;
this.restoreStateHandles =
restoreStateHandles.stream().filter(Objects::nonNull).collect(Collectors.toList());
this.keySerializerProvider = keySerializerProvider;
}
@Override
public ThrowingIterator<SavepointRestoreResult> restore()
throws IOException, StateMigrationException {
return new ThrowingIterator<SavepointRestoreResult>() {
private final Iterator<KeyedStateHandle> keyedStateHandlesIterator =
restoreStateHandles.iterator();
@Override
public boolean hasNext() {
return keyedStateHandlesIterator.hasNext();
}
@Override
public SavepointRestoreResult next() throws IOException, StateMigrationException {
KeyedStateHandle keyedStateHandle = keyedStateHandlesIterator.next();
if (!(keyedStateHandle instanceof KeyGroupsStateHandle)) {
throw unexpectedStateHandleException(
KeyGroupsStateHandle.class, keyedStateHandle.getClass());
}
KeyGroupsStateHandle groupsStateHandle = (KeyGroupsStateHandle) keyedStateHandle;
return restoreKeyGroupsInStateHandle(groupsStateHandle);
}
@Override
public void close() {}
};
}
private SavepointRestoreResult restoreKeyGroupsInStateHandle(
@Nonnull KeyGroupsStateHandle keyedStateHandle)
throws IOException, StateMigrationException {
FSDataInputStream currentStateHandleInStream = keyedStateHandle.openInputStream();
KeyedBackendSerializationProxy<K> serializationProxy =
readMetaData(new DataInputViewStreamWrapper(currentStateHandleInStream));
KeyGroupsIterator groupsIterator =
new KeyGroupsIterator(
keyGroupRange,
keyedStateHandle,
currentStateHandleInStream,
serializationProxy.isUsingKeyGroupCompression()
? SnappyStreamCompressionDecorator.INSTANCE
: UncompressedStreamCompressionDecorator.INSTANCE);
return new SavepointRestoreResult(
serializationProxy.getStateMetaInfoSnapshots(), groupsIterator);
}
private KeyedBackendSerializationProxy<K> readMetaData(DataInputView dataInputView)
throws IOException, StateMigrationException {
// isSerializerPresenceRequired flag is set to false, since for the RocksDB state backend,
// deserialization of state happens lazily during runtime; we depend on the fact
// that the new serializer for states could be compatible, and therefore the restore can
// continue
// without old serializers required to be present.
KeyedBackendSerializationProxy<K> serializationProxy =
new KeyedBackendSerializationProxy<>(userCodeClassLoader);
serializationProxy.read(dataInputView);
if (!isKeySerializerCompatibilityChecked) {
// fetch current serializer now because if it is incompatible, we can't access
// it anymore to improve the error message
TypeSerializer<K> currentSerializer = keySerializerProvider.currentSchemaSerializer();
// check for key serializer compatibility; this also reconfigures the
// key serializer to be compatible, if it is required and is possible
TypeSerializerSchemaCompatibility<K> keySerializerSchemaCompat =
keySerializerProvider.setPreviousSerializerSnapshotForRestoredState(
serializationProxy.getKeySerializerSnapshot());
if (keySerializerSchemaCompat.isCompatibleAfterMigration()
|| keySerializerSchemaCompat.isIncompatible()) {
throw new StateMigrationException(
"The new key serializer ("
+ currentSerializer
+ ") must be compatible with the previous key serializer ("
+ keySerializerProvider.previousSchemaSerializer()
+ ").");
}
isKeySerializerCompatibilityChecked = true;
}
return serializationProxy;
}
private static | FullSnapshotRestoreOperation |
java | apache__kafka | clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java | {
"start": 20067,
"end": 20359
} | class ____ implement
* @return The list of configured instances
*/
public <T> List<T> getConfiguredInstances(String key, Class<T> t) {
return getConfiguredInstances(key, t, Collections.emptyMap());
}
/**
* Get a list of configured instances of the given | should |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestProxyCA.java | {
"start": 2514,
"end": 24688
} | class ____ {
@Test
void testInit() throws Exception {
ProxyCA proxyCA = new ProxyCA();
assertNull(proxyCA.getCaCert());
assertNull(proxyCA.getCaKeyPair());
assertNull(proxyCA.getX509KeyManager());
assertNull(proxyCA.getHostnameVerifier());
proxyCA.init();
assertNotNull(proxyCA.getCaCert());
assertNotNull(proxyCA.getCaKeyPair());
assertNotNull(proxyCA.getX509KeyManager());
assertNotNull(proxyCA.getHostnameVerifier());
}
@Test
void testInit2Null() throws Exception {
ProxyCA proxyCA = new ProxyCA();
assertNull(proxyCA.getCaCert());
assertNull(proxyCA.getCaKeyPair());
assertNull(proxyCA.getX509KeyManager());
assertNull(proxyCA.getHostnameVerifier());
// null certificate and private key
proxyCA.init(null, null);
assertNotNull(proxyCA.getCaCert());
assertNotNull(proxyCA.getCaKeyPair());
assertNotNull(proxyCA.getX509KeyManager());
assertNotNull(proxyCA.getHostnameVerifier());
}
@Test
void testInit2Mismatch() throws Exception {
ProxyCA proxyCA = new ProxyCA();
assertNull(proxyCA.getCaCert());
assertNull(proxyCA.getCaKeyPair());
assertNull(proxyCA.getX509KeyManager());
assertNull(proxyCA.getHostnameVerifier());
// certificate and private key don't match
CertKeyPair pair1 = createCertAndKeyPair();
CertKeyPair pair2 = createCertAndKeyPair();
assertNotEquals(pair1.getCert(), pair2.getCert());
assertNotEquals(pair1.getKeyPair().getPrivate(),
pair2.getKeyPair().getPrivate());
assertNotEquals(pair1.getKeyPair().getPublic(),
pair2.getKeyPair().getPublic());
proxyCA.init(pair1.getCert(), pair2.getKeyPair().getPrivate());
assertNotNull(proxyCA.getCaCert());
assertNotNull(proxyCA.getCaKeyPair());
assertNotNull(proxyCA.getX509KeyManager());
assertNotNull(proxyCA.getHostnameVerifier());
assertNotEquals(proxyCA.getCaCert(), pair1.getCert());
assertNotEquals(proxyCA.getCaKeyPair().getPrivate(),
pair2.getKeyPair().getPrivate());
assertNotEquals(proxyCA.getCaKeyPair().getPublic(),
pair2.getKeyPair().getPublic());
}
@Test
void testInit2Invalid() throws Exception {
ProxyCA proxyCA = new ProxyCA();
assertNull(proxyCA.getCaCert());
assertNull(proxyCA.getCaKeyPair());
assertNull(proxyCA.getX509KeyManager());
assertNull(proxyCA.getHostnameVerifier());
// Invalid key - fail the verification
X509Certificate certificate = Mockito.mock(X509Certificate.class);
PrivateKey privateKey = Mockito.mock(PrivateKey.class);
try {
proxyCA.init(certificate, privateKey);
fail("Expected InvalidKeyException");
} catch (InvalidKeyException e) {
// expected
}
}
@Test
void testInit2() throws Exception {
ProxyCA proxyCA = new ProxyCA();
assertNull(proxyCA.getCaCert());
assertNull(proxyCA.getCaKeyPair());
assertNull(proxyCA.getX509KeyManager());
assertNull(proxyCA.getHostnameVerifier());
// certificate and private key do match
CertKeyPair pair = createCertAndKeyPair();
proxyCA.init(pair.getCert(), pair.getKeyPair().getPrivate());
assertEquals(pair.getCert(), proxyCA.getCaCert());
assertEquals(pair.getKeyPair().getPrivate(),
proxyCA.getCaKeyPair().getPrivate());
assertEquals(pair.getKeyPair().getPublic(),
proxyCA.getCaKeyPair().getPublic());
assertNotNull(proxyCA.getX509KeyManager());
assertNotNull(proxyCA.getHostnameVerifier());
}
@Test
void testCreateChildKeyStore() throws Exception {
ProxyCA proxyCA = new ProxyCA();
proxyCA.init();
ApplicationId appId =
ApplicationId.newInstance(System.currentTimeMillis(), 1);
byte[] keystoreBytes = proxyCA.createChildKeyStore(appId,
"password");
KeyStore keyStore = KeyStoreTestUtil.bytesToKeyStore(keystoreBytes,
"password");
assertEquals(1, keyStore.size());
Certificate[] certChain = keyStore.getCertificateChain("server");
assertEquals(2, certChain.length);
X509Certificate caCert = (X509Certificate) certChain[1];
X509Certificate cert = (X509Certificate) certChain[0];
// check child cert
assertEquals(caCert.getSubjectX500Principal().toString(),
cert.getIssuerDN().toString());
assertEquals(new X500Principal("CN=" + appId),
cert.getSubjectX500Principal());
assertFalse(cert.getSubjectX500Principal().toString().contains(","),
"Found multiple fields in X500 Principal, when there " +
"should have only been one: " + cert.getSubjectX500Principal());
assertEquals("SHA512withRSA", cert.getSigAlgName());
assertEquals(cert.getNotBefore(), cert.getNotAfter());
assertTrue(cert.getNotAfter().before(new Date()),
"Expected certificate to be expired but was not: " + cert.getNotAfter());
assertEquals(new X500Principal("CN=" + appId).toString(),
cert.getSubjectDN().toString());
Key privateKey = keyStore.getKey("server", "password".toCharArray());
assertEquals("RSA", privateKey.getAlgorithm());
assertEquals(-1, cert.getBasicConstraints());
// verify signature on child cert
PublicKey caPublicKey = caCert.getPublicKey();
cert.verify(caPublicKey);
// check CA cert
checkCACert(caCert);
assertEquals(proxyCA.getCaCert(), caCert);
// verify signature on CA cert
caCert.verify(caPublicKey);
// verify CA public key matches private key
PrivateKey caPrivateKey =
proxyCA.getX509KeyManager().getPrivateKey(null);
checkPrivatePublicKeys(caPrivateKey, caPublicKey);
assertEquals(proxyCA.getCaKeyPair().getPublic(), caPublicKey);
assertEquals(proxyCA.getCaKeyPair().getPrivate(), caPrivateKey);
}
@Test
void testGetChildTrustStore() throws Exception {
ProxyCA proxyCA = new ProxyCA();
proxyCA.init();
byte[] truststoreBytes = proxyCA.getChildTrustStore("password");
KeyStore truststore = KeyStoreTestUtil.bytesToKeyStore(truststoreBytes,
"password");
assertEquals(1, truststore.size());
X509Certificate caCert =
(X509Certificate) truststore.getCertificate("client");
// check CA cert
checkCACert(caCert);
assertEquals(proxyCA.getCaCert(), caCert);
// verify signature on CA cert
PublicKey caPublicKey = caCert.getPublicKey();
caCert.verify(caPublicKey);
// verify CA public key matches private key
PrivateKey caPrivateKey =
proxyCA.getX509KeyManager().getPrivateKey(null);
checkPrivatePublicKeys(caPrivateKey, caPublicKey);
assertEquals(proxyCA.getCaKeyPair().getPublic(), caPublicKey);
assertEquals(proxyCA.getCaKeyPair().getPrivate(), caPrivateKey);
}
@Test
void testGenerateKeyStorePassword() throws Exception {
// We can't possibly test every possible string, but we can at least verify
// a few things about a few of the generated strings as a sanity check
ProxyCA proxyCA = new ProxyCA();
proxyCA.init();
Set<String> passwords = new HashSet<>();
for (int i = 0; i < 5; i++) {
String password = proxyCA.generateKeyStorePassword();
assertEquals(16, password.length());
for (char c : password.toCharArray()) {
assertFalse(c < ' ', "Found character '" + c + "' in password '"
+ password + "' which is outside of the expected range");
assertFalse(c > 'z', "Found character '" + c + "' in password '"
+ password + "' which is outside of the expected range");
}
assertFalse(passwords.contains(password),
"Password " + password
+ " was generated twice, which is _extremely_ unlikely"
+ " and shouldn't practically happen: " + passwords);
passwords.add(password);
}
}
@Test
void testCreateTrustManagerDefaultTrustManager() throws Exception {
ProxyCA proxyCA = new ProxyCA();
proxyCA.init();
X509TrustManager defaultTrustManager = Mockito.mock(X509TrustManager.class);
proxyCA.setDefaultTrustManager(defaultTrustManager);
ApplicationId appId =
ApplicationId.newInstance(System.currentTimeMillis(), 1);
X509TrustManager trustManager = proxyCA.createTrustManager(appId);
Mockito.when(defaultTrustManager.getAcceptedIssuers()).thenReturn(
new X509Certificate[]{KeyStoreTestUtil.generateCertificate(
"CN=foo", KeyStoreTestUtil.generateKeyPair("RSA"), 30,
"SHA1withRSA")});
assertArrayEquals(defaultTrustManager.getAcceptedIssuers(),
trustManager.getAcceptedIssuers());
trustManager.checkClientTrusted(null, null);
}
@Test
void testCreateTrustManagerYarnCert() throws Exception {
ProxyCA proxyCA = new ProxyCA();
proxyCA.init();
X509TrustManager defaultTrustManager = Mockito.mock(X509TrustManager.class);
proxyCA.setDefaultTrustManager(defaultTrustManager);
ApplicationId appId =
ApplicationId.newInstance(System.currentTimeMillis(), 1);
X509TrustManager trustManager = proxyCA.createTrustManager(appId);
X509Certificate[] certChain = castCertificateArrayToX509CertificateArray(
KeyStoreTestUtil.bytesToKeyStore(
proxyCA.createChildKeyStore(appId, "password"), "password")
.getCertificateChain("server"));
trustManager.checkServerTrusted(certChain, "RSA");
Mockito.verify(defaultTrustManager, Mockito.times(0))
.checkServerTrusted(certChain, "RSA");
}
@Test
void testCreateTrustManagerWrongApp() throws Exception {
ProxyCA proxyCA = new ProxyCA();
proxyCA.init();
X509TrustManager defaultTrustManager = Mockito.mock(X509TrustManager.class);
proxyCA.setDefaultTrustManager(defaultTrustManager);
ApplicationId appId =
ApplicationId.newInstance(System.currentTimeMillis(), 1);
ApplicationId appId2 =
ApplicationId.newInstance(System.currentTimeMillis(), 2);
X509TrustManager trustManager = proxyCA.createTrustManager(appId);
X509Certificate[] certChain = castCertificateArrayToX509CertificateArray(
KeyStoreTestUtil.bytesToKeyStore(
proxyCA.createChildKeyStore(appId2, "password"), "password")
.getCertificateChain("server"));
try {
trustManager.checkServerTrusted(certChain, "RSA");
fail("Should have thrown a CertificateException, but did not");
} catch (CertificateException ce) {
assertEquals("Expected to find Subject X500 Principal with CN=" +
appId + " but found CN=" + appId2, ce.getMessage());
}
}
@Test
void testCreateTrustManagerWrongRM() throws Exception {
ProxyCA proxyCA = new ProxyCA();
proxyCA.init();
X509TrustManager defaultTrustManager = Mockito.mock(X509TrustManager.class);
proxyCA.setDefaultTrustManager(defaultTrustManager);
ApplicationId appId =
ApplicationId.newInstance(System.currentTimeMillis(), 1);
X509TrustManager trustManager = proxyCA.createTrustManager(appId);
ProxyCA proxyCA2 = new ProxyCA(); // Simulates another RM
proxyCA2.init();
X509Certificate[] certChain = castCertificateArrayToX509CertificateArray(
KeyStoreTestUtil.bytesToKeyStore(
proxyCA2.createChildKeyStore(appId, "password"), "password")
.getCertificateChain("server"));
Mockito.verify(defaultTrustManager, Mockito.times(0))
.checkServerTrusted(certChain, "RSA");
trustManager.checkServerTrusted(certChain, "RSA");
Mockito.verify(defaultTrustManager, Mockito.times(1))
.checkServerTrusted(certChain, "RSA");
}
@Test
void testCreateTrustManagerRealCert() throws Exception {
ProxyCA proxyCA = new ProxyCA();
proxyCA.init();
X509TrustManager defaultTrustManager = Mockito.mock(X509TrustManager.class);
proxyCA.setDefaultTrustManager(defaultTrustManager);
ApplicationId appId =
ApplicationId.newInstance(System.currentTimeMillis(), 1);
X509TrustManager trustManager = proxyCA.createTrustManager(appId);
// "real" cert
X509Certificate[]
certChain = new X509Certificate[]{
KeyStoreTestUtil.generateCertificate("CN=foo.com",
KeyStoreTestUtil.generateKeyPair("RSA"), 30, "SHA1withRSA")};
Mockito.verify(defaultTrustManager, Mockito.times(0))
.checkServerTrusted(certChain, "RSA");
trustManager.checkServerTrusted(certChain, "RSA");
Mockito.verify(defaultTrustManager, Mockito.times(1))
.checkServerTrusted(certChain, "RSA");
// "real" cert x2
certChain = new X509Certificate[]{
KeyStoreTestUtil.generateCertificate("CN=foo.com",
KeyStoreTestUtil.generateKeyPair("RSA"), 30, "SHA1withRSA"),
KeyStoreTestUtil.generateCertificate("CN=foo.com",
KeyStoreTestUtil.generateKeyPair("RSA"), 30, "SHA1withRSA")};
Mockito.verify(defaultTrustManager, Mockito.times(0))
.checkServerTrusted(certChain, "RSA");
trustManager.checkServerTrusted(certChain, "RSA");
Mockito.verify(defaultTrustManager, Mockito.times(1))
.checkServerTrusted(certChain, "RSA");
}
@Test
void testCreateTrustManagerExceptions() throws Exception {
ProxyCA proxyCA = new ProxyCA();
proxyCA.init();
X509TrustManager defaultTrustManager = Mockito.mock(X509TrustManager.class);
proxyCA.setDefaultTrustManager(defaultTrustManager);
ApplicationId appId =
ApplicationId.newInstance(System.currentTimeMillis(), 1);
X509TrustManager trustManager = proxyCA.createTrustManager(appId);
for (Exception e : new Exception[]{
new CertificateException(), new NoSuchAlgorithmException(),
new InvalidKeyException(), new SignatureException(),
new NoSuchProviderException()}) {
X509Certificate[] certChain = castCertificateArrayToX509CertificateArray(
KeyStoreTestUtil.bytesToKeyStore(
proxyCA.createChildKeyStore(appId, "password"), "password")
.getCertificateChain("server"));
X509Certificate cert = Mockito.spy(certChain[0]);
certChain[0] = cert;
// Throw e to simulate problems with verifying
Mockito.doThrow(e).when(certChain[0]).verify(Mockito.any());
Mockito.verify(defaultTrustManager, Mockito.times(0))
.checkServerTrusted(certChain, "RSA");
trustManager.checkServerTrusted(certChain, "RSA");
Mockito.verify(defaultTrustManager, Mockito.times(1))
.checkServerTrusted(certChain, "RSA");
}
}
@Test
void testCreateKeyManager() throws Exception {
ProxyCA proxyCA = new ProxyCA();
proxyCA.init();
X509KeyManager keyManager = proxyCA.getX509KeyManager();
assertArrayEquals(new String[]{"client"},
keyManager.getClientAliases(null, null));
assertEquals("client",
keyManager.chooseClientAlias(null, null, null));
assertNull(keyManager.getServerAliases(null, null));
assertNull(keyManager.chooseServerAlias(null, null, null));
byte[] truststoreBytes = proxyCA.getChildTrustStore("password");
KeyStore truststore = KeyStoreTestUtil.bytesToKeyStore(truststoreBytes,
"password");
assertEquals(1, truststore.size());
X509Certificate caCert =
(X509Certificate) truststore.getCertificate("client");
assertArrayEquals(new X509Certificate[]{caCert},
keyManager.getCertificateChain(null));
assertEquals(proxyCA.getCaCert(), caCert);
PrivateKey caPrivateKey = keyManager.getPrivateKey(null);
PublicKey caPublicKey = caCert.getPublicKey();
checkPrivatePublicKeys(caPrivateKey, caPublicKey);
assertEquals(proxyCA.getCaKeyPair().getPublic(), caPublicKey);
assertEquals(proxyCA.getCaKeyPair().getPrivate(), caPrivateKey);
}
@Test
void testCreateHostnameVerifier() throws Exception {
ProxyCA proxyCA = new ProxyCA();
proxyCA.init();
HostnameVerifier verifier = proxyCA.getHostnameVerifier();
SSLSession sslSession = Mockito.mock(SSLSession.class);
Mockito.when(sslSession.getPeerCertificates()).thenReturn(
KeyStoreTestUtil.bytesToKeyStore(
proxyCA.createChildKeyStore(
ApplicationId.newInstance(System.currentTimeMillis(), 1),
"password"), "password").getCertificateChain("server"));
assertTrue(verifier.verify("foo", sslSession));
}
@Test
void testCreateHostnameVerifierSSLPeerUnverifiedException()
throws Exception {
ProxyCA proxyCA = new ProxyCA();
proxyCA.init();
HostnameVerifier verifier = proxyCA.getHostnameVerifier();
SSLSession sslSession = Mockito.mock(SSLSession.class);
Mockito.when(sslSession.getPeerCertificates()).thenThrow(
new SSLPeerUnverifiedException(""));
assertFalse(verifier.verify("foo", sslSession));
}
@Test
void testCreateHostnameVerifierWrongRM() throws Exception {
ProxyCA proxyCA = new ProxyCA();
proxyCA.init();
HostnameVerifier verifier = proxyCA.getHostnameVerifier();
SSLSession sslSession = Mockito.mock(SSLSession.class);
ProxyCA proxyCA2 = new ProxyCA(); // Simulate another RM
proxyCA2.init();
Mockito.when(sslSession.getPeerCertificates()).thenReturn(
KeyStoreTestUtil.bytesToKeyStore(
proxyCA2.createChildKeyStore(
ApplicationId.newInstance(System.currentTimeMillis(), 1),
"password"), "password").getCertificateChain("server"));
assertFalse(verifier.verify("foo", sslSession));
}
@Test
void testCreateHostnameVerifierExceptions() throws Exception {
ProxyCA proxyCA = new ProxyCA();
proxyCA.init();
HostnameVerifier verifier = proxyCA.getHostnameVerifier();
for (Exception e : new Exception[]{
new CertificateException(), new NoSuchAlgorithmException(),
new InvalidKeyException(), new SignatureException(),
new NoSuchProviderException()}) {
SSLSession sslSession = Mockito.mock(SSLSession.class);
Mockito.when(sslSession.getPeerCertificates()).thenAnswer(
new Answer<Certificate[]>() {
@Override
public Certificate[] answer(InvocationOnMock invocation)
throws Throwable {
Certificate[] certChain = KeyStoreTestUtil.bytesToKeyStore(
proxyCA.createChildKeyStore(
ApplicationId.newInstance(System.currentTimeMillis(), 1),
"password"), "password").getCertificateChain("server");
Certificate cert = Mockito.spy(certChain[0]);
certChain[0] = cert;
// Throw e to simulate problems with verifying
Mockito.doThrow(e).when(cert).verify(Mockito.any());
return certChain;
}
});
assertFalse(verifier.verify("foo", sslSession));
}
}
@Test
void testCreateHostnameVerifierRealCert() throws Exception {
ProxyCA proxyCA = new ProxyCA();
proxyCA.init();
HostnameVerifier verifier = proxyCA.getHostnameVerifier();
SSLSession sslSession = Mockito.mock(SSLSession.class);
Mockito.when(sslSession.getPeerCertificates()).thenAnswer(
new Answer<Certificate[]>() {
@Override
public Certificate[] answer(InvocationOnMock invocation)
throws Throwable {
// "real" cert
Certificate[] certChain = new Certificate[]{
KeyStoreTestUtil.generateCertificate("CN=foo.com",
KeyStoreTestUtil.generateKeyPair("RSA"), 30, "SHA1withRSA")
};
return certChain;
}
});
assertTrue(verifier.verify("foo.com", sslSession));
}
@Test
void testCreateHostnameVerifierRealCertBad() throws Exception {
ProxyCA proxyCA = new ProxyCA();
proxyCA.init();
HostnameVerifier verifier = proxyCA.getHostnameVerifier();
SSLSession sslSession = Mockito.mock(SSLSession.class);
Mockito.when(sslSession.getPeerCertificates()).thenAnswer(
new Answer<Certificate[]>() {
@Override
public Certificate[] answer(InvocationOnMock invocation)
throws Throwable {
// "real" cert
Certificate[] certChain = new Certificate[]{
KeyStoreTestUtil.generateCertificate("CN=foo.com",
KeyStoreTestUtil.generateKeyPair("RSA"), 30, "SHA1withRSA")
};
return certChain;
}
});
assertFalse(verifier.verify("bar.com", sslSession));
}
private void checkCACert(X509Certificate caCert) {
assertEquals(caCert.getSubjectX500Principal().toString(),
caCert.getIssuerDN().toString());
assertEquals(caCert.getSubjectX500Principal().toString(),
caCert.getSubjectDN().toString());
assertTrue(caCert.getSubjectX500Principal().toString().startsWith("OU=YARN-"),
"Expected CA certificate X500 Principal to start with" +
" 'OU=YARN-', but did not: " + caCert.getSubjectX500Principal());
assertFalse(caCert.getSubjectX500Principal().toString().contains(","),
"Found multiple fields in X500 Principal, when there " +
"should have only been one: " + caCert.getSubjectX500Principal());
assertEquals("SHA512withRSA", caCert.getSigAlgName());
assertEquals(
new GregorianCalendar(2037, Calendar.DECEMBER, 31).getTime(),
caCert.getNotAfter());
assertTrue(caCert.getNotBefore().before(new Date()),
"Expected certificate to have started but was not: " + caCert.getNotBefore());
assertEquals(0, caCert.getBasicConstraints());
}
private void checkPrivatePublicKeys(PrivateKey privateKey,
PublicKey publicKey) throws NoSuchAlgorithmException, InvalidKeyException,
SignatureException {
byte[] data = new byte[2000];
new Random().nextBytes(data);
Signature signer = Signature.getInstance("SHA512withRSA");
signer.initSign(privateKey);
signer.update(data);
byte[] sig = signer.sign();
signer = Signature.getInstance("SHA512withRSA");
signer.initVerify(publicKey);
signer.update(data);
assertTrue(signer.verify(sig));
}
private X509Certificate[] castCertificateArrayToX509CertificateArray(
Certificate[] certs) {
return Arrays.copyOf(certs, certs.length, X509Certificate[].class);
}
private static | TestProxyCA |
java | elastic__elasticsearch | x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/WarningsIT.java | {
"start": 840,
"end": 4096
} | class ____ extends AbstractEsqlIntegTestCase {
public void testCollectWarnings() throws Exception {
final String node1, node2;
if (randomBoolean()) {
internalCluster().ensureAtLeastNumDataNodes(2);
node1 = randomDataNode().getName();
node2 = randomValueOtherThan(node1, () -> randomDataNode().getName());
} else {
node1 = randomDataNode().getName();
node2 = randomDataNode().getName();
}
int numDocs1 = randomIntBetween(1, 15);
assertAcked(
client().admin()
.indices()
.prepareCreate("index-1")
.setSettings(
Settings.builder()
.put("index.routing.allocation.require._name", node1)
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, between(1, 5))
)
.setMapping("host", "type=keyword")
);
for (int i = 0; i < numDocs1; i++) {
client().prepareIndex("index-1").setSource("host", "192." + i).get();
}
int numDocs2 = randomIntBetween(1, 15);
assertAcked(
client().admin()
.indices()
.prepareCreate("index-2")
.setSettings(
Settings.builder()
.put("index.routing.allocation.require._name", node2)
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, between(1, 5))
)
.setMapping("host", "type=keyword")
);
for (int i = 0; i < numDocs2; i++) {
client().prepareIndex("index-2").setSource("host", "10." + i).get();
}
DiscoveryNode coordinator = randomFrom(clusterService().state().nodes().stream().toList());
client().admin().indices().prepareRefresh("index-1", "index-2").get();
EsqlQueryRequest request = EsqlQueryRequest.syncEsqlQueryRequest(
"FROM index-* | EVAL ip = to_ip(host) | STATS s = COUNT(*) by ip | KEEP ip | LIMIT 100"
).pragmas(randomPragmas());
CountDownLatch latch = new CountDownLatch(1);
client(coordinator.getName()).execute(EsqlQueryAction.INSTANCE, request, ActionListener.running(() -> {
try {
var threadpool = internalCluster().getInstance(TransportService.class, coordinator.getName()).getThreadPool();
Map<String, List<String>> responseHeaders = threadpool.getThreadContext().getResponseHeaders();
List<String> warnings = responseHeaders.getOrDefault("Warning", List.of())
.stream()
.filter(w -> w.contains("is not an IP string literal"))
.toList();
int expectedWarnings = Math.min(20, numDocs1 + numDocs2);
// we cap the number of warnings per node
assertThat(warnings.size(), greaterThanOrEqualTo(expectedWarnings));
} finally {
latch.countDown();
}
}));
latch.await(30, TimeUnit.SECONDS);
}
private DiscoveryNode randomDataNode() {
return randomFrom(clusterService().state().nodes().getDataNodes().values());
}
}
| WarningsIT |
java | elastic__elasticsearch | qa/smoke-test-http/src/internalClusterTest/java/org/elasticsearch/http/ClusterStateRestCancellationIT.java | {
"start": 1740,
"end": 4557
} | class ____ extends HttpSmokeTestCase {
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return CollectionUtils.appendToCopy(super.nodePlugins(), AssertingCustomPlugin.class);
}
private void updateClusterState(ClusterService clusterService, UnaryOperator<ClusterState> updateOperator) {
final PlainActionFuture<Void> future = new PlainActionFuture<>();
clusterService.submitUnbatchedStateUpdateTask("update state", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
return updateOperator.apply(currentState);
}
@Override
public void onFailure(Exception e) {
throw new AssertionError("update state", e);
}
@Override
public void clusterStateProcessed(ClusterState oldState, ClusterState newState) {
future.onResponse(null);
}
});
future.actionGet();
}
public void testClusterStateRestCancellation() throws Exception {
final ClusterService clusterService = internalCluster().getInstance(ClusterService.class, internalCluster().getMasterName());
updateClusterState(clusterService, s -> ClusterState.builder(s).putCustom(AssertingCustom.NAME, AssertingCustom.INSTANCE).build());
final Request clusterStateRequest = new Request(HttpGet.METHOD_NAME, "/_cluster/state");
clusterStateRequest.addParameter("wait_for_metadata_version", Long.toString(Long.MAX_VALUE));
clusterStateRequest.addParameter("wait_for_timeout", "1h");
if (randomBoolean()) {
clusterStateRequest.addParameter("local", "true");
}
final PlainActionFuture<Response> future = new PlainActionFuture<>();
logger.info("--> sending cluster state request");
final Cancellable cancellable = getRestClient().performRequestAsync(clusterStateRequest, wrapAsRestResponseListener(future));
awaitTaskWithPrefix(ClusterStateAction.NAME);
logger.info("--> cancelling cluster state request");
cancellable.cancel();
expectThrows(CancellationException.class, future::actionGet);
logger.info("--> checking cluster state task completed");
assertBusy(() -> {
updateClusterState(clusterService, s -> ClusterState.builder(s).build());
final List<TaskInfo> tasks = clusterAdmin().prepareListTasks().get().getTasks();
assertTrue(tasks.toString(), tasks.stream().noneMatch(t -> t.action().equals(ClusterStateAction.NAME)));
});
updateClusterState(clusterService, s -> ClusterState.builder(s).removeCustom(AssertingCustom.NAME).build());
}
private static | ClusterStateRestCancellationIT |
java | spring-projects__spring-framework | spring-messaging/src/main/java/org/springframework/messaging/handler/annotation/reactive/HeadersMethodArgumentResolver.java | {
"start": 1489,
"end": 3146
} | class ____ implements SyncHandlerMethodArgumentResolver {
@Override
public boolean supportsParameter(MethodParameter parameter) {
Class<?> paramType = parameter.getParameterType();
return ((parameter.hasParameterAnnotation(Headers.class) && Map.class.isAssignableFrom(paramType)) ||
MessageHeaders.class == paramType || MessageHeaderAccessor.class.isAssignableFrom(paramType));
}
@Override
public @Nullable Object resolveArgumentValue(MethodParameter parameter, Message<?> message) {
Class<?> paramType = parameter.getParameterType();
if (Map.class.isAssignableFrom(paramType)) {
return message.getHeaders();
}
else if (MessageHeaderAccessor.class == paramType) {
MessageHeaderAccessor accessor = MessageHeaderAccessor.getAccessor(message, MessageHeaderAccessor.class);
return accessor != null ? accessor : new MessageHeaderAccessor(message);
}
else if (MessageHeaderAccessor.class.isAssignableFrom(paramType)) {
MessageHeaderAccessor accessor = MessageHeaderAccessor.getAccessor(message, MessageHeaderAccessor.class);
if (accessor != null && paramType.isAssignableFrom(accessor.getClass())) {
return accessor;
}
else {
Method method = ReflectionUtils.findMethod(paramType, "wrap", Message.class);
if (method == null) {
throw new IllegalStateException(
"Cannot create accessor of type " + paramType + " for message " + message);
}
return ReflectionUtils.invokeMethod(method, null, message);
}
}
else {
throw new IllegalStateException("Unexpected parameter of type " + paramType +
" in method " + parameter.getMethod() + ". ");
}
}
}
| HeadersMethodArgumentResolver |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/component/file/FileBrowsableEndpointTest.java | {
"start": 1373,
"end": 5305
} | class ____ extends ContextTestSupport {
private static final String TEST_FILE_NAME_PREFIX = UUID.randomUUID().toString();
@Test
public void testBrowsableNoFiles() {
BrowsableEndpoint browse
= context.getEndpoint(fileUri("?initialDelay=0&delay=10"), BrowsableEndpoint.class);
assertNotNull(browse);
List<Exchange> list = browse.getExchanges();
assertNotNull(list);
assertEquals(0, list.size());
}
@Test
public void testBrowsableOneFile() {
template.sendBodyAndHeader(fileUri(), "A", Exchange.FILE_NAME, TEST_FILE_NAME_PREFIX + "a.txt");
FileEndpoint endpoint = context.getEndpoint(fileUri("?initialDelay=0&delay=10"), FileEndpoint.class);
assertNotNull(endpoint);
MemoryIdempotentRepository repo = (MemoryIdempotentRepository) endpoint.getInProgressRepository();
assertEquals(0, repo.getCacheSize());
List<Exchange> list = endpoint.getExchanges();
assertNotNull(list);
assertEquals(1, list.size());
assertEquals(TEST_FILE_NAME_PREFIX + "a.txt", list.get(0).getIn().getHeader(Exchange.FILE_NAME));
// the in progress repo should not leak
assertEquals(0, repo.getCacheSize());
// and the file is still there
assertTrue(Files.exists(testFile(TEST_FILE_NAME_PREFIX + "a.txt")), "File should exist a.txt");
}
@Test
public void testBrowsableTwoFiles() {
template.sendBodyAndHeader(fileUri(), "A", Exchange.FILE_NAME, TEST_FILE_NAME_PREFIX + "a.txt");
template.sendBodyAndHeader(fileUri(), "B", Exchange.FILE_NAME, TEST_FILE_NAME_PREFIX + "b.txt");
FileEndpoint endpoint
= context.getEndpoint(fileUri("?initialDelay=0&delay=10"), FileEndpoint.class);
assertNotNull(endpoint);
MemoryIdempotentRepository repo = (MemoryIdempotentRepository) endpoint.getInProgressRepository();
assertEquals(0, repo.getCacheSize());
List<Exchange> list = endpoint.getExchanges();
assertNotNull(list);
assertEquals(2, list.size());
// the in progress repo should not leak
assertEquals(0, repo.getCacheSize());
// and the files is still there
assertTrue(Files.exists(testFile(TEST_FILE_NAME_PREFIX + "a.txt")), "File should exist a.txt");
assertTrue(Files.exists(testFile(TEST_FILE_NAME_PREFIX + "b.txt")), "File should exist b.txt");
}
@Test
public void testBrowsableThreeFilesRecursive() {
template.sendBodyAndHeader(fileUri(), "A", Exchange.FILE_NAME, TEST_FILE_NAME_PREFIX + "a.txt");
template.sendBodyAndHeader(fileUri(), "B", Exchange.FILE_NAME,
"foo" + File.separator + TEST_FILE_NAME_PREFIX + "b.txt");
template.sendBodyAndHeader(fileUri(), "C", Exchange.FILE_NAME,
"bar" + File.separator + TEST_FILE_NAME_PREFIX + "c.txt");
FileEndpoint endpoint = context.getEndpoint(
fileUri("?initialDelay=0&delay=10&recursive=true"), FileEndpoint.class);
assertNotNull(endpoint);
MemoryIdempotentRepository repo = (MemoryIdempotentRepository) endpoint.getInProgressRepository();
assertEquals(0, repo.getCacheSize());
List<Exchange> list = endpoint.getExchanges();
assertNotNull(list);
assertEquals(3, list.size());
// the in progress repo should not leak
assertEquals(0, repo.getCacheSize());
// and the files is still there
assertTrue(Files.exists(testFile(TEST_FILE_NAME_PREFIX + "a.txt")), "File should exist a.txt");
assertTrue(Files.exists(testFile("foo" + File.separator + TEST_FILE_NAME_PREFIX + "b.txt")),
"File should exist foo/b.txt");
assertTrue(Files.exists(testFile("bar" + File.separator + TEST_FILE_NAME_PREFIX + "c.txt")),
"File should exist bar/c.txt");
}
}
| FileBrowsableEndpointTest |
java | apache__dubbo | dubbo-common/src/test/java/org/apache/dubbo/common/extension/ExtensionLoaderTest.java | {
"start": 17137,
"end": 17540
} | interface ____.apache.dubbo.common.extension.ext1.SimpleExt"));
}
}
@Test
void test_addExtension_with_interface() {
try {
getExtensionLoader(SimpleExt.class).addExtension("impl1", SimpleExt.class);
} catch (IllegalStateException expected) {
assertThat(
expected.getMessage(),
containsString("Input type | org |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/MultipleParallelOrSequentialCallsTest.java | {
"start": 1639,
"end": 5805
} | class ____ {
public void basicCaseParallel(List<String> list) {
// BUG: Diagnostic contains: Did you mean 'list.stream().parallel();'?
list.stream().parallel().parallel();
}
public void basicCaseParallelNotFirst(List<String> list) {
// BUG: Diagnostic contains: Did you mean 'list.stream().parallel().map(m -> m);'?
list.stream().map(m -> m).parallel().parallel();
}
public void basicCollection(Collection<String> list) {
// BUG: Diagnostic contains: Did you mean 'list.stream().parallel();'?
list.stream().parallel().parallel();
}
public void parallelStream(List<String> list) {
// BUG: Diagnostic contains: Did you mean 'list.parallelStream();'?
list.parallelStream().parallel().parallel();
}
public void basicCaseParallelThisInMethodArg(List<String> list) {
// BUG: Diagnostic contains: Did you mean 'this.hello(list.stream().parallel());'?
this.hello(list.stream().parallel().parallel());
}
public void onlyOneError(List<String> list) {
this.hello(
// BUG: Diagnostic contains: Multiple calls
list.stream().parallel().parallel());
}
public void mapMethod(List<String> list) {
// BUG: Diagnostic contains: Did you mean 'hello(list.stream().parallel().map(m ->
// this.hello(null)));'?
hello(list.stream().map(m -> this.hello(null)).parallel().parallel());
}
public void betweenMethods(List<String> list) {
// BUG: Diagnostic contains: Did you mean 'list.stream().parallel().map(m -> m.toString());'?
list.stream().parallel().map(m -> m.toString()).parallel();
}
public void basicCaseParallelNotLast(List<String> list) {
// BUG: Diagnostic contains: Did you mean 'list.stream().parallel().map(m ->
// m.toString()).findFirst();'?
list.stream().parallel().map(m -> m.toString()).parallel().findFirst();
}
public void basicCaseSequential(List<String> list) {
// BUG: Diagnostic contains: Did you mean 'list.stream().sequential().map(m -> m.toString());'?
list.stream().sequential().map(m -> m.toString()).sequential();
}
public void bothSequentialAndParallel(List<String> list) {
// this case is unlikely (wrong, even) but just checking that this works
// BUG: Diagnostic contains: Did you mean 'list.stream().sequential().parallel();'?
list.stream().sequential().parallel().sequential();
}
public void bothSequentialAndParallelMultiple(List<String> list) {
// this is even more messed up, this test is here to make sure the checker doesn't throw an
// exception
// BUG: Diagnostic contains: Multiple calls
list.stream().sequential().parallel().sequential().parallel();
}
public void parallelMultipleLines(List<String> list) {
// BUG: Diagnostic contains: Did you mean 'list.stream().parallel()
list.stream().parallel().map(m -> m.toString()).parallel();
}
public void multipleParallelCalls(List<String> list) {
// BUG: Diagnostic contains: Did you mean 'list.parallelStream();'?
list.parallelStream().sequential();
}
public String hello(Stream st) {
return "";
}
public void streamWithinAStream(List<String> list, List<String> list2) {
// BUG: Diagnostic contains: Did you mean
list.stream()
.flatMap(childDir -> list2.stream())
.parallel()
.flatMap(a -> list2.stream())
.parallel();
}
public void streamWithinAStreamImmediatelyAfterOtherParallel(
List<String> list, List<String> list2) {
// BUG: Diagnostic contains: Did you mean
list.stream().parallel().map(m -> list2.stream().parallel()).parallel();
}
public void parallelAndNestedStreams(List<String> list, List<String> list2) {
// BUG: Diagnostic contains: Did you mean
list.parallelStream()
.flatMap(childDir -> list2.stream())
.parallel()
.filter(m -> (new TestClass("test")).testClass())
.map(
a -> {
if (a == null) {
return a;
}
return null;
})
.filter(a -> a != null)
.flatMap(a -> list2.stream())
.parallel();
}
private | MultipleParallelOrSequentialCallsPositiveCases |
java | apache__camel | components/camel-docker/src/test/java/org/apache/camel/component/docker/headers/ExecStartCmdHeaderTest.java | {
"start": 1521,
"end": 2818
} | class ____ extends BaseDockerHeaderTest<ExecStartCmd> {
private static final Logger LOG = LoggerFactory.getLogger(ExecStartCmdHeaderTest.class);
@Mock
private ExecStartCmd mockObject;
@Mock
private ResultCallback.Adapter<Frame> callback;
@Test
void execCreateHeaderTest() {
String id = "1";
boolean tty = true;
Map<String, Object> headers = getDefaultParameters();
headers.put(DockerConstants.DOCKER_EXEC_ID, id);
headers.put(DockerConstants.DOCKER_TTY, tty);
template.sendBodyAndHeaders("direct:in", "", headers);
Mockito.verify(dockerClient, Mockito.times(1)).execStartCmd(eq(id));
Mockito.verify(mockObject, Mockito.times(1)).withTty(eq(tty));
}
@Override
protected void setupMocks() {
Mockito.when(dockerClient.execStartCmd(anyString())).thenReturn(mockObject);
Mockito.when(mockObject.exec(any())).thenReturn(callback);
try {
Mockito.when(callback.awaitCompletion()).thenReturn(callback);
} catch (InterruptedException e) {
LOG.warn("Interrupted while setting up mocks", e);
}
}
@Override
protected DockerOperation getOperation() {
return DockerOperation.EXEC_START;
}
}
| ExecStartCmdHeaderTest |
java | elastic__elasticsearch | x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java | {
"start": 58098,
"end": 59017
} | class ____ implements ActionListener<AcknowledgedResponse> {
final ActionListener<AcknowledgedResponse> actionListener;
private final long startTime;
MeasurementActionListener(final long startTime, final ActionListener<AcknowledgedResponse> onFailure) {
this.startTime = startTime;
this.actionListener = onFailure;
}
@Override
public void onResponse(final AcknowledgedResponse response) {
recordSuccessMetrics(startTime);
logger.debug("Downsampling measured successfully");
actionListener.onResponse(AcknowledgedResponse.TRUE);
}
@Override
public void onFailure(Exception e) {
recordFailureMetrics(startTime);
logger.debug("Downsampling failure measured successfully", e);
this.actionListener.onFailure(e);
}
}
}
| MeasurementActionListener |
java | apache__camel | components/camel-ftp/src/test/java/org/apache/camel/component/file/remote/integration/FromFtpSedaDeleteFileIT.java | {
"start": 1420,
"end": 3310
} | class ____ extends FtpServerTestSupport {
protected String getFtpUrl() {
return "ftp://admin@localhost:{{ftp.server.port}}/deletefile?password=admin&binary=false&delete=true";
}
@Test
public void testPollFileAndShouldBeDeleted() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedMessageCount(1);
mock.expectedBodiesReceived("Hello World this file will be deleted");
mock.assertIsSatisfied();
// assert the file is deleted
File file = service.ftpFile("deletefile/hello.txt").toFile();
await().atMost(500, TimeUnit.MILLISECONDS)
.untilAsserted(() -> assertFalse(file.exists(), "The file should have been deleted"));
}
@BeforeEach
public void prepareFtpServer() throws Exception {
// prepares the FTP Server by creating a file on the server that we want
// to unit
// test that we can pool and store as a local file
Endpoint endpoint = context.getEndpoint(getFtpUrl());
Exchange exchange = endpoint.createExchange();
exchange.getIn().setBody("Hello World this file will be deleted");
exchange.getIn().setHeader(Exchange.FILE_NAME, "hello.txt");
Producer producer = endpoint.createProducer();
producer.start();
producer.process(exchange);
producer.stop();
// assert file is created
File file = service.ftpFile("deletefile/hello.txt").toFile();
assertTrue(file.exists(), "The file should exists");
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
from(getFtpUrl()).to("seda:foo");
from("seda:foo").delay(750).log("${body}").delay(750).to("mock:result");
}
};
}
}
| FromFtpSedaDeleteFileIT |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregationBuilder.java | {
"start": 1484,
"end": 4320
} | class ____ extends ValuesSourceAggregationBuilder.SingleMetricAggregationBuilder<
ValueCountAggregationBuilder> {
public static final String NAME = "value_count";
public static final ValuesSourceRegistry.RegistryKey<MetricAggregatorSupplier> REGISTRY_KEY = new ValuesSourceRegistry.RegistryKey<>(
NAME,
MetricAggregatorSupplier.class
);
public static final ObjectParser<ValueCountAggregationBuilder, String> PARSER = ObjectParser.fromBuilder(
NAME,
ValueCountAggregationBuilder::new
);
static {
ValuesSourceAggregationBuilder.declareFields(PARSER, true, true, false);
}
public static void registerAggregators(ValuesSourceRegistry.Builder builder) {
ValueCountAggregatorFactory.registerAggregators(builder);
}
public ValueCountAggregationBuilder(String name) {
super(name);
}
protected ValueCountAggregationBuilder(
ValueCountAggregationBuilder clone,
AggregatorFactories.Builder factoriesBuilder,
Map<String, Object> metadata
) {
super(clone, factoriesBuilder, metadata);
}
@Override
protected ValuesSourceType defaultValueSourceType() {
return CoreValuesSourceType.KEYWORD;
}
@Override
protected AggregationBuilder shallowCopy(AggregatorFactories.Builder factoriesBuilder, Map<String, Object> metadata) {
return new ValueCountAggregationBuilder(this, factoriesBuilder, metadata);
}
@Override
public boolean supportsSampling() {
return true;
}
/**
* Read from a stream.
*/
public ValueCountAggregationBuilder(StreamInput in) throws IOException {
super(in);
}
@Override
protected void innerWriteTo(StreamOutput out) {
// Do nothing, no extra state to write to stream
}
@Override
protected boolean serializeTargetValueType(TransportVersion version) {
return true;
}
@Override
protected ValueCountAggregatorFactory innerBuild(
AggregationContext context,
ValuesSourceConfig config,
AggregatorFactory parent,
AggregatorFactories.Builder subFactoriesBuilder
) throws IOException {
MetricAggregatorSupplier aggregatorSupplier = context.getValuesSourceRegistry().getAggregator(REGISTRY_KEY, config);
return new ValueCountAggregatorFactory(name, config, context, parent, subFactoriesBuilder, metadata, aggregatorSupplier);
}
@Override
public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
return builder;
}
@Override
public String getType() {
return NAME;
}
@Override
public TransportVersion getMinimalSupportedVersion() {
return TransportVersion.zero();
}
}
| ValueCountAggregationBuilder |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/annotation/web/socket/WebSocketMessageBrokerSecurityConfigurationTests.java | {
"start": 31582,
"end": 31879
} | class ____ {
@Bean
Consumer<List<ChannelInterceptor>> channelInterceptorCustomizer() {
return (interceptors) -> interceptors.remove(1);
}
}
@Configuration
@EnableWebSocketSecurity
@EnableWebSocketMessageBroker
@Import(SyncExecutorConfig.class)
static | CsrfDisabledSockJsSecurityConfig |
java | apache__dubbo | dubbo-compatible/src/test/java/org/apache/dubbo/serialization/MyObjectOutput.java | {
"start": 1031,
"end": 2234
} | class ____ implements ObjectOutput {
private final BufferedWriter writer;
public MyObjectOutput(OutputStream outputStream) {
writer = new BufferedWriter(new OutputStreamWriter(outputStream));
}
@Override
public void writeObject(Object obj) throws IOException {}
@Override
public void writeBool(boolean v) throws IOException {}
@Override
public void writeByte(byte v) throws IOException {}
@Override
public void writeShort(short v) throws IOException {}
@Override
public void writeInt(int v) throws IOException {}
@Override
public void writeLong(long v) throws IOException {}
@Override
public void writeFloat(float v) throws IOException {}
@Override
public void writeDouble(double v) throws IOException {}
@Override
public void writeUTF(String v) throws IOException {
writer.write(v);
writer.write('\n');
}
@Override
public void writeBytes(byte[] v) throws IOException {}
@Override
public void writeBytes(byte[] v, int off, int len) throws IOException {}
@Override
public void flushBuffer() throws IOException {
writer.flush();
}
}
| MyObjectOutput |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/manytoone/ManyToOneBidirectionalEagerTest.java | {
"start": 3439,
"end": 4420
} | class ____ {
@Id
private String id;
@Column( nullable = false )
private String description;
@ManyToOne
@JoinColumn( name = "one_id" )
private OneEntity one;
public ManyEntity(String id, String description) {
this.id = id;
this.description = description;
}
public ManyEntity() {
}
public String getId() {
return id;
}
public String getDescription() {
return description;
}
public OneEntity getOne() {
return one;
}
public void setOne(OneEntity one) {
this.one = one;
}
@Override
public boolean equals(Object o) {
if ( this == o ) {
return true;
}
if ( o == null || getClass() != o.getClass() ) {
return false;
}
final ManyEntity that = (ManyEntity) o;
return Objects.equals( id, that.id ) && Objects.equals( description, that.description );
}
@Override
public int hashCode() {
return Objects.hash( id, description );
}
}
@Entity( name = "OneEntity" )
public static | ManyEntity |
java | elastic__elasticsearch | modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/PatternReplaceCharFilterFactory.java | {
"start": 1006,
"end": 2026
} | class ____ extends AbstractCharFilterFactory implements NormalizingCharFilterFactory {
private final Pattern pattern;
private final String replacement;
PatternReplaceCharFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(name);
String sPattern = settings.get("pattern");
if (Strings.hasLength(sPattern) == false) {
throw new IllegalArgumentException("pattern is missing for [" + name + "] char filter of type 'pattern_replace'");
}
pattern = Regex.compile(sPattern, settings.get("flags"));
replacement = settings.get("replacement", ""); // when not set or set to "", use "".
}
public Pattern getPattern() {
return pattern;
}
public String getReplacement() {
return replacement;
}
@Override
public Reader create(Reader tokenStream) {
return new PatternReplaceCharFilter(pattern, replacement, tokenStream);
}
}
| PatternReplaceCharFilterFactory |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/mysql/createTable/MySqlCreateTableTest22.java | {
"start": 1016,
"end": 2441
} | class ____ extends MysqlTest {
public void test_0() throws Exception {
String sql = "CREATE TABLE t1 (" +
"c1 INT STORAGE DISK," +
"c2 INT STORAGE MEMORY " +
") TABLESPACE ts_1 ENGINE NDB;";
MySqlStatementParser parser = new MySqlStatementParser(sql);
List<SQLStatement> statementList = parser.parseStatementList();
SQLStatement stmt = statementList.get(0);
// print(statementList);
assertEquals(1, statementList.size());
MySqlSchemaStatVisitor visitor = new MySqlSchemaStatVisitor();
stmt.accept(visitor);
// System.out.println("Tables : " + visitor.getTables());
// System.out.println("fields : " + visitor.getColumns());
// System.out.println("coditions : " + visitor.getConditions());
// System.out.println("orderBy : " + visitor.getOrderByColumns());
assertEquals(1, visitor.getTables().size());
assertEquals(2, visitor.getColumns().size());
assertEquals(0, visitor.getConditions().size());
assertTrue(visitor.getTables().containsKey(new TableStat.Name("t1")));
String output = SQLUtils.toMySqlString(stmt);
assertEquals("CREATE TABLE t1 (" +
"\n\tc1 INT STORAGE DISK," +
"\n\tc2 INT STORAGE MEMORY" +
"\n) TABLESPACE ts_1 ENGINE = NDB;", output);
}
}
| MySqlCreateTableTest22 |
java | apache__flink | flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/sync/ForStSyncListState.java | {
"start": 2556,
"end": 10445
} | class ____<K, N, V> extends AbstractForStSyncState<K, N, List<V>>
implements InternalListState<K, N, V> {
/** Serializer for the values. */
private TypeSerializer<V> elementSerializer;
private final ListDelimitedSerializer listSerializer;
/** Separator of StringAppendTestOperator in RocksDB. */
private static final byte DELIMITER = ',';
/**
* Creates a new {@code RocksDBListState}.
*
* @param columnFamily The RocksDB column family that this state is associated to.
* @param namespaceSerializer The serializer for the namespace.
* @param valueSerializer The serializer for the state.
* @param defaultValue The default value for the state.
* @param backend The backend for which this state is bind to.
*/
private ForStSyncListState(
ColumnFamilyHandle columnFamily,
TypeSerializer<N> namespaceSerializer,
TypeSerializer<List<V>> valueSerializer,
List<V> defaultValue,
ForStSyncKeyedStateBackend<K> backend) {
super(columnFamily, namespaceSerializer, valueSerializer, defaultValue, backend);
ListSerializer<V> castedListSerializer = (ListSerializer<V>) valueSerializer;
this.elementSerializer = castedListSerializer.getElementSerializer();
this.listSerializer = new ListDelimitedSerializer();
}
@Override
public TypeSerializer<K> getKeySerializer() {
return backend.getKeySerializer();
}
@Override
public TypeSerializer<N> getNamespaceSerializer() {
return namespaceSerializer;
}
@Override
public TypeSerializer<List<V>> getValueSerializer() {
return valueSerializer;
}
@Override
public Iterable<V> get() throws IOException, RocksDBException {
return getInternal();
}
@Override
public List<V> getInternal() throws IOException, RocksDBException {
byte[] key = serializeCurrentKeyWithGroupAndNamespace();
byte[] valueBytes = backend.db.get(columnFamily, key);
return listSerializer.deserializeList(valueBytes, elementSerializer);
}
@Override
public void add(V value) throws IOException, RocksDBException {
Preconditions.checkNotNull(value, "You cannot add null to a ListState.");
backend.db.merge(
columnFamily,
writeOptions,
serializeCurrentKeyWithGroupAndNamespace(),
serializeValue(value, elementSerializer));
}
@Override
public void mergeNamespaces(N target, Collection<N> sources) {
if (sources == null || sources.isEmpty()) {
return;
}
try {
// create the target full-binary-key
setCurrentNamespace(target);
final byte[] targetKey = serializeCurrentKeyWithGroupAndNamespace();
// merge the sources to the target
for (N source : sources) {
if (source != null) {
setCurrentNamespace(source);
final byte[] sourceKey = serializeCurrentKeyWithGroupAndNamespace();
byte[] valueBytes = backend.db.get(columnFamily, sourceKey);
if (valueBytes != null) {
backend.db.delete(columnFamily, writeOptions, sourceKey);
backend.db.merge(columnFamily, writeOptions, targetKey, valueBytes);
}
}
}
} catch (Exception e) {
throw new FlinkRuntimeException("Error while merging state in RocksDB", e);
}
}
@Override
public void update(List<V> valueToStore) throws IOException, RocksDBException {
updateInternal(valueToStore);
}
@Override
public void updateInternal(List<V> values) throws IOException, RocksDBException {
Preconditions.checkNotNull(values, "List of values to add cannot be null.");
if (!values.isEmpty()) {
backend.db.put(
columnFamily,
writeOptions,
serializeCurrentKeyWithGroupAndNamespace(),
listSerializer.serializeList(values, elementSerializer));
} else {
clear();
}
}
@Override
public void addAll(List<V> values) throws IOException, RocksDBException {
Preconditions.checkNotNull(values, "List of values to add cannot be null.");
if (!values.isEmpty()) {
backend.db.merge(
columnFamily,
writeOptions,
serializeCurrentKeyWithGroupAndNamespace(),
listSerializer.serializeList(values, elementSerializer));
}
}
@Override
public void migrateSerializedValue(
DataInputDeserializer serializedOldValueInput,
DataOutputSerializer serializedMigratedValueOutput,
TypeSerializer<List<V>> priorSerializer,
TypeSerializer<List<V>> newSerializer)
throws StateMigrationException {
Preconditions.checkArgument(priorSerializer instanceof ListSerializer);
Preconditions.checkArgument(newSerializer instanceof ListSerializer);
TypeSerializer<V> priorElementSerializer =
((ListSerializer<V>) priorSerializer).getElementSerializer();
TypeSerializer<V> newElementSerializer =
((ListSerializer<V>) newSerializer).getElementSerializer();
try {
while (serializedOldValueInput.available() > 0) {
V element =
ListDelimitedSerializer.deserializeNextElement(
serializedOldValueInput, priorElementSerializer);
newElementSerializer.serialize(element, serializedMigratedValueOutput);
if (serializedOldValueInput.available() > 0) {
serializedMigratedValueOutput.write(DELIMITER);
}
}
} catch (Exception e) {
throw new StateMigrationException(
"Error while trying to migrate RocksDB list state.", e);
}
}
@Override
protected ForStSyncListState<K, N, V> setValueSerializer(
TypeSerializer<List<V>> valueSerializer) {
super.setValueSerializer(valueSerializer);
this.elementSerializer = ((ListSerializer<V>) valueSerializer).getElementSerializer();
return this;
}
@SuppressWarnings("unchecked")
static <E, K, N, SV, S extends State, IS extends S> IS create(
StateDescriptor<S, SV> stateDesc,
Tuple2<ColumnFamilyHandle, RegisteredKeyValueStateBackendMetaInfo<N, SV>>
registerResult,
ForStSyncKeyedStateBackend<K> backend) {
return (IS)
new ForStSyncListState<>(
registerResult.f0,
registerResult.f1.getNamespaceSerializer(),
(TypeSerializer<List<E>>) registerResult.f1.getStateSerializer(),
(List<E>) stateDesc.getDefaultValue(),
backend);
}
@SuppressWarnings("unchecked")
static <E, K, N, SV, S extends State, IS extends S> IS update(
StateDescriptor<S, SV> stateDesc,
Tuple2<ColumnFamilyHandle, RegisteredKeyValueStateBackendMetaInfo<N, SV>>
registerResult,
IS existingState) {
return (IS)
((ForStSyncListState<K, N, E>) existingState)
.setNamespaceSerializer(registerResult.f1.getNamespaceSerializer())
.setValueSerializer(
(TypeSerializer<List<E>>) registerResult.f1.getStateSerializer())
.setDefaultValue((List<E>) stateDesc.getDefaultValue());
}
static | ForStSyncListState |
java | quarkusio__quarkus | extensions/smallrye-openapi/deployment/src/test/java/io/quarkus/smallrye/openapi/test/jaxrs/OpenApiWithSecurity.java | {
"start": 232,
"end": 868
} | class ____ {
@GET
@Path("/test-security/naked")
@Authenticated
public String secureEndpointWithoutSecurityAnnotation() {
return "secret";
}
@GET
@Path("/test-security/annotated")
@RolesAllowed("admin")
public String secureEndpointWithRolesAllowedAnnotation() {
return "secret";
}
@GET
@Path("/test-security/annotated2")
@RolesAllowed("user")
public String secureEndpointWithRolesAllowed2Annotation() {
return "secret";
}
@GET
@Path("/test-security/public")
public String publicEndpoint() {
return "boo";
}
}
| OpenApiWithSecurity |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncer.java | {
"start": 2924,
"end": 3090
} | interface ____ {
void backgroundSync(ShardId shardId, String primaryAllocationId, long primaryTerm, RetentionLeases retentionLeases);
}
}
| BackgroundSyncAction |
java | apache__logging-log4j2 | log4j-api/src/main/java/org/apache/logging/log4j/spi/LoggerContextFactory.java | {
"start": 1068,
"end": 1966
} | class ____ of the caller.
* @param loader The ClassLoader to use or null.
* @param currentContext If true shuts down the current Context, if false shuts down the Context appropriate
* for the caller if a more appropriate Context can be determined.
* @param allContexts if true all LoggerContexts that can be located will be shutdown.
* @since 2.13.0
*/
default void shutdown(String fqcn, ClassLoader loader, boolean currentContext, boolean allContexts) {
if (hasContext(fqcn, loader, currentContext)) {
final LoggerContext ctx = getContext(fqcn, loader, null, currentContext);
if (ctx instanceof Terminable) {
((Terminable) ctx).terminate();
}
}
}
/**
* Checks to see if a LoggerContext is installed. The default implementation returns false.
* @param fqcn The fully qualified | name |
java | quarkusio__quarkus | extensions/micrometer/deployment/src/test/java/io/quarkus/micrometer/deployment/pathparams/HttpPathParamLimitWithReactiveRoutes500Test.java | {
"start": 538,
"end": 2174
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withConfigurationResource("test-logging.properties")
.overrideConfigKey("quarkus.micrometer.binder-enabled-default", "false")
.overrideConfigKey("quarkus.micrometer.binder.http-client.enabled", "true")
.overrideConfigKey("quarkus.micrometer.binder.http-server.enabled", "true")
.overrideConfigKey("quarkus.micrometer.binder.vertx.enabled", "true")
.overrideConfigKey("quarkus.redis.devservices.enabled", "false")
.withApplicationRoot((jar) -> jar
.addClasses(Util.class,
Resource.class));
@Inject
MeterRegistry registry;
public static final int COUNT = 101;
@Test
void testWithReactiveRoute500() throws InterruptedException {
registry.clear();
for (int i = 0; i < COUNT; i++) {
RestAssured.get("/rr").then().statusCode(500);
RestAssured.get("/rr/foo-" + i).then().statusCode(500);
}
Util.waitForMeters(registry.find("http.server.requests").timers(), COUNT);
Assertions.assertEquals(COUNT, registry.find("http.server.requests")
.tag("uri", "/rr").tag("method", "GET")
.timers().iterator().next().count());
Assertions.assertEquals(COUNT, registry.find("http.server.requests")
.tag("method", "GET").tag("uri", "/rr/{message}")
.timers().iterator().next().count());
}
@Singleton
public static | HttpPathParamLimitWithReactiveRoutes500Test |
java | apache__flink | flink-table/flink-table-common/src/main/java/org/apache/flink/table/api/CatalogNotExistException.java | {
"start": 979,
"end": 1288
} | class ____ extends RuntimeException {
public CatalogNotExistException(String catalogName) {
this(catalogName, null);
}
public CatalogNotExistException(String catalogName, Throwable cause) {
super("Catalog " + catalogName + " does not exist.", cause);
}
}
| CatalogNotExistException |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/inheritance/ManyToOneInheritanceSubTypeTest.java | {
"start": 7327,
"end": 7367
} | class ____ extends UnionEntity {
}
}
| UnionB |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/EqualTest_any.java | {
"start": 245,
"end": 1238
} | class ____ extends TestCase {
public void test_exits() throws Exception {
String sql = "any(select id from t)";
String sql_c = "any(select id from t1)";
SQLAnyExpr exprA, exprB, exprC;
{
OracleExprParser parser = new OracleExprParser(sql);
exprA = (SQLAnyExpr) parser.expr();
}
{
OracleExprParser parser = new OracleExprParser(sql);
exprB = (SQLAnyExpr) parser.expr();
}
{
OracleExprParser parser = new OracleExprParser(sql_c);
exprC = (SQLAnyExpr) parser.expr();
}
assertEquals(exprA, exprB);
assertNotEquals(exprA, exprC);
assertTrue(exprA.equals(exprA));
assertFalse(exprA.equals(new Object()));
assertEquals(exprA.hashCode(), exprB.hashCode());
assertEquals(new SQLAnyExpr(), new SQLAnyExpr());
assertEquals(new SQLAnyExpr().hashCode(), new SQLAnyExpr().hashCode());
}
}
| EqualTest_any |
java | hibernate__hibernate-orm | hibernate-testing/src/main/java/org/hibernate/testing/env/ConnectionProviderBuilder.java | {
"start": 6151,
"end": 6944
} | class ____ implements InvocationHandler {
private final DataSource target;
private Connection actualConnection;
private Connection connectionProxy;
public DataSourceInvocationHandler(DataSource target) {
this.target = target;
}
@Override
public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
if ( "getConnection".equals( method.getName() ) ) {
if(actualConnection == null) {
actualConnection = (Connection) method.invoke( target, args);
connectionProxy = (Connection) Proxy.newProxyInstance(
this.getClass().getClassLoader(),
new Class[] { Connection.class },
new ConnectionInvocationHandler( actualConnection )
);
}
}
return connectionProxy;
}
private static | DataSourceInvocationHandler |
java | micronaut-projects__micronaut-core | aop/src/main/java/io/micronaut/aop/chain/ConstructorInterceptorChain.java | {
"start": 1899,
"end": 11080
} | class ____<T> extends AbstractInterceptorChain<T, T> implements ConstructorInvocationContext<T> {
private final BeanConstructor<T> beanConstructor;
private Object[] internalParameters = ArrayUtils.EMPTY_OBJECT_ARRAY;
/**
* Default constructor.
*
* @param beanConstructor The bean constructor
* @param interceptors The method interceptors to be passed to the final object to be constructed
* @param originalParameters The parameters
*/
private ConstructorInterceptorChain(
@NonNull BeanConstructor<T> beanConstructor,
@NonNull Interceptor<T, T>[] interceptors,
Object... originalParameters) {
super(interceptors, originalParameters);
this.beanConstructor = Objects.requireNonNull(beanConstructor, "Bean constructor cannot be null");
}
/**
* Default constructor.
*
* @param beanDefinition The bean constructor
* @param beanConstructor The bean constructor
* @param interceptors The interceptors
* @param originalParameters The parameters
* @param additionalInterceptorParametersCount The additional interceptor parameters count
*/
@UsedByGeneratedCode
private ConstructorInterceptorChain(
@NonNull BeanDefinition<T> beanDefinition,
@NonNull BeanConstructor<T> beanConstructor,
@NonNull Interceptor<T, T>[] interceptors,
int additionalInterceptorParametersCount,
Object... originalParameters) {
this(beanConstructor, interceptors, resolveConcreteSubset(beanDefinition, originalParameters, additionalInterceptorParametersCount));
internalParameters = resolveInterceptorArguments(beanDefinition, originalParameters, additionalInterceptorParametersCount);
}
@Override
@NonNull
public InterceptorKind getKind() {
return InterceptorKind.AROUND_CONSTRUCT;
}
@NonNull
@Override
public T getTarget() {
throw new UnsupportedOperationException("The target cannot be retrieved for Constructor interception");
}
@NonNull
@Override
public T proceed() throws RuntimeException {
Interceptor<T, T> interceptor;
if (interceptorCount == 0 || index == interceptorCount) {
final Object[] finalParameters;
if (ArrayUtils.isNotEmpty(internalParameters)) {
finalParameters = ArrayUtils.concat(getParameterValues(), internalParameters);
} else {
finalParameters = getParameterValues();
}
return beanConstructor.instantiate(finalParameters);
} else {
interceptor = this.interceptors[index++];
if (LOG.isTraceEnabled()) {
LOG.trace("Proceeded to next interceptor [{}] in chain for constructor invocation: {}", interceptor, beanConstructor);
}
return interceptor.intercept(this);
}
}
@Override
public @NonNull
Argument<?>[] getArguments() {
return beanConstructor.getArguments();
}
@Override
public T invoke(T instance, Object... arguments) {
throw new UnsupportedOperationException("Existing instances cannot be invoked with Constructor injection");
}
@Override
@NonNull
public BeanConstructor<T> getConstructor() {
return beanConstructor;
}
/**
* Internal methods that handles the logic of instantiating a bean that has constructor interception applied.
*
* @param resolutionContext The resolution context
* @param beanContext The bean context
* @param interceptors The interceptors. Can be null and if so should be resolved from the context.
* @param definition The definition
* @param constructor The bean constructor
* @param parameters The resolved parameters
* @param <T1> The bean type
* @return The instantiated bean
* @since 3.0.0
*/
@Internal
@UsedByGeneratedCode
@NonNull
@Deprecated
public static <T1> T1 instantiate(
@NonNull BeanResolutionContext resolutionContext,
@NonNull BeanContext beanContext,
@Nullable List<BeanRegistration<Interceptor<T1, T1>>> interceptors,
@NonNull BeanDefinition<T1> definition,
@NonNull BeanConstructor<T1> constructor,
@NonNull Object... parameters) {
int micronaut3additionalProxyConstructorParametersCount = 3;
return instantiate(resolutionContext, beanContext, interceptors, definition, constructor, micronaut3additionalProxyConstructorParametersCount, parameters);
}
/**
* Internal methods that handles the logic of instantiating a bean that has constructor interception applied.
*
* @param resolutionContext The resolution context
* @param beanContext The bean context
* @param interceptors The interceptors. Can be null and if so should be resolved from the context.
* @param definition The definition
* @param constructor The bean constructor
* @param additionalProxyConstructorParametersCount The additional proxy constructor parameters count
* @param parameters The resolved parameters
* @param <T1> The bean type
* @return The instantiated bean
* @since 3.0.0
*/
@Internal
@UsedByGeneratedCode
@NonNull
public static <T1> T1 instantiate(
@NonNull BeanResolutionContext resolutionContext,
@NonNull BeanContext beanContext,
@Nullable List<BeanRegistration<Interceptor<T1, T1>>> interceptors,
@NonNull BeanDefinition<T1> definition,
@NonNull BeanConstructor<T1> constructor,
int additionalProxyConstructorParametersCount,
@NonNull Object... parameters) {
if (interceptors == null) {
final AnnotationMetadataHierarchy hierarchy = new AnnotationMetadataHierarchy(definition.getAnnotationMetadata(), constructor.getAnnotationMetadata());
final Collection<AnnotationValue<?>> annotationValues = resolveInterceptorValues(hierarchy, InterceptorKind.AROUND_CONSTRUCT);
final Collection<BeanRegistration<Interceptor<?, ?>>> resolved = resolutionContext.getBeanRegistrations(
Interceptor.ARGUMENT,
Qualifiers.byInterceptorBindingValues(annotationValues)
);
interceptors = new ArrayList(resolved);
}
final InterceptorRegistry interceptorRegistry = beanContext.getBean(InterceptorRegistry.ARGUMENT);
final Interceptor<T1, T1>[] resolvedInterceptors = interceptorRegistry
.resolveConstructorInterceptors(constructor, interceptors);
return Objects.requireNonNull(new ConstructorInterceptorChain<>(
definition,
constructor,
resolvedInterceptors,
additionalProxyConstructorParametersCount,
parameters
).proceed(), "Constructor interceptor chain illegally returned null for constructor: " + constructor.getDescription());
}
private static Object[] resolveConcreteSubset(BeanDefinition<?> beanDefinition,
Object[] originalParameters,
int additionalProxyConstructorParametersCount) {
if (beanDefinition instanceof AdvisedBeanType) {
// intercepted bean constructors include additional arguments in
// addition to the arguments declared in the bean
// Here we subtract these from the parameters made visible to the interceptor consumer
if (originalParameters.length < additionalProxyConstructorParametersCount) {
throw new IllegalStateException("Invalid intercepted bean constructor. This should never happen. Report an issue to the project maintainers.");
}
return Arrays.copyOfRange(
originalParameters,
0,
originalParameters.length - additionalProxyConstructorParametersCount
);
}
return originalParameters;
}
private static Object[] resolveInterceptorArguments(BeanDefinition<?> beanDefinition,
Object[] originalParameters,
int additionalProxyConstructorParametersCount) {
if (beanDefinition instanceof AdvisedBeanType) {
// intercepted bean constructors include additional arguments in
// addition to the arguments declared in the bean
// Here we subtract these from the parameters made visible to the interceptor consumer
if (originalParameters.length < additionalProxyConstructorParametersCount) {
throw new IllegalStateException("Invalid intercepted bean constructor. This should never happen. Report an issue to the project maintainers.");
}
return Arrays.copyOfRange(
originalParameters,
originalParameters.length - additionalProxyConstructorParametersCount,
originalParameters.length
);
}
return originalParameters;
}
}
| ConstructorInterceptorChain |
java | apache__camel | components/camel-sql/src/main/java/org/apache/camel/component/sql/SqlNamedProcessingStrategy.java | {
"start": 1227,
"end": 2918
} | interface ____ extends SqlProcessingStrategy {
/**
* Commit callback if there are a query to be run after processing.
*
* @param endpoint the endpoint
* @param exchange The exchange after it has been processed
* @param data The original data delivered to the route
* @param namedJdbcTemplate The JDBC template
* @param parameterSource Parameter sources for the named JDBC template
* @param query The SQL query to execute
* @return the update count if the query returned an update count
* @throws Exception can be thrown in case of error
*/
int commit(
DefaultSqlEndpoint endpoint, Exchange exchange, Object data,
NamedParameterJdbcTemplate namedJdbcTemplate, SqlParameterSource parameterSource, String query)
throws Exception;
/**
* Commit callback when the batch is complete. This allows you to do one extra query after all rows has been
* processed in the batch.
*
* @param endpoint the endpoint
* @param namedJdbcTemplate The JDBC template
* @param parameterSource Parameter sources for the named JDBC template
* @param query The SQL query to execute
* @return the update count if the query returned an update count
* @throws Exception can be thrown in case of error
*/
int commitBatchComplete(
DefaultSqlEndpoint endpoint, NamedParameterJdbcTemplate namedJdbcTemplate,
SqlParameterSource parameterSource, String query)
throws Exception;
}
| SqlNamedProcessingStrategy |
java | mockito__mockito | mockito-core/src/main/java/org/mockito/internal/progress/ThreadSafeMockingProgress.java | {
"start": 365,
"end": 1324
} | class ____ {
private static final ThreadLocal<MockingProgress> MOCKING_PROGRESS_PROVIDER =
new ThreadLocal<MockingProgress>() {
@Override
protected MockingProgress initialValue() {
return new MockingProgressImpl();
}
};
private ThreadSafeMockingProgress() {}
/**
* Returns the {@link MockingProgress} for the current Thread.
* <p>
* <b>IMPORTANT</b>: Never assign and access the returned {@link MockingProgress} to an instance or static field. Thread safety can not be guaranteed in this case, cause the Thread that wrote the field might not be the same that read it. In other words multiple threads will access the same {@link MockingProgress}.
*
* @return never <code>null</code>
*/
public static final MockingProgress mockingProgress() {
return MOCKING_PROGRESS_PROVIDER.get();
}
}
| ThreadSafeMockingProgress |
java | micronaut-projects__micronaut-core | inject/src/main/java/io/micronaut/context/event/ApplicationEventListener.java | {
"start": 913,
"end": 1314
} | interface ____<E> extends EventListener {
/**
* Handle an application event.
*
* @param event the event to respond to
*/
void onApplicationEvent(E event);
/**
* Whether the given event is supported.
*
* @param event The event
* @return True if it is
*/
default boolean supports(E event) {
return true;
}
}
| ApplicationEventListener |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/Issue213Test.java | {
"start": 160,
"end": 552
} | class ____ extends TestCase {
public void test_0() throws Exception {
String text = "\t\t\t\t\t\t \u00A020:00-21:30</span><br />\r\n\r\n</p>\r\n<p>\r\n\t\r\n</p>\r\n<p>\r\n\t<br />\r\n</p>\r\n\t\t\t";
Product e = new Product();
e.setIntro(text);
byte[] r = JSON.toJSONBytes(e);
JSON.parseObject(r, Product.class);
}
public static | Issue213Test |
java | apache__camel | components/camel-kafka/src/main/java/org/apache/camel/component/kafka/consumer/KafkaManualCommitFactory.java | {
"start": 2034,
"end": 2804
} | class ____ {
public final TopicPartition partition;
public final long recordOffset;
public final long commitTimeout;
public KafkaRecordPayload(TopicPartition partition, long recordOffset, long commitTimeout) {
this.partition = partition;
this.recordOffset = recordOffset;
this.commitTimeout = commitTimeout;
}
}
/**
* Creates a new instance
*
* @param camelExchangePayload the exchange-related payload from Camel
* @param kafkaRecordPayload the record-related payload from Kafka
*/
KafkaManualCommit newInstance(
CamelExchangePayload camelExchangePayload, KafkaRecordPayload kafkaRecordPayload, CommitManager commitManager);
}
| KafkaRecordPayload |
java | apache__camel | core/camel-core-engine/src/main/java/org/apache/camel/impl/DefaultModelToStructureDumper.java | {
"start": 1348,
"end": 3880
} | class ____ implements ModelToStructureDumper {
@Override
public List<ModelDumpLine> dumpStructure(CamelContext context, Route def, boolean brief) throws Exception {
List<ModelDumpLine> answer = new ArrayList<>();
String loc = def.getSourceLocationShort();
answer.add(new ModelDumpLine(loc, "route", def.getRouteId(), 0, "route[" + def.getRouteId() + "]"));
String uri = brief ? def.getEndpoint().getEndpointBaseUri() : def.getEndpoint().getEndpointUri();
answer.add(new ModelDumpLine(loc, "from", def.getRouteId(), 1, "from[" + uri + "]"));
MBeanServer server = context.getManagementStrategy().getManagementAgent().getMBeanServer();
if (server != null) {
String jmxDomain = context.getManagementStrategy().getManagementAgent().getMBeanObjectDomainName();
// get all the processor mbeans and sort them accordingly to their index
String prefix = context.getManagementStrategy().getManagementAgent().getIncludeHostName() ? "*/" : "";
ObjectName query = ObjectName.getInstance(
jmxDomain + ":context=" + prefix + context.getManagementName() + ",type=processors,*");
Set<ObjectName> names = server.queryNames(query, null);
List<ManagedProcessorMBean> mps = new ArrayList<>();
for (ObjectName on : names) {
ManagedProcessorMBean processor = context.getManagementStrategy().getManagementAgent().newProxyClient(on,
ManagedProcessorMBean.class);
// the processor must belong to this route
if (def.getRouteId().equals(processor.getRouteId())) {
mps.add(processor);
}
}
// sort by index
mps.sort(new OrderProcessorMBeans());
// dump in text format padded by level
for (ManagedProcessorMBean processor : mps) {
loc = processor.getSourceLocationShort();
String kind = processor.getProcessorName();
String id = processor.getProcessorId();
int level = processor.getLevel() + 1;
String code = brief ? processor.getProcessorName() : processor.getModelLabel();
answer.add(new ModelDumpLine(loc, kind, id, level, code));
}
}
return answer;
}
/**
* Used for sorting the processor mbeans accordingly to their index.
*/
private static final | DefaultModelToStructureDumper |
java | elastic__elasticsearch | x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/rest/action/XPackUsageRestCancellationIT.java | {
"start": 2659,
"end": 4362
} | class ____ extends ESIntegTestCase {
private static final CountDownLatch blockActionLatch = new CountDownLatch(1);
private static final CountDownLatch blockingXPackUsageActionExecuting = new CountDownLatch(1);
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return Arrays.asList(getTestTransportPlugin(), BlockingUsageActionXPackPlugin.class);
}
@Override
protected Settings nodeSettings(int ordinal, Settings otherSettings) {
return Settings.builder()
.put(super.nodeSettings(ordinal, otherSettings))
.put(NetworkModule.HTTP_DEFAULT_TYPE_SETTING.getKey(), Netty4Plugin.NETTY_HTTP_TRANSPORT_NAME)
.build();
}
@Override
protected boolean addMockHttpTransport() {
return false; // enable http
}
public void testCancellation() throws Exception {
internalCluster().startMasterOnlyNode();
ensureStableCluster(1);
final String actionName = XPackUsageAction.NAME;
final Request request = new Request(HttpGet.METHOD_NAME, "/_xpack/usage");
final PlainActionFuture<Response> future = new PlainActionFuture<>();
final Cancellable cancellable = getRestClient().performRequestAsync(request, wrapAsRestResponseListener(future));
assertThat(future.isDone(), equalTo(false));
blockingXPackUsageActionExecuting.await();
cancellable.cancel();
assertAllCancellableTasksAreCancelled(actionName);
blockActionLatch.countDown();
expectThrows(CancellationException.class, future::actionGet);
assertAllTasksHaveFinished(actionName);
}
public static | XPackUsageRestCancellationIT |
java | elastic__elasticsearch | x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/QueryContainer.java | {
"start": 2948,
"end": 18746
} | class ____ {
private final FieldExtraction extraction;
private final String id;
private final Attribute attribute;
public FieldInfo(FieldExtraction extraction, String id, Attribute attribute) {
this.extraction = extraction;
this.id = id;
this.attribute = attribute;
}
public FieldExtraction extraction() {
return extraction;
}
public String id() {
return id;
}
public Attribute attribute() {
return attribute;
}
}
// fields extracted from the response - not necessarily what the client sees
// for example in case of grouping or custom sorting, the response has extra columns
// that is filtered before getting to the client
private final List<FieldInfo> fields;
// aliases found in the tree
private final AttributeMap<Expression> aliases;
// pseudo functions (like count) - that are 'extracted' from other aggs
private final Map<String, GroupByKey> pseudoFunctions;
// scalar function processors - recorded as functions get folded;
// at scrolling, their inputs (leaves) get updated
private AttributeMap<Pipe> scalarFunctions;
private final Map<String, Sort> sort;
private final int limit;
private final boolean trackHits;
private final boolean includeFrozen;
// used when pivoting for retrieving at least one pivot row
private final int minPageSize;
private final boolean allowPartialSearchResults;
// computed
private Boolean aggsOnly;
private Boolean customSort;
// associate Attributes with aliased FieldAttributes (since they map directly to ES fields)
private Map<Attribute, FieldAttribute> fieldAlias;
public QueryContainer() {
this(null, null, null, null, null, null, null, -1, false, INDEX_INCLUDE_FROZEN, -1, ALLOW_PARTIAL_SEARCH_RESULTS);
}
public QueryContainer(
Query query,
Aggs aggs,
List<FieldInfo> fields,
AttributeMap<Expression> aliases,
Map<String, GroupByKey> pseudoFunctions,
AttributeMap<Pipe> scalarFunctions,
Map<String, Sort> sort,
int limit,
boolean trackHits,
boolean includeFrozen,
int minPageSize,
boolean allowPartialSearchResults
) {
this.query = query;
this.aggs = aggs == null ? Aggs.EMPTY : aggs;
this.fields = fields == null || fields.isEmpty() ? emptyList() : fields;
this.aliases = aliases == null || aliases.isEmpty() ? AttributeMap.emptyAttributeMap() : aliases;
this.pseudoFunctions = pseudoFunctions == null || pseudoFunctions.isEmpty() ? emptyMap() : pseudoFunctions;
this.scalarFunctions = scalarFunctions == null || scalarFunctions.isEmpty() ? AttributeMap.emptyAttributeMap() : scalarFunctions;
this.sort = sort == null || sort.isEmpty() ? emptyMap() : sort;
this.limit = limit;
this.trackHits = trackHits;
this.includeFrozen = includeFrozen;
this.minPageSize = minPageSize;
this.allowPartialSearchResults = allowPartialSearchResults;
}
/**
* If needed, create a comparator for each indicated column (which is indicated by an index pointing to the column number from the
* result set).
*/
@SuppressWarnings({ "rawtypes", "unchecked" })
public List<Tuple<Integer, Comparator>> sortingColumns() {
if (customSort == Boolean.FALSE) {
return emptyList();
}
for (Sort s : sort.values()) {
if (s instanceof AggregateSort) {
customSort = Boolean.TRUE;
break;
}
}
// If no custom sort is used break early
if (customSort == null) {
customSort = Boolean.FALSE;
return emptyList();
}
List<Tuple<Integer, Comparator>> sortingColumns = new ArrayList<>(sort.size());
for (Map.Entry<String, Sort> entry : sort.entrySet()) {
String expressionId = entry.getKey();
Sort s = entry.getValue();
int atIndex = -1;
for (int i = 0; i < fields.size(); i++) {
FieldInfo field = fields.get(i);
if (field.id().equals(expressionId)) {
atIndex = i;
break;
}
}
if (atIndex == -1) {
throw new SqlIllegalArgumentException("Cannot find backing column for ordering aggregation [{}]", s);
}
// assemble a comparator for it, if it's not an AggregateSort
// then it's pre-sorted by ES so use null
Comparator comp = null;
if (s instanceof AggregateSort) {
comp = s.direction() == Sort.Direction.ASC ? Comparator.naturalOrder() : Comparator.reverseOrder();
comp = s.missing() == Sort.Missing.FIRST ? Comparator.nullsFirst(comp) : Comparator.nullsLast(comp);
}
sortingColumns.add(new Tuple<>(Integer.valueOf(atIndex), comp));
}
return sortingColumns;
}
/**
* Since the container contains both the field extractors and the visible columns,
* compact the information in the listener through a bitset that acts as a mask
* on what extractors are used for the visible columns.
*/
public BitSet columnMask(List<Attribute> columns) {
BitSet mask = new BitSet(fields.size());
if (columns.size() > 0) {
aliasName(columns.get(0));
}
for (Attribute column : columns) {
Expression expression = aliases.resolve(column, column);
// find the column index
String id = Expressions.id(expression);
int index = -1;
for (int i = 0; i < fields.size(); i++) {
FieldInfo field = fields.get(i);
// if the index is already set there is a collision,
// so continue searching for the other field with the same id
if (mask.get(i) == false && field.id().equals(id)) {
index = i;
break;
}
}
if (index > -1) {
mask.set(index);
} else {
throw new SqlIllegalArgumentException("Cannot resolve field extractor index for column [{}]", column);
}
}
return mask;
}
public Query query() {
return query;
}
public Aggs aggs() {
return aggs;
}
public List<FieldInfo> fields() {
return fields;
}
public AttributeMap<Expression> aliases() {
return aliases;
}
public Map<String, GroupByKey> pseudoFunctions() {
return pseudoFunctions;
}
public Map<String, Sort> sort() {
return sort;
}
public int limit() {
return limit;
}
public boolean isAggsOnly() {
if (aggsOnly == null) {
aggsOnly = Boolean.valueOf(this.fields.stream().anyMatch(t -> t.extraction().supportedByAggsOnlyQuery()));
}
return aggsOnly.booleanValue();
}
public boolean hasColumns() {
return fields.size() > 0;
}
public boolean shouldTrackHits() {
return trackHits;
}
public boolean shouldIncludeFrozen() {
return includeFrozen;
}
public boolean allowPartialSearchResults() {
return allowPartialSearchResults;
}
public int minPageSize() {
return minPageSize;
}
//
// copy methods
//
public QueryContainer with(Query q) {
return new QueryContainer(
q,
aggs,
fields,
aliases,
pseudoFunctions,
scalarFunctions,
sort,
limit,
trackHits,
includeFrozen,
minPageSize,
allowPartialSearchResults
);
}
public QueryContainer withAliases(AttributeMap<Expression> a) {
return new QueryContainer(
query,
aggs,
fields,
a,
pseudoFunctions,
scalarFunctions,
sort,
limit,
trackHits,
includeFrozen,
minPageSize,
allowPartialSearchResults
);
}
public QueryContainer withPseudoFunctions(Map<String, GroupByKey> p) {
return new QueryContainer(
query,
aggs,
fields,
aliases,
p,
scalarFunctions,
sort,
limit,
trackHits,
includeFrozen,
minPageSize,
allowPartialSearchResults
);
}
public QueryContainer with(Aggs a) {
return new QueryContainer(
query,
a,
fields,
aliases,
pseudoFunctions,
scalarFunctions,
sort,
limit,
trackHits,
includeFrozen,
minPageSize,
allowPartialSearchResults
);
}
public QueryContainer withLimit(int l) {
return l == limit
? this
: new QueryContainer(
query,
aggs,
fields,
aliases,
pseudoFunctions,
scalarFunctions,
sort,
l,
trackHits,
includeFrozen,
minPageSize,
allowPartialSearchResults
);
}
public QueryContainer withTrackHits() {
return trackHits
? this
: new QueryContainer(
query,
aggs,
fields,
aliases,
pseudoFunctions,
scalarFunctions,
sort,
limit,
true,
includeFrozen,
minPageSize,
allowPartialSearchResults
);
}
public QueryContainer withFrozen() {
return includeFrozen
? this
: new QueryContainer(
query,
aggs,
fields,
aliases,
pseudoFunctions,
scalarFunctions,
sort,
limit,
trackHits,
true,
minPageSize,
allowPartialSearchResults
);
}
public QueryContainer withScalarProcessors(AttributeMap<Pipe> procs) {
return new QueryContainer(
query,
aggs,
fields,
aliases,
pseudoFunctions,
procs,
sort,
limit,
trackHits,
includeFrozen,
minPageSize,
allowPartialSearchResults
);
}
/**
* Adds a sort expression that takes precedence over all existing sort expressions. Expressions are prepended because the logical plan
* is folded from bottom up. So the most significant sort order will be added last.
*/
public QueryContainer prependSort(String expressionId, Sort sortable) {
Map<String, Sort> newSort = Maps.newLinkedHashMapWithExpectedSize(this.sort.size() + 1);
newSort.put(expressionId, sortable);
for (Map.Entry<String, Sort> entry : this.sort.entrySet()) {
newSort.putIfAbsent(entry.getKey(), entry.getValue());
}
return new QueryContainer(
query,
aggs,
fields,
aliases,
pseudoFunctions,
scalarFunctions,
newSort,
limit,
trackHits,
includeFrozen,
minPageSize,
allowPartialSearchResults
);
}
private String aliasName(Attribute attr) {
if (fieldAlias == null) {
fieldAlias = new LinkedHashMap<>();
for (Map.Entry<Attribute, Expression> entry : aliases.entrySet()) {
if (entry.getValue() instanceof FieldAttribute) {
fieldAlias.put(entry.getKey(), (FieldAttribute) entry.getValue());
}
}
}
FieldAttribute fa = fieldAlias.get(attr);
return fa != null ? fa.name() : attr.name();
}
//
// reference methods
//
private FieldExtraction topHitFieldRef(FieldAttribute fieldAttr) {
return new SearchHitFieldRef(aliasName(fieldAttr), fieldAttr.field().getDataType());
}
private Tuple<QueryContainer, FieldExtraction> nestedHitFieldRef(FieldAttribute attr) {
String name = aliasName(attr);
Query q = rewriteToContainNestedField(
query,
attr.source(),
attr.nestedParent().name(),
name,
SqlDataTypes.format(attr.field().getDataType()),
SqlDataTypes.isFromDocValuesOnly(attr.field().getDataType())
);
SearchHitFieldRef nestedFieldRef = new SearchHitFieldRef(name, attr.field().getDataType(), attr.nestedParent().name());
return new Tuple<>(
new QueryContainer(
q,
aggs,
fields,
aliases,
pseudoFunctions,
scalarFunctions,
sort,
limit,
trackHits,
includeFrozen,
minPageSize,
allowPartialSearchResults
),
nestedFieldRef
);
}
static Query rewriteToContainNestedField(
@Nullable Query query,
Source source,
String path,
String name,
String format,
boolean hasDocValues
) {
if (query == null) {
/* There is no query so we must add the nested query
* ourselves to fetch the field. */
return new NestedQuery(
source,
path,
singletonMap(name, new AbstractMap.SimpleImmutableEntry<>(hasDocValues, format)),
new MatchAll(source)
);
}
if (query.containsNestedField(path, name)) {
// The query already has the nested field. Nothing to do.
return query;
}
/* The query doesn't have the nested field so we have to ask
* it to add it. */
Query rewritten = query.addNestedField(path, name, format, hasDocValues);
if (rewritten != query) {
/* It successfully added it so we can use the rewritten
* query. */
return rewritten;
}
/* There is no nested query with a matching path so we must
* add the nested query ourselves just to fetch the field. */
NestedQuery nested = new NestedQuery(
source,
path,
singletonMap(name, new AbstractMap.SimpleImmutableEntry<>(hasDocValues, format)),
new MatchAll(source)
);
return new BoolQuery(source, true, query, nested);
}
// replace function/operators's input with references
private Tuple<QueryContainer, FieldExtraction> resolvedTreeComputingRef(ScalarFunction function, Attribute attr) {
Pipe proc = null;
if ((proc = scalarFunctions.resolve(attr)) == null) {
proc = function.asPipe();
scalarFunctions = AttributeMap.builder(scalarFunctions).put(attr, proc).build();
}
// find the processor inputs (Attributes) and convert them into references
// no need to promote them to the top since the container doesn't have to be aware
| FieldInfo |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/schedulers/Schedulers.java | {
"start": 5684,
"end": 6588
} | class ____ referenced in your code.
* <p><strong>Supported system properties ({@code System.getProperty()}):</strong>
* <ul>
* <li>{@code rx3.computation-threads} (int): sets the number of threads in the {@code computation()} {@code Scheduler}, default is the number of available CPUs</li>
* <li>{@code rx3.computation-priority} (int): sets the thread priority of the {@code computation()} {@code Scheduler}, default is {@link Thread#NORM_PRIORITY}</li>
* </ul>
* <p>
* The default value of this scheduler can be overridden at initialization time via the
* {@link RxJavaPlugins#setInitComputationSchedulerHandler(io.reactivex.rxjava3.functions.Function)} plugin method.
* Note that due to possible initialization cycles, using any of the other scheduler-returning methods will
* result in a {@link NullPointerException}.
* Once the {@code Schedulers} | is |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/ReservationInvariantsChecker.java | {
"start": 1208,
"end": 2291
} | class ____ extends InvariantsChecker {
private static final Logger LOG =
LoggerFactory.getLogger(ReservationInvariantsChecker.class);
private UTCClock clock = new UTCClock();
@Override
public void editSchedule() {
Collection<Plan> plans =
getContext().getReservationSystem().getAllPlans().values();
try {
for (Plan plan : plans) {
long currReservations =
plan.getReservationsAtTime(clock.getTime()).size();
long numberReservationQueues = getContext().getScheduler()
.getQueueInfo(plan.getQueueName(), true, false).getChildQueues()
.size();
if (currReservations != numberReservationQueues - 1) {
logOrThrow("Number of reservations (" + currReservations
+ ") does NOT match the number of reservationQueues ("
+ (numberReservationQueues - 1) + "), while it should.");
}
}
} catch (IOException io) {
throw new InvariantViolationException("Issue during invariant check: ",
io);
}
}
}
| ReservationInvariantsChecker |
java | quarkusio__quarkus | independent-projects/resteasy-reactive/server/vertx/src/test/java/org/jboss/resteasy/reactive/server/vertx/test/cache/NoCacheOnMethodsImplementationTest.java | {
"start": 819,
"end": 2430
} | class ____ {
@RegisterExtension
static ResteasyReactiveUnitTest test = new ResteasyReactiveUnitTest()
.addScanCustomizer(new Consumer<ResteasyReactiveDeploymentManager.ScanStep>() {
@Override
public void accept(ResteasyReactiveDeploymentManager.ScanStep scanStep) {
scanStep.addMethodScanner(new CacheControlScanner());
}
})
.setArchiveProducer(new Supplier<>() {
@Override
public JavaArchive get() {
return ShrinkWrap.create(JavaArchive.class)
.addClasses(IResourceWithNoCache.class, ResourceWithNoCache.class);
}
});
@Test
public void testWithFields() {
RestAssured.get("/test/withFields")
.then()
.statusCode(200)
.body(equalTo("withFields"))
.header("Cache-Control", "no-cache=\"f1\", no-cache=\"f2\"");
}
@Test
public void testWithoutFields() {
RestAssured.get("/test/withoutFields")
.then()
.statusCode(200)
.body(equalTo("withoutFields"))
.header("Cache-Control", "no-cache");
}
@Test
public void testWithoutAnnotation() {
RestAssured.get("/test/withoutAnnotation")
.then()
.statusCode(200)
.body(equalTo("withoutAnnotation"))
.header("Cache-Control", nullValue());
}
@Path("test")
public | NoCacheOnMethodsImplementationTest |
java | ReactiveX__RxJava | src/test/java/io/reactivex/rxjava3/internal/operators/flowable/FlowableOnErrorCompleteTest.java | {
"start": 1001,
"end": 4127
} | class ____ {
@Test
public void normal() {
Flowable.range(1, 10)
.onErrorComplete()
.test()
.assertResult(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
}
@Test
public void normalBackpressured() {
Flowable.range(1, 10)
.onErrorComplete()
.test(0)
.assertEmpty()
.requestMore(3)
.assertValuesOnly(1, 2, 3)
.requestMore(3)
.assertValuesOnly(1, 2, 3, 4, 5, 6)
.requestMore(4)
.assertResult(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
}
@Test
public void empty() {
Flowable.empty()
.onErrorComplete()
.test()
.assertResult();
}
@Test
public void error() throws Throwable {
TestHelper.withErrorTracking(errors -> {
Flowable.error(new TestException())
.onErrorComplete()
.test()
.assertResult();
assertTrue("" + errors, errors.isEmpty());
});
}
@Test
public void errorMatches() throws Throwable {
TestHelper.withErrorTracking(errors -> {
Flowable.error(new TestException())
.onErrorComplete(error -> error instanceof TestException)
.test()
.assertResult();
assertTrue("" + errors, errors.isEmpty());
});
}
@Test
public void errorNotMatches() throws Throwable {
TestHelper.withErrorTracking(errors -> {
Flowable.error(new IOException())
.onErrorComplete(error -> error instanceof TestException)
.test()
.assertFailure(IOException.class);
assertTrue("" + errors, errors.isEmpty());
});
}
@Test
public void errorPredicateCrash() throws Throwable {
TestHelper.withErrorTracking(errors -> {
TestSubscriberEx<Object> ts = Flowable.error(new IOException())
.onErrorComplete(error -> { throw new TestException(); })
.subscribeWith(new TestSubscriberEx<>())
.assertFailure(CompositeException.class);
TestHelper.assertError(ts, 0, IOException.class);
TestHelper.assertError(ts, 1, TestException.class);
assertTrue("" + errors, errors.isEmpty());
});
}
@Test
public void itemsThenError() throws Throwable {
TestHelper.withErrorTracking(errors -> {
Flowable.range(1, 5)
.map(v -> 4 / (3 - v))
.onErrorComplete()
.test()
.assertResult(2, 4);
assertTrue("" + errors, errors.isEmpty());
});
}
@Test
public void cancel() {
PublishProcessor<Integer> pp = PublishProcessor.create();
TestSubscriber<Integer> ts = pp
.onErrorComplete()
.test();
assertTrue("No subscribers?!", pp.hasSubscribers());
ts.cancel();
assertFalse("Still subscribers?!", pp.hasSubscribers());
}
@Test
public void onSubscribe() {
TestHelper.checkDoubleOnSubscribeFlowable(f -> f.onErrorComplete());
}
}
| FlowableOnErrorCompleteTest |
java | spring-projects__spring-boot | build-plugin/spring-boot-maven-plugin/src/main/java/org/springframework/boot/maven/BuildImageMojo.java | {
"start": 12347,
"end": 12955
} | class ____ implements Consumer<TotalProgressEvent> {
private final String message;
private long last;
ProgressLog(String message) {
this.message = message;
this.last = System.currentTimeMillis();
}
@Override
public void accept(TotalProgressEvent progress) {
log(progress.getPercent());
}
private void log(int percent) {
if (percent == 100 || (System.currentTimeMillis() - this.last) > THRESHOLD) {
MojoBuildLog.this.log.get().info(this.message + " " + percent + "%");
this.last = System.currentTimeMillis();
}
}
}
}
/**
* Adapter | ProgressLog |
java | elastic__elasticsearch | x-pack/plugin/logstash/src/main/java/org/elasticsearch/xpack/logstash/action/DeletePipelineRequest.java | {
"start": 597,
"end": 1560
} | class ____ extends LegacyActionRequest {
private final String id;
public DeletePipelineRequest(String id) {
this.id = Objects.requireNonNull(id);
}
public DeletePipelineRequest(StreamInput in) throws IOException {
super(in);
this.id = in.readString();
}
public String id() {
return id;
}
@Override
public ActionRequestValidationException validate() {
return null;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(id);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
DeletePipelineRequest that = (DeletePipelineRequest) o;
return Objects.equals(id, that.id);
}
@Override
public int hashCode() {
return Objects.hash(id);
}
}
| DeletePipelineRequest |
java | apache__camel | components/camel-kamelet/src/test/java/org/apache/camel/component/kamelet/KameletLocalBeanIoCTest.java | {
"start": 1083,
"end": 2259
} | class ____ extends CamelTestSupport {
@Test
public void testOne() throws Exception {
getMockEndpoint("mock:result").expectedBodiesReceived("Hi John we are going to Moes");
template.sendBody("direct:moe", "John");
MockEndpoint.assertIsSatisfied(context);
}
@Test
public void testTwo() throws Exception {
getMockEndpoint("mock:result").expectedBodiesReceived("Hi Jack we are going to Shamrock",
"Hi Mary we are going to Moes");
template.sendBody("direct:shamrock", "Jack");
template.sendBody("direct:moe", "Mary");
MockEndpoint.assertIsSatisfied(context);
}
// **********************************************
//
// test set-up
//
// **********************************************
@Override
protected RoutesBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
routeTemplate("whereTo")
.templateParameter("bar") // name of bar
// use dependency injection to create the local bean via its fqn | KameletLocalBeanIoCTest |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/GoogleSheetsEndpointBuilderFactory.java | {
"start": 38896,
"end": 41501
} | interface ____ extends EndpointProducerBuilder {
default GoogleSheetsEndpointProducerBuilder basic() {
return (GoogleSheetsEndpointProducerBuilder) this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default AdvancedGoogleSheetsEndpointProducerBuilder lazyStartProducer(boolean lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default AdvancedGoogleSheetsEndpointProducerBuilder lazyStartProducer(String lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
}
/**
* Builder for endpoint for the Google Sheets component.
*/
public | AdvancedGoogleSheetsEndpointProducerBuilder |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/sql/parser/PerfTest.java | {
"start": 1220,
"end": 5155
} | class ____ extends TestCase {
public void test_perf() throws Exception {
for (int i = 0; i < 10; ++i) {
// perf("SELECT * FROM my_table WHERE TRUNC(SYSDATE) = DATE '2002-10-03';");
perfOracle("SELECT a.ID, a.GMT_CREATE, a.GMT_MODIFIED, a.COMPANY_NAME, a.BIZ_TYPE , b.SERVICE_TYPE, b.SERVICE_LEVEL, b.BUSINESS_ROLE, a.STATUS, a.RECOMMENDED , a.COUNTRY, a.PROVINCE, a.CITY, a.ADDRESS, a.ZIP , a.LOGO_FILE, a.EMAIL, a.BRIEF_PROFILE, a.DOMAIN_ID, a.IS_PASS_AV , a.KEYWORDS, a.PROVIDE_PRODUCTS, a.PURCHASE_PRODUCTS, a.BRAND_NAME, a.PROMOTION_VALUE , a.OWNER_MEMBER_ID, a.OWNER_SEQ, a.EMPLOYEES_COUNT, a.ANNUAL_REVENUE, a.HOMEPAGE_URL , a.REG_ADDRESS, a.TRADE_REGION, a.TRADE_REGION_USER, a.REG_CAPITAL, a.OWNERSHIP_TYPE , a.ESTABLISHED_YEAR, a.PRINCIPAL, a.ANNUAL_PURCHASE, a.CERTIFICATION, a.CERTIFICATION_2 , a.CONTACT_MANUFACTURING, a.YEARS_OEM, b.STAGE, a.VIDEO_PATH, a.ABOUTUS_IMAGE_PATH , a.ABOUTUS_IMAGE_TITLE, a.CHINESE_NAME, a.IMAGE_VERSION FROM COMPANY a, VACCOUNT b WHERE a.ID = b.ID AND a.id IN (?)");
perfMySql("SELECT a.ID, a.GMT_CREATE, a.GMT_MODIFIED, a.COMPANY_NAME, a.BIZ_TYPE , b.SERVICE_TYPE, b.SERVICE_LEVEL, b.BUSINESS_ROLE, a.STATUS, a.RECOMMENDED , a.COUNTRY, a.PROVINCE, a.CITY, a.ADDRESS, a.ZIP , a.LOGO_FILE, a.EMAIL, a.BRIEF_PROFILE, a.DOMAIN_ID, a.IS_PASS_AV , a.KEYWORDS, a.PROVIDE_PRODUCTS, a.PURCHASE_PRODUCTS, a.BRAND_NAME, a.PROMOTION_VALUE , a.OWNER_MEMBER_ID, a.OWNER_SEQ, a.EMPLOYEES_COUNT, a.ANNUAL_REVENUE, a.HOMEPAGE_URL , a.REG_ADDRESS, a.TRADE_REGION, a.TRADE_REGION_USER, a.REG_CAPITAL, a.OWNERSHIP_TYPE , a.ESTABLISHED_YEAR, a.PRINCIPAL, a.ANNUAL_PURCHASE, a.CERTIFICATION, a.CERTIFICATION_2 , a.CONTACT_MANUFACTURING, a.YEARS_OEM, b.STAGE, a.VIDEO_PATH, a.ABOUTUS_IMAGE_PATH , a.ABOUTUS_IMAGE_TITLE, a.CHINESE_NAME, a.IMAGE_VERSION FROM COMPANY a, VACCOUNT b WHERE a.ID = b.ID AND a.id IN (?)");
// perf(loadSql("bvt/parser/oracle-23.txt"));
}
}
String loadSql(String resource) throws Exception {
InputStream is = Thread.currentThread().getContextClassLoader().getResourceAsStream(resource);
Reader reader = new InputStreamReader(is, "UTF-8");
String input = Utils.read(reader);
JdbcUtils.close(reader);
String[] items = input.split("---------------------------");
String sql = items[1].trim();
return sql;
}
void perfOracle(String sql) {
long startMillis = System.currentTimeMillis();
for (int i = 0; i < 1000 * 1000; ++i) {
execOracle(sql);
}
long millis = System.currentTimeMillis() - startMillis;
System.out.println("Oracle\t" + millis);
}
void perfMySql(String sql) {
long startMillis = System.currentTimeMillis();
for (int i = 0; i < 1000 * 1000; ++i) {
execMySql(sql);
}
long millis = System.currentTimeMillis() - startMillis;
System.out.println("MySql\t" + millis);
}
private String execOracle(String sql) {
StringBuilder out = new StringBuilder();
OracleOutputVisitor visitor = new OracleOutputVisitor(out);
OracleStatementParser parser = new OracleStatementParser(sql);
List<SQLStatement> statementList = parser.parseStatementList();
for (SQLStatement statement : statementList) {
statement.accept(visitor);
visitor.println();
}
return out.toString();
}
private String execMySql(String sql) {
StringBuilder out = new StringBuilder();
MySqlOutputVisitor visitor = new MySqlOutputVisitor(out);
MySqlStatementParser parser = new MySqlStatementParser(sql);
List<SQLStatement> statementList = parser.parseStatementList();
for (SQLStatement statement : statementList) {
statement.accept(visitor);
visitor.println();
}
return out.toString();
}
}
| PerfTest |
java | apache__dubbo | dubbo-metrics/dubbo-metrics-api/src/main/java/org/apache/dubbo/metrics/aggregate/TimeWindowAggregator.java | {
"start": 2632,
"end": 3250
} | class ____ extends SlidingWindow<SnapshotObservation> {
public SnapshotSlidingWindow(int sampleCount, long intervalInMs) {
super(sampleCount, intervalInMs);
}
@Override
public SnapshotObservation newEmptyValue(long timeMillis) {
return new SnapshotObservation();
}
@Override
protected Pane<SnapshotObservation> resetPaneTo(final Pane<SnapshotObservation> pane, long startTime) {
pane.setStartInMs(startTime);
pane.getValue().reset();
return pane;
}
}
public static | SnapshotSlidingWindow |
java | google__auto | value/src/main/java/com/google/auto/value/processor/AutoValueishProcessor.java | {
"start": 6339,
"end": 6576
} | class ____ JavaBeans-style properties that are accessible from
* templates. For example {@link #getType()} means we can write {@code $p.type} for a Velocity
* variable {@code $p} that is a {@code Property}.
*/
public static | define |
java | square__javapoet | src/test/java/com/squareup/javapoet/TypeSpecTest.java | {
"start": 25615,
"end": 26196
} | class ____ {\n"
+ " }\n"
+ "}\n");
}
@Test public void enumImplements() throws Exception {
TypeSpec typeSpec = TypeSpec.enumBuilder("Food")
.addSuperinterface(Serializable.class)
.addSuperinterface(Cloneable.class)
.addEnumConstant("LEAN_GROUND_BEEF")
.addEnumConstant("SHREDDED_CHEESE")
.build();
assertThat(toString(typeSpec)).isEqualTo(""
+ "package com.squareup.tacos;\n"
+ "\n"
+ "import java.io.Serializable;\n"
+ "import java.lang.Cloneable;\n"
+ "\n"
+ " | Inner |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/benckmark/proxy/BenchmarkCase.java | {
"start": 676,
"end": 1093
} | class ____ {
private final String name;
public BenchmarkCase(String name) {
super();
this.name = name;
}
public String getName() {
return name;
}
public void setUp(SQLExecutor sqlExec) throws Exception {
}
public abstract void execute(SQLExecutor sqlExec) throws Exception;
public void tearDown(SQLExecutor sqlExec) throws Exception {
}
}
| BenchmarkCase |
java | hibernate__hibernate-orm | hibernate-hikaricp/src/main/java/org/hibernate/hikaricp/internal/HikariConfigurationUtil.java | {
"start": 679,
"end": 3017
} | class ____ {
public static final String CONFIG_PREFIX = HikariCPSettings.HIKARI_CONFIG_PREFIX + ".";
/**
* Create/load a HikariConfig from Hibernate properties.
*
* @param properties a map of Hibernate properties
* @return a HikariConfig
*/
public static HikariConfig loadConfiguration(Map<String,Object> properties) {
final Properties hikariProps = new Properties();
copyProperty( JdbcSettings.AUTOCOMMIT, properties, "autoCommit", hikariProps );
copyProperty( JdbcSettings.POOL_SIZE, properties, "maximumPoolSize", hikariProps );
copyProperty(
properties,
"driverClassName",
hikariProps,
JdbcSettings.JAKARTA_JDBC_DRIVER,
JdbcSettings.DRIVER,
JdbcSettings.JPA_JDBC_DRIVER
);
copyProperty(
properties,
"jdbcUrl",
hikariProps,
JdbcSettings.JAKARTA_JDBC_URL,
JdbcSettings.URL,
JdbcSettings.JPA_JDBC_URL
);
copyProperty(
properties,
"username",
hikariProps,
JdbcSettings.JAKARTA_JDBC_USER,
JdbcSettings.USER,
JdbcSettings.JPA_JDBC_USER
);
copyProperty(
properties,
"password",
hikariProps,
JdbcSettings.JAKARTA_JDBC_PASSWORD,
JdbcSettings.PASS,
JdbcSettings.JPA_JDBC_PASSWORD
);
copyIsolationSetting( properties, hikariProps );
for ( var entry : properties.entrySet() ) {
final String key = entry.getKey();
if ( key.startsWith( CONFIG_PREFIX ) ) {
hikariProps.setProperty( key.substring( CONFIG_PREFIX.length() ), entry.getValue().toString() );
}
}
return new HikariConfig( hikariProps );
}
private static void copyProperty(String srcKey, Map<String,Object> src, String dstKey, Properties dst) {
if ( src.containsKey( srcKey ) ) {
dst.setProperty( dstKey, src.get( srcKey ).toString() );
}
}
private static void copyProperty(Map<String,Object> src, String dstKey, Properties dst, String... srcKeys) {
consumeSetting(
src,
(name, value) -> dst.setProperty( dstKey, value ),
srcKeys
);
}
private static void copyIsolationSetting(Map<String,Object> props, Properties hikariProps) {
final Integer isolation = ConnectionProviderInitiator.extractIsolation( props );
if ( isolation != null ) {
hikariProps.put( "transactionIsolation",
ConnectionProviderInitiator.toIsolationConnectionConstantName( isolation ) );
}
}
}
| HikariConfigurationUtil |
java | apache__kafka | clients/src/main/java/org/apache/kafka/common/utils/Utils.java | {
"start": 14001,
"end": 14456
} | class ____ be null");
try {
return c.getDeclaredConstructor().newInstance();
} catch (NoSuchMethodException e) {
throw new KafkaException("Could not find a public no-argument constructor for " + c.getName(), e);
} catch (ReflectiveOperationException | RuntimeException e) {
throw new KafkaException("Could not instantiate class " + c.getName(), e);
}
}
/**
* Look up the | cannot |
java | quarkusio__quarkus | extensions/hibernate-search-orm-elasticsearch/deployment/src/test/java/io/quarkus/hibernate/search/orm/elasticsearch/test/boot/AmbiguousSearchExtensionTest.java | {
"start": 2039,
"end": 2345
} | class ____ implements FailureHandler {
@Override
public void handle(FailureContext failureContext) {
}
@Override
public void handle(EntityIndexingFailureContext entityIndexingFailureContext) {
}
}
@SearchExtension
public static | SearchFailureHandler |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/customsql/CustomSqlRestrictionOverridesTest.java | {
"start": 3017,
"end": 3123
} | class ____ {
@Id
@GeneratedValue(strategy = GenerationType.IDENTITY)
Long id;
byte[] hash;
}
}
| Secure |
java | elastic__elasticsearch | modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/ReindexPluginMetricsIT.java | {
"start": 2416,
"end": 15813
} | class ____ extends ESIntegTestCase {
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return Arrays.asList(ReindexPlugin.class, TestTelemetryPlugin.class, MainRestPlugin.class);
}
@Override
protected boolean addMockHttpTransport() {
return false;
}
@Override
protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
return Settings.builder()
.put(super.nodeSettings(nodeOrdinal, otherSettings))
.put(TransportReindexAction.REMOTE_CLUSTER_WHITELIST.getKey(), "*:*")
.build();
}
protected ReindexRequestBuilder reindex() {
return new ReindexRequestBuilder(client());
}
protected UpdateByQueryRequestBuilder updateByQuery() {
return new UpdateByQueryRequestBuilder(client());
}
protected DeleteByQueryRequestBuilder deleteByQuery() {
return new DeleteByQueryRequestBuilder(client());
}
public static BulkIndexByScrollResponseMatcher matcher() {
return new BulkIndexByScrollResponseMatcher();
}
public void testReindexFromRemoteMetrics() throws Exception {
final String dataNodeName = internalCluster().startNode();
InetSocketAddress remoteAddress = randomFrom(cluster().httpAddresses());
RemoteInfo remote = new RemoteInfo(
"http",
remoteAddress.getHostString(),
remoteAddress.getPort(),
null,
new BytesArray("{\"match_all\":{}}"),
null,
null,
Map.of(),
RemoteInfo.DEFAULT_SOCKET_TIMEOUT,
RemoteInfo.DEFAULT_CONNECT_TIMEOUT
);
final TestTelemetryPlugin testTelemetryPlugin = internalCluster().getInstance(PluginsService.class, dataNodeName)
.filterPlugins(TestTelemetryPlugin.class)
.findFirst()
.orElseThrow();
var expectedException = assertThrows(
"Source index not created yet, should throw not found exception",
ElasticsearchStatusException.class,
() -> reindex().source("source").setRemoteInfo(remote).destination("dest").get()
);
// assert failure metrics
assertBusy(() -> {
testTelemetryPlugin.collect();
assertThat(testTelemetryPlugin.getLongHistogramMeasurement(REINDEX_TIME_HISTOGRAM).size(), equalTo(1));
List<Measurement> completions = testTelemetryPlugin.getLongCounterMeasurement(REINDEX_COMPLETION_COUNTER);
assertThat(completions.size(), equalTo(1));
assertThat(completions.getFirst().attributes().get(ATTRIBUTE_NAME_ERROR_TYPE), equalTo(expectedException.status().name()));
assertThat(completions.getFirst().attributes().get(ATTRIBUTE_NAME_SOURCE), equalTo(ATTRIBUTE_VALUE_SOURCE_REMOTE));
});
// now create the source index
indexRandom(true, prepareIndex("source").setId("1").setSource("foo", "a"));
assertHitCount(prepareSearch("source").setSize(0), 1);
reindex().source("source").setRemoteInfo(remote).destination("dest").get();
// assert success metrics
assertBusy(() -> {
testTelemetryPlugin.collect();
assertThat(testTelemetryPlugin.getLongHistogramMeasurement(REINDEX_TIME_HISTOGRAM).size(), equalTo(2));
List<Measurement> completions = testTelemetryPlugin.getLongCounterMeasurement(REINDEX_COMPLETION_COUNTER);
assertThat(completions.size(), equalTo(2));
assertNull(completions.get(1).attributes().get(ATTRIBUTE_NAME_ERROR_TYPE));
assertThat(completions.get(1).attributes().get(ATTRIBUTE_NAME_SOURCE), equalTo(ATTRIBUTE_VALUE_SOURCE_REMOTE));
});
}
public void testReindexMetrics() throws Exception {
final String dataNodeName = internalCluster().startNode();
indexRandom(
true,
prepareIndex("source").setId("1").setSource("foo", "a"),
prepareIndex("source").setId("2").setSource("foo", "a"),
prepareIndex("source").setId("3").setSource("foo", "b"),
prepareIndex("source").setId("4").setSource("foo", "c")
);
assertHitCount(prepareSearch("source").setSize(0), 4);
final TestTelemetryPlugin testTelemetryPlugin = internalCluster().getInstance(PluginsService.class, dataNodeName)
.filterPlugins(TestTelemetryPlugin.class)
.findFirst()
.orElseThrow();
// Copy all the docs
reindex().source("source").destination("dest").get();
// Use assertBusy to wait for all threads to complete so we get deterministic results
assertBusy(() -> {
testTelemetryPlugin.collect();
assertThat(testTelemetryPlugin.getLongHistogramMeasurement(REINDEX_TIME_HISTOGRAM).size(), equalTo(1));
assertThat(testTelemetryPlugin.getLongCounterMeasurement(REINDEX_COMPLETION_COUNTER).size(), equalTo(1));
});
// Now none of them
createIndex("none");
reindex().source("source").destination("none").filter(termQuery("foo", "no_match")).get();
assertBusy(() -> {
testTelemetryPlugin.collect();
assertThat(testTelemetryPlugin.getLongHistogramMeasurement(REINDEX_TIME_HISTOGRAM).size(), equalTo(2));
assertThat(testTelemetryPlugin.getLongCounterMeasurement(REINDEX_COMPLETION_COUNTER).size(), equalTo(2));
});
// Now half of them
reindex().source("source").destination("dest_half").filter(termQuery("foo", "a")).get();
assertBusy(() -> {
testTelemetryPlugin.collect();
assertThat(testTelemetryPlugin.getLongHistogramMeasurement(REINDEX_TIME_HISTOGRAM).size(), equalTo(3));
assertThat(testTelemetryPlugin.getLongCounterMeasurement(REINDEX_COMPLETION_COUNTER).size(), equalTo(3));
});
// Limit with maxDocs
reindex().source("source").destination("dest_size_one").maxDocs(1).get();
assertBusy(() -> {
testTelemetryPlugin.collect();
assertThat(testTelemetryPlugin.getLongHistogramMeasurement(REINDEX_TIME_HISTOGRAM).size(), equalTo(4));
assertThat(testTelemetryPlugin.getLongCounterMeasurement(REINDEX_COMPLETION_COUNTER).size(), equalTo(4));
// asset all metric attributes are correct
testTelemetryPlugin.getLongCounterMeasurement(REINDEX_COMPLETION_COUNTER).forEach(m -> {
assertNull(m.attributes().get(ATTRIBUTE_NAME_ERROR_TYPE));
assertThat(m.attributes().get(ATTRIBUTE_NAME_SOURCE), equalTo(ATTRIBUTE_VALUE_SOURCE_LOCAL));
});
});
}
public void testReindexMetricsWithBulkFailure() throws Exception {
final String dataNodeName = internalCluster().startNode();
final TestTelemetryPlugin testTelemetryPlugin = internalCluster().getInstance(PluginsService.class, dataNodeName)
.filterPlugins(TestTelemetryPlugin.class)
.findFirst()
.orElseThrow();
// source and destination have conflicting mappings to cause bulk failures
indexRandom(true, prepareIndex("source").setId("2").setSource("test", "words words"));
indexRandom(true, prepareIndex("dest").setId("1").setSource("test", 10));
var response = reindex().source("source").destination("dest").get();
assertThat(response.getBulkFailures().size(), greaterThanOrEqualTo(1));
assertBusy(() -> {
testTelemetryPlugin.collect();
assertThat(testTelemetryPlugin.getLongHistogramMeasurement(REINDEX_TIME_HISTOGRAM).size(), equalTo(1));
assertThat(testTelemetryPlugin.getLongCounterMeasurement(REINDEX_COMPLETION_COUNTER).size(), equalTo(1));
assertThat(
testTelemetryPlugin.getLongCounterMeasurement(REINDEX_COMPLETION_COUNTER)
.getFirst()
.attributes()
.get(ATTRIBUTE_NAME_ERROR_TYPE),
equalTo("org.elasticsearch.index.mapper.DocumentParsingException")
);
});
}
public void testDeleteByQueryMetrics() throws Exception {
final String dataNodeName = internalCluster().startNode();
indexRandom(
true,
prepareIndex("test").setId("1").setSource("foo", "a"),
prepareIndex("test").setId("2").setSource("foo", "a"),
prepareIndex("test").setId("3").setSource("foo", "b"),
prepareIndex("test").setId("4").setSource("foo", "c"),
prepareIndex("test").setId("5").setSource("foo", "d"),
prepareIndex("test").setId("6").setSource("foo", "e"),
prepareIndex("test").setId("7").setSource("foo", "f")
);
assertHitCount(prepareSearch("test").setSize(0), 7);
final TestTelemetryPlugin testTelemetryPlugin = internalCluster().getInstance(PluginsService.class, dataNodeName)
.filterPlugins(TestTelemetryPlugin.class)
.findFirst()
.orElseThrow();
// Deletes two docs that matches "foo:a"
deleteByQuery().source("test").filter(termQuery("foo", "a")).refresh(true).get();
assertBusy(() -> {
testTelemetryPlugin.collect();
List<Measurement> measurements = testTelemetryPlugin.getLongHistogramMeasurement(DELETE_BY_QUERY_TIME_HISTOGRAM);
assertThat(measurements.size(), equalTo(1));
});
// Deletes the two first docs with limit by size
DeleteByQueryRequestBuilder request = deleteByQuery().source("test").filter(QueryBuilders.matchAllQuery()).size(2).refresh(true);
request.source().addSort("foo.keyword", SortOrder.ASC);
request.get();
assertBusy(() -> {
testTelemetryPlugin.collect();
List<Measurement> measurements = testTelemetryPlugin.getLongHistogramMeasurement(DELETE_BY_QUERY_TIME_HISTOGRAM);
assertThat(measurements.size(), equalTo(2));
});
// Deletes but match no docs
deleteByQuery().source("test").filter(termQuery("foo", "no_match")).refresh(true).get();
assertBusy(() -> {
testTelemetryPlugin.collect();
List<Measurement> measurements = testTelemetryPlugin.getLongHistogramMeasurement(DELETE_BY_QUERY_TIME_HISTOGRAM);
assertThat(measurements.size(), equalTo(3));
});
// Deletes all remaining docs
deleteByQuery().source("test").filter(QueryBuilders.matchAllQuery()).refresh(true).get();
assertBusy(() -> {
testTelemetryPlugin.collect();
List<Measurement> measurements = testTelemetryPlugin.getLongHistogramMeasurement(DELETE_BY_QUERY_TIME_HISTOGRAM);
assertThat(measurements.size(), equalTo(4));
});
}
public void testUpdateByQueryMetrics() throws Exception {
final String dataNodeName = internalCluster().startNode();
indexRandom(
true,
prepareIndex("test").setId("1").setSource("foo", "a"),
prepareIndex("test").setId("2").setSource("foo", "a"),
prepareIndex("test").setId("3").setSource("foo", "b"),
prepareIndex("test").setId("4").setSource("foo", "c")
);
assertHitCount(prepareSearch("test").setSize(0), 4);
assertEquals(1, client().prepareGet("test", "1").get().getVersion());
assertEquals(1, client().prepareGet("test", "4").get().getVersion());
final TestTelemetryPlugin testTelemetryPlugin = internalCluster().getInstance(PluginsService.class, dataNodeName)
.filterPlugins(TestTelemetryPlugin.class)
.findFirst()
.orElseThrow();
// Reindex all the docs
updateByQuery().source("test").refresh(true).get();
assertBusy(() -> {
testTelemetryPlugin.collect();
List<Measurement> measurements = testTelemetryPlugin.getLongHistogramMeasurement(UPDATE_BY_QUERY_TIME_HISTOGRAM);
assertThat(measurements.size(), equalTo(1));
});
// Now none of them
updateByQuery().source("test").filter(termQuery("foo", "no_match")).refresh(true).get();
assertBusy(() -> {
testTelemetryPlugin.collect();
List<Measurement> measurements = testTelemetryPlugin.getLongHistogramMeasurement(UPDATE_BY_QUERY_TIME_HISTOGRAM);
assertThat(measurements.size(), equalTo(2));
});
// Now half of them
updateByQuery().source("test").filter(termQuery("foo", "a")).refresh(true).get();
assertBusy(() -> {
testTelemetryPlugin.collect();
List<Measurement> measurements = testTelemetryPlugin.getLongHistogramMeasurement(UPDATE_BY_QUERY_TIME_HISTOGRAM);
assertThat(measurements.size(), equalTo(3));
});
// Limit with size
UpdateByQueryRequestBuilder request = updateByQuery().source("test").size(3).refresh(true);
request.source().addSort("foo.keyword", SortOrder.ASC);
request.get();
assertBusy(() -> {
testTelemetryPlugin.collect();
List<Measurement> measurements = testTelemetryPlugin.getLongHistogramMeasurement(UPDATE_BY_QUERY_TIME_HISTOGRAM);
assertThat(measurements.size(), equalTo(4));
});
}
}
| ReindexPluginMetricsIT |
java | grpc__grpc-java | core/src/main/java/io/grpc/internal/TransportTracer.java | {
"start": 4783,
"end": 4871
} | interface ____ {
FlowControlWindows read();
}
public static final | FlowControlReader |
java | apache__flink | flink-table/flink-table-runtime/src/test/java/org/apache/flink/table/runtime/operators/sink/SinkUpsertMaterializerMigrationTest.java | {
"start": 9925,
"end": 10635
} | class ____ {
private final FlinkVersion version;
private final SinkUpsertMaterializerStateBackend stateBackend;
private final SinkUpsertMaterializerVersion sumVersion;
private SinkOperationMode(
FlinkVersion version,
SinkUpsertMaterializerStateBackend stateBackend,
SinkUpsertMaterializerVersion sumVersion) {
this.version = version;
this.stateBackend = stateBackend;
this.sumVersion = sumVersion;
}
@Override
public String toString() {
return String.format("flink=%s, state=%s, sum=%s}", version, stateBackend, sumVersion);
}
}
}
| SinkOperationMode |
java | quarkusio__quarkus | integration-tests/spring-data-jpa/src/main/java/io/quarkus/it/spring/data/jpa/complex/ParentMidRepository.java | {
"start": 314,
"end": 369
} | interface ____ {
int getAge();
}
}
| SmallParent2 |
java | ReactiveX__RxJava | src/test/java/io/reactivex/rxjava3/internal/operators/completable/CompletableMaterializeTest.java | {
"start": 946,
"end": 1872
} | class ____ extends RxJavaTest {
@Test
public void error() {
TestException ex = new TestException();
Completable.error(ex)
.materialize()
.test()
.assertResult(Notification.createOnError(ex));
}
@Test
public void empty() {
Completable.complete()
.materialize()
.test()
.assertResult(Notification.createOnComplete());
}
@Test
public void doubleOnSubscribe() {
TestHelper.checkDoubleOnSubscribeCompletableToSingle(new Function<Completable, SingleSource<Notification<Object>>>() {
@Override
public SingleSource<Notification<Object>> apply(Completable v) throws Exception {
return v.materialize();
}
});
}
@Test
public void dispose() {
TestHelper.checkDisposed(CompletableSubject.create().materialize());
}
}
| CompletableMaterializeTest |
java | google__dagger | dagger-android/main/java/dagger/android/DaggerContentProvider.java | {
"start": 841,
"end": 1008
} | class ____ extends ContentProvider {
@CallSuper
@Override
public boolean onCreate() {
AndroidInjection.inject(this);
return true;
}
}
| DaggerContentProvider |
java | ReactiveX__RxJava | src/test/java/io/reactivex/rxjava3/internal/operators/flowable/FlowableFromCallableTest.java | {
"start": 1344,
"end": 8657
} | class ____ extends RxJavaTest {
@SuppressWarnings("unchecked")
@Test
public void shouldNotInvokeFuncUntilSubscription() throws Exception {
Callable<Object> func = mock(Callable.class);
when(func.call()).thenReturn(new Object());
Flowable<Object> fromCallableFlowable = Flowable.fromCallable(func);
verifyNoInteractions(func);
fromCallableFlowable.subscribe();
verify(func).call();
}
@SuppressWarnings("unchecked")
@Test
public void shouldCallOnNextAndOnCompleted() throws Exception {
Callable<String> func = mock(Callable.class);
when(func.call()).thenReturn("test_value");
Flowable<String> fromCallableFlowable = Flowable.fromCallable(func);
Subscriber<String> subscriber = TestHelper.mockSubscriber();
fromCallableFlowable.subscribe(subscriber);
verify(subscriber).onNext("test_value");
verify(subscriber).onComplete();
verify(subscriber, never()).onError(any(Throwable.class));
}
@SuppressWarnings("unchecked")
@Test
public void shouldCallOnError() throws Exception {
Callable<Object> func = mock(Callable.class);
Throwable throwable = new IllegalStateException("Test exception");
when(func.call()).thenThrow(throwable);
Flowable<Object> fromCallableFlowable = Flowable.fromCallable(func);
Subscriber<Object> subscriber = TestHelper.mockSubscriber();
fromCallableFlowable.subscribe(subscriber);
verify(subscriber, never()).onNext(any());
verify(subscriber, never()).onComplete();
verify(subscriber).onError(throwable);
}
@SuppressWarnings("unchecked")
@Test
public void shouldNotDeliverResultIfSubscriberUnsubscribedBeforeEmission() throws Exception {
Callable<String> func = mock(Callable.class);
final CountDownLatch funcLatch = new CountDownLatch(1);
final CountDownLatch observerLatch = new CountDownLatch(1);
when(func.call()).thenAnswer(new Answer<String>() {
@Override
public String answer(InvocationOnMock invocation) throws Throwable {
observerLatch.countDown();
try {
funcLatch.await();
} catch (InterruptedException e) {
// It's okay, unsubscription causes Thread interruption
// Restoring interruption status of the Thread
Thread.currentThread().interrupt();
}
return "should_not_be_delivered";
}
});
Flowable<String> fromCallableFlowable = Flowable.fromCallable(func);
Subscriber<String> subscriber = TestHelper.mockSubscriber();
TestSubscriber<String> outer = new TestSubscriber<>(subscriber);
fromCallableFlowable
.subscribeOn(Schedulers.computation())
.subscribe(outer);
// Wait until func will be invoked
observerLatch.await();
// Unsubscribing before emission
outer.cancel();
// Emitting result
funcLatch.countDown();
// func must be invoked
verify(func).call();
// Observer must not be notified at all
verify(subscriber).onSubscribe(any(Subscription.class));
verifyNoMoreInteractions(subscriber);
}
@Test
public void shouldAllowToThrowCheckedException() {
final Exception checkedException = new Exception("test exception");
Flowable<Object> fromCallableFlowable = Flowable.fromCallable(new Callable<Object>() {
@Override
public Object call() throws Exception {
throw checkedException;
}
});
Subscriber<Object> subscriber = TestHelper.mockSubscriber();
fromCallableFlowable.subscribe(subscriber);
verify(subscriber).onSubscribe(any(Subscription.class));
verify(subscriber).onError(checkedException);
verifyNoMoreInteractions(subscriber);
}
@Test
public void fusedFlatMapExecution() {
final int[] calls = { 0 };
Flowable.just(1).flatMap(new Function<Integer, Publisher<? extends Object>>() {
@Override
public Publisher<? extends Object> apply(Integer v)
throws Exception {
return Flowable.fromCallable(new Callable<Object>() {
@Override
public Object call() throws Exception {
return ++calls[0];
}
});
}
})
.test()
.assertResult(1);
assertEquals(1, calls[0]);
}
@Test
public void fusedFlatMapExecutionHidden() {
final int[] calls = { 0 };
Flowable.just(1).hide().flatMap(new Function<Integer, Publisher<? extends Object>>() {
@Override
public Publisher<? extends Object> apply(Integer v)
throws Exception {
return Flowable.fromCallable(new Callable<Object>() {
@Override
public Object call() throws Exception {
return ++calls[0];
}
});
}
})
.test()
.assertResult(1);
assertEquals(1, calls[0]);
}
@Test
public void fusedFlatMapNull() {
Flowable.just(1).flatMap(new Function<Integer, Publisher<? extends Object>>() {
@Override
public Publisher<? extends Object> apply(Integer v)
throws Exception {
return Flowable.fromCallable(new Callable<Object>() {
@Override
public Object call() throws Exception {
return null;
}
});
}
})
.test()
.assertFailure(NullPointerException.class);
}
@Test
public void fusedFlatMapNullHidden() {
Flowable.just(1).hide().flatMap(new Function<Integer, Publisher<? extends Object>>() {
@Override
public Publisher<? extends Object> apply(Integer v)
throws Exception {
return Flowable.fromCallable(new Callable<Object>() {
@Override
public Object call() throws Exception {
return null;
}
});
}
})
.test()
.assertFailure(NullPointerException.class);
}
@Test
public void undeliverableUponCancellation() throws Exception {
List<Throwable> errors = TestHelper.trackPluginErrors();
try {
final TestSubscriber<Integer> ts = new TestSubscriber<>();
Flowable.fromCallable(new Callable<Integer>() {
@Override
public Integer call() throws Exception {
ts.cancel();
throw new TestException();
}
})
.subscribe(ts);
ts.assertEmpty();
TestHelper.assertUndeliverable(errors, 0, TestException.class);
} finally {
RxJavaPlugins.reset();
}
}
}
| FlowableFromCallableTest |
java | quarkusio__quarkus | test-framework/junit5-config/src/main/java/io/quarkus/test/config/TestConfigCustomizer.java | {
"start": 338,
"end": 1059
} | class ____ implements SmallRyeConfigBuilderCustomizer {
@Override
public void configBuilder(SmallRyeConfigBuilder builder) {
builder.withDefaultValue("quarkus.log.file.path", String.join(File.separator, getLogFileLocationParts()));
}
private static List<String> getLogFileLocationParts() {
// TODO - This can probably be smarter. We can probably keep target as a default and have another source in Gradle to override with the Gradle directory layout
if (Files.isDirectory(Paths.get("build"))) {
return Arrays.asList("build", FileConfig.DEFAULT_LOG_FILE_NAME);
}
return Arrays.asList("target", FileConfig.DEFAULT_LOG_FILE_NAME);
}
}
| TestConfigCustomizer |
java | elastic__elasticsearch | x-pack/plugin/esql/arrow/src/main/java/org/elasticsearch/xpack/esql/arrow/BlockConverter.java | {
"start": 2107,
"end": 2741
} | interface ____ {
long write(RecyclerBytesStreamOutput out) throws IOException;
}
/**
* Convert a block into Arrow buffers.
* @param block the ESQL block
* @param multivalued is this column multivalued? This block may not, but some blocks in that column are.
* @param bufs arrow buffers, used to track sizes
* @param bufWriters buffer writers, that will do the actual work of writing the data
*/
public abstract void convert(Block block, boolean multivalued, List<ArrowBuf> bufs, List<BufWriter> bufWriters);
/**
* Conversion of Double blocks
*/
public static | BufWriter |
java | apache__flink | flink-core/src/test/java/org/apache/flink/util/IOUtilsTest.java | {
"start": 1118,
"end": 2002
} | class ____ {
@Test
void testTryReadFullyFromLongerStream() throws IOException {
ByteArrayInputStream inputStream =
new ByteArrayInputStream("test-data".getBytes(StandardCharsets.UTF_8));
byte[] out = new byte[4];
int read = IOUtils.tryReadFully(inputStream, out);
assertThat(Arrays.copyOfRange(out, 0, read))
.containsExactly("test".getBytes(StandardCharsets.UTF_8));
}
@Test
void testTryReadFullyFromShorterStream() throws IOException {
ByteArrayInputStream inputStream =
new ByteArrayInputStream("t".getBytes(StandardCharsets.UTF_8));
byte[] out = new byte[4];
int read = IOUtils.tryReadFully(inputStream, out);
assertThat(Arrays.copyOfRange(out, 0, read))
.containsExactly("t".getBytes(StandardCharsets.UTF_8));
}
}
| IOUtilsTest |
java | apache__commons-lang | src/main/java/org/apache/commons/lang3/reflect/ConstructorUtils.java | {
"start": 18389,
"end": 19209
} | class ____ invocation of {@link SecurityManager#checkPackageAccess(String)} denies access to the
* package of the class.
* @see Constructor#newInstance(Object...)
*/
public static <T> T invokeExactConstructor(final Class<T> cls, final Object[] args, final Class<?>[] parameterTypes)
throws NoSuchMethodException, IllegalAccessException, InvocationTargetException, InstantiationException {
final Constructor<T> ctor = getAccessibleConstructor(cls, ArrayUtils.nullToEmpty(parameterTypes));
if (ctor == null) {
throw new NoSuchMethodException("No such accessible constructor on object: " + cls.getName());
}
return ctor.newInstance(ArrayUtils.nullToEmpty(args));
}
/**
* Tests whether the specified | and |
java | netty__netty | codec-mqtt/src/main/java/io/netty/handler/codec/mqtt/MqttReasonCodes.java | {
"start": 5539,
"end": 6251
} | enum ____ {
SUCCESS((byte) 0x00),
NO_MATCHING_SUBSCRIBERS((byte) 0x10),
UNSPECIFIED_ERROR((byte) 0x80),
IMPLEMENTATION_SPECIFIC_ERROR((byte) 0x83),
NOT_AUTHORIZED((byte) 0x87),
TOPIC_NAME_INVALID((byte) 0x90),
PACKET_IDENTIFIER_IN_USE((byte) 0x91),
QUOTA_EXCEEDED((byte) 0x97),
PAYLOAD_FORMAT_INVALID((byte) 0x99);
private static final PubAck[] VALUES;
static {
PubAck[] values = values();
VALUES = new PubAck[154];
for (PubAck code : values) {
final int unsignedByte = code.byteValue & 0xFF;
// Suppress a warning about out of bounds access since the | PubAck |
java | apache__kafka | raft/src/main/java/org/apache/kafka/raft/internals/BlockingMessageQueue.java | {
"start": 1229,
"end": 2440
} | class ____ implements RaftMessageQueue {
private static final RaftMessage WAKEUP_MESSAGE = new RaftMessage() {
@Override
public int correlationId() {
return 0;
}
@Override
public ApiMessage data() {
return null;
}
};
private final BlockingQueue<RaftMessage> queue = new LinkedBlockingQueue<>();
private final AtomicInteger size = new AtomicInteger(0);
@Override
public RaftMessage poll(long timeoutMs) {
try {
RaftMessage message = queue.poll(timeoutMs, TimeUnit.MILLISECONDS);
if (message == null || message == WAKEUP_MESSAGE) {
return null;
} else {
size.decrementAndGet();
return message;
}
} catch (InterruptedException e) {
throw new InterruptException(e);
}
}
@Override
public void add(RaftMessage message) {
queue.add(message);
size.incrementAndGet();
}
@Override
public boolean isEmpty() {
return size.get() == 0;
}
@Override
public void wakeup() {
queue.add(WAKEUP_MESSAGE);
}
}
| BlockingMessageQueue |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/scheduler/VertexParallelismStore.java | {
"start": 1051,
"end": 1738
} | interface ____ {
/**
* Returns a given vertex's parallelism information.
*
* @param vertexId vertex for which the parallelism information should be returned
* @return a parallelism information for the given vertex
* @throws IllegalStateException if there is no parallelism information for the given vertex
*/
VertexParallelismInformation getParallelismInfo(JobVertexID vertexId);
/**
* Gets a map of all vertex parallelism information.
*
* @return A map containing JobVertexID and corresponding VertexParallelismInformation.
*/
Map<JobVertexID, VertexParallelismInformation> getAllParallelismInfo();
}
| VertexParallelismStore |
java | apache__kafka | storage/src/test/java/org/apache/kafka/tiered/storage/utils/LocalTieredStorageOutput.java | {
"start": 1403,
"end": 3951
} | class ____<K, V> implements LocalTieredStorageTraverser {
private final Deserializer<K> keyDe;
private final Deserializer<V> valueDe;
private String output = row("File", "Offsets", "Records", "Broker ID");
private String currentTopic = "";
public LocalTieredStorageOutput(Deserializer<K> keyDe, Deserializer<V> valueDe) {
this.keyDe = keyDe;
this.valueDe = valueDe;
// Columns length + 5 column separators.
output += "-".repeat(51 + 8 + 13 + 10 + (3 * 2)) + System.lineSeparator();
}
private String row(String file, Object offset, String record, String ident) {
return String.format("%-51s |%8s |%13s %n", ident + file, offset.toString(), record);
}
private String row(String file, Object offset, String record) {
return row(file, offset, record, " ");
}
private String row() {
return row("", "", "");
}
@Override
public void visitTopicIdPartition(TopicIdPartition topicIdPartition) {
currentTopic = topicIdPartition.topicPartition().topic();
output += row(topicIdPartition.topicPartition().toString(), "", "", "");
}
@Override
public void visitSegment(RemoteLogSegmentFileset fileset) {
try {
List<Record> records = fileset.getRecords();
String segFilename = fileset.getFile(SEGMENT).getName();
if (records.isEmpty()) {
output += row(segFilename, -1, "");
} else {
List<Map.Entry<Long, String>> offsetKeyValues = records
.stream()
.map(record -> Map.entry(record.offset(),
"(" + des(keyDe, record.key()) + ", " + des(valueDe, record.value()) + ")"))
.toList();
output += row(segFilename, offsetKeyValues.get(0).getKey(), offsetKeyValues.get(0).getValue());
if (offsetKeyValues.size() > 1) {
offsetKeyValues.subList(1, records.size()).forEach(offsetKeyValue ->
output += row("", offsetKeyValue.getKey(), offsetKeyValue.getValue()));
}
}
output += row();
} catch (Exception ex) {
throw new RuntimeException(ex);
}
}
public String getOutput() {
return output;
}
private String des(Deserializer<?> de, ByteBuffer bytes) {
return de.deserialize(currentTopic, Utils.toNullableArray(bytes)).toString();
}
}
| LocalTieredStorageOutput |
java | spring-projects__spring-boot | core/spring-boot/src/main/java/org/springframework/boot/context/config/ConfigDataPropertiesRuntimeHints.java | {
"start": 1198,
"end": 1686
} | class ____ implements RuntimeHintsRegistrar {
@Override
public void registerHints(RuntimeHints hints, @Nullable ClassLoader classLoader) {
BindableRuntimeHintsRegistrar.forTypes(ConfigDataProperties.class).registerHints(hints);
Method method = ReflectionUtils.findMethod(ConfigDataLocation.class, "of", String.class);
Assert.state(method != null, "'method' must not be null");
hints.reflection().registerMethod(method, ExecutableMode.INVOKE);
}
}
| ConfigDataPropertiesRuntimeHints |
java | resilience4j__resilience4j | resilience4j-spring6/src/test/java/io/github/resilience4j/spring6/micrometer/configure/RxJava3TimerTest.java | {
"start": 1785,
"end": 7313
} | class ____ {
@Autowired
private MeterRegistry meterRegistry;
@Autowired
private TimerRegistry timerRegistry;
@Autowired
private RxJava3TimedService service;
@Test
public void shouldTimeCompletable() {
Timer timer = timerRegistry.timer(COMPLETABLE_TIMER_NAME);
assertThat(service.succeedCompletable().toObservable().isEmpty().blockingGet()).isTrue();
thenSuccessTimed(meterRegistry, timer);
Throwable result2;
try {
result2 = (Throwable) service.failCompletable().toObservable().blockingFirst();
} catch (Throwable e) {
result2 = e;
}
thenFailureTimed(meterRegistry, timer, result2);
then(result2).isInstanceOf(IllegalStateException.class);
assertThat(service.recoverCompletable().toObservable().isEmpty().blockingGet()).isTrue();
thenFailureTimed(meterRegistry, timer, new IllegalStateException());
Timer spelTimer = timerRegistry.timer(COMPLETABLE_TIMER_NAME + "SpEl");
assertThat(service.recoverCompletable(spelTimer.getName()).toObservable().isEmpty().blockingGet()).isTrue();
thenFailureTimed(meterRegistry, spelTimer, new IllegalStateException());
}
@Test
public void shouldTimeSingle() {
Timer timer = timerRegistry.timer(SINGLE_TIMER_NAME);
String result1 = service.succeedSingle(123).blockingGet();
thenSuccessTimed(meterRegistry, timer);
then(result1).isEqualTo("123");
try {
service.failSingle().blockingGet();
failBecauseExceptionWasNotThrown(IllegalStateException.class);
} catch (IllegalStateException e) {
thenFailureTimed(meterRegistry, timer, e);
}
String result2 = service.recoverSingle(123).blockingGet();
thenFailureTimed(meterRegistry, timer, new IllegalStateException());
then(result2).isEqualTo("Single recovered 123");
Timer spelTimer = timerRegistry.timer(SINGLE_TIMER_NAME + "SpEl");
String result3 = service.recoverSingle(spelTimer.getName(), 123).blockingGet();
thenFailureTimed(meterRegistry, spelTimer, new IllegalStateException());
then(result3).isEqualTo("Single recovered 123");
}
@Test
public void shouldTimeMaybe() {
Timer timer = timerRegistry.timer(MAYBE_TIMER_NAME);
String result1 = service.succeedMaybe(123).blockingGet();
thenSuccessTimed(meterRegistry, timer);
then(result1).isEqualTo("123");
try {
service.failMaybe().blockingGet();
failBecauseExceptionWasNotThrown(IllegalStateException.class);
} catch (IllegalStateException e) {
thenFailureTimed(meterRegistry, timer, e);
}
String result2 = service.recoverMaybe(123).blockingGet();
thenFailureTimed(meterRegistry, timer, new IllegalStateException());
then(result2).isEqualTo("Maybe recovered 123");
Timer spelTimer = timerRegistry.timer(MAYBE_TIMER_NAME + "SpEl");
String result3 = service.recoverMaybe(spelTimer.getName(), 123).blockingGet();
thenFailureTimed(meterRegistry, spelTimer, new IllegalStateException());
then(result3).isEqualTo("Maybe recovered 123");
}
@Test
public void shouldTimeObservable() {
Timer timer = timerRegistry.timer(OBSERVABLE_TIMER_NAME);
List<String> result1 = service.succeedObservable(123).toList().blockingGet();
thenSuccessTimed(meterRegistry, timer);
then(result1).containsExactly("123");
try {
service.failObservable().toList().blockingGet();
failBecauseExceptionWasNotThrown(IllegalStateException.class);
} catch (IllegalStateException e) {
thenFailureTimed(meterRegistry, timer, e);
}
List<String> result2 = service.recoverObservable(123).toList().blockingGet();
thenFailureTimed(meterRegistry, timer, new IllegalStateException());
then(result2).containsExactly("Observable recovered 123");
Timer spelTimer = timerRegistry.timer(OBSERVABLE_TIMER_NAME + "SpEl");
List<String> result3 = service.recoverObservable(spelTimer.getName(), 123).toList().blockingGet();
thenFailureTimed(meterRegistry, spelTimer, new IllegalStateException());
then(result3).containsExactly("Observable recovered 123");
}
@Test
public void shouldTimeFlowable() {
Timer timer = timerRegistry.timer(FLOWABLE_TIMER_NAME);
List<String> result1 = service.succeedFlowable(123).toList().blockingGet();
thenSuccessTimed(meterRegistry, timer);
then(result1).containsExactly("123");
try {
service.failFlowable().toList().blockingGet();
failBecauseExceptionWasNotThrown(IllegalStateException.class);
} catch (IllegalStateException e) {
thenFailureTimed(meterRegistry, timer, e);
}
List<String> result2 = service.recoverFlowable(123).toList().blockingGet();
thenFailureTimed(meterRegistry, timer, new IllegalStateException());
then(result2).containsExactly("Flowable recovered 123");
Timer spelTimer = timerRegistry.timer(FLOWABLE_TIMER_NAME + "SpEl");
List<String> result3 = service.recoverFlowable(spelTimer.getName(), 123).toList().blockingGet();
thenFailureTimed(meterRegistry, spelTimer, new IllegalStateException());
then(result3).containsExactly("Flowable recovered 123");
}
}
| RxJava3TimerTest |
java | netty__netty | microbench/src/main/java/io/netty/handler/codec/http/HttpRequestEncoderInsertBenchmark.java | {
"start": 1267,
"end": 2347
} | class ____ extends AbstractMicrobenchmark {
private final String uri = "http://localhost?eventType=CRITICAL&from=0&to=1497437160327&limit=10&offset=0";
private final OldHttpRequestEncoder encoderOld = new OldHttpRequestEncoder();
private final HttpRequestEncoder encoderNew = new HttpRequestEncoder();
@Benchmark
public ByteBuf oldEncoder() throws Exception {
ByteBuf buffer = Unpooled.buffer(100);
try {
encoderOld.encodeInitialLine(buffer, new DefaultHttpRequest(HttpVersion.HTTP_1_1,
HttpMethod.GET, uri));
return buffer;
} finally {
buffer.release();
}
}
@Benchmark
public ByteBuf newEncoder() throws Exception {
ByteBuf buffer = Unpooled.buffer(100);
try {
encoderNew.encodeInitialLine(buffer, new DefaultHttpRequest(HttpVersion.HTTP_1_1,
HttpMethod.GET, uri));
return buffer;
} finally {
buffer.release();
}
}
private static | HttpRequestEncoderInsertBenchmark |
java | spring-projects__spring-framework | spring-core/src/test/java/org/springframework/core/BridgeMethodResolverTests.java | {
"start": 28941,
"end": 28981
} | class ____<T> {
}
public static | Business |
java | assertj__assertj-core | assertj-core/src/main/java/org/assertj/core/error/AbstractShouldHaveTextContent.java | {
"start": 882,
"end": 923
} | class ____ text content error.
*/
public | for |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/inject/guice/AssistedInjectScopingTest.java | {
"start": 4333,
"end": 4553
} | class ____ {
public TestClass1(String unassisted1, String unassisted2) {}
}
/** Class is not assisted and has no scoping annotation, but has an unrelated annotation. */
@SuppressWarnings("foo")
public | TestClass1 |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/internal/bytes/Bytes_assertEqual_Test.java | {
"start": 1454,
"end": 3437
} | class ____ extends BytesBaseTest {
@Test
void should_fail_if_actual_is_null() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> bytes.assertEqual(someInfo(), null, (byte) 8))
.withMessage(actualIsNull());
}
@Test
void should_pass_if_bytes_are_equal() {
bytes.assertEqual(someInfo(), (byte) 8, (byte) 8);
}
@Test
void should_fail_if_bytes_are_not_equal() {
AssertionInfo info = someInfo();
Throwable error = catchThrowable(() -> bytes.assertEqual(info, (byte) 6, (byte) 8));
assertThat(error).isInstanceOf(AssertionError.class);
verify(failures).failure(info, shouldBeEqual((byte) 6, (byte) 8, info.representation()));
}
@Test
void should_fail_if_actual_is_null_whatever_custom_comparison_strategy_is() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> bytesWithAbsValueComparisonStrategy.assertEqual(someInfo(),
null,
(byte) 8))
.withMessage(actualIsNull());
}
@Test
void should_pass_if_bytes_are_equal_according_to_custom_comparison_strategy() {
bytesWithAbsValueComparisonStrategy.assertEqual(someInfo(), (byte) 8, (byte) -8);
}
@Test
void should_fail_if_bytes_are_not_equal_according_to_custom_comparison_strategy() {
AssertionInfo info = someInfo();
Throwable error = catchThrowable(() -> bytesWithAbsValueComparisonStrategy.assertEqual(info, (byte) 6, (byte) 8));
assertThat(error).isInstanceOf(AssertionError.class);
verify(failures).failure(info, shouldBeEqual((byte) 6, (byte) 8, absValueComparisonStrategy,
new StandardRepresentation()));
}
}
| Bytes_assertEqual_Test |
java | apache__dubbo | dubbo-plugin/dubbo-rest-jaxrs/src/main/java/org/apache/dubbo/rpc/protocol/tri/rest/support/jaxrs/MatrixParamArgumentResolver.java | {
"start": 1475,
"end": 2468
} | class ____ extends AbstractJaxrsArgumentResolver {
@Override
public Class<Annotation> accept() {
return Annotations.MatrixParam.type();
}
@Override
protected ParamType getParamType(NamedValueMeta meta) {
return ParamType.MatrixVariable;
}
@Override
protected Object resolveValue(NamedValueMeta meta, HttpRequest request, HttpResponse response) {
return CollectionUtils.first(doResolveCollectionValue(meta, request));
}
@Override
protected Object resolveCollectionValue(NamedValueMeta meta, HttpRequest request, HttpResponse response) {
return doResolveCollectionValue(meta, request);
}
private static List<String> doResolveCollectionValue(NamedValueMeta meta, HttpRequest request) {
Map<String, String> variableMap = request.attribute(RestConstants.URI_TEMPLATE_VARIABLES_ATTRIBUTE);
return RequestUtils.parseMatrixVariableValues(variableMap, meta.name());
}
}
| MatrixParamArgumentResolver |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.