language
stringclasses
1 value
repo
stringclasses
60 values
path
stringlengths
22
294
class_span
dict
source
stringlengths
13
1.16M
target
stringlengths
1
113
java
spring-projects__spring-framework
spring-tx/src/test/java/org/springframework/transaction/annotation/AnnotationTransactionAttributeSourceTests.java
{ "start": 24552, "end": 24585 }
interface ____ { } @Tx static
Tx
java
alibaba__nacos
naming/src/test/java/com/alibaba/nacos/naming/remote/rpc/handler/PersistentInstanceRequestHandlerTest.java
{ "start": 1491, "end": 2954 }
class ____ { @InjectMocks private PersistentInstanceRequestHandler persistentInstanceRequestHandler; @Mock private PersistentClientOperationServiceImpl clientOperationService; @Test void testHandle() throws NacosException { PersistentInstanceRequest instanceRequest = new PersistentInstanceRequest(); instanceRequest.setType(NamingRemoteConstants.REGISTER_INSTANCE); instanceRequest.setServiceName("service1"); instanceRequest.setGroupName("group1"); Instance instance = new Instance(); instanceRequest.setInstance(instance); RequestMeta requestMeta = new RequestMeta(); persistentInstanceRequestHandler.handle(instanceRequest, requestMeta); Mockito.verify(clientOperationService).registerInstance(Mockito.any(), Mockito.any(), Mockito.anyString()); instanceRequest.setType(NamingRemoteConstants.DE_REGISTER_INSTANCE); persistentInstanceRequestHandler.handle(instanceRequest, requestMeta); Mockito.verify(clientOperationService).deregisterInstance(Mockito.any(), Mockito.any(), Mockito.anyString()); instanceRequest.setType("xxx"); try { persistentInstanceRequestHandler.handle(instanceRequest, requestMeta); } catch (Exception e) { assertEquals(NacosException.INVALID_PARAM, ((NacosException) e).getErrCode()); } } }
PersistentInstanceRequestHandlerTest
java
quarkusio__quarkus
extensions/grpc/cli/src/main/java/io/quarkus/grpc/cli/DescribeCommand.java
{ "start": 505, "end": 1652 }
class ____ extends GcurlBaseCommand { public String getAction() { return "describe"; } @Override protected void execute(MutinyServerReflectionGrpc.MutinyServerReflectionStub stub) { ServerReflectionRequest request = ServerReflectionRequest .newBuilder() .setFileContainingSymbol(unmatched.get(1)) .build(); Multi<ServerReflectionResponse> response = stub.serverReflectionInfo(Multi.createFrom().item(request)); response.toUni().map(r -> { List<ByteString> list = r.getFileDescriptorResponse().getFileDescriptorProtoList(); for (ByteString bs : list) { try { DescriptorProtos.FileDescriptorProto fdp = DescriptorProtos.FileDescriptorProto.parseFrom(bs); log(JsonFormat.printer().print(fdp)); } catch (RuntimeException e) { throw e; } catch (Exception e) { throw new RuntimeException(e); } } return null; }).await().indefinitely(); } }
DescribeCommand
java
apache__hadoop
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/Parser.java
{ "start": 6332, "end": 9154 }
class ____ extends ComposableInputFormat { /** * Return the node type registered for the particular identifier. * By default, this is a CNode for any composite node and a WNode * for &quot;wrapped&quot; nodes. User nodes will likely be composite * nodes. * @see #addIdentifier(java.lang.String, java.lang.Class[], java.lang.Class, java.lang.Class) * @see CompositeInputFormat#setFormat(org.apache.hadoop.mapred.JobConf) */ static Node forIdent(String ident) throws IOException { try { if (!nodeCstrMap.containsKey(ident)) { throw new IOException("No nodetype for " + ident); } return nodeCstrMap.get(ident).newInstance(ident); } catch (IllegalAccessException e) { throw new IOException(e); } catch (InstantiationException e) { throw new IOException(e); } catch (InvocationTargetException e) { throw new IOException(e); } } private static final Class<?>[] ncstrSig = { String.class }; private static final Map<String,Constructor<? extends Node>> nodeCstrMap = new HashMap<String,Constructor<? extends Node>>(); protected static final Map<String,Constructor<? extends ComposableRecordReader>> rrCstrMap = new HashMap<String,Constructor<? extends ComposableRecordReader>>(); /** * For a given identifier, add a mapping to the nodetype for the parse * tree and to the ComposableRecordReader to be created, including the * formals required to invoke the constructor. * The nodetype and constructor signature should be filled in from the * child node. */ protected static void addIdentifier(String ident, Class<?>[] mcstrSig, Class<? extends Node> nodetype, Class<? extends ComposableRecordReader> cl) throws NoSuchMethodException { Constructor<? extends Node> ncstr = nodetype.getDeclaredConstructor(ncstrSig); ncstr.setAccessible(true); nodeCstrMap.put(ident, ncstr); Constructor<? extends ComposableRecordReader> mcstr = cl.getDeclaredConstructor(mcstrSig); mcstr.setAccessible(true); rrCstrMap.put(ident, mcstr); } // inst protected int id = -1; protected String ident; protected Class<? 
extends WritableComparator> cmpcl; protected Node(String ident) { this.ident = ident; } protected void setID(int id) { this.id = id; } protected void setKeyComparator( Class<? extends WritableComparator> cmpcl) { this.cmpcl = cmpcl; } abstract void parse(List<Token> args, Configuration conf) throws IOException; } /** * Nodetype in the parse tree for &quot;wrapped&quot; InputFormats. */ static
Node
java
hibernate__hibernate-orm
hibernate-core/src/test/java/org/hibernate/orm/test/mapping/embeddable/SimpleEmbeddableTest.java
{ "start": 1831, "end": 2552 }
class ____ { @Column(name = "publisher_name") private String name; @Column(name = "publisher_country") private String country; //Getters and setters, equals and hashCode methods omitted for brevity //end::embeddable-type-mapping-example[] public Publisher(String name, String country) { this.name = name; this.country = country; } private Publisher() {} public String getName() { return name; } public void setName(String name) { this.name = name; } public String getCountry() { return country; } public void setCountry(String country) { this.country = country; } //tag::embeddable-type-mapping-example[] } //end::embeddable-type-mapping-example[] }
Publisher
java
apache__kafka
metadata/src/main/java/org/apache/kafka/metadata/storage/FormatterException.java
{ "start": 851, "end": 1074 }
class ____ extends RuntimeException { public FormatterException(String what) { super(what); } public FormatterException(String what, Exception cause) { super(what, cause); } }
FormatterException
java
apache__kafka
share-coordinator/src/main/java/org/apache/kafka/coordinator/share/ShareGroupOffset.java
{ "start": 1484, "end": 6408 }
class ____ { public static final int NO_TIMESTAMP = 0; public static final int UNINITIALIZED_EPOCH = 0; public static final int UNINITIALIZED_DELIVERY_COMPLETE_COUNT = -1; public static final int DEFAULT_EPOCH = 0; private final int snapshotEpoch; private final int stateEpoch; private final int leaderEpoch; private final long startOffset; private final int deliveryCompleteCount; private final List<PersisterStateBatch> stateBatches; private final long createTimestamp; private final long writeTimestamp; private ShareGroupOffset( int snapshotEpoch, int stateEpoch, int leaderEpoch, long startOffset, int deliveryCompleteCount, List<PersisterStateBatch> stateBatches, long createTimestamp, long writeTimestamp ) { this.snapshotEpoch = snapshotEpoch; this.stateEpoch = stateEpoch; this.leaderEpoch = leaderEpoch; this.startOffset = startOffset; this.deliveryCompleteCount = deliveryCompleteCount; this.stateBatches = stateBatches; this.createTimestamp = createTimestamp; this.writeTimestamp = writeTimestamp; } public int snapshotEpoch() { return snapshotEpoch; } public int stateEpoch() { return stateEpoch; } public int leaderEpoch() { return leaderEpoch; } public long startOffset() { return startOffset; } public int deliveryCompleteCount() { return deliveryCompleteCount; } public long createTimestamp() { return createTimestamp; } public long writeTimestamp() { return writeTimestamp; } public List<PersisterStateBatch> stateBatches() { return Collections.unmodifiableList(stateBatches); } private static PersisterStateBatch toPersisterOffsetsStateBatch(ShareSnapshotValue.StateBatch stateBatch) { return new PersisterStateBatch(stateBatch.firstOffset(), stateBatch.lastOffset(), stateBatch.deliveryState(), stateBatch.deliveryCount()); } private static PersisterStateBatch toPersisterOffsetsStateBatch(ShareUpdateValue.StateBatch stateBatch) { return new PersisterStateBatch(stateBatch.firstOffset(), stateBatch.lastOffset(), stateBatch.deliveryState(), stateBatch.deliveryCount()); } public 
static ShareGroupOffset fromRecord(ShareSnapshotValue record) { return new ShareGroupOffset( record.snapshotEpoch(), record.stateEpoch(), record.leaderEpoch(), record.startOffset(), record.deliveryCompleteCount(), record.stateBatches().stream() .map(ShareGroupOffset::toPersisterOffsetsStateBatch) .toList(), record.createTimestamp(), record.writeTimestamp() ); } public static ShareGroupOffset fromRecord(ShareUpdateValue record) { return new ShareGroupOffset( record.snapshotEpoch(), UNINITIALIZED_EPOCH, record.leaderEpoch(), record.startOffset(), record.deliveryCompleteCount(), record.stateBatches().stream() .map(ShareGroupOffset::toPersisterOffsetsStateBatch) .toList(), NO_TIMESTAMP, NO_TIMESTAMP ); } public static ShareGroupOffset fromRequest(WriteShareGroupStateRequestData.PartitionData data, long timestamp) { return fromRequest(data, DEFAULT_EPOCH, timestamp); } public static ShareGroupOffset fromRequest(WriteShareGroupStateRequestData.PartitionData data, int snapshotEpoch, long timestamp) { return new ShareGroupOffset( snapshotEpoch, data.stateEpoch(), data.leaderEpoch(), data.startOffset(), data.deliveryCompleteCount(), data.stateBatches().stream() .map(PersisterStateBatch::from) .toList(), timestamp, timestamp ); } public static ShareGroupOffset fromRequest(InitializeShareGroupStateRequestData.PartitionData data, long timestamp) { return fromRequest(data, DEFAULT_EPOCH, timestamp); } public static ShareGroupOffset fromRequest(InitializeShareGroupStateRequestData.PartitionData data, int snapshotEpoch, long timestamp) { // This method is invoked during InitializeShareGroupStateRequest. Since the deliveryCompleteCount is not yet // known at this stage, it is initialized to its default value. return new ShareGroupOffset( snapshotEpoch, data.stateEpoch(), UNINITIALIZED_EPOCH, data.startOffset(), UNINITIALIZED_DELIVERY_COMPLETE_COUNT, List.of(), timestamp, timestamp ); } public static
ShareGroupOffset
java
spring-projects__spring-framework
spring-beans/src/main/java/org/springframework/beans/factory/support/BeanDefinitionRegistryPostProcessor.java
{ "start": 1373, "end": 2420 }
interface ____ extends BeanFactoryPostProcessor { /** * Modify the application context's internal bean definition registry after its * standard initialization. All regular bean definitions will have been loaded, * but no beans will have been instantiated yet. This allows for adding further * bean definitions before the next post-processing phase kicks in. * @param registry the bean definition registry used by the application context * @throws org.springframework.beans.BeansException in case of errors */ void postProcessBeanDefinitionRegistry(BeanDefinitionRegistry registry) throws BeansException; /** * Empty implementation of {@link BeanFactoryPostProcessor#postProcessBeanFactory} * since custom {@code BeanDefinitionRegistryPostProcessor} implementations will * typically only provide a {@link #postProcessBeanDefinitionRegistry} method. * @since 6.1 */ @Override default void postProcessBeanFactory(ConfigurableListableBeanFactory beanFactory) throws BeansException { } }
BeanDefinitionRegistryPostProcessor
java
apache__kafka
streams/upgrade-system-tests-20/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java
{ "start": 1482, "end": 6150 }
class ____ { @SuppressWarnings("unchecked") public static void main(final String[] args) throws Exception { if (args.length < 1) { System.err.println("StreamsUpgradeToCooperativeRebalanceTest requires one argument (properties-file) but none provided"); } System.out.println("Args are " + Arrays.toString(args)); final String propFileName = args[0]; final Properties streamsProperties = Utils.loadProps(propFileName); final Properties config = new Properties(); System.out.println("StreamsTest instance started (StreamsUpgradeToCooperativeRebalanceTest v2.0)"); System.out.println("props=" + streamsProperties); config.put(StreamsConfig.APPLICATION_ID_CONFIG, "cooperative-rebalance-upgrade"); config.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass()); config.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass()); config.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 1000L); config.putAll(streamsProperties); final String sourceTopic = streamsProperties.getProperty("source.topic", "source"); final String sinkTopic = streamsProperties.getProperty("sink.topic", "sink"); final String taskDelimiter = streamsProperties.getProperty("task.delimiter", "#"); final int reportInterval = Integer.parseInt(streamsProperties.getProperty("report.interval", "100")); final String upgradePhase = streamsProperties.getProperty("upgrade.phase", ""); final StreamsBuilder builder = new StreamsBuilder(); builder.<String, String>stream(sourceTopic) .peek(new ForeachAction<String, String>() { int recordCounter = 0; @Override public void apply(final String key, final String value) { if (recordCounter++ % reportInterval == 0) { System.out.printf("%sProcessed %d records so far%n", upgradePhase, recordCounter); System.out.flush(); } } } ).to(sinkTopic); final KafkaStreams streams = new KafkaStreams(builder.build(), config); streams.setStateListener((newState, oldState) -> { if (newState == State.RUNNING && oldState == State.REBALANCING) { 
System.out.printf("%sSTREAMS in a RUNNING State%n", upgradePhase); final Set<ThreadMetadata> allThreadMetadata = streams.localThreadsMetadata(); final StringBuilder taskReportBuilder = new StringBuilder(); final List<String> activeTasks = new ArrayList<>(); final List<String> standbyTasks = new ArrayList<>(); for (final ThreadMetadata threadMetadata : allThreadMetadata) { getTasks(threadMetadata.activeTasks(), activeTasks); if (!threadMetadata.standbyTasks().isEmpty()) { getTasks(threadMetadata.standbyTasks(), standbyTasks); } } addTasksToBuilder(activeTasks, taskReportBuilder); taskReportBuilder.append(taskDelimiter); if (!standbyTasks.isEmpty()) { addTasksToBuilder(standbyTasks, taskReportBuilder); } System.out.println("TASK-ASSIGNMENTS:" + taskReportBuilder); } if (newState == State.REBALANCING) { System.out.printf("%sStarting a REBALANCE%n", upgradePhase); } }); streams.start(); Runtime.getRuntime().addShutdownHook(new Thread(() -> { streams.close(); System.out.printf("%sCOOPERATIVE-REBALANCE-TEST-CLIENT-CLOSED%n", upgradePhase); System.out.flush(); })); } private static void addTasksToBuilder(final List<String> tasks, final StringBuilder builder) { if (!tasks.isEmpty()) { for (final String task : tasks) { builder.append(task).append(","); } builder.setLength(builder.length() - 1); } } private static void getTasks(final Set<TaskMetadata> taskMetadata, final List<String> taskList) { for (final TaskMetadata task : taskMetadata) { final Set<TopicPartition> topicPartitions = task.topicPartitions(); for (final TopicPartition topicPartition : topicPartitions) { taskList.add(topicPartition.toString()); } } } }
StreamsUpgradeToCooperativeRebalanceTest
java
quarkusio__quarkus
independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/validation/InitializerMethodMarkedAsyncObserverTest.java
{ "start": 499, "end": 908 }
class ____ { @RegisterExtension public ArcTestContainer container = ArcTestContainer.builder().beanClasses(MyBean.class).shouldFail().build(); @Test public void testFailure() { Throwable error = container.getFailure(); assertNotNull(error); assertTrue(error instanceof DefinitionException); } @ApplicationScoped static
InitializerMethodMarkedAsyncObserverTest
java
spring-projects__spring-boot
module/spring-boot-pulsar/src/main/java/org/springframework/boot/pulsar/autoconfigure/PulsarProperties.java
{ "start": 4786, "end": 6246 }
class ____ { /** * Pulsar web URL for the admin endpoint in the format '(http|https)://host:port'. */ private String serviceUrl = "http://localhost:8080"; /** * Duration to wait for a connection to server to be established. */ private Duration connectionTimeout = Duration.ofMinutes(1); /** * Server response read time out for any request. */ private Duration readTimeout = Duration.ofMinutes(1); /** * Server request time out for any request. */ private Duration requestTimeout = Duration.ofMinutes(5); /** * Authentication settings. */ private final Authentication authentication = new Authentication(); public String getServiceUrl() { return this.serviceUrl; } public void setServiceUrl(String serviceUrl) { this.serviceUrl = serviceUrl; } public Duration getConnectionTimeout() { return this.connectionTimeout; } public void setConnectionTimeout(Duration connectionTimeout) { this.connectionTimeout = connectionTimeout; } public Duration getReadTimeout() { return this.readTimeout; } public void setReadTimeout(Duration readTimeout) { this.readTimeout = readTimeout; } public Duration getRequestTimeout() { return this.requestTimeout; } public void setRequestTimeout(Duration requestTimeout) { this.requestTimeout = requestTimeout; } public Authentication getAuthentication() { return this.authentication; } } public static
Admin
java
spring-projects__spring-boot
core/spring-boot/src/main/java/org/springframework/boot/logging/structured/StructuredLogFormatterFactory.java
{ "start": 5754, "end": 6911 }
class ____ or one of the common formats: %s" .formatted(format, this.commonFormatters.getCommonNames())); } @SuppressWarnings("unchecked") private @Nullable StructuredLogFormatter<E> getUsingClassName(String className) { Object formatter = this.instantiator.instantiate(className); if (formatter != null) { Assert.state(formatter instanceof StructuredLogFormatter, () -> "'%s' is not a StructuredLogFormatter".formatted(className)); checkTypeArgument(formatter); } return (StructuredLogFormatter<E>) formatter; } private void checkTypeArgument(Object formatter) { Class<?> typeArgument = GenericTypeResolver.resolveTypeArgument(formatter.getClass(), StructuredLogFormatter.class); Assert.state(this.logEventType.equals(typeArgument), () -> "Type argument of %s must be %s but was %s".formatted(formatter.getClass().getName(), this.logEventType.getName(), (typeArgument != null) ? typeArgument.getName() : "null")); } /** * Callback used for configure the {@link CommonFormatterFactory} to use for a given * {@link CommonStructuredLogFormat}. * * @param <E> the log event type */ public static
name
java
apache__camel
core/camel-core/src/test/java/org/apache/camel/support/cache/SimpleSoftCacheTest.java
{ "start": 1310, "end": 1349 }
class ____ {@link SimpleSoftCache}. */
for
java
apache__flink
flink-table/flink-table-api-java/src/main/java/org/apache/flink/table/operations/ModifyType.java
{ "start": 964, "end": 1021 }
enum ____ { INSERT, UPDATE, DELETE }
ModifyType
java
google__guava
android/guava-tests/test/com/google/common/collect/CompactHashSetTest.java
{ "start": 1529, "end": 4165 }
class ____ extends TestCase { @AndroidIncompatible // test-suite builders public static Test suite() { List<Feature<?>> allFeatures = Arrays.<Feature<?>>asList( CollectionSize.ANY, CollectionFeature.ALLOWS_NULL_VALUES, CollectionFeature.FAILS_FAST_ON_CONCURRENT_MODIFICATION, CollectionFeature.GENERAL_PURPOSE, CollectionFeature.REMOVE_OPERATIONS, CollectionFeature.SERIALIZABLE, CollectionFeature.SUPPORTS_ADD, CollectionFeature.SUPPORTS_REMOVE); TestSuite suite = new TestSuite(); suite.addTestSuite(CompactHashSetTest.class); suite.addTest( SetTestSuiteBuilder.using( new TestStringSetGenerator() { @Override protected Set<String> create(String[] elements) { return CompactHashSet.create(asList(elements)); } }) .named("CompactHashSet") .withFeatures(allFeatures) .createTestSuite()); suite.addTest( SetTestSuiteBuilder.using( new TestStringSetGenerator() { @Override protected Set<String> create(String[] elements) { CompactHashSet<String> set = CompactHashSet.create(asList(elements)); for (int i = 0; i < 100; i++) { set.add("extra" + i); } for (int i = 0; i < 100; i++) { set.remove("extra" + i); } set.trimToSize(); return set; } }) .named("CompactHashSet#TrimToSize") .withFeatures(allFeatures) .createTestSuite()); return suite; } public void testAllocArraysDefault() { CompactHashSet<Integer> set = CompactHashSet.create(); assertThat(set.needsAllocArrays()).isTrue(); assertThat(set.elements).isNull(); set.add(1); assertThat(set.needsAllocArrays()).isFalse(); assertThat(set.elements).hasLength(CompactHashing.DEFAULT_SIZE); } public void testAllocArraysExpectedSize() { for (int i = 0; i <= CompactHashing.DEFAULT_SIZE; i++) { CompactHashSet<Integer> set = CompactHashSet.createWithExpectedSize(i); assertThat(set.needsAllocArrays()).isTrue(); assertThat(set.elements).isNull(); set.add(1); assertThat(set.needsAllocArrays()).isFalse(); int expectedSize = max(1, i); assertThat(set.elements).hasLength(expectedSize); } } }
CompactHashSetTest
java
quarkusio__quarkus
extensions/panache/mongodb-panache-common/deployment/src/main/java/io/quarkus/mongodb/panache/common/deployment/BasePanacheMongoResourceProcessor.java
{ "start": 19599, "end": 23480 }
class ____.produce(ReflectiveHierarchyBuildItem.builder(parameterType) .source(BasePanacheMongoResourceProcessor.class.getName()) .ignoreFieldPredicate(f -> f.hasAnnotation(BSON_IGNORE)) .ignoreMethodPredicate(m -> m.hasAnnotation(BSON_IGNORE)) .build()); // Register for building the property mapping cache propertyMappingClass.produce(new PropertyMappingClassBuildStep(parameterType.name().toString())); } } protected void processTypes(CombinedIndexBuildItem index, BuildProducer<BytecodeTransformerBuildItem> transformers, BuildProducer<ReflectiveClassBuildItem> reflectiveClass, BuildProducer<ReflectiveHierarchyBuildItem> reflectiveHierarchy, BuildProducer<PropertyMappingClassBuildStep> propertyMappingClass, TypeBundle typeBundle, PanacheRepositoryEnhancer repositoryEnhancer, PanacheEntityEnhancer entityEnhancer, MetamodelInfo modelInfo) { processRepositories(index, transformers, reflectiveHierarchy, propertyMappingClass, repositoryEnhancer, typeBundle); processEntities(index, transformers, reflectiveClass, propertyMappingClass, entityEnhancer, typeBundle, modelInfo); } @BuildStep ReflectiveHierarchyIgnoreWarningBuildItem ignoreBsonTypes() { return new ReflectiveHierarchyIgnoreWarningBuildItem(dotname -> dotname.toString().startsWith(BSON_PACKAGE)); } @BuildStep protected void registerJacksonSerDeser(BuildProducer<JacksonModuleBuildItem> customSerDeser) { customSerDeser.produce( new JacksonModuleBuildItem.Builder("ObjectIdModule") .add(ObjectIdSerializer.class.getName(), ObjectIdDeserializer.class.getName(), ObjectId.class.getName()) .build()); } @BuildStep protected void registerJsonbSerDeser(BuildProducer<JsonbSerializerBuildItem> jsonbSerializers, BuildProducer<JsonbDeserializerBuildItem> jsonbDeserializers) { jsonbSerializers .produce(new JsonbSerializerBuildItem( io.quarkus.mongodb.panache.common.jsonb.ObjectIdSerializer.class.getName())); jsonbDeserializers .produce(new JsonbDeserializerBuildItem( 
io.quarkus.mongodb.panache.common.jsonb.ObjectIdDeserializer.class.getName())); } @BuildStep public void unremovableClients(BuildProducer<MongoUnremovableClientsBuildItem> unremovable) { unremovable.produce(new MongoUnremovableClientsBuildItem()); } @BuildStep protected void unremovableMongoDatabaseResolvers(BuildProducer<UnremovableBeanBuildItem> unremovable) { unremovable.produce(UnremovableBeanBuildItem.beanTypes(MONGO_DATABASE_RESOLVER)); } @BuildStep protected ValidationPhaseBuildItem.ValidationErrorBuildItem validate(ValidationPhaseBuildItem validationPhase, CombinedIndexBuildItem index) throws BuildException { // we verify that no ID fields are defined (via @BsonId) when extending PanacheMongoEntity or ReactivePanacheMongoEntity for (AnnotationInstance annotationInstance : index.getComputingIndex().getAnnotations(BSON_ID)) { ClassInfo info = JandexUtil.getEnclosingClass(annotationInstance); if (JandexUtil.isSubclassOf(index.getComputingIndex(), info, getImperativeTypeBundle().entity().dotName())) { BuildException be = new BuildException("You provide a MongoDB identifier via @BsonId inside '" + info.name() + "' but one is already provided by PanacheMongoEntity, " + "your
reflectiveHierarchy
java
elastic__elasticsearch
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/PerPartitionCategorizationConfig.java
{ "start": 805, "end": 4253 }
class ____ implements ToXContentObject, Writeable { public static final ParseField TYPE_FIELD = new ParseField("per_partition_categorization"); public static final ParseField ENABLED_FIELD = new ParseField("enabled"); public static final ParseField STOP_ON_WARN = new ParseField("stop_on_warn"); // These parsers follow the pattern that metadata is parsed leniently (to allow for enhancements), whilst config is parsed strictly public static final ConstructingObjectParser<PerPartitionCategorizationConfig, Void> LENIENT_PARSER = createParser(true); public static final ConstructingObjectParser<PerPartitionCategorizationConfig, Void> STRICT_PARSER = createParser(false); private static ConstructingObjectParser<PerPartitionCategorizationConfig, Void> createParser(boolean ignoreUnknownFields) { ConstructingObjectParser<PerPartitionCategorizationConfig, Void> parser = new ConstructingObjectParser<>( TYPE_FIELD.getPreferredName(), ignoreUnknownFields, a -> new PerPartitionCategorizationConfig((boolean) a[0], (Boolean) a[1]) ); parser.declareBoolean(ConstructingObjectParser.constructorArg(), ENABLED_FIELD); parser.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), STOP_ON_WARN); return parser; } private final boolean enabled; private final boolean stopOnWarn; public PerPartitionCategorizationConfig() { this(false, null); } public PerPartitionCategorizationConfig(boolean enabled, Boolean stopOnWarn) { this.enabled = enabled; this.stopOnWarn = (stopOnWarn == null) ? 
false : stopOnWarn; if (this.enabled == false && this.stopOnWarn) { throw ExceptionsHelper.badRequestException( STOP_ON_WARN.getPreferredName() + " cannot be true in " + TYPE_FIELD.getPreferredName() + " when " + ENABLED_FIELD.getPreferredName() + " is false" ); } } public PerPartitionCategorizationConfig(StreamInput in) throws IOException { enabled = in.readBoolean(); stopOnWarn = in.readBoolean(); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(enabled); out.writeBoolean(stopOnWarn); } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field(ENABLED_FIELD.getPreferredName(), enabled); if (enabled) { builder.field(STOP_ON_WARN.getPreferredName(), stopOnWarn); } builder.endObject(); return builder; } public boolean isEnabled() { return enabled; } public boolean isStopOnWarn() { return stopOnWarn; } @Override public boolean equals(Object other) { if (this == other) { return true; } if (other instanceof PerPartitionCategorizationConfig == false) { return false; } PerPartitionCategorizationConfig that = (PerPartitionCategorizationConfig) other; return this.enabled == that.enabled && this.stopOnWarn == that.stopOnWarn; } @Override public int hashCode() { return Objects.hash(enabled, stopOnWarn); } }
PerPartitionCategorizationConfig
java
elastic__elasticsearch
x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/FieldExtractExec.java
{ "start": 1115, "end": 8209 }
class ____ extends UnaryExec implements EstimatesRowSize { public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( PhysicalPlan.class, "FieldExtractExec", FieldExtractExec::new ); protected final List<Attribute> attributesToExtract; protected final @Nullable Attribute sourceAttribute; /** * The default for {@link #fieldExtractPreference} if the plan doesn't require * a preference. */ protected final MappedFieldType.FieldExtractPreference defaultPreference; /** * Attributes that may be extracted as doc values even if that makes them * less accurate. This is mostly used for geo fields which lose a lot of * precision in their doc values, but in some cases doc values provides * <strong>enough</strong> precision to do the job. * <p> * This is never serialized between nodes and only used locally. * </p> */ protected final Set<Attribute> docValuesAttributes; /** * Attributes of a shape whose extent can be extracted directly from the doc-values encoded geometry. * <p> * This is never serialized between nodes and only used locally. 
* </p> */ protected final Set<Attribute> boundsAttributes; private List<Attribute> lazyOutput; public FieldExtractExec( Source source, PhysicalPlan child, List<Attribute> attributesToExtract, MappedFieldType.FieldExtractPreference defaultPreference ) { this(source, child, attributesToExtract, defaultPreference, Set.of(), Set.of()); } protected FieldExtractExec( Source source, PhysicalPlan child, List<Attribute> attributesToExtract, MappedFieldType.FieldExtractPreference defaultPreference, Set<Attribute> docValuesAttributes, Set<Attribute> boundsAttributes ) { super(source, child); this.attributesToExtract = attributesToExtract; this.sourceAttribute = extractSourceAttributesFrom(child); this.docValuesAttributes = docValuesAttributes; this.boundsAttributes = boundsAttributes; this.defaultPreference = defaultPreference; } private FieldExtractExec(StreamInput in) throws IOException { this( Source.readFrom((PlanStreamInput) in), in.readNamedWriteable(PhysicalPlan.class), in.readNamedWriteableCollectionAsList(Attribute.class), MappedFieldType.FieldExtractPreference.NONE ); // defaultPreference is only used on the data node and never serialized. // docValueAttributes, fieldFunctionAttributes and boundsAttributes are only used on the data node and never serialized. } @Override public void writeTo(StreamOutput out) throws IOException { Source.EMPTY.writeTo(out); out.writeNamedWriteable(child()); out.writeNamedWriteableCollection(attributesToExtract()); // defaultPreference is only used on the data node and never serialized. // docValueAttributes, fieldFunctionAttributes and boundsAttributes are only used on the data node and never serialized. 
} @Override public String getWriteableName() { return ENTRY.name; } public static @Nullable Attribute extractSourceAttributesFrom(PhysicalPlan plan) { for (Attribute attribute : plan.outputSet()) { if (EsQueryExec.isDocAttribute(attribute)) { return attribute; } } return null; } @Override protected AttributeSet computeReferences() { return sourceAttribute != null ? AttributeSet.of(sourceAttribute) : AttributeSet.EMPTY; } @Override protected NodeInfo<? extends FieldExtractExec> info() { return NodeInfo.create(this, FieldExtractExec::new, child(), attributesToExtract, defaultPreference); } @Override public UnaryExec replaceChild(PhysicalPlan newChild) { return new FieldExtractExec(source(), newChild, attributesToExtract, defaultPreference, docValuesAttributes, boundsAttributes); } public FieldExtractExec withDocValuesAttributes(Set<Attribute> docValuesAttributes) { return new FieldExtractExec(source(), child(), attributesToExtract, defaultPreference, docValuesAttributes, boundsAttributes); } public FieldExtractExec withBoundsAttributes(Set<Attribute> boundsAttributes) { return new FieldExtractExec(source(), child(), attributesToExtract, defaultPreference, docValuesAttributes, boundsAttributes); } public FieldExtractExec withAttributesToExtract(List<Attribute> attributesToExtract) { return new FieldExtractExec(source(), child(), attributesToExtract, defaultPreference, docValuesAttributes, boundsAttributes); } public List<Attribute> attributesToExtract() { return attributesToExtract; } public @Nullable Attribute sourceAttribute() { return sourceAttribute; } public Set<Attribute> docValuesAttributes() { return docValuesAttributes; } public Set<Attribute> boundsAttributes() { return boundsAttributes; } @Override public List<Attribute> output() { if (lazyOutput == null) { List<Attribute> childOutput = child().output(); lazyOutput = new ArrayList<>(childOutput.size() + attributesToExtract.size()); lazyOutput.addAll(childOutput); lazyOutput.addAll(attributesToExtract); } 
return lazyOutput; } @Override public PhysicalPlan estimateRowSize(State state) { state.add(true, attributesToExtract); return this; } @Override public int hashCode() { return Objects.hash(attributesToExtract, docValuesAttributes, boundsAttributes, child()); } @Override public boolean equals(Object obj) { if (this == obj) { return true; } if (obj == null || getClass() != obj.getClass()) { return false; } FieldExtractExec other = (FieldExtractExec) obj; return Objects.equals(attributesToExtract, other.attributesToExtract) && Objects.equals(docValuesAttributes, other.docValuesAttributes) && Objects.equals(boundsAttributes, other.boundsAttributes) && Objects.equals(child(), other.child()); } @Override public String nodeString() { return Strings.format( "%s<%s,%s>", nodeName() + NodeUtils.limitedToString(attributesToExtract), docValuesAttributes, boundsAttributes ); } public MappedFieldType.FieldExtractPreference fieldExtractPreference(Attribute attr) { if (boundsAttributes.contains(attr)) { return MappedFieldType.FieldExtractPreference.EXTRACT_SPATIAL_BOUNDS; } if (docValuesAttributes.contains(attr)) { return MappedFieldType.FieldExtractPreference.DOC_VALUES; } return defaultPreference; } }
FieldExtractExec
java
elastic__elasticsearch
x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoShapeTests.java
{ "start": 1263, "end": 4032 }
class ____ extends AbstractScalarFunctionTestCase { public ToGeoShapeTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) { this.testCase = testCaseSupplier.get(); } @ParametersFactory public static Iterable<Object[]> parameters() { // TODO multivalue fields final String attribute = "Attribute[channel=0]"; final Function<String, String> evaluatorName = s -> "ToGeoShape" + s + "Evaluator[in=" + attribute + "]"; final List<TestCaseSupplier> suppliers = new ArrayList<>(); // Points and shapes TestCaseSupplier.forUnaryGeoPoint(suppliers, attribute, DataType.GEO_SHAPE, v -> v, List.of()); TestCaseSupplier.forUnaryGeoShape(suppliers, attribute, DataType.GEO_SHAPE, v -> v, List.of()); // Geo-Grid types for (DataType gridType : new DataType[] { DataType.GEOHASH, DataType.GEOTILE, DataType.GEOHEX }) { TestCaseSupplier.forUnaryGeoGrid( suppliers, "ToGeoShapeFromGeoGridEvaluator[in=Attribute[channel=0], dataType=" + gridType + "]", gridType, DataType.GEO_SHAPE, v -> EsqlDataTypeConverter.geoGridToShape((long) v, gridType), List.of() ); } // random strings that don't look like a geo shape TestCaseSupplier.forUnaryStrings(suppliers, evaluatorName.apply("FromString"), DataType.GEO_SHAPE, bytesRef -> null, bytesRef -> { var exception = expectThrows(Exception.class, () -> GEO.wktToWkb(bytesRef.utf8ToString())); return List.of( "Line 1:1: evaluation of [source] failed, treating result as null. 
Only first 20 failures recorded.", "Line 1:1: " + exception ); }); // strings that are geo_shape representations for (DataType dt : List.of(DataType.KEYWORD, DataType.TEXT)) { TestCaseSupplier.unary( suppliers, evaluatorName.apply("FromString"), List.of( new TestCaseSupplier.TypedDataSupplier( "<geo_shape as string>", () -> new BytesRef(GEO.asWkt(GeometryTestUtils.randomGeometry(randomBoolean()))), dt ) ), DataType.GEO_SHAPE, bytesRef -> GEO.wktToWkb(((BytesRef) bytesRef).utf8ToString()), List.of() ); } return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } @Override protected Expression build(Source source, List<Expression> args) { return new ToGeoShape(source, args.get(0)); } }
ToGeoShapeTests
java
apache__hadoop
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/CsiAdaptorProtocolPBServiceImpl.java
{ "start": 2442, "end": 4881 }
class ____ implements CsiAdaptorPB { private final CsiAdaptorProtocol real; public CsiAdaptorProtocolPBServiceImpl(CsiAdaptorProtocol impl) { this.real = impl; } @Override public CsiAdaptorProtos.GetPluginInfoResponse getPluginInfo( RpcController controller, CsiAdaptorProtos.GetPluginInfoRequest request) throws ServiceException { try { GetPluginInfoRequest req = new GetPluginInfoRequestPBImpl(request); GetPluginInfoResponse response = real.getPluginInfo(req); return ((GetPluginInfoResponsePBImpl) response).getProto(); } catch (YarnException | IOException e) { throw new ServiceException(e); } } @Override public CsiAdaptorProtos.ValidateVolumeCapabilitiesResponse validateVolumeCapacity(RpcController controller, CsiAdaptorProtos.ValidateVolumeCapabilitiesRequest request) throws ServiceException { try { ValidateVolumeCapabilitiesRequestPBImpl req = new ValidateVolumeCapabilitiesRequestPBImpl(request); ValidateVolumeCapabilitiesResponse response = real.validateVolumeCapacity(req); return ((ValidateVolumeCapabilitiesResponsePBImpl) response).getProto(); } catch (YarnException | IOException e) { throw new ServiceException(e); } } @Override public CsiAdaptorProtos.NodePublishVolumeResponse nodePublishVolume( RpcController controller, CsiAdaptorProtos.NodePublishVolumeRequest request) throws ServiceException { try { NodePublishVolumeRequestPBImpl req = new NodePublishVolumeRequestPBImpl(request); NodePublishVolumeResponse response = real.nodePublishVolume(req); return ((NodePublishVolumeResponsePBImpl) response).getProto(); } catch (YarnException | IOException e) { throw new ServiceException(e); } } @Override public CsiAdaptorProtos.NodeUnpublishVolumeResponse nodeUnpublishVolume( RpcController controller, CsiAdaptorProtos.NodeUnpublishVolumeRequest request) throws ServiceException { try { NodeUnpublishVolumeRequestPBImpl req = new NodeUnpublishVolumeRequestPBImpl(request); NodeUnpublishVolumeResponse response = real.nodeUnpublishVolume(req); return 
((NodeUnpublishVolumeResponsePBImpl) response).getProto(); } catch (YarnException | IOException e) { throw new ServiceException(e); } } }
CsiAdaptorProtocolPBServiceImpl
java
dropwizard__dropwizard
dropwizard-db/src/main/java/io/dropwizard/db/PooledDataSourceFactory.java
{ "start": 1154, "end": 1222 }
class ____ the database driver. * * @return the JDBC driver
of
java
elastic__elasticsearch
server/src/internalClusterTest/java/org/elasticsearch/snapshots/MultiClusterRepoAccessIT.java
{ "start": 1787, "end": 9814 }
class ____ extends AbstractSnapshotIntegTestCase { private InternalTestCluster secondCluster; private Path repoPath; @Before public void startSecondCluster() throws IOException, InterruptedException { repoPath = randomRepoPath(); secondCluster = new InternalTestCluster( randomLong(), createTempDir(), true, true, 0, 0, "second_cluster", new NodeConfigurationSource() { @Override public Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { return Settings.builder() .put(MultiClusterRepoAccessIT.this.nodeSettings(nodeOrdinal, otherSettings)) .put(NetworkModule.TRANSPORT_TYPE_KEY, getTestTransportType()) .put(Environment.PATH_REPO_SETTING.getKey(), repoPath) .build(); } @Override public Path nodeConfigPath(int nodeOrdinal) { return null; } }, 0, "leader", Arrays.asList( ESIntegTestCase.TestSeedPlugin.class, MockHttpTransport.TestPlugin.class, MockTransportService.TestPlugin.class, InternalSettingsPlugin.class, getTestTransportPlugin() ), Function.identity(), TEST_ENTITLEMENTS::addEntitledNodePaths ); secondCluster.beforeTest(random()); } @After public void stopSecondCluster() throws IOException { IOUtils.close(secondCluster::close); } @Override protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { return Settings.builder() .put(super.nodeSettings(nodeOrdinal, otherSettings)) .put(NetworkModule.TRANSPORT_TYPE_KEY, getTestTransportType()) .build(); } @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return CollectionUtils.appendToCopy(super.nodePlugins(), getTestTransportPlugin()); } public void testConcurrentDeleteFromOtherCluster() { internalCluster().startMasterOnlyNode(); internalCluster().startDataOnlyNode(); final String repoNameOnFirstCluster = "test-repo"; final String repoNameOnSecondCluster = randomBoolean() ? 
"test-repo" : "other-repo"; createRepository(repoNameOnFirstCluster, "fs", repoPath); secondCluster.startMasterOnlyNode(); secondCluster.startDataOnlyNode(); createIndexWithRandomDocs("test-idx-1", randomIntBetween(1, 100)); createFullSnapshot(repoNameOnFirstCluster, "snap-1"); createIndexWithRandomDocs("test-idx-2", randomIntBetween(1, 100)); createFullSnapshot(repoNameOnFirstCluster, "snap-2"); createIndexWithRandomDocs("test-idx-3", randomIntBetween(1, 100)); createFullSnapshot(repoNameOnFirstCluster, "snap-3"); secondCluster.client() .admin() .cluster() .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoNameOnSecondCluster) .setType("fs") .setSettings(Settings.builder().put("location", repoPath)) .get(); secondCluster.client().admin().cluster().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoNameOnSecondCluster, "snap-1").get(); secondCluster.client().admin().cluster().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoNameOnSecondCluster, "snap-2").get(); final SnapshotException sne; try (var ignored = new BlobStoreIndexShardSnapshotsIntegritySuppressor()) { sne = expectThrows( SnapshotException.class, clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoNameOnFirstCluster, "snap-4").setWaitForCompletion(true) ); } assertThat(sne.getMessage(), containsString("failed to update snapshot in repository")); final RepositoryException cause = (RepositoryException) sne.getCause(); assertThat( cause.getMessage(), containsString( "[" + repoNameOnFirstCluster + "] concurrent modification of the index-N file, expected current generation [2] but it was not found in " + "the repository. The last cluster to write to this repository was [" + secondCluster.client().admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().clusterUUID() + "] at generation [4]." 
) ); assertAcked(clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoNameOnFirstCluster).get()); createRepository(repoNameOnFirstCluster, "fs", repoPath); createFullSnapshot(repoNameOnFirstCluster, "snap-5"); } public void testConcurrentWipeAndRecreateFromOtherCluster() throws IOException { internalCluster().startMasterOnlyNode(); internalCluster().startDataOnlyNode(); final String repoName = "test-repo"; createRepository(repoName, "fs", repoPath); createIndexWithRandomDocs("test-idx-1", randomIntBetween(1, 100)); createFullSnapshot(repoName, "snap-1"); final String repoUuid = clusterAdmin().prepareGetRepositories(TEST_REQUEST_TIMEOUT, repoName) .get() .repositories() .stream() .filter(r -> r.name().equals(repoName)) .findFirst() .orElseThrow() .uuid(); secondCluster.startMasterOnlyNode(); secondCluster.startDataOnlyNode(); assertAcked( secondCluster.client() .admin() .cluster() .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName) .setType("fs") .setSettings(Settings.builder().put("location", repoPath).put(READONLY_SETTING_KEY, true)) ); assertThat( secondCluster.client() .admin() .cluster() .prepareGetRepositories(TEST_REQUEST_TIMEOUT, repoName) .get() .repositories() .stream() .filter(r -> r.name().equals(repoName)) .findFirst() .orElseThrow() .uuid(), equalTo(repoUuid) ); assertAcked(clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName)); IOUtils.rm(internalCluster().getCurrentMasterNodeInstance(Environment.class).resolveRepoDir(repoPath.toString())); createRepository(repoName, "fs", repoPath); createFullSnapshot(repoName, "snap-1"); final String newRepoUuid = clusterAdmin().prepareGetRepositories(TEST_REQUEST_TIMEOUT, repoName) .get() .repositories() .stream() .filter(r -> r.name().equals(repoName)) .findFirst() .orElseThrow() .uuid(); assertThat(newRepoUuid, not(equalTo((repoUuid)))); 
secondCluster.client().admin().cluster().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoName).get(); // force another read of the // repo data assertThat( secondCluster.client() .admin() .cluster() .prepareGetRepositories(TEST_REQUEST_TIMEOUT, repoName) .get() .repositories() .stream() .filter(r -> r.name().equals(repoName)) .findFirst() .orElseThrow() .uuid(), equalTo(newRepoUuid) ); } }
MultiClusterRepoAccessIT
java
hibernate__hibernate-orm
hibernate-core/src/main/java/org/hibernate/query/sqm/tree/domain/SqmEntityDomainType.java
{ "start": 315, "end": 487 }
interface ____<E> extends EntityDomainType<E>, SqmTreatableDomainType<E> { @Override default @Nullable SqmDomainType<E> getSqmType() { return this; } }
SqmEntityDomainType
java
quarkusio__quarkus
integration-tests/jackson/src/main/java/io/quarkus/it/jackson/model/CustomTypeResolver.java
{ "start": 331, "end": 1575 }
class ____ extends TypeIdResolverBase { private JavaType baseType; public CustomTypeResolver() { } @Override public void init(JavaType bt) { baseType = bt; } @Override public String idFromValue(Object value) { return getId(value); } @Override public String idFromValueAndType(Object value, Class<?> suggestedType) { return getId(value); } @Override public JsonTypeInfo.Id getMechanism() { return JsonTypeInfo.Id.CUSTOM; } private String getId(Object value) { if (value instanceof ModelWithJsonTypeIdResolver) { return ((ModelWithJsonTypeIdResolver) value).getType(); } return null; } @Override public JavaType typeFromId(DatabindContext context, String id) { if (id != null) { switch (id) { case "ONE": return context.constructSpecializedType(baseType, ModelWithJsonTypeIdResolver.SubclassOne.class); case "TWO": return context.constructSpecializedType(baseType, ModelWithJsonTypeIdResolver.SubclassTwo.class); } } return TypeFactory.unknownType(); } }
CustomTypeResolver
java
mapstruct__mapstruct
processor/src/test/java/org/mapstruct/ap/test/decorator/spring/constructor/PersonMapperDecorator.java
{ "start": 449, "end": 818 }
class ____ implements PersonMapper { @Autowired @Qualifier("delegate") private PersonMapper delegate; @Override public PersonDto personToPersonDto(Person person) { PersonDto dto = delegate.personToPersonDto( person ); dto.setName( person.getFirstName() + " " + person.getLastName() ); return dto; } }
PersonMapperDecorator
java
assertj__assertj-core
assertj-core/src/test/java/org/assertj/core/condition/NestableConditionFixtures.java
{ "start": 3487, "end": 3741 }
class ____ { final String firstLine; final String postcode; final Country country; Address(String firstLine, String postcode, Country country) { this.firstLine = firstLine; this.postcode = postcode; this.country = country; } }
Address
java
hibernate__hibernate-orm
hibernate-core/src/main/java/org/hibernate/dialect/lock/internal/MySQLLockingSupport.java
{ "start": 2655, "end": 4138 }
class ____ implements ConnectionLockTimeoutStrategy { @Override public Level getSupportedLevel() { return ConnectionLockTimeoutStrategy.Level.EXTENDED; } @Override public Timeout getLockTimeout(Connection connection, SessionFactoryImplementor factory) { return Helper.getLockTimeout( "SELECT @@SESSION.innodb_lock_wait_timeout", (resultSet) -> { // see https://dev.mysql.com/doc/refman/8.4/en/innodb-parameters.html#sysvar_innodb_lock_wait_timeout final int millis = resultSet.getInt( 1 ); return switch ( millis ) { case 0 -> Timeouts.NO_WAIT; case 100000000 -> Timeouts.WAIT_FOREVER; default -> Timeout.milliseconds( millis ); }; }, connection, factory ); } @Override public void setLockTimeout(Timeout timeout, Connection connection, SessionFactoryImplementor factory) { Helper.setLockTimeout( timeout, (t) -> { // see https://dev.mysql.com/doc/refman/8.4/en/innodb-parameters.html#sysvar_innodb_lock_wait_timeout final int milliseconds = timeout.milliseconds(); if ( milliseconds == SKIP_LOCKED_MILLI ) { throw new HibernateException( "Connection lock-timeout does not accept skip-locked" ); } if ( milliseconds == WAIT_FOREVER_MILLI ) { return 100000000; } return milliseconds; }, "SET @@SESSION.innodb_lock_wait_timeout = %s", connection, factory ); } } }
ConnectionLockTimeoutStrategyImpl
java
quarkusio__quarkus
extensions/resteasy-reactive/rest-client/runtime/src/main/java/io/quarkus/rest/client/reactive/runtime/NoOpHeaderFiller.java
{ "start": 154, "end": 403 }
class ____ implements HeaderFiller { @Override public void addHeaders(MultivaluedMap<String, String> headers) { } @SuppressWarnings("unused") public static final NoOpHeaderFiller INSTANCE = new NoOpHeaderFiller(); }
NoOpHeaderFiller
java
apache__camel
components/camel-jms/src/test/java/org/apache/camel/component/jms/integration/FileRouteToJmsIT.java
{ "start": 1614, "end": 3271 }
class ____ extends AbstractJMSTest { @Order(2) @RegisterExtension public static CamelContextExtension camelContextExtension = new DefaultCamelContextExtension(); protected final String componentName = "activemq"; protected CamelContext context; protected ProducerTemplate template; protected ConsumerTemplate consumer; @Test public void testRouteToFile() throws Exception { MockEndpoint mock = getMockEndpoint("mock:result"); mock.expectedMessageCount(1); mock.message(0).body().isInstanceOf(byte[].class); mock.message(0).body(String.class).isEqualTo("Hello World"); deleteDirectory("target/routefromfile"); template.sendBodyAndHeader("file://target/routefromfile", "Hello World", Exchange.FILE_NAME, "hello.txt"); MockEndpoint.assertIsSatisfied(context); } @Override public String getComponentName() { return componentName; } @Override protected RouteBuilder createRouteBuilder() { return new RouteBuilder() { public void configure() { from("file://target/routefromfile").to("activemq:queue:FileRouteToJmsIT"); from("activemq:queue:FileRouteToJmsIT").to("mock:result"); } }; } @Override public CamelContextExtension getCamelContextExtension() { return camelContextExtension; } @BeforeEach void setUpRequirements() { context = camelContextExtension.getContext(); template = camelContextExtension.getProducerTemplate(); consumer = camelContextExtension.getConsumerTemplate(); } }
FileRouteToJmsIT
java
mapstruct__mapstruct
processor/src/test/java/org/mapstruct/ap/test/bugs/_2677/Issue2677Mapper.java
{ "start": 1943, "end": 2237 }
class ____ { private final int id; public ParentWithPresenceCheck(int id) { this.id = id; } public int getId() { return id; } public boolean hasId() { return id > 10; } }
ParentWithPresenceCheck
java
google__error-prone
core/src/test/java/com/google/errorprone/fixes/SuggestedFixesTest.java
{ "start": 46083, "end": 46313 }
class ____ { @SuppressWarnings({"RemoveMe", "KeepMe1", "KeepMe2"}) int BEST = 42; } """) .addOutputLines( "out/Test.java", """ public
Test
java
apache__hadoop
hadoop-cloud-storage-project/hadoop-huaweicloud/src/test/java/org/apache/hadoop/fs/obs/TestOBSContractOpen.java
{ "start": 1062, "end": 1250 }
class ____ extends AbstractContractOpenTest { @Override protected AbstractFSContract createContract(final Configuration conf) { return new OBSContract(conf); } }
TestOBSContractOpen
java
apache__camel
components/camel-ftp/src/test/java/org/apache/camel/component/file/remote/integration/FileToFtpsImplicitSSLWithoutClientAuthIT.java
{ "start": 1283, "end": 2266 }
class ____ extends FtpsServerImplicitSSLWithoutClientAuthTestSupport { protected String getFtpUrl() { return "ftps://admin@localhost:{{ftp.server.port}}" + "/tmp2/camel?password=admin&initialDelay=2000&disableSecureDataChannelDefaults=true" + "&securityProtocol=SSLv3&implicit=true&delete=true"; } @Disabled("CAMEL-16784:Disable testFromFileToFtp tests") @Test public void testFromFileToFtp() throws Exception { MockEndpoint mock = getMockEndpoint("mock:result"); mock.expectedMessageCount(2); MockEndpoint.assertIsSatisfied(context); } @Override protected RouteBuilder createRouteBuilder() { return new RouteBuilder() { public void configure() { from("file:src/test/data?noop=true").log("Got ${file:name}").to(getFtpUrl()); from(getFtpUrl()).to("mock:result"); } }; } }
FileToFtpsImplicitSSLWithoutClientAuthIT
java
apache__kafka
clients/src/main/java/org/apache/kafka/common/annotation/InterfaceStability.java
{ "start": 1930, "end": 2142 }
interface ____ { } /** * No guarantee is provided as to reliability or stability across any level of release granularity. */ @Documented @Retention(RetentionPolicy.RUNTIME) public @
Evolving
java
apache__hadoop
hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatShellScope.java
{ "start": 1405, "end": 13361 }
class ____ { private static final Logger LOG = LoggerFactory.getLogger(HdfsCompatShellScope.class); private static final Random RANDOM = new Random(); private final HdfsCompatEnvironment env; private final HdfsCompatSuite suite; private File stdoutDir = null; private File passList = null; private File failList = null; private File skipList = null; private Path snapshotPath = null; private String storagePolicy = null; private Method disallowSnapshot = null; public HdfsCompatShellScope(HdfsCompatEnvironment env, HdfsCompatSuite suite) { this.env = env; this.suite = suite; } public HdfsCompatReport apply() throws Exception { File localTmpDir = null; try { localTmpDir = new File(this.env.getLocalTmpDir()); LOG.info("Local tmp dir: " + localTmpDir.getAbsolutePath()); return runShell(localTmpDir); } finally { try { if (this.disallowSnapshot != null) { try { this.disallowSnapshot.invoke(this.env.getFileSystem(), this.snapshotPath); } catch (InvocationTargetException e) { LOG.error("Cannot disallow snapshot", e.getCause()); } catch (ReflectiveOperationException e) { LOG.error("Disallow snapshot method is invalid", e); } } } finally { FileUtils.deleteQuietly(localTmpDir); } } } private HdfsCompatReport runShell(File localTmpDir) throws Exception { File localDir = new File(localTmpDir, "test"); File scriptDir = new File(localTmpDir, "scripts"); File confDir = new File(localTmpDir, "hadoop-conf"); copyScriptsResource(scriptDir); try { setShellLogConf(confDir); } catch (Exception e) { LOG.error("Cannot set new conf dir", e); confDir = null; } prepareSnapshot(); this.storagePolicy = getStoragePolicy(); String[] confEnv = getEnv(localDir, scriptDir, confDir); ExecResult result = exec(confEnv, scriptDir); printLog(result); return export(); } private void copyScriptsResource(File scriptDir) throws IOException { Files.createDirectories(new File(scriptDir, "cases").toPath()); copyResource("/misc.sh", new File(scriptDir, "misc.sh")); String[] cases = suite.getShellCases(); for 
(String res : cases) { copyResource("/cases/" + res, new File(scriptDir, "cases/" + res)); } } private void setShellLogConf(File confDir) throws IOException { final String hadoopHome = System.getenv("HADOOP_HOME"); final String hadoopConfDir = System.getenv("HADOOP_CONF_DIR"); if ((hadoopHome == null) || hadoopHome.isEmpty()) { LOG.error("HADOOP_HOME not configured"); } if ((hadoopConfDir == null) || hadoopConfDir.isEmpty()) { throw new IOException("HADOOP_CONF_DIR not configured"); } File srcDir = new File(hadoopConfDir).getAbsoluteFile(); if (!srcDir.isDirectory()) { throw new IOException("HADOOP_CONF_DIR is not valid: " + srcDir); } Files.createDirectories(confDir.toPath()); FileUtils.copyDirectory(srcDir, confDir); File logConfFile = new File(confDir, "log4j.properties"); copyResource("/hadoop-compat-bench-log4j.properties", logConfFile, true); } @VisibleForTesting protected void copyResource(String res, File dst) throws IOException { copyResource(res, dst, false); } private void copyResource(String res, File dst, boolean overwrite) throws IOException { InputStream in = null; try { in = this.getClass().getResourceAsStream(res); if (in == null) { in = this.suite.getClass().getResourceAsStream(res); } if (in == null) { throw new IOException("Resource not found" + " during scripts prepare: " + res); } if (dst.exists() && !overwrite) { throw new IOException("Cannot overwrite existing resource file"); } Files.createDirectories(dst.getParentFile().toPath()); byte[] buf = new byte[1024]; try (OutputStream out = new FileOutputStream(dst)) { int nRead = in.read(buf); while (nRead != -1) { out.write(buf, 0, nRead); nRead = in.read(buf); } } } finally { if (in != null) { in.close(); } } } private void prepareSnapshot() { this.snapshotPath = AbstractHdfsCompatCase.getUniquePath(this.env.getBase()); Method allowSnapshot = null; try { FileSystem fs = this.env.getFileSystem(); fs.mkdirs(snapshotPath); Method allowSnapshotMethod = fs.getClass() .getMethod("allowSnapshot", 
Path.class); allowSnapshotMethod.setAccessible(true); allowSnapshotMethod.invoke(fs, snapshotPath); allowSnapshot = allowSnapshotMethod; Method disallowSnapshotMethod = fs.getClass() .getMethod("disallowSnapshot", Path.class); disallowSnapshotMethod.setAccessible(true); this.disallowSnapshot = disallowSnapshotMethod; } catch (IOException e) { LOG.error("Cannot prepare snapshot path", e); } catch (InvocationTargetException e) { LOG.error("Cannot allow snapshot", e.getCause()); } catch (ReflectiveOperationException e) { LOG.warn("Get admin snapshot methods failed."); } catch (Exception e) { LOG.warn("Prepare snapshot failed", e); } if (allowSnapshot == null) { LOG.warn("No allowSnapshot method found."); } if (this.disallowSnapshot == null) { LOG.warn("No disallowSnapshot method found."); } } private String getStoragePolicy() { BlockStoragePolicySpi def; String[] policies; try { FileSystem fs = this.env.getFileSystem(); Path base = this.env.getBase(); fs.mkdirs(base); def = fs.getStoragePolicy(base); policies = env.getStoragePolicyNames(); } catch (Exception e) { LOG.warn("Cannot get storage policy", e); return "Hot"; } List<String> differentPolicies = new ArrayList<>(); for (String policyName : policies) { if ((def == null) || !policyName.equalsIgnoreCase(def.getName())) { differentPolicies.add(policyName); } } if (differentPolicies.isEmpty()) { final String defPolicyName; if ((def == null) || (def.getName() == null)) { defPolicyName = "Hot"; LOG.warn("No valid storage policy name found, use Hot."); } else { defPolicyName = def.getName(); LOG.warn("There is only one storage policy: " + defPolicyName); } return defPolicyName; } else { return differentPolicies.get( RANDOM.nextInt(differentPolicies.size())); } } @VisibleForTesting protected String[] getEnv(File localDir, File scriptDir, File confDir) throws IOException { List<String> confEnv = new ArrayList<>(); final Map<String, String> environments = System.getenv(); for (Map.Entry<String, String> entry : 
environments.entrySet()) { confEnv.add(entry.getKey() + "=" + entry.getValue()); } if (confDir != null) { confEnv.add("HADOOP_CONF_DIR=" + confDir.getAbsolutePath()); } String timestamp = String.valueOf(System.currentTimeMillis()); Path baseUri = new Path(this.env.getBase(), timestamp); File localUri = new File(localDir, timestamp).getAbsoluteFile(); File resultDir = new File(localDir, timestamp); Files.createDirectories(resultDir.toPath()); this.stdoutDir = new File(resultDir, "output").getAbsoluteFile(); this.passList = new File(resultDir, "passed").getAbsoluteFile(); this.failList = new File(resultDir, "failed").getAbsoluteFile(); this.skipList = new File(resultDir, "skipped").getAbsoluteFile(); Files.createFile(this.passList.toPath()); Files.createFile(this.failList.toPath()); Files.createFile(this.skipList.toPath()); final String prefix = "HADOOP_COMPAT_"; confEnv.add(prefix + "BASE_URI=" + baseUri); confEnv.add(prefix + "LOCAL_URI=" + localUri.getAbsolutePath()); confEnv.add(prefix + "SNAPSHOT_URI=" + snapshotPath.toString()); confEnv.add(prefix + "STORAGE_POLICY=" + storagePolicy); confEnv.add(prefix + "STDOUT_DIR=" + stdoutDir.getAbsolutePath()); confEnv.add(prefix + "PASS_FILE=" + passList.getAbsolutePath()); confEnv.add(prefix + "FAIL_FILE=" + failList.getAbsolutePath()); confEnv.add(prefix + "SKIP_FILE=" + skipList.getAbsolutePath()); return confEnv.toArray(new String[0]); } private ExecResult exec(String[] confEnv, File scriptDir) throws IOException, InterruptedException { Process process = Runtime.getRuntime().exec( "prove -r cases", confEnv, scriptDir); StreamPrinter out = new StreamPrinter(process.getInputStream()); StreamPrinter err = new StreamPrinter(process.getErrorStream()); out.start(); err.start(); int code = process.waitFor(); out.join(); err.join(); return new ExecResult(code, out.lines, err.lines); } private void printLog(ExecResult execResult) { LOG.info("Shell prove\ncode: {}\nstdout:\n\t{}\nstderr:\n\t{}", execResult.code, 
String.join("\n\t", execResult.out), String.join("\n\t", execResult.err)); File casesRoot = new File(stdoutDir, "cases").getAbsoluteFile(); String[] casesDirList = casesRoot.list(); if (casesDirList == null) { LOG.error("stdout/stderr root directory is invalid: " + casesRoot); return; } Arrays.sort(casesDirList, (o1, o2) -> { if (o1.length() == o2.length()) { return o1.compareTo(o2); } else { return o1.length() - o2.length(); } }); for (String casesDir : casesDirList) { printCasesLog(new File(casesRoot, casesDir).getAbsoluteFile()); } } private void printCasesLog(File casesDir) { File stdout = new File(casesDir, "stdout").getAbsoluteFile(); File stderr = new File(casesDir, "stderr").getAbsoluteFile(); File[] stdoutFiles = stdout.listFiles(); File[] stderrFiles = stderr.listFiles(); Set<String> cases = new HashSet<>(); if (stdoutFiles != null) { for (File c : stdoutFiles) { cases.add(c.getName()); } } if (stderrFiles != null) { for (File c : stderrFiles) { cases.add(c.getName()); } } String[] caseNames = cases.stream().sorted((o1, o2) -> { if (o1.length() == o2.length()) { return o1.compareTo(o2); } else { return o1.length() - o2.length(); } }).toArray(String[]::new); for (String caseName : caseNames) { File stdoutFile = new File(stdout, caseName); File stderrFile = new File(stderr, caseName); try { List<String> stdoutLines = stdoutFile.exists() ? readLines(stdoutFile) : new ArrayList<>(); List<String> stderrLines = stderrFile.exists() ? 
readLines(stderrFile) : new ArrayList<>(); LOG.info("Shell case {} - #{}\nstdout:\n\t{}\nstderr:\n\t{}", casesDir.getName(), caseName, String.join("\n\t", stdoutLines), String.join("\n\t", stderrLines)); } catch (Exception e) { LOG.warn("Read shell stdout or stderr file failed", e); } } } private HdfsCompatReport export() throws IOException { HdfsCompatReport report = new HdfsCompatReport(); report.addPassedCase(readLines(this.passList)); report.addFailedCase(readLines(this.failList)); report.addSkippedCase(readLines(this.skipList)); return report; } private List<String> readLines(File file) throws IOException { List<String> lines = new ArrayList<>(); try (BufferedReader br = new BufferedReader(new InputStreamReader( new FileInputStream(file), StandardCharsets.UTF_8))) { String line = br.readLine(); while (line != null) { lines.add(line); line = br.readLine(); } } return lines; } private static final
HdfsCompatShellScope
java
apache__rocketmq
common/src/main/java/org/apache/rocketmq/common/utils/Shutdown.java
{ "start": 853, "end": 914 }
interface ____ { void shutdown() throws Exception; }
Shutdown
java
elastic__elasticsearch
server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java
{ "start": 1294, "end": 5837 }
class ____ extends ActionResponse implements ToXContentObject { private static final ParseField TEMPLATE = new ParseField("template"); private static final ParseField OVERLAPPING = new ParseField("overlapping"); private static final ParseField NAME = new ParseField("name"); private static final ParseField INDEX_PATTERNS = new ParseField("index_patterns"); @Nullable // the resolved settings, mappings and aliases for the matched templates, if any private final Template resolvedTemplate; @Nullable // a map of template names and their index patterns that would overlap when matching the given index name private final Map<String, List<String>> overlappingTemplates; @Nullable private final RolloverConfiguration rolloverConfiguration; public SimulateIndexTemplateResponse(@Nullable Template resolvedTemplate, @Nullable Map<String, List<String>> overlappingTemplates) { this(resolvedTemplate, overlappingTemplates, null); } public SimulateIndexTemplateResponse( @Nullable Template resolvedTemplate, @Nullable Map<String, List<String>> overlappingTemplates, @Nullable RolloverConfiguration rolloverConfiguration ) { this.resolvedTemplate = resolvedTemplate; this.overlappingTemplates = overlappingTemplates; this.rolloverConfiguration = rolloverConfiguration; } public RolloverConfiguration getRolloverConfiguration() { return rolloverConfiguration; } /** * NB prior to 9.0 this was a TransportMasterNodeReadAction so for BwC we must remain able to write these responses until * we no longer need to support calling this action remotely. 
*/ @UpdateForV10(owner = UpdateForV10.Owner.DATA_MANAGEMENT) @Override public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(resolvedTemplate); if (overlappingTemplates != null) { out.writeBoolean(true); out.writeInt(overlappingTemplates.size()); for (Map.Entry<String, List<String>> entry : overlappingTemplates.entrySet()) { out.writeString(entry.getKey()); out.writeStringCollection(entry.getValue()); } } else { out.writeBoolean(false); } if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { out.writeOptionalWriteable(rolloverConfiguration); } if (out.getTransportVersion().between(TransportVersions.V_8_14_0, TransportVersions.V_8_16_0)) { out.writeOptionalWriteable(null); } } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); if (resolvedTemplate != null) { builder.field(TEMPLATE.getPreferredName()); resolvedTemplate.toXContent(builder, ResettableValue.hideResetValues(params), rolloverConfiguration); } if (this.overlappingTemplates != null) { builder.startArray(OVERLAPPING.getPreferredName()); for (Map.Entry<String, List<String>> entry : overlappingTemplates.entrySet()) { builder.startObject(); builder.field(NAME.getPreferredName(), entry.getKey()); builder.stringListField(INDEX_PATTERNS.getPreferredName(), entry.getValue()); builder.endObject(); } builder.endArray(); } builder.endObject(); return builder; } @Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } SimulateIndexTemplateResponse that = (SimulateIndexTemplateResponse) o; return Objects.equals(resolvedTemplate, that.resolvedTemplate) && Objects.deepEquals(overlappingTemplates, that.overlappingTemplates) && Objects.equals(rolloverConfiguration, that.rolloverConfiguration); } @Override public int hashCode() { return Objects.hash(resolvedTemplate, overlappingTemplates, rolloverConfiguration); } 
@Override public String toString() { return "SimulateIndexTemplateResponse{" + "resolved template=" + resolvedTemplate + ", overlapping templates=" + String.join("|", overlappingTemplates.keySet()) + "}"; } }
SimulateIndexTemplateResponse
java
google__dagger
javatests/dagger/internal/codegen/ProductionComponentProcessorTest.java
{ "start": 8003, "end": 8321 }
interface ____ {}"); Source myProductionSubcomponent = CompilerTests.javaSource( "test.MyProductionSubcomponent", "package test;", "", "import dagger.producers.ProductionSubcomponent;", "", "@ProductionSubcomponent", "
MyModule
java
elastic__elasticsearch
modules/lang-painless/src/test/java/org/elasticsearch/painless/AugmentationTests.java
{ "start": 1408, "end": 1546 }
class ____ { public static final String[] PARAMETERS = {}; public abstract String execute(); public
DigestTestScript
java
apache__maven
its/core-it-suite/src/test/java/org/apache/maven/it/MavenITmng3441MetadataUpdatedFromDeploymentRepositoryTest.java
{ "start": 1336, "end": 2981 }
class ____ extends AbstractMavenIntegrationTestCase { @Test public void testitMNG3441() throws Exception { File testDir = extractResources("/mng-3441"); File targetRepository = new File(testDir, "target-repo"); FileUtils.deleteDirectory(targetRepository); FileUtils.copyDirectoryStructure(new File(testDir, "deploy-repo"), targetRepository); Verifier verifier; verifier = newVerifier(testDir.getAbsolutePath()); verifier.addCliArgument("-s"); verifier.addCliArgument("settings.xml"); verifier.addCliArgument("deploy"); verifier.execute(); verifier.verifyErrorFreeLog(); Xpp3Dom dom = readDom(new File( targetRepository, "org/apache/maven/its/mng3441/test-artifact/1.0-SNAPSHOT/maven-metadata.xml")); assertEquals( "2", dom.getChild("versioning") .getChild("snapshot") .getChild("buildNumber") .getValue()); dom = readDom(new File(targetRepository, "org/apache/maven/its/mng3441/maven-metadata.xml")); Xpp3Dom[] plugins = dom.getChild("plugins").getChildren(); assertEquals("other-plugin", plugins[0].getChild("prefix").getValue()); assertEquals("test-artifact", plugins[1].getChild("prefix").getValue()); } private Xpp3Dom readDom(File file) throws XmlPullParserException, IOException { try (FileReader reader = new FileReader(file)) { return Xpp3DomBuilder.build(reader); } } }
MavenITmng3441MetadataUpdatedFromDeploymentRepositoryTest
java
quarkusio__quarkus
extensions/redis-client/runtime/src/test/java/io/quarkus/redis/datasource/ListCommandTest.java
{ "start": 16999, "end": 17227 }
class ____ extends Animal { private String id; public String getId() { return id; } public Cat setId(String id) { this.id = id; return this; } } }
Cat
java
apache__hadoop
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/DeleteReservationHomeSubClusterRequest.java
{ "start": 1346, "end": 2423 }
class ____ { @Private @Unstable public static DeleteReservationHomeSubClusterRequest newInstance( ReservationId reservationId) { DeleteReservationHomeSubClusterRequest deleteReservationRequest = Records.newRecord(DeleteReservationHomeSubClusterRequest.class); deleteReservationRequest.setReservationId(reservationId); return deleteReservationRequest; } /** * Get the identifier of the {@link ReservationId} to be removed from * <code>Federation state store</code> . * * @return the identifier of the Reservation to be removed from Federation * State Store. */ @Public @Unstable public abstract ReservationId getReservationId(); /** * Set the identifier of the {@link ReservationId} to be removed from * <code>Federation state store</code> . * * @param reservationId the identifier of the Reservation to be removed from * Federation State Store. */ @Private @Unstable public abstract void setReservationId(ReservationId reservationId); }
DeleteReservationHomeSubClusterRequest
java
quarkusio__quarkus
integration-tests/kubernetes/quarkus-standard-way/src/test/java/io/quarkus/it/kubernetes/OpenshiftWithBaseImageFromInternalRegistryTest.java
{ "start": 746, "end": 3763 }
class ____ { private static final String APP_NAME = "openshift-with-base-image-stream"; @RegisterExtension static final QuarkusProdModeTest config = new QuarkusProdModeTest() .withApplicationRoot((jar) -> jar.addClasses(GreetingResource.class)) .setApplicationName(APP_NAME) .setApplicationVersion("0.1-SNAPSHOT") .overrideConfigKey("quarkus.openshift.base-jvm-image", "image-registry.openshift-image-registry.svc:5000/myns/myimage:1.0") .setForcedDependencies(List.of(Dependency.of("io.quarkus", "quarkus-openshift", Version.getVersion()))); @ProdBuildResults private ProdModeTestResults prodModeTestResults; @Test public void assertGeneratedResources() throws IOException { Path kubernetesDir = prodModeTestResults.getBuildDir().resolve("kubernetes"); assertThat(kubernetesDir).isDirectoryContaining(p -> p.getFileName().endsWith("openshift.json")) .isDirectoryContaining(p -> p.getFileName().endsWith("openshift.yml")); List<HasMetadata> openshiftList = DeserializationUtil.deserializeAsList(kubernetesDir.resolve("openshift.yml")); assertThat(openshiftList).filteredOn(h -> "Deployment".equals(h.getKind())).singleElement().satisfies(h -> { assertThat(h.getMetadata()).satisfies(m -> { assertThat(m.getName()).isEqualTo(APP_NAME); }); assertThat(h).isInstanceOfSatisfying(Deployment.class, d -> { Container container = d.getSpec().getTemplate().getSpec().getContainers().get(0); assertThat(container.getImage()).endsWith(APP_NAME + ":0.1-SNAPSHOT"); }); }); assertThat(openshiftList).filteredOn(h -> "BuildConfig".equals(h.getKind())).singleElement().satisfies(h -> { assertThat(h.getMetadata()).satisfies(m -> { assertThat(m.getName()).isEqualTo(APP_NAME); }); assertThat(h).isInstanceOfSatisfying(BuildConfig.class, b -> { assertThat(b.getSpec().getStrategy().getSourceStrategy().getFrom()).satisfies(f -> { assertThat(f.getKind()).isEqualTo("ImageStreamTag"); assertThat(f.getNamespace()).isEqualTo("myns"); assertThat(f.getName()).isEqualTo("myimage:1.0"); }); }); }); //Verify that we only 
got one Image assertThat(openshiftList).filteredOn(h -> "ImageStream".equals(h.getKind())).singleElement() .satisfies(h -> { assertThat(h.getMetadata()).satisfies(m -> { assertThat(m.getName()).isEqualTo(APP_NAME); }); assertThat(h).isInstanceOfSatisfying(ImageStream.class, i -> { assertThat(i.getSpec().getDockerImageRepository()).isNull(); }); }); } }
OpenshiftWithBaseImageFromInternalRegistryTest
java
spring-projects__spring-framework
spring-web/src/main/java/org/springframework/http/codec/multipart/MultipartUtils.java
{ "start": 1173, "end": 3055 }
class ____ { /** * Return the character set of the given headers, as defined in the * {@link HttpHeaders#getContentType()} header. */ public static Charset charset(HttpHeaders headers) { MediaType contentType = headers.getContentType(); if (contentType != null) { Charset charset = contentType.getCharset(); if (charset != null) { return charset; } } return StandardCharsets.UTF_8; } public static byte @Nullable [] boundary(HttpMessage message, Charset headersCharset) { MediaType contentType = message.getHeaders().getContentType(); if (contentType != null) { String boundary = contentType.getParameter("boundary"); if (boundary != null) { int len = boundary.length(); if (len > 2 && boundary.charAt(0) == '"' && boundary.charAt(len - 1) == '"') { boundary = boundary.substring(1, len - 1); } return boundary.getBytes(headersCharset); } } return null; } /** * Concatenates the given array of byte arrays. */ public static byte[] concat(byte[]... byteArrays) { int len = 0; for (byte[] byteArray : byteArrays) { len += byteArray.length; } byte[] result = new byte[len]; len = 0; for (byte[] byteArray : byteArrays) { System.arraycopy(byteArray, 0, result, len, byteArray.length); len += byteArray.length; } return result; } public static void closeChannel(Channel channel) { try { if (channel.isOpen()) { channel.close(); } } catch (IOException ignore) { } } public static void deleteFile(Path file) { try { Files.delete(file); } catch (IOException ignore) { } } public static boolean isFormField(HttpHeaders headers) { MediaType contentType = headers.getContentType(); return (contentType == null || MediaType.TEXT_PLAIN.equalsTypeAndSubtype(contentType)) && headers.getContentDisposition().getFilename() == null; } }
MultipartUtils
java
apache__camel
components/camel-salesforce/camel-salesforce-component/src/main/java/org/apache/camel/component/salesforce/internal/processor/BulkApiV2Processor.java
{ "start": 2563, "end": 17437 }
class ____ extends AbstractSalesforceProcessor { private BulkApiV2Client bulkClient; public BulkApiV2Processor(SalesforceEndpoint endpoint) { super(endpoint); } @Override public boolean process(final Exchange exchange, final AsyncCallback callback) { boolean done = false; try { switch (operationName) { case BULK2_CREATE_JOB: processCreateJob(exchange, callback); break; case BULK2_GET_JOB: processGetJob(exchange, callback); break; case BULK2_CREATE_BATCH: processCreateBatch(exchange, callback); break; case BULK2_CLOSE_JOB: processCloseJob(exchange, callback); break; case BULK2_ABORT_JOB: processAbortJob(exchange, callback); break; case BULK2_DELETE_JOB: deleteJob(exchange, callback); break; case BULK2_GET_SUCCESSFUL_RESULTS: processGetSuccessfulResults(exchange, callback); break; case BULK2_GET_FAILED_RESULTS: processGetFailedResults(exchange, callback); break; case BULK2_GET_UNPROCESSED_RECORDS: processGetUnprocessedRecords(exchange, callback); break; case BULK2_GET_ALL_JOBS: processGetAllJobs(exchange, callback); break; case BULK2_CREATE_QUERY_JOB: processCreateQueryJob(exchange, callback); break; case BULK2_GET_QUERY_JOB: processGetQueryJob(exchange, callback); break; case BULK2_GET_QUERY_JOB_RESULTS: processGetQueryJobResults(exchange, callback); break; case BULK2_ABORT_QUERY_JOB: processAbortQueryJob(exchange, callback); break; case BULK2_DELETE_QUERY_JOB: processDeleteQueryJob(exchange, callback); break; case BULK2_GET_ALL_QUERY_JOBS: processGetAllQueryJobs(exchange, callback); break; default: throw new SalesforceException( "Unknown operation name: " + operationName.value(), null); } } catch (SalesforceException e) { exchange.setException(new SalesforceException( String.format("Error processing %s: [%s] \"%s\"", operationName.value(), e.getStatusCode(), e.getMessage()), e)); callback.done(true); done = true; } catch (InvalidPayloadException | RuntimeException e) { exchange.setException(new SalesforceException( String.format("Unexpected Error processing %s: 
\"%s\"", operationName.value(), e.getMessage()), e)); callback.done(true); done = true; } // continue routing asynchronously if false return done; } @Override protected void doStart() throws Exception { super.doStart(); this.bulkClient = new DefaultBulkApiV2Client( (String) endpointConfigMap.get(SalesforceEndpointConfig.API_VERSION), session, httpClient, loginConfig, endpoint); ServiceHelper.startService(bulkClient); } @Override public void doStop() { // stop the client ServiceHelper.stopService(bulkClient); } private void processCreateJob(Exchange exchange, AsyncCallback callback) throws InvalidPayloadException { Job job = exchange.getIn().getMandatoryBody(Job.class); bulkClient.createJob(job, determineHeaders(exchange), new JobResponseCallback() { @Override public void onResponse(Job job, Map<String, String> headers, SalesforceException ex) { processResponse(exchange, job, headers, ex, callback); } }); } private void processGetJob(Exchange exchange, AsyncCallback callback) throws SalesforceException { Job job = exchange.getIn().getBody(Job.class); String jobId; if (job != null) { jobId = job.getId(); } else { jobId = getParameter(JOB_ID, exchange, USE_BODY, NOT_OPTIONAL); } bulkClient.getJob(jobId, determineHeaders(exchange), new JobResponseCallback() { @Override public void onResponse(Job job, Map<String, String> headers, SalesforceException ex) { processResponse(exchange, job, headers, ex, callback); } }); } private void processCreateBatch(Exchange exchange, AsyncCallback callback) throws SalesforceException { String jobId = getParameter(JOB_ID, exchange, IGNORE_BODY, NOT_OPTIONAL); InputStream input; try { input = exchange.getIn().getMandatoryBody(InputStream.class); } catch (CamelException e) { String msg = "Error preparing batch request: " + e.getMessage(); throw new SalesforceException(msg, e); } bulkClient.createBatch(input, jobId, determineHeaders(exchange), new ResponseCallback() { @Override public void onResponse(Map<String, String> headers, 
SalesforceException ex) { processResponse(exchange, null, headers, ex, callback); } }); } private void deleteJob(Exchange exchange, AsyncCallback callback) throws SalesforceException { String jobId = getParameter(JOB_ID, exchange, IGNORE_BODY, NOT_OPTIONAL); bulkClient.deleteJob(jobId, determineHeaders(exchange), new ResponseCallback() { @Override public void onResponse(Map<String, String> headers, SalesforceException ex) { processResponse(exchange, null, headers, ex, callback); } }); } private void processAbortJob(Exchange exchange, AsyncCallback callback) throws SalesforceException { String jobId = getParameter(JOB_ID, exchange, IGNORE_BODY, NOT_OPTIONAL); bulkClient.changeJobState(jobId, JobStateEnum.ABORTED, determineHeaders(exchange), new JobResponseCallback() { @Override public void onResponse(Job job, Map<String, String> headers, SalesforceException ex) { processResponse(exchange, job, headers, ex, callback); } }); } private void processCloseJob(Exchange exchange, AsyncCallback callback) throws SalesforceException { String jobId = getParameter(JOB_ID, exchange, IGNORE_BODY, NOT_OPTIONAL); bulkClient.changeJobState(jobId, JobStateEnum.UPLOAD_COMPLETE, determineHeaders(exchange), new JobResponseCallback() { @Override public void onResponse(Job job, Map<String, String> headers, SalesforceException ex) { processResponse(exchange, job, headers, ex, callback); } }); } private void processGetAllJobs(Exchange exchange, AsyncCallback callback) throws SalesforceException { String queryLocator = getParameter(QUERY_LOCATOR, exchange, IGNORE_BODY, IS_OPTIONAL); bulkClient.getAllJobs(queryLocator, determineHeaders(exchange), new BulkApiV2Client.JobsResponseCallback() { @Override public void onResponse(Jobs jobs, Map<String, String> headers, SalesforceException ex) { BulkApiV2Processor.this.processResponse(exchange, jobs, headers, ex, callback); } }); } private void processGetSuccessfulResults(Exchange exchange, AsyncCallback callback) throws SalesforceException { String 
jobId = getParameter(JOB_ID, exchange, IGNORE_BODY, NOT_OPTIONAL); bulkClient.getSuccessfulResults(jobId, determineHeaders(exchange), new StreamResponseCallback() { @Override public void onResponse( InputStream inputStream, Map<String, String> headers, SalesforceException ex) { processResponse(exchange, inputStream, headers, ex, callback); } }); } private void processGetFailedResults(Exchange exchange, AsyncCallback callback) throws SalesforceException { String jobId = getParameter(JOB_ID, exchange, IGNORE_BODY, NOT_OPTIONAL); bulkClient.getFailedResults(jobId, determineHeaders(exchange), new StreamResponseCallback() { @Override public void onResponse( InputStream inputStream, Map<String, String> headers, SalesforceException ex) { processResponse(exchange, inputStream, headers, ex, callback); } }); } private void processGetUnprocessedRecords(Exchange exchange, AsyncCallback callback) throws SalesforceException { String jobId = getParameter(JOB_ID, exchange, IGNORE_BODY, NOT_OPTIONAL); bulkClient.getUnprocessedRecords(jobId, determineHeaders(exchange), new StreamResponseCallback() { @Override public void onResponse( InputStream inputStream, Map<String, String> headers, SalesforceException ex) { processResponse(exchange, inputStream, headers, ex, callback); } }); } private void processCreateQueryJob(Exchange exchange, AsyncCallback callback) throws InvalidPayloadException { QueryJob job = exchange.getIn().getMandatoryBody(QueryJob.class); bulkClient.createQueryJob(job, determineHeaders(exchange), new BulkApiV2Client.QueryJobResponseCallback() { @Override public void onResponse( QueryJob job, Map<String, String> headers, SalesforceException ex) { processResponse(exchange, job, headers, ex, callback); } }); } private void processGetQueryJob(Exchange exchange, AsyncCallback callback) throws SalesforceException { QueryJob job = exchange.getIn().getBody(QueryJob.class); String jobId; if (job != null) { jobId = job.getId(); } else { jobId = getParameter(JOB_ID, exchange, 
USE_BODY, NOT_OPTIONAL); } bulkClient.getQueryJob(jobId, determineHeaders(exchange), new BulkApiV2Client.QueryJobResponseCallback() { @Override public void onResponse(QueryJob job, Map<String, String> headers, SalesforceException ex) { processResponse(exchange, job, headers, ex, callback); } }); } private void processGetQueryJobResults(Exchange exchange, AsyncCallback callback) throws SalesforceException { String jobId = getParameter(JOB_ID, exchange, IGNORE_BODY, NOT_OPTIONAL); String locator = getParameter(LOCATOR, exchange, false, true); Integer maxRecords = getParameter(MAX_RECORDS, exchange, false, true, Integer.class); bulkClient.getQueryJobResults(jobId, locator, maxRecords, determineHeaders(exchange), new StreamResponseCallback() { @Override public void onResponse(InputStream inputStream, Map<String, String> headers, SalesforceException ex) { processResponse(exchange, inputStream, headers, ex, callback); } }); } private void processAbortQueryJob(Exchange exchange, AsyncCallback callback) throws SalesforceException { String jobId = getParameter(JOB_ID, exchange, IGNORE_BODY, NOT_OPTIONAL); bulkClient.changeQueryJobState(jobId, JobStateEnum.ABORTED, determineHeaders(exchange), new BulkApiV2Client.QueryJobResponseCallback() { @Override public void onResponse(QueryJob job, Map<String, String> headers, SalesforceException ex) { processResponse(exchange, job, headers, ex, callback); } }); } private void processDeleteQueryJob(Exchange exchange, AsyncCallback callback) throws SalesforceException { String jobId = getParameter(JOB_ID, exchange, IGNORE_BODY, NOT_OPTIONAL); bulkClient.deleteQueryJob(jobId, determineHeaders(exchange), new ResponseCallback() { @Override public void onResponse(Map<String, String> headers, SalesforceException ex) { processResponse(exchange, null, headers, ex, callback); } }); } private void processGetAllQueryJobs(Exchange exchange, AsyncCallback callback) throws SalesforceException { String queryLocator = getParameter(QUERY_LOCATOR, 
exchange, IGNORE_BODY, IS_OPTIONAL); bulkClient.getAllQueryJobs(queryLocator, determineHeaders(exchange), new BulkApiV2Client.QueryJobsResponseCallback() { @Override public void onResponse(QueryJobs jobs, Map<String, String> headers, SalesforceException ex) { processResponse(exchange, jobs, headers, ex, callback); } }); } private void processResponse( Exchange exchange, Object body, Map<String, String> headers, SalesforceException ex, AsyncCallback callback) { final Message message = exchange.getMessage(); if (ex != null) { exchange.setException(ex); } else { message.setBody(body); } message.getHeaders().putAll(headers); callback.done(false); } }
BulkApiV2Processor
java
apache__flink
flink-runtime/src/main/java/org/apache/flink/streaming/api/checkpoint/ListCheckpointed.java
{ "start": 3624, "end": 4793 }
class ____<T> implements MapFunction<T, Tuple2<T, Long>>, ListCheckpointed<Long> { * * // this count is the number of elements in the parallel subtask * private long count; * * {@literal @}Override * public List<Long> snapshotState(long checkpointId, long timestamp) { * // return a single element - our count * return Collections.singletonList(count); * } * * {@literal @}Override * public void restoreState(List<Long> state) throws Exception { * // in case of scale in, this adds up counters from different original subtasks * // in case of scale out, list this may be empty * for (Long l : state) { * count += l; * } * } * * {@literal @}Override * public Tuple2<T, Long> map(T value) { * count++; * return new Tuple2<>(value, count); * } * } * }</pre> * * @deprecated If you need to do non-keyed state snapshots of your operator, use {@link * CheckpointedFunction}. This should only be needed in rare cases, though. * @param <T> The type of the operator state. */ @PublicEvolving @Deprecated public
CountingFunction
java
spring-projects__spring-boot
module/spring-boot-devtools/src/test/java/org/springframework/boot/devtools/classpath/ClassPathFileSystemWatcherTests.java
{ "start": 1855, "end": 3384 }
class ____ { @Test @SuppressWarnings("NullAway") // Test null check void urlsMustNotBeNull() { assertThatIllegalArgumentException() .isThrownBy(() -> new ClassPathFileSystemWatcher(mock(FileSystemWatcherFactory.class), mock(ClassPathRestartStrategy.class), (URL[]) null)) .withMessageContaining("'urls' must not be null"); } @Test void configuredWithRestartStrategy(@TempDir File directory) throws Exception { AnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext(); Map<String, Object> properties = new HashMap<>(); List<URL> urls = new ArrayList<>(); urls.add(new URL("https://spring.io")); urls.add(directory.toURI().toURL()); properties.put("urls", urls); MapPropertySource propertySource = new MapPropertySource("test", properties); context.getEnvironment().getPropertySources().addLast(propertySource); context.register(Config.class); context.refresh(); Thread.sleep(200); File classFile = new File(directory, "Example.class"); FileCopyUtils.copy("file".getBytes(), classFile); Thread.sleep(1000); List<ClassPathChangedEvent> events = context.getBean(Listener.class).getEvents(); for (int i = 0; i < 20; i++) { if (!events.isEmpty()) { break; } Thread.sleep(500); } assertThat(events).hasSize(1); assertThat(events.get(0).getChangeSet().iterator().next()).extracting(ChangedFile::getFile) .containsExactly(classFile); context.close(); } @Configuration(proxyBeanMethods = false) static
ClassPathFileSystemWatcherTests
java
hibernate__hibernate-orm
hibernate-envers/src/main/java/org/hibernate/envers/internal/entities/mapper/relation/lazy/initializor/ListCollectionInitializor.java
{ "start": 665, "end": 1500 }
class ____ extends AbstractCollectionInitializor<List> { private final MiddleComponentData elementComponentData; private final MiddleComponentData indexComponentData; public ListCollectionInitializor( EnversService enversService, AuditReaderImplementor versionsReader, RelationQueryGenerator queryGenerator, Object primaryKey, Number revision, boolean removed, MiddleComponentData elementComponentData, MiddleComponentData indexComponentData) { super( enversService, versionsReader, queryGenerator, primaryKey, revision, removed ); this.elementComponentData = elementComponentData; this.indexComponentData = indexComponentData; } @Override @SuppressWarnings("unchecked") protected List initializeCollection(int size) { // There are two types of List collections that this
ListCollectionInitializor
java
google__error-prone
core/src/test/java/com/google/errorprone/bugpatterns/ExtendingJUnitAssertTest.java
{ "start": 886, "end": 1262 }
class ____ { private final BugCheckerRefactoringTestHelper refactoringTestHelper = BugCheckerRefactoringTestHelper.newInstance(ExtendingJUnitAssert.class, getClass()); @Test public void positive() { refactoringTestHelper .addInputLines( "in/Foo.java", """ import org.junit.Assert;
ExtendingJUnitAssertTest
java
elastic__elasticsearch
server/src/main/java/org/elasticsearch/action/admin/cluster/coordination/CoordinationDiagnosticsAction.java
{ "start": 3016, "end": 4119 }
class ____ extends ActionResponse { private final CoordinationDiagnosticsResult result; public Response(StreamInput in) throws IOException { result = new CoordinationDiagnosticsResult(in); } public Response(CoordinationDiagnosticsResult result) { this.result = result; } public CoordinationDiagnosticsResult getCoordinationDiagnosticsResult() { return result; } @Override public void writeTo(StreamOutput out) throws IOException { result.writeTo(out); } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Response response = (Response) o; return result.equals(response.result); } @Override public int hashCode() { return Objects.hash(result); } } /** * This transport action calls CoordinationDiagnosticsService#diagnoseMasterStability */ public static
Response
java
quarkusio__quarkus
extensions/reactive-routes/deployment/src/test/java/io/quarkus/vertx/web/mutiny/SSEMultiRouteWithAsEventStreamTest.java
{ "start": 10278, "end": 10747 }
class ____ implements ReactiveRoutes.ServerSentEvent<Person> { public String name; public int id; public PersonAsEventWithoutId(String name, int id) { this.name = name; this.id = id; } @Override public Person data() { return new Person(name, id); } @Override public String event() { return "person"; } } static
PersonAsEventWithoutId
java
elastic__elasticsearch
server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java
{ "start": 100936, "end": 107556 }
class ____ implements ClusterStateTaskListener { private final String expectedHeader = threadPool.getThreadContext().getHeader(testHeader); @Override public void onFailure(Exception e) { assertEquals(expectedHeader, threadPool.getThreadContext().getHeader(testHeader)); if ((e instanceof NotMasterException && e.getCause() instanceof EsRejectedExecutionException esre && esre.isExecutorShutdown()) == false) { throw new AssertionError("unexpected exception", e); } actionCount.incrementAndGet(); } } final var queue = masterService.createTaskQueue("queue", randomFrom(Priority.values()), batchExecutionContext -> { throw new AssertionError("should not execute batch"); }); try (var ignored = threadPool.getThreadContext().stashContext()) { threadPool.getThreadContext().putHeader(testHeader, randomAlphaOfLength(10)); queue.submitTask("batched", new TestTask(), null); } try (var ignored = threadPool.getThreadContext().stashContext()) { threadPool.getThreadContext().putHeader(testHeader, randomAlphaOfLength(10)); masterService.submitUnbatchedStateUpdateTask("unbatched", new ClusterStateUpdateTask() { private final TestTask innerTask = new TestTask(); @Override public ClusterState execute(ClusterState currentState) { throw new AssertionError("should not execute task"); } @Override public void onFailure(Exception e) { innerTask.onFailure(e); } }); } assertFalse(deterministicTaskQueue.hasRunnableTasks()); assertFalse(deterministicTaskQueue.hasDeferredTasks()); assertEquals(2, actionCount.get()); } } public void testTimeoutRejectionBehaviourAtSubmission() { final var source = randomIdentifier(); final var taskDescription = randomIdentifier(); final var timeout = TimeValue.timeValueMillis(between(1, 100000)); final var actionCount = new AtomicInteger(); final var deterministicTaskQueue = new DeterministicTaskQueue(); final var threadPool = // a threadpool which simulates the rejection of a master service timeout handler, but runs all other tasks as normal 
deterministicTaskQueue.getThreadPool(r -> { if (r.toString().equals(MasterService.getTimeoutTaskDescription(source, taskDescription, timeout))) { // assertTrue because this should happen exactly once assertTrue(actionCount.compareAndSet(0, 1)); throw new EsRejectedExecutionException("simulated rejection", true); } else { return r; } }); try (var masterService = createMasterService(true, null, threadPool, new StoppableExecutorServiceWrapper(threadPool.generic()))) { masterService.createTaskQueue( "queue", randomFrom(Priority.values()), batchExecutionContext -> fail(null, "should not execute batch") ).submitTask(source, new ClusterStateTaskListener() { @Override public void onFailure(Exception e) { if (e instanceof NotMasterException && e.getMessage().startsWith("could not schedule timeout handler") && e.getCause() instanceof EsRejectedExecutionException esre && esre.isExecutorShutdown() && esre.getMessage().equals("simulated rejection")) { // assertTrue because we must receive the exception we synthesized, exactly once, after triggering the rejection assertTrue(actionCount.compareAndSet(1, 2)); } else { // fail the test if we get anything else throw new AssertionError("unexpected exception", e); } } @Override public String toString() { return taskDescription; } }, timeout); assertFalse(deterministicTaskQueue.hasRunnableTasks()); assertFalse(deterministicTaskQueue.hasDeferredTasks()); assertEquals(2, actionCount.get()); // ensures this test doesn't accidentally become trivial: both expected actions happened } } @TestLogging(reason = "verifying DEBUG logs", value = "org.elasticsearch.cluster.service.MasterService:DEBUG") public void testRejectionBehaviourAtCompletion() { final var deterministicTaskQueue = new DeterministicTaskQueue(); final var threadPool = deterministicTaskQueue.getThreadPool(); final var threadPoolExecutor = new StoppableExecutorServiceWrapper(threadPool.generic()) { boolean executedTask = false; @Override public void execute(Runnable command) { if 
(command instanceof AbstractRunnable abstractRunnable) { if (executedTask) { abstractRunnable.onRejection(new EsRejectedExecutionException("simulated", true)); } else { executedTask = true; super.execute(command); } } else { fail("not an AbstractRunnable: " + command); } } }; try ( var mockLog = MockLog.capture(MasterService.class); var masterService = createMasterService(true, null, threadPool, threadPoolExecutor) ) { mockLog.addExpectation(new MockLog.UnseenEventExpectation("warning", MasterService.class.getCanonicalName(), Level.WARN, "*")); mockLog.addExpectation( new MockLog.SeenEventExpectation( "debug", MasterService.class.getCanonicalName(), Level.DEBUG, "shut down during publication of cluster state version*" ) ); final var testHeader = "test-header";
TestTask
java
apache__kafka
streams/src/main/java/org/apache/kafka/streams/processor/internals/RepartitionTopicConfig.java
{ "start": 1128, "end": 4017 }
class ____ extends InternalTopicConfig { private static final Map<String, String> REPARTITION_TOPIC_DEFAULT_OVERRIDES; static { final Map<String, String> tempTopicDefaultOverrides = new HashMap<>(INTERNAL_TOPIC_DEFAULT_OVERRIDES); tempTopicDefaultOverrides.put(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_DELETE); tempTopicDefaultOverrides.put(TopicConfig.SEGMENT_BYTES_CONFIG, "52428800"); // 50 MB tempTopicDefaultOverrides.put(TopicConfig.RETENTION_MS_CONFIG, String.valueOf(-1)); // Infinity REPARTITION_TOPIC_DEFAULT_OVERRIDES = Collections.unmodifiableMap(tempTopicDefaultOverrides); } RepartitionTopicConfig(final String name, final Map<String, String> topicConfigs) { super(name, topicConfigs); } RepartitionTopicConfig(final String name, final Map<String, String> topicConfigs, final int numberOfPartitions, final boolean enforceNumberOfPartitions) { super(name, topicConfigs, numberOfPartitions, enforceNumberOfPartitions); } /** * Get the configured properties for this topic. 
If retentionMs is set then * we add additionalRetentionMs to work out the desired retention when cleanup.policy=compact,delete * * @param additionalRetentionMs - added to retention to allow for clock drift etc * @return Properties to be used when creating the topic */ @Override public Map<String, String> properties(final Map<String, String> defaultProperties, final long additionalRetentionMs) { // internal topic config overridden rule: library overrides < global config overrides < per-topic config overrides final Map<String, String> topicConfig = new HashMap<>(REPARTITION_TOPIC_DEFAULT_OVERRIDES); topicConfig.putAll(defaultProperties); topicConfig.putAll(topicConfigs); return topicConfig; } @Override public boolean equals(final Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } final RepartitionTopicConfig that = (RepartitionTopicConfig) o; return Objects.equals(name, that.name) && Objects.equals(topicConfigs, that.topicConfigs) && Objects.equals(enforceNumberOfPartitions, that.enforceNumberOfPartitions); } @Override public int hashCode() { return Objects.hash(name, topicConfigs, enforceNumberOfPartitions); } @Override public String toString() { return "RepartitionTopicConfig(" + "name=" + name + ", topicConfigs=" + topicConfigs + ", enforceNumberOfPartitions=" + enforceNumberOfPartitions + ")"; } }
RepartitionTopicConfig
java
quarkusio__quarkus
extensions/resteasy-reactive/rest-client/deployment/src/test/java/io/quarkus/rest/client/reactive/proxy/Client1.java
{ "start": 279, "end": 330 }
interface ____ { @GET Response get(); }
Client1
java
hibernate__hibernate-orm
tooling/metamodel-generator/src/test/java/org/hibernate/processor/test/mixedmode/MixedConfigurationTest.java
{ "start": 775, "end": 3351 }
class ____ { @Test @WithClasses({ Car.class, Vehicle.class }) @WithMappingFiles("car.xml") void testDefaultAccessTypeApplied() { assertMetamodelClassGeneratedFor( Vehicle.class ); assertMetamodelClassGeneratedFor( Car.class ); assertAbsenceOfFieldInMetamodelFor( Car.class, "horsePower", "'horsePower' should not appear in metamodel since it does have no field." ); } @Test @WithClasses({ Truck.class, Vehicle.class }) @WithMappingFiles("truck.xml") void testExplicitXmlConfiguredAccessTypeApplied() { assertMetamodelClassGeneratedFor( Vehicle.class ); assertMetamodelClassGeneratedFor( Truck.class ); assertPresenceOfFieldInMetamodelFor( Truck.class, "horsePower", "Property 'horsePower' has explicit access type and should be in metamodel" ); assertAttributeTypeInMetaModelFor( Truck.class, "horsePower", Integer.class, "Wrong meta model type" ); } @Test @WithClasses({ Car.class, Vehicle.class, RentalCar.class, RentalCompany.class }) @WithMappingFiles({ "car.xml", "rentalcar.xml" }) void testMixedConfiguration() { assertMetamodelClassGeneratedFor( RentalCar.class ); assertMetamodelClassGeneratedFor( RentalCompany.class ); assertPresenceOfFieldInMetamodelFor( RentalCar.class, "company", "Property 'company' should be included due to xml configuration" ); assertAttributeTypeInMetaModelFor( RentalCar.class, "company", RentalCompany.class, "Wrong meta model type" ); assertPresenceOfFieldInMetamodelFor( RentalCar.class, "insurance", "Property 'insurance' should be included since it is an embeddable" ); assertAttributeTypeInMetaModelFor( RentalCar.class, "insurance", Insurance.class, "Wrong meta model type" ); } @Test @WithClasses({ Coordinates.class, ZeroCoordinates.class, Location.class }) @WithMappingFiles("coordinates.xml") void testAccessTypeForXmlConfiguredEmbeddables() { assertMetamodelClassGeneratedFor( Coordinates.class ); assertPresenceOfFieldInMetamodelFor( Coordinates.class, "longitude", "field exists and should be in metamodel" ); assertPresenceOfFieldInMetamodelFor( 
Coordinates.class, "latitude", "field exists and should be in metamodel" ); assertMetamodelClassGeneratedFor( ZeroCoordinates.class ); assertAbsenceOfFieldInMetamodelFor( ZeroCoordinates.class, "longitude", "Field access should be used, but ZeroCoordinates does not define fields" ); assertAbsenceOfFieldInMetamodelFor( ZeroCoordinates.class, "latitude", "Field access should be used, but ZeroCoordinates does not define fields" ); } }
MixedConfigurationTest
java
google__error-prone
core/src/test/java/com/google/errorprone/bugpatterns/StaticAssignmentInConstructorTest.java
{ "start": 1575, "end": 1892 }
class ____ { static Test latest; public Test() { latest = this; } } """) .doTest(); } @Test public void instanceField_noMatch() { helper .addSourceLines( "Test.java", """
Test
java
apache__hadoop
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsBackoffMetrics.java
{ "start": 4836, "end": 13199 }
class ____ extends AbstractAbfsStatisticsSource { private static final Logger LOG = LoggerFactory.getLogger(AbfsBackoffMetrics.class); private static final List<RetryValue> RETRY_LIST = Arrays.asList( RetryValue.values()); /** * Constructor to initialize the IOStatisticsStore with counters and gauges. */ public AbfsBackoffMetrics() { IOStatisticsStore ioStatisticsStore = iostatisticsStore() .withCounters(getMetricNames(TYPE_COUNTER)) .withGauges(getMetricNames(TYPE_GAUGE)) .build(); setIOStatistics(ioStatisticsStore); } /** * Retrieves the metric names based on the statistic type. * * @param type the type of the statistic (counter or gauge) * @return an array of metric names */ private String[] getMetricNames(StatisticTypeEnum type) { return Arrays.stream(AbfsBackoffMetricsEnum.values()) .filter(backoffMetricsEnum -> backoffMetricsEnum .getStatisticType() .equals(type)) .flatMap(backoffMetricsEnum -> RETRY.equals(backoffMetricsEnum.getType()) ? RETRY_LIST.stream().map(retryCount -> getMetricName(backoffMetricsEnum, retryCount)) : Stream.of(backoffMetricsEnum.getName()) ).toArray(String[]::new); } /** * Constructs the metric name based on the metric and retry value. * * @param metric the metric enum * @param retryValue the retry value * @return the constructed metric name */ private String getMetricName(AbfsBackoffMetricsEnum metric, RetryValue retryValue) { if (metric == null) { LOG.error("ABFS Backoff Metric should not be null"); return EMPTY_STRING; } if (RETRY.equals(metric.getType()) && retryValue != null) { return retryValue.getValue() + COLON + metric.getName(); } return metric.getName(); } /** * Retrieves the value of a specific metric. 
* * @param metric the metric enum * @param retryValue the retry value * @return the value of the metric */ public long getMetricValue(AbfsBackoffMetricsEnum metric, RetryValue retryValue) { String metricName = getMetricName(metric, retryValue); switch (metric.getStatisticType()) { case TYPE_COUNTER: return lookupCounterValue(metricName); case TYPE_GAUGE: return lookupGaugeValue(metricName); default: return 0; } } /** * Retrieves the value of a specific metric. * * @param metric the metric enum * @return the value of the metric */ public long getMetricValue(AbfsBackoffMetricsEnum metric) { return getMetricValue(metric, null); } /** * Increments the value of a specific metric. * * @param metric the metric enum * @param retryValue the retry value */ public void incrementMetricValue(AbfsBackoffMetricsEnum metric, RetryValue retryValue) { String metricName = getMetricName(metric, retryValue); switch (metric.getStatisticType()) { case TYPE_COUNTER: incCounterValue(metricName); break; case TYPE_GAUGE: incGaugeValue(metricName); break; default: // Do nothing break; } } /** * Increments the value of a specific metric. * * @param metric the metric enum */ public void incrementMetricValue(AbfsBackoffMetricsEnum metric) { incrementMetricValue(metric, null); } /** * Sets the value of a specific metric. * * @param metric the metric enum * @param value the new value of the metric * @param retryValue the retry value */ public void setMetricValue(AbfsBackoffMetricsEnum metric, long value, RetryValue retryValue) { String metricName = getMetricName(metric, retryValue); switch (metric.getStatisticType()) { case TYPE_COUNTER: setCounterValue(metricName, value); break; case TYPE_GAUGE: setGaugeValue(metricName, value); break; default: // Do nothing break; } } /** * Sets the value of a specific metric. 
* * @param metric the metric enum * @param value the new value of the metric */ public void setMetricValue(AbfsBackoffMetricsEnum metric, long value) { setMetricValue(metric, value, null); } /** * Get the precision metrics. * * @param metricName the metric name * @param retryCount the retry count * @param denominator the denominator * @return String metrics value with precision */ private String getPrecisionMetrics(AbfsBackoffMetricsEnum metricName, RetryValue retryCount, long denominator) { return format(DOUBLE_PRECISION_FORMAT, (double) getMetricValue(metricName, retryCount) / denominator); } /** * Retrieves the retry metrics. * * @param metricBuilder the string builder to append the metrics */ private void getRetryMetrics(StringBuilder metricBuilder) { for (RetryValue retryCount : RETRY_LIST) { long totalRequests = getMetricValue(TOTAL_REQUESTS, retryCount); metricBuilder.append(REQUEST_COUNT) .append(retryCount.getValue()) .append(REQUESTS) .append(getMetricValue(NUMBER_OF_REQUESTS_SUCCEEDED, retryCount)); if (totalRequests > 0) { metricBuilder.append(MIN_MAX_AVERAGE) .append(retryCount.getValue()) .append(REQUESTS) .append(getPrecisionMetrics(MIN_BACK_OFF, retryCount, THOUSAND)) .append(SECONDS) .append(getPrecisionMetrics(MAX_BACK_OFF, retryCount, THOUSAND)) .append(SECONDS) .append(getPrecisionMetrics(TOTAL_BACK_OFF, retryCount, totalRequests * THOUSAND)) .append(SECONDS); } else { metricBuilder.append(MIN_MAX_AVERAGE) .append(retryCount.getValue()) .append(REQUESTS + EQUAL + 0 + SECONDS); } } } /** * Retrieves the base metrics. 
* * @param metricBuilder the string builder to append the metrics */ private void getBaseMetrics(StringBuilder metricBuilder) { long totalRequestsThrottled = getMetricValue(NUMBER_OF_NETWORK_FAILED_REQUESTS) + getMetricValue(NUMBER_OF_IOPS_THROTTLED_REQUESTS) + getMetricValue(NUMBER_OF_OTHER_THROTTLED_REQUESTS) + getMetricValue(NUMBER_OF_BANDWIDTH_THROTTLED_REQUESTS); metricBuilder.append(BANDWIDTH_THROTTLED_REQUESTS) .append(getMetricValue(NUMBER_OF_BANDWIDTH_THROTTLED_REQUESTS)) .append(IOPS_THROTTLED_REQUESTS) .append(getMetricValue(NUMBER_OF_IOPS_THROTTLED_REQUESTS)) .append(OTHER_THROTTLED_REQUESTS) .append(getMetricValue(NUMBER_OF_OTHER_THROTTLED_REQUESTS)) .append(PERCENTAGE_THROTTLED_REQUESTS) .append(formatPercent(totalRequestsThrottled/ (double) getMetricValue(TOTAL_NUMBER_OF_REQUESTS), 3)) .append(NETWORK_ERROR_REQUESTS) .append(getMetricValue(NUMBER_OF_NETWORK_FAILED_REQUESTS)) .append(SUCCESS_REQUESTS_WITHOUT_RETRY) .append(getMetricValue(NUMBER_OF_REQUESTS_SUCCEEDED_WITHOUT_RETRYING)) .append(FAILED_REQUESTS) .append(getMetricValue(NUMBER_OF_REQUESTS_FAILED)) .append(TOTAL_REQUESTS_COUNT) .append(getMetricValue(TOTAL_NUMBER_OF_REQUESTS)) .append(MAX_RETRY) .append(getMetricValue(MAX_RETRY_COUNT)); } /** * Retrieves the string representation of the metrics. * * @return the string representation of the metrics */ @Override public String toString() { if (getMetricValue(TOTAL_NUMBER_OF_REQUESTS) == 0) { return EMPTY_STRING; } StringBuilder metricBuilder = new StringBuilder(); getRetryMetrics(metricBuilder); getBaseMetrics(metricBuilder); return metricBuilder.toString(); } /** * Retrieves the metric names based on the statistic type. * * @param type the type of the statistic (counter or gauge) * @return an array of metric names */ @VisibleForTesting String[] getMetricNamesByType(StatisticTypeEnum type) { return getMetricNames(type); } }
AbfsBackoffMetrics
java
google__error-prone
docgen/src/main/java/com/google/errorprone/BugPatternIndexWriter.java
{ "start": 1649, "end": 4371 }
class ____ { private record IndexEntry(boolean onByDefault, SeverityLevel severity) { static IndexEntry create(boolean onByDefault, SeverityLevel severity) { return new IndexEntry(onByDefault, severity); } String asCategoryHeader() { return (onByDefault() ? "On by default" : "Experimental") + " : " + severity(); } } private record MiniDescription(String name, String summary) { static MiniDescription create(BugPatternInstance bugPattern) { return new MiniDescription(bugPattern.name, bugPattern.summary); } } void dump( Collection<BugPatternInstance> patterns, Writer w, Target target, Set<String> enabledChecks) throws IOException { // (Default, Severity) -> [Pattern...] SortedSetMultimap<IndexEntry, MiniDescription> sorted = TreeMultimap.create( comparing(IndexEntry::onByDefault, trueFirst()).thenComparing(IndexEntry::severity), Comparator.comparing(MiniDescription::name)); for (BugPatternInstance pattern : patterns) { sorted.put( IndexEntry.create(enabledChecks.contains(pattern.name), pattern.severity), MiniDescription.create(pattern)); } Map<String, Object> templateData = new HashMap<>(); ImmutableList<Map<String, Object>> bugpatternData = Multimaps.asMap(sorted).entrySet().stream() .map( e -> ImmutableMap.of( "category", e.getKey().asCategoryHeader(), "checks", e.getValue())) .collect(toImmutableList()); templateData.put("bugpatterns", bugpatternData); if (target == Target.EXTERNAL) { ImmutableMap<String, String> frontmatterData = ImmutableMap.<String, String>builder() .put("title", "Bug Patterns") .put("layout", "bugpatterns") .buildOrThrow(); DumperOptions options = new DumperOptions(); options.setDefaultFlowStyle(DumperOptions.FlowStyle.BLOCK); Yaml yaml = new Yaml(options); Writer yamlWriter = new StringWriter(); yamlWriter.write("---\n"); yaml.dump(frontmatterData, yamlWriter); yamlWriter.write("---\n"); templateData.put("frontmatter", yamlWriter.toString()); MustacheFactory mf = new DefaultMustacheFactory(); Mustache mustache = 
mf.compile("com/google/errorprone/resources/bugpatterns_external.mustache"); mustache.execute(w, templateData); } else { MustacheFactory mf = new DefaultMustacheFactory(); Mustache mustache = mf.compile("com/google/errorprone/resources/bugpatterns_internal.mustache"); mustache.execute(w, templateData); } } }
BugPatternIndexWriter
java
apache__camel
components/camel-jdbc/src/main/java/org/apache/camel/component/jdbc/DefaultJdbcPrepareStatementStrategy.java
{ "start": 1310, "end": 6045 }
class ____ implements JdbcPrepareStatementStrategy { private static final Logger LOG = LoggerFactory.getLogger(DefaultJdbcPrepareStatementStrategy.class); @Override public String prepareQuery(String query, boolean allowNamedParameters) throws SQLException { String answer; if (allowNamedParameters && hasNamedParameters(query)) { // replace all :?word with just ? answer = query.replaceAll("\\:\\?\\w+", "\\?"); } else { answer = query; } LOG.trace("Prepared query: {}", answer); return answer; } @Override public Iterator<?> createPopulateIterator( final String query, final String preparedQuery, final int expectedParams, final Exchange exchange, final Object value) throws SQLException { Map<?, ?> map = null; if (exchange.getIn().hasHeaders()) { if (exchange.getIn().getHeader(JdbcConstants.JDBC_PARAMETERS) != null) { // header JDBC_PARAMETERS takes precedence over regular headers map = exchange.getIn().getHeader(JdbcConstants.JDBC_PARAMETERS, Map.class); } else { map = exchange.getIn().getHeaders(); } } final Map<?, ?> headerMap = map; if (hasNamedParameters(query)) { // create an iterator that returns the value in the named order try { return new Iterator<>() { private final NamedQueryParser parser = new NamedQueryParser(query); private Object next; private boolean done; private boolean preFetched; @Override public boolean hasNext() { if (!done && !preFetched) { next(); preFetched = true; } return !done; } @Override public Object next() { if (!preFetched) { String key = parser.next(); if (key == null) { done = true; return null; } // the key is expected to exist, if not report so end user can see this boolean contains = headerMap != null && headerMap.containsKey(key); if (!contains) { throw new RuntimeExchangeException( "Cannot find key [" + key + "] in message body or headers to use when setting named parameter in query [" + query + "]", exchange); } next = headerMap.get(key); } preFetched = false; return next; } @Override public void remove() { // noop } }; } catch 
(Exception e) { throw new SQLException("Error iterating parameters for the query: " + query, e); } } else { // just use a regular iterator return exchange.getContext().getTypeConverter().convertTo(Iterator.class, headerMap != null ? headerMap.values() : null); } } @Override public void populateStatement(PreparedStatement ps, Iterator<?> iterator, int expectedParams) throws SQLException { int argNumber = 1; if (expectedParams > 0) { // as the headers may have more values than the SQL needs we just break out when we reached the expected number while (iterator != null && iterator.hasNext() && argNumber <= expectedParams) { Object value = iterator.next(); LOG.trace("Setting parameter #{} with value: {}", argNumber, value); ps.setObject(argNumber, value); argNumber++; } } if (argNumber - 1 != expectedParams) { throw new SQLException("Number of parameters mismatch. Expected: " + expectedParams + ", was:" + (argNumber - 1)); } } protected boolean hasNamedParameters(String query) { NamedQueryParser parser = new NamedQueryParser(query); return parser.next() != null; } private static final
DefaultJdbcPrepareStatementStrategy
java
apache__flink
flink-core-api/src/main/java/org/apache/flink/api/common/state/v2/ValueState.java
{ "start": 1543, "end": 3926 }
interface ____<T> extends State { /** * Returns the current value for the state asynchronously. When the state is not partitioned the * returned value is the same for all inputs in a given operator instance. If state partitioning * is applied, the value returned depends on the current operator input, as the operator * maintains an independent state for each partition. When no value was previously set using * {@link #asyncUpdate(Object)}, the future will return {@code null} asynchronously. * * @return The {@link StateFuture} that will return the value corresponding to the current * input. */ StateFuture<T> asyncValue(); /** * Updates the operator state accessible by {@link #asyncValue()} to the given value * asynchronously. The next time {@link #asyncValue()} is called (for the same state partition) * the returned state will represent the updated value. When a partitioned state is updated with * {@code null}, the state for the current key will be removed. * * @param value The new value for the state. * @return The {@link StateFuture} that will trigger the callback when update finishes. */ StateFuture<Void> asyncUpdate(T value); /** * Returns the current value for the state. When the state is not partitioned the returned value * is the same for all inputs in a given operator instance. If state partitioning is applied, * the value returned depends on the current operator input, as the operator maintains an * independent state for each partition. * * <p>If you didn't specify a default value when creating the ValueStateDescriptor this will * return {@code null} when no value was previously set using {@link #update(Object)}. * * @return The state value corresponding to the current input. */ T value(); /** * Updates the operator state accessible by {@link #value()} to the given value. The next time * {@link #value()} is called (for the same state partition) the returned state will represent * the updated value. 
When a partitioned state is updated with {@code null}, the state for the * current key will be removed and the default value is returned on the next access. * * @param value The new value for the state. */ void update(T value); }
ValueState
java
reactor__reactor-core
reactor-core/src/test/java/reactor/core/publisher/scenarios/AbstractReactorTest.java
{ "start": 1019, "end": 1942 }
class ____ { protected static Scheduler asyncGroup; protected static Scheduler ioGroup; protected final Map<Thread, AtomicLong> counters = new ConcurrentHashMap<>(); @BeforeAll public static void loadEnv() { ioGroup = Schedulers.newBoundedElastic(4, Integer.MAX_VALUE, "work"); asyncGroup = Schedulers.newParallel("parallel", 4); } @AfterAll public static void closeEnv() { ioGroup.dispose(); asyncGroup.dispose(); } static { System.setProperty("reactor.trace.cancel", "true"); } protected void monitorThreadUse() { monitorThreadUse(null); } protected void monitorThreadUse(Object val) { AtomicLong counter = counters.get(Thread.currentThread()); if (counter == null) { counter = new AtomicLong(); AtomicLong prev = counters.putIfAbsent(Thread.currentThread(), counter); if(prev != null){ counter = prev; } } counter.incrementAndGet(); } }
AbstractReactorTest
java
apache__camel
components/camel-infinispan/camel-infinispan/src/generated/java/org/apache/camel/component/infinispan/remote/InfinispanRemoteIdempotentRepositoryConfigurer.java
{ "start": 760, "end": 2724 }
class ____ extends org.apache.camel.support.component.PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter { @Override public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) { org.apache.camel.component.infinispan.remote.InfinispanRemoteIdempotentRepository target = (org.apache.camel.component.infinispan.remote.InfinispanRemoteIdempotentRepository) obj; switch (ignoreCase ? name.toLowerCase() : name) { case "cachecontainer": case "cacheContainer": target.setCacheContainer(property(camelContext, org.infinispan.client.hotrod.RemoteCacheManager.class, value)); return true; case "configuration": target.setConfiguration(property(camelContext, org.apache.camel.component.infinispan.remote.InfinispanRemoteConfiguration.class, value)); return true; default: return false; } } @Override public Class<?> getOptionType(String name, boolean ignoreCase) { switch (ignoreCase ? name.toLowerCase() : name) { case "cachecontainer": case "cacheContainer": return org.infinispan.client.hotrod.RemoteCacheManager.class; case "configuration": return org.apache.camel.component.infinispan.remote.InfinispanRemoteConfiguration.class; default: return null; } } @Override public Object getOptionValue(Object obj, String name, boolean ignoreCase) { org.apache.camel.component.infinispan.remote.InfinispanRemoteIdempotentRepository target = (org.apache.camel.component.infinispan.remote.InfinispanRemoteIdempotentRepository) obj; switch (ignoreCase ? name.toLowerCase() : name) { case "cachecontainer": case "cacheContainer": return target.getCacheContainer(); case "configuration": return target.getConfiguration(); default: return null; } } }
InfinispanRemoteIdempotentRepositoryConfigurer
java
apache__camel
components/camel-spring-parent/camel-spring-xml/src/test/java/org/apache/camel/component/properties/BaseSpringPropertiesComponentTest.java
{ "start": 1095, "end": 2388 }
class ____ extends SpringTestSupport { @Override protected AbstractXmlApplicationContext createApplicationContext() { return new ClassPathXmlApplicationContext("org/apache/camel/component/properties/SpringPropertiesComponentTest.xml"); } @Test public void testSpringPropertiesComponentStart() throws Exception { getMockEndpoint("mock:result").expectedMessageCount(1); template.sendBody("direct:start", "Hello World"); assertMockEndpointsSatisfied(); } @Test public void testSpringPropertiesComponentBar() throws Exception { getMockEndpoint("mock:bar").expectedMessageCount(1); template.sendBody("direct:bar", "Hello World"); assertMockEndpointsSatisfied(); } @Test public void testSpringPropertiesComponentStart2() throws Exception { getMockEndpoint("mock:result").expectedMessageCount(1); template.sendBody("direct:start2", "Hello World"); assertMockEndpointsSatisfied(); } @Test public void testSpringPropertiesComponentBar2() throws Exception { getMockEndpoint("mock:bar").expectedMessageCount(1); template.sendBody("direct:bar2", "Hello World"); assertMockEndpointsSatisfied(); } }
BaseSpringPropertiesComponentTest
java
apache__camel
components/camel-jms/src/generated/java/org/apache/camel/component/jms/JmsEndpointUriFactory.java
{ "start": 513, "end": 6732 }
class ____ extends org.apache.camel.support.component.EndpointUriFactorySupport implements EndpointUriFactory { private static final String BASE = ":destinationType:destinationName"; private static final Set<String> PROPERTY_NAMES; private static final Set<String> SECRET_PROPERTY_NAMES; private static final Map<String, String> MULTI_VALUE_PREFIXES; static { Set<String> props = new HashSet<>(104); props.add("acceptMessagesWhileStopping"); props.add("acknowledgementModeName"); props.add("allowAdditionalHeaders"); props.add("allowNullBody"); props.add("allowReplyManagerQuickStop"); props.add("allowSerializedHeaders"); props.add("alwaysCopyMessage"); props.add("artemisConsumerPriority"); props.add("artemisStreamingEnabled"); props.add("asyncConsumer"); props.add("asyncStartListener"); props.add("asyncStopListener"); props.add("autoStartup"); props.add("browseLimit"); props.add("cacheLevel"); props.add("cacheLevelName"); props.add("clientId"); props.add("concurrentConsumers"); props.add("connectionFactory"); props.add("consumerType"); props.add("correlationProperty"); props.add("defaultTaskExecutorType"); props.add("deliveryDelay"); props.add("deliveryMode"); props.add("deliveryPersistent"); props.add("destinationName"); props.add("destinationResolver"); props.add("destinationType"); props.add("disableReplyTo"); props.add("disableTimeToLive"); props.add("durableSubscriptionName"); props.add("eagerLoadingOfProperties"); props.add("eagerPoisonBody"); props.add("errorHandler"); props.add("errorHandlerLogStackTrace"); props.add("errorHandlerLoggingLevel"); props.add("exceptionHandler"); props.add("exceptionListener"); props.add("exchangePattern"); props.add("explicitQosEnabled"); props.add("exposeListenerSession"); props.add("forceSendOriginalMessage"); props.add("formatDateHeadersToIso8601"); props.add("headerFilterStrategy"); props.add("idleConsumerLimit"); props.add("idleReceivesPerTaskLimit"); props.add("idleTaskExecutionLimit"); props.add("includeAllJMSXProperties"); 
props.add("includeSentJMSMessageID"); props.add("jmsKeyFormatStrategy"); props.add("jmsMessageType"); props.add("lazyCreateTransactionManager"); props.add("lazyStartProducer"); props.add("mapJmsMessage"); props.add("maxConcurrentConsumers"); props.add("maxMessagesPerTask"); props.add("messageConverter"); props.add("messageCreatedStrategy"); props.add("messageIdEnabled"); props.add("messageListenerContainerFactory"); props.add("messageTimestampEnabled"); props.add("password"); props.add("preserveMessageQos"); props.add("priority"); props.add("pubSubNoLocal"); props.add("receiveTimeout"); props.add("recoveryInterval"); props.add("replyCorrelationProperty"); props.add("replyTo"); props.add("replyToCacheLevelName"); props.add("replyToConcurrentConsumers"); props.add("replyToConsumerType"); props.add("replyToDeliveryPersistent"); props.add("replyToDestinationSelectorName"); props.add("replyToMaxConcurrentConsumers"); props.add("replyToOnTimeoutMaxConcurrentConsumers"); props.add("replyToOverride"); props.add("replyToSameDestinationAllowed"); props.add("replyToType"); props.add("requestTimeout"); props.add("requestTimeoutCheckerInterval"); props.add("selector"); props.add("streamMessageTypeEnabled"); props.add("subscriptionDurable"); props.add("subscriptionName"); props.add("subscriptionShared"); props.add("synchronous"); props.add("taskExecutor"); props.add("temporaryQueueResolver"); props.add("testConnectionOnStartup"); props.add("timeToLive"); props.add("transacted"); props.add("transactedInOut"); props.add("transactionManager"); props.add("transactionName"); props.add("transactionTimeout"); props.add("transferException"); props.add("transferExchange"); props.add("useMessageIDAsCorrelationID"); props.add("username"); props.add("waitForProvisionCorrelationToBeUpdatedCounter"); props.add("waitForProvisionCorrelationToBeUpdatedThreadSleepingTime"); props.add("waitForTemporaryReplyToToBeUpdatedCounter"); props.add("waitForTemporaryReplyToToBeUpdatedThreadSleepingTime"); 
PROPERTY_NAMES = Collections.unmodifiableSet(props); Set<String> secretProps = new HashSet<>(2); secretProps.add("password"); secretProps.add("username"); SECRET_PROPERTY_NAMES = Collections.unmodifiableSet(secretProps); MULTI_VALUE_PREFIXES = Collections.emptyMap(); } @Override public boolean isEnabled(String scheme) { return "jms".equals(scheme); } @Override public String buildUri(String scheme, Map<String, Object> properties, boolean encode) throws URISyntaxException { String syntax = scheme + BASE; String uri = syntax; Map<String, Object> copy = new HashMap<>(properties); uri = buildPathParameter(syntax, uri, "destinationType", "queue", false, copy); uri = buildPathParameter(syntax, uri, "destinationName", null, true, copy); uri = buildQueryParameters(uri, copy, encode); return uri; } @Override public Set<String> propertyNames() { return PROPERTY_NAMES; } @Override public Set<String> secretPropertyNames() { return SECRET_PROPERTY_NAMES; } @Override public Map<String, String> multiValuePrefixes() { return MULTI_VALUE_PREFIXES; } @Override public boolean isLenientProperties() { return false; } }
JmsEndpointUriFactory
java
apache__camel
components/camel-kamelet/src/test/java/org/apache/camel/component/kamelet/KameletLocalBeanClassFourTest.java
{ "start": 2383, "end": 2698 }
class ____ { private String bar; public String getBar() { return bar; } public void setBar(String bar) { this.bar = bar; } public String where(String name) { return "Hi " + name + " we are going to " + bar; } } }
MyBar
java
spring-projects__spring-framework
spring-beans/src/test/java/org/springframework/beans/factory/aot/BeanDefinitionPropertyValueCodeGeneratorDelegatesTests.java
{ "start": 14386, "end": 14682 }
class ____ { @Test void generateWhenAutowiredPropertyMarker() { compile(AutowiredPropertyMarker.INSTANCE, (instance, compiler) -> assertThat(instance).isInstanceOf(AutowiredPropertyMarker.class) .isSameAs(AutowiredPropertyMarker.INSTANCE)); } } }
AutowiredPropertyMarkerTests
java
spring-projects__spring-framework
spring-test/src/main/java/org/springframework/test/web/servlet/client/CookieAssertions.java
{ "start": 943, "end": 1456 }
class ____ extends AbstractCookieAssertions<ExchangeResult, RestTestClient.ResponseSpec> { CookieAssertions(ExchangeResult exchangeResult, RestTestClient.ResponseSpec responseSpec) { super(exchangeResult, responseSpec); } @Override protected MultiValueMap<String, ResponseCookie> getResponseCookies() { return getExchangeResult().getResponseCookies(); } @Override protected void assertWithDiagnostics(Runnable assertion) { getExchangeResult().assertWithDiagnostics(assertion); } }
CookieAssertions
java
apache__hadoop
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractProviderService.java
{ "start": 2664, "end": 8400 }
class ____ implements ProviderService, YarnServiceConstants { protected static final Logger log = LoggerFactory.getLogger(AbstractProviderService.class); public abstract void processArtifact(AbstractLauncher launcher, ComponentInstance compInstance, SliderFileSystem fileSystem, Service service, ContainerLaunchService.ComponentLaunchContext compLaunchCtx) throws IOException; public Map<String, String> buildContainerTokens(ComponentInstance instance, Container container, ContainerLaunchService.ComponentLaunchContext compLaunchContext) { // Generate tokens (key-value pair) for config substitution. // Get pre-defined tokens Map<String, String> globalTokens = instance.getComponent().getScheduler().globalTokens; Map<String, String> tokensForSubstitution = ProviderUtils .initCompTokensForSubstitute(instance, container, compLaunchContext); tokensForSubstitution.putAll(globalTokens); return tokensForSubstitution; } public void buildContainerEnvironment(AbstractLauncher launcher, Service service, ComponentInstance instance, SliderFileSystem fileSystem, Configuration yarnConf, Container container, ContainerLaunchService.ComponentLaunchContext compLaunchContext, Map<String, String> tokensForSubstitution) throws IOException, SliderException { // Set the environment variables in launcher launcher.putEnv(ServiceUtils.buildEnvMap( compLaunchContext.getConfiguration(), tokensForSubstitution)); launcher.setEnv("WORK_DIR", ApplicationConstants.Environment.PWD.$()); launcher.setEnv("LOG_DIR", ApplicationConstants.LOG_DIR_EXPANSION_VAR); if (System.getenv(HADOOP_USER_NAME) != null) { launcher.setEnv(HADOOP_USER_NAME, System.getenv(HADOOP_USER_NAME)); } launcher.setEnv("LANG", "en_US.UTF-8"); launcher.setEnv("LC_ALL", "en_US.UTF-8"); launcher.setEnv("LANGUAGE", "en_US.UTF-8"); for (Entry<String, String> entry : launcher.getEnv().entrySet()) { tokensForSubstitution.put($(entry.getKey()), entry.getValue()); } } public void buildContainerLaunchCommand(AbstractLauncher launcher, Service 
service, ComponentInstance instance, SliderFileSystem fileSystem, Configuration yarnConf, Container container, ContainerLaunchService.ComponentLaunchContext compLaunchContext, Map<String, String> tokensForSubstitution) throws IOException, SliderException { // substitute launch command String launchCommand = compLaunchContext.getLaunchCommand(); // docker container may have empty commands if (!StringUtils.isEmpty(launchCommand)) { launchCommand = ProviderUtils .substituteStrWithTokens(launchCommand, tokensForSubstitution); CommandLineBuilder operation = new CommandLineBuilder(); operation.add(launchCommand); operation.addOutAndErrFiles(OUT_FILE, ERR_FILE); launcher.addCommand(operation.build()); } } public void buildContainerRetry(AbstractLauncher launcher, Configuration yarnConf, ContainerLaunchService.ComponentLaunchContext compLaunchContext, ComponentInstance instance) { // By default retry forever every 30 seconds ComponentRestartPolicy restartPolicy = instance.getComponent() .getRestartPolicyHandler(); if (restartPolicy.allowContainerRetriesForInstance(instance)) { launcher.setRetryContext(YarnServiceConf .getInt(CONTAINER_RETRY_MAX, DEFAULT_CONTAINER_RETRY_MAX, compLaunchContext.getConfiguration(), yarnConf), YarnServiceConf .getInt(CONTAINER_RETRY_INTERVAL, DEFAULT_CONTAINER_RETRY_INTERVAL, compLaunchContext.getConfiguration(), yarnConf), YarnServiceConf .getLong(CONTAINER_FAILURES_VALIDITY_INTERVAL, DEFAULT_CONTAINER_FAILURES_VALIDITY_INTERVAL, compLaunchContext.getConfiguration(), yarnConf)); } } public ResolvedLaunchParams buildContainerLaunchContext( AbstractLauncher launcher, Service service, ComponentInstance instance, SliderFileSystem fileSystem, Configuration yarnConf, Container container, ContainerLaunchService.ComponentLaunchContext compLaunchContext) throws IOException, SliderException { ResolvedLaunchParams resolved = new ResolvedLaunchParams(); processArtifact(launcher, instance, fileSystem, service, compLaunchContext); ServiceContext context = 
instance.getComponent().getScheduler().getContext(); // Generate tokens (key-value pair) for config substitution. Map<String, String> tokensForSubstitution = buildContainerTokens(instance, container, compLaunchContext); // Setup launch context environment buildContainerEnvironment(launcher, service, instance, fileSystem, yarnConf, container, compLaunchContext, tokensForSubstitution); // create config file on hdfs and addResolvedRsrcPath local resource ProviderUtils.createConfigFileAndAddLocalResource(launcher, fileSystem, compLaunchContext, tokensForSubstitution, instance, context, resolved); // handles static files (like normal file / archive file) for localization. ProviderUtils.handleStaticFilesForLocalization(launcher, fileSystem, compLaunchContext, resolved); // replace launch command with token specific information buildContainerLaunchCommand(launcher, service, instance, fileSystem, yarnConf, container, compLaunchContext, tokensForSubstitution); // Setup container retry settings buildContainerRetry(launcher, yarnConf, compLaunchContext, instance); return resolved; } }
AbstractProviderService
java
apache__hadoop
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/diagnostics/IntegerConfigurationBasicValidator.java
{ "start": 1273, "end": 2900 }
class ____ extends ConfigurationBasicValidator<Integer> implements ConfigurationValidator { private final int min; private final int max; private final int outlier; public IntegerConfigurationBasicValidator(final int min, final int max, final int defaultVal, final String configKey, final boolean throwIfInvalid) { this(min, min, max, defaultVal, configKey, throwIfInvalid); } public IntegerConfigurationBasicValidator(final int outlier, final int min, final int max, final int defaultVal, final String configKey, final boolean throwIfInvalid) { super(configKey, defaultVal, throwIfInvalid); this.min = min; this.max = max; this.outlier = outlier; } public Integer validate(final String configValue) throws InvalidConfigurationValueException { Integer result = super.validate(configValue); if (result != null) { return result; } try { result = Integer.parseInt(configValue); // throw an exception if a 'within bounds' value is missing if (getThrowIfInvalid() && (result != outlier) && (result < this.min || result > this.max)) { throw new InvalidConfigurationValueException(getConfigKey()); } if (result == outlier) { return result; } // set the value to the nearest bound if it's out of bounds if (result < this.min) { return this.min; } if (result > this.max) { return this.max; } } catch (NumberFormatException ex) { throw new InvalidConfigurationValueException(getConfigKey(), ex); } return result; } }
IntegerConfigurationBasicValidator
java
apache__dubbo
dubbo-registry/dubbo-registry-api/src/test/java/org/apache/dubbo/registry/client/migration/model/MigrationRuleTest.java
{ "start": 1639, "end": 5541 }
class ____ { private static final ServiceNameMapping mapping = mock(ServiceNameMapping.class); @Test void test_parse() { when(mapping.getMapping(any(URL.class))).thenReturn(Collections.emptySet()); String rule = "key: demo-consumer\n" + "step: APPLICATION_FIRST\n" + "threshold: 1.0\n" + "proportion: 60\n" + "delay: 60\n" + "force: false\n" + "interfaces:\n" + " - serviceKey: DemoService:1.0.0\n" + " threshold: 0.5\n" + " proportion: 30\n" + " delay: 30\n" + " force: true\n" + " step: APPLICATION_FIRST\n" + " - serviceKey: GreetingService:1.0.0\n" + " step: FORCE_APPLICATION\n" + "applications:\n" + " - serviceKey: TestApplication\n" + " threshold: 0.3\n" + " proportion: 20\n" + " delay: 10\n" + " force: false\n" + " step: FORCE_INTERFACE\n"; MigrationRule migrationRule = MigrationRule.parse(rule); assertEquals("demo-consumer", migrationRule.getKey()); assertEquals(MigrationStep.APPLICATION_FIRST, migrationRule.getStep()); assertEquals(1.0f, migrationRule.getThreshold()); assertEquals(60, migrationRule.getProportion()); assertEquals(60, migrationRule.getDelay()); assertEquals(false, migrationRule.getForce()); URL url = Mockito.mock(URL.class); ApplicationModel defaultModel = Mockito.spy(ApplicationModel.defaultModel()); Mockito.when(defaultModel.getDefaultExtension(ServiceNameMapping.class)).thenReturn(mapping); Mockito.when(url.getScopeModel()).thenReturn(defaultModel); Mockito.when(url.getDisplayServiceKey()).thenReturn("DemoService:1.0.0"); Mockito.when(url.getParameter(ArgumentMatchers.eq(REGISTRY_CLUSTER_TYPE_KEY), anyString())) .thenReturn("default"); Mockito.when(url.getParameter(ArgumentMatchers.eq(REGISTRY_CLUSTER_TYPE_KEY), anyString())) .thenReturn("default"); assertEquals(2, migrationRule.getInterfaces().size()); assertEquals(0.5f, migrationRule.getThreshold(url)); assertEquals(30, migrationRule.getProportion(url)); assertEquals(30, migrationRule.getDelay(url)); assertTrue(migrationRule.getForce(url)); assertEquals(MigrationStep.APPLICATION_FIRST, 
migrationRule.getStep(url)); Mockito.when(url.getDisplayServiceKey()).thenReturn("GreetingService:1.0.0"); assertEquals(1.0f, migrationRule.getThreshold(url)); assertEquals(60, migrationRule.getProportion(url)); assertEquals(60, migrationRule.getDelay(url)); assertFalse(migrationRule.getForce(url)); assertEquals(MigrationStep.FORCE_APPLICATION, migrationRule.getStep(url)); Mockito.when(url.getDisplayServiceKey()).thenReturn("GreetingService:1.0.1"); Mockito.when(url.getServiceInterface()).thenReturn("GreetingService"); when(mapping.getRemoteMapping(any(URL.class))).thenReturn(Collections.singleton("TestApplication")); Set<String> services = new HashSet<>(); services.add("TestApplication"); when(mapping.getMapping(any(URL.class))).thenReturn(services); assertEquals(0.3f, migrationRule.getThreshold(url)); assertEquals(20, migrationRule.getProportion(url)); assertEquals(10, migrationRule.getDelay(url)); assertFalse(migrationRule.getForce(url)); assertEquals(MigrationStep.FORCE_INTERFACE, migrationRule.getStep(url)); when(mapping.getMapping(any(URL.class))).thenReturn(Collections.emptySet()); ApplicationModel.defaultModel().destroy(); } }
MigrationRuleTest
java
hibernate__hibernate-orm
hibernate-core/src/test/java/org/hibernate/orm/test/mapping/cid/nonaggregated/SmokeTests.java
{ "start": 932, "end": 1903 }
class ____ { @Test public void simpleTest(SessionFactoryScope scope) { scope.inTransaction( (session) -> { session.createQuery( "select a from SystemAccess a" ).list(); } ); } @Test public void keyManyToOneTest(SessionFactoryScope scope) { scope.inTransaction( (session) -> { session.createQuery( "select i from LineItem i" ).list(); } ); } @BeforeEach public void createTestData(SessionFactoryScope scope) { scope.inTransaction( session -> { final Order order = new Order( 1, "123-abc" ); session.persist( order ); session.persist( new LineItem( order, 1, "xyz", 500 ) ); session.persist( new LineItem( order, 2, "tuv", 60 ) ); session.persist( new LineItem( order, 3, "def", 350 ) ); } ); } @AfterEach public void dropTestData(SessionFactoryScope scope) { scope.getSessionFactory().getSchemaManager().truncate(); } @Entity( name = "SystemAccess" ) @Table( name = "`access`" ) public static
SmokeTests
java
apache__commons-lang
src/main/java/org/apache/commons/lang3/concurrent/locks/LockingVisitors.java
{ "start": 5009, "end": 5299 }
class ____<O, L> { /** * Builds {@link LockVisitor} instances. * * @param <O> the wrapped object type. * @param <L> the wrapped lock type. * @param <B> the builder type. * @since 3.18.0 */ public static
LockVisitor
java
google__auto
value/src/test/java/com/google/auto/value/processor/AutoBuilderCompilationTest.java
{ "start": 37755, "end": 37899 }
interface ____"); } private static String sorted(String... imports) { return stream(imports).sorted().collect(joining("\n")); } }
Builder
java
quarkusio__quarkus
extensions/redis-client/runtime/src/main/java/io/quarkus/redis/runtime/datasource/ReactiveTransactionalTimeSeriesCommandsImpl.java
{ "start": 924, "end": 8226 }
class ____<K> extends AbstractTransactionalCommands implements ReactiveTransactionalTimeSeriesCommands<K> { private final ReactiveTimeSeriesCommandsImpl<K> reactive; public ReactiveTransactionalTimeSeriesCommandsImpl(ReactiveTransactionalRedisDataSource ds, ReactiveTimeSeriesCommandsImpl<K> reactive, TransactionHolder tx) { super(ds, tx); this.reactive = reactive; } @Override public Uni<Void> tsCreate(K key, CreateArgs args) { this.tx.enqueue(x -> null); return this.reactive._tsCreate(key, args).invoke(this::queuedOrDiscard).replaceWithVoid(); } @Override public Uni<Void> tsCreate(K key) { this.tx.enqueue(x -> null); return this.reactive._tsCreate(key).invoke(this::queuedOrDiscard).replaceWithVoid(); } @Override public Uni<Void> tsAdd(K key, long timestamp, double value, AddArgs args) { this.tx.enqueue(x -> null); return this.reactive._tsAdd(key, timestamp, value, args).invoke(this::queuedOrDiscard).replaceWithVoid(); } @Override public Uni<Void> tsAdd(K key, long timestamp, double value) { this.tx.enqueue(x -> null); return this.reactive._tsAdd(key, timestamp, value).invoke(this::queuedOrDiscard).replaceWithVoid(); } @Override public Uni<Void> tsAdd(K key, double value) { this.tx.enqueue(x -> null); return this.reactive._tsAdd(key, value).invoke(this::queuedOrDiscard).replaceWithVoid(); } @Override public Uni<Void> tsAlter(K key, AlterArgs args) { this.tx.enqueue(x -> null); return this.reactive._tsAlter(key, args).invoke(this::queuedOrDiscard).replaceWithVoid(); } @Override public Uni<Void> tsCreateRule(K key, K destKey, Aggregation aggregation, Duration bucketDuration) { this.tx.enqueue(x -> null); return this.reactive._tsCreateRule(key, destKey, aggregation, bucketDuration).invoke(this::queuedOrDiscard) .replaceWithVoid(); } @Override public Uni<Void> tsCreateRule(K key, K destKey, Aggregation aggregation, Duration bucketDuration, long alignTimestamp) { this.tx.enqueue(x -> null); return this.reactive._tsCreateRule(key, destKey, aggregation, bucketDuration, 
alignTimestamp) .invoke(this::queuedOrDiscard).replaceWithVoid(); } @Override public Uni<Void> tsDecrBy(K key, double value) { this.tx.enqueue(x -> null); return this.reactive._tsDecrBy(key, value).invoke(this::queuedOrDiscard).replaceWithVoid(); } @Override public Uni<Void> tsDecrBy(K key, double value, IncrementArgs args) { this.tx.enqueue(x -> null); return this.reactive._tsDecrBy(key, value, args).invoke(this::queuedOrDiscard).replaceWithVoid(); } @Override public Uni<Void> tsDel(K key, long fromTimestamp, long toTimestamp) { this.tx.enqueue(x -> null); return this.reactive._tsDel(key, fromTimestamp, toTimestamp).invoke(this::queuedOrDiscard).replaceWithVoid(); } @Override public Uni<Void> tsDeleteRule(K key, K destKey) { this.tx.enqueue(x -> null); return this.reactive._tsDeleteRule(key, destKey).invoke(this::queuedOrDiscard).replaceWithVoid(); } @Override public Uni<Void> tsGet(K key) { this.tx.enqueue(reactive::decodeSample); return this.reactive._tsGet(key).invoke(this::queuedOrDiscard).replaceWithVoid(); } @Override public Uni<Void> tsGet(K key, boolean latest) { this.tx.enqueue(reactive::decodeSample); return this.reactive._tsGet(key, latest).invoke(this::queuedOrDiscard).replaceWithVoid(); } @Override public Uni<Void> tsIncrBy(K key, double value) { this.tx.enqueue(x -> null); return this.reactive._tsIncrBy(key, value).invoke(this::queuedOrDiscard).replaceWithVoid(); } @Override public Uni<Void> tsIncrBy(K key, double value, IncrementArgs args) { this.tx.enqueue(x -> null); return this.reactive._tsIncrBy(key, value, args).invoke(this::queuedOrDiscard).replaceWithVoid(); } @Override public Uni<Void> tsMAdd(SeriesSample<K>... samples) { this.tx.enqueue(x -> null); return this.reactive._tsMAdd(samples).invoke(this::queuedOrDiscard).replaceWithVoid(); } @Override public Uni<Void> tsMGet(MGetArgs args, Filter... 
filters) { this.tx.enqueue(reactive::decodeGroup); return this.reactive._tsMGet(args, filters).invoke(this::queuedOrDiscard).replaceWithVoid(); } @Override public Uni<Void> tsMGet(Filter... filters) { this.tx.enqueue(reactive::decodeGroup); return this.reactive._tsMGet(filters).invoke(this::queuedOrDiscard).replaceWithVoid(); } @Override public Uni<Void> tsMRange(TimeSeriesRange range, Filter... filters) { this.tx.enqueue(reactive::decodeGroup); return this.reactive._tsMRange(range, filters).invoke(this::queuedOrDiscard).replaceWithVoid(); } @Override public Uni<Void> tsMRange(TimeSeriesRange range, MRangeArgs args, Filter... filters) { this.tx.enqueue(reactive::decodeGroup); return this.reactive._tsMRange(range, args, filters).invoke(this::queuedOrDiscard).replaceWithVoid(); } @Override public Uni<Void> tsMRevRange(TimeSeriesRange range, Filter... filters) { this.tx.enqueue(reactive::decodeGroup); return this.reactive._tsMRevRange(range, filters).invoke(this::queuedOrDiscard).replaceWithVoid(); } @Override public Uni<Void> tsMRevRange(TimeSeriesRange range, MRangeArgs args, Filter... filters) { this.tx.enqueue(reactive::decodeGroup); return this.reactive._tsMRevRange(range, args, filters).invoke(this::queuedOrDiscard).replaceWithVoid(); } @Override public Uni<Void> tsQueryIndex(Filter... 
filters) { this.tx.enqueue(r -> reactive.marshaller.decodeAsList(r, reactive.keyType)); return this.reactive._tsQueryIndex(filters).invoke(this::queuedOrDiscard).replaceWithVoid(); } @Override public Uni<Void> tsRange(K key, TimeSeriesRange range) { this.tx.enqueue(r -> reactive.marshaller.decodeAsList(r, reactive::decodeSample)); return this.reactive._tsRange(key, range).invoke(this::queuedOrDiscard).replaceWithVoid(); } @Override public Uni<Void> tsRange(K key, TimeSeriesRange range, RangeArgs args) { this.tx.enqueue(r -> reactive.marshaller.decodeAsList(r, reactive::decodeSample)); return this.reactive._tsRange(key, range, args).invoke(this::queuedOrDiscard).replaceWithVoid(); } @Override public Uni<Void> tsRevRange(K key, TimeSeriesRange range) { this.tx.enqueue(r -> reactive.marshaller.decodeAsList(r, reactive::decodeSample)); return this.reactive._tsRevRange(key, range).invoke(this::queuedOrDiscard).replaceWithVoid(); } @Override public Uni<Void> tsRevRange(K key, TimeSeriesRange range, RangeArgs args) { this.tx.enqueue(r -> reactive.marshaller.decodeAsList(r, reactive::decodeSample)); return this.reactive._tsRevRange(key, range, args).invoke(this::queuedOrDiscard).replaceWithVoid(); } }
ReactiveTransactionalTimeSeriesCommandsImpl
java
apache__dubbo
dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/frame/TriDecoder.java
{ "start": 1277, "end": 6146 }
class ____ implements Deframer { private static final int HEADER_LENGTH = 5; private static final int COMPRESSED_FLAG_MASK = 1; private static final int RESERVED_MASK = 0xFE; private final CompositeByteBuf accumulate = Unpooled.compositeBuffer(); private final Listener listener; private final DeCompressor decompressor; private final Integer maxMessageSize; private boolean compressedFlag; private long pendingDeliveries; private boolean inDelivery = false; private boolean closing; private boolean closed; private int requiredLength = HEADER_LENGTH; private GrpcDecodeState state = GrpcDecodeState.HEADER; public TriDecoder(DeCompressor decompressor, Listener listener) { Configuration conf = ConfigurationUtils.getEnvConfiguration(ApplicationModel.defaultModel()); maxMessageSize = conf.getInteger(Constants.H2_SETTINGS_MAX_MESSAGE_SIZE, 50 * 1024 * 1024); this.decompressor = decompressor; this.listener = listener; } @Override public void deframe(ByteBuf data) { if (closing || closed) { // ignored return; } accumulate.addComponent(true, data); deliver(); } public void request(int numMessages) { pendingDeliveries += numMessages; deliver(); } @Override public void close() { closing = true; deliver(); } private void deliver() { // We can have reentrancy here when using a direct executor, triggered by calls to // request more messages. This is safe as we simply loop until pendingDelivers = 0 if (inDelivery) { return; } inDelivery = true; try { // Process the uncompressed bytes. while (pendingDeliveries > 0 && hasEnoughBytes()) { switch (state) { case HEADER: processHeader(); break; case PAYLOAD: // Read the body and deliver the message. processBody(); // Since we've delivered a message, decrement the number of pending // deliveries remaining. 
pendingDeliveries--; break; default: throw new AssertionError("Invalid state: " + state); } } if (closing) { if (!closed) { closed = true; accumulate.clear(); accumulate.release(); listener.close(); } } } finally { inDelivery = false; } } private boolean hasEnoughBytes() { return requiredLength - accumulate.readableBytes() <= 0; } /** * Processes the GRPC compression header which is composed of the compression flag and the outer * frame length. */ private void processHeader() { int type = accumulate.readUnsignedByte(); if ((type & RESERVED_MASK) != 0) { throw new RpcException("gRPC frame header malformed: reserved bits not zero"); } compressedFlag = (type & COMPRESSED_FLAG_MASK) != 0; requiredLength = accumulate.readInt(); if (requiredLength < 0) { throw new RpcException("Invalid message length: " + requiredLength); } if (requiredLength > maxMessageSize) { throw new RpcException(String.format("Message size %d exceeds limit %d", requiredLength, maxMessageSize)); } // Continue reading the frame body. state = GrpcDecodeState.PAYLOAD; } /** * Processes the GRPC message body, which depending on frame header flags may be compressed. */ private void processBody() { // There is no reliable way to get the uncompressed size per message when it's compressed, // because the uncompressed bytes are provided through an InputStream whose total size is // unknown until all bytes are read, and we don't know when it happens. byte[] stream = compressedFlag ? getCompressedBody() : getUncompressedBody(); listener.onRawMessage(stream); // Done with this frame, begin processing the next header. state = GrpcDecodeState.HEADER; requiredLength = HEADER_LENGTH; } private byte[] getCompressedBody() { final byte[] compressedBody = getUncompressedBody(); return decompressor.decompress(compressedBody); } private byte[] getUncompressedBody() { byte[] data = new byte[requiredLength]; accumulate.readBytes(data); accumulate.discardReadComponents(); return data; } private
TriDecoder
java
elastic__elasticsearch
x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/client/AmazonBedrockStreamingChatProcessor.java
{ "start": 1303, "end": 5269 }
class ____ implements Flow.Processor<ConverseStreamOutput, StreamingChatCompletionResults.Results> { private static final Logger logger = LogManager.getLogger(AmazonBedrockStreamingChatProcessor.class); private final AtomicReference<Throwable> error = new AtomicReference<>(null); private final AtomicLong demand = new AtomicLong(0); private final AtomicBoolean isDone = new AtomicBoolean(false); private final AtomicBoolean onCompleteCalled = new AtomicBoolean(false); private final AtomicBoolean onErrorCalled = new AtomicBoolean(false); private final ThreadPool threadPool; private volatile Flow.Subscriber<? super StreamingChatCompletionResults.Results> downstream; private volatile Flow.Subscription upstream; AmazonBedrockStreamingChatProcessor(ThreadPool threadPool) { this.threadPool = threadPool; } @Override public void subscribe(Flow.Subscriber<? super StreamingChatCompletionResults.Results> subscriber) { if (downstream == null) { downstream = subscriber; downstream.onSubscribe(new StreamSubscription()); } else { subscriber.onError(new IllegalStateException("Subscriber already set.")); } } @Override public void onSubscribe(Flow.Subscription subscription) { if (upstream == null) { upstream = subscription; var currentRequestCount = demand.getAndUpdate(i -> 0); if (currentRequestCount > 0) { upstream.request(currentRequestCount); } } else { subscription.cancel(); } } @Override public void onNext(ConverseStreamOutput item) { if (item.sdkEventType() == ConverseStreamOutput.EventType.CONTENT_BLOCK_DELTA) { demand.set(0); // reset demand before we fork to another thread item.accept(ConverseStreamResponseHandler.Visitor.builder().onContentBlockDelta(this::sendDownstreamOnAnotherThread).build()); } else { upstream.request(1); } } // this is always called from a netty thread maintained by the AWS SDK, we'll move it to our thread to process the response private void sendDownstreamOnAnotherThread(ContentBlockDeltaEvent event) { runOnUtilityThreadPool(() -> { var text = 
event.delta().text(); var result = new ArrayDeque<StreamingChatCompletionResults.Result>(1); result.offer(new StreamingChatCompletionResults.Result(text)); var results = new StreamingChatCompletionResults.Results(result); downstream.onNext(results); }); } @Override public void onError(Throwable amazonBedrockRuntimeException) { ExceptionsHelper.maybeDieOnAnotherThread(amazonBedrockRuntimeException); error.set( new ElasticsearchException( Strings.format("AmazonBedrock StreamingChatProcessor failure: [%s]", amazonBedrockRuntimeException.getMessage()), amazonBedrockRuntimeException ) ); if (isDone.compareAndSet(false, true) && checkAndResetDemand() && onErrorCalled.compareAndSet(false, true)) { runOnUtilityThreadPool(() -> downstream.onError(amazonBedrockRuntimeException)); } } private boolean checkAndResetDemand() { return demand.getAndUpdate(i -> 0L) > 0L; } @Override public void onComplete() { if (isDone.compareAndSet(false, true) && checkAndResetDemand() && onCompleteCalled.compareAndSet(false, true)) { downstream.onComplete(); } } private void runOnUtilityThreadPool(Runnable runnable) { try { threadPool.executor(UTILITY_THREAD_POOL_NAME).execute(runnable); } catch (Exception e) { logger.error(Strings.format("failed to fork [%s] to utility thread pool", runnable), e); } } private
AmazonBedrockStreamingChatProcessor
java
apache__camel
catalog/camel-catalog-common/src/main/java/org/apache/camel/catalog/common/FileUtil.java
{ "start": 902, "end": 1850 }
class ____ { private FileUtil() { } public static void findJavaFiles(File dir, Set<File> javaFiles) { File[] files = dir.isDirectory() ? dir.listFiles() : null; if (files != null) { for (File file : files) { if (file.getName().endsWith(".java")) { javaFiles.add(file); } else if (file.isDirectory()) { findJavaFiles(file, javaFiles); } } } } public static void findXmlFiles(File dir, Set<File> xmlFiles) { File[] files = dir.isDirectory() ? dir.listFiles() : null; if (files != null) { for (File file : files) { if (file.getName().endsWith(".xml")) { xmlFiles.add(file); } else if (file.isDirectory()) { findXmlFiles(file, xmlFiles); } } } } }
FileUtil
java
alibaba__nacos
client/src/main/java/com/alibaba/nacos/client/naming/remote/gprc/redo/data/BatchInstanceRedoData.java
{ "start": 896, "end": 2305 }
class ____ extends InstanceRedoData { List<Instance> instances; public List<Instance> getInstances() { return instances; } public void setInstances(List<Instance> instances) { this.instances = instances; } protected BatchInstanceRedoData(String serviceName, String groupName) { super(serviceName, groupName); } /** * build BatchInstanceRedoData. * * @param serviceName service name * @param groupName group name * @param instances instances * @return build BatchInstanceRedoData */ public static BatchInstanceRedoData build(String serviceName, String groupName, List<Instance> instances) { BatchInstanceRedoData result = new BatchInstanceRedoData(serviceName, groupName); result.setInstances(instances); return result; } @Override public boolean equals(Object o) { if (this == o) { return true; } if (!(o instanceof BatchInstanceRedoData)) { return false; } if (!super.equals(o)) { return false; } BatchInstanceRedoData redoData = (BatchInstanceRedoData) o; return Objects.equals(instances, redoData.instances); } @Override public int hashCode() { return Objects.hash(super.hashCode(), instances); } }
BatchInstanceRedoData
java
lettuce-io__lettuce-core
src/main/java/io/lettuce/core/output/ScoredValueScanStreamingOutput.java
{ "start": 432, "end": 1370 }
class ____<K, V> extends ScanOutput<K, V, StreamScanCursor> { private final ScoredValueStreamingChannel<V> channel; private V value; private boolean hasValue; public ScoredValueScanStreamingOutput(RedisCodec<K, V> codec, ScoredValueStreamingChannel<V> channel) { super(codec, new StreamScanCursor()); this.channel = channel; } @Override protected void setOutput(ByteBuffer bytes) { if (!hasValue) { value = codec.decodeValue(bytes); hasValue = true; return; } double score = LettuceStrings.toDouble(decodeString(bytes)); set(score); } @Override public void set(double number) { if (hasValue) { channel.onValue(ScoredValue.just(number, value)); } output.setCount(output.getCount() + 1); value = null; hasValue = false; } }
ScoredValueScanStreamingOutput
java
quarkusio__quarkus
extensions/undertow/deployment/src/test/java/io/quarkus/undertow/test/AnnotatedFilterInitParam.java
{ "start": 682, "end": 1156 }
class ____ extends HttpFilter { @Override public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) throws IOException, ServletException { response.getWriter().println("invoked-before-chain"); response.getWriter().println(getInitParameter("AnnotatedInitFilterParamName")); chain.doFilter(request, response); response.getWriter().println("invoked-after-chain"); } }
AnnotatedFilterInitParam
java
apache__flink
flink-kubernetes/src/main/java/org/apache/flink/kubernetes/configuration/KubernetesConfigOptionsInternal.java
{ "start": 1109, "end": 1450 }
class ____ { public static final ConfigOption<String> ENTRY_POINT_CLASS = ConfigOptions.key("kubernetes.internal.jobmanager.entrypoint.class") .stringType() .noDefaultValue() .withDescription( "The entrypoint
KubernetesConfigOptionsInternal
java
apache__avro
lang/java/avro/src/main/java/org/apache/avro/file/DataFileStream.java
{ "start": 1727, "end": 1937 }
class ____<D> implements Iterator<D>, Iterable<D>, Closeable { /** * A handle that can be used to reopen a DataFile without re-reading the header * of the stream. */ public static final
DataFileStream
java
quarkusio__quarkus
integration-tests/elytron-undertow/src/test/java/io/quarkus/it/undertow/elytron/BaseAuthRestTest.java
{ "start": 305, "end": 1287 }
class ____ extends HttpsSetup { @Test @RepeatedTest(100) void testPost() { // This is a regression test in that we had a problem where the Vert.x request was not paused // before the authentication filters ran and the post message was thrown away by Vert.x because // RESTEasy hadn't registered its request handlers yet. given() .header("Authorization", "Basic am9objpqb2hu") .body("Bill") .contentType(ContentType.TEXT) .when() .post("/foo/mapped/rest") .then() .statusCode(200) .body(is("post success")); } @Test void testGet() { given() .header("Authorization", "Basic am9objpqb2hu") .when() .get("/foo/mapped/rest") .then() .statusCode(200) .body(is("get success")); } }
BaseAuthRestTest
java
apache__flink
flink-runtime/src/main/java/org/apache/flink/runtime/leaderretrieval/LeaderRetrievalEventHandler.java
{ "start": 1474, "end": 2117 }
interface ____ { /** * Called by specific {@link LeaderRetrievalDriver} to notify leader address. * * <p>Duplicated leader change events could happen, so the implementation should check whether * the passed leader information is truly changed with last stored leader information. * * @param leaderInformation the new leader information to notify {@link LeaderRetrievalService}. * It could be {@link LeaderInformation#empty()} if the leader address does not exist in the * external storage. */ void notifyLeaderAddress(LeaderInformation leaderInformation); }
LeaderRetrievalEventHandler
java
hibernate__hibernate-orm
hibernate-core/src/main/java/org/hibernate/query/Order.java
{ "start": 4894, "end": 5299 }
class ____ sorted by the * attribute with the given name, in the given direction. If the * named attribute is of textual type, the ordering is * case-sensitive. */ static <T> Order<T> by(Class<T> entityClass, String attributeName, SortDirection direction) { return new NamedAttributeOrder<>( direction, Nulls.NONE, entityClass, attributeName ); } /** * An order where an entity of the given
is
java
spring-projects__spring-security
web/src/main/java/org/springframework/security/web/server/authentication/ServerAuthenticationConverter.java
{ "start": 1254, "end": 1555 }
interface ____ { /** * Converts a {@link ServerWebExchange} to an {@link Authentication} * @param exchange The {@link ServerWebExchange} * @return A {@link Mono} representing an {@link Authentication} */ Mono<Authentication> convert(ServerWebExchange exchange); }
ServerAuthenticationConverter
java
spring-projects__spring-security
core/src/main/java/org/springframework/security/core/authority/mapping/SimpleMappableAttributesRetriever.java
{ "start": 828, "end": 997 }
interface ____ just returning a * list of mappable attributes as previously set using the corresponding setter method. * * @author Ruud Senden * @since 2.0 */ public
by
java
apache__hadoop
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/db/DBConfiguration.java
{ "start": 3775, "end": 4379 }
class ____ * @param dbUrl JDBC DB access URL. * @param userName DB access username * @param passwd DB access passwd */ public static void configureDB(JobConf job, String driverClass, String dbUrl , String userName, String passwd) { job.set(DRIVER_CLASS_PROPERTY, driverClass); job.set(URL_PROPERTY, dbUrl); if(userName != null) job.set(USERNAME_PROPERTY, userName); if(passwd != null) job.set(PASSWORD_PROPERTY, passwd); } /** * Sets the DB access related fields in the JobConf. * @param job the job * @param driverClass JDBC Driver
name
java
apache__flink
flink-runtime/src/main/java/org/apache/flink/runtime/util/config/memory/FlinkMemory.java
{ "start": 2119, "end": 2290 }
interface ____ extends Serializable { MemorySize getJvmHeapMemorySize(); MemorySize getJvmDirectMemorySize(); MemorySize getTotalFlinkMemorySize(); }
FlinkMemory
java
junit-team__junit5
junit-vintage-engine/src/main/java/org/junit/vintage/engine/descriptor/VintageTestDescriptor.java
{ "start": 1284, "end": 4432 }
class ____ extends AbstractTestDescriptor { public static final String ENGINE_ID = "junit-vintage"; public static final String SEGMENT_TYPE_RUNNER = "runner"; public static final String SEGMENT_TYPE_TEST = "test"; public static final String SEGMENT_TYPE_DYNAMIC = "dynamic"; protected Description description; public VintageTestDescriptor(UniqueId uniqueId, Description description, @Nullable TestSource source) { this(uniqueId, description, generateDisplayName(description), source); } VintageTestDescriptor(UniqueId uniqueId, Description description, String displayName, @Nullable TestSource source) { super(uniqueId, displayName, source); this.description = description; } private static String generateDisplayName(Description description) { String methodName = DescriptionUtils.getMethodName(description); return isNotBlank(methodName) ? methodName : description.getDisplayName(); } public Description getDescription() { return description; } @Override public String getLegacyReportingName() { String methodName = DescriptionUtils.getMethodName(description); if (methodName == null) { String className = description.getClassName(); if (isNotBlank(className)) { return className; } } return super.getLegacyReportingName(); } @Override public Type getType() { return description.isTest() ? 
Type.TEST : Type.CONTAINER; } @Override public Set<TestTag> getTags() { Set<TestTag> tags = new LinkedHashSet<>(); addTagsFromParent(tags); addCategoriesAsTags(tags); return tags; } @Override public void removeFromHierarchy() { if (canBeRemovedFromHierarchy()) { super.removeFromHierarchy(); } } protected boolean canBeRemovedFromHierarchy() { return tryToExcludeFromRunner(this.description); } protected boolean tryToExcludeFromRunner(Description description) { // @formatter:off return getParent().map(VintageTestDescriptor.class::cast) .map(parent -> parent.tryToExcludeFromRunner(description)) .orElse(false); // @formatter:on } void pruneDescriptorsForObsoleteDescriptions(List<Description> newSiblingDescriptions) { Optional<Description> newDescription = newSiblingDescriptions.stream().filter(isEqual(description)).findAny(); if (newDescription.isPresent()) { List<Description> newChildren = newDescription.get().getChildren(); new ArrayList<>(children).stream().map(VintageTestDescriptor.class::cast).forEach( childDescriptor -> childDescriptor.pruneDescriptorsForObsoleteDescriptions(newChildren)); } else { super.removeFromHierarchy(); } } private void addTagsFromParent(Set<TestTag> tags) { getParent().map(TestDescriptor::getTags).ifPresent(tags::addAll); } private void addCategoriesAsTags(Set<TestTag> tags) { Category annotation = description.getAnnotation(Category.class); if (annotation != null) { // @formatter:off stream(annotation.value()) .map(ReflectionUtils::getAllAssignmentCompatibleClasses) .flatMap(Collection::stream) .distinct() .map(Class::getName) .map(TestTag::create) .forEachOrdered(tags::add); // @formatter:on } } }
VintageTestDescriptor
java
hibernate__hibernate-orm
hibernate-core/src/test/java/org/hibernate/orm/test/bytecode/enhancement/lazy/proxy/LazyProxyBytecodeEnhancementCollectionInitializationTest.java
{ "start": 1632, "end": 3284 }
class ____ { @BeforeEach public void checkSettings(SessionFactoryScope scope) { // We want to test this configuration exactly assertTrue( scope.getSessionFactory().getSessionFactoryOptions().isCollectionsInDefaultFetchGroupEnabled() ); } @BeforeEach public void prepare(SessionFactoryScope scope) { scope.inTransaction( s -> { Parent parent = new Parent(); parent.setId( 1 ); for ( int i = 0; i < 2; i++ ) { Child child = new Child(); child.setId( i ); s.persist( child ); child.setParent( parent ); parent.getChildren().add( child ); } s.persist( parent ); } ); } @Test public void collectionInitializationOnLazyProxy(SessionFactoryScope scope) { scope.inTransaction( s -> { Parent parent = s.getReference( Parent.class, 1 ); assertThat( Hibernate.isPropertyInitialized( parent, "children") ).isFalse(); assertThat( s.unwrap( SessionImplementor.class ).getPersistenceContext().getCollectionEntries() ) .isNullOrEmpty(); // Accessing a collection property on a lazy proxy initializes the property and instantiates the collection, // but does not initialize the collection. List<Child> children = parent.getChildren(); assertThat( Hibernate.isPropertyInitialized( parent, "children") ).isTrue(); assertThat( s.unwrap( SessionImplementor.class ).getPersistenceContext().getCollectionEntries() ) .hasSize( 1 ); assertThat( Hibernate.isInitialized( children ) ).isFalse(); children.size(); assertThat( Hibernate.isInitialized( children ) ).isTrue(); } ); } @Entity(name = "Parent") @Table static
LazyProxyBytecodeEnhancementCollectionInitializationTest
java
quarkusio__quarkus
independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/injection/assignability/generics/AssignabilityWithGenericsTest.java
{ "start": 3575, "end": 3685 }
class ____ { @Inject FooTyped<Long> bean; } @Dependent static
BeanInjectingActualType
java
alibaba__fastjson
src/test/java/com/alibaba/json/bvtVO/ae/huangliang2/Area.java
{ "start": 218, "end": 431 }
interface ____ { public static final String TYPE_SECTION = "section"; public static final String TYPE_FLOORV1 = "floorV1"; public static final String TYPE_FLOORV2 = "floorV2"; String getName(); }
Area
java
elastic__elasticsearch
x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeProcessor.java
{ "start": 711, "end": 779 }
class ____ extends BaseDateTimeProcessor { public
DateTimeProcessor
java
apache__hadoop
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalJobRunner.java
{ "start": 11951, "end": 37190 }
class ____ extends RunnableWithThrowable { private final int taskId; private final JobID jobId; private final JobConf localConf; // This is a reference to a shared object passed in by the // external context; this delivers state to the reducers regarding // where to fetch mapper outputs. private final Map<TaskAttemptID, MapOutputFile> mapOutputFiles; public ReduceTaskRunnable(int taskId, JobID jobId, Map<TaskAttemptID, MapOutputFile> mapOutputFiles) { this.taskId = taskId; this.jobId = jobId; this.mapOutputFiles = mapOutputFiles; this.localConf = new JobConf(job); this.localConf.set("mapreduce.jobtracker.address", "local"); } public void run() { try { TaskAttemptID reduceId = new TaskAttemptID(new TaskID( jobId, TaskType.REDUCE, taskId), 0); LOG.info("Starting task: " + reduceId); ReduceTask reduce = new ReduceTask(systemJobFile.toString(), reduceId, taskId, mapIds.size(), 1); reduce.setUser(UserGroupInformation.getCurrentUser(). getShortUserName()); setupChildMapredLocalDirs(reduce, localConf); reduce.setLocalMapFiles(mapOutputFiles); if (!Job.this.isInterrupted()) { reduce.setJobFile(localJobFile.toString()); localConf.setUser(reduce.getUser()); reduce.localizeConfiguration(localConf); reduce.setConf(localConf); try { reduce_tasks.getAndIncrement(); myMetrics.launchReduce(reduce.getTaskID()); reduce.run(localConf, Job.this); myMetrics.completeReduce(reduce.getTaskID()); } finally { reduce_tasks.getAndDecrement(); } LOG.info("Finishing task: " + reduceId); } else { throw new InterruptedException(); } } catch (Throwable t) { // store this to be rethrown in the initial thread context. this.storedException = t; } } } /** * Create Runnables to encapsulate reduce tasks for use by the executor * service. * @param jobId the job id * @param mapOutputFiles a mapping from task attempts to output files * @return a List of Runnables, one per reduce task. 
*/ protected List<RunnableWithThrowable> getReduceTaskRunnables( JobID jobId, Map<TaskAttemptID, MapOutputFile> mapOutputFiles) { int taskId = 0; ArrayList<RunnableWithThrowable> list = new ArrayList<RunnableWithThrowable>(); for (int i = 0; i < this.numReduceTasks; i++) { list.add(new ReduceTaskRunnable(taskId++, jobId, mapOutputFiles)); } return list; } /** * Initialize the counters that will hold partial-progress from * the various task attempts. * @param numMaps the number of map tasks in this job. */ private synchronized void initCounters(int numMaps, int numReduces) { // Initialize state trackers for all map tasks. this.partialMapProgress = new float[numMaps]; this.mapCounters = new Counters[numMaps]; for (int i = 0; i < numMaps; i++) { this.mapCounters[i] = new Counters(); } this.partialReduceProgress = new float[numReduces]; this.reduceCounters = new Counters[numReduces]; for (int i = 0; i < numReduces; i++) { this.reduceCounters[i] = new Counters(); } this.numMapTasks = numMaps; this.numReduceTasks = numReduces; } /** * Creates the executor service used to run map tasks. * * @return an ExecutorService instance that handles map tasks */ protected synchronized ExecutorService createMapExecutor() { // Determine the size of the thread pool to use int maxMapThreads = job.getInt(LOCAL_MAX_MAPS, 1); if (maxMapThreads < 1) { throw new IllegalArgumentException( "Configured " + LOCAL_MAX_MAPS + " must be >= 1"); } maxMapThreads = Math.min(maxMapThreads, this.numMapTasks); maxMapThreads = Math.max(maxMapThreads, 1); // In case of no tasks. LOG.debug("Starting mapper thread pool executor."); LOG.debug("Max local threads: " + maxMapThreads); LOG.debug("Map tasks to process: " + this.numMapTasks); // Create a new executor service to drain the work queue. 
ThreadFactory tf = new ThreadFactoryBuilder() .setNameFormat("LocalJobRunner Map Task Executor #%d") .build(); ExecutorService executor = HadoopExecutors.newFixedThreadPool( maxMapThreads, tf); return executor; } /** * Creates the executor service used to run reduce tasks. * * @return an ExecutorService instance that handles reduce tasks */ protected synchronized ExecutorService createReduceExecutor() { // Determine the size of the thread pool to use int maxReduceThreads = job.getInt(LOCAL_MAX_REDUCES, 1); if (maxReduceThreads < 1) { throw new IllegalArgumentException( "Configured " + LOCAL_MAX_REDUCES + " must be >= 1"); } maxReduceThreads = Math.min(maxReduceThreads, this.numReduceTasks); maxReduceThreads = Math.max(maxReduceThreads, 1); // In case of no tasks. LOG.debug("Starting reduce thread pool executor."); LOG.debug("Max local threads: " + maxReduceThreads); LOG.debug("Reduce tasks to process: " + this.numReduceTasks); // Create a new executor service to drain the work queue. ExecutorService executor = HadoopExecutors.newFixedThreadPool( maxReduceThreads); return executor; } /** Run a set of tasks and waits for them to complete. */ private void runTasks(List<RunnableWithThrowable> runnables, ExecutorService service, String taskType) throws Exception { // Start populating the executor with work units. // They may begin running immediately (in other threads). for (Runnable r : runnables) { service.submit(r); } try { service.shutdown(); // Instructs queue to drain. // Wait for tasks to finish; do not use a time-based timeout. // (See http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6179024) LOG.info("Waiting for " + taskType + " tasks"); service.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); } catch (InterruptedException ie) { // Cancel all threads. 
service.shutdownNow(); throw ie; } LOG.info(taskType + " task executor complete."); // After waiting for the tasks to complete, if any of these // have thrown an exception, rethrow it now in the main thread context. for (RunnableWithThrowable r : runnables) { if (r.storedException != null) { throw new Exception(r.storedException); } } } private org.apache.hadoop.mapreduce.OutputCommitter createOutputCommitter(boolean newApiCommitter, JobID jobId, Configuration conf) throws Exception { org.apache.hadoop.mapreduce.OutputCommitter committer = null; LOG.info("OutputCommitter set in config " + conf.get("mapred.output.committer.class")); if (newApiCommitter) { org.apache.hadoop.mapreduce.TaskID taskId = new org.apache.hadoop.mapreduce.TaskID(jobId, TaskType.MAP, 0); org.apache.hadoop.mapreduce.TaskAttemptID taskAttemptID = new org.apache.hadoop.mapreduce.TaskAttemptID(taskId, 0); org.apache.hadoop.mapreduce.TaskAttemptContext taskContext = new TaskAttemptContextImpl(conf, taskAttemptID); OutputFormat outputFormat = ReflectionUtils.newInstance(taskContext.getOutputFormatClass(), conf); committer = outputFormat.getOutputCommitter(taskContext); } else { committer = ReflectionUtils.newInstance(conf.getClass( "mapred.output.committer.class", FileOutputCommitter.class, org.apache.hadoop.mapred.OutputCommitter.class), conf); } LOG.info("OutputCommitter is " + committer.getClass().getName()); return committer; } @Override public void work() { JobID jobId = profile.getJobID(); JobContext jContext = new JobContextImpl(job, jobId); org.apache.hadoop.mapreduce.OutputCommitter outputCommitter = null; try { outputCommitter = createOutputCommitter(conf.getUseNewMapper(), jobId, conf); } catch (Exception e) { LOG.info("Failed to createOutputCommitter", e); return; } try { TaskSplitMetaInfo[] taskSplitMetaInfos = SplitMetaInfoReader.readSplitMetaInfo(jobId, localFs, conf, systemJobDir); int numReduceTasks = job.getNumReduceTasks(); outputCommitter.setupJob(jContext); 
status.setSetupProgress(1.0f); Map<TaskAttemptID, MapOutputFile> mapOutputFiles = Collections.synchronizedMap(new HashMap<TaskAttemptID, MapOutputFile>()); List<RunnableWithThrowable> mapRunnables = getMapTaskRunnables( taskSplitMetaInfos, jobId, mapOutputFiles); initCounters(mapRunnables.size(), numReduceTasks); ExecutorService mapService = createMapExecutor(); runTasks(mapRunnables, mapService, "map"); try { if (numReduceTasks > 0) { List<RunnableWithThrowable> reduceRunnables = getReduceTaskRunnables( jobId, mapOutputFiles); ExecutorService reduceService = createReduceExecutor(); runTasks(reduceRunnables, reduceService, "reduce"); } } finally { for (MapOutputFile output : mapOutputFiles.values()) { output.removeAll(); } } // delete the temporary directory in output directory outputCommitter.commitJob(jContext); status.setCleanupProgress(1.0f); if (killed) { this.status.setRunState(JobStatus.KILLED); } else { this.status.setRunState(JobStatus.SUCCEEDED); } JobEndNotifier.localRunnerNotification(job, status); } catch (Throwable t) { try { outputCommitter.abortJob(jContext, org.apache.hadoop.mapreduce.JobStatus.State.FAILED); } catch (IOException ioe) { LOG.info("Error cleaning up job:" + id); } status.setCleanupProgress(1.0f); if (killed) { this.status.setRunState(JobStatus.KILLED); } else { this.status.setRunState(JobStatus.FAILED); } LOG.warn(id.toString(), t); JobEndNotifier.localRunnerNotification(job, status); } finally { try { try { // Cleanup distributed cache localDistributedCacheManager.close(); } finally { try { fs.delete(systemJobFile.getParent(), true); // delete submit dir } finally { localFs.delete(localJobFile, true); // delete local copy } } } catch (IOException e) { LOG.warn("Error cleaning up "+id+": "+e); } } } // TaskUmbilicalProtocol methods @Override public JvmTask getTask(JvmContext context) { return null; } @Override public synchronized AMFeedback statusUpdate(TaskAttemptID taskId, TaskStatus taskStatus) throws IOException, 
InterruptedException { AMFeedback feedback = new AMFeedback(); feedback.setTaskFound(true); if (null == taskStatus) { return feedback; } // Serialize as we would if distributed in order to make deep copy ByteArrayOutputStream baos = new ByteArrayOutputStream(); DataOutputStream dos = new DataOutputStream(baos); taskStatus.write(dos); dos.close(); taskStatus = TaskStatus.createTaskStatus(taskStatus.getIsMap()); taskStatus.readFields(new DataInputStream( new ByteArrayInputStream(baos.toByteArray()))); LOG.info(taskStatus.getStateString()); int mapTaskIndex = mapIds.indexOf(taskId); if (mapTaskIndex >= 0) { // mapping float numTasks = (float) this.numMapTasks; partialMapProgress[mapTaskIndex] = taskStatus.getProgress(); mapCounters[mapTaskIndex] = taskStatus.getCounters(); float partialProgress = 0.0f; for (float f : partialMapProgress) { partialProgress += f; } status.setMapProgress(partialProgress / numTasks); } else { // reducing int reduceTaskIndex = taskId.getTaskID().getId(); float numTasks = (float) this.numReduceTasks; partialReduceProgress[reduceTaskIndex] = taskStatus.getProgress(); reduceCounters[reduceTaskIndex] = taskStatus.getCounters(); float partialProgress = 0.0f; for (float f : partialReduceProgress) { partialProgress += f; } status.setReduceProgress(partialProgress / numTasks); } // ignore phase return feedback; } /** Return the current values of the counters for this job, * including tasks that are in progress. */ public synchronized Counters getCurrentCounters() { if (null == mapCounters) { // Counters not yet initialized for job. 
return new Counters(); } Counters current = new Counters(); for (Counters c : mapCounters) { current = Counters.sum(current, c); } if (null != reduceCounters && reduceCounters.length > 0) { for (Counters c : reduceCounters) { current = Counters.sum(current, c); } } return current; } /** * Task is reporting that it is in commit_pending * and it is waiting for the commit Response */ public void commitPending(TaskAttemptID taskid, TaskStatus taskStatus) throws IOException, InterruptedException { statusUpdate(taskid, taskStatus); } @Override public void reportDiagnosticInfo(TaskAttemptID taskid, String trace) { // Ignore for now } @Override public void reportNextRecordRange(TaskAttemptID taskid, SortedRanges.Range range) throws IOException { LOG.info("Task " + taskid + " reportedNextRecordRange " + range); } @Override public boolean canCommit(TaskAttemptID taskid) throws IOException { return true; } @Override public void done(TaskAttemptID taskId) throws IOException { int taskIndex = mapIds.indexOf(taskId); if (taskIndex >= 0) { // mapping status.setMapProgress(1.0f); } else { status.setReduceProgress(1.0f); } } @Override public synchronized void fsError(TaskAttemptID taskId, String message) throws IOException { LOG.error("FSError: "+ message + "from task: " + taskId); } @Override public void shuffleError(TaskAttemptID taskId, String message) throws IOException { LOG.error("shuffleError: "+ message + "from task: " + taskId); } public synchronized void fatalError(TaskAttemptID taskId, String msg, boolean fastFail) throws IOException { LOG.error("Fatal: "+ msg + " from task: " + taskId + " fast fail: " + fastFail); } @Override public MapTaskCompletionEventsUpdate getMapCompletionEvents(JobID jobId, int fromEventId, int maxLocs, TaskAttemptID id) throws IOException { return new MapTaskCompletionEventsUpdate( org.apache.hadoop.mapred.TaskCompletionEvent.EMPTY_ARRAY, false); } @Override public void preempted(TaskAttemptID taskId, TaskStatus taskStatus) throws IOException, 
InterruptedException { // ignore } @Override public TaskCheckpointID getCheckpointID(TaskID taskId) { // ignore return null; } @Override public void setCheckpointID(TaskID downgrade, TaskCheckpointID cid) { // ignore } } public LocalJobRunner(Configuration conf) throws IOException { this(new JobConf(conf)); } @Deprecated public LocalJobRunner(JobConf conf) throws IOException { this.fs = FileSystem.getLocal(conf); this.conf = conf; myMetrics = LocalJobRunnerMetrics.create(); } // JobSubmissionProtocol methods private static int jobid = 0; // used for making sure that local jobs run in different jvms don't // collide on staging or job directories private int randid; public synchronized org.apache.hadoop.mapreduce.JobID getNewJobID() { return new org.apache.hadoop.mapreduce.JobID("local" + randid, ++jobid); } public org.apache.hadoop.mapreduce.JobStatus submitJob( org.apache.hadoop.mapreduce.JobID jobid, String jobSubmitDir, Credentials credentials) throws IOException { Job job = new Job(JobID.downgrade(jobid), jobSubmitDir); job.job.setCredentials(credentials); return job.status; } public void killJob(org.apache.hadoop.mapreduce.JobID id) { jobs.get(JobID.downgrade(id)).killed = true; jobs.get(JobID.downgrade(id)).interrupt(); } public void setJobPriority(org.apache.hadoop.mapreduce.JobID id, String jp) throws IOException { throw new UnsupportedOperationException("Changing job priority " + "in LocalJobRunner is not supported."); } /** Throws {@link UnsupportedOperationException} */ public boolean killTask(org.apache.hadoop.mapreduce.TaskAttemptID taskId, boolean shouldFail) throws IOException { throw new UnsupportedOperationException("Killing tasks in " + "LocalJobRunner is not supported"); } public org.apache.hadoop.mapreduce.TaskReport[] getTaskReports( org.apache.hadoop.mapreduce.JobID id, TaskType type) { return new org.apache.hadoop.mapreduce.TaskReport[0]; } public org.apache.hadoop.mapreduce.JobStatus getJobStatus( org.apache.hadoop.mapreduce.JobID id) { Job 
job = jobs.get(JobID.downgrade(id)); if(job != null) return job.status; else return null; } public org.apache.hadoop.mapreduce.Counters getJobCounters( org.apache.hadoop.mapreduce.JobID id) { Job job = jobs.get(JobID.downgrade(id)); return new org.apache.hadoop.mapreduce.Counters(job.getCurrentCounters()); } public String getFilesystemName() throws IOException { return fs.getUri().toString(); } public ClusterMetrics getClusterMetrics() { int numMapTasks = map_tasks.get(); int numReduceTasks = reduce_tasks.get(); return new ClusterMetrics(numMapTasks, numReduceTasks, numMapTasks, numReduceTasks, 0, 0, 1, 1, jobs.size(), 1, 0, 0); } public JobTrackerStatus getJobTrackerStatus() { return JobTrackerStatus.RUNNING; } public long getTaskTrackerExpiryInterval() throws IOException, InterruptedException { return 0; } /** * Get all active trackers in cluster. * @return array of TaskTrackerInfo */ public TaskTrackerInfo[] getActiveTrackers() throws IOException, InterruptedException { return new TaskTrackerInfo[0]; } /** * Get all blacklisted trackers in cluster. * @return array of TaskTrackerInfo */ public TaskTrackerInfo[] getBlacklistedTrackers() throws IOException, InterruptedException { return new TaskTrackerInfo[0]; } public TaskCompletionEvent[] getTaskCompletionEvents( org.apache.hadoop.mapreduce.JobID jobid , int fromEventId, int maxEvents) throws IOException { return TaskCompletionEvent.EMPTY_ARRAY; } public org.apache.hadoop.mapreduce.JobStatus[] getAllJobs() {return null;} /** * Returns the diagnostic information for a particular task in the given job. 
* To be implemented */ public String[] getTaskDiagnostics( org.apache.hadoop.mapreduce.TaskAttemptID taskid) throws IOException{ return new String [0]; } /** * @see org.apache.hadoop.mapreduce.protocol.ClientProtocol#getSystemDir() */ public String getSystemDir() { Path sysDir = new Path( conf.get(JTConfig.JT_SYSTEM_DIR, "/tmp/hadoop/mapred/system")); return fs.makeQualified(sysDir).toString(); } /** * @see org.apache.hadoop.mapreduce.protocol.ClientProtocol#getQueueAdmins(String) */ public AccessControlList getQueueAdmins(String queueName) throws IOException { return new AccessControlList(" ");// no queue admins for local job runner } /** * @see org.apache.hadoop.mapreduce.protocol.ClientProtocol#getStagingAreaDir() */ public String getStagingAreaDir() throws IOException { Path stagingRootDir = new Path(conf.get(JTConfig.JT_STAGING_AREA_ROOT, "/tmp/hadoop/mapred/staging")); UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); String user; randid = rand.nextInt(Integer.MAX_VALUE); if (ugi != null) { user = ugi.getShortUserName() + randid; } else { user = "dummy" + randid; } return fs.makeQualified(new Path(stagingRootDir, user+"/.staging")).toString(); } public String getJobHistoryDir() { return null; } @Override public QueueInfo[] getChildQueues(String queueName) throws IOException { return null; } @Override public QueueInfo[] getRootQueues() throws IOException { return null; } @Override public QueueInfo[] getQueues() throws IOException { return null; } @Override public QueueInfo getQueue(String queue) throws IOException { return null; } @Override public org.apache.hadoop.mapreduce.QueueAclsInfo[] getQueueAclsForCurrentUser() throws IOException{ return null; } /** * Set the max number of map tasks to run concurrently in the LocalJobRunner. * @param job the job to configure * @param maxMaps the maximum number of map tasks to allow. 
*/ public static void setLocalMaxRunningMaps( org.apache.hadoop.mapreduce.JobContext job, int maxMaps) { job.getConfiguration().setInt(LOCAL_MAX_MAPS, maxMaps); } /** * @return the max number of map tasks to run concurrently in the * LocalJobRunner. */ public static int getLocalMaxRunningMaps( org.apache.hadoop.mapreduce.JobContext job) { return job.getConfiguration().getInt(LOCAL_MAX_MAPS, 1); } /** * Set the max number of reduce tasks to run concurrently in the LocalJobRunner. * @param job the job to configure * @param maxReduces the maximum number of reduce tasks to allow. */ public static void setLocalMaxRunningReduces( org.apache.hadoop.mapreduce.JobContext job, int maxReduces) { job.getConfiguration().setInt(LOCAL_MAX_REDUCES, maxReduces); } /** * @return the max number of reduce tasks to run concurrently in the * LocalJobRunner. */ public static int getLocalMaxRunningReduces( org.apache.hadoop.mapreduce.JobContext job) { return job.getConfiguration().getInt(LOCAL_MAX_REDUCES, 1); } @Override public void cancelDelegationToken(Token<DelegationTokenIdentifier> token ) throws IOException, InterruptedException { } @Override public Token<DelegationTokenIdentifier> getDelegationToken(Text renewer) throws IOException, InterruptedException { return null; } @Override public long renewDelegationToken(Token<DelegationTokenIdentifier> token ) throws IOException,InterruptedException{ return 0; } @Override public LogParams getLogFileParams(org.apache.hadoop.mapreduce.JobID jobID, org.apache.hadoop.mapreduce.TaskAttemptID taskAttemptID) throws IOException, InterruptedException { throw new UnsupportedOperationException("Not supported"); } static void setupChildMapredLocalDirs(Task t, JobConf conf) { String[] localDirs = conf.getTrimmedStrings(MRConfig.LOCAL_DIR); String jobId = t.getJobID().toString(); String taskId = t.getTaskID().toString(); boolean isCleanup = t.isTaskCleanupTask(); String user = t.getUser(); StringBuilder childMapredLocalDir = new 
StringBuilder(localDirs[0] + Path.SEPARATOR + getLocalTaskDir(user, jobId, taskId, isCleanup)); for (int i = 1; i < localDirs.length; i++) { childMapredLocalDir.append("," + localDirs[i] + Path.SEPARATOR + getLocalTaskDir(user, jobId, taskId, isCleanup)); } LOG.debug(MRConfig.LOCAL_DIR + " for child : " + childMapredLocalDir); conf.set(MRConfig.LOCAL_DIR, childMapredLocalDir.toString()); } static final String TASK_CLEANUP_SUFFIX = ".cleanup"; static final String JOBCACHE = "jobcache"; static String getLocalTaskDir(String user, String jobid, String taskid, boolean isCleanupAttempt) { String taskDir = jobDir + Path.SEPARATOR + user + Path.SEPARATOR + JOBCACHE + Path.SEPARATOR + jobid + Path.SEPARATOR + taskid; if (isCleanupAttempt) { taskDir = taskDir + TASK_CLEANUP_SUFFIX; } return taskDir; } }
ReduceTaskRunnable
java
spring-projects__spring-boot
module/spring-boot-data-jdbc/src/test/java/org/springframework/boot/data/jdbc/autoconfigure/DataJdbcRepositoriesAutoConfigurationTests.java
{ "start": 9632, "end": 9785 }
class ____ { } @TestAutoConfigurationPackage(EmptyDataPackage.class) @EnableJdbcRepositories(basePackageClasses = City.class) static
EmptyConfiguration
java
apache__flink
flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/operators/window/groupwindow/assigners/SlidingWindowAssigner.java
{ "start": 1553, "end": 3952 }
class ____ extends PanedWindowAssigner<TimeWindow> implements InternalTimeWindowAssigner { private static final long serialVersionUID = 4895551155814656518L; private final long size; private final long slide; private final long offset; private final long paneSize; private final int numPanesPerWindow; private final boolean isEventTime; protected SlidingWindowAssigner(long size, long slide, long offset, boolean isEventTime) { if (size <= 0 || slide <= 0) { throw new IllegalArgumentException( "SlidingWindowAssigner parameters must satisfy slide > 0 and size > 0"); } this.size = size; this.slide = slide; this.offset = offset; this.isEventTime = isEventTime; this.paneSize = ArithmeticUtils.gcd(size, slide); this.numPanesPerWindow = MathUtils.checkedDownCast(size / paneSize); } @Override public Collection<TimeWindow> assignWindows(RowData element, long timestamp) { List<TimeWindow> windows = new ArrayList<>((int) (size / slide)); long lastStart = TimeWindow.getWindowStartWithOffset(timestamp, offset, slide); for (long start = lastStart; start > timestamp - size; start -= slide) { windows.add(new TimeWindow(start, start + size)); } return windows; } @Override public TimeWindow assignPane(Object element, long timestamp) { long start = TimeWindow.getWindowStartWithOffset(timestamp, offset, paneSize); return new TimeWindow(start, start + paneSize); } @Override public Iterable<TimeWindow> splitIntoPanes(TimeWindow window) { return new PanesIterable(window.getStart(), paneSize, numPanesPerWindow); } @Override public TimeWindow getLastWindow(TimeWindow pane) { long lastStart = TimeWindow.getWindowStartWithOffset(pane.getStart(), offset, slide); return new TimeWindow(lastStart, lastStart + size); } @Override public TypeSerializer<TimeWindow> getWindowSerializer(ExecutionConfig executionConfig) { return new TimeWindow.Serializer(); } @Override public boolean isEventTime() { return isEventTime; } @Override public String toString() { return "SlidingWindow(" + size + ", " + slide + 
")"; } private static
SlidingWindowAssigner
java
elastic__elasticsearch
test/framework/src/main/java/org/elasticsearch/datageneration/fields/leaf/GeoPointFieldDataGenerator.java
{ "start": 890, "end": 2920 }
class ____ implements FieldDataGenerator { private final Supplier<Object> formattedPoints; private final Supplier<Object> formattedPointsWithMalformed; public GeoPointFieldDataGenerator(DataSource dataSource) { var points = dataSource.get(new DataSourceRequest.GeoPointGenerator()).generator(); var representations = dataSource.get( new DataSourceRequest.TransformWeightedWrapper<GeoPoint>( List.of( Tuple.tuple(0.2, p -> Map.of("type", "point", "coordinates", List.of(p.getLon(), p.getLat()))), Tuple.tuple(0.2, p -> "POINT( " + p.getLon() + " " + p.getLat() + " )"), Tuple.tuple(0.2, p -> Map.of("lon", p.getLon(), "lat", p.getLat())), // this triggers a bug in stored source block loader, see #125710 // Tuple.tuple(0.2, p -> List.of(p.getLon(), p.getLat())), Tuple.tuple(0.2, p -> p.getLat() + "," + p.getLon()), Tuple.tuple(0.2, GeoPoint::getGeohash) ) ) ); var pointRepresentations = representations.wrapper().apply(points); this.formattedPoints = Wrappers.defaults(pointRepresentations, dataSource); var strings = dataSource.get(new DataSourceRequest.StringGenerator()).generator(); this.formattedPointsWithMalformed = Wrappers.defaultsWithMalformed(pointRepresentations, strings::get, dataSource); } @Override public Object generateValue(Map<String, Object> fieldMapping) { if (fieldMapping == null) { // dynamically mapped and dynamic mapping does not play well with this type (it sometimes gets mapped as an object) // return null to skip indexing this field return null; } if ((Boolean) fieldMapping.getOrDefault("ignore_malformed", false)) { return formattedPointsWithMalformed.get(); } return formattedPoints.get(); } }
GeoPointFieldDataGenerator
java
apache__maven
its/core-it-support/core-it-wagon/src/main/java/org/apache/maven/wagon/providers/coreit/CoreItWagon.java
{ "start": 1902, "end": 6507 }
/**
 * A stub {@code Wagon} transport used by the Maven core integration tests. Instead of talking
 * to a real repository it serves canned content for downloads and records connection/auth
 * details to local files ({@code target/wagon.properties}, {@code target/wagon-data}) so the
 * tests can assert what the core passed to the transport layer.
 */
class ____ extends AbstractWagon {

    /**
     * "Downloads" a resource: fires the standard transfer events, obtains the canned stream
     * from {@link #fillInputData(InputData)} and copies it to {@code destination}.
     */
    @Override
    public void get(String resourceName, File destination)
            throws TransferFailedException, ResourceDoesNotExistException, AuthorizationException {
        InputData inputData = new InputData();
        Resource resource = new Resource(resourceName);
        fireGetInitiated(resource, destination);
        inputData.setResource(resource);
        fillInputData(inputData);
        InputStream is = inputData.getInputStream();
        if (is == null) {
            // A null stream means the provider could not serve the resource at all.
            throw new TransferFailedException(
                    getRepository().getUrl() + " - Could not open input stream for resource: '" + resource + "'");
        }
        createParentDirectories(destination);
        getTransfer(inputData.getResource(), destination, is);
    }

    /** Conditional download is not supported by this stub; always reports "not newer". */
    @Override
    public boolean getIfNewer(String resourceName, File destination, long timestamp)
            throws TransferFailedException, ResourceDoesNotExistException, AuthorizationException {
        return false;
    }

    /**
     * "Uploads" a file: fires the standard transfer events and writes into the throwaway
     * stream prepared by {@link #fillOutputData(OutputData)}.
     */
    @Override
    public void put(File source, String resourceName)
            throws TransferFailedException, ResourceDoesNotExistException, AuthorizationException {
        OutputData outputData = new OutputData();
        Resource resource = new Resource(resourceName);
        firePutInitiated(resource, source);
        outputData.setResource(resource);
        fillOutputData(outputData);
        OutputStream os = outputData.getOutputStream();
        if (os == null) {
            throw new TransferFailedException(
                    getRepository().getUrl() + " - Could not open output stream for resource: '" + resource + "'");
        }
        putTransfer(outputData.getResource(), source, os, true);
    }

    /**
     * Creates an empty marker file {@code target/wagon-data} so tests can verify that the
     * connection was properly closed.
     */
    @Override
    public void closeConnection() throws ConnectionException {
        File f = new File("target/wagon-data");
        try {
            f.getParentFile().mkdirs();
            f.createNewFile();
        } catch (IOException e) {
            throw new ConnectionException(e.getMessage(), e);
        }
    }

    /**
     * Serves canned download content based on the resource name: a fixed SHA-1 hex string for
     * {@code *.sha1}, a fixed MD5 hex string for {@code *.md5}, and a minimal
     * {@code <metadata />} document for everything else. The checksum strings presumably match
     * the canned metadata payload — not verified here.
     */
    public void fillInputData(InputData inputData) throws TransferFailedException, ResourceDoesNotExistException {
        try {
            String resName = inputData.getResource().getName();
            InputStream is = null;
            if (resName.endsWith(".sha1")) {
                is = new ByteArrayInputStream("c96e29be962f9d8123b584b8f51d66b347d268d4".getBytes("UTF-8"));
            } else if (resName.endsWith(".md5")) {
                is = new ByteArrayInputStream("d2b637ab8965308490bc6482c860dfc5".getBytes("UTF-8"));
            } else {
                is = new ByteArrayInputStream("<metadata />".getBytes("UTF-8"));
            }
            inputData.setInputStream(is);
        } catch (IOException e) {
            // UTF-8 is a mandatory charset, so this can only happen on a broken JVM.
            throw new TransferFailedException("Broken JVM", e);
        }
    }

    /**
     * Dumps the repository permissions and authentication info the core handed to this wagon
     * into {@code target/wagon.properties} (for test assertions), then installs a discarding
     * in-memory output stream so uploads go nowhere.
     */
    public void fillOutputData(OutputData outputData) throws TransferFailedException {
        Properties props = new Properties();
        if (getRepository().getPermissions() != null) {
            String dirPerms = getRepository().getPermissions().getDirectoryMode();
            put(props, "directory.mode", dirPerms);
            String filePerms = getRepository().getPermissions().getFileMode();
            put(props, "file.mode", filePerms);
        }
        AuthenticationInfo auth = getAuthenticationInfo();
        if (auth != null) {
            put(props, "username", auth.getUserName());
            put(props, "password", auth.getPassword());
            put(props, "privateKey", auth.getPrivateKey());
            put(props, "passphrase", auth.getPassphrase());
        }
        try {
            File file = new File(System.getProperty("user.dir"), "target/wagon.properties").getAbsoluteFile();
            file.getParentFile().mkdirs();
            try (OutputStream os = new FileOutputStream(file)) {
                props.store(os, "MAVEN-CORE-IT-WAGON");
            }
        } catch (IOException e) {
            throw new TransferFailedException(e.getMessage(), e);
        }
        outputData.setOutputStream(new ByteArrayOutputStream());
    }

    /** No real connection to open for this stub. */
    @Override
    public void openConnection() throws ConnectionException, AuthenticationException {
        // ignore
    }

    /** No real connection to open for this stub. */
    @Override
    protected void openConnectionInternal() throws ConnectionException, AuthenticationException {
        // ignore
    }

    /** Sets the property only when the value is non-null ({@code Properties} rejects nulls). */
    private void put(Properties props, String key, String value) {
        if (value != null) {
            props.setProperty(key, value);
        }
    }
}
CoreItWagon
java
apache__hadoop
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MiniMRCluster.java
{ "start": 1353, "end": 1862 }
class ____ the new MiniMRYarnCluster * in MR2 but provides the same old MR1 interface, so tests can be migrated from * MR1 to MR2 with minimal changes. * * Due to major differences between MR1 and MR2, a number of methods are either * unimplemented/unsupported or were re-implemented to provide wrappers around * MR2 functionality. * * @deprecated Use {@link org.apache.hadoop.mapred.MiniMRClientClusterFactory} * instead */ @Deprecated @InterfaceAudience.Public @InterfaceStability.Evolving public
uses