language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/dialect/unit/lockhint/AbstractLockHintTest.java | {
"start": 542,
"end": 1312
} | class ____ {
private Dialect dialect;
protected abstract String getLockHintUsed();
protected abstract Dialect getDialectUnderTest();
@BeforeEach
public void setUp() throws Exception {
this.dialect = getDialectUnderTest();
}
@AfterEach
public void tearDown() throws Exception {
this.dialect = null;
}
@Test
public void testBasicLocking() {
new SyntaxChecker( "select xyz from ABC $HOLDER$", "a" ).verify();
new SyntaxChecker( "select xyz from ABC $HOLDER$ join DEF d", "a" ).verify();
new SyntaxChecker( "select xyz from ABC $HOLDER$, DEF d", "a" ).verify();
}
protected LockOptions lockOptions(String aliasToLock) {
LockOptions lockOptions = new LockOptions(LockMode.PESSIMISTIC_WRITE);
return lockOptions;
}
protected | AbstractLockHintTest |
java | elastic__elasticsearch | x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorServiceTypeActionRequestBWCSerializingTests.java | {
"start": 564,
"end": 2375
} | class ____ extends AbstractBWCSerializationTestCase<
UpdateConnectorServiceTypeAction.Request> {
private String connectorId;
@Override
protected Writeable.Reader<UpdateConnectorServiceTypeAction.Request> instanceReader() {
return UpdateConnectorServiceTypeAction.Request::new;
}
@Override
protected UpdateConnectorServiceTypeAction.Request createTestInstance() {
this.connectorId = randomUUID();
return new UpdateConnectorServiceTypeAction.Request(connectorId, randomAlphaOfLengthBetween(3, 10));
}
@Override
protected UpdateConnectorServiceTypeAction.Request mutateInstance(UpdateConnectorServiceTypeAction.Request instance)
throws IOException {
String originalConnectorId = instance.getConnectorId();
String serviceType = instance.getServiceType();
switch (randomIntBetween(0, 1)) {
case 0 -> originalConnectorId = randomValueOtherThan(originalConnectorId, () -> randomUUID());
case 1 -> serviceType = randomValueOtherThan(serviceType, () -> randomAlphaOfLengthBetween(3, 10));
default -> throw new AssertionError("Illegal randomisation branch");
}
return new UpdateConnectorServiceTypeAction.Request(originalConnectorId, serviceType);
}
@Override
protected UpdateConnectorServiceTypeAction.Request doParseInstance(XContentParser parser) throws IOException {
return UpdateConnectorServiceTypeAction.Request.fromXContent(parser, this.connectorId);
}
@Override
protected UpdateConnectorServiceTypeAction.Request mutateInstanceForVersion(
UpdateConnectorServiceTypeAction.Request instance,
TransportVersion version
) {
return instance;
}
}
| UpdateConnectorServiceTypeActionRequestBWCSerializingTests |
java | redisson__redisson | redisson/src/main/java/org/redisson/api/RListAsync.java | {
"start": 852,
"end": 6241
} | interface ____<V> extends RCollectionAsync<V>, RSortableAsync<List<V>>, RandomAccess {
/**
* Loads elements by specified <code>indexes</code>
*
* @param indexes of elements
* @return elements
*/
RFuture<List<V>> getAsync(int... indexes);
/**
* Inserts <code>element</code> after <code>elementToFind</code>
*
* @param elementToFind - object to find
* @param element - object to add
* @return new list size
*/
RFuture<Integer> addAfterAsync(V elementToFind, V element);
/**
* Inserts <code>element</code> before <code>elementToFind</code>
*
* @param elementToFind - object to find
* @param element - object to add
* @return new list size
*/
RFuture<Integer> addBeforeAsync(V elementToFind, V element);
/**
* Inserts <code>element</code> at <code>index</code>.
* Subsequent elements are shifted.
*
* @param index - index number
* @param element - element to insert
* @return {@code true} if list was changed
*/
RFuture<Boolean> addAsync(int index, V element);
/**
* Inserts <code>elements</code> at <code>index</code>.
* Subsequent elements are shifted.
*
* @param index - index number
* @param elements - elements to insert
* @return {@code true} if list changed
* or {@code false} if element isn't found
*/
RFuture<Boolean> addAllAsync(int index, Collection<? extends V> elements);
/**
* Returns last index of <code>element</code> or
* -1 if element isn't found
*
* @param element to find
* @return index of -1 if element isn't found
*/
RFuture<Integer> lastIndexOfAsync(Object element);
/**
* Returns last index of <code>element</code> or
* -1 if element isn't found
*
* @param element to find
* @return index of -1 if element isn't found
*/
RFuture<Integer> indexOfAsync(Object element);
/**
* Set <code>element</code> at <code>index</code>.
* Works faster than {@link #setAsync(int, Object)} but
* doesn't return previous element.
*
* @param index - index of object
* @param element - object
* @return void
*/
RFuture<Void> fastSetAsync(int index, V element);
/**
* Set <code>element</code> at <code>index</code> and returns previous element.
*
* @param index - index of object
* @param element - object
* @return previous element or <code>null</code> if element wasn't set.
*/
RFuture<V> setAsync(int index, V element);
/**
* Get element at <code>index</code>
*
* @param index - index of object
* @return element
*/
RFuture<V> getAsync(int index);
/**
* Read all elements at once
*
* @return list of values
*/
RFuture<List<V>> readAllAsync();
/**
* Trim list and remains elements only in specified range
* <code>fromIndex</code>, inclusive, and <code>toIndex</code>, inclusive.
*
* @param fromIndex - from index
* @param toIndex - to index
* @return void
*/
RFuture<Void> trimAsync(int fromIndex, int toIndex);
/**
* Removes element at <code>index</code>.
* Works faster than {@link #removeAsync(Object, int)} but
* doesn't return element.
*
* @param index - index of object
* @return void
*/
RFuture<Void> fastRemoveAsync(int index);
/**
* Removes element at <code>index</code>.
*
* @param index - index of object
* @return element or <code>null</code> if element wasn't set.
*/
RFuture<V> removeAsync(int index);
/**
* Removes up to <code>count</code> occurrences of <code>element</code>
*
* @param element - element to find
* @param count - amount occurrences
* @return {@code true} if at least one element removed;
* or {@code false} if element isn't found
*/
RFuture<Boolean> removeAsync(Object element, int count);
/**
* Returns range of values from 0 index to <code>toIndex</code>. Indexes are zero based.
* <code>-1</code> means the last element, <code>-2</code> means penultimate and so on.
*
* @param toIndex - end index
* @return elements
*/
RFuture<List<V>> rangeAsync(int toIndex);
/**
* Returns range of values from <code>fromIndex</code> to <code>toIndex</code> index including.
* Indexes are zero based. <code>-1</code> means the last element, <code>-2</code> means penultimate and so on.
*
* @param fromIndex - start index
* @param toIndex - end index
* @return elements
*/
RFuture<List<V>> rangeAsync(int fromIndex, int toIndex);
/**
* Adds object event listener
*
* @see org.redisson.api.ExpiredObjectListener
* @see org.redisson.api.DeletedObjectListener
* @see org.redisson.api.listener.ListAddListener
* @see org.redisson.api.listener.ListInsertListener
* @see org.redisson.api.listener.ListSetListener
* @see org.redisson.api.listener.ListRemoveListener
* @see org.redisson.api.listener.ListTrimListener
*
* @param listener - object event listener
* @return listener id
*/
RFuture<Integer> addListenerAsync(ObjectListener listener);
}
| RListAsync |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/IBMCOSEndpointBuilderFactory.java | {
"start": 51022,
"end": 54691
} | interface ____ extends EndpointProducerBuilder {
default IBMCOSEndpointProducerBuilder basic() {
return (IBMCOSEndpointProducerBuilder) this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default AdvancedIBMCOSEndpointProducerBuilder lazyStartProducer(boolean lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default AdvancedIBMCOSEndpointProducerBuilder lazyStartProducer(String lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* Reference to an IBM COS Client instance in the registry.
*
* The option is a:
* <code>com.ibm.cloud.objectstorage.services.s3.AmazonS3</code> type.
*
* Group: advanced
*
* @param cosClient the value to set
* @return the dsl builder
*/
default AdvancedIBMCOSEndpointProducerBuilder cosClient(com.ibm.cloud.objectstorage.services.s3.AmazonS3 cosClient) {
doSetProperty("cosClient", cosClient);
return this;
}
/**
* Reference to an IBM COS Client instance in the registry.
*
* The option will be converted to a
* <code>com.ibm.cloud.objectstorage.services.s3.AmazonS3</code> type.
*
* Group: advanced
*
* @param cosClient the value to set
* @return the dsl builder
*/
default AdvancedIBMCOSEndpointProducerBuilder cosClient(String cosClient) {
doSetProperty("cosClient", cosClient);
return this;
}
}
/**
* Builder for endpoint for the IBM Cloud Object Storage component.
*/
public | AdvancedIBMCOSEndpointProducerBuilder |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/BrowseEndpointBuilderFactory.java | {
"start": 1948,
"end": 9998
} | interface ____
extends
EndpointConsumerBuilder {
default BrowseEndpointConsumerBuilder basic() {
return (BrowseEndpointConsumerBuilder) this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions (if possible) occurred while the Camel
* consumer is trying to pickup incoming messages, or the likes, will
* now be processed as a message and handled by the routing Error
* Handler. Important: This is only possible if the 3rd party component
* allows Camel to be alerted if an exception was thrown. Some
* components handle this internally only, and therefore
* bridgeErrorHandler is not possible. In other situations we may
* improve the Camel component to hook into the 3rd party component and
* make this possible for future releases. By default the consumer will
* use the org.apache.camel.spi.ExceptionHandler to deal with
* exceptions, that will be logged at WARN or ERROR level and ignored.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer (advanced)
*
* @param bridgeErrorHandler the value to set
* @return the dsl builder
*/
default AdvancedBrowseEndpointConsumerBuilder bridgeErrorHandler(boolean bridgeErrorHandler) {
doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions (if possible) occurred while the Camel
* consumer is trying to pickup incoming messages, or the likes, will
* now be processed as a message and handled by the routing Error
* Handler. Important: This is only possible if the 3rd party component
* allows Camel to be alerted if an exception was thrown. Some
* components handle this internally only, and therefore
* bridgeErrorHandler is not possible. In other situations we may
* improve the Camel component to hook into the 3rd party component and
* make this possible for future releases. By default the consumer will
* use the org.apache.camel.spi.ExceptionHandler to deal with
* exceptions, that will be logged at WARN or ERROR level and ignored.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: consumer (advanced)
*
* @param bridgeErrorHandler the value to set
* @return the dsl builder
*/
default AdvancedBrowseEndpointConsumerBuilder bridgeErrorHandler(String bridgeErrorHandler) {
doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* To let the consumer use a custom ExceptionHandler. Notice if the
* option bridgeErrorHandler is enabled then this option is not in use.
* By default the consumer will deal with exceptions, that will be
* logged at WARN or ERROR level and ignored.
*
* The option is a: <code>org.apache.camel.spi.ExceptionHandler</code>
* type.
*
* Group: consumer (advanced)
*
* @param exceptionHandler the value to set
* @return the dsl builder
*/
default AdvancedBrowseEndpointConsumerBuilder exceptionHandler(org.apache.camel.spi.ExceptionHandler exceptionHandler) {
doSetProperty("exceptionHandler", exceptionHandler);
return this;
}
/**
* To let the consumer use a custom ExceptionHandler. Notice if the
* option bridgeErrorHandler is enabled then this option is not in use.
* By default the consumer will deal with exceptions, that will be
* logged at WARN or ERROR level and ignored.
*
* The option will be converted to a
* <code>org.apache.camel.spi.ExceptionHandler</code> type.
*
* Group: consumer (advanced)
*
* @param exceptionHandler the value to set
* @return the dsl builder
*/
default AdvancedBrowseEndpointConsumerBuilder exceptionHandler(String exceptionHandler) {
doSetProperty("exceptionHandler", exceptionHandler);
return this;
}
/**
* Sets the exchange pattern when the consumer creates an exchange.
*
* The option is a: <code>org.apache.camel.ExchangePattern</code> type.
*
* Group: consumer (advanced)
*
* @param exchangePattern the value to set
* @return the dsl builder
*/
default AdvancedBrowseEndpointConsumerBuilder exchangePattern(org.apache.camel.ExchangePattern exchangePattern) {
doSetProperty("exchangePattern", exchangePattern);
return this;
}
/**
* Sets the exchange pattern when the consumer creates an exchange.
*
* The option will be converted to a
* <code>org.apache.camel.ExchangePattern</code> type.
*
* Group: consumer (advanced)
*
* @param exchangePattern the value to set
* @return the dsl builder
*/
default AdvancedBrowseEndpointConsumerBuilder exchangePattern(String exchangePattern) {
doSetProperty("exchangePattern", exchangePattern);
return this;
}
/**
* Maximum number of messages to keep in memory available for browsing.
* Use 0 for unlimited.
*
* The option is a: <code>int</code> type.
*
* Group: advanced
*
* @param browseLimit the value to set
* @return the dsl builder
*/
default AdvancedBrowseEndpointConsumerBuilder browseLimit(int browseLimit) {
doSetProperty("browseLimit", browseLimit);
return this;
}
/**
* Maximum number of messages to keep in memory available for browsing.
* Use 0 for unlimited.
*
* The option will be converted to a <code>int</code> type.
*
* Group: advanced
*
* @param browseLimit the value to set
* @return the dsl builder
*/
default AdvancedBrowseEndpointConsumerBuilder browseLimit(String browseLimit) {
doSetProperty("browseLimit", browseLimit);
return this;
}
/**
* To use a predicate to filter whether to include the message for
* browsing. Return true to include, false to exclude.
*
* The option is a:
* <code>java.util.function.Predicate<org.apache.camel.Exchange></code> type.
*
* Group: advanced
*
* @param filter the value to set
* @return the dsl builder
*/
default AdvancedBrowseEndpointConsumerBuilder filter(Predicate<org.apache.camel.Exchange> filter) {
doSetProperty("filter", filter);
return this;
}
/**
* To use a predicate to filter whether to include the message for
* browsing. Return true to include, false to exclude.
*
* The option will be converted to a
* <code>java.util.function.Predicate<org.apache.camel.Exchange></code> type.
*
* Group: advanced
*
* @param filter the value to set
* @return the dsl builder
*/
default AdvancedBrowseEndpointConsumerBuilder filter(String filter) {
doSetProperty("filter", filter);
return this;
}
}
/**
* Builder for endpoint producers for the Browse component.
*/
public | AdvancedBrowseEndpointConsumerBuilder |
java | elastic__elasticsearch | libs/entitlement/src/main/java/org/elasticsearch/entitlement/package-info.java | {
"start": 10063,
"end": 10439
} | class ____ to find the "layer" that hosts the class/module.
* Each layer may have a policy attached to it (1-1* relationship).
* </p>
* <p>
* This starts during Elasticsearch initialization ({@code initPhase2}), just after policies are parsed but before entitlements are
* initialized via {@link org.elasticsearch.entitlement.bootstrap.EntitlementBootstrap}, through a new | is |
java | spring-projects__spring-boot | module/spring-boot-micrometer-tracing-brave/src/test/java/org/springframework/boot/micrometer/tracing/brave/autoconfigure/CompositePropagationFactoryTests.java | {
"start": 5051,
"end": 5887
} | class ____ extends Propagation.Factory implements Propagation<String> {
private final String field;
private DummyPropagation(String field) {
this.field = field;
}
@Override
public Propagation<String> get() {
return this;
}
@Override
public List<String> keys() {
return List.of(this.field);
}
@Override
public <R> TraceContext.Injector<R> injector(Propagation.Setter<R, String> setter) {
return (traceContext, request) -> setter.put(request, this.field, this.field + "-value");
}
@Override
public <R> TraceContext.Extractor<R> extractor(Propagation.Getter<R, String> getter) {
return (request) -> {
TraceContext context = TraceContext.newBuilder().traceId(1).spanId(2).addExtra(this.field).build();
return TraceContextOrSamplingFlags.create(context);
};
}
}
}
| DummyPropagation |
java | spring-projects__spring-framework | spring-web/src/main/java/org/springframework/web/context/ContextLoader.java | {
"start": 3329,
"end": 4084
} | class ____ optionally
* load or obtain and hook up a shared parent context to the root application context.
* See the {@link #loadParentContext(ServletContext)} method for more information.
*
* <p>{@code ContextLoader} supports injecting the root web application context
* via the {@link #ContextLoader(WebApplicationContext)} constructor, allowing for
* programmatic configuration in Servlet initializers. See
* {@link org.springframework.web.WebApplicationInitializer} for usage examples.
*
* @author Juergen Hoeller
* @author Colin Sampaleanu
* @author Sam Brannen
* @since 17.02.2003
* @see ContextLoaderListener
* @see ConfigurableWebApplicationContext
* @see org.springframework.web.context.support.XmlWebApplicationContext
*/
public | can |
java | spring-projects__spring-framework | spring-webmvc/src/main/java/org/springframework/web/servlet/mvc/method/annotation/MvcUriComponentsBuilder.java | {
"start": 21119,
"end": 21413
} | class ____ via {@link #relativeTo}.
* @since 4.2
*/
public MethodArgumentBuilder withMappingName(String mappingName) {
return fromMappingName(this.baseUrl, mappingName);
}
/**
* An alternative to {@link #fromMethod(Class, Method, Object...)}
* for use with an instance of this | created |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/cluster/coordination/NoOpClusterApplier.java | {
"start": 809,
"end": 1278
} | class ____ implements ClusterApplier {
@Override
public void setInitialState(ClusterState initialState) {
}
@Override
public void onNewClusterState(String source, Supplier<ClusterState> clusterStateSupplier, ActionListener<Void> listener) {
listener.onResponse(null);
}
@Override
public ClusterApplierRecordingService.Stats getStats() {
return new ClusterApplierRecordingService.Stats(Map.of());
}
}
| NoOpClusterApplier |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/pool/postgres/PG_500_connection_Test.java | {
"start": 581,
"end": 2824
} | class ____ extends TestCase {
private DruidDataSource dataSource;
protected void setUp() throws Exception {
dataSource = new DruidDataSource();
dataSource.setUrl("jdbc:postgresql://192.168.199.231:5432/druid_test_db");
dataSource.setUsername("druid_test");
dataSource.setPassword("druid_test");
dataSource.setInitialSize(50);
dataSource.setMaxActive(80);
dataSource.setMinIdle(50);
dataSource.setMinEvictableIdleTimeMillis(300 * 1000); // 300 / 10
dataSource.setTimeBetweenEvictionRunsMillis(180 * 1000); // 180 / 10
dataSource.setTestWhileIdle(false);
dataSource.setTestOnBorrow(false);
// dataSource.setValidationQuery("SELECT 1 FROM DUAL");
dataSource.setFilters("stat");
}
protected void tearDown() throws Exception {
dataSource.close();
}
public void test_conect_500() throws Exception {
dataSource.init();
Assert.assertFalse(dataSource.isOracle());
Assert.assertTrue(dataSource.getValidConnectionChecker() instanceof PGValidConnectionChecker);
int taskCount = 1000 * 100;
final CountDownLatch endLatch = new CountDownLatch(taskCount);
Runnable task = new Runnable() {
@Override
public void run() {
Connection conn = null;
Statement stmt = null;
ResultSet rs = null;
try {
conn = dataSource.getConnection();
stmt = conn.createStatement();
rs = stmt.executeQuery("SELECT 1");
while (rs.next()) {
// Process result set rows
}
} catch (SQLException ex) {
// skip
} finally {
endLatch.countDown();
}
JdbcUtils.close(rs);
JdbcUtils.close(stmt);
JdbcUtils.close(conn);
}
};
ExecutorService executor = Executors.newFixedThreadPool(100);
for (int i = 0; i < taskCount; ++i) {
executor.submit(task);
}
endLatch.await();
}
}
| PG_500_connection_Test |
java | elastic__elasticsearch | x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoShapeDocValuesGroupingAggregatorFunction.java | {
"start": 1329,
"end": 13225
} | class ____ implements GroupingAggregatorFunction {
private static final List<IntermediateStateDesc> INTERMEDIATE_STATE_DESC = List.of(
new IntermediateStateDesc("top", ElementType.INT),
new IntermediateStateDesc("bottom", ElementType.INT),
new IntermediateStateDesc("negLeft", ElementType.INT),
new IntermediateStateDesc("negRight", ElementType.INT),
new IntermediateStateDesc("posLeft", ElementType.INT),
new IntermediateStateDesc("posRight", ElementType.INT) );
private final SpatialExtentGroupingStateWrappedLongitudeState state;
private final List<Integer> channels;
private final DriverContext driverContext;
public SpatialExtentGeoShapeDocValuesGroupingAggregatorFunction(List<Integer> channels,
SpatialExtentGroupingStateWrappedLongitudeState state, DriverContext driverContext) {
this.channels = channels;
this.state = state;
this.driverContext = driverContext;
}
public static SpatialExtentGeoShapeDocValuesGroupingAggregatorFunction create(
List<Integer> channels, DriverContext driverContext) {
return new SpatialExtentGeoShapeDocValuesGroupingAggregatorFunction(channels, SpatialExtentGeoShapeDocValuesAggregator.initGrouping(), driverContext);
}
public static List<IntermediateStateDesc> intermediateStateDesc() {
return INTERMEDIATE_STATE_DESC;
}
@Override
public int intermediateBlockCount() {
return INTERMEDIATE_STATE_DESC.size();
}
@Override
public GroupingAggregatorFunction.AddInput prepareProcessRawInputPage(SeenGroupIds seenGroupIds,
Page page) {
IntBlock valuesBlock = page.getBlock(channels.get(0));
maybeEnableGroupIdTracking(seenGroupIds, valuesBlock);
return new GroupingAggregatorFunction.AddInput() {
@Override
public void add(int positionOffset, IntArrayBlock groupIds) {
addRawInput(positionOffset, groupIds, valuesBlock);
}
@Override
public void add(int positionOffset, IntBigArrayBlock groupIds) {
addRawInput(positionOffset, groupIds, valuesBlock);
}
@Override
public void add(int positionOffset, IntVector groupIds) {
addRawInput(positionOffset, groupIds, valuesBlock);
}
@Override
public void close() {
}
};
}
private void addRawInput(int positionOffset, IntArrayBlock groups, IntBlock valuesBlock) {
for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
if (groups.isNull(groupPosition)) {
continue;
}
int valuesPosition = groupPosition + positionOffset;
int groupStart = groups.getFirstValueIndex(groupPosition);
int groupEnd = groupStart + groups.getValueCount(groupPosition);
for (int g = groupStart; g < groupEnd; g++) {
int groupId = groups.getInt(g);
SpatialExtentGeoShapeDocValuesAggregator.combine(state, groupId, valuesPosition, valuesBlock);
}
}
}
@Override
public void addIntermediateInput(int positionOffset, IntArrayBlock groups, Page page) {
state.enableGroupIdTracking(new SeenGroupIds.Empty());
assert channels.size() == intermediateBlockCount();
Block topUncast = page.getBlock(channels.get(0));
if (topUncast.areAllValuesNull()) {
return;
}
IntVector top = ((IntBlock) topUncast).asVector();
Block bottomUncast = page.getBlock(channels.get(1));
if (bottomUncast.areAllValuesNull()) {
return;
}
IntVector bottom = ((IntBlock) bottomUncast).asVector();
Block negLeftUncast = page.getBlock(channels.get(2));
if (negLeftUncast.areAllValuesNull()) {
return;
}
IntVector negLeft = ((IntBlock) negLeftUncast).asVector();
Block negRightUncast = page.getBlock(channels.get(3));
if (negRightUncast.areAllValuesNull()) {
return;
}
IntVector negRight = ((IntBlock) negRightUncast).asVector();
Block posLeftUncast = page.getBlock(channels.get(4));
if (posLeftUncast.areAllValuesNull()) {
return;
}
IntVector posLeft = ((IntBlock) posLeftUncast).asVector();
Block posRightUncast = page.getBlock(channels.get(5));
if (posRightUncast.areAllValuesNull()) {
return;
}
IntVector posRight = ((IntBlock) posRightUncast).asVector();
assert top.getPositionCount() == bottom.getPositionCount() && top.getPositionCount() == negLeft.getPositionCount() && top.getPositionCount() == negRight.getPositionCount() && top.getPositionCount() == posLeft.getPositionCount() && top.getPositionCount() == posRight.getPositionCount();
for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
if (groups.isNull(groupPosition)) {
continue;
}
int groupStart = groups.getFirstValueIndex(groupPosition);
int groupEnd = groupStart + groups.getValueCount(groupPosition);
for (int g = groupStart; g < groupEnd; g++) {
int groupId = groups.getInt(g);
int valuesPosition = groupPosition + positionOffset;
SpatialExtentGeoShapeDocValuesAggregator.combineIntermediate(state, groupId, top.getInt(valuesPosition), bottom.getInt(valuesPosition), negLeft.getInt(valuesPosition), negRight.getInt(valuesPosition), posLeft.getInt(valuesPosition), posRight.getInt(valuesPosition));
}
}
}
private void addRawInput(int positionOffset, IntBigArrayBlock groups, IntBlock valuesBlock) {
for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
if (groups.isNull(groupPosition)) {
continue;
}
int valuesPosition = groupPosition + positionOffset;
int groupStart = groups.getFirstValueIndex(groupPosition);
int groupEnd = groupStart + groups.getValueCount(groupPosition);
for (int g = groupStart; g < groupEnd; g++) {
int groupId = groups.getInt(g);
SpatialExtentGeoShapeDocValuesAggregator.combine(state, groupId, valuesPosition, valuesBlock);
}
}
}
@Override
public void addIntermediateInput(int positionOffset, IntBigArrayBlock groups, Page page) {
state.enableGroupIdTracking(new SeenGroupIds.Empty());
assert channels.size() == intermediateBlockCount();
Block topUncast = page.getBlock(channels.get(0));
if (topUncast.areAllValuesNull()) {
return;
}
IntVector top = ((IntBlock) topUncast).asVector();
Block bottomUncast = page.getBlock(channels.get(1));
if (bottomUncast.areAllValuesNull()) {
return;
}
IntVector bottom = ((IntBlock) bottomUncast).asVector();
Block negLeftUncast = page.getBlock(channels.get(2));
if (negLeftUncast.areAllValuesNull()) {
return;
}
IntVector negLeft = ((IntBlock) negLeftUncast).asVector();
Block negRightUncast = page.getBlock(channels.get(3));
if (negRightUncast.areAllValuesNull()) {
return;
}
IntVector negRight = ((IntBlock) negRightUncast).asVector();
Block posLeftUncast = page.getBlock(channels.get(4));
if (posLeftUncast.areAllValuesNull()) {
return;
}
IntVector posLeft = ((IntBlock) posLeftUncast).asVector();
Block posRightUncast = page.getBlock(channels.get(5));
if (posRightUncast.areAllValuesNull()) {
return;
}
IntVector posRight = ((IntBlock) posRightUncast).asVector();
assert top.getPositionCount() == bottom.getPositionCount() && top.getPositionCount() == negLeft.getPositionCount() && top.getPositionCount() == negRight.getPositionCount() && top.getPositionCount() == posLeft.getPositionCount() && top.getPositionCount() == posRight.getPositionCount();
for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
if (groups.isNull(groupPosition)) {
continue;
}
int groupStart = groups.getFirstValueIndex(groupPosition);
int groupEnd = groupStart + groups.getValueCount(groupPosition);
for (int g = groupStart; g < groupEnd; g++) {
int groupId = groups.getInt(g);
int valuesPosition = groupPosition + positionOffset;
SpatialExtentGeoShapeDocValuesAggregator.combineIntermediate(state, groupId, top.getInt(valuesPosition), bottom.getInt(valuesPosition), negLeft.getInt(valuesPosition), negRight.getInt(valuesPosition), posLeft.getInt(valuesPosition), posRight.getInt(valuesPosition));
}
}
}
private void addRawInput(int positionOffset, IntVector groups, IntBlock valuesBlock) {
for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
int valuesPosition = groupPosition + positionOffset;
int groupId = groups.getInt(groupPosition);
SpatialExtentGeoShapeDocValuesAggregator.combine(state, groupId, valuesPosition, valuesBlock);
}
}
@Override
public void addIntermediateInput(int positionOffset, IntVector groups, Page page) {
state.enableGroupIdTracking(new SeenGroupIds.Empty());
assert channels.size() == intermediateBlockCount();
Block topUncast = page.getBlock(channels.get(0));
if (topUncast.areAllValuesNull()) {
return;
}
IntVector top = ((IntBlock) topUncast).asVector();
Block bottomUncast = page.getBlock(channels.get(1));
if (bottomUncast.areAllValuesNull()) {
return;
}
IntVector bottom = ((IntBlock) bottomUncast).asVector();
Block negLeftUncast = page.getBlock(channels.get(2));
if (negLeftUncast.areAllValuesNull()) {
return;
}
IntVector negLeft = ((IntBlock) negLeftUncast).asVector();
Block negRightUncast = page.getBlock(channels.get(3));
if (negRightUncast.areAllValuesNull()) {
return;
}
IntVector negRight = ((IntBlock) negRightUncast).asVector();
Block posLeftUncast = page.getBlock(channels.get(4));
if (posLeftUncast.areAllValuesNull()) {
return;
}
IntVector posLeft = ((IntBlock) posLeftUncast).asVector();
Block posRightUncast = page.getBlock(channels.get(5));
if (posRightUncast.areAllValuesNull()) {
return;
}
IntVector posRight = ((IntBlock) posRightUncast).asVector();
assert top.getPositionCount() == bottom.getPositionCount() && top.getPositionCount() == negLeft.getPositionCount() && top.getPositionCount() == negRight.getPositionCount() && top.getPositionCount() == posLeft.getPositionCount() && top.getPositionCount() == posRight.getPositionCount();
for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
int groupId = groups.getInt(groupPosition);
int valuesPosition = groupPosition + positionOffset;
SpatialExtentGeoShapeDocValuesAggregator.combineIntermediate(state, groupId, top.getInt(valuesPosition), bottom.getInt(valuesPosition), negLeft.getInt(valuesPosition), negRight.getInt(valuesPosition), posLeft.getInt(valuesPosition), posRight.getInt(valuesPosition));
}
}
private void maybeEnableGroupIdTracking(SeenGroupIds seenGroupIds, IntBlock valuesBlock) {
if (valuesBlock.mayHaveNulls()) {
state.enableGroupIdTracking(seenGroupIds);
}
}
@Override
public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) {
state.enableGroupIdTracking(seenGroupIds);
}
@Override
public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) {
state.toIntermediate(blocks, offset, selected, driverContext);
}
@Override
public void evaluateFinal(Block[] blocks, int offset, IntVector selected,
GroupingAggregatorEvaluationContext ctx) {
blocks[offset] = SpatialExtentGeoShapeDocValuesAggregator.evaluateFinal(state, selected, ctx);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(getClass().getSimpleName()).append("[");
sb.append("channels=").append(channels);
sb.append("]");
return sb.toString();
}
@Override
public void close() {
state.close();
}
}
| SpatialExtentGeoShapeDocValuesGroupingAggregatorFunction |
java | spring-projects__spring-boot | core/spring-boot/src/main/java/org/springframework/boot/logging/LoggingInitializationContext.java | {
"start": 951,
"end": 1489
} | class ____ {
private final @Nullable ConfigurableEnvironment environment;
/**
* Create a new {@link LoggingInitializationContext} instance.
* @param environment the Spring environment.
*/
public LoggingInitializationContext(@Nullable ConfigurableEnvironment environment) {
this.environment = environment;
}
/**
* Return the Spring environment if available.
* @return the {@link Environment} or {@code null}
*/
public @Nullable Environment getEnvironment() {
return this.environment;
}
}
| LoggingInitializationContext |
java | elastic__elasticsearch | x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/completion/GoogleVertexAiChatCompletionTaskSettings.java | {
"start": 1187,
"end": 5889
} | class ____ implements TaskSettings {
public static final String NAME = "google_vertex_ai_chatcompletion_task_settings";
private static final TransportVersion GEMINI_THINKING_BUDGET_ADDED = TransportVersion.fromName("gemini_thinking_budget_added");
private final ThinkingConfig thinkingConfig;
private final Integer maxTokens;
public static final GoogleVertexAiChatCompletionTaskSettings EMPTY_SETTINGS = new GoogleVertexAiChatCompletionTaskSettings();
private static final ThinkingConfig EMPTY_THINKING_CONFIG = new ThinkingConfig();
public GoogleVertexAiChatCompletionTaskSettings() {
this.thinkingConfig = EMPTY_THINKING_CONFIG;
this.maxTokens = null;
}
public GoogleVertexAiChatCompletionTaskSettings(ThinkingConfig thinkingConfig, @Nullable Integer maxTokens) {
this.thinkingConfig = Objects.requireNonNullElse(thinkingConfig, EMPTY_THINKING_CONFIG);
this.maxTokens = maxTokens;
}
public GoogleVertexAiChatCompletionTaskSettings(StreamInput in) throws IOException {
thinkingConfig = new ThinkingConfig(in);
TransportVersion version = in.getTransportVersion();
if (GoogleVertexAiUtils.supportsModelGarden(version)) {
maxTokens = in.readOptionalVInt();
} else {
maxTokens = null;
}
}
public static GoogleVertexAiChatCompletionTaskSettings fromMap(Map<String, Object> taskSettings) {
ValidationException validationException = new ValidationException();
// Extract optional thinkingConfig settings
ThinkingConfig thinkingConfig = ThinkingConfig.fromMap(taskSettings, validationException);
// Extract optional maxTokens setting
Integer maxTokens = extractOptionalPositiveInteger(
taskSettings,
MAX_TOKENS,
ModelConfigurations.TASK_SETTINGS,
validationException
);
if (validationException.validationErrors().isEmpty() == false) {
throw validationException;
}
return new GoogleVertexAiChatCompletionTaskSettings(thinkingConfig, maxTokens);
}
public static GoogleVertexAiChatCompletionTaskSettings of(
GoogleVertexAiChatCompletionTaskSettings originalTaskSettings,
GoogleVertexAiChatCompletionTaskSettings newTaskSettings
) {
ThinkingConfig thinkingConfig = newTaskSettings.thinkingConfig().isEmpty()
? originalTaskSettings.thinkingConfig()
: newTaskSettings.thinkingConfig();
Integer maxTokens = Objects.requireNonNullElse(newTaskSettings.maxTokens(), originalTaskSettings.maxTokens());
return new GoogleVertexAiChatCompletionTaskSettings(thinkingConfig, maxTokens);
}
public ThinkingConfig thinkingConfig() {
return thinkingConfig;
}
public Integer maxTokens() {
return maxTokens;
}
@Override
public boolean isEmpty() {
return thinkingConfig.isEmpty() && Objects.isNull(maxTokens);
}
@Override
public TaskSettings updatedTaskSettings(Map<String, Object> newSettings) {
GoogleVertexAiChatCompletionTaskSettings newTaskSettings = GoogleVertexAiChatCompletionTaskSettings.fromMap(
new HashMap<>(newSettings)
);
return GoogleVertexAiChatCompletionTaskSettings.of(this, newTaskSettings);
}
@Override
public String getWriteableName() {
return NAME;
}
@Override
public TransportVersion getMinimalSupportedVersion() {
return GEMINI_THINKING_BUDGET_ADDED;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
thinkingConfig.writeTo(out);
if (GoogleVertexAiUtils.supportsModelGarden(out.getTransportVersion())) {
out.writeOptionalVInt(maxTokens);
}
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
thinkingConfig.toXContent(builder, params);
builder.field(MAX_TOKENS, maxTokens);
builder.endObject();
return builder;
}
@Override
public boolean equals(Object o) {
if (o == null || getClass() != o.getClass()) return false;
GoogleVertexAiChatCompletionTaskSettings that = (GoogleVertexAiChatCompletionTaskSettings) o;
return Objects.equals(thinkingConfig, that.thinkingConfig) && Objects.equals(maxTokens, that.maxTokens);
}
@Override
public int hashCode() {
return Objects.hash(thinkingConfig, maxTokens);
}
@Override
public String toString() {
return Strings.toString(this);
}
}
| GoogleVertexAiChatCompletionTaskSettings |
java | apache__flink | flink-table/flink-table-api-java/src/main/java/org/apache/flink/table/operations/OperationUtils.java | {
"start": 1438,
"end": 6582
} | class ____ {
private static final String OPERATION_INDENT = " ";
/**
* Increases indentation for description of string of child {@link Operation}. The input can
* already contain indentation. This will increase all the indentations by one level.
*
* @param item result of {@link Operation#asSummaryString()}
* @return string with increased indentation
*/
public static String indent(String item) {
// '([^']|'')*': Matches the escape sequence "'...'" where the content between "'"
// characters can contain anything except "'" unless its doubled ('').
//
// Then each match is checked. If it starts with "'", it's left unchanged
// (escaped sequence). Otherwise, it replaces newlines within the match with indent.
Pattern pattern = Pattern.compile("('([^']|'')*')|\\n");
Matcher matcher = pattern.matcher(item);
StringBuffer output = new StringBuffer();
while (matcher.find()) {
final String group = matcher.group();
if (group.startsWith("'")) {
matcher.appendReplacement(output, Matcher.quoteReplacement(group));
} else {
String replaced = group.replaceAll("\n", "\n" + OPERATION_INDENT);
matcher.appendReplacement(output, Matcher.quoteReplacement(replaced));
}
}
matcher.appendTail(output);
return "\n" + OPERATION_INDENT + output;
}
/**
* Formats a Tree of {@link Operation} in a unified way. It prints all the parameters and adds
* all children formatted and properly indented in the following lines.
*
* <p>The format is
*
* <pre>{@code
* <operationName>: [(key1: [value1], key2: [v1, v2])]
* <child1>
* <child2>
* <child3>
* }</pre>
*
* @param operationName The operation name.
* @param parameters The operation's parameters.
* @param children The operation's children.
* @param childToString The function to convert child to String.
* @param <T> The type of the child.
* @return String representation of the given operation.
*/
public static <T extends Operation> String formatWithChildren(
String operationName,
Map<String, Object> parameters,
List<T> children,
Function<T, String> childToString) {
String description =
parameters.entrySet().stream()
.map(entry -> formatParameter(entry.getKey(), entry.getValue()))
.collect(Collectors.joining(", "));
final StringBuilder stringBuilder = new StringBuilder();
stringBuilder.append(operationName).append(":");
if (!StringUtils.isNullOrWhitespaceOnly(description)) {
stringBuilder.append(" (").append(description).append(")");
}
String childrenDescription =
children.stream()
.map(child -> OperationUtils.indent(childToString.apply(child)))
.collect(Collectors.joining());
return stringBuilder.append(childrenDescription).toString();
}
public static String formatSelectColumns(ResolvedSchema schema, @Nullable String inputAlias) {
return schema.getColumnNames().stream()
.map(
i -> {
if (inputAlias == null) {
return EncodingUtils.escapeIdentifier(i);
}
return String.format(
"%s.%s",
EncodingUtils.escapeIdentifier(inputAlias),
EncodingUtils.escapeIdentifier(i));
})
.collect(Collectors.joining(", "));
}
public static String formatParameter(String name, Object value) {
final StringBuilder stringBuilder = new StringBuilder();
stringBuilder.append(name);
stringBuilder.append(": ");
if (value.getClass().isArray()) {
stringBuilder.append(Arrays.toString((Object[]) value));
} else if (value instanceof Collection) {
stringBuilder.append(value);
} else {
stringBuilder.append("[").append(value).append("]");
}
return stringBuilder.toString();
}
public static String formatProperties(Map<String, String> properties) {
return properties.entrySet().stream()
.map(entry -> formatParameter(entry.getKey(), entry.getValue()))
.collect(Collectors.joining(", "));
}
public static String formatPartitionSpec(CatalogPartitionSpec spec) {
return formatPartitionSpec(spec.getPartitionSpec());
}
public static String formatPartitionSpec(Map<String, String> spec) {
return spec.entrySet().stream()
.map(entry -> entry.getKey() + "=" + entry.getValue())
.collect(Collectors.joining(", "));
}
private OperationUtils() {}
}
| OperationUtils |
java | apache__kafka | connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectorConfig.java | {
"start": 46409,
"end": 46813
} | class ____ implements ConfigDef.Validator {
@Override
public void ensureValid(String name, Object value) {
try {
PluginUtils.connectorVersionRequirement((String) value);
} catch (InvalidVersionSpecificationException e) {
throw new VersionedPluginLoadingException(e.getMessage());
}
}
}
}
| PluginVersionValidator |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/node/ReportingService.java | {
"start": 690,
"end": 746
} | interface ____ extends Writeable, ToXContent {
}
}
| Info |
java | reactor__reactor-core | reactor-core/src/main/java/reactor/core/publisher/MonoElapsed.java | {
"start": 906,
"end": 1511
} | class ____<T> extends InternalMonoOperator<T, Tuple2<Long, T>> implements Fuseable {
final Scheduler scheduler;
MonoElapsed(Mono<T> source, Scheduler scheduler) {
super(source);
this.scheduler = scheduler;
}
@Override
public CoreSubscriber<? super T> subscribeOrReturn(CoreSubscriber<? super Tuple2<Long, T>> actual) {
return new FluxElapsed.ElapsedSubscriber<T>(actual, scheduler);
}
@Override
public @Nullable Object scanUnsafe(Attr key) {
if (key == Attr.RUN_ON) return scheduler;
if (key == Attr.RUN_STYLE) return Attr.RunStyle.SYNC;
return super.scanUnsafe(key);
}
}
| MonoElapsed |
java | spring-projects__spring-boot | module/spring-boot-batch-jdbc/src/main/java/org/springframework/boot/batch/jdbc/autoconfigure/BatchJdbcAutoConfiguration.java | {
"start": 7631,
"end": 8052
} | class ____ {
@Bean
@ConditionalOnMissingBean
BatchDataSourceScriptDatabaseInitializer batchDataSourceInitializer(DataSource dataSource,
@BatchDataSource ObjectProvider<DataSource> batchDataSource, BatchJdbcProperties properties) {
return new BatchDataSourceScriptDatabaseInitializer(batchDataSource.getIfAvailable(() -> dataSource),
properties);
}
}
static | DataSourceInitializerConfiguration |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/CachingStrategy.java | {
"start": 1318,
"end": 2245
} | class ____ {
private Boolean dropBehind;
private Long readahead;
public Builder(CachingStrategy prev) {
this.dropBehind = prev.dropBehind;
this.readahead = prev.readahead;
}
public Builder setDropBehind(Boolean dropBehind) {
this.dropBehind = dropBehind;
return this;
}
public Builder setReadahead(Long readahead) {
this.readahead = readahead;
return this;
}
public CachingStrategy build() {
return new CachingStrategy(dropBehind, readahead);
}
}
public CachingStrategy(Boolean dropBehind, Long readahead) {
this.dropBehind = dropBehind;
this.readahead = readahead;
}
public Boolean getDropBehind() {
return dropBehind;
}
public Long getReadahead() {
return readahead;
}
public String toString() {
return "CachingStrategy(dropBehind=" + dropBehind +
", readahead=" + readahead + ")";
}
}
| Builder |
java | spring-projects__spring-framework | spring-jms/src/main/java/org/springframework/jms/core/BrowserCallback.java | {
"start": 1261,
"end": 1782
} | interface ____<T> {
/**
* Perform operations on the given {@link jakarta.jms.Session} and
* {@link jakarta.jms.QueueBrowser}.
* @param session the JMS {@code Session} object to use
* @param browser the JMS {@code QueueBrowser} object to use
* @return a result object from working with the {@code Session}, if any
* (or {@code null} if none)
* @throws jakarta.jms.JMSException if thrown by JMS API methods
*/
@Nullable T doInJms(Session session, QueueBrowser browser) throws JMSException;
}
| BrowserCallback |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/jsontype/TestTypedDeserialization.java | {
"start": 2746,
"end": 2827
} | class ____ {
protected DummyBase(boolean foo) { }
}
static | DummyBase |
java | assertj__assertj-core | assertj-core/src/main/java/org/assertj/core/error/ShouldHaveFileSystem.java | {
"start": 904,
"end": 1396
} | class ____ extends BasicErrorMessageFactory {
private static final String PATH_SHOULD_HAVE_FILE_SYSTEM = "%nExpecting path:%n %s%nto have file system:%n %s";
public static ErrorMessageFactory shouldHaveFileSystem(final Path actual, final FileSystem fileSystem) {
return new ShouldHaveFileSystem(actual, fileSystem);
}
private ShouldHaveFileSystem(final Path actual, final FileSystem expected) {
super(PATH_SHOULD_HAVE_FILE_SYSTEM, actual, expected);
}
}
| ShouldHaveFileSystem |
java | junit-team__junit5 | junit-jupiter-api/src/main/java/org/junit/jupiter/api/DisplayNameGenerator.java | {
"start": 15526,
"end": 16630
} | class ____ also configured to use the IndicativeSentences generator.
boolean buildPrefix = findDisplayNameGeneration(enclosingClass, remainingEnclosingInstanceTypes)//
.map(DisplayNameGeneration::value)//
.filter(IndicativeSentences.class::equals)//
.isPresent();
String prefix = (buildPrefix
? getSentenceBeginning(enclosingClass, remainingEnclosingInstanceTypes)
+ getFragmentSeparator(testClass, enclosingInstanceTypes)
: "");
return prefix + (sentenceFragment != null ? sentenceFragment
: getGeneratorFor(testClass, enclosingInstanceTypes).generateDisplayNameForNestedClass(
remainingEnclosingInstanceTypes, testClass));
}
/**
* Get the sentence fragment separator.
*
* <p>If {@link IndicativeSentencesGeneration @IndicativeSentencesGeneration}
* is present (searching enclosing classes if not found locally), the
* configured {@link IndicativeSentencesGeneration#separator() separator}
* will be used. Otherwise, {@link IndicativeSentencesGeneration#DEFAULT_SEPARATOR}
* will be used.
*
* @param testClass the test | is |
java | alibaba__nacos | api/src/main/java/com/alibaba/nacos/api/naming/listener/AbstractFuzzyWatchEventWatcher.java | {
"start": 843,
"end": 1212
} | class ____ implements FuzzyWatchEventWatcher, FuzzyWatchLoadWatcher {
@Override
public Executor getExecutor() {
return null;
}
@Override
public void onPatternOverLimit() {
//do nothing default
}
@Override
public void onServiceReachUpLimit() {
//do nothing default
}
}
| AbstractFuzzyWatchEventWatcher |
java | apache__dubbo | dubbo-common/src/test/java/org/apache/dubbo/common/utils/RegexPropertiesTest.java | {
"start": 919,
"end": 2234
} | class ____ {
@Test
void testGetProperty() {
RegexProperties regexProperties = new RegexProperties();
regexProperties.setProperty("org.apache.dubbo.provider.*", "http://localhost:20880");
regexProperties.setProperty("org.apache.dubbo.provider.config.*", "http://localhost:30880");
regexProperties.setProperty("org.apache.dubbo.provider.config.demo", "http://localhost:40880");
regexProperties.setProperty("org.apache.dubbo.consumer.*.demo", "http://localhost:50880");
regexProperties.setProperty("*.service", "http://localhost:60880");
Assertions.assertEquals(
"http://localhost:20880", regexProperties.getProperty("org.apache.dubbo.provider.cluster"));
Assertions.assertEquals(
"http://localhost:30880", regexProperties.getProperty("org.apache.dubbo.provider.config.cluster"));
Assertions.assertEquals(
"http://localhost:40880", regexProperties.getProperty("org.apache.dubbo.provider.config.demo"));
Assertions.assertEquals(
"http://localhost:50880", regexProperties.getProperty("org.apache.dubbo.consumer.service.demo"));
Assertions.assertEquals("http://localhost:60880", regexProperties.getProperty("org.apache.dubbo.service"));
}
}
| RegexPropertiesTest |
java | micronaut-projects__micronaut-core | core-processor/src/main/java/io/micronaut/inject/writer/AbstractBeanDefinitionBuilder.java | {
"start": 37962,
"end": 43550
} | class ____ extends InternalBeanElement<MethodElement> implements BeanMethodElement {
private final MethodElement methodElement;
private final boolean requiresReflection;
private BeanParameterElement[] beanParameters;
private InternalBeanElementMethod(MethodElement methodElement, boolean requiresReflection) {
this(methodElement, requiresReflection, initBeanParameters(methodElement.getParameters()));
}
private InternalBeanElementMethod(MethodElement methodElement,
boolean requiresReflection,
BeanParameterElement[] beanParameters) {
super(methodElement, MutableAnnotationMetadata.of(methodElement.getAnnotationMetadata().getDeclaredMetadata()));
this.methodElement = methodElement;
this.requiresReflection = requiresReflection;
this.beanParameters = beanParameters;
}
@Override
public boolean isReflectionRequired() {
return requiresReflection;
}
@Override
public boolean isReflectionRequired(ClassElement callingType) {
return requiresReflection;
}
@Override
public boolean isPackagePrivate() {
return methodElement.isPackagePrivate();
}
@Override
public boolean isAbstract() {
return methodElement.isAbstract();
}
@Override
public boolean isStatic() {
return methodElement.isStatic();
}
@Override
public boolean isPrivate() {
return methodElement.isPrivate();
}
@Override
public boolean isFinal() {
return methodElement.isFinal();
}
@Override
public boolean isSuspend() {
return methodElement.isSuspend();
}
@Override
public boolean isDefault() {
return methodElement.isDefault();
}
@Override
public boolean isProtected() {
return methodElement.isProtected();
}
@Override
public boolean isPublic() {
return methodElement.isPublic();
}
@NonNull
@Override
public BeanMethodElement executable() {
if (!AbstractBeanDefinitionBuilder.this.executableMethods.contains(this)) {
AbstractBeanDefinitionBuilder.this.executableMethods.add(this);
}
return BeanMethodElement.super.executable();
}
@Override
public BeanMethodElement intercept(AnnotationValue<?>... annotationValue) {
if (!AbstractBeanDefinitionBuilder.this.interceptedMethods.contains(this)) {
AbstractBeanDefinitionBuilder.this.interceptedMethods.add(this);
}
return BeanMethodElement.super.intercept(annotationValue);
}
@Override
public BeanMethodElement executable(boolean processOnStartup) {
if (!AbstractBeanDefinitionBuilder.this.executableMethods.contains(this)) {
AbstractBeanDefinitionBuilder.this.executableMethods.add(this);
}
return BeanMethodElement.super.executable(processOnStartup);
}
@NonNull
@Override
public BeanMethodElement inject() {
if (!AbstractBeanDefinitionBuilder.this.injectedMethods.contains(this)) {
AbstractBeanDefinitionBuilder.this.injectedMethods.add(this);
}
return BeanMethodElement.super.inject();
}
@NonNull
@Override
public BeanMethodElement preDestroy() {
if (!AbstractBeanDefinitionBuilder.this.preDestroyMethods.contains(this)) {
AbstractBeanDefinitionBuilder.this.preDestroyMethods.add(this);
}
return BeanMethodElement.super.preDestroy();
}
@NonNull
@Override
public BeanMethodElement postConstruct() {
if (!AbstractBeanDefinitionBuilder.this.postConstructMethods.contains(this)) {
AbstractBeanDefinitionBuilder.this.postConstructMethods.add(this);
}
return BeanMethodElement.super.postConstruct();
}
@NonNull
@Override
public BeanParameterElement[] getParameters() {
return this.beanParameters;
}
@NonNull
@Override
public ClassElement getReturnType() {
return methodElement.getReturnType();
}
@NonNull
@Override
public ClassElement getGenericReturnType() {
return methodElement.getGenericReturnType();
}
@NonNull
@Override
public MethodElement withParameters(@NonNull ParameterElement... newParameters) {
this.beanParameters = initBeanParameters(newParameters);
return this;
}
@Override
public MethodElement withAnnotationMetadata(AnnotationMetadata annotationMetadata) {
this.currentMetadata = annotationMetadata;
return this;
}
@Override
public ClassElement getDeclaringType() {
return methodElement.getDeclaringType();
}
@Override
public ClassElement getOwningType() {
return AbstractBeanDefinitionBuilder.this.beanType;
}
}
/**
* Models a {@link io.micronaut.inject.ast.beans.BeanConstructorElement}.
*/
private final | InternalBeanElementMethod |
java | grpc__grpc-java | api/src/main/java/io/grpc/LoadBalancer.java | {
"start": 30667,
"end": 32570
} | class ____ {
private final List<EquivalentAddressGroup> addrs;
private final Attributes attrs;
private final Object[][] customOptions;
private CreateSubchannelArgs(
List<EquivalentAddressGroup> addrs, Attributes attrs, Object[][] customOptions) {
this.addrs = checkNotNull(addrs, "addresses are not set");
this.attrs = checkNotNull(attrs, "attrs");
this.customOptions = checkNotNull(customOptions, "customOptions");
}
/**
* Returns the addresses, which is an unmodifiable list.
*/
public List<EquivalentAddressGroup> getAddresses() {
return addrs;
}
/**
* Returns the attributes.
*/
public Attributes getAttributes() {
return attrs;
}
/**
* Get the value for a custom option or its inherent default.
*
* @param key Key identifying option
*/
@SuppressWarnings("unchecked")
public <T> T getOption(Key<T> key) {
Preconditions.checkNotNull(key, "key");
for (int i = 0; i < customOptions.length; i++) {
if (key.equals(customOptions[i][0])) {
return (T) customOptions[i][1];
}
}
return key.defaultValue;
}
/**
* Returns a builder with the same initial values as this object.
*/
public Builder toBuilder() {
return newBuilder().setAddresses(addrs).setAttributes(attrs).copyCustomOptions(customOptions);
}
/**
* Creates a new builder.
*/
public static Builder newBuilder() {
return new Builder();
}
@Override
public String toString() {
return MoreObjects.toStringHelper(this)
.add("addrs", addrs)
.add("attrs", attrs)
.add("customOptions", Arrays.deepToString(customOptions))
.toString();
}
@ExperimentalApi("https://github.com/grpc/grpc-java/issues/1771")
public static final | CreateSubchannelArgs |
java | elastic__elasticsearch | x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/DeleteEnrichPolicyActionRequestTests.java | {
"start": 501,
"end": 1190
} | class ____ extends AbstractWireSerializingTestCase<DeleteEnrichPolicyAction.Request> {
@Override
protected DeleteEnrichPolicyAction.Request createTestInstance() {
return new DeleteEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, randomAlphaOfLength(4));
}
@Override
protected DeleteEnrichPolicyAction.Request mutateInstance(DeleteEnrichPolicyAction.Request instance) {
return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929
}
@Override
protected Writeable.Reader<DeleteEnrichPolicyAction.Request> instanceReader() {
return DeleteEnrichPolicyAction.Request::new;
}
}
| DeleteEnrichPolicyActionRequestTests |
java | apache__camel | components/camel-lra/src/test/java/org/apache/camel/service/lra/LRASagaServiceTest.java | {
"start": 3677,
"end": 3851
} | class ____ extends LRAClient {
public AlternativeLRAClient(LRASagaService sagaService) {
super(sagaService);
}
}
private | AlternativeLRAClient |
java | micronaut-projects__micronaut-core | http-client-tck/src/main/java/io/micronaut/http/client/tck/tests/CookieTest.java | {
"start": 1457,
"end": 3639
} | class ____ {
private static final String SPEC_NAME = "CookieTest";
@ParameterizedTest(name = "blocking={0}")
@ValueSource(booleans = {true, false})
void cookieBinding(boolean blocking) throws IOException {
asserts(SPEC_NAME,
Map.of(BLOCKING_CLIENT_PROPERTY, blocking),
HttpRequest.GET("/cookies-test/bind")
.cookie(Cookie.of("one", "foo"))
.cookie(Cookie.of("two", "bar")),
(server, request) -> AssertionUtils.assertDoesNotThrow(server, request,
HttpResponseAssertion.builder()
.status(HttpStatus.OK)
.body(BodyAssertion.builder().body("{\"one\":\"foo\",\"two\":\"bar\"}").equals())
.build())
);
}
@ParameterizedTest(name = "blocking={0}")
@ValueSource(booleans = {true, false})
void getCookiesFromRequest(boolean blocking) throws IOException {
asserts(SPEC_NAME,
Map.of(BLOCKING_CLIENT_PROPERTY, blocking),
HttpRequest.GET("/cookies-test/all")
.cookie(Cookie.of("one", "foo"))
.cookie(Cookie.of("two", "bar")),
(server, request) -> AssertionUtils.assertDoesNotThrow(server, request,
HttpResponseAssertion.builder()
.status(HttpStatus.OK)
.body(BodyAssertion.builder().body("{\"one\":\"foo\",\"two\":\"bar\"}").equals())
.build())
);
}
@ParameterizedTest(name = "blocking={0}")
@ValueSource(booleans = {true, false})
void testNoCookies(boolean blocking) throws IOException {
asserts(SPEC_NAME,
Map.of(BLOCKING_CLIENT_PROPERTY, blocking),
HttpRequest.GET("/cookies-test/all"),
(server, request) -> AssertionUtils.assertDoesNotThrow(server, request,
HttpResponseAssertion.builder()
.status(HttpStatus.OK)
.body(BodyAssertion.builder().body("{}").equals())
.build())
);
}
@Controller("/cookies-test")
@Requires(property = "spec.name", value = SPEC_NAME)
static | CookieTest |
java | elastic__elasticsearch | x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/capacity/AutoscalingDeciderResult.java | {
"start": 925,
"end": 2872
} | interface ____ extends ToXContent, NamedWriteable {
String summary();
}
/**
* Create a new result with required capacity.
* @param requiredCapacity required capacity or null if no capacity can be calculated due to insufficient information.
* @param reason details/data behind the calculation
*/
public AutoscalingDeciderResult(AutoscalingCapacity requiredCapacity, Reason reason) {
this.requiredCapacity = requiredCapacity;
this.reason = reason;
}
public AutoscalingDeciderResult(StreamInput in) throws IOException {
this.requiredCapacity = in.readOptionalWriteable(AutoscalingCapacity::new);
this.reason = in.readOptionalNamedWriteable(Reason.class);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeOptionalWriteable(requiredCapacity);
out.writeOptionalNamedWriteable(reason);
}
public AutoscalingCapacity requiredCapacity() {
return requiredCapacity;
}
public Reason reason() {
return reason;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
if (requiredCapacity != null) {
builder.field("required_capacity", requiredCapacity);
}
if (reason != null) {
builder.field("reason_summary", reason.summary());
builder.field("reason_details", reason);
}
return builder;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
AutoscalingDeciderResult that = (AutoscalingDeciderResult) o;
return Objects.equals(requiredCapacity, that.requiredCapacity) && Objects.equals(reason, that.reason);
}
@Override
public int hashCode() {
return Objects.hash(requiredCapacity, reason);
}
}
| Reason |
java | apache__flink | flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/operators/over/frame/SlidingOverFrame.java | {
"start": 1588,
"end": 3913
} | class ____ implements OverWindowFrame {
private final RowType inputType;
private final RowType valueType;
private GeneratedAggsHandleFunction aggsHandleFunction;
private transient AggsHandleFunction processor;
transient RowDataSerializer inputSer;
private transient RowDataSerializer valueSer;
transient ResettableExternalBuffer.BufferIterator inputIterator;
/** The next row from `input`. */
transient BinaryRowData nextRow;
/** The rows within current sliding window. */
transient ArrayDeque<RowData> buffer;
private transient RowData accValue;
public SlidingOverFrame(
RowType inputType, RowType valueType, GeneratedAggsHandleFunction aggsHandleFunction) {
this.inputType = inputType;
this.valueType = valueType;
this.aggsHandleFunction = aggsHandleFunction;
}
@Override
public void open(ExecutionContext ctx) throws Exception {
this.inputSer = new RowDataSerializer(inputType);
this.valueSer = new RowDataSerializer(valueType);
ClassLoader cl = ctx.getRuntimeContext().getUserCodeClassLoader();
processor = aggsHandleFunction.newInstance(cl);
processor.open(new PerKeyStateDataViewStore(ctx.getRuntimeContext()));
buffer = new ArrayDeque<>();
this.aggsHandleFunction = null;
}
@Override
public void prepare(ResettableExternalBuffer rows) throws Exception {
if (inputIterator != null) {
inputIterator.close();
}
inputIterator = rows.newIterator();
nextRow = OverWindowFrame.getNextOrNull(inputIterator);
buffer.clear();
processor.setWindowSize(rows.size());
// cleanup the retired accumulators value
processor.setAccumulators(processor.createAccumulators());
}
RowData accumulateBuffer(boolean bufferUpdated) throws Exception {
// Only recalculate and update when the buffer changes.
if (bufferUpdated) {
// cleanup the retired accumulators value
processor.setAccumulators(processor.createAccumulators());
for (RowData row : buffer) {
processor.accumulate(row);
}
accValue = valueSer.copy(processor.getValue());
}
return accValue;
}
}
| SlidingOverFrame |
java | spring-projects__spring-framework | spring-webflux/src/main/java/org/springframework/web/reactive/function/server/RequestPredicates.java | {
"start": 25411,
"end": 26102
} | class ____ extends HeadersPredicate {
private final MediaType mediaType;
public SingleContentTypePredicate(MediaType mediaType) {
super(headers -> {
MediaType contentType = headers.contentType().orElse(MediaType.APPLICATION_OCTET_STREAM);
boolean match = mediaType.includes(contentType);
traceMatch("Content-Type", mediaType, contentType, match);
return match;
});
this.mediaType = mediaType;
}
@Override
public void accept(Visitor visitor) {
visitor.header(HttpHeaders.CONTENT_TYPE, this.mediaType.toString());
}
@Override
public String toString() {
return "Content-Type: " + this.mediaType;
}
}
private static | SingleContentTypePredicate |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BulkDeleteSource.java | {
"start": 1224,
"end": 1446
} | interface ____ not guarantee that the operation is supported;
* returning a {@link BulkDelete} object from the call {@link #createBulkDelete(Path)}
* is.
*/
@InterfaceAudience.Public
@InterfaceStability.Unstable
public | does |
java | apache__maven | impl/maven-core/src/main/java/org/apache/maven/project/DefaultProjectBuildingRequest.java | {
"start": 1437,
"end": 10110
} | class ____ implements ProjectBuildingRequest {
private RepositorySystemSession repositorySession;
private ArtifactRepository localRepository;
private List<ArtifactRepository> remoteRepositories;
private List<ArtifactRepository> pluginArtifactRepositories;
private MavenProject project;
private int validationLevel = ModelBuildingRequest.VALIDATION_LEVEL_STRICT;
private boolean processPlugins;
private List<Profile> profiles;
private List<String> activeProfileIds;
private List<String> inactiveProfileIds;
private Properties systemProperties;
private Properties userProperties;
private Instant buildStartTime;
private boolean resolveDependencies;
@Deprecated
private boolean resolveVersionRanges;
private RepositoryMerging repositoryMerging = RepositoryMerging.POM_DOMINANT;
public DefaultProjectBuildingRequest() {
processPlugins = true;
profiles = new ArrayList<>();
activeProfileIds = new ArrayList<>();
inactiveProfileIds = new ArrayList<>();
systemProperties = new Properties();
userProperties = new Properties();
remoteRepositories = new ArrayList<>();
pluginArtifactRepositories = new ArrayList<>();
}
public DefaultProjectBuildingRequest(ProjectBuildingRequest request) {
this();
setProcessPlugins(request.isProcessPlugins());
setProfiles(request.getProfiles());
setActiveProfileIds(request.getActiveProfileIds());
setInactiveProfileIds(request.getInactiveProfileIds());
setSystemProperties(request.getSystemProperties());
setUserProperties(request.getUserProperties());
setRemoteRepositories(request.getRemoteRepositories());
setPluginArtifactRepositories(request.getPluginArtifactRepositories());
setRepositorySession(request.getRepositorySession());
setLocalRepository(request.getLocalRepository());
setBuildStartTime(request.getBuildStartTime());
setProject(request.getProject());
setResolveDependencies(request.isResolveDependencies());
setValidationLevel(request.getValidationLevel());
setResolveVersionRanges(request.isResolveVersionRanges());
setRepositoryMerging(request.getRepositoryMerging());
}
@Override
public MavenProject getProject() {
return project;
}
@Override
public void setProject(MavenProject mavenProject) {
this.project = mavenProject;
}
@Override
public ProjectBuildingRequest setLocalRepository(ArtifactRepository localRepository) {
this.localRepository = localRepository;
return this;
}
@Override
public ArtifactRepository getLocalRepository() {
return localRepository;
}
@Override
public List<ArtifactRepository> getRemoteRepositories() {
return remoteRepositories;
}
@Override
public ProjectBuildingRequest setRemoteRepositories(List<ArtifactRepository> remoteRepositories) {
if (remoteRepositories != null) {
this.remoteRepositories = new ArrayList<>(remoteRepositories);
} else {
this.remoteRepositories.clear();
}
return this;
}
@Override
public List<ArtifactRepository> getPluginArtifactRepositories() {
return pluginArtifactRepositories;
}
@Override
public ProjectBuildingRequest setPluginArtifactRepositories(List<ArtifactRepository> pluginArtifactRepositories) {
if (pluginArtifactRepositories != null) {
this.pluginArtifactRepositories = new ArrayList<>(pluginArtifactRepositories);
} else {
this.pluginArtifactRepositories.clear();
}
return this;
}
@Override
public Properties getSystemProperties() {
return systemProperties;
}
@Override
public ProjectBuildingRequest setSystemProperties(Properties systemProperties) {
if (systemProperties != null) {
this.systemProperties = SystemProperties.copyProperties(systemProperties);
} else {
this.systemProperties.clear();
}
return this;
}
@Override
public Properties getUserProperties() {
return userProperties;
}
@Override
public ProjectBuildingRequest setUserProperties(Properties userProperties) {
if (userProperties != null) {
this.userProperties = new Properties();
this.userProperties.putAll(userProperties);
} else {
this.userProperties.clear();
}
return this;
}
@Override
public boolean isProcessPlugins() {
return processPlugins;
}
@Override
public ProjectBuildingRequest setProcessPlugins(boolean processPlugins) {
this.processPlugins = processPlugins;
return this;
}
@Override
public ProjectBuildingRequest setResolveDependencies(boolean resolveDependencies) {
this.resolveDependencies = resolveDependencies;
return this;
}
@Override
public boolean isResolveDependencies() {
return resolveDependencies;
}
/**
* @since 3.2.2
* @deprecated This got added when implementing MNG-2199 and is no longer used.
* Commit 6cf9320942c34bc68205425ab696b1712ace9ba4 updated the way 'MavenProject' objects are initialized.
*/
@Deprecated
@Override
public ProjectBuildingRequest setResolveVersionRanges(boolean value) {
this.resolveVersionRanges = value;
return this;
}
/**
* @since 3.2.2
* @deprecated This got added when implementing MNG-2199 and is no longer used.
* Commit 6cf9320942c34bc68205425ab696b1712ace9ba4 updated the way 'MavenProject' objects are initialized.
*/
@Deprecated
@Override
public boolean isResolveVersionRanges() {
return this.resolveVersionRanges;
}
@Override
public ProjectBuildingRequest setValidationLevel(int validationLevel) {
this.validationLevel = validationLevel;
return this;
}
@Override
public int getValidationLevel() {
return validationLevel;
}
@Override
public List<String> getActiveProfileIds() {
return activeProfileIds;
}
@Override
public void setActiveProfileIds(List<String> activeProfileIds) {
if (activeProfileIds != null) {
this.activeProfileIds = new ArrayList<>(activeProfileIds);
} else {
this.activeProfileIds.clear();
}
}
@Override
public List<String> getInactiveProfileIds() {
return inactiveProfileIds;
}
@Override
public void setInactiveProfileIds(List<String> inactiveProfileIds) {
if (inactiveProfileIds != null) {
this.inactiveProfileIds = new ArrayList<>(inactiveProfileIds);
} else {
this.inactiveProfileIds.clear();
}
}
@Override
public void setProfiles(List<Profile> profiles) {
if (profiles != null) {
this.profiles = new ArrayList<>(profiles);
} else {
this.profiles.clear();
}
}
@Override
public void addProfile(Profile profile) {
profiles.add(profile);
}
@Override
public List<Profile> getProfiles() {
return profiles;
}
@Deprecated
@Override
public Date getBuildStartTime() {
return buildStartTime != null ? new Date(buildStartTime.toEpochMilli()) : null;
}
@Deprecated
@Override
public void setBuildStartTime(Date buildStartTime) {
setBuildStartInstant(buildStartTime != null ? Instant.ofEpochMilli(buildStartTime.getTime()) : null);
}
@Override
public Instant getBuildStartInstant() {
return this.buildStartTime;
}
@Override
public void setBuildStartInstant(Instant buildStartTime) {
this.buildStartTime = buildStartTime;
}
@Override
public RepositorySystemSession getRepositorySession() {
return repositorySession;
}
@Override
public DefaultProjectBuildingRequest setRepositorySession(RepositorySystemSession repositorySession) {
this.repositorySession = repositorySession;
return this;
}
@Override
public DefaultProjectBuildingRequest setRepositoryMerging(RepositoryMerging repositoryMerging) {
this.repositoryMerging = Objects.requireNonNull(repositoryMerging, "repositoryMerging cannot be null");
return this;
}
@Override
public RepositoryMerging getRepositoryMerging() {
return repositoryMerging;
}
}
| DefaultProjectBuildingRequest |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java | {
"start": 4233,
"end": 21519
} | class ____ its own object that can have an internal cache field
*/
private static FieldPermissionsCache fieldPermissionsCache = new FieldPermissionsCache(Settings.EMPTY);
public static synchronized void setFieldPermissionsCache(FieldPermissionsCache cache) {
RoleDescriptor.fieldPermissionsCache = Objects.requireNonNull(cache);
}
public RoleDescriptor(
String name,
@Nullable String[] clusterPrivileges,
@Nullable IndicesPrivileges[] indicesPrivileges,
@Nullable String[] runAs
) {
this(name, clusterPrivileges, indicesPrivileges, runAs, null);
}
/**
* @deprecated Use {@link #RoleDescriptor(String, String[], IndicesPrivileges[], ApplicationResourcePrivileges[],
* ConfigurableClusterPrivilege[], String[], Map, Map, RemoteIndicesPrivileges[], RemoteClusterPermissions, Restriction, String)}
*/
@Deprecated
public RoleDescriptor(
String name,
@Nullable String[] clusterPrivileges,
@Nullable IndicesPrivileges[] indicesPrivileges,
@Nullable String[] runAs,
@Nullable Map<String, Object> metadata
) {
this(name, clusterPrivileges, indicesPrivileges, runAs, metadata, null);
}
/**
* @deprecated Use {@link #RoleDescriptor(String, String[], IndicesPrivileges[], ApplicationResourcePrivileges[],
* ConfigurableClusterPrivilege[], String[], Map, Map, RemoteIndicesPrivileges[], RemoteClusterPermissions, Restriction, String)}
*/
@Deprecated
public RoleDescriptor(
String name,
@Nullable String[] clusterPrivileges,
@Nullable IndicesPrivileges[] indicesPrivileges,
@Nullable String[] runAs,
@Nullable Map<String, Object> metadata,
@Nullable Map<String, Object> transientMetadata
) {
this(
name,
clusterPrivileges,
indicesPrivileges,
null,
null,
runAs,
metadata,
transientMetadata,
RemoteIndicesPrivileges.NONE,
RemoteClusterPermissions.NONE,
Restriction.NONE,
null
);
}
public RoleDescriptor(
String name,
@Nullable String[] clusterPrivileges,
@Nullable IndicesPrivileges[] indicesPrivileges,
@Nullable ApplicationResourcePrivileges[] applicationPrivileges,
@Nullable ConfigurableClusterPrivilege[] configurableClusterPrivileges,
@Nullable String[] runAs,
@Nullable Map<String, Object> metadata,
@Nullable Map<String, Object> transientMetadata
) {
this(
name,
clusterPrivileges,
indicesPrivileges,
applicationPrivileges,
configurableClusterPrivileges,
runAs,
metadata,
transientMetadata,
RemoteIndicesPrivileges.NONE,
RemoteClusterPermissions.NONE,
Restriction.NONE,
null
);
}
public RoleDescriptor(
String name,
@Nullable String[] clusterPrivileges,
@Nullable IndicesPrivileges[] indicesPrivileges,
@Nullable ApplicationResourcePrivileges[] applicationPrivileges,
@Nullable ConfigurableClusterPrivilege[] configurableClusterPrivileges,
@Nullable String[] runAs,
@Nullable Map<String, Object> metadata,
@Nullable Map<String, Object> transientMetadata,
@Nullable RemoteIndicesPrivileges[] remoteIndicesPrivileges,
@Nullable RemoteClusterPermissions remoteClusterPermissions,
@Nullable Restriction restriction,
@Nullable String description
) {
this.name = name;
this.clusterPrivileges = clusterPrivileges != null ? clusterPrivileges : Strings.EMPTY_ARRAY;
this.configurableClusterPrivileges = sortConfigurableClusterPrivileges(configurableClusterPrivileges);
this.indicesPrivileges = indicesPrivileges != null ? indicesPrivileges : IndicesPrivileges.NONE;
this.applicationPrivileges = applicationPrivileges != null ? applicationPrivileges : ApplicationResourcePrivileges.NONE;
this.runAs = runAs != null ? runAs : Strings.EMPTY_ARRAY;
this.metadata = metadata != null ? Collections.unmodifiableMap(metadata) : Collections.emptyMap();
this.transientMetadata = transientMetadata != null
? Collections.unmodifiableMap(transientMetadata)
: Collections.singletonMap("enabled", true);
this.remoteIndicesPrivileges = remoteIndicesPrivileges != null ? remoteIndicesPrivileges : RemoteIndicesPrivileges.NONE;
this.remoteClusterPermissions = remoteClusterPermissions != null && remoteClusterPermissions.hasAnyPrivileges()
? remoteClusterPermissions
: RemoteClusterPermissions.NONE;
this.restriction = restriction != null ? restriction : Restriction.NONE;
this.description = description != null ? description : "";
}
public RoleDescriptor(StreamInput in) throws IOException {
this.name = in.readString();
this.clusterPrivileges = in.readStringArray();
int size = in.readVInt();
this.indicesPrivileges = new IndicesPrivileges[size];
for (int i = 0; i < size; i++) {
indicesPrivileges[i] = new IndicesPrivileges(in);
}
this.runAs = in.readStringArray();
this.metadata = in.readGenericMap();
this.transientMetadata = in.readGenericMap();
this.applicationPrivileges = in.readArray(ApplicationResourcePrivileges::new, ApplicationResourcePrivileges[]::new);
this.configurableClusterPrivileges = ConfigurableClusterPrivileges.readArray(in);
if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) {
this.remoteIndicesPrivileges = in.readArray(RemoteIndicesPrivileges::new, RemoteIndicesPrivileges[]::new);
} else {
this.remoteIndicesPrivileges = RemoteIndicesPrivileges.NONE;
}
if (in.getTransportVersion().onOrAfter(WORKFLOWS_RESTRICTION_VERSION)) {
this.restriction = new Restriction(in);
} else {
this.restriction = Restriction.NONE;
}
if (in.getTransportVersion().onOrAfter(ROLE_REMOTE_CLUSTER_PRIVS)) {
this.remoteClusterPermissions = new RemoteClusterPermissions(in);
} else {
this.remoteClusterPermissions = RemoteClusterPermissions.NONE;
}
if (in.getTransportVersion().onOrAfter(SECURITY_ROLE_DESCRIPTION)) {
this.description = in.readOptionalString();
} else {
this.description = "";
}
}
public String getName() {
return this.name;
}
public String getDescription() {
return description;
}
public String[] getClusterPrivileges() {
return this.clusterPrivileges;
}
public ConfigurableClusterPrivilege[] getConditionalClusterPrivileges() {
return this.configurableClusterPrivileges;
}
public IndicesPrivileges[] getIndicesPrivileges() {
return this.indicesPrivileges;
}
public RemoteIndicesPrivileges[] getRemoteIndicesPrivileges() {
return this.remoteIndicesPrivileges;
}
public boolean hasRemoteIndicesPrivileges() {
return remoteIndicesPrivileges.length != 0;
}
public boolean hasRemoteClusterPermissions() {
return remoteClusterPermissions.hasAnyPrivileges();
}
public RemoteClusterPermissions getRemoteClusterPermissions() {
return this.remoteClusterPermissions;
}
public ApplicationResourcePrivileges[] getApplicationPrivileges() {
return this.applicationPrivileges;
}
public boolean hasClusterPrivileges() {
return clusterPrivileges.length != 0;
}
public boolean hasApplicationPrivileges() {
return applicationPrivileges.length != 0;
}
public boolean hasConfigurableClusterPrivileges() {
return configurableClusterPrivileges.length != 0;
}
public boolean hasRunAs() {
return runAs.length != 0;
}
public boolean hasDescription() {
return description.length() != 0;
}
public boolean hasUnsupportedPrivilegesInsideAPIKeyConnectedRemoteCluster() {
return hasConfigurableClusterPrivileges()
|| hasApplicationPrivileges()
|| hasRunAs()
|| hasRemoteIndicesPrivileges()
|| hasRemoteClusterPermissions()
|| hasWorkflowsRestriction()
|| (hasClusterPrivileges()
&& RemoteClusterPermissions.getSupportedRemoteClusterPermissions().containsAll(Arrays.asList(clusterPrivileges)) == false);
}
public String[] getRunAs() {
return this.runAs;
}
public Restriction getRestriction() {
return restriction;
}
public boolean hasRestriction() {
return restriction != null && false == restriction.isEmpty();
}
public boolean hasWorkflowsRestriction() {
return hasRestriction() && restriction.hasWorkflows();
}
public Map<String, Object> getMetadata() {
return metadata;
}
public Map<String, Object> getTransientMetadata() {
return transientMetadata;
}
public boolean isUsingDocumentOrFieldLevelSecurity() {
return Arrays.stream(indicesPrivileges).anyMatch(ip -> ip.isUsingDocumentLevelSecurity() || ip.isUsingFieldLevelSecurity());
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder("Role[");
sb.append("name=").append(name);
sb.append(", cluster=[").append(Strings.arrayToCommaDelimitedString(clusterPrivileges));
sb.append("], global=[").append(Strings.arrayToCommaDelimitedString(configurableClusterPrivileges));
sb.append("], indicesPrivileges=[");
for (IndicesPrivileges group : indicesPrivileges) {
sb.append(group.toString()).append(",");
}
sb.append("], applicationPrivileges=[");
for (ApplicationResourcePrivileges privilege : applicationPrivileges) {
sb.append(privilege.toString()).append(",");
}
sb.append("], runAs=[").append(Strings.arrayToCommaDelimitedString(runAs));
sb.append("], metadata=[");
sb.append(metadata);
sb.append("]");
sb.append(", remoteIndicesPrivileges=[");
for (RemoteIndicesPrivileges group : remoteIndicesPrivileges) {
sb.append(group.toString()).append(",");
}
sb.append("], remoteClusterPrivileges=[");
for (RemoteClusterPermissionGroup group : remoteClusterPermissions.groups()) {
sb.append(group.toString()).append(",");
}
sb.append("], restriction=").append(restriction);
sb.append(", description=").append(description);
sb.append("]");
return sb.toString();
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
RoleDescriptor that = (RoleDescriptor) o;
if (name.equals(that.name) == false) return false;
if (Arrays.equals(clusterPrivileges, that.clusterPrivileges) == false) return false;
if (Arrays.equals(configurableClusterPrivileges, that.configurableClusterPrivileges) == false) return false;
if (Arrays.equals(indicesPrivileges, that.indicesPrivileges) == false) return false;
if (Arrays.equals(applicationPrivileges, that.applicationPrivileges) == false) return false;
if (metadata.equals(that.getMetadata()) == false) return false;
if (Arrays.equals(runAs, that.runAs) == false) return false;
if (Arrays.equals(remoteIndicesPrivileges, that.remoteIndicesPrivileges) == false) return false;
if (remoteClusterPermissions.equals(that.remoteClusterPermissions) == false) return false;
if (restriction.equals(that.restriction) == false) return false;
return Objects.equals(description, that.description);
}
@Override
public int hashCode() {
int result = name.hashCode();
result = 31 * result + Arrays.hashCode(clusterPrivileges);
result = 31 * result + Arrays.hashCode(configurableClusterPrivileges);
result = 31 * result + Arrays.hashCode(indicesPrivileges);
result = 31 * result + Arrays.hashCode(applicationPrivileges);
result = 31 * result + Arrays.hashCode(runAs);
result = 31 * result + metadata.hashCode();
result = 31 * result + Arrays.hashCode(remoteIndicesPrivileges);
result = 31 * result + remoteClusterPermissions.hashCode();
result = 31 * result + restriction.hashCode();
result = 31 * result + Objects.hashCode(description);
return result;
}
public boolean isEmpty() {
return clusterPrivileges.length == 0
&& configurableClusterPrivileges.length == 0
&& indicesPrivileges.length == 0
&& applicationPrivileges.length == 0
&& runAs.length == 0
&& metadata.size() == 0
&& remoteIndicesPrivileges.length == 0
&& remoteClusterPermissions.groups().isEmpty()
&& restriction.isEmpty();
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
return toXContent(builder, params, false);
}
public XContentBuilder toXContent(XContentBuilder builder, Params params, boolean docCreation) throws IOException {
builder.startObject();
innerToXContent(builder, params, docCreation);
return builder.endObject();
}
/**
* Generates x-content for this {@link RoleDescriptor} instance.
*
* @param builder the x-content builder
* @param params the parameters for x-content generation directives
* @param docCreation {@code true} if the x-content is being generated for creating a document
* in the security index, {@code false} if the x-content being generated
* is for API display purposes
* @return x-content builder
* @throws IOException if there was an error writing the x-content to the builder
*/
public XContentBuilder innerToXContent(XContentBuilder builder, Params params, boolean docCreation) throws IOException {
builder.array(Fields.CLUSTER.getPreferredName(), clusterPrivileges);
if (configurableClusterPrivileges.length != 0) {
builder.field(Fields.GLOBAL.getPreferredName());
ConfigurableClusterPrivileges.toXContent(builder, params, Arrays.asList(configurableClusterPrivileges));
}
builder.xContentList(Fields.INDICES.getPreferredName(), indicesPrivileges);
builder.xContentList(Fields.APPLICATIONS.getPreferredName(), applicationPrivileges);
if (runAs != null) {
builder.array(Fields.RUN_AS.getPreferredName(), runAs);
}
builder.field(Fields.METADATA.getPreferredName(), metadata);
if (docCreation) {
builder.field(Fields.TYPE.getPreferredName(), ROLE_TYPE);
} else {
builder.field(Fields.TRANSIENT_METADATA.getPreferredName(), transientMetadata);
}
if (hasRemoteIndicesPrivileges()) {
builder.xContentList(Fields.REMOTE_INDICES.getPreferredName(), remoteIndicesPrivileges);
}
if (hasRemoteClusterPermissions()) {
builder.array(Fields.REMOTE_CLUSTER.getPreferredName(), remoteClusterPermissions);
}
if (hasRestriction()) {
builder.field(Fields.RESTRICTION.getPreferredName(), restriction);
}
if (hasDescription()) {
builder.field(Fields.DESCRIPTION.getPreferredName(), description);
}
return builder;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(name);
out.writeStringArray(clusterPrivileges);
out.writeVInt(indicesPrivileges.length);
for (IndicesPrivileges group : indicesPrivileges) {
group.writeTo(out);
}
out.writeStringArray(runAs);
out.writeGenericMap(metadata);
out.writeGenericMap(transientMetadata);
out.writeArray(ApplicationResourcePrivileges::write, applicationPrivileges);
ConfigurableClusterPrivileges.writeArray(out, getConditionalClusterPrivileges());
if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) {
out.writeArray(remoteIndicesPrivileges);
}
if (out.getTransportVersion().onOrAfter(WORKFLOWS_RESTRICTION_VERSION)) {
restriction.writeTo(out);
}
if (out.getTransportVersion().onOrAfter(ROLE_REMOTE_CLUSTER_PRIVS)) {
remoteClusterPermissions.writeTo(out);
}
if (out.getTransportVersion().onOrAfter(SECURITY_ROLE_DESCRIPTION)) {
out.writeOptionalString(description);
}
}
public static Parser.Builder parserBuilder() {
return new Parser.Builder();
}
public record Parser(boolean allow2xFormat, boolean allowRestriction, boolean allowDescription) {
public static final | to |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/dialect/lock/OptimisticForceIncrementLockingStrategy.java | {
"start": 681,
"end": 1957
} | class ____ implements LockingStrategy {
private final EntityPersister lockable;
private final LockMode lockMode;
/**
* Construct locking strategy.
*
* @param lockable The metadata for the entity to be locked.
* @param lockMode Indicates the type of lock to be acquired.
*/
public OptimisticForceIncrementLockingStrategy(EntityPersister lockable, LockMode lockMode) {
this.lockable = lockable;
this.lockMode = lockMode;
if ( lockMode.lessThan( LockMode.OPTIMISTIC_FORCE_INCREMENT ) ) {
throw new HibernateException( "[" + lockMode + "] not valid for [" + lockable.getEntityName() + "]" );
}
}
@Override
public void lock(Object id, Object version, Object object, int timeout, EventSource session) {
if ( !lockable.isVersioned() ) {
throw new HibernateException( "[" + lockMode + "] not supported for non-versioned entities [" + lockable.getEntityName() + "]" );
}
// final EntityEntry entry = session.getPersistenceContextInternal().getEntry( object );
// Register the EntityIncrementVersionProcess action to run just prior to transaction commit.
session.getActionQueue().registerCallback( new EntityIncrementVersionProcess( object ) );
}
protected LockMode getLockMode() {
return lockMode;
}
}
| OptimisticForceIncrementLockingStrategy |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/annotation/web/configurers/CsrfConfigurerTests.java | {
"start": 34687,
"end": 35044
} | class ____ {
static RequestMatcher MATCHER;
@Bean
SecurityFilterChain filterChain(HttpSecurity http) throws Exception {
// @formatter:off
http
.csrf((csrf) -> csrf.requireCsrfProtectionMatcher(MATCHER));
return http.build();
// @formatter:on
}
}
@Configuration
@EnableWebSecurity
static | RequireCsrfProtectionMatcherInLambdaConfig |
java | elastic__elasticsearch | x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/read/ValueSourceReaderTypeConversionTests.java | {
"start": 86166,
"end": 86550
} | class ____ extends BlockToStringConverter {
TestLongBlockToStringConverter(DriverContext driverContext) {
super(driverContext);
}
@Override
BytesRef evalValue(Block container, int index) {
return new BytesRef(Long.toString(((LongBlock) container).getLong(index)));
}
}
private static | TestLongBlockToStringConverter |
java | apache__camel | components/camel-kafka/src/test/java/org/apache/camel/component/kafka/serde/DefaultKafkaHeaderSerializerTest.java | {
"start": 1191,
"end": 2407
} | class ____ {
private final DefaultKafkaHeaderSerializer serializer = new DefaultKafkaHeaderSerializer();
@ParameterizedTest
@MethodSource("primeNumbers")
public void serialize(Object value, byte[] expectedResult) {
serializer.setCamelContext(new DefaultCamelContext());
byte[] result = serializer.serialize("someKey", value);
assertArrayEquals(expectedResult, result);
}
public static Collection<Object[]> primeNumbers() {
return Arrays.asList(new Object[][] {
{ Boolean.TRUE, "true".getBytes() }, // boolean
{ -12, new byte[] { -1, -1, -1, -12 } }, // integer
{ 19L, new byte[] { 0, 0, 0, 0, 0, 0, 0, 19 } }, // long
{ 22.0D, new byte[] { 64, 54, 0, 0, 0, 0, 0, 0 } }, // double
{ "someValue", "someValue".getBytes() }, // string
{ new byte[] { 0, 2, -43 }, new byte[] { 0, 2, -43 } }, // byte[]
{ new TextNode("foo"), "foo".getBytes() }, // jackson TextNode
{ null, null }, // null
{ new Object(), null } // unknown
// type
});
}
}
| DefaultKafkaHeaderSerializerTest |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java | {
"start": 3853,
"end": 44264
} | class ____ extends ServiceTestUtils {
private static final Logger LOG =
LoggerFactory.getLogger(TestYarnNativeServices.class);
@BeforeEach
public void setup() throws Exception {
File tmpYarnDir = new File("target", "tmp");
FileUtils.deleteQuietly(tmpYarnDir);
}
@AfterEach
public void tearDown() throws IOException {
shutdown();
}
// End-to-end test to use ServiceClient to deploy a service.
// 1. Create a service with 2 components, each of which has 2 containers
// 2. Flex up each component to 3 containers and check the component instance names
// 3. Flex down each component to 1 container and check the component instance names
// 4. Flex up each component to 2 containers and check the component instance names
// 5. Stop the service
// 6. Destroy the service
@Test
@Timeout(value = 200)
public void testCreateFlexStopDestroyService() throws Exception {
setupInternal(NUM_NMS);
ServiceClient client = createClient(getConf());
Service exampleApp = createExampleApplication();
client.actionCreate(exampleApp);
SliderFileSystem fileSystem = new SliderFileSystem(getConf());
Path appDir = fileSystem.buildClusterDirPath(exampleApp.getName());
// check app.json is persisted.
assertTrue(
getFS().exists(new Path(appDir, exampleApp.getName() + ".json")));
waitForServiceToBeStable(client, exampleApp);
// Flex two components, each from 2 container to 3 containers.
flexComponents(client, exampleApp, 3L);
// wait for flex to be completed, increase from 2 to 3 containers.
waitForServiceToBeStable(client, exampleApp);
// check all instances name for each component are in sequential order.
checkCompInstancesInOrder(client, exampleApp);
// flex down to 1
flexComponents(client, exampleApp, 1L);
waitForServiceToBeStable(client, exampleApp);
checkCompInstancesInOrder(client, exampleApp);
// check component dir and registry are cleaned up.
// flex up again to 2
flexComponents(client, exampleApp, 2L);
waitForServiceToBeStable(client, exampleApp);
checkCompInstancesInOrder(client, exampleApp);
// stop the service
LOG.info("Stop the service");
client.actionStop(exampleApp.getName(), true);
ApplicationReport report = client.getYarnClient()
.getApplicationReport(ApplicationId.fromString(exampleApp.getId()));
// AM unregisters with RM successfully
assertEquals(FINISHED, report.getYarnApplicationState());
assertEquals(FinalApplicationStatus.ENDED,
report.getFinalApplicationStatus());
String serviceZKPath = RegistryUtils.servicePath(RegistryUtils
.currentUser(), YarnServiceConstants.APP_TYPE, exampleApp.getName());
assertFalse(getCuratorService().zkPathExists(serviceZKPath),
"Registry ZK service path still exists after stop");
LOG.info("Destroy the service");
// destroy the service and check the app dir is deleted from fs.
assertEquals(0, client.actionDestroy(exampleApp.getName()));
// check the service dir on hdfs (in this case, local fs) are deleted.
assertFalse(getFS().exists(appDir));
// check that destroying again does not succeed
assertEquals(EXIT_NOT_FOUND, client.actionDestroy(exampleApp.getName()));
}
// Save a service without starting it and ensure that stop does not NPE and
// that service can be successfully destroyed
@Test
@Timeout(value = 200)
public void testStopDestroySavedService() throws Exception {
setupInternal(NUM_NMS);
ServiceClient client = createClient(getConf());
Service exampleApp = createExampleApplication();
client.actionBuild(exampleApp);
assertEquals(EXIT_COMMAND_ARGUMENT_ERROR, client.actionStop(
exampleApp.getName()));
assertEquals(0, client.actionDestroy(exampleApp.getName()));
}
// Create compa with 2 containers
// Create compb with 2 containers which depends on compa
// Create compc with 2 containers which depends on compb
// Check containers for compa started before containers for compb before
// containers for compc
@Test
@Timeout(value = 200)
public void testComponentStartOrder() throws Exception {
setupInternal(NUM_NMS);
ServiceClient client = createClient(getConf());
Service exampleApp = new Service();
exampleApp.setName("teststartorder");
exampleApp.setVersion("v1");
exampleApp.addComponent(createComponent("compa", 2, "sleep 1000"));
// Let compb depend on compa
Component compb = createComponent("compb", 2, "sleep 1000");
compb.setDependencies(Collections.singletonList("compa"));
exampleApp.addComponent(compb);
// Let compc depend on compb
Component compc = createComponent("compc", 2, "sleep 1000");
compc.setDependencies(Collections.singletonList("compb"));
exampleApp.addComponent(compc);
client.actionCreate(exampleApp);
waitForServiceToBeStable(client, exampleApp);
// check that containers for compa are launched before containers for compb
checkContainerLaunchDependencies(client, exampleApp, "compa", "compb",
"compc");
client.actionStop(exampleApp.getName(), true);
client.actionDestroy(exampleApp.getName());
}
/**
 * Verifies that two different users can own services with the same name:
 * user A creates and starts "same-name" under its own base path, then user
 * B builds a service of the same name under a different base path, and
 * neither operation may fail.
 *
 * <p>Improvements over the previous version: the catch-and-fail block is
 * removed so an unexpected exception propagates with its full stack trace
 * (the method already declares {@code throws Exception}), and the original
 * {@code YARN_SERVICE_BASE_PATH} is restored in the finally block so the
 * config mutation does not leak into later operations.
 */
@Test
@Timeout(value = 200)
public void testCreateServiceSameNameDifferentUser() throws Exception {
  String sameAppName = "same-name";
  String userA = "usera";
  String userB = "userb";
  setupInternal(NUM_NMS);
  ServiceClient client = createClient(getConf());
  String origBasePath = getConf().get(YARN_SERVICE_BASE_PATH);
  Service userAApp = new Service();
  userAApp.setName(sameAppName);
  userAApp.setVersion("v1");
  userAApp.addComponent(createComponent("comp", 1, "sleep 1000"));
  Service userBApp = new Service();
  userBApp.setName(sameAppName);
  userBApp.setVersion("v1");
  userBApp.addComponent(createComponent("comp", 1, "sleep 1000"));
  File userABasePath = null;
  File userBBasePath = null;
  try {
    // user A: create and start under usera's base path
    userABasePath = new File(origBasePath, userA);
    userABasePath.mkdirs();
    getConf().set(YARN_SERVICE_BASE_PATH, userABasePath.getAbsolutePath());
    client.actionCreate(userAApp);
    waitForServiceToBeStarted(client, userAApp);
    // user B: building the same-named service under userb's base path
    // must succeed — the two services are namespaced per user
    userBBasePath = new File(origBasePath, userB);
    userBBasePath.mkdirs();
    getConf().set(YARN_SERVICE_BASE_PATH, userBBasePath.getAbsolutePath());
    client.actionBuild(userBApp);
  } finally {
    // clean up each user's service under that user's base path
    if (userABasePath != null) {
      getConf().set(YARN_SERVICE_BASE_PATH, userABasePath.getAbsolutePath());
      client.actionStop(sameAppName, true);
      client.actionDestroy(sameAppName);
    }
    if (userBBasePath != null) {
      getConf().set(YARN_SERVICE_BASE_PATH, userBBasePath.getAbsolutePath());
      client.actionDestroy(sameAppName);
    }
    // restore the original base path so later operations are unaffected
    if (origBasePath != null) {
      getConf().set(YARN_SERVICE_BASE_PATH, origBasePath);
    }
  }
  // Need to extend this test to validate that different users can create
  // apps of exact same name. So far only create followed by build is tested.
  // Need to test create followed by create.
}
/**
 * Verifies that a single user cannot own two services with the same name:
 * both a second {@code actionBuild} and a second {@code actionCreate} of
 * "same-name" must be rejected with a descriptive message.
 *
 * <p>Bug fix: the original test passed silently when the duplicate build
 * or create unexpectedly succeeded; {@code fail()} calls are now placed
 * after the statements that are expected to throw. ({@code fail()} raises
 * an {@link AssertionError}, which the {@code catch (Exception)} blocks do
 * not swallow.)
 */
@Test
@Timeout(value = 200)
public void testCreateServiceSameNameSameUser() throws Exception {
  String sameAppName = "same-name";
  String user = UserGroupInformation.getCurrentUser().getUserName();
  System.setProperty("user.name", user);
  setupInternal(NUM_NMS);
  ServiceClient client = createClient(getConf());
  Service appA = new Service();
  appA.setName(sameAppName);
  appA.setVersion("v1");
  appA.addComponent(createComponent("comp", 1, "sleep 1000"));
  Service appB = new Service();
  appB.setName(sameAppName);
  appB.setVersion("v1");
  appB.addComponent(createComponent("comp", 1, "sleep 1000"));
  // build followed by build of the same name must fail
  try {
    client.actionBuild(appA);
    client.actionBuild(appB);
    fail("Building a second service with the same name by the same user "
        + "should have failed.");
  } catch (Exception e) {
    String expectedMsg = "Service Instance dir already exists:";
    if (e.getLocalizedMessage() != null) {
      assertThat(e.getLocalizedMessage()).contains(expectedMsg);
    } else {
      fail("Message cannot be null. It has to say - " + expectedMsg);
    }
  } finally {
    // cleanup
    client.actionDestroy(sameAppName);
  }
  // create followed by create of the same name must fail
  try {
    client.actionCreate(appA);
    waitForServiceToBeStarted(client, appA);
    client.actionCreate(appB);
    fail("Creating a second service with the same name by the same user "
        + "should have failed.");
  } catch (Exception e) {
    String expectedMsg = "Failed to create service " + sameAppName
        + ", because it already exists.";
    if (e.getLocalizedMessage() != null) {
      assertThat(e.getLocalizedMessage()).contains(expectedMsg);
    } else {
      fail("Message cannot be null. It has to say - " + expectedMsg);
    }
  } finally {
    // cleanup
    client.actionStop(sameAppName, true);
    client.actionDestroy(sameAppName);
  }
}
// Test to verify recovery of ServiceMaster after RM is restarted.
// 1. Create an example service.
// 2. Restart RM.
// 3. Fail the application attempt.
// 4. Verify ServiceMaster recovers.
/**
 * Verifies the ServiceMaster recovers its containers after an RM restart
 * followed by a failed application attempt: the second attempt must report
 * the same per-component container counts as before the failure.
 *
 * <p>Fixes: iterate {@code Multimap.keySet()} instead of {@code keys()}
 * (which repeats each key once per mapped value, running every assertion
 * redundantly), and drop the dead null-check — Guava's
 * {@code Multimap.get} returns an empty collection, never null.
 */
@Test
@Timeout(value = 200)
public void testRecoverComponentsAfterRMRestart() throws Exception {
  YarnConfiguration conf = new YarnConfiguration();
  conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
  conf.setBoolean(
      YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, true);
  conf.setLong(YarnConfiguration.NM_RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS,
      500L);
  conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS, true);
  conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_USE_RPC, true);
  conf.setInt(YarnConfiguration.RM_MAX_COMPLETED_APPLICATIONS,
      YarnConfiguration.DEFAULT_RM_MAX_COMPLETED_APPLICATIONS);
  setConf(conf);
  setupInternal(NUM_NMS);
  ServiceClient client = createClient(getConf());
  Service exampleApp = createExampleApplication();
  client.actionCreate(exampleApp);
  Multimap<String, String> containersBeforeFailure =
      waitForAllCompToBeReady(client, exampleApp);
  LOG.info("Restart the resource manager");
  getYarnCluster().restartResourceManager(
      getYarnCluster().getActiveRMIndex());
  GenericTestUtils.waitFor(() ->
      getYarnCluster().getResourceManager().getServiceState() ==
          org.apache.hadoop.service.Service.STATE.STARTED, 2000, 200000);
  assertTrue(getYarnCluster().waitForNodeManagersToConnect(5000),
      "node managers connected");
  ApplicationId exampleAppId = ApplicationId.fromString(exampleApp.getId());
  ApplicationAttemptId applicationAttemptId = client.getYarnClient()
      .getApplicationReport(exampleAppId).getCurrentApplicationAttemptId();
  LOG.info("Fail the application attempt {}", applicationAttemptId);
  client.getYarnClient().failApplicationAttempt(applicationAttemptId);
  // wait until attempt 2 is running
  GenericTestUtils.waitFor(() -> {
    try {
      ApplicationReport ar = client.getYarnClient()
          .getApplicationReport(exampleAppId);
      return ar.getCurrentApplicationAttemptId().getAttemptId() == 2 &&
          ar.getYarnApplicationState() == YarnApplicationState.RUNNING;
    } catch (YarnException | IOException e) {
      throw new RuntimeException("while waiting", e);
    }
  }, 2000, 200000);
  Multimap<String, String> containersAfterFailure = waitForAllCompToBeReady(
      client, exampleApp);
  // keySet() yields each component name exactly once; Multimap.get never
  // returns null, so an absent key simply compares with size 0.
  containersBeforeFailure.keySet().forEach(compName -> {
    assertEquals(containersBeforeFailure.get(compName).size(),
        containersAfterFailure.get(compName).size(),
        "num containers after restart for " + compName);
  });
  LOG.info("Stop/destroy service {}", exampleApp);
  client.actionStop(exampleApp.getName(), true);
  client.actionDestroy(exampleApp.getName());
}
/**
 * Verifies the canonical (non-express) upgrade flow: initiate an upgrade
 * to v2, check the upgraded spec is persisted, upgrade the component's
 * containers individually, then finalize with actionStart and confirm the
 * new env configuration took effect.
 */
@Test
@Timeout(value = 200)
public void testUpgrade() throws Exception {
  setupInternal(NUM_NMS);
  getConf().setBoolean(YARN_SERVICE_UPGRADE_ENABLED, true);
  ServiceClient client = createClient(getConf());
  Service service = createExampleApplication();
  client.actionCreate(service);
  waitForServiceToBeStable(client, service);
  // upgrade the service
  Component component = service.getComponents().iterator().next();
  service.setState(ServiceState.UPGRADING);
  service.setVersion("v2");
  component.getConfiguration().getEnv().put("key1", "val1");
  client.initiateUpgrade(service);
  // wait for service to be in upgrade state
  waitForServiceToBeInState(client, service, ServiceState.UPGRADING);
  // the upgraded spec must have been persisted under the new version
  SliderFileSystem fs = new SliderFileSystem(getConf());
  Service fromFs = ServiceApiUtil.loadServiceUpgrade(fs,
      service.getName(), service.getVersion());
  assertEquals(service.getName(), fromFs.getName());
  assertEquals(service.getVersion(), fromFs.getVersion());
  // upgrade containers
  Service liveService = client.getStatus(service.getName());
  client.actionUpgrade(service,
      liveService.getComponent(component.getName()).getContainers());
  waitForAllCompToBeReady(client, service);
  // finalize the upgrade
  client.actionStart(service.getName());
  waitForServiceToBeStable(client, service);
  // after finalization the component must be stable with the v2 env
  Service active = client.getStatus(service.getName());
  assertEquals(ComponentState.STABLE,
      active.getComponent(component.getName()).getState(), "component not stable");
  assertEquals("val1", active.getComponent(component.getName()).getConfiguration()
      .getEnv("key1"), "comp does not have new env");
  LOG.info("Stop/destroy service {}", service);
  client.actionStop(service.getName(), true);
  client.actionDestroy(service.getName());
}
/**
 * Verifies the one-shot "express" upgrade flow: the client submits the v2
 * spec once and the AM upgrades all components/containers without a
 * separate per-container upgrade step. Afterwards both components must
 * expose their new env vars and the service version must be bumped.
 */
@Test
@Timeout(value = 200)
public void testExpressUpgrade() throws Exception {
  setupInternal(NUM_NMS);
  getConf().setBoolean(YARN_SERVICE_UPGRADE_ENABLED, true);
  ServiceClient client = createClient(getConf());
  Service service = createExampleApplication();
  client.actionCreate(service);
  waitForServiceToBeStable(client, service);
  // upgrade the service: change env on both components in the same upgrade
  Component component = service.getComponents().iterator().next();
  service.setState(ServiceState.EXPRESS_UPGRADING);
  service.setVersion("v2");
  component.getConfiguration().getEnv().put("key1", "val1");
  Component component2 = service.getComponent("compb");
  component2.getConfiguration().getEnv().put("key2", "val2");
  client.actionUpgradeExpress(service);
  waitForServiceToBeExpressUpgrading(client, service);
  // wait for upgrade to complete
  waitForServiceToBeStable(client, service);
  Service active = client.getStatus(service.getName());
  assertEquals(service.getVersion(),
      active.getVersion(), "version mismatch");
  assertEquals(ComponentState.STABLE,
      active.getComponent(component.getName()).getState(), "component not stable");
  assertEquals("val1", active.getComponent(component.getName()).getConfiguration()
      .getEnv("key1"), "compa does not have new env");
  assertEquals("val2", active.getComponent(component2.getName()).getConfiguration()
      .getEnv("key2"), "compb does not have new env");
  LOG.info("Stop/destroy service {}", service);
  client.actionStop(service.getName(), true);
  client.actionDestroy(service.getName());
}
/**
 * Verifies that cancelling an in-flight upgrade rolls back: after one
 * container has been upgraded to the v2 env ("val1"), cancelling must
 * restore the original value ("val0") once the service is stable again.
 */
@Test
@Timeout(value = 200)
public void testCancelUpgrade() throws Exception {
  setupInternal(NUM_NMS);
  getConf().setBoolean(YARN_SERVICE_UPGRADE_ENABLED, true);
  ServiceClient client = createClient(getConf());
  Service service = createExampleApplication();
  Component component = service.getComponents().iterator().next();
  // original env value that the rollback must restore
  component.getConfiguration().getEnv().put("key1", "val0");
  client.actionCreate(service);
  waitForServiceToBeStable(client, service);
  // upgrade the service
  service.setState(ServiceState.UPGRADING);
  service.setVersion("v2");
  component.getConfiguration().getEnv().put("key1", "val1");
  client.initiateUpgrade(service);
  // wait for service to be in upgrade state
  waitForServiceToBeInState(client, service, ServiceState.UPGRADING);
  // upgrade 1 container
  Service liveService = client.getStatus(service.getName());
  Container container = liveService.getComponent(component.getName())
      .getContainers().iterator().next();
  client.actionUpgrade(service, Lists.newArrayList(container));
  // give the single-container upgrade a moment to start before cancelling
  // NOTE(review): fixed sleep looks timing-sensitive; confirm whether a
  // state poll could replace it on slow hosts.
  Thread.sleep(500);
  // cancel the upgrade
  client.actionCancelUpgrade(service.getName());
  waitForServiceToBeStable(client, service);
  Service active = client.getStatus(service.getName());
  assertEquals(ComponentState.STABLE,
      active.getComponent(component.getName()).getState(), "component not stable");
  // rollback must have restored the pre-upgrade env value
  assertEquals("val0",
      active.getComponent(component.getName()).getConfiguration()
          .getEnv("key1"), "comp does not have new env");
  LOG.info("Stop/destroy service {}", service);
  client.actionStop(service.getName(), true);
  client.actionDestroy(service.getName());
}
// Test to verify ANTI_AFFINITY placement policy
// 1. Start mini cluster
// with 3 NMs and scheduler placement-constraint handler
// 2. Create an example service with 3 containers
// 3. Verify no more than 1 container comes up in each of the 3 NMs
// 4. Flex the component to 4 containers
// 5. Verify that the 4th container does not even get allocated since there
// are only 3 NMs
/**
 * End-to-end check of the ANTI_AFFINITY placement policy (steps described
 * in the comment above): with 3 NMs, a 3-container component comes up with
 * at most one container per node; flexing beyond the node count leaves the
 * extra containers unallocated (component stuck in FLEXING) until the
 * count is flexed back down to 3.
 */
@Test
@Timeout(value = 200)
public void testCreateServiceWithPlacementPolicy() throws Exception {
  // We need to enable scheduler placement-constraint at the cluster level to
  // let apps use placement policies.
  YarnConfiguration conf = new YarnConfiguration();
  conf.set(YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_HANDLER,
      YarnConfiguration.SCHEDULER_RM_PLACEMENT_CONSTRAINTS_HANDLER);
  conf.setInt(YarnConfiguration.RM_MAX_COMPLETED_APPLICATIONS,
      YarnConfiguration.DEFAULT_RM_MAX_COMPLETED_APPLICATIONS);
  setConf(conf);
  setupInternal(3);
  ServiceClient client = createClient(getConf());
  Service exampleApp = new Service();
  exampleApp.setName("example-app");
  exampleApp.setVersion("v1");
  Component comp = createComponent("compa", 3L, "sleep 1000");
  // anti-affinity against the component's own tag: no two "compa"
  // containers may be placed on the same node
  PlacementPolicy pp = new PlacementPolicy();
  PlacementConstraint pc = new PlacementConstraint();
  pc.setName("CA1");
  pc.setTargetTags(Collections.singletonList("compa"));
  pc.setScope(PlacementScope.NODE);
  pc.setType(PlacementType.ANTI_AFFINITY);
  pp.setConstraints(Collections.singletonList(pc));
  comp.setPlacementPolicy(pp);
  exampleApp.addComponent(comp);
  client.actionCreate(exampleApp);
  waitForServiceToBeStable(client, exampleApp);
  // Check service is stable and all 3 containers are running
  Service service = client.getStatus(exampleApp.getName());
  Component component = service.getComponent("compa");
  assertEquals(ServiceState.STABLE,
      service.getState(), "Service state should be STABLE");
  assertEquals(3,
      component.getContainers().size(), "3 containers are expected to be running");
  // Prepare a map of non-AM containers for later lookup
  Set<String> nonAMContainerIdSet = new HashSet<>();
  for (Container cont : component.getContainers()) {
    nonAMContainerIdSet.add(cont.getId());
  }
  // Verify that no more than 1 non-AM container came up on each of the 3 NMs
  Set<String> hosts = new HashSet<>();
  ApplicationReport report = client.getYarnClient()
      .getApplicationReport(ApplicationId.fromString(exampleApp.getId()));
  GetContainersRequest req = GetContainersRequest
      .newInstance(report.getCurrentApplicationAttemptId());
  ResourceManager rm = getYarnCluster().getResourceManager();
  for (ContainerReport contReport : rm.getClientRMService().getContainers(req)
      .getContainerList()) {
    // skip the AM container — only component containers count
    if (!nonAMContainerIdSet
        .contains(contReport.getContainerId().toString())) {
      continue;
    }
    if (hosts.contains(contReport.getNodeHttpAddress())) {
      fail("Container " + contReport.getContainerId()
          + " came up in the same host as another container.");
    } else {
      hosts.add(contReport.getNodeHttpAddress());
    }
  }
  // Flex compa up to 5, which is more containers than the no of NMs
  Map<String, Long> compCounts = new HashMap<>();
  compCounts.put("compa", 5L);
  exampleApp.getComponent("compa").setNumberOfContainers(5L);
  client.flexByRestService(exampleApp.getName(), compCounts);
  try {
    // 10 secs is enough for the container to be started. The down side of
    // this test is that it has to wait that long. Setting a higher wait time
    // will add to the total time taken by tests to run.
    waitForServiceToBeStable(client, exampleApp, 10000);
    fail("Service should not be in a stable state. It should throw "
        + "a timeout exception.");
  } catch (Exception e) {
    // Check that service state is not STABLE and only 3 containers are
    // running and the fourth one should not get allocated.
    service = client.getStatus(exampleApp.getName());
    component = service.getComponent("compa");
    assertNotEquals(ServiceState.STABLE, service.getState(),
        "Service state should not be STABLE");
    assertEquals(ComponentState.FLEXING, component.getState(),
        "Component state should be FLEXING");
    assertEquals(3, component.getContainers().size(),
        "3 containers are expected to be running");
  }
  // Flex compa down to 4 now, which is still more containers than the no of
  // NMs. This tests the usecase that flex down does not kill any of the
  // currently running containers since the required number of containers are
  // still higher than the currently running number of containers. However,
  // component state will still be FLEXING and service state not STABLE.
  compCounts = new HashMap<>();
  compCounts.put("compa", 4L);
  exampleApp.getComponent("compa").setNumberOfContainers(4L);
  client.flexByRestService(exampleApp.getName(), compCounts);
  try {
    // 10 secs is enough for the container to be started. The down side of
    // this test is that it has to wait that long. Setting a higher wait time
    // will add to the total time taken by tests to run.
    waitForServiceToBeStable(client, exampleApp, 10000);
    fail("Service should not be in a stable state. It should throw "
        + "a timeout exception.");
  } catch (Exception e) {
    // Check that service state is not STABLE and only 3 containers are
    // running and the fourth one should not get allocated.
    service = client.getStatus(exampleApp.getName());
    component = service.getComponent("compa");
    assertNotEquals(ServiceState.STABLE, service.getState(),
        "Service state should not be STABLE");
    assertEquals(ComponentState.FLEXING, component.getState(),
        "Component state should be FLEXING");
    assertEquals(3, component.getContainers().size(),
        "3 containers are expected to be running");
  }
  // Finally flex compa down to 3, which is exactly the number of containers
  // currently running. This will bring the component and service states to
  // STABLE.
  compCounts = new HashMap<>();
  compCounts.put("compa", 3L);
  exampleApp.getComponent("compa").setNumberOfContainers(3L);
  client.flexByRestService(exampleApp.getName(), compCounts);
  waitForServiceToBeStable(client, exampleApp);
  LOG.info("Stop/destroy service {}", exampleApp);
  client.actionStop(exampleApp.getName(), true);
  client.actionDestroy(exampleApp.getName());
}
/** A SIGTERM to the AM must not bring down the service; see {@link #runAMSignalTest}. */
@Test
@Timeout(value = 200)
public void testAMSigtermDoesNotKillApplication() throws Exception {
  runAMSignalTest(SignalContainerCommand.GRACEFUL_SHUTDOWN);
}
/** A SIGKILL to the AM must not bring down the service; see {@link #runAMSignalTest}. */
@Test
@Timeout(value = 200)
public void testAMSigkillDoesNotKillApplication() throws Exception {
  runAMSignalTest(SignalContainerCommand.FORCEFUL_SHUTDOWN);
}
/**
 * Signals the current AM container with the given command and verifies the
 * application survives it: a new application attempt becomes current, the
 * service returns to STABLE, and the component containers have the same
 * ids as before the signal (i.e. containers were recovered by the new AM,
 * not restarted).
 *
 * @param signal the signal to deliver to the AM container
 */
public void runAMSignalTest(SignalContainerCommand signal) throws Exception {
  setupInternal(NUM_NMS);
  ServiceClient client = createClient(getConf());
  Service exampleApp = createExampleApplication();
  client.actionCreate(exampleApp);
  waitForServiceToBeStable(client, exampleApp);
  Service appStatus1 = client.getStatus(exampleApp.getName());
  ApplicationId exampleAppId = ApplicationId.fromString(appStatus1.getId());
  YarnClient yarnClient = createYarnClient(getConf());
  ApplicationReport applicationReport = yarnClient.getApplicationReport(
      exampleAppId);
  ApplicationAttemptId firstAttemptId = applicationReport
      .getCurrentApplicationAttemptId();
  ApplicationAttemptReport attemptReport = yarnClient
      .getApplicationAttemptReport(firstAttemptId);
  // the AM should not perform a graceful shutdown since the operation was not
  // initiated through the service client
  yarnClient.signalToContainer(attemptReport.getAMContainerId(), signal);
  GenericTestUtils.waitFor(() -> {
    try {
      ApplicationReport ar = client.getYarnClient()
          .getApplicationReport(exampleAppId);
      YarnApplicationState state = ar.getYarnApplicationState();
      // the app may transiently be ACCEPTED while the new attempt spins
      // up; any other state means the application died — fail fast
      assertTrue(state == YarnApplicationState.RUNNING ||
          state == YarnApplicationState.ACCEPTED);
      if (state != YarnApplicationState.RUNNING) {
        return false;
      }
      // keep polling until a new attempt has taken over
      if (ar.getCurrentApplicationAttemptId() == null ||
          ar.getCurrentApplicationAttemptId().equals(firstAttemptId)) {
        return false;
      }
      Service appStatus2 = client.getStatus(exampleApp.getName());
      if (appStatus2.getState() != ServiceState.STABLE) {
        return false;
      }
      // identical container ids => containers survived the AM restart
      assertEquals(getSortedContainerIds(appStatus1).toString(),
          getSortedContainerIds(appStatus2).toString());
      return true;
    } catch (YarnException | IOException e) {
      throw new RuntimeException("while waiting", e);
    }
  }, 2000, 200000);
}
/**
 * Collects the ids of every container across all components of the given
 * service and returns them in sorted order.
 */
private static List<String> getSortedContainerIds(Service s) {
  List<String> ids = new ArrayList<>();
  s.getComponents().forEach(
      comp -> comp.getContainers().forEach(cont -> ids.add(cont.getId())));
  Collections.sort(ids);
  return ids;
}
// Test to verify component health threshold monitor. It uses anti-affinity
// placement policy to make it easier to simulate container failure by
// allocating more containers than the no of NMs.
// 1. Start mini cluster with 3 NMs and scheduler placement-constraint handler
// 2. Create an example service of 3 containers with anti-affinity placement
// policy and health threshold = 65%, window = 3 secs, init-delay = 0 secs,
// poll-frequency = 1 secs
// 3. Flex the component to 4 containers. This makes health = 75%, so based on
// threshold the service will continue to run beyond the window of 3 secs.
// 4. Flex the component to 5 containers. This makes health = 60%, so based on
// threshold the service will be stopped after the window of 3 secs.
/**
 * Exercises the component health threshold monitor (steps described in the
 * comment above). Anti-affinity with 3 NMs guarantees at most 3 containers
 * can ever run, so flexing to 4 yields 75% health (above the 65% threshold
 * — service keeps running) and flexing to 5 yields 60% (below threshold —
 * service is stopped and ends up FAILED).
 */
@Test
@Timeout(value = 200)
public void testComponentHealthThresholdMonitor() throws Exception {
  // We need to enable scheduler placement-constraint at the cluster level to
  // let apps use placement policies.
  YarnConfiguration conf = new YarnConfiguration();
  conf.set(YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_HANDLER,
      YarnConfiguration.SCHEDULER_RM_PLACEMENT_CONSTRAINTS_HANDLER);
  conf.setInt(YarnConfiguration.RM_MAX_COMPLETED_APPLICATIONS,
      YarnConfiguration.DEFAULT_RM_MAX_COMPLETED_APPLICATIONS);
  conf.setInt(YarnConfiguration.NM_VCORES, 1);
  setConf(conf);
  setupInternal(3);
  ServiceClient client = createClient(getConf());
  Service exampleApp = new Service();
  exampleApp.setName("example-app");
  exampleApp.setVersion("v1");
  Component comp = createComponent("compa", 3L, "sleep 1000");
  // anti-affinity so no two compa containers share a node — this is what
  // makes the 4th/5th container unallocatable on a 3-NM cluster
  PlacementPolicy pp = new PlacementPolicy();
  PlacementConstraint pc = new PlacementConstraint();
  pc.setName("CA1");
  pc.setTargetTags(Collections.singletonList("compa"));
  pc.setScope(PlacementScope.NODE);
  pc.setType(PlacementType.ANTI_AFFINITY);
  pp.setConstraints(Collections.singletonList(pc));
  comp.setPlacementPolicy(pp);
  // health monitor: threshold 65%, 3s window, no init delay, 1s polling
  Configuration config = new Configuration();
  config.setProperty(CONTAINER_HEALTH_THRESHOLD_PERCENT, "65");
  config.setProperty(CONTAINER_HEALTH_THRESHOLD_WINDOW_SEC, "3");
  config.setProperty(CONTAINER_HEALTH_THRESHOLD_INIT_DELAY_SEC, "0");
  config.setProperty(CONTAINER_HEALTH_THRESHOLD_POLL_FREQUENCY_SEC, "1");
  config.setProperty(DEFAULT_READINESS_CHECK_ENABLED, "false");
  comp.setConfiguration(config);
  exampleApp.addComponent(comp);
  // Make sure AM does not come up after service is killed for this test
  Configuration serviceConfig = new Configuration();
  serviceConfig.setProperty(AM_RESTART_MAX, "1");
  exampleApp.setConfiguration(serviceConfig);
  client.actionCreate(exampleApp);
  waitForServiceToBeStable(client, exampleApp);
  // Check service is stable and all 3 containers are running
  Service service = client.getStatus(exampleApp.getName());
  Component component = service.getComponent("compa");
  assertEquals(ServiceState.STABLE,
      service.getState(), "Service state should be STABLE");
  assertEquals(3, component.getContainers().size(),
      "3 containers are expected to be running");
  // Flex compa up to 4 - will make health 75% (3 out of 4 running), but still
  // above threshold of 65%, so service will continue to run.
  Map<String, Long> compCounts = new HashMap<>();
  compCounts.put("compa", 4L);
  exampleApp.getComponent("compa").setNumberOfContainers(4L);
  client.flexByRestService(exampleApp.getName(), compCounts);
  try {
    // Wait for 6 secs (window 3 secs + 1 for next poll + 2 for buffer). Since
    // the service will never go to stable state (because of anti-affinity the
    // 4th container will never be allocated) it will timeout. However, after
    // the timeout the service should continue to run since health is 75%
    // which is above the threshold of 65%.
    waitForServiceToBeStable(client, exampleApp, 6000);
    fail("Service should not be in a stable state. It should throw "
        + "a timeout exception.");
  } catch (Exception e) {
    // Check that service state is STARTED and only 3 containers are running
    service = client.getStatus(exampleApp.getName());
    component = service.getComponent("compa");
    assertEquals(ServiceState.STARTED, service.getState(),
        "Service state should be STARTED");
    assertEquals(ComponentState.FLEXING, component.getState(),
        "Component state should be FLEXING");
    assertEquals(3, component.getContainers().size(),
        "3 containers are expected to be running");
  }
  // Flex compa up to 5 - will make health 60% (3 out of 5 running), so
  // service will stop since it is below threshold of 65%.
  compCounts.put("compa", 5L);
  exampleApp.getComponent("compa").setNumberOfContainers(5L);
  client.flexByRestService(exampleApp.getName(), compCounts);
  try {
    // Wait for 14 secs (window 3 secs + 1 for next poll + 2 for buffer + 5
    // secs of service wait before shutting down + 3 secs app cleanup so that
    // API returns that service is in FAILED state). Note, because of
    // anti-affinity the 4th and 5th container will never be allocated.
    waitForServiceToBeInState(client, exampleApp, ServiceState.FAILED,
        14000);
  } catch (Exception e) {
    fail("Should not have thrown exception");
  }
  LOG.info("Destroy service {}", exampleApp);
  client.actionDestroy(exampleApp.getName());
}
// Check containers launched are in dependency order
// Get all containers into a list and sort based on container launch time e.g.
// compa-c1, compa-c2, compb-c1, compb-c2;
// check that the container's launch time are align with the dependencies.
/**
 * Asserts containers were launched in component dependency order: when all
 * containers across components are sorted by launch time, the first
 * numberOfContainers entries must belong to compOrder[0], the next batch
 * to compOrder[1], and so on.
 *
 * @param compOrder component names in expected launch order
 */
private void checkContainerLaunchDependencies(ServiceClient client,
    Service exampleApp, String... compOrder)
    throws IOException, YarnException {
  Service retrievedApp = client.getStatus(exampleApp.getName());
  List<Container> containerList = new ArrayList<>();
  for (Component component : retrievedApp.getComponents()) {
    containerList.addAll(component.getContainers());
  }
  // sort based on launchTime
  containerList
      .sort((o1, o2) -> o1.getLaunchTime().compareTo(o2.getLaunchTime()));
  LOG.info("containerList: " + containerList);
  // check the containers are in the dependency order.
  int index = 0;
  for (String comp : compOrder) {
    long num = retrievedApp.getComponent(comp).getNumberOfContainers();
    for (int i = 0; i < num; i++) {
      // instance names look like "<compName>-<instanceId>"; strip the
      // trailing "-<id>" to recover the component name
      String compInstanceName = containerList.get(index).getComponentInstanceName();
      String compName =
          compInstanceName.substring(0, compInstanceName.lastIndexOf('-'));
      assertEquals(comp, compName);
      index++;
    }
  }
}
/**
 * Flexes both example components ("compa" and "compb") to the given
 * container count via the REST service and returns the count map that was
 * submitted.
 */
private Map<String, Long> flexComponents(ServiceClient client,
    Service exampleApp, long count) throws YarnException, IOException {
  Map<String, Long> targetCounts = new HashMap<>();
  for (String compName : new String[] {"compa", "compb"}) {
    targetCounts.put(compName, count);
    // flex will update the persisted conf to reflect latest number of
    // containers, so keep the local spec in sync too
    exampleApp.getComponent(compName).setNumberOfContainers(count);
  }
  client.flexByRestService(exampleApp.getName(), targetCounts);
  return targetCounts;
}
// Check each component's comp instances name are in sequential order.
// E.g. If there are two instances compA-1 and compA-2
// When flex up to 4 instances, it should be compA-1 , compA-2, compA-3, compA-4
// When flex down to 3 instances, it should be compA-1 , compA-2, compA-3.
/**
 * Waits until every component has its expected number of containers, then
 * verifies each component's instance names form a contiguous sequence
 * starting at 0 (e.g. compa-0, compa-1, ...).
 */
private void checkCompInstancesInOrder(ServiceClient client,
    Service exampleApp) throws IOException, YarnException,
    TimeoutException, InterruptedException {
  waitForContainers(client, exampleApp);
  Service service = client.getStatus(exampleApp.getName());
  for (Component comp : service.getComponents()) {
    checkEachCompInstancesInOrder(comp, exampleApp.getName());
  }
}
/**
 * Polls the service status until every component reports exactly its
 * requested number of containers. Any exception from the status call is
 * treated as "not ready yet" and polling continues.
 */
private void waitForContainers(ServiceClient client, Service exampleApp)
    throws TimeoutException, InterruptedException {
  GenericTestUtils.waitFor(() -> {
    try {
      Service status = client.getStatus(exampleApp.getName());
      return status.getComponents().stream().allMatch(
          comp -> comp.getContainers().size() == comp.getNumberOfContainers());
    } catch (Exception e) {
      return false;
    }
  }, 2000, 200000);
}
/**
 * Verifies a single component's container instance names are sequential
 * ("<comp>-0", "<comp>-1", ...) and that each container has a registry
 * entry in ZooKeeper. ZK registration is asynchronous, so each path is
 * polled rather than checked once.
 */
private void checkEachCompInstancesInOrder(Component component, String
    serviceName) throws TimeoutException, InterruptedException {
  // TreeSet sorts the names, so iteration below is in name order
  TreeSet<String> instances = new TreeSet<>();
  for (Container container : component.getContainers()) {
    instances.add(container.getComponentInstanceName());
    String componentZKPath = RegistryUtils.componentPath(RegistryUtils
        .currentUser(), YarnServiceConstants.APP_TYPE, serviceName,
        RegistryPathUtils.encodeYarnID(container.getId()));
    // wait for the container's registry entry to appear
    GenericTestUtils.waitFor(() -> {
      try {
        return getCuratorService().zkPathExists(componentZKPath);
      } catch (IOException e) {
        return false;
      }
    }, 1000, 60000);
  }
  // instance names must be exactly <name>-0, <name>-1, ... with no gaps
  int i = 0;
  for (String s : instances) {
    assertThat(s).isEqualTo(component.getName() + "-" + i);
    i++;
  }
}
/**
 * With the RM configured to retain zero completed applications, a stopped
 * service is unknown to the RM: actionStop may surface
 * ApplicationNotFoundException (tolerated), and a subsequent actionStart
 * must still relaunch the service from its persisted spec.
 */
@Test
@Timeout(value = 200)
public void testRestartServiceForNonExistingInRM() throws Exception {
  YarnConfiguration conf = new YarnConfiguration();
  // retain no completed apps so the stopped service vanishes from the RM
  conf.setInt(YarnConfiguration.RM_MAX_COMPLETED_APPLICATIONS, 0);
  setConf(conf);
  setupInternal(NUM_NMS);
  ServiceClient client = createClient(getConf());
  Service exampleApp = createExampleApplication();
  client.actionCreate(exampleApp);
  waitForServiceToBeStable(client, exampleApp);
  try {
    client.actionStop(exampleApp.getName(), true);
  } catch (ApplicationNotFoundException e) {
    // expected when the RM has already forgotten the application
    LOG.info("ignore ApplicationNotFoundException during stopping");
  }
  client.actionStart(exampleApp.getName());
  waitForServiceToBeStable(client, exampleApp);
  Service service = client.getStatus(exampleApp.getName());
  assertEquals(ServiceState.STABLE, service.getState(),
      "Restarted service state should be STABLE");
}
/**
 * Verifies the AM failure validity interval: with AM_RESTART_MAX of 2,
 * killing the AM twice would normally exhaust the attempts, but because
 * the second kill happens after the 1s validity interval has elapsed, the
 * first failure is no longer counted and the service recovers both times.
 */
@Test
@Timeout(value = 200)
public void testAMFailureValidity() throws Exception {
  setupInternal(NUM_NMS);
  ServiceClient client = createClient(getConf());
  Service exampleApp = new Service();
  exampleApp.setName("example-app");
  exampleApp.setVersion("v1");
  exampleApp.addComponent(createComponent("compa", 2, "sleep 1000"));
  Configuration serviceConfig = new Configuration();
  serviceConfig.setProperty(AM_RESTART_MAX, "2");
  serviceConfig.setProperty(AM_FAILURES_VALIDITY_INTERVAL, "1000");
  exampleApp.setConfiguration(serviceConfig);
  client.actionCreate(exampleApp);
  waitForServiceToBeStable(client, exampleApp);
  Service appStatus1 = client.getStatus(exampleApp.getName());
  ApplicationId exampleAppId = ApplicationId.fromString(appStatus1.getId());
  YarnClient yarnClient = createYarnClient(getConf());
  // kill AM1
  ApplicationReport applicationReport = yarnClient.getApplicationReport(
      exampleAppId);
  ApplicationAttemptReport attemptReport = yarnClient
      .getApplicationAttemptReport(applicationReport
          .getCurrentApplicationAttemptId());
  yarnClient.signalToContainer(attemptReport.getAMContainerId(),
      SignalContainerCommand.GRACEFUL_SHUTDOWN);
  waitForServiceToBeStable(client, exampleApp);
  assertEquals(ServiceState.STABLE, client.getStatus(
      exampleApp.getName()).getState());
  // kill AM2 after 'yarn.service.am-failure.validity-interval-ms'
  // (sleep 2s > 1s interval so the first failure has expired)
  Thread.sleep(2000);
  applicationReport = yarnClient.getApplicationReport(exampleAppId);
  attemptReport = yarnClient.getApplicationAttemptReport(applicationReport
      .getCurrentApplicationAttemptId());
  yarnClient.signalToContainer(attemptReport.getAMContainerId(),
      SignalContainerCommand.GRACEFUL_SHUTDOWN);
  waitForServiceToBeStable(client, exampleApp);
  assertEquals(ServiceState.STABLE, client.getStatus(
      exampleApp.getName()).getState());
}
/**
 * Builds a minimal service spec ("example-app", v1) containing a single
 * one-instance "sleep" component with the requested memory (in MB) and
 * one vcore.
 *
 * @param memory memory to request for the component, in MB
 */
public Service createServiceWithSingleComp(int memory){
  Service service = new Service();
  service.setName("example-app");
  service.setVersion("v1");
  org.apache.hadoop.yarn.service.api.records.Resource resource =
      new Resource();
  resource.setMemory(String.valueOf(memory));
  resource.setCpus(1);
  Component sleeper = new Component();
  sleeper.setName("sleep");
  sleeper.setNumberOfContainers(1L);
  sleeper.setLaunchCommand("sleep 1000");
  sleeper.setResource(resource);
  service.addComponent(sleeper);
  return service;
}
/**
 * Verifies that a service whose submission fails (memory request beyond
 * the cluster maximum) does not leave behind state that blocks a later
 * service of the same name from being created successfully.
 *
 * <p>Cleanups: the pointless {@code assertTrue(true)} in the
 * expected-exception branch is removed, and the catch-and-fail around the
 * second create is dropped so an unexpected {@code SliderException}
 * propagates with its full stack trace instead of just a message.
 */
@Test
@Timeout(value = 200)
public void testServiceSameNameWithFailure() throws Exception {
  setupInternal(NUM_NMS);
  ServiceClient client = createClient(getConf());
  try {
    client.actionCreate(createServiceWithSingleComp(1024000));
    fail("Service should throw YarnException as memory is " +
        "configured as 1000GB, which is more than allowed");
  } catch (YarnException e) {
    // expected: the resource request exceeds the maximum allocation
  }
  Service service = createServiceWithSingleComp(128);
  // If files of the failed service above were not cleaned up, this create
  // would throw a SliderException; let it propagate so the test fails
  // with the full cause.
  client.actionCreate(service);
  waitForServiceToBeStable(client, service);
  client.actionStop(service.getName(), true);
  client.actionDestroy(service.getName());
}
}
| TestYarnNativeServices |
java | quarkusio__quarkus | extensions/vertx-http/deployment/src/test/java/io/quarkus/vertx/http/customizers/HttpServerOptionsCustomizerTest.java | {
"start": 580,
"end": 1115
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(MyBean.class, MyCustomizer.class));
@Inject
MyCustomizer customizer;
@Test
void test() {
Assertions.assertThat(customizer.count()).isEqualTo(2);
Assertions.assertThat(RestAssured.get("http://localhost:9998").body().asString()).isEqualTo("hello");
}
@ApplicationScoped
public static | HttpServerOptionsCustomizerTest |
java | apache__flink | flink-table/flink-table-common/src/main/java/org/apache/flink/table/factories/FunctionDefinitionFactory.java | {
"start": 1370,
"end": 1839
} | class ____.
*
* @param name name of the {@link CatalogFunction}
* @param catalogFunction the catalog function
* @param context the {@link Context} for creating function definition
* @return a {@link FunctionDefinition}
*/
FunctionDefinition createFunctionDefinition(
String name, CatalogFunction catalogFunction, Context context);
/** Context provided when a function definition is created. */
@PublicEvolving
| name |
java | quarkusio__quarkus | core/runtime/src/main/java/io/quarkus/logging/Log.java | {
"start": 47482,
"end": 47929
} | class ____
* @param message the message
* @param t the throwable
*/
public static void info(String loggerFqcn, Object message, Throwable t) {
if (shouldFail) {
throw fail();
}
Logger.getLogger(stackWalker.getCallerClass()).info(loggerFqcn, message, t);
}
/**
* Issue a log message with parameters and a throwable with a level of INFO.
*
* @param loggerFqcn the logger | name |
java | junit-team__junit5 | platform-tests/src/test/java/org/junit/platform/commons/util/AnnotationUtilsTests.java | {
"start": 27522,
"end": 27655
} | class ____ implements TaggedInterface {
}
@Tags({ @Tag("a"), @Tag("b"), @Tag("c") })
@Tag("d")
static | LocalTagOnTaggedInterfaceClass |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/query/criteria/CriteriaIdVersionTest.java | {
"start": 670,
"end": 2399
} | class ____ {
@Test
void test(SessionFactoryScope scope) {
scope.getSessionFactory().getSchemaManager().truncate();
scope.inTransaction( session -> {
session.persist( new Thing() );
} );
scope.inSession( session -> {
var cb = session.getCriteriaBuilder();
var cq = cb.createQuery( Long.class );
var root = cq.from( Thing.class );
cq.select( cb.id(root).as( Long.class ) );
assertEquals( 2L, session.createSelectionQuery( cq ).getSingleResult() );
} );
scope.inSession( session -> {
var cb = session.getCriteriaBuilder();
var cq = cb.createQuery( Long.class );
var root = cq.from( Thing.class );
cq.select( cb.version(root).as( Long.class ) );
assertEquals( 3L, session.createSelectionQuery( cq ).getSingleResult() );
} );
scope.inSession( session -> {
var cb = session.getCriteriaBuilder();
var cq = cb.createQuery( Long.class );
var root = cq.from( Thing.class );
cq.select( root.id().asLong() );
assertEquals( 2L, session.createSelectionQuery( cq ).getSingleResult() );
} );
}
@Test
void testPath(SessionFactoryScope scope) {
scope.getSessionFactory().getSchemaManager().truncate();
scope.inTransaction( session -> {
Thing thing = new Thing();
Thing otherThing = new Thing();
otherThing.id = 5;
thing.other = otherThing;
session.persist( thing );
} );
scope.inSession( session -> {
var cb = session.getCriteriaBuilder();
var cq = cb.createQuery( Long.class );
var root = cq.from( Thing.class );
cq.select( cb.id( root.get("other") ).as( Long.class ) );
cq.where( root.get("other").isNotNull() );
assertEquals( 5L, session.createSelectionQuery( cq ).getSingleResult() );
} );
}
@Entity
static | CriteriaIdVersionTest |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFailureStoreDefinition.java | {
"start": 1163,
"end": 1285
} | class ____ contains the mappings and settings logic for failure store indices that are a part of data streams.
*/
public | that |
java | apache__flink | flink-table/flink-table-common/src/main/java/org/apache/flink/table/types/logical/TimeType.java | {
"start": 2021,
"end": 5232
} | class ____ extends LogicalType {
private static final long serialVersionUID = 1L;
public static final int MIN_PRECISION = 0;
public static final int MAX_PRECISION = 9;
public static final int DEFAULT_PRECISION = 0;
private static final String FORMAT = "TIME(%d)";
private static final Set<String> NULL_OUTPUT_CONVERSION =
conversionSet(
java.sql.Time.class.getName(),
java.time.LocalTime.class.getName(),
Integer.class.getName(),
Long.class.getName());
private static final Set<String> NOT_NULL_INPUT_OUTPUT_CONVERSION =
conversionSet(
java.sql.Time.class.getName(),
java.time.LocalTime.class.getName(),
Integer.class.getName(),
int.class.getName(),
Long.class.getName(),
long.class.getName());
private static final Class<?> DEFAULT_CONVERSION = java.time.LocalTime.class;
private final int precision;
public TimeType(boolean isNullable, int precision) {
super(isNullable, LogicalTypeRoot.TIME_WITHOUT_TIME_ZONE);
if (precision < MIN_PRECISION || precision > MAX_PRECISION) {
throw new ValidationException(
String.format(
"Time precision must be between %d and %d (both inclusive).",
MIN_PRECISION, MAX_PRECISION));
}
this.precision = precision;
}
public TimeType(int precision) {
this(true, precision);
}
public TimeType() {
this(DEFAULT_PRECISION);
}
public int getPrecision() {
return precision;
}
@Override
public LogicalType copy(boolean isNullable) {
return new TimeType(isNullable, precision);
}
@Override
public String asSerializableString() {
return withNullability(FORMAT, precision);
}
@Override
public boolean supportsInputConversion(Class<?> clazz) {
return NOT_NULL_INPUT_OUTPUT_CONVERSION.contains(clazz.getName());
}
@Override
public boolean supportsOutputConversion(Class<?> clazz) {
if (isNullable()) {
return NULL_OUTPUT_CONVERSION.contains(clazz.getName());
}
return NOT_NULL_INPUT_OUTPUT_CONVERSION.contains(clazz.getName());
}
@Override
public Class<?> getDefaultConversion() {
return DEFAULT_CONVERSION;
}
@Override
public List<LogicalType> getChildren() {
return Collections.emptyList();
}
@Override
public <R> R accept(LogicalTypeVisitor<R> visitor) {
return visitor.visit(this);
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
if (!super.equals(o)) {
return false;
}
TimeType timeType = (TimeType) o;
return precision == timeType.precision;
}
@Override
public int hashCode() {
return Objects.hash(super.hashCode(), precision);
}
}
| TimeType |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/deser/creators/InnerClassCreatorTest.java | {
"start": 1537,
"end": 1660
} | class ____ {
@JsonCreator
public InnerSomething1502() {}
}
}
static | InnerSomething1502 |
java | resilience4j__resilience4j | resilience4j-spring/src/main/java/io/github/resilience4j/utils/RxJava2OnClasspathCondition.java | {
"start": 954,
"end": 1803
} | class ____ implements Condition {
private static final Logger logger = LoggerFactory.getLogger(RxJava2OnClasspathCondition.class);
private static final String CLASS_TO_CHECK = "io.reactivex.Flowable";
private static final String R4J_RXJAVA = "io.github.resilience4j.AbstractSubscriber";
@Override
public boolean matches(ConditionContext context, AnnotatedTypeMetadata metadata) {
return AspectUtil.checkClassIfFound(context, CLASS_TO_CHECK, (e) -> logger.debug(
"RxJava2 related Aspect extensions are not activated, because RxJava2 is not on the classpath."))
&& AspectUtil.checkClassIfFound(context, R4J_RXJAVA, (e) -> logger.debug(
"RxJava2 related Aspect extensions are not activated because Resilience4j RxJava2 module is not on the classpath."));
}
}
| RxJava2OnClasspathCondition |
java | elastic__elasticsearch | x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/TopDoubleIntAggregator.java | {
"start": 3149,
"end": 4386
} | class ____ implements GroupingAggregatorState {
private final DoubleIntBucketedSort sort;
private GroupingState(BigArrays bigArrays, int limit, boolean ascending) {
this.sort = new DoubleIntBucketedSort(bigArrays, ascending ? SortOrder.ASC : SortOrder.DESC, limit);
}
public void add(int groupId, double value, int outputValue) {
sort.collect(value, outputValue, groupId);
}
@Override
public void toIntermediate(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) {
sort.toBlocks(driverContext.blockFactory(), blocks, offset, selected);
}
Block toBlock(BlockFactory blockFactory, IntVector selected) {
Block[] blocks = new Block[2];
sort.toBlocks(blockFactory, blocks, 0, selected);
Releasables.close(blocks[0]);
return blocks[1];
}
@Override
public void enableGroupIdTracking(SeenGroupIds seen) {
// we figure out seen values from nulls on the values block
}
@Override
public void close() {
Releasables.closeExpectNoException(sort);
}
}
public static | GroupingState |
java | quarkusio__quarkus | extensions/agroal/deployment/src/test/java/io/quarkus/agroal/test/ValidationQueryTest.java | {
"start": 381,
"end": 1395
} | class ____ {
private static final String testLogPath = "target/validation-query-test.log";
//tag::injection[]
@Inject
AgroalDataSource defaultDataSource;
//end::injection[]
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withConfigurationResource("application-datasource-with-validation.properties")
.overrideRuntimeConfigKey("quarkus.datasource.jdbc.url",
"jdbc:h2:tcp://localhost/mem:default?queryLog=%s;DATABASE_EVENT_LISTENER=io.quarkus.agroal.test.QueryLoggingH2DBEventListener" //Register QueryLoggingH2DBEventListener
.formatted(testLogPath));
@Test
public void testQueryTimeoutIsApplied() throws SQLException {
//Test connection is acquirable
try (Connection connection = defaultDataSource.getConnection()) {
//nop
}
assertThat(new File(testLogPath)).content().contains("SET QUERY_TIMEOUT ?");
}
}
| ValidationQueryTest |
java | google__guava | guava-tests/test/com/google/common/io/SourceSinkFactories.java | {
"start": 14954,
"end": 15423
} | class ____ extends Jdk7FileFactory implements ByteSourceFactory {
@Override
public ByteSource createSource(byte[] bytes) throws IOException {
checkNotNull(bytes);
Path file = createFile();
java.nio.file.Files.write(file, bytes);
return MoreFiles.asByteSource(file);
}
@Override
public byte[] getExpected(byte[] bytes) {
return checkNotNull(bytes);
}
}
@AndroidIncompatible
private static | PathByteSourceFactory |
java | spring-projects__spring-boot | core/spring-boot-testcontainers/src/dockerTest/java/org/springframework/boot/testcontainers/lifecycle/TestcontainersLifecycleOrderIntegrationTests.java | {
"start": 2628,
"end": 2844
} | class ____ {
@Bean
@ServiceConnection
RedisContainer redisContainer() {
return TestImage.container(EventRecordingRedisContainer.class);
}
}
@Configuration(proxyBeanMethods = false)
static | ContainerConfig |
java | apache__flink | flink-metrics/flink-metrics-slf4j/src/main/java/org/apache/flink/events/slf4j/Slf4jEventReporterFactory.java | {
"start": 1098,
"end": 1298
} | class ____ implements EventReporterFactory {
@Override
public EventReporter createEventReporter(Properties properties) {
return new Slf4jEventReporter();
}
}
| Slf4jEventReporterFactory |
java | elastic__elasticsearch | server/src/internalClusterTest/java/org/elasticsearch/update/UpdateIT.java | {
"start": 2725,
"end": 3299
} | class ____ extends ESIntegTestCase {
private static final String UPDATE_SCRIPTS = "update_scripts";
private static final String PUT_VALUES_SCRIPT = "put_values";
private static final String FIELD_INC_SCRIPT = "field_inc";
private static final String UPSERT_SCRIPT = "scripted_upsert";
private static final String EXTRACT_CTX_SCRIPT = "extract_ctx";
@SuppressWarnings("unchecked")
private static Map<String, Object> get(Map<String, Object> source, String key) {
return (Map<String, Object>) source.get(key);
}
public static | UpdateIT |
java | elastic__elasticsearch | x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/EvalOperator.java | {
"start": 5936,
"end": 10523
} | interface ____ {
ExpressionEvaluator get(DriverContext context);
/**
* {@code true} if it is safe and fast to evaluate this expression eagerly
* in {@link ExpressionEvaluator}s that need to be lazy, like {@code CASE}.
* This defaults to {@code false}, but expressions
* that evaluate quickly and can not produce warnings may override this to
* {@code true} to get a significant speed-up in {@code CASE}-like operations.
*/
default boolean eagerEvalSafeInLazy() {
return false;
}
}
/**
* Evaluate the expression.
* @return the returned Block has its own reference and the caller is responsible for releasing it.
*/
Block eval(Page page);
/**
* Heap used by the evaluator <strong>excluding</strong> any memory that's separately tracked
* like the {@link BreakingBytesRefBuilder} used for string concat.
*/
long baseRamBytesUsed();
}
private record ConstantNullEvaluator(DriverContext context) implements ExpressionEvaluator {
private static final String NAME = "ConstantNull";
private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(ConstantNullEvaluator.class);
@Override
public Block eval(Page page) {
return context.blockFactory().newConstantNullBlock(page.getPositionCount());
}
@Override
public void close() {}
@Override
public String toString() {
return NAME;
}
@Override
public long baseRamBytesUsed() {
return BASE_RAM_BYTES_USED;
}
record Factory() implements ExpressionEvaluator.Factory {
@Override
public ConstantNullEvaluator get(DriverContext context) {
return new ConstantNullEvaluator(context);
};
@Override
public String toString() {
return NAME;
}
};
}
public static final ExpressionEvaluator.Factory CONSTANT_NULL_FACTORY = new ConstantNullEvaluator.Factory();
private record ConstantTrueEvaluator(DriverContext context) implements ExpressionEvaluator {
private static final String NAME = "ConstantTrue";
private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(ConstantTrueEvaluator.class);
@Override
public Block eval(Page page) {
return context.blockFactory().newConstantBooleanBlockWith(true, page.getPositionCount());
}
@Override
public void close() {}
@Override
public String toString() {
return NAME;
}
@Override
public long baseRamBytesUsed() {
return BASE_RAM_BYTES_USED;
}
record Factory() implements ExpressionEvaluator.Factory {
@Override
public ConstantTrueEvaluator get(DriverContext context) {
return new ConstantTrueEvaluator(context);
};
@Override
public String toString() {
return NAME;
}
};
}
public static final ExpressionEvaluator.Factory CONSTANT_TRUE_FACTORY = new ConstantTrueEvaluator.Factory();
private record ConstantFalseEvaluator(DriverContext context) implements ExpressionEvaluator {
private static final String NAME = "ConstantFalse";
private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(ConstantFalseEvaluator.class);
@Override
public Block eval(Page page) {
return context.blockFactory().newConstantBooleanBlockWith(false, page.getPositionCount());
}
@Override
public void close() {}
@Override
public String toString() {
return NAME;
}
@Override
public long baseRamBytesUsed() {
return BASE_RAM_BYTES_USED;
}
record Factory() implements ExpressionEvaluator.Factory {
@Override
public ConstantFalseEvaluator get(DriverContext context) {
return new ConstantFalseEvaluator(context);
};
@Override
public String toString() {
return NAME;
}
};
}
public static final ExpressionEvaluator.Factory CONSTANT_FALSE_FACTORY = new ConstantFalseEvaluator.Factory();
}
| Factory |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/LoggerContext.java | {
"start": 5993,
"end": 10389
} | class ____ by the `%s` property", propertyName);
throw new RuntimeException(message, error);
}
}
private final InternalLoggerRegistry loggerRegistry = new InternalLoggerRegistry();
private final CopyOnWriteArrayList<PropertyChangeListener> propertyChangeListeners = new CopyOnWriteArrayList<>();
private volatile List<LoggerContextShutdownAware> listeners;
/**
* The Configuration is volatile to guarantee that initialization of the Configuration has completed before the
* reference is updated.
*/
private volatile Configuration configuration = new DefaultConfiguration();
private final ConcurrentMap<String, Object> externalMap = new ConcurrentHashMap<>();
private String contextName;
private volatile URI configLocation;
private Cancellable shutdownCallback;
private final Lock configLock = new ReentrantLock();
/**
* Constructor taking only a name.
*
* @param name The context name.
*/
public LoggerContext(final String name) {
this(name, null, (URI) null);
}
/**
* Constructor taking a name and a reference to an external context.
*
* @param name The context name.
* @param externalContext The external context.
*/
public LoggerContext(final String name, final Object externalContext) {
this(name, externalContext, (URI) null);
}
/**
* Constructor taking a name, external context and a configuration URI.
*
* @param name The context name.
* @param externalContext The external context.
* @param configLocn The location of the configuration as a URI.
*/
public LoggerContext(final String name, final Object externalContext, final URI configLocn) {
this.contextName = name;
if (externalContext != null) {
externalMap.put(EXTERNAL_CONTEXT_KEY, externalContext);
}
this.configLocation = configLocn;
}
/**
* Constructor taking a name external context and a configuration location String. The location must be resolvable
* to a File.
*
* @param name The configuration location.
* @param externalContext The external context.
* @param configLocn The configuration location.
*/
@SuppressFBWarnings(
value = "PATH_TRAVERSAL_IN",
justification = "The configLocn comes from a secure source (Log4j properties)")
public LoggerContext(final String name, final Object externalContext, final String configLocn) {
this.contextName = name;
if (externalContext != null) {
externalMap.put(EXTERNAL_CONTEXT_KEY, externalContext);
}
if (configLocn != null) {
URI uri;
try {
uri = new File(configLocn).toURI();
} catch (final Exception ex) {
uri = null;
}
configLocation = uri;
} else {
configLocation = null;
}
}
@Override
public void addShutdownListener(final LoggerContextShutdownAware listener) {
if (listeners == null) {
synchronized (this) {
if (listeners == null) {
listeners = new CopyOnWriteArrayList<>();
}
}
}
listeners.add(listener);
}
@Override
public List<LoggerContextShutdownAware> getListeners() {
return listeners;
}
/**
* Returns the current LoggerContext.
* <p>
* Avoids the type cast for:
* </p>
*
* <pre>
* (LoggerContext) LogManager.getContext();
* </pre>
*
* <p>
* WARNING - The LoggerContext returned by this method may not be the LoggerContext used to create a Logger for the
* calling class.
* </p>
*
* @return The current LoggerContext.
* @see LogManager#getContext()
*/
public static LoggerContext getContext() {
return (LoggerContext) LogManager.getContext();
}
/**
* Returns a LoggerContext.
* <p>
* Avoids the type cast for:
* </p>
*
* <pre>
* (LoggerContext) LogManager.getContext(currentContext);
* </pre>
*
* @param currentContext if false the LoggerContext appropriate for the caller of this method is returned. For
* example, in a web application if the caller is a | pointed |
java | spring-projects__spring-boot | core/spring-boot/src/test/java/org/springframework/boot/context/properties/bind/validation/ValidationBindHandlerTests.java | {
"start": 12273,
"end": 12518
} | class ____ {
@Valid
private ExampleNested nested = new ExampleNested();
ExampleNested getNested() {
return this.nested;
}
void setNested(ExampleNested nested) {
this.nested = nested;
}
}
static | ExampleValidatedWithNestedBean |
java | playframework__playframework | core/play/src/main/java/play/mvc/StatusHeader.java | {
"start": 23597,
"end": 24122
} | class ____ from.
*
* @param resourceName The path of the resource to load.
* @param inline Whether it should be served as an inline file, or as an attachment.
* @return a '200 OK' result containing the resource in the body with in-line content disposition.
*/
public Result sendResource(String resourceName, boolean inline) {
return sendResource(resourceName, inline, () -> {}, null);
}
/**
* Send the given resource.
*
* <p>The resource will be loaded from the same classloader that this | comes |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/path/JSONPath_6.java | {
"start": 344,
"end": 1056
} | class ____ extends TestCase {
public void test_path() throws Exception {
String json = "{\"hello\":\"world\"}";
JSONObject object = JSON.parseObject(json);
assertTrue(JSONPath.contains(object, "$.hello"));
assertTrue(JSONPath.contains(object, "hello"));
}
// public void test_path_2() throws Exception {
//// File file = new File("/Users/wenshao/Downloads/test");
//// String json = FileUtils.readFileToString(file);
// String json = "{\"returnObj\":[{\"$ref\":\"$.subInvokes.com\\\\.alipay\\\\.cif\\\\.user\\\\.UserInfoQueryService\\\\@findUserInfosByCardNo\\\\(String[])[0].response[0]\"}]}";
// JSON.parseObject(json);
// }
}
| JSONPath_6 |
java | apache__camel | components/camel-test/camel-test-main-junit5/src/test/java/org/apache/camel/test/main/junit5/annotation/DumpRouteTest.java | {
"start": 1373,
"end": 1457
} | class ____ that the route can be dumped.
*/
@CamelMainTest(dumpRoute = "yaml")
| ensuring |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/DefaultExecutionGraphBuilder.java | {
"start": 17353,
"end": 17446
} | class ____ not supposed to be instantiated. */
private DefaultExecutionGraphBuilder() {}
}
| is |
java | spring-projects__spring-framework | spring-jdbc/src/test/java/org/springframework/jdbc/support/SQLErrorCodesFactoryTests.java | {
"start": 6239,
"end": 6881
} | class ____ extends SQLErrorCodesFactory {
@Override
protected Resource loadResource(String path) {
if (SQLErrorCodesFactory.SQL_ERROR_CODE_OVERRIDE_PATH.equals(path)) {
return new ClassPathResource("test-error-codes.xml", SQLErrorCodesFactoryTests.class);
}
return null;
}
}
// Should have loaded without error
TestSQLErrorCodesFactory sf = new TestSQLErrorCodesFactory();
assertThat(sf.getErrorCodes("XX").getBadSqlGrammarCodes()).isEmpty();
assertThat(sf.getErrorCodes("Oracle").getBadSqlGrammarCodes()).containsExactly("1", "2");
}
@Test
void invalidUserDefinedCodeFormat() {
| TestSQLErrorCodesFactory |
java | apache__flink | flink-core/src/main/java/org/apache/flink/api/common/aggregators/LongZeroConvergence.java | {
"start": 1200,
"end": 1716
} | class ____ implements ConvergenceCriterion<LongValue> {
/**
* Returns true, if the aggregator value is zero, false otherwise.
*
* @param iteration The number of the iteration superstep. Ignored in this case.
* @param value The aggregator value, which is compared to zero.
* @return True, if the aggregator value is zero, false otherwise.
*/
@Override
public boolean isConverged(int iteration, LongValue value) {
return value.getValue() == 0;
}
}
| LongZeroConvergence |
java | hibernate__hibernate-orm | tooling/metamodel-generator/src/test/java/org/hibernate/processor/test/mappedsuperclass/typedmappedsuperclass/AttachmentGroupInTopic.java | {
"start": 223,
"end": 382
} | class ____
extends AttachmentGroup<AttachmentGroupInTopic, AttachmentGroupPostInTopic, UserRole> {
public AttachmentGroupInTopic() {
}
}
| AttachmentGroupInTopic |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/plugins/PluginsLoaderTests.java | {
"start": 12660,
"end": 14969
} | class ____ extends Plugin {
}
"""));
var classToBytes = InMemoryJavaCompiler.compile(sources);
JarUtils.createJarWithEntries(jar, Map.ofEntries(entry("p/A.class", classToBytes.get("p.A"))));
var pluginsLoader = newPluginsLoader(settings);
try {
var loadedLayers = pluginsLoader.pluginLayers().toList();
assertThat(loadedLayers, hasSize(1));
assertThat(loadedLayers.get(0).pluginBundle().pluginDescriptor().getName(), equalTo("non-modular-plugin"));
assertThat(loadedLayers.get(0).pluginBundle().pluginDescriptor().isStable(), is(false));
assertThat(loadedLayers.get(0).pluginBundle().pluginDescriptor().isModular(), is(false));
assertThat(pluginsLoader.pluginDescriptors(), hasSize(1));
assertThat(pluginsLoader.pluginDescriptors().get(0).getName(), equalTo("non-modular-plugin"));
assertThat(pluginsLoader.pluginDescriptors().get(0).isModular(), is(false));
var pluginModuleLayer = loadedLayers.get(0).pluginModuleLayer();
assertThat(pluginModuleLayer, is(ModuleLayer.boot()));
} finally {
closePluginLoaders(pluginsLoader);
}
}
private static void createStablePlugin(Path home) throws IOException {
final Path plugins = home.resolve("plugins");
final Path plugin = plugins.resolve(STABLE_PLUGIN_NAME);
Files.createDirectories(plugin);
PluginTestUtil.writeStablePluginProperties(
plugin,
"description",
"description",
"name",
STABLE_PLUGIN_NAME,
"version",
"1.0.0",
"elasticsearch.version",
Version.CURRENT.toString(),
"java.version",
System.getProperty("java.specification.version")
);
Path jar = plugin.resolve("impl.jar");
JarUtils.createJarWithEntries(jar, Map.of("p/A.class", InMemoryJavaCompiler.compile("p.A", """
package p;
import java.util.Map;
import org.elasticsearch.plugin.analysis.CharFilterFactory;
import org.elasticsearch.plugin.NamedComponent;
import java.io.Reader;
@NamedComponent( "a_name")
public | A |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/masterreplica/Requests.java | {
"start": 570,
"end": 2507
} | class ____ extends
CompletableEventLatchSupport<Tuple2<RedisURI, TimedAsyncCommand<String, String, String>>, List<RedisNodeDescription>> {
private final Map<RedisURI, TimedAsyncCommand<String, String, String>> rawViews = new TreeMap<>(
ReplicaUtils.RedisURIComparator.INSTANCE);
private final List<RedisNodeDescription> nodes;
public Requests(int expectedCount, List<RedisNodeDescription> nodes) {
super(expectedCount);
this.nodes = nodes;
}
protected void addRequest(RedisURI redisURI, TimedAsyncCommand<String, String, String> command) {
rawViews.put(redisURI, command);
command.onComplete((s, throwable) -> {
if (throwable != null) {
accept(throwable);
} else {
accept(Tuples.of(redisURI, command));
}
});
}
@Override
protected void onEmit(Emission<List<RedisNodeDescription>> emission) {
List<RedisNodeDescription> result = new ArrayList<>();
Map<RedisNodeDescription, Long> latencies = new HashMap<>();
for (RedisNodeDescription node : nodes) {
TimedAsyncCommand<String, String, String> future = getRequest(node.getUri());
if (future == null || !future.isDone()) {
continue;
}
RedisNodeDescription redisNodeDescription = findNodeByUri(nodes, node.getUri());
latencies.put(redisNodeDescription, future.duration());
result.add(redisNodeDescription);
}
SortAction sortAction = SortAction.getSortAction();
sortAction.sort(result, new LatencyComparator(latencies));
emission.success(result);
}
protected Set<RedisURI> nodes() {
return rawViews.keySet();
}
protected TimedAsyncCommand<String, String, String> getRequest(RedisURI redisURI) {
return rawViews.get(redisURI);
}
}
| Requests |
java | apache__dubbo | dubbo-remoting/dubbo-remoting-zookeeper-curator5/src/main/java/org/apache/dubbo/remoting/zookeeper/curator5/StateListener.java | {
"start": 864,
"end": 1066
} | interface ____ {
int SESSION_LOST = 0;
int CONNECTED = 1;
int RECONNECTED = 2;
int SUSPENDED = 3;
int NEW_SESSION_CREATED = 4;
void stateChanged(int connected);
}
| StateListener |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/module/SimpleSerializers.java | {
"start": 7913,
"end": 9480
} | class ____?
for (Class<?> curr = cls; (curr != null); curr = curr.getSuperclass()) {
key.reset(curr);
ser = _classMappings.get(key);
if (ser != null) {
return ser;
}
}
}
}
// No direct match? How about super-interfaces?
if (_interfaceMappings != null) {
ser = _findInterfaceMapping(cls, key);
if (ser != null) {
return ser;
}
// still no matches? Maybe interfaces of super classes
if (!cls.isInterface()) {
while ((cls = cls.getSuperclass()) != null) {
ser = _findInterfaceMapping(cls, key);
if (ser != null) {
return ser;
}
}
}
}
return null;
}
protected ValueSerializer<?> _findInterfaceMapping(Class<?> cls, ClassKey key)
{
for (Class<?> iface : cls.getInterfaces()) {
key.reset(iface);
ValueSerializer<?> ser = _interfaceMappings.get(key);
if (ser != null) {
return ser;
}
ser = _findInterfaceMapping(iface, key);
if (ser != null) {
return ser;
}
}
return null;
}
protected void _addSerializer(Class<?> cls, ValueSerializer<?> ser)
{
ClassKey key = new ClassKey(cls);
// Interface or | match |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/any/xml/AnyTypeTest.java | {
"start": 1126,
"end": 2715
} | class ____ {
@BeforeEach
public void createTestData(SessionFactoryScope scope) {
final Person person = new Person();
final Address address = new Address();
person.setData( address );
scope.inTransaction(
session -> {
session.persist( person );
session.persist( address );
}
);
}
@AfterEach
public void dropTestData(SessionFactoryScope scope) {
scope.dropData();
}
@Test
public void testStoredData(SessionFactoryScope scope) {
scope.inTransaction(
session -> {
final Person person = session.createQuery( "from Person", Person.class ).uniqueResult();
assertThat( person.getData(), instanceOf( Address.class ) );
}
);
}
@Test
public void testJoinFetchOfAnAnyTypeAttribute(SessionFactoryScope scope) {
// Query translator should dis-allow join fetching of an <any/> mapping. Let's make sure it does...
scope.inTransaction(
session -> {
try {
session.createQuery( "select p from Person p join fetch p.data" ).list();
}
catch (SemanticException e) {
// expected
validateAnyJoinException( e );
}
catch (IllegalArgumentException e) {
// expected with JPA exception wrapping
assertThat( e.getCause(), instanceOf( SemanticException.class ) );
final SemanticException semanticException = (SemanticException) e.getCause();
validateAnyJoinException( semanticException );
}
}
);
}
private static void validateAnyJoinException(SemanticException e) {
assertThat( e.getMessage(), is( "An @Any attribute cannot be join fetched" ) );
}
}
| AnyTypeTest |
java | google__auto | value/src/it/functional/src/test/java/com/google/auto/value/AutoOneOfTest.java | {
"start": 15062,
"end": 17329
} | enum ____ {
EMPTY,
STRING,
}
public abstract Kind getKind();
public abstract void empty();
public abstract String string();
public static MaybeEmpty ofEmpty() {
return AutoOneOf_AutoOneOfTest_MaybeEmpty.empty();
}
public static MaybeEmpty ofString(String s) {
return AutoOneOf_AutoOneOfTest_MaybeEmpty.string(s);
}
}
@Test
public void voidPropertyIsSingleton() {
MaybeEmpty empty1 = MaybeEmpty.ofEmpty();
MaybeEmpty empty2 = MaybeEmpty.ofEmpty();
assertThat(empty1).isSameInstanceAs(empty2);
}
@Test
public void voidPropertyRemainsSingletonWhenDeserialized() throws Exception {
MaybeEmpty empty1 = MaybeEmpty.ofEmpty();
ByteArrayOutputStream baos = new ByteArrayOutputStream();
// We're still compiling this with -source 6, so we can't use try-with-resources.
ObjectOutputStream dos = new ObjectOutputStream(baos);
dos.writeObject(empty1);
dos.close();
ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray());
ObjectInputStream ois = new ObjectInputStream(bais);
MaybeEmpty empty2 = (MaybeEmpty) ois.readObject();
assertThat(empty2).isSameInstanceAs(empty1);
}
@Test
public void voidPropertyToString() {
MaybeEmpty empty = MaybeEmpty.ofEmpty();
assertThat(empty.toString()).isEqualTo("MaybeEmpty{empty}");
}
@Test
public void voidPropertyHashCodeIsIdentity() {
MaybeEmpty empty = MaybeEmpty.ofEmpty();
assertThat(empty.hashCode()).isEqualTo(System.identityHashCode(empty));
}
@Test
public void voidPropertyGetterDoesNothing() {
MaybeEmpty empty = MaybeEmpty.ofEmpty();
empty.empty();
}
@Test
public void voidPropertyNotEqualToNonVoid() {
MaybeEmpty empty = MaybeEmpty.ofEmpty();
MaybeEmpty notEmpty = MaybeEmpty.ofString("foo");
assertThat(empty).isNotEqualTo(notEmpty);
assertThat(notEmpty).isNotEqualTo(empty);
}
@Test
public void voidPropertyWrongType() {
MaybeEmpty notEmpty = MaybeEmpty.ofString("foo");
try {
notEmpty.empty();
fail();
} catch (UnsupportedOperationException e) {
assertThat(e).hasMessageThat().containsMatch("(?i:string)");
}
}
@AutoOneOf(OneOfArray.Kind.class)
public abstract static | Kind |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/parser/TypeUtilsTest_loadClass.java | {
"start": 150,
"end": 903
} | class ____ extends TestCase {
public void test_loadClass() throws Exception {
Assert.assertSame(Entity.class,
TypeUtils.loadClass("com.alibaba.json.bvt.parser.TypeUtilsTest_loadClass$Entity",
Entity.class.getClassLoader()));
Assert.assertSame(Entity.class,
TypeUtils.loadClass("com.alibaba.json.bvt.parser.TypeUtilsTest_loadClass$Entity", null));
}
public void test_error() throws Exception {
Assert.assertNull(TypeUtils.loadClass("com.alibaba.json.bvt.parser.TypeUtilsTest_loadClass.Entity",
Entity.class.getClassLoader()));
}
public static | TypeUtilsTest_loadClass |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackInfoFeatureAction.java | {
"start": 704,
"end": 4768
} | class ____ {
private static final String BASE_NAME = "cluster:monitor/xpack/info/";
public static final ActionType<XPackInfoFeatureResponse> SECURITY = xpackInfoFeatureAction(XPackField.SECURITY);
public static final ActionType<XPackInfoFeatureResponse> MONITORING = xpackInfoFeatureAction(XPackField.MONITORING);
public static final ActionType<XPackInfoFeatureResponse> WATCHER = xpackInfoFeatureAction(XPackField.WATCHER);
public static final ActionType<XPackInfoFeatureResponse> GRAPH = xpackInfoFeatureAction(XPackField.GRAPH);
public static final ActionType<XPackInfoFeatureResponse> MACHINE_LEARNING = xpackInfoFeatureAction(XPackField.MACHINE_LEARNING);
public static final ActionType<XPackInfoFeatureResponse> LOGSTASH = xpackInfoFeatureAction(XPackField.LOGSTASH);
public static final ActionType<XPackInfoFeatureResponse> EQL = xpackInfoFeatureAction(XPackField.EQL);
public static final ActionType<XPackInfoFeatureResponse> ESQL = xpackInfoFeatureAction(XPackField.ESQL);
public static final ActionType<XPackInfoFeatureResponse> SQL = xpackInfoFeatureAction(XPackField.SQL);
public static final ActionType<XPackInfoFeatureResponse> ROLLUP = xpackInfoFeatureAction(XPackField.ROLLUP);
public static final ActionType<XPackInfoFeatureResponse> INDEX_LIFECYCLE = xpackInfoFeatureAction(XPackField.INDEX_LIFECYCLE);
public static final ActionType<XPackInfoFeatureResponse> SNAPSHOT_LIFECYCLE = xpackInfoFeatureAction(XPackField.SNAPSHOT_LIFECYCLE);
public static final ActionType<XPackInfoFeatureResponse> CCR = xpackInfoFeatureAction(XPackField.CCR);
public static final ActionType<XPackInfoFeatureResponse> TRANSFORM = xpackInfoFeatureAction(XPackField.TRANSFORM);
public static final ActionType<XPackInfoFeatureResponse> VOTING_ONLY = xpackInfoFeatureAction(XPackField.VOTING_ONLY);
public static final ActionType<XPackInfoFeatureResponse> SPATIAL = xpackInfoFeatureAction(XPackField.SPATIAL);
public static final ActionType<XPackInfoFeatureResponse> ANALYTICS = xpackInfoFeatureAction(XPackField.ANALYTICS);
public static final ActionType<XPackInfoFeatureResponse> ENRICH = xpackInfoFeatureAction(XPackField.ENRICH);
public static final ActionType<XPackInfoFeatureResponse> SEARCHABLE_SNAPSHOTS = xpackInfoFeatureAction(XPackField.SEARCHABLE_SNAPSHOTS);
public static final ActionType<XPackInfoFeatureResponse> DATA_STREAMS = xpackInfoFeatureAction(XPackField.DATA_STREAMS);
public static final ActionType<XPackInfoFeatureResponse> DATA_TIERS = xpackInfoFeatureAction(XPackField.DATA_TIERS);
public static final ActionType<XPackInfoFeatureResponse> AGGREGATE_METRIC = xpackInfoFeatureAction(XPackField.AGGREGATE_METRIC);
public static final ActionType<XPackInfoFeatureResponse> ARCHIVE = xpackInfoFeatureAction(XPackField.ARCHIVE);
public static final ActionType<XPackInfoFeatureResponse> ENTERPRISE_SEARCH = xpackInfoFeatureAction(XPackField.ENTERPRISE_SEARCH);
public static final ActionType<XPackInfoFeatureResponse> UNIVERSAL_PROFILING = xpackInfoFeatureAction(XPackField.UNIVERSAL_PROFILING);
public static final ActionType<XPackInfoFeatureResponse> LOGSDB = xpackInfoFeatureAction(XPackField.LOGSDB);
public static final List<ActionType<XPackInfoFeatureResponse>> ALL = List.of(
SECURITY,
MONITORING,
WATCHER,
GRAPH,
MACHINE_LEARNING,
LOGSTASH,
EQL,
ESQL,
SQL,
ROLLUP,
INDEX_LIFECYCLE,
SNAPSHOT_LIFECYCLE,
CCR,
TRANSFORM,
VOTING_ONLY,
SPATIAL,
ANALYTICS,
ENRICH,
DATA_STREAMS,
SEARCHABLE_SNAPSHOTS,
DATA_TIERS,
AGGREGATE_METRIC,
ARCHIVE,
ENTERPRISE_SEARCH,
UNIVERSAL_PROFILING,
LOGSDB
);
public static ActionType<XPackInfoFeatureResponse> xpackInfoFeatureAction(String suffix) {
return new ActionType<>(BASE_NAME + suffix);
}
private XPackInfoFeatureAction() {/* no instances */}
}
| XPackInfoFeatureAction |
java | apache__spark | sql/hive-thriftserver/src/main/java/org/apache/hive/service/auth/PlainSaslServer.java | {
"start": 5732,
"end": 5962
} | class ____ extends Provider {
public SaslPlainProvider() {
super("HiveSaslPlain", 1.0, "Hive Plain SASL provider");
put("SaslServerFactory.PLAIN", SaslPlainServerFactory.class.getName());
}
}
}
| SaslPlainProvider |
java | netty__netty | codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketServerProtocolHandlerTest.java | {
"start": 2377,
"end": 19508
} | class ____ {
private final Queue<FullHttpResponse> responses = new ArrayDeque<FullHttpResponse>();
@BeforeEach
public void setUp() {
responses.clear();
}
@Test
public void testHttpUpgradeRequestFull() {
testHttpUpgradeRequest0(true);
}
@Test
public void testHttpUpgradeRequestNonFull() {
testHttpUpgradeRequest0(false);
}
private void testHttpUpgradeRequest0(boolean full) {
EmbeddedChannel ch = createChannel(new MockOutboundHandler());
ChannelHandlerContext handshakerCtx = ch.pipeline().context(WebSocketServerProtocolHandshakeHandler.class);
writeUpgradeRequest(ch, full);
FullHttpResponse response = responses.remove();
assertEquals(SWITCHING_PROTOCOLS, response.status());
response.release();
assertNotNull(WebSocketServerProtocolHandler.getHandshaker(handshakerCtx.channel()));
assertFalse(ch.finish());
}
@Test
public void testWebSocketServerProtocolHandshakeHandlerReplacedBeforeHandshake() {
EmbeddedChannel ch = createChannel(new MockOutboundHandler());
ChannelHandlerContext handshakerCtx = ch.pipeline().context(WebSocketServerProtocolHandshakeHandler.class);
ch.pipeline().addLast(new ChannelInboundHandlerAdapter() {
@Override
public void userEventTriggered(ChannelHandlerContext ctx, Object evt) {
if (evt instanceof WebSocketServerProtocolHandler.HandshakeComplete) {
// We should have removed the handler already.
assertNull(ctx.pipeline().context(WebSocketServerProtocolHandshakeHandler.class));
}
}
});
writeUpgradeRequest(ch);
FullHttpResponse response = responses.remove();
assertEquals(SWITCHING_PROTOCOLS, response.status());
response.release();
assertNotNull(WebSocketServerProtocolHandler.getHandshaker(handshakerCtx.channel()));
assertFalse(ch.finish());
}
@Test
public void testHttpUpgradeRequestInvalidUpgradeHeader() {
EmbeddedChannel ch = createChannel();
FullHttpRequest httpRequestWithEntity = new WebSocketRequestBuilder().httpVersion(HTTP_1_1)
.method(HttpMethod.GET)
.uri("/test")
.connection("Upgrade")
.version00()
.upgrade("BogusSocket")
.build();
ch.writeInbound(httpRequestWithEntity);
FullHttpResponse response = responses.remove();
assertEquals(BAD_REQUEST, response.status());
assertEquals("not a WebSocket handshake request: missing upgrade", getResponseMessage(response));
response.release();
assertFalse(ch.finish());
}
@Test
public void testHttpUpgradeRequestMissingWSKeyHeader() {
EmbeddedChannel ch = createChannel();
HttpRequest httpRequest = new WebSocketRequestBuilder().httpVersion(HTTP_1_1)
.method(HttpMethod.GET)
.uri("/test")
.key(null)
.connection("Upgrade")
.upgrade(HttpHeaderValues.WEBSOCKET)
.version13()
.build();
ch.writeInbound(httpRequest);
FullHttpResponse response = responses.remove();
assertEquals(BAD_REQUEST, response.status());
assertEquals("not a WebSocket request: missing key", getResponseMessage(response));
response.release();
assertFalse(ch.finish());
}
@Test
public void testCreateUTF8Validator() {
WebSocketServerProtocolConfig config = WebSocketServerProtocolConfig.newBuilder()
.websocketPath("/test")
.withUTF8Validator(true)
.build();
EmbeddedChannel ch = new EmbeddedChannel(
new WebSocketServerProtocolHandler(config),
new HttpRequestDecoder(),
new HttpResponseEncoder(),
new MockOutboundHandler());
writeUpgradeRequest(ch);
FullHttpResponse response = responses.remove();
assertEquals(SWITCHING_PROTOCOLS, response.status());
response.release();
assertNotNull(ch.pipeline().get(Utf8FrameValidator.class));
}
@Test
public void testDoNotCreateUTF8Validator() {
WebSocketServerProtocolConfig config = WebSocketServerProtocolConfig.newBuilder()
.websocketPath("/test")
.withUTF8Validator(false)
.build();
EmbeddedChannel ch = new EmbeddedChannel(
new WebSocketServerProtocolHandler(config),
new HttpRequestDecoder(),
new HttpResponseEncoder(),
new MockOutboundHandler());
writeUpgradeRequest(ch);
FullHttpResponse response = responses.remove();
assertEquals(SWITCHING_PROTOCOLS, response.status());
response.release();
assertNull(ch.pipeline().get(Utf8FrameValidator.class));
}
@Test
public void testHandleTextFrame() {
CustomTextFrameHandler customTextFrameHandler = new CustomTextFrameHandler();
EmbeddedChannel ch = createChannel(customTextFrameHandler);
writeUpgradeRequest(ch);
FullHttpResponse response = responses.remove();
assertEquals(SWITCHING_PROTOCOLS, response.status());
response.release();
if (ch.pipeline().context(HttpRequestDecoder.class) != null) {
// Removing the HttpRequestDecoder because we are writing a TextWebSocketFrame and thus
// decoding is not necessary.
ch.pipeline().remove(HttpRequestDecoder.class);
}
ch.writeInbound(new TextWebSocketFrame("payload"));
assertEquals("processed: payload", customTextFrameHandler.getContent());
assertFalse(ch.finish());
}
@Test
public void testCheckWebSocketPathStartWithSlash() {
WebSocketRequestBuilder builder = new WebSocketRequestBuilder().httpVersion(HTTP_1_1)
.method(HttpMethod.GET)
.key(HttpHeaderNames.SEC_WEBSOCKET_KEY)
.connection("Upgrade")
.upgrade(HttpHeaderValues.WEBSOCKET)
.version13();
WebSocketServerProtocolConfig config = WebSocketServerProtocolConfig.newBuilder()
.websocketPath("/")
.checkStartsWith(true)
.build();
FullHttpResponse response;
createChannel(config, null).writeInbound(builder.uri("/test").build());
response = responses.remove();
assertEquals(SWITCHING_PROTOCOLS, response.status());
response.release();
createChannel(config, null).writeInbound(builder.uri("/?q=v").build());
response = responses.remove();
assertEquals(SWITCHING_PROTOCOLS, response.status());
response.release();
createChannel(config, null).writeInbound(builder.uri("/").build());
response = responses.remove();
assertEquals(SWITCHING_PROTOCOLS, response.status());
response.release();
}
@Test
public void testCheckValidWebSocketPath() {
HttpRequest httpRequest = new WebSocketRequestBuilder().httpVersion(HTTP_1_1)
.method(HttpMethod.GET)
.uri("/test")
.key(HttpHeaderNames.SEC_WEBSOCKET_KEY)
.connection("Upgrade")
.upgrade(HttpHeaderValues.WEBSOCKET)
.version13()
.build();
WebSocketServerProtocolConfig config = WebSocketServerProtocolConfig.newBuilder()
.websocketPath("/test")
.checkStartsWith(true)
.build();
EmbeddedChannel ch = new EmbeddedChannel(
new WebSocketServerProtocolHandler(config),
new HttpRequestDecoder(),
new HttpResponseEncoder(),
new MockOutboundHandler());
ch.writeInbound(httpRequest);
FullHttpResponse response = responses.remove();
assertEquals(SWITCHING_PROTOCOLS, response.status());
response.release();
}
@Test
public void testCheckInvalidWebSocketPath() {
HttpRequest httpRequest = new WebSocketRequestBuilder().httpVersion(HTTP_1_1)
.method(HttpMethod.GET)
.uri("/testabc")
.key(HttpHeaderNames.SEC_WEBSOCKET_KEY)
.connection("Upgrade")
.upgrade(HttpHeaderValues.WEBSOCKET)
.version13()
.build();
WebSocketServerProtocolConfig config = WebSocketServerProtocolConfig.newBuilder()
.websocketPath("/test")
.checkStartsWith(true)
.build();
EmbeddedChannel ch = new EmbeddedChannel(
new WebSocketServerProtocolHandler(config),
new HttpRequestDecoder(),
new HttpResponseEncoder(),
new MockOutboundHandler());
ch.writeInbound(httpRequest);
ChannelHandlerContext handshakerCtx = ch.pipeline().context(WebSocketServerProtocolHandshakeHandler.class);
assertNull(WebSocketServerProtocolHandler.getHandshaker(handshakerCtx.channel()));
}
@Test
public void testExplicitCloseFrameSentWhenServerChannelClosed() throws Exception {
WebSocketCloseStatus closeStatus = WebSocketCloseStatus.ENDPOINT_UNAVAILABLE;
EmbeddedChannel client = createClient();
EmbeddedChannel server = createServer();
assertFalse(server.writeInbound(client.<ByteBuf>readOutbound()));
assertFalse(client.writeInbound(server.<ByteBuf>readOutbound()));
// When server channel closed with explicit close-frame
assertTrue(server.writeOutbound(new CloseWebSocketFrame(closeStatus)));
server.close();
// Then client receives provided close-frame
assertTrue(client.writeInbound(server.<ByteBuf>readOutbound()));
assertFalse(server.isOpen());
CloseWebSocketFrame closeMessage = client.readInbound();
assertEquals(closeMessage.statusCode(), closeStatus.code());
closeMessage.release();
client.close();
assertTrue(ReferenceCountUtil.release(client.readOutbound()));
assertFalse(client.finishAndReleaseAll());
assertFalse(server.finishAndReleaseAll());
}
@Test
public void testCloseFrameSentWhenServerChannelClosedSilently() throws Exception {
EmbeddedChannel client = createClient();
EmbeddedChannel server = createServer();
assertFalse(server.writeInbound(client.<ByteBuf>readOutbound()));
assertFalse(client.writeInbound(server.<ByteBuf>readOutbound()));
// When server channel closed without explicit close-frame
server.close();
// Then client receives NORMAL_CLOSURE close-frame
assertTrue(client.writeInbound(server.<ByteBuf>readOutbound()));
assertFalse(server.isOpen());
CloseWebSocketFrame closeMessage = client.readInbound();
assertEquals(closeMessage.statusCode(), WebSocketCloseStatus.NORMAL_CLOSURE.code());
closeMessage.release();
client.close();
assertTrue(ReferenceCountUtil.release(client.readOutbound()));
assertFalse(client.finishAndReleaseAll());
assertFalse(server.finishAndReleaseAll());
}
@Test
public void testExplicitCloseFrameSentWhenClientChannelClosed() throws Exception {
WebSocketCloseStatus closeStatus = WebSocketCloseStatus.INVALID_PAYLOAD_DATA;
EmbeddedChannel client = createClient();
EmbeddedChannel server = createServer();
assertFalse(server.writeInbound(client.<ByteBuf>readOutbound()));
assertFalse(client.writeInbound(server.<ByteBuf>readOutbound()));
// When client channel closed with explicit close-frame
assertTrue(client.writeOutbound(new CloseWebSocketFrame(closeStatus)));
client.close();
// Then client receives provided close-frame
assertFalse(server.writeInbound(client.<ByteBuf>readOutbound()));
assertFalse(client.isOpen());
assertFalse(server.isOpen());
CloseWebSocketFrame closeMessage = decode(server.<ByteBuf>readOutbound(), CloseWebSocketFrame.class);
assertEquals(closeMessage.statusCode(), closeStatus.code());
closeMessage.release();
assertFalse(client.finishAndReleaseAll());
assertFalse(server.finishAndReleaseAll());
}
@Test
public void testCloseFrameSentWhenClientChannelClosedSilently() throws Exception {
EmbeddedChannel client = createClient();
EmbeddedChannel server = createServer();
assertFalse(server.writeInbound(client.<ByteBuf>readOutbound()));
assertFalse(client.writeInbound(server.<ByteBuf>readOutbound()));
// When client channel closed without explicit close-frame
client.close();
// Then server receives NORMAL_CLOSURE close-frame
assertFalse(server.writeInbound(client.<ByteBuf>readOutbound()));
assertFalse(client.isOpen());
assertFalse(server.isOpen());
CloseWebSocketFrame closeMessage = decode(server.<ByteBuf>readOutbound(), CloseWebSocketFrame.class);
assertEquals(closeMessage, new CloseWebSocketFrame(WebSocketCloseStatus.NORMAL_CLOSURE));
closeMessage.release();
assertFalse(client.finishAndReleaseAll());
assertFalse(server.finishAndReleaseAll());
}
private EmbeddedChannel createClient(ChannelHandler... handlers) throws Exception {
WebSocketClientProtocolConfig clientConfig = WebSocketClientProtocolConfig.newBuilder()
.webSocketUri("http://test/test")
.dropPongFrames(false)
.handleCloseFrames(false)
.build();
EmbeddedChannel ch = new EmbeddedChannel(false, false,
new HttpClientCodec(),
new HttpObjectAggregator(8192),
new WebSocketClientProtocolHandler(clientConfig)
);
ch.pipeline().addLast(handlers);
ch.register();
return ch;
}
private EmbeddedChannel createServer(ChannelHandler... handlers) throws Exception {
WebSocketServerProtocolConfig serverConfig = WebSocketServerProtocolConfig.newBuilder()
.websocketPath("/test")
.dropPongFrames(false)
.build();
EmbeddedChannel ch = new EmbeddedChannel(false, false,
new HttpServerCodec(),
new HttpObjectAggregator(8192),
new WebSocketServerProtocolHandler(serverConfig)
);
ch.pipeline().addLast(handlers);
ch.register();
return ch;
}
@SuppressWarnings("SameParameterValue")
private <T> T decode(ByteBuf input, Class<T> clazz) {
EmbeddedChannel ch = new EmbeddedChannel(new WebSocket13FrameDecoder(true, false, 65536, true));
assertTrue(ch.writeInbound(input));
Object decoded = ch.readInbound();
assertNotNull(decoded);
assertFalse(ch.finish());
return clazz.cast(decoded);
}
private EmbeddedChannel createChannel() {
return createChannel(null);
}
private EmbeddedChannel createChannel(ChannelHandler handler) {
WebSocketServerProtocolConfig serverConfig = WebSocketServerProtocolConfig.newBuilder()
.websocketPath("/test")
.sendCloseFrame(null)
.build();
return createChannel(serverConfig, handler);
}
private EmbeddedChannel createChannel(WebSocketServerProtocolConfig serverConfig, ChannelHandler handler) {
return new EmbeddedChannel(
new WebSocketServerProtocolHandler(serverConfig),
new HttpRequestDecoder(),
new HttpResponseEncoder(),
new MockOutboundHandler(),
handler);
}
private static void writeUpgradeRequest(EmbeddedChannel ch) {
writeUpgradeRequest(ch, true);
}
private static void writeUpgradeRequest(EmbeddedChannel ch, boolean full) {
HttpRequest request = WebSocketRequestBuilder.successful();
if (full) {
ch.writeInbound(request);
} else {
if (request instanceof FullHttpRequest) {
FullHttpRequest fullHttpRequest = (FullHttpRequest) request;
HttpRequest req = new DefaultHttpRequest(fullHttpRequest.protocolVersion(), fullHttpRequest.method(),
fullHttpRequest.uri(), fullHttpRequest.headers().copy());
ch.writeInbound(req);
ch.writeInbound(new DefaultHttpContent(fullHttpRequest.content().copy()));
ch.writeInbound(LastHttpContent.EMPTY_LAST_CONTENT);
fullHttpRequest.release();
} else {
ch.writeInbound(request);
}
}
}
private static String getResponseMessage(FullHttpResponse response) {
return response.content().toString(CharsetUtil.UTF_8);
}
private | WebSocketServerProtocolHandlerTest |
java | netty__netty | transport-native-epoll/src/test/java/io/netty/channel/epoll/EpollReuseAddrTest.java | {
"start": 9533,
"end": 9976
} | class ____ extends ChannelInboundHandlerAdapter {
private final AtomicBoolean accepted;
ServerSocketTestHandler(AtomicBoolean accepted) {
this.accepted = accepted;
}
@Override
public void channelActive(ChannelHandlerContext ctx) throws Exception {
accepted.set(true);
ctx.close();
}
}
@ChannelHandler.Sharable
private static | ServerSocketTestHandler |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/mappedBy/IsNullAndMappedByTest.java | {
"start": 9108,
"end": 9637
} | class ____ {
@Id
private Integer id;
private String name;
@OneToOne( mappedBy = "person" )
private Account account;
public Person() {
}
public Person(Integer id, String name) {
this.id = id;
this.name = name;
}
public Integer getId() {
return id;
}
public String getName() {
return name;
}
public Account getAccount() {
return account;
}
}
@SuppressWarnings( { "FieldCanBeLocal", "unused" } )
@Entity( name = "Account" )
@Table( name = "ACCOUNT_TABLE" )
public static | Person |
java | spring-projects__spring-boot | module/spring-boot-actuator/src/test/java/org/springframework/boot/actuate/endpoint/web/annotation/WebEndpointDiscovererTests.java | {
"start": 19692,
"end": 19864
} | class ____ {
@ReadOperation
@Nullable Object getOne(@Selector String id) {
return null;
}
}
@Endpoint(id = "test")
static | AdditionalOperationWebEndpointExtension |
java | spring-projects__spring-security | webauthn/src/main/java/org/springframework/security/web/webauthn/api/COSEAlgorithmIdentifier.java | {
"start": 937,
"end": 2063
} | class ____ {
public static final COSEAlgorithmIdentifier EdDSA = new COSEAlgorithmIdentifier(-8);
public static final COSEAlgorithmIdentifier ES256 = new COSEAlgorithmIdentifier(-7);
public static final COSEAlgorithmIdentifier ES384 = new COSEAlgorithmIdentifier(-35);
public static final COSEAlgorithmIdentifier ES512 = new COSEAlgorithmIdentifier(-36);
public static final COSEAlgorithmIdentifier RS256 = new COSEAlgorithmIdentifier(-257);
public static final COSEAlgorithmIdentifier RS384 = new COSEAlgorithmIdentifier(-258);
public static final COSEAlgorithmIdentifier RS512 = new COSEAlgorithmIdentifier(-259);
public static final COSEAlgorithmIdentifier RS1 = new COSEAlgorithmIdentifier(-65535);
private final long value;
private COSEAlgorithmIdentifier(long value) {
this.value = value;
}
public long getValue() {
return this.value;
}
@Override
public String toString() {
return String.valueOf(this.value);
}
public static COSEAlgorithmIdentifier[] values() {
return new COSEAlgorithmIdentifier[] { EdDSA, ES256, ES384, ES512, RS256, RS384, RS512, RS1 };
}
}
| COSEAlgorithmIdentifier |
java | alibaba__druid | core/src/main/java/com/alibaba/druid/sql/dialect/bigquery/parser/BigQueryStatementParser.java | {
"start": 683,
"end": 9718
} | class ____ extends SQLStatementParser {
public BigQueryStatementParser(String sql) {
super(new BigQueryExprParser(sql));
}
public BigQueryStatementParser(String sql, SQLParserFeature... features) {
super(new BigQueryExprParser(sql, features));
}
public BigQueryStatementParser(Lexer lexer) {
super(new BigQueryExprParser(lexer));
}
public BigQuerySelectParser createSQLSelectParser() {
return new BigQuerySelectParser(this.exprParser, selectListCache);
}
public SQLCreateTableParser getSQLCreateTableParser() {
return new BigQueryCreateTableParser(this.exprParser);
}
@Override
public SQLCreateFunctionStatement parseCreateFunction() {
SQLCreateFunctionStatement createFunction = new SQLCreateFunctionStatement();
accept(Token.CREATE);
if (lexer.nextIfIdentifier("TEMP")
|| lexer.nextIfIdentifier(FnvHash.Constants.TEMPORARY)) {
createFunction.setTemporary(true);
}
accept(Token.FUNCTION);
createFunction.setName(
this.exprParser.name());
parameters(createFunction.getParameters(), createFunction);
if (lexer.nextIfIdentifier(FnvHash.Constants.RETURNS)) {
createFunction.setReturnDataType(
this.exprParser.parseDataType()
);
}
for (;;) {
if (lexer.nextIfIdentifier("LANGUAGE")) {
createFunction.setLanguage(
lexer.stringVal()
);
accept(Token.IDENTIFIER);
continue;
}
if (lexer.nextIfIdentifier(FnvHash.Constants.OPTIONS)) {
exprParser.parseAssignItem(createFunction.getOptions(), createFunction);
continue;
}
if (lexer.nextIf(Token.AS)) {
if (lexer.nextIf(Token.LPAREN)) {
createFunction.setBlock(
new SQLExprStatement(
this.exprParser.expr()));
accept(Token.RPAREN);
} else {
lexer.nextIfIdentifier("R");
String script = lexer.stringVal();
if (script.startsWith("\"") && script.endsWith("\"")) {
script = script.substring(1, script.length() - 1);
}
createFunction.setWrappedSource(
script
);
if (lexer.token() == Token.LITERAL_TEXT_BLOCK || lexer.token() == Token.LITERAL_CHARS) {
lexer.nextToken();
} else {
setErrorEndPos(lexer.pos());
printError(lexer.token());
}
}
continue;
}
break;
}
if (lexer.nextIf(Token.SEMI)) {
createFunction.setAfterSemi(true);
}
return createFunction;
}
public SQLStatement parseDeclare() {
accept(Token.DECLARE);
SQLDeclareStatement declareStatement = new SQLDeclareStatement();
for (; ; ) {
SQLDeclareItem item = new SQLDeclareItem();
item.setName(exprParser.name());
declareStatement.addItem(item);
if (lexer.token() == Token.COMMA) {
lexer.nextToken();
} else if (lexer.token() != Token.EOF) {
item.setDataType(exprParser.parseDataType());
if (lexer.nextIf(Token.DEFAULT)) {
item.setValue(exprParser.expr());
}
break;
} else {
throw new ParserException("TODO. " + lexer.info());
}
}
return declareStatement;
}
public boolean parseStatementListDialect(List<SQLStatement> statementList) {
if (lexer.identifierEquals("ASSERT")) {
statementList.add(parseAssert());
return true;
}
if (lexer.token() == Token.BEGIN) {
statementList.add(parseBlock());
return true;
}
if (lexer.token() == Token.RAISE) {
statementList.add(parseRaise());
return true;
}
return false;
}
@Override
public SQLStatement parseExecute() {
acceptIdentifier(FnvHash.Constants.EXECUTE);
acceptIdentifier("IMMEDIATE");
BigQueryExecuteImmediateStatement stmt = new BigQueryExecuteImmediateStatement();
stmt.setDynamicSql(
this.exprParser.expr()
);
if (lexer.nextIf(Token.INTO)) {
this.exprParser.exprList(stmt.getInto(), stmt);
}
if (lexer.nextIf(Token.USING)) {
for (;;) {
SQLExpr expr = this.exprParser.expr();
String alias = null;
if (lexer.nextIf(Token.AS)) {
alias = lexer.stringVal();
lexer.nextToken();
}
stmt.addUsing(expr, alias);
if (lexer.nextIf(Token.COMMA)) {
continue;
}
break;
}
}
return stmt;
}
public SQLStatement parseRaise() {
accept(Token.RAISE);
SQLRaiseStatement sqlRaiseStatement = new SQLRaiseStatement();
if (lexer.nextIf(Token.USING)) {
acceptIdentifier("MESSAGE");
accept(Token.EQ);
sqlRaiseStatement.setMessage(exprParser.expr());
}
return sqlRaiseStatement;
}
protected SQLStatement parseAssert() {
acceptIdentifier("ASSERT");
BigQueryAssertStatement stmt = new BigQueryAssertStatement();
stmt.setExpr(
exprParser.expr()
);
if (lexer.nextIf(Token.AS)) {
stmt.setAs((SQLCharExpr) exprParser.primary());
}
return stmt;
}
public SQLDeleteStatement parseDeleteStatement() {
SQLDeleteStatement deleteStatement = new SQLDeleteStatement(getDbType());
accept(Token.DELETE);
lexer.nextIf(Token.FROM);
SQLTableSource tableSource = createSQLSelectParser().parseTableSource();
deleteStatement.setTableSource(tableSource);
if (lexer.nextIf(Token.WHERE)) {
SQLExpr where = this.exprParser.expr();
deleteStatement.setWhere(where);
}
return deleteStatement;
}
@Override
protected void mergeBeforeName() {
this.lexer.nextIf(Token.INTO);
}
public SQLStatement parseBlock() {
accept(Token.BEGIN);
if (lexer.identifierEquals("TRANSACTION") || lexer.identifierEquals("TRAN")) {
lexer.nextToken();
SQLStartTransactionStatement startTrans = new SQLStartTransactionStatement(dbType);
if (lexer.token() == Token.IDENTIFIER) {
SQLName name = this.exprParser.name();
startTrans.setName(name);
}
return startTrans;
}
SQLBlockStatement block = new SQLBlockStatement();
parseStatementList(block.getStatementList(), -1, block);
if (lexer.token() == Token.EXCEPTION) {
block.setException(parseException());
}
accept(Token.END);
return block;
}
protected void createViewAs(SQLCreateViewStatement createView) {
if (lexer.nextIfIdentifier(FnvHash.Constants.OPTIONS)) {
exprParser.parseAssignItem(createView.getOptions(), createView);
}
super.createViewAs(createView);
}
@Override
protected SQLStatement parseCreateModel() {
accept(Token.CREATE);
acceptIdentifier("MODEL");
BigQueryCreateModelStatement stmt = new BigQueryCreateModelStatement();
if (lexer.nextIf(Token.IF)) {
accept(Token.NOT);
accept(Token.EXISTS);
stmt.setIfNotExists(true);
} else if (lexer.nextIf(Token.OR)) {
accept(Token.REPLACE);
stmt.setReplace(true);
}
stmt.setName(
exprParser.name()
);
if (lexer.nextIfIdentifier("OPTIONS")) {
exprParser.parseAssignItem(stmt.getOptions(), stmt);
}
if (lexer.nextIf(Token.AS)) {
accept(Token.LPAREN);
acceptIdentifier("TRAINING_DATA");
accept(Token.AS);
accept(Token.LPAREN);
stmt.setTrainingData(
parseStatement0()
);
accept(Token.RPAREN);
accept(Token.COMMA);
acceptIdentifier("CUSTOM_HOLIDAY");
accept(Token.AS);
accept(Token.LPAREN);
stmt.setCustomHoliday(
parseStatement0()
);
accept(Token.RPAREN);
accept(Token.RPAREN);
}
return stmt;
}
}
| BigQueryStatementParser |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/output/ValueDoubleMapOutput.java | {
"start": 826,
"end": 2658
} | class ____<K, V> extends CommandOutput<K, V, Map<V, Double>> {
private static final InternalLogger LOG = InternalLoggerFactory.getInstance(ValueDoubleMapOutput.class);
private boolean outputError = false;
private boolean hasKey;
private V key;
/**
* Initialize a new {@link ValueDoubleMapOutput}.
*
* @param codec Codec used to decode keys and values, must not be {@code null}.
*/
public ValueDoubleMapOutput(RedisCodec<K, V> codec) {
super(codec, null);
}
@Override
public void set(ByteBuffer bytes) {
if (outputError) {
return;
}
if (!hasKey) {
key = (bytes == null) ? null : codec.decodeValue(bytes);
hasKey = true;
return;
}
// RESP2 does not have a double type, so we are assuming we are parsing it now
try {
Double value = Double.parseDouble(decodeString(bytes));
output.put(key, value);
key = null;
hasKey = false;
} catch (NumberFormatException e) {
LOG.warn("Unable to fallback to parsing double from string, discarding the result");
output = new HashMap<>(0);
outputError = true;
}
}
@Override
public void set(double number) {
if (outputError) {
return;
}
if (hasKey) {
output.put(key, number);
key = null;
hasKey = false;
return;
}
LOG.warn("Expected bytes but got double, discarding the result");
output = new HashMap<>(0);
outputError = true;
}
@Override
public void multi(int count) {
if (output == null) {
output = new LinkedHashMap<>(count / 2, 1);
}
}
}
| ValueDoubleMapOutput |
java | grpc__grpc-java | grpclb/src/main/java/io/grpc/grpclb/GrpclbState.java | {
"start": 5451,
"end": 22717
} | enum ____ {
ROUND_ROBIN,
PICK_FIRST,
}
private final String serviceName;
private final long fallbackTimeoutMs;
private final Helper helper;
private final Context context;
private final SynchronizationContext syncContext;
@Nullable
private final SubchannelPool subchannelPool;
private final TimeProvider time;
private final Stopwatch stopwatch;
private final ScheduledExecutorService timerService;
private static final Attributes.Key<AtomicReference<ConnectivityStateInfo>> STATE_INFO =
Attributes.Key.create("io.grpc.grpclb.GrpclbLoadBalancer.stateInfo");
private final BackoffPolicy.Provider backoffPolicyProvider;
private final ChannelLogger logger;
// Scheduled only once. Never reset.
@Nullable
private ScheduledHandle fallbackTimer;
private List<EquivalentAddressGroup> fallbackBackendList = Collections.emptyList();
private boolean usingFallbackBackends;
// Reason to fallback, will be used as RPC's error message if fail to fallback (e.g., no
// fallback addresses found).
@Nullable
private Status fallbackReason;
// True if the current balancer has returned a serverlist. Will be reset to false when lost
// connection to a balancer.
private boolean balancerWorking;
@Nullable
private BackoffPolicy lbRpcRetryPolicy;
@Nullable
private ScheduledHandle lbRpcRetryTimer;
@Nullable
private ManagedChannel lbCommChannel;
@Nullable
private LbStream lbStream;
private Map<List<EquivalentAddressGroup>, Subchannel> subchannels = Collections.emptyMap();
private final GrpclbConfig config;
// Has the same size as the round-robin list from the balancer.
// A drop entry from the round-robin list becomes a DropEntry here.
// A backend entry from the robin-robin list becomes a null here.
private List<DropEntry> dropList = Collections.emptyList();
// Contains only non-drop, i.e., backends from the round-robin list from the balancer.
private List<BackendEntry> backendList = Collections.emptyList();
private RoundRobinPicker currentPicker =
new RoundRobinPicker(Collections.<DropEntry>emptyList(), Arrays.asList(BUFFER_ENTRY));
private boolean requestConnectionPending;
GrpclbState(
GrpclbConfig config,
Helper helper,
Context context,
SubchannelPool subchannelPool,
TimeProvider time,
Stopwatch stopwatch,
BackoffPolicy.Provider backoffPolicyProvider) {
this.config = checkNotNull(config, "config");
this.helper = checkNotNull(helper, "helper");
this.context = checkNotNull(context, "context");
this.syncContext = checkNotNull(helper.getSynchronizationContext(), "syncContext");
if (config.getMode() == Mode.ROUND_ROBIN) {
this.subchannelPool = checkNotNull(subchannelPool, "subchannelPool");
subchannelPool.registerListener(
new PooledSubchannelStateListener() {
@Override
public void onSubchannelState(
Subchannel subchannel, ConnectivityStateInfo newState) {
handleSubchannelState(subchannel, newState);
}
});
} else {
this.subchannelPool = null;
}
this.time = checkNotNull(time, "time provider");
this.stopwatch = checkNotNull(stopwatch, "stopwatch");
this.timerService = checkNotNull(helper.getScheduledExecutorService(), "timerService");
this.backoffPolicyProvider = checkNotNull(backoffPolicyProvider, "backoffPolicyProvider");
if (config.getServiceName() != null) {
this.serviceName = config.getServiceName();
} else {
this.serviceName = checkNotNull(helper.getAuthority(), "helper returns null authority");
}
this.fallbackTimeoutMs = config.getFallbackTimeoutMs();
this.logger = checkNotNull(helper.getChannelLogger(), "logger");
logger.log(ChannelLogLevel.INFO, "[grpclb-<{0}>] Created", serviceName);
}
void handleSubchannelState(Subchannel subchannel, ConnectivityStateInfo newState) {
if (newState.getState() == SHUTDOWN || !subchannels.containsValue(subchannel)) {
return;
}
if (config.getMode() == Mode.ROUND_ROBIN && newState.getState() == IDLE) {
subchannel.requestConnection();
}
if (newState.getState() == TRANSIENT_FAILURE || newState.getState() == IDLE) {
helper.refreshNameResolution();
}
AtomicReference<ConnectivityStateInfo> stateInfoRef =
subchannel.getAttributes().get(STATE_INFO);
// If all RR servers are unhealthy, it's possible that at least one connection is CONNECTING at
// every moment which causes RR to stay in CONNECTING. It's better to keep the TRANSIENT_FAILURE
// state in that case so that fail-fast RPCs can fail fast.
boolean keepState =
config.getMode() == Mode.ROUND_ROBIN
&& stateInfoRef.get().getState() == TRANSIENT_FAILURE
&& (newState.getState() == CONNECTING || newState.getState() == IDLE);
if (!keepState) {
stateInfoRef.set(newState);
maybeUseFallbackBackends();
maybeUpdatePicker();
}
}
/**
* Handle new addresses of the balancer and backends from the resolver, and create connection if
* not yet connected.
*/
void handleAddresses(
List<EquivalentAddressGroup> newLbAddressGroups,
List<EquivalentAddressGroup> newBackendServers) {
logger.log(
ChannelLogLevel.DEBUG,
"[grpclb-<{0}>] Resolved addresses: lb addresses {1}, backends: {2}",
serviceName,
newLbAddressGroups,
newBackendServers);
fallbackBackendList = newBackendServers;
if (newLbAddressGroups.isEmpty()) {
// No balancer address: close existing balancer connection and prepare to enter fallback
// mode. If there is no successful backend connection, it enters fallback mode immediately.
// Otherwise, fallback does not happen until backend connections are lost. This behavior
// might be different from other languages (e.g., existing balancer connection is not
// closed in C-core), but we aren't changing it at this time.
shutdownLbComm();
if (!usingFallbackBackends) {
fallbackReason = NO_LB_ADDRESS_PROVIDED_STATUS;
cancelFallbackTimer();
maybeUseFallbackBackends();
}
} else {
startLbComm(newLbAddressGroups);
// Avoid creating a new RPC just because the addresses were updated, as it can cause a
// stampeding herd. The current RPC may be on a connection to an address not present in
// newLbAddressGroups, but we're considering that "okay". If we detected the RPC is to an
// outdated backend, we could choose to re-create the RPC.
if (lbStream == null) {
cancelLbRpcRetryTimer();
startLbRpc();
}
// Start the fallback timer if it's never started and we are not already using fallback
// backends.
if (fallbackTimer == null && !usingFallbackBackends) {
fallbackTimer =
syncContext.schedule(
new FallbackModeTask(BALANCER_TIMEOUT_STATUS),
fallbackTimeoutMs,
TimeUnit.MILLISECONDS,
timerService);
}
}
if (usingFallbackBackends) {
// Populate the new fallback backends to round-robin list.
useFallbackBackends();
}
maybeUpdatePicker();
}
  /**
   * Requests a connection on any idle subchannel currently in the pick list. If none is found,
   * the request is remembered via {@code requestConnectionPending} so it can be honored when the
   * next subchannel is created (see the PICK_FIRST branch of updateServerList).
   */
  void requestConnection() {
    requestConnectionPending = true;
    for (RoundRobinEntry entry : currentPicker.pickList) {
      if (entry instanceof IdleSubchannelEntry) {
        ((IdleSubchannelEntry) entry).subchannel.requestConnection();
        // A connection was actually requested; no need to defer it any longer.
        requestConnectionPending = false;
      }
    }
  }
  /**
   * Enters fallback mode if (and only if) the balancer is not working and no backend connection
   * is currently READY. A precomputed {@code fallbackReason} must already be set by the caller.
   */
  private void maybeUseFallbackBackends() {
    if (balancerWorking || usingFallbackBackends) {
      return;
    }
    // Balancer RPC should have either been broken or timed out.
    checkState(fallbackReason != null, "no reason to fallback");
    for (Subchannel subchannel : subchannels.values()) {
      ConnectivityStateInfo stateInfo = subchannel.getAttributes().get(STATE_INFO).get();
      // At least one backend still connected: stay out of fallback for now.
      if (stateInfo.getState() == READY) {
        return;
      }
      // If we do have balancer-provided backends, use one of its error in the error message if
      // fail to fallback.
      if (stateInfo.getState() == TRANSIENT_FAILURE) {
        fallbackReason = stateInfo.getStatus();
      }
    }
    // Fallback conditions met
    useFallbackBackends();
  }
/**
* Populate backend servers to be used from the fallback backends.
*/
private void useFallbackBackends() {
usingFallbackBackends = true;
logger.log(ChannelLogLevel.INFO, "[grpclb-<{0}>] Using fallback backends", serviceName);
List<DropEntry> newDropList = new ArrayList<>();
List<BackendAddressGroup> newBackendAddrList = new ArrayList<>();
for (EquivalentAddressGroup eag : fallbackBackendList) {
newDropList.add(null);
newBackendAddrList.add(new BackendAddressGroup(eag, null));
}
updateServerList(newDropList, newBackendAddrList, null);
}
  /** Shuts down the out-of-band channel to the balancer (if any) and then the LB RPC stream. */
  private void shutdownLbComm() {
    if (lbCommChannel != null) {
      lbCommChannel.shutdown();
      lbCommChannel = null;
    }
    shutdownLbRpc();
  }
  /** Closes the current LB RPC stream, if one is open. */
  private void shutdownLbRpc() {
    if (lbStream != null) {
      lbStream.close(Status.CANCELLED.withDescription("balancer shutdown").asException());
      // lbStream will be set to null in LbStream.cleanup()
    }
  }
  /**
   * Creates the out-of-band channel to the balancer, or updates its addresses if it already
   * exists. The channel authority is the first EAG's authority override plus a suffix marking it
   * as unusable for regular calls.
   *
   * @param overrideAuthorityEags non-empty balancer addresses carrying an authority override
   */
  private void startLbComm(List<EquivalentAddressGroup> overrideAuthorityEags) {
    checkNotNull(overrideAuthorityEags, "overrideAuthorityEags");
    assert !overrideAuthorityEags.isEmpty();
    // NOTE(review): if ATTR_AUTHORITY_OVERRIDE is absent this yields a "null<suffix>" authority —
    // presumably the resolver always sets it for balancer addresses; confirm upstream.
    String doNotUseAuthority = overrideAuthorityEags.get(0).getAttributes()
        .get(EquivalentAddressGroup.ATTR_AUTHORITY_OVERRIDE) + NO_USE_AUTHORITY_SUFFIX;
    if (lbCommChannel == null) {
      lbCommChannel = helper.createOobChannel(overrideAuthorityEags, doNotUseAuthority);
      logger.log(
          ChannelLogLevel.DEBUG,
          "[grpclb-<{0}>] Created grpclb channel: EAG={1}",
          serviceName,
          overrideAuthorityEags);
    } else {
      helper.updateOobChannelAddresses(lbCommChannel, overrideAuthorityEags);
    }
  }
  /**
   * Starts a new LB RPC stream and sends the initial request carrying the service name. Must only
   * be called when no stream is active. The stream is started under {@code context}, which is
   * detached again immediately afterwards so the caller's context is not disturbed.
   */
  private void startLbRpc() {
    checkState(lbStream == null, "previous lbStream has not been cleared yet");
    LoadBalancerGrpc.LoadBalancerStub stub = LoadBalancerGrpc.newStub(lbCommChannel);
    lbStream = new LbStream(stub);
    Context prevContext = context.attach();
    try {
      lbStream.start();
    } finally {
      // Always restore the previous context, even if start() throws.
      context.detach(prevContext);
    }
    // Restart the stopwatch: it measures the lifetime of this LB RPC (used for retry decisions).
    stopwatch.reset().start();
    LoadBalanceRequest initRequest = LoadBalanceRequest.newBuilder()
        .setInitialRequest(InitialLoadBalanceRequest.newBuilder()
            .setName(serviceName).build())
        .build();
    logger.log(
        ChannelLogLevel.DEBUG,
        "[grpclb-<{0}>] Sent initial grpclb request {1}", serviceName, initRequest);
    try {
      lbStream.lbRequestWriter.onNext(initRequest);
    } catch (Exception e) {
      // Sending the initial request failed; tear the stream down with the cause.
      lbStream.close(e);
    }
  }
  /**
   * Cancels the pending fallback timer, if any. Unlike {@code cancelLbRpcRetryTimer} the field is
   * deliberately NOT reset to null: a non-null {@code fallbackTimer} is used elsewhere to mean
   * "the timer has been started at least once".
   */
  private void cancelFallbackTimer() {
    if (fallbackTimer != null) {
      fallbackTimer.cancel();
    }
  }
  /** Cancels and clears the pending LB-RPC retry timer, if any. */
  private void cancelLbRpcRetryTimer() {
    if (lbRpcRetryTimer != null) {
      lbRpcRetryTimer.cancel();
      lbRpcRetryTimer = null;
    }
  }
  /**
   * Tears down this state machine: closes the balancer connection, releases or shuts down all
   * backend subchannels (mode-dependent), and cancels all pending timers.
   */
  void shutdown() {
    logger.log(ChannelLogLevel.INFO, "[grpclb-<{0}>] Shutdown", serviceName);
    shutdownLbComm();
    switch (config.getMode()) {
      case ROUND_ROBIN:
        // We close the subchannels through subchannelPool instead of helper just for convenience of
        // testing.
        for (Subchannel subchannel : subchannels.values()) {
          returnSubchannelToPool(subchannel);
        }
        subchannelPool.clear();
        break;
      case PICK_FIRST:
        // PICK_FIRST maintains at most a single shared subchannel; shut it down directly.
        if (!subchannels.isEmpty()) {
          checkState(subchannels.size() == 1, "Excessive Subchannels: %s", subchannels);
          subchannels.values().iterator().next().shutdown();
        }
        break;
      default:
        throw new AssertionError("Missing case for " + config.getMode());
    }
    subchannels = Collections.emptyMap();
    cancelFallbackTimer();
    cancelLbRpcRetryTimer();
  }
void propagateError(Status status) {
logger.log(ChannelLogLevel.DEBUG, "[grpclb-<{0}>] Error: {1}", serviceName, status);
if (backendList.isEmpty()) {
Status error =
Status.UNAVAILABLE.withCause(status.getCause()).withDescription(status.getDescription());
maybeUpdatePicker(
TRANSIENT_FAILURE, new RoundRobinPicker(dropList, Arrays.asList(new ErrorEntry(error))));
}
}
  /** Hands a subchannel back to the pool together with its last-known connectivity state. */
  private void returnSubchannelToPool(Subchannel subchannel) {
    subchannelPool.returnSubchannel(subchannel, subchannel.getAttributes().get(STATE_INFO).get());
  }
@VisibleForTesting
@Nullable
GrpclbClientLoadRecorder getLoadRecorder() {
if (lbStream == null) {
return null;
}
return lbStream.loadRecorder;
}
  /**
   * Populate backend servers to be used based on the given list of addresses.
   *
   * <p>In ROUND_ROBIN mode, one pooled subchannel is (re)used per distinct address group; pooled
   * subchannels whose addresses were delisted are returned to the pool. In PICK_FIRST mode a
   * single subchannel is created (or its addresses updated) covering all backend addresses, with
   * load-report tokens attached as EAG attributes.
   *
   * @param newDropList drop entries aligned 1:1 with the balancer's round-robin list
   * @param newBackendAddrList backend addresses (with optional load-report tokens)
   * @param loadRecorder recorder for client load reports; null when not load-reporting
   */
  private void updateServerList(
      List<DropEntry> newDropList, List<BackendAddressGroup> newBackendAddrList,
      @Nullable GrpclbClientLoadRecorder loadRecorder) {
    HashMap<List<EquivalentAddressGroup>, Subchannel> newSubchannelMap =
        new HashMap<>();
    List<BackendEntry> newBackendList = new ArrayList<>();
    switch (config.getMode()) {
      case ROUND_ROBIN:
        for (BackendAddressGroup backendAddr : newBackendAddrList) {
          EquivalentAddressGroup eag = backendAddr.getAddresses();
          List<EquivalentAddressGroup> eagAsList = Collections.singletonList(eag);
          Subchannel subchannel = newSubchannelMap.get(eagAsList);
          if (subchannel == null) {
            // Reuse an existing subchannel for this address if we have one; otherwise take one
            // from the pool (or create it) and start connecting.
            subchannel = subchannels.get(eagAsList);
            if (subchannel == null) {
              subchannel = subchannelPool.takeOrCreateSubchannel(eag, createSubchannelAttrs());
              subchannel.requestConnection();
            }
            newSubchannelMap.put(eagAsList, subchannel);
          }
          BackendEntry entry;
          // Only picks with tokens are reported to LoadRecorder
          if (backendAddr.getToken() == null) {
            entry = new BackendEntry(subchannel);
          } else {
            entry = new BackendEntry(subchannel, loadRecorder, backendAddr.getToken());
          }
          newBackendList.add(entry);
        }
        // Close Subchannels whose addresses have been delisted
        for (Map.Entry<List<EquivalentAddressGroup>, Subchannel> entry : subchannels.entrySet()) {
          List<EquivalentAddressGroup> eagList = entry.getKey();
          if (!newSubchannelMap.containsKey(eagList)) {
            returnSubchannelToPool(entry.getValue());
          }
        }
        subchannels = Collections.unmodifiableMap(newSubchannelMap);
        break;
      case PICK_FIRST:
        checkState(subchannels.size() <= 1, "Unexpected Subchannel count: %s", subchannels);
        final Subchannel subchannel;
        if (newBackendAddrList.isEmpty()) {
          // No backends at all: drop the single subchannel if one exists.
          if (subchannels.size() == 1) {
            subchannel = subchannels.values().iterator().next();
            subchannel.shutdown();
            subchannels = Collections.emptyMap();
          }
          break;
        }
        List<EquivalentAddressGroup> eagList = new ArrayList<>();
        // Because for PICK_FIRST, we create a single Subchannel for all addresses, we have to
        // attach the tokens to the EAG attributes and use TokenAttachingLoadRecorder to put them on
        // headers.
        //
        // The PICK_FIRST code path doesn't cache Subchannels.
        for (BackendAddressGroup bag : newBackendAddrList) {
          EquivalentAddressGroup origEag = bag.getAddresses();
          Attributes eagAttrs = origEag.getAttributes();
          if (bag.getToken() != null) {
            eagAttrs = eagAttrs.toBuilder()
                .set(GrpclbConstants.TOKEN_ATTRIBUTE_KEY, bag.getToken()).build();
          }
          eagList.add(new EquivalentAddressGroup(origEag.getAddresses(), eagAttrs));
        }
        if (subchannels.isEmpty()) {
          subchannel =
              helper.createSubchannel(
                  CreateSubchannelArgs.newBuilder()
                      .setAddresses(eagList)
                      .setAttributes(createSubchannelAttrs())
                      .build());
          subchannel.start(new SubchannelStateListener() {
            @Override
            public void onSubchannelState(ConnectivityStateInfo newState) {
              handleSubchannelState(subchannel, newState);
            }
          });
          // Honor a requestConnection() that arrived before this subchannel existed.
          if (requestConnectionPending) {
            subchannel.requestConnection();
            requestConnectionPending = false;
          }
        } else {
          subchannel = subchannels.values().iterator().next();
          subchannel.updateAddresses(eagList);
        }
        subchannels = Collections.singletonMap(eagList, subchannel);
        newBackendList.add(
            new BackendEntry(subchannel, new TokenAttachingTracerFactory(loadRecorder)));
        break;
      default:
        throw new AssertionError("Missing case for " + config.getMode());
    }
    dropList = Collections.unmodifiableList(newDropList);
    backendList = Collections.unmodifiableList(newBackendList);
  }
@VisibleForTesting
| Mode |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/BaseRouterClientRMTest.java | {
"start": 9472,
"end": 27302
} | class ____
extends RouterClientRMService {
public MockRouterClientRMService() {
super();
}
}
protected GetNewApplicationResponse getNewApplication(String user)
throws YarnException, IOException, InterruptedException {
return UserGroupInformation.createRemoteUser(user)
.doAs(new PrivilegedExceptionAction<GetNewApplicationResponse>() {
@Override
public GetNewApplicationResponse run() throws Exception {
GetNewApplicationRequest req =
GetNewApplicationRequest.newInstance();
GetNewApplicationResponse response =
getRouterClientRMService().getNewApplication(req);
return response;
}
});
}
protected SubmitApplicationResponse submitApplication(
final ApplicationId appId, String user)
throws YarnException, IOException, InterruptedException {
return UserGroupInformation.createRemoteUser(user)
.doAs(new PrivilegedExceptionAction<SubmitApplicationResponse>() {
@Override
public SubmitApplicationResponse run() throws Exception {
ContainerLaunchContext amContainerSpec = mock(
ContainerLaunchContext.class);
ApplicationSubmissionContext context = ApplicationSubmissionContext
.newInstance(appId, MockApps.newAppName(), "q1",
Priority.newInstance(0), amContainerSpec, false, false, -1,
Resources.createResource(
YarnConfiguration.
DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB),
"MockApp");
SubmitApplicationRequest req = SubmitApplicationRequest
.newInstance(context);
SubmitApplicationResponse response = getRouterClientRMService()
.submitApplication(req);
return response;
}
});
}
protected KillApplicationResponse forceKillApplication(
final ApplicationId appId, String user)
throws YarnException, IOException, InterruptedException {
return UserGroupInformation.createRemoteUser(user)
.doAs(new PrivilegedExceptionAction<KillApplicationResponse>() {
@Override
public KillApplicationResponse run() throws Exception {
KillApplicationRequest req =
KillApplicationRequest.newInstance(appId);
KillApplicationResponse response =
getRouterClientRMService().forceKillApplication(req);
return response;
}
});
}
protected GetClusterMetricsResponse getClusterMetrics(String user)
throws YarnException, IOException, InterruptedException {
return UserGroupInformation.createRemoteUser(user)
.doAs(new PrivilegedExceptionAction<GetClusterMetricsResponse>() {
@Override
public GetClusterMetricsResponse run() throws Exception {
GetClusterMetricsRequest req =
GetClusterMetricsRequest.newInstance();
GetClusterMetricsResponse response =
getRouterClientRMService().getClusterMetrics(req);
return response;
}
});
}
protected GetClusterNodesResponse getClusterNodes(String user)
throws YarnException, IOException, InterruptedException {
return UserGroupInformation.createRemoteUser(user)
.doAs(new PrivilegedExceptionAction<GetClusterNodesResponse>() {
@Override
public GetClusterNodesResponse run() throws Exception {
GetClusterNodesRequest req = GetClusterNodesRequest.newInstance();
GetClusterNodesResponse response =
getRouterClientRMService().getClusterNodes(req);
return response;
}
});
}
protected GetQueueInfoResponse getQueueInfo(String user)
throws YarnException, IOException, InterruptedException {
return UserGroupInformation.createRemoteUser(user)
.doAs(new PrivilegedExceptionAction<GetQueueInfoResponse>() {
@Override
public GetQueueInfoResponse run() throws Exception {
GetQueueInfoRequest req =
GetQueueInfoRequest.newInstance("default", false, false, false);
GetQueueInfoResponse response =
getRouterClientRMService().getQueueInfo(req);
return response;
}
});
}
protected GetQueueUserAclsInfoResponse getQueueUserAcls(String user)
throws YarnException, IOException, InterruptedException {
return UserGroupInformation.createRemoteUser(user)
.doAs(new PrivilegedExceptionAction<GetQueueUserAclsInfoResponse>() {
@Override
public GetQueueUserAclsInfoResponse run() throws Exception {
GetQueueUserAclsInfoRequest req =
GetQueueUserAclsInfoRequest.newInstance();
GetQueueUserAclsInfoResponse response =
getRouterClientRMService().getQueueUserAcls(req);
return response;
}
});
}
protected MoveApplicationAcrossQueuesResponse moveApplicationAcrossQueues(
String user, final ApplicationId appId)
throws YarnException, IOException, InterruptedException {
return UserGroupInformation.createRemoteUser(user).doAs(
new PrivilegedExceptionAction<MoveApplicationAcrossQueuesResponse>() {
@Override
public MoveApplicationAcrossQueuesResponse run() throws Exception {
MoveApplicationAcrossQueuesRequest req =
MoveApplicationAcrossQueuesRequest.newInstance(appId,
"newQueue");
MoveApplicationAcrossQueuesResponse response =
getRouterClientRMService().moveApplicationAcrossQueues(req);
return response;
}
});
}
public GetNewReservationResponse getNewReservation(String user)
throws YarnException, IOException, InterruptedException {
return UserGroupInformation.createRemoteUser(user)
.doAs(new PrivilegedExceptionAction<GetNewReservationResponse>() {
@Override
public GetNewReservationResponse run() throws Exception {
GetNewReservationResponse response = getRouterClientRMService()
.getNewReservation(GetNewReservationRequest.newInstance());
return response;
}
});
}
protected ReservationSubmissionResponse submitReservation(String user,
final ReservationId reservationId)
throws YarnException, IOException, InterruptedException {
return UserGroupInformation.createRemoteUser(user)
.doAs(new PrivilegedExceptionAction<ReservationSubmissionResponse>() {
@Override
public ReservationSubmissionResponse run() throws Exception {
Clock clock = new UTCClock();
long arrival = clock.getTime();
long duration = 60000;
long deadline = (long) (arrival + 1.05 * duration);
ReservationSubmissionRequest req = ReservationSystemTestUtil
.createSimpleReservationRequest(reservationId, 1, arrival,
deadline, duration);
ReservationSubmissionResponse response =
getRouterClientRMService().submitReservation(req);
return response;
}
});
}
protected ReservationUpdateResponse updateReservation(String user,
final ReservationId reservationId)
throws YarnException, IOException, InterruptedException {
return UserGroupInformation.createRemoteUser(user)
.doAs(new PrivilegedExceptionAction<ReservationUpdateResponse>() {
@Override
public ReservationUpdateResponse run() throws Exception {
Clock clock = new UTCClock();
long arrival = clock.getTime();
long duration = 60000;
long deadline = (long) (arrival + 1.05 * duration);
ReservationDefinition rDef =
createSimpleReservationRequest(1, arrival, deadline, duration,
reservationId).getReservationDefinition();
ReservationUpdateRequest req =
ReservationUpdateRequest.newInstance(rDef, reservationId);
ReservationUpdateResponse response =
getRouterClientRMService().updateReservation(req);
return response;
}
});
}
protected ReservationDeleteResponse deleteReservation(String user,
final ReservationId reservationId)
throws YarnException, IOException, InterruptedException {
return UserGroupInformation.createRemoteUser(user)
.doAs(new PrivilegedExceptionAction<ReservationDeleteResponse>() {
@Override
public ReservationDeleteResponse run() throws Exception {
ReservationDeleteRequest req =
ReservationDeleteRequest.newInstance(reservationId);
ReservationDeleteResponse response =
getRouterClientRMService().deleteReservation(req);
return response;
}
});
}
protected GetNodesToLabelsResponse getNodeToLabels(String user)
throws YarnException, IOException, InterruptedException {
return UserGroupInformation.createRemoteUser(user)
.doAs(new PrivilegedExceptionAction<GetNodesToLabelsResponse>() {
@Override
public GetNodesToLabelsResponse run() throws Exception {
GetNodesToLabelsRequest req = GetNodesToLabelsRequest.newInstance();
GetNodesToLabelsResponse response =
getRouterClientRMService().getNodeToLabels(req);
return response;
}
});
}
protected GetLabelsToNodesResponse getLabelsToNodes(String user)
throws YarnException, IOException, InterruptedException {
return UserGroupInformation.createRemoteUser(user)
.doAs(new PrivilegedExceptionAction<GetLabelsToNodesResponse>() {
@Override
public GetLabelsToNodesResponse run() throws Exception {
GetLabelsToNodesRequest req = GetLabelsToNodesRequest.newInstance();
GetLabelsToNodesResponse response =
getRouterClientRMService().getLabelsToNodes(req);
return response;
}
});
}
protected GetClusterNodeLabelsResponse getClusterNodeLabels(String user)
throws YarnException, IOException, InterruptedException {
return UserGroupInformation.createRemoteUser(user)
.doAs(new PrivilegedExceptionAction<GetClusterNodeLabelsResponse>() {
@Override
public GetClusterNodeLabelsResponse run() throws Exception {
GetClusterNodeLabelsRequest req =
GetClusterNodeLabelsRequest.newInstance();
GetClusterNodeLabelsResponse response =
getRouterClientRMService().getClusterNodeLabels(req);
return response;
}
});
}
protected GetApplicationReportResponse getApplicationReport(String user,
final ApplicationId appId)
throws YarnException, IOException, InterruptedException {
return UserGroupInformation.createRemoteUser(user)
.doAs(new PrivilegedExceptionAction<GetApplicationReportResponse>() {
@Override
public GetApplicationReportResponse run() throws Exception {
GetApplicationReportRequest req =
GetApplicationReportRequest.newInstance(appId);
GetApplicationReportResponse response =
getRouterClientRMService().getApplicationReport(req);
return response;
}
});
}
protected GetApplicationsResponse getApplications(String user)
throws YarnException, IOException, InterruptedException {
return UserGroupInformation.createRemoteUser(user)
.doAs(new PrivilegedExceptionAction<GetApplicationsResponse>() {
@Override
public GetApplicationsResponse run() throws Exception {
GetApplicationsRequest req = GetApplicationsRequest.newInstance();
GetApplicationsResponse response =
getRouterClientRMService().getApplications(req);
return response;
}
});
}
protected GetApplicationAttemptReportResponse getApplicationAttemptReport(
String user, final ApplicationAttemptId appAttemptId)
throws YarnException, IOException, InterruptedException {
return UserGroupInformation.createRemoteUser(user).doAs(
new PrivilegedExceptionAction<GetApplicationAttemptReportResponse>() {
@Override
public GetApplicationAttemptReportResponse run() throws Exception {
GetApplicationAttemptReportRequest req =
GetApplicationAttemptReportRequest.newInstance(appAttemptId);
GetApplicationAttemptReportResponse response =
getRouterClientRMService().getApplicationAttemptReport(req);
return response;
}
});
}
protected GetApplicationAttemptsResponse getApplicationAttempts(String user,
final ApplicationId applicationId)
throws YarnException, IOException, InterruptedException {
return UserGroupInformation.createRemoteUser(user)
.doAs(new PrivilegedExceptionAction<GetApplicationAttemptsResponse>() {
@Override
public GetApplicationAttemptsResponse run() throws Exception {
GetApplicationAttemptsRequest req =
GetApplicationAttemptsRequest.newInstance(applicationId);
GetApplicationAttemptsResponse response =
getRouterClientRMService().getApplicationAttempts(req);
return response;
}
});
}
protected GetContainerReportResponse getContainerReport(String user,
final ContainerId containerId)
throws YarnException, IOException, InterruptedException {
return UserGroupInformation.createRemoteUser(user)
.doAs(new PrivilegedExceptionAction<GetContainerReportResponse>() {
@Override
public GetContainerReportResponse run() throws Exception {
GetContainerReportRequest req =
GetContainerReportRequest.newInstance(containerId);
GetContainerReportResponse response =
getRouterClientRMService().getContainerReport(req);
return response;
}
});
}
protected GetContainersResponse getContainers(String user,
final ApplicationAttemptId appAttemptId)
throws YarnException, IOException, InterruptedException {
return UserGroupInformation.createRemoteUser(user)
.doAs(new PrivilegedExceptionAction<GetContainersResponse>() {
@Override
public GetContainersResponse run() throws Exception {
GetContainersRequest req =
GetContainersRequest.newInstance(appAttemptId);
GetContainersResponse response =
getRouterClientRMService().getContainers(req);
return response;
}
});
}
protected GetDelegationTokenResponse getDelegationToken(final String user)
throws YarnException, IOException, InterruptedException {
return UserGroupInformation.createRemoteUser(user)
.doAs(new PrivilegedExceptionAction<GetDelegationTokenResponse>() {
@Override
public GetDelegationTokenResponse run() throws Exception {
GetDelegationTokenRequest req =
GetDelegationTokenRequest.newInstance(user);
GetDelegationTokenResponse response =
getRouterClientRMService().getDelegationToken(req);
return response;
}
});
}
protected RenewDelegationTokenResponse renewDelegationToken(String user,
final Token token)
throws YarnException, IOException, InterruptedException {
return UserGroupInformation.createRemoteUser(user)
.doAs(new PrivilegedExceptionAction<RenewDelegationTokenResponse>() {
@Override
public RenewDelegationTokenResponse run() throws Exception {
RenewDelegationTokenRequest req =
RenewDelegationTokenRequest.newInstance(token);
RenewDelegationTokenResponse response =
getRouterClientRMService().renewDelegationToken(req);
return response;
}
});
}
protected CancelDelegationTokenResponse cancelDelegationToken(String user,
final Token token)
throws YarnException, IOException, InterruptedException {
return UserGroupInformation.createRemoteUser(user)
.doAs(new PrivilegedExceptionAction<CancelDelegationTokenResponse>() {
@Override
public CancelDelegationTokenResponse run() throws Exception {
CancelDelegationTokenRequest req =
CancelDelegationTokenRequest.newInstance(token);
CancelDelegationTokenResponse response =
getRouterClientRMService().cancelDelegationToken(req);
return response;
}
});
}
private ReservationSubmissionRequest createSimpleReservationRequest(
int numContainers, long arrival, long deadline, long duration,
ReservationId reservationId) {
// create a request with a single atomic ask
ReservationRequest r = ReservationRequest
.newInstance(Resource.newInstance(1024, 1), numContainers, 1, duration);
ReservationRequests reqs = ReservationRequests.newInstance(
Collections.singletonList(r), ReservationRequestInterpreter.R_ALL);
ReservationDefinition rDef = ReservationDefinition.newInstance(arrival,
deadline, reqs, "testRouterClientRMService#reservation");
ReservationSubmissionRequest request = ReservationSubmissionRequest
.newInstance(rDef, "dedicated", reservationId);
return request;
}
}
| MockRouterClientRMService |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/common/io/stream/VersionedNamedWriteable.java | {
"start": 699,
"end": 2912
} | interface ____ extends NamedWriteable {
    /**
     * Returns the name under which this writeable is registered, used by the receiving side to
     * look up the matching reader.
     */
    String getWriteableName();
    /**
     * The minimal transport version of the recipient this object can be sent to.
     * See {@link #supportsVersion(TransportVersion)} for the default serialization check.
     *
     * @return the oldest transport version that can deserialize this object
     */
    TransportVersion getMinimalSupportedVersion();
    /**
     * Determines whether this instance should be serialized based on the provided transport
     * version.
     *
     * <p>The default implementation returns {@code true} if the given transport version is
     * equal to or newer than {@link #getMinimalSupportedVersion()}.
     * Subclasses may override this method to define custom serialization logic.
     *
     * @param version the transport version of the receiving node
     * @return {@code true} if the instance should be serialized, {@code false} otherwise
     */
    default boolean supportsVersion(TransportVersion version) {
        return version.onOrAfter(getMinimalSupportedVersion());
    }
    /**
     * Writes all those values in the given map to {@code out} that pass the version check in
     * {@link VersionedNamedWriteable#supportsVersion} as a list. Only the values are written;
     * the map keys are discarded.
     *
     * @param out stream to write to
     * @param customs map of customs
     * @param <T> type of customs in map
     */
    static <T extends VersionedNamedWriteable> void writeVersionedWritables(StreamOutput out, Map<String, T> customs) throws IOException {
        writeVersionedWriteables(out, customs.values());
    }
static void writeVersionedWriteables(StreamOutput out, Iterable<? extends VersionedNamedWriteable> writeables) throws IOException {
// filter out objects not supported by the stream version
int numberOfCompatibleValues = 0;
for (var value : writeables) {
if (value.supportsVersion(out.getTransportVersion())) {
numberOfCompatibleValues++;
}
}
out.writeVInt(numberOfCompatibleValues);
for (var value : writeables) {
if (value.supportsVersion(out.getTransportVersion())) {
out.writeNamedWriteable(value);
}
}
}
}
| VersionedNamedWriteable |
java | spring-projects__spring-boot | module/spring-boot-restclient/src/test/java/org/springframework/boot/restclient/autoconfigure/RestTemplateAutoConfigurationTests.java | {
"start": 11967,
"end": 12192
} | class ____ {
    // Exposes a user-defined RestTemplateBuilderConfigurer so the auto-configuration backs off
    // from creating its own and uses this bean instead.
    @Bean
    RestTemplateBuilderConfigurer restTemplateBuilderConfigurer() {
        return new RestTemplateBuilderConfigurer();
    }
@Configuration(proxyBeanMethods = false)
static | RestTemplateBuilderConfigurerConfig |
java | elastic__elasticsearch | x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/spatial/SpatialFeatureSetUsageTests.java | {
"start": 876,
"end": 2010
} | class ____ extends AbstractWireSerializingTestCase<SpatialFeatureSetUsage> {
    // Builds a usage object around a randomized stats response for serialization round-trips.
    @Override
    protected SpatialFeatureSetUsage createTestInstance() {
        SpatialStatsAction.Response statsResponse = randomStatsResponse();
        return new SpatialFeatureSetUsage(statsResponse);
    }
    // Returning null tells the base test framework that this type has no meaningful mutations.
    @Override
    protected SpatialFeatureSetUsage mutateInstance(SpatialFeatureSetUsage instance) {
        return null; // no mutations
    }
    // Reader used by the framework to deserialize instances back from the wire.
    @Override
    protected Writeable.Reader<SpatialFeatureSetUsage> instanceReader() {
        return SpatialFeatureSetUsage::new;
    }
    // Builds a minimal single-node stats response with empty counters and no failures.
    private SpatialStatsAction.Response randomStatsResponse() {
        DiscoveryNode node = DiscoveryNodeUtils.create("_node_id", new TransportAddress(InetAddress.getLoopbackAddress(), 9300));
        EnumCounters<SpatialStatsAction.Item> counters = new EnumCounters<>(SpatialStatsAction.Item.class);
        SpatialStatsAction.NodeResponse nodeResponse = new SpatialStatsAction.NodeResponse(node, counters);
        return new SpatialStatsAction.Response(new ClusterName("cluster_name"), List.of(nodeResponse), emptyList());
    }
}
| SpatialFeatureSetUsageTests |
java | dropwizard__dropwizard | dropwizard-testing/src/test/java/io/dropwizard/testing/app/DropwizardTestApplication.java | {
"start": 656,
"end": 1072
} | class ____ extends Application<TestConfiguration> {
    // Wires up the test resource (serving the configured message) and two admin tasks.
    @Override
    public void run(TestConfiguration configuration, Environment environment) throws Exception {
        environment.jersey().register(new TestResource(configuration.getMessage()));
        environment.admin().addTask(new HelloTask());
        environment.admin().addTask(new EchoTask());
    }
@Path("/")
public static | DropwizardTestApplication |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/InfiniteRecursionTest.java | {
"start": 11014,
"end": 11439
} | class ____ {
Test next;
boolean nextIsCool;
boolean isCool(boolean thisIsCool) {
return thisIsCool || next.isCool(thisIsCool);
}
}
""")
.doTest();
}
@Test
public void positiveBinaryRightHandSideNotConditional() {
compilationHelper
.addSourceLines(
"Test.java",
"""
| Test |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/jpa/criteria/TreatJoinTest.java | {
"start": 6812,
"end": 7269
} | class ____ {
@Id
@GeneratedValue
private Long id;
private String description;
@ManyToOne(cascade = CascadeType.PERSIST, fetch = FetchType.LAZY)
private Price price;
	// No-arg constructor required by JPA.
	public Item() {
	}
	// Convenience constructor associating the item with a price.
	public Item(Price price) {
		this.price = price;
	}
	public String getDescription() {
		return description;
	}
	public void setDescription(String description) {
		this.description = description;
	}
}
@Entity(name = "Price")
public static | Item |
java | quarkusio__quarkus | extensions/qute/deployment/src/test/java/io/quarkus/qute/deployment/inject/NamedBeanNotFoundTest.java | {
"start": 333,
"end": 726
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addAsResource(new StringAsset("{inject:bing.ping}"), "templates/bing.html"))
.setExpectedException(TemplateException.class);
    // Should never run: the extension is expected to fail deployment with a TemplateException
    // (see setExpectedException above), so reaching the test body is itself a failure.
    @Test
    public void testValidation() {
        fail();
    }
}
| NamedBeanNotFoundTest |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Merger.java | {
"start": 2227,
"end": 10401
} | class ____ {
private static final Logger LOG = LoggerFactory.getLogger(Merger.class);
// Local directories
private static LocalDirAllocator lDirAlloc =
new LocalDirAllocator(MRConfig.LOCAL_DIR);
  /**
   * Merges on-disk segments ({@code inputs}) into a single sorted key/value stream.
   * Overload without a merged-map-outputs counter; segments are built for the REDUCE task type,
   * and inputs may be deleted as they are consumed when {@code deleteInputs} is true.
   */
  public static <K extends Object, V extends Object>
  RawKeyValueIterator merge(Configuration conf, FileSystem fs,
                            Class<K> keyClass, Class<V> valueClass,
                            CompressionCodec codec,
                            Path[] inputs, boolean deleteInputs,
                            int mergeFactor, Path tmpDir,
                            RawComparator<K> comparator, Progressable reporter,
                            Counters.Counter readsCounter,
                            Counters.Counter writesCounter,
                            Progress mergePhase)
      throws IOException {
    return
      new MergeQueue<K, V>(conf, fs, inputs, deleteInputs, codec, comparator,
                           reporter, null,
                           TaskType.REDUCE).merge(keyClass, valueClass,
                                           mergeFactor, tmpDir,
                                           readsCounter, writesCounter,
                                           mergePhase);
  }
public static <K extends Object, V extends Object>
RawKeyValueIterator merge(Configuration conf, FileSystem fs,
Class<K> keyClass, Class<V> valueClass,
CompressionCodec codec,
Path[] inputs, boolean deleteInputs,
int mergeFactor, Path tmpDir,
RawComparator<K> comparator,
Progressable reporter,
Counters.Counter readsCounter,
Counters.Counter writesCounter,
Counters.Counter mergedMapOutputsCounter,
Progress mergePhase)
throws IOException {
return
new MergeQueue<K, V>(conf, fs, inputs, deleteInputs, codec, comparator,
reporter, mergedMapOutputsCounter,
TaskType.REDUCE).merge(
keyClass, valueClass,
mergeFactor, tmpDir,
readsCounter, writesCounter,
mergePhase);
}
public static <K extends Object, V extends Object>
RawKeyValueIterator merge(Configuration conf, FileSystem fs,
Class<K> keyClass, Class<V> valueClass,
List<Segment<K, V>> segments,
int mergeFactor, Path tmpDir,
RawComparator<K> comparator, Progressable reporter,
Counters.Counter readsCounter,
Counters.Counter writesCounter,
Progress mergePhase)
throws IOException {
return merge(conf, fs, keyClass, valueClass, segments, mergeFactor, tmpDir,
comparator, reporter, false, readsCounter, writesCounter,
mergePhase);
}
public static <K extends Object, V extends Object>
RawKeyValueIterator merge(Configuration conf, FileSystem fs,
Class<K> keyClass, Class<V> valueClass,
List<Segment<K, V>> segments,
int mergeFactor, Path tmpDir,
RawComparator<K> comparator, Progressable reporter,
boolean sortSegments,
Counters.Counter readsCounter,
Counters.Counter writesCounter,
Progress mergePhase)
throws IOException {
return new MergeQueue<K, V>(conf, fs, segments, comparator, reporter,
sortSegments,
TaskType.REDUCE).merge(keyClass, valueClass,
mergeFactor, tmpDir,
readsCounter, writesCounter,
mergePhase);
}
public static <K extends Object, V extends Object>
RawKeyValueIterator merge(Configuration conf, FileSystem fs,
Class<K> keyClass, Class<V> valueClass,
CompressionCodec codec,
List<Segment<K, V>> segments,
int mergeFactor, Path tmpDir,
RawComparator<K> comparator, Progressable reporter,
boolean sortSegments,
Counters.Counter readsCounter,
Counters.Counter writesCounter,
Progress mergePhase,
TaskType taskType)
throws IOException {
return new MergeQueue<K, V>(conf, fs, segments, comparator, reporter,
sortSegments, codec,
taskType).merge(keyClass, valueClass,
mergeFactor, tmpDir,
readsCounter, writesCounter,
mergePhase);
}
public static <K extends Object, V extends Object>
RawKeyValueIterator merge(Configuration conf, FileSystem fs,
Class<K> keyClass, Class<V> valueClass,
List<Segment<K, V>> segments,
int mergeFactor, int inMemSegments, Path tmpDir,
RawComparator<K> comparator, Progressable reporter,
boolean sortSegments,
Counters.Counter readsCounter,
Counters.Counter writesCounter,
Progress mergePhase)
throws IOException {
return new MergeQueue<K, V>(conf, fs, segments, comparator, reporter,
sortSegments,
TaskType.REDUCE).merge(keyClass, valueClass,
mergeFactor, inMemSegments,
tmpDir,
readsCounter, writesCounter,
mergePhase);
}
public static <K extends Object, V extends Object>
RawKeyValueIterator merge(Configuration conf, FileSystem fs,
Class<K> keyClass, Class<V> valueClass,
CompressionCodec codec,
List<Segment<K, V>> segments,
int mergeFactor, int inMemSegments, Path tmpDir,
RawComparator<K> comparator, Progressable reporter,
boolean sortSegments,
Counters.Counter readsCounter,
Counters.Counter writesCounter,
Progress mergePhase)
throws IOException {
return new MergeQueue<K, V>(conf, fs, segments, comparator, reporter,
sortSegments, codec,
TaskType.REDUCE).merge(keyClass, valueClass,
mergeFactor, inMemSegments,
tmpDir,
readsCounter, writesCounter,
mergePhase);
}
public static <K extends Object, V extends Object>
void writeFile(RawKeyValueIterator records, Writer<K, V> writer,
Progressable progressable, Configuration conf)
throws IOException {
long progressBar = conf.getLong(JobContext.RECORDS_BEFORE_PROGRESS,
10000);
long recordCtr = 0;
while(records.next()) {
writer.append(records.getKey(), records.getValue());
if (((recordCtr++) % progressBar) == 0) {
progressable.progress();
}
}
}
@InterfaceAudience.Private
@InterfaceStability.Unstable
public static | Merger |
java | elastic__elasticsearch | test/fixtures/aws-ec2-fixture/src/test/java/fixture/aws/ec2/AwsEc2HttpHandlerTests.java | {
"start": 1347,
"end": 4003
} | class ____ extends ESTestCase {
public void testDescribeInstances() throws IOException, XMLStreamException {
final List<String> addresses = randomList(
1,
10,
() -> "10.0." + between(1, 254) + "." + between(1, 254) + ":" + between(1025, 65535)
);
final var handler = new AwsEc2HttpHandler((ignored1, ignored2) -> true, () -> addresses);
final var response = handleRequest(handler);
assertEquals(RestStatus.OK, response.status());
final var unseenAddressesInTags = Stream.of("privateDnsName", "dnsName", "privateIpAddress", "ipAddress")
.collect(
Collectors.toMap(
localName -> new QName("http://ec2.amazonaws.com/doc/2013-02-01/", localName),
localName -> new HashSet<>(addresses)
)
);
final var xmlStreamReader = XMLInputFactory.newDefaultFactory().createXMLStreamReader(response.body().streamInput());
try {
for (; xmlStreamReader.getEventType() != XMLStreamConstants.END_DOCUMENT; xmlStreamReader.next()) {
if (xmlStreamReader.getEventType() == XMLStreamConstants.START_ELEMENT) {
final var unseenAddresses = unseenAddressesInTags.get(xmlStreamReader.getName());
if (unseenAddresses != null) {
xmlStreamReader.next();
assertEquals(XMLStreamConstants.CHARACTERS, xmlStreamReader.getEventType());
final var currentAddress = xmlStreamReader.getText();
assertTrue(currentAddress, unseenAddresses.remove(currentAddress));
}
}
}
} finally {
xmlStreamReader.close();
}
assertTrue(unseenAddressesInTags.toString(), unseenAddressesInTags.values().stream().allMatch(HashSet::isEmpty));
}
private record TestHttpResponse(RestStatus status, BytesReference body) {}
private static TestHttpResponse handleRequest(AwsEc2HttpHandler handler) {
final var httpExchange = new TestHttpExchange(
"POST",
"/",
new BytesArray("Action=DescribeInstances"),
TestHttpExchange.EMPTY_HEADERS
);
try {
handler.handle(httpExchange);
} catch (IOException e) {
fail(e);
}
assertNotEquals(0, httpExchange.getResponseCode());
return new TestHttpResponse(RestStatus.fromCode(httpExchange.getResponseCode()), httpExchange.getResponseBodyContents());
}
private static | AwsEc2HttpHandlerTests |
java | quarkusio__quarkus | extensions/reactive-mssql-client/deployment/src/test/java/io/quarkus/reactive/mssql/client/ConfigActiveFalseNamedDatasourceStaticInjectionTest.java | {
"start": 2030,
"end": 2124
} | class ____ {
@Inject
@ReactiveDataSource("ds-1")
Pool pool;
}
}
| MyBean |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/oracle/issues/Issue5267.java | {
"start": 671,
"end": 1844
} | class ____ {
@Test
public void test_create_type() throws Exception {
for (DbType dbType : new DbType[]{DbType.oracle}) {
for (String sql : new String[]{
"CREATE OR REPLACE TYPE TYPE4 as ENUM( 'Lane', 'Junction', 'Area', 'TrafficLight' ) ;",
}) {
SQLStatementParser parser = SQLParserUtils.createSQLStatementParser(sql, dbType);
SQLStatement statement = parser.parseStatement();
System.out.println(dbType + "原始的sql===" + sql);
System.out.println(dbType + "原始sql归一化===" + Issue5421.normalizeSql(sql));
String newSql = statement.toString();
System.out.println(dbType + "初次解析生成的sql归一化===" + Issue5421.normalizeSql(newSql));
parser = SQLParserUtils.createSQLStatementParser(newSql, dbType);
statement = parser.parseStatement();
System.out.println(dbType + "重新解析sql归一化===" + Issue5421.normalizeSql(statement.toString()));
assertTrue(Issue5421.normalizeSql(sql).equalsIgnoreCase(Issue5421.normalizeSql(statement.toString())));
}
}
}
}
| Issue5267 |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/seq/ReadRecoveryTest.java | {
"start": 431,
"end": 3641
} | class ____ {
public int a, b;
@Override public String toString() { return "{Bean, a="+a+", b="+b+"}"; }
}
/*
/**********************************************************************
/* Unit tests; root-level value sequences via Mapper
/**********************************************************************
*/
private final ObjectMapper MAPPER = newJsonMapper();
@Test
public void testRootBeans() throws Exception
{
final String JSON = a2q("{'a':3} {'x':5}");
MappingIterator<Bean> it = MAPPER.readerFor(Bean.class)
.with(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES)
.readValues(JSON);
// First one should be fine
assertTrue(it.hasNextValue());
Bean bean = it.nextValue();
assertEquals(3, bean.a);
// but second one not
try {
bean = it.nextValue();
fail("Should not have succeeded");
} catch (UnrecognizedPropertyException e) {
verifyException(e, "Unrecognized property \"x\"");
}
// 21-May-2015, tatu: With [databind#734], recovery, we now know there's no more data!
assertFalse(it.hasNextValue());
it.close();
}
// for [databind#734]
// Simple test for verifying that basic recover works for a case of
// unknown structured value
@Test
public void testSimpleRootRecovery() throws Exception
{
final String JSON = a2q("{'a':3}{'a':27,'foo':[1,2],'b':{'x':3}} {'a':1,'b':2} ");
MappingIterator<Bean> it = MAPPER.readerFor(Bean.class)
.with(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES)
.readValues(JSON);
Bean bean = it.nextValue();
assertNotNull(bean);
assertEquals(3, bean.a);
// second one problematic
try {
it.nextValue();
} catch (UnrecognizedPropertyException e) {
verifyException(e, "Unrecognized property \"foo\"");
}
// but should recover nicely
bean = it.nextValue();
assertNotNull(bean);
assertEquals(1, bean.a);
assertEquals(2, bean.b);
assertFalse(it.hasNextValue());
it.close();
}
// Similar to "raw" root-level Object sequence, but in array
@Test
public void testSimpleArrayRecovery() throws Exception
{
final String JSON = a2q("[{'a':3},{'a':27,'foo':[1,2],'b':{'x':3}} ,{'a':1,'b':2} ]");
MappingIterator<Bean> it = MAPPER.readerFor(Bean.class)
.with(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES)
.readValues(JSON);
Bean bean = it.nextValue();
assertNotNull(bean);
assertEquals(3, bean.a);
// second one problematic
try {
it.nextValue();
} catch (UnrecognizedPropertyException e) {
verifyException(e, "Unrecognized property \"foo\"");
}
// but should recover nicely
bean = it.nextValue();
assertNotNull(bean);
assertEquals(1, bean.a);
assertEquals(2, bean.b);
assertFalse(it.hasNextValue());
it.close();
}
}
| Bean |
java | mockito__mockito | mockito-core/src/test/java/org/mockitousage/matchers/CapturingArgumentsTest.java | {
"start": 773,
"end": 991
} | class ____ {
private final Integer age;
public Person(Integer age) {
this.age = age;
}
public int getAge() {
return age;
}
}
private static | Person |
java | google__guava | android/guava/src/com/google/common/base/CharMatcher.java | {
"start": 41731,
"end": 42782
} | class ____ extends CharMatcher {
private final String description;
private final char[] rangeStarts;
private final char[] rangeEnds;
RangesMatcher(String description, char[] rangeStarts, char[] rangeEnds) {
this.description = description;
this.rangeStarts = rangeStarts;
this.rangeEnds = rangeEnds;
checkArgument(rangeStarts.length == rangeEnds.length);
for (int i = 0; i < rangeStarts.length; i++) {
checkArgument(rangeStarts[i] <= rangeEnds[i]);
if (i + 1 < rangeStarts.length) {
checkArgument(rangeEnds[i] < rangeStarts[i + 1]);
}
}
}
@Override
public boolean matches(char c) {
int index = Arrays.binarySearch(rangeStarts, c);
if (index >= 0) {
return true;
} else {
index = ~index - 1;
return index >= 0 && c <= rangeEnds[index];
}
}
@Override
public String toString() {
return description;
}
}
/** Implementation of {@link #digit()}. */
private static final | RangesMatcher |
java | apache__camel | components/camel-github/src/test/java/org/apache/camel/component/github/producer/PullRequestStateProducerTest.java | {
"start": 1478,
"end": 3060
} | class ____ extends GitHubComponentTestBase {
private String commitsha;
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:validPullRequest")
.process(new MockPullRequestStateProducerProcessor())
.to("github://pullRequestState?state=success&repoOwner=anotherguy&repoName=somerepo");
} // end of configure
};
}
@Test
public void testPullRequestStateProducer() {
commitsha = commitService.getNextSha();
Endpoint stateProducerEndpoint = getMandatoryEndpoint("direct:validPullRequest");
Exchange exchange = stateProducerEndpoint.createExchange();
String text = "Message sent at " + new Date();
exchange.getIn().setBody(text);
Exchange response = template.send(stateProducerEndpoint, exchange);
assertNotNull(response.getMessage().getBody());
if (!(response.getMessage().getBody() instanceof CommitStatus)) {
fail("Expecting CommitStatus");
}
CommitStatus status = response.getMessage().getBody(CommitStatus.class);
// Check status set on commit service
if (commitService.getCommitStatus(commitsha) != status) {
fail("Commit status sent to service is different from response");
}
assertEquals("success", status.getState());
assertEquals(status.getDescription(), text);
}
public | PullRequestStateProducerTest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.