language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | grpc__grpc-java | authz/src/main/java/io/grpc/authz/AuthorizationServerInterceptor.java | {
"start": 1632,
"end": 3031
} | class ____ implements ServerInterceptor {
private final List<ServerInterceptor> interceptors = new ArrayList<>();
private AuthorizationServerInterceptor(String authorizationPolicy)
throws IOException {
List<RBAC> rbacs = AuthorizationPolicyTranslator.translate(authorizationPolicy);
if (rbacs == null || rbacs.isEmpty() || rbacs.size() > 2) {
throw new IllegalArgumentException("Failed to translate authorization policy");
}
for (RBAC rbac: rbacs) {
interceptors.add(
InternalRbacFilter.createInterceptor(
io.envoyproxy.envoy.extensions.filters.http.rbac.v3.RBAC.newBuilder()
.setRules(rbac).build()));
}
}
@Override
public <ReqT, RespT> ServerCall.Listener<ReqT> interceptCall(
ServerCall<ReqT, RespT> call, Metadata headers,
ServerCallHandler<ReqT, RespT> next) {
for (ServerInterceptor interceptor: interceptors) {
next = InternalServerInterceptors.interceptCallHandlerCreate(interceptor, next);
}
return next.startCall(call, headers);
}
// Static method that creates an AuthorizationServerInterceptor.
public static AuthorizationServerInterceptor create(String authorizationPolicy)
throws IOException {
checkNotNull(authorizationPolicy, "authorizationPolicy");
return new AuthorizationServerInterceptor(authorizationPolicy);
}
}
| AuthorizationServerInterceptor |
java | spring-projects__spring-security | web/src/main/java/org/springframework/security/web/authentication/rememberme/TokenBasedRememberMeServices.java | {
"start": 13865,
"end": 14156
} | enum ____ {
MD5("MD5"), SHA256("SHA-256");
private final String digestAlgorithm;
RememberMeTokenAlgorithm(String digestAlgorithm) {
this.digestAlgorithm = digestAlgorithm;
}
public String getDigestAlgorithm() {
return this.digestAlgorithm;
}
}
}
| RememberMeTokenAlgorithm |
java | quarkusio__quarkus | test-framework/junit5-component/src/test/java/io/quarkus/test/component/declarative/InterceptorMethodsTest.java | {
"start": 2368,
"end": 3396
} | interface ____ {
}
@Priority(20)
@SimpleBinding
@AroundInvoke
Object aroundInvoke1(InvocationContext context) throws Exception {
EVENTS.add("ai1");
return Boolean.parseBoolean(context.proceed().toString()) ? charlie.ping() : "false";
}
// default priority is 1
@SimpleBinding
@AroundInvoke
Object aroundInvoke2(InvocationContext context) throws Exception {
EVENTS.add("ai2");
return context.proceed().toString().toUpperCase();
}
@SimpleBinding
@PostConstruct
void postConstruct(ArcInvocationContext context) throws Exception {
EVENTS.add("pc");
context.proceed();
}
@SimpleBinding
@PreDestroy
void preDestroy(ArcInvocationContext context) throws Exception {
EVENTS.add("pd");
context.proceed();
}
@SimpleBinding
@AroundConstruct
void aroundConstruct(ArcInvocationContext context) throws Exception {
EVENTS.add("ac");
context.proceed();
}
}
| SimpleBinding |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/issue_1600/Issue1603_getter.java | {
"start": 1000,
"end": 1202
} | class ____ {
private final Collection<String> values = Collections.emptyList();
public Collection<String> getValues() {
return values;
}
}
public static | Model_2 |
java | spring-projects__spring-boot | module/spring-boot-jackson/src/main/java/org/springframework/boot/jackson/JacksonComponent.java | {
"start": 1997,
"end": 2980
} | interface ____ {
/**
* The value may indicate a suggestion for a logical component name, to be turned into
* a Spring bean in case of an auto-detected component.
* @return the component name
*/
@AliasFor(annotation = Component.class)
String value() default "";
/**
* The types that are handled by the provided serializer/deserializer. This attribute
* is mandatory for a {@link KeyDeserializer}, as the type cannot be inferred. For a
* {@link ValueSerializer} or {@link ValueDeserializer} it can be used to limit
* handling to a subclasses of type inferred from the generic.
* @return the types that should be handled by the component
*/
Class<?>[] type() default {};
/**
* The scope under which the serializer/deserializer should be registered with the
* module.
* @return the component's handle type
*/
Scope scope() default Scope.VALUES;
/**
* The various scopes under which a serializer/deserializer can be registered.
*/
| JacksonComponent |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/web/servlet/samples/client/standalone/resultmatches/StatusAssertionTests.java | {
"start": 3628,
"end": 4348
} | class ____ {
@RequestMapping("/teaPot")
void teaPot() {
}
@RequestMapping("/created")
@ResponseStatus(CREATED)
void created(){
}
@Get(path = "/createdWithComposedAnnotation", status = CREATED)
void createdWithComposedAnnotation() {
}
@RequestMapping("/badRequest")
@ResponseStatus(code = BAD_REQUEST, reason = "Expired token")
void badRequest(){
}
@RequestMapping("/notImplemented")
@ResponseStatus(NOT_IMPLEMENTED)
void notImplemented(){
}
@RequestMapping("/throwsException")
@ResponseStatus(NOT_IMPLEMENTED)
void throwsException() {
throw new IllegalStateException();
}
@ExceptionHandler
void exceptionHandler(IllegalStateException ex) {
}
}
}
| StatusController |
java | apache__camel | core/camel-base-engine/src/main/java/org/apache/camel/impl/engine/CamelInternalProcessor.java | {
"start": 36212,
"end": 39686
} | class ____ implements CamelInternalProcessorAdvice<UnitOfWork> {
private final Route route;
private String routeId;
private final UnitOfWorkFactory uowFactory;
public UnitOfWorkProcessorAdvice(Route route, CamelContext camelContext) {
this.route = route;
if (route != null) {
this.routeId = route.getRouteId();
}
this.uowFactory = PluginHelper.getUnitOfWorkFactory(camelContext);
// optimize uow factory to initialize it early and once per advice
this.uowFactory.afterPropertiesConfigured(camelContext);
}
@Override
public UnitOfWork before(Exchange exchange) throws Exception {
// if the exchange doesn't have from route id set, then set it if it originated
// from this unit of work
if (route != null && exchange.getFromRouteId() == null) {
if (routeId == null) {
this.routeId = route.getRouteId();
}
exchange.getExchangeExtension().setFromRouteId(routeId);
}
// only return UnitOfWork if we created a new as then its us that handle the lifecycle to done the created UoW
UnitOfWork uow = exchange.getUnitOfWork();
UnitOfWork created = null;
if (uow == null) {
// If there is no existing UoW, then we should start one and
// terminate it once processing is completed for the exchange.
created = createUnitOfWork(exchange);
exchange.getExchangeExtension().setUnitOfWork(created);
uow = created;
} else {
// reuse existing exchange
if (uow.onPrepare(exchange)) {
// need to re-attach uow
exchange.getExchangeExtension().setUnitOfWork(uow);
// we are prepared for reuse and can regard it as-if we created the unit of work
// so the after method knows that this is the outer bounds and should done the unit of work
created = uow;
}
}
// for any exchange we should push/pop route context so we can keep track of which route we are routing
if (route != null) {
uow.pushRoute(route);
}
return created;
}
@Override
public void after(Exchange exchange, UnitOfWork uow) throws Exception {
UnitOfWork existing = exchange.getUnitOfWork();
// execute done on uow if we created it, and the consumer is not doing it
if (uow != null) {
UnitOfWorkHelper.doneUow(uow, exchange);
}
// after UoW is done lets pop the route context which must be done on every existing UoW
if (route != null && existing != null) {
existing.popRoute();
}
}
protected UnitOfWork createUnitOfWork(Exchange exchange) {
if (uowFactory != null) {
return uowFactory.createUnitOfWork(exchange);
} else {
return PluginHelper.getUnitOfWorkFactory(exchange.getContext()).createUnitOfWork(exchange);
}
}
}
/**
* Advice when Message History has been enabled.
*/
@SuppressWarnings("unchecked")
public static | UnitOfWorkProcessorAdvice |
java | apache__flink | flink-table/flink-table-api-java/src/main/java/org/apache/flink/table/operations/QueryOperation.java | {
"start": 1497,
"end": 2874
} | interface ____ extends Operation {
/** Resolved schema of this operation. */
ResolvedSchema getResolvedSchema();
/**
* Returns a SQL string that fully serializes this instance. The serialized string can be used
* for storing the query in e.g. a {@link org.apache.flink.table.catalog.Catalog} as a view.
*
* @return detailed string for persisting in a catalog
* @see Operation#asSummaryString()
*/
default String asSerializableString() {
return asSerializableString(DefaultSqlFactory.INSTANCE);
}
/**
* Returns a SQL string that fully serializes this instance. The serialized string can be used
* for storing the query in e.g. a {@link org.apache.flink.table.catalog.Catalog} as a view.
*
* @param sqlFactory can be used to customize the serialization to a SQL string
* @return Flink SQL string for persisting in a catalog
* @see Operation#asSummaryString()
* @see EnvironmentSettings.Builder#withSqlFactory(SqlFactory)
*/
default String asSerializableString(SqlFactory sqlFactory) {
throw new UnsupportedOperationException(
"QueryOperations are not string serializable for now.");
}
List<QueryOperation> getChildren();
default <T> T accept(QueryOperationVisitor<T> visitor) {
return visitor.visit(this);
}
}
| QueryOperation |
java | quarkusio__quarkus | extensions/qute/deployment/src/test/java/io/quarkus/qute/deployment/Hello.java | {
"start": 153,
"end": 227
} | class ____ {
public String ping() {
return "pong";
}
}
| Hello |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AbstractLivelinessMonitor.java | {
"start": 1425,
"end": 3912
} | class ____<O> extends AbstractService {
private static final Logger LOG =
LoggerFactory.getLogger(AbstractLivelinessMonitor.class);
//thread which runs periodically to see the last time since a heartbeat is
//received.
private Thread checkerThread;
private volatile boolean stopped;
public static final int DEFAULT_EXPIRE = 5*60*1000;//5 mins
private long expireInterval = DEFAULT_EXPIRE;
private long monitorInterval = expireInterval / 3;
private volatile boolean resetTimerOnStart = true;
private final Clock clock;
private Map<O, Long> running = new HashMap<O, Long>();
public AbstractLivelinessMonitor(String name, Clock clock) {
super(name);
this.clock = clock;
}
public AbstractLivelinessMonitor(String name) {
this(name, new MonotonicClock());
}
@Override
protected void serviceStart() throws Exception {
assert !stopped : "starting when already stopped";
resetTimer();
checkerThread = new SubjectInheritingThread(new PingChecker());
checkerThread.setName("Ping Checker for "+getName());
checkerThread.start();
super.serviceStart();
}
@Override
protected void serviceStop() throws Exception {
stopped = true;
if (checkerThread != null) {
checkerThread.interrupt();
}
super.serviceStop();
}
protected abstract void expire(O ob);
protected void setExpireInterval(long expireInterval) {
this.expireInterval = expireInterval;
}
protected long getExpireInterval(O o) {
// by-default return for all the registered object interval.
return this.expireInterval;
}
protected void setMonitorInterval(long monitorInterval) {
this.monitorInterval = monitorInterval;
}
public synchronized void receivedPing(O ob) {
//only put for the registered objects
if (running.containsKey(ob)) {
running.put(ob, clock.getTime());
}
}
public synchronized void register(O ob) {
register(ob, clock.getTime());
}
public synchronized void register(O ob, long expireTime) {
running.put(ob, expireTime);
}
public synchronized void unregister(O ob) {
running.remove(ob);
}
public synchronized void resetTimer() {
if (resetTimerOnStart) {
long time = clock.getTime();
for (O ob : running.keySet()) {
running.put(ob, time);
}
}
}
protected void setResetTimeOnStart(boolean resetTimeOnStart) {
this.resetTimerOnStart = resetTimeOnStart;
}
private | AbstractLivelinessMonitor |
java | elastic__elasticsearch | x-pack/plugin/otel-data/src/main/java/org/elasticsearch/xpack/oteldata/otlp/datapoint/DataPointGroupingContext.java | {
"start": 7005,
"end": 8414
} | class ____ {
private final Resource resource;
private final ByteString resourceSchemaUrl;
private final TsidBuilder resourceTsidBuilder;
private final Map<Hash128, ScopeGroup> scopes;
ResourceGroup(Resource resource, ByteString resourceSchemaUrl, TsidBuilder resourceTsidBuilder) {
this.resource = resource;
this.resourceSchemaUrl = resourceSchemaUrl;
this.resourceTsidBuilder = resourceTsidBuilder;
this.scopes = new HashMap<>();
}
public ScopeGroup getOrCreateScope(ScopeMetrics scopeMetrics) {
TsidBuilder scopeTsidBuilder = ScopeTsidFunnel.forScope(byteStringAccessor, scopeMetrics);
Hash128 scopeHash = scopeTsidBuilder.hash();
scopeTsidBuilder.addAll(resourceTsidBuilder);
ScopeGroup scopeGroup = scopes.get(scopeHash);
if (scopeGroup == null) {
scopeGroup = new ScopeGroup(this, scopeMetrics.getScope(), scopeMetrics.getSchemaUrlBytes(), scopeTsidBuilder);
scopes.put(scopeHash, scopeGroup);
}
return scopeGroup;
}
public <E extends Exception> void forEach(CheckedConsumer<DataPointGroup, E> consumer) throws E {
for (ScopeGroup scopeGroup : scopes.values()) {
scopeGroup.forEach(consumer);
}
}
}
| ResourceGroup |
java | apache__flink | flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/plan/nodes/exec/stream/PythonCalcJsonPlanTest.java | {
"start": 1446,
"end": 3313
} | class ____ extends TableTestBase {
private StreamTableTestUtil util;
private TableEnvironment tEnv;
@BeforeEach
void setup() {
util = streamTestUtil(TableConfig.getDefault());
tEnv = util.getTableEnv();
String srcTableDdl =
"CREATE TABLE MyTable (\n"
+ " a bigint,\n"
+ " b int not null,\n"
+ " c varchar,\n"
+ " d timestamp(3)\n"
+ ") with (\n"
+ " 'connector' = 'values',\n"
+ " 'bounded' = 'false')";
tEnv.executeSql(srcTableDdl);
}
@Test
void testPythonCalc() {
tEnv.createTemporaryFunction("pyFunc", new PythonScalarFunction("pyFunc"));
String sinkTableDdl =
"CREATE TABLE MySink (\n"
+ " a bigint,\n"
+ " b int\n"
+ ") with (\n"
+ " 'connector' = 'values',\n"
+ " 'table-sink-class' = 'DEFAULT')";
tEnv.executeSql(sinkTableDdl);
util.verifyJsonPlan("insert into MySink select a, pyFunc(b, b) from MyTable");
}
@Test
void testPythonFunctionInWhereClause() {
tEnv.createTemporaryFunction("pyFunc", new BooleanPythonScalarFunction("pyFunc"));
String sinkTableDdl =
"CREATE TABLE MySink (\n"
+ " a bigint,\n"
+ " b int\n"
+ ") with (\n"
+ " 'connector' = 'values',\n"
+ " 'table-sink-class' = 'DEFAULT')";
tEnv.executeSql(sinkTableDdl);
util.verifyJsonPlan("insert into MySink select a, b from MyTable where pyFunc(b, b + 1)");
}
}
| PythonCalcJsonPlanTest |
java | apache__commons-lang | src/main/java/org/apache/commons/lang3/builder/ReflectionToStringBuilder.java | {
"start": 25885,
"end": 30011
} | class ____ stop appending fields for.
*/
public Class<?> getUpToClass() {
return this.upToClass;
}
/**
* Calls {@code java.lang.reflect.Field.get(Object)}.
*
* @param field
* The Field to query.
* @return The Object from the given Field.
* @throws IllegalArgumentException
* see {@link java.lang.reflect.Field#get(Object)}
* @throws IllegalAccessException
* see {@link java.lang.reflect.Field#get(Object)}
*
* @see java.lang.reflect.Field#get(Object)
*/
protected Object getValue(final Field field) throws IllegalAccessException {
return field.get(getObject());
}
/**
* Gets whether or not to append static fields.
*
* @return Whether or not to append static fields.
* @since 2.1
*/
public boolean isAppendStatics() {
return this.appendStatics;
}
/**
* Gets whether or not to append transient fields.
*
* @return Whether or not to append transient fields.
*/
public boolean isAppendTransients() {
return this.appendTransients;
}
/**
* Gets whether or not to append fields whose values are null.
*
* @return Whether or not to append fields whose values are null.
* @since 3.6
*/
public boolean isExcludeNullValues() {
return this.excludeNullValues;
}
/**
* Appends to the {@code toString} an {@link Object} array.
*
* @param array
* the array to add to the {@code toString}
* @return {@code this} instance.
*/
public ReflectionToStringBuilder reflectionAppendArray(final Object array) {
getStyle().reflectionAppendArrayDetail(getStringBuffer(), null, array);
return this;
}
/**
* Sets whether or not to append static fields.
*
* @param appendStatics
* Whether or not to append static fields.
* @since 2.1
*/
public void setAppendStatics(final boolean appendStatics) {
this.appendStatics = appendStatics;
}
/**
* Sets whether or not to append transient fields.
*
* @param appendTransients
* Whether or not to append transient fields.
*/
public void setAppendTransients(final boolean appendTransients) {
this.appendTransients = appendTransients;
}
/**
* Sets the field names to exclude.
*
* @param excludeFieldNamesParam
* The excludeFieldNames to excluding from toString or {@code null}.
* @return {@code this}
*/
public ReflectionToStringBuilder setExcludeFieldNames(final String... excludeFieldNamesParam) {
if (excludeFieldNamesParam == null) {
this.excludeFieldNames = null;
} else {
// clone and remove nulls
this.excludeFieldNames = ArraySorter.sort(toNoNullStringArray(excludeFieldNamesParam));
}
return this;
}
/**
* Sets whether or not to append fields whose values are null.
*
* @param excludeNullValues
* Whether or not to append fields whose values are null.
* @since 3.6
*/
public void setExcludeNullValues(final boolean excludeNullValues) {
this.excludeNullValues = excludeNullValues;
}
/**
* Sets the field names to include. {@code null} or empty means all fields are included. All fields are included by default. This method will override the default behavior.
*
* @param includeFieldNamesParam
* The includeFieldNames that must be on toString or {@code null}.
* @return {@code this}
* @since 3.13.0
*/
public ReflectionToStringBuilder setIncludeFieldNames(final String... includeFieldNamesParam) {
if (includeFieldNamesParam == null) {
this.includeFieldNames = null;
} else {
// clone and remove nulls
this.includeFieldNames = ArraySorter.sort(toNoNullStringArray(includeFieldNamesParam));
}
return this;
}
/**
* Sets the last super | to |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/processor/IdempotentConsumerTest.java | {
"start": 1334,
"end": 10221
} | class ____ extends ContextTestSupport {
protected Endpoint startEndpoint;
protected MockEndpoint resultEndpoint;
@Override
public boolean isUseRouteBuilder() {
return false;
}
@Test
public void testDuplicateMessagesAreFilteredOut() throws Exception {
context.addRoutes(new RouteBuilder() {
@Override
public void configure() {
from("direct:start")
.idempotentConsumer(header("messageId"), MemoryIdempotentRepository.memoryIdempotentRepository(10))
.to("mock:result");
}
});
context.start();
resultEndpoint.expectedBodiesReceived("one", "two", "three");
sendMessage("1", "one");
sendMessage("2", "two");
sendMessage("1", "one");
sendMessage("2", "two");
sendMessage("1", "one");
sendMessage("3", "three");
assertMockEndpointsSatisfied();
}
@Test
public void testNotSkiDuplicate() throws Exception {
context.addRoutes(new RouteBuilder() {
@Override
public void configure() {
IdempotentRepository repo = MemoryIdempotentRepository.memoryIdempotentRepository(200);
from("direct:start").idempotentConsumer(header("messageId")).idempotentRepository(repo).skipDuplicate(false)
.to("mock:result");
}
});
context.start();
resultEndpoint.expectedBodiesReceived("one", "two", "one", "two", "one", "three");
resultEndpoint.message(0).exchangeProperty(Exchange.DUPLICATE_MESSAGE).isNull();
resultEndpoint.message(1).exchangeProperty(Exchange.DUPLICATE_MESSAGE).isNull();
resultEndpoint.message(2).exchangeProperty(Exchange.DUPLICATE_MESSAGE).isEqualTo(Boolean.TRUE);
resultEndpoint.message(3).exchangeProperty(Exchange.DUPLICATE_MESSAGE).isEqualTo(Boolean.TRUE);
resultEndpoint.message(4).exchangeProperty(Exchange.DUPLICATE_MESSAGE).isEqualTo(Boolean.TRUE);
resultEndpoint.message(5).exchangeProperty(Exchange.DUPLICATE_MESSAGE).isNull();
sendMessage("1", "one");
sendMessage("2", "two");
sendMessage("1", "one");
sendMessage("2", "two");
sendMessage("1", "one");
sendMessage("3", "three");
assertMockEndpointsSatisfied();
}
@Test
public void testNotSkiDuplicateWithFilter() throws Exception {
context.addRoutes(new RouteBuilder() {
@Override
public void configure() {
IdempotentRepository repo = MemoryIdempotentRepository.memoryIdempotentRepository(200);
// START SNIPPET: e1
from("direct:start")
// instruct idempotent consumer to not skip duplicates as we
// will filter then our self
.idempotentConsumer(header("messageId")).idempotentRepository(repo).skipDuplicate(false)
.filter(exchangeProperty(Exchange.DUPLICATE_MESSAGE).isEqualTo(true))
// filter out duplicate messages by sending them to
// someplace else and then stop
.to("mock:duplicate").stop().end()
// and here we process only new messages (no duplicates)
.to("mock:result");
// END SNIPPET: e1
}
});
context.start();
resultEndpoint.expectedBodiesReceived("one", "two", "three");
getMockEndpoint("mock:duplicate").expectedBodiesReceived("one", "two", "one");
getMockEndpoint("mock:duplicate").allMessages().exchangeProperty(Exchange.DUPLICATE_MESSAGE).isEqualTo(Boolean.TRUE);
sendMessage("1", "one");
sendMessage("2", "two");
sendMessage("1", "one");
sendMessage("2", "two");
sendMessage("1", "one");
sendMessage("3", "three");
assertMockEndpointsSatisfied();
}
@Test
public void testFailedExchangesNotAddedDeadLetterChannel() throws Exception {
context.addRoutes(new RouteBuilder() {
@Override
public void configure() {
errorHandler(deadLetterChannel("mock:error").maximumRedeliveries(2).redeliveryDelay(0).logStackTrace(false));
from("direct:start")
.idempotentConsumer(header("messageId"), MemoryIdempotentRepository.memoryIdempotentRepository(200))
.process(new Processor() {
public void process(Exchange exchange) {
String id = exchange.getIn().getHeader("messageId", String.class);
if (id.equals("2")) {
throw new IllegalArgumentException("Damm I cannot handle id 2");
}
}
}).to("mock:result");
}
});
context.start();
// we send in 2 messages with id 2 that fails
getMockEndpoint("mock:error").expectedMessageCount(2);
resultEndpoint.expectedBodiesReceived("one", "three");
sendMessage("1", "one");
sendMessage("2", "two");
sendMessage("1", "one");
sendMessage("2", "two");
sendMessage("1", "one");
sendMessage("3", "three");
assertMockEndpointsSatisfied();
}
@Test
public void testFailedExchangesNotAddedDeadLetterChannelNotHandled() throws Exception {
context.addRoutes(new RouteBuilder() {
@Override
public void configure() {
errorHandler(deadLetterChannel("mock:error").maximumRedeliveries(2).redeliveryDelay(0).logStackTrace(false));
from("direct:start")
.idempotentConsumer(header("messageId"), MemoryIdempotentRepository.memoryIdempotentRepository(200))
.process(new Processor() {
public void process(Exchange exchange) {
String id = exchange.getIn().getHeader("messageId", String.class);
if (id.equals("2")) {
throw new IllegalArgumentException("Damm I cannot handle id 2");
}
}
}).to("mock:result");
}
});
context.start();
// we send in 2 messages with id 2 that fails
getMockEndpoint("mock:error").expectedMessageCount(2);
resultEndpoint.expectedBodiesReceived("one", "three");
sendMessage("1", "one");
sendMessage("2", "two");
sendMessage("1", "one");
sendMessage("2", "two");
sendMessage("1", "one");
sendMessage("3", "three");
assertMockEndpointsSatisfied();
}
@Test
public void testFailedExchangesNotAdded() throws Exception {
context.addRoutes(new RouteBuilder() {
@Override
public void configure() {
// use default error handler
errorHandler(defaultErrorHandler());
from("direct:start")
.idempotentConsumer(header("messageId"), MemoryIdempotentRepository.memoryIdempotentRepository(200))
.process(new Processor() {
public void process(Exchange exchange) {
String id = exchange.getIn().getHeader("messageId", String.class);
if (id.equals("2")) {
throw new IllegalArgumentException("Damm I cannot handle id 2");
}
}
}).to("mock:result");
}
});
context.start();
resultEndpoint.expectedBodiesReceived("one", "three");
sendMessage("1", "one");
sendMessage("2", "two");
sendMessage("1", "one");
sendMessage("2", "two");
sendMessage("1", "one");
sendMessage("3", "three");
assertMockEndpointsSatisfied();
}
protected void sendMessage(final Object messageId, final Object body) {
template.send(startEndpoint, new Processor() {
public void process(Exchange exchange) {
// now lets fire in a message
Message in = exchange.getIn();
in.setBody(body);
in.setHeader("messageId", messageId);
}
});
}
@Override
@BeforeEach
public void setUp() throws Exception {
super.setUp();
startEndpoint = resolveMandatoryEndpoint("direct:start");
resultEndpoint = getMockEndpoint("mock:result");
}
}
| IdempotentConsumerTest |
java | apache__rocketmq | store/src/main/java/org/apache/rocketmq/store/lock/BackOffReentrantLock.java | {
"start": 900,
"end": 1221
} | class ____ implements AdaptiveBackOffSpinLock {
private ReentrantLock putMessageNormalLock = new ReentrantLock(); // NonfairSync
@Override
public void lock() {
putMessageNormalLock.lock();
}
@Override
public void unlock() {
putMessageNormalLock.unlock();
}
}
| BackOffReentrantLock |
java | hibernate__hibernate-orm | hibernate-spatial/src/test/java/org/hibernate/spatial/testing/dialects/db2/DB2TestSupport.java | {
"start": 352,
"end": 769
} | class ____ extends TestSupport {
public TestData createTestData(TestDataPurpose purpose) {
switch ( purpose ) {
case SpatialFunctionsData:
return TestData.fromFile( "db2/test-db2nozm-only-polygon.xml" );
default:
return TestData.fromFile( "db2/test-db2nozm-data-set.xml" );
}
}
public DB2ExpectationsFactory createExpectationsFactory() {
return new DB2ExpectationsFactory();
}
}
| DB2TestSupport |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/tracing/BraveTracing.java | {
"start": 14372,
"end": 14700
} | class ____ implements Endpoint {
final zipkin2.Endpoint endpoint;
public BraveEndpoint(zipkin2.Endpoint endpoint) {
this.endpoint = endpoint;
}
}
/**
* {@link TraceContext} implementation for Brave's {@link brave.propagation.TraceContext}.
*/
public static | BraveEndpoint |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/cache/spi/access/CollectionDataAccess.java | {
"start": 808,
"end": 1860
} | interface ____ extends CachedDomainDataAccess {
/**
* To create instances of CollectionCacheKey for this region, Hibernate will invoke this method
* exclusively so that generated implementations can generate optimised keys.
* @param id the primary identifier of the Collection
* @param collectionDescriptor the descriptor of the collection for which a key is being generated
* @param factory a reference to the current SessionFactory
* @param tenantIdentifier the tenant id, or null if multi-tenancy is not being used.
*
* @return a key which can be used to identify this collection on this same region
*/
Object generateCacheKey(
Object id,
CollectionPersister collectionDescriptor,
SessionFactoryImplementor factory,
String tenantIdentifier);
/**
* Performs reverse operation to {@link #generateCacheKey}
*
* @param cacheKey key previously returned from {@link #generateCacheKey}
*
* @return original key passed to {@link #generateCacheKey}
*/
Object getCacheKeyId(Object cacheKey);
}
| CollectionDataAccess |
java | alibaba__druid | core/src/main/java/com/alibaba/druid/mock/MockPreparedStatement.java | {
"start": 808,
"end": 2369
} | class ____ extends PreparedStatementBase implements MockStatementBase, PreparedStatement {
private final String sql;
public MockPreparedStatement(MockConnection conn, String sql) {
super(conn);
this.sql = sql;
}
public String getSql() {
return sql;
}
public MockConnection getConnection() throws SQLException {
return (MockConnection) super.getConnection();
}
@Override
public ResultSet executeQuery() throws SQLException {
checkOpen();
MockConnection conn = getConnection();
if (conn != null && conn.getDriver() != null) {
return conn.getDriver().executeQuery(this, sql);
}
if (conn != null) {
conn.handleSleep();
return conn.getDriver().createMockResultSet(this);
}
return new MockResultSet(this);
}
@Override
public int executeUpdate() throws SQLException {
checkOpen();
if (getConnection() != null) {
getConnection().handleSleep();
}
return 0;
}
@Override
public boolean execute() throws SQLException {
checkOpen();
if (getConnection() != null) {
getConnection().handleSleep();
}
return false;
}
@Override
public ResultSet getResultSet() throws SQLException {
checkOpen();
if (resultSet == null) {
resultSet = this.getConnection().getDriver().createResultSet(this);
}
return resultSet;
}
}
| MockPreparedStatement |
java | hibernate__hibernate-orm | hibernate-vector/src/test/java/org/hibernate/vector/SparseByteVectorTest.java | {
"start": 1717,
"end": 10001
} | class ____ {
private static final byte[] V1 = new byte[]{ 0, 2, 3 };
private static final byte[] V2 = new byte[]{ 0, 5, 6 };
@BeforeEach
public void prepareData(SessionFactoryScope scope) {
scope.inTransaction( em -> {
em.persist( new VectorEntity( 1L, new SparseByteVector( V1 ) ) );
em.persist( new VectorEntity( 2L, new SparseByteVector( V2 ) ) );
} );
}
@AfterEach
public void cleanup(SessionFactoryScope scope) {
scope.inTransaction( em -> {
em.createMutationQuery( "delete from VectorEntity" ).executeUpdate();
} );
}
@Test
public void testRead(SessionFactoryScope scope) {
scope.inTransaction( em -> {
VectorEntity tableRecord;
tableRecord = em.find( VectorEntity.class, 1L );
assertArrayEquals( new byte[]{ 0, 2, 3 }, tableRecord.getTheVector().toDenseVector() );
tableRecord = em.find( VectorEntity.class, 2L );
assertArrayEquals( new byte[]{ 0, 5, 6 }, tableRecord.getTheVector().toDenseVector() );
} );
}
@Test
public void testCast(SessionFactoryScope scope) {
scope.inTransaction( em -> {
final String literal = VectorTestHelper.vectorSparseStringLiteral( new byte[] {1, 1, 1}, em );
final Tuple vector = em.createSelectionQuery( "select cast(e.theVector as string), cast('" + literal + "' as sparse_byte_vector(3)) from VectorEntity e where e.id = 1", Tuple.class )
.getSingleResult();
assertEquals( VectorTestHelper.vectorSparseStringLiteral( V1, em ), vector.get( 0, String.class ) );
assertEquals( new SparseByteVector( new byte[]{ 1, 1, 1 } ), vector.get( 1, SparseByteVector.class ) );
} );
}
@Test
@RequiresDialectFeature(feature = DialectFeatureChecks.SupportsCosineDistance.class)
public void testCosineDistance(SessionFactoryScope scope) {
scope.inTransaction( em -> {
final byte[] vector = new byte[]{ 1, 1, 1 };
final List<Tuple> results = em.createSelectionQuery( "select e.id, cosine_distance(e.theVector, :vec) from VectorEntity e order by e.id", Tuple.class )
.setParameter( "vec", new SparseByteVector( vector ) )
.getResultList();
assertEquals( 2, results.size() );
assertEquals( 1L, results.get( 0 ).get( 0 ) );
assertEquals( cosineDistance( V1, vector ), results.get( 0 ).get( 1, double.class ), 0.0000001D );
assertEquals( 2L, results.get( 1 ).get( 0 ) );
assertEquals( cosineDistance( V2, vector ), results.get( 1 ).get( 1, double.class ), 0.0000001D );
} );
}
@Test
@RequiresDialectFeature(feature = DialectFeatureChecks.SupportsEuclideanDistance.class)
public void testEuclideanDistance(SessionFactoryScope scope) {
scope.inTransaction( em -> {
final byte[] vector = new byte[]{ 1, 1, 1 };
final List<Tuple> results = em.createSelectionQuery( "select e.id, euclidean_distance(e.theVector, :vec) from VectorEntity e order by e.id", Tuple.class )
.setParameter( "vec", new SparseByteVector( vector ) )
.getResultList();
assertEquals( 2, results.size() );
assertEquals( 1L, results.get( 0 ).get( 0 ) );
assertEquals( euclideanDistance( V1, vector ), results.get( 0 ).get( 1, double.class ), 0.000001D );
assertEquals( 2L, results.get( 1 ).get( 0 ) );
assertEquals( euclideanDistance( V2, vector ), results.get( 1 ).get( 1, double.class ), 0.000001D );
} );
}
@Test
@RequiresDialectFeature(feature = DialectFeatureChecks.SupportsEuclideanSquaredDistance.class)
public void testEuclideanSquaredDistance(SessionFactoryScope scope) {
scope.inTransaction( em -> {
final byte[] vector = new byte[]{ 1, 1, 1 };
final List<Tuple> results = em.createSelectionQuery( "select e.id, euclidean_squared_distance(e.theVector, :vec) from VectorEntity e order by e.id", Tuple.class )
.setParameter( "vec", new SparseByteVector( vector ) )
.getResultList();
assertEquals( 2, results.size() );
assertEquals( 1L, results.get( 0 ).get( 0 ) );
assertEquals( euclideanSquaredDistance( V1, vector ), results.get( 0 ).get( 1, double.class ), 0.000001D );
assertEquals( 2L, results.get( 1 ).get( 0 ) );
assertEquals( euclideanSquaredDistance( V2, vector ), results.get( 1 ).get( 1, double.class ), 0.000001D );
} );
}
@Test
@RequiresDialectFeature(feature = DialectFeatureChecks.SupportsTaxicabDistance.class)
@SkipForDialect(dialectClass = OracleDialect.class, reason = "Oracle 23.9 bug")
public void testTaxicabDistance(SessionFactoryScope scope) {
scope.inTransaction( em -> {
final byte[] vector = new byte[]{ 1, 1, 1 };
final List<Tuple> results = em.createSelectionQuery( "select e.id, taxicab_distance(e.theVector, :vec) from VectorEntity e order by e.id", Tuple.class )
.setParameter( "vec", new SparseByteVector( vector ) )
.getResultList();
assertEquals( 2, results.size() );
assertEquals( 1L, results.get( 0 ).get( 0 ) );
assertEquals( taxicabDistance( V1, vector ), results.get( 0 ).get( 1, double.class ), 0D );
assertEquals( 2L, results.get( 1 ).get( 0 ) );
assertEquals( taxicabDistance( V2, vector ), results.get( 1 ).get( 1, double.class ), 0D );
} );
}
@Test
@RequiresDialectFeature(feature = DialectFeatureChecks.SupportsInnerProduct.class)
public void testInnerProduct(SessionFactoryScope scope) {
scope.inTransaction( em -> {
final byte[] vector = new byte[]{ 1, 1, 1 };
final List<Tuple> results = em.createSelectionQuery( "select e.id, inner_product(e.theVector, :vec), negative_inner_product(e.theVector, :vec) from VectorEntity e order by e.id", Tuple.class )
.setParameter( "vec", new SparseByteVector( vector ) )
.getResultList();
assertEquals( 2, results.size() );
assertEquals( 1L, results.get( 0 ).get( 0 ) );
assertEquals( innerProduct( V1, vector ), results.get( 0 ).get( 1, double.class ), 0D );
assertEquals( innerProduct( V1, vector ) * -1, results.get( 0 ).get( 2, double.class ), 0D );
assertEquals( 2L, results.get( 1 ).get( 0 ) );
assertEquals( innerProduct( V2, vector ), results.get( 1 ).get( 1, double.class ), 0D );
assertEquals( innerProduct( V2, vector ) * -1, results.get( 1 ).get( 2, double.class ), 0D );
} );
}
@Test
@RequiresDialectFeature(feature = DialectFeatureChecks.SupportsHammingDistance.class)
@SkipForDialect(dialectClass = OracleDialect.class, reason = "Oracle 23.9 bug")
public void testHammingDistance(SessionFactoryScope scope) {
scope.inTransaction( em -> {
final byte[] vector = new byte[]{ 1, 1, 1 };
final List<Tuple> results = em.createSelectionQuery( "select e.id, hamming_distance(e.theVector, :vec) from VectorEntity e order by e.id", Tuple.class )
.setParameter( "vec", new SparseByteVector( vector ) )
.getResultList();
assertEquals( 2, results.size() );
assertEquals( 1L, results.get( 0 ).get( 0 ) );
assertEquals( hammingDistance( V1, vector ), results.get( 0 ).get( 1, double.class ), 0D );
assertEquals( 2L, results.get( 1 ).get( 0 ) );
assertEquals( hammingDistance( V2, vector ), results.get( 1 ).get( 1, double.class ), 0D );
} );
}
@Test
@RequiresDialectFeature(feature = DialectFeatureChecks.SupportsVectorDims.class)
public void testVectorDims(SessionFactoryScope scope) {
scope.inTransaction( em -> {
final List<Tuple> results = em.createSelectionQuery( "select e.id, vector_dims(e.theVector) from VectorEntity e order by e.id", Tuple.class )
.getResultList();
assertEquals( 2, results.size() );
assertEquals( 1L, results.get( 0 ).get( 0 ) );
assertEquals( V1.length, results.get( 0 ).get( 1 ) );
assertEquals( 2L, results.get( 1 ).get( 0 ) );
assertEquals( V2.length, results.get( 1 ).get( 1 ) );
} );
}
@Test
@RequiresDialectFeature(feature = DialectFeatureChecks.SupportsVectorNorm.class)
public void testVectorNorm(SessionFactoryScope scope) {
scope.inTransaction( em -> {
final List<Tuple> results = em.createSelectionQuery( "select e.id, vector_norm(e.theVector) from VectorEntity e order by e.id", Tuple.class )
.getResultList();
assertEquals( 2, results.size() );
assertEquals( 1L, results.get( 0 ).get( 0 ) );
assertEquals( euclideanNorm( V1 ), results.get( 0 ).get( 1, double.class ), 0D );
assertEquals( 2L, results.get( 1 ).get( 0 ) );
assertEquals( euclideanNorm( V2 ), results.get( 1 ).get( 1, double.class ), 0D );
} );
}
@Entity( name = "VectorEntity" )
public static | SparseByteVectorTest |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/pool/exception/OracleExceptionSorterTest_stmt_setQueryTimeout.java | {
"start": 534,
"end": 2431
} | class ____ extends PoolTestCase {
private DruidDataSource dataSource;
protected void setUp() throws Exception {
super.setUp();
assertEquals(0, JdbcStatManager.getInstance().getSqlList().size());
dataSource = new DruidDataSource();
dataSource.setExceptionSorter(new OracleExceptionSorter());
dataSource.setDriver(new OracleMockDriver());
dataSource.setUrl("jdbc:mock:xxx");
dataSource.setPoolPreparedStatements(true);
dataSource.setMaxOpenPreparedStatements(100);
}
@Override
protected void tearDown() throws Exception {
JdbcUtils.close(dataSource);
super.tearDown();
}
public void test_connect() throws Exception {
String sql = "SELECT 1";
{
DruidPooledConnection conn = dataSource.getConnection();
PreparedStatement pstmt = conn.prepareStatement(sql);
pstmt.execute();
pstmt.close();
conn.close();
}
DruidPooledConnection conn = dataSource.getConnection();
MockConnection mockConn = conn.unwrap(MockConnection.class);
assertNotNull(mockConn);
Statement stmt = conn.createStatement();
SQLException exception = new SQLException("xx", "xxx", 28);
mockConn.setError(exception);
SQLException stmtErrror = null;
try {
stmt.setQueryTimeout(10);
} catch (SQLException ex) {
stmtErrror = ex;
}
assertNotNull(stmtErrror);
assertSame(exception, stmtErrror);
SQLException commitError = null;
try {
conn.commit();
} catch (SQLException ex) {
commitError = ex;
}
assertNotNull(commitError);
assertSame(exception, commitError.getCause());
conn.close();
}
}
| OracleExceptionSorterTest_stmt_setQueryTimeout |
java | reactor__reactor-core | reactor-core/src/main/java/reactor/core/publisher/FluxOnBackpressureBufferStrategy.java | {
"start": 2633,
"end": 8591
} | class ____<T>
extends ArrayDeque<T>
implements InnerOperator<T, T> {
final CoreSubscriber<? super T> actual;
final Context ctx;
final int bufferSize;
final boolean delayError;
final BufferOverflowStrategy overflowStrategy;
final @Nullable Consumer<? super T> onOverflow;
@SuppressWarnings("NotNullFieldNotInitialized") // s initialized in onSubscribe
Subscription s;
volatile boolean cancelled;
volatile boolean done;
@Nullable Throwable error;
volatile int wip;
static final AtomicIntegerFieldUpdater<BackpressureBufferDropOldestSubscriber> WIP =
AtomicIntegerFieldUpdater.newUpdater(BackpressureBufferDropOldestSubscriber.class,
"wip");
volatile long requested;
static final AtomicLongFieldUpdater<BackpressureBufferDropOldestSubscriber> REQUESTED =
AtomicLongFieldUpdater.newUpdater(BackpressureBufferDropOldestSubscriber.class,
"requested");
BackpressureBufferDropOldestSubscriber(
CoreSubscriber<? super T> actual,
int bufferSize,
boolean delayError,
@Nullable Consumer<? super T> onOverflow,
BufferOverflowStrategy overflowStrategy) {
this.actual = actual;
this.ctx = actual.currentContext();
this.delayError = delayError;
this.onOverflow = onOverflow;
this.overflowStrategy = overflowStrategy;
this.bufferSize = bufferSize;
}
@Override
public @Nullable Object scanUnsafe(Attr key) {
if (key == Attr.PARENT) return s;
if (key == Attr.REQUESTED_FROM_DOWNSTREAM) return requested;
if (key == Attr.TERMINATED) return done && isEmpty();
if (key == Attr.CANCELLED) return cancelled;
if (key == Attr.BUFFERED) return size();
if (key == Attr.ERROR) return error;
if (key == Attr.PREFETCH) return Integer.MAX_VALUE;
if (key == Attr.DELAY_ERROR) return delayError;
if (key == Attr.RUN_STYLE) return Attr.RunStyle.SYNC;
return InnerOperator.super.scanUnsafe(key);
}
@Override
public void onSubscribe(Subscription s) {
if (Operators.validate(this.s, s)) {
this.s = s;
actual.onSubscribe(this);
s.request(Long.MAX_VALUE);
}
}
@Override
public void onNext(T t) {
if (done) {
Operators.onNextDropped(t, ctx);
return;
}
boolean callOnOverflow = false;
boolean callOnError = false;
T overflowElement = t;
synchronized(this) {
if (size() == bufferSize) {
callOnOverflow = true;
switch (overflowStrategy) {
case DROP_OLDEST:
overflowElement = pollFirst();
offer(t);
break;
case DROP_LATEST:
//do nothing
break;
case ERROR:
default:
callOnError = true;
break;
}
}
else {
offer(t);
}
}
if (callOnOverflow) {
if (onOverflow != null) {
try {
onOverflow.accept(overflowElement);
}
catch (Throwable e) {
Throwable ex = Operators.onOperatorError(s, e, overflowElement, ctx);
onError(ex);
return;
}
finally {
Operators.onDiscard(overflowElement, ctx);
}
}
else {
Operators.onDiscard(overflowElement, ctx);
}
}
if (callOnError) {
Throwable ex = Operators.onOperatorError(s, Exceptions.failWithOverflow(), overflowElement, ctx);
onError(ex);
}
if (!callOnError && !callOnOverflow) {
drain();
}
}
@Override
public void onError(Throwable t) {
if (done) {
Operators.onErrorDropped(t, ctx);
return;
}
error = t;
done = true;
drain();
}
@Override
public void onComplete() {
if (done) {
return;
}
done = true;
drain();
}
void drain() {
if (WIP.getAndIncrement(this) != 0) {
return;
}
int missed = 1;
for (; ; ) {
Subscriber<? super T> a = actual;
//noinspection ConstantConditions
if (a != null) {
innerDrain(a);
return;
}
missed = WIP.addAndGet(this, -missed);
if (missed == 0) {
break;
}
}
}
void innerDrain(Subscriber<? super T> a) {
int missed = 1;
for (; ; ) {
long r = requested;
long e = 0L;
while (r != e) {
boolean d = done;
T t;
synchronized (this) {
t = poll();
}
boolean empty = t == null;
if (checkTerminated(d, empty, a)) {
return;
}
if (empty) {
break;
}
a.onNext(t);
e++;
}
if (r == e) {
boolean empty;
synchronized (this) {
empty = isEmpty();
}
if (checkTerminated(done, empty, a)) {
return;
}
}
if (e != 0L && r != Long.MAX_VALUE) {
Operators.produced(REQUESTED, this, e);
}
missed = WIP.addAndGet(this, -missed);
if (missed == 0) {
break;
}
}
}
@Override
public void request(long n) {
if (Operators.validate(n)) {
Operators.addCap(REQUESTED, this, n);
drain();
}
}
@Override
public void cancel() {
if (!cancelled) {
cancelled = true;
s.cancel();
if (WIP.getAndIncrement(this) == 0) {
synchronized (this) {
clear();
}
}
}
}
@Override
public CoreSubscriber<? super T> actual() {
return actual;
}
boolean checkTerminated(boolean d, boolean empty, Subscriber<? super T> a) {
if (cancelled) {
s.cancel();
synchronized (this) {
clear();
}
return true;
}
if (d) {
if (delayError) {
if (empty) {
Throwable e = error;
if (e != null) {
a.onError(e);
}
else {
a.onComplete();
}
return true;
}
}
else {
Throwable e = error;
if (e != null) {
synchronized (this) {
clear();
}
a.onError(e);
return true;
}
else if (empty) {
a.onComplete();
return true;
}
}
}
return false;
}
@Override
public void clear() {
Operators.onDiscardMultiple(this, ctx);
super.clear();
}
}
}
| BackpressureBufferDropOldestSubscriber |
java | elastic__elasticsearch | x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/CartesianShapeIT.java | {
"start": 901,
"end": 2330
} | class ____ extends CartesianShapeIntegTestCase {
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return Collections.singleton(LocalStateSpatialPlugin.class);
}
@Override
protected void getGeoShapeMapping(XContentBuilder b) throws IOException {
b.field("type", "shape");
}
@Override
protected IndexVersion randomSupportedVersion() {
return IndexVersionUtils.randomCompatibleWriteVersion(random());
}
@Override
protected boolean allowExpensiveQueries() {
return true;
}
public void testMappingUpdate() {
// create index
IndexVersion version = randomSupportedVersion();
assertAcked(indicesAdmin().prepareCreate("test").setSettings(settings(version).build()).setMapping("shape", "type=shape").get());
ensureGreen();
String update = """
{
"properties": {
"shape": {
"type": "shape",
"strategy": "recursive"
}
}
}""";
MapperParsingException e = expectThrows(
MapperParsingException.class,
() -> indicesAdmin().preparePutMapping("test").setSource(update, XContentType.JSON).get()
);
assertThat(e.getMessage(), containsString("unknown parameter [strategy] on mapper [shape] of type [shape]"));
}
}
| CartesianShapeIT |
java | spring-projects__spring-security | oauth2/oauth2-core/src/main/java/org/springframework/security/oauth2/core/endpoint/OAuth2AuthorizationRequest.java | {
"start": 8802,
"end": 16974
} | class ____<T extends OAuth2AuthorizationRequest, B extends AbstractBuilder<T, B>> {
private String authorizationUri;
private final AuthorizationGrantType authorizationGrantType = AuthorizationGrantType.AUTHORIZATION_CODE;
private final OAuth2AuthorizationResponseType responseType = OAuth2AuthorizationResponseType.CODE;
private String clientId;
private String redirectUri;
private Set<String> scopes;
private String state;
private Map<String, Object> additionalParameters = new LinkedHashMap<>();
private Consumer<Map<String, Object>> parametersConsumer = (params) -> {
};
private Map<String, Object> attributes = new LinkedHashMap<>();
private String authorizationRequestUri;
private Function<UriBuilder, URI> authorizationRequestUriFunction = (builder) -> builder.build();
private final DefaultUriBuilderFactory uriBuilderFactory;
protected AbstractBuilder() {
this.uriBuilderFactory = new DefaultUriBuilderFactory();
// The supplied authorizationUri may contain encoded parameters
// so disable encoding in UriBuilder and instead apply encoding within this
// builder
this.uriBuilderFactory.setEncodingMode(DefaultUriBuilderFactory.EncodingMode.NONE);
}
@SuppressWarnings("unchecked")
protected final B getThis() {
// avoid unchecked casts in subclasses by using "getThis()" instead of "(B)
// this"
return (B) this;
}
/**
* Sets the uri for the authorization endpoint.
* @param authorizationUri the uri for the authorization endpoint
* @return the {@link AbstractBuilder}
*/
public B authorizationUri(String authorizationUri) {
this.authorizationUri = authorizationUri;
return getThis();
}
/**
* Sets the client identifier.
* @param clientId the client identifier
* @return the {@link AbstractBuilder}
*/
public B clientId(String clientId) {
this.clientId = clientId;
return getThis();
}
/**
* Sets the uri for the redirection endpoint.
* @param redirectUri the uri for the redirection endpoint
* @return the {@link AbstractBuilder}
*/
public B redirectUri(String redirectUri) {
this.redirectUri = redirectUri;
return getThis();
}
/**
* Sets the scope(s).
* @param scope the scope(s)
* @return the {@link AbstractBuilder}
*/
public B scope(String... scope) {
if (scope != null && scope.length > 0) {
return scopes(new LinkedHashSet<>(Arrays.asList(scope)));
}
return getThis();
}
/**
* Sets the scope(s).
* @param scopes the scope(s)
* @return the {@link AbstractBuilder}
*/
public B scopes(Set<String> scopes) {
this.scopes = scopes;
return getThis();
}
/**
* Sets the state.
* @param state the state
* @return the {@link AbstractBuilder}
*/
public B state(String state) {
this.state = state;
return getThis();
}
/**
* Sets the additional parameter(s) used in the request.
* @param additionalParameters the additional parameter(s) used in the request
* @return the {@link AbstractBuilder}
*/
public B additionalParameters(Map<String, Object> additionalParameters) {
if (!CollectionUtils.isEmpty(additionalParameters)) {
this.additionalParameters.putAll(additionalParameters);
}
return getThis();
}
/**
* A {@code Consumer} to be provided access to the additional parameter(s)
* allowing the ability to add, replace, or remove.
* @param additionalParametersConsumer a {@code Consumer} of the additional
* parameters
* @return the {@link AbstractBuilder}
* @since 5.3
*/
public B additionalParameters(Consumer<Map<String, Object>> additionalParametersConsumer) {
if (additionalParametersConsumer != null) {
additionalParametersConsumer.accept(this.additionalParameters);
}
return getThis();
}
/**
* A {@code Consumer} to be provided access to all the parameters allowing the
* ability to add, replace, or remove.
* @param parametersConsumer a {@code Consumer} of all the parameters
* @return the {@link AbstractBuilder}
* @since 5.3
*/
public B parameters(Consumer<Map<String, Object>> parametersConsumer) {
if (parametersConsumer != null) {
this.parametersConsumer = parametersConsumer;
}
return getThis();
}
/**
* Sets the attributes associated to the request.
* @param attributes the attributes associated to the request
* @return the {@link AbstractBuilder}
* @since 5.2
*/
public B attributes(Map<String, Object> attributes) {
if (!CollectionUtils.isEmpty(attributes)) {
this.attributes.putAll(attributes);
}
return getThis();
}
/**
* A {@code Consumer} to be provided access to the attribute(s) allowing the
* ability to add, replace, or remove.
* @param attributesConsumer a {@code Consumer} of the attribute(s)
* @return the {@link AbstractBuilder}
* @since 5.3
*/
public B attributes(Consumer<Map<String, Object>> attributesConsumer) {
if (attributesConsumer != null) {
attributesConsumer.accept(this.attributes);
}
return getThis();
}
/**
* Sets the {@code URI} string representation of the OAuth 2.0 Authorization
* Request.
*
* <p>
* <b>NOTE:</b> The {@code URI} string is <b>required</b> to be encoded in the
* {@code application/x-www-form-urlencoded} MIME format.
* @param authorizationRequestUri the {@code URI} string representation of the
* OAuth 2.0 Authorization Request
* @return the {@link AbstractBuilder}
* @since 5.1
*/
public B authorizationRequestUri(String authorizationRequestUri) {
this.authorizationRequestUri = authorizationRequestUri;
return getThis();
}
/**
* A {@code Function} to be provided a {@code UriBuilder} representation of the
* OAuth 2.0 Authorization Request allowing for further customizations.
* @param authorizationRequestUriFunction a {@code Function} to be provided a
* {@code UriBuilder} representation of the OAuth 2.0 Authorization Request
* @return the {@link AbstractBuilder}
* @since 5.3
*/
public B authorizationRequestUri(Function<UriBuilder, URI> authorizationRequestUriFunction) {
if (authorizationRequestUriFunction != null) {
this.authorizationRequestUriFunction = authorizationRequestUriFunction;
}
return getThis();
}
public abstract T build();
private String buildAuthorizationRequestUri() {
Map<String, Object> parameters = getParameters(); // Not encoded
this.parametersConsumer.accept(parameters);
MultiValueMap<String, String> queryParams = new LinkedMultiValueMap<>();
parameters.forEach((k, v) -> {
String key = encodeQueryParam(k);
if (v instanceof Iterable) {
((Iterable<?>) v).forEach((value) -> queryParams.add(key, encodeQueryParam(String.valueOf(value))));
}
else if (v != null && v.getClass().isArray()) {
Object[] values = (Object[]) v;
for (Object value : values) {
queryParams.add(key, encodeQueryParam(String.valueOf(value)));
}
}
else {
queryParams.set(key, encodeQueryParam(String.valueOf(v)));
}
});
UriBuilder uriBuilder = this.uriBuilderFactory.uriString(this.authorizationUri).queryParams(queryParams);
return this.authorizationRequestUriFunction.apply(uriBuilder).toString();
}
protected Map<String, Object> getParameters() {
Map<String, Object> parameters = new LinkedHashMap<>();
parameters.put(OAuth2ParameterNames.RESPONSE_TYPE, this.responseType.getValue());
parameters.put(OAuth2ParameterNames.CLIENT_ID, this.clientId);
if (!CollectionUtils.isEmpty(this.scopes)) {
parameters.put(OAuth2ParameterNames.SCOPE, StringUtils.collectionToDelimitedString(this.scopes, " "));
}
if (this.state != null) {
parameters.put(OAuth2ParameterNames.STATE, this.state);
}
if (this.redirectUri != null) {
parameters.put(OAuth2ParameterNames.REDIRECT_URI, this.redirectUri);
}
parameters.putAll(this.additionalParameters);
return parameters;
}
// Encode query parameter value according to RFC 3986
private static String encodeQueryParam(String value) {
return UriUtils.encodeQueryParam(value, StandardCharsets.UTF_8);
}
}
}
| AbstractBuilder |
java | quarkusio__quarkus | extensions/devui/runtime/src/main/java/io/quarkus/devui/runtime/jsonrpc/JsonRpcMethod.java | {
"start": 4635,
"end": 5551
} | class ____ {
private Class<?> type;
private String description;
private boolean required;
public Parameter(Class<?> type, String description, boolean required) {
this.type = type;
this.description = description;
this.required = required;
}
public Class<?> getType() {
return type;
}
public void setType(Class<?> type) {
this.type = type;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
public boolean isRequired() {
return required;
}
public void setRequired(boolean required) {
this.required = required;
}
}
private static final String UNDERSCORE = "_";
}
| Parameter |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/fetch/subselect/SubselectFetchCollectionFromBatchTest.java | {
"start": 13620,
"end": 14240
} | class ____ {
@Id
@GeneratedValue
private Long id;
private String name;
@OneToMany(cascade = CascadeType.ALL)
@Fetch(FetchMode.SUBSELECT)
private List<Employee> collaborators = new ArrayList<>();
public String getName() {
return name;
}
@SuppressWarnings("unused")
private Employee() {
}
public Employee(String name) {
this.name = name;
}
public boolean addCollaborator(Employee employee) {
return collaborators.add(employee);
}
public List<Employee> getCollaborators() {
return collaborators;
}
@Override
public String toString() {
return name;
}
}
}
| Employee |
java | quarkusio__quarkus | extensions/arc/deployment/src/test/java/io/quarkus/arc/test/properties/IfBuildPropertyRepeatableStereotypeTest.java | {
"start": 4122,
"end": 4263
} | class ____ implements MyService {
}
@InheritableTransitiveNotMatchingProperty
static abstract | TransitiveNotMatchingPropertyMyService |
java | google__error-prone | check_api/src/main/java/com/google/errorprone/JavacErrorDescriptionListener.java | {
"start": 1670,
"end": 7120
} | class ____ implements DescriptionListener {
private final Log log;
private final JavaFileObject sourceFile;
private final Function<Fix, AppliedFix> fixToAppliedFix;
private final Context context;
// When we're trying to refactor using error prone fixes, any error halts compilation of other
// files. We set this to true when refactoring so we can log every hit without breaking the
// compile.
private final boolean dontUseErrors;
// The suffix for properties in src/main/resources/com/google/errorprone/errors.properties
private static final String MESSAGE_BUNDLE_KEY = "error.prone";
// DiagnosticFlag.API ensures that errors are always reported, bypassing 'shouldReport' logic
// that filters out duplicate diagnostics at the same position, and ensures that
// ErrorProneAnalyzer can compare the counts of errors reported by Error Prone with the total
// number of errors reported.
private static final ImmutableSet<JCDiagnostic.DiagnosticFlag> DIAGNOSTIC_FLAGS =
ImmutableSet.of(JCDiagnostic.DiagnosticFlag.API);
private JavacErrorDescriptionListener(
Log log,
EndPosTable endPositions,
JavaFileObject sourceFile,
Context context,
boolean dontUseErrors) {
this.log = log;
this.sourceFile = sourceFile;
this.context = context;
this.dontUseErrors = dontUseErrors;
checkNotNull(endPositions);
// Optimization for checks that emit the same fix multiple times. Consider a check that renames
// all uses of a symbol, and reports the diagnostic on all occurrences of the symbol. This can
// be useful in environments where diagnostics are only shown on changed lines, but can lead to
// quadratic behaviour during fix application if we're not careful.
Map<Fix, AppliedFix> cache = new HashMap<>();
try {
CharSequence sourceFileContent = sourceFile.getCharContent(true);
fixToAppliedFix =
fix ->
cache.computeIfAbsent(fix, f -> AppliedFix.apply(sourceFileContent, endPositions, f));
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
@Override
public void onDescribed(Description description) {
ImmutableList<AppliedFix> appliedFixes =
description.fixes.stream()
.filter(f -> !shouldSkipImportTreeFix(description.position, f))
.map(fixToAppliedFix)
.filter(Objects::nonNull)
.collect(toImmutableList());
String message = messageForFixes(description, appliedFixes);
// Swap the log's source and the current file's source; then be sure to swap them back later.
JavaFileObject originalSource = log.useSource(sourceFile);
try {
JCDiagnostic.Factory factory = JCDiagnostic.Factory.instance(context);
DiagnosticPosition pos = description.position;
JCDiagnostic.DiagnosticType type =
switch (description.severity()) {
case ERROR ->
dontUseErrors
? JCDiagnostic.DiagnosticType.WARNING
: JCDiagnostic.DiagnosticType.ERROR;
case WARNING -> JCDiagnostic.DiagnosticType.WARNING;
case SUGGESTION -> JCDiagnostic.DiagnosticType.NOTE;
};
log.report(
factory.create(
type,
/* lintCategory */ null,
// Make a defensive copy, as JDK at head mutates its arguments.
EnumSet.copyOf(DIAGNOSTIC_FLAGS),
log.currentSource(),
pos,
MESSAGE_BUNDLE_KEY,
message));
} finally {
if (originalSource != null) {
log.useSource(originalSource);
}
}
}
// b/79407644: Because AppliedFix doesn't consider imports, just don't display a
// suggested fix to an ImportTree when the fix reports imports to remove/add. Imports can still
// be fixed if they were specified via SuggestedFix.replace, for example.
private static boolean shouldSkipImportTreeFix(DiagnosticPosition position, Fix f) {
if (position.getTree() != null && !(position.getTree() instanceof ImportTree)) {
return false;
}
return !f.getImportsToAdd().isEmpty() || !f.getImportsToRemove().isEmpty();
}
private static String messageForFixes(Description description, List<AppliedFix> appliedFixes) {
StringBuilder messageBuilder = new StringBuilder(description.getMessage());
boolean first = true;
for (AppliedFix appliedFix : appliedFixes) {
if (first) {
messageBuilder.append("\nDid you mean ");
} else {
messageBuilder.append(" or ");
}
if (appliedFix.isRemoveLine()) {
messageBuilder.append("to remove this line");
} else {
messageBuilder.append("'").append((CharSequence) appliedFix.snippet()).append("'");
}
first = false;
}
if (!first) { // appended at least one suggested fix to the message
messageBuilder.append("?");
}
return messageBuilder.toString();
}
static Factory provider(Context context) {
return (log, compilation) ->
new JavacErrorDescriptionListener(
log, compilation.endPositions, compilation.getSourceFile(), context, false);
}
static Factory providerForRefactoring(Context context) {
return (log, compilation) ->
new JavacErrorDescriptionListener(
log, compilation.endPositions, compilation.getSourceFile(), context, true);
}
}
| JavacErrorDescriptionListener |
java | resilience4j__resilience4j | resilience4j-vavr/src/test/java/io/github/resilience4j/core/VavrCheckedFunctionUtilsTest.java | {
"start": 846,
"end": 4076
} | class ____ {
@Test
public void shouldRecoverFromException() throws Throwable {
CheckedFunction0<String> callable = () -> {
throw new IOException("BAM!");
};
CheckedFunction0<String> callableWithRecovery = VavrCheckedFunctionUtils.recover(callable, (ex) -> "Bla");
String result = callableWithRecovery.apply();
assertThat(result).isEqualTo("Bla");
}
@Test
public void shouldRecoverFromSpecificExceptions() throws Throwable {
CheckedFunction0<String> callable = () -> {
throw new IOException("BAM!");
};
CheckedFunction0<String> callableWithRecovery = VavrCheckedFunctionUtils.recover(callable,
asList(IllegalArgumentException.class, IOException.class),
(ex) -> "Bla");
String result = callableWithRecovery.apply();
assertThat(result).isEqualTo("Bla");
}
@Test
public void shouldRecoverFromResult() throws Throwable {
CheckedFunction0<String> callable = () -> "Wrong Result";
CheckedFunction0<String> callableWithRecovery = VavrCheckedFunctionUtils.andThen(callable, (result, ex) -> {
if(result.equals("Wrong Result")){
return "Bla";
}
return result;
});
String result = callableWithRecovery.apply();
assertThat(result).isEqualTo("Bla");
}
@Test
public void shouldRecoverFromException2() throws Throwable {
CheckedFunction0<String> callable = () -> {
throw new IllegalArgumentException("BAM!");
};
CheckedFunction0<String> callableWithRecovery = VavrCheckedFunctionUtils.andThen(callable, (result, ex) -> {
if(ex instanceof IllegalArgumentException){
return "Bla";
}
return result;
});
String result = callableWithRecovery.apply();
assertThat(result).isEqualTo("Bla");
}
@Test
public void shouldRecoverFromSpecificResult() throws Throwable {
CheckedFunction0<String> supplier = () -> "Wrong Result";
CheckedFunction0<String> callableWithRecovery = VavrCheckedFunctionUtils.recover(supplier, (result) -> result.equals("Wrong Result"), (r) -> "Bla");
String result = callableWithRecovery.apply();
assertThat(result).isEqualTo("Bla");
}
@Test(expected = RuntimeException.class)
public void shouldRethrowException() throws Throwable {
CheckedFunction0<String> callable = () -> {
throw new IOException("BAM!");
};
CheckedFunction0<String> callableWithRecovery = VavrCheckedFunctionUtils.recover(callable, (ex) -> {
throw new RuntimeException();
});
callableWithRecovery.apply();
}
@Test(expected = RuntimeException.class)
public void shouldRethrowException2() throws Throwable {
CheckedFunction0<String> callable = () -> {
throw new RuntimeException("BAM!");
};
CheckedFunction0<String> callableWithRecovery = VavrCheckedFunctionUtils.recover(callable, IllegalArgumentException.class, (ex) -> "Bla");
callableWithRecovery.apply();
}
}
| VavrCheckedFunctionUtilsTest |
java | spring-projects__spring-framework | spring-websocket/src/main/java/org/springframework/web/socket/server/support/AbstractHandshakeHandler.java | {
"start": 2931,
"end": 14085
} | class ____ implements HandshakeHandler, Lifecycle {
// For WebSocket upgrades in HTTP/2 (see RFC 8441)
private static final HttpMethod CONNECT_METHOD = HttpMethod.valueOf("CONNECT");
protected final Log logger = LogFactory.getLog(getClass());
private final RequestUpgradeStrategy requestUpgradeStrategy;
private final List<String> supportedProtocols = new ArrayList<>();
private volatile boolean running;
/**
* Default constructor that uses {@link StandardWebSocketUpgradeStrategy}.
*/
protected AbstractHandshakeHandler() {
this(new StandardWebSocketUpgradeStrategy());
}
/**
* A constructor that accepts a runtime-specific {@link RequestUpgradeStrategy}.
* @param requestUpgradeStrategy the upgrade strategy to use
*/
protected AbstractHandshakeHandler(RequestUpgradeStrategy requestUpgradeStrategy) {
Assert.notNull(requestUpgradeStrategy, "RequestUpgradeStrategy must not be null");
this.requestUpgradeStrategy = requestUpgradeStrategy;
}
/**
* Return the {@link RequestUpgradeStrategy} for WebSocket requests.
*/
public RequestUpgradeStrategy getRequestUpgradeStrategy() {
return this.requestUpgradeStrategy;
}
/**
* Use this property to configure the list of supported sub-protocols.
* The first configured sub-protocol that matches a client-requested sub-protocol
* is accepted. If there are no matches the response will not contain a
* {@literal Sec-WebSocket-Protocol} header.
* <p>Note that if the WebSocketHandler passed in at runtime is an instance of
* {@link SubProtocolCapable} then there is no need to explicitly configure
* this property. That is certainly the case with the built-in STOMP over
* WebSocket support. Therefore, this property should be configured explicitly
* only if the WebSocketHandler does not implement {@code SubProtocolCapable}.
*/
public void setSupportedProtocols(String... protocols) {
this.supportedProtocols.clear();
for (String protocol : protocols) {
this.supportedProtocols.add(protocol.toLowerCase(Locale.ROOT));
}
}
/**
* Return the list of supported sub-protocols.
*/
public String[] getSupportedProtocols() {
return StringUtils.toStringArray(this.supportedProtocols);
}
@Override
public void start() {
if (!isRunning()) {
this.running = true;
doStart();
}
}
protected void doStart() {
if (this.requestUpgradeStrategy instanceof Lifecycle lifecycle) {
lifecycle.start();
}
}
@Override
public void stop() {
if (isRunning()) {
this.running = false;
doStop();
}
}
protected void doStop() {
if (this.requestUpgradeStrategy instanceof Lifecycle lifecycle) {
lifecycle.stop();
}
}
@Override
public boolean isRunning() {
return this.running;
}
@Override
public final boolean doHandshake(ServerHttpRequest request, ServerHttpResponse response,
WebSocketHandler wsHandler, Map<String, Object> attributes) throws HandshakeFailureException {
WebSocketHttpHeaders headers = new WebSocketHttpHeaders(request.getHeaders());
if (logger.isTraceEnabled()) {
logger.trace("Processing request " + request.getURI() + " with headers=" + headers);
}
try {
HttpMethod httpMethod = request.getMethod();
if (HttpMethod.GET != httpMethod && !CONNECT_METHOD.equals(httpMethod)) {
response.setStatusCode(HttpStatus.METHOD_NOT_ALLOWED);
response.getHeaders().setAllow(Set.of(HttpMethod.GET, CONNECT_METHOD));
if (logger.isErrorEnabled()) {
logger.error("Handshake failed due to unexpected HTTP method: " + httpMethod);
}
return false;
}
if (HttpMethod.GET == httpMethod) {
if (!"WebSocket".equalsIgnoreCase(headers.getUpgrade())) {
handleInvalidUpgradeHeader(request, response);
return false;
}
List<String> connectionValue = headers.getConnection();
if (!connectionValue.contains("Upgrade") && !connectionValue.contains("upgrade")) {
handleInvalidConnectHeader(request, response);
return false;
}
String key = headers.getSecWebSocketKey();
if (key == null) {
if (logger.isErrorEnabled()) {
logger.error("Missing \"Sec-WebSocket-Key\" header");
}
response.setStatusCode(HttpStatus.BAD_REQUEST);
return false;
}
}
if (!isWebSocketVersionSupported(headers)) {
handleWebSocketVersionNotSupported(request, response);
return false;
}
if (!isValidOrigin(request)) {
response.setStatusCode(HttpStatus.FORBIDDEN);
return false;
}
}
catch (IOException ex) {
throw new HandshakeFailureException(
"Response update failed during upgrade to WebSocket: " + request.getURI(), ex);
}
String subProtocol = selectProtocol(headers.getSecWebSocketProtocol(), wsHandler);
List<WebSocketExtension> requested = headers.getSecWebSocketExtensions();
List<WebSocketExtension> supported = this.requestUpgradeStrategy.getSupportedExtensions(request);
List<WebSocketExtension> extensions = filterRequestedExtensions(request, requested, supported);
Principal user = determineUser(request, wsHandler, attributes);
if (logger.isTraceEnabled()) {
logger.trace("Upgrading to WebSocket, subProtocol=" + subProtocol + ", extensions=" + extensions);
}
this.requestUpgradeStrategy.upgrade(request, response, subProtocol, extensions, user, wsHandler, attributes);
return true;
}
protected void handleInvalidUpgradeHeader(ServerHttpRequest request, ServerHttpResponse response) throws IOException {
if (logger.isErrorEnabled()) {
logger.error(LogFormatUtils.formatValue(
"Handshake failed due to invalid Upgrade header: " + request.getHeaders().getUpgrade(), -1, true));
}
response.setStatusCode(HttpStatus.BAD_REQUEST);
response.getBody().write("Can \"Upgrade\" only to \"WebSocket\".".getBytes(StandardCharsets.UTF_8));
}
protected void handleInvalidConnectHeader(ServerHttpRequest request, ServerHttpResponse response) throws IOException {
if (logger.isErrorEnabled()) {
logger.error(LogFormatUtils.formatValue(
"Handshake failed due to invalid Connection header" + request.getHeaders().getConnection(), -1, true));
}
response.setStatusCode(HttpStatus.BAD_REQUEST);
response.getBody().write("\"Connection\" must be \"upgrade\".".getBytes(StandardCharsets.UTF_8));
}
protected boolean isWebSocketVersionSupported(WebSocketHttpHeaders httpHeaders) {
String version = httpHeaders.getSecWebSocketVersion();
String[] supportedVersions = getSupportedVersions();
for (String supportedVersion : supportedVersions) {
if (supportedVersion.trim().equals(version)) {
return true;
}
}
return false;
}
protected String[] getSupportedVersions() {
return this.requestUpgradeStrategy.getSupportedVersions();
}
protected void handleWebSocketVersionNotSupported(ServerHttpRequest request, ServerHttpResponse response) {
if (logger.isErrorEnabled()) {
String version = request.getHeaders().getFirst(WebSocketHttpHeaders.SEC_WEBSOCKET_VERSION);
logger.error(LogFormatUtils.formatValue(
"Handshake failed due to unsupported WebSocket version: " + version +
". Supported versions: " + Arrays.toString(getSupportedVersions()), -1, true));
}
response.setStatusCode(HttpStatus.UPGRADE_REQUIRED);
response.getHeaders().set(WebSocketHttpHeaders.SEC_WEBSOCKET_VERSION,
StringUtils.arrayToCommaDelimitedString(getSupportedVersions()));
}
/**
* Return whether the request {@code Origin} header value is valid or not.
* By default, all origins as considered as valid. Consider using an
* {@link OriginHandshakeInterceptor} for filtering origins if needed.
*/
protected boolean isValidOrigin(ServerHttpRequest request) {
return true;
}
/**
* Perform the sub-protocol negotiation based on requested and supported sub-protocols.
* For the list of supported sub-protocols, this method first checks if the target
* WebSocketHandler is a {@link SubProtocolCapable} and then also checks if any
* sub-protocols have been explicitly configured with
* {@link #setSupportedProtocols(String...)}.
* @param requestedProtocols the requested sub-protocols
* @param webSocketHandler the WebSocketHandler that will be used
* @return the selected protocols or {@code null}
* @see #determineHandlerSupportedProtocols(WebSocketHandler)
*/
protected @Nullable String selectProtocol(List<String> requestedProtocols, WebSocketHandler webSocketHandler) {
List<String> handlerProtocols = determineHandlerSupportedProtocols(webSocketHandler);
for (String protocol : requestedProtocols) {
if (handlerProtocols.contains(protocol.toLowerCase(Locale.ROOT))) {
return protocol;
}
if (this.supportedProtocols.contains(protocol.toLowerCase(Locale.ROOT))) {
return protocol;
}
}
return null;
}
/**
* Determine the sub-protocols supported by the given WebSocketHandler by
* checking whether it is an instance of {@link SubProtocolCapable}.
* @param handler the handler to check
* @return a list of supported protocols, or an empty list if none available
*/
protected final List<String> determineHandlerSupportedProtocols(WebSocketHandler handler) {
WebSocketHandler handlerToCheck = WebSocketHandlerDecorator.unwrap(handler);
List<String> subProtocols = null;
if (handlerToCheck instanceof SubProtocolCapable subProtocolCapable) {
subProtocols = subProtocolCapable.getSubProtocols();
}
return (subProtocols != null ? subProtocols : Collections.emptyList());
}
/**
* Filter the list of requested WebSocket extensions.
* <p>As of 4.1, the default implementation of this method filters the list to
* leave only extensions that are both requested and supported.
* @param request the current request
* @param requestedExtensions the list of extensions requested by the client
* @param supportedExtensions the list of extensions supported by the server
* @return the selected extensions or an empty list
*/
protected List<WebSocketExtension> filterRequestedExtensions(ServerHttpRequest request,
List<WebSocketExtension> requestedExtensions, List<WebSocketExtension> supportedExtensions) {
List<WebSocketExtension> result = new ArrayList<>(requestedExtensions.size());
for (WebSocketExtension extension : requestedExtensions) {
if (supportedExtensions.contains(extension)) {
result.add(extension);
}
}
return result;
}
/**
* A method that can be used to associate a user with the WebSocket session
* in the process of being established. The default implementation calls
* {@link ServerHttpRequest#getPrincipal()}
* <p>Subclasses can provide custom logic for associating a user with a session,
* for example for assigning a name to anonymous users (i.e. not fully authenticated).
* @param request the handshake request
* @param wsHandler the WebSocket handler that will handle messages
* @param attributes handshake attributes to pass to the WebSocket session
* @return the user for the WebSocket session, or {@code null} if not available
*/
protected @Nullable Principal determineUser(
ServerHttpRequest request, WebSocketHandler wsHandler, Map<String, Object> attributes) {
return request.getPrincipal();
}
}
| AbstractHandshakeHandler |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/highavailability/ClientHighAvailabilityServicesFactory.java | {
"start": 1044,
"end": 1616
} | interface ____ {
/**
* Creates a {@link ClientHighAvailabilityServices} instance.
*
* @param configuration Flink configuration
* @param fatalErrorHandler {@link FatalErrorHandler} fatalErrorHandler to handle unexpected
* errors
* @return instance of {@link ClientHighAvailabilityServices}
* @throws Exception when HA services can not be created.
*/
ClientHighAvailabilityServices create(
Configuration configuration, FatalErrorHandler fatalErrorHandler) throws Exception;
}
| ClientHighAvailabilityServicesFactory |
java | quarkusio__quarkus | integration-tests/hibernate-reactive-mssql/src/test/java/io/quarkus/it/hibernate/reactive/mssql/HibernateReactiveMSSQLInGraalIT.java | {
"start": 134,
"end": 211
} | class ____ extends HibernateReactiveMSSQLTest {
}
| HibernateReactiveMSSQLInGraalIT |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/filter/wall/mysql/MySqlWallTest29.java | {
"start": 837,
"end": 1067
} | class ____ extends TestCase {
public void test_false() throws Exception {
assertFalse(WallUtils.isValidateMySql(//
"select * from t where FID = 1 OR UNHEX('4D7953514C') = 'MySQL'"));
}
}
| MySqlWallTest29 |
java | square__retrofit | retrofit-converters/jackson/src/test/java/retrofit2/converter/jackson/JacksonConverterFactoryTest.java | {
"start": 3598,
"end": 3739
} | class ____ {
final String theName;
ErroringValue(String theName) {
this.theName = theName;
}
}
static final | ErroringValue |
java | apache__maven | its/core-it-suite/src/test/java/org/apache/maven/it/MavenITgh11084ReactorReaderPreferConsumerPomTest.java | {
"start": 964,
"end": 1952
} | class ____ extends AbstractMavenIntegrationTestCase {
@Test
void partialReactorShouldResolveUsingConsumerPom() throws Exception {
File testDir = extractResources("/gh-11084-reactorreader-prefer-consumer-pom");
// First build module a to populate project-local-repo with artifacts including consumer POM
Verifier v1 = newVerifier(testDir.getAbsolutePath());
v1.addCliArguments("clean", "package", "-X", "-Dmaven.consumer.pom.flatten=true");
v1.setLogFileName("log-1.txt");
v1.execute();
v1.verifyErrorFreeLog();
// Now build only module b; ReactorReader should pick consumer POM from project-local-repo
Verifier v2 = newVerifier(testDir.getAbsolutePath());
v2.setLogFileName("log-2.txt");
v2.addCliArguments("clean", "compile", "-f", "b", "-X", "-Dmaven.consumer.pom.flatten=true");
v2.execute();
v2.verifyErrorFreeLog();
}
}
| MavenITgh11084ReactorReaderPreferConsumerPomTest |
java | apache__camel | components/camel-opentelemetry-metrics/src/test/java/org/apache/camel/opentelemetry/metrics/integration/messagehistory/MessageHistoryPatternIT.java | {
"start": 1732,
"end": 4070
} | class ____ extends AbstractOpenTelemetryTest {
@Override
protected CamelContext createCamelContext() throws Exception {
CamelContext context = super.createCamelContext();
OpenTelemetryMessageHistoryFactory factory = new OpenTelemetryMessageHistoryFactory();
factory.setMeter(otelExtension.getOpenTelemetry().getMeter("meterTest"));
factory.setNodePattern("step");
context.setMessageHistoryFactory(factory);
return context;
}
@Test
public void testMessageHistory() throws Exception {
int count = 3;
getMockEndpoint("mock:a").expectedMessageCount(count);
getMockEndpoint("mock:b").expectedMessageCount(count);
getMockEndpoint("mock:bar").expectedMessageCount(count);
for (int i = 0; i < count; i++) {
Exchange out = template.request("direct:start", e -> {
e.getMessage().setBody("Hello World");
});
}
MockEndpoint.assertIsSatisfied(context);
// there should be 3 names
assertEquals(3, getAllPointData(DEFAULT_CAMEL_MESSAGE_HISTORY_METER_NAME).size());
assertEquals(count, getPointData("route1", "a").getCount());
assertEquals(count, getPointData("route1", "b").getCount());
assertEquals(count, getPointData("route2", "bar").getCount());
}
private HistogramPointData getPointData(String routeId, String nodeId) {
PointData pd = getAllPointDataForRouteId(DEFAULT_CAMEL_MESSAGE_HISTORY_METER_NAME, routeId)
.stream()
.filter(point -> nodeId.equals(point.getAttributes().get(AttributeKey.stringKey("nodeId"))))
.findFirst().orElse(null);
assertNotNull(pd);
assertInstanceOf(HistogramPointData.class, pd);
return (HistogramPointData) pd;
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start").step("a").transform().constant("Bye World").to("mock:a").end().step("b").transform()
.constant("Hi World").to("direct:bar").to("mock:b").end();
from("direct:bar").step("bar").to("log:bar").to("mock:bar").end();
}
};
}
}
| MessageHistoryPatternIT |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/issue_2000/Issue2088.java | {
"start": 1325,
"end": 1423
} | class ____ {
@JSONField(format = "yyyyMMddHHmmssSSSZ")
public Date date;
}
}
| Model |
java | junit-team__junit5 | junit-platform-engine/src/main/java/org/junit/platform/engine/discovery/DiscoverySelectors.java | {
"start": 12728,
"end": 14557
} | class ____} of the
* {@linkplain Thread thread} that uses the resulting selector.
*
* <p>Note: Since Java 9, all resources are on the module path. Either in
* named or unnamed modules. These resources are also considered to be
* classpath resources.
*
* <p>The {@link Set} supplied to this method should have a reliable iteration
* order to support reliable discovery and execution order. It is therefore
* recommended that the set be a {@link java.util.SequencedSet} (on Java 21
* or higher), {@link java.util.SortedSet}, {@link java.util.LinkedHashSet},
* or similar. Note that {@link Set#of(Object[])} and related {@code Set.of()}
* methods do not guarantee a reliable iteration order.
*
* @param classpathResources a set of classpath resources; never
* {@code null} or empty. All resources must have the same name, may not
* be {@code null} or blank.
* @since 1.12
* @see #selectClasspathResource(String, FilePosition)
* @see #selectClasspathResource(String)
* @see ClasspathResourceSelector
* @see ReflectionSupport#tryToGetResources(String)
* @deprecated Please use {@link #selectClasspathResourceByName(Set)} instead.
*/
@API(status = DEPRECATED, since = "1.14")
@Deprecated(since = "1.14", forRemoval = true)
@SuppressWarnings("removal")
public static ClasspathResourceSelector selectClasspathResource(
Set<org.junit.platform.commons.support.Resource> classpathResources) {
return selectClasspathResourceByName(classpathResources);
}
/**
* Create a {@code ClasspathResourceSelector} for the supplied classpath
* resources.
*
* <p>Since {@linkplain org.junit.platform.engine.TestEngine engines} are not
* expected to modify the classpath, the supplied resource must be on the
* classpath of the
* {@linkplain Thread#getContextClassLoader() context | loader |
java | google__dagger | javatests/dagger/internal/codegen/DaggerSuperficialValidationTest.java | {
"start": 15947,
"end": 16133
} | interface ____ {",
" Class[] classes();",
" }",
"",
" @TestAnnotation(classes = MissingType.class)",
" static | TestAnnotation |
java | quarkusio__quarkus | extensions/smallrye-openapi/deployment/src/test/java/io/quarkus/smallrye/openapi/test/jaxrs/ResourceBean.java | {
"start": 178,
"end": 372
} | class ____ {
@Override
public String toString() {
return "resource";
}
@RolesAllowed("admin")
public String anotherMethod() {
return "bla";
}
}
| ResourceBean |
java | apache__flink | flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/api/java/hadoop/mapred/HadoopInputFormatTest.java | {
"start": 10737,
"end": 11177
} | class ____ implements InputFormat<String, Long> {
@Override
public InputSplit[] getSplits(JobConf jobConf, int i) throws IOException {
return new InputSplit[0];
}
@Override
public RecordReader<String, Long> getRecordReader(
InputSplit inputSplit, JobConf jobConf, Reporter reporter) throws IOException {
return null;
}
}
private | DummyInputFormat |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/resource/beans/container/spi/FallbackContainedBean.java | {
"start": 304,
"end": 969
} | class ____<B> implements ContainedBean<B>, ManagedBean<B> {
private final Class<B> beanType;
private final B beanInstance;
public FallbackContainedBean(Class<B> beanType, BeanInstanceProducer producer) {
this.beanType = beanType;
this.beanInstance = producer.produceBeanInstance( beanType );
}
public FallbackContainedBean(String beanName, Class<B> beanType, BeanInstanceProducer producer) {
this.beanType = beanType;
this.beanInstance = producer.produceBeanInstance( beanName, beanType );
}
@Override
public Class<B> getBeanClass() {
return beanType;
}
@Override
public B getBeanInstance() {
return beanInstance;
}
}
| FallbackContainedBean |
java | apache__flink | flink-table/flink-sql-gateway-api/src/main/java/org/apache/flink/table/gateway/api/operation/OperationStatus.java | {
"start": 1090,
"end": 2970
} | enum ____ {
/** The operation is newly created. */
INITIALIZED(false),
/** Prepare the resources for the operation. */
PENDING(false),
/** The operation is running. */
RUNNING(false),
/** All the work is finished and ready for the client to fetch the results. */
FINISHED(true),
/** Operation has been cancelled. */
CANCELED(true),
/** Operation has been closed and all related resources are collected. */
CLOSED(true),
/** Some error happens. */
ERROR(true),
/** The execution of the operation timeout. */
TIMEOUT(true);
private final boolean isTerminalStatus;
OperationStatus(boolean isTerminalStatus) {
this.isTerminalStatus = isTerminalStatus;
}
public static boolean isValidStatusTransition(
OperationStatus fromStatus, OperationStatus toStatus) {
return toOperationStatusSet(fromStatus).contains(toStatus);
}
public boolean isTerminalStatus() {
return isTerminalStatus;
}
private static Set<OperationStatus> toOperationStatusSet(OperationStatus fromStatus) {
switch (fromStatus) {
case INITIALIZED:
return new HashSet<>(Arrays.asList(PENDING, CANCELED, CLOSED, TIMEOUT, ERROR));
case PENDING:
return new HashSet<>(Arrays.asList(RUNNING, CANCELED, CLOSED, TIMEOUT, ERROR));
case RUNNING:
return new HashSet<>(Arrays.asList(FINISHED, CANCELED, CLOSED, TIMEOUT, ERROR));
case FINISHED:
case CANCELED:
case TIMEOUT:
case ERROR:
return Collections.singleton(CLOSED);
case CLOSED:
return Collections.emptySet();
default:
throw new IllegalArgumentException("Unknown from status: " + fromStatus);
}
}
}
| OperationStatus |
java | apache__camel | components/camel-jsonpath/src/test/java/org/apache/camel/jsonpath/JsonPathHeaderNameTest.java | {
"start": 1057,
"end": 2028
} | class ____ extends CamelTestSupport {
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
var jp = expression().jsonpath().expression("$..store.book.length()").resultType(int.class)
.source("header:myHeader").end();
from("direct:start")
.setHeader("number", jp)
.to("mock:result");
}
};
}
@Test
public void testAuthors() throws Exception {
getMockEndpoint("mock:result").expectedBodiesReceived("Hello World");
getMockEndpoint("mock:result").expectedHeaderReceived("number", "3");
Object file = new File("src/test/resources/books.json");
template.sendBodyAndHeader("direct:start", "Hello World", "myHeader", file);
MockEndpoint.assertIsSatisfied(context);
}
}
| JsonPathHeaderNameTest |
java | apache__flink | flink-table/flink-table-api-java-bridge/src/main/java/org/apache/flink/connector/datagen/table/DataGenVisitorBase.java | {
"start": 1779,
"end": 2647
} | class ____ extends LogicalTypeDefaultVisitor<DataGeneratorContainer> {
protected final String name;
protected final ReadableConfig config;
protected DataGenVisitorBase(String name, ReadableConfig config) {
this.name = name;
this.config = config;
}
@Override
public DataGeneratorContainer visit(DateType dateType) {
return DataGeneratorContainer.of(
TimeGenerator.of(() -> (int) LocalDate.now().toEpochDay()));
}
@Override
public DataGeneratorContainer visit(TimeType timeType) {
return DataGeneratorContainer.of(TimeGenerator.of(() -> LocalTime.now().get(MILLI_OF_DAY)));
}
@Override
protected DataGeneratorContainer defaultMethod(LogicalType logicalType) {
throw new ValidationException("Unsupported type: " + logicalType);
}
private | DataGenVisitorBase |
java | apache__avro | lang/java/mapred/src/main/java/org/apache/avro/hadoop/io/AvroDatumConverterFactory.java | {
"start": 6506,
"end": 7079
} | class ____ extends AvroDatumConverter<BytesWritable, ByteBuffer> {
private final Schema mSchema;
/** Constructor. */
public BytesWritableConverter() {
mSchema = Schema.create(Schema.Type.BYTES);
}
/** {@inheritDoc} */
@Override
public ByteBuffer convert(BytesWritable input) {
return ByteBuffer.wrap(input.getBytes());
}
/** {@inheritDoc} */
@Override
public Schema getWriterSchema() {
return mSchema;
}
}
/** Converts ByteWritables into GenericFixed of size 1. */
public static | BytesWritableConverter |
java | eclipse-vertx__vert.x | vertx-core/src/main/java/io/vertx/core/http/impl/http2/multiplex/Http2MultiplexHandler.java | {
"start": 2860,
"end": 12826
} | class ____ extends ChannelDuplexHandler implements io.netty.handler.codec.http2.Http2Connection.Listener {
private final Channel channel;
private final Http2MultiplexConnectionFactory connectionFactory;
private final Map<Http2StreamChannel, ChannelHandlerContext> pendingChannels; // For clients
private final Deque<PromiseInternal<Void>> pendingSettingsAcks;
private Http2MultiplexConnection connection;
private ChannelHandlerContext chctx;
private Http2Settings localSettings;
private Http2Settings remoteSettings;
private GoAway goAwayStatus;
public Http2MultiplexHandler(Channel channel,
ContextInternal context,
Http2MultiplexConnectionFactory connectionFactory,
Http2Settings initialSettings) {
// Initial settings ack
ArrayDeque<PromiseInternal<Void>> pendingAcks = new ArrayDeque<>();
pendingAcks.add(context.promise());
this.channel = channel;
this.localSettings = initialSettings;
this.pendingSettingsAcks = pendingAcks;
this.pendingChannels = new HashMap<>();
this.connectionFactory = connectionFactory;
}
Http2MultiplexConnection connection() {
return connection;
}
Http2Settings localSettings() {
return localSettings;
}
Http2Settings remoteSettings() {
return remoteSettings;
}
GoAway goAwayStatus() {
return goAwayStatus;
}
int windowsSize() {
Http2ConnectionHandler codec = chctx.pipeline().get(Http2FrameCodec.class);
Http2Connection connection = codec.connection();
Http2Stream stream = connection.connectionStream();
return connection.local().flowController().windowSize(stream);
}
void incrementWindowsSize(int windowSizeIncrement) throws Http2Exception {
Http2ConnectionHandler codec = chctx.pipeline().get(Http2FrameCodec.class);
Http2Connection connection = codec.connection();
Http2Stream stream = connection.connectionStream();
codec.decoder().flowController().incrementWindowSize(stream, windowSizeIncrement);
}
void createClientStream(Http2ClientStream stream) throws Exception {
Http2StreamChannelBootstrap bootstrap = new Http2StreamChannelBootstrap(channel);
bootstrap.handler(this);
Future<Http2StreamChannel> fut;
try {
fut = bootstrap.open();
} catch (Exception e) {
throw e;
}
Http2StreamChannel streamChannel = fut.sync().get();
ChannelHandlerContext chctx = pendingChannels.remove(streamChannel);
connection.registerChannel(stream, streamChannel.stream(), chctx);
}
Http2ClientStream upgradeClientStream(Http2StreamChannel channel, Object metric, Object trace,
ContextInternal context) {
ChannelHandlerContext chctx = pendingChannels.remove(channel);
ClientMetrics<?, ?, ?> clientMetrics = ((Http2MultiplexClientConnection) connection).clientMetrics();
Http2FrameStream s = channel.stream();
Http2ClientStream sb = Http2ClientStream.create(s.id(), (Http2ClientConnection) connection, context,
null, false, connection.transportMetrics, clientMetrics, channel.isWritable());
connection.registerChannel(sb, s, chctx);
sb.upgrade(metric, trace);
return sb;
}
@Override
public void handlerAdded(ChannelHandlerContext ctx) {
Channel channel = ctx.channel();
if (channel == this.channel) {
this.chctx = ctx;
this.connection = connectionFactory.createConnection(this, chctx);
} else if (channel instanceof Http2StreamChannel) {
if (connection.isServer()) {
ctx.pipeline().addBefore(ctx.name(), "chunkedWriter", new ChunkedWriteHandler());
} else {
pendingChannels.put((Http2StreamChannel) channel, ctx);
}
}
}
@Override
public void channelInactive(ChannelHandlerContext ctx) throws Exception {
if (ctx.channel() == channel) {
connection.onClose();
} else if (ctx.channel() instanceof Http2StreamChannel) {
Http2StreamChannel streamChannel = (Http2StreamChannel) ctx.channel();
connection.onStreamClose(streamChannel.stream().id());
}
super.channelInactive(ctx);
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
if (ctx.channel() == channel) {
connection.onException(cause);
} else if (ctx.channel() instanceof Http2StreamChannel) {
Http2StreamChannel streamChannel = (Http2StreamChannel) ctx.channel();
connection.onException(streamChannel.stream().id(), cause);
} else {
super.exceptionCaught(ctx, cause);
}
}
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
if (msg instanceof Http2HeadersFrame) {
Http2HeadersFrame frame = (Http2HeadersFrame) msg;
Http2Headers headers = frame.headers();
connection.receiveHeaders(ctx, frame.stream(), headers, frame.isEndStream());
} else if (msg instanceof Http2DataFrame) {
Http2DataFrame frame = (Http2DataFrame) msg;
connection.receiveData(ctx, frame.stream().id(), frame.content(), frame.isEndStream(), frame.initialFlowControlledBytes());
} else if (msg instanceof Http2UnknownFrame) {
Http2UnknownFrame frame = (Http2UnknownFrame) msg;
connection.receiveUnknownFrame(frame.stream().id(), frame.frameType(), frame.flags().value(), frame.content());
} else if (msg instanceof Http2SettingsFrame) {
Http2SettingsFrame frame = (Http2SettingsFrame) msg;
remoteSettings = frame.settings();
connection.receiveSettings(ctx, frame.settings());
} else if (msg instanceof Http2SettingsAckFrame) {
PromiseInternal<Void> pendingSettingAck = pendingSettingsAcks.poll();
pendingSettingAck.complete();
} else if (msg instanceof Http2GoAwayFrame) {
Http2GoAwayFrame frame = (Http2GoAwayFrame) msg;
connection.receiveGoAway(frame.errorCode(), frame.lastStreamId(), BufferInternal.buffer(frame.content()));
} else if (msg instanceof Http2PingFrame) {
Http2PingFrame frame = (Http2PingFrame) msg;
connection.receivePingAck(frame.content(), frame.ack());
}
super.channelRead(ctx, msg);
}
@Override
public void channelWritabilityChanged(ChannelHandlerContext ctx) throws Exception {
Channel channel = ctx.channel();
if (channel instanceof Http2StreamChannel) {
Http2StreamChannel stream = (Http2StreamChannel) channel;
connection.onWritabilityChanged(stream.stream().id());
}
super.channelWritabilityChanged(ctx);
}
@Override
public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
if (evt instanceof Http2ResetFrame) {
Http2ResetFrame frame = (Http2ResetFrame) evt;
connection.receiveResetFrame(frame.stream().id(), frame.errorCode());
} else if (evt instanceof HttpServerUpgradeHandler.UpgradeEvent) {
// Work-around
HttpServerUpgradeHandler.UpgradeEvent upgradeEvent = (HttpServerUpgradeHandler.UpgradeEvent) evt;
String settingsHeader = upgradeEvent.upgradeRequest().headers().get("HTTP2-Settings");
remoteSettings = HttpUtils.decodeSettings(settingsHeader);
connection.receiveSettings(ctx, remoteSettings);
} else if (evt instanceof Http2ConnectionPrefaceAndSettingsFrameWrittenEvent) {
((Http2MultiplexClientConnection)connection).onInitialSettingsSent();
} if (evt instanceof IdleStateEvent) {
connection.onIdle();
} else if (evt instanceof ShutdownEvent) {
ShutdownEvent shutdownEvt = (ShutdownEvent) evt;
connection.shutdown(shutdownEvt.timeout().toMillis(), TimeUnit.MILLISECONDS);
}
super.userEventTriggered(ctx, evt);
}
void writeGoAway(long code, ByteBuf content, PromiseInternal<Void> listener) {
Http2GoAwayFrame frame = new DefaultHttp2GoAwayFrame(code, content);
frame.lastStreamId();
ChannelFuture fut = chctx.writeAndFlush(frame);
fut.addListener(listener);
}
void writeSettings(Http2Settings update, PromiseInternal<Void> listener) {
for (Map.Entry<Character, Long> entry : localSettings.entrySet()) {
Character key = entry.getKey();
if (Objects.equals(update.get(key), entry.getValue())) {
// We can safely remove as this is a private copy
update.remove(key);
}
}
// This server does not support push currently
update.remove(SETTINGS_ENABLE_PUSH);
Http2SettingsFrame frame = new DefaultHttp2SettingsFrame(update);
ChannelFuture future = chctx.writeAndFlush(frame);
future.addListener((ChannelFutureListener) res -> {
if (res.isSuccess()) {
pendingSettingsAcks.add(listener);
} else {
listener.operationComplete(res);
}
});
localSettings = update; // Make a copy ?
}
@Override
public void onStreamAdded(Http2Stream stream) {
}
@Override
public void onStreamActive(Http2Stream stream) {
}
@Override
public void onStreamHalfClosed(Http2Stream stream) {
}
@Override
public void onStreamClosed(Http2Stream stream) {
if (connection instanceof Http2MultiplexClientConnection) {
Http2MultiplexClientConnection clientConnection = (Http2MultiplexClientConnection) connection;
clientConnection.refresh();
}
}
@Override
public void onStreamRemoved(Http2Stream stream) {
}
@Override
public void onGoAwaySent(int lastStreamId, long errorCode, ByteBuf debugData) {
checkGoAway(lastStreamId, errorCode, debugData);
}
@Override
public void onGoAwayReceived(int lastStreamId, long errorCode, ByteBuf debugData) {
checkGoAway(lastStreamId, errorCode, debugData);
}
private void checkGoAway(int lastStreamId, long errorCode, ByteBuf debugData) {
if (goAwayStatus == null) {
goAwayStatus = new GoAway().setLastStreamId(lastStreamId).setErrorCode(errorCode);
connection.onGoAway(errorCode, lastStreamId, BufferInternal.buffer(debugData));
}
}
}
| Http2MultiplexHandler |
java | netty__netty | codec-compression/src/test/java/io/netty/handler/codec/compression/ZlibTest.java | {
"start": 1738,
"end": 17623
} | class ____ {
private static final byte[] BYTES_SMALL = new byte[128];
private static final byte[] BYTES_LARGE = new byte[1024 * 1024];
private static final byte[] BYTES_LARGE2 = ("<!--?xml version=\"1.0\" encoding=\"ISO-8859-1\"?-->\n" +
"<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Strict//EN\" " +
"\"https://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd\">\n" +
"<html xmlns=\"https://www.w3.org/1999/xhtml\" xml:lang=\"en\" lang=\"en\"><head>\n" +
" <title>Apache Tomcat</title>\n" +
"</head>\n" +
'\n' +
"<body>\n" +
"<h1>It works !</h1>\n" +
'\n' +
"<p>If you're seeing this page via a web browser, it means you've setup Tomcat successfully." +
" Congratulations!</p>\n" +
" \n" +
"<p>This is the default Tomcat home page." +
" It can be found on the local filesystem at: <code>/var/lib/tomcat7/webapps/ROOT/index.html</code></p>\n" +
'\n' +
"<p>Tomcat7 veterans might be pleased to learn that this system instance of Tomcat is installed with" +
" <code>CATALINA_HOME</code> in <code>/usr/share/tomcat7</code> and <code>CATALINA_BASE</code> in" +
" <code>/var/lib/tomcat7</code>, following the rules from" +
" <code>/usr/share/doc/tomcat7-common/RUNNING.txt.gz</code>.</p>\n" +
'\n' +
"<p>You might consider installing the following packages, if you haven't already done so:</p>\n" +
'\n' +
"<p><b>tomcat7-docs</b>: This package installs a web application that allows to browse the Tomcat 7" +
" documentation locally. Once installed, you can access it by clicking <a href=\"docs/\">here</a>.</p>\n" +
'\n' +
"<p><b>tomcat7-examples</b>: This package installs a web application that allows to access the Tomcat" +
" 7 Servlet and JSP examples. Once installed, you can access it by clicking" +
" <a href=\"examples/\">here</a>.</p>\n" +
'\n' +
"<p><b>tomcat7-admin</b>: This package installs two web applications that can help managing this Tomcat" +
" instance. Once installed, you can access the <a href=\"manager/html\">manager webapp</a> and" +
" the <a href=\"host-manager/html\">host-manager webapp</a>.</p><p>\n" +
'\n' +
"</p><p>NOTE: For security reasons, using the manager webapp is restricted" +
" to users with role \"manager\"." +
" The host-manager webapp is restricted to users with role \"admin\". Users are " +
"defined in <code>/etc/tomcat7/tomcat-users.xml</code>.</p>\n" +
'\n' +
'\n' +
'\n' +
"</body></html>").getBytes(CharsetUtil.UTF_8);
static {
Random rand = ThreadLocalRandom.current();
rand.nextBytes(BYTES_SMALL);
rand.nextBytes(BYTES_LARGE);
}
protected ZlibDecoder createDecoder(ZlibWrapper wrapper) {
return createDecoder(wrapper, 0);
}
protected abstract ZlibEncoder createEncoder(ZlibWrapper wrapper);
protected abstract ZlibDecoder createDecoder(ZlibWrapper wrapper, int maxAllocation);
@Test
public void testGZIP2() throws Exception {
byte[] bytes = "message".getBytes(CharsetUtil.UTF_8);
ByteBuf data = Unpooled.wrappedBuffer(bytes);
ByteBuf deflatedData = Unpooled.wrappedBuffer(gzip(bytes));
EmbeddedChannel chDecoderGZip = new EmbeddedChannel(createDecoder(ZlibWrapper.GZIP));
try {
while (deflatedData.isReadable()) {
chDecoderGZip.writeInbound(deflatedData.readRetainedSlice(1));
}
deflatedData.release();
assertTrue(chDecoderGZip.finish());
ByteBuf buf = Unpooled.buffer();
for (;;) {
ByteBuf b = chDecoderGZip.readInbound();
if (b == null) {
break;
}
buf.writeBytes(b);
b.release();
}
assertEquals(buf, data);
assertNull(chDecoderGZip.readInbound());
data.release();
buf.release();
} finally {
dispose(chDecoderGZip);
}
}
@Test
public void testGZIP3() throws Exception {
byte[] bytes = "Foo".getBytes(CharsetUtil.UTF_8);
ByteBuf data = Unpooled.wrappedBuffer(bytes);
ByteBuf deflatedData = Unpooled.wrappedBuffer(
new byte[]{
31, -117, // magic number
8, // CM
2, // FLG.FHCRC
0, 0, 0, 0, // MTIME
0, // XFL
7, // OS
-66, -77, // CRC16
115, -53, -49, 7, 0, // compressed blocks
-63, 35, 62, -76, // CRC32
3, 0, 0, 0 // ISIZE
}
);
EmbeddedChannel chDecoderGZip = new EmbeddedChannel(createDecoder(ZlibWrapper.GZIP));
try {
while (deflatedData.isReadable()) {
chDecoderGZip.writeInbound(deflatedData.readRetainedSlice(1));
}
deflatedData.release();
assertTrue(chDecoderGZip.finish());
ByteBuf buf = Unpooled.buffer();
for (;;) {
ByteBuf b = chDecoderGZip.readInbound();
if (b == null) {
break;
}
buf.writeBytes(b);
b.release();
}
assertEquals(buf, data);
assertNull(chDecoderGZip.readInbound());
data.release();
buf.release();
} finally {
dispose(chDecoderGZip);
}
}
private void testCompress0(ZlibWrapper encoderWrapper, ZlibWrapper decoderWrapper, ByteBuf data) throws Exception {
EmbeddedChannel chEncoder = new EmbeddedChannel(createEncoder(encoderWrapper));
EmbeddedChannel chDecoderZlib = new EmbeddedChannel(createDecoder(decoderWrapper));
try {
chEncoder.writeOutbound(data.retain());
chEncoder.flush();
data.resetReaderIndex();
for (;;) {
ByteBuf deflatedData = chEncoder.readOutbound();
if (deflatedData == null) {
break;
}
chDecoderZlib.writeInbound(deflatedData);
}
byte[] decompressed = new byte[data.readableBytes()];
int offset = 0;
for (;;) {
ByteBuf buf = chDecoderZlib.readInbound();
if (buf == null) {
break;
}
int length = buf.readableBytes();
buf.readBytes(decompressed, offset, length);
offset += length;
buf.release();
if (offset == decompressed.length) {
break;
}
}
assertEquals(data, Unpooled.wrappedBuffer(decompressed));
assertNull(chDecoderZlib.readInbound());
// Closing an encoder channel will generate a footer.
assertTrue(chEncoder.finish());
for (;;) {
Object msg = chEncoder.readOutbound();
if (msg == null) {
break;
}
ReferenceCountUtil.release(msg);
}
// But, the footer will be decoded into nothing. It's only for validation.
assertFalse(chDecoderZlib.finish());
data.release();
} finally {
dispose(chEncoder);
dispose(chDecoderZlib);
}
}
private void testCompressNone(ZlibWrapper encoderWrapper, ZlibWrapper decoderWrapper) throws Exception {
EmbeddedChannel chEncoder = new EmbeddedChannel(createEncoder(encoderWrapper));
EmbeddedChannel chDecoderZlib = new EmbeddedChannel(createDecoder(decoderWrapper));
try {
// Closing an encoder channel without writing anything should generate both header and footer.
assertTrue(chEncoder.finish());
for (;;) {
ByteBuf deflatedData = chEncoder.readOutbound();
if (deflatedData == null) {
break;
}
chDecoderZlib.writeInbound(deflatedData);
}
// Decoder should not generate anything at all.
boolean decoded = false;
for (;;) {
ByteBuf buf = chDecoderZlib.readInbound();
if (buf == null) {
break;
}
buf.release();
decoded = true;
}
assertFalse(decoded, "should decode nothing");
assertFalse(chDecoderZlib.finish());
} finally {
dispose(chEncoder);
dispose(chDecoderZlib);
}
}
private static void dispose(EmbeddedChannel ch) {
if (ch.finish()) {
for (;;) {
Object msg = ch.readInbound();
if (msg == null) {
break;
}
ReferenceCountUtil.release(msg);
}
for (;;) {
Object msg = ch.readOutbound();
if (msg == null) {
break;
}
ReferenceCountUtil.release(msg);
}
}
}
// Test for https://github.com/netty/netty/issues/2572
private void testDecompressOnly(ZlibWrapper decoderWrapper, byte[] compressed, byte[] data) throws Exception {
EmbeddedChannel chDecoder = new EmbeddedChannel(createDecoder(decoderWrapper));
chDecoder.writeInbound(Unpooled.copiedBuffer(compressed));
assertTrue(chDecoder.finish());
ByteBuf decoded = Unpooled.buffer(data.length);
for (;;) {
ByteBuf buf = chDecoder.readInbound();
if (buf == null) {
break;
}
decoded.writeBytes(buf);
buf.release();
}
assertEquals(Unpooled.copiedBuffer(data), decoded);
decoded.release();
}
private void testCompressSmall(ZlibWrapper encoderWrapper, ZlibWrapper decoderWrapper) throws Exception {
testCompress0(encoderWrapper, decoderWrapper, Unpooled.wrappedBuffer(BYTES_SMALL));
testCompress0(encoderWrapper, decoderWrapper,
Unpooled.directBuffer(BYTES_SMALL.length).writeBytes(BYTES_SMALL));
}
private void testCompressLarge(ZlibWrapper encoderWrapper, ZlibWrapper decoderWrapper) throws Exception {
testCompress0(encoderWrapper, decoderWrapper, Unpooled.wrappedBuffer(BYTES_LARGE));
testCompress0(encoderWrapper, decoderWrapper,
Unpooled.directBuffer(BYTES_LARGE.length).writeBytes(BYTES_LARGE));
}
@Test
public void testZLIB() throws Exception {
testCompressNone(ZlibWrapper.ZLIB, ZlibWrapper.ZLIB);
testCompressSmall(ZlibWrapper.ZLIB, ZlibWrapper.ZLIB);
testCompressLarge(ZlibWrapper.ZLIB, ZlibWrapper.ZLIB);
testDecompressOnly(ZlibWrapper.ZLIB, deflate(BYTES_LARGE2), BYTES_LARGE2);
}
@Test
public void testNONE() throws Exception {
testCompressNone(ZlibWrapper.NONE, ZlibWrapper.NONE);
testCompressSmall(ZlibWrapper.NONE, ZlibWrapper.NONE);
testCompressLarge(ZlibWrapper.NONE, ZlibWrapper.NONE);
}
@Test
public void testGZIP() throws Exception {
testCompressNone(ZlibWrapper.GZIP, ZlibWrapper.GZIP);
testCompressSmall(ZlibWrapper.GZIP, ZlibWrapper.GZIP);
testCompressLarge(ZlibWrapper.GZIP, ZlibWrapper.GZIP);
testDecompressOnly(ZlibWrapper.GZIP, gzip(BYTES_LARGE2), BYTES_LARGE2);
}
@Test
public void testGZIPCompressOnly() throws Exception {
testGZIPCompressOnly0(null); // Do not write anything; just finish the stream.
testGZIPCompressOnly0(EmptyArrays.EMPTY_BYTES); // Write an empty array.
testGZIPCompressOnly0(BYTES_SMALL);
testGZIPCompressOnly0(BYTES_LARGE);
}
private void testGZIPCompressOnly0(byte[] data) throws IOException {
EmbeddedChannel chEncoder = new EmbeddedChannel(createEncoder(ZlibWrapper.GZIP));
if (data != null) {
chEncoder.writeOutbound(Unpooled.wrappedBuffer(data));
}
assertTrue(chEncoder.finish());
ByteBuf encoded = Unpooled.buffer();
for (;;) {
ByteBuf buf = chEncoder.readOutbound();
if (buf == null) {
break;
}
encoded.writeBytes(buf);
buf.release();
}
ByteBuf decoded = Unpooled.buffer();
try (GZIPInputStream stream = new GZIPInputStream(new ByteBufInputStream(encoded, true))) {
byte[] buf = new byte[8192];
for (;;) {
int readBytes = stream.read(buf);
if (readBytes < 0) {
break;
}
decoded.writeBytes(buf, 0, readBytes);
}
}
if (data != null) {
assertEquals(Unpooled.wrappedBuffer(data), decoded);
} else {
assertFalse(decoded.isReadable());
}
decoded.release();
}
@Test
public void testZLIB_OR_NONE() throws Exception {
testCompressNone(ZlibWrapper.NONE, ZlibWrapper.ZLIB_OR_NONE);
testCompressSmall(ZlibWrapper.NONE, ZlibWrapper.ZLIB_OR_NONE);
testCompressLarge(ZlibWrapper.NONE, ZlibWrapper.ZLIB_OR_NONE);
}
@Test
public void testZLIB_OR_NONE2() throws Exception {
testCompressNone(ZlibWrapper.ZLIB, ZlibWrapper.ZLIB_OR_NONE);
testCompressSmall(ZlibWrapper.ZLIB, ZlibWrapper.ZLIB_OR_NONE);
testCompressLarge(ZlibWrapper.ZLIB, ZlibWrapper.ZLIB_OR_NONE);
}
@Test
public void testZLIB_OR_NONE3() throws Exception {
testCompressNone(ZlibWrapper.GZIP, ZlibWrapper.ZLIB_OR_NONE);
testCompressSmall(ZlibWrapper.GZIP, ZlibWrapper.ZLIB_OR_NONE);
testCompressLarge(ZlibWrapper.GZIP, ZlibWrapper.ZLIB_OR_NONE);
}
@Test
public void testMaxAllocation() throws Exception {
int maxAllocation = 1024;
ZlibDecoder decoder = createDecoder(ZlibWrapper.ZLIB, maxAllocation);
final EmbeddedChannel chDecoder = new EmbeddedChannel(decoder);
TestByteBufAllocator alloc = new TestByteBufAllocator(chDecoder.alloc());
chDecoder.config().setAllocator(alloc);
DecompressionException e = assertThrows(DecompressionException.class, new Executable() {
@Override
public void execute() throws Throwable {
chDecoder.writeInbound(Unpooled.wrappedBuffer(deflate(BYTES_LARGE)));
}
});
assertTrue(e.getMessage().startsWith("Decompression buffer has reached maximum size"));
assertEquals(maxAllocation, alloc.getMaxAllocation());
assertTrue(decoder.isClosed());
assertFalse(chDecoder.finish());
}
private static byte[] gzip(byte[] bytes) throws IOException {
ByteArrayOutputStream out = new ByteArrayOutputStream();
GZIPOutputStream stream = new GZIPOutputStream(out);
stream.write(bytes);
stream.close();
return out.toByteArray();
}
private static byte[] deflate(byte[] bytes) throws IOException {
ByteArrayOutputStream out = new ByteArrayOutputStream();
try (OutputStream stream = new DeflaterOutputStream(out)) {
stream.write(bytes);
}
return out.toByteArray();
}
private static final | ZlibTest |
java | spring-projects__spring-security | kerberos/kerberos-web/src/test/java/org/springframework/security/kerberos/docs/AuthProviderConfigTests.java | {
"start": 1006,
"end": 1081
} | class ____ {
@Test
public void configLoads() {
}
}
| AuthProviderConfigTests |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java | {
"start": 3731,
"end": 4806
} | class ____ implements TypeParser {
final Function<MappingParserContext, MetadataFieldMapper> defaultMapperParser;
final Function<MappingParserContext, Builder> builderFunction;
public ConfigurableTypeParser(
Function<MappingParserContext, MetadataFieldMapper> defaultMapperParser,
Function<MappingParserContext, Builder> builderFunction
) {
this.defaultMapperParser = defaultMapperParser;
this.builderFunction = builderFunction;
}
@Override
public Builder parse(String name, Map<String, Object> node, MappingParserContext parserContext) throws MapperParsingException {
Builder builder = builderFunction.apply(parserContext);
builder.parseMetadataField(name, parserContext, node);
return builder;
}
@Override
public MetadataFieldMapper getDefault(MappingParserContext parserContext) {
return defaultMapperParser.apply(parserContext);
}
}
public abstract static | ConfigurableTypeParser |
java | apache__kafka | streams/src/main/java/org/apache/kafka/streams/processor/internals/ChangelogRecordDeserializationHelper.java | {
"start": 1514,
"end": 3378
} | class ____ {
public static final Logger log = LoggerFactory.getLogger(ChangelogRecordDeserializationHelper.class);
private static final byte[] V_0_CHANGELOG_VERSION_HEADER_VALUE = {(byte) 0};
public static final String CHANGELOG_VERSION_HEADER_KEY = "v";
public static final String CHANGELOG_POSITION_HEADER_KEY = "c";
public static final RecordHeader CHANGELOG_VERSION_HEADER_RECORD_CONSISTENCY = new RecordHeader(
CHANGELOG_VERSION_HEADER_KEY, V_0_CHANGELOG_VERSION_HEADER_VALUE);
public static void applyChecksAndUpdatePosition(
final ConsumerRecord<byte[], byte[]> record,
final boolean consistencyEnabled,
final Position position
) {
if (!consistencyEnabled) {
return;
}
final Header versionHeader = record.headers().lastHeader(
ChangelogRecordDeserializationHelper.CHANGELOG_VERSION_HEADER_KEY);
if (versionHeader == null) {
return;
} else {
switch (versionHeader.value()[0]) {
case 0:
final Header vectorHeader = record.headers().lastHeader(CHANGELOG_POSITION_HEADER_KEY);
if (vectorHeader == null) {
throw new StreamsException("This should not happen. Consistency is enabled but the changelog "
+ "contains records without consistency information.");
}
position.merge(PositionSerde.deserialize(ByteBuffer.wrap(vectorHeader.value())));
break;
default:
log.warn("Changelog records have been encoded using a larger version than this server understands." +
"Please upgrade your server.");
}
}
}
}
| ChangelogRecordDeserializationHelper |
java | grpc__grpc-java | okhttp/third_party/okhttp/main/java/io/grpc/okhttp/internal/proxy/HttpUrl.java | {
"start": 3344,
"end": 16530
} | class ____ {
String scheme;
String host;
int port = -1;
public Builder() {
}
public Builder scheme(String scheme) {
if (scheme == null) {
throw new IllegalArgumentException("scheme == null");
} else if (scheme.equalsIgnoreCase("http")) {
this.scheme = "http";
} else if (scheme.equalsIgnoreCase("https")) {
this.scheme = "https";
} else {
throw new IllegalArgumentException("unexpected scheme: " + scheme);
}
return this;
}
/**
* @param host either a regular hostname, International Domain Name, IPv4 address, or IPv6
* address.
*/
public Builder host(String host) {
if (host == null) throw new IllegalArgumentException("host == null");
String encoded = canonicalizeHost(host, 0, host.length());
if (encoded == null) throw new IllegalArgumentException("unexpected host: " + host);
this.host = encoded;
return this;
}
public Builder port(int port) {
if (port <= 0 || port > 65535) throw new IllegalArgumentException("unexpected port: " + port);
this.port = port;
return this;
}
int effectivePort() {
return port != -1 ? port : defaultPort(scheme);
}
public HttpUrl build() {
if (scheme == null) throw new IllegalStateException("scheme == null");
if (host == null) throw new IllegalStateException("host == null");
return new HttpUrl(this);
}
@Override public String toString() {
StringBuilder result = new StringBuilder();
result.append(scheme);
result.append("://");
if (host.indexOf(':') != -1) {
// Host is an IPv6 address.
result.append('[');
result.append(host);
result.append(']');
} else {
result.append(host);
}
int effectivePort = effectivePort();
if (effectivePort != defaultPort(scheme)) {
result.append(':');
result.append(effectivePort);
}
return result.toString();
}
private static String canonicalizeHost(String input, int pos, int limit) {
// Start by percent decoding the host. The WHATWG spec suggests doing this only after we've
// checked for IPv6 square braces. But Chrome does it first, and that's more lenient.
String percentDecoded = percentDecode(input, pos, limit, false);
// If the input is encased in square braces "[...]", drop 'em. We have an IPv6 address.
if (percentDecoded.startsWith("[") && percentDecoded.endsWith("]")) {
InetAddress inetAddress = decodeIpv6(percentDecoded, 1, percentDecoded.length() - 1);
if (inetAddress == null) return null;
byte[] address = inetAddress.getAddress();
if (address.length == 16) return inet6AddressToAscii(address);
throw new AssertionError();
}
return domainToAscii(percentDecoded);
}
/** Decodes an IPv6 address like 1111:2222:3333:4444:5555:6666:7777:8888 or ::1. */
private static InetAddress decodeIpv6(String input, int pos, int limit) {
byte[] address = new byte[16];
int b = 0;
int compress = -1;
int groupOffset = -1;
for (int i = pos; i < limit; ) {
if (b == address.length) return null; // Too many groups.
// Read a delimiter.
if (i + 2 <= limit && input.regionMatches(i, "::", 0, 2)) {
// Compression "::" delimiter, which is anywhere in the input, including its prefix.
if (compress != -1) return null; // Multiple "::" delimiters.
i += 2;
b += 2;
compress = b;
if (i == limit) break;
} else if (b != 0) {
// Group separator ":" delimiter.
if (input.regionMatches(i, ":", 0, 1)) {
i++;
} else if (input.regionMatches(i, ".", 0, 1)) {
// If we see a '.', rewind to the beginning of the previous group and parse as IPv4.
if (!decodeIpv4Suffix(input, groupOffset, limit, address, b - 2)) return null;
b += 2; // We rewound two bytes and then added four.
break;
} else {
return null; // Wrong delimiter.
}
}
// Read a group, one to four hex digits.
int value = 0;
groupOffset = i;
for (; i < limit; i++) {
char c = input.charAt(i);
int hexDigit = decodeHexDigit(c);
if (hexDigit == -1) break;
value = (value << 4) + hexDigit;
}
int groupLength = i - groupOffset;
if (groupLength == 0 || groupLength > 4) return null; // Group is the wrong size.
// We've successfully read a group. Assign its value to our byte array.
address[b++] = (byte) ((value >>> 8) & 0xff);
address[b++] = (byte) (value & 0xff);
}
// All done. If compression happened, we need to move bytes to the right place in the
// address. Here's a sample:
//
// input: "1111:2222:3333::7777:8888"
// before: { 11, 11, 22, 22, 33, 33, 00, 00, 77, 77, 88, 88, 00, 00, 00, 00 }
// compress: 6
// b: 10
// after: { 11, 11, 22, 22, 33, 33, 00, 00, 00, 00, 00, 00, 77, 77, 88, 88 }
//
if (b != address.length) {
if (compress == -1) return null; // Address didn't have compression or enough groups.
System.arraycopy(address, compress, address, address.length - (b - compress), b - compress);
Arrays.fill(address, compress, compress + (address.length - b), (byte) 0);
}
try {
return InetAddress.getByAddress(address);
} catch (UnknownHostException e) {
throw new AssertionError();
}
}
/** Decodes an IPv4 address suffix of an IPv6 address, like 1111::5555:6666:192.168.0.1. */
private static boolean decodeIpv4Suffix(
String input, int pos, int limit, byte[] address, int addressOffset) {
int b = addressOffset;
for (int i = pos; i < limit; ) {
if (b == address.length) return false; // Too many groups.
// Read a delimiter.
if (b != addressOffset) {
if (input.charAt(i) != '.') return false; // Wrong delimiter.
i++;
}
// Read 1 or more decimal digits for a value in 0..255.
int value = 0;
int groupOffset = i;
for (; i < limit; i++) {
char c = input.charAt(i);
if (c < '0' || c > '9') break;
if (value == 0 && groupOffset != i) return false; // Reject unnecessary leading '0's.
value = (value * 10) + c - '0';
if (value > 255) return false; // Value out of range.
}
int groupLength = i - groupOffset;
if (groupLength == 0) return false; // No digits.
// We've successfully read a byte.
address[b++] = (byte) value;
}
if (b != addressOffset + 4) return false; // Too few groups. We wanted exactly four.
return true; // Success.
}
/**
* Performs IDN ToASCII encoding and canonicalize the result to lowercase. e.g. This converts
* {@code ☃.net} to {@code xn--n3h.net}, and {@code WwW.GoOgLe.cOm} to {@code www.google.com}.
* {@code null} will be returned if the input cannot be ToASCII encoded or if the result
* contains unsupported ASCII characters.
*/
private static String domainToAscii(String input) {
try {
String result = IDN.toASCII(input).toLowerCase(Locale.US);
if (result.isEmpty()) return null;
// Confirm that the IDN ToASCII result doesn't contain any illegal characters.
if (containsInvalidHostnameAsciiCodes(result)) {
return null;
}
// TODO: implement all label limits.
return result;
} catch (IllegalArgumentException e) {
return null;
}
}
private static boolean containsInvalidHostnameAsciiCodes(String hostnameAscii) {
for (int i = 0; i < hostnameAscii.length(); i++) {
char c = hostnameAscii.charAt(i);
// The WHATWG Host parsing rules accepts some character codes which are invalid by
// definition for OkHttp's host header checks (and the WHATWG Host syntax definition). Here
// we rule out characters that would cause problems in host headers.
if (c <= '\u001f' || c >= '\u007f') {
return true;
}
// Check for the characters mentioned in the WHATWG Host parsing spec:
// U+0000, U+0009, U+000A, U+000D, U+0020, "#", "%", "/", ":", "?", "@", "[", "\", and "]"
// (excluding the characters covered above).
if (" #%/:?@[\\]".indexOf(c) != -1) {
return true;
}
}
return false;
}
private static String inet6AddressToAscii(byte[] address) {
// Go through the address looking for the longest run of 0s. Each group is 2-bytes.
int longestRunOffset = -1;
int longestRunLength = 0;
for (int i = 0; i < address.length; i += 2) {
int currentRunOffset = i;
while (i < 16 && address[i] == 0 && address[i + 1] == 0) {
i += 2;
}
int currentRunLength = i - currentRunOffset;
if (currentRunLength > longestRunLength) {
longestRunOffset = currentRunOffset;
longestRunLength = currentRunLength;
}
}
// Emit each 2-byte group in hex, separated by ':'. The longest run of zeroes is "::".
Buffer result = new Buffer();
for (int i = 0; i < address.length; ) {
if (i == longestRunOffset) {
result.writeByte(':');
i += longestRunLength;
if (i == 16) result.writeByte(':');
} else {
if (i > 0) result.writeByte(':');
int group = (address[i] & 0xff) << 8 | (address[i + 1] & 0xff);
result.writeHexadecimalUnsignedLong(group);
i += 2;
}
}
return result.readUtf8();
}
}
static String percentDecode(String encoded, int pos, int limit, boolean plusIsSpace) {
for (int i = pos; i < limit; i++) {
char c = encoded.charAt(i);
if (c == '%' || (c == '+' && plusIsSpace)) {
// Slow path: the character at i requires decoding!
Buffer out = new Buffer();
out.writeUtf8(encoded, pos, i);
percentDecode(out, encoded, i, limit, plusIsSpace);
return out.readUtf8();
}
}
// Fast path: no characters in [pos..limit) required decoding.
return encoded.substring(pos, limit);
}
static void percentDecode(Buffer out, String encoded, int pos, int limit, boolean plusIsSpace) {
int codePoint;
for (int i = pos; i < limit; i += Character.charCount(codePoint)) {
codePoint = encoded.codePointAt(i);
if (codePoint == '%' && i + 2 < limit) {
int d1 = decodeHexDigit(encoded.charAt(i + 1));
int d2 = decodeHexDigit(encoded.charAt(i + 2));
if (d1 != -1 && d2 != -1) {
out.writeByte((d1 << 4) + d2);
i += 2;
continue;
}
} else if (codePoint == '+' && plusIsSpace) {
out.writeByte(' ');
continue;
}
out.writeUtf8CodePoint(codePoint);
}
}
static int decodeHexDigit(char c) {
if (c >= '0' && c <= '9') return c - '0';
if (c >= 'a' && c <= 'f') return c - 'a' + 10;
if (c >= 'A' && c <= 'F') return c - 'A' + 10;
return -1;
}
static void canonicalize(Buffer out, String input, int pos, int limit,
String encodeSet, boolean alreadyEncoded, boolean plusIsSpace, boolean asciiOnly) {
Buffer utf8Buffer = null; // Lazily allocated.
int codePoint;
for (int i = pos; i < limit; i += Character.charCount(codePoint)) {
codePoint = input.codePointAt(i);
if (alreadyEncoded
&& (codePoint == '\t' || codePoint == '\n' || codePoint == '\f' || codePoint == '\r')) {
// Skip this character.
} else if (codePoint == '+' && plusIsSpace) {
// Encode '+' as '%2B' since we permit ' ' to be encoded as either '+' or '%20'.
out.writeUtf8(alreadyEncoded ? "+" : "%2B");
} else if (codePoint < 0x20
|| codePoint == 0x7f
|| (codePoint >= 0x80 && asciiOnly)
|| encodeSet.indexOf(codePoint) != -1
|| (codePoint == '%' && !alreadyEncoded)) {
// Percent encode this character.
if (utf8Buffer == null) {
utf8Buffer = new Buffer();
}
utf8Buffer.writeUtf8CodePoint(codePoint);
while (!utf8Buffer.exhausted()) {
try {
fakeEofExceptionMethod(); // Okio 2.x can throw EOFException from readByte()
int b = utf8Buffer.readByte() & 0xff;
out.writeByte('%');
out.writeByte(HEX_DIGITS[(b >> 4) & 0xf]);
out.writeByte(HEX_DIGITS[b & 0xf]);
} catch (EOFException e) {
throw new IndexOutOfBoundsException(e.getMessage());
}
}
} else {
// This character doesn't need encoding. Just copy it over.
out.writeUtf8CodePoint(codePoint);
}
}
}
private static void fakeEofExceptionMethod() throws EOFException {}
}
| Builder |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/StreamSourceContextIdleDetectionTests.java | {
"start": 1884,
"end": 1957
} | class ____ {
/** The tests in this | StreamSourceContextIdleDetectionTests |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/operators/coordination/CoordinatorEventsExactlyOnceITCase.java | {
"start": 10928,
"end": 10999
} | class ____ implements OperatorEvent {}
protected static final | EndEvent |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java | {
"start": 8643,
"end": 18466
} | class ____ extends EditLogTailer {
public EditLogTailerForTest(FSNamesystem namesystem, Configuration conf) {
super(namesystem, conf);
}
public void catchupDuringFailover() throws IOException {
synchronized (TestDelegationTokensWithHA.this) {
while (!catchup) {
try {
LOG.info("The editlog tailer is waiting to catchup...");
TestDelegationTokensWithHA.this.wait();
} catch (InterruptedException e) {}
}
}
super.catchupDuringFailover();
}
}
/**
* Test if correct exception (StandbyException or RetriableException) can be
* thrown during the NN failover.
*/
@Test
@Timeout(value = 300)
public void testDelegationTokenDuringNNFailover() throws Exception {
EditLogTailer editLogTailer = nn1.getNamesystem().getEditLogTailer();
// stop the editLogTailer of nn1
editLogTailer.stop();
Configuration conf = (Configuration) Whitebox.getInternalState(
editLogTailer, "conf");
nn1.getNamesystem().setEditLogTailerForTests(
new EditLogTailerForTest(nn1.getNamesystem(), conf));
// create token
final Token<DelegationTokenIdentifier> token =
getDelegationToken(fs, "JobTracker");
DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
byte[] tokenId = token.getIdentifier();
identifier.readFields(new DataInputStream(
new ByteArrayInputStream(tokenId)));
// Ensure that it's present in the nn0 secret manager and can
// be renewed directly from there.
LOG.info("A valid token should have non-null password, " +
"and should be renewed successfully");
assertTrue(null != dtSecretManager.retrievePassword(identifier));
dtSecretManager.renewToken(token, "JobTracker");
// transition nn0 to standby
cluster.transitionToStandby(0);
try {
cluster.getNameNodeRpc(0).renewDelegationToken(token);
fail("StandbyException is expected since nn0 is in standby state");
} catch (StandbyException e) {
GenericTestUtils.assertExceptionContains(
HAServiceState.STANDBY.toString(), e);
}
new SubjectInheritingThread() {
@Override
public void work() {
try {
cluster.transitionToActive(1);
} catch (Exception e) {
LOG.error("Transition nn1 to active failed", e);
}
}
}.start();
Thread.sleep(1000);
try {
nn1.getNamesystem().verifyToken(token.decodeIdentifier(),
token.getPassword());
fail("RetriableException/StandbyException is expected since nn1 is in transition");
} catch (IOException e) {
assertTrue(e instanceof StandbyException
|| e instanceof RetriableException);
LOG.info("Got expected exception", e);
}
catchup = true;
synchronized (this) {
this.notifyAll();
}
Configuration clientConf = dfs.getConf();
doRenewOrCancel(token, clientConf, TokenTestAction.RENEW);
doRenewOrCancel(token, clientConf, TokenTestAction.CANCEL);
}
@Test
@Timeout(value = 300)
public void testDelegationTokenWithDoAs() throws Exception {
final Token<DelegationTokenIdentifier> token =
getDelegationToken(fs, "JobTracker");
final UserGroupInformation longUgi = UserGroupInformation
.createRemoteUser("JobTracker/foo.com@FOO.COM");
final UserGroupInformation shortUgi = UserGroupInformation
.createRemoteUser("JobTracker");
longUgi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
// try renew with long name
token.renew(conf);
return null;
}
});
shortUgi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
token.renew(conf);
return null;
}
});
longUgi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
token.cancel(conf);
return null;
}
});
}
@Test
@Timeout(value = 300)
public void testHAUtilClonesDelegationTokens() throws Exception {
final Token<DelegationTokenIdentifier> token =
getDelegationToken(fs, "JobTracker");
UserGroupInformation ugi = UserGroupInformation.createRemoteUser("test");
URI haUri = new URI("hdfs://my-ha-uri/");
token.setService(HAUtilClient.buildTokenServiceForLogicalUri(haUri,
HdfsConstants.HDFS_URI_SCHEME));
ugi.addToken(token);
Collection<InetSocketAddress> nnAddrs = new HashSet<InetSocketAddress>();
nnAddrs.add(new InetSocketAddress("localhost",
nn0.getNameNodeAddress().getPort()));
nnAddrs.add(new InetSocketAddress("localhost",
nn1.getNameNodeAddress().getPort()));
HAUtilClient.cloneDelegationTokenForLogicalUri(ugi, haUri, nnAddrs);
Collection<Token<? extends TokenIdentifier>> tokens = ugi.getTokens();
assertEquals(3, tokens.size());
LOG.info("Tokens:\n" + Joiner.on("\n").join(tokens));
DelegationTokenSelector dts = new DelegationTokenSelector();
// check that the token selected for one of the physical IPC addresses
// matches the one we received
for (InetSocketAddress addr : nnAddrs) {
Text ipcDtService = SecurityUtil.buildTokenService(addr);
Token<DelegationTokenIdentifier> token2 =
dts.selectToken(ipcDtService, ugi.getTokens());
assertNotNull(token2);
assertArrayEquals(token.getIdentifier(), token2.getIdentifier());
assertArrayEquals(token.getPassword(), token2.getPassword());
}
// switch to host-based tokens, shouldn't match existing tokens
SecurityUtilTestHelper.setTokenServiceUseIp(false);
for (InetSocketAddress addr : nnAddrs) {
Text ipcDtService = SecurityUtil.buildTokenService(addr);
Token<DelegationTokenIdentifier> token2 =
dts.selectToken(ipcDtService, ugi.getTokens());
assertNull(token2);
}
// reclone the tokens, and see if they match now
HAUtilClient.cloneDelegationTokenForLogicalUri(ugi, haUri, nnAddrs);
for (InetSocketAddress addr : nnAddrs) {
Text ipcDtService = SecurityUtil.buildTokenService(addr);
Token<DelegationTokenIdentifier> token2 =
dts.selectToken(ipcDtService, ugi.getTokens());
assertNotNull(token2);
assertArrayEquals(token.getIdentifier(), token2.getIdentifier());
assertArrayEquals(token.getPassword(), token2.getPassword());
}
}
/**
* HDFS-3062: DistributedFileSystem.getCanonicalServiceName() throws an
* exception if the URI is a logical URI. This bug fails the combination of
* ha + mapred + security.
*/
@Test
@Timeout(value = 300)
public void testDFSGetCanonicalServiceName() throws Exception {
URI hAUri = HATestUtil.getLogicalUri(cluster);
String haService = HAUtilClient.buildTokenServiceForLogicalUri(hAUri,
HdfsConstants.HDFS_URI_SCHEME).toString();
assertEquals(haService, dfs.getCanonicalServiceName());
final String renewer = UserGroupInformation.getCurrentUser().getShortUserName();
final Token<DelegationTokenIdentifier> token =
getDelegationToken(dfs, renewer);
assertEquals(haService, token.getService().toString());
// make sure the logical uri is handled correctly
token.renew(dfs.getConf());
token.cancel(dfs.getConf());
}
@Test
@Timeout(value = 300)
public void testHdfsGetCanonicalServiceName() throws Exception {
Configuration conf = dfs.getConf();
URI haUri = HATestUtil.getLogicalUri(cluster);
AbstractFileSystem afs = AbstractFileSystem.createFileSystem(haUri, conf);
String haService = HAUtilClient.buildTokenServiceForLogicalUri(haUri,
HdfsConstants.HDFS_URI_SCHEME).toString();
assertEquals(haService, afs.getCanonicalServiceName());
Token<?> token = afs.getDelegationTokens(
UserGroupInformation.getCurrentUser().getShortUserName()).get(0);
assertEquals(haService, token.getService().toString());
// make sure the logical uri is handled correctly
token.renew(conf);
token.cancel(conf);
}
@Test
@Timeout(value = 300)
public void testCancelAndUpdateDelegationTokens() throws Exception {
// Create UGI with token1
String user = UserGroupInformation.getCurrentUser().getShortUserName();
UserGroupInformation ugi1 = UserGroupInformation.createRemoteUser(user);
ugi1.doAs(new PrivilegedExceptionAction<Void>() {
public Void run() throws Exception {
final Token<DelegationTokenIdentifier> token1 =
getDelegationToken(fs, "JobTracker");
UserGroupInformation.getCurrentUser()
.addToken(token1.getService(), token1);
FileSystem fs1 = HATestUtil.configureFailoverFs(cluster, conf);
// Cancel token1
doRenewOrCancel(token1, conf, TokenTestAction.CANCEL);
// Update UGI with token2
final Token<DelegationTokenIdentifier> token2 =
getDelegationToken(fs, "JobTracker");
UserGroupInformation.getCurrentUser()
.addToken(token2.getService(), token2);
// Check whether token2 works
fs1.listFiles(new Path("/"), false);
return null;
}
});
}
@SuppressWarnings("unchecked")
private Token<DelegationTokenIdentifier> getDelegationToken(FileSystem fs,
String renewer) throws IOException {
final Token<?> tokens[] = fs.addDelegationTokens(renewer, null);
assertEquals(1, tokens.length);
return (Token<DelegationTokenIdentifier>) tokens[0];
}
| EditLogTailerForTest |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/deletedetached/DeleteDetachedOptionalityViolationTest.java | {
"start": 1823,
"end": 1920
} | class ____ {
@GeneratedValue
@Id
long id;
@Basic(optional = false)
String stuff;
}
}
| Thing |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/JUnit4TestsNotRunWithinEnclosedTest.java | {
"start": 2120,
"end": 2639
} | class ____ {
@Test
// BUG: Diagnostic contains:
public void test() {}
}
""")
.doTest();
}
@Test
public void refactoring_changesToUseJunitRunner() {
refactoring
.addInputLines(
"FooTest.java",
"""
import org.junit.Test;
import org.junit.experimental.runners.Enclosed;
import org.junit.runner.RunWith;
@RunWith(Enclosed.class)
public final | FooTest |
java | apache__dubbo | dubbo-metrics/dubbo-metrics-api/src/main/java/org/apache/dubbo/metrics/model/MethodMetric.java | {
"start": 1441,
"end": 1470
} | class ____ method.
*/
public | for |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/type/TypeFactoryTest.java | {
"start": 1102,
"end": 1153
} | class ____<T> extends ArrayList<T> { }
| GenericList |
java | alibaba__druid | core/src/main/java/com/alibaba/druid/sql/dialect/oracle/ast/stmt/OraclePrimaryKey.java | {
"start": 1087,
"end": 3179
} | class ____ extends SQLPrimaryKeyImpl implements OracleConstraint, SQLPrimaryKey, SQLTableElement, SQLTableConstraint {
private OracleUsingIndexClause using;
private SQLName exceptionsInto;
private Boolean enable;
private Initially initially;
private Boolean deferrable;
@Override
protected void accept0(SQLASTVisitor visitor) {
this.accept0((OracleASTVisitor) visitor);
}
@Override
public void accept0(OracleASTVisitor visitor) {
if (visitor.visit(this)) {
acceptChild(visitor, getName());
acceptChild(visitor, getColumns());
acceptChild(visitor, using);
acceptChild(visitor, exceptionsInto);
}
visitor.endVisit(this);
}
public Boolean getDeferrable() {
return deferrable;
}
public void setDeferrable(Boolean deferrable) {
this.deferrable = deferrable;
}
public OracleUsingIndexClause getUsing() {
return using;
}
public void setUsing(OracleUsingIndexClause using) {
this.using = using;
}
public SQLName getExceptionsInto() {
return exceptionsInto;
}
public void setExceptionsInto(SQLName exceptionsInto) {
this.exceptionsInto = exceptionsInto;
}
public Boolean getEnable() {
return enable;
}
public void setEnable(Boolean enable) {
this.enable = enable;
}
public Initially getInitially() {
return initially;
}
public void setInitially(Initially initially) {
this.initially = initially;
}
public void cloneTo(OraclePrimaryKey x) {
super.cloneTo(x);
if (using != null) {
x.setUsing(using.clone());
}
if (exceptionsInto != null) {
x.setExceptionsInto(exceptionsInto.clone());
}
x.enable = enable;
x.initially = initially;
x.deferrable = deferrable;
}
public OraclePrimaryKey clone() {
OraclePrimaryKey x = new OraclePrimaryKey();
cloneTo(x);
return x;
}
}
| OraclePrimaryKey |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java | {
"start": 2306,
"end": 2515
} | class ____ an instance of {@link BPServiceActor} for each NN,
* and delegates calls to both NNs.
* It also maintains the state about which of the NNs is considered active.
*/
@InterfaceAudience.Private
| manages |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/NodeAttributesStoreEventType.java | {
"start": 933,
"end": 989
} | enum ____ {
STORE_ATTRIBUTES
}
| NodeAttributesStoreEventType |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/scheduler/adaptive/Executing.java | {
"start": 2609,
"end": 11207
} | class ____ extends StateWithExecutionGraph
implements ResourceListener, StateTransitionManager.Context, CheckpointStatsListener {
private final Context context;
private final StateTransitionManager stateTransitionManager;
private final int rescaleOnFailedCheckpointCount;
// null indicates that there was no change event observed, yet
@Nullable private AtomicInteger failedCheckpointCountdown;
Executing(
ExecutionGraph executionGraph,
ExecutionGraphHandler executionGraphHandler,
OperatorCoordinatorHandler operatorCoordinatorHandler,
Logger logger,
Context context,
ClassLoader userCodeClassLoader,
List<ExceptionHistoryEntry> failureCollection,
Function<StateTransitionManager.Context, StateTransitionManager>
stateTransitionManagerFactory,
int rescaleOnFailedCheckpointCount) {
super(
context,
executionGraph,
executionGraphHandler,
operatorCoordinatorHandler,
logger,
userCodeClassLoader,
failureCollection);
this.context = context;
Preconditions.checkState(
executionGraph.getState() == JobStatus.RUNNING, "Assuming running execution graph");
this.stateTransitionManager = stateTransitionManagerFactory.apply(this);
Preconditions.checkArgument(
rescaleOnFailedCheckpointCount > 0,
"The rescaleOnFailedCheckpointCount should be larger than 0.");
this.rescaleOnFailedCheckpointCount = rescaleOnFailedCheckpointCount;
this.failedCheckpointCountdown = null;
deploy();
// check if new resources have come available in the meantime
context.runIfState(
this,
() -> {
stateTransitionManager.onChange();
stateTransitionManager.onTrigger();
},
Duration.ZERO);
}
@Override
public boolean hasSufficientResources() {
return parallelismChanged() && context.hasSufficientResources();
}
@Override
public boolean hasDesiredResources() {
return parallelismChanged() && context.hasDesiredResources();
}
private boolean parallelismChanged() {
final VertexParallelism currentParallelism =
extractCurrentVertexParallelism(getExecutionGraph());
return context.getAvailableVertexParallelism()
.map(
availableParallelism ->
availableParallelism.getVertices().stream()
.anyMatch(
vertex ->
currentParallelism.getParallelism(vertex)
!= availableParallelism
.getParallelism(vertex)))
.orElse(false);
}
private static VertexParallelism extractCurrentVertexParallelism(
AccessExecutionGraph executionGraph) {
return new VertexParallelism(
executionGraph.getAllVertices().values().stream()
.collect(
Collectors.toMap(
AccessExecutionJobVertex::getJobVertexId,
AccessExecutionJobVertex::getParallelism)));
}
@Override
public ScheduledFuture<?> scheduleOperation(Runnable callback, Duration delay) {
return context.runIfState(this, callback, delay);
}
@Override
public void transitionToSubsequentState() {
context.goToRestarting(
getExecutionGraph(),
getExecutionGraphHandler(),
getOperatorCoordinatorHandler(),
Duration.ofMillis(0L),
context.getAvailableVertexParallelism()
.orElseThrow(
() ->
new IllegalStateException(
"Resources must be available when rescaling.")),
getFailures());
}
@Override
public JobStatus getJobStatus() {
return JobStatus.RUNNING;
}
@Override
public void cancel() {
context.goToCanceling(
getExecutionGraph(),
getExecutionGraphHandler(),
getOperatorCoordinatorHandler(),
getFailures());
}
@Override
void onFailure(Throwable cause, CompletableFuture<Map<String, String>> failureLabels) {
FailureResultUtil.restartOrFail(
context.howToHandleFailure(cause, failureLabels), context, this);
}
@Override
void onGloballyTerminalState(JobStatus globallyTerminalState) {
context.goToFinished(ArchivedExecutionGraph.createFrom(getExecutionGraph()));
}
@Override
public void onLeave(Class<? extends State> newState) {
stateTransitionManager.close();
super.onLeave(newState);
}
private void deploy() {
for (ExecutionJobVertex executionJobVertex :
getExecutionGraph().getVerticesTopologically()) {
for (ExecutionVertex executionVertex : executionJobVertex.getTaskVertices()) {
if (executionVertex.getExecutionState() == ExecutionState.CREATED
|| executionVertex.getExecutionState() == ExecutionState.SCHEDULED) {
deploySafely(executionVertex);
}
}
}
}
private void deploySafely(ExecutionVertex executionVertex) {
try {
executionVertex.deploy();
} catch (JobException e) {
handleDeploymentFailure(executionVertex, e);
}
}
private void handleDeploymentFailure(ExecutionVertex executionVertex, JobException e) {
executionVertex.markFailed(e);
}
@Override
public void onNewResourcesAvailable() {
stateTransitionManager.onChange();
initializeFailedCheckpointCountdownIfUnset();
}
@Override
public void onNewResourceRequirements() {
stateTransitionManager.onChange();
initializeFailedCheckpointCountdownIfUnset();
}
@Override
public void onCompletedCheckpoint() {
triggerPotentialRescale();
}
@Override
public void onFailedCheckpoint() {
if (this.failedCheckpointCountdown != null
&& this.failedCheckpointCountdown.decrementAndGet() <= 0) {
triggerPotentialRescale();
}
}
private void triggerPotentialRescale() {
stateTransitionManager.onTrigger();
this.failedCheckpointCountdown = null;
}
private void initializeFailedCheckpointCountdownIfUnset() {
if (failedCheckpointCountdown == null) {
this.failedCheckpointCountdown = new AtomicInteger(this.rescaleOnFailedCheckpointCount);
}
}
CompletableFuture<String> stopWithSavepoint(
@Nullable final String targetDirectory,
boolean terminate,
SavepointFormatType formatType) {
final ExecutionGraph executionGraph = getExecutionGraph();
StopWithSavepointTerminationManager.checkSavepointActionPreconditions(
executionGraph.getCheckpointCoordinator(),
targetDirectory,
executionGraph.getJobID(),
getLogger());
getLogger().info("Triggering stop-with-savepoint for job {}.", executionGraph.getJobID());
CheckpointScheduling schedulingProvider = new CheckpointSchedulingProvider(executionGraph);
schedulingProvider.stopCheckpointScheduler();
final CompletableFuture<String> savepointFuture =
Objects.requireNonNull(executionGraph.getCheckpointCoordinator())
.triggerSynchronousSavepoint(terminate, targetDirectory, formatType)
.thenApply(CompletedCheckpoint::getExternalPointer);
return context.goToStopWithSavepoint(
executionGraph,
getExecutionGraphHandler(),
getOperatorCoordinatorHandler(),
schedulingProvider,
savepointFuture,
getFailures());
}
/** Context of the {@link Executing} state. */
| Executing |
java | junit-team__junit5 | jupiter-tests/src/test/java/org/junit/jupiter/api/condition/DisabledOnOsIntegrationTests.java | {
"start": 1966,
"end": 4576
} | class ____ {
@Test
@Disabled("Only used in a unit test via reflection")
void enabledBecauseAnnotationIsNotPresent() {
}
@Test
@Disabled("Only used in a unit test via reflection")
@DisabledOnOs({})
void missingOsAndArchitectureDeclaration() {
}
@Test
@DisabledOnOs(value = { AIX, FREEBSD, LINUX, MAC, OPENBSD, WINDOWS, SOLARIS,
OTHER }, disabledReason = "Disabled on every OS")
void disabledOnEveryOs() {
fail("should be disabled");
}
@Test
@DisabledOnOs(AIX)
void aix() {
assertFalse(onAix());
}
@Test
@DisabledOnOs(FREEBSD)
void freebsd() {
assertFalse(onFreebsd());
}
@Test
@DisabledOnOs(LINUX)
void linux() {
assertFalse(onLinux());
}
@Test
@DisabledOnOs(MAC)
void macOs() {
assertFalse(onMac());
}
@Test
@DisabledOnMac
void macOsWithComposedAnnotation() {
assertFalse(onMac());
}
@Test
@DisabledOnOs(OPENBSD)
void openbsd() {
assertFalse(onOpenbsd());
}
@Test
@DisabledOnOs(WINDOWS)
void windows() {
assertFalse(onWindows());
}
@Test
@DisabledOnOs(SOLARIS)
void solaris() {
assertFalse(onSolaris());
}
@Test
@DisabledOnOs(OTHER)
void other() {
assertTrue(onAix() || onFreebsd() || onLinux() || onMac() || onOpenbsd() || onSolaris() || onWindows());
}
@Test
@DisabledOnOs(architectures = "x86_64")
void architectureX86_64() {
assertFalse(onArchitecture("x_86_64"));
}
@Test
@DisabledOnOs(architectures = "aarch64")
void architectureAarch64() {
assertFalse(onArchitecture("aarch64"));
}
@Test
@DisabledOnOs(value = MAC, architectures = "x86_64")
void architectureX86_64WithMacOs() {
assertFalse(onMac() && onArchitecture("x_86_64"));
}
@Test
@DisabledOnOs(value = WINDOWS, architectures = "x86_64")
void architectureX86_64WithWindows() {
assertFalse(onWindows() && onArchitecture("x86_64"));
}
@Test
@DisabledOnOs(value = LINUX, architectures = "x86_64")
void architectureX86_64WithLinux() {
assertFalse(onLinux() && onArchitecture("x86_64"));
}
@Test
@DisabledOnOs(value = MAC, architectures = "aarch64")
void aarch64WithMacOs() {
assertFalse(onMac() && onArchitecture("aarch64"));
}
@Test
@DisabledOnOs(value = WINDOWS, architectures = "aarch64")
void aarch64WithWindows() {
assertFalse(onWindows() && onArchitecture("aarch64"));
}
@Test
@DisabledOnOs(value = LINUX, architectures = "aarch64")
void aarch64WithLinux() {
assertFalse(onLinux() && onArchitecture("aarch64"));
}
// -------------------------------------------------------------------------
@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
@DisabledOnOs(MAC)
@ | DisabledOnOsIntegrationTests |
java | apache__camel | components/camel-joor/src/test/java/org/apache/camel/language/joor/JoorOptionalPredicateTest.java | {
"start": 1040,
"end": 2011
} | class ____ extends CamelTestSupport {
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start")
.choice()
.when()
.joor("optionalBody.isPresent()")
.to("mock:body")
.otherwise()
.to("mock:null");
}
};
}
@Test
public void testPredicate() throws Exception {
getMockEndpoint("mock:body").expectedMessageCount(1);
getMockEndpoint("mock:null").expectedMessageCount(3);
template.sendBody("direct:start", null);
template.sendBody("direct:start", 5);
template.sendBody("direct:start", null);
template.sendBody("direct:start", null);
MockEndpoint.assertIsSatisfied(context);
}
}
| JoorOptionalPredicateTest |
java | quarkusio__quarkus | integration-tests/no-awt/src/test/java/io/quarkus/awt/it/GraphicsIT.java | {
"start": 778,
"end": 3202
} | class ____ {
private static final Logger LOG = Logger.getLogger(GraphicsIT.class);
public static Pattern AWT_EXTENSION_HINT_PATTERN = Pattern.compile(".*" + AWT_EXTENSION_HINT + ".*");
@ParameterizedTest
@ValueSource(strings = {
"IIORegistry",
"GraphicsEnvironment",
"Color",
"BufferedImage",
"Transformations",
"ConvolveOp",
"Path2D",
"ImageReader",
"ImageWriter"
})
public void testGraphics(String entrypoint) throws IOException {
LOG.infof("Triggering test: %s", entrypoint);
RestAssured.given().when()
.param("entrypoint", entrypoint)
.get("/graphics")
.then()
.statusCode(HttpStatus.SC_INTERNAL_SERVER_ERROR)
.log().all();
checkLog(AWT_EXTENSION_HINT_PATTERN);
}
/**
* Looks for a pattern in the log, line by line.
*
* @param lineMatchRegexp pattern
*/
static void checkLog(final Pattern lineMatchRegexp) {
final Path logFilePath = Paths.get(".", "target", "quarkus.log").toAbsolutePath();
org.awaitility.Awaitility.given().pollInterval(100, TimeUnit.MILLISECONDS)
.atMost(3, TimeUnit.SECONDS)
.untilAsserted(() -> {
assertTrue(Files.exists(logFilePath), "Quarkus log file " + logFilePath + " is missing");
boolean found = false;
final StringBuilder sbLog = new StringBuilder();
try (BufferedReader reader = new BufferedReader(
new InputStreamReader(new ByteArrayInputStream(Files.readAllBytes(logFilePath)),
StandardCharsets.UTF_8))) {
String line;
while ((line = reader.readLine()) != null) {
sbLog.append(line).append("\r\n");
found = lineMatchRegexp.matcher(line).matches();
if (found) {
break;
}
}
}
assertTrue(found, "Pattern " + lineMatchRegexp.pattern() + " not found in log " + logFilePath + ". \n" +
"The log was: " + sbLog);
});
}
}
| GraphicsIT |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/KeyDeserializer.java | {
"start": 1571,
"end": 1712
} | class ____ only to be used with annotations, to
* indicate that <b>no deserializer is configured</b>.
*<p>
* Specifically, this | is |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/bugs/_2781/Issue2781Mapper.java | {
"start": 421,
"end": 613
} | interface ____ {
Issue2781Mapper INSTANCE = Mappers.getMapper( Issue2781Mapper.class );
@Mapping(target = "nested", source = "source")
Target map(Source source);
| Issue2781Mapper |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/internal/operators/observable/ObservableDoFinally.java | {
"start": 1317,
"end": 1745
} | class ____<T> extends AbstractObservableWithUpstream<T, T> {
final Action onFinally;
public ObservableDoFinally(ObservableSource<T> source, Action onFinally) {
super(source);
this.onFinally = onFinally;
}
@Override
protected void subscribeActual(Observer<? super T> observer) {
source.subscribe(new DoFinallyObserver<>(observer, onFinally));
}
static final | ObservableDoFinally |
java | quarkusio__quarkus | extensions/tls-registry/deployment/src/test/java/io/quarkus/tls/PemKeyStoreUserOrderTest.java | {
"start": 1180,
"end": 3117
} | class ____ {
private static final String configuration = """
quarkus.tls.key-store.pem.foo.key=target/certs/test-pem-order.key
quarkus.tls.key-store.pem.foo.cert=target/certs/test-pem-order.crt
quarkus.tls.key-store.pem.bar.key=target/certs/test-pem-order-alias1.key
quarkus.tls.key-store.pem.bar.cert=target/certs/test-pem-order-alias1.crt
quarkus.tls.key-store.pem.baz.key=target/certs/test-pem-order-alias2.key
quarkus.tls.key-store.pem.baz.cert=target/certs/test-pem-order-alias2.crt
quarkus.tls.key-store.pem.order=foo,bar,baz
""";
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest().setArchiveProducer(
() -> ShrinkWrap.create(JavaArchive.class)
.add(new StringAsset(configuration), "application.properties"));
@Inject
TlsConfigurationRegistry certificates;
@Test
void test() throws KeyStoreException, CertificateParsingException {
TlsConfiguration def = certificates.getDefault().orElseThrow();
assertThat(def.getKeyStore()).isNotNull();
List<X509Certificate> list = new ArrayList<>();
Iterator<String> iterator = def.getKeyStore().aliases().asIterator();
while (iterator.hasNext()) {
String alias = iterator.next();
X509Certificate certificate = (X509Certificate) def.getKeyStore().getCertificate(alias);
list.add(certificate);
}
assertThat(list).hasSize(3);
assertThat(new ArrayList<>(list.get(0).getSubjectAlternativeNames()).get(0).toString()).contains("quarkus.io");
assertThat(new ArrayList<>(list.get(1).getSubjectAlternativeNames()).get(0).toString()).contains("acme.org");
assertThat(new ArrayList<>(list.get(2).getSubjectAlternativeNames()).get(0).toString()).contains("example.com");
}
}
| PemKeyStoreUserOrderTest |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/asm/MethodWriter.java | {
"start": 44645,
"end": 88046
} | class ____ frames, we will need to insert a frame after
// this GOTO_W during the additional ClassReader -> ClassWriter round trip to remove the ASM
// specific instructions. To not miss this additional frame, we need to use an ASM_GOTO_W
// here, which has the unfortunate effect of forcing this additional round trip (which in
// some case would not have been really necessary, but we can't know this at this point).
code.putByte(Constants.ASM_GOTO_W);
hasAsmInstructions = true;
// The instruction after the GOTO_W becomes the target of the IFNOT instruction.
nextInsnIsJumpTarget = true;
}
label.put(code, code.length - 1, true);
} else if (baseOpcode != opcode) {
// Case of a GOTO_W or JSR_W specified by the user (normally ClassReader when used to remove
// ASM specific instructions). In this case we keep the original instruction.
code.putByte(opcode);
label.put(code, code.length - 1, true);
} else {
// Case of a jump with an offset >= -32768, or of a jump with an unknown offset. In these
// cases we store the offset in 2 bytes (which will be increased via a ClassReader ->
// ClassWriter round trip if it turns out that 2 bytes are not sufficient).
code.putByte(baseOpcode);
label.put(code, code.length - 1, false);
}
// If needed, update the maximum stack size and number of locals, and stack map frames.
if (currentBasicBlock != null) {
Label nextBasicBlock = null;
if (compute == COMPUTE_ALL_FRAMES) {
currentBasicBlock.frame.execute(baseOpcode, 0, null, null);
// Record the fact that 'label' is the target of a jump instruction.
label.getCanonicalInstance().flags |= Label.FLAG_JUMP_TARGET;
// Add 'label' as a successor of the current basic block.
addSuccessorToCurrentBasicBlock(Edge.JUMP, label);
if (baseOpcode != Opcodes.GOTO) {
// The next instruction starts a new basic block (except for GOTO: by default the code
// following a goto is unreachable - unless there is an explicit label for it - and we
// should not compute stack frame types for its instructions).
nextBasicBlock = new Label();
}
} else if (compute == COMPUTE_INSERTED_FRAMES) {
currentBasicBlock.frame.execute(baseOpcode, 0, null, null);
} else if (compute == COMPUTE_MAX_STACK_AND_LOCAL_FROM_FRAMES) {
// No need to update maxRelativeStackSize (the stack size delta is always negative).
relativeStackSize += STACK_SIZE_DELTA[baseOpcode];
} else {
if (baseOpcode == Opcodes.JSR) {
// Record the fact that 'label' designates a subroutine, if not already done.
if ((label.flags & Label.FLAG_SUBROUTINE_START) == 0) {
label.flags |= Label.FLAG_SUBROUTINE_START;
hasSubroutines = true;
}
currentBasicBlock.flags |= Label.FLAG_SUBROUTINE_CALLER;
// Note that, by construction in this method, a block which calls a subroutine has at
// least two successors in the control flow graph: the first one (added below) leads to
// the instruction after the JSR, while the second one (added here) leads to the JSR
// target. Note that the first successor is virtual (it does not correspond to a possible
// execution path): it is only used to compute the successors of the basic blocks ending
// with a ret, in {@link Label#addSubroutineRetSuccessors}.
addSuccessorToCurrentBasicBlock(relativeStackSize + 1, label);
// The instruction after the JSR starts a new basic block.
nextBasicBlock = new Label();
} else {
// No need to update maxRelativeStackSize (the stack size delta is always negative).
relativeStackSize += STACK_SIZE_DELTA[baseOpcode];
addSuccessorToCurrentBasicBlock(relativeStackSize, label);
}
}
// If the next instruction starts a new basic block, call visitLabel to add the label of this
// instruction as a successor of the current block, and to start a new basic block.
if (nextBasicBlock != null) {
if (nextInsnIsJumpTarget) {
nextBasicBlock.flags |= Label.FLAG_JUMP_TARGET;
}
visitLabel(nextBasicBlock);
}
if (baseOpcode == Opcodes.GOTO) {
endCurrentBasicBlockWithNoSuccessor();
}
}
}
@Override
public void visitLabel(final Label label) {
// Resolve the forward references to this label, if any.
hasAsmInstructions |= label.resolve(code.data, stackMapTableEntries, code.length);
// visitLabel starts a new basic block (except for debug only labels), so we need to update the
// previous and current block references and list of successors.
if ((label.flags & Label.FLAG_DEBUG_ONLY) != 0) {
return;
}
if (compute == COMPUTE_ALL_FRAMES) {
if (currentBasicBlock != null) {
if (label.bytecodeOffset == currentBasicBlock.bytecodeOffset) {
// We use {@link Label#getCanonicalInstance} to store the state of a basic block in only
// one place, but this does not work for labels which have not been visited yet.
// Therefore, when we detect here two labels having the same bytecode offset, we need to
// - consolidate the state scattered in these two instances into the canonical instance:
currentBasicBlock.flags |= (label.flags & Label.FLAG_JUMP_TARGET);
// - make sure the two instances share the same Frame instance (the implementation of
// {@link Label#getCanonicalInstance} relies on this property; here label.frame should be
// null):
label.frame = currentBasicBlock.frame;
// - and make sure to NOT assign 'label' into 'currentBasicBlock' or 'lastBasicBlock', so
// that they still refer to the canonical instance for this bytecode offset.
return;
}
// End the current basic block (with one new successor).
addSuccessorToCurrentBasicBlock(Edge.JUMP, label);
}
// Append 'label' at the end of the basic block list.
if (lastBasicBlock != null) {
if (label.bytecodeOffset == lastBasicBlock.bytecodeOffset) {
// Same comment as above.
lastBasicBlock.flags |= (label.flags & Label.FLAG_JUMP_TARGET);
// Here label.frame should be null.
label.frame = lastBasicBlock.frame;
currentBasicBlock = lastBasicBlock;
return;
}
lastBasicBlock.nextBasicBlock = label;
}
lastBasicBlock = label;
// Make it the new current basic block.
currentBasicBlock = label;
// Here label.frame should be null.
label.frame = new Frame(label);
} else if (compute == COMPUTE_INSERTED_FRAMES) {
if (currentBasicBlock == null) {
// This case should happen only once, for the visitLabel call in the constructor. Indeed, if
// compute is equal to COMPUTE_INSERTED_FRAMES, currentBasicBlock stays unchanged.
currentBasicBlock = label;
} else {
// Update the frame owner so that a correct frame offset is computed in Frame.accept().
currentBasicBlock.frame.owner = label;
}
} else if (compute == COMPUTE_MAX_STACK_AND_LOCAL) {
if (currentBasicBlock != null) {
// End the current basic block (with one new successor).
currentBasicBlock.outputStackMax = (short) maxRelativeStackSize;
addSuccessorToCurrentBasicBlock(relativeStackSize, label);
}
// Start a new current basic block, and reset the current and maximum relative stack sizes.
currentBasicBlock = label;
relativeStackSize = 0;
maxRelativeStackSize = 0;
// Append the new basic block at the end of the basic block list.
if (lastBasicBlock != null) {
lastBasicBlock.nextBasicBlock = label;
}
lastBasicBlock = label;
} else if (compute == COMPUTE_MAX_STACK_AND_LOCAL_FROM_FRAMES && currentBasicBlock == null) {
// This case should happen only once, for the visitLabel call in the constructor. Indeed, if
// compute is equal to COMPUTE_MAX_STACK_AND_LOCAL_FROM_FRAMES, currentBasicBlock stays
// unchanged.
currentBasicBlock = label;
}
}
@Override
public void visitLdcInsn(final Object value) {
lastBytecodeOffset = code.length;
// Add the instruction to the bytecode of the method.
Symbol constantSymbol = symbolTable.addConstant(value);
int constantIndex = constantSymbol.index;
char firstDescriptorChar;
boolean isLongOrDouble =
constantSymbol.tag == Symbol.CONSTANT_LONG_TAG
|| constantSymbol.tag == Symbol.CONSTANT_DOUBLE_TAG
|| (constantSymbol.tag == Symbol.CONSTANT_DYNAMIC_TAG
&& ((firstDescriptorChar = constantSymbol.value.charAt(0)) == 'J'
|| firstDescriptorChar == 'D'));
if (isLongOrDouble) {
code.put12(Constants.LDC2_W, constantIndex);
} else if (constantIndex >= 256) {
code.put12(Constants.LDC_W, constantIndex);
} else {
code.put11(Opcodes.LDC, constantIndex);
}
// If needed, update the maximum stack size and number of locals, and stack map frames.
if (currentBasicBlock != null) {
if (compute == COMPUTE_ALL_FRAMES || compute == COMPUTE_INSERTED_FRAMES) {
currentBasicBlock.frame.execute(Opcodes.LDC, 0, constantSymbol, symbolTable);
} else {
int size = relativeStackSize + (isLongOrDouble ? 2 : 1);
if (size > maxRelativeStackSize) {
maxRelativeStackSize = size;
}
relativeStackSize = size;
}
}
}
@Override
public void visitIincInsn(final int varIndex, final int increment) {
lastBytecodeOffset = code.length;
// Add the instruction to the bytecode of the method.
if ((varIndex > 255) || (increment > 127) || (increment < -128)) {
code.putByte(Constants.WIDE).put12(Opcodes.IINC, varIndex).putShort(increment);
} else {
code.putByte(Opcodes.IINC).put11(varIndex, increment);
}
// If needed, update the maximum stack size and number of locals, and stack map frames.
if (currentBasicBlock != null
&& (compute == COMPUTE_ALL_FRAMES || compute == COMPUTE_INSERTED_FRAMES)) {
currentBasicBlock.frame.execute(Opcodes.IINC, varIndex, null, null);
}
if (compute != COMPUTE_NOTHING) {
int currentMaxLocals = varIndex + 1;
if (currentMaxLocals > maxLocals) {
maxLocals = currentMaxLocals;
}
}
}
@Override
public void visitTableSwitchInsn(
final int min, final int max, final Label dflt, final Label... labels) {
lastBytecodeOffset = code.length;
// Add the instruction to the bytecode of the method.
code.putByte(Opcodes.TABLESWITCH).putByteArray(null, 0, (4 - code.length % 4) % 4);
dflt.put(code, lastBytecodeOffset, true);
code.putInt(min).putInt(max);
for (Label label : labels) {
label.put(code, lastBytecodeOffset, true);
}
// If needed, update the maximum stack size and number of locals, and stack map frames.
visitSwitchInsn(dflt, labels);
}
@Override
public void visitLookupSwitchInsn(final Label dflt, final int[] keys, final Label[] labels) {
lastBytecodeOffset = code.length;
// Add the instruction to the bytecode of the method.
code.putByte(Opcodes.LOOKUPSWITCH).putByteArray(null, 0, (4 - code.length % 4) % 4);
dflt.put(code, lastBytecodeOffset, true);
code.putInt(labels.length);
for (int i = 0; i < labels.length; ++i) {
code.putInt(keys[i]);
labels[i].put(code, lastBytecodeOffset, true);
}
// If needed, update the maximum stack size and number of locals, and stack map frames.
visitSwitchInsn(dflt, labels);
}
/**
 * Updates the stack map frame or max stack computation for a TABLESWITCH or LOOKUPSWITCH
 * instruction that has just been written, and ends the current basic block (a switch never
 * falls through to the next instruction).
 *
 * @param dflt the default branch target.
 * @param labels the other branch targets.
 */
private void visitSwitchInsn(final Label dflt, final Label[] labels) {
  if (currentBasicBlock != null) {
    if (compute == COMPUTE_ALL_FRAMES) {
      // LOOKUPSWITCH is used for the frame simulation of both switch variants (both pop a
      // single int key and have no other frame effect).
      currentBasicBlock.frame.execute(Opcodes.LOOKUPSWITCH, 0, null, null);
      // Add all the labels as successors of the current basic block.
      addSuccessorToCurrentBasicBlock(Edge.JUMP, dflt);
      dflt.getCanonicalInstance().flags |= Label.FLAG_JUMP_TARGET;
      for (Label label : labels) {
        addSuccessorToCurrentBasicBlock(Edge.JUMP, label);
        label.getCanonicalInstance().flags |= Label.FLAG_JUMP_TARGET;
      }
    } else if (compute == COMPUTE_MAX_STACK_AND_LOCAL) {
      // No need to update maxRelativeStackSize (the stack size delta is always negative).
      --relativeStackSize;
      // Add all the labels as successors of the current basic block, recording the relative
      // stack size at the start of each successor in the edge info.
      addSuccessorToCurrentBasicBlock(relativeStackSize, dflt);
      for (Label label : labels) {
        addSuccessorToCurrentBasicBlock(relativeStackSize, label);
      }
    }
    // End the current basic block.
    endCurrentBasicBlockWithNoSuccessor();
  }
}
@Override
public void visitMultiANewArrayInsn(final String descriptor, final int numDimensions) {
  lastBytecodeOffset = code.length;
  // Add the instruction to the bytecode of the method: opcode, 2-byte constant pool index of
  // the array class, then the number of dimensions as one byte.
  Symbol descSymbol = symbolTable.addConstantClass(descriptor);
  code.put12(Opcodes.MULTIANEWARRAY, descSymbol.index).putByte(numDimensions);
  // If needed, update the maximum stack size and number of locals, and stack map frames.
  if (currentBasicBlock != null) {
    if (compute == COMPUTE_ALL_FRAMES || compute == COMPUTE_INSERTED_FRAMES) {
      currentBasicBlock.frame.execute(
          Opcodes.MULTIANEWARRAY, numDimensions, descSymbol, symbolTable);
    } else {
      // MULTIANEWARRAY pops numDimensions ints and pushes one array reference, so the stack
      // size delta is 1 - numDimensions (never positive since numDimensions >= 1).
      // No need to update maxRelativeStackSize (the stack size delta is always negative).
      relativeStackSize += 1 - numDimensions;
    }
  }
}
@Override
public AnnotationVisitor visitInsnAnnotation(
    final int typeRef, final TypePath typePath, final String descriptor, final boolean visible) {
  // Embed the bytecode offset of the annotated instruction into bits 8..23 of typeRef (the
  // 'offset' field of the JVMS type_annotation target_info), while keeping the target_type
  // (top byte) and the low byte untouched via the 0xFF0000FF mask.
  // The created writer is prepended to the relevant linked list of type annotations.
  if (visible) {
    return lastCodeRuntimeVisibleTypeAnnotation =
        AnnotationWriter.create(
            symbolTable,
            (typeRef & 0xFF0000FF) | (lastBytecodeOffset << 8),
            typePath,
            descriptor,
            lastCodeRuntimeVisibleTypeAnnotation);
  } else {
    return lastCodeRuntimeInvisibleTypeAnnotation =
        AnnotationWriter.create(
            symbolTable,
            (typeRef & 0xFF0000FF) | (lastBytecodeOffset << 8),
            typePath,
            descriptor,
            lastCodeRuntimeInvisibleTypeAnnotation);
  }
}
@Override
public void visitTryCatchBlock(
    final Label start, final Label end, final Label handler, final String type) {
  // Resolve the catch type to a constant pool index; a null type denotes a catch-all
  // (finally) handler, encoded as index 0.
  int catchTypeIndex = (type == null) ? 0 : symbolTable.addConstantClass(type).index;
  Handler appended = new Handler(start, end, handler, catchTypeIndex, type);
  // Append the handler to the singly-linked list, preserving declaration order.
  if (firstHandler == null) {
    firstHandler = appended;
  } else {
    lastHandler.nextHandler = appended;
  }
  lastHandler = appended;
}
@Override
public AnnotationVisitor visitTryCatchAnnotation(
    final int typeRef, final TypePath typePath, final String descriptor, final boolean visible) {
  // Unlike visitInsnAnnotation, the typeRef is used as-is (the target_info of a try-catch
  // annotation references the exception table index, not a bytecode offset). The created
  // writer is prepended to the relevant linked list of type annotations.
  if (visible) {
    return lastCodeRuntimeVisibleTypeAnnotation =
        AnnotationWriter.create(
            symbolTable, typeRef, typePath, descriptor, lastCodeRuntimeVisibleTypeAnnotation);
  } else {
    return lastCodeRuntimeInvisibleTypeAnnotation =
        AnnotationWriter.create(
            symbolTable, typeRef, typePath, descriptor, lastCodeRuntimeInvisibleTypeAnnotation);
  }
}
@Override
public void visitLocalVariable(
    final String name,
    final String descriptor,
    final String signature,
    final Label start,
    final Label end,
    final int index) {
  // If a generic signature is given, add an entry to the LocalVariableTypeTable attribute
  // (lazily allocated). Each entry is (start_pc, length, name_index, signature_index, index).
  if (signature != null) {
    if (localVariableTypeTable == null) {
      localVariableTypeTable = new ByteVector();
    }
    ++localVariableTypeTableLength;
    localVariableTypeTable
        .putShort(start.bytecodeOffset)
        .putShort(end.bytecodeOffset - start.bytecodeOffset)
        .putShort(symbolTable.addConstantUtf8(name))
        .putShort(symbolTable.addConstantUtf8(signature))
        .putShort(index);
  }
  // Always add an entry to the LocalVariableTable attribute (lazily allocated), with the
  // descriptor instead of the generic signature.
  if (localVariableTable == null) {
    localVariableTable = new ByteVector();
  }
  ++localVariableTableLength;
  localVariableTable
      .putShort(start.bytecodeOffset)
      .putShort(end.bytecodeOffset - start.bytecodeOffset)
      .putShort(symbolTable.addConstantUtf8(name))
      .putShort(symbolTable.addConstantUtf8(descriptor))
      .putShort(index);
  // If max locals are computed automatically, make sure maxLocals covers this variable's
  // slot(s): long ('J') and double ('D') values occupy two local variable slots.
  if (compute != COMPUTE_NOTHING) {
    char firstDescChar = descriptor.charAt(0);
    int currentMaxLocals = index + (firstDescChar == 'J' || firstDescChar == 'D' ? 2 : 1);
    if (currentMaxLocals > maxLocals) {
      maxLocals = currentMaxLocals;
    }
  }
}
@Override
public AnnotationVisitor visitLocalVariableAnnotation(
    final int typeRef,
    final TypePath typePath,
    final Label[] start,
    final Label[] end,
    final int[] index,
    final String descriptor,
    final boolean visible) {
  // Create a ByteVector to hold a 'type_annotation' JVMS structure.
  // See https://docs.oracle.com/javase/specs/jvms/se9/html/jvms-4.html#jvms-4.7.20.
  ByteVector typeAnnotation = new ByteVector();
  // Write target_type, target_info, and target_path. The target_info of a local variable
  // annotation is a localvar_target: a table of (start_pc, length, index) entries, one per
  // live range of the variable.
  typeAnnotation.putByte(typeRef >>> 24).putShort(start.length);
  for (int i = 0; i < start.length; ++i) {
    typeAnnotation
        .putShort(start[i].bytecodeOffset)
        .putShort(end[i].bytecodeOffset - start[i].bytecodeOffset)
        .putShort(index[i]);
  }
  TypePath.put(typePath, typeAnnotation);
  // Write type_index and reserve space for num_element_value_pairs.
  typeAnnotation.putShort(symbolTable.addConstantUtf8(descriptor)).putShort(0);
  // Prepend the new writer to the relevant linked list of type annotations.
  if (visible) {
    return lastCodeRuntimeVisibleTypeAnnotation =
        new AnnotationWriter(
            symbolTable,
            /* useNamedValues= */ true,
            typeAnnotation,
            lastCodeRuntimeVisibleTypeAnnotation);
  } else {
    return lastCodeRuntimeInvisibleTypeAnnotation =
        new AnnotationWriter(
            symbolTable,
            /* useNamedValues= */ true,
            typeAnnotation,
            lastCodeRuntimeInvisibleTypeAnnotation);
  }
}
@Override
public void visitLineNumber(final int line, final Label start) {
  // Lazily allocate the LineNumberTable attribute content.
  if (lineNumberTable == null) {
    lineNumberTable = new ByteVector();
  }
  lineNumberTableLength++;
  // Each entry is a (start_pc, line_number) pair of unsigned shorts.
  lineNumberTable.putShort(start.bytecodeOffset).putShort(line);
}
@Override
public void visitMaxs(final int maxStack, final int maxLocals) {
  // Depending on the 'compute' mode, either recompute max stack / max locals (and possibly the
  // stack map frames) from the recorded instructions, or use the caller-provided values.
  if (compute == COMPUTE_ALL_FRAMES) {
    computeAllFrames();
  } else if (compute == COMPUTE_MAX_STACK_AND_LOCAL) {
    computeMaxStackAndLocal();
  } else if (compute == COMPUTE_MAX_STACK_AND_LOCAL_FROM_FRAMES) {
    // maxRelativeStackSize has been maintained incrementally from the visited frames and
    // instructions; maxLocals has been maintained by the visit methods themselves.
    this.maxStack = maxRelativeStackSize;
  } else {
    // COMPUTE_NOTHING: trust the values given by the caller.
    this.maxStack = maxStack;
    this.maxLocals = maxLocals;
  }
}
/**
 * Computes all the stack map frames of the method, from scratch, with a fix point algorithm
 * over the control flow graph. Also computes {@link #maxStack} as a by-product, and replaces
 * unreachable code with NOP...ATHROW sequences (removing it from exception handler ranges).
 */
private void computeAllFrames() {
  // Complete the control flow graph with exception handler blocks.
  Handler handler = firstHandler;
  while (handler != null) {
    // A null catch type denotes a catch-all (finally) handler, which can receive any
    // exception; model it as java/lang/Throwable.
    String catchTypeDescriptor =
        handler.catchTypeDescriptor == null ? "java/lang/Throwable" : handler.catchTypeDescriptor;
    int catchType = Frame.getAbstractTypeFromInternalName(symbolTable, catchTypeDescriptor);
    // Mark handlerBlock as an exception handler.
    Label handlerBlock = handler.handlerPc.getCanonicalInstance();
    handlerBlock.flags |= Label.FLAG_JUMP_TARGET;
    // Add handlerBlock as a successor of all the basic blocks in the exception handler range.
    Label handlerRangeBlock = handler.startPc.getCanonicalInstance();
    Label handlerRangeEnd = handler.endPc.getCanonicalInstance();
    while (handlerRangeBlock != handlerRangeEnd) {
      handlerRangeBlock.outgoingEdges =
          new Edge(catchType, handlerBlock, handlerRangeBlock.outgoingEdges);
      handlerRangeBlock = handlerRangeBlock.nextBasicBlock;
    }
    handler = handler.nextHandler;
  }
  // Create and visit the first (implicit) frame, derived from the method's access flags and
  // descriptor (the 'this' reference and the arguments).
  Frame firstFrame = firstBasicBlock.frame;
  firstFrame.setInputFrameFromDescriptor(symbolTable, accessFlags, descriptor, this.maxLocals);
  firstFrame.accept(this);
  // Fix point algorithm: add the first basic block to a list of blocks to process (i.e. blocks
  // whose stack map frame has changed) and, while there are blocks to process, remove one from
  // the list and update the stack map frames of its successor blocks in the control flow graph
  // (which might change them, in which case these blocks must be processed too, and are thus
  // added to the list of blocks to process). Also compute the maximum stack size of the method,
  // as a by-product.
  Label listOfBlocksToProcess = firstBasicBlock;
  listOfBlocksToProcess.nextListElement = Label.EMPTY_LIST;
  int maxStackSize = 0;
  while (listOfBlocksToProcess != Label.EMPTY_LIST) {
    // Remove a basic block from the list of blocks to process.
    Label basicBlock = listOfBlocksToProcess;
    listOfBlocksToProcess = listOfBlocksToProcess.nextListElement;
    basicBlock.nextListElement = null;
    // By definition, basicBlock is reachable.
    basicBlock.flags |= Label.FLAG_REACHABLE;
    // Update the (absolute) maximum stack size.
    int maxBlockStackSize = basicBlock.frame.getInputStackSize() + basicBlock.outputStackMax;
    if (maxBlockStackSize > maxStackSize) {
      maxStackSize = maxBlockStackSize;
    }
    // Update the successor blocks of basicBlock in the control flow graph.
    Edge outgoingEdge = basicBlock.outgoingEdges;
    while (outgoingEdge != null) {
      Label successorBlock = outgoingEdge.successor.getCanonicalInstance();
      boolean successorBlockChanged =
          basicBlock.frame.merge(symbolTable, successorBlock.frame, outgoingEdge.info);
      if (successorBlockChanged && successorBlock.nextListElement == null) {
        // If successorBlock has changed it must be processed. Thus, if it is not already in the
        // list of blocks to process, add it to this list.
        successorBlock.nextListElement = listOfBlocksToProcess;
        listOfBlocksToProcess = successorBlock;
      }
      outgoingEdge = outgoingEdge.nextEdge;
    }
  }
  // Loop over all the basic blocks and visit the stack map frames that must be stored in the
  // StackMapTable attribute. Also replace unreachable code with NOP* ATHROW, and remove it from
  // exception handler ranges.
  Label basicBlock = firstBasicBlock;
  while (basicBlock != null) {
    // Only jump targets need a stored frame; frames of fall-through blocks are implicit.
    if ((basicBlock.flags & (Label.FLAG_JUMP_TARGET | Label.FLAG_REACHABLE))
        == (Label.FLAG_JUMP_TARGET | Label.FLAG_REACHABLE)) {
      basicBlock.frame.accept(this);
    }
    if ((basicBlock.flags & Label.FLAG_REACHABLE) == 0) {
      // Find the start and end bytecode offsets of this unreachable block.
      Label nextBasicBlock = basicBlock.nextBasicBlock;
      int startOffset = basicBlock.bytecodeOffset;
      int endOffset = (nextBasicBlock == null ? code.length : nextBasicBlock.bytecodeOffset) - 1;
      if (endOffset >= startOffset) {
        // Replace its instructions with NOP ... NOP ATHROW.
        for (int i = startOffset; i < endOffset; ++i) {
          code.data[i] = Opcodes.NOP;
        }
        code.data[endOffset] = (byte) Opcodes.ATHROW;
        // Emit a frame for this unreachable block, with no local and a Throwable on the stack
        // (so that the ATHROW could consume this Throwable if it were reachable).
        int frameIndex = visitFrameStart(startOffset, /* numLocal = */ 0, /* numStack = */ 1);
        currentFrame[frameIndex] =
            Frame.getAbstractTypeFromInternalName(symbolTable, "java/lang/Throwable");
        visitFrameEnd();
        // Remove this unreachable basic block from the exception handler ranges.
        firstHandler = Handler.removeRange(firstHandler, basicBlock, nextBasicBlock);
        // The maximum stack size is now at least one, because of the Throwable declared above.
        maxStackSize = Math.max(maxStackSize, 1);
      }
    }
    basicBlock = basicBlock.nextBasicBlock;
  }
  this.maxStack = maxStackSize;
}
/**
 * Computes the maximum stack size of the method with a data flow analysis over the control
 * flow graph (completed here with exception handler edges and, if needed, subroutine RET
 * successor edges). The result is stored in {@link #maxStack}; maxLocals has already been
 * maintained incrementally by the instruction visit methods.
 */
private void computeMaxStackAndLocal() {
  // Complete the control flow graph with exception handler blocks.
  Handler handler = firstHandler;
  while (handler != null) {
    Label handlerBlock = handler.handlerPc;
    Label handlerRangeBlock = handler.startPc;
    Label handlerRangeEnd = handler.endPc;
    // Add handlerBlock as a successor of all the basic blocks in the exception handler range.
    while (handlerRangeBlock != handlerRangeEnd) {
      if ((handlerRangeBlock.flags & Label.FLAG_SUBROUTINE_CALLER) == 0) {
        handlerRangeBlock.outgoingEdges =
            new Edge(Edge.EXCEPTION, handlerBlock, handlerRangeBlock.outgoingEdges);
      } else {
        // If handlerRangeBlock is a JSR block, add handlerBlock after the first two outgoing
        // edges to preserve the hypothesis about JSR block successors order (see
        // {@link #visitJumpInsn}).
        handlerRangeBlock.outgoingEdges.nextEdge.nextEdge =
            new Edge(
                Edge.EXCEPTION, handlerBlock, handlerRangeBlock.outgoingEdges.nextEdge.nextEdge);
      }
      handlerRangeBlock = handlerRangeBlock.nextBasicBlock;
    }
    handler = handler.nextHandler;
  }
  // Complete the control flow graph with the successor blocks of subroutines, if needed.
  if (hasSubroutines) {
    // First step: find the subroutines. This step determines, for each basic block, to which
    // subroutine(s) it belongs. Start with the main "subroutine":
    short numSubroutines = 1;
    firstBasicBlock.markSubroutine(numSubroutines);
    // Then, mark the subroutines called by the main subroutine, then the subroutines called by
    // those called by the main subroutine, etc.
    // NOTE: numSubroutines grows inside this loop as new subroutines are discovered, so the
    // loop processes every subroutine found so far.
    for (short currentSubroutine = 1; currentSubroutine <= numSubroutines; ++currentSubroutine) {
      Label basicBlock = firstBasicBlock;
      while (basicBlock != null) {
        if ((basicBlock.flags & Label.FLAG_SUBROUTINE_CALLER) != 0
            && basicBlock.subroutineId == currentSubroutine) {
          Label jsrTarget = basicBlock.outgoingEdges.nextEdge.successor;
          if (jsrTarget.subroutineId == 0) {
            // If this subroutine has not been marked yet, find its basic blocks.
            jsrTarget.markSubroutine(++numSubroutines);
          }
        }
        basicBlock = basicBlock.nextBasicBlock;
      }
    }
    // Second step: find the successors in the control flow graph of each subroutine basic block
    // 'r' ending with a RET instruction. These successors are the virtual successors of the basic
    // blocks ending with JSR instructions (see {@link #visitJumpInsn)} that can reach 'r'.
    Label basicBlock = firstBasicBlock;
    while (basicBlock != null) {
      if ((basicBlock.flags & Label.FLAG_SUBROUTINE_CALLER) != 0) {
        // By construction, jsr targets are stored in the second outgoing edge of basic blocks
        // that ends with a jsr instruction (see {@link #FLAG_SUBROUTINE_CALLER}).
        Label subroutine = basicBlock.outgoingEdges.nextEdge.successor;
        subroutine.addSubroutineRetSuccessors(basicBlock);
      }
      basicBlock = basicBlock.nextBasicBlock;
    }
  }
  // Data flow algorithm: put the first basic block in a list of blocks to process (i.e. blocks
  // whose input stack size has changed) and, while there are blocks to process, remove one
  // from the list, update the input stack size of its successor blocks in the control flow
  // graph, and add these blocks to the list of blocks to process (if not already done).
  Label listOfBlocksToProcess = firstBasicBlock;
  listOfBlocksToProcess.nextListElement = Label.EMPTY_LIST;
  int maxStackSize = maxStack;
  while (listOfBlocksToProcess != Label.EMPTY_LIST) {
    // Remove a basic block from the list of blocks to process. Note that we don't reset
    // basicBlock.nextListElement to null on purpose, to make sure we don't reprocess already
    // processed basic blocks.
    Label basicBlock = listOfBlocksToProcess;
    listOfBlocksToProcess = listOfBlocksToProcess.nextListElement;
    // Compute the (absolute) input stack size and maximum stack size of this block.
    int inputStackTop = basicBlock.inputStackSize;
    int maxBlockStackSize = inputStackTop + basicBlock.outputStackMax;
    // Update the absolute maximum stack size of the method.
    if (maxBlockStackSize > maxStackSize) {
      maxStackSize = maxBlockStackSize;
    }
    // Update the input stack size of the successor blocks of basicBlock in the control flow
    // graph, and add these blocks to the list of blocks to process, if not already done.
    Edge outgoingEdge = basicBlock.outgoingEdges;
    if ((basicBlock.flags & Label.FLAG_SUBROUTINE_CALLER) != 0) {
      // Ignore the first outgoing edge of the basic blocks ending with a jsr: these are virtual
      // edges which lead to the instruction just after the jsr, and do not correspond to a
      // possible execution path (see {@link #visitJumpInsn} and
      // {@link Label#FLAG_SUBROUTINE_CALLER}).
      outgoingEdge = outgoingEdge.nextEdge;
    }
    while (outgoingEdge != null) {
      Label successorBlock = outgoingEdge.successor;
      if (successorBlock.nextListElement == null) {
        // An exception edge always enters the handler with exactly one value (the thrown
        // exception) on the stack; a normal edge carries the block's input size plus the
        // edge's stack size delta.
        successorBlock.inputStackSize =
            (short) (outgoingEdge.info == Edge.EXCEPTION ? 1 : inputStackTop + outgoingEdge.info);
        successorBlock.nextListElement = listOfBlocksToProcess;
        listOfBlocksToProcess = successorBlock;
      }
      outgoingEdge = outgoingEdge.nextEdge;
    }
  }
  this.maxStack = maxStackSize;
}
@Override
public void visitEnd() {
  // Nothing to do: the method's content has already been accumulated incrementally by the
  // other visit methods.
}
// -----------------------------------------------------------------------------------------------
// Utility methods: control flow analysis algorithm
// -----------------------------------------------------------------------------------------------
/**
 * Adds a successor to {@link #currentBasicBlock} in the control flow graph.
 *
 * @param info information about the control flow edge to be added.
 * @param successor the successor block to be added to the current basic block.
 */
private void addSuccessorToCurrentBasicBlock(final int info, final Label successor) {
  // Prepend a new edge to the current basic block's singly-linked list of outgoing edges.
  Edge newEdge = new Edge(info, successor, currentBasicBlock.outgoingEdges);
  currentBasicBlock.outgoingEdges = newEdge;
}
/**
 * Ends the current basic block. This method must be used in the case where the current basic
 * block does not have any successor.
 *
 * <p>WARNING: this method must be called after the currently visited instruction has been put in
 * {@link #code} (if frames are computed, this method inserts a new Label to start a new basic
 * block after the current instruction).
 */
private void endCurrentBasicBlockWithNoSuccessor() {
  if (compute == COMPUTE_ALL_FRAMES) {
    // Start a fresh basic block at the current code offset, append it to the list of basic
    // blocks, and clear currentBasicBlock (no successor edge links the two blocks).
    Label nextBasicBlock = new Label();
    nextBasicBlock.frame = new Frame(nextBasicBlock);
    nextBasicBlock.resolve(code.data, stackMapTableEntries, code.length);
    lastBasicBlock.nextBasicBlock = nextBasicBlock;
    lastBasicBlock = nextBasicBlock;
    currentBasicBlock = null;
  } else if (compute == COMPUTE_MAX_STACK_AND_LOCAL) {
    // Record the maximum relative stack size reached in the now-finished block, for use by
    // computeMaxStackAndLocal().
    currentBasicBlock.outputStackMax = (short) maxRelativeStackSize;
    currentBasicBlock = null;
  }
}
// -----------------------------------------------------------------------------------------------
// Utility methods: stack map frames
// -----------------------------------------------------------------------------------------------
/**
 * Starts the visit of a new stack map frame, stored in {@link #currentFrame}.
 *
 * <p>A frame is stored as an int array laid out as [offset, numLocal, numStack, types...],
 * hence the 3-element header and the returned index of 3.
 *
 * @param offset the bytecode offset of the instruction to which the frame corresponds.
 * @param numLocal the number of local variables in the frame.
 * @param numStack the number of stack elements in the frame.
 * @return the index of the next element to be written in this frame.
 */
int visitFrameStart(final int offset, final int numLocal, final int numStack) {
  int frameLength = 3 + numLocal + numStack;
  // Reuse the existing array when large enough, otherwise (re)allocate.
  if (currentFrame == null || currentFrame.length < frameLength) {
    currentFrame = new int[frameLength];
  }
  currentFrame[0] = offset;
  currentFrame[1] = numLocal;
  currentFrame[2] = numStack;
  return 3;
}
/**
 * Sets an abstract type in {@link #currentFrame}.
 *
 * @param frameIndex the index of the element to be set in {@link #currentFrame}.
 * @param abstractType an abstract type.
 */
void visitAbstractType(final int frameIndex, final int abstractType) {
  currentFrame[frameIndex] = abstractType;
}
/**
 * Ends the visit of {@link #currentFrame} by writing it in the StackMapTable entries and by
 * updating the StackMapTable number_of_entries (except if the current frame is the first one,
 * which is implicit in StackMapTable). Then resets {@link #currentFrame} to {@literal null}.
 */
void visitFrameEnd() {
  // previousFrame == null means this is the first (implicit) frame: it is recorded but not
  // serialized, since the JVM derives it from the method descriptor.
  if (previousFrame != null) {
    if (stackMapTableEntries == null) {
      stackMapTableEntries = new ByteVector();
    }
    putFrame();
    ++stackMapTableNumberOfEntries;
  }
  // The current frame becomes the reference for delta-compressing the next one.
  previousFrame = currentFrame;
  currentFrame = null;
}
/**
 * Compresses and writes {@link #currentFrame} in a new StackMapTable entry, choosing the most
 * compact JVMS frame type (SAME, SAME_LOCALS_1_STACK_ITEM, CHOP, APPEND, or FULL) based on the
 * delta with {@link #previousFrame}. For pre-1.6 class files, writes an uncompressed StackMap
 * entry instead.
 */
private void putFrame() {
  final int numLocal = currentFrame[1];
  final int numStack = currentFrame[2];
  if (symbolTable.getMajorVersion() < Opcodes.V1_6) {
    // Generate a StackMap attribute entry, which are always uncompressed.
    stackMapTableEntries.putShort(currentFrame[0]).putShort(numLocal);
    putAbstractTypes(3, 3 + numLocal);
    stackMapTableEntries.putShort(numStack);
    putAbstractTypes(3 + numLocal, 3 + numLocal + numStack);
    return;
  }
  // Per the JVMS, the first entry's offset_delta is the absolute offset; subsequent entries
  // use (offset - previousOffset - 1).
  final int offsetDelta =
      stackMapTableNumberOfEntries == 0
          ? currentFrame[0]
          : currentFrame[0] - previousFrame[0] - 1;
  final int previousNumlocal = previousFrame[1];
  final int numLocalDelta = numLocal - previousNumlocal;
  // Choose a candidate compressed frame type from the local/stack count deltas alone.
  int type = Frame.FULL_FRAME;
  if (numStack == 0) {
    switch (numLocalDelta) {
      case -3:
      case -2:
      case -1:
        type = Frame.CHOP_FRAME;
        break;
      case 0:
        type = offsetDelta < 64 ? Frame.SAME_FRAME : Frame.SAME_FRAME_EXTENDED;
        break;
      case 1:
      case 2:
      case 3:
        type = Frame.APPEND_FRAME;
        break;
      default:
        // Keep the FULL_FRAME type.
        break;
    }
  } else if (numLocalDelta == 0 && numStack == 1) {
    type =
        offsetDelta < 63
            ? Frame.SAME_LOCALS_1_STACK_ITEM_FRAME
            : Frame.SAME_LOCALS_1_STACK_ITEM_FRAME_EXTENDED;
  }
  if (type != Frame.FULL_FRAME) {
    // Verify if locals are the same as in the previous frame (the compressed types are only
    // valid if the common prefix of local variables is unchanged).
    int frameIndex = 3;
    for (int i = 0; i < previousNumlocal && i < numLocal; i++) {
      if (currentFrame[frameIndex] != previousFrame[frameIndex]) {
        type = Frame.FULL_FRAME;
        break;
      }
      frameIndex++;
    }
  }
  switch (type) {
    case Frame.SAME_FRAME:
      // Tag 0..63 directly encodes the offset delta.
      stackMapTableEntries.putByte(offsetDelta);
      break;
    case Frame.SAME_LOCALS_1_STACK_ITEM_FRAME:
      // Tag 64..127 encodes 64 + offset delta, followed by the single stack item.
      stackMapTableEntries.putByte(Frame.SAME_LOCALS_1_STACK_ITEM_FRAME + offsetDelta);
      putAbstractTypes(3 + numLocal, 4 + numLocal);
      break;
    case Frame.SAME_LOCALS_1_STACK_ITEM_FRAME_EXTENDED:
      stackMapTableEntries
          .putByte(Frame.SAME_LOCALS_1_STACK_ITEM_FRAME_EXTENDED)
          .putShort(offsetDelta);
      putAbstractTypes(3 + numLocal, 4 + numLocal);
      break;
    case Frame.SAME_FRAME_EXTENDED:
      stackMapTableEntries.putByte(Frame.SAME_FRAME_EXTENDED).putShort(offsetDelta);
      break;
    case Frame.CHOP_FRAME:
      // Tag 248..250 is SAME_FRAME_EXTENDED (251) plus the (negative) local count delta.
      stackMapTableEntries
          .putByte(Frame.SAME_FRAME_EXTENDED + numLocalDelta)
          .putShort(offsetDelta);
      break;
    case Frame.APPEND_FRAME:
      // Tag 252..254 is SAME_FRAME_EXTENDED (251) plus the (positive) local count delta,
      // followed by the appended local types.
      stackMapTableEntries
          .putByte(Frame.SAME_FRAME_EXTENDED + numLocalDelta)
          .putShort(offsetDelta);
      putAbstractTypes(3 + previousNumlocal, 3 + numLocal);
      break;
    case Frame.FULL_FRAME:
    default:
      stackMapTableEntries.putByte(Frame.FULL_FRAME).putShort(offsetDelta).putShort(numLocal);
      putAbstractTypes(3, 3 + numLocal);
      stackMapTableEntries.putShort(numStack);
      putAbstractTypes(3 + numLocal, 3 + numLocal + numStack);
      break;
  }
}
/**
 * Puts some abstract types of {@link #currentFrame} in {@link #stackMapTableEntries} , using the
 * JVMS verification_type_info format used in StackMapTable attributes.
 *
 * @param start index of the first type in {@link #currentFrame} to write.
 * @param end index of last type in {@link #currentFrame} to write (exclusive).
 */
private void putAbstractTypes(final int start, final int end) {
  for (int i = start; i < end; ++i) {
    Frame.putAbstractType(symbolTable, currentFrame[i], stackMapTableEntries);
  }
}
/**
 * Puts the given public API frame element type in {@link #stackMapTableEntries} , using the JVMS
 * verification_type_info format used in StackMapTable attributes.
 *
 * @param type a frame element type described using the same format as in {@link
 *     MethodVisitor#visitFrame}, i.e. either {@link Opcodes#TOP}, {@link Opcodes#INTEGER}, {@link
 *     Opcodes#FLOAT}, {@link Opcodes#LONG}, {@link Opcodes#DOUBLE}, {@link Opcodes#NULL}, or
 *     {@link Opcodes#UNINITIALIZED_THIS}, or the internal name of a class, or a Label designating
 *     a NEW instruction (for uninitialized types).
 */
private void putFrameType(final Object type) {
  if (type instanceof Integer) {
    // Primitive or special type: the Opcodes constant is the verification_type_info tag.
    stackMapTableEntries.putByte(((Integer) type).intValue());
  } else if (type instanceof String) {
    // Object type: tag followed by the constant pool index of the class.
    stackMapTableEntries
        .putByte(Frame.ITEM_OBJECT)
        .putShort(symbolTable.addConstantClass((String) type).index);
  } else {
    // Uninitialized type: tag followed by the offset of the corresponding NEW instruction.
    stackMapTableEntries.putByte(Frame.ITEM_UNINITIALIZED);
    ((Label) type).put(stackMapTableEntries);
  }
}
// -----------------------------------------------------------------------------------------------
// Utility methods
// -----------------------------------------------------------------------------------------------
/**
 * Returns whether the attributes of this method can be copied from the attributes of the given
 * method (assuming there is no method visitor between the given ClassReader and this
 * MethodWriter). This method should only be called just after this MethodWriter has been created,
 * and before any content is visited. It returns true if the attributes corresponding to the
 * constructor arguments (at most a Signature, an Exception, a Deprecated and a Synthetic
 * attribute) are the same as the corresponding attributes in the given method.
 *
 * @param source the source ClassReader from which the attributes of this method might be copied.
 * @param hasSyntheticAttribute whether the method_info JVMS structure from which the attributes
 *     of this method might be copied contains a Synthetic attribute.
 * @param hasDeprecatedAttribute whether the method_info JVMS structure from which the attributes
 *     of this method might be copied contains a Deprecated attribute.
 * @param descriptorIndex the descriptor_index field of the method_info JVMS structure from which
 *     the attributes of this method might be copied.
 * @param signatureIndex the constant pool index contained in the Signature attribute of the
 *     method_info JVMS structure from which the attributes of this method might be copied, or 0.
 * @param exceptionsOffset the offset in 'source.b' of the Exceptions attribute of the method_info
 *     JVMS structure from which the attributes of this method might be copied, or 0.
 * @return whether the attributes of this method can be copied from the attributes of the
 *     method_info JVMS structure in 'source.b', between 'methodInfoOffset' and 'methodInfoOffset'
 *     + 'methodInfoLength'.
 */
boolean canCopyMethodAttributes(
    final ClassReader source,
    final boolean hasSyntheticAttribute,
    final boolean hasDeprecatedAttribute,
    final int descriptorIndex,
    final int signatureIndex,
    final int exceptionsOffset) {
  // If the method descriptor has changed, with more locals than the max_locals field of the
  // original Code attribute, if any, then the original method attributes can't be copied. A
  // conservative check on the descriptor changes alone ensures this (being more precise is not
  // worth the additional complexity, because these cases should be rare -- if a transform changes
  // a method descriptor, most of the time it needs to change the method's code too).
  if (source != symbolTable.getSource()
      || descriptorIndex != this.descriptorIndex
      || signatureIndex != this.signatureIndex
      || hasDeprecatedAttribute != ((accessFlags & Opcodes.ACC_DEPRECATED) != 0)) {
    return false;
  }
  // A Synthetic attribute is only needed for pre-1.5 class files (later versions use the
  // ACC_SYNTHETIC flag directly), so the source must have one exactly in that case.
  boolean needSyntheticAttribute =
      symbolTable.getMajorVersion() < Opcodes.V1_5 && (accessFlags & Opcodes.ACC_SYNTHETIC) != 0;
  if (hasSyntheticAttribute != needSyntheticAttribute) {
    return false;
  }
  // The declared exceptions must match exactly (same count, same order).
  if (exceptionsOffset == 0) {
    if (numberOfExceptions != 0) {
      return false;
    }
  } else {
    // Bug fix: the previous code used 'else if (count == numberOfExceptions)', so a source
    // Exceptions attribute with a DIFFERENT count fell through and wrongly returned true.
    // Returning false on a count mismatch is always safe: it merely disables the attribute
    // copy optimization and forces the attributes to be regenerated.
    if (source.readUnsignedShort(exceptionsOffset) != numberOfExceptions) {
      return false;
    }
    int currentExceptionOffset = exceptionsOffset + 2;
    for (int i = 0; i < numberOfExceptions; ++i) {
      if (source.readUnsignedShort(currentExceptionOffset) != exceptionIndexTable[i]) {
        return false;
      }
      currentExceptionOffset += 2;
    }
  }
  return true;
}
/**
* Sets the source from which the attributes of this method will be copied.
*
* @param methodInfoOffset the offset in 'symbolTable.getSource()' of the method_info JVMS
* structure from which the attributes of this method will be copied.
* @param methodInfoLength the length in 'symbolTable.getSource()' of the method_info JVMS
* structure from which the attributes of this method will be copied.
*/
void setMethodAttributesSource(final int methodInfoOffset, final int methodInfoLength) {
// Don't copy the attributes yet, instead store their location in the source | has |
java | apache__maven | api/maven-api-core/src/main/java/org/apache/maven/api/services/ArtifactDeployer.java | {
"start": 1334,
"end": 2163
} | interface ____ extends Service {
/**
* @param request {@link ArtifactDeployerRequest}
* @throws ArtifactDeployerException if the deployment failed
*/
void deploy(@Nonnull ArtifactDeployerRequest request);
/**
* @param session the repository session
* @param repository the repository to deploy to
* @param artifacts the collection of artifacts to deploy
* @throws ArtifactDeployerException if the deployment failed
* @throws IllegalArgumentException if an argument is {@code null} or invalid
*/
default void deploy(
@Nonnull Session session,
@Nonnull RemoteRepository repository,
@Nonnull Collection<ProducedArtifact> artifacts) {
deploy(ArtifactDeployerRequest.build(session, repository, artifacts));
}
}
| ArtifactDeployer |
java | apache__kafka | server-common/src/main/java/org/apache/kafka/server/common/UnitTestFeatureVersion.java | {
"start": 2409,
"end": 3755
} | enum ____ implements FeatureVersion {
UT_FV1_0(0, MetadataVersion.MINIMUM_VERSION, Map.of()),
UT_FV1_1(1, MetadataVersion.IBP_3_7_IV0, Map.of());
private final short featureLevel;
private final MetadataVersion bootstrapMetadataVersion;
private final Map<String, Short> dependencies;
public static final String FEATURE_NAME = "unit.test.feature.version.1";
public static final FV1 LATEST_PRODUCTION = UT_FV1_0;
FV1(int featureLevel, MetadataVersion bootstrapMetadataVersion, Map<String, Short> dependencies) {
this.featureLevel = (short) featureLevel;
this.bootstrapMetadataVersion = bootstrapMetadataVersion;
this.dependencies = dependencies;
}
@Override
public short featureLevel() {
return featureLevel;
}
@Override
public String featureName() {
return FEATURE_NAME;
}
@Override
public MetadataVersion bootstrapMetadataVersion() {
return bootstrapMetadataVersion;
}
@Override
public Map<String, Short> dependencies() {
return dependencies;
}
}
/**
* The feature is used to test the dependency of the latest production that is not yet production ready.
*/
public | FV1 |
java | apache__hadoop | hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java | {
"start": 2699,
"end": 2801
} | class ____ RetriableCommand to implement the copy of files,
* with retries on failure.
*/
public | extends |
java | google__dagger | javatests/dagger/internal/codegen/ComponentBuilderTest.java | {
"start": 5644,
"end": 6044
} | interface ____");
});
}
@Test
public void testSetterReturningNonVoidOrBuilderFails() {
Source componentFile =
CompilerTests.javaSource(
"test.SimpleComponent",
"package test;",
"",
"import dagger.Component;",
"import javax.inject.Provider;",
"",
"@Component",
"abstract | Builder |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollectorWithAgg.java | {
"start": 1822,
"end": 1960
} | class ____
* writes to Timeline Service.
*
* App-related lifecycle management is handled by this service.
*/
@Private
@Unstable
public | for |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/rest/handler/router/RouterTest.java | {
"start": 6699,
"end": 6745
} | class ____ implements Action {}
private | Index |
java | apache__camel | components/camel-twilio/src/generated/java/org/apache/camel/component/twilio/internal/OutgoingCallerIdApiMethod.java | {
"start": 668,
"end": 2741
} | enum ____ implements ApiMethod {
DELETER(
com.twilio.rest.api.v2010.account.OutgoingCallerIdDeleter.class,
"deleter",
arg("pathSid", String.class)),
DELETER_1(
com.twilio.rest.api.v2010.account.OutgoingCallerIdDeleter.class,
"deleter",
arg("pathAccountSid", String.class),
arg("pathSid", String.class)),
FETCHER(
com.twilio.rest.api.v2010.account.OutgoingCallerIdFetcher.class,
"fetcher",
arg("pathSid", String.class)),
FETCHER_1(
com.twilio.rest.api.v2010.account.OutgoingCallerIdFetcher.class,
"fetcher",
arg("pathAccountSid", String.class),
arg("pathSid", String.class)),
READER(
com.twilio.rest.api.v2010.account.OutgoingCallerIdReader.class,
"reader"),
READER_1(
com.twilio.rest.api.v2010.account.OutgoingCallerIdReader.class,
"reader",
arg("pathAccountSid", String.class)),
UPDATER(
com.twilio.rest.api.v2010.account.OutgoingCallerIdUpdater.class,
"updater",
arg("pathSid", String.class)),
UPDATER_1(
com.twilio.rest.api.v2010.account.OutgoingCallerIdUpdater.class,
"updater",
arg("pathAccountSid", String.class),
arg("pathSid", String.class));
private final ApiMethod apiMethod;
OutgoingCallerIdApiMethod(Class<?> resultType, String name, ApiMethodArg... args) {
this.apiMethod = new ApiMethodImpl(OutgoingCallerId.class, resultType, name, args);
}
@Override
public String getName() { return apiMethod.getName(); }
@Override
public Class<?> getResultType() { return apiMethod.getResultType(); }
@Override
public List<String> getArgNames() { return apiMethod.getArgNames(); }
@Override
public List<String> getSetterArgNames() { return apiMethod.getSetterArgNames(); }
@Override
public List<Class<?>> getArgTypes() { return apiMethod.getArgTypes(); }
@Override
public Method getMethod() { return apiMethod.getMethod(); }
}
| OutgoingCallerIdApiMethod |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/util/EnumResolver.java | {
"start": 14477,
"end": 14615
} | enum ____ to use for deserialization.
*/
public boolean hasAsValueAnnotation() {
return _hasAsValueAnnotation;
}
}
| values |
java | apache__camel | components/camel-dapr/src/test/java/org/apache/camel/component/dapr/operations/DaprSecretTest.java | {
"start": 1965,
"end": 4908
} | class ____ extends CamelTestSupport {
@Mock
private DaprClient client;
@Mock
private DaprEndpoint endpoint;
@Test
void testGetSecret() throws Exception {
final Map<String, String> mockResponse = Map.of("myKey", "myVal");
when(endpoint.getClient()).thenReturn(client);
when(client.getSecret(any(GetSecretRequest.class))).thenReturn(Mono.just(mockResponse));
DaprConfiguration configuration = new DaprConfiguration();
configuration.setOperation(DaprOperation.secret);
configuration.setSecretStore("myStore");
configuration.setKey("myKey");
DaprConfigurationOptionsProxy configurationOptionsProxy = new DaprConfigurationOptionsProxy(configuration);
final Exchange exchange = new DefaultExchange(context);
final DaprSecretHandler operation = new DaprSecretHandler(configurationOptionsProxy, endpoint);
final DaprOperationResponse operationResponse = operation.handle(exchange);
assertNotNull(operationResponse);
assertEquals(mockResponse, operationResponse.getBody());
}
@Test
void testGetBulkSecret() throws Exception {
final Map<String, Map<String, String>> mockResponse = Map.of("secretKey1", Map.of("myKey1", "myVal1"),
"secretKey2", Map.of("myKey2", "myVal2"));
when(endpoint.getClient()).thenReturn(client);
when(client.getBulkSecret(any(GetBulkSecretRequest.class))).thenReturn(Mono.just(mockResponse));
DaprConfiguration configuration = new DaprConfiguration();
configuration.setOperation(DaprOperation.secret);
configuration.setSecretStore("myStore");
DaprConfigurationOptionsProxy configurationOptionsProxy = new DaprConfigurationOptionsProxy(configuration);
final Exchange exchange = new DefaultExchange(context);
final DaprSecretHandler operation = new DaprSecretHandler(configurationOptionsProxy, endpoint);
final DaprOperationResponse operationResponse = operation.handle(exchange);
assertNotNull(operationResponse);
assertEquals(mockResponse, operationResponse.getBody());
}
@Test
void testValidateConfiguration() {
DaprConfiguration configuration = new DaprConfiguration();
configuration.setOperation(DaprOperation.secret);
DaprConfigurationOptionsProxy configurationOptionsProxy = new DaprConfigurationOptionsProxy(configuration);
final Exchange exchange = new DefaultExchange(context);
final DaprSecretHandler operation = new DaprSecretHandler(configurationOptionsProxy, endpoint);
// case 1: secretStore is empty
assertThrows(IllegalArgumentException.class, () -> operation.validateConfiguration(exchange));
// case 2: valid configuration
configuration.setSecretStore("myStore");
assertDoesNotThrow(() -> operation.validateConfiguration(exchange));
}
}
| DaprSecretTest |
java | quarkusio__quarkus | integration-tests/micrometer-prometheus/src/main/java/io/quarkus/it/micrometer/prometheus/AnnotatedResource.java | {
"start": 3961,
"end": 4460
} | class ____ implements Supplier<Object> {
boolean fail;
Response(boolean fail) {
this.fail = fail;
}
@Override
public Object get() {
try {
Thread.sleep(3);
} catch (InterruptedException e) {
// intentionally empty
}
if (fail) {
throw new NullPointerException("Failed on purpose");
}
return new Object();
}
}
}
| Response |
java | apache__flink | flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/runtime/utils/JavaUserDefinedAggFunctions.java | {
"start": 13783,
"end": 13917
} | class ____ {
public long count;
}
/** Sum aggregate function with multiple arguments. */
public static | MultiArgSumAcc |
java | quarkusio__quarkus | extensions/hibernate-search-orm-elasticsearch/runtime/src/main/java/io/quarkus/hibernate/search/orm/elasticsearch/runtime/HibernateSearchElasticsearchRuntimeConfigPersistenceUnit.java | {
"start": 8507,
"end": 8783
} | interface ____ {
/**
* The strategy to use when loading entities during the execution of a search query.
*/
@WithDefault("skip")
EntityLoadingCacheLookupStrategy strategy();
}
@ConfigGroup
| SearchQueryLoadingCacheLookupConfig |
java | hibernate__hibernate-orm | hibernate-envers/src/main/java/org/hibernate/envers/internal/synchronization/work/FakeBidirectionalRelationWorkUnit.java | {
"start": 5312,
"end": 7577
} | class ____ {
private final Object owningEntity;
private final RelationDescription rd;
private final RevisionType revisionType;
private final Object index;
public FakeRelationChange(
Object owningEntity, RelationDescription rd, RevisionType revisionType,
Object index) {
this.owningEntity = owningEntity;
this.rd = rd;
this.revisionType = revisionType;
this.index = index;
}
public RevisionType getRevisionType() {
return revisionType;
}
public void generateData(SharedSessionContractImplementor sessionImplementor, Map<String, Object> data) {
// If the revision type is "DEL", it means that the object is removed from the collection. Then the
// new owner will in fact be null.
rd.getFakeBidirectionalRelationMapper().mapToMapFromEntity(
sessionImplementor, data,
revisionType == RevisionType.DEL ? null : owningEntity, null
);
rd.getFakeBidirectionalRelationMapper().mapModifiedFlagsToMapFromEntity(
sessionImplementor, data,
revisionType == RevisionType.DEL ? null : owningEntity, null
);
// Also mapping the index, if the collection is indexed.
if ( rd.getFakeBidirectionalRelationIndexMapper() != null ) {
rd.getFakeBidirectionalRelationIndexMapper().mapToMapFromEntity(
sessionImplementor, data,
revisionType == RevisionType.DEL ? null : index, null
);
rd.getFakeBidirectionalRelationIndexMapper().mapModifiedFlagsToMapFromEntity(
sessionImplementor, data,
revisionType == RevisionType.DEL ? null : index, null
);
}
}
public static FakeRelationChange merge(FakeRelationChange first, FakeRelationChange second) {
if ( first == null ) {
return second;
}
if ( second == null ) {
return first;
}
/*
* The merging rules are the following (revision types of the first and second changes):
* - DEL, DEL - return any (the work units are the same)
* - DEL, ADD - return ADD (points to new owner)
* - ADD, DEL - return ADD (points to new owner)
* - ADD, ADD - return second (points to newer owner)
*/
if ( first.getRevisionType() == RevisionType.DEL || second.getRevisionType() == RevisionType.ADD ) {
return second;
}
else {
return first;
}
}
}
}
| FakeRelationChange |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/component/browse/BrowseLimitTest.java | {
"start": 1153,
"end": 2566
} | class ____ extends ContextTestSupport {
protected final Object body1 = "one";
protected final Object body2 = "two";
protected final Object body3 = "three";
protected final Object body4 = "four";
protected final Object body5 = "five";
@Test
public void testLimit() throws Exception {
template.sendBody("browse:foo?browseLimit=1", body1);
template.sendBody("browse:foo?browseLimit=1", body2);
template.sendBody("browse:foo?browseLimit=1", body3);
template.sendBody("browse:foo?browseLimit=1", body4);
template.sendBody("browse:foo?browseLimit=1", body5);
Collection<Endpoint> list = context.getEndpoints();
assertEquals(2, list.size(), "number of endpoints");
BrowsableEndpoint be1 = context.getEndpoint("browse:foo?browseLimit=1", BrowsableEndpoint.class);
assertEquals(1, be1.getExchanges().size());
assertEquals("five", be1.getExchanges().get(0).getMessage().getBody());
BrowsableEndpoint be2 = context.getEndpoint("browse:bar?browseLimit=5", BrowsableEndpoint.class);
assertEquals(5, be2.getExchanges().size());
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
from("browse:foo?browseLimit=1").to("browse:bar?browseLimit=5");
}
};
}
}
| BrowseLimitTest |
java | quarkusio__quarkus | extensions/smallrye-graphql/runtime/src/main/java/io/quarkus/smallrye/graphql/runtime/SmallRyeAuthGraphQLTransportWSSubprotocolHandler.java | {
"start": 690,
"end": 2485
} | class ____ extends GraphQLTransportWSSubprotocolHandler {
private final SmallRyeAuthGraphQLWSHandler authHander;
public SmallRyeAuthGraphQLTransportWSSubprotocolHandler(GraphQLWebSocketSession session,
Map<String, Object> context,
RoutingContext ctx,
SmallRyeGraphQLAbstractHandler handler,
Optional<String> authorizationClientInitPayloadName) {
super(session, context);
this.authHander = new SmallRyeAuthGraphQLWSHandler(session, ctx, handler, authorizationClientInitPayloadName);
}
@Override
protected void onMessage(JsonObject message) {
if (message != null && message.getString("type").equals("connection_init")) {
Map<String, Object> payload = (Map<String, Object>) message.get("payload");
this.authHander.handlePayload(payload, () -> {
// Identity has been updated. Now pass the successful connection_init back to the SmallRye GraphQL library to take over
super.onMessage(message);
}, failure -> {
// Failure handling Authorization. This method triggers a 4401 (Unauthorized).
if (!session.isClosed()) {
if (failure instanceof SmallRyeAuthSecurityIdentityAlreadyAssignedException) {
session.close((short) 4400, "Authorization specified in multiple locations");
} else {
session.close((short) 4403, "Forbidden");
}
}
});
} else {
super.onMessage(message);
}
}
@Override
public void onClose() {
super.onClose();
this.authHander.cancelAuthExpiry();
}
}
| SmallRyeAuthGraphQLTransportWSSubprotocolHandler |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/localdate/LocalDateAssert_isIn_Test.java | {
"start": 1364,
"end": 2752
} | class ____ extends LocalDateAssertBaseTest {
@Test
void should_pass_if_actual_is_in_dates_as_string_array_parameter() {
assertThat(REFERENCE).isIn(REFERENCE.toString(), AFTER.toString());
}
@Test
void should_fail_if_actual_is_not_in_dates_as_string_array_parameter() {
// WHEN
ThrowingCallable code = () -> assertThat(REFERENCE).isIn(AFTER.toString(), BEFORE.toString());
// THEN
assertThatAssertionErrorIsThrownBy(code).withMessage(shouldBeIn(REFERENCE, asList(AFTER, BEFORE)).create());
}
@Test
void should_fail_if_dates_as_string_array_parameter_is_null() {
// GIVEN
String[] otherLocalDatesAsString = null;
// WHEN
ThrowingCallable code = () -> assertThat(LocalDate.now()).isIn(otherLocalDatesAsString);
// THEN
assertThatIllegalArgumentException().isThrownBy(code)
.withMessage("The given LocalDate array should not be null");
}
@Test
void should_fail_if_dates_as_string_array_parameter_is_empty() {
// GIVEN
String[] otherLocalDatesAsString = new String[0];
// WHEN
ThrowingCallable code = () -> assertThat(LocalDate.now()).isIn(otherLocalDatesAsString);
// THEN
assertThatIllegalArgumentException().isThrownBy(code)
.withMessage("The given LocalDate array should not be empty");
}
}
| LocalDateAssert_isIn_Test |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest-client/deployment/src/main/java/io/quarkus/rest/client/reactive/deployment/DotNames.java | {
"start": 1227,
"end": 3561
} | class ____ {
public static final DotName REGISTER_PROVIDER = DotName.createSimple(RegisterProvider.class.getName());
public static final DotName REGISTER_PROVIDERS = DotName.createSimple(RegisterProviders.class.getName());
public static final DotName CLIENT_HEADER_PARAM = DotName.createSimple(ClientHeaderParam.class.getName());
public static final DotName CLIENT_HEADER_PARAMS = DotName.createSimple(ClientHeaderParams.class.getName());
public static final DotName CLIENT_QUERY_PARAM = DotName.createSimple(ClientQueryParam.class.getName());
public static final DotName CLIENT_QUERY_PARAMS = DotName.createSimple(ClientQueryParams.class.getName());
public static final DotName CLIENT_FORM_PARAM = DotName.createSimple(ClientFormParam.class.getName());
public static final DotName CLIENT_FORM_PARAMS = DotName.createSimple(ClientFormParams.class.getName());
public static final DotName REGISTER_CLIENT_HEADERS = DotName.createSimple(RegisterClientHeaders.class.getName());
public static final DotName CLIENT_REQUEST_FILTER = DotName.createSimple(ClientRequestFilter.class.getName());
public static final DotName CLIENT_RESPONSE_FILTER = DotName.createSimple(ClientResponseFilter.class.getName());
public static final DotName CLIENT_EXCEPTION_MAPPER = DotName.createSimple(ClientExceptionMapper.class.getName());
public static final DotName CLIENT_REDIRECT_HANDLER = DotName.createSimple(ClientRedirectHandler.class.getName());
public static final DotName CLIENT_BASIC_AUTH = DotName.createSimple(ClientBasicAuth.class.getName());
public static final DotName RESPONSE_EXCEPTION_MAPPER = DotName.createSimple(ResponseExceptionMapper.class.getName());
static final DotName METHOD = DotName.createSimple(Method.class.getName());
static final DotName URI = DotName.createSimple(URI.class.getName());
static final DotName MAP = DotName.createSimple(Map.class.getName());
static final DotName MULTIVALUED_MAP = DotName.createSimple(MultivaluedMap.class.getName());
static final DotName STRING = DotName.createSimple(String.class.getName());
static final DotName OBJECT = DotName.createSimple(Object.class.getName());
public static final DotName SSE_EVENT_FILTER = DotName.createSimple(SseEventFilter.class);
private DotNames() {
}
}
| DotNames |
java | apache__rocketmq | remoting/src/main/java/org/apache/rocketmq/remoting/InvokeCallback.java | {
"start": 969,
"end": 1448
} | interface ____ {
/**
* This method is expected to be invoked after {@link #operationSucceed(RemotingCommand)}
* or {@link #operationFail(Throwable)}
*
* @param responseFuture the returned object contains response or exception
*/
void operationComplete(final ResponseFuture responseFuture);
default void operationSucceed(final RemotingCommand response) {
}
default void operationFail(final Throwable throwable) {
}
}
| InvokeCallback |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/locking/RemoveEntityTest.java | {
"start": 1364,
"end": 3342
} | class ____ {
private static final String EMPLOYEE_TO_DELETE_MAIL = "demo-user@mail.com";
@BeforeEach
public void setUp(SessionFactoryScope scope) {
scope.inTransaction(session -> {
var emp = new EmployeeEntity();
emp.setEmail( EMPLOYEE_TO_DELETE_MAIL );
session.persist( emp );
var linkEntity = new LinkEntity();
linkEntity.setEmployeeId( emp.getEmployeeId() );
session.persist( linkEntity );
var link = Set.of( linkEntity );
emp.setFolderLink( link );
session.persist( emp );
emp = new EmployeeEntity();
emp.setEmail( "demo-user2@mail.com" );
session.persist( emp );
} );
}
@AfterEach
void tearDown(SessionFactoryScope factoryScope) {
factoryScope.dropData();
}
@Test
public void testDelete(SessionFactoryScope scope) {
scope.inTransaction(session -> {
var employee = session.createQuery(
"FROM EmployeeEntity e where e.email = :mail",
EmployeeEntity.class
).setParameter( "mail", EMPLOYEE_TO_DELETE_MAIL ).getSingleResult();
assertThat( employee ).isNotNull();
var links = session.createQuery( "FROM LinkEntity", LinkEntity.class ).list();
assertThat( links.size() ).isEqualTo( 1 );
var linkEntity = links.get( 0 );
assertThat( linkEntity.getEmployeeId() ).isEqualTo( employee.getEmployeeId() );
session.remove( linkEntity );
session.remove( employee );
} );
scope.inTransaction(session -> {
var employees = session.createQuery(
"FROM EmployeeEntity e where e.email = :mail",
EmployeeEntity.class
).setParameter( "mail", EMPLOYEE_TO_DELETE_MAIL ).getResultList();
assertThat( employees.size() ).isEqualTo( 0 );
var links = session.createQuery( "FROM LinkEntity", LinkEntity.class ).list();
assertThat( links.size() ).isEqualTo( 0 );
} );
}
@Entity(name = "EmployeeEntity")
@Table(name = "Employee", uniqueConstraints = {
@UniqueConstraint(columnNames = "ID"),
@UniqueConstraint(columnNames = "EMAIL")
})
public static | RemoveEntityTest |
java | elastic__elasticsearch | x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformIndexer.java | {
"start": 60549,
"end": 60775
} | class ____ extends ResourceNotFoundException {
TransformConfigLostOnReloadException(String msg, Throwable cause, Object... args) {
super(msg, cause, args);
}
}
}
| TransformConfigLostOnReloadException |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java | {
"start": 3293,
"end": 20769
} | class ____ {
private static final long PREFERRED_BLOCK_SIZE = 128 * 1024 * 1024;
private static final short REPLICATION = 3;
private static final String SUPERGROUP = "supergroup";
private static final String SUPERUSER = "superuser";
private static final UserGroupInformation BRUCE =
UserGroupInformation.createUserForTesting("bruce", new String[] { });
private static final UserGroupInformation DIANA =
UserGroupInformation.createUserForTesting("diana", new String[] { "sales" });
private static final UserGroupInformation CLARK =
UserGroupInformation.createUserForTesting("clark", new String[] { "execs" });
private FSDirectory dir;
private INodeDirectory inodeRoot;
@BeforeEach
public void setUp() throws IOException {
Configuration conf = new Configuration();
FSNamesystem fsn = mock(FSNamesystem.class);
doAnswer(new Answer() {
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
Object[] args = invocation.getArguments();
FsPermission perm = (FsPermission) args[0];
return new PermissionStatus(SUPERUSER, SUPERGROUP, perm);
}
}).when(fsn).createFsOwnerPermissions(any(FsPermission.class));
dir = new FSDirectory(fsn, conf);
inodeRoot = dir.getRoot();
}
@Test
public void testAclOwner() throws IOException {
INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "execs",
(short)0640);
addAcl(inodeFile,
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, MASK, READ),
aclEntry(ACCESS, OTHER, NONE));
assertPermissionGranted(BRUCE, "/file1", READ);
assertPermissionGranted(BRUCE, "/file1", WRITE);
assertPermissionGranted(BRUCE, "/file1", READ_WRITE);
assertPermissionDenied(BRUCE, "/file1", EXECUTE);
assertPermissionDenied(DIANA, "/file1", READ);
assertPermissionDenied(DIANA, "/file1", WRITE);
assertPermissionDenied(DIANA, "/file1", EXECUTE);
}
@Test
public void testAclNamedUser() throws IOException {
INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "execs",
(short)0640);
addAcl(inodeFile,
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, USER, "diana", READ),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, MASK, READ),
aclEntry(ACCESS, OTHER, NONE));
assertPermissionGranted(DIANA, "/file1", READ);
assertPermissionDenied(DIANA, "/file1", WRITE);
assertPermissionDenied(DIANA, "/file1", EXECUTE);
assertPermissionDenied(DIANA, "/file1", READ_WRITE);
assertPermissionDenied(DIANA, "/file1", READ_EXECUTE);
assertPermissionDenied(DIANA, "/file1", WRITE_EXECUTE);
assertPermissionDenied(DIANA, "/file1", ALL);
}
@Test
public void testAclNamedUserDeny() throws IOException {
INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "execs",
(short)0644);
addAcl(inodeFile,
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, USER, "diana", NONE),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, MASK, READ),
aclEntry(ACCESS, OTHER, READ));
assertPermissionGranted(BRUCE, "/file1", READ_WRITE);
assertPermissionGranted(CLARK, "/file1", READ);
assertPermissionDenied(DIANA, "/file1", READ);
}
@Test
public void testAclNamedUserTraverseDeny() throws IOException {
INodeDirectory inodeDir = createINodeDirectory(inodeRoot, "dir1", "bruce",
"execs", (short)0755);
INodeFile inodeFile = createINodeFile(inodeDir, "file1", "bruce", "execs",
(short)0644);
addAcl(inodeDir,
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, "diana", NONE),
aclEntry(ACCESS, GROUP, READ_EXECUTE),
aclEntry(ACCESS, MASK, READ_EXECUTE),
aclEntry(ACCESS, OTHER, READ_EXECUTE));
assertPermissionGranted(BRUCE, "/dir1/file1", READ_WRITE);
assertPermissionGranted(CLARK, "/dir1/file1", READ);
assertPermissionDenied(DIANA, "/dir1/file1", READ);
assertPermissionDenied(DIANA, "/dir1/file1", WRITE);
assertPermissionDenied(DIANA, "/dir1/file1", EXECUTE);
assertPermissionDenied(DIANA, "/dir1/file1", READ_WRITE);
assertPermissionDenied(DIANA, "/dir1/file1", READ_EXECUTE);
assertPermissionDenied(DIANA, "/dir1/file1", WRITE_EXECUTE);
assertPermissionDenied(DIANA, "/dir1/file1", ALL);
}
@Test
public void testAclNamedUserMask() throws IOException {
INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "execs",
(short)0620);
addAcl(inodeFile,
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, USER, "diana", READ),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, MASK, WRITE),
aclEntry(ACCESS, OTHER, NONE));
assertPermissionDenied(DIANA, "/file1", READ);
assertPermissionDenied(DIANA, "/file1", WRITE);
assertPermissionDenied(DIANA, "/file1", EXECUTE);
assertPermissionDenied(DIANA, "/file1", READ_WRITE);
assertPermissionDenied(DIANA, "/file1", READ_EXECUTE);
assertPermissionDenied(DIANA, "/file1", WRITE_EXECUTE);
assertPermissionDenied(DIANA, "/file1", ALL);
}
@Test
public void testAclGroup() throws IOException {
INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "execs",
(short)0640);
addAcl(inodeFile,
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, MASK, READ),
aclEntry(ACCESS, OTHER, NONE));
assertPermissionGranted(CLARK, "/file1", READ);
assertPermissionDenied(CLARK, "/file1", WRITE);
assertPermissionDenied(CLARK, "/file1", EXECUTE);
assertPermissionDenied(CLARK, "/file1", READ_WRITE);
assertPermissionDenied(CLARK, "/file1", READ_EXECUTE);
assertPermissionDenied(CLARK, "/file1", WRITE_EXECUTE);
assertPermissionDenied(CLARK, "/file1", ALL);
}
@Test
public void testAclGroupDeny() throws IOException {
INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "sales",
(short)0604);
addAcl(inodeFile,
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, GROUP, NONE),
aclEntry(ACCESS, MASK, NONE),
aclEntry(ACCESS, OTHER, READ));
assertPermissionGranted(BRUCE, "/file1", READ_WRITE);
assertPermissionGranted(CLARK, "/file1", READ);
assertPermissionDenied(DIANA, "/file1", READ);
assertPermissionDenied(DIANA, "/file1", WRITE);
assertPermissionDenied(DIANA, "/file1", EXECUTE);
assertPermissionDenied(DIANA, "/file1", READ_WRITE);
assertPermissionDenied(DIANA, "/file1", READ_EXECUTE);
assertPermissionDenied(DIANA, "/file1", WRITE_EXECUTE);
assertPermissionDenied(DIANA, "/file1", ALL);
}
@Test
public void testAclGroupTraverseDeny() throws IOException {
INodeDirectory inodeDir = createINodeDirectory(inodeRoot, "dir1", "bruce",
"execs", (short)0755);
INodeFile inodeFile = createINodeFile(inodeDir, "file1", "bruce", "execs",
(short)0644);
addAcl(inodeDir,
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, GROUP, NONE),
aclEntry(ACCESS, MASK, NONE),
aclEntry(ACCESS, OTHER, READ_EXECUTE));
assertPermissionGranted(BRUCE, "/dir1/file1", READ_WRITE);
assertPermissionGranted(DIANA, "/dir1/file1", READ);
assertPermissionDenied(CLARK, "/dir1/file1", READ);
assertPermissionDenied(CLARK, "/dir1/file1", WRITE);
assertPermissionDenied(CLARK, "/dir1/file1", EXECUTE);
assertPermissionDenied(CLARK, "/dir1/file1", READ_WRITE);
assertPermissionDenied(CLARK, "/dir1/file1", READ_EXECUTE);
assertPermissionDenied(CLARK, "/dir1/file1", WRITE_EXECUTE);
assertPermissionDenied(CLARK, "/dir1/file1", ALL);
}
@Test
public void testAclGroupTraverseDenyOnlyDefaultEntries() throws IOException {
INodeDirectory inodeDir = createINodeDirectory(inodeRoot, "dir1", "bruce",
"execs", (short)0755);
INodeFile inodeFile = createINodeFile(inodeDir, "file1", "bruce", "execs",
(short)0644);
addAcl(inodeDir,
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, GROUP, NONE),
aclEntry(ACCESS, OTHER, READ_EXECUTE),
aclEntry(DEFAULT, USER, ALL),
aclEntry(DEFAULT, GROUP, "sales", NONE),
aclEntry(DEFAULT, GROUP, NONE),
aclEntry(DEFAULT, OTHER, READ_EXECUTE));
assertPermissionGranted(BRUCE, "/dir1/file1", READ_WRITE);
assertPermissionGranted(DIANA, "/dir1/file1", READ);
assertPermissionDenied(CLARK, "/dir1/file1", READ);
assertPermissionDenied(CLARK, "/dir1/file1", WRITE);
assertPermissionDenied(CLARK, "/dir1/file1", EXECUTE);
assertPermissionDenied(CLARK, "/dir1/file1", READ_WRITE);
assertPermissionDenied(CLARK, "/dir1/file1", READ_EXECUTE);
assertPermissionDenied(CLARK, "/dir1/file1", WRITE_EXECUTE);
assertPermissionDenied(CLARK, "/dir1/file1", ALL);
}
@Test
public void testAclGroupMask() throws IOException {
INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "execs",
(short)0644);
addAcl(inodeFile,
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, GROUP, READ_WRITE),
aclEntry(ACCESS, MASK, READ),
aclEntry(ACCESS, OTHER, READ));
assertPermissionGranted(BRUCE, "/file1", READ_WRITE);
assertPermissionGranted(CLARK, "/file1", READ);
assertPermissionDenied(CLARK, "/file1", WRITE);
assertPermissionDenied(CLARK, "/file1", EXECUTE);
assertPermissionDenied(CLARK, "/file1", READ_WRITE);
assertPermissionDenied(CLARK, "/file1", READ_EXECUTE);
assertPermissionDenied(CLARK, "/file1", WRITE_EXECUTE);
assertPermissionDenied(CLARK, "/file1", ALL);
}
@Test
public void testAclNamedGroup() throws IOException {
INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "execs",
(short)0640);
addAcl(inodeFile,
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, GROUP, "sales", READ),
aclEntry(ACCESS, MASK, READ),
aclEntry(ACCESS, OTHER, NONE));
assertPermissionGranted(BRUCE, "/file1", READ_WRITE);
assertPermissionGranted(CLARK, "/file1", READ);
assertPermissionGranted(DIANA, "/file1", READ);
assertPermissionDenied(DIANA, "/file1", WRITE);
assertPermissionDenied(DIANA, "/file1", EXECUTE);
assertPermissionDenied(DIANA, "/file1", READ_WRITE);
assertPermissionDenied(DIANA, "/file1", READ_EXECUTE);
assertPermissionDenied(DIANA, "/file1", ALL);
}
@Test
public void testAclNamedGroupDeny() throws IOException {
INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "sales",
(short)0644);
addAcl(inodeFile,
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, GROUP, "execs", NONE),
aclEntry(ACCESS, MASK, READ),
aclEntry(ACCESS, OTHER, READ));
assertPermissionGranted(BRUCE, "/file1", READ_WRITE);
assertPermissionGranted(DIANA, "/file1", READ);
assertPermissionDenied(CLARK, "/file1", READ);
assertPermissionDenied(CLARK, "/file1", WRITE);
assertPermissionDenied(CLARK, "/file1", EXECUTE);
assertPermissionDenied(CLARK, "/file1", READ_WRITE);
assertPermissionDenied(CLARK, "/file1", READ_EXECUTE);
assertPermissionDenied(CLARK, "/file1", WRITE_EXECUTE);
assertPermissionDenied(CLARK, "/file1", ALL);
}
@Test
public void testAclNamedGroupTraverseDeny() throws IOException {
INodeDirectory inodeDir = createINodeDirectory(inodeRoot, "dir1", "bruce",
"execs", (short)0755);
INodeFile inodeFile = createINodeFile(inodeDir, "file1", "bruce", "execs",
(short)0644);
addAcl(inodeDir,
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, GROUP, READ_EXECUTE),
aclEntry(ACCESS, GROUP, "sales", NONE),
aclEntry(ACCESS, MASK, READ_EXECUTE),
aclEntry(ACCESS, OTHER, READ_EXECUTE));
assertPermissionGranted(BRUCE, "/dir1/file1", READ_WRITE);
assertPermissionGranted(CLARK, "/dir1/file1", READ);
assertPermissionDenied(DIANA, "/dir1/file1", READ);
assertPermissionDenied(DIANA, "/dir1/file1", WRITE);
assertPermissionDenied(DIANA, "/dir1/file1", EXECUTE);
assertPermissionDenied(DIANA, "/dir1/file1", READ_WRITE);
assertPermissionDenied(DIANA, "/dir1/file1", READ_EXECUTE);
assertPermissionDenied(DIANA, "/dir1/file1", WRITE_EXECUTE);
assertPermissionDenied(DIANA, "/dir1/file1", ALL);
}
@Test
public void testAclNamedGroupMask() throws IOException {
INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "execs",
(short)0644);
addAcl(inodeFile,
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, GROUP, "sales", READ_WRITE),
aclEntry(ACCESS, MASK, READ),
aclEntry(ACCESS, OTHER, READ));
assertPermissionGranted(BRUCE, "/file1", READ_WRITE);
assertPermissionGranted(CLARK, "/file1", READ);
assertPermissionGranted(DIANA, "/file1", READ);
assertPermissionDenied(DIANA, "/file1", WRITE);
assertPermissionDenied(DIANA, "/file1", EXECUTE);
assertPermissionDenied(DIANA, "/file1", READ_WRITE);
assertPermissionDenied(DIANA, "/file1", READ_EXECUTE);
assertPermissionDenied(DIANA, "/file1", WRITE_EXECUTE);
assertPermissionDenied(DIANA, "/file1", ALL);
}
@Test
public void testAclOther() throws IOException {
INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "sales",
(short)0774);
addAcl(inodeFile,
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, "diana", ALL),
aclEntry(ACCESS, GROUP, READ_WRITE),
aclEntry(ACCESS, MASK, ALL),
aclEntry(ACCESS, OTHER, READ));
assertPermissionGranted(BRUCE, "/file1", ALL);
assertPermissionGranted(DIANA, "/file1", ALL);
assertPermissionGranted(CLARK, "/file1", READ);
assertPermissionDenied(CLARK, "/file1", WRITE);
assertPermissionDenied(CLARK, "/file1", EXECUTE);
assertPermissionDenied(CLARK, "/file1", READ_WRITE);
assertPermissionDenied(CLARK, "/file1", READ_EXECUTE);
assertPermissionDenied(CLARK, "/file1", WRITE_EXECUTE);
assertPermissionDenied(CLARK, "/file1", ALL);
}
private void addAcl(INodeWithAdditionalFields inode, AclEntry... acl)
throws IOException {
AclStorage.updateINodeAcl(inode,
Arrays.asList(acl), Snapshot.CURRENT_STATE_ID);
}
private void assertPermissionGranted(UserGroupInformation user, String path,
FsAction access) throws IOException {
INodesInPath iip = dir.getINodesInPath(path, DirOp.READ);
dir.getPermissionChecker(SUPERUSER, SUPERGROUP, user).checkPermission(iip,
false, null, null, access, null, false);
}
private void assertPermissionDenied(UserGroupInformation user, String path,
FsAction access) throws IOException {
try {
INodesInPath iip = dir.getINodesInPath(path, DirOp.READ);
dir.getPermissionChecker(SUPERUSER, SUPERGROUP, user).checkPermission(iip,
false, null, null, access, null, false);
fail("expected AccessControlException for user + " + user + ", path = " +
path + ", access = " + access);
} catch (AccessControlException e) {
assertTrue(e.getMessage().contains(user.getUserName().toString()),
"Permission denied messages must carry the username");
assertTrue(e.getMessage().contains(new Path(path).getParent().toUri().getPath()),
"Permission denied messages must carry the path parent");
}
}
private static INodeDirectory createINodeDirectory(INodeDirectory parent,
String name, String owner, String group, short perm) throws IOException {
PermissionStatus permStatus = PermissionStatus.createImmutable(owner, group,
FsPermission.createImmutable(perm));
INodeDirectory inodeDirectory = new INodeDirectory(
HdfsConstants.GRANDFATHER_INODE_ID, name.getBytes(StandardCharsets.UTF_8), permStatus, 0L);
parent.addChild(inodeDirectory);
return inodeDirectory;
}
private static INodeFile createINodeFile(INodeDirectory parent, String name,
String owner, String group, short perm) throws IOException {
PermissionStatus permStatus = PermissionStatus.createImmutable(owner, group,
FsPermission.createImmutable(perm));
INodeFile inodeFile = new INodeFile(HdfsConstants.GRANDFATHER_INODE_ID,
name.getBytes(StandardCharsets.UTF_8), permStatus, 0L, 0L, null,
REPLICATION, PREFERRED_BLOCK_SIZE);
parent.addChild(inodeFile);
return inodeFile;
}
@Test
public void testCheckAccessControlEnforcerSlowness() throws Exception {
final long thresholdMs = 10;
final LongFunction<String> checkAccessControlEnforcerSlowness =
elapsedMs -> FSPermissionChecker.checkAccessControlEnforcerSlowness(
elapsedMs, thresholdMs, INodeAttributeProvider.AccessControlEnforcer.class,
false, "/foo", "mkdir", "client");
final String m1 = FSPermissionChecker.runCheckPermission(
() -> FSPermissionChecker.LOG.info("Fast runner"),
checkAccessControlEnforcerSlowness);
assertNull(m1);
final String m2 = FSPermissionChecker.runCheckPermission(() -> {
FSPermissionChecker.LOG.info("Slow runner");
try {
Thread.sleep(20);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new IllegalStateException(e);
}
}, checkAccessControlEnforcerSlowness);
assertNotNull(m2);
}
}
| TestFSPermissionChecker |
java | apache__kafka | streams/src/main/java/org/apache/kafka/streams/processor/internals/TopologyMetadata.java | {
"start": 2665,
"end": 3685
} | class ____ {
private Logger log;
// the "__" (double underscore) string is not allowed for topology names, so it's safe to use to indicate
// that it's not a named topology
public static final String UNNAMED_TOPOLOGY = "__UNNAMED_TOPOLOGY__";
private static final Pattern EMPTY_ZERO_LENGTH_PATTERN = Pattern.compile("");
private final StreamsConfig config;
private final ProcessingMode processingMode;
private final TopologyVersion version;
private final TaskExecutionMetadata taskExecutionMetadata;
private final Set<String> pausedTopologies;
private final ConcurrentNavigableMap<String, InternalTopologyBuilder> builders; // Keep sorted by topology name for readability
private ProcessorTopology globalTopology;
private final Map<String, StateStore> globalStateStores = new HashMap<>();
private final Set<String> allInputTopics = new HashSet<>();
private final Map<String, Long> threadVersions = new ConcurrentHashMap<>();
public static | TopologyMetadata |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/bug/Bug_for_leupom_3.java | {
"start": 701,
"end": 915
} | class ____ implements Model {
private Integer id;
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
}
}
| Person |
java | apache__camel | components/camel-opensearch/src/test/java/org/apache/camel/component/opensearch/integration/OpensearchSizeLimitIT.java | {
"start": 1220,
"end": 3211
} | class ____ extends OpensearchTestSupport {
@Test
void testSize() throws Exception {
//put 4
template().requestBody("direct:index", getContent("content"), String.class);
template().requestBody("direct:index", getContent("content1"), String.class);
template().requestBody("direct:index", getContent("content2"), String.class);
String response = template().requestBody("direct:index", getContent("content3"), String.class);
String query = """
{
"query" : {
"match_all": {}
}
}
""";
// Delay the execution, because the search is getting stale results
Awaitility.await().pollDelay(2, TimeUnit.SECONDS).untilAsserted(() -> {
HitsMetadata<?> searchWithSizeTwo = template().requestBody("direct:searchWithSizeTwo", query, HitsMetadata.class);
HitsMetadata<?> searchFrom3 = template().requestBody("direct:searchFrom3", query, HitsMetadata.class);
assertEquals(2, searchWithSizeTwo.hits().size());
assertEquals(1, searchFrom3.hits().size());
});
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:index")
.to("opensearch://opensearch?operation=Index&indexName=size-limit");
from("direct:searchWithSizeTwo")
.to("opensearch://opensearch?operation=Search&indexName=size-limit&size=2");
from("direct:searchFrom3")
.to("opensearch://opensearch?operation=Search&indexName=size-limit&from=3");
}
};
}
private Map<String, String> getContent(String content) {
Map<String, String> map = new HashMap<>();
map.put("content", content);
return map;
}
}
| OpensearchSizeLimitIT |
java | spring-projects__spring-boot | module/spring-boot-cassandra/src/main/java/org/springframework/boot/cassandra/autoconfigure/CassandraAutoConfiguration.java | {
"start": 13166,
"end": 15077
} | class ____ implements CassandraConnectionDetails {
private final CassandraProperties properties;
private final @Nullable SslBundles sslBundles;
private PropertiesCassandraConnectionDetails(CassandraProperties properties, @Nullable SslBundles sslBundles) {
this.properties = properties;
this.sslBundles = sslBundles;
}
@Override
public List<Node> getContactPoints() {
List<String> contactPoints = this.properties.getContactPoints();
return (contactPoints != null) ? contactPoints.stream().map(this::asNode).toList()
: Collections.emptyList();
}
@Override
public @Nullable String getUsername() {
return this.properties.getUsername();
}
@Override
public @Nullable String getPassword() {
return this.properties.getPassword();
}
@Override
public @Nullable String getLocalDatacenter() {
return this.properties.getLocalDatacenter();
}
@Override
public @Nullable SslBundle getSslBundle() {
Ssl ssl = this.properties.getSsl();
if (ssl == null || !ssl.isEnabled()) {
return null;
}
if (StringUtils.hasLength(ssl.getBundle())) {
Assert.notNull(this.sslBundles, "SSL bundle name has been set but no SSL bundles found in context");
return this.sslBundles.getBundle(ssl.getBundle());
}
return SslBundle.systemDefault();
}
private Node asNode(String contactPoint) {
int i = contactPoint.lastIndexOf(':');
if (i >= 0) {
String portCandidate = contactPoint.substring(i + 1);
Integer port = asPort(portCandidate);
if (port != null) {
return new Node(contactPoint.substring(0, i), port);
}
}
return new Node(contactPoint, this.properties.getPort());
}
private @Nullable Integer asPort(String value) {
try {
int i = Integer.parseInt(value);
return (i > 0 && i < 65535) ? i : null;
}
catch (Exception ex) {
return null;
}
}
}
}
| PropertiesCassandraConnectionDetails |
java | assertj__assertj-core | assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/api/boolean_/BooleanAssert_isFalse_Test.java | {
"start": 1028,
"end": 2190
} | class ____ {
@Test
void should_fail_if_actual_is_null() {
// GIVEN
Boolean actual = null;
// WHEN
var assertionError = expectAssertionError(() -> assertThat(actual).isFalse());
// THEN
then(assertionError).hasMessage(shouldNotBeNull().create());
}
@Test
void should_pass_if_primitive_boolean_is_false() {
// GIVEN
boolean actual = false;
// WHEN/THEN
assertThat(actual).isFalse();
}
@Test
void should_pass_if_Boolean_is_false() {
// GIVEN
Boolean actual = false;
// WHEN/THEN
assertThat(actual).isFalse();
}
@Test
void should_fail_if_primitive_boolean_is_true() {
// GIVEN
boolean actual = true;
// WHEN
var assertionError = expectAssertionError(() -> assertThat(actual).isFalse());
// THEN
then(assertionError).hasMessage(shouldBeFalse(actual).create());
}
@Test
void should_fail_if_Boolean_is_true() {
// GIVEN
Boolean actual = true;
// WHEN
var assertionError = expectAssertionError(() -> assertThat(actual).isFalse());
// THEN
then(assertionError).hasMessage(shouldBeFalse(actual).create());
}
}
| BooleanAssert_isFalse_Test |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/pattern/AbstractStyleNameConverter.java | {
"start": 8415,
"end": 8845
} | class ____ null
*/
public static Magenta newInstance(final Configuration config, final String[] options) {
return newInstance(Magenta.class, NAME, config, options);
}
}
/**
* Red style pattern converter. Adds ANSI color styling to the result of the enclosed pattern.
*/
@Plugin(name = Red.NAME, category = "Converter")
@ConverterKeys(Red.NAME)
public static final | or |
java | apache__dubbo | dubbo-rpc/dubbo-rpc-api/src/main/java/org/apache/dubbo/rpc/AttachmentsAdapter.java | {
"start": 894,
"end": 1045
} | class ____ map adapters to support attachments in RpcContext, Invocation and Result switch from
* <String, String> to <String, Object>
*/
public | provides |
java | playframework__playframework | persistence/play-java-jpa/src/main/java/play/db/jpa/JPAComponents.java | {
"start": 332,
"end": 690
} | interface ____ extends DBComponents, ConfigurationComponents {
ApplicationLifecycle applicationLifecycle();
default JPAConfig jpaConfig() {
return new DefaultJPAConfig.JPAConfigProvider(config()).get();
}
default JPAApi jpaApi() {
return new DefaultJPAApi.JPAApiProvider(jpaConfig(), applicationLifecycle(), dbApi()).get();
}
}
| JPAComponents |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.