language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
grpc__grpc-java
|
binder/src/androidTest/java/io/grpc/binder/internal/OneWayBinderProxies.java
|
{
"start": 3366,
"end": 4266
}
|
class ____ extends OneWayBinderProxy {
private final OneWayBinderProxy wrapped;
private boolean dropAllTransactions;
BlackHoleOneWayBinderProxy(OneWayBinderProxy wrapped) {
super(wrapped.getDelegate());
this.wrapped = wrapped;
}
/**
* Causes all future invocations of transact to be silently dropped.
*
* <p>Users are responsible for ensuring their calls "happen-before" the relevant calls to
* {@link #transact(int, ParcelHolder)}.
*/
public void dropAllTransactions(boolean dropAllTransactions) {
this.dropAllTransactions = dropAllTransactions;
}
@Override
public void transact(int code, ParcelHolder data) throws RemoteException {
if (!dropAllTransactions) {
wrapped.transact(code, data);
}
}
}
// Cannot be instantiated.
private OneWayBinderProxies() {}
;
}
|
BlackHoleOneWayBinderProxy
|
java
|
square__javapoet
|
src/test/java/com/squareup/javapoet/FileWritingTest.java
|
{
"start": 7617,
"end": 8479
}
|
class ____ {\n"
+ "\tDate madeFreshDate;\n"
+ "\n"
+ "\tpublic static void main(String[] args) {\n"
+ "\t\tSystem.out.println(\"Hello World!\");\n"
+ "\t}\n"
+ "}\n");
}
/**
* This test confirms that JavaPoet ignores the host charset and always uses UTF-8. The host
* charset is customized with {@code -Dfile.encoding=ISO-8859-1}.
*/
@Test public void fileIsUtf8() throws IOException {
JavaFile javaFile = JavaFile.builder("foo", TypeSpec.classBuilder("Taco").build())
.addFileComment("Pi\u00f1ata\u00a1")
.build();
javaFile.writeTo(fsRoot);
Path fooPath = fsRoot.resolve(fs.getPath("foo", "Taco.java"));
assertThat(new String(Files.readAllBytes(fooPath), UTF_8)).isEqualTo(""
+ "// Pi\u00f1ata\u00a1\n"
+ "package foo;\n"
+ "\n"
+ "
|
Test
|
java
|
spring-projects__spring-boot
|
module/spring-boot-opentelemetry/src/main/java/org/springframework/boot/opentelemetry/autoconfigure/OpenTelemetryResourceAttributes.java
|
{
"start": 1623,
"end": 5790
}
|
class ____ {
/**
* Default value for service name. Used if {@code service.name} is not set and no name
* can be deduced from the running application.
*/
private static final String DEFAULT_SERVICE_NAME = "unknown_service";
private final Environment environment;
private final Map<String, String> resourceAttributes;
private final Function<String, @Nullable String> systemEnvironment;
/**
* Creates a new instance of {@link OpenTelemetryResourceAttributes}.
* @param environment the environment
* @param resourceAttributes user-provided resource attributes to be used
*/
public OpenTelemetryResourceAttributes(Environment environment, @Nullable Map<String, String> resourceAttributes) {
this(environment, resourceAttributes, null);
}
/**
* Creates a new {@link OpenTelemetryResourceAttributes} instance.
* @param environment the environment
* @param resourceAttributes user-provided resource attributes to be used
* @param systemEnvironment a function to retrieve environment variables by name
*/
OpenTelemetryResourceAttributes(Environment environment, @Nullable Map<String, String> resourceAttributes,
@Nullable Function<String, @Nullable String> systemEnvironment) {
Assert.notNull(environment, "'environment' must not be null");
this.environment = environment;
this.resourceAttributes = (resourceAttributes != null) ? resourceAttributes : Collections.emptyMap();
this.systemEnvironment = (systemEnvironment != null) ? systemEnvironment : System::getenv;
}
/**
* Applies resource attributes to the provided {@link BiConsumer} after being combined
* from environment variables and user-defined resource attributes.
* <p>
* If a key exists in both environment variables and user-defined resources, the value
* from the user-defined resource takes precedence, even if it is empty.
* <p>
* Additionally, {@code spring.application.name} or {@code unknown_service} will be
* used as the default for {@code service.name}, and {@code spring.application.group}
* will serve as the default for {@code service.group} and {@code service.namespace}.
* @param consumer the {@link BiConsumer} to apply
*/
public void applyTo(BiConsumer<String, String> consumer) {
Assert.notNull(consumer, "'consumer' must not be null");
Map<String, String> attributes = getResourceAttributesFromEnv();
this.resourceAttributes.forEach((name, value) -> {
if (StringUtils.hasLength(name) && value != null) {
attributes.put(name, value);
}
});
attributes.computeIfAbsent("service.name", (key) -> getApplicationName());
attributes.computeIfAbsent("service.namespace", (key) -> getServiceNamespace());
attributes.forEach(consumer);
}
private String getApplicationName() {
return this.environment.getProperty("spring.application.name", DEFAULT_SERVICE_NAME);
}
private @Nullable String getServiceNamespace() {
return this.environment.getProperty("spring.application.group");
}
/**
* Parses resource attributes from the {@link System#getenv()}. This method fetches
* attributes defined in the {@code OTEL_RESOURCE_ATTRIBUTES} and
* {@code OTEL_SERVICE_NAME} environment variables and provides them as key-value
* pairs.
* <p>
* If {@code service.name} is also provided in {@code OTEL_RESOURCE_ATTRIBUTES}, then
* {@code OTEL_SERVICE_NAME} takes precedence.
* @return resource attributes
*/
private Map<String, String> getResourceAttributesFromEnv() {
Map<String, String> attributes = new LinkedHashMap<>();
for (String attribute : StringUtils.tokenizeToStringArray(getEnv("OTEL_RESOURCE_ATTRIBUTES"), ",")) {
int index = attribute.indexOf('=');
if (index > 0) {
String key = attribute.substring(0, index);
String value = attribute.substring(index + 1);
attributes.put(key.trim(), StringUtils.uriDecode(value.trim(), StandardCharsets.UTF_8));
}
}
String otelServiceName = getEnv("OTEL_SERVICE_NAME");
if (otelServiceName != null) {
attributes.put("service.name", otelServiceName);
}
return attributes;
}
private @Nullable String getEnv(String name) {
return this.systemEnvironment.apply(name);
}
}
|
OpenTelemetryResourceAttributes
|
java
|
spring-projects__spring-framework
|
spring-webmvc/src/test/java/org/springframework/web/servlet/support/DispatcherServletInitializerTests.java
|
{
"start": 1340,
"end": 2885
}
|
class ____ {
private static final String SERVLET_NAME = "myservlet";
private static final String ROLE_NAME = "role";
private static final String SERVLET_MAPPING = "/myservlet";
private final MockServletContext servletContext = new MyMockServletContext();
private final AbstractDispatcherServletInitializer initializer = new MyDispatcherServletInitializer();
private final Map<String, Servlet> servlets = new LinkedHashMap<>(2);
private final Map<String, MockServletRegistration> registrations = new LinkedHashMap<>(2);
@Test
void register() throws ServletException {
initializer.onStartup(servletContext);
assertThat(servlets).hasSize(1);
assertThat(servlets.get(SERVLET_NAME)).isNotNull();
DispatcherServlet servlet = (DispatcherServlet) servlets.get(SERVLET_NAME);
assertThat(servlet.getClass()).isEqualTo(MyDispatcherServlet.class);
WebApplicationContext servletContext = servlet.getWebApplicationContext();
assertThat(servletContext.containsBean("bean")).isTrue();
boolean condition = servletContext.getBean("bean") instanceof MyBean;
assertThat(condition).isTrue();
assertThat(registrations).hasSize(1);
assertThat(registrations.get(SERVLET_NAME)).isNotNull();
MockServletRegistration registration = registrations.get(SERVLET_NAME);
assertThat(registration.getMappings()).isEqualTo(Collections.singleton(SERVLET_MAPPING));
assertThat(registration.getLoadOnStartup()).isEqualTo(1);
assertThat(registration.getRunAsRole()).isEqualTo(ROLE_NAME);
}
private
|
DispatcherServletInitializerTests
|
java
|
quarkusio__quarkus
|
extensions/qute/deployment/src/test/java/io/quarkus/qute/deployment/extensions/TemplateExtensionMethodsTest.java
|
{
"start": 4952,
"end": 5529
}
|
class ____ {
// default priority - class-level annotation
static String alpha(Foo foo) {
return "alpha";
}
// Explicit priority, higher than ValueResolverGenerator.DEFAULT_PRIORITY
@TemplateExtension(matchName = "pong", priority = 100)
static String bravo(Foo foo) {
return "bravo";
}
// default priority - method-level annotation
@TemplateExtension(matchName = "pong")
static String charlie(Foo foo) {
return "charlie";
}
}
}
|
PrioritizedExtensions
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineV2ClientImpl.java
|
{
"start": 16000,
"end": 16109
}
|
class ____ responsible for collecting the timeline entities and
* publishing them in async.
*/
private
|
is
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/deser/enums/EnumNamingDeserializationTest.java
|
{
"start": 2385,
"end": 2504
}
|
enum ____ {
REAL_NAME
}
@EnumNaming(EnumNamingStrategies.LowerCamelCaseStrategy.class)
static
|
BaseEnum
|
java
|
apache__camel
|
components/camel-aws/camel-aws-config/src/main/java/org/apache/camel/component/aws/config/client/AWSConfigClientFactory.java
|
{
"start": 1306,
"end": 1378
}
|
class ____ return the correct type of AWS Config client.
*/
public final
|
to
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/StatementSwitchToExpressionSwitchTest.java
|
{
"start": 96183,
"end": 97799
}
|
interface ____ {}
public int y = 0;
public int foo(Suit suit) {
// epsilon
// zeta
// alpha
/* beta */
/* chi */
/* gamma */
/* delta */
@MyAnno(v = "foo")
@MyOtherAnno
@MyAnno
final int x =
switch (suit) {
case HEART, DIAMOND -> ((y + 1) * (y * y)) << 1;
case SPADE -> throw new RuntimeException();
default -> throw new NullPointerException();
};
Map<? extends String, ? super Test> map =
switch (suit) {
case HEART, DIAMOND -> new HashMap<>();
case SPADE -> throw new RuntimeException();
default -> throw new NullPointerException();
};
return x;
}
}
""")
.setArgs(
"-XepOpt:StatementSwitchToExpressionSwitch:EnableAssignmentSwitchConversion",
"-XepOpt:StatementSwitchToExpressionSwitch:EnableDirectConversion=false")
.setFixChooser(StatementSwitchToExpressionSwitchTest::assertOneFixAndChoose)
.doTest(TEXT_MATCH);
}
@Test
public void variableInTransitiveEnclosingBlock_shouldNotBeMoved() {
refactoringHelper
.addInputLines(
"Test.java",
"""
import java.util.HashMap;
import java.util.Map;
|
MyOtherAnno
|
java
|
apache__camel
|
components/camel-test/camel-test-main-junit5/src/main/java/org/apache/camel/test/main/junit5/CamelMainExtension.java
|
{
"start": 5972,
"end": 11177
}
|
class ____
sourceContext = context.getParent().orElseThrow();
}
return sourceContext.getStore(NAMESPACE);
}
/**
* Dump the route coverage for the given test if it is enabled.
*/
private void dumpRouteCoverageIfNeeded(ExtensionContext context, long time, String currentTestName) throws Exception {
// if we should dump route stats, then write that to a file
if (isRouteCoverageEnabled(context)) {
final Class<?> requiredTestClass = context.getRequiredTestClass();
// In case of a {@code @Nested} test class, its name will be prefixed by the name of its outer classes
String className = requiredTestClass.getName().substring(requiredTestClass.getPackageName().length() + 1);
String dir = "target/camel-route-coverage";
String name = String.format("%s-%s.xml", className, StringHelper.before(currentTestName, "("));
final ModelCamelContext camelContext = getContextStore(context).get(CONTEXT, CamelMainContext.class).context();
ManagedCamelContext mc = camelContext == null
? null : camelContext.getCamelContextExtension().getContextPlugin(ManagedCamelContext.class);
ManagedCamelContextMBean managedCamelContext = mc == null ? null : mc.getManagedCamelContext();
if (managedCamelContext == null) {
LOG.warn("Cannot dump route coverage to file as JMX is not enabled. "
+ "Add camel-management JAR as dependency to enable JMX in the unit test classes.");
} else {
routeCoverageDumper.dump(managedCamelContext, camelContext, dir, name, requiredTestClass.getName(),
currentTestName,
time);
}
}
}
/**
* Dump the route for the given test if it is enabled.
*/
private void dumpRouteIfNeeded(ExtensionContext context, String currentTestName) throws Exception {
String dump = getRouteDump(context);
if (dump != null && !dump.isBlank()) {
final Class<?> requiredTestClass = context.getRequiredTestClass();
// In case of a {@code @Nested} test class, its name will be prefixed by the name of its outer classes
String className = requiredTestClass.getName().substring(requiredTestClass.getPackageName().length() + 1);
String dir = "target/camel-route-dump";
String ext = dump.toLowerCase();
String name = String.format("%s-%s.%s", className, StringHelper.before(currentTestName, "("), ext);
final ModelCamelContext camelContext = getContextStore(context).get(CONTEXT, CamelMainContext.class).context();
DumpRoutesStrategy drs = camelContext.getCamelContextExtension().getContextPlugin(DumpRoutesStrategy.class);
drs.setOutput(dir + "/" + name);
drs.setInclude("*");
drs.setLog(false);
drs.setUriAsParameters(true);
drs.dumpRoutes(dump);
}
}
/**
* Indicates whether the route coverage is enabled according to the given extension context and the value of the
* system property {@link org.apache.camel.test.junit5.util.CamelContextTestHelper#ROUTE_COVERAGE_ENABLED}.
* <p/>
* In case of {@code @Nested} test classes, the value is always extracted from the annotation of the outer class.
*
* @return {@code true} if the route coverage is enabled, {@code false} otherwise.
*/
private boolean isRouteCoverageEnabled(ExtensionContext context) {
return CamelContextTestHelper.isRouteCoverageEnabled(false)
|| context.getRequiredTestInstances().getAllInstances().get(0).getClass()
.getAnnotation(CamelMainTest.class).dumpRouteCoverage();
}
/**
* Indicates whether the route dump is enabled according to the given extension context and the value of the system
* property {@link org.apache.camel.test.junit5.util.CamelContextTestHelper#ROUTE_DUMP_ENABLED}.
* <p/>
* In case of {@code @Nested} test classes, the value is always extracted from the annotation of the outer class.
*
* @return xml or yaml if the route dump is enabled, {@code null} otherwise.
*/
private String getRouteDump(ExtensionContext context) {
String dump = CamelContextTestHelper.getRouteDump(null);
if (dump == null) {
dump = context.getRequiredTestInstances().getAllInstances().get(0).getClass()
.getAnnotation(CamelMainTest.class).dumpRoute();
}
return dump;
}
/**
* Indicates whether JMX should be used during testing according to the given extension context.
* <p/>
* In case of {@code @Nested} test classes, the value is always extracted from the annotation of the outer class.
*
* @return {@code true} if JMX should be used, {@code false} otherwise.
*/
private boolean useJmx(ExtensionContext context) {
return context.getRequiredTestInstances().getAllInstances().get(0).getClass()
.getAnnotation(CamelMainTest.class).useJmx();
}
}
|
context
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/parser/deser/awt/PointDeserializerTest2.java
|
{
"start": 218,
"end": 535
}
|
class ____ extends TestCase {
public void test_error_3() throws Exception {
Exception error = null;
try {
JSON.parseObject("{\"z\":44}", Point.class);
} catch (JSONException ex) {
error = ex;
}
Assert.assertNotNull(error);
}
}
|
PointDeserializerTest2
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/bug/Issue1013.java
|
{
"start": 2220,
"end": 2404
}
|
class ____ {
public List<Integer> list;
public TestDomain2(){
list = new ArrayList<Integer>();
list.add(1);
}
}
static
|
TestDomain2
|
java
|
lettuce-io__lettuce-core
|
src/main/java/io/lettuce/core/protocol/CommandHandler.java
|
{
"start": 32020,
"end": 32377
}
|
class ____ implements DemandAware.Source {
@Override
public void requestMore() {
if (isConnected() && !isClosed()) {
if (!channel.config().isAutoRead()) {
channel.pipeline().fireUserEventTriggered(EnableAutoRead.INSTANCE);
}
}
}
}
|
BackpressureSource
|
java
|
apache__dubbo
|
dubbo-remoting/dubbo-remoting-api/src/main/java/org/apache/dubbo/remoting/exchange/support/ExchangeServerDelegate.java
|
{
"start": 1224,
"end": 3369
}
|
class ____ implements ExchangeServer {
private transient ExchangeServer server;
public ExchangeServerDelegate() {}
public ExchangeServerDelegate(ExchangeServer server) {
setServer(server);
}
public ExchangeServer getServer() {
return server;
}
public void setServer(ExchangeServer server) {
this.server = server;
}
@Override
public boolean isBound() {
return server.isBound();
}
@Override
public void reset(URL url) {
server.reset(url);
}
@Override
@Deprecated
public void reset(org.apache.dubbo.common.Parameters parameters) {
reset(getUrl().addParameters(parameters.getParameters()));
}
@Override
public Collection<Channel> getChannels() {
return server.getChannels();
}
@Override
public Channel getChannel(InetSocketAddress remoteAddress) {
return server.getChannel(remoteAddress);
}
@Override
public URL getUrl() {
return server.getUrl();
}
@Override
public ChannelHandler getChannelHandler() {
return server.getChannelHandler();
}
@Override
public InetSocketAddress getLocalAddress() {
return server.getLocalAddress();
}
@Override
public void send(Object message) throws RemotingException {
server.send(message);
}
@Override
public void send(Object message, boolean sent) throws RemotingException {
server.send(message, sent);
}
@Override
public void close() {
server.close();
}
@Override
public boolean isClosed() {
return server.isClosed();
}
@Override
public Collection<ExchangeChannel> getExchangeChannels() {
return server.getExchangeChannels();
}
@Override
public ExchangeChannel getExchangeChannel(InetSocketAddress remoteAddress) {
return server.getExchangeChannel(remoteAddress);
}
@Override
public void close(int timeout) {
server.close(timeout);
}
@Override
public void startClose() {
server.startClose();
}
}
|
ExchangeServerDelegate
|
java
|
hibernate__hibernate-orm
|
hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/sequence/CacheSequenceSupport.java
|
{
"start": 377,
"end": 1513
}
|
class ____ implements SequenceSupport {
public static final SequenceSupport INSTANCE = new CacheSequenceSupport();
public String getSequenceNextValString(String sequenceName) {
return "select InterSystems.Sequences_GetNext('" + sequenceName + "')" + getFromDual( sequenceName );
}
public String getSelectSequenceNextValString(String sequenceName) {
//TODO: is this really correct? Why can't we just call InterSystems.Sequences_GetNext() without the select?
return "(select InterSystems.Sequences_GetNext('" + sequenceName + "')" + getFromDual( sequenceName ) + ")";
}
private String getFromDual(String sequenceName) {
return " from InterSystems.Sequences where ucase(name)=ucase('" + sequenceName + "')";
}
public String getCreateSequenceString(String sequenceName) {
return "insert into InterSystems.Sequences(Name) values (ucase('" + sequenceName + "'))";
}
public String getDropSequenceString(String sequenceName) {
return "delete from InterSystems.Sequences where ucase(name)=ucase('" + sequenceName + "')";
}
@Override
public boolean supportsPooledSequences() {
return false;
}
}
|
CacheSequenceSupport
|
java
|
bumptech__glide
|
library/src/main/java/com/bumptech/glide/load/engine/DataCacheKey.java
|
{
"start": 227,
"end": 1212
}
|
class ____ implements Key {
private final Key sourceKey;
private final Key signature;
DataCacheKey(Key sourceKey, Key signature) {
this.sourceKey = sourceKey;
this.signature = signature;
}
Key getSourceKey() {
return sourceKey;
}
@Override
public boolean equals(Object o) {
if (o instanceof DataCacheKey) {
DataCacheKey other = (DataCacheKey) o;
return sourceKey.equals(other.sourceKey) && signature.equals(other.signature);
}
return false;
}
@Override
public int hashCode() {
int result = sourceKey.hashCode();
result = 31 * result + signature.hashCode();
return result;
}
@Override
public String toString() {
return "DataCacheKey{" + "sourceKey=" + sourceKey + ", signature=" + signature + '}';
}
@Override
public void updateDiskCacheKey(@NonNull MessageDigest messageDigest) {
sourceKey.updateDiskCacheKey(messageDigest);
signature.updateDiskCacheKey(messageDigest);
}
}
|
DataCacheKey
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/matchers/AnnotationDoesNotHaveArgumentTest.java
|
{
"start": 1290,
"end": 1521
}
|
interface ____ {
String value() default "";
}
""");
}
@Test
public void matchesWhenArgumentIsNotPresent() {
writeFile(
"Class.java",
"""
@Annotation
public
|
Annotation
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/UnusedVariableTest.java
|
{
"start": 31682,
"end": 31936
}
|
class ____ {
private int x = 1;
public int a() {
x = a();
return 1;
}
}
""")
.addOutputLines(
"Test.java",
"""
|
Test
|
java
|
junit-team__junit5
|
jupiter-tests/src/test/java/org/junit/jupiter/params/ParameterizedTestIntegrationTests.java
|
{
"start": 86493,
"end": 87288
}
|
interface ____ {
}
@SuppressWarnings("JUnitMalformedDeclaration")
@ParameterizedTest(quoteTextArguments = false)
@TwoMethodSources
void testWithRepeatableMethodSourceAsMetaAnnotation(String argument) {
fail(argument);
}
public static Stream<Arguments> someArgumentsMethodSource() {
return Stream.of(Arguments.of("some"));
}
public static Stream<Arguments> otherArgumentsMethodSource() {
return Stream.of(Arguments.of("other"));
}
@ParameterizedTest(quoteTextArguments = false)
@FieldSource("someArgumentsContainer")
@FieldSource("otherArgumentsContainer")
void testWithRepeatableFieldSource(String argument) {
fail(argument);
}
@FieldSource("someArgumentsContainer")
@FieldSource("otherArgumentsContainer")
@Retention(RUNTIME)
@
|
TwoMethodSources
|
java
|
spring-projects__spring-framework
|
spring-test/src/test/java/org/springframework/test/web/reactive/server/samples/XmlContentTests.java
|
{
"start": 4346,
"end": 4981
}
|
class ____ {
@GetMapping(produces = MediaType.APPLICATION_XML_VALUE)
PersonsWrapper getPersons() {
return new PersonsWrapper(new Person("Jane"), new Person("Jason"), new Person("John"));
}
@GetMapping(path = "/{name}", produces = MediaType.APPLICATION_XML_VALUE)
Person getPerson(@PathVariable String name) {
return new Person(name);
}
@PostMapping(consumes = MediaType.APPLICATION_XML_VALUE)
ResponseEntity<Object> savepersons(@RequestBody Person person) {
URI location = URI.create(String.format("/persons/%s", person.getName()));
return ResponseEntity.created(location).build();
}
}
}
|
PersonController
|
java
|
reactor__reactor-core
|
reactor-core/src/main/java/reactor/core/publisher/MonoDelayUntil.java
|
{
"start": 4708,
"end": 12309
}
|
class ____<T> implements InnerOperator<T, T> {
final Function<? super T, ? extends Publisher<?>>[] otherGenerators;
final CoreSubscriber<? super T> actual;
int index;
@Nullable T value;
boolean done;
@SuppressWarnings("NotNullFieldNotInitialized") // s initialized in onSubscribe
Subscription s;
@Nullable DelayUntilTrigger<?> triggerSubscriber;
volatile @Nullable Throwable error;
// https://github.com/uber/NullAway/issues/1157
@SuppressWarnings({"rawtypes", "DataFlowIssue"})
static final AtomicReferenceFieldUpdater<DelayUntilCoordinator, @Nullable Throwable> ERROR =
AtomicReferenceFieldUpdater.newUpdater(DelayUntilCoordinator.class, Throwable.class, "error");
volatile int state;
@SuppressWarnings("rawtypes")
static final AtomicIntegerFieldUpdater<DelayUntilCoordinator> STATE =
AtomicIntegerFieldUpdater.newUpdater(DelayUntilCoordinator.class, "state");
static final int HAS_SUBSCRIPTION = 0b00000000000000000000000000000001;
static final int HAS_INNER = 0b00000000000000000000000000000010;
static final int HAS_REQUEST = 0b00000000000000000000000000000100;
static final int HAS_VALUE = 0b00000000000000000000000000001000;
static final int TERMINATED = 0b10000000000000000000000000000000;
DelayUntilCoordinator(CoreSubscriber<? super T> subscriber,
Function<? super T, ? extends Publisher<?>>[] otherGenerators) {
this.actual = subscriber;
this.otherGenerators = otherGenerators;
}
@Override
public void onSubscribe(Subscription s) {
if (Operators.validate(this.s, s)) {
this.s = s;
int previousState = markHasSubscription();
if (isTerminated(previousState)) {
s.cancel();
return;
}
s.request(Long.MAX_VALUE);
}
}
@Override
public void onNext(T t) {
if (this.done) {
Operators.onDiscard(t, this.actual.currentContext());
return;
}
this.value = t;
subscribeNextTrigger();
}
@Override
public void onError(Throwable t) {
if (this.done) {
Operators.onErrorDropped(t, this.actual.currentContext());
return;
}
this.done = true;
if (this.value == null) {
this.actual.onError(t);
return;
}
if (!Exceptions.addThrowable(ERROR, this, t)) {
Operators.onErrorDropped(t, this.actual.currentContext());
return;
}
final int previousState = markTerminated();
if (isTerminated(previousState)) {
return;
}
if (hasInner(previousState)) {
Operators.onDiscard(this.value, this.actual.currentContext());
assert this.triggerSubscriber != null : "triggerSubscriber can not be null when HAS_INNER is set";
this.triggerSubscriber.cancel();
}
final Throwable e = Exceptions.terminate(ERROR, this);
//noinspection ConstantConditions
this.actual.onError(e);
}
@Override
public void onComplete() {
if (this.done) {
return;
}
if (this.value == null) {
this.done = true;
this.actual.onComplete();
}
}
@Override
public void request(long n) {
if (Operators.validate(n)) {
final int previousState = markHasRequest();
if (isTerminated(previousState)) {
return;
}
if (hasRequest(previousState)) {
return;
}
if (hasValue(previousState)) {
this.done = true;
final CoreSubscriber<? super T> actual = this.actual;
final T v = this.value;
assert v != null : "value can not be null when flag is set";
actual.onNext(v);
actual.onComplete();
}
}
}
@Override
public void cancel() {
final int previousState = markTerminated();
if (isTerminated(previousState)) {
return;
}
final Throwable t = Exceptions.terminate(ERROR, this);
if (t != null) {
Operators.onErrorDropped(t, this.actual.currentContext());
}
if (hasSubscription(previousState)) {
this.s.cancel();
}
if (hasInner(previousState)) {
Operators.onDiscard(this.value, this.actual.currentContext());
assert this.triggerSubscriber != null : "triggerSubscriber can not be null when HAS_INNER is set";
this.triggerSubscriber.cancel();
}
}
@SuppressWarnings({"unchecked", "rawtypes"})
void subscribeNextTrigger() {
final Function<? super T, ? extends Publisher<?>> generator =
this.otherGenerators[this.index];
Publisher<?> p;
try {
T v = this.value;
assert v != null : "value can not be null when subscribing to next trigger";
p = generator.apply(v);
Objects.requireNonNull(p, "mapper returned null value");
}
catch (Throwable t) {
onError(t);
return;
}
DelayUntilTrigger triggerSubscriber = this.triggerSubscriber;
if (triggerSubscriber == null) {
triggerSubscriber = new DelayUntilTrigger<>(this);
this.triggerSubscriber = triggerSubscriber;
}
p = Operators.toFluxOrMono(p);
p.subscribe(triggerSubscriber);
}
@Override
public CoreSubscriber<? super T> actual() {
return this.actual;
}
@Override
public @Nullable Object scanUnsafe(Attr key) {
if (key == Attr.CANCELLED) return isTerminated(this.state) && !this.done;
if (key == Attr.TERMINATED) return isTerminated(this.state) && this.done;
if (key == Attr.PREFETCH) return Integer.MAX_VALUE;
if (key == Attr.RUN_STYLE) return Attr.RunStyle.SYNC;
return InnerOperator.super.scanUnsafe(key);
}
@Override
public Stream<? extends Scannable> inners() {
final DelayUntilTrigger<?> subscriber = this.triggerSubscriber;
return subscriber == null ? Stream.empty() : Stream.of(subscriber);
}
/**
* Sets flag has subscription to indicate that we have already received
* subscription from the value upstream
*
* @return previous state
*/
int markHasSubscription() {
for (;;) {
final int state = this.state;
if (isTerminated(state)) {
return TERMINATED;
}
if (STATE.compareAndSet(this, state, state | HAS_SUBSCRIPTION)) {
return state;
}
}
}
/**
* Sets {@link #HAS_REQUEST} flag which indicates that there is a demand from
* the downstream
*
* @return previous state
*/
int markHasRequest() {
for (; ; ) {
final int state = this.state;
if (isTerminated(state)) {
return TERMINATED;
}
if (hasRequest(state)) {
return state;
}
final int nextState;
if (hasValue(state)) {
nextState = TERMINATED;
}
else {
nextState = state | HAS_REQUEST;
}
if (STATE.compareAndSet(this, state, nextState)) {
return state;
}
}
}
/**
* Sets current state to {@link #TERMINATED}
*
* @return previous state
*/
int markTerminated() {
for (;;) {
final int state = this.state;
if (isTerminated(state)) {
return TERMINATED;
}
if (STATE.compareAndSet(this, state, TERMINATED)) {
return state;
}
}
}
/**
* Terminates execution if there is a demand from the downstream or sets
* {@link #HAS_VALUE} flag indicating that the delay process is completed
* however there is no demand from the downstream yet
*/
void complete() {
for (; ; ) {
int s = this.state;
if (isTerminated(s)) {
return;
}
if (hasRequest(s) && STATE.compareAndSet(this, s, TERMINATED)) {
final CoreSubscriber<? super T> actual = this.actual;
final T v = this.value;
assert v != null : "value can not be null when completing as a result of processing triggers";
actual.onNext(v);
actual.onComplete();
return;
}
if (STATE.compareAndSet(this, s, s | HAS_VALUE)) {
return;
}
}
}
}
static final
|
DelayUntilCoordinator
|
java
|
apache__commons-lang
|
src/main/java/org/apache/commons/lang3/text/StrMatcher.java
|
{
"start": 1607,
"end": 1723
}
|
class ____ {
/**
* Class used to define a character for matching purposes.
*/
static final
|
StrMatcher
|
java
|
apache__camel
|
components/camel-ehcache/src/main/java/org/apache/camel/component/ehcache/EhcacheConsumer.java
|
{
"start": 1115,
"end": 3503
}
|
class ____ extends DefaultConsumer implements CacheEventListener<Object, Object> {
private final EhcacheConfiguration configuration;
private final String cacheName;
private Cache cache;
public EhcacheConsumer(EhcacheEndpoint endpoint, String cacheName, EhcacheConfiguration configuration,
Processor processor) {
super(endpoint, processor);
this.configuration = configuration;
this.cacheName = cacheName;
}
@Override
public EhcacheEndpoint getEndpoint() {
return (EhcacheEndpoint) super.getEndpoint();
}
@Override
protected void doStart() throws Exception {
super.doStart();
Class<?> kt = null;
if (configuration.getKeyType() != null) {
kt = getEndpoint().getCamelContext().getClassResolver().resolveClass(configuration.getKeyType());
}
Class<?> vt = null;
if (configuration.getValueType() != null) {
vt = getEndpoint().getCamelContext().getClassResolver().resolveClass(configuration.getValueType());
}
this.cache = getEndpoint().getManager().getCache(cacheName, kt, vt);
this.cache.getRuntimeConfiguration().registerCacheEventListener(
this,
configuration.getEventOrdering(),
configuration.getEventFiring(),
configuration.getEventTypesSet());
}
@Override
protected void doStop() throws Exception {
cache.getRuntimeConfiguration().deregisterCacheEventListener(this);
super.doStop();
}
@Override
public void onEvent(CacheEvent<?, ?> event) {
if (isRunAllowed()) {
final Exchange exchange = createExchange(false);
final Message message = exchange.getIn();
message.setHeader(EhcacheConstants.KEY, event.getKey());
message.setHeader(EhcacheConstants.EVENT_TYPE, event.getType());
message.setHeader(EhcacheConstants.OLD_VALUE, event.getOldValue());
message.setBody(event.getNewValue());
try {
getProcessor().process(exchange);
} catch (Exception e) {
getExceptionHandler().handleException("Error processing exchange", exchange, e);
} finally {
releaseExchange(exchange, false);
}
}
}
}
|
EhcacheConsumer
|
java
|
apache__camel
|
components/camel-aws/camel-aws2-transcribe/src/main/java/org/apache/camel/component/aws2/transcribe/client/impl/Transcribe2ClientStandardImpl.java
|
{
"start": 1712,
"end": 3650
}
|
// Standard AWS Transcribe client provider. Builds a TranscribeClient from the
// component configuration: region, optional endpoint override, static
// credentials and an Apache HTTP client (or a caller-supplied SdkHttpClient).
class ____ implements Transcribe2InternalClient {
private static final Logger LOG = LoggerFactory.getLogger(Transcribe2ClientStandardImpl.class);
private Transcribe2Configuration configuration;
// Optional externally supplied HTTP client; preferred over a freshly built
// ApacheHttpClient when present.
private SdkHttpClient httpClient;
public Transcribe2ClientStandardImpl(Transcribe2Configuration configuration) {
this.configuration = configuration;
}
public Transcribe2ClientStandardImpl(SdkHttpClient httpClient, Transcribe2Configuration configuration) {
// Fix: the supplied httpClient was previously silently discarded.
this.httpClient = httpClient;
this.configuration = configuration;
}
// Builds and returns a fully configured TranscribeClient.
@Override
public TranscribeClient getTranscribeClient() {
TranscribeClientBuilder clientBuilder = TranscribeClient.builder();
if (ObjectHelper.isNotEmpty(configuration.getRegion())) {
clientBuilder = clientBuilder.region(Region.of(configuration.getRegion()));
}
if (configuration.isOverrideEndpoint()) {
clientBuilder.endpointOverride(URI.create(configuration.getUriEndpointOverride()));
}
if (ObjectHelper.isNotEmpty(configuration.getAccessKey()) && ObjectHelper.isNotEmpty(configuration.getSecretKey())) {
AwsBasicCredentials cred = AwsBasicCredentials.create(configuration.getAccessKey(), configuration.getSecretKey());
clientBuilder = clientBuilder.credentialsProvider(StaticCredentialsProvider.create(cred));
}
if (httpClient != null) {
clientBuilder.httpClient(httpClient);
} else if (configuration.isTrustAllCertificates()) {
// Fix: buildWithDefaults() RETURNS the configured client. The original
// code discarded that return value and then built a second client via
// build(), so TRUST_ALL_CERTIFICATES never took effect.
clientBuilder.httpClient(ApacheHttpClient.builder().buildWithDefaults(AttributeMap
.builder()
.put(
SdkHttpConfigurationOption.TRUST_ALL_CERTIFICATES,
Boolean.TRUE)
.build()));
} else {
clientBuilder.httpClient(ApacheHttpClient.builder().build());
}
return clientBuilder.build();
}
}
|
Transcribe2ClientStandardImpl
|
java
|
quarkusio__quarkus
|
extensions/amazon-lambda/deployment/src/test/java/io/quarkus/amazon/lambda/deployment/testing/InputCollectionOutputCollectionLambdaTest.java
|
{
"start": 672,
"end": 1770
}
|
// Integration test: a Lambda handler taking a collection of InputPerson must
// return a collection of OutputPerson serialised with the expected JSON keys.
class ____ {
@RegisterExtension
static final QuarkusUnitTest test = new QuarkusUnitTest().setArchiveProducer(() -> ShrinkWrap
.create(JavaArchive.class)
.addClasses(InputCollectionOutputCollectionLambda.class, InputPerson.class, OutputPerson.class));
// POSTs a two-element person list and checks each entry was mapped to an
// OutputPerson (key "outputname") and that the input key "name" is absent.
@Test
void requestHandler_InputCollectionInputPerson_OutputCollectionOutputPerson() {
List<InputPerson> personList = new ArrayList<>();
personList.add(new InputPerson("Chris"));
personList.add(new InputPerson("Fred"));
given()
.body(personList)
.when()
.post()
.then()
.statusCode(200)
.body("", hasItem(hasEntry("outputname", "Chris"))) // OutputPerson serializes name with key outputname
.body("", hasItem(hasEntry("outputname", "Fred")))
.body("", not(hasItem(hasEntry("name", "Chris")))) // make sure that there is no key name
.body("", not(hasItem(hasEntry("name", "Fred"))));
}
}
|
InputCollectionOutputCollectionLambdaTest
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceUtils.java
|
{
"start": 3787,
"end": 3905
}
|
class ____ find
* @return the file or null if it is not found
* @throws IOException any IO problem, including the
|
to
|
java
|
apache__dubbo
|
dubbo-config/dubbo-config-api/src/main/java/org/apache/dubbo/config/ReferenceConfig.java
|
{
"start": 7515,
"end": 8004
}
|
interface ____ reference
*/
private transient volatile T ref;
/**
* The invoker of the reference service
*/
private transient volatile Invoker<?> invoker;
/**
* The flag whether the ReferenceConfig has been initialized
*/
private transient volatile boolean initialized;
/**
* whether this ReferenceConfig has been destroyed
*/
private transient volatile boolean destroyed;
/**
* The service names that the Dubbo
|
proxy
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/inference/configuration/SettingsConfigurationFieldTypeTests.java
|
{
"start": 684,
"end": 1286
}
|
// Unit tests for SettingsConfigurationFieldType.fieldType(String):
// round-trips every enum constant and rejects unknown names.
class ____ extends ESTestCase {
public void testFieldType_WithValidConfigurationFieldTypeString() {
// toString() of a random constant must parse back to the same constant.
SettingsConfigurationFieldType fieldType = SettingsConfigurationTestUtils.getRandomConfigurationFieldType();
assertThat(SettingsConfigurationFieldType.fieldType(fieldType.toString()), equalTo(fieldType));
}
public void testFieldType_WithInvalidConfigurationFieldTypeString_ExpectIllegalArgumentException() {
assertThrows(IllegalArgumentException.class, () -> SettingsConfigurationFieldType.fieldType("invalid field type"));
}
}
|
SettingsConfigurationFieldTypeTests
|
java
|
alibaba__druid
|
core/src/main/java/com/alibaba/druid/sql/ast/statement/SQLIfStatement.java
|
{
"start": 5123,
"end": 5969
}
|
// ELSE branch of a SQL IF statement: an ordered list of statements with
// visitor traversal and deep-clone support.
class ____ extends SQLObjectImpl {
private List<SQLStatement> statements = new ArrayList<SQLStatement>();
@Override
public void accept0(SQLASTVisitor visitor) {
// Children are visited only when the visitor accepts this node.
if (visitor.visit(this)) {
acceptChild(visitor, statements);
}
visitor.endVisit(this);
}
public List<SQLStatement> getStatements() {
return statements;
}
public void setStatements(List<SQLStatement> statements) {
this.statements = statements;
}
public Else clone() {
// Deep copy: clones every child statement and re-parents it to the copy.
Else x = new Else();
for (SQLStatement stmt : statements) {
SQLStatement stmt2 = stmt.clone();
stmt2.setParent(x);
x.statements.add(stmt2);
}
return x;
}
}
}
|
Else
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractRename.java
|
{
"start": 1055,
"end": 1268
}
|
// Azure (wasb) binding of the generic filesystem-contract rename tests:
// only supplies the Azure-specific contract, inheriting all test cases.
class ____ extends AbstractContractRenameTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new NativeAzureFileSystemContract(conf);
}
}
|
ITestAzureNativeContractRename
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ADSSEEncryptionWithDefaultS3Settings.java
|
{
"start": 2127,
"end": 2270
}
|
class ____ extends {@link AbstractTestS3AEncryption}
* and tests already configured bucket level DSSE encryption using s3 console.
*/
public
|
that
|
java
|
quarkusio__quarkus
|
independent-projects/resteasy-reactive/server/processor/src/main/java/org/jboss/resteasy/reactive/server/processor/generation/filters/CustomFilterGenerator.java
|
{
"start": 46005,
"end": 46211
}
|
// Return shapes a custom REST filter method may declare; used by the
// generator to pick the matching invocation/conversion strategy.
enum ____ {
VOID,
RESPONSE,
REST_RESPONSE,
OPTIONAL_RESPONSE,
OPTIONAL_REST_RESPONSE,
UNI_VOID,
UNI_RESPONSE,
UNI_REST_RESPONSE
}
}
|
ReturnType
|
java
|
micronaut-projects__micronaut-core
|
http-server-netty/src/test/groovy/io/micronaut/http/server/netty/websocket/ChatClientWebSocket.java
|
{
"start": 1331,
"end": 2686
}
|
// Test WebSocket chat client: captures session/request details on open and
// collects every received message for later assertions.
class ____ implements AutoCloseable { // <2>
private WebSocketSession session;
private HttpRequest request;
private String topic;
private String username;
// Concurrent queue: messages may arrive on an event-loop thread while the
// test thread reads getReplies().
private Collection<String> replies = new ConcurrentLinkedQueue<>();
private String subProtocol;
@OnOpen
public void onOpen(String topic, String username, WebSocketSession session, HttpRequest request) { // <3>
this.topic = topic;
this.username = username;
this.session = session;
this.request = request;
// Negotiated subprotocol, or null when none was agreed.
this.subProtocol = session.getSubprotocol().orElse(null);
}
public String getTopic() {
return topic;
}
public String getUsername() {
return username;
}
public Collection<String> getReplies() {
return replies;
}
public WebSocketSession getSession() {
return session;
}
public HttpRequest getRequest() {
return request;
}
@OnMessage
public void onMessage(
String message) {
replies.add(message); // <4>
}
// end::class[]
// NOTE(review): abstract send variants -- presumably implemented by the
// framework at runtime; confirm against the enclosing client declaration.
public abstract void send(String message);
public abstract Future<String> sendAsync(String message);
@SingleResult
public abstract Publisher<String> sendRx(String message);
public String getSubProtocol() {
return subProtocol;
}
}
|
ChatClientWebSocket
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/idgen/n_ative/local/NativeGeneratorClassTest.java
|
{
"start": 1923,
"end": 1997
}
|
// Minimal entity exercising the native id generator: an auto-generated
// long id plus one payload column.
class ____ {
@Id @GeneratedValue
long id;
String data;
}
}
|
NativeEntity
|
java
|
grpc__grpc-java
|
netty/src/main/java/io/grpc/netty/NettyServerHandler.java
|
{
"start": 5430,
"end": 25856
}
|
class ____ extends AbstractNettyHandler {
private static final Logger logger = Logger.getLogger(NettyServerHandler.class.getName());
private static final long KEEPALIVE_PING = 0xDEADL;
@VisibleForTesting
static final long GRACEFUL_SHUTDOWN_PING = 0x97ACEF001L;
private static final long GRACEFUL_SHUTDOWN_PING_TIMEOUT_NANOS = TimeUnit.SECONDS.toNanos(10);
/** Temporary workaround for #8674. Fine to delete after v1.45 release, and maybe earlier. */
private static final boolean DISABLE_CONNECTION_HEADER_CHECK = Boolean.parseBoolean(
System.getProperty("io.grpc.netty.disableConnectionHeaderCheck", "false"));
private final Http2Connection.PropertyKey streamKey;
private final ServerTransportListener transportListener;
private final int maxMessageSize;
private final long keepAliveTimeInNanos;
private final long keepAliveTimeoutInNanos;
private final long maxConnectionAgeInNanos;
private final long maxConnectionAgeGraceInNanos;
private final RstStreamCounter rstStreamCounter;
private final List<? extends ServerStreamTracer.Factory> streamTracerFactories;
private final TransportTracer transportTracer;
private final KeepAliveEnforcer keepAliveEnforcer;
private final Attributes eagAttributes;
/** Incomplete attributes produced by negotiator. */
private Attributes negotiationAttributes;
private InternalChannelz.Security securityInfo;
/** Completed attributes produced by transportReady. */
private Attributes attributes;
private Throwable connectionError;
private boolean teWarningLogged;
private WriteQueue serverWriteQueue;
private AsciiString lastKnownAuthority;
@CheckForNull
private KeepAliveManager keepAliveManager;
@CheckForNull
private MaxConnectionIdleManager maxConnectionIdleManager;
@CheckForNull
private ScheduledFuture<?> maxConnectionAgeMonitor;
@CheckForNull
private GracefulShutdown gracefulShutdown;
static NettyServerHandler newHandler(
ServerTransportListener transportListener,
ChannelPromise channelUnused,
List<? extends ServerStreamTracer.Factory> streamTracerFactories,
TransportTracer transportTracer,
int maxStreams,
boolean autoFlowControl,
int flowControlWindow,
int maxHeaderListSize,
int softLimitHeaderListSize,
int maxMessageSize,
long keepAliveTimeInNanos,
long keepAliveTimeoutInNanos,
long maxConnectionIdleInNanos,
long maxConnectionAgeInNanos,
long maxConnectionAgeGraceInNanos,
boolean permitKeepAliveWithoutCalls,
long permitKeepAliveTimeInNanos,
int maxRstCount,
long maxRstPeriodNanos,
Attributes eagAttributes) {
Preconditions.checkArgument(maxHeaderListSize > 0, "maxHeaderListSize must be positive: %s",
maxHeaderListSize);
Http2FrameLogger frameLogger = new Http2FrameLogger(LogLevel.DEBUG, NettyServerHandler.class);
Http2HeadersDecoder headersDecoder = new GrpcHttp2ServerHeadersDecoder(maxHeaderListSize);
Http2FrameReader frameReader = new Http2InboundFrameLogger(
new DefaultHttp2FrameReader(headersDecoder), frameLogger);
Http2HeadersEncoder encoder = new DefaultHttp2HeadersEncoder(
Http2HeadersEncoder.NEVER_SENSITIVE, false, 16, Integer.MAX_VALUE);
Http2FrameWriter frameWriter =
new Http2OutboundFrameLogger(new DefaultHttp2FrameWriter(encoder), frameLogger);
return newHandler(
channelUnused,
frameReader,
frameWriter,
transportListener,
streamTracerFactories,
transportTracer,
maxStreams,
autoFlowControl,
flowControlWindow,
maxHeaderListSize,
softLimitHeaderListSize,
maxMessageSize,
keepAliveTimeInNanos,
keepAliveTimeoutInNanos,
maxConnectionIdleInNanos,
maxConnectionAgeInNanos,
maxConnectionAgeGraceInNanos,
permitKeepAliveWithoutCalls,
permitKeepAliveTimeInNanos,
maxRstCount,
maxRstPeriodNanos,
eagAttributes,
Ticker.systemTicker());
}
static NettyServerHandler newHandler(
ChannelPromise channelUnused,
Http2FrameReader frameReader,
Http2FrameWriter frameWriter,
ServerTransportListener transportListener,
List<? extends ServerStreamTracer.Factory> streamTracerFactories,
TransportTracer transportTracer,
int maxStreams,
boolean autoFlowControl,
int flowControlWindow,
int maxHeaderListSize,
int softLimitHeaderListSize,
int maxMessageSize,
long keepAliveTimeInNanos,
long keepAliveTimeoutInNanos,
long maxConnectionIdleInNanos,
long maxConnectionAgeInNanos,
long maxConnectionAgeGraceInNanos,
boolean permitKeepAliveWithoutCalls,
long permitKeepAliveTimeInNanos,
int maxRstCount,
long maxRstPeriodNanos,
Attributes eagAttributes,
Ticker ticker) {
Preconditions.checkArgument(maxStreams > 0, "maxStreams must be positive: %s", maxStreams);
Preconditions.checkArgument(flowControlWindow > 0, "flowControlWindow must be positive: %s",
flowControlWindow);
Preconditions.checkArgument(maxHeaderListSize > 0, "maxHeaderListSize must be positive: %s",
maxHeaderListSize);
Preconditions.checkArgument(
softLimitHeaderListSize > 0, "softLimitHeaderListSize must be positive: %s",
softLimitHeaderListSize);
Preconditions.checkArgument(maxMessageSize > 0, "maxMessageSize must be positive: %s",
maxMessageSize);
final Http2Connection connection = new DefaultHttp2Connection(true);
UniformStreamByteDistributor dist = new UniformStreamByteDistributor(connection);
dist.minAllocationChunk(MIN_ALLOCATED_CHUNK); // Increased for benchmarks performance.
DefaultHttp2RemoteFlowController controller =
new DefaultHttp2RemoteFlowController(connection, dist);
connection.remote().flowController(controller);
final KeepAliveEnforcer keepAliveEnforcer = new KeepAliveEnforcer(
permitKeepAliveWithoutCalls, permitKeepAliveTimeInNanos, TimeUnit.NANOSECONDS);
if (ticker == null) {
ticker = Ticker.systemTicker();
}
RstStreamCounter rstStreamCounter
= new RstStreamCounter(maxRstCount, maxRstPeriodNanos, ticker);
// Create the local flow controller configured to auto-refill the connection window.
connection.local().flowController(
new DefaultHttp2LocalFlowController(connection, DEFAULT_WINDOW_UPDATE_RATIO, true));
frameWriter = new WriteMonitoringFrameWriter(frameWriter, keepAliveEnforcer);
Http2ConnectionEncoder encoder =
new DefaultHttp2ConnectionEncoder(connection, frameWriter);
encoder = new Http2ControlFrameLimitEncoder(encoder, 10000);
encoder = new Http2RstCounterEncoder(encoder, rstStreamCounter);
Http2ConnectionDecoder decoder = new DefaultHttp2ConnectionDecoder(connection, encoder,
frameReader);
Http2Settings settings = new Http2Settings();
settings.initialWindowSize(flowControlWindow);
settings.maxConcurrentStreams(maxStreams);
settings.maxHeaderListSize(maxHeaderListSize);
return new NettyServerHandler(
channelUnused,
connection,
transportListener,
streamTracerFactories,
transportTracer,
decoder, encoder, settings,
maxMessageSize,
maxHeaderListSize,
softLimitHeaderListSize,
keepAliveTimeInNanos,
keepAliveTimeoutInNanos,
maxConnectionIdleInNanos,
maxConnectionAgeInNanos, maxConnectionAgeGraceInNanos,
keepAliveEnforcer,
autoFlowControl,
rstStreamCounter,
eagAttributes, ticker);
}
private NettyServerHandler(
ChannelPromise channelUnused,
final Http2Connection connection,
ServerTransportListener transportListener,
List<? extends ServerStreamTracer.Factory> streamTracerFactories,
TransportTracer transportTracer,
Http2ConnectionDecoder decoder,
Http2ConnectionEncoder encoder,
Http2Settings settings,
int maxMessageSize,
int maxHeaderListSize,
int softLimitHeaderListSize,
long keepAliveTimeInNanos,
long keepAliveTimeoutInNanos,
long maxConnectionIdleInNanos,
long maxConnectionAgeInNanos,
long maxConnectionAgeGraceInNanos,
final KeepAliveEnforcer keepAliveEnforcer,
boolean autoFlowControl,
RstStreamCounter rstStreamCounter,
Attributes eagAttributes,
Ticker ticker) {
super(
channelUnused,
decoder,
encoder,
settings,
new ServerChannelLogger(),
autoFlowControl,
null,
ticker,
maxHeaderListSize,
softLimitHeaderListSize);
final MaxConnectionIdleManager maxConnectionIdleManager;
if (maxConnectionIdleInNanos == MAX_CONNECTION_IDLE_NANOS_DISABLED) {
maxConnectionIdleManager = null;
} else {
maxConnectionIdleManager = new MaxConnectionIdleManager(maxConnectionIdleInNanos);
}
connection.addListener(new Http2ConnectionAdapter() {
@Override
public void onStreamActive(Http2Stream stream) {
if (connection.numActiveStreams() == 1) {
keepAliveEnforcer.onTransportActive();
if (maxConnectionIdleManager != null) {
maxConnectionIdleManager.onTransportActive();
}
}
}
@Override
public void onStreamClosed(Http2Stream stream) {
if (connection.numActiveStreams() == 0) {
keepAliveEnforcer.onTransportIdle();
if (maxConnectionIdleManager != null) {
maxConnectionIdleManager.onTransportIdle();
}
}
}
});
checkArgument(maxMessageSize >= 0, "maxMessageSize must be non-negative: %s", maxMessageSize);
this.maxMessageSize = maxMessageSize;
this.keepAliveTimeInNanos = keepAliveTimeInNanos;
this.keepAliveTimeoutInNanos = keepAliveTimeoutInNanos;
this.maxConnectionIdleManager = maxConnectionIdleManager;
this.maxConnectionAgeInNanos = maxConnectionAgeInNanos;
this.maxConnectionAgeGraceInNanos = maxConnectionAgeGraceInNanos;
this.keepAliveEnforcer = checkNotNull(keepAliveEnforcer, "keepAliveEnforcer");
this.rstStreamCounter = rstStreamCounter;
this.eagAttributes = checkNotNull(eagAttributes, "eagAttributes");
streamKey = encoder.connection().newKey();
this.transportListener = checkNotNull(transportListener, "transportListener");
this.streamTracerFactories = checkNotNull(streamTracerFactories, "streamTracerFactories");
this.transportTracer = checkNotNull(transportTracer, "transportTracer");
// Set the frame listener on the decoder.
decoder().frameListener(new FrameListener());
}
@Nullable
Throwable connectionError() {
return connectionError;
}
@Override
public void handlerAdded(final ChannelHandlerContext ctx) throws Exception {
serverWriteQueue = new WriteQueue(ctx.channel());
// init max connection age monitor
if (maxConnectionAgeInNanos != MAX_CONNECTION_AGE_NANOS_DISABLED) {
maxConnectionAgeMonitor = ctx.executor().schedule(
new LogExceptionRunnable(new Runnable() {
@Override
public void run() {
if (gracefulShutdown == null) {
gracefulShutdown = new GracefulShutdown("max_age", maxConnectionAgeGraceInNanos);
gracefulShutdown.start(ctx);
ctx.flush();
}
}
}),
maxConnectionAgeInNanos,
TimeUnit.NANOSECONDS);
}
if (maxConnectionIdleManager != null) {
maxConnectionIdleManager.start(new Runnable() {
@Override
public void run() {
if (gracefulShutdown == null) {
gracefulShutdown = new GracefulShutdown("max_idle", null);
gracefulShutdown.start(ctx);
ctx.flush();
}
}
}, ctx.executor());
}
if (keepAliveTimeInNanos != SERVER_KEEPALIVE_TIME_NANOS_DISABLED) {
keepAliveManager = new KeepAliveManager(new KeepAlivePinger(ctx), ctx.executor(),
keepAliveTimeInNanos, keepAliveTimeoutInNanos, true /* keepAliveDuringTransportIdle */);
keepAliveManager.onTransportStarted();
}
assert encoder().connection().equals(decoder().connection());
transportTracer.setFlowControlWindowReader(new Utils.FlowControlReader(encoder().connection()));
super.handlerAdded(ctx);
}
private void onHeadersRead(ChannelHandlerContext ctx, int streamId, Http2Headers headers)
throws Http2Exception {
try {
// Connection-specific header fields makes a request malformed. Ideally this would be handled
// by Netty. RFC 7540 section 8.1.2.2
if (!DISABLE_CONNECTION_HEADER_CHECK && headers.contains(CONNECTION)) {
resetStream(ctx, streamId, Http2Error.PROTOCOL_ERROR.code(), ctx.newPromise());
return;
}
if (headers.authority() == null) {
List<CharSequence> hosts = headers.getAll(HOST);
if (hosts.size() > 1) {
// RFC 7230 section 5.4
respondWithHttpError(ctx, streamId, 400, Status.Code.INTERNAL,
"Multiple host headers");
return;
}
if (!hosts.isEmpty()) {
headers.add(AUTHORITY.value(), hosts.get(0));
}
}
headers.remove(HOST);
// Remove the leading slash of the path and get the fully qualified method name
CharSequence path = headers.path();
if (path == null) {
respondWithHttpError(ctx, streamId, 404, Status.Code.UNIMPLEMENTED,
"Expected path but is missing");
return;
}
if (path.charAt(0) != '/') {
respondWithHttpError(ctx, streamId, 404, Status.Code.UNIMPLEMENTED,
String.format("Expected path to start with /: %s", path));
return;
}
String method = path.subSequence(1, path.length()).toString();
// Verify that the Content-Type is correct in the request.
CharSequence contentType = headers.get(CONTENT_TYPE_HEADER);
if (contentType == null) {
respondWithHttpError(
ctx, streamId, 415, Status.Code.INTERNAL, "Content-Type is missing from the request");
return;
}
String contentTypeString = contentType.toString();
if (!GrpcUtil.isGrpcContentType(contentTypeString)) {
respondWithHttpError(ctx, streamId, 415, Status.Code.INTERNAL,
String.format("Content-Type '%s' is not supported", contentTypeString));
return;
}
if (!HTTP_METHOD.contentEquals(headers.method())) {
Http2Headers extraHeaders = new DefaultHttp2Headers();
extraHeaders.add(HttpHeaderNames.ALLOW, HTTP_METHOD);
respondWithHttpError(ctx, streamId, 405, Status.Code.INTERNAL,
String.format("Method '%s' is not supported", headers.method()), extraHeaders);
return;
}
int h2HeadersSize = Utils.getH2HeadersSize(headers);
if (Utils.shouldRejectOnMetadataSizeSoftLimitExceeded(
h2HeadersSize, softLimitHeaderListSize, maxHeaderListSize)) {
respondWithHttpError(ctx, streamId, 431, Status.Code.RESOURCE_EXHAUSTED, String.format(
"Client Headers of size %d exceeded Metadata size soft limit: %d",
h2HeadersSize,
softLimitHeaderListSize));
return;
}
if (!teWarningLogged && !TE_TRAILERS.contentEquals(headers.get(TE_HEADER))) {
logger.warning(String.format("Expected header TE: %s, but %s is received. This means "
+ "some intermediate proxy may not support trailers",
TE_TRAILERS, headers.get(TE_HEADER)));
teWarningLogged = true;
}
// The Http2Stream object was put by AbstractHttp2ConnectionHandler before calling this
// method.
Http2Stream http2Stream = requireHttp2Stream(streamId);
Metadata metadata = Utils.convertHeaders(headers);
StatsTraceContext statsTraceCtx =
StatsTraceContext.newServerContext(streamTracerFactories, method, metadata);
NettyServerStream.TransportState state = new NettyServerStream.TransportState(
this,
ctx.channel().eventLoop(),
http2Stream,
maxMessageSize,
statsTraceCtx,
transportTracer,
method);
try (TaskCloseable ignore = PerfMark.traceTask("NettyServerHandler.onHeadersRead")) {
PerfMark.attachTag(state.tag());
String authority = getOrUpdateAuthority((AsciiString) headers.authority());
NettyServerStream stream = new NettyServerStream(
ctx.channel(),
state,
attributes,
authority,
statsTraceCtx);
transportListener.streamCreated(stream, method, metadata);
state.onStreamAllocated();
http2Stream.setProperty(streamKey, state);
}
} catch (Exception e) {
logger.log(Level.WARNING, "Exception in onHeadersRead()", e);
// Throw an exception that will get handled by onStreamError.
throw newStreamException(streamId, e);
}
}
// Returns the request authority as a String, caching the last-seen
// AsciiString so repeated requests with the same authority reuse the
// cached String conversion.
private String getOrUpdateAuthority(AsciiString authority) {
if (authority == null) {
return null;
} else if (!authority.equals(lastKnownAuthority)) {
lastKnownAuthority = authority;
}
// AsciiString.toString() is internally cached, so subsequent calls will not
// result in recomputing the String representation of lastKnownAuthority.
return lastKnownAuthority.toString();
}
// Forwards an inbound DATA frame to the matching server stream.
private void onDataRead(int streamId, ByteBuf data, int padding, boolean endOfStream)
throws Http2Exception {
// Account the bytes (payload + padding) for BDP-based flow-control tuning.
flowControlPing().onDataRead(data.readableBytes(), padding);
try {
NettyServerStream.TransportState stream = serverStream(requireHttp2Stream(streamId));
if (stream == null) {
// Stream no longer tracked (e.g. already closed); drop the data.
return;
}
try (TaskCloseable ignore = PerfMark.traceTask("NettyServerHandler.onDataRead")) {
PerfMark.attachTag(stream.tag());
stream.inboundDataReceived(data, endOfStream);
}
} catch (Throwable e) {
logger.log(Level.WARNING, "Exception in onDataRead()", e);
// Throw an exception that will get handled by onStreamError.
throw newStreamException(streamId, e);
}
}
// Handles an inbound RST_STREAM: enforces the RST-rate limit (rapid-reset
// mitigation) and cancels the affected server stream.
private void onRstStreamRead(int streamId, long errorCode) throws Http2Exception {
// Non-null means the client exceeded the allowed RST_STREAM rate; abort
// the connection by propagating the exception.
Http2Exception tooManyRstStream = rstStreamCounter.countRstStream();
if (tooManyRstStream != null) {
throw tooManyRstStream;
}
try {
NettyServerStream.TransportState stream = serverStream(connection().stream(streamId));
if (stream != null) {
try (TaskCloseable ignore = PerfMark.traceTask("NettyServerHandler.onRstStreamRead")) {
PerfMark.attachTag(stream.tag());
stream.transportReportStatus(
Status.CANCELLED.withDescription("RST_STREAM received for code " + errorCode));
}
}
} catch (Throwable e) {
logger.log(Level.WARNING, "Exception in onRstStreamRead()", e);
// Throw an exception that will get handled by onStreamError.
throw newStreamException(streamId, e);
}
}
@Override
protected void onConnectionError(ChannelHandlerContext ctx, boolean outbound, Throwable cause,
Http2Exception http2Ex) {
logger.log(Level.FINE, "Connection Error", cause);
// Remember the cause so connectionError() can surface it later.
connectionError = cause;
super.onConnectionError(ctx, outbound, cause, http2Ex);
}
@Override
protected void onStreamError(ChannelHandlerContext ctx, boolean outbound, Throwable cause,
StreamException http2Ex) {
NettyServerStream.TransportState serverStream = serverStream(
connection().stream(Http2Exception.streamId(http2Ex)));
Level level = Level.WARNING;
if (serverStream == null && http2Ex.error() == Http2Error.STREAM_CLOSED) {
level = Level.FINE;
}
logger.log(level, "Stream Error", cause);
Tag tag = serverStream != null ? serverStream.tag() : PerfMark.createTag();
try (TaskCloseable ignore = PerfMark.traceTask("NettyServerHandler.onStreamError")) {
PerfMark.attachTag(tag);
if (serverStream != null) {
serverStream.transportReportStatus(Utils.statusFromThrowable(cause));
}
// TODO(ejona): Abort the stream by sending headers to help the client with debugging.
// Delegate to the base
|
NettyServerHandler
|
java
|
resilience4j__resilience4j
|
resilience4j-spring/src/test/java/io/github/resilience4j/timelimiter/configure/RxJava2TimeLimiterAspectExtTest.java
|
{
"start": 530,
"end": 3018
}
|
// Unit tests for the RxJava2 time-limiter aspect extension: verifies which
// reactive return types it accepts, that it decorates each of them, and
// that unsupported return types are rejected with a clear error.
class ____ {
@Mock
ProceedingJoinPoint proceedingJoinPoint;
@InjectMocks
RxJava2TimeLimiterAspectExt rxJava2TimeLimiterAspectExt;
// All five RxJava2 reactive types must be reported as handleable.
@Test
public void testCheckTypes() {
assertThat(rxJava2TimeLimiterAspectExt.canHandleReturnType(Flowable.class)).isTrue();
assertThat(rxJava2TimeLimiterAspectExt.canHandleReturnType(Single.class)).isTrue();
assertThat(rxJava2TimeLimiterAspectExt.canHandleReturnType(Observable.class)).isTrue();
assertThat(rxJava2TimeLimiterAspectExt.canHandleReturnType(Completable.class)).isTrue();
assertThat(rxJava2TimeLimiterAspectExt.canHandleReturnType(Maybe.class)).isTrue();
}
// handle(...) must wrap each reactive type without returning null.
@Test
public void testRxJava2Types() throws Throwable {
TimeLimiter timeLimiter = TimeLimiter.ofDefaults("test");
when(proceedingJoinPoint.proceed()).thenReturn(Single.just("Test"));
assertThat(rxJava2TimeLimiterAspectExt.handle(proceedingJoinPoint, timeLimiter, "testMethod")).isNotNull();
when(proceedingJoinPoint.proceed()).thenReturn(Flowable.just("Test"));
assertThat(rxJava2TimeLimiterAspectExt.handle(proceedingJoinPoint, timeLimiter, "testMethod")).isNotNull();
when(proceedingJoinPoint.proceed()).thenReturn(Observable.just("Test"));
assertThat(rxJava2TimeLimiterAspectExt.handle(proceedingJoinPoint, timeLimiter, "testMethod")).isNotNull();
when(proceedingJoinPoint.proceed()).thenReturn(Completable.complete());
assertThat(rxJava2TimeLimiterAspectExt.handle(proceedingJoinPoint, timeLimiter, "testMethod")).isNotNull();
when(proceedingJoinPoint.proceed()).thenReturn(Maybe.just("Test"));
assertThat(rxJava2TimeLimiterAspectExt.handle(proceedingJoinPoint, timeLimiter, "testMethod")).isNotNull();
}
// A non-reactive return value must trigger IllegalReturnTypeException.
@Test
public void shouldThrowIllegalArgumentExceptionWithNotRxJava2Type() throws Throwable{
TimeLimiter timeLimiter = TimeLimiter.ofDefaults("test");
when(proceedingJoinPoint.proceed()).thenReturn("NOT RXJAVA2 TYPE");
try {
rxJava2TimeLimiterAspectExt.handle(proceedingJoinPoint, timeLimiter, "testMethod");
fail("exception missed");
} catch (Throwable e) {
assertThat(e).isInstanceOf(IllegalReturnTypeException.class)
.hasMessage(
"java.lang.String testMethod has unsupported by @TimeLimiter return type. RxJava2 expects Flowable/Single/...");
}
}
}
|
RxJava2TimeLimiterAspectExtTest
|
java
|
apache__camel
|
components/camel-mongodb-gridfs/src/generated/java/org/apache/camel/component/mongodb/gridfs/GridFsEndpointConfigurer.java
|
{
"start": 741,
"end": 6806
}
|
// Generated property configurer for the MongoDB GridFS endpoint: maps
// option names (case-sensitive and lower-cased variants) to typed
// setters/getters. Do not hand-edit generated code like this in the
// original project; comments here are review annotations only.
class ____ extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
// Sets one endpoint option by name; returns false for unknown options.
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
GridFsEndpoint target = (GridFsEndpoint) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "bridgeerrorhandler":
case "bridgeErrorHandler": target.setBridgeErrorHandler(property(camelContext, boolean.class, value)); return true;
case "bucket": target.setBucket(property(camelContext, java.lang.String.class, value)); return true;
case "database": target.setDatabase(property(camelContext, java.lang.String.class, value)); return true;
case "delay": target.setDelay(property(camelContext, java.time.Duration.class, value).toMillis()); return true;
case "exceptionhandler":
case "exceptionHandler": target.setExceptionHandler(property(camelContext, org.apache.camel.spi.ExceptionHandler.class, value)); return true;
case "exchangepattern":
case "exchangePattern": target.setExchangePattern(property(camelContext, org.apache.camel.ExchangePattern.class, value)); return true;
case "fileattributename":
case "fileAttributeName": target.setFileAttributeName(property(camelContext, java.lang.String.class, value)); return true;
case "initialdelay":
case "initialDelay": target.setInitialDelay(property(camelContext, java.time.Duration.class, value).toMillis()); return true;
case "lazystartproducer":
case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
case "operation": target.setOperation(property(camelContext, java.lang.String.class, value)); return true;
case "persistenttscollection":
case "persistentTSCollection": target.setPersistentTSCollection(property(camelContext, java.lang.String.class, value)); return true;
case "persistenttsobject":
case "persistentTSObject": target.setPersistentTSObject(property(camelContext, java.lang.String.class, value)); return true;
case "query": target.setQuery(property(camelContext, java.lang.String.class, value)); return true;
case "querystrategy":
case "queryStrategy": target.setQueryStrategy(property(camelContext, org.apache.camel.component.mongodb.gridfs.QueryStrategy.class, value)); return true;
case "readpreference":
case "readPreference": target.setReadPreference(property(camelContext, com.mongodb.ReadPreference.class, value)); return true;
case "writeconcern":
case "writeConcern": target.setWriteConcern(property(camelContext, com.mongodb.WriteConcern.class, value)); return true;
default: return false;
}
}
// Returns the declared type of an option, or null if unknown.
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "bridgeerrorhandler":
case "bridgeErrorHandler": return boolean.class;
case "bucket": return java.lang.String.class;
case "database": return java.lang.String.class;
case "delay": return long.class;
case "exceptionhandler":
case "exceptionHandler": return org.apache.camel.spi.ExceptionHandler.class;
case "exchangepattern":
case "exchangePattern": return org.apache.camel.ExchangePattern.class;
case "fileattributename":
case "fileAttributeName": return java.lang.String.class;
case "initialdelay":
case "initialDelay": return long.class;
case "lazystartproducer":
case "lazyStartProducer": return boolean.class;
case "operation": return java.lang.String.class;
case "persistenttscollection":
case "persistentTSCollection": return java.lang.String.class;
case "persistenttsobject":
case "persistentTSObject": return java.lang.String.class;
case "query": return java.lang.String.class;
case "querystrategy":
case "queryStrategy": return org.apache.camel.component.mongodb.gridfs.QueryStrategy.class;
case "readpreference":
case "readPreference": return com.mongodb.ReadPreference.class;
case "writeconcern":
case "writeConcern": return com.mongodb.WriteConcern.class;
default: return null;
}
}
// Reads one endpoint option by name, or null if unknown.
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
GridFsEndpoint target = (GridFsEndpoint) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "bridgeerrorhandler":
case "bridgeErrorHandler": return target.isBridgeErrorHandler();
case "bucket": return target.getBucket();
case "database": return target.getDatabase();
case "delay": return target.getDelay();
case "exceptionhandler":
case "exceptionHandler": return target.getExceptionHandler();
case "exchangepattern":
case "exchangePattern": return target.getExchangePattern();
case "fileattributename":
case "fileAttributeName": return target.getFileAttributeName();
case "initialdelay":
case "initialDelay": return target.getInitialDelay();
case "lazystartproducer":
case "lazyStartProducer": return target.isLazyStartProducer();
case "operation": return target.getOperation();
case "persistenttscollection":
case "persistentTSCollection": return target.getPersistentTSCollection();
case "persistenttsobject":
case "persistentTSObject": return target.getPersistentTSObject();
case "query": return target.getQuery();
case "querystrategy":
case "queryStrategy": return target.getQueryStrategy();
case "readpreference":
case "readPreference": return target.getReadPreference();
case "writeconcern":
case "writeConcern": return target.getWriteConcern();
default: return null;
}
}
}
|
GridFsEndpointConfigurer
|
java
|
grpc__grpc-java
|
netty/src/jmh/java/io/grpc/netty/MethodDescriptorBenchmark.java
|
{
"start": 1224,
"end": 2843
}
|
class ____ {
private static final MethodDescriptor.Marshaller<Void> marshaller =
new MethodDescriptor.Marshaller<Void>() {
@Override
public InputStream stream(Void value) {
return new ByteArrayInputStream(new byte[]{});
}
@Override
public Void parse(InputStream stream) {
return null;
}
};
MethodDescriptor<Void, Void> method = MethodDescriptor.<Void, Void>newBuilder()
.setType(MethodDescriptor.MethodType.UNARY)
.setFullMethodName("Service/Method")
.setRequestMarshaller(marshaller)
.setResponseMarshaller(marshaller)
.build();
InternalMethodDescriptor imd = new InternalMethodDescriptor(InternalKnownTransport.NETTY);
byte[] directBytes = new AsciiString("/" + method.getFullMethodName()).toByteArray();
/** Foo bar. */
@Benchmark
@BenchmarkMode(Mode.SampleTime)
@OutputTimeUnit(TimeUnit.NANOSECONDS)
public AsciiString old() {
return new AsciiString("/" + method.getFullMethodName());
}
/** Foo bar. */
@Benchmark
@BenchmarkMode(Mode.SampleTime)
@OutputTimeUnit(TimeUnit.NANOSECONDS)
public AsciiString transportSpecific() {
AsciiString path;
if ((path = (AsciiString) imd.geRawMethodName(method)) != null) {
path = new AsciiString("/" + method.getFullMethodName());
imd.setRawMethodName(method, path);
}
return path;
}
/** Foo bar. */
@Benchmark
@BenchmarkMode(Mode.SampleTime)
@OutputTimeUnit(TimeUnit.NANOSECONDS)
public AsciiString direct() {
return new AsciiString(directBytes, false);
}
}
|
MethodDescriptorBenchmark
|
java
|
netty__netty
|
example/src/main/java/io/netty/example/qotm/QuoteOfTheMomentServerHandler.java
|
{
"start": 916,
"end": 2331
}
|
class ____ extends SimpleChannelInboundHandler<DatagramPacket> {
private static final Random random = new Random();
// Quotes from Mohandas K. Gandhi:
private static final String[] quotes = {
"Where there is love there is life.",
"First they ignore you, then they laugh at you, then they fight you, then you win.",
"Be the change you want to see in the world.",
"The weak can never forgive. Forgiveness is the attribute of the strong.",
};
private static String nextQuote() {
int quoteId;
synchronized (random) {
quoteId = random.nextInt(quotes.length);
}
return quotes[quoteId];
}
@Override
public void channelRead0(ChannelHandlerContext ctx, DatagramPacket packet) throws Exception {
System.err.println(packet);
if ("QOTM?".equals(packet.content().toString(CharsetUtil.UTF_8))) {
ctx.write(new DatagramPacket(
Unpooled.copiedBuffer("QOTM: " + nextQuote(), CharsetUtil.UTF_8), packet.sender()));
}
}
@Override
public void channelReadComplete(ChannelHandlerContext ctx) {
ctx.flush();
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
cause.printStackTrace();
// We don't close the channel because we can keep serving requests.
}
}
|
QuoteOfTheMomentServerHandler
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/serializer/SerializeWriterTest_19.java
|
{
"start": 331,
"end": 940
}
|
class ____ extends TestCase {
public void test_writer_1() throws Exception {
SerializeWriter out = new SerializeWriter(14);
out.config(SerializerFeature.QuoteFieldNames, true);
out.config(SerializerFeature.UseSingleQuotes, true);
try {
JSONSerializer serializer = new JSONSerializer(out);
VO vo = new VO();
vo.getValues().add("#");
serializer.write(vo);
Assert.assertEquals("{'values':['#']}", out.toString());
} finally {
out.close();
}
}
public static
|
SerializeWriterTest_19
|
java
|
junit-team__junit5
|
junit-platform-commons/src/main/java/org/junit/platform/commons/support/HierarchyTraversalMode.java
|
{
"start": 695,
"end": 879
}
|
enum ____ {
/**
* Traverse the hierarchy using top-down semantics.
*/
TOP_DOWN,
/**
* Traverse the hierarchy using bottom-up semantics.
*/
BOTTOM_UP
}
|
HierarchyTraversalMode
|
java
|
spring-projects__spring-framework
|
spring-core/src/test/java/org/springframework/core/annotation/AnnotationsScannerTests.java
|
{
"start": 22568,
"end": 22694
}
|
class ____ {
@TestAnnotation1
public void method() {
}
}
@TestAnnotation1
@TestAnnotation2
static
|
WithSingleAnnotation
|
java
|
elastic__elasticsearch
|
qa/smoke-test-http/src/internalClusterTest/java/org/elasticsearch/http/HttpSmokeTestCase.java
|
{
"start": 958,
"end": 1806
}
|
class ____ extends ESIntegTestCase {
@Override
protected boolean addMockHttpTransport() {
return false; // enable http
}
@Override
protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
return Settings.builder()
.put(super.nodeSettings(nodeOrdinal, otherSettings))
.put(NetworkModule.TRANSPORT_TYPE_KEY, Netty4Plugin.NETTY_TRANSPORT_NAME)
.put(NetworkModule.HTTP_TYPE_KEY, Netty4Plugin.NETTY_HTTP_TRANSPORT_NAME)
.build();
}
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return List.of(getTestTransportPlugin(), MainRestPlugin.class);
}
public static void assertOK(Response response) {
assertThat(response.getStatusLine().getStatusCode(), oneOf(200, 201));
}
}
|
HttpSmokeTestCase
|
java
|
google__error-prone
|
core/src/main/java/com/google/errorprone/bugpatterns/ChainingConstructorIgnoresParameter.java
|
{
"start": 3518,
"end": 8887
}
|
class ____ multiple definitions. But I would expect for multiple definitions within the same
* compiler invocation to cause deeper problems.)
*/
paramTypesForMethod.clear();
callersToEvaluate.clear(); // should have already been cleared
return NO_MATCH;
}
@Override
public Description matchMethodInvocation(MethodInvocationTree tree, VisitorState state) {
MethodSymbol symbol = getSymbol(tree);
// TODO(cpovirk): determine whether anyone might be calling Foo.this()
if (!isIdentifierWithName(tree.getMethodSelect(), "this")) {
return NO_MATCH;
}
callersToEvaluate.put(symbol, new Caller(tree, state));
return evaluateCallers(symbol);
}
@Override
public Description matchMethod(MethodTree tree, VisitorState state) {
MethodSymbol symbol = getSymbol(tree);
if (!symbol.isConstructor()) {
return NO_MATCH;
}
paramTypesForMethod.put(symbol, unmodifiableList(tree.getParameters()));
return evaluateCallers(symbol);
}
private Description evaluateCallers(MethodSymbol symbol) {
List<VariableTree> paramTypes = paramTypesForMethod.get(symbol);
if (paramTypes == null) {
// We haven't seen the declaration yet. We'll evaluate the call when we do.
return NO_MATCH;
}
for (Caller caller : callersToEvaluate.removeAll(symbol)) {
VisitorState state = caller.state;
MethodInvocationTree invocation = caller.tree;
MethodTree callerConstructor = state.findEnclosing(MethodTree.class);
if (callerConstructor == null) {
continue; // impossible, at least in compilable code?
}
Map<String, Type> availableParams = indexTypeByName(callerConstructor.getParameters());
/*
* TODO(cpovirk): Better handling of varargs: If the last parameter type is varargs and it is
* called as varargs (rather than by passing an array), then rewrite the parameter types to
* (p0, p1, ..., p[n-2], p[n-1] = element type of varargs parameter if an argument is
* supplied, p[n] = ditto, etc.). For now, we settle for not crashing in the face of a
* mismatch between the number of parameters declared and the number supplied.
*
* (Use MethodSymbol.isVarArgs.)
*/
for (int i = 0; i < paramTypes.size() && i < invocation.getArguments().size(); i++) {
VariableTree formalParam = paramTypes.get(i);
String formalParamName = formalParam.getName().toString();
Type formalParamType = getType(formalParam.getType());
Type availableParamType = availableParams.get(formalParamName);
ExpressionTree actualParam = invocation.getArguments().get(i);
if (
/*
* The caller has no param of this type. (Or if it did, we couldn't determine the type.
* Does that ever happen?) If the param doesn't exist, the caller can't be failing to
* pass it.
*/
availableParamType == null
/*
* We couldn't determine the type of the formal parameter. (Does this ever happen?)
*/
|| formalParamType == null
/*
* The caller is passing the expected parameter (or "ImmutableList.copyOf(parameter),"
* "new File(parameter)," etc.).
*/
|| referencesIdentifierWithName(formalParamName, actualParam, state)) {
continue;
}
if (state.getTypes().isAssignable(availableParamType, formalParamType)) {
reportMatch(invocation, state, actualParam, formalParamName);
}
/*
* If formal parameter is of an incompatible type, the caller might in theory still intend
* to pass a derived expression. For example, "Foo(String file)" might intend to call
* "Foo(File file)" by passing "new File(file)." If this comes up in practice, we could
* provide the dummy suggested fix "someExpression(formalParamName)." However, my research
* suggests that this will rarely if ever be what the user wants.
*/
}
}
// All matches are reported through reportMatch calls instead of return values.
return NO_MATCH;
}
private static Map<String, Type> indexTypeByName(List<? extends VariableTree> parameters) {
Map<String, Type> result = newHashMap();
for (VariableTree parameter : parameters) {
result.put(parameter.getName().toString(), getType(parameter.getType()));
}
return result;
}
private void reportMatch(
Tree diagnosticPosition, VisitorState state, Tree toReplace, String replaceWith) {
state.reportMatch(describeMatch(diagnosticPosition, replace(toReplace, replaceWith)));
}
private static boolean referencesIdentifierWithName(
String name, ExpressionTree tree, VisitorState state) {
Matcher<IdentifierTree> identifierMatcher =
new Matcher<IdentifierTree>() {
@Override
public boolean matches(IdentifierTree tree, VisitorState state) {
return isIdentifierWithName(tree, name);
}
};
return hasIdentifier(identifierMatcher).matches(tree, state);
}
private static boolean isIdentifierWithName(ExpressionTree tree, String name) {
return tree instanceof IdentifierTree identifierTree
&& identifierTree.getName().contentEquals(name);
}
private static final
|
has
|
java
|
ReactiveX__RxJava
|
src/main/java/io/reactivex/rxjava3/internal/operators/maybe/MaybeFromCompletable.java
|
{
"start": 982,
"end": 1468
}
|
class ____<T> extends Maybe<T> implements HasUpstreamCompletableSource {
final CompletableSource source;
public MaybeFromCompletable(CompletableSource source) {
this.source = source;
}
@Override
public CompletableSource source() {
return source;
}
@Override
protected void subscribeActual(MaybeObserver<? super T> observer) {
source.subscribe(new FromCompletableObserver<T>(observer));
}
static final
|
MaybeFromCompletable
|
java
|
spring-projects__spring-framework
|
spring-core/src/main/java/org/springframework/core/Ordered.java
|
{
"start": 1635,
"end": 2403
}
|
interface ____ {
/**
* Useful constant for the highest precedence value.
* @see java.lang.Integer#MIN_VALUE
*/
int HIGHEST_PRECEDENCE = Integer.MIN_VALUE;
/**
* Useful constant for the lowest precedence value.
* @see java.lang.Integer#MAX_VALUE
*/
int LOWEST_PRECEDENCE = Integer.MAX_VALUE;
/**
* Get the order value of this object.
* <p>Higher values are interpreted as lower priority. As a consequence,
* the object with the lowest value has the highest priority (somewhat
* analogous to Servlet {@code load-on-startup} values).
* <p>Same order values will result in arbitrary sort positions for the
* affected objects.
* @return the order value
* @see #HIGHEST_PRECEDENCE
* @see #LOWEST_PRECEDENCE
*/
int getOrder();
}
|
Ordered
|
java
|
elastic__elasticsearch
|
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/PoolingSessionFactory.java
|
{
"start": 2055,
"end": 2140
}
|
class ____ LDAP session factories that can make use of a connection pool
*/
abstract
|
for
|
java
|
apache__logging-log4j2
|
log4j-api/src/main/java/org/apache/logging/log4j/message/ParameterVisitable.java
|
{
"start": 1070,
"end": 1843
}
|
interface ____ {
/**
* Performs the given action for each parameter until all values
* have been processed or the action throws an exception.
* <p>
* The second parameter lets callers pass in a stateful object to be modified with the key-value pairs,
* so the TriConsumer implementation itself can be stateless and potentially reusable.
* </p>
*
* @param action The action to be performed for each key-value pair in this collection
* @param state the object to be passed as the third parameter to each invocation on the
* specified ParameterConsumer.
* @param <S> type of the third parameter
* @since 2.11
*/
<S> void forEachParameter(ParameterConsumer<S> action, S state);
}
|
ParameterVisitable
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsV2ResourceCalculator.java
|
{
"start": 1435,
"end": 1564
}
|
class ____ property.
* Theoretically the ResourceCalculatorProcessTree can be configured
* using the mapreduce.job.process-tree.
|
job
|
java
|
apache__flink
|
flink-state-backends/flink-statebackend-rocksdb/src/test/java/org/apache/flink/state/rocksdb/restore/DistributeStateHandlerHelperTest.java
|
{
"start": 2584,
"end": 8794
}
|
class ____ extends TestLogger {
private static final int NUM_KEY_GROUPS = 128;
private static final KeyGroupRange KEY_GROUP_RANGE = new KeyGroupRange(0, NUM_KEY_GROUPS - 1);
private static final int KEY_GROUP_PREFIX_BYTES =
CompositeKeySerializationUtils.computeRequiredBytesInKeyGroupPrefix(NUM_KEY_GROUPS);
private static final String CF_NAME = "test-column-family";
@TempDir private Path tempDir;
/** Test whether sst files are exported when the key group all in range. */
@Test
public void testAutoCompactionIsDisabled() throws Exception {
Path rocksDir = tempDir.resolve("rocksdb_dir");
Path dbPath = rocksDir.resolve("db");
Path chkDir = rocksDir.resolve("chk");
Path exportDir = rocksDir.resolve("export");
Files.createDirectories(dbPath);
Files.createDirectories(exportDir);
ArrayList<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>(2);
try (RocksDB db = openDB(dbPath.toString(), columnFamilyHandles)) {
ColumnFamilyHandle testCfHandler = columnFamilyHandles.get(1);
// Create SST files and verify their creation
for (int i = 0; i < 4; i++) {
db.flush(new FlushOptions().setWaitForFlush(true), testCfHandler);
for (int j = 10; j < NUM_KEY_GROUPS / 2; j++) {
byte[] bytes = new byte[KEY_GROUP_PREFIX_BYTES];
CompositeKeySerializationUtils.serializeKeyGroup(j, bytes);
db.delete(testCfHandler, bytes);
}
assertThat(
dbPath.toFile()
.listFiles(
(file, name) ->
name.toLowerCase().endsWith(".sst")))
.hasSize(i);
}
// Create checkpoint
try (Checkpoint checkpoint = Checkpoint.create(db)) {
checkpoint.createCheckpoint(chkDir.toString());
}
}
// Verify there are 4 sst files in level 0, compaction will be triggered once the DB is
// opened.
assertThat(chkDir.toFile().listFiles((file, name) -> name.toLowerCase().endsWith(".sst")))
.hasSize(4);
// Create IncrementalLocalKeyedStateHandle for testing
IncrementalLocalKeyedStateHandle stateHandle = createTestStateHandle(chkDir.toString());
try (DistributeStateHandlerHelper helper =
createDistributeStateHandlerHelper(
stateHandle, (name) -> new ColumnFamilyOptions())) {
// This simulates the delay that allows background compaction to clean up SST files if
// auto compaction is enabled.
Thread.sleep(500);
Map<RegisteredStateMetaInfoBase.Key, List<ExportImportFilesMetaData>>
exportedColumnFamiliesOut = new HashMap<>();
List<IncrementalLocalKeyedStateHandle> skipped = new ArrayList<>();
Either<KeyGroupRange, IncrementalLocalKeyedStateHandle> result =
helper.tryDistribute(exportDir, exportedColumnFamiliesOut);
assertThat(result.isLeft()).isTrue();
assertThat(exportedColumnFamiliesOut).isNotEmpty();
assertThat(skipped).isEmpty();
}
}
private RocksDB openDB(String path, ArrayList<ColumnFamilyHandle> columnFamilyHandles)
throws RocksDBException {
List<ColumnFamilyDescriptor> columnFamilyDescriptors = new ArrayList<>(2);
columnFamilyDescriptors.add(
new ColumnFamilyDescriptor(
RocksDB.DEFAULT_COLUMN_FAMILY,
new ColumnFamilyOptions().setDisableAutoCompactions(true)));
columnFamilyDescriptors.add(
new ColumnFamilyDescriptor(
CF_NAME.getBytes(ConfigConstants.DEFAULT_CHARSET),
new ColumnFamilyOptions().setDisableAutoCompactions(true)));
return RocksDB.open(
new DBOptions().setCreateIfMissing(true).setCreateMissingColumnFamilies(true),
path,
columnFamilyDescriptors,
columnFamilyHandles);
}
/**
* Creates a minimal IncrementalLocalKeyedStateHandle for testing. Uses empty metadata to focus
* on SST file distribution behavior.
*/
private IncrementalLocalKeyedStateHandle createTestStateHandle(String checkpointDir) {
return new IncrementalLocalKeyedStateHandle(
UUID.randomUUID(),
1L,
new DirectoryStateHandle(Paths.get(checkpointDir), 0L),
KEY_GROUP_RANGE,
new ByteStreamStateHandle("meta", new byte[0]),
Collections.emptyList());
}
/** Creates a DistributeStateHandlerHelper with test-specific configuration. */
private DistributeStateHandlerHelper createDistributeStateHandlerHelper(
IncrementalLocalKeyedStateHandle stateHandle,
Function<String, ColumnFamilyOptions> columnFamilyOptionsFactory)
throws Exception {
TypeSerializer<?> namespaceSerializer = LongSerializer.INSTANCE;
TypeSerializer<?> stateSerializer = DoubleSerializer.INSTANCE;
List<StateMetaInfoSnapshot> stateMetaInfoList = new ArrayList<>();
stateMetaInfoList.add(
new RegisteredKeyValueStateBackendMetaInfo<>(
StateDescriptor.Type.VALUE,
CF_NAME,
namespaceSerializer,
stateSerializer)
.snapshot());
return new DistributeStateHandlerHelper(
stateHandle,
stateMetaInfoList,
columnFamilyOptionsFactory,
new DBOptions().setCreateIfMissing(true),
null,
null,
KEY_GROUP_PREFIX_BYTES,
KEY_GROUP_RANGE,
"test-operator",
0);
}
}
|
DistributeStateHandlerHelperTest
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/bytecode/enhancement/flush/CollectionFlushAfterQueryTest.java
|
{
"start": 2348,
"end": 3026
}
|
class ____ {
@Id
private Long id;
private String name;
@OneToMany(cascade = CascadeType.ALL)
protected Set<MyOtherEntity> otherEntities = new HashSet<>();
public MyEntity() {
}
public MyEntity(Long id, String name) {
this.id = id;
this.name = name;
}
public Long getId() {
return this.id;
}
public Set<MyOtherEntity> getOtherEntities() {
return otherEntities;
}
public void setOtherEntities(Set<MyOtherEntity> otherEntities) {
this.otherEntities = otherEntities;
}
public void addOtherEntity(MyOtherEntity otherEntity) {
this.otherEntities.add( otherEntity );
}
}
@Entity(name = "MyOtherEntity")
public static
|
MyEntity
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/PublicApiNamedStreamShouldReturnStreamTest.java
|
{
"start": 903,
"end": 1285
}
|
class ____ {
private final CompilationTestHelper compilationHelper =
CompilationTestHelper.newInstance(PublicApiNamedStreamShouldReturnStream.class, getClass());
@Test
public void abstractMethodPositiveCase() {
compilationHelper
.addSourceLines(
"in/Test.java",
"""
public abstract
|
PublicApiNamedStreamShouldReturnStreamTest
|
java
|
mapstruct__mapstruct
|
core/src/main/java/org/mapstruct/CollectionMappingStrategy.java
|
{
"start": 1800,
"end": 3537
}
|
enum ____ {
/**
* The setter of the target property will be used to propagate the value:
* {@code orderDto.setOrderLines(order.getOrderLines)}.
* <p>
* If no setter is available but a getter method, this will be used, under the assumption it has been initialized:
* {@code orderDto.getOrderLines().addAll(order.getOrderLines)}. This will also be the case when using
* {@link MappingTarget} (updating existing instances).
*/
ACCESSOR_ONLY,
/**
* If present, the setter of the target property will be used to propagate the value:
* {@code orderDto.setOrderLines(order.getOrderLines)}.
* <p>
* If no setter but and adder method is present, that adder will be invoked for each element of the source
* collection: {@code order.addOrderLine(orderLine() )}.
* <p>
* If neither a setter nor an adder method but a getter for the target property is present, that getter will be
* used, assuming it returns an initialized collection: If no setter is available, MapStruct will first look for an
* adder method before resorting to a getter.
*/
SETTER_PREFERRED,
/**
* Identical to {@link #SETTER_PREFERRED}, only that adder methods will be preferred over setter methods, if both
* are present for a given collection-typed property.
*/
ADDER_PREFERRED,
/**
* Identical to {@link #SETTER_PREFERRED}, however the target collection will not be cleared and accessed via
* addAll in case of updating existing bean instances, see: {@link MappingTarget}.
*
* Instead the target accessor (e.g. set) will be used on the target bean to set the collection.
*/
TARGET_IMMUTABLE;
}
|
CollectionMappingStrategy
|
java
|
apache__camel
|
components/camel-ftp/src/test/java/org/apache/camel/component/file/remote/integration/FromFtpAsyncProcessIT.java
|
{
"start": 3153,
"end": 3757
}
|
class ____ extends AsyncProcessorSupport {
private final ExecutorService executor = Executors.newSingleThreadExecutor();
@Override
public boolean process(final Exchange exchange, final AsyncCallback callback) {
executor.submit(() -> {
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
// ignore
}
exchange.getIn().setHeader("foo", 123);
callback.done(false);
});
return false;
}
}
}
|
MyAsyncProcessor
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/InvalidTimeZoneIDTest.java
|
{
"start": 1248,
"end": 2753
}
|
class ____ {
private static final String TIMEZONE_ID = "unknown";
public static void test() {
// BUG: Diagnostic contains:
TimeZone.getTimeZone("");
// BUG: Diagnostic contains:
TimeZone.getTimeZone("unknown");
// BUG: Diagnostic contains:
TimeZone.getTimeZone(TIMEZONE_ID);
// BUG: Diagnostic contains:
TimeZone.getTimeZone("America/Los_Angele");
// BUG: Diagnostic contains:
TimeZone.getTimeZone("KST");
}
public static void invalidCustomIDs() {
// BUG: Diagnostic contains:
TimeZone.getTimeZone("UTC+0");
// BUG: Diagnostic contains:
TimeZone.getTimeZone("GMT+24");
// BUG: Diagnostic contains:
TimeZone.getTimeZone("GMT1");
// BUG: Diagnostic contains:
TimeZone.getTimeZone("GMT/0");
}
public static void underscoreSuggestion() {
// BUG: Diagnostic contains: America/Los_Angeles
TimeZone.getTimeZone("America/Los Angeles");
}
}
""")
.doTest();
}
@Test
public void negativeCase() {
compilationHelper
.addSourceLines(
"a/A.java",
"""
package a;
import java.util.TimeZone;
|
A
|
java
|
junit-team__junit5
|
platform-tests/src/test/java/org/junit/platform/suite/engine/SuiteLauncherDiscoveryRequestBuilderTests.java
|
{
"start": 22614,
"end": 22678
}
|
interface ____ {
}
@Test
void metaAnnotations() {
@Meta
|
Meta
|
java
|
spring-projects__spring-framework
|
spring-jms/src/test/java/org/springframework/jms/listener/SimpleMessageListenerContainerTests.java
|
{
"start": 1645,
"end": 18258
}
|
class ____ {
private static final String DESTINATION_NAME = "foo";
private static final String EXCEPTION_MESSAGE = "This.Is.It";
private static final StubQueue QUEUE_DESTINATION = new StubQueue();
private final SimpleMessageListenerContainer container = new SimpleMessageListenerContainer();
@Test
void testSettingMessageListenerToANullType() {
this.container.setMessageListener(null);
assertThat(this.container.getMessageListener()).isNull();
}
@Test
void testSettingMessageListenerToAnUnsupportedType() {
assertThatIllegalArgumentException().isThrownBy(() ->
this.container.setMessageListener("Bingo"));
}
@Test
void testSessionTransactedModeReallyDoesDefaultToFalse() {
assertThat(this.container.isPubSubNoLocal()).as("The [pubSubLocal] property of SimpleMessageListenerContainer " +
"must default to false. Change this test (and the attendant javadoc) if you have changed the default.").isFalse();
}
@Test
void testSettingConcurrentConsumersToZeroIsNotAllowed() {
assertThatIllegalArgumentException().isThrownBy(() -> {
this.container.setConcurrentConsumers(0);
this.container.afterPropertiesSet();
});
}
@Test
void testSettingConcurrentConsumersToANegativeValueIsNotAllowed() {
assertThatIllegalArgumentException().isThrownBy(() -> {
this.container.setConcurrentConsumers(-198);
this.container.afterPropertiesSet();
});
}
@Test
void testContextRefreshedEventDoesNotStartTheConnectionIfAutoStartIsSetToFalse() throws Exception {
MessageConsumer messageConsumer = mock();
Session session = mock();
// Queue gets created in order to create MessageConsumer for that Destination...
given(session.createQueue(DESTINATION_NAME)).willReturn(QUEUE_DESTINATION);
// and then the MessageConsumer gets created...
given(session.createConsumer(QUEUE_DESTINATION, null)).willReturn(messageConsumer); // no MessageSelector...
Connection connection = mock();
// session gets created in order to register MessageListener...
given(connection.createSession(this.container.isSessionTransacted(),
this.container.getSessionAcknowledgeMode())).willReturn(session);
ConnectionFactory connectionFactory = mock();
given(connectionFactory.createConnection()).willReturn(connection);
this.container.setConnectionFactory(connectionFactory);
this.container.setDestinationName(DESTINATION_NAME);
this.container.setMessageListener(new TestMessageListener());
this.container.setAutoStartup(false);
this.container.afterPropertiesSet();
GenericApplicationContext context = new GenericApplicationContext();
context.getBeanFactory().registerSingleton("messageListenerContainer", this.container);
context.refresh();
context.close();
verify(connection).setExceptionListener(this.container);
}
@Test
void testContextRefreshedEventStartsTheConnectionByDefault() throws Exception {
MessageConsumer messageConsumer = mock();
Session session = mock();
// Queue gets created in order to create MessageConsumer for that Destination...
given(session.createQueue(DESTINATION_NAME)).willReturn(QUEUE_DESTINATION);
// and then the MessageConsumer gets created...
given(session.createConsumer(QUEUE_DESTINATION, null)).willReturn(messageConsumer); // no MessageSelector...
Connection connection = mock();
// session gets created in order to register MessageListener...
given(connection.createSession(this.container.isSessionTransacted(),
this.container.getSessionAcknowledgeMode())).willReturn(session);
// and the connection is start()ed after the listener is registered...
ConnectionFactory connectionFactory = mock();
given(connectionFactory.createConnection()).willReturn(connection);
this.container.setConnectionFactory(connectionFactory);
this.container.setDestinationName(DESTINATION_NAME);
this.container.setMessageListener(new TestMessageListener());
this.container.afterPropertiesSet();
GenericApplicationContext context = new GenericApplicationContext();
context.getBeanFactory().registerSingleton("messageListenerContainer", this.container);
context.refresh();
context.close();
verify(connection).setExceptionListener(this.container);
verify(connection).start();
}
@Test
void testCorrectSessionExposedForSessionAwareMessageListenerInvocation() throws Exception {
final SimpleMessageConsumer messageConsumer = new SimpleMessageConsumer();
final Session session = mock();
// Queue gets created in order to create MessageConsumer for that Destination...
given(session.createQueue(DESTINATION_NAME)).willReturn(QUEUE_DESTINATION);
// and then the MessageConsumer gets created...
given(session.createConsumer(QUEUE_DESTINATION, null)).willReturn(messageConsumer); // no MessageSelector...
// an exception is thrown, so the rollback logic is being applied here...
given(session.getTransacted()).willReturn(false);
given(session.getAcknowledgeMode()).willReturn(Session.AUTO_ACKNOWLEDGE);
Connection connection = mock();
// session gets created in order to register MessageListener...
given(connection.createSession(this.container.isSessionTransacted(),
this.container.getSessionAcknowledgeMode())).willReturn(session);
// and the connection is start()ed after the listener is registered...
final ConnectionFactory connectionFactory = mock();
given(connectionFactory.createConnection()).willReturn(connection);
final Set<String> failure = new HashSet<>(1);
this.container.setConnectionFactory(connectionFactory);
this.container.setDestinationName(DESTINATION_NAME);
this.container.setMessageListener((SessionAwareMessageListener<Message>) (Message message, @Nullable Session sess) -> {
try {
// Check correct Session passed into SessionAwareMessageListener.
assertThat(session).isSameAs(sess);
}
catch (Throwable ex) {
failure.add("MessageListener execution failed: " + ex);
}
});
this.container.afterPropertiesSet();
this.container.start();
final Message message = mock();
messageConsumer.sendMessage(message);
if (!failure.isEmpty()) {
fail(failure.iterator().next().toString());
}
verify(connection).setExceptionListener(this.container);
verify(connection).start();
}
@Test
void testTaskExecutorCorrectlyInvokedWhenSpecified() throws Exception {
final SimpleMessageConsumer messageConsumer = new SimpleMessageConsumer();
final Session session = mock();
given(session.createQueue(DESTINATION_NAME)).willReturn(QUEUE_DESTINATION);
given(session.createConsumer(QUEUE_DESTINATION, null)).willReturn(messageConsumer); // no MessageSelector...
given(session.getTransacted()).willReturn(false);
given(session.getAcknowledgeMode()).willReturn(Session.AUTO_ACKNOWLEDGE);
Connection connection = mock();
given(connection.createSession(this.container.isSessionTransacted(),
this.container.getSessionAcknowledgeMode())).willReturn(session);
final ConnectionFactory connectionFactory = mock();
given(connectionFactory.createConnection()).willReturn(connection);
final TestMessageListener listener = new TestMessageListener();
this.container.setConnectionFactory(connectionFactory);
this.container.setDestinationName(DESTINATION_NAME);
this.container.setMessageListener(listener);
this.container.setTaskExecutor(task -> {
listener.executorInvoked = true;
assertThat(listener.listenerInvoked).isFalse();
task.run();
assertThat(listener.listenerInvoked).isTrue();
});
this.container.afterPropertiesSet();
this.container.start();
final Message message = mock();
messageConsumer.sendMessage(message);
assertThat(listener.executorInvoked).isTrue();
assertThat(listener.listenerInvoked).isTrue();
verify(connection).setExceptionListener(this.container);
verify(connection).start();
}
	// Verifies that a JMSException thrown by the message listener is swallowed by the
	// container and forwarded to the registered jakarta.jms.ExceptionListener.
	@Test
	void testRegisteredExceptionListenerIsInvokedOnException() throws Exception {
		final SimpleMessageConsumer messageConsumer = new SimpleMessageConsumer();
		Session session = mock();
		// Queue gets created in order to create MessageConsumer for that Destination...
		given(session.createQueue(DESTINATION_NAME)).willReturn(QUEUE_DESTINATION);
		// and then the MessageConsumer gets created...
		given(session.createConsumer(QUEUE_DESTINATION, null)).willReturn(messageConsumer); // no MessageSelector...
		// an exception is thrown, so the rollback logic is being applied here...
		given(session.getTransacted()).willReturn(false);
		Connection connection = mock();
		// session gets created in order to register MessageListener...
		given(connection.createSession(this.container.isSessionTransacted(),
				this.container.getSessionAcknowledgeMode())).willReturn(session);
		// and the connection is start()ed after the listener is registered...
		ConnectionFactory connectionFactory = mock();
		given(connectionFactory.createConnection()).willReturn(connection);
		final JMSException theException = new JMSException(EXCEPTION_MESSAGE);
		this.container.setConnectionFactory(connectionFactory);
		this.container.setDestinationName(DESTINATION_NAME);
		// Listener deliberately rethrows the JMSException on every message.
		this.container.setMessageListener((SessionAwareMessageListener<Message>) (Message message, @Nullable Session session1) -> {
			throw theException;
		});
		ExceptionListener exceptionListener = mock();
		this.container.setExceptionListener(exceptionListener);
		this.container.afterPropertiesSet();
		this.container.start();
		// manually trigger an Exception with the above bad MessageListener...
		final Message message = mock();
		// a Throwable from a MessageListener MUST simply be swallowed...
		messageConsumer.sendMessage(message);
		verify(connection).setExceptionListener(this.container);
		verify(connection).start();
		verify(exceptionListener).onException(theException);
	}
	// Verifies that a runtime exception thrown by the message listener is swallowed by
	// the container and routed to the registered Spring ErrorHandler (as opposed to the
	// JMS ExceptionListener, which only sees JMSExceptions).
	@Test
	void testRegisteredErrorHandlerIsInvokedOnException() throws Exception {
		final SimpleMessageConsumer messageConsumer = new SimpleMessageConsumer();
		Session session = mock();
		// Queue gets created in order to create MessageConsumer for that Destination...
		given(session.createQueue(DESTINATION_NAME)).willReturn(QUEUE_DESTINATION);
		// and then the MessageConsumer gets created...
		given(session.createConsumer(QUEUE_DESTINATION, null)).willReturn(messageConsumer); // no MessageSelector...
		// an exception is thrown, so the rollback logic is being applied here...
		given(session.getTransacted()).willReturn(false);
		Connection connection = mock();
		// session gets created in order to register MessageListener...
		given(connection.createSession(this.container.isSessionTransacted(),
				this.container.getSessionAcknowledgeMode())).willReturn(session);
		ConnectionFactory connectionFactory = mock();
		given(connectionFactory.createConnection()).willReturn(connection);
		final IllegalStateException theException = new IllegalStateException("intentional test failure");
		this.container.setConnectionFactory(connectionFactory);
		this.container.setDestinationName(DESTINATION_NAME);
		// Listener deliberately rethrows the runtime exception on every message.
		this.container.setMessageListener((SessionAwareMessageListener<Message>) (Message message, @Nullable Session session1) -> {
			throw theException;
		});
		ErrorHandler errorHandler = mock();
		this.container.setErrorHandler(errorHandler);
		this.container.afterPropertiesSet();
		this.container.start();
		// manually trigger an Exception with the above bad MessageListener...
		Message message = mock();
		// a Throwable from a MessageListener MUST simply be swallowed...
		messageConsumer.sendMessage(message);
		verify(connection).setExceptionListener(this.container);
		verify(connection).start();
		verify(errorHandler).handleError(theException);
	}
	// Verifies that with a non-transacted session, a listener exception neither triggers
	// session.rollback() (never stubbed/verified here) nor propagates to the caller.
	@Test
	void testNoRollbackOccursIfSessionIsNotTransactedAndThatExceptionsDo_NOT_Propagate() throws Exception {
		final SimpleMessageConsumer messageConsumer = new SimpleMessageConsumer();
		Session session = mock();
		// Queue gets created in order to create MessageConsumer for that Destination...
		given(session.createQueue(DESTINATION_NAME)).willReturn(QUEUE_DESTINATION);
		// and then the MessageConsumer gets created...
		given(session.createConsumer(QUEUE_DESTINATION, null)).willReturn(messageConsumer); // no MessageSelector...
		// an exception is thrown, so the rollback logic is being applied here...
		given(session.getTransacted()).willReturn(false);
		Connection connection = mock();
		// session gets created in order to register MessageListener...
		given(connection.createSession(this.container.isSessionTransacted(),
				this.container.getSessionAcknowledgeMode())).willReturn(session);
		// and the connection is start()ed after the listener is registered...
		ConnectionFactory connectionFactory = mock();
		given(connectionFactory.createConnection()).willReturn(connection);
		this.container.setConnectionFactory(connectionFactory);
		this.container.setDestinationName(DESTINATION_NAME);
		// Listener always fails; sendMessage below must still return normally.
		this.container.setMessageListener((MessageListener) message -> {
			throw new UnsupportedOperationException();
		});
		this.container.afterPropertiesSet();
		this.container.start();
		// manually trigger an Exception with the above bad MessageListener...
		final Message message = mock();
		// a Throwable from a MessageListener MUST simply be swallowed...
		messageConsumer.sendMessage(message);
		verify(connection).setExceptionListener(this.container);
		verify(connection).start();
	}
	// Counterpart of the non-transacted test above: with sessionTransacted=true, a
	// listener exception must cause session.rollback() while still not propagating.
	@Test
	void testTransactedSessionsGetRollbackLogicAppliedAndThatExceptionsStillDo_NOT_Propagate() throws Exception {
		this.container.setSessionTransacted(true);
		final SimpleMessageConsumer messageConsumer = new SimpleMessageConsumer();
		Session session = mock();
		// Queue gets created in order to create MessageConsumer for that Destination...
		given(session.createQueue(DESTINATION_NAME)).willReturn(QUEUE_DESTINATION);
		// and then the MessageConsumer gets created...
		given(session.createConsumer(QUEUE_DESTINATION, null)).willReturn(messageConsumer); // no MessageSelector...
		// an exception is thrown, so the rollback logic is being applied here...
		given(session.getTransacted()).willReturn(true);
		Connection connection = mock();
		// session gets created in order to register MessageListener...
		given(connection.createSession(this.container.isSessionTransacted(),
				this.container.getSessionAcknowledgeMode())).willReturn(session);
		// and the connection is start()ed after the listener is registered...
		ConnectionFactory connectionFactory = mock();
		given(connectionFactory.createConnection()).willReturn(connection);
		this.container.setConnectionFactory(connectionFactory);
		this.container.setDestinationName(DESTINATION_NAME);
		// Listener always fails; the container's rollback path is what is under test.
		this.container.setMessageListener((MessageListener) message -> {
			throw new UnsupportedOperationException();
		});
		this.container.afterPropertiesSet();
		this.container.start();
		// manually trigger an Exception with the above bad MessageListener...
		final Message message = mock();
		// a Throwable from a MessageListener MUST simply be swallowed...
		messageConsumer.sendMessage(message);
		// Session is rolled back because it is transacted...
		verify(session).rollback();
		verify(connection).setExceptionListener(this.container);
		verify(connection).start();
	}
	// Verifies that destroy() releases the JMS resources it created: the consumer,
	// the session, and the connection are all closed.
	@Test
	void testDestroyClosesConsumersSessionsAndConnectionInThatOrder() throws Exception {
		MessageConsumer messageConsumer = mock();
		Session session = mock();
		// Queue gets created in order to create MessageConsumer for that Destination...
		given(session.createQueue(DESTINATION_NAME)).willReturn(QUEUE_DESTINATION);
		// and then the MessageConsumer gets created...
		given(session.createConsumer(QUEUE_DESTINATION, null)).willReturn(messageConsumer); // no MessageSelector...
		Connection connection = mock();
		// session gets created in order to register MessageListener...
		given(connection.createSession(this.container.isSessionTransacted(),
				this.container.getSessionAcknowledgeMode())).willReturn(session);
		// and the connection is start()ed after the listener is registered...
		ConnectionFactory connectionFactory = mock();
		given(connectionFactory.createConnection()).willReturn(connection);
		this.container.setConnectionFactory(connectionFactory)<br>		this.container.setConnectionFactory(connectionFactory);
		this.container.setDestinationName(DESTINATION_NAME);
		this.container.setMessageListener(new TestMessageListener());
		this.container.afterPropertiesSet();
		this.container.start();
		this.container.destroy();
		verify(messageConsumer).close();
		verify(session).close();
		verify(connection).setExceptionListener(this.container);
		verify(connection).start();
		verify(connection).close();
	}
private static
|
SimpleMessageListenerContainerTests
|
java
|
junit-team__junit5
|
junit-platform-engine/src/main/java/org/junit/platform/engine/discovery/DiscoverySelectors.java
|
{
"start": 8370,
"end": 10307
}
|
class ____} of the
* {@linkplain Thread thread} that uses these selectors.
*
* <p>The {@link Set} supplied to this method should have a reliable iteration
* order to support reliable discovery and execution order. It is therefore
* recommended that the set be a {@link java.util.SequencedSet} (on Java 21
* or higher), {@link java.util.SortedSet}, {@link java.util.LinkedHashSet},
* or similar. Note that {@link Set#of(Object[])} and related {@code Set.of()}
* methods do not guarantee a reliable iteration order.
*
* @param classpathRoots set of directories and JAR files in the filesystem
* that represent classpath roots; never {@code null}
* @return a list of selectors for the supplied classpath roots; elements
* which do not physically exist in the filesystem will be filtered out
* @see ClasspathRootSelector
* @see Thread#getContextClassLoader()
*/
public static List<ClasspathRootSelector> selectClasspathRoots(Set<Path> classpathRoots) {
Preconditions.notNull(classpathRoots, "classpathRoots must not be null");
// @formatter:off
return classpathRoots.stream()
.filter(Files::exists)
.map(Path::toUri)
.map(ClasspathRootSelector::new)
// unmodifiable since selectClasspathRoots is a public, non-internal method
.toList();
// @formatter:on
}
/**
* Create a {@code ClasspathResourceSelector} for the supplied classpath
* resource name.
*
* <p>The name of a <em>classpath resource</em> must follow the semantics
* for resource paths as defined in {@link ClassLoader#getResource(String)}.
*
* <p>If the supplied classpath resource name is prefixed with a slash
* ({@code /}), the slash will be removed.
*
* <p>Since {@linkplain org.junit.platform.engine.TestEngine engines} are not
* expected to modify the classpath, the supplied classpath resource must be
* on the classpath of the
* {@linkplain Thread#getContextClassLoader() context
|
loader
|
java
|
spring-projects__spring-boot
|
core/spring-boot-test/src/test/java/org/springframework/boot/test/context/SpringBootTestArgsTests.java
|
{
"start": 1174,
"end": 1593
}
|
class ____ {
@Autowired
private ApplicationArguments args;
	// Verifies that the args declared on the enclosing @SpringBootTest are parsed into
	// the injected ApplicationArguments: "--option.foo=foo-value" becomes an option,
	// while "other.bar=other-bar-value" (no "--" prefix) stays a non-option argument.
	@Test
	void applicationArgumentsPopulated() {
		assertThat(this.args.getOptionNames()).containsOnly("option.foo");
		assertThat(this.args.getOptionValues("option.foo")).containsOnly("foo-value");
		assertThat(this.args.getNonOptionArgs()).containsOnly("other.bar=other-bar-value");
	}
@Configuration(proxyBeanMethods = false)
static
|
SpringBootTestArgsTests
|
java
|
apache__kafka
|
server/src/test/java/org/apache/kafka/server/quota/ClientQuotasRequestTest.java
|
{
"start": 2505,
"end": 34248
}
|
class ____ {
private final ClusterInstance cluster;
    // Constructor invoked by the cluster test framework with the running cluster handle.
    public ClientQuotasRequestTest(ClusterInstance cluster) {
        this.cluster = cluster;
    }
    // End-to-end exercise of AlterClientQuotas against a (user, client-id) entity:
    // add, update, remove, no-op remove, re-add, and a mixed add/update/remove batch,
    // verifying the described configuration after each step.
    @ClusterTest
    public void testAlterClientQuotasRequest() throws InterruptedException {
        ClientQuotaEntity entity = new ClientQuotaEntity(
            Map.of(ClientQuotaEntity.USER, "user", ClientQuotaEntity.CLIENT_ID, "client-id"));

        // Expect an empty configuration.
        verifyDescribeEntityQuotas(entity, Map.of());

        // Add two configuration entries.
        alterEntityQuotas(entity, Map.of(
            QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG, Optional.of(10000.0),
            QuotaConfig.CONSUMER_BYTE_RATE_OVERRIDE_CONFIG, Optional.of(20000.0)
        ), false);

        verifyDescribeEntityQuotas(entity, Map.of(
            QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG, 10000.0,
            QuotaConfig.CONSUMER_BYTE_RATE_OVERRIDE_CONFIG, 20000.0
        ));

        // Update an existing entry.
        alterEntityQuotas(entity, Map.of(
            QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG, Optional.of(15000.0)
        ), false);

        verifyDescribeEntityQuotas(entity, Map.of(
            QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG, 15000.0,
            QuotaConfig.CONSUMER_BYTE_RATE_OVERRIDE_CONFIG, 20000.0
        ));

        // Remove an existing configuration entry (Optional.empty() means "delete").
        alterEntityQuotas(entity, Map.of(
            QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG, Optional.empty()
        ), false);

        verifyDescribeEntityQuotas(entity, Map.of(
            QuotaConfig.CONSUMER_BYTE_RATE_OVERRIDE_CONFIG, 20000.0
        ));

        // Remove a non-existent configuration entry. This should make no changes.
        alterEntityQuotas(entity, Map.of(
            QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG, Optional.empty()
        ), false);

        verifyDescribeEntityQuotas(entity, Map.of(
            QuotaConfig.CONSUMER_BYTE_RATE_OVERRIDE_CONFIG, 20000.0
        ));

        // Add back a deleted configuration entry.
        alterEntityQuotas(entity, Map.of(
            QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG, Optional.of(5000.0)
        ), false);

        verifyDescribeEntityQuotas(entity, Map.of(
            QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG, 5000.0,
            QuotaConfig.CONSUMER_BYTE_RATE_OVERRIDE_CONFIG, 20000.0
        ));

        // Perform a mixed update: set one, delete one, add one in a single request.
        alterEntityQuotas(entity, Map.of(
            QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG, Optional.of(20000.0),
            QuotaConfig.CONSUMER_BYTE_RATE_OVERRIDE_CONFIG, Optional.empty(),
            QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG, Optional.of(12.3)
        ), false);

        verifyDescribeEntityQuotas(entity, Map.of(
            QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG, 20000.0,
            QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG, 12.3
        ));
    }
    // Verifies that validateOnly=true requests (addition, modification, removal, and a
    // mixed batch) never change the stored quotas: the describe result must remain the
    // initially written configuration after every validate-only call.
    @ClusterTest
    public void testAlterClientQuotasRequestValidateOnly() throws InterruptedException {
        ClientQuotaEntity entity = new ClientQuotaEntity(Map.of(ClientQuotaEntity.USER, "user"))<br>        ClientQuotaEntity entity = new ClientQuotaEntity(Map.of(ClientQuotaEntity.USER, "user"));

        // Set up a configuration.
        alterEntityQuotas(entity, Map.of(
            QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG, Optional.of(20000.0),
            QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG, Optional.of(23.45)
        ), false);

        verifyDescribeEntityQuotas(entity, Map.of(
            QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG, 20000.0,
            QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG, 23.45
        ));

        // Validate-only addition.
        alterEntityQuotas(entity, Map.of(
            QuotaConfig.CONSUMER_BYTE_RATE_OVERRIDE_CONFIG, Optional.of(50000.0)
        ), true);

        verifyDescribeEntityQuotas(entity, Map.of(
            QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG, 20000.0,
            QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG, 23.45
        ));

        // Validate-only modification.
        alterEntityQuotas(entity, Map.of(
            QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG, Optional.of(10000.0)
        ), true);

        verifyDescribeEntityQuotas(entity, Map.of(
            QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG, 20000.0,
            QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG, 23.45
        ));

        // Validate-only removal.
        alterEntityQuotas(entity, Map.of(
            QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG, Optional.empty()
        ), true);

        verifyDescribeEntityQuotas(entity, Map.of(
            QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG, 20000.0,
            QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG, 23.45
        ));

        // Validate-only mixed update.
        alterEntityQuotas(entity, Map.of(
            QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG, Optional.of(10000.0),
            QuotaConfig.CONSUMER_BYTE_RATE_OVERRIDE_CONFIG, Optional.of(50000.0),
            QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG, Optional.empty()
        ), true);

        verifyDescribeEntityQuotas(entity, Map.of(
            QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG, 20000.0,
            QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG, 23.45
        ));
    }
    // Verifies that quotas can be set for a user that also has SCRAM credentials:
    // creating the credential must not interfere with quota storage or lookup.
    @ClusterTest
    public void testClientQuotasForScramUsers() throws InterruptedException, ExecutionException {
        final String userName = "user";

        try (Admin admin = cluster.admin()) {
            // Register a SCRAM-SHA-256 credential for the user first.
            AlterUserScramCredentialsResult results = admin.alterUserScramCredentials(List.of(
                    new UserScramCredentialUpsertion(userName, new ScramCredentialInfo(ScramMechanism.SCRAM_SHA_256, 4096), "password")));
            results.all().get();

            ClientQuotaEntity entity = new ClientQuotaEntity(Map.of(ClientQuotaEntity.USER, userName));

            verifyDescribeEntityQuotas(entity, Map.of());

            alterEntityQuotas(entity, Map.of(
                QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG, Optional.of(10000.0),
                QuotaConfig.CONSUMER_BYTE_RATE_OVERRIDE_CONFIG, Optional.of(20000.0)
            ), false);

            verifyDescribeEntityQuotas(entity, Map.of(
                QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG, 10000.0,
                QuotaConfig.CONSUMER_BYTE_RATE_OVERRIDE_CONFIG, 20000.0
            ));
        }
    }
    // Exercises IP connection-rate quotas: add/update a specific-IP entry, set the
    // default-IP entry, describe both individually and via the catch-all IP filter,
    // then remove each and confirm the configuration is empty again.
    @ClusterTest
    public void testAlterIpQuotasRequest() throws InterruptedException {
        final String knownHost = "1.2.3.4";
        final String unknownHost = "2.3.4.5";
        ClientQuotaEntity entity = toIpEntity(Optional.of(knownHost));
        ClientQuotaEntity defaultEntity = toIpEntity(Optional.empty());
        ClientQuotaFilterComponent entityFilter = ClientQuotaFilterComponent.ofEntity(ClientQuotaEntity.IP, knownHost);
        ClientQuotaFilterComponent defaultEntityFilter = ClientQuotaFilterComponent.ofDefaultEntity(ClientQuotaEntity.IP);
        ClientQuotaFilterComponent allIpEntityFilter = ClientQuotaFilterComponent.ofEntityType(ClientQuotaEntity.IP);

        // Expect an empty configuration.
        verifyIpQuotas(allIpEntityFilter, Map.of(), unknownHost);

        // Add a configuration entry.
        alterEntityQuotas(entity, Map.of(QuotaConfig.IP_CONNECTION_RATE_OVERRIDE_CONFIG, Optional.of(100.0)), false);
        verifyIpQuotas(entityFilter, Map.of(entity, 100.0), unknownHost);

        // update existing entry
        alterEntityQuotas(entity, Map.of(QuotaConfig.IP_CONNECTION_RATE_OVERRIDE_CONFIG, Optional.of(150.0)), false);
        verifyIpQuotas(entityFilter, Map.of(entity, 150.0), unknownHost);

        // update default value
        alterEntityQuotas(defaultEntity, Map.of(QuotaConfig.IP_CONNECTION_RATE_OVERRIDE_CONFIG, Optional.of(200.0)), false);
        verifyIpQuotas(defaultEntityFilter, Map.of(defaultEntity, 200.0), unknownHost);

        // describe all IP quotas
        verifyIpQuotas(allIpEntityFilter, Map.of(entity, 150.0, defaultEntity, 200.0), unknownHost);

        // remove entry
        alterEntityQuotas(entity, Map.of(QuotaConfig.IP_CONNECTION_RATE_OVERRIDE_CONFIG, Optional.empty()), false);
        verifyIpQuotas(entityFilter, Map.of(), unknownHost);

        // remove default value
        alterEntityQuotas(defaultEntity, Map.of(QuotaConfig.IP_CONNECTION_RATE_OVERRIDE_CONFIG, Optional.empty()), false);
        verifyIpQuotas(allIpEntityFilter, Map.of(), unknownHost);
    }
    /**
     * Asserts, with retries (quota propagation to brokers is asynchronous), that the
     * describe result for {@code entityFilter} matches {@code expectedMatches} exactly,
     * and that one broker's connection-quota manager enforces the same rate.
     *
     * @param entityFilter filter to describe with
     * @param expectedMatches expected entity-to-rate mapping; an entity with a null IP
     *        name is the default entity, checked via {@code unknownHost}
     * @param unknownHost an IP with no specific quota, used to probe the default rate
     */
    private void verifyIpQuotas(ClientQuotaFilterComponent entityFilter, Map<ClientQuotaEntity, Double> expectedMatches,
                                String unknownHost) throws InterruptedException {
        TestUtils.retryOnExceptionWithTimeout(5000L, () -> {
            Map<ClientQuotaEntity, Map<String, Double>> result = describeClientQuotas(
                ClientQuotaFilter.containsOnly(List.of(entityFilter))).get();
            assertEquals(expectedMatches.keySet(), result.keySet());
            for (Map.Entry<ClientQuotaEntity, Map<String, Double>> entry : result.entrySet()) {
                ClientQuotaEntity entity = entry.getKey();
                Map<String, Double> props = entry.getValue();
                // Only the connection-rate key should be present for IP entities.
                assertEquals(Set.of(QuotaConfig.IP_CONNECTION_RATE_OVERRIDE_CONFIG), props.keySet());
                assertEquals(expectedMatches.get(entity), props.get(QuotaConfig.IP_CONNECTION_RATE_OVERRIDE_CONFIG));
                String entityName = entity.entries().get(ClientQuotaEntity.IP);
                // ClientQuotaEntity with null name maps to default entity
                InetAddress entityIp = entityName == null
                    ? InetAddress.getByName(unknownHost)
                    : InetAddress.getByName(entityName);
                // Cross-check against the broker's in-memory enforcement state.
                int currentServerQuota = cluster.brokers()
                    .values()
                    .iterator()
                    .next()
                    .socketServer()
                    .connectionQuotas()
                    .connectionRateForIp(entityIp);
                assertTrue(Math.abs(expectedMatches.get(entity) - currentServerQuota) < 0.01,
                    String.format("Connection quota of %s is not %s but %s", entity, expectedMatches.get(entity), currentServerQuota));
            }
        });
    }
    // Verifies that malformed alter requests are rejected with InvalidRequestException:
    // empty user/client-id names, an empty entity-type key, an empty entity map, an
    // unknown quota config key, and a non-integral value for an integral byte-rate quota.
    @ClusterTest
    public void testAlterClientQuotasInvalidRequests() {
        // Empty user name.
        final ClientQuotaEntity entity1 = new ClientQuotaEntity(Map.of(ClientQuotaEntity.USER, ""));
        TestUtils.assertFutureThrows(InvalidRequestException.class,
            alterEntityQuotas(entity1, Map.of(QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG, Optional.of(12.34)), true));

        // Empty client-id name.
        final ClientQuotaEntity entity2 = new ClientQuotaEntity(Map.of(ClientQuotaEntity.CLIENT_ID, ""));
        TestUtils.assertFutureThrows(InvalidRequestException.class,
            alterEntityQuotas(entity2, Map.of(QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG, Optional.of(12.34)), true));

        // Empty entity-type key.
        final ClientQuotaEntity entity3 = new ClientQuotaEntity(Map.of("", "name"));
        TestUtils.assertFutureThrows(InvalidRequestException.class,
            alterEntityQuotas(entity3, Map.of(QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG, Optional.of(12.34)), true));

        // Entity with no entries at all.
        final ClientQuotaEntity entity4 = new ClientQuotaEntity(Map.of());
        TestUtils.assertFutureThrows(InvalidRequestException.class,
            alterEntityQuotas(entity4, Map.of(QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG, Optional.of(10000.5)), true));

        // Unknown quota configuration key.
        final ClientQuotaEntity entity5 = new ClientQuotaEntity(Map.of(ClientQuotaEntity.USER, "user"));
        TestUtils.assertFutureThrows(InvalidRequestException.class,
            alterEntityQuotas(entity5, Map.of("bad", Optional.of(1.0)), true));

        // Non-integral value (10000.5) for the producer byte-rate quota.
        final ClientQuotaEntity entity6 = new ClientQuotaEntity(Map.of(ClientQuotaEntity.USER, "user"));
        TestUtils.assertFutureThrows(InvalidRequestException.class,
            alterEntityQuotas(entity6, Map.of(QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG, Optional.of(10000.5)), true));
    }
private void expectInvalidRequestWithMessage(Future<?> future, String expectedMessage) {
InvalidRequestException exception = TestUtils.assertFutureThrows(InvalidRequestException.class, future);
assertNotNull(exception);
assertTrue(
exception.getMessage().contains(expectedMessage),
String.format("Expected message %s to contain %s", exception, expectedMessage)
);
}
    // Verifies that an IP entity may not be combined with a user or client-id entity
    // in a single quota entity: both combinations must be rejected.
    @ClusterTest
    public void testAlterClientQuotasInvalidEntityCombination() {
        ClientQuotaEntity userAndIpEntity = new ClientQuotaEntity(
            Map.of(ClientQuotaEntity.USER, "user", ClientQuotaEntity.IP, "1.2.3.4")
        );
        ClientQuotaEntity clientAndIpEntity = new ClientQuotaEntity(
            Map.of(ClientQuotaEntity.CLIENT_ID, "client", ClientQuotaEntity.IP, "1.2.3.4")
        );

        final String expectedExceptionMessage = "Invalid quota entity combination";

        expectInvalidRequestWithMessage(
            alterEntityQuotas(userAndIpEntity, Map.of(QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG, Optional.of(12.34)), true),
            expectedExceptionMessage
        );
        expectInvalidRequestWithMessage(
            alterEntityQuotas(clientAndIpEntity, Map.of(QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG, Optional.of(12.34)), true),
            expectedExceptionMessage
        );
    }
    // Verifies that IP quota entities with syntactically invalid or unresolvable host
    // names are rejected with an InvalidRequestException mentioning "not a valid IP".
    @ClusterTest
    public void testAlterClientQuotasBadIp() {
        ClientQuotaEntity invalidHostPatternEntity = new ClientQuotaEntity(
            Map.of(ClientQuotaEntity.IP, "not a valid host because it has spaces")
        );
        // ".invalid" is reserved by RFC 2606 and guaranteed not to resolve.
        ClientQuotaEntity unresolvableHostEntity = new ClientQuotaEntity(
            Map.of(ClientQuotaEntity.IP, "RFC2606.invalid")
        );

        final String expectedExceptionMessage = "not a valid IP";

        expectInvalidRequestWithMessage(
            alterEntityQuotas(invalidHostPatternEntity, Map.of(QuotaConfig.IP_CONNECTION_RATE_OVERRIDE_CONFIG, Optional.of(50.0)), true),
            expectedExceptionMessage
        );
        expectInvalidRequestWithMessage(
            alterEntityQuotas(unresolvableHostEntity, Map.of(QuotaConfig.IP_CONNECTION_RATE_OVERRIDE_CONFIG, Optional.of(50.0)), true),
            expectedExceptionMessage
        );
    }
    // Verifies that describe filters mixing the IP entity type with user or client-id
    // entity types are rejected, mirroring the alter-side combination restriction.
    @ClusterTest
    public void testDescribeClientQuotasInvalidFilterCombination() {
        ClientQuotaFilterComponent ipFilterComponent = ClientQuotaFilterComponent.ofEntityType(ClientQuotaEntity.IP);
        ClientQuotaFilterComponent userFilterComponent = ClientQuotaFilterComponent.ofEntityType(ClientQuotaEntity.USER);
        ClientQuotaFilterComponent clientIdFilterComponent = ClientQuotaFilterComponent.ofEntityType(ClientQuotaEntity.CLIENT_ID);
        final String expectedExceptionMessage = "Invalid entity filter component combination";

        expectInvalidRequestWithMessage(
            describeClientQuotas(ClientQuotaFilter.contains(List.of(ipFilterComponent, userFilterComponent))),
            expectedExceptionMessage
        );
        expectInvalidRequestWithMessage(
            describeClientQuotas(ClientQuotaFilter.contains(List.of(ipFilterComponent, clientIdFilterComponent))),
            expectedExceptionMessage
        );
    }
    // Entities to be matched against by the filter tests below. A null value inside
    // toUserMap/toClientIdMap denotes the default entity for that type; the doubles
    // are the request-percentage quota values written by setupDescribeClientQuotasMatchTest.
    private final Map<ClientQuotaEntity, Double> matchUserClientEntities = new HashMap<>(Map.ofEntries(
        Map.entry(toClientEntity(toUserMap("user-1"), toClientIdMap("client-id-1")), 50.50),
        Map.entry(toClientEntity(toUserMap("user-2"), toClientIdMap("client-id-1")), 51.51),
        Map.entry(toClientEntity(toUserMap("user-3"), toClientIdMap("client-id-2")), 52.52),
        Map.entry(toClientEntity(toUserMap(null), toClientIdMap("client-id-1")), 53.53),
        Map.entry(toClientEntity(toUserMap("user-1"), toClientIdMap(null)), 54.54),
        Map.entry(toClientEntity(toUserMap("user-3"), toClientIdMap(null)), 55.55),
        Map.entry(toClientEntity(toUserMap("user-1")), 56.56),
        Map.entry(toClientEntity(toUserMap("user-2")), 57.57),
        Map.entry(toClientEntity(toUserMap("user-3")), 58.58),
        Map.entry(toClientEntity(toUserMap(null)), 59.59),
        Map.entry(toClientEntity(toClientIdMap("client-id-2")), 60.60)
    ));

    // IP fixture entities; the doubles are connection-rate quota values.
    private final Map<ClientQuotaEntity, Double> matchIpEntities = Map.of(
        toIpEntity(Optional.of("1.2.3.4")), 10.0,
        toIpEntity(Optional.of("2.3.4.5")), 20.0
    );
private void setupDescribeClientQuotasMatchTest() {
Map<ClientQuotaEntity, Map<String, Optional<Double>>> userClientQuotas = matchUserClientEntities.entrySet()
.stream()
.collect(Collectors.toMap(Map.Entry::getKey,
e -> Map.of(QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG, Optional.of(e.getValue()))));
Map<ClientQuotaEntity, Map<String, Optional<Double>>> ipQuotas = matchIpEntities.entrySet()
.stream()
.collect(Collectors.toMap(Map.Entry::getKey,
e -> Map.of(QuotaConfig.IP_CONNECTION_RATE_OVERRIDE_CONFIG, Optional.of(e.getValue()))));
Map<ClientQuotaEntity, Map<String, Optional<Double>>> allQuotas = new HashMap<>();
allQuotas.putAll(userClientQuotas);
allQuotas.putAll(ipQuotas);
Map<ClientQuotaEntity, KafkaFuture<Void>> result = alterClientQuotas(allQuotas, false);
matchUserClientEntities.forEach((entity, value) -> {
try {
result.get(entity).get(10, TimeUnit.SECONDS);
} catch (InterruptedException | ExecutionException | TimeoutException e) {
throw new RuntimeException(e);
}
});
matchIpEntities.forEach((entity, value) -> {
try {
result.get(entity).get(10, TimeUnit.SECONDS);
} catch (InterruptedException | ExecutionException | TimeoutException e) {
throw new RuntimeException(e);
}
});
}
private Map<ClientQuotaEntity, Map<String, Double>> matchEntity(ClientQuotaEntity entity)
throws ExecutionException, InterruptedException {
List<ClientQuotaFilterComponent> components = entity.entries().entrySet().stream().map(entry -> {
if (entry.getValue() == null) {
return ClientQuotaFilterComponent.ofDefaultEntity(entry.getKey());
} else {
return ClientQuotaFilterComponent.ofEntity(entry.getKey(), entry.getValue());
}
}).toList();
return describeClientQuotas(ClientQuotaFilter.containsOnly(components)).get();
}
    // Verifies exact-match describes: every fixture entity is returned alone with its
    // value, and close-ended describes of near-miss entities return nothing.
    @ClusterTest
    public void testDescribeClientQuotasMatchExact() throws ExecutionException, InterruptedException {
        setupDescribeClientQuotasMatchTest();

        // Test exact matches (retry because quota propagation is asynchronous).
        matchUserClientEntities.forEach((e, v) -> {
            try {
                TestUtils.retryOnExceptionWithTimeout(5000L, () -> {
                    Map<ClientQuotaEntity, Map<String, Double>> result = matchEntity(e);
                    assertEquals(1, result.size());
                    assertNotNull(result.get(e));
                    double value = result.get(e).get(QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG);
                    assertEquals(value, v, 1e-6);
                });
            } catch (InterruptedException ex) {
                throw new RuntimeException(ex);
            }
        });

        // Entities not contained in `matchEntityList`.
        List<ClientQuotaEntity> notMatchEntities = List.of(
            toClientEntity(toUserMap("user-1"), toClientIdMap("client-id-2")),
            toClientEntity(toUserMap("user-3"), toClientIdMap("client-id-1")),
            toClientEntity(toUserMap("user-2"), toClientIdMap(null)),
            toClientEntity(toUserMap("user-4")),
            toClientEntity(toUserMap(null), toClientIdMap("client-id-2")),
            toClientEntity(toClientIdMap("client-id-1")),
            toClientEntity(toClientIdMap("client-id-3"))
        );

        // Verify exact matches of the non-matches returns empty.
        for (ClientQuotaEntity e : notMatchEntities) {
            Map<ClientQuotaEntity, Map<String, Double>> result = matchEntity(e);
            assertEquals(0, result.size());
        }
    }
@SuppressWarnings("unchecked")
private void testMatchEntities(ClientQuotaFilter filter, int expectedMatchSize, Predicate<ClientQuotaEntity> partition)
throws InterruptedException {
TestUtils.retryOnExceptionWithTimeout(5000L, () -> {
Map<ClientQuotaEntity, Map<String, Double>> result = describeClientQuotas(filter).get();
List<Map.Entry<ClientQuotaEntity, Double>> expectedMatches = matchUserClientEntities.entrySet()
.stream()
.collect(Collectors.partitioningBy(entry -> partition.test(entry.getKey())))
.get(true);
expectedMatches.addAll(matchIpEntities.entrySet()
.stream()
.collect(Collectors.partitioningBy(entry -> partition.test(entry.getKey())))
.get(true));
// for test verification
assertEquals(expectedMatchSize, expectedMatches.size());
assertEquals(expectedMatchSize, result.size(),
"Failed to match " + expectedMatchSize + "entities for " + filter);
Map<Object, Object> expectedMatchesMap = Map.ofEntries(expectedMatches.toArray(new Map.Entry[0]));
matchUserClientEntities.forEach((entity, expectedValue) -> {
if (expectedMatchesMap.containsKey(entity)) {
Map<String, Double> config = result.get(entity);
assertNotNull(config);
Double value = config.get(QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG);
assertNotNull(value);
assertEquals(expectedValue, value, 1e-6);
} else {
assertNull(result.get(entity));
}
});
matchIpEntities.forEach((entity, expectedValue) -> {
if (expectedMatchesMap.containsKey(entity)) {
Map<String, Double> config = result.get(entity);
assertNotNull(config);
Double value = config.get(QuotaConfig.IP_CONNECTION_RATE_OVERRIDE_CONFIG);
assertNotNull(value);
assertEquals(expectedValue, value, 1e-6);
} else {
assertNull(result.get(entity));
}
});
});
}
    // Verifies partial-match describes across open-ended (contains) and close-ended
    // (containsOnly) filters, for named, default, and type-only components. Each call
    // passes the expected match count plus a predicate re-deriving the expected set
    // from the fixtures, so the count double-checks the predicate itself.
    @ClusterTest
    public void testDescribeClientQuotasMatchPartial() throws InterruptedException {
        setupDescribeClientQuotasMatchTest();

        // Match open-ended existing user.
        testMatchEntities(
            ClientQuotaFilter.contains(List.of(ClientQuotaFilterComponent.ofEntity(ClientQuotaEntity.USER, "user-1"))),
            3,
            entity -> Objects.equals(entity.entries().get(ClientQuotaEntity.USER), "user-1")
        );

        // Match open-ended non-existent user.
        testMatchEntities(
            ClientQuotaFilter.contains(List.of(ClientQuotaFilterComponent.ofEntity(ClientQuotaEntity.USER, "unknown"))),
            0,
            entity -> false
        );

        // Match open-ended existing client ID.
        testMatchEntities(
            ClientQuotaFilter.contains(List.of(ClientQuotaFilterComponent.ofEntity(ClientQuotaEntity.CLIENT_ID, "client-id-2"))),
            2,
            entity -> Objects.equals(entity.entries().get(ClientQuotaEntity.CLIENT_ID), "client-id-2")
        );

        // Match open-ended default user.
        testMatchEntities(
            ClientQuotaFilter.contains(List.of(ClientQuotaFilterComponent.ofDefaultEntity(ClientQuotaEntity.USER))),
            2,
            entity -> entity.entries().containsKey(ClientQuotaEntity.USER) && entity.entries().get(ClientQuotaEntity.USER) == null
        );

        // Match close-ended existing user.
        testMatchEntities(
            ClientQuotaFilter.containsOnly(List.of(ClientQuotaFilterComponent.ofEntity(ClientQuotaEntity.USER, "user-2"))),
            1,
            entity -> Objects.equals(entity.entries().get(ClientQuotaEntity.USER), "user-2") && !entity.entries().containsKey(ClientQuotaEntity.CLIENT_ID)
        );

        // Match close-ended existing client ID that has no matching entity.
        testMatchEntities(
            ClientQuotaFilter.containsOnly(List.of(ClientQuotaFilterComponent.ofEntity(ClientQuotaEntity.CLIENT_ID, "client-id-1"))),
            0,
            entity -> false
        );

        // Match against all entities with the user type in a close-ended match.
        testMatchEntities(
            ClientQuotaFilter.containsOnly(List.of(ClientQuotaFilterComponent.ofEntityType(ClientQuotaEntity.USER))),
            4,
            entity -> entity.entries().containsKey(ClientQuotaEntity.USER) && !entity.entries().containsKey(ClientQuotaEntity.CLIENT_ID)
        );

        // Match against all entities with the user type in an open-ended match.
        testMatchEntities(
            ClientQuotaFilter.contains(List.of(ClientQuotaFilterComponent.ofEntityType(ClientQuotaEntity.USER))),
            10,
            entity -> entity.entries().containsKey(ClientQuotaEntity.USER)
        );

        // Match against all entities with the client ID type in a close-ended match.
        testMatchEntities(
            ClientQuotaFilter.containsOnly(List.of(ClientQuotaFilterComponent.ofEntityType(ClientQuotaEntity.CLIENT_ID))),
            1,
            entity -> entity.entries().containsKey(ClientQuotaEntity.CLIENT_ID) && !entity.entries().containsKey(ClientQuotaEntity.USER)
        );

        // Match against all entities with the client ID type in an open-ended match.
        testMatchEntities(
            ClientQuotaFilter.contains(List.of(ClientQuotaFilterComponent.ofEntityType(ClientQuotaEntity.CLIENT_ID))),
            7,
            entity -> entity.entries().containsKey(ClientQuotaEntity.CLIENT_ID)
        );

        // Match against all entities with IP type in an open-ended match.
        testMatchEntities(
            ClientQuotaFilter.contains(List.of(ClientQuotaFilterComponent.ofEntityType(ClientQuotaEntity.IP))),
            2,
            entity -> entity.entries().containsKey(ClientQuotaEntity.IP)
        );

        // Match open-ended empty filter List. This should match all entities.
        testMatchEntities(ClientQuotaFilter.contains(List.of()), 13, entity -> true);

        // Match close-ended empty filter List. This should match no entities.
        testMatchEntities(ClientQuotaFilter.containsOnly(List.of()), 0, entity -> false);
    }
@ClusterTest
public void testClientQuotasUnsupportedEntityTypes() {
    // An entity type the broker does not recognize must be rejected with
    // UnsupportedVersionException rather than silently matching nothing.
    ClientQuotaEntity unknownEntity = new ClientQuotaEntity(Map.of("other", "name"));
    ClientQuotaFilter filter = ClientQuotaFilter.containsOnly(getComponents(unknownEntity));
    TestUtils.assertFutureThrows(UnsupportedVersionException.class, describeClientQuotas(filter));
}
@ClusterTest
public void testClientQuotasSanitized() throws InterruptedException {
    // A user name containing spaces must be sanitized when persisted, and still
    // round-trip correctly through alter + describe.
    ClientQuotaEntity spacedUser =
        new ClientQuotaEntity(Map.of(ClientQuotaEntity.USER, "user with spaces"));
    alterEntityQuotas(
        spacedUser,
        Map.of(QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG, Optional.of(20000.0)),
        false);
    verifyDescribeEntityQuotas(
        spacedUser,
        Map.of(QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG, 20000.0));
}
private Map<String, String> toUserMap(String user) {
    // Uses Collections.singletonMap instead of Map.of because a null user denotes
    // the default user entity, and Map.of rejects null values.
    return Collections.singletonMap(ClientQuotaEntity.USER, user);
}

private Map<String, String> toClientIdMap(String clientId) {
    // Uses Collections.singletonMap instead of Map.of because a null client-id denotes
    // the default client-id entity, and Map.of rejects null values.
    return Collections.singletonMap(ClientQuotaEntity.CLIENT_ID, clientId);
}
/** Merges the given key/value maps into one client quota entity. */
@SafeVarargs
private ClientQuotaEntity toClientEntity(Map<String, String>... entries) {
    Map<String, String> merged = new HashMap<>();
    for (Map<String, String> part : entries) {
        merged.putAll(part);
    }
    return new ClientQuotaEntity(merged);
}

/** Builds an IP quota entity; an empty Optional denotes the default IP entity (null name). */
private ClientQuotaEntity toIpEntity(Optional<String> ip) {
    return new ClientQuotaEntity(Collections.singletonMap(ClientQuotaEntity.IP, ip.orElse(null)));
}
/**
 * Polls describeClientQuotas until it returns exactly the expected quotas for {@code entity}.
 * Quota changes propagate asynchronously, hence the retry loop.
 */
private void verifyDescribeEntityQuotas(ClientQuotaEntity entity, Map<String, Double> quotas)
        throws InterruptedException {
    TestUtils.retryOnExceptionWithTimeout(5000L, () -> {
        Map<ClientQuotaEntity, Map<String, Double>> described =
            describeClientQuotas(ClientQuotaFilter.containsOnly(getComponents(entity))).get();
        if (!quotas.isEmpty()) {
            // Exactly one entity should match, with exactly the expected config values.
            assertEquals(1, described.size());
            Map<String, Double> configs = described.get(entity);
            assertNotNull(configs);
            assertEquals(quotas.size(), configs.size());
            quotas.forEach((key, expected) -> {
                Double actual = configs.get(key);
                assertNotNull(actual);
                assertEquals(expected, actual, 1e-6);
            });
        } else {
            // No quotas expected: the description must be empty.
            assertEquals(0, described.size());
        }
    });
}
/**
 * Converts an entity into filter components; a null entity name maps to a
 * default-entity component, a non-null name to an exact-match component.
 */
private List<ClientQuotaFilterComponent> getComponents(ClientQuotaEntity entity) {
    return entity.entries().entrySet().stream()
        .map(e -> e.getValue() == null
            ? ClientQuotaFilterComponent.ofDefaultEntity(e.getKey())
            : ClientQuotaFilterComponent.ofEntity(e.getKey(), e.getValue()))
        .toList();
}
/**
 * Describes client quotas matching {@code filter} using a short-lived Admin client.
 *
 * <p>NOTE(review): the Admin client is closed (try-with-resources) before the returned
 * future is consumed by callers; this relies on {@code Admin.close()} draining pending
 * calls so the future is completed (or failed) by the time this method returns —
 * confirm against the Admin close contract.
 */
private KafkaFuture<Map<ClientQuotaEntity, Map<String, Double>>> describeClientQuotas(ClientQuotaFilter filter) {
    try (Admin admin = cluster.admin()) {
        return admin.describeClientQuotas(filter).entities();
    }
}
/** Alters the quotas of a single entity; convenience wrapper around {@link #alterClientQuotas}. */
private KafkaFuture<Void> alterEntityQuotas(ClientQuotaEntity entity, Map<String, Optional<Double>> alter, boolean validateOnly) {
    return alterClientQuotas(Map.of(entity, alter), validateOnly).get(entity);
}

/**
 * Submits the requested quota alterations (an empty Optional removes the override)
 * and returns one future per altered entity.
 */
private Map<ClientQuotaEntity, KafkaFuture<Void>> alterClientQuotas(Map<ClientQuotaEntity, Map<String,
        Optional<Double>>> request, boolean validateOnly) {
    List<ClientQuotaAlteration> alterations = request.entrySet().stream()
        .map(e -> new ClientQuotaAlteration(
            e.getKey(),
            e.getValue().entrySet().stream()
                .map(op -> new ClientQuotaAlteration.Op(op.getKey(), op.getValue().orElse(null)))
                .toList()))
        .toList();

    try (Admin admin = cluster.admin()) {
        Map<ClientQuotaEntity, KafkaFuture<Void>> result = admin.alterClientQuotas(alterations,
            new AlterClientQuotasOptions().validateOnly(validateOnly)).values();
        // Sanity check: the result must contain exactly the requested entities.
        assertEquals(request.size(), result.size());
        request.keySet().forEach(e -> assertTrue(result.containsKey(e)));
        return result;
    }
}
}
|
ClientQuotasRequestTest
|
java
|
grpc__grpc-java
|
alts/src/test/java/io/grpc/alts/internal/AesGcmHkdfAeadCrypterTest.java
|
{
"start": 1070,
"end": 1587
}
|
class ____ {
final String comment;
final byte[] key;
final byte[] nonce;
final byte[] aad;
final byte[] plaintext;
final byte[] ciphertext;
TestVector(TestVectorBuilder builder) {
comment = builder.comment;
key = builder.key;
nonce = builder.nonce;
aad = builder.aad;
plaintext = builder.plaintext;
ciphertext = builder.ciphertext;
}
static TestVectorBuilder builder() {
return new TestVectorBuilder();
}
}
private static
|
TestVector
|
java
|
apache__flink
|
flink-kubernetes/src/main/java/org/apache/flink/kubernetes/configuration/KubernetesConfigOptions.java
|
{
"start": 35761,
"end": 35890
}
|
/**
 * The kind of Kubernetes node address to report for a NodePort service.
 * Restores the redacted placeholder name from the enclosing
 * KubernetesConfigOptions context.
 */
enum NodePortAddressType {
    InternalIP,
    ExternalIP,
}
/** The container image pull policy. */
public
|
NodePortAddressType
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/runtime/jobmaster/JobExecutionITCase.java
|
{
"start": 1647,
"end": 4264
}
|
/**
 * Integration tests for job execution on a {@link TestingMiniCluster}.
 * Class name restored from the file path (JobExecutionITCase.java); the
 * placeholder {@code ____} is not a usable test class name.
 */
class JobExecutionITCase {
    /**
     * Tests that tasks with a co-location constraint are scheduled in the same slots. In fact it
     * also tests that consumers are scheduled wrt their input location if the co-location
     * constraint is deactivated.
     */
    @Test
    void testCoLocationConstraintJobExecution() throws Exception {
        final int numSlotsPerTaskExecutor = 1;
        final int numTaskExecutors = 3;
        final int parallelism = numTaskExecutors * numSlotsPerTaskExecutor;
        final JobGraph jobGraph = createJobGraph(parallelism);
        final TestingMiniClusterConfiguration miniClusterConfiguration =
                TestingMiniClusterConfiguration.newBuilder()
                        .setNumSlotsPerTaskManager(numSlotsPerTaskExecutor)
                        .setNumTaskManagers(numTaskExecutors)
                        .setLocalCommunication(true)
                        .build();
        try (TestingMiniCluster miniCluster =
                TestingMiniCluster.newBuilder(miniClusterConfiguration).build()) {
            miniCluster.start();
            miniCluster.submitJob(jobGraph).get();
            final CompletableFuture<JobResult> jobResultFuture =
                    miniCluster.requestJobResult(jobGraph.getJobID());
            assertThat(jobResultFuture.get().isSuccess()).isTrue();
        }
    }

    /** Builds a sender -> receiver pipeline whose vertices share a slot and are co-located. */
    private JobGraph createJobGraph(int parallelism) {
        final JobVertex sender = new JobVertex("Sender");
        sender.setParallelism(parallelism);
        sender.setInvokableClass(TestingAbstractInvokables.Sender.class);
        final JobVertex receiver = new JobVertex("Receiver");
        receiver.setParallelism(parallelism);
        receiver.setInvokableClass(TestingAbstractInvokables.Receiver.class);
        // In order to make testCoLocationConstraintJobExecution fail, one needs to
        // remove the co-location constraint and the slot sharing groups, because then
        // the receivers will have to wait for the senders to finish and the slot
        // assignment order to the receivers is non-deterministic (depending on the
        // order in which the senders finish).
        final SlotSharingGroup slotSharingGroup = new SlotSharingGroup();
        receiver.setSlotSharingGroup(slotSharingGroup);
        sender.setSlotSharingGroup(slotSharingGroup);
        receiver.setStrictlyCoLocatedWith(sender);
        connectNewDataSetAsInput(
                receiver, sender, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
        return JobGraphTestUtils.streamingJobGraph(sender, receiver);
    }
}
|
JobExecutionITCase
|
java
|
apache__flink
|
flink-table/flink-sql-gateway/src/main/java/org/apache/flink/table/gateway/rest/handler/statement/ExecuteStatementHandler.java
|
{
"start": 1904,
"end": 3618
}
|
class ____
extends AbstractSqlGatewayRestHandler<
ExecuteStatementRequestBody,
ExecuteStatementResponseBody,
SessionMessageParameters> {
public ExecuteStatementHandler(
SqlGatewayService service,
Map<String, String> responseHeaders,
MessageHeaders<
ExecuteStatementRequestBody,
ExecuteStatementResponseBody,
SessionMessageParameters>
messageHeaders) {
super(service, responseHeaders, messageHeaders);
}
@Override
protected CompletableFuture<ExecuteStatementResponseBody> handleRequest(
SqlGatewayRestAPIVersion version,
@Nonnull HandlerRequest<ExecuteStatementRequestBody> request) {
String statement = request.getRequestBody().getStatement();
Long timeout = request.getRequestBody().getTimeout();
timeout = timeout == null ? 0L : timeout;
SessionHandle sessionHandle = request.getPathParameter(SessionHandleIdPathParameter.class);
Map<String, String> executionConfigMap = request.getRequestBody().getExecutionConfig();
Configuration executionConfig =
executionConfigMap == null
? new Configuration()
: Configuration.fromMap(executionConfigMap);
OperationHandle operationHandle =
service.executeStatement(sessionHandle, statement, timeout, executionConfig);
return CompletableFuture.completedFuture(
new ExecuteStatementResponseBody(operationHandle.getIdentifier().toString()));
}
}
|
ExecuteStatementHandler
|
java
|
apache__flink
|
flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/data/binary/BinaryStringDataUtil.java
|
{
"start": 2132,
"end": 46172
}
|
class ____ {
// Canonical string constants shared across cast/format routines.
public static final BinaryStringData NULL_STRING = fromString("NULL");
public static final BinaryStringData TRUE_STRING = fromString("TRUE");
public static final BinaryStringData FALSE_STRING = fromString("FALSE");

public static final BinaryStringData[] EMPTY_STRING_ARRAY = new BinaryStringData[0];

// Accepted spellings for CAST(x AS BOOLEAN). The strings are materialized eagerly
// so that contains() lookups in toBoolean never race on lazy materialization.
private static final List<BinaryStringData> TRUE_STRINGS =
        Stream.of("t", "true", "y", "yes", "1")
                .map(BinaryStringData::fromString)
                .peek(BinaryStringData::ensureMaterialized)
                .collect(Collectors.toList());

private static final List<BinaryStringData> FALSE_STRINGS =
        Stream.of("f", "false", "n", "no", "0")
                .map(BinaryStringData::fromString)
                .peek(BinaryStringData::ensureMaterialized)
                .collect(Collectors.toList());
/** Copies the first {@code sizeInBytes} bytes of {@code str} into a reusable scratch buffer. */
private static byte[] getTmpBytes(BinaryStringData str, int sizeInBytes) {
    byte[] buffer = SegmentsUtil.allocateReuseBytes(sizeInBytes);
    SegmentsUtil.copyToBytes(str.getSegments(), str.getOffset(), buffer, 0, sizeInBytes);
    return buffer;
}
/**
 * Splits the provided text into an array, separator string specified.
 *
 * <p>The separator is not included in the returned String array. Adjacent separators are
 * treated as separators for empty tokens.
 *
 * <p>A {@code null} separator splits on whitespace.
 *
 * <pre>
 * "".splitByWholeSeparatorPreserveAllTokens(*)                 = []
 * "ab de fg".splitByWholeSeparatorPreserveAllTokens(null)      = ["ab", "de", "fg"]
 * "ab   de fg".splitByWholeSeparatorPreserveAllTokens(null)    = ["ab", "", "", "de", "fg"]
 * "ab:cd:ef".splitByWholeSeparatorPreserveAllTokens(":")       = ["ab", "cd", "ef"]
 * "ab-!-cd-!-ef".splitByWholeSeparatorPreserveAllTokens("-!-") = ["ab", "cd", "ef"]
 * </pre>
 *
 * <p>Note: returned binary strings reuse memory segments from the input str.
 *
 * @param separator String containing the String to be used as a delimiter, {@code null} splits
 *     on whitespace
 * @return an array of parsed Strings, {@code null} if null String was input
 */
public static BinaryStringData[] splitByWholeSeparatorPreserveAllTokens(
        BinaryStringData str, BinaryStringData separator) {
    str.ensureMaterialized();
    final int sizeInBytes = str.getSizeInBytes();
    MemorySegment[] segments = str.getSegments();
    int offset = str.getOffset();

    if (sizeInBytes == 0) {
        return EMPTY_STRING_ARRAY;
    }

    if (separator == null || EMPTY_UTF8.equals(separator)) {
        // Split on whitespace.
        return splitByWholeSeparatorPreserveAllTokens(str, fromString(" "));
    }

    separator.ensureMaterialized();
    int sepSize = separator.getSizeInBytes();
    MemorySegment[] sepSegs = separator.getSegments();
    int sepOffset = separator.getOffset();

    final ArrayList<BinaryStringData> substrings = new ArrayList<>();
    int beg = 0;
    int end = 0;
    while (end < sizeInBytes) {
        // SegmentsUtil.find returns an absolute address; subtracting `offset` makes `end`
        // relative to the start of the string (negative when the separator is not found).
        end =
                SegmentsUtil.find(
                                segments,
                                offset + beg,
                                sizeInBytes - beg,
                                sepSegs,
                                sepOffset,
                                sepSize)
                        - offset;

        if (end > -1) {
            if (end > beg) {
                // The following is OK, because String.substring( beg, end ) excludes
                // the character at the position 'end'.
                substrings.add(fromAddress(segments, offset + beg, end - beg));

                // Set the starting point for the next search.
                // The following is equivalent to beg = end + (separatorLength - 1) + 1,
                // which is the right calculation:
                beg = end + sepSize;
            } else {
                // We found a consecutive occurrence of the separator.
                substrings.add(EMPTY_UTF8);
                beg = end + sepSize;
            }
        } else {
            // String.substring( beg ) goes from 'beg' to the end of the String.
            substrings.add(fromAddress(segments, offset + beg, sizeInBytes - beg));
            end = sizeInBytes;
        }
    }

    return substrings.toArray(new BinaryStringData[0]);
}
/**
 * Parse a {@link StringData} to boolean. Accepts t/true/y/yes/1 and f/false/n/no/0,
 * case-insensitively; any other value raises a {@link TableException}.
 */
public static boolean toBoolean(BinaryStringData str) throws TableException {
    BinaryStringData lowered = str.toLowerCase();
    if (TRUE_STRINGS.contains(lowered)) {
        return true;
    } else if (FALSE_STRINGS.contains(lowered)) {
        return false;
    } else {
        throw new TableException("Cannot parse '" + str + "' as BOOLEAN.");
    }
}

/** Calculate the hash value of the given bytes use {@link MessageDigest}. */
public static BinaryStringData hash(byte[] bytes, MessageDigest md) {
    byte[] digest = md.digest(bytes);
    return fromString(EncodingUtils.hex(digest));
}

/** Calculate the hash value of a given string use {@link MessageDigest}. */
public static BinaryStringData hash(BinaryStringData str, MessageDigest md) {
    byte[] raw = str.toBytes();
    return hash(raw, md);
}

/** Calculate the hash value of a given string using the named digest algorithm. */
public static BinaryStringData hash(BinaryStringData str, String algorithm)
        throws NoSuchAlgorithmException {
    return hash(str, MessageDigest.getInstance(algorithm));
}
/**
 * Parses a {@link BinaryStringData} to {@link DecimalData}.
 *
 * @return DecimalData value if the parsing was successful.
 * @throws NumberFormatException if the input is malformed or does not fit precision/scale
 */
public static DecimalData toDecimal(BinaryStringData str, int precision, int scale)
        throws NumberFormatException {
    str.ensureMaterialized();

    DecimalData data;
    // Large precisions (or large inputs) are backed by BigDecimal; smaller ones use the
    // long-based fast path that parses directly from copied bytes.
    if (DecimalDataUtils.isByteArrayDecimal(precision)
            || DecimalDataUtils.isByteArrayDecimal(str.getSizeInBytes())) {
        data = toBigPrecisionDecimal(str, precision, scale);
    } else {
        int sizeInBytes = str.getSizeInBytes();
        data =
                toDecimalFromBytes(
                        precision, scale, getTmpBytes(str, sizeInBytes), 0, sizeInBytes);
    }

    if (data == null) {
        // Both parsing paths signal malformed input or overflow by returning null.
        throw numberFormatExceptionFor(str, "Overflow.");
    }
    return data;
}
/**
 * Fast path for parsing a decimal that fits into a single long
 * (precision <= DecimalData.MAX_LONG_DIGITS), reading digits directly from bytes.
 *
 * @return the parsed value, or {@code null} if the input is malformed or overflows
 */
private static DecimalData toDecimalFromBytes(
        int precision, int scale, byte[] bytes, int offset, int sizeInBytes) {
    // Data in DecimalData is stored by one long value if `precision` <=
    // DecimalData.MAX_LONG_DIGITS.
    // In this case we can directly extract the value from memory segment.
    int i = 0;

    // Remove white spaces at the beginning
    byte b = 0;
    while (i < sizeInBytes) {
        b = bytes[offset + i];
        if (b != ' ' && b != '\n' && b != '\t') {
            break;
        }
        i++;
    }
    if (i == sizeInBytes) {
        // all whitespaces
        return null;
    }

    // ======= begin significant part =======
    final boolean negative = b == '-';
    if (negative || b == '+') {
        i++;
        if (i == sizeInBytes) {
            // only contains prefix plus/minus
            return null;
        }
    }

    long significand = 0;
    int exp = 0;
    int significandLen = 0, pointPos = -1;

    while (i < sizeInBytes) {
        b = bytes[offset + i];
        i++;

        if (b >= '0' && b <= '9') {
            // No need to worry about overflow, because sizeInBytes <=
            // DecimalData.MAX_LONG_DIGITS
            significand = significand * 10 + (b - '0');
            significandLen++;
        } else if (b == '.') {
            if (pointPos >= 0) {
                // More than one decimal point
                return null;
            }
            pointPos = significandLen;
        } else {
            break;
        }
    }

    if (pointPos < 0) {
        // No decimal point seen: treat the value as an integer.
        pointPos = significandLen;
    }
    if (negative) {
        significand = -significand;
    }
    // ======= end significand part =======

    // ======= begin exponential part =======
    if ((b == 'e' || b == 'E') && i < sizeInBytes) {
        b = bytes[offset + i];
        final boolean expNegative = b == '-';
        if (expNegative || b == '+') {
            i++;
            if (i == sizeInBytes) {
                return null;
            }
        }

        int expDigits = 0;
        // As `precision` <= 18, value absolute range is limited to 10^-18 ~ 10^18.
        // The worst case is <18-digits>E-36
        final int expStopValue = 40;
        while (i < sizeInBytes) {
            b = bytes[offset + i];
            i++;

            if (b >= '0' && b <= '9') {
                // No need to worry about larger exponents,
                // because they will produce overflow or underflow
                if (expDigits < expStopValue) {
                    expDigits = expDigits * 10 + (b - '0');
                }
            } else {
                break;
            }
        }

        if (expNegative) {
            expDigits = -expDigits;
        }
        exp += expDigits;
    }
    exp -= significandLen - pointPos;
    // ======= end exponential part =======

    // Check for invalid character at the end
    while (i < sizeInBytes) {
        b = bytes[offset + i];
        i++;
        // White spaces are allowed at the end
        if (b != ' ' && b != '\n' && b != '\t') {
            return null;
        }
    }

    // Round exp to scale
    int change = exp + scale;
    if (significandLen + change > precision) {
        // Overflow
        return null;
    }
    if (change >= 0) {
        significand *= DecimalDataUtils.power10(change);
    } else {
        // Dropping digits: round half away from zero (k carries the sign).
        int k = negative ? -5 : 5;
        significand =
                (significand + k * DecimalDataUtils.power10(-change - 1))
                        / DecimalDataUtils.power10(-change);
    }
    return DecimalData.fromUnscaledLong(significand, precision, scale);
}
/**
 * Slow path for precisions backed by BigDecimal: decodes the UTF-8 bytes to a char
 * array, trims whitespace, and delegates to the BigDecimal(char[], int, int) constructor.
 *
 * @return the parsed value, or {@code null} if the bytes are not valid UTF-8
 */
private static DecimalData toBigPrecisionDecimal(
        BinaryStringData str, int precision, int scale) {
    // As data in DecimalData is currently stored by BigDecimal if `precision` >
    // DecimalData.MAX_LONG_DIGITS,
    // and BigDecimal only supports String or char[] for its constructor,
    // we can't directly extract the value from BinaryStringData.
    //
    // As BigDecimal(char[], int, int) is faster than BigDecimal(String, int, int),
    // we extract char[] from the memory segment and pass it to the constructor of BigDecimal.
    int sizeInBytes = str.getSizeInBytes();
    int offset = str.getOffset();
    MemorySegment[] segments = str.getSegments();
    char[] chars = SegmentsUtil.allocateReuseChars(sizeInBytes);
    int len;
    if (segments.length == 1) {
        // Single segment: decode in place without an intermediate byte copy.
        len = StringUtf8Utils.decodeUTF8Strict(segments[0], offset, sizeInBytes, chars);
    } else {
        byte[] bytes = SegmentsUtil.allocateReuseBytes(sizeInBytes);
        SegmentsUtil.copyToBytes(segments, offset, bytes, 0, sizeInBytes);
        len = StringUtf8Utils.decodeUTF8Strict(bytes, 0, sizeInBytes, chars);
    }

    if (len < 0) {
        return null;
    } else {
        // Trim white spaces
        int start = 0, end = len;
        for (int i = 0; i < len; i++) {
            if (chars[i] != ' ' && chars[i] != '\n' && chars[i] != '\t') {
                start = i;
                break;
            }
        }
        for (int i = len - 1; i >= 0; i--) {
            if (chars[i] != ' ' && chars[i] != '\n' && chars[i] != '\t') {
                end = i + 1;
                break;
            }
        }
        BigDecimal bd = new BigDecimal(chars, start, end - start);
        return DecimalData.fromBigDecimal(bd, precision, scale);
    }
}
/**
 * Parses this BinaryStringData to Long.
 *
 * <p>Note that, in this method we accumulate the result in negative format, and convert it to
 * positive format at the end, if this string is not started with '-'. This is because min value
 * is bigger than max value in digits, e.g. Long.MAX_VALUE is '9223372036854775807' and
 * Long.MIN_VALUE is '-9223372036854775808'.
 *
 * <p>This code is mostly copied from LazyLong.parseLong in Hive.
 *
 * @throws NumberFormatException on empty input, invalid characters, or overflow
 */
public static long toLong(BinaryStringData str) throws NumberFormatException {
    int sizeInBytes = str.getSizeInBytes();
    byte[] tmpBytes = getTmpBytes(str, sizeInBytes);
    if (sizeInBytes == 0) {
        throw numberFormatExceptionFor(str, "Input is empty.");
    }
    int i = 0;

    byte b = tmpBytes[i];
    final boolean negative = b == '-';
    if (negative || b == '+') {
        i++;
        if (sizeInBytes == 1) {
            throw numberFormatExceptionFor(str, "Input has only positive or negative symbol.");
        }
    }

    long result = 0;
    final byte separator = '.';
    final int radix = 10;
    final long stopValue = Long.MIN_VALUE / radix;
    while (i < sizeInBytes) {
        b = tmpBytes[i];
        i++;
        if (b == separator) {
            // We allow decimals and will return a truncated integral in that case.
            // Therefore we won't throw an exception here (checking the fractional
            // part happens below.)
            break;
        }

        int digit;
        if (b >= '0' && b <= '9') {
            digit = b - '0';
        } else {
            throw numberFormatExceptionFor(str, "Invalid character found.");
        }

        // We are going to process the new digit and accumulate the result. However, before
        // doing this, if the result is already smaller than the
        // stopValue(Long.MIN_VALUE / radix), then result * 10 will definitely be smaller
        // than minValue, and we can stop.
        if (result < stopValue) {
            throw numberFormatExceptionFor(str, "Overflow.");
        }

        result = result * radix - digit;
        // Since the previous result is less than or equal to
        // stopValue(Long.MIN_VALUE / radix), we can just use `result > 0` to check overflow.
        // If result overflows, we should stop.
        if (result > 0) {
            throw numberFormatExceptionFor(str, "Overflow.");
        }
    }

    // This is the case when we've encountered a decimal separator. The fractional
    // part will not change the number, but we will verify that the fractional part
    // is well formed.
    while (i < sizeInBytes) {
        byte currentByte = tmpBytes[i];
        if (currentByte < '0' || currentByte > '9') {
            throw numberFormatExceptionFor(str, "Invalid character found.");
        }
        i++;
    }

    if (!negative) {
        // Flip the negatively-accumulated value; a still-negative result means
        // the magnitude exceeded Long.MAX_VALUE.
        result = -result;
        if (result < 0) {
            throw numberFormatExceptionFor(str, "Overflow.");
        }
    }
    return result;
}
/**
 * Parses this BinaryStringData to Int.
 *
 * <p>Note that, in this method we accumulate the result in negative format, and convert it to
 * positive format at the end, if this string is not started with '-'. This is because min value
 * is bigger than max value in digits, e.g. Integer.MAX_VALUE is '2147483647' and
 * Integer.MIN_VALUE is '-2147483648'.
 *
 * <p>This code is mostly copied from LazyInt.parseInt in Hive.
 *
 * <p>Note that, this method is almost same as `toLong`, but we leave it duplicated for
 * performance reasons, like Hive does.
 *
 * @throws NumberFormatException on empty input, invalid characters, or overflow
 */
public static int toInt(BinaryStringData str) throws NumberFormatException {
    int sizeInBytes = str.getSizeInBytes();
    byte[] tmpBytes = getTmpBytes(str, sizeInBytes);
    if (sizeInBytes == 0) {
        throw numberFormatExceptionFor(str, "Input is empty.");
    }
    int i = 0;

    byte b = tmpBytes[i];
    final boolean negative = b == '-';
    if (negative || b == '+') {
        i++;
        if (sizeInBytes == 1) {
            throw numberFormatExceptionFor(str, "Input has only positive or negative symbol.");
        }
    }

    int result = 0;
    final byte separator = '.';
    final int radix = 10;
    final long stopValue = Integer.MIN_VALUE / radix;
    while (i < sizeInBytes) {
        b = tmpBytes[i];
        i++;
        if (b == separator) {
            // We allow decimals and will return a truncated integral in that case.
            // Therefore we won't throw an exception here (checking the fractional
            // part happens below.)
            break;
        }

        int digit;
        if (b >= '0' && b <= '9') {
            digit = b - '0';
        } else {
            throw numberFormatExceptionFor(str, "Invalid character found.");
        }

        // We are going to process the new digit and accumulate the result. However, before
        // doing this, if the result is already smaller than the
        // stopValue(Long.MIN_VALUE / radix), then result * 10 will definitely be smaller
        // than minValue, and we can stop.
        if (result < stopValue) {
            throw numberFormatExceptionFor(str, "Overflow.");
        }

        result = result * radix - digit;
        // Since the previous result is less than or equal to
        // stopValue(Long.MIN_VALUE / radix), we can just use `result > 0` to check overflow.
        // If result overflows, we should stop.
        if (result > 0) {
            throw numberFormatExceptionFor(str, "Overflow.");
        }
    }

    // This is the case when we've encountered a decimal separator. The fractional
    // part will not change the number, but we will verify that the fractional part
    // is well formed.
    while (i < sizeInBytes) {
        byte currentByte = tmpBytes[i];
        if (currentByte < '0' || currentByte > '9') {
            throw numberFormatExceptionFor(str, "Invalid character found.");
        }
        i++;
    }

    if (!negative) {
        // Flip the negatively-accumulated value; a still-negative result means
        // the magnitude exceeded Integer.MAX_VALUE.
        result = -result;
        if (result < 0) {
            throw numberFormatExceptionFor(str, "Overflow.");
        }
    }
    return result;
}
/** Parses to short; throws when the value does not fit in 16 bits. */
public static short toShort(BinaryStringData str) throws NumberFormatException {
    int wide = toInt(str);
    short narrowed = (short) wide;
    if (narrowed != wide) {
        throw numberFormatExceptionFor(str, "Overflow.");
    }
    return narrowed;
}

/** Parses to byte; throws when the value does not fit in 8 bits. */
public static byte toByte(BinaryStringData str) throws NumberFormatException {
    int wide = toInt(str);
    byte narrowed = (byte) wide;
    if (narrowed != wide) {
        throw numberFormatExceptionFor(str, "Overflow.");
    }
    return narrowed;
}

/** Parses to double via {@link Double#parseDouble}. */
public static double toDouble(BinaryStringData str) throws NumberFormatException {
    return Double.parseDouble(str.toString());
}

/** Parses to float via {@link Float#parseFloat}. */
public static float toFloat(BinaryStringData str) throws NumberFormatException {
    return Float.parseFloat(str.toString());
}

/** Builds the uniform parse-failure exception used by all numeric conversions. */
private static NumberFormatException numberFormatExceptionFor(StringData input, String reason) {
    return new NumberFormatException("For input string: '" + input + "'. " + reason);
}
/** Parses to a DATE value (days since epoch); throws on malformed input. */
public static int toDate(BinaryStringData input) throws DateTimeException {
    Integer days = DateTimeUtils.parseDate(input.toString());
    if (days == null) {
        throw new DateTimeException("For input string: '" + input + "'.");
    }
    return days;
}

/** Parses to a TIME value (millis of day), truncated to the target precision. */
public static int toTime(BinaryStringData input, int precision) throws DateTimeException {
    Integer millis = DateTimeUtils.parseTime(input.toString());
    if (millis == null) {
        throw new DateTimeException(
                "Invalid time format: '"
                        + input
                        + "'. "
                        + "Expected format: HH:mm:ss[.fff] where HH is 00-23, mm is 00-59, ss is 00-59");
    }
    return DateTimeUtils.applyTimePrecisionTruncation(millis, precision);
}

/** Used by {@code CAST(x as TIMESTAMP)}. */
public static TimestampData toTimestamp(BinaryStringData input, int precision)
        throws DateTimeException {
    return DateTimeUtils.parseTimestampData(input.toString(), precision);
}

/** Used by {@code CAST(x as TIMESTAMP_LTZ)}. */
public static TimestampData toTimestamp(
        BinaryStringData input, int precision, TimeZone timeZone) throws DateTimeException {
    return DateTimeUtils.parseTimestampData(input.toString(), precision, timeZone);
}
/**
 * Parses the target string as a key-value list and returns the value whose key matches
 * {@code keyName}. Returns null when any argument is null or no key matches.
 * Examples: keyvalue('k1=v1;k2=v2', ';', '=', 'k2') = 'v2';
 * keyvalue('k1:v1,k2:v2', ',', ':', 'k3') = NULL.
 *
 * @param split1 separator between key-value tuple.
 * @param split2 separator between key and value.
 * @param keyName name of the key whose value you want return.
 * @return target value.
 */
public static BinaryStringData keyValue(
        BinaryStringData str, byte split1, byte split2, BinaryStringData keyName) {
    str.ensureMaterialized();
    if (keyName == null || keyName.getSizeInBytes() == 0) {
        return null;
    }
    // Fast path: both strings fit in their first memory segment, so bytes can be
    // addressed directly; otherwise fall back to the multi-segment implementation.
    if (str.inFirstSegment() && keyName.inFirstSegment()) {
        // position in byte
        int byteIdx = 0;
        // position of last split1
        int lastSplit1Idx = -1;
        while (byteIdx < str.getSizeInBytes()) {
            // If find next split1 in str, process current kv
            if (str.getSegments()[0].get(str.getOffset() + byteIdx) == split1) {
                int currentKeyIdx = lastSplit1Idx + 1;
                // If key of current kv is keyName, return the value directly
                BinaryStringData value =
                        findValueOfKey(str, split2, keyName, currentKeyIdx, byteIdx);
                if (value != null) {
                    return value;
                }
                lastSplit1Idx = byteIdx;
            }
            byteIdx++;
        }
        // process the string which is not ends with split1
        int currentKeyIdx = lastSplit1Idx + 1;
        return findValueOfKey(str, split2, keyName, currentKeyIdx, str.getSizeInBytes());
    } else {
        return keyValueSlow(str, split1, split2, keyName);
    }
}
/**
 * Single-segment fast path: inspects the key-value pair occupying {@code [start, end)} of
 * {@code str}; if its key equals {@code keyName}, returns a copy of the value bytes,
 * otherwise null.
 */
private static BinaryStringData findValueOfKey(
        BinaryStringData str, byte split, BinaryStringData keyName, int start, int end) {
    int keyNameLen = keyName.getSizeInBytes();
    for (int idx = start; idx < end; idx++) {
        if (str.getSegments()[0].get(str.getOffset() + idx) == split) {
            // The key matches only if the split sits exactly keyNameLen bytes after
            // `start` and the preceding bytes equal keyName.
            if (idx == start + keyNameLen
                    && str.getSegments()[0].equalTo(
                            keyName.getSegments()[0],
                            str.getOffset() + start,
                            keyName.getOffset(),
                            keyNameLen)) {
                int valueIdx = idx + 1;
                int valueLen = end - valueIdx;
                byte[] bytes = new byte[valueLen];
                str.getSegments()[0].get(str.getOffset() + valueIdx, bytes, 0, valueLen);
                return fromBytes(bytes, 0, valueLen);
            } else {
                return null;
            }
        }
    }
    return null;
}
/**
 * Multi-segment fallback of {@link #keyValue}: same scan over key-value pairs, but byte
 * access goes through {@code byteAt} instead of addressing the first segment directly.
 */
private static BinaryStringData keyValueSlow(
        BinaryStringData str, byte split1, byte split2, BinaryStringData keyName) {
    // position in byte
    int byteIdx = 0;
    // position of last split1
    int lastSplit1Idx = -1;
    while (byteIdx < str.getSizeInBytes()) {
        // If find next split1 in str, process current kv
        if (str.byteAt(byteIdx) == split1) {
            int currentKeyIdx = lastSplit1Idx + 1;
            BinaryStringData value =
                    findValueOfKeySlow(str, split2, keyName, currentKeyIdx, byteIdx);
            if (value != null) {
                return value;
            }
            lastSplit1Idx = byteIdx;
        }
        byteIdx++;
    }
    // Handle the trailing pair when the string does not end with split1.
    int currentKeyIdx = lastSplit1Idx + 1;
    return findValueOfKeySlow(str, split2, keyName, currentKeyIdx, str.getSizeInBytes());
}
/**
 * Multi-segment fallback of {@link #findValueOfKey}: compares the key via
 * {@code SegmentsUtil.equals} and copies the value across segment boundaries.
 */
private static BinaryStringData findValueOfKeySlow(
        BinaryStringData str, byte split, BinaryStringData keyName, int start, int end) {
    int keyNameLen = keyName.getSizeInBytes();
    for (int idx = start; idx < end; idx++) {
        if (str.byteAt(idx) == split) {
            // The key matches only if the split sits exactly keyNameLen bytes after
            // `start` and the preceding bytes equal keyName.
            if (idx == start + keyNameLen
                    && SegmentsUtil.equals(
                            str.getSegments(),
                            str.getOffset() + start,
                            keyName.getSegments(),
                            keyName.getOffset(),
                            keyNameLen)) {
                int valueIdx = idx + 1;
                byte[] bytes =
                        SegmentsUtil.copyToBytes(
                                str.getSegments(), str.getOffset() + valueIdx, end - valueIdx);
                return fromBytes(bytes);
            } else {
                return null;
            }
        }
    }
    return null;
}
/** SQL SUBSTRING from {@code pos} to the end of the string. */
public static BinaryStringData substringSQL(BinaryStringData str, int pos) {
    return substringSQL(str, pos, Integer.MAX_VALUE);
}

/**
 * SQL SUBSTRING: 1-based {@code pos} (negative counts from the end, 0 behaves like 1),
 * clipped to at most {@code length} characters. Returns null for a negative length and
 * the empty string when the start position falls outside the input.
 */
public static BinaryStringData substringSQL(BinaryStringData str, int pos, int length) {
    if (length < 0) {
        return null;
    }
    str.ensureMaterialized();
    if (str.equals(EMPTY_UTF8)) {
        return EMPTY_UTF8;
    }

    final int numChars = str.numChars();
    final int start;
    if (pos > 0) {
        start = pos - 1;
    } else if (pos < 0) {
        start = numChars + pos;
    } else {
        start = 0;
    }
    if (start < 0 || start >= numChars) {
        return EMPTY_UTF8;
    }

    // Compare via subtraction to avoid overflow of start + length
    // when length is Integer.MAX_VALUE.
    final int end = (numChars - start < length) ? numChars : start + length;
    return str.substring(start, end);
}
/**
 * Concatenates input strings together into a single string. Returns NULL if any argument is
 * NULL.
 */
public static BinaryStringData concat(BinaryStringData... inputs) {
    return concat(Arrays.asList(inputs));
}

/** Iterable variant of {@link #concat(BinaryStringData...)}; null-in, null-out. */
public static BinaryStringData concat(Iterable<BinaryStringData> inputs) {
    // First pass: reject nulls and size the output buffer.
    int total = 0;
    for (BinaryStringData piece : inputs) {
        if (piece == null) {
            return null;
        }
        piece.ensureMaterialized();
        total += piece.getSizeInBytes();
    }

    // Second pass: copy every piece into the buffer back to back.
    final byte[] output = new byte[total];
    int pos = 0;
    for (BinaryStringData piece : inputs) {
        if (piece != null) {
            int len = piece.getSizeInBytes();
            SegmentsUtil.copyToBytes(piece.getSegments(), piece.getOffset(), output, pos, len);
            pos += len;
        }
    }
    return fromBytes(output);
}
/**
 * Concatenates input strings together into a single string using the separator. Returns NULL If
 * the separator is NULL.
 *
 * <p>Note: CONCAT_WS() does not skip any empty strings, however it does skip any NULL values
 * after the separator. For example, concat_ws(",", "a", null, "c") would yield "a,c".
 */
public static BinaryStringData concatWs(
        BinaryStringData separator, BinaryStringData... inputs) {
    return concatWs(separator, Arrays.asList(inputs));
}

/** Iterable variant of {@link #concatWs(BinaryStringData, BinaryStringData...)}. */
public static BinaryStringData concatWs(
        BinaryStringData separator, Iterable<BinaryStringData> inputs) {
    if (null == separator) {
        return null;
    }
    separator.ensureMaterialized();

    // First pass: count non-null pieces and their payload bytes.
    int payloadBytes = 0;
    int nonNullCount = 0;
    for (BinaryStringData piece : inputs) {
        if (piece != null) {
            piece.ensureMaterialized();
            payloadBytes += piece.getSizeInBytes();
            nonNullCount++;
        }
    }
    if (nonNullCount == 0) {
        // No input, or all inputs are null: the result is the empty string.
        return EMPTY_UTF8;
    }

    // Second pass: copy pieces, inserting the separator between consecutive ones.
    final int sepLen = separator.getSizeInBytes();
    final byte[] output = new byte[payloadBytes + (nonNullCount - 1) * sepLen];
    int pos = 0;
    int appended = 0;
    for (BinaryStringData piece : inputs) {
        if (piece != null) {
            int len = piece.getSizeInBytes();
            SegmentsUtil.copyToBytes(piece.getSegments(), piece.getOffset(), output, pos, len);
            pos += len;
            appended++;
            // Add separator if this is not the last non-null input.
            if (appended < nonNullCount) {
                SegmentsUtil.copyToBytes(
                        separator.getSegments(), separator.getOffset(), output, pos, sepLen);
                pos += sepLen;
            }
        }
    }
    return fromBytes(output);
}
/**
* Reverse each character in current string.
*
* @return a new string which character order is reverse to current string.
*/
public static BinaryStringData reverse(BinaryStringData str) {
str.ensureMaterialized();
if (str.inFirstSegment()) {
byte[] result = new byte[str.getSizeInBytes()];
// position in byte
int byteIdx = 0;
while (byteIdx < str.getSizeInBytes()) {
int charBytes = numBytesForFirstByte(str.getByteOneSegment(byteIdx));
str.getSegments()[0].get(
str.getOffset() + byteIdx,
result,
result.length - byteIdx - charBytes,
charBytes);
byteIdx += charBytes;
}
return BinaryStringData.fromBytes(result);
} else {
return reverseMultiSegs(str);
}
}
private static BinaryStringData reverseMultiSegs(BinaryStringData str) {
byte[] result = new byte[str.getSizeInBytes()];
// position in byte
int byteIdx = 0;
int segSize = str.getSegments()[0].size();
BinaryStringData.SegmentAndOffset index = str.firstSegmentAndOffset(segSize);
while (byteIdx < str.getSizeInBytes()) {
int charBytes = numBytesForFirstByte(index.value());
SegmentsUtil.copyMultiSegmentsToBytes(
str.getSegments(),
str.getOffset() + byteIdx,
result,
result.length - byteIdx - charBytes,
charBytes);
byteIdx += charBytes;
index.skipBytes(charBytes, segSize);
}
return BinaryStringData.fromBytes(result);
}
/**
* Walk each character of current string from both ends, remove the character if it is in trim
* string. Return the new substring which both ends trim characters have been removed.
*
* @param trimStr the trim string
* @return A subString which both ends trim characters have been removed.
*/
public static BinaryStringData trim(BinaryStringData str, BinaryStringData trimStr) {
if (trimStr == null) {
return null;
}
return trimRight(trimLeft(str, trimStr), trimStr);
}
public static BinaryStringData trimLeft(BinaryStringData str) {
str.ensureMaterialized();
if (str.inFirstSegment()) {
int s = 0;
// skip all of the space (0x20) in the left side
while (s < str.getSizeInBytes() && str.getByteOneSegment(s) == 0x20) {
s++;
}
if (s == str.getSizeInBytes()) {
// empty string
return EMPTY_UTF8;
} else {
return str.copyBinaryStringInOneSeg(s, str.getSizeInBytes() - s);
}
} else {
return trimLeftSlow(str);
}
}
private static BinaryStringData trimLeftSlow(BinaryStringData str) {
int s = 0;
int segSize = str.getSegments()[0].size();
BinaryStringData.SegmentAndOffset front = str.firstSegmentAndOffset(segSize);
// skip all of the space (0x20) in the left side
while (s < str.getSizeInBytes() && front.value() == 0x20) {
s++;
front.nextByte(segSize);
}
if (s == str.getSizeInBytes()) {
// empty string
return EMPTY_UTF8;
} else {
return str.copyBinaryString(s, str.getSizeInBytes() - 1);
}
}
public static boolean isEmpty(BinaryStringData str) {
// check javaObject or binarySection directly rather than call
// BinaryStringData#getSizeInBytes to avoid performance loss caused by materialization
if (str.javaObject != null) {
return str.javaObject.isEmpty();
} else {
return str.binarySection == null || str.binarySection.getSizeInBytes() == 0;
}
}
public static boolean isSpaceString(BinaryStringData str) {
if (str.javaObject != null) {
return str.javaObject.equals(" ");
} else {
return str.byteAt(0) == ' ';
}
}
/**
* Walk each character of current string from left end, remove the character if it is in trim
* string. Stops at the first character which is not in trim string. Return the new substring.
*
* @param trimStr the trim string
* @return A subString which removes all of the character from the left side that is in trim
* string.
*/
public static BinaryStringData trimLeft(BinaryStringData str, BinaryStringData trimStr) {
str.ensureMaterialized();
if (trimStr == null) {
return null;
}
trimStr.ensureMaterialized();
if (isSpaceString(trimStr)) {
return trimLeft(str);
}
if (str.inFirstSegment()) {
int searchIdx = 0;
while (searchIdx < str.getSizeInBytes()) {
int charBytes = numBytesForFirstByte(str.getByteOneSegment(searchIdx));
BinaryStringData currentChar = str.copyBinaryStringInOneSeg(searchIdx, charBytes);
// try to find the matching for the character in the trimString characters.
if (trimStr.contains(currentChar)) {
searchIdx += charBytes;
} else {
break;
}
}
// empty string
if (searchIdx >= str.getSizeInBytes()) {
return EMPTY_UTF8;
} else {
return str.copyBinaryStringInOneSeg(searchIdx, str.getSizeInBytes() - searchIdx);
}
} else {
return trimLeftSlow(str, trimStr);
}
}
private static BinaryStringData trimLeftSlow(BinaryStringData str, BinaryStringData trimStr) {
int searchIdx = 0;
int segSize = str.getSegments()[0].size();
BinaryStringData.SegmentAndOffset front = str.firstSegmentAndOffset(segSize);
while (searchIdx < str.getSizeInBytes()) {
int charBytes = numBytesForFirstByte(front.value());
BinaryStringData currentChar =
str.copyBinaryString(searchIdx, searchIdx + charBytes - 1);
if (trimStr.contains(currentChar)) {
searchIdx += charBytes;
front.skipBytes(charBytes, segSize);
} else {
break;
}
}
if (searchIdx == str.getSizeInBytes()) {
// empty string
return EMPTY_UTF8;
} else {
return str.copyBinaryString(searchIdx, str.getSizeInBytes() - 1);
}
}
public static BinaryStringData trimRight(BinaryStringData str) {
str.ensureMaterialized();
if (str.inFirstSegment()) {
int e = str.getSizeInBytes() - 1;
// skip all of the space (0x20) in the right side
while (e >= 0 && str.getByteOneSegment(e) == 0x20) {
e--;
}
if (e < 0) {
// empty string
return EMPTY_UTF8;
} else {
return str.copyBinaryStringInOneSeg(0, e + 1);
}
} else {
return trimRightSlow(str);
}
}
private static BinaryStringData trimRightSlow(BinaryStringData str) {
int e = str.getSizeInBytes() - 1;
int segSize = str.getSegments()[0].size();
BinaryStringData.SegmentAndOffset behind = str.lastSegmentAndOffset(segSize);
// skip all of the space (0x20) in the right side
while (e >= 0 && behind.value() == 0x20) {
e--;
behind.previousByte(segSize);
}
if (e < 0) {
// empty string
return EMPTY_UTF8;
} else {
return str.copyBinaryString(0, e);
}
}
/**
* Walk each character of current string from right end, remove the character if it is in trim
* string. Stops at the first character which is not in trim string. Return the new substring.
*
* @param trimStr the trim string
* @return A subString which removes all of the character from the right side that is in trim
* string.
*/
public static BinaryStringData trimRight(BinaryStringData str, BinaryStringData trimStr) {
str.ensureMaterialized();
if (trimStr == null) {
return null;
}
trimStr.ensureMaterialized();
if (isSpaceString(trimStr)) {
return trimRight(str);
}
if (str.inFirstSegment()) {
int charIdx = 0;
int byteIdx = 0;
// each element in charLens is length of character in the source string
int[] charLens = new int[str.getSizeInBytes()];
// each element in charStartPos is start position of first byte in the source string
int[] charStartPos = new int[str.getSizeInBytes()];
while (byteIdx < str.getSizeInBytes()) {
charStartPos[charIdx] = byteIdx;
charLens[charIdx] = numBytesForFirstByte(str.getByteOneSegment(byteIdx));
byteIdx += charLens[charIdx];
charIdx++;
}
// searchIdx points to the first character which is not in trim string from the right
// end.
int searchIdx = str.getSizeInBytes() - 1;
charIdx -= 1;
while (charIdx >= 0) {
BinaryStringData currentChar =
str.copyBinaryStringInOneSeg(charStartPos[charIdx], charLens[charIdx]);
if (trimStr.contains(currentChar)) {
searchIdx -= charLens[charIdx];
} else {
break;
}
charIdx--;
}
if (searchIdx < 0) {
// empty string
return EMPTY_UTF8;
} else {
return str.copyBinaryStringInOneSeg(0, searchIdx + 1);
}
} else {
return trimRightSlow(str, trimStr);
}
}
private static BinaryStringData trimRightSlow(BinaryStringData str, BinaryStringData trimStr) {
int charIdx = 0;
int byteIdx = 0;
int segSize = str.getSegments()[0].size();
BinaryStringData.SegmentAndOffset index = str.firstSegmentAndOffset(segSize);
// each element in charLens is length of character in the source string
int[] charLens = new int[str.getSizeInBytes()];
// each element in charStartPos is start position of first byte in the source string
int[] charStartPos = new int[str.getSizeInBytes()];
while (byteIdx < str.getSizeInBytes()) {
charStartPos[charIdx] = byteIdx;
int charBytes = numBytesForFirstByte(index.value());
charLens[charIdx] = charBytes;
byteIdx += charBytes;
charIdx++;
index.skipBytes(charBytes, segSize);
}
// searchIdx points to the first character which is not in trim string from the right
// end.
int searchIdx = str.getSizeInBytes() - 1;
charIdx -= 1;
while (charIdx >= 0) {
BinaryStringData currentChar =
str.copyBinaryString(
charStartPos[charIdx], charStartPos[charIdx] + charLens[charIdx] - 1);
if (trimStr.contains(currentChar)) {
searchIdx -= charLens[charIdx];
} else {
break;
}
charIdx--;
}
if (searchIdx < 0) {
// empty string
return EMPTY_UTF8;
} else {
return str.copyBinaryString(0, searchIdx);
}
}
public static BinaryStringData trim(
BinaryStringData str, boolean leading, boolean trailing, BinaryStringData seek) {
str.ensureMaterialized();
if (seek == null) {
return null;
}
if (leading && trailing) {
return trim(str, seek);
} else if (leading) {
return trimLeft(str, seek);
} else if (trailing) {
return trimRight(str, seek);
} else {
return str;
}
}
public static String safeToString(BinaryStringData str) {
if (str == null) {
return null;
} else {
return str.toString();
}
}
}
|
BinaryStringDataUtil
|
java
|
apache__camel
|
components/camel-xchange/src/main/java/org/apache/camel/component/xchange/XChangeMarketDataProducer.java
|
{
"start": 1206,
"end": 2133
}
|
class ____ extends DefaultProducer {
public XChangeMarketDataProducer(XChangeEndpoint endpoint) {
super(endpoint);
}
@Override
public XChangeEndpoint getEndpoint() {
return (XChangeEndpoint) super.getEndpoint();
}
@Override
public void process(Exchange exchange) throws Exception {
XChangeEndpoint endpoint = getEndpoint();
XChangeMethod method = endpoint.getConfiguration().getMethod();
if (XChangeMethod.ticker == method) {
CurrencyPair pair = exchange.getIn().getHeader(HEADER_CURRENCY_PAIR, CurrencyPair.class);
pair = pair != null ? pair : exchange.getMessage().getBody(CurrencyPair.class);
pair = pair != null ? pair : endpoint.getConfiguration().getAsCurrencyPair();
Ticker ticker = endpoint.getTicker(pair);
exchange.getMessage().setBody(ticker);
}
}
}
|
XChangeMarketDataProducer
|
java
|
alibaba__druid
|
core/src/main/java/com/alibaba/druid/sql/repository/SchemaRepository.java
|
{
"start": 27171,
"end": 28974
}
|
class ____ extends SQLServerASTVisitorAdapter {
public SqlServerConsoleSchemaVisitor() {
this.dbType = DbType.sqlserver;
}
public boolean visit(SQLDropSequenceStatement x) {
acceptDropSequence(x);
return false;
}
public boolean visit(SQLCreateSequenceStatement x) {
acceptCreateSequence(x);
return false;
}
public boolean visit(OracleCreateTableStatement x) {
visit((SQLCreateTableStatement) x);
return false;
}
public boolean visit(SQLCreateTableStatement x) {
acceptCreateTable(x);
return false;
}
public boolean visit(SQLDropTableStatement x) {
acceptDropTable(x);
return false;
}
public boolean visit(SQLCreateViewStatement x) {
acceptView(x);
return false;
}
public boolean visit(SQLAlterViewStatement x) {
acceptView(x);
return false;
}
public boolean visit(SQLCreateIndexStatement x) {
acceptCreateIndex(x);
return false;
}
public boolean visit(SQLCreateFunctionStatement x) {
acceptCreateFunction(x);
return false;
}
public boolean visit(SQLAlterTableStatement x) {
acceptAlterTable(x);
return false;
}
public boolean visit(SQLUseStatement x) {
String schema = x.getDatabase().getSimpleName();
setDefaultSchema(schema);
return false;
}
public boolean visit(SQLDropIndexStatement x) {
acceptDropIndex(x);
return false;
}
}
public
|
SqlServerConsoleSchemaVisitor
|
java
|
spring-projects__spring-framework
|
spring-context/src/main/java/org/springframework/context/annotation/AnnotationBeanNameGenerator.java
|
{
"start": 3351,
"end": 5402
}
|
class ____ implements BeanNameGenerator {
/**
* A convenient constant for a default {@code AnnotationBeanNameGenerator} instance,
* as used for component scanning purposes.
* @since 5.2
*/
public static final AnnotationBeanNameGenerator INSTANCE = new AnnotationBeanNameGenerator();
private static final String COMPONENT_ANNOTATION_CLASSNAME = "org.springframework.stereotype.Component";
private static final Adapt[] ADAPTATIONS = Adapt.values(false, true);
private static final Log logger = LogFactory.getLog(AnnotationBeanNameGenerator.class);
/**
* Set used to track which stereotype annotations have already been checked
* to see if they use a convention-based override for the {@code value}
* attribute in {@code @Component}.
* @since 6.1
* @see #determineBeanNameFromAnnotation(AnnotatedBeanDefinition)
*/
private static final Set<String> conventionBasedStereotypeCheckCache = ConcurrentHashMap.newKeySet();
private final Map<String, Set<String>> metaAnnotationTypesCache = new ConcurrentHashMap<>();
@Override
public String generateBeanName(BeanDefinition definition, BeanDefinitionRegistry registry) {
if (definition instanceof AnnotatedBeanDefinition annotatedBeanDefinition) {
String beanName = determineBeanNameFromAnnotation(annotatedBeanDefinition);
if (StringUtils.hasText(beanName)) {
// Explicit bean name found.
return beanName;
}
}
// Fallback: generate a unique default bean name.
return buildDefaultBeanName(definition, registry);
}
/**
* Derive a bean name from one of the annotations on the class.
* @param annotatedDef the annotation-aware bean definition
* @return the bean name, or {@code null} if none is found
*/
protected @Nullable String determineBeanNameFromAnnotation(AnnotatedBeanDefinition annotatedDef) {
AnnotationMetadata metadata = annotatedDef.getMetadata();
String beanName = getExplicitBeanName(metadata);
if (beanName != null) {
return beanName;
}
// List of annotations directly present on the
|
AnnotationBeanNameGenerator
|
java
|
elastic__elasticsearch
|
x-pack/plugin/monitoring/src/internalClusterTest/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java
|
{
"start": 4096,
"end": 21645
}
|
class ____ extends ESSingleNodeTestCase {
@Override
protected Settings nodeSettings() {
return Settings.builder()
.put(super.nodeSettings())
.put(XPackSettings.MACHINE_LEARNING_ENABLED.getKey(), false)
.put("xpack.monitoring.collection.interval", MonitoringService.MIN_INTERVAL)
.put("xpack.monitoring.exporters._local.type", "local")
.put("xpack.monitoring.exporters._local.enabled", false)
.put("xpack.monitoring.exporters._local.cluster_alerts.management.enabled", false)
.build();
}
@Override
protected Collection<Class<? extends Plugin>> getPlugins() {
return List.of(
LocalStateMonitoring.class,
MockIngestPlugin.class,
CommonAnalysisPlugin.class,
MapperExtrasPlugin.class,
Wildcard.class
);
}
private String createBulkEntity() {
return """
{"index":{"_type":"monitoring_data_type"}}
{"foo":{"bar":0}}
{"index":{"_type":"monitoring_data_type"}}
{"foo":{"bar":1}}
{"index":{"_type":"monitoring_data_type"}}
{"foo":{"bar":2}}
""";
}
/**
* Monitoring Bulk test:
*
* This test uses the Monitoring Bulk Request to index documents. It then ensure that the documents were correctly
* indexed and have the expected information. REST API tests (like how this is really called) are handled as part of the
* XPackRest tests.
*/
public void testMonitoringBulk() throws Exception {
whenExportersAreReady(() -> {
final MonitoredSystem system = randomSystem();
final TimeValue interval = TimeValue.timeValueSeconds(randomIntBetween(1, 20));
final MonitoringBulkResponse bulkResponse = new MonitoringBulkRequestBuilder(client()).add(
system,
new BytesArray(createBulkEntity().getBytes(StandardCharsets.UTF_8)),
XContentType.JSON,
System.currentTimeMillis(),
interval.millis()
).get();
assertThat(bulkResponse.status(), is(RestStatus.OK));
assertThat(bulkResponse.getError(), nullValue());
final String monitoringIndex = ".monitoring-" + system.getSystem() + "-" + TEMPLATE_VERSION + "-*";
// Wait for the monitoring index to be created
assertBusy(() -> {
// Monitoring uses auto_expand_replicas, so it should be green even without replicas
ensureGreen(monitoringIndex);
assertThat(client().admin().indices().prepareRefresh(monitoringIndex).get().getStatus(), is(RestStatus.OK));
assertResponse(client().prepareSearch(".monitoring-" + system.getSystem() + "-" + TEMPLATE_VERSION + "-*"), response -> {
// exactly 3 results are expected
assertThat("No monitoring documents yet", response.getHits().getTotalHits().value(), equalTo(3L));
final List<Map<String, Object>> sources = Arrays.stream(response.getHits().getHits())
.map(SearchHit::getSourceAsMap)
.collect(Collectors.toList());
// find distinct _source.timestamp fields
assertThat(sources.stream().map(source -> source.get("timestamp")).distinct().count(), is(1L));
// find distinct _source.source_node fields (which is a map)
assertThat(sources.stream().map(source -> source.get("source_node")).distinct().count(), is(1L));
});
});
assertCheckedResponse(client().prepareSearch(monitoringIndex), response -> {
final SearchHits hits = response.getHits();
assertThat(response.getHits().getTotalHits().value(), equalTo(3L));
Map<String, Object> sourceHit = hits.getHits()[0].getSourceAsMap();
Object ts = extractValue("timestamp", sourceHit);
Object sn_ts = extractValue("source_node.timestamp", sourceHit);
for (int i = 1; i < hits.getHits().length; i++) {
sourceHit = hits.getHits()[i].getSourceAsMap();
assertThat("Monitoring documents must have the same timestamp", extractValue("timestamp", sourceHit), equalTo(ts));
assertThat(
"Monitoring documents must have the same source_node timestamp",
extractValue("source_node.timestamp", sourceHit),
equalTo(sn_ts)
);
}
for (final SearchHit hit : hits.getHits()) {
assertMonitoringDoc(toMap(hit), system, interval);
}
});
});
}
/**
* Monitoring Service test:
*
* This test waits for the monitoring service to collect monitoring documents and then checks that all expected documents
* have been indexed with the expected information.
*/
public void testMonitoringService() throws Exception {
final boolean createAPMIndex = randomBoolean();
final String indexName = createAPMIndex ? "apm-2017.11.06" : "books";
assertThat(
prepareIndex(indexName).setId("0")
.setRefreshPolicy("true")
.setSource("{\"field\":\"value\"}", XContentType.JSON)
.get()
.status(),
is(RestStatus.CREATED)
);
final Settings settings = Settings.builder().put("cluster.metadata.display_name", "my cluster").build();
assertAcked(clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).setTransientSettings(settings));
whenExportersAreReady(() -> {
assertBusy(() -> {
assertCheckedResponse(
client().prepareSearch(".monitoring-es-*")
.setCollapse(new CollapseBuilder("type"))
.addSort("timestamp", SortOrder.DESC),
response -> {
assertThat(response.status(), is(RestStatus.OK));
assertThat(
"Expecting a minimum number of 6 docs, one per collector",
response.getHits().getHits().length,
greaterThanOrEqualTo(6)
);
for (final SearchHit hit : response.getHits()) {
final Map<String, Object> searchHit = toMap(hit);
assertMonitoringDoc(searchHit, MonitoredSystem.ES, MonitoringService.MIN_INTERVAL);
}
}
);
});
});
}
/**
* Asserts that the monitoring document (provided as a Map) contains the common information that
* all monitoring documents must have
*/
@SuppressWarnings("unchecked")
private void assertMonitoringDoc(final Map<String, Object> document, final MonitoredSystem expectedSystem, final TimeValue interval) {
assertEquals(document.toString(), 3, document.size());
final String index = (String) document.get("_index");
assertThat(index, containsString(".monitoring-" + expectedSystem.getSystem() + "-" + TEMPLATE_VERSION + "-"));
assertThat((String) document.get("_id"), is(not(emptyOrNullString())));
final Map<String, Object> source = (Map<String, Object>) document.get("_source");
assertThat(source, notNullValue());
assertThat((String) source.get("cluster_uuid"), is(not(emptyOrNullString())));
final String timestamp = (String) source.get("timestamp");
assertThat(timestamp, is(not(emptyOrNullString())));
assertThat(((Number) source.get("interval_ms")).longValue(), equalTo(interval.getMillis()));
DateFormatter formatter = DateFormatter.forPattern("yyyy.MM.dd");
long isoTimestamp = Instant.from(DateFormatter.forPattern("strict_date_time").parse(timestamp)).toEpochMilli();
String isoDateTime = MonitoringTemplateUtils.indexName(formatter.withZone(ZoneOffset.UTC), expectedSystem, isoTimestamp);
assertThat(index, equalTo(isoDateTime));
final Map<String, Object> sourceNode = (Map<String, Object>) source.get("source_node");
if (sourceNode != null) {
assertMonitoringDocSourceNode(sourceNode);
}
}
/**
* Asserts that the source_node information (provided as a Map) of a monitoring document correspond to
* the current local node information
*/
private void assertMonitoringDocSourceNode(final Map<String, Object> sourceNode) {
assertEquals(6, sourceNode.size());
final NodesInfoResponse nodesResponse = clusterAdmin().prepareNodesInfo().clear().get();
assertEquals(1, nodesResponse.getNodes().size());
final DiscoveryNode node = nodesResponse.getNodes().stream().findFirst().get().getNode();
assertThat(sourceNode.get("uuid"), equalTo(node.getId()));
assertThat(sourceNode.get("host"), equalTo(node.getHostName()));
assertThat(sourceNode.get("transport_address"), equalTo(node.getAddress().toString()));
assertThat(sourceNode.get("ip"), equalTo(node.getAddress().getAddress()));
assertThat(sourceNode.get("name"), equalTo(node.getName()));
assertThat((String) sourceNode.get("timestamp"), is(not(emptyOrNullString())));
}
/**
* Executes the given {@link Runnable} once the monitoring exporters are ready and functional. Ensure that
* the exporters and the monitoring service are shut down after the runnable has been executed.
*/
private void whenExportersAreReady(final CheckedRunnable<Exception> runnable) throws Exception {
try {
try {
enableMonitoring();
} catch (AssertionError e) {
// Added to debug https://github.com/elastic/elasticsearch/issues/29880
// Remove when fixed
StringBuilder b = new StringBuilder();
b.append("\n==== jstack at monitoring enablement failure time ====\n");
for (ThreadInfo ti : ManagementFactory.getThreadMXBean().dumpAllThreads(true, true)) {
append(b, ti);
}
b.append("^^==============================================\n");
logger.info(b.toString());
throw e;
}
runnable.run();
} finally {
disableMonitoring();
}
}
// borrowed from randomized-testing
private static void append(StringBuilder b, ThreadInfo ti) {
b.append('"').append(ti.getThreadName()).append('"');
b.append(" ID=").append(ti.getThreadId());
final State threadState = ti.getThreadState();
b.append(" ").append(threadState);
if (ti.getLockName() != null) {
b.append(" on ").append(ti.getLockName());
}
if (ti.getLockOwnerName() != null) {
b.append(" owned by \"").append(ti.getLockOwnerName()).append("\" ID=").append(ti.getLockOwnerId());
}
b.append(ti.isSuspended() ? " (suspended)" : "");
b.append(ti.isInNative() ? " (in native code)" : "");
b.append("\n");
final StackTraceElement[] stack = ti.getStackTrace();
final LockInfo lockInfo = ti.getLockInfo();
final MonitorInfo[] monitorInfos = ti.getLockedMonitors();
for (int i = 0; i < stack.length; i++) {
b.append("\tat ").append(stack[i]).append("\n");
if (i == 0 && lockInfo != null) {
b.append("\t- ").append(threadState).append(lockInfo).append("\n");
}
for (MonitorInfo mi : monitorInfos) {
if (mi.getLockedStackDepth() == i) {
b.append("\t- locked ").append(mi).append("\n");
}
}
}
LockInfo[] lockInfos = ti.getLockedSynchronizers();
if (lockInfos.length > 0) {
b.append("\tLocked synchronizers:\n");
for (LockInfo li : ti.getLockedSynchronizers()) {
b.append("\t- ").append(li).append("\n");
}
}
b.append("\n");
}
/**
* Enable the monitoring service and the Local exporter, waiting for some monitoring documents
* to be indexed before it returns.
*/
public void enableMonitoring() throws Exception {
// delete anything that may happen to already exist
assertAcked(client().admin().indices().prepareDelete(".monitoring-*"));
assertThat("Must be no enabled exporters before enabling monitoring", getMonitoringUsageExportersDefined(), is(false));
final Settings settings = Settings.builder()
.put("xpack.monitoring.collection.enabled", true)
.put("xpack.monitoring.exporters._local.type", "local")
.put("xpack.monitoring.exporters._local.enabled", true)
.build();
assertAcked(clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).setTransientSettings(settings));
assertBusy(() -> assertThat("[_local] exporter not enabled yet", getMonitoringUsageExportersDefined(), is(true)));
assertBusy(() -> {
// Monitoring uses auto_expand_replicas, so it should be green even without replicas
ensureGreen(".monitoring-es-*");
assertThat(client().admin().indices().prepareRefresh(".monitoring-es-*").get().getStatus(), is(RestStatus.OK));
assertThat(
"No monitoring documents yet",
SearchResponseUtils.getTotalHitsValue(client().prepareSearch(".monitoring-es-" + TEMPLATE_VERSION + "-*").setSize(0)),
greaterThan(0L)
);
}, 30L, TimeUnit.SECONDS);
}
/**
* Disable the monitoring service and the Local exporter.
*/
public void disableMonitoring() throws Exception {
final Settings settings = Settings.builder()
.putNull("xpack.monitoring.collection.enabled")
.putNull("xpack.monitoring.exporters._local.type")
.putNull("xpack.monitoring.exporters._local.enabled")
.putNull("cluster.metadata.display_name")
.build();
assertAcked(clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).setTransientSettings(settings));
assertBusy(() -> assertThat("Exporters are not yet stopped", getMonitoringUsageExportersDefined(), is(false)));
assertBusy(() -> {
try {
// now wait until Monitoring has actually stopped
final NodesStatsResponse response = clusterAdmin().prepareNodesStats().clear().setThreadPool(true).get();
for (final NodeStats nodeStats : response.getNodes()) {
boolean foundBulkThreads = false;
for (final ThreadPoolStats.Stats threadPoolStats : nodeStats.getThreadPool()) {
if (WRITE.equals(threadPoolStats.name())) {
foundBulkThreads = true;
assertThat("Still some active _bulk threads!", threadPoolStats.active(), equalTo(0));
break;
}
}
assertThat("Could not find bulk thread pool", foundBulkThreads, is(true));
}
} catch (Exception e) {
throw new ElasticsearchException("Failed to wait for monitoring exporters to stop:", e);
}
}, 30L, TimeUnit.SECONDS);
}
private boolean getMonitoringUsageExportersDefined() {
final XPackUsageResponse usageResponse = safeGet(
client().execute(XPackUsageAction.INSTANCE, new XPackUsageRequest(SAFE_AWAIT_TIMEOUT))
);
final Optional<MonitoringFeatureSetUsage> monitoringUsage = usageResponse.getUsages()
.stream()
.filter(usage -> usage instanceof MonitoringFeatureSetUsage)
.map(usage -> (MonitoringFeatureSetUsage) usage)
.findFirst();
assertThat("Monitoring feature set does not exist", monitoringUsage.isPresent(), is(true));
return monitoringUsage.get().getExporters().isEmpty() == false;
}
/**
* Returns the {@link SearchHit} content as a {@link Map} object.
*/
private static Map<String, Object> toMap(final ToXContentObject xContentObject) throws IOException {
final XContentType xContentType = XContentType.JSON;
try (XContentBuilder builder = XContentBuilder.builder(xContentType.xContent())) {
xContentObject.toXContent(builder, EMPTY_PARAMS);
final Map<String, Object> map = XContentHelper.convertToMap(xContentType.xContent(), Strings.toString(builder), false);
// remove extraneous fields not actually wanted from the response
map.remove("_score");
map.remove("fields");
map.remove("sort");
return map;
}
}
/**
* Returns a {@link MonitoredSystem} supported by the Monitoring Bulk API
*/
private static MonitoredSystem randomSystem() {
return randomFrom(MonitoredSystem.LOGSTASH, MonitoredSystem.KIBANA, MonitoredSystem.BEATS);
}
}
|
MonitoringIT
|
java
|
apache__commons-lang
|
src/test/java/org/apache/commons/lang3/reflect/ConstructorUtilsTest.java
|
{
"start": 1974,
"end": 2034
}
|
class ____ extends BaseClass {
}
public static
|
SubClass
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/query/count/CountTest.java
|
{
"start": 5044,
"end": 5331
}
|
class ____ {
@Id String isbn;
String title;
@ManyToMany
List<Author> authors;
@ManyToOne
Publisher publisher;
Book(String isbn, String title) {
this.isbn = isbn;
this.title = title;
}
Book() {
}
}
@Entity(name="Author")
@Table(name = "authors")
static
|
Book
|
java
|
alibaba__druid
|
core/src/main/java/com/alibaba/druid/sql/dialect/h2/visitor/H2ASTVisitorAdapter.java
|
{
"start": 734,
"end": 817
}
|
class ____ extends SQLASTVisitorAdapter implements H2ASTVisitor {
}
|
H2ASTVisitorAdapter
|
java
|
apache__kafka
|
streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBTimeOrderedWindowSchemaWithIndexSegmentedBytesStoreTest.java
|
{
"start": 857,
"end": 1091
}
|
class ____ extends AbstractDualSchemaRocksDBSegmentedBytesStoreTest {
@Override
SchemaType schemaType() {
return SchemaType.WindowSchemaWithIndex;
}
}
|
RocksDBTimeOrderedWindowSchemaWithIndexSegmentedBytesStoreTest
|
java
|
apache__rocketmq
|
broker/src/test/java/org/apache/rocketmq/broker/BrokerControllerTest.java
|
{
"start": 1963,
"end": 7810
}
|
class ____ {
private MessageStoreConfig messageStoreConfig;
private BrokerConfig brokerConfig;
private NettyServerConfig nettyServerConfig;
@Before
public void setUp() {
messageStoreConfig = new MessageStoreConfig();
String storePathRootDir = System.getProperty("java.io.tmpdir") + File.separator + "store-"
+ UUID.randomUUID().toString();
messageStoreConfig.setStorePathRootDir(storePathRootDir);
brokerConfig = new BrokerConfig();
nettyServerConfig = new NettyServerConfig();
nettyServerConfig.setListenPort(0);
}
@Test
public void testBrokerRestart() throws Exception {
BrokerController brokerController = new BrokerController(brokerConfig, nettyServerConfig, new NettyClientConfig(), messageStoreConfig);
assertThat(brokerController.initialize()).isTrue();
brokerController.start();
brokerController.shutdown();
}
@Test
public void testBrokerMetricsManagerInitialization() throws Exception {
BrokerController brokerController = new BrokerController(brokerConfig, nettyServerConfig, new NettyClientConfig(), messageStoreConfig);
assertThat(brokerController.initialize()).isTrue();
// Verify that brokerMetricsManager is properly initialized and not null
assertThat(brokerController.getBrokerMetricsManager()).isNotNull();
brokerController.shutdown();
}
@After
public void destroy() {
UtilAll.deleteFile(new File(messageStoreConfig.getStorePathRootDir()));
}
@Test
public void testHeadSlowTimeMills() throws Exception {
BrokerController brokerController = new BrokerController(brokerConfig, nettyServerConfig, new NettyClientConfig(), messageStoreConfig);
brokerController.initialize();
BlockingQueue<Runnable> queue = new LinkedBlockingQueue<>();
//create task is not instance of FutureTaskExt;
Runnable runnable = new Runnable() {
@Override
public void run() {
}
};
RequestTask requestTask = new RequestTask(runnable, null, null);
// the requestTask is not the head of queue;
queue.add(new FutureTaskExt<>(requestTask, null));
long headSlowTimeMills = 100;
TimeUnit.MILLISECONDS.sleep(headSlowTimeMills);
assertThat(brokerController.headSlowTimeMills(queue)).isGreaterThanOrEqualTo(headSlowTimeMills);
}
@Test
public void testCustomRemotingServer() throws CloneNotSupportedException {
final RemotingServer mockRemotingServer = new NettyRemotingServer(nettyServerConfig);
final String mockRemotingServerName = "MOCK_REMOTING_SERVER";
BrokerController brokerController = new BrokerController(brokerConfig, nettyServerConfig, new NettyClientConfig(), messageStoreConfig);
brokerController.setRemotingServerByName(mockRemotingServerName, mockRemotingServer);
brokerController.initializeRemotingServer();
final RPCHook rpcHook = new RPCHook() {
@Override
public void doBeforeRequest(String remoteAddr, RemotingCommand request) {
}
@Override
public void doAfterResponse(String remoteAddr, RemotingCommand request, RemotingCommand response) {
}
};
brokerController.registerServerRPCHook(rpcHook);
// setRequestPipelineTest
final RequestPipeline requestPipeline = (ctx, request) -> {
};
brokerController.setRequestPipeline(requestPipeline);
NettyRemotingAbstract tcpRemotingServer = (NettyRemotingAbstract) brokerController.getRemotingServer();
Assert.assertTrue(tcpRemotingServer.getRPCHook().contains(rpcHook));
NettyRemotingAbstract fastRemotingServer = (NettyRemotingAbstract) brokerController.getFastRemotingServer();
Assert.assertTrue(fastRemotingServer.getRPCHook().contains(rpcHook));
NettyRemotingAbstract mockRemotingServer1 = (NettyRemotingAbstract) brokerController.getRemotingServerByName(mockRemotingServerName);
Assert.assertTrue(mockRemotingServer1.getRPCHook().contains(rpcHook));
Assert.assertSame(mockRemotingServer, mockRemotingServer1);
}
@Test
public void testConfigContextMethods() throws Exception {
// Test ConfigContext setter and getter methods
BrokerController brokerController = new BrokerController(brokerConfig, nettyServerConfig, new NettyClientConfig(), messageStoreConfig);
// Initially, ConfigContext should be null
assertThat(brokerController.getConfigContext()).isNull();
// Create a test ConfigContext
ConfigContext configContext = new ConfigContext.Builder()
.brokerConfig(brokerConfig)
.messageStoreConfig(messageStoreConfig)
.nettyServerConfig(nettyServerConfig)
.nettyClientConfig(new NettyClientConfig())
.authConfig(new AuthConfig())
.build();
// Set the ConfigContext
brokerController.setConfigContext(configContext);
// Verify it was set correctly
assertThat(brokerController.getConfigContext()).isNotNull();
assertThat(brokerController.getConfigContext()).isSameAs(configContext);
assertThat(brokerController.getConfigContext().getBrokerConfig()).isSameAs(brokerConfig);
assertThat(brokerController.getConfigContext().getMessageStoreConfig()).isSameAs(messageStoreConfig);
assertThat(brokerController.getConfigContext().getNettyServerConfig()).isSameAs(nettyServerConfig);
// Test setting null ConfigContext
brokerController.setConfigContext(null);
assertThat(brokerController.getConfigContext()).isNull();
}
}
|
BrokerControllerTest
|
java
|
FasterXML__jackson-core
|
src/test/java/tools/jackson/core/unittest/json/GeneratorFailTest.java
|
{
"start": 459,
"end": 4236
}
|
class ____
extends tools.jackson.core.unittest.JacksonCoreTestBase
{
private final JsonFactory F = newStreamFactory();
// [core#167]: no error for writing field name twice
@Test
void dupFieldNameWrites() throws Exception
{
_testDupFieldNameWrites(F, false);
_testDupFieldNameWrites(F, true);
}
// [core#177]
// Also: should not try writing JSON String if field name expected
// (in future maybe take one as alias... but not yet)
@Test
void failOnWritingStringNotFieldNameBytes() throws Exception {
_testFailOnWritingStringNotFieldName(F, false);
}
// [core#177]
@Test
void failOnWritingStringNotFieldNameChars() throws Exception {
_testFailOnWritingStringNotFieldName(F, true);
}
// for [core#282]
@Test
void failOnWritingFieldNameInRoot() throws Exception {
_testFailOnWritingFieldNameInRoot(F, false);
_testFailOnWritingFieldNameInRoot(F, true);
}
/*
/**********************************************************
/* Internal methods
/**********************************************************
*/
private void _testDupFieldNameWrites(JsonFactory f, boolean useReader) throws IOException
{
JsonGenerator gen;
ByteArrayOutputStream bout = new ByteArrayOutputStream();
if (useReader) {
gen = f.createGenerator(ObjectWriteContext.empty(), new OutputStreamWriter(bout, "UTF-8"));
} else {
gen = f.createGenerator(ObjectWriteContext.empty(), bout, JsonEncoding.UTF8);
}
gen.writeStartObject();
gen.writeName("a");
try {
gen.writeName("b");
gen.flush();
String json = utf8String(bout);
fail("Should not have let two consecutive property name writes succeed: output = "+json);
} catch (StreamWriteException e) {
verifyException(e, "Cannot write a property name, expecting a value");
}
gen.close();
}
private void _testFailOnWritingStringNotFieldName(JsonFactory f, boolean useReader) throws IOException
{
JsonGenerator gen;
ByteArrayOutputStream bout = new ByteArrayOutputStream();
if (useReader) {
gen = f.createGenerator(ObjectWriteContext.empty(), new OutputStreamWriter(bout, "UTF-8"));
} else {
gen = f.createGenerator(ObjectWriteContext.empty(), bout, JsonEncoding.UTF8);
}
gen.writeStartObject();
try {
gen.writeString("a");
gen.flush();
String json = utf8String(bout);
fail("Should not have let "+gen.getClass().getName()+".writeString() be used in place of 'writeName()': output = "+json);
} catch (StreamWriteException e) {
verifyException(e, "Cannot write a String");
}
gen.close();
}
// for [core#282]
private void _testFailOnWritingFieldNameInRoot(JsonFactory f, boolean useReader) throws IOException
{
JsonGenerator gen;
ByteArrayOutputStream bout = new ByteArrayOutputStream();
if (useReader) {
gen = f.createGenerator(ObjectWriteContext.empty(), new OutputStreamWriter(bout, "UTF-8"));
} else {
gen = f.createGenerator(ObjectWriteContext.empty(), bout, JsonEncoding.UTF8);
}
try {
gen.writeName("a");
gen.flush();
String json = utf8String(bout);
fail("Should not have let "+gen.getClass().getName()+".writeName() be used in root context: output = "+json);
} catch (StreamWriteException e) {
verifyException(e, "Cannot write a property name");
}
gen.close();
}
}
|
GeneratorFailTest
|
java
|
hibernate__hibernate-orm
|
hibernate-envers/src/main/java/org/hibernate/envers/strategy/internal/ValidityAuditStrategy.java
|
{
"start": 27678,
"end": 28953
}
|
class ____ implements QueryParameterBinding {
private final ModelPart modelPart;
private final Object value;
public QueryParameterBindingPart(Object value, ModelPart modelPart) {
this.value = value;
this.modelPart = modelPart;
}
@Override
public int bind(
int index,
PreparedStatement statement,
SharedSessionContractImplementor session) {
try {
return modelPart.breakDownJdbcValues(
value,
index,
statement,
session,
(valueIndex, preparedStatement, sessionImplementor, jdbcValue, jdbcValueMapping) -> {
try {
//noinspection unchecked
jdbcValueMapping.getJdbcMapping().getJdbcValueBinder().bind(
preparedStatement,
jdbcValue,
valueIndex,
sessionImplementor
);
}
catch (SQLException e) {
throw new NestedRuntimeException( e );
}
},
session
);
}
catch (NestedRuntimeException e) {
throw session.getJdbcServices().getSqlExceptionHelper().convert(
(SQLException) e.getCause(),
String.format(
Locale.ROOT,
"Error binding JDBC value relative to `%s`",
modelPart.getNavigableRole().getFullPath()
)
);
}
}
static
|
QueryParameterBindingPart
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/index/mapper/vectors/RankVectorsScriptDocValues.java
|
{
"start": 2233,
"end": 2469
}
|
interface ____ extends Supplier<BytesRef> {
@Override
default BytesRef getInternal(int index) {
throw new UnsupportedOperationException();
}
RankVectors getInternal();
}
}
|
RankVectorsSupplier
|
java
|
elastic__elasticsearch
|
x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/SqlClearCursorActionIT.java
|
{
"start": 789,
"end": 4168
}
|
class ____ extends AbstractSqlIntegTestCase {
public void testSqlClearCursorAction() {
assertAcked(indicesAdmin().prepareCreate("test").get());
BulkRequestBuilder bulkRequestBuilder = client().prepareBulk();
int indexSize = randomIntBetween(100, 300);
logger.info("Indexing {} records", indexSize);
for (int i = 0; i < indexSize; i++) {
bulkRequestBuilder.add(new IndexRequest("test").id("id" + i).source("data", "bar", "count", i));
}
bulkRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get();
ensureYellow("test");
assertEquals(0, getNumberOfSearchContexts());
int fetchSize = randomIntBetween(5, 20);
logger.info("Fetching {} records at a time", fetchSize);
SqlQueryResponse sqlQueryResponse = new SqlQueryRequestBuilder(client()).query("SELECT * FROM test").fetchSize(fetchSize).get();
assertEquals(fetchSize, sqlQueryResponse.size());
assertThat(getNumberOfSearchContexts(), greaterThan(0L));
assertThat(sqlQueryResponse.cursor(), notNullValue());
assertThat(sqlQueryResponse.cursor(), not(equalTo(Cursor.EMPTY)));
SqlClearCursorResponse cleanCursorResponse = new SqlClearCursorRequestBuilder(client()).cursor(sqlQueryResponse.cursor()).get();
assertTrue(cleanCursorResponse.isSucceeded());
assertEquals(0, getNumberOfSearchContexts());
}
public void testAutoCursorCleanup() {
assertAcked(indicesAdmin().prepareCreate("test").get());
BulkRequestBuilder bulkRequestBuilder = client().prepareBulk();
int indexSize = randomIntBetween(100, 300);
logger.info("Indexing {} records", indexSize);
for (int i = 0; i < indexSize; i++) {
bulkRequestBuilder.add(new IndexRequest("test").id("id" + i).source("data", "bar", "count", i));
}
bulkRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get();
ensureYellow("test");
assertEquals(0, getNumberOfSearchContexts());
int fetchSize = randomIntBetween(5, 20);
logger.info("Fetching {} records at a time", fetchSize);
SqlQueryResponse sqlQueryResponse = new SqlQueryRequestBuilder(client()).query("SELECT * FROM test").fetchSize(fetchSize).get();
assertEquals(fetchSize, sqlQueryResponse.size());
assertThat(getNumberOfSearchContexts(), greaterThan(0L));
assertThat(sqlQueryResponse.cursor(), notNullValue());
assertThat(sqlQueryResponse.cursor(), not(equalTo(Cursor.EMPTY)));
long fetched = sqlQueryResponse.size();
do {
sqlQueryResponse = new SqlQueryRequestBuilder(client()).cursor(sqlQueryResponse.cursor()).get();
fetched += sqlQueryResponse.size();
} while (sqlQueryResponse.cursor().isEmpty() == false);
assertEquals(indexSize, fetched);
SqlClearCursorResponse cleanCursorResponse = new SqlClearCursorRequestBuilder(client()).cursor(sqlQueryResponse.cursor()).get();
assertFalse(cleanCursorResponse.isSucceeded());
assertEquals(0, getNumberOfSearchContexts());
}
private long getNumberOfSearchContexts() {
return indicesAdmin().prepareStats("test").clear().setSearch(true).get().getIndex("test").getTotal().getSearch().getOpenContexts();
}
}
|
SqlClearCursorActionIT
|
java
|
quarkusio__quarkus
|
devtools/cli/src/main/java/io/quarkus/cli/ProjectExtensions.java
|
{
"start": 617,
"end": 1425
}
|
class ____ implements Callable<Integer> {
@CommandLine.Mixin(name = "output")
protected OutputOptionMixin output;
@CommandLine.Spec
protected CommandLine.Model.CommandSpec spec;
@Unmatched // avoids throwing errors for unmatched arguments
List<String> unmatchedArgs;
@Override
public Integer call() throws Exception {
output.info("Listing extensions (default action, see --help).");
ParseResult result = spec.commandLine().getParseResult();
List<String> args = result.originalArgs().stream().filter(x -> !"extension".equals(x) && !"ext".equals(x))
.collect(Collectors.toList());
CommandLine listCommand = spec.subcommands().get("list");
return listCommand.execute(args.toArray(new String[0]));
}
}
|
ProjectExtensions
|
java
|
ReactiveX__RxJava
|
src/main/java/io/reactivex/rxjava3/internal/operators/single/SingleDelayWithPublisher.java
|
{
"start": 1554,
"end": 3201
}
|
class ____<T, U>
extends AtomicReference<Disposable>
implements FlowableSubscriber<U>, Disposable {
private static final long serialVersionUID = -8565274649390031272L;
final SingleObserver<? super T> downstream;
final SingleSource<T> source;
boolean done;
Subscription upstream;
OtherSubscriber(SingleObserver<? super T> actual, SingleSource<T> source) {
this.downstream = actual;
this.source = source;
}
@Override
public void onSubscribe(Subscription s) {
if (SubscriptionHelper.validate(this.upstream, s)) {
this.upstream = s;
downstream.onSubscribe(this);
s.request(Long.MAX_VALUE);
}
}
@Override
public void onNext(U value) {
upstream.cancel();
onComplete();
}
@Override
public void onError(Throwable e) {
if (done) {
RxJavaPlugins.onError(e);
return;
}
done = true;
downstream.onError(e);
}
@Override
public void onComplete() {
if (done) {
return;
}
done = true;
source.subscribe(new ResumeSingleObserver<>(this, downstream));
}
@Override
public void dispose() {
upstream.cancel();
DisposableHelper.dispose(this);
}
@Override
public boolean isDisposed() {
return DisposableHelper.isDisposed(get());
}
}
}
|
OtherSubscriber
|
java
|
apache__maven
|
impl/maven-core/src/test/java/org/apache/maven/configuration/internal/CompositeBeanHelperPerformanceTest.java
|
{
"start": 14376,
"end": 15531
}
|
class ____ {
private String name;
private String description;
private int count;
private List<String> items = new ArrayList<>();
private boolean enabled;
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
public int getCount() {
return count;
}
public void setCount(int count) {
this.count = count;
}
public List<String> getItems() {
return items;
}
public void addItem(String item) {
this.items.add(item);
}
public boolean isEnabled() {
return enabled;
}
public void setEnabled(boolean enabled) {
this.enabled = enabled;
}
}
/**
* A more realistic test bean that simulates typical mojo parameters
*/
public static
|
TestBean
|
java
|
micronaut-projects__micronaut-core
|
core/src/main/java/io/micronaut/core/io/service/SoftServiceLoader.java
|
{
"start": 12476,
"end": 13088
}
|
interface ____<S> {
void collect(Collection<S> values);
default void collect(Collection<S> values, boolean allowFork) {
collect(values);
}
default void collect(Consumer<? super S> consumer) {
List<S> values = new ArrayList<>();
collect(values);
values.forEach(e -> {
if (e != null) {
consumer.accept(e);
}
});
}
}
/**
* Service loader that uses {@link StaticDefinition}.
*
* @param <S> The service type
*/
public
|
ServiceCollector
|
java
|
apache__avro
|
lang/java/protobuf/src/test/java/org/apache/avro/protobuf/noopt/Test.java
|
{
"start": 121061,
"end": 130259
}
|
class ____ extends com.google.protobuf.GeneratedMessage.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:org.apache.avro.protobuf.noopt.M)
org.apache.avro.protobuf.noopt.Test.MOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
return org.apache.avro.protobuf.noopt.Test.internal_static_org_apache_avro_protobuf_noopt_M_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() {
return org.apache.avro.protobuf.noopt.Test.internal_static_org_apache_avro_protobuf_noopt_M_fieldAccessorTable
.ensureFieldAccessorsInitialized(org.apache.avro.protobuf.noopt.Test.M.class,
org.apache.avro.protobuf.noopt.Test.M.Builder.class);
}
// Construct using org.apache.avro.protobuf.noopt.Test.M.newBuilder()
private Builder() {
}
private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
}
@java.lang.Override
public Builder clear() {
super.clear();
return this;
}
@java.lang.Override
public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() {
return org.apache.avro.protobuf.noopt.Test.internal_static_org_apache_avro_protobuf_noopt_M_descriptor;
}
@java.lang.Override
public org.apache.avro.protobuf.noopt.Test.M getDefaultInstanceForType() {
return org.apache.avro.protobuf.noopt.Test.M.getDefaultInstance();
}
@java.lang.Override
public org.apache.avro.protobuf.noopt.Test.M build() {
org.apache.avro.protobuf.noopt.Test.M result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.avro.protobuf.noopt.Test.M buildPartial() {
org.apache.avro.protobuf.noopt.Test.M result = new org.apache.avro.protobuf.noopt.Test.M(this);
onBuilt();
return result;
}
@java.lang.Override
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.avro.protobuf.noopt.Test.M) {
return mergeFrom((org.apache.avro.protobuf.noopt.Test.M) other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.avro.protobuf.noopt.Test.M other) {
if (other == org.apache.avro.protobuf.noopt.Test.M.getDefaultInstance())
return this;
this.mergeUnknownFields(other.getUnknownFields());
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
return true;
}
@java.lang.Override
public Builder mergeFrom(com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException {
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!super.parseUnknownField(input, extensionRegistry, tag)) {
done = true; // was an endgroup tag
}
break;
} // default:
} // switch (tag)
} // while (!done)
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.unwrapIOException();
} finally {
onChanged();
} // finally
return this;
}
// @@protoc_insertion_point(builder_scope:org.apache.avro.protobuf.noopt.M)
}
// @@protoc_insertion_point(class_scope:org.apache.avro.protobuf.noopt.M)
private static final org.apache.avro.protobuf.noopt.Test.M DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.avro.protobuf.noopt.Test.M();
}
public static org.apache.avro.protobuf.noopt.Test.M getDefaultInstance() {
return DEFAULT_INSTANCE;
}
private static final com.google.protobuf.Parser<M> PARSER = new com.google.protobuf.AbstractParser<M>() {
@java.lang.Override
public M parsePartialFrom(com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
Builder builder = newBuilder();
try {
builder.mergeFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(builder.buildPartial());
} catch (com.google.protobuf.UninitializedMessageException e) {
throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(builder.buildPartial());
}
return builder.buildPartial();
}
};
public static com.google.protobuf.Parser<M> parser() {
return PARSER;
}
@java.lang.Override
public com.google.protobuf.Parser<M> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.avro.protobuf.noopt.Test.M getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
private static final com.google.protobuf.Descriptors.Descriptor internal_static_org_apache_avro_protobuf_noopt_Foo_descriptor;
private static final com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_org_apache_avro_protobuf_noopt_Foo_fieldAccessorTable;
private static final com.google.protobuf.Descriptors.Descriptor internal_static_org_apache_avro_protobuf_noopt_M_descriptor;
private static final com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_org_apache_avro_protobuf_noopt_M_fieldAccessorTable;
public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
return descriptor;
}
private static com.google.protobuf.Descriptors.FileDescriptor descriptor;
static {
java.lang.String[] descriptorData = { "\n\034src/test/protobuf/test.proto\022\036org.apac"
+ "he.avro.protobuf.noopt\032\037google/protobuf/"
+ "timestamp.proto\"\204\004\n\003Foo\022\r\n\005int32\030\001 \002(\005\022\r"
+ "\n\005int64\030\002 \001(\003\022\016\n\006uint32\030\003 \001(\r\022\016\n\006uint64\030"
+ "\004 \001(\004\022\016\n\006sint32\030\005 \001(\021\022\016\n\006sint64\030\006 \001(\022\022\017\n"
+ "\007fixed32\030\007 \001(\007\022\017\n\007fixed64\030\010 \001(\006\022\020\n\010sfixe"
+ "d32\030\t \001(\017\022\020\n\010sfixed64\030\n \001(\020\022\r\n\005float\030\013 \001"
+ "(\002\022\016\n\006double\030\014 \001(\001\022\014\n\004bool\030\r \001(\010\022\016\n\006stri"
+ "ng\030\016 \001(\t\022\r\n\005bytes\030\017 \001(\014\0222\n\004enum\030\020 \001(\0162!."
+ "org.apache.avro.protobuf.noopt.A:\001Z\022\020\n\010i"
+ "ntArray\030\021 \003(\005\0225\n\010fooArray\030\024 \003(\0132#.org.ap"
+ "ache.avro.protobuf.noopt.Foo\022/\n\004syms\030\023 \003" + "(\0162!.org.apache.avro.protobuf.noopt.A\0220\n"
+ "\003foo\030\022 \001(\0132#.org.apache.avro.protobuf.no"
+ "opt.Foo\022-\n\ttimestamp\030\025 \001(\0132\032.google.prot"
+ "obuf.Timestamp\"\017\n\001M\"\n\n\001N\022\005\n\001A\020\001*\030\n\001A\022\005\n\001"
+ "X\020\001\022\005\n\001Y\020\002\022\005\n\001Z\020\003" };
descriptor = com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom(descriptorData,
new com.google.protobuf.Descriptors.FileDescriptor[] { com.google.protobuf.TimestampProto.getDescriptor(), });
internal_static_org_apache_avro_protobuf_noopt_Foo_descriptor = getDescriptor().getMessageTypes().get(0);
internal_static_org_apache_avro_protobuf_noopt_Foo_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_avro_protobuf_noopt_Foo_descriptor,
new java.lang.String[] { "Int32", "Int64", "Uint32", "Uint64", "Sint32", "Sint64", "Fixed32", "Fixed64",
"Sfixed32", "Sfixed64", "Float", "Double", "Bool", "String", "Bytes", "Enum", "IntArray", "FooArray",
"Syms", "Foo", "Timestamp", });
internal_static_org_apache_avro_protobuf_noopt_M_descriptor = getDescriptor().getMessageTypes().get(1);
internal_static_org_apache_avro_protobuf_noopt_M_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_avro_protobuf_noopt_M_descriptor, new java.lang.String[] {});
descriptor.resolveAllFeaturesImmutable();
com.google.protobuf.TimestampProto.getDescriptor();
}
// @@protoc_insertion_point(outer_class_scope)
}
|
Builder
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/script/Metadata.java
|
{
"start": 10530,
"end": 10678
}
|
enum ____ {
INIT,
UPDATE,
REMOVE
}
/**
* The properties of a metadata field.
* @param type - the
|
MapOperation
|
java
|
lettuce-io__lettuce-core
|
src/test/java/io/lettuce/core/support/BoundedAsyncPoolUnitTests.java
|
{
"start": 612,
"end": 12684
}
|
class ____ {
private AtomicInteger counter = new AtomicInteger();
private List<String> destroyed = new ArrayList<>();
private AsyncObjectFactory<String> STRING_OBJECT_FACTORY = new AsyncObjectFactory<String>() {
@Override
public CompletableFuture<String> create() {
return CompletableFuture.completedFuture(counter.incrementAndGet() + "");
}
@Override
public CompletableFuture<Void> destroy(String object) {
destroyed.add(object);
return CompletableFuture.completedFuture(null);
}
@Override
public CompletableFuture<Boolean> validate(String object) {
return CompletableFuture.completedFuture(true);
}
};
@Test
void shouldCreateObject() {
BoundedAsyncPool<String> pool = new BoundedAsyncPool<>(STRING_OBJECT_FACTORY, BoundedPoolConfig.create());
String object = TestFutures.getOrTimeout(pool.acquire());
assertThat(pool.getIdle()).isEqualTo(0);
assertThat(object).isEqualTo("1");
}
@Test
void shouldCreatePoolAsync() {
CompletionStage<BoundedAsyncPool<String>> pool = BoundedAsyncPool.create(STRING_OBJECT_FACTORY,
BoundedPoolConfig.builder().minIdle(5).build());
pool.toCompletableFuture().join();
assertThat(counter).hasValue(5);
}
@Test
void failedAsyncCreationShouldCleanUpResources() {
AtomicInteger cleanups = new AtomicInteger();
AtomicInteger creations = new AtomicInteger();
CompletionStage<BoundedAsyncPool<String>> pool = BoundedAsyncPool.create(new AsyncObjectFactory<String>() {
@Override
public CompletableFuture<String> create() {
if (creations.incrementAndGet() == 1) {
return io.lettuce.core.internal.Futures.failed(new IllegalStateException());
}
return CompletableFuture.completedFuture("ok");
}
@Override
public CompletableFuture<Void> destroy(String object) {
cleanups.incrementAndGet();
return CompletableFuture.completedFuture(null);
}
@Override
public CompletableFuture<Boolean> validate(String object) {
return null;
}
}, BoundedPoolConfig.builder().minIdle(5).build());
assertThatExceptionOfType(CompletionException.class).isThrownBy(pool.toCompletableFuture()::join)
.withCauseInstanceOf(RedisException.class).withRootCauseInstanceOf(IllegalStateException.class);
assertThat(cleanups).hasValue(4);
}
@Test
void shouldCreateMinIdleObject() {
BoundedAsyncPool<String> pool = new BoundedAsyncPool<>(STRING_OBJECT_FACTORY,
BoundedPoolConfig.builder().minIdle(2).build());
assertThat(pool.getIdle()).isEqualTo(2);
assertThat(pool.getObjectCount()).isEqualTo(2);
}
@Test
void shouldCreateMaintainMinIdleObject() {
BoundedAsyncPool<String> pool = new BoundedAsyncPool<>(STRING_OBJECT_FACTORY,
BoundedPoolConfig.builder().minIdle(2).build());
TestFutures.awaitOrTimeout(pool.acquire());
assertThat(pool.getIdle()).isEqualTo(2);
assertThat(pool.getObjectCount()).isEqualTo(3);
}
@Test
void shouldCreateMaintainMinMaxIdleObject() {
BoundedAsyncPool<String> pool = new BoundedAsyncPool<>(STRING_OBJECT_FACTORY,
BoundedPoolConfig.builder().minIdle(2).maxTotal(2).build());
TestFutures.awaitOrTimeout(pool.acquire());
assertThat(pool.getIdle()).isEqualTo(1);
assertThat(pool.getObjectCount()).isEqualTo(2);
}
@Test
void shouldCreateUnboundedPool() {
BoundedAsyncPool<String> pool = new BoundedAsyncPool<>(STRING_OBJECT_FACTORY,
BoundedPoolConfig.builder().maxTotal(-1).build());
TestFutures.awaitOrTimeout(pool.acquire());
assertThat(pool.getObjectCount()).isEqualTo(1);
}
@Test
void shouldReturnObject() {
BoundedAsyncPool<String> pool = new BoundedAsyncPool<>(STRING_OBJECT_FACTORY, BoundedPoolConfig.create());
String object = TestFutures.getOrTimeout(pool.acquire());
assertThat(pool.getObjectCount()).isEqualTo(1);
pool.release(object);
assertThat(pool.getIdle()).isEqualTo(1);
}
@Test
void shouldReuseObjects() {
BoundedAsyncPool<String> pool = new BoundedAsyncPool<>(STRING_OBJECT_FACTORY, BoundedPoolConfig.create());
pool.release(TestFutures.getOrTimeout(pool.acquire()));
assertThat(TestFutures.getOrTimeout(pool.acquire())).isEqualTo("1");
assertThat(pool.getIdle()).isEqualTo(0);
}
@Test
void shouldDestroyIdle() {
BoundedAsyncPool<String> pool = new BoundedAsyncPool<>(STRING_OBJECT_FACTORY,
BoundedPoolConfig.builder().maxIdle(2).maxTotal(5).build());
List<String> objects = new ArrayList<>();
for (int i = 0; i < 3; i++) {
objects.add(TestFutures.getOrTimeout(pool.acquire()));
}
for (int i = 0; i < 2; i++) {
pool.release(objects.get(i));
}
assertThat(pool.getIdle()).isEqualTo(2);
pool.release(objects.get(2));
assertThat(pool.getIdle()).isEqualTo(2);
assertThat(pool.getObjectCount()).isEqualTo(2);
assertThat(destroyed).containsOnly("3");
}
@Test
void shouldExhaustPool() {
BoundedAsyncPool<String> pool = new BoundedAsyncPool<>(STRING_OBJECT_FACTORY,
BoundedPoolConfig.builder().maxTotal(4).build());
String object1 = TestFutures.getOrTimeout(pool.acquire());
String object2 = TestFutures.getOrTimeout(pool.acquire());
String object3 = TestFutures.getOrTimeout(pool.acquire());
String object4 = TestFutures.getOrTimeout(pool.acquire());
assertThat(pool.getIdle()).isZero();
assertThat(pool.getObjectCount()).isEqualTo(4);
assertThat(pool.acquire()).isCompletedExceptionally();
assertThat(pool.getIdle()).isZero();
assertThat(pool.getObjectCount()).isEqualTo(4);
pool.release(object1);
pool.release(object2);
pool.release(object3);
pool.release(object4);
assertThat(pool.getIdle()).isEqualTo(4);
assertThat(pool.getObjectCount()).isEqualTo(4);
}
@Test
void shouldClearPool() {
BoundedAsyncPool<String> pool = new BoundedAsyncPool<>(STRING_OBJECT_FACTORY,
BoundedPoolConfig.builder().maxTotal(4).build());
for (int i = 0; i < 20; i++) {
String object1 = TestFutures.getOrTimeout(pool.acquire());
String object2 = TestFutures.getOrTimeout(pool.acquire());
String object3 = TestFutures.getOrTimeout(pool.acquire());
String object4 = TestFutures.getOrTimeout(pool.acquire());
assertThat(pool.acquire()).isCompletedExceptionally();
pool.release(object1);
pool.release(object2);
pool.release(object3);
pool.release(object4);
pool.clear();
assertThat(pool.getObjectCount()).isZero();
assertThat(pool.getIdle()).isZero();
}
}
@Test
void shouldExhaustPoolConcurrent() {
List<CompletableFuture<String>> progress = new ArrayList<>();
AsyncObjectFactory<String> IN_PROGRESS = new AsyncObjectFactory<String>() {
@Override
public CompletableFuture<String> create() {
CompletableFuture<String> future = new CompletableFuture<>();
progress.add(future);
return future;
}
@Override
public CompletableFuture<Void> destroy(String object) {
destroyed.add(object);
return CompletableFuture.completedFuture(null);
}
@Override
public CompletableFuture<Boolean> validate(String object) {
return CompletableFuture.completedFuture(true);
}
};
BoundedAsyncPool<String> pool = new BoundedAsyncPool<>(IN_PROGRESS, BoundedPoolConfig.builder().maxTotal(4).build());
CompletableFuture<String> object1 = pool.acquire();
CompletableFuture<String> object2 = pool.acquire();
CompletableFuture<String> object3 = pool.acquire();
CompletableFuture<String> object4 = pool.acquire();
CompletableFuture<String> object5 = pool.acquire();
assertThat(pool.getIdle()).isZero();
assertThat(pool.getObjectCount()).isZero();
assertThat(pool.getCreationInProgress()).isEqualTo(4);
assertThat(object5).isCompletedExceptionally();
progress.forEach(it -> it.complete("foo"));
assertThat(pool.getIdle()).isZero();
assertThat(pool.getObjectCount()).isEqualTo(4);
assertThat(pool.getCreationInProgress()).isZero();
}
@Test
void shouldConcurrentlyFail() {
List<CompletableFuture<String>> progress = new ArrayList<>();
AsyncObjectFactory<String> IN_PROGRESS = new AsyncObjectFactory<String>() {
@Override
public CompletableFuture<String> create() {
CompletableFuture<String> future = new CompletableFuture<>();
progress.add(future);
return future;
}
@Override
public CompletableFuture<Void> destroy(String object) {
destroyed.add(object);
return CompletableFuture.completedFuture(null);
}
@Override
public CompletableFuture<Boolean> validate(String object) {
return CompletableFuture.completedFuture(true);
}
};
BoundedAsyncPool<String> pool = new BoundedAsyncPool<>(IN_PROGRESS, BoundedPoolConfig.builder().maxTotal(4).build());
CompletableFuture<String> object1 = pool.acquire();
CompletableFuture<String> object2 = pool.acquire();
CompletableFuture<String> object3 = pool.acquire();
CompletableFuture<String> object4 = pool.acquire();
progress.forEach(it -> it.completeExceptionally(new IllegalStateException()));
assertThat(object1).isCompletedExceptionally();
assertThat(object2).isCompletedExceptionally();
assertThat(object3).isCompletedExceptionally();
assertThat(object4).isCompletedExceptionally();
assertThat(pool.getIdle()).isZero();
assertThat(pool.getObjectCount()).isZero();
assertThat(pool.getCreationInProgress()).isZero();
}
@Test
void cancelShouldReturnObjectToPool() {
List<CompletableFuture<String>> progress = new ArrayList<>();
AsyncObjectFactory<String> IN_PROGRESS = new AsyncObjectFactory<String>() {
@Override
public CompletableFuture<String> create() {
CompletableFuture<String> future = new CompletableFuture<>();
progress.add(future);
return future;
}
@Override
public CompletableFuture<Void> destroy(String object) {
destroyed.add(object);
return CompletableFuture.completedFuture(null);
}
@Override
public CompletableFuture<Boolean> validate(String object) {
return CompletableFuture.completedFuture(true);
}
};
BoundedAsyncPool<String> pool = new BoundedAsyncPool<>(IN_PROGRESS,
BoundedPoolConfig.builder().maxTotal(1).maxIdle(0).build());
CompletableFuture<String> acquire = pool.acquire();
assertThat(acquire).isNotCompleted();
acquire.cancel(false);
assertThat(acquire).isCancelled();
progress.get(0).complete("after-cancel");
assertThat(destroyed).contains("after-cancel");
}
}
|
BoundedAsyncPoolUnitTests
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/ConstraintPlacementAlgorithm.java
|
{
"start": 971,
"end": 1094
}
|
interface ____ a Constraint Placement. The only contract is that it
* should be initialized with the RMContext.
*/
public
|
for
|
java
|
mockito__mockito
|
mockito-core/src/main/java/org/mockito/internal/matchers/ContainsExtraTypeInfo.java
|
{
"start": 614,
"end": 1094
}
|
class ____ be printed in description
* Returns more verbose description of the object which include type information
*/
String toStringWithType(String className);
/**
* Checks if target target has matching type.
* If the type matches, there is no point in rendering result from {@link #toStringWithType(String)}
*/
boolean typeMatches(Object target);
/**
*
* @return Returns the wanted argument
*/
Object getWanted();
}
|
to
|
java
|
apache__logging-log4j2
|
log4j-api/src/main/java/org/apache/logging/log4j/util/PropertySource.java
|
{
"start": 1321,
"end": 3256
}
|
interface ____ {
/**
* Returns the order in which this PropertySource has priority. A higher value means that the source will be
* searched later and can be overridden by other property sources.
*
* @return priority value
*/
int getPriority();
/**
* Iterates over all properties and performs an action for each key/value pair.
*
* @param action action to perform on each key/value pair
*/
default void forEach(final BiConsumer<String, String> action) {}
/**
* Returns the list of all property names.
*
* @return list of property names
* @since 2.18.0
*/
default Collection<String> getPropertyNames() {
return Collections.emptySet();
}
/**
* Converts a list of property name tokens into a normal form. For example, a list of tokens such as
* "foo", "bar", "baz", might be normalized into the property name "log4j2.fooBarBaz".
*
* @param tokens list of property name tokens
* @return a normalized property name using the given tokens
*/
default CharSequence getNormalForm(final Iterable<? extends CharSequence> tokens) {
return null;
}
/**
* For PropertySources that cannot iterate over all the potential properties this provides a direct lookup.
* @param key The key to search for.
* @return The value or null;
* @since 2.13.0
*/
default String getProperty(final String key) {
return null;
}
/**
* For PropertySources that cannot iterate over all the potential properties this provides a direct lookup.
* @param key The key to search for.
* @return The value or null;
* @since 2.13.0
*/
default boolean containsProperty(final String key) {
return false;
}
/**
* Comparator for ordering PropertySource instances by priority.
*
* @since 2.10.0
*/
|
PropertySource
|
java
|
netty__netty
|
example/src/main/java/io/netty/example/http/websocketx/client/WebSocketClient.java
|
{
"start": 2664,
"end": 6350
}
|
class ____ {
static final String URL = System.getProperty("url", "ws://127.0.0.1:8080/websocket");
static final int MAX_CONTENT_LENGTH = 8192;
public static void main(String[] args) throws Exception {
URI uri = new URI(URL);
String scheme = uri.getScheme() == null? "ws" : uri.getScheme();
final String host = uri.getHost() == null? "127.0.0.1" : uri.getHost();
final int port;
if (uri.getPort() == -1) {
if ("ws".equalsIgnoreCase(scheme)) {
port = 80;
} else if ("wss".equalsIgnoreCase(scheme)) {
port = 443;
} else {
port = -1;
}
} else {
port = uri.getPort();
}
if (!"ws".equalsIgnoreCase(scheme) && !"wss".equalsIgnoreCase(scheme)) {
System.err.println("Only WS(S) is supported.");
return;
}
final boolean ssl = "wss".equalsIgnoreCase(scheme);
final SslContext sslCtx;
if (ssl) {
sslCtx = SslContextBuilder.forClient()
.trustManager(InsecureTrustManagerFactory.INSTANCE).build();
} else {
sslCtx = null;
}
EventLoopGroup group = new MultiThreadIoEventLoopGroup(NioIoHandler.newFactory());
try {
// Connect with V13 (RFC 6455 aka HyBi-17). You can change it to V08 or V00.
// If you change it to V00, ping is not supported and remember to change
// HttpResponseDecoder to WebSocketHttpResponseDecoder in the pipeline.
final WebSocketClientHandler handler =
new WebSocketClientHandler(
WebSocketClientHandshakerFactory.newHandshaker(
uri, WebSocketVersion.V13, null, true, new DefaultHttpHeaders()));
Bootstrap b = new Bootstrap();
b.group(group)
.channel(NioSocketChannel.class)
.handler(new ChannelInitializer<SocketChannel>() {
@Override
protected void initChannel(SocketChannel ch) {
ChannelPipeline p = ch.pipeline();
if (sslCtx != null) {
p.addLast(sslCtx.newHandler(ch.alloc(), host, port));
}
p.addLast(
new HttpClientCodec(),
new HttpObjectAggregator(MAX_CONTENT_LENGTH),
new WebSocketClientCompressionHandler(MAX_CONTENT_LENGTH),
handler);
}
});
Channel ch = b.connect(uri.getHost(), port).sync().channel();
handler.handshakeFuture().sync();
BufferedReader console = new BufferedReader(new InputStreamReader(System.in));
while (true) {
String msg = console.readLine();
if (msg == null) {
break;
} else if ("bye".equals(msg.toLowerCase())) {
ch.writeAndFlush(new CloseWebSocketFrame());
ch.closeFuture().sync();
break;
} else if ("ping".equals(msg.toLowerCase())) {
WebSocketFrame frame = new PingWebSocketFrame(Unpooled.wrappedBuffer(new byte[] { 8, 1, 8, 1 }));
ch.writeAndFlush(frame);
} else {
WebSocketFrame frame = new TextWebSocketFrame(msg);
ch.writeAndFlush(frame);
}
}
} finally {
group.shutdownGracefully();
}
}
}
|
WebSocketClient
|
java
|
spring-projects__spring-framework
|
spring-messaging/src/main/java/org/springframework/messaging/converter/StringMessageConverter.java
|
{
"start": 1157,
"end": 2553
}
|
class ____ extends AbstractMessageConverter {
private final Charset defaultCharset;
public StringMessageConverter() {
this(StandardCharsets.UTF_8);
}
public StringMessageConverter(Charset defaultCharset) {
super(new MimeType("text", "plain", defaultCharset));
Assert.notNull(defaultCharset, "Default Charset must not be null");
this.defaultCharset = defaultCharset;
}
@Override
protected boolean supports(Class<?> clazz) {
return (String.class == clazz);
}
@Override
protected Object convertFromInternal(Message<?> message, Class<?> targetClass, @Nullable Object conversionHint) {
Charset charset = getContentTypeCharset(getMimeType(message.getHeaders()));
Object payload = message.getPayload();
return (payload instanceof String ? payload : new String((byte[]) payload, charset));
}
@Override
protected @Nullable Object convertToInternal(
Object payload, @Nullable MessageHeaders headers, @Nullable Object conversionHint) {
if (byte[].class == getSerializedPayloadClass()) {
Charset charset = getContentTypeCharset(getMimeType(headers));
payload = ((String) payload).getBytes(charset);
}
return payload;
}
private Charset getContentTypeCharset(@Nullable MimeType mimeType) {
if (mimeType != null && mimeType.getCharset() != null) {
return mimeType.getCharset();
}
else {
return this.defaultCharset;
}
}
}
|
StringMessageConverter
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/health/metadata/HealthMetadata.java
|
{
"start": 4860,
"end": 8518
}
|
class ____ {
private Disk disk;
private ShardLimits shardLimits;
private Builder(HealthMetadata healthMetadata) {
this.disk = healthMetadata.diskMetadata;
this.shardLimits = healthMetadata.shardLimitsMetadata;
}
public Builder disk(Disk disk) {
this.disk = disk;
return this;
}
public Builder shardLimits(ShardLimits shardLimits) {
this.shardLimits = shardLimits;
return this;
}
public HealthMetadata build() {
return new HealthMetadata(disk, shardLimits);
}
}
/**
* Contains the thresholds needed to determine the health of a cluster when it comes to the amount of room available to create new
* shards. These values are determined by the elected master.
*/
public record ShardLimits(
int maxShardsPerNode,
int maxShardsPerNodeFrozen,
int shardCapacityUnhealthyThresholdYellow,
int shardCapacityUnhealthyThresholdRed
) implements ToXContentFragment, Writeable {
private static final String TYPE = "shard_limits";
private static final ParseField MAX_SHARDS_PER_NODE = new ParseField("max_shards_per_node");
private static final ParseField MAX_SHARDS_PER_NODE_FROZEN = new ParseField("max_shards_per_node_frozen");
private static final ParseField SHARD_CAPACITY_UNHEALTHY_THRESHOLD_YELLOW_FIELD = new ParseField(
"shard_capacity_unhealthy_threshold_yellow"
);
private static final ParseField SHARD_CAPACITY_UNHEALTHY_THRESHOLD_RED_FIELD = new ParseField(
"shard_capacity_unhealthy_threshold_red"
);
static final TransportVersion VERSION_SUPPORTING_SHARD_LIMIT_FIELDS = TransportVersions.V_8_8_0;
static final TransportVersion VERSION_SHARD_CAPACITY_UNHEALTH_THRESHOLDS = TransportVersion.fromName(
"shard_capacity_unhealthy_thresholds"
);
static ShardLimits readFrom(StreamInput in) throws IOException {
return in.getTransportVersion().supports(VERSION_SHARD_CAPACITY_UNHEALTH_THRESHOLDS)
? new ShardLimits(in.readInt(), in.readInt(), in.readInt(), in.readInt())
// defaults from older versions
: new ShardLimits(in.readInt(), in.readInt(), 10, 5);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.field(MAX_SHARDS_PER_NODE.getPreferredName(), maxShardsPerNode);
builder.field(MAX_SHARDS_PER_NODE_FROZEN.getPreferredName(), maxShardsPerNodeFrozen);
builder.field(SHARD_CAPACITY_UNHEALTHY_THRESHOLD_YELLOW_FIELD.getPreferredName(), shardCapacityUnhealthyThresholdYellow);
builder.field(SHARD_CAPACITY_UNHEALTHY_THRESHOLD_RED_FIELD.getPreferredName(), shardCapacityUnhealthyThresholdRed);
return builder;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeInt(maxShardsPerNode);
out.writeInt(maxShardsPerNodeFrozen);
if (out.getTransportVersion().supports(VERSION_SHARD_CAPACITY_UNHEALTH_THRESHOLDS)) {
out.writeInt(shardCapacityUnhealthyThresholdYellow);
out.writeInt(shardCapacityUnhealthyThresholdRed);
}
}
public static Builder newBuilder() {
return new Builder();
}
public static Builder newBuilder(ShardLimits shardLimits) {
return new Builder(shardLimits);
}
public static
|
Builder
|
java
|
google__dagger
|
javatests/dagger/internal/codegen/ProductionComponentProcessorTest.java
|
{
"start": 5216,
"end": 5827
}
|
class ____ {",
" @Produces String str(@Production Executor executor) {",
" return \"\";",
" }",
"}");
Source componentFile =
CompilerTests.javaSource(
"test.SimpleComponent",
"package test;",
"",
"import com.google.common.util.concurrent.ListenableFuture;",
"import dagger.producers.ProductionComponent;",
"import java.util.concurrent.Executor;",
"",
"@ProductionComponent(modules = {ExecutorModule.class, SimpleModule.class})",
"
|
SimpleModule
|
java
|
redisson__redisson
|
redisson/src/main/java/org/redisson/RedissonLiveObjectService.java
|
{
"start": 39188,
"end": 39772
}
|
class ____ declaration.");
}
FieldDescription.InDefinedShape idFieldDescription = fieldsWithRIdAnnotation.getOnly();
String idFieldName = idFieldDescription.getName();
Field idField = null;
try {
idField = ClassUtils.getDeclaredField(entityClass, idFieldName);
} catch (Exception e) {
throw new IllegalStateException(e);
}
if (ClassUtils.isAnnotationPresent(idField.getType(), REntity.class)) {
throw new IllegalArgumentException("Field with RId annotation cannot be a type of which
|
field
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/dialect/SpannerDialect.java
|
{
"start": 29281,
"end": 29851
}
|
class ____ implements LockingStrategy {
@Override
public void lock(
Object id, Object version, Object object, int timeout, SharedSessionContractImplementor session)
throws StaleObjectStateException, LockingStrategyException {
// Do nothing. Cloud Spanner doesn't have have locking strategies.
}
}
/**
* A no-op delegate for generating Unique-Constraints. Cloud Spanner offers unique-restrictions
* via interleaved indexes with the "UNIQUE" option. This is not currently supported.
*
* @author Chengyuan Zhao
*/
static
|
DoNothingLockingStrategy
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/serializer/DupSetterTest2.java
|
{
"start": 517,
"end": 842
}
|
class ____ {
private Integer status;
public Integer getStatus() {
return status;
}
public void setStatus(Integer status) {
this.status = status;
}
public void setStatus(String status) {
throw new IllegalStateException();
}
}
}
|
VO
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/AnnotationPositionTest.java
|
{
"start": 9071,
"end": 9278
}
|
class ____ {}
""")
.doTest(TEXT_MATCH);
}
@Test
public void betweenModifiers() {
refactoringHelper
.addInputLines(
"Test.java",
"""
|
Test
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/jpa/criteria/CriteriaWithDynamicInstantiationAndOrderByTest.java
|
{
"start": 9289,
"end": 9584
}
|
class ____ {
private Long a;
private String b;
private String c;
public R3(Long a, String b, String c) {
this.a = a;
this.b = b;
this.c = c;
}
public Long getA() {
return a;
}
public String getB() {
return b;
}
public String getC() {
return c;
}
}
}
|
R3
|
java
|
dropwizard__dropwizard
|
dropwizard-testing/src/test/java/io/dropwizard/testing/junit5/AbstractDropwizardAppExtensionTest.java
|
{
"start": 458,
"end": 3020
}
|
class ____ {
@Test
void canGetExpectedResourceOverHttp() {
final String content = JerseyClientBuilder.createClient().target(
"http://localhost:" + getExtension().getLocalPort() + "/test").request().get(String.class);
assertThat(content).isEqualTo("Yes, it's here");
}
@Test
void returnsConfiguration() {
final TestConfiguration config = getExtension().getConfiguration();
assertThat(config.getMessage()).isEqualTo("Yes, it's here");
}
@Test
void returnsApplication() {
assertThat(getExtension().<DropwizardTestApplication>getApplication())
.isNotNull();
}
@Test
void returnsEnvironment() {
final Environment environment = getExtension().getEnvironment();
assertThat(environment.getName()).isEqualTo("DropwizardTestApplication");
}
@Test
void canPerformAdminTask() {
final String response
= getExtension().client().target("http://localhost:"
+ getExtension().getAdminPort() + "/tasks/hello?name=test_user")
.request()
.post(Entity.entity("", MediaType.TEXT_PLAIN), String.class);
assertThat(response).isEqualTo("Hello has been said to test_user");
}
@Test
void canPerformAdminTaskWithPostBody() {
final String response = getExtension().client()
.target("http://localhost:" + getExtension().getAdminPort() + "/tasks/echo")
.request()
.post(Entity.entity("Custom message", MediaType.TEXT_PLAIN), String.class);
assertThat(response).isEqualTo("Custom message");
}
@Test
void clientUsesJacksonMapperFromEnvironment() {
final Optional<String> message = getExtension().client()
.target("http://localhost:" + getExtension().getLocalPort() + "/message")
.request()
.get(DropwizardTestApplication.MessageView.class)
.getMessage();
assertThat(message)
.hasValue("Yes, it's here");
}
@Test
void clientSupportsPatchMethod() {
final String method = getExtension().client()
.target("http://localhost:" + getExtension().getLocalPort() + "/echoPatch")
.request()
.method("PATCH", Entity.text("Patch is working"), String.class);
assertThat(method).isEqualTo("Patch is working");
}
abstract DropwizardAppExtension<TestConfiguration> getExtension();
}
|
AbstractDropwizardAppExtensionTest
|
java
|
spring-cloud__spring-cloud-gateway
|
spring-cloud-gateway-server-webflux/src/main/java/org/springframework/cloud/gateway/config/VersionProperties.java
|
{
"start": 1125,
"end": 4099
}
|
class ____ {
/** The defaultVersion. */
private String defaultVersion;
/**
* Flag whether to use API versions that appear in mappings for supported version
* validation (true), or use only explicitly configured versions (false). Defaults to
* true.
*/
private boolean detectSupportedVersions = true;
/** The header name used to extract the API Version. */
private String headerName;
/** The media type name used to extract the API Version. */
private MediaType mediaType;
/** The media type parameter name used to extract the API Version. */
private String mediaTypeParamName;
/** The index of a path segment used to extract the API Version. */
private Integer pathSegment;
/** The request parameter name used to extract the API Version. */
private String requestParamName;
private boolean required;
private List<String> supportedVersions = new ArrayList<>();
public String getDefaultVersion() {
return defaultVersion;
}
public void setDefaultVersion(String defaultVersion) {
this.defaultVersion = defaultVersion;
}
public boolean isDetectSupportedVersions() {
return detectSupportedVersions;
}
public void setDetectSupportedVersions(boolean detectSupportedVersions) {
this.detectSupportedVersions = detectSupportedVersions;
}
public String getHeaderName() {
return headerName;
}
public void setHeaderName(String headerName) {
this.headerName = headerName;
}
public MediaType getMediaType() {
return mediaType;
}
public void setMediaType(MediaType mediaType) {
this.mediaType = mediaType;
}
public String getMediaTypeParamName() {
return mediaTypeParamName;
}
public void setMediaTypeParamName(String mediaTypeParamName) {
this.mediaTypeParamName = mediaTypeParamName;
}
public Integer getPathSegment() {
return pathSegment;
}
public void setPathSegment(Integer pathSegment) {
this.pathSegment = pathSegment;
}
public String getRequestParamName() {
return requestParamName;
}
public void setRequestParamName(String requestParamName) {
this.requestParamName = requestParamName;
}
public boolean isRequired() {
return required;
}
public void setRequired(boolean required) {
this.required = required;
}
public List<String> getSupportedVersions() {
return supportedVersions;
}
public void setSupportedVersions(List<String> supportedVersions) {
this.supportedVersions = supportedVersions;
}
@Override
public String toString() {
// @formatter:off
return new ToStringCreator(this)
.append("defaultVersion", defaultVersion)
.append("detectSupportedVersions", detectSupportedVersions)
.append("headerName", headerName)
.append("mediaType", mediaType)
.append("mediaTypeParamName", mediaTypeParamName)
.append("pathSegment", pathSegment)
.append("requestParamName", requestParamName)
.append("required", required)
.append("supportedVersions", supportedVersions)
.toString();
// @formatter:on
}
}
|
VersionProperties
|
java
|
elastic__elasticsearch
|
modules/lang-painless/src/internalClusterTest/java/org/elasticsearch/painless/action/CrossClusterPainlessExecuteIT.java
|
{
"start": 2037,
"end": 8567
}
|
class ____ extends AbstractMultiClustersTestCase {
private static final String REMOTE_CLUSTER = "cluster_a";
private static final String LOCAL_INDEX = "local_idx";
private static final String REMOTE_INDEX = "remote_idx";
private static final String KEYWORD_FIELD = "my_field";
@Override
protected List<String> remoteClusterAlias() {
return List.of(REMOTE_CLUSTER);
}
@Override
protected boolean reuseClusters() {
return false;
}
@Override
protected Collection<Class<? extends Plugin>> nodePlugins(String clusterAlias) {
List<Class<? extends Plugin>> plugs = Arrays.asList(PainlessPlugin.class);
return Stream.concat(super.nodePlugins(clusterAlias).stream(), plugs.stream()).collect(Collectors.toList());
}
public void testPainlessExecuteAgainstLocalCluster() throws Exception {
setupTwoClusters();
Script script = new Script(
ScriptType.INLINE,
Script.DEFAULT_SCRIPT_LANG,
Strings.format("doc['%s'].value.length() <= params.max_length", KEYWORD_FIELD),
Map.of("max_length", 4)
);
ScriptContext<?> context = FilterScript.CONTEXT;
PainlessExecuteAction.Request.ContextSetup contextSetup = createContextSetup(LOCAL_INDEX);
PainlessExecuteAction.Request request = new PainlessExecuteAction.Request(script, context.name, contextSetup);
ActionFuture<PainlessExecuteAction.Response> actionFuture = client(LOCAL_CLUSTER).admin()
.cluster()
.execute(PainlessExecuteAction.INSTANCE, request);
PainlessExecuteAction.Response response = actionFuture.actionGet();
Object result = response.getResult();
assertThat(result, Matchers.instanceOf(Boolean.class));
assertTrue((Boolean) result);
}
/**
* Query the local cluster to run the execute actions against the 'cluster_a:remote_idx' index.
* There is no local index with the REMOTE_INDEX name, so it has to do a cross-cluster action for this to work
*/
public void testPainlessExecuteAsCrossClusterAction() throws Exception {
setupTwoClusters();
Script script = new Script(
ScriptType.INLINE,
Script.DEFAULT_SCRIPT_LANG,
Strings.format("doc['%s'].value.length() <= params.max_length", KEYWORD_FIELD),
Map.of("max_length", 4)
);
ScriptContext<?> context = FilterScript.CONTEXT;
PainlessExecuteAction.Request.ContextSetup contextSetup = createContextSetup(REMOTE_CLUSTER + ":" + REMOTE_INDEX);
PainlessExecuteAction.Request request = new PainlessExecuteAction.Request(script, context.name, contextSetup);
ActionFuture<PainlessExecuteAction.Response> actionFuture = client(LOCAL_CLUSTER).admin()
.cluster()
.execute(PainlessExecuteAction.INSTANCE, request);
PainlessExecuteAction.Response response = actionFuture.actionGet();
Object result = response.getResult();
assertThat(result, Matchers.instanceOf(Boolean.class));
assertTrue((Boolean) result);
}
private static PainlessExecuteAction.Request.ContextSetup createContextSetup(String index) {
QueryBuilder query = new MatchAllQueryBuilder();
BytesReference doc;
XContentType xContentType = XContentType.JSON.canonical();
try {
XContentBuilder xContentBuilder = XContentBuilder.builder(xContentType.xContent());
xContentBuilder.startObject();
xContentBuilder.field(KEYWORD_FIELD, "four");
xContentBuilder.endObject();
doc = BytesReference.bytes(xContentBuilder);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
PainlessExecuteAction.Request.ContextSetup contextSetup = new PainlessExecuteAction.Request.ContextSetup(index, doc, query);
contextSetup.setXContentType(XContentType.JSON);
return contextSetup;
}
private void setupTwoClusters() throws Exception {
assertAcked(client(LOCAL_CLUSTER).admin().indices().prepareCreate(LOCAL_INDEX).setMapping(KEYWORD_FIELD, "type=keyword"));
indexDocs(client(LOCAL_CLUSTER), LOCAL_INDEX);
final InternalTestCluster remoteCluster = cluster(REMOTE_CLUSTER);
remoteCluster.ensureAtLeastNumDataNodes(1);
final Settings.Builder allocationFilter = Settings.builder();
if (randomBoolean()) {
remoteCluster.ensureAtLeastNumDataNodes(3);
List<String> remoteDataNodes = remoteCluster.clusterService()
.state()
.nodes()
.stream()
.filter(DiscoveryNode::canContainData)
.map(DiscoveryNode::getName)
.toList();
assertThat(remoteDataNodes.size(), Matchers.greaterThanOrEqualTo(3));
List<String> seedNodes = randomSubsetOf(between(1, remoteDataNodes.size() - 1), remoteDataNodes);
disconnectFromRemoteClusters();
configureRemoteCluster(REMOTE_CLUSTER, seedNodes);
if (randomBoolean()) {
// Using proxy connections
allocationFilter.put("index.routing.allocation.exclude._name", String.join(",", seedNodes));
} else {
allocationFilter.put("index.routing.allocation.include._name", String.join(",", seedNodes));
}
}
assertAcked(
client(REMOTE_CLUSTER).admin()
.indices()
.prepareCreate(REMOTE_INDEX)
.setMapping(KEYWORD_FIELD, "type=keyword")
.setSettings(Settings.builder().put(allocationFilter.build()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0))
);
assertFalse(
client(REMOTE_CLUSTER).admin()
.cluster()
.prepareHealth(TEST_REQUEST_TIMEOUT, REMOTE_INDEX)
.setWaitForYellowStatus()
.setTimeout(TimeValue.timeValueSeconds(10))
.get()
.isTimedOut()
);
indexDocs(client(REMOTE_CLUSTER), REMOTE_INDEX);
}
private int indexDocs(Client client, String index) {
int numDocs = between(1, 10);
for (int i = 0; i < numDocs; i++) {
client.prepareIndex(index).setSource(KEYWORD_FIELD, "my_value").get();
}
client.admin().indices().prepareRefresh(index).get();
return numDocs;
}
}
|
CrossClusterPainlessExecuteIT
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.