language stringclasses 1
value | repo stringclasses 60
values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/FieldCanBeStaticTest.java | {
"start": 5802,
"end": 6192
} | class ____ {
// BUG: Diagnostic contains:
private final int primitive = 3;
// BUG: Diagnostic contains:
private final String string = "string";
}
""")
.doTest();
}
@Test
public void negative() {
compilationHelper
.addSourceLines(
"Test.java",
"""
| Test |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest/deployment/src/test/java/io/quarkus/resteasy/reactive/server/test/multipart/MultipartInputTest.java | {
"start": 943,
"end": 10853
} | class ____ extends AbstractMultipartTest {
private static final Path uploadDir = Paths.get("file-uploads");
@RegisterExtension
static QuarkusUnitTest test = new QuarkusUnitTest()
.setArchiveProducer(new Supplier<>() {
@Override
public JavaArchive get() {
return ShrinkWrap.create(JavaArchive.class)
.addClasses(FormDataBase.class, OtherPackageFormDataBase.class, FormData.class, Status.class,
OtherFormData.class, FormDataSameFileName.class,
OtherFormDataBase.class,
MultipartResource.class, OtherMultipartResource.class)
.addAsResource(new StringAsset(
// keep the files around so we can assert the outcome
"quarkus.http.body.delete-uploaded-files-on-end=false\nquarkus.http.body.uploads-directory="
+ uploadDir.toString() + "\n"),
"application.properties");
}
});
private final File HTML_FILE = new File("./src/test/resources/test.html");
private final File HTML_FILE2 = new File("./src/test/resources/test2.html");
private final File XML_FILE = new File("./src/test/resources/test.html");
private final File TXT_FILE = new File("./src/test/resources/lorem.txt");
private final String TXT = "lorem ipsum";
private final String XML = "<note></note>";
private final String HTML = "<!DOCTYPE html><html></html>";
@BeforeEach
public void assertEmptyUploads() {
Assertions.assertTrue(isDirectoryEmpty(uploadDir));
}
@AfterEach
public void clearDirectory() {
clearDirectory(uploadDir);
}
@Test
public void testSimple() {
RestAssured.given()
.multiPart("name", "Alice")
.multiPart("active", "true")
.multiPart("num", "25")
.multiPart("status", "WORKING")
.multiPart("htmlFile", HTML, "text/html")
.multiPart("xmlFile", XML, "text/xml")
.multiPart("txtFile", TXT, "text/plain")
.accept("text/plain")
.when()
.post("/multipart/simple/2")
.then()
.statusCode(200)
.body(equalTo("Alice - true - 50 - WORKING - true - true - true"));
// ensure that the 3 uploaded files where created on disk
Assertions.assertEquals(3, uploadDir.toFile().listFiles().length);
// same with text as file
RestAssured.given()
.multiPart("name", "something.txt", "Alice".getBytes())
.multiPart("active", "true")
.multiPart("num", "25")
.multiPart("status", "WORKING")
.multiPart("htmlFile", HTML, "text/html")
.multiPart("xmlFile", XML, "text/xml")
.multiPart("txtFile", TXT, "text/plain")
.accept("text/plain")
.when()
.post("/multipart/simple/2")
.then()
.statusCode(200)
.body(equalTo("Alice - true - 50 - WORKING - true - true - true"));
}
@Test
public void testSimpleImplicit() {
RestAssured.given()
.multiPart("name", "Alice")
.multiPart("active", "true")
.multiPart("num", "25")
.multiPart("status", "WORKING")
.multiPart("htmlFile", HTML_FILE, "text/html")
.multiPart("xmlFile", XML_FILE, "text/xml")
.multiPart("txtFile", TXT_FILE, "text/plain")
.accept("text/plain")
.when()
.post("/multipart/implicit/simple/2")
.then()
.statusCode(200)
.body(equalTo("Alice - true - 50 - WORKING - text/html - true - true"));
// ensure that the 3 uploaded files where created on disk
Assertions.assertEquals(3, uploadDir.toFile().listFiles().length);
}
@Test
public void testSimpleParam() {
RestAssured.given()
.multiPart("name", "Alice")
.multiPart("active", "true")
.multiPart("num", "25")
.multiPart("status", "WORKING")
.multiPart("htmlFile", HTML, "text/html")
.multiPart("xmlFile", XML, "text/xml")
.multiPart("txtFile", TXT, "text/plain")
.accept("text/plain")
.when()
.post("/multipart/param/simple/2")
.then()
.statusCode(200)
.body(equalTo("Alice - true - 50 - WORKING - true - true - true"));
// ensure that the 3 uploaded files where created on disk
Assertions.assertEquals(3, uploadDir.toFile().listFiles().length);
// same with text as file
RestAssured.given()
.multiPart("name", "something.txt", "Alice".getBytes())
.multiPart("active", "true")
.multiPart("num", "25")
.multiPart("status", "WORKING")
.multiPart("htmlFile", HTML, "text/html")
.multiPart("xmlFile", XML, "text/xml")
.multiPart("txtFile", TXT, "text/plain")
.accept("text/plain")
.when()
.post("/multipart/param/simple/2")
.then()
.statusCode(200)
.body(equalTo("Alice - true - 50 - WORKING - true - true - true"));
}
@Test
public void testBlocking() throws IOException {
RestAssured.given()
.multiPart("name", "Trudy")
.multiPart("num", "20")
.multiPart("status", "SLEEPING")
.multiPart("htmlFile", HTML_FILE, "text/html")
.multiPart("xmlFile", XML_FILE, "text/xml")
.multiPart("txtFile", TXT_FILE, "text/plain")
.accept("text/plain")
.when()
.post("/multipart/blocking?times=2")
.then()
.statusCode(200)
.body(equalTo("Trudy - 40 - SLEEPING"))
.header("html-size", equalTo(fileSizeAsStr(HTML_FILE)))
// test that file was actually upload and that the web application isn't sharing the file with the test...
.header("html-path", not(equalTo(filePath(HTML_FILE))))
.header("xml-size", equalTo(fileSizeAsStr(XML_FILE)))
.header("xml-path", not(equalTo(filePath(XML_FILE))))
.header("txt-size", equalTo(fileSizeAsStr(TXT_FILE)))
.header("txt-path", not(equalTo(filePath(TXT_FILE))));
// ensure that the 3 uploaded files where created on disk
Assertions.assertEquals(3, uploadDir.toFile().listFiles().length);
}
@Test
public void testOther() {
RestAssured.given()
.multiPart("first", "foo")
.multiPart("last", "bar")
.accept("text/plain")
.when()
.post("/otherMultipart/simple")
.then()
.statusCode(200)
.body(equalTo("foo - bar - final - static"));
Assertions.assertEquals(0, uploadDir.toFile().listFiles().length);
}
@Test
public void testSameName() {
RestAssured.given()
.multiPart("active", "false")
.multiPart("status", "EATING")
.multiPart("htmlFile", HTML_FILE, "text/html")
.multiPart("htmlFile", HTML_FILE2, "text/html")
.multiPart("xmlFile", XML_FILE, "text/xml")
.multiPart("txtFile", TXT_FILE, "text/plain")
.accept("text/plain")
.when()
.post("/multipart/same-name")
.then()
.statusCode(200)
.body(equalTo("EATING - 2 - 1 - 1"));
// ensure that the 3 uploaded files where created on disk
Assertions.assertEquals(4, uploadDir.toFile().listFiles().length);
}
@Test
public void testSameNameParam() {
RestAssured.given()
.multiPart("active", "false")
.multiPart("status", "EATING")
.multiPart("htmlFile", HTML_FILE, "text/html")
.multiPart("htmlFile", HTML_FILE2, "text/html")
.multiPart("xmlFile", XML_FILE, "text/xml")
.multiPart("txtFile", TXT_FILE, "text/plain")
.accept("text/plain")
.when()
.post("/multipart/param/same-name")
.then()
.statusCode(200)
.body(equalTo("EATING - 2 - 1 - 1"));
// ensure that the 3 uploaded files where created on disk
Assertions.assertEquals(4, uploadDir.toFile().listFiles().length);
}
@Test
public void testExtraHeader() {
MultiPartSpecification formPart = new MultiPartSpecBuilder(TXT_FILE)
.header("extra-header", "extra-value")
.mimeType("text/plain")
.build();
RestAssured.given()
.multiPart(formPart)
.accept("text/plain")
.when()
.post("/multipart/extra-header")
.then()
.statusCode(200)
.body(equalTo("extra-value"));
// ensure that the 3 uploaded files where created on disk
Assertions.assertEquals(1, uploadDir.toFile().listFiles().length);
}
private String filePath(File file) {
return file.toPath().toAbsolutePath().toString();
}
}
| MultipartInputTest |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/embeddable/naming/EmbeddedColumnNamingNestedTests.java | {
"start": 2742,
"end": 2933
} | class ____ {
private String street;
private String city;
private String state;
@Embedded
@EmbeddedColumnNaming( "zip_%s" )
private ZipPlus zip;
}
@Embeddable
public static | Address |
java | apache__kafka | connect/runtime/src/test/java/org/apache/kafka/connect/runtime/standalone/StandaloneConfigTest.java | {
"start": 1251,
"end": 3892
} | class ____ {
private static final String HTTPS_LISTENER_PREFIX = "listeners.https.";
private Map<String, Object> sslProps() {
return new HashMap<>() {
{
put(SslConfigs.SSL_KEY_PASSWORD_CONFIG, new Password("ssl_key_password"));
put(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, "ssl_keystore");
put(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, new Password("ssl_keystore_password"));
put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, "ssl_truststore");
put(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, new Password("ssl_truststore_password"));
}
};
}
private Map<String, String> baseWorkerProps() {
return new HashMap<>() {
{
put(WorkerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
put(WorkerConfig.KEY_CONVERTER_CLASS_CONFIG, "org.apache.kafka.connect.json.JsonConverter");
put(WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG, "org.apache.kafka.connect.json.JsonConverter");
put(StandaloneConfig.OFFSET_STORAGE_FILE_FILENAME_CONFIG, "/tmp/foo");
}
};
}
private static Map<String, String> withStringValues(Map<String, ?> inputs, String prefix) {
return ConfigDef.convertToStringMapWithPasswordValues(inputs).entrySet().stream()
.collect(Collectors.toMap(
entry -> prefix + entry.getKey(),
Map.Entry::getValue
));
}
@Test
public void testRestServerPrefixedSslConfigs() {
Map<String, String> workerProps = baseWorkerProps();
Map<String, Object> expectedSslProps = sslProps();
workerProps.putAll(withStringValues(expectedSslProps, HTTPS_LISTENER_PREFIX));
StandaloneConfig config = new StandaloneConfig(workerProps);
assertEquals(expectedSslProps, config.valuesWithPrefixAllOrNothing(HTTPS_LISTENER_PREFIX));
}
@Test
public void testRestServerNonPrefixedSslConfigs() {
Map<String, String> props = baseWorkerProps();
Map<String, Object> expectedSslProps = sslProps();
props.putAll(withStringValues(expectedSslProps, ""));
StandaloneConfig config = new StandaloneConfig(props);
Map<String, Object> actualProps = config.valuesWithPrefixAllOrNothing(HTTPS_LISTENER_PREFIX)
.entrySet().stream()
.filter(entry -> expectedSslProps.containsKey(entry.getKey()))
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
assertEquals(expectedSslProps, actualProps);
}
}
| StandaloneConfigTest |
java | apache__camel | components/camel-clickup/src/test/java/org/apache/camel/component/clickup/util/ClickUpMockRoutes.java | {
"start": 1253,
"end": 2539
} | class ____ extends RouteBuilder {
private static final Logger LOG = LoggerFactory.getLogger(ClickUpMockRoutes.class);
private final int port;
private final List<MockProcessor<?>> mocks = new ArrayList<>();
public ClickUpMockRoutes(int port) {
this.port = port;
}
public ClickUpMockRoutes addEndpoint(
String path, String method, boolean pathExactMatch, Class<?> returnType,
MockProcessorResponseBodyProvider mockProcessorResponseBodyProvider) {
this.mocks.add(new MockProcessor<>(method, path, pathExactMatch, returnType, mockProcessorResponseBodyProvider));
return this;
}
public ClickUpMockRoutes addErrorEndpoint(String path, String method, boolean pathExactMatch, int errorCode) {
this.mocks.add(new MockProcessor<>(method, path, pathExactMatch, errorCode));
return this;
}
@Override
public void configure() {
mocks.forEach(processor -> from(
"netty-http:http://localhost:" + port + "/clickup-api-mock/" + processor.path + "?httpMethodRestrict="
+ processor.method + (processor.pathExactMatch ? "" : "&matchOnUriPrefix=true"))
.process(processor));
}
public static | ClickUpMockRoutes |
java | quarkusio__quarkus | extensions/spring-data-jpa/runtime/src/main/java/io/quarkus/spring/data/runtime/RepositorySupport.java | {
"start": 409,
"end": 1404
} | class ____ {
private RepositorySupport() {
}
public static List<?> findByIds(AbstractJpaOperations<PanacheQuery<?>> operations, Class<?> entityClass,
Iterable<?> ids) {
Objects.requireNonNull(ids);
List<Object> result = new ArrayList<>();
ids.forEach(result::add);
// Hibernate's findMultiple also returns null elements for non-found ids. we filter out null values here to stay consistent with the previous behavior.
return operations.findByIds(entityClass, new ArrayList<>(result))
.stream()
.filter(Objects::nonNull)
.toList();
}
public static void deleteAll(AbstractJpaOperations<PanacheQuery<?>> operations, Iterable<?> entities) {
for (Object entity : entities) {
operations.delete(entity);
}
}
/**
* Add call to the Panache method implementing the actual retrieving of a reference to an entity with the given | RepositorySupport |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/argumentselectiondefects/ParameterTest.java | {
"start": 1462,
"end": 1785
} | class ____ {
/**
* A {@link BugChecker} that prints whether the type of first argument is assignable to the type
* of the second one.
*/
@BugPattern(
severity = SeverityLevel.ERROR,
summary = "Print whether the type of the first argument is assignable to the second one")
public static | ParameterTest |
java | grpc__grpc-java | alts/src/test/java/io/grpc/alts/internal/AltsProtocolNegotiatorTest.java | {
"start": 19366,
"end": 20236
} | class ____ implements TsiFrameProtector {
private final TsiFrameProtector delegate;
final AtomicInteger flushes = new AtomicInteger();
InterceptingProtector(TsiFrameProtector delegate) {
this.delegate = delegate;
}
@Override
public void protectFlush(
List<ByteBuf> unprotectedBufs, Consumer<ByteBuf> ctxWrite, ByteBufAllocator alloc)
throws GeneralSecurityException {
flushes.incrementAndGet();
delegate.protectFlush(unprotectedBufs, ctxWrite, alloc);
}
@Override
public void unprotect(ByteBuf in, List<Object> out, ByteBufAllocator alloc)
throws GeneralSecurityException {
delegate.unprotect(in, out, alloc);
}
@Override
public void destroy() {
delegate.destroy();
}
}
/** Kicks off negotiation of the server. */
private static final | InterceptingProtector |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/DebeziumOracleEndpointBuilderFactory.java | {
"start": 144366,
"end": 150096
} | interface ____
extends
EndpointConsumerBuilder {
default DebeziumOracleEndpointBuilder basic() {
return (DebeziumOracleEndpointBuilder) this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions (if possible) occurred while the Camel
* consumer is trying to pickup incoming messages, or the likes, will
* now be processed as a message and handled by the routing Error
* Handler. Important: This is only possible if the 3rd party component
* allows Camel to be alerted if an exception was thrown. Some
* components handle this internally only, and therefore
* bridgeErrorHandler is not possible. In other situations we may
* improve the Camel component to hook into the 3rd party component and
* make this possible for future releases. By default the consumer will
* use the org.apache.camel.spi.ExceptionHandler to deal with
* exceptions, that will be logged at WARN or ERROR level and ignored.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer (advanced)
*
* @param bridgeErrorHandler the value to set
* @return the dsl builder
*/
default AdvancedDebeziumOracleEndpointBuilder bridgeErrorHandler(boolean bridgeErrorHandler) {
doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions (if possible) occurred while the Camel
* consumer is trying to pickup incoming messages, or the likes, will
* now be processed as a message and handled by the routing Error
* Handler. Important: This is only possible if the 3rd party component
* allows Camel to be alerted if an exception was thrown. Some
* components handle this internally only, and therefore
* bridgeErrorHandler is not possible. In other situations we may
* improve the Camel component to hook into the 3rd party component and
* make this possible for future releases. By default the consumer will
* use the org.apache.camel.spi.ExceptionHandler to deal with
* exceptions, that will be logged at WARN or ERROR level and ignored.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: consumer (advanced)
*
* @param bridgeErrorHandler the value to set
* @return the dsl builder
*/
default AdvancedDebeziumOracleEndpointBuilder bridgeErrorHandler(String bridgeErrorHandler) {
doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* To let the consumer use a custom ExceptionHandler. Notice if the
* option bridgeErrorHandler is enabled then this option is not in use.
* By default the consumer will deal with exceptions, that will be
* logged at WARN or ERROR level and ignored.
*
* The option is a: <code>org.apache.camel.spi.ExceptionHandler</code>
* type.
*
* Group: consumer (advanced)
*
* @param exceptionHandler the value to set
* @return the dsl builder
*/
default AdvancedDebeziumOracleEndpointBuilder exceptionHandler(org.apache.camel.spi.ExceptionHandler exceptionHandler) {
doSetProperty("exceptionHandler", exceptionHandler);
return this;
}
/**
* To let the consumer use a custom ExceptionHandler. Notice if the
* option bridgeErrorHandler is enabled then this option is not in use.
* By default the consumer will deal with exceptions, that will be
* logged at WARN or ERROR level and ignored.
*
* The option will be converted to a
* <code>org.apache.camel.spi.ExceptionHandler</code> type.
*
* Group: consumer (advanced)
*
* @param exceptionHandler the value to set
* @return the dsl builder
*/
default AdvancedDebeziumOracleEndpointBuilder exceptionHandler(String exceptionHandler) {
doSetProperty("exceptionHandler", exceptionHandler);
return this;
}
/**
* Sets the exchange pattern when the consumer creates an exchange.
*
* The option is a: <code>org.apache.camel.ExchangePattern</code> type.
*
* Group: consumer (advanced)
*
* @param exchangePattern the value to set
* @return the dsl builder
*/
default AdvancedDebeziumOracleEndpointBuilder exchangePattern(org.apache.camel.ExchangePattern exchangePattern) {
doSetProperty("exchangePattern", exchangePattern);
return this;
}
/**
* Sets the exchange pattern when the consumer creates an exchange.
*
* The option will be converted to a
* <code>org.apache.camel.ExchangePattern</code> type.
*
* Group: consumer (advanced)
*
* @param exchangePattern the value to set
* @return the dsl builder
*/
default AdvancedDebeziumOracleEndpointBuilder exchangePattern(String exchangePattern) {
doSetProperty("exchangePattern", exchangePattern);
return this;
}
}
public | AdvancedDebeziumOracleEndpointBuilder |
java | apache__dubbo | dubbo-cluster/src/main/java/org/apache/dubbo/rpc/cluster/router/mesh/rule/destination/DestinationRule.java | {
"start": 941,
"end": 1319
} | class ____ extends BaseRule {
private DestinationRuleSpec spec;
public DestinationRuleSpec getSpec() {
return spec;
}
public void setSpec(DestinationRuleSpec spec) {
this.spec = spec;
}
@Override
public String toString() {
return "DestinationRule{" + "base=" + super.toString() + ", spec=" + spec + '}';
}
}
| DestinationRule |
java | spring-projects__spring-framework | spring-context/src/main/java/org/springframework/cache/interceptor/NameMatchCacheOperationSource.java | {
"start": 1242,
"end": 4136
} | class ____ implements CacheOperationSource, Serializable {
/**
* Logger available to subclasses.
* <p>Static for optimal serialization.
*/
protected static final Log logger = LogFactory.getLog(NameMatchCacheOperationSource.class);
/** Keys are method names; values are TransactionAttributes. */
private final Map<String, Collection<CacheOperation>> nameMap = new LinkedHashMap<>();
/**
* Set a name/attribute map, consisting of method names
* (for example, "myMethod") and CacheOperation instances
* (or Strings to be converted to CacheOperation instances).
* @see CacheOperation
*/
public void setNameMap(Map<String, Collection<CacheOperation>> nameMap) {
nameMap.forEach(this::addCacheMethod);
}
/**
* Add an attribute for a cacheable method.
* <p>Method names can be exact matches, or of the pattern "xxx*",
* "*xxx" or "*xxx*" for matching multiple methods.
* @param methodName the name of the method
* @param ops operation associated with the method
*/
public void addCacheMethod(String methodName, Collection<CacheOperation> ops) {
if (logger.isDebugEnabled()) {
logger.debug("Adding method [" + methodName + "] with cache operations [" + ops + "]");
}
this.nameMap.put(methodName, ops);
}
@Override
public @Nullable Collection<CacheOperation> getCacheOperations(Method method, @Nullable Class<?> targetClass) {
// look for direct name match
String methodName = method.getName();
Collection<CacheOperation> ops = this.nameMap.get(methodName);
if (ops == null) {
// Look for most specific name match.
String bestNameMatch = null;
for (String mappedName : this.nameMap.keySet()) {
if (isMatch(methodName, mappedName) &&
(bestNameMatch == null || bestNameMatch.length() <= mappedName.length())) {
ops = this.nameMap.get(mappedName);
bestNameMatch = mappedName;
}
}
}
return ops;
}
/**
* Return if the given method name matches the mapped name.
* <p>The default implementation checks for "xxx*", "*xxx" and "*xxx*" matches,
* as well as direct equality. Can be overridden in subclasses.
* @param methodName the method name of the class
* @param mappedName the name in the descriptor
* @return if the names match
* @see org.springframework.util.PatternMatchUtils#simpleMatch(String, String)
*/
protected boolean isMatch(String methodName, String mappedName) {
return PatternMatchUtils.simpleMatch(mappedName, methodName);
}
@Override
public boolean equals(@Nullable Object other) {
return (this == other || (other instanceof NameMatchCacheOperationSource otherCos &&
ObjectUtils.nullSafeEquals(this.nameMap, otherCos.nameMap)));
}
@Override
public int hashCode() {
return NameMatchCacheOperationSource.class.hashCode();
}
@Override
public String toString() {
return getClass().getName() + ": " + this.nameMap;
}
}
| NameMatchCacheOperationSource |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/mappingcontrol/ErroneousBuiltInAndBuiltInMapper.java | {
"start": 679,
"end": 879
} | class ____ {
private final Date time;
public Target(Date time) {
this.time = time;
}
public Date getTime() {
return time;
}
}
}
| Target |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java | {
"start": 3504,
"end": 4489
} | enum ____ {
DISABLED,
STORED,
SYNTHETIC
}
private static final SourceFieldMapper DEFAULT = new SourceFieldMapper(
null,
Explicit.IMPLICIT_TRUE,
Strings.EMPTY_ARRAY,
Strings.EMPTY_ARRAY,
false,
false
);
private static final SourceFieldMapper STORED = new SourceFieldMapper(
Mode.STORED,
Explicit.IMPLICIT_TRUE,
Strings.EMPTY_ARRAY,
Strings.EMPTY_ARRAY,
false,
false
);
private static final SourceFieldMapper SYNTHETIC = new SourceFieldMapper(
Mode.SYNTHETIC,
Explicit.IMPLICIT_TRUE,
Strings.EMPTY_ARRAY,
Strings.EMPTY_ARRAY,
false,
false
);
private static final SourceFieldMapper DISABLED = new SourceFieldMapper(
Mode.DISABLED,
Explicit.IMPLICIT_TRUE,
Strings.EMPTY_ARRAY,
Strings.EMPTY_ARRAY,
false,
false
);
public static | Mode |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/beans/factory/xml/XmlBeanFactoryTestTypes.java | {
"start": 6936,
"end": 7142
} | class ____ {
public String replaceMe(String echo) {
return echo;
}
}
/**
* Bean that exposes a simple property that can be set
* to a mix of references and individual values.
*/
| MethodReplaceCandidate |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/kotlin/Zoujing.java | {
"start": 208,
"end": 738
} | class ____ extends TestCase {
public void test_user() throws Exception {
ExtClassLoader classLoader = new ExtClassLoader();
Class clazz = classLoader.loadClass("com.alidme.xrecharge.platform.common.data.NoticeData");
String json = "{\"benefitNoticeState\":1}";
Object obj = JSON.parseObject(json, clazz);
String result = JSON.toJSONString(obj);
System.out.println(result);
assertEquals("{\"benefitNoticeState\":1,\"outId\":\"\"}", result);
}
private static | Zoujing |
java | reactor__reactor-core | reactor-core/src/test/java/reactor/core/publisher/FluxTimeoutTest.java | {
"start": 1255,
"end": 15403
} | class ____ {
@Test
public void noTimeout() {
AssertSubscriber<Integer> ts = AssertSubscriber.create();
Flux.range(1, 10)
.timeout(Flux.never(), v -> Flux.never())
.subscribe(ts);
ts.assertValues(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
.assertComplete()
.assertNoError();
}
@Test
public void noTimeoutOnInstantSource() {
AssertSubscriber<Integer> ts = AssertSubscriber.create();
Flux.range(1, 10)
.timeout(Flux.empty(), v -> Flux.never())
.subscribe(ts);
ts.assertValueCount(10).assertComplete();
}
@Test
public void immediateTimeout() {
TestPublisher<Object> source = TestPublisher.create();
source.flux()
.timeout(Flux.empty(), v -> Flux.never())
.as(StepVerifier::create)
.verifyError(TimeoutException.class);
}
@Test
public void firstElementImmediateTimeout() {
AssertSubscriber<Integer> ts = AssertSubscriber.create();
Flux.range(1, 10)
.timeout(Flux.never(), v -> Flux.empty())
.subscribe(ts);
ts.assertValues(1)
.assertNotComplete()
.assertError(TimeoutException.class);
}
//Fail
//@Test
public void immediateTimeoutResume() {
AssertSubscriber<Integer> ts = AssertSubscriber.create();
Flux.range(1, 10)
.timeout(Flux.empty(), v -> Flux.never(), Flux.range(1, 10))
.subscribe(ts);
ts.assertValues(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
.assertComplete()
.assertNoError();
}
@Test
public void firstElementImmediateResume() {
AssertSubscriber<Integer> ts = AssertSubscriber.create();
Flux.range(1, 10)
.timeout(Flux.never(), v -> Flux.empty(), Flux.range(1, 10))
.subscribe(ts);
ts.assertValues(1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
.assertComplete()
.assertNoError();
}
@Test
public void oldTimeoutHasNoEffect() {
Sinks.Many<Integer> source = Sinks.unsafe().many().multicast().directBestEffort();
Sinks.Many<Integer> tp = Sinks.unsafe().many().multicast().directBestEffort();
AssertSubscriber<Integer> ts = AssertSubscriber.create();
source.asFlux()
.timeout(tp.asFlux(), v -> Flux.never(), Flux.range(1, 10))
.subscribe(ts);
source.emitNext(0, FAIL_FAST);
tp.emitNext(1, FAIL_FAST);
source.emitComplete(FAIL_FAST);
assertThat(tp.currentSubscriberCount()).as("timeout has subscriber").isZero();
ts.assertValues(0)
.assertComplete()
.assertNoError();
}
@Test
public void oldTimeoutCompleteHasNoEffect() {
Sinks.Many<Integer> source = Sinks.unsafe().many().multicast().directBestEffort();
Sinks.Many<Integer> tp = Sinks.unsafe().many().multicast().directBestEffort();
AssertSubscriber<Integer> ts = AssertSubscriber.create();
source.asFlux()
.timeout(tp.asFlux(), v -> Flux.never(), Flux.range(1, 10))
.subscribe(ts);
source.emitNext(0, FAIL_FAST);
tp.emitComplete(FAIL_FAST);
source.emitComplete(FAIL_FAST);
assertThat(tp.currentSubscriberCount()).isZero();
ts.assertValues(0)
.assertComplete()
.assertNoError();
}
@Test
public void oldTimeoutErrorHasNoEffect() {
Sinks.Many<Integer> source = Sinks.unsafe().many().multicast().directBestEffort();
Sinks.Many<Integer> tp = Sinks.unsafe().many().multicast().directBestEffort();
AssertSubscriber<Integer> ts = AssertSubscriber.create();
source.asFlux()
.timeout(tp.asFlux(), v -> Flux.never(), Flux.range(1, 10))
.subscribe(ts);
source.emitNext(0, FAIL_FAST);
tp.emitError(new RuntimeException("forced failure"), FAIL_FAST);
source.emitComplete(FAIL_FAST);
assertThat(tp.currentSubscriberCount()).as("timeout has subscriber").isZero();
ts.assertValues(0)
.assertComplete()
.assertNoError();
}
@Test
public void itemTimeoutThrows() {
AssertSubscriber<Integer> ts = AssertSubscriber.create();
Flux.range(1, 10)
.timeout(Flux.never(), v -> {
throw new RuntimeException("forced failure");
})
.subscribe(ts);
ts.assertValues(1)
.assertNotComplete()
.assertError(RuntimeException.class)
.assertErrorMessage("forced failure");
}
@Test
public void itemTimeoutReturnsNull() {
AssertSubscriber<Integer> ts = AssertSubscriber.create();
Flux.range(1, 10)
.timeout(Flux.never(), v -> null)
.subscribe(ts);
ts.assertValues(1)
.assertNotComplete()
.assertError(NullPointerException.class);
}
@Test
public void dropsErrorOnCompletedSource() {
Flux.range(0, 10)
.timeout(Flux.error(new RuntimeException("forced failure")), v -> Flux.never())
.as(StepVerifier::create)
.expectNextCount(10)
.verifyComplete();
}
@Test
public void firstTimeoutError() {
TestPublisher<Object> source = TestPublisher.create();
source.flux()
.timeout(Flux.error(new RuntimeException("forced failure")), v -> Flux.never())
.as(StepVerifier::create)
.then(source::complete)
.verifyErrorMessage("forced failure");
}
@Test
public void itemTimeoutError() {
AssertSubscriber<Integer> ts = AssertSubscriber.create();
Flux.range(1, 10)
.timeout(Flux.never(),
v -> Flux.error(new RuntimeException("forced failure")))
.subscribe(ts);
ts.assertValues(1)
.assertNotComplete()
.assertError(RuntimeException.class)
.assertErrorMessage("forced failure");
}
@Test
public void timeoutRequested() {
AssertSubscriber<Integer> ts = AssertSubscriber.create();
Sinks.Many<Integer> source = Sinks.unsafe().many().multicast().directBestEffort();
Sinks.Many<Integer> tp = Sinks.unsafe().many().multicast().directBestEffort();
source.asFlux()
.timeout(tp.asFlux(), v -> tp.asFlux())
.subscribe(ts);
tp.emitNext(1, FAIL_FAST);
source.emitNext(2, FAIL_FAST);
source.emitComplete(FAIL_FAST);
ts.assertNoValues()
.assertError(TimeoutException.class)
.assertNotComplete();
}
Flux<Integer> scenario_timeoutCanBeBoundWithCallback() {
return Flux.<Integer>never().timeout(Duration.ofMillis(500), Flux.just(-5));
}
@Test
public void timeoutCanBeBoundWithCallback() {
StepVerifier.withVirtualTime(this::scenario_timeoutCanBeBoundWithCallback)
.thenAwait(Duration.ofMillis(500))
.expectNext(-5)
.verifyComplete();
}
Flux<?> scenario_timeoutThrown() {
return Flux.never()
.timeout(Duration.ofMillis(500));
}
@Test
public void fluxPropagatesErrorUsingAwait() {
StepVerifier.withVirtualTime(this::scenario_timeoutThrown)
.thenAwait(Duration.ofMillis(500))
.verifyError(TimeoutException.class);
}
Flux<Integer> scenario_timeoutCanBeBoundWithCallback2() {
return Flux.<Integer>never().timeout(Duration.ofMillis(500), Flux.just(-5));
}
@Test
public void timeoutCanBeBoundWithCallback2() {
StepVerifier.withVirtualTime(this::scenario_timeoutCanBeBoundWithCallback2)
.thenAwait(Duration.ofMillis(500))
.expectNext(-5)
.verifyComplete();
}
Flux<?> scenario_timeoutThrown2() {
return Flux.never()
.timeout(Duration.ofMillis(500));
}
@Test
public void fluxPropagatesErrorUsingAwait2() {
StepVerifier.withVirtualTime(this::scenario_timeoutThrown2)
.thenAwait(Duration.ofMillis(500))
.verifyError(TimeoutException.class);
}
Flux<?> scenario_timeoutThrown3() {
return Flux.never()
.timeout(Duration.ofMillis(500), Schedulers.parallel());
}
@Test
public void fluxPropagatesErrorUsingAwait3() {
StepVerifier.withVirtualTime(this::scenario_timeoutThrown3)
.thenAwait(Duration.ofMillis(500))
.verifyError(TimeoutException.class);
}
@Test
public void fluxTimeoutOther() {
StepVerifier.create(Flux.never().timeout(Flux.just(1)))
.thenAwait(Duration.ofMillis(500))
.verifyError(TimeoutException.class);
}
//see https://github.com/reactor/reactor-core/issues/744
@Test
public void timeoutDropWhenNoCancelWithoutFallback() {
for (int i = 0; i < 50; i++) {
StepVerifier.withVirtualTime(
() -> Flux.just("cat")
.delaySubscription(Duration.ofMillis(3))
// We cancel on another scheduler that won't do anything to force it to act like
// the event is already in flight
.cancelOn(Schedulers.fromExecutor(r -> {}))
.timeout(Duration.ofMillis(2))
)
.thenAwait(Duration.ofSeconds(5))
.expectError(TimeoutException.class)
.verify();
}
}
//see https://github.com/reactor/reactor-core/issues/744
@Test
public void timeoutDropWhenNoCancelWithFallback() {
for (int i = 0; i < 50; i++) {
StepVerifier.withVirtualTime(
() -> Flux.just("cat")
.delaySubscription(Duration.ofMillis(3))
// We cancel on another scheduler that won't do anything to force it to act like
// the event is already in flight
.cancelOn(Schedulers.fromExecutor(r -> {}))
.timeout(Duration.ofMillis(2), Flux.just("dog").delayElements(Duration.ofMillis(5)))
)
.thenAwait(Duration.ofSeconds(5))
.expectNext("dog")
.expectComplete()
.verify();
}
}
@Test
public void timeoutDurationMessageDefault() {
StepVerifier.withVirtualTime(() -> Flux.never()
.timeout(Duration.ofHours(1)))
.thenAwait(Duration.ofHours(2))
.expectErrorMessage("Did not observe any item or terminal signal within " +
"3600000ms in 'source(FluxNever)' (and no fallback has been configured)")
.verify();
}
@Test
public void timeoutDurationMessageWithName() {
StepVerifier.withVirtualTime(() -> Flux.never()
.name("Name")
.timeout(Duration.ofHours(1)))
.thenAwait(Duration.ofHours(2))
.expectErrorMessage("Did not observe any item or terminal signal within " +
"3600000ms in 'Name' (and no fallback has been configured)")
.verify();
}
@Test
public void timeoutNotDurationMessageFirstTimeout() {
StepVerifier.create(Flux.never()
.timeout(Mono.just("immediate")))
.expectErrorMessage("Did not observe any item or terminal signal within " +
"first signal from a Publisher in 'source(FluxNever)' (and no fallback has been configured)")
.verify();
}
@Test
public void timeoutNotDurationMessageSecondTimeout() {
AtomicBoolean generatorUsed = new AtomicBoolean();
StepVerifier.create(Flux.concat(Mono.just("foo"), Mono.just("bar").delayElement(Duration.ofMillis(500)))
.timeout(Mono.delay(Duration.ofMillis(100)),
v -> {
generatorUsed.set(true);
return Mono.delay(Duration.ofMillis(100));
}))
.expectNext("foo")
.expectErrorMessage("Did not observe any item or terminal signal within " +
"first signal from a Publisher in 'source(FluxConcatArray)' (and no fallback has been configured)")
.verify();
assertThat(generatorUsed.get()).as("generator used").isTrue();
}
@Test
public void onSubscribeRace() {
for (int i = 0; i < 10_000; i++) {
Flux.just("Hello")
.concatMap(v -> Mono.delay(Duration.ofSeconds(10)))
.timeout(Duration.ofMillis(i % 100 == 0 ? 1 : 0), Mono.just(123L))
.collectList()
.as(StepVerifier::create)
.expectNextMatches(it -> it.get(0).equals(123L))
.expectComplete()
.verify(Duration.ofSeconds(1));
}
}
@Test
public void scanOperator(){
Flux<Integer> parent = Flux.just(1);
FluxTimeout<Integer, Integer, ?> test = new FluxTimeout<>(parent, Flux.just(2), v -> Flux.empty(), "desc");
assertThat(test.scan(Scannable.Attr.PARENT)).isSameAs(parent);
assertThat(test.scan(Scannable.Attr.RUN_STYLE)).isSameAs(Scannable.Attr.RunStyle.SYNC);
}
@Test
public void scanMainSubscriber(){
CoreSubscriber<String> actual = new LambdaSubscriber<>(null, e -> {}, null, s -> s.request(1));
FluxTimeout.TimeoutMainSubscriber<String, Integer> test = new FluxTimeout.TimeoutMainSubscriber<>(actual, Flux.empty(), v -> Flux.just(2), Flux.empty(), "desc");
Subscription subscription = Operators.emptySubscription();
test.onSubscribe(subscription);
assertThat(test.scan(Scannable.Attr.PARENT)).isSameAs(subscription);
assertThat(test.scan(Scannable.Attr.RUN_STYLE)).isSameAs(Scannable.Attr.RunStyle.SYNC);
assertThat(test.scan(Scannable.Attr.REQUESTED_FROM_DOWNSTREAM)).isEqualTo(1L);
test.request(2);
assertThat(test.scan(Scannable.Attr.REQUESTED_FROM_DOWNSTREAM)).isEqualTo(1L + 2L);
assertThat(test.scan(Scannable.Attr.CANCELLED)).isFalse();
test.cancel();
assertThat(test.scan(Scannable.Attr.CANCELLED)).isTrue();
}
@Test
public void scanOtherSubscriber(){
CoreSubscriber<String> actual = new LambdaSubscriber<>(null, e -> {}, null, null);
FluxTimeout.TimeoutMainSubscriber<String, Integer> main = new FluxTimeout.TimeoutMainSubscriber<>(actual, Flux.empty(), v -> Flux.just(2), Flux.empty(), "desc");
FluxTimeout.TimeoutOtherSubscriber<String> test = new FluxTimeout.TimeoutOtherSubscriber<>(actual, main);
Subscription subscription = Operators.emptySubscription();
test.onSubscribe(subscription);
assertThat(test.scan(Scannable.Attr.RUN_STYLE)).isSameAs(Scannable.Attr.RunStyle.SYNC);
assertThat(test.scan(Scannable.Attr.ACTUAL)).isNull();
}
@Test
public void scanTimeoutSubscriber(){
CoreSubscriber<String> actual = new LambdaSubscriber<>(null, e -> {}, null, null);
FluxTimeout.TimeoutMainSubscriber<String, Integer> main = new FluxTimeout.TimeoutMainSubscriber<>(actual, Flux.empty(), v -> Flux.just(2), Flux.empty(), "desc");
FluxTimeout.TimeoutTimeoutSubscriber test = new FluxTimeout.TimeoutTimeoutSubscriber(main, 2);
Subscription subscription = Operators.emptySubscription();
test.onSubscribe(subscription);
assertThat(test.scan(Scannable.Attr.RUN_STYLE)).isSameAs(Scannable.Attr.RunStyle.SYNC);
assertThat(test.scan(Scannable.Attr.ACTUAL)).isNull();
}
}
| FluxTimeoutTest |
java | apache__kafka | clients/src/main/java/org/apache/kafka/clients/consumer/internals/metrics/ConsumerRebalanceMetricsManager.java | {
"start": 1826,
"end": 6327
} | class ____ extends RebalanceMetricsManager {
private final Sensor successfulRebalanceSensor;
private final Sensor failedRebalanceSensor;
public final MetricName rebalanceLatencyAvg;
public final MetricName rebalanceLatencyMax;
public final MetricName rebalanceLatencyTotal;
public final MetricName rebalanceTotal;
public final MetricName rebalanceRatePerHour;
public final MetricName lastRebalanceSecondsAgo;
public final MetricName failedRebalanceTotal;
public final MetricName failedRebalanceRate;
public final MetricName assignedPartitionsCount;
private long lastRebalanceEndMs = -1L;
private long lastRebalanceStartMs = -1L;
private final Metrics metrics;
public ConsumerRebalanceMetricsManager(Metrics metrics, SubscriptionState subscriptions) {
super(CONSUMER_METRIC_GROUP_PREFIX + COORDINATOR_METRICS_SUFFIX);
this.metrics = metrics;
rebalanceLatencyAvg = createMetric(metrics, "rebalance-latency-avg",
"The average time in ms taken for a group to complete a rebalance");
rebalanceLatencyMax = createMetric(metrics, "rebalance-latency-max",
"The max time in ms taken for a group to complete a rebalance");
rebalanceLatencyTotal = createMetric(metrics, "rebalance-latency-total",
"The total number of milliseconds spent in rebalances");
rebalanceTotal = createMetric(metrics, "rebalance-total",
"The total number of rebalance events");
rebalanceRatePerHour = createMetric(metrics, "rebalance-rate-per-hour",
"The number of rebalance events per hour");
failedRebalanceTotal = createMetric(metrics, "failed-rebalance-total",
"The total number of failed rebalance events");
failedRebalanceRate = createMetric(metrics, "failed-rebalance-rate-per-hour",
"The number of failed rebalance events per hour");
assignedPartitionsCount = createMetric(metrics, "assigned-partitions",
"The number of partitions currently assigned to this consumer");
registerAssignedPartitionCount(subscriptions);
successfulRebalanceSensor = metrics.sensor("rebalance-latency");
successfulRebalanceSensor.add(rebalanceLatencyAvg, new Avg());
successfulRebalanceSensor.add(rebalanceLatencyMax, new Max());
successfulRebalanceSensor.add(rebalanceLatencyTotal, new CumulativeSum());
successfulRebalanceSensor.add(rebalanceTotal, new CumulativeCount());
successfulRebalanceSensor.add(rebalanceRatePerHour, new Rate(TimeUnit.HOURS, new WindowedCount(), 1));
failedRebalanceSensor = metrics.sensor("failed-rebalance");
failedRebalanceSensor.add(failedRebalanceTotal, new CumulativeSum());
failedRebalanceSensor.add(failedRebalanceRate, new Rate(TimeUnit.HOURS, new WindowedCount(), 1));
Measurable lastRebalance = (config, now) -> {
if (lastRebalanceEndMs == -1L)
return -1d;
else
return TimeUnit.SECONDS.convert(now - lastRebalanceEndMs, TimeUnit.MILLISECONDS);
};
lastRebalanceSecondsAgo = createMetric(metrics,
"last-rebalance-seconds-ago",
"The number of seconds since the last rebalance event");
metrics.addMetric(lastRebalanceSecondsAgo, lastRebalance);
}
public void recordRebalanceStarted(long nowMs) {
lastRebalanceStartMs = nowMs;
}
public void recordRebalanceEnded(long nowMs) {
lastRebalanceEndMs = nowMs;
successfulRebalanceSensor.record(nowMs - lastRebalanceStartMs);
}
public void maybeRecordRebalanceFailed() {
if (lastRebalanceStartMs <= lastRebalanceEndMs)
return;
failedRebalanceSensor.record();
}
public boolean rebalanceStarted() {
return lastRebalanceStartMs > lastRebalanceEndMs;
}
/**
* Register metric to track the number of assigned partitions.
* It will consider partitions assigned to the consumer
* regardless of whether they were assigned via {@link KafkaConsumer#subscribe(Pattern)} or
* {@link KafkaConsumer#assign(Collection)}
*/
private void registerAssignedPartitionCount(SubscriptionState subscriptions) {
Measurable numParts = (config, now) -> subscriptions.numAssignedPartitions();
metrics.addMetric(assignedPartitionsCount, numParts);
}
} | ConsumerRebalanceMetricsManager |
java | google__jimfs | jimfs/src/test/java/com/google/common/jimfs/FakeFileTimeSource.java | {
"start": 873,
"end": 1601
} | class ____ implements FileTimeSource {
private final Random random = new Random(System.currentTimeMillis());
private Instant now;
FakeFileTimeSource() {
randomize();
}
@CanIgnoreReturnValue
FakeFileTimeSource randomize() {
now =
Instant.ofEpochSecond(
random
.longs(Instant.MIN.getEpochSecond(), Instant.MAX.getEpochSecond())
.findAny()
.getAsLong(),
random.nextInt(1_000_000_000));
return this;
}
@CanIgnoreReturnValue
FakeFileTimeSource advance(Duration duration) {
this.now = now.plus(duration);
return this;
}
@Override
public FileTime now() {
return FileTime.from(now);
}
}
| FakeFileTimeSource |
java | alibaba__nacos | api/src/main/java/com/alibaba/nacos/api/ai/model/mcp/McpTool.java | {
"start": 740,
"end": 1400
} | class ____ {
private String name;
private String description;
private Map<String, Object> inputSchema;
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
public Map<String, Object> getInputSchema() {
return inputSchema;
}
public void setInputSchema(Map<String, Object> inputSchema) {
this.inputSchema = inputSchema;
}
}
| McpTool |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/context/annotation/Spr12278Tests.java | {
"start": 2002,
"end": 2112
} | class ____ {
@Bean
public String autowiredName() {
return "foo";
}
}
private static | BaseConfiguration |
java | google__dagger | javatests/dagger/internal/codegen/DelegateRequestRepresentationTest.java | {
"start": 9578,
"end": 9952
} | class ____ {",
" @Inject UsesSupertype(Supertype supertype) {}",
"}");
Source module =
CompilerTests.javaSource(
"other.SupertypeModule",
"package other;",
"",
"import dagger.Binds;",
"import dagger.Module;",
"",
"@Module",
"public | UsesSupertype |
java | spring-projects__spring-security | core/src/main/java/org/springframework/security/authorization/method/AuthorizationMethodPointcuts.java | {
"start": 1302,
"end": 2283
} | class ____ {
static Pointcut forAllAnnotations() {
return forAnnotations(PreFilter.class, PreAuthorize.class, PostFilter.class, PostAuthorize.class);
}
@SafeVarargs
static Pointcut forAnnotations(Class<? extends Annotation>... annotations) {
ComposablePointcut pointcut = null;
for (Class<? extends Annotation> annotation : annotations) {
if (pointcut == null) {
pointcut = new ComposablePointcut(classOrMethod(annotation));
}
else {
pointcut.union(classOrMethod(annotation));
}
}
if (pointcut == null) {
throw new IllegalStateException(
"Unable to find a pointcut for annotations " + Arrays.toString(annotations));
}
return pointcut;
}
private static Pointcut classOrMethod(Class<? extends Annotation> annotation) {
return Pointcuts.union(new AnnotationMatchingPointcut(null, annotation, true),
new AnnotationMatchingPointcut(annotation, true));
}
private AuthorizationMethodPointcuts() {
}
}
| AuthorizationMethodPointcuts |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/mysql/alterTable/MySqlAlterTableTest52_primary_prefix_key.java | {
"start": 431,
"end": 923
} | class ____ extends TestCase {
public void test_0() {
String sql = "alter table test001 add primary key (b (4) asc, c (8) desc);";
MySqlStatementParser parser = new MySqlStatementParser(sql);
SQLStatement stmt = parser.parseStatementList().get(0);
parser.match(Token.EOF);
assertEquals("ALTER TABLE test001\n" +
"\tADD PRIMARY KEY (b(4) ASC, c(8) DESC);", SQLUtils.toMySqlString(stmt));
}
}
| MySqlAlterTableTest52_primary_prefix_key |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/Cache.java | {
"start": 3584,
"end": 3736
} | interface ____ any isolation or transactional
* semantics associated with the underlying caches. In particular, eviction via
* the methods of this | respect |
java | google__dagger | javatests/dagger/internal/codegen/ComponentValidationTest.java | {
"start": 17016,
"end": 17343
} | class ____ {",
" @Provides int i() { return 1; }",
"}");
Source subclassedModule =
CompilerTests.javaSource(
"test.SubclassedModule",
"package test;",
"",
"import dagger.Module;",
"",
"@Module",
" | AbstractModule |
java | spring-projects__spring-framework | spring-beans/src/main/java/org/springframework/beans/factory/BeanClassLoaderAware.java | {
"start": 779,
"end": 1036
} | class ____ used by the
* present bean factory to load bean classes.
*
* <p>This is mainly intended to be implemented by framework classes which
* have to pick up application classes by name despite themselves potentially
* being loaded from a shared | loader |
java | quarkusio__quarkus | core/runtime/src/test/java/io/quarkus/runtime/util/ExceptionUtilTest.java | {
"start": 494,
"end": 3391
} | class ____ {
/**
* Tests the {@link ExceptionUtil#rootCauseFirstStackTrace(Throwable)} method
*
* @throws Exception
*/
@Test
public void testReversed() throws Exception {
final Throwable ex = generateException();
final String rootCauseFirst = ExceptionUtil.rootCauseFirstStackTrace(ex);
assertNotNull(rootCauseFirst, "Stacktrace was null");
assertTrue(rootCauseFirst.contains("Resulted in:"),
"Stacktrace doesn't contain the \"Resulted in:\" string");
assertFalse(rootCauseFirst.contains("Caused by:"), "Stacktrace contains the \"Caused by:\" string");
final String[] lines = rootCauseFirst.split("\n");
final String firstLine = lines[0];
assertTrue(firstLine.startsWith(NumberFormatException.class.getName() + ": For input string: \"23.23232\""),
"Unexpected root cause");
final List<String> expectedResultedIns = new ArrayList<>();
expectedResultedIns.add(IllegalArgumentException.class.getName() + ": Incorrect param");
expectedResultedIns.add(IOException.class.getName() + ": Request processing failed");
expectedResultedIns.add(IOError.class.getName());
expectedResultedIns.add(RuntimeException.class.getName() + ": Unexpected exception");
for (final String line : lines) {
if (!line.startsWith("Resulted in:")) {
continue;
}
final String expected = expectedResultedIns.remove(0);
assertTrue(line.startsWith("Resulted in: " + expected), "Unexpected stacktrace element '" + line + "'");
}
assertTrue(expectedResultedIns.isEmpty(), "Reversed stacktrace is missing certain elements");
}
@Test
public void testGetRootCause() {
Throwable e = generateException();
Throwable rootCause = ExceptionUtil.getRootCause(e);
assertEquals(NumberFormatException.class, rootCause.getClass());
assertNull(ExceptionUtil.getRootCause(null));
assertEquals(NullPointerException.class, ExceptionUtil.getRootCause(new NullPointerException()).getClass());
}
private Throwable generateException() {
try {
try {
Integer.parseInt("23.23232");
} catch (NumberFormatException nfe) {
throw new IllegalArgumentException("Incorrect param", nfe);
}
} catch (IllegalArgumentException iae) {
try {
throw new IOException("Request processing failed", iae);
} catch (IOException e) {
try {
throw new IOError(e);
} catch (IOError ie) {
return new RuntimeException("Unexpected exception", ie);
}
}
}
throw new RuntimeException("Should not reach here");
}
}
| ExceptionUtilTest |
java | elastic__elasticsearch | x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/MinAgg.java | {
"start": 500,
"end": 762
} | class ____ extends DefaultAggSourceLeafAgg {
public MinAgg(String id, AggSource source) {
super(id, source);
}
@Override
Function<String, ValuesSourceAggregationBuilder<?>> builder() {
return AggregationBuilders::min;
}
}
| MinAgg |
java | apache__camel | core/camel-api/src/main/java/org/apache/camel/ApiEndpoint.java | {
"start": 919,
"end": 963
} | interface ____ extends Endpoint {
}
| ApiEndpoint |
java | hibernate__hibernate-orm | hibernate-envers/src/main/java/org/hibernate/envers/internal/entities/mapper/relation/AbstractToOneMapper.java | {
"start": 1062,
"end": 4216
} | class ____ extends AbstractPropertyMapper {
private final ServiceRegistry serviceRegistry;
private final PropertyData propertyData;
protected AbstractToOneMapper(ServiceRegistry serviceRegistry, PropertyData propertyData) {
this.serviceRegistry = serviceRegistry;
this.propertyData = propertyData;
}
@Override
public boolean mapToMapFromEntity(
SharedSessionContractImplementor session,
Map<String, Object> data,
Object newObj,
Object oldObj) {
return false;
}
@Override
public void mapToEntityFromMap(
EnversService enversService,
Object obj,
Map data,
Object primaryKey,
AuditReaderImplementor versionsReader,
Number revision) {
if ( obj != null ) {
nullSafeMapToEntityFromMap( enversService, obj, data, primaryKey, versionsReader, revision );
}
}
@Override
public Object mapToEntityFromMap(
EnversService enversService,
Map data,
Object primaryKey,
AuditReaderImplementor versionsReader,
Number revision) {
return nullSafeMapToEntityFromMap( enversService, data, primaryKey, versionsReader, revision );
}
@Override
public List<PersistentCollectionChangeData> mapCollectionChanges(
SharedSessionContractImplementor session,
String referencingPropertyName,
PersistentCollection newColl,
Serializable oldColl,
Object id) {
return null;
}
/**
* @param enversService The EnversService
* @param entityName Entity name.
*
* @return Entity class, name and information whether it is audited or not.
*/
protected EntityInfo getEntityInfo(EnversService enversService, String entityName) {
EntityConfiguration entCfg = enversService.getEntitiesConfigurations().get( entityName );
boolean isRelationAudited = true;
if ( entCfg == null ) {
// a relation marked as RelationTargetAuditMode.NOT_AUDITED
entCfg = enversService.getEntitiesConfigurations().getNotVersionEntityConfiguration( entityName );
isRelationAudited = false;
}
final Class entityClass = ReflectionTools.loadClass( entCfg.getEntityClassName(), enversService.getClassLoaderService() );
return new EntityInfo( entityClass, entityName, isRelationAudited );
}
protected void setPropertyValue(Object targetObject, Object value) {
if ( isDynamicComponentMap() ) {
@SuppressWarnings("unchecked")
final Map<String, Object> map = (Map<String, Object>) targetObject;
map.put( propertyData.getBeanName(), value );
}
else {
setValueOnObject( propertyData, targetObject, value, serviceRegistry );
}
}
/**
* @return Bean property that represents the relation.
*/
protected PropertyData getPropertyData() {
return propertyData;
}
/**
* Parameter {@code obj} is never {@code null}.
*/
public abstract void nullSafeMapToEntityFromMap(
EnversService enversService,
Object obj,
Map data,
Object primaryKey,
AuditReaderImplementor versionsReader,
Number revision);
public abstract Object nullSafeMapToEntityFromMap(
EnversService enversService,
Map data,
Object primaryKey,
AuditReaderImplementor versionsReader,
Number revision);
/**
* Simple descriptor of an entity.
*/
protected static | AbstractToOneMapper |
java | google__dagger | javatests/dagger/functional/componentdependency/BoxedPrimitives.java | {
"start": 881,
"end": 995
} | interface ____ {
Provider<Integer> providerOfBoxedPrimitive();
}
@Module
| ConsumesPrimitiveThroughDependency |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellReturnCode.java | {
"start": 19411,
"end": 20801
} | class ____ extends FsCommand {
static int processed = 0;
InterruptCommand() {
processed = 0;
setRecursive(true);
}
@Override
protected void processPath(PathData item) throws IOException {
System.out.println("processing: "+item);
processed++;
if (item.stat.isFile()) {
System.out.println("throw interrupt");
throw new InterruptedIOException();
}
}
}
/**
* Asserts that for the given command, the given arguments are considered
* invalid. The expectation is that the command will throw
* IllegalArgumentException.
*
* @param cmd FsCommand to check
* @param args String... arguments to check
*/
private static void assertIllegalArguments(FsCommand cmd, String... args) {
try {
cmd.run(args);
fail("Expected IllegalArgumentException from args: " +
Arrays.toString(args));
} catch (IllegalArgumentException e) {
}
}
/**
* Asserts that for the given command, the given arguments are considered valid
* on Windows, but invalid elsewhere.
*
* @param cmd FsCommand to check
* @param args String... arguments to check
*/
private static void assertValidArgumentsOnWindows(FsCommand cmd,
String... args) {
if (Shell.WINDOWS) {
cmd.run(args);
} else {
assertIllegalArguments(cmd, args);
}
}
}
| InterruptCommand |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/streaming/api/environment/StreamExecutionEnvironment.java | {
"start": 47321,
"end": 51598
} | class ____ in the collection.
* @param data The array of elements to create the data stream from.
* @param <OUT> The type of the returned data stream
* @return The data stream representing the given array of elements
* @deprecated This method will be removed a future release, possibly as early as version 2.0.
* Use {@link #fromData(OUT...)} instead.
*/
@SafeVarargs
@Deprecated
public final <OUT> DataStreamSource<OUT> fromElements(Class<OUT> type, OUT... data) {
if (data.length == 0) {
throw new IllegalArgumentException(
"fromElements needs at least one element as argument");
}
TypeInformation<OUT> typeInfo;
try {
typeInfo = TypeExtractor.getForClass(type);
} catch (Exception e) {
throw new RuntimeException(
"Could not create TypeInformation for type "
+ type.getName()
+ "; please specify the TypeInformation manually via "
+ "StreamExecutionEnvironment#fromElements(Collection, TypeInformation)",
e);
}
return fromCollection(Arrays.asList(data), typeInfo);
}
/**
* Creates a data stream from the given non-empty collection. The type of the data stream is
* that of the elements in the collection.
*
* <p>The framework will try and determine the exact type from the collection elements. In case
* of generic elements, it may be necessary to manually supply the type information via {@link
* #fromCollection(java.util.Collection, org.apache.flink.api.common.typeinfo.TypeInformation)}.
*
* <p>Note that this operation will result in a non-parallel data stream source, i.e. a data
* stream source with parallelism one.
*
* @param data The collection of elements to create the data stream from.
* @param <OUT> The generic type of the returned data stream.
* @return The data stream representing the given collection
* @deprecated This method will be removed a future release, possibly as early as version 2.0.
* Use {@link #fromData(Collection)} instead.
*/
public <OUT> DataStreamSource<OUT> fromCollection(Collection<OUT> data) {
TypeInformation<OUT> typeInfo = extractTypeInfoFromCollection(data);
return fromCollection(data, typeInfo);
}
/**
* Creates a data stream from the given non-empty collection.
*
* <p>Note that this operation will result in a non-parallel data stream source, i.e., a data
* stream source with parallelism one.
*
* @param data The collection of elements to create the data stream from
* @param typeInfo The TypeInformation for the produced data stream
* @param <OUT> The type of the returned data stream
* @return The data stream representing the given collection
* @deprecated This method will be removed a future release, possibly as early as version 2.0.
* Use {@link #fromData(Collection, TypeInformation)} instead.
*/
public <OUT> DataStreamSource<OUT> fromCollection(
Collection<OUT> data, TypeInformation<OUT> typeInfo) {
Preconditions.checkNotNull(data, "Collection must not be null");
// must not have null elements and mixed elements
FromElementsFunction.checkCollection(data, typeInfo.getTypeClass());
SourceFunction<OUT> function = new FromElementsFunction<>(data);
return addSource(function, "Collection Source", typeInfo, Boundedness.BOUNDED)
.setParallelism(1);
}
/**
* Creates a data stream from the given iterator.
*
* <p>Because the iterator will remain unmodified until the actual execution happens, the type
* of data returned by the iterator must be given explicitly in the form of the type class (this
* is due to the fact that the Java compiler erases the generic type information).
*
* <p>Note that this operation will result in a non-parallel data stream source, i.e., a data
* stream source with a parallelism of one.
*
* @param data The iterator of elements to create the data stream from
* @param type The | type |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java | {
"start": 85989,
"end": 87578
} | class ____ extends
BaseTransition {
@Override
public void
transition(RMAppAttemptImpl appAttempt, RMAppAttemptEvent event) {
RMAppAttemptContainerFinishedEvent containerFinishedEvent =
(RMAppAttemptContainerFinishedEvent) event;
ContainerStatus containerStatus =
containerFinishedEvent.getContainerStatus();
// If this is the AM container, it means the AM container is finished,
// but we are not yet acknowledged that the final state has been saved.
// Thus, we still return FINAL_SAVING state here.
if (appAttempt.masterContainer.getId().equals(
containerStatus.getContainerId())) {
appAttempt.amContainerFinished(appAttempt, containerFinishedEvent);
if (appAttempt.targetedFinalState.equals(RMAppAttemptState.FAILED)
|| appAttempt.targetedFinalState.equals(RMAppAttemptState.KILLED)) {
// ignore Container_Finished Event if we were supposed to reach
// FAILED/KILLED state.
return;
}
// pass in the earlier AMUnregistered Event also, as this is needed for
// AMFinishedAfterFinalSavingTransition later on
appAttempt.rememberTargetTransitions(event,
new AMFinishedAfterFinalSavingTransition(
appAttempt.eventCausingFinalSaving), RMAppAttemptState.FINISHED);
return;
}
// Add all finished containers so that they can be acked to NM.
addJustFinishedContainer(appAttempt, containerFinishedEvent);
}
}
private static | ContainerFinishedAtFinalSavingTransition |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/issue_1000/Issue1085.java | {
"start": 263,
"end": 506
} | class ____ extends TestCase {
public void test_for_issue() throws Exception {
Model model = (Model) JSON.parseObject("{\"id\":123}", AbstractModel.class);
assertEquals(123, model.id);
}
public static abstract | Issue1085 |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/builder/ignore/BuilderIgnoringMapper.java | {
"start": 464,
"end": 896
} | interface ____ {
BuilderIgnoringMapper INSTANCE = Mappers.getMapper( BuilderIgnoringMapper.class );
@InheritConfiguration(name = "mapBase")
@Mapping( target = "lastName" )
Person mapWithIgnoringBase(PersonDto source);
@BeanMapping(ignoreByDefault = true)
@Mapping(target = "name", source = "name")
Person mapOnlyWithExplicit(PersonDto source);
Person mapAll(PersonDto source);
}
| BuilderIgnoringMapper |
java | apache__dubbo | dubbo-common/src/main/java/org/apache/dubbo/config/AbstractInterfaceConfig.java | {
"start": 13409,
"end": 32185
} | interface ____ dubbo 3.2
* rest and triple protocol allow the service is implement class
*/
protected void checkInterface() {}
protected boolean verifyMethodConfig(
MethodConfig methodConfig, Class<?> interfaceClass, boolean ignoreInvalidMethodConfig) {
String methodName = methodConfig.getName();
if (StringUtils.isEmpty(methodName)) {
String msg = "<dubbo:method> name attribute is required! Please check: " + "<dubbo:service interface=\""
+ interfaceName + "\" ... >" + "<dubbo:method name=\"\" ... /></<dubbo:reference>";
if (ignoreInvalidMethodConfig) {
logger.warn(CONFIG_NO_METHOD_FOUND, "", "", msg);
return false;
} else {
throw new IllegalStateException(msg);
}
}
boolean hasMethod = Arrays.stream(interfaceClass.getMethods())
.anyMatch(method -> method.getName().equals(methodName));
if (!hasMethod) {
String msg = "Found invalid method config, the interface " + interfaceClass.getName()
+ " not found method \"" + methodName + "\" : [" + methodConfig + "]";
if (ignoreInvalidMethodConfig) {
logger.warn(CONFIG_NO_METHOD_FOUND, "", "", msg);
return false;
} else {
if (!isNeedCheckMethod()) {
msg = "Generic call: " + msg;
logger.warn(CONFIG_NO_METHOD_FOUND, "", "", msg);
} else {
throw new IllegalStateException(msg);
}
}
}
return true;
}
private ArgumentConfig getArgumentByIndex(MethodConfig methodConfig, int argIndex) {
if (methodConfig.getArguments() != null && methodConfig.getArguments().size() > 0) {
for (ArgumentConfig argument : methodConfig.getArguments()) {
if (argument.getIndex() != null && argument.getIndex() == argIndex) {
return argument;
}
}
}
return null;
}
@Transient
protected boolean isNeedCheckMethod() {
return true;
}
private boolean hasArgumentConfigProps(Map<String, String> configProperties, String methodName, int argIndex) {
String argPrefix = methodName + "." + argIndex + ".";
return ConfigurationUtils.hasSubProperties(configProperties, argPrefix);
}
protected MethodConfig getMethodByName(String name) {
if (methods != null && methods.size() > 0) {
for (MethodConfig methodConfig : methods) {
if (StringUtils.isEquals(methodConfig.getName(), name)) {
return methodConfig;
}
}
}
return null;
}
/**
* Legitimacy check of stub, note that: the local will deprecated, and replace with <code>stub</code>
*
* @param interfaceClass for provider side, it is the {@link Class} of the service that will be exported; for consumer
* side, it is the {@link Class} of the remote service interface
*/
protected void checkStubAndLocal(Class<?> interfaceClass) {
verifyStubAndLocal(local, "Local", interfaceClass);
verifyStubAndLocal(stub, "Stub", interfaceClass);
}
private void verifyStubAndLocal(String className, String label, Class<?> interfaceClass) {
if (ConfigUtils.isNotEmpty(className)) {
Class<?> localClass = ConfigUtils.isDefault(className)
? ReflectUtils.forName(interfaceClass.getName() + label)
: ReflectUtils.forName(className);
verify(interfaceClass, localClass);
}
}
private void verify(Class<?> interfaceClass, Class<?> localClass) {
if (!interfaceClass.isAssignableFrom(localClass)) {
throw new IllegalStateException("The local implementation class " + localClass.getName()
+ " not implement interface " + interfaceClass.getName());
}
try {
// Check if the localClass a constructor with parameter whose type is interfaceClass
ReflectUtils.findConstructor(localClass, interfaceClass);
} catch (NoSuchMethodException e) {
throw new IllegalStateException("No such constructor \"public " + localClass.getSimpleName() + "("
+ interfaceClass.getName() + ")\" in local implementation class " + localClass.getName());
}
}
/**
 * Resolves the effective registry list for this config.
 *
 * <p>With no explicit {@code registryIds}, falls back to the application-level
 * defaults (only when no registries were set directly on this config).
 * Otherwise each comma-separated id is mapped to a globally registered
 * {@link RegistryConfig}, failing fast on unknown ids. Duplicate ids are
 * added only once.
 */
private void convertRegistryIdsToRegistries() {
    computeValidRegistryIds();
    if (StringUtils.isEmpty(registryIds)) {
        if (CollectionUtils.isEmpty(registries)) {
            // Defensive copy so later mutation of this config's list cannot
            // affect the ConfigManager's default list.
            setRegistries(new ArrayList<>(getConfigManager().getDefaultRegistries()));
        }
    } else {
        String[] ids = COMMA_SPLIT_PATTERN.split(registryIds);
        List<RegistryConfig> tmpRegistries = new ArrayList<>();
        // Plain loop instead of Arrays.stream(...).forEach with side effects;
        // Optional resolved via orElseThrow instead of isPresent()/get().
        for (String id : ids) {
            if (tmpRegistries.stream().noneMatch(reg -> reg.getId().equals(id))) {
                RegistryConfig registryConfig = getConfigManager()
                        .getRegistry(id)
                        .orElseThrow(() -> new IllegalStateException("Registry not found: " + id));
                tmpRegistries.add(registryConfig);
            }
        }
        setRegistries(tmpRegistries);
    }
}
/**
 * True when this config carries no registry settings of its own —
 * neither an explicit registry list nor a registry-id string.
 */
protected boolean notHasSelfRegistryProperty() {
    boolean noRegistryList = CollectionUtils.isEmpty(registries);
    boolean noRegistryIds = StringUtils.isEmpty(registryIds);
    return noRegistryList && noRegistryIds;
}
// Fills in unset compound references (application, module, registries,
// registryIds, monitor) from fallback sources, consulted in order:
// the given interface-level config, then this config's module, then its
// application. Each fallback applies only while the corresponding value is
// still unset/empty, so earlier (more specific) sources win.
protected void completeCompoundConfigs(AbstractInterfaceConfig interfaceConfig) {
if (interfaceConfig != null) {
if (application == null) {
setApplication(interfaceConfig.getApplication());
}
if (module == null) {
setModule(interfaceConfig.getModule());
}
if (notHasSelfRegistryProperty()) {
setRegistries(interfaceConfig.getRegistries());
setRegistryIds(interfaceConfig.getRegistryIds());
}
if (monitor == null) {
setMonitor(interfaceConfig.getMonitor());
}
}
if (module != null) {
if (notHasSelfRegistryProperty()) {
setRegistries(module.getRegistries());
}
if (monitor == null) {
setMonitor(module.getMonitor());
}
}
if (application != null) {
if (notHasSelfRegistryProperty()) {
setRegistries(application.getRegistries());
setRegistryIds(application.getRegistryIds());
}
if (monitor == null) {
setMonitor(application.getMonitor());
}
}
}
// Inherits registry settings from the application config when this config
// declares none of its own. Called by convertRegistryIdsToRegistries()
// before the id-to-config resolution step.
protected void computeValidRegistryIds() {
if (application != null && notHasSelfRegistryProperty()) {
setRegistries(application.getRegistries());
setRegistryIds(application.getRegistryIds());
}
}
/**
 * @return the configured local implementation class name (legacy setting)
 * @deprecated Replaced by {@code getStub()}
 */
@Deprecated
public String getLocal() {
return local;
}
/**
 * @param local stored as its string form ("true"/"false"); {@code null} clears the setting
 * @deprecated Replaced by {@code setStub(Boolean)}
 */
@Deprecated
public void setLocal(Boolean local) {
if (local == null) {
setLocal((String) null);
} else {
setLocal(local.toString());
}
}
/**
 * @param local local implementation class name, or a default marker
 *              (resolved via {@code ConfigUtils.isDefault} during validation)
 * @deprecated Replaced by {@code setStub(String)}
 */
@Deprecated
public void setLocal(String local) {
this.local = local;
}
public String getStub() {
return stub;
}
// Boolean overload: the value is stored as its string form ("true"/"false");
// null clears the setting.
public void setStub(Boolean stub) {
if (stub == null) {
setStub((String) null);
} else {
setStub(stub.toString());
}
}
public void setStub(String stub) {
this.stub = stub;
}
public String getCluster() {
return cluster;
}
public void setCluster(String cluster) {
this.cluster = cluster;
}
// In a native image the proxy type is forced to DEFAULT_NATIVE_PROXY,
// regardless of what was configured.
public String getProxy() {
if (NativeDetector.inNativeImage()) {
return DEFAULT_NATIVE_PROXY;
} else {
return this.proxy;
}
}
// In a native image the given value is ignored: the field is pinned to
// DEFAULT_NATIVE_PROXY and the adaptive compiler default is updated as well.
public void setProxy(String proxy) {
if (NativeDetector.inNativeImage()) {
this.proxy = DEFAULT_NATIVE_PROXY;
AdaptiveCompiler.setDefaultCompiler(DEFAULT_NATIVE_PROXY);
} else {
this.proxy = proxy;
}
}
public Integer getConnections() {
return connections;
}
public void setConnections(Integer connections) {
this.connections = connections;
}
// Exported to URL parameters under REFERENCE_FILTER_KEY; append=true
// presumably merges with existing values rather than replacing them —
// confirm against @Parameter's processing.
@Parameter(key = REFERENCE_FILTER_KEY, append = true)
public String getFilter() {
return filter;
}
public void setFilter(String filter) {
this.filter = filter;
}
// Exported to URL parameters under INVOKER_LISTENER_KEY (same append
// semantics as the filter above).
@Parameter(key = INVOKER_LISTENER_KEY, append = true)
public String getListener() {
return listener;
}
public void setListener(String listener) {
this.listener = listener;
}
public String getLayer() {
return layer;
}
public void setLayer(String layer) {
this.layer = layer;
}
/**
 * Always use the global ApplicationConfig: prefers the explicitly set
 * application, otherwise requires a globally registered one
 * ({@code getApplicationOrElseThrow} fails when none is configured).
 */
public ApplicationConfig getApplication() {
if (application != null) {
return application;
}
return getConfigManager().getApplicationOrElseThrow();
}
/**
 * Side effect: also registers the application with the global ConfigManager.
 *
 * @param application the application config to attach
 * @deprecated Use {@link org.apache.dubbo.config.AbstractConfig#setScopeModel(ScopeModel)}
 */
@Deprecated
public void setApplication(ApplicationConfig application) {
this.application = application;
if (application != null) {
getConfigManager().setApplication(application);
}
}
// Prefers the explicitly set module, otherwise falls back to the module
// config manager (may return null).
public ModuleConfig getModule() {
if (module != null) {
return module;
}
return getModuleConfigManager().getModule().orElse(null);
}
/**
 * Side effect: also registers the module with the module config manager.
 *
 * @param module the module config to attach
 * @deprecated Use {@link org.apache.dubbo.config.AbstractConfig#setScopeModel(ScopeModel)}
 */
@Deprecated
public void setModule(ModuleConfig module) {
this.module = module;
if (module != null) {
getModuleConfigManager().setModule(module);
}
}
// Convenience accessor: the first configured registry, or null when none.
public RegistryConfig getRegistry() {
return CollectionUtils.isEmpty(registries) ? null : registries.get(0);
}
// Replaces the registry list with the single given registry; a null argument
// clears the list entirely.
public void setRegistry(RegistryConfig registry) {
if (registry != null) {
List<RegistryConfig> registries = new ArrayList<>(1);
registries.add(registry);
setRegistries(registries);
} else {
this.registries = null;
}
}
// NOTE(review): exposes the internal mutable list; callers can modify it.
public List<RegistryConfig> getRegistries() {
return registries;
}
// Stores the caller's list directly (no copy); the unchecked cast is safe
// for reads since the list only ever yields RegistryConfig subtypes.
@SuppressWarnings({"unchecked"})
public void setRegistries(List<? extends RegistryConfig> registries) {
this.registries = (List<RegistryConfig>) registries;
}
// Raw comma-separated registry id string (split by COMMA_SPLIT_PATTERN during
// resolution); excluded from URL parameter export.
@Parameter(excluded = true)
public String getRegistryIds() {
return registryIds;
}
public void setRegistryIds(String registryIds) {
this.registryIds = registryIds;
}
public List<MethodConfig> getMethods() {
return methods;
}
// Takes a defensive copy so later changes to the caller's list are not
// reflected here; null clears the list.
public void setMethods(List<? extends MethodConfig> methods) {
this.methods = (methods != null) ? new ArrayList<>(methods) : null;
}
/**
 * Deserializes method configs from JSON strings. It is only used in native
 * scenarios to get methodConfigs.
 *
 * @param methodsJson one JSON string per {@code MethodConfig}; {@code null} clears the list
 */
public void setMethodsJson(List<String> methodsJson) {
if (methodsJson != null) {
this.methods = new ArrayList<>();
methodsJson.forEach(
(methodConfigJson) -> methods.add(JsonUtils.toJavaObject(methodConfigJson, MethodConfig.class)));
} else {
this.methods = null;
}
}
// Appends a single method config, lazily creating the backing list.
public void addMethod(MethodConfig methodConfig) {
if (this.methods == null) {
this.methods = new ArrayList<>();
}
this.methods.add(methodConfig);
}
// Prefers the monitor set on this config, falling back to the global one
// from the ConfigManager; may return null when neither exists.
public MonitorConfig getMonitor() {
if (monitor != null) {
return monitor;
}
// FIXME: instead of return null, we should set default monitor when getMonitor() return null in ConfigManager
return getConfigManager().getMonitor().orElse(null);
}
/**
 * Convenience overload: wraps the address string in a new MonitorConfig.
 *
 * @deprecated Use {@link org.apache.dubbo.config.context.ConfigManager#setMonitor(MonitorConfig)}
 */
@Deprecated
public void setMonitor(String monitor) {
setMonitor(new MonitorConfig(monitor));
}
/**
 * Side effect: also registers the monitor with the global ConfigManager.
 *
 * @deprecated Use {@link org.apache.dubbo.config.context.ConfigManager#setMonitor(MonitorConfig)}
 */
@Deprecated
public void setMonitor(MonitorConfig monitor) {
this.monitor = monitor;
if (monitor != null) {
getConfigManager().setMonitor(monitor);
}
}
public String getOwner() {
return owner;
}
public void setOwner(String owner) {
this.owner = owner;
}
/**
 * Returns the config center set on this config, or the first globally
 * registered one; {@code null} when none exist.
 *
 * @deprecated Use {@link org.apache.dubbo.config.context.ConfigManager#getConfigCenter(String)}
 */
@Deprecated
public ConfigCenterConfig getConfigCenter() {
if (configCenter != null) {
return configCenter;
}
Collection<ConfigCenterConfig> configCenterConfigs = getConfigManager().getConfigCenters();
if (CollectionUtils.isNotEmpty(configCenterConfigs)) {
return configCenterConfigs.iterator().next();
}
return null;
}
/**
 * Side effect: also registers the config center with the global ConfigManager.
 *
 * @deprecated Use {@link org.apache.dubbo.config.context.ConfigManager#addConfigCenter(ConfigCenterConfig)}
 */
@Deprecated
public void setConfigCenter(ConfigCenterConfig configCenter) {
this.configCenter = configCenter;
if (configCenter != null) {
getConfigManager().addConfigCenter(configCenter);
}
}
public Integer getCallbacks() {
return callbacks;
}
public void setCallbacks(Integer callbacks) {
this.callbacks = callbacks;
}
// onconnect/ondisconnect: presumably callback method names invoked on
// connection events — confirm against the protocol layer that reads them.
public String getOnconnect() {
return onconnect;
}
public void setOnconnect(String onconnect) {
this.onconnect = onconnect;
}
public String getOndisconnect() {
return ondisconnect;
}
public void setOndisconnect(String ondisconnect) {
this.ondisconnect = ondisconnect;
}
public String getScope() {
return scope;
}
public void setScope(String scope) {
this.scope = scope;
}
/**
 * Returns the metadata report set on this config, or the first globally
 * registered one; {@code null} when none exist.
 *
 * @deprecated Use {@link org.apache.dubbo.config.context.ConfigManager#getMetadataConfigs()}
 */
@Deprecated
public MetadataReportConfig getMetadataReportConfig() {
if (metadataReportConfig != null) {
return metadataReportConfig;
}
Collection<MetadataReportConfig> metadataReportConfigs =
getConfigManager().getMetadataConfigs();
if (CollectionUtils.isNotEmpty(metadataReportConfigs)) {
return metadataReportConfigs.iterator().next();
}
return null;
}
/**
 * Side effect: also registers the metadata report with the global ConfigManager.
 *
 * @deprecated Use {@link org.apache.dubbo.config.context.ConfigManager#addMetadataReport(MetadataReportConfig)}
 */
@Deprecated
public void setMetadataReportConfig(MetadataReportConfig metadataReportConfig) {
this.metadataReportConfig = metadataReportConfig;
if (metadataReportConfig != null) {
getConfigManager().addMetadataReport(metadataReportConfig);
}
}
// Exported to URL parameters under TAG_KEY.
@Parameter(key = TAG_KEY)
public String getTag() {
return tag;
}
public void setTag(String tag) {
this.tag = tag;
}
public Boolean getAuth() {
return auth;
}
public void setAuth(Boolean auth) {
this.auth = auth;
}
public String getAuthenticator() {
return authenticator;
}
// NOTE(review): fluent setter (returns this), unlike the void setters used
// elsewhere in this class — kept for backward compatibility.
public AbstractInterfaceConfig setAuthenticator(String authenticator) {
this.authenticator = authenticator;
return this;
}
public String getUsername() {
return username;
}
public AbstractInterfaceConfig setUsername(String username) {
this.username = username;
return this;
}
public String getPassword() {
return password;
}
public AbstractInterfaceConfig setPassword(String password) {
this.password = password;
return this;
}
// Always resolved from the global ConfigManager (no local field);
// null when no SSL config is registered.
public SslConfig getSslConfig() {
return getConfigManager().getSsl().orElse(null);
}
public Boolean getSingleton() {
return singleton;
}
public void setSingleton(Boolean singleton) {
this.singleton = singleton;
}
// Copies the resolved version, group, and interface name into the
// ServiceMetadata, using the given interface-level config as a fallback
// source for version and group (see getVersion/getGroup overloads below).
protected void initServiceMetadata(AbstractInterfaceConfig interfaceConfig) {
serviceMetadata.setVersion(getVersion(interfaceConfig));
serviceMetadata.setGroup(getGroup(interfaceConfig));
serviceMetadata.setDefaultGroup(getGroup(interfaceConfig));
serviceMetadata.setServiceInterfaceName(getInterface());
}
/**
 * Resolves the effective group: this config's own group wins; when it is
 * empty, falls back to the given interface-level config (if any).
 */
public String getGroup(AbstractInterfaceConfig interfaceConfig) {
    String ownGroup = getGroup();
    if (StringUtils.isEmpty(ownGroup) && interfaceConfig != null) {
        return interfaceConfig.getGroup();
    }
    return ownGroup;
}
/**
 * Resolves the effective version: this config's own version wins; when it is
 * empty, falls back to the given interface-level config (if any).
 */
public String getVersion(AbstractInterfaceConfig interfaceConfig) {
    String ownVersion = getVersion();
    if (StringUtils.isEmpty(ownVersion) && interfaceConfig != null) {
        return interfaceConfig.getVersion();
    }
    return ownVersion;
}
public String getVersion() {
return version;
}
public void setVersion(String version) {
this.version = version;
}
public String getGroup() {
return group;
}
public void setGroup(String group) {
this.group = group;
}
// Property name is "interface"; backed by the interfaceName field.
public String getInterface() {
return interfaceName;
}
public void setInterface(String interfaceName) {
this.interfaceName = interfaceName;
}
// @Transient: excluded from config property export — confirm the exact
// annotation semantics against its declaration.
@Transient
public ClassLoader getInterfaceClassLoader() {
return interfaceClassLoader;
}
public void setInterfaceClassLoader(ClassLoader interfaceClassLoader) {
this.interfaceClassLoader = interfaceClassLoader;
}
}
| since |
java | apache__flink | flink-core-api/src/main/java/org/apache/flink/api/common/state/AppendingState.java | {
"start": 1719,
"end": 3065
} | interface ____<IN, OUT> extends State {
/**
* Returns the current value for the state. When the state is not partitioned the returned value
* is the same for all inputs in a given operator instance. If state partitioning is applied,
* the value returned depends on the current operator input, as the operator maintains an
* independent state for each partition.
*
* <p><b>NOTE TO IMPLEMENTERS:</b> if the state is empty, then this method should return {@code
* null}.
*
* @return The operator state value corresponding to the current input or {@code null} if the
* state is empty.
* @throws Exception Thrown if the system cannot access the state.
*/
OUT get() throws Exception;
/**
* Updates the operator state accessible by {@link #get()} by adding the given value to the list
* of values. The next time {@link #get()} is called (for the same state partition) the returned
* state will represent the updated list.
*
* <p>If null is passed in, the behaviour is undefined (implementation related). TODO: An
* unified behaviour across all sub-classes.
*
* @param value The new value for the state.
* @throws Exception Thrown if the system cannot access the state.
*/
void add(IN value) throws Exception;
}
| AppendingState |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMStateStoreService.java | {
"start": 8480,
"end": 8702
} | class ____ {
RecoveryIterator<DeletionServiceDeleteTaskProto> it = null;
public RecoveryIterator<DeletionServiceDeleteTaskProto> getIterator(){
return it;
}
}
public static | RecoveredDeletionServiceState |
java | apache__maven | compat/maven-model-builder/src/main/java/org/apache/maven/model/interpolation/reflection/ClassMap.java | {
"start": 10801,
"end": 11396
} | interface ____ this
* method's declaring class. This counterpart method is publicly callable.
*
* @param method a method whose publicly callable counterpart is requested.
* @return the publicly callable counterpart method. Note that if the parameter
* method is itself declared by a public class, this method is an identity
* function.
*/
private static Method getPublicMethod(Method method) {
Class<?> clazz = method.getDeclaringClass();
// Short circuit for (hopefully the majority of) cases where the declaring
// | of |
java | apache__camel | components/camel-opentelemetry/src/main/java/org/apache/camel/opentelemetry/OpenTelemetryInstrumentedThreadPoolFactory.java | {
"start": 1371,
"end": 2682
} | class ____ extends DefaultThreadPoolFactory implements ThreadPoolFactory {
@Override
public ExecutorService newCachedThreadPool(ThreadFactory threadFactory) {
return Context.taskWrapping(super.newCachedThreadPool(threadFactory));
}
@Override
public ExecutorService newThreadPool(
int corePoolSize,
int maxPoolSize,
long keepAliveTime,
TimeUnit timeUnit,
int maxQueueSize,
boolean allowCoreThreadTimeOut,
RejectedExecutionHandler rejectedExecutionHandler,
ThreadFactory threadFactory)
throws IllegalArgumentException {
ExecutorService executorService = super.newThreadPool(
corePoolSize,
maxPoolSize,
keepAliveTime,
timeUnit,
maxQueueSize,
allowCoreThreadTimeOut,
rejectedExecutionHandler,
threadFactory);
return Context.taskWrapping(executorService);
}
@Override
public ScheduledExecutorService newScheduledThreadPool(ThreadPoolProfile profile, ThreadFactory threadFactory) {
return Context.taskWrapping(super.newScheduledThreadPool(profile, threadFactory));
}
}
| OpenTelemetryInstrumentedThreadPoolFactory |
java | apache__hadoop | hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/SleepJob.java | {
"start": 5107,
"end": 6372
} | class ____
extends Mapper<LongWritable, LongWritable, GridmixKey, NullWritable> {
@Override
public void map(LongWritable key, LongWritable value, Context context)
throws IOException, InterruptedException {
context.setStatus("Sleeping... " + value.get() + " ms left");
long now = System.currentTimeMillis();
if (now < key.get()) {
TimeUnit.MILLISECONDS.sleep(key.get() - now);
}
}
@Override
public void cleanup(Context context)
throws IOException, InterruptedException {
final int nReds = context.getNumReduceTasks();
if (nReds > 0) {
final SleepSplit split = (SleepSplit) context.getInputSplit();
int id = split.getId();
final int nMaps = split.getNumMaps();
//This is a hack to pass the sleep duration via Gridmix key
//TODO: We need to come up with better solution for this.
final GridmixKey key = new GridmixKey(GridmixKey.REDUCE_SPEC, 0, 0L);
for (int i = id, idx = 0; i < nReds; i += nMaps) {
key.setPartition(i);
key.setReduceOutputBytes(split.getReduceDurations(idx++));
id += nReds;
context.write(key, NullWritable.get());
}
}
}
}
public static | SleepMapper |
java | dropwizard__dropwizard | dropwizard-e2e/src/main/java/com/example/forms/FormsResource.java | {
"start": 459,
"end": 1188
} | class ____ {
@POST
@Path("uploadFile")
@Consumes(MediaType.MULTIPART_FORM_DATA)
@Produces(MediaType.TEXT_PLAIN)
public StreamingOutput uploadFile(@FormDataParam("file") InputStream file,
@FormDataParam("file") FormDataContentDisposition fileDisposition) {
// Silly example that echoes back the file name and the contents
return output -> {
output.write(String.format("%s:\n", fileDisposition.getFileName()).getBytes(UTF_8));
byte[] buffer = new byte[1024];
int length;
while ((length = file.read(buffer)) != -1) {
output.write(buffer, 0, length);
}
};
}
}
| FormsResource |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java | {
"start": 1869,
"end": 5765
} | class ____ {
private static final Logger LOG =
LoggerFactory.getLogger(TestLargeDirectoryDelete.class);
private static final Configuration CONF = new HdfsConfiguration();
private static final int TOTAL_BLOCKS = 10000;
private MiniDFSCluster mc = null;
private int createOps = 0;
private int lockOps = 0;
static {
CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1);
CONF.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 1);
}
/** create a file with a length of <code>filelen</code> */
private void createFile(final String fileName, final long filelen) throws IOException {
FileSystem fs = mc.getFileSystem();
Path filePath = new Path(fileName);
DFSTestUtil.createFile(fs, filePath, filelen, (short) 1, 0);
}
/** Create a large number of directories and files */
private void createFiles() throws IOException {
Random rand = new Random();
// Create files in a directory with random depth
// ranging from 0-10.
for (int i = 0; i < TOTAL_BLOCKS; i+=100) {
String filename = "/root/";
int dirs = rand.nextInt(10); // Depth of the directory
for (int j=i; j >=(i-dirs); j--) {
filename += j + "/";
}
filename += "file" + i;
createFile(filename, 100);
}
}
private int getBlockCount() {
assertNotNull(mc, "Null cluster");
assertNotNull(mc.getNameNode(), "No Namenode in cluster");
FSNamesystem namesystem = mc.getNamesystem();
assertNotNull(namesystem, "Null Namesystem in cluster");
assertNotNull(namesystem.getBlockManager(), "Null Namesystem.blockmanager");
return (int) namesystem.getBlocksTotal();
}
/** Run multiple threads doing simultaneous operations on the namenode
* while a large directory is being deleted.
*/
private void runThreads() throws Throwable {
final TestThread threads[] = new TestThread[2];
// Thread for creating files
threads[0] = new TestThread() {
@Override
protected void execute() throws Throwable {
while(live) {
try {
int blockcount = getBlockCount();
if (blockcount < TOTAL_BLOCKS && blockcount > 0) {
String file = "/tmp" + createOps;
createFile(file, 1);
mc.getFileSystem().delete(new Path(file), true);
createOps++;
}
} catch (IOException ex) {
LOG.info("createFile exception ", ex);
break;
}
}
}
};
// Thread that periodically acquires the FSNamesystem lock
threads[1] = new TestThread() {
@Override
protected void execute() throws Throwable {
while(live) {
try {
int blockcount = getBlockCount();
if (blockcount < TOTAL_BLOCKS && blockcount > 0) {
mc.getNamesystem().writeLock(RwLockMode.GLOBAL);
try {
lockOps++;
} finally {
mc.getNamesystem().writeUnlock(RwLockMode.GLOBAL, "runThreads");
}
Thread.sleep(1);
}
} catch (InterruptedException ex) {
LOG.info("lockOperation exception ", ex);
break;
}
}
}
};
threads[0].start();
threads[1].start();
final long start = Time.now();
mc.getFileSystem().delete(new Path("/root"), true); // recursive delete
BlockManagerTestUtil.waitForMarkedDeleteQueueIsEmpty(
mc.getNamesystem(0).getBlockManager());
final long end = Time.now();
threads[0].endThread();
threads[1].endThread();
LOG.info("Deletion took " + (end - start) + "msecs");
LOG.info("createOperations " + createOps);
LOG.info("lockOperations " + lockOps);
assertTrue(lockOps + createOps > 0);
threads[0].rethrow();
threads[1].rethrow();
}
/**
* An abstract | TestLargeDirectoryDelete |
java | apache__kafka | connect/runtime/src/test/java/org/apache/kafka/connect/util/KafkaBasedLogTest.java | {
"start": 3659,
"end": 26188
} | class ____ {
private static final String TOPIC = "connect-log";
private static final TopicPartition TP0 = new TopicPartition(TOPIC, 0);
private static final TopicPartition TP1 = new TopicPartition(TOPIC, 1);
private static final Map<String, Object> PRODUCER_PROPS = new HashMap<>();
static {
PRODUCER_PROPS.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, "broker1:9092,broker2:9093");
PRODUCER_PROPS.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
PRODUCER_PROPS.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
}
private static final Map<String, Object> CONSUMER_PROPS = new HashMap<>();
static {
CONSUMER_PROPS.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, "broker1:9092,broker2:9093");
CONSUMER_PROPS.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
CONSUMER_PROPS.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
}
private static final Set<TopicPartition> CONSUMER_ASSIGNMENT = Set.of(TP0, TP1);
private static final Map<String, String> FIRST_SET = new HashMap<>();
static {
FIRST_SET.put("key", "value");
FIRST_SET.put(null, null);
}
private static final Node LEADER = new Node(1, "broker1", 9092);
private static final Node REPLICA = new Node(1, "broker2", 9093);
private static final PartitionInfo TPINFO0 = new PartitionInfo(TOPIC, 0, LEADER, new Node[]{REPLICA}, new Node[]{REPLICA});
private static final PartitionInfo TPINFO1 = new PartitionInfo(TOPIC, 1, LEADER, new Node[]{REPLICA}, new Node[]{REPLICA});
private static final String TP0_KEY = "TP0KEY";
private static final String TP1_KEY = "TP1KEY";
private static final String TP0_VALUE = "VAL0";
private static final String TP1_VALUE = "VAL1";
private static final String TP0_VALUE_NEW = "VAL0_NEW";
private static final String TP1_VALUE_NEW = "VAL1_NEW";
private final Time time = new MockTime();
private KafkaBasedLog<String, String> store;
@Mock
private Consumer<TopicAdmin> initializer;
@Mock
private KafkaProducer<String, String> producer;
private TopicAdmin admin;
private final Supplier<TopicAdmin> topicAdminSupplier = () -> admin;
private final Predicate<TopicPartition> predicate = ignored -> true;
private MockConsumer<String, String> consumer;
private final Map<TopicPartition, List<ConsumerRecord<String, String>>> consumedRecords = new HashMap<>();
private final Callback<ConsumerRecord<String, String>> consumedCallback = (error, record) -> {
TopicPartition partition = new TopicPartition(record.topic(), record.partition());
List<ConsumerRecord<String, String>> records = consumedRecords.computeIfAbsent(partition, k -> new ArrayList<>());
records.add(record);
};
@BeforeEach
public void setUp() {
store = new KafkaBasedLog<>(TOPIC, PRODUCER_PROPS, CONSUMER_PROPS, topicAdminSupplier, consumedCallback, time, initializer) {
@Override
protected KafkaProducer<String, String> createProducer() {
return producer;
}
@Override
protected MockConsumer<String, String> createConsumer() {
return consumer;
}
};
consumer = new MockConsumer<>(AutoOffsetResetStrategy.EARLIEST.name());
consumer.updatePartitions(TOPIC, List.of(TPINFO0, TPINFO1));
Map<TopicPartition, Long> beginningOffsets = new HashMap<>();
beginningOffsets.put(TP0, 0L);
beginningOffsets.put(TP1, 0L);
consumer.updateBeginningOffsets(beginningOffsets);
}
@Test
public void testStartStop() {
Map<TopicPartition, Long> endOffsets = new HashMap<>();
endOffsets.put(TP0, 0L);
endOffsets.put(TP1, 0L);
consumer.updateEndOffsets(endOffsets);
store.start();
assertEquals(CONSUMER_ASSIGNMENT, consumer.assignment());
store.stop();
verifyStartAndStop();
}
@Test
public void testReloadOnStart() throws Exception {
Map<TopicPartition, Long> endOffsets = new HashMap<>();
endOffsets.put(TP0, 1L);
endOffsets.put(TP1, 1L);
consumer.updateEndOffsets(endOffsets);
final CountDownLatch finishedLatch = new CountDownLatch(1);
consumer.schedulePollTask(() -> {
// Use first poll task to setup sequence of remaining responses to polls
// Should keep polling until it reaches current log end offset for all partitions. Should handle
// as many empty polls as needed
consumer.scheduleNopPollTask();
consumer.scheduleNopPollTask();
consumer.schedulePollTask(() ->
consumer.addRecord(new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, TP0_KEY, TP0_VALUE,
new RecordHeaders(), Optional.empty()))
);
consumer.scheduleNopPollTask();
consumer.scheduleNopPollTask();
consumer.schedulePollTask(() ->
consumer.addRecord(new ConsumerRecord<>(TOPIC, 1, 0, 0L, TimestampType.CREATE_TIME, 0, 0, TP1_KEY, TP1_VALUE,
new RecordHeaders(), Optional.empty()))
);
consumer.schedulePollTask(finishedLatch::countDown);
});
store.start();
assertTrue(finishedLatch.await(10000, TimeUnit.MILLISECONDS));
assertEquals(CONSUMER_ASSIGNMENT, consumer.assignment());
assertEquals(2, consumedRecords.size());
assertEquals(TP0_VALUE, consumedRecords.get(TP0).get(0).value());
assertEquals(TP1_VALUE, consumedRecords.get(TP1).get(0).value());
store.stop();
verifyStartAndStop();
}
@Test
public void testReloadOnStartWithNoNewRecordsPresent() {
Map<TopicPartition, Long> endOffsets = new HashMap<>();
endOffsets.put(TP0, 7L);
endOffsets.put(TP1, 7L);
consumer.updateEndOffsets(endOffsets);
// Better test with an advanced offset other than just 0L
consumer.updateBeginningOffsets(endOffsets);
consumer.schedulePollTask(() -> {
// Throw an exception that will not be ignored or handled by Connect framework. In
// reality a misplaced call to poll blocks indefinitely and connect aborts due to
// time outs (for instance via ConnectRestException)
throw new WakeupException();
});
store.start();
assertEquals(CONSUMER_ASSIGNMENT, consumer.assignment());
assertEquals(7L, consumer.position(TP0));
assertEquals(7L, consumer.position(TP1));
store.stop();
verifyStartAndStop();
}
@Test
public void testSendAndReadToEnd() throws Exception {
TestFuture<RecordMetadata> tp0Future = new TestFuture<>();
ProducerRecord<String, String> tp0Record = new ProducerRecord<>(TOPIC, TP0_KEY, TP0_VALUE);
ArgumentCaptor<org.apache.kafka.clients.producer.Callback> callback0 = ArgumentCaptor.forClass(org.apache.kafka.clients.producer.Callback.class);
when(producer.send(eq(tp0Record), callback0.capture())).thenReturn(tp0Future);
TestFuture<RecordMetadata> tp1Future = new TestFuture<>();
ProducerRecord<String, String> tp1Record = new ProducerRecord<>(TOPIC, TP1_KEY, TP1_VALUE);
ArgumentCaptor<org.apache.kafka.clients.producer.Callback> callback1 = ArgumentCaptor.forClass(org.apache.kafka.clients.producer.Callback.class);
when(producer.send(eq(tp1Record), callback1.capture())).thenReturn(tp1Future);
Map<TopicPartition, Long> endOffsets = new HashMap<>();
endOffsets.put(TP0, 0L);
endOffsets.put(TP1, 0L);
consumer.updateEndOffsets(endOffsets);
store.start();
assertEquals(CONSUMER_ASSIGNMENT, consumer.assignment());
assertEquals(0L, consumer.position(TP0));
assertEquals(0L, consumer.position(TP1));
// Set some keys
final AtomicInteger invoked = new AtomicInteger(0);
org.apache.kafka.clients.producer.Callback producerCallback = (metadata, exception) -> invoked.incrementAndGet();
store.send(TP0_KEY, TP0_VALUE, producerCallback);
store.send(TP1_KEY, TP1_VALUE, producerCallback);
assertEquals(0, invoked.get());
tp1Future.resolve((RecordMetadata) null); // Output not used, so safe to not return a real value for testing
callback1.getValue().onCompletion(null, null);
assertEquals(1, invoked.get());
tp0Future.resolve((RecordMetadata) null);
callback0.getValue().onCompletion(null, null);
assertEquals(2, invoked.get());
// Now we should have to wait for the records to be read back when we call readToEnd()
final AtomicBoolean getInvoked = new AtomicBoolean(false);
final FutureCallback<Void> readEndFutureCallback = new FutureCallback<>((error, result) -> getInvoked.set(true));
consumer.schedulePollTask(() -> {
// Once we're synchronized in a poll, start the read to end and schedule the exact set of poll events
// that should follow. This readToEnd call will immediately wakeup this consumer.poll() call without
// returning any data.
Map<TopicPartition, Long> newEndOffsets = new HashMap<>();
newEndOffsets.put(TP0, 2L);
newEndOffsets.put(TP1, 2L);
consumer.updateEndOffsets(newEndOffsets);
store.readToEnd(readEndFutureCallback);
// Should keep polling until it reaches current log end offset for all partitions
consumer.scheduleNopPollTask();
consumer.scheduleNopPollTask();
consumer.scheduleNopPollTask();
consumer.schedulePollTask(() -> {
consumer.addRecord(new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, TP0_KEY, TP0_VALUE,
new RecordHeaders(), Optional.empty()));
consumer.addRecord(new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TP0_KEY, TP0_VALUE_NEW,
new RecordHeaders(), Optional.empty()));
consumer.addRecord(new ConsumerRecord<>(TOPIC, 1, 0, 0L, TimestampType.CREATE_TIME, 0, 0, TP1_KEY, TP1_VALUE,
new RecordHeaders(), Optional.empty()));
});
consumer.schedulePollTask(() ->
consumer.addRecord(new ConsumerRecord<>(TOPIC, 1, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TP1_KEY, TP1_VALUE_NEW,
new RecordHeaders(), Optional.empty())));
// Already have FutureCallback that should be invoked/awaited, so no need for follow up finishedLatch
});
readEndFutureCallback.get(10000, TimeUnit.MILLISECONDS);
assertTrue(getInvoked.get());
assertEquals(2, consumedRecords.size());
assertEquals(2, consumedRecords.get(TP0).size());
assertEquals(TP0_VALUE, consumedRecords.get(TP0).get(0).value());
assertEquals(TP0_VALUE_NEW, consumedRecords.get(TP0).get(1).value());
assertEquals(2, consumedRecords.get(TP1).size());
assertEquals(TP1_VALUE, consumedRecords.get(TP1).get(0).value());
assertEquals(TP1_VALUE_NEW, consumedRecords.get(TP1).get(1).value());
// Cleanup
store.stop();
// Producer flushes when read to log end is called
verify(producer).flush();
verifyStartAndStop();
}
@Test
public void testPollConsumerError() throws Exception {
final CountDownLatch finishedLatch = new CountDownLatch(1);
Map<TopicPartition, Long> endOffsets = new HashMap<>();
endOffsets.put(TP0, 1L);
endOffsets.put(TP1, 1L);
consumer.updateEndOffsets(endOffsets);
consumer.schedulePollTask(() -> {
// Trigger exception
consumer.schedulePollTask(() ->
consumer.setPollException(Errors.COORDINATOR_NOT_AVAILABLE.exception()));
// Should keep polling until it reaches current log end offset for all partitions
consumer.scheduleNopPollTask();
consumer.scheduleNopPollTask();
consumer.schedulePollTask(() -> {
consumer.addRecord(new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, TP0_KEY, TP0_VALUE_NEW,
new RecordHeaders(), Optional.empty()));
consumer.addRecord(new ConsumerRecord<>(TOPIC, 1, 0, 0L, TimestampType.CREATE_TIME, 0, 0, TP0_KEY, TP0_VALUE_NEW,
new RecordHeaders(), Optional.empty()));
});
consumer.schedulePollTask(finishedLatch::countDown);
});
store.start();
assertTrue(finishedLatch.await(10000, TimeUnit.MILLISECONDS));
assertEquals(CONSUMER_ASSIGNMENT, consumer.assignment());
assertEquals(1L, consumer.position(TP0));
store.stop();
verifyStartAndStop();
}
@Test
public void testGetOffsetsConsumerErrorOnReadToEnd() throws Exception {
final CountDownLatch finishedLatch = new CountDownLatch(1);
Map<TopicPartition, Long> endOffsets = new HashMap<>();
endOffsets.put(TP0, 0L);
endOffsets.put(TP1, 0L);
consumer.updateEndOffsets(endOffsets);
store.start();
final AtomicBoolean getInvoked = new AtomicBoolean(false);
final FutureCallback<Void> readEndFutureCallback = new FutureCallback<>((error, result) -> getInvoked.set(true));
consumer.schedulePollTask(() -> {
// Once we're synchronized in a poll, start the read to end and schedule the exact set of poll events
// that should follow. This readToEnd call will immediately wakeup this consumer.poll() call without
// returning any data.
Map<TopicPartition, Long> newEndOffsets = new HashMap<>();
newEndOffsets.put(TP0, 1L);
newEndOffsets.put(TP1, 1L);
consumer.updateEndOffsets(newEndOffsets);
// Set exception to occur when getting offsets to read log to end. It'll be caught in the work thread,
// which will retry and eventually get the correct offsets and read log to end.
consumer.setOffsetsException(new TimeoutException("Failed to get offsets by times"));
store.readToEnd(readEndFutureCallback);
// Should keep polling until it reaches current log end offset for all partitions
consumer.scheduleNopPollTask();
consumer.scheduleNopPollTask();
consumer.schedulePollTask(() -> {
consumer.addRecord(new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, TP0_KEY, TP0_VALUE,
new RecordHeaders(), Optional.empty()));
consumer.addRecord(new ConsumerRecord<>(TOPIC, 1, 0, 0L, TimestampType.CREATE_TIME, 0, 0, TP0_KEY, TP0_VALUE_NEW,
new RecordHeaders(), Optional.empty()));
});
consumer.schedulePollTask(finishedLatch::countDown);
});
readEndFutureCallback.get(10000, TimeUnit.MILLISECONDS);
assertTrue(getInvoked.get());
assertTrue(finishedLatch.await(10000, TimeUnit.MILLISECONDS));
assertEquals(CONSUMER_ASSIGNMENT, consumer.assignment());
assertEquals(1L, consumer.position(TP0));
store.stop();
// Producer flushes when read to log end is called
verify(producer).flush();
verifyStartAndStop();
}
@Test
public void testOffsetReadFailureWhenWorkThreadFails() throws Exception {
RuntimeException exception = new RuntimeException();
Set<TopicPartition> tps = Set.of(TP0, TP1);
Map<TopicPartition, Long> endOffsets = new HashMap<>();
endOffsets.put(TP0, 0L);
endOffsets.put(TP1, 0L);
admin = mock(TopicAdmin.class);
when(admin.endOffsets(eq(tps)))
.thenReturn(endOffsets)
.thenThrow(exception)
.thenReturn(endOffsets);
store.start();
AtomicInteger numSuccesses = new AtomicInteger();
AtomicInteger numFailures = new AtomicInteger();
AtomicReference<FutureCallback<Void>> finalSuccessCallbackRef = new AtomicReference<>();
final FutureCallback<Void> successCallback = new FutureCallback<>((error, result) -> numSuccesses.getAndIncrement());
store.readToEnd(successCallback);
// First log end read should succeed.
successCallback.get(1000, TimeUnit.MILLISECONDS);
// Second log end read fails.
final FutureCallback<Void> firstFailedCallback = new FutureCallback<>((error, result) -> {
numFailures.getAndIncrement();
// We issue another readToEnd call here to simulate the case that more read requests can come in while
// the failure is being handled in the WorkThread. This read request should not be impacted by the outcome of
// the current read request's failure.
final FutureCallback<Void> finalSuccessCallback = new FutureCallback<>((e, r) -> numSuccesses.getAndIncrement());
finalSuccessCallbackRef.set(finalSuccessCallback);
store.readToEnd(finalSuccessCallback);
});
store.readToEnd(firstFailedCallback);
ExecutionException e1 = assertThrows(ExecutionException.class, () -> firstFailedCallback.get(1000, TimeUnit.MILLISECONDS));
assertEquals(exception, e1.getCause());
// Last log read end should succeed.
finalSuccessCallbackRef.get().get(1000, TimeUnit.MILLISECONDS);
assertEquals(2, numSuccesses.get());
assertEquals(1, numFailures.get());
}
@Test
public void testProducerError() {
TestFuture<RecordMetadata> tp0Future = new TestFuture<>();
ProducerRecord<String, String> tp0Record = new ProducerRecord<>(TOPIC, TP0_KEY, TP0_VALUE);
ArgumentCaptor<org.apache.kafka.clients.producer.Callback> callback0 = ArgumentCaptor.forClass(org.apache.kafka.clients.producer.Callback.class);
when(producer.send(eq(tp0Record), callback0.capture())).thenReturn(tp0Future);
Map<TopicPartition, Long> endOffsets = new HashMap<>();
endOffsets.put(TP0, 0L);
endOffsets.put(TP1, 0L);
consumer.updateEndOffsets(endOffsets);
store.start();
assertEquals(CONSUMER_ASSIGNMENT, consumer.assignment());
assertEquals(0L, consumer.position(TP0));
assertEquals(0L, consumer.position(TP1));
final AtomicReference<Throwable> setException = new AtomicReference<>();
store.send(TP0_KEY, TP0_VALUE, (metadata, exception) -> {
assertNull(setException.get()); // Should only be invoked once
setException.set(exception);
});
KafkaException exc = new LeaderNotAvailableException("Error");
tp0Future.resolve(exc);
callback0.getValue().onCompletion(null, exc);
assertNotNull(setException.get());
store.stop();
verifyStartAndStop();
}
@Test
public void testReadEndOffsetsUsingAdmin() {
Set<TopicPartition> tps = Set.of(TP0, TP1);
Map<TopicPartition, Long> endOffsets = new HashMap<>();
endOffsets.put(TP0, 0L);
endOffsets.put(TP1, 0L);
admin = mock(TopicAdmin.class);
when(admin.retryEndOffsets(eq(tps), any(), anyLong())).thenReturn(endOffsets);
when(admin.endOffsets(eq(tps))).thenReturn(endOffsets);
store.start();
assertEquals(endOffsets, store.readEndOffsets(tps, false));
verify(admin).retryEndOffsets(eq(tps), any(), anyLong());
verify(admin).endOffsets(eq(tps));
}
@Test
public void testReadEndOffsetsUsingAdminThatFailsWithUnsupported() {
Set<TopicPartition> tps = Set.of(TP0, TP1);
admin = mock(TopicAdmin.class);
// Getting end offsets using the admin client should fail with unsupported version
when(admin.retryEndOffsets(eq(tps), any(), anyLong())).thenThrow(new UnsupportedVersionException("too old"));
// Falls back to the consumer
Map<TopicPartition, Long> endOffsets = new HashMap<>();
endOffsets.put(TP0, 0L);
endOffsets.put(TP1, 0L);
consumer.updateEndOffsets(endOffsets);
store.start();
assertEquals(endOffsets, store.readEndOffsets(tps, false));
verify(admin).retryEndOffsets(eq(tps), any(), anyLong());
}
@Test
public void testReadEndOffsetsUsingAdminThatFailsWithRetriable() {
Set<TopicPartition> tps = Set.of(TP0, TP1);
Map<TopicPartition, Long> endOffsets = new HashMap<>();
endOffsets.put(TP0, 0L);
endOffsets.put(TP1, 0L);
admin = mock(TopicAdmin.class);
// Getting end offsets upon startup should work fine
when(admin.retryEndOffsets(eq(tps), any(), anyLong())).thenReturn(endOffsets);
// Getting end offsets using the admin client should fail with leader not available
when(admin.endOffsets(eq(tps))).thenThrow(new LeaderNotAvailableException("retry"));
store.start();
assertThrows(LeaderNotAvailableException.class, () -> store.readEndOffsets(tps, false));
verify(admin).retryEndOffsets(eq(tps), any(), anyLong());
verify(admin).endOffsets(eq(tps));
}
@Test
public void testWithExistingClientsStartAndStop() {
admin = mock(TopicAdmin.class);
store = KafkaBasedLog.withExistingClients(TOPIC, consumer, producer, admin, consumedCallback, time, initializer, predicate);
store.start();
store.stop();
verifyStartAndStop();
}
@Test
public void testWithExistingClientsStopOnly() {
admin = mock(TopicAdmin.class);
store = KafkaBasedLog.withExistingClients(TOPIC, consumer, producer, admin, consumedCallback, time, initializer, predicate);
store.stop();
verifyStop();
}
private void verifyStartAndStop() {
verify(initializer).accept(admin);
verifyStop();
assertFalse(store.thread.isAlive());
}
private void verifyStop() {
verify(producer, atLeastOnce()).close();
assertTrue(consumer.closed());
}
}
| KafkaBasedLogTest |
java | elastic__elasticsearch | x-pack/plugin/migrate/src/main/java/org/elasticsearch/system_indices/task/SystemResourceMigrationInfo.java | {
"start": 778,
"end": 3969
} | class ____ implements Comparable<SystemResourceMigrationInfo> permits SystemDataStreamMigrationInfo,
SystemIndexMigrationInfo {
private static final Comparator<SystemResourceMigrationInfo> SAME_CLASS_COMPARATOR = Comparator.comparing(
SystemResourceMigrationInfo::getFeatureName
).thenComparing(SystemResourceMigrationInfo::getCurrentResourceName);
protected final String featureName;
protected final String origin;
protected final SystemIndices.Feature owningFeature;
SystemResourceMigrationInfo(String featureName, String origin, SystemIndices.Feature owningFeature) {
this.featureName = featureName;
this.origin = origin;
this.owningFeature = owningFeature;
}
protected abstract String getCurrentResourceName();
/**
* Gets the name of the feature which owns the index to be migrated.
*/
String getFeatureName() {
return featureName;
}
/**
* Gets the origin that should be used when interacting with this index.
*/
String getOrigin() {
return origin;
}
/**
* Creates a client that's been configured to be able to properly access the system index to be migrated.
*
* @param baseClient The base client to wrap.
* @return An {@link OriginSettingClient} which uses the origin provided by {@link SystemIndexMigrationInfo#getOrigin()}.
*/
Client createClient(Client baseClient) {
return new OriginSettingClient(baseClient, this.getOrigin());
}
abstract Stream<IndexMetadata> getIndices(ProjectMetadata metadata);
@Override
public int compareTo(SystemResourceMigrationInfo o) {
return SAME_CLASS_COMPARATOR.compare(this, o);
}
abstract boolean isCurrentIndexClosed();
/**
* Invokes the pre-migration hook for the feature that owns this index.
* See {@link SystemIndexPlugin#prepareForIndicesMigration(ProjectMetadata, Client, ActionListener)}.
* @param project The project metadata
* @param client For performing any update operations necessary to prepare for the upgrade.
* @param listener Call {@link ActionListener#onResponse(Object)} when preparation for migration is complete.
*/
void prepareForIndicesMigration(ProjectMetadata project, Client client, ActionListener<Map<String, Object>> listener) {
owningFeature.getPreMigrationFunction().prepareForIndicesMigration(project, client, listener);
}
/**
* Invokes the post-migration hooks for the feature that owns this index.
* See {@link SystemIndexPlugin#indicesMigrationComplete(Map, Client, ActionListener)}.
*
* @param metadata The metadata that was passed into the listener by the pre-migration hook.
* @param client For performing any update operations necessary to prepare for the upgrade.
* @param listener Call {@link ActionListener#onResponse(Object)} when the hook is finished.
*/
void indicesMigrationComplete(Map<String, Object> metadata, Client client, ActionListener<Boolean> listener) {
owningFeature.getPostMigrationFunction().indicesMigrationComplete(metadata, client, listener);
}
}
| SystemResourceMigrationInfo |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AtMostOnce.java | {
"start": 1147,
"end": 1594
} | interface ____ at-most-once semantics.
*
* Server must guarantee that methods are executed at most once, by keeping
* a retry cache. The previous response must be returned when duplicate
* requests are received. Because of these guarantee, a client can retry
* this request on failover and other network failure conditions.
*/
@Inherited
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.METHOD)
@InterfaceStability.Evolving
public @ | with |
java | quarkusio__quarkus | core/deployment/src/main/java/io/quarkus/deployment/builditem/ConfigMappingBuildItem.java | {
"start": 409,
"end": 556
} | class ____ the respective configuration prefix, enabling Quarkus to map configuration
* properties to strongly-typed interfaces.
*/
public final | with |
java | quarkusio__quarkus | core/devmode-spi/src/main/java/io/quarkus/dev/ErrorPageGenerators.java | {
"start": 489,
"end": 1406
} | class ____ {
private static final Map<String, Function<Throwable, String>> generators = new ConcurrentHashMap<>();
/**
* Register a function that will be used to generate the error page for the given root cause.
*
* @param rootCauseClassName
* @param function
*/
public static void register(String rootCauseClassName, Function<Throwable, String> function) {
if (generators.putIfAbsent(rootCauseClassName, function) != null) {
throw new IllegalStateException("Template builder already specified for: " + rootCauseClassName);
}
}
public static Function<Throwable, String> get(String rootCauseClassName) {
return generators.get(rootCauseClassName);
}
// This method is called by a relevant service provider during HotReplacementSetup#close()
public static void clear() {
generators.clear();
}
}
| ErrorPageGenerators |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/common/io/stream/NamedWriteableRegistry.java | {
"start": 936,
"end": 1129
} | class ____ {
static boolean ignoreDeserializationErrors; // disable assertions just to test production behaviour
/** An entry in the registry, made up of a category | NamedWriteableRegistry |
java | lettuce-io__lettuce-core | src/test/java/io/lettuce/scenario/MaintenanceNotificationConnectionTest.java | {
"start": 11686,
"end": 12346
} | class ____ {
final RedisClient client;
final StatefulRedisConnection<String, String> connection;
final TestCapture capture;
final String bdbId;
final EndpointType expectedEndpointType;
TestContext(RedisClient client, StatefulRedisConnection<String, String> connection, TestCapture capture, String bdbId,
EndpointType expectedEndpointType) {
this.client = client;
this.connection = connection;
this.capture = capture;
this.bdbId = bdbId;
this.expectedEndpointType = expectedEndpointType;
}
}
public static | TestContext |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/bootstrap/spi/delegation/TestDelegatingSessionFactoryBuilderImplementor.java | {
"start": 490,
"end": 879
} | class ____ extends AbstractDelegatingSessionFactoryBuilderImplementor<TestDelegatingSessionFactoryBuilderImplementor> {
public TestDelegatingSessionFactoryBuilderImplementor(SessionFactoryBuilderImplementor delegate) {
super( delegate );
}
@Override
protected TestDelegatingSessionFactoryBuilderImplementor getThis() {
return this;
}
}
| TestDelegatingSessionFactoryBuilderImplementor |
java | google__guice | core/test/com/google/inject/NullableInjectionPointTest.java | {
"start": 8092,
"end": 8162
} | class ____ {
@Inject @Nullable Foo foo;
}
static | NullableFooField |
java | google__gson | gson/src/main/java/com/google/gson/stream/MalformedJsonException.java | {
"start": 893,
"end": 1245
} | class ____ extends IOException {
private static final long serialVersionUID = 1L;
public MalformedJsonException(String msg) {
super(msg);
}
public MalformedJsonException(String msg, Throwable throwable) {
super(msg, throwable);
}
public MalformedJsonException(Throwable throwable) {
super(throwable);
}
}
| MalformedJsonException |
java | google__dagger | dagger-compiler/main/java/dagger/internal/codegen/model/DaggerProcessingEnv.java | {
"start": 942,
"end": 1039
} | class ____ {
/** Represents a type of backend used for compilation. */
public | DaggerProcessingEnv |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/coordination/CoordinationRequestGateway.java | {
"start": 1080,
"end": 1835
} | interface ____ {
/**
* Send out a request to a specified coordinator and return the response.
*
* <p>On the client side, a unique operatorUid must be defined to identify an operator.
* Otherwise, the query cannot be executed correctly. Note that we use operatorUid instead of
* operatorID because the latter is an internal runtime concept that cannot be recognized by the
* client.
*
* @param operatorUid specifies which coordinator to receive the request
* @param request the request to send
* @return the response from the coordinator
*/
CompletableFuture<CoordinationResponse> sendCoordinationRequest(
String operatorUid, CoordinationRequest request);
}
| CoordinationRequestGateway |
java | spring-projects__spring-framework | spring-context/src/main/java/org/springframework/validation/DataBinder.java | {
"start": 34341,
"end": 52659
} | class ____ -> resolve constructor arguments from request parameters.
@Nullable String[] paramNames = BeanUtils.getParameterNames(ctor);
Class<?>[] paramTypes = ctor.getParameterTypes();
@Nullable Object[] args = new Object[paramTypes.length];
Set<String> failedParamNames = new HashSet<>(4);
for (int i = 0; i < paramNames.length; i++) {
MethodParameter param = MethodParameter.forFieldAwareConstructor(ctor, i, paramNames[i]);
String lookupName = null;
if (this.nameResolver != null) {
lookupName = this.nameResolver.resolveName(param);
}
if (lookupName == null) {
lookupName = paramNames[i];
}
String paramPath = nestedPath + lookupName;
Class<?> paramType = paramTypes[i];
ResolvableType resolvableType = ResolvableType.forMethodParameter(param);
Object value = valueResolver.resolveValue(paramPath, paramType);
if (value == null) {
if (List.class.isAssignableFrom(paramType)) {
value = createList(paramPath, paramType, resolvableType, valueResolver);
}
else if (Map.class.isAssignableFrom(paramType)) {
value = createMap(paramPath, paramType, resolvableType, valueResolver);
}
else if (paramType.isArray()) {
value = createArray(paramPath, paramType, resolvableType, valueResolver);
}
}
if (value == null && shouldConstructArgument(param) && hasValuesFor(paramPath, valueResolver)) {
args[i] = createObject(resolvableType, paramPath + ".", valueResolver);
}
else {
try {
if (value == null && (param.isOptional() || getBindingResult().hasErrors())) {
args[i] = (param.getParameterType() == Optional.class ? Optional.empty() : null);
}
else {
args[i] = convertIfNecessary(value, paramType, param);
}
}
catch (TypeMismatchException ex) {
args[i] = null;
failedParamNames.add(paramPath);
handleTypeMismatchException(ex, paramPath, paramType, value);
}
}
}
if (getBindingResult().hasErrors()) {
for (int i = 0; i < paramNames.length; i++) {
String paramPath = nestedPath + paramNames[i];
if (!failedParamNames.contains(paramPath)) {
Object value = args[i];
getBindingResult().recordFieldValue(paramPath, paramTypes[i], value);
validateConstructorArgument(ctor.getDeclaringClass(), nestedPath, paramNames[i], value);
}
}
if (!(objectType.getSource() instanceof MethodParameter param && param.isOptional())) {
try {
result = BeanUtils.instantiateClass(ctor, args);
}
catch (BeanInstantiationException ex) {
// swallow and proceed without target instance
}
}
}
else {
try {
result = BeanUtils.instantiateClass(ctor, args);
}
catch (BeanInstantiationException ex) {
if (KotlinDetector.isKotlinType(clazz) && ex.getCause() instanceof NullPointerException cause) {
ObjectError error = new ObjectError(ctor.getName(), cause.getMessage());
getBindingResult().addError(error);
}
else {
throw ex;
}
}
}
}
return (isOptional && !nestedPath.isEmpty() ? Optional.ofNullable(result) : result);
}
/**
* Whether to instantiate the constructor argument of the given type,
* matching its own constructor arguments to bind values.
* <p>By default, simple value types, maps, collections, and arrays are
* excluded from nested constructor binding initialization.
* @since 6.1.2
*/
protected boolean shouldConstructArgument(MethodParameter param) {
Class<?> type = param.nestedIfOptional().getNestedParameterType();
return !BeanUtils.isSimpleValueType(type) && !type.getPackageName().startsWith("java.");
}
private boolean hasValuesFor(String paramPath, ValueResolver resolver) {
for (String name : resolver.getNames()) {
if (name.startsWith(paramPath + ".")) {
return true;
}
}
return false;
}
private @Nullable List<?> createList(
String paramPath, Class<?> paramType, ResolvableType type, ValueResolver valueResolver) {
ResolvableType elementType = type.getNested(2);
SortedSet<Integer> indexes = getIndexes(paramPath, valueResolver);
if (indexes == null) {
return null;
}
int lastIndex = Math.max(indexes.last(), 0);
int size = (lastIndex < this.autoGrowCollectionLimit ? lastIndex + 1 : 0);
List<?> list = (List<?>) CollectionFactory.createCollection(paramType, size);
for (int i = 0; i < size; i++) {
list.add(null);
}
for (int index : indexes) {
String indexedPath = paramPath + "[" + (index != NO_INDEX ? index : "") + "]";
list.set(Math.max(index, 0),
createIndexedValue(paramPath, paramType, elementType, indexedPath, valueResolver));
}
return list;
}
private <V> @Nullable Map<String, V> createMap(
String paramPath, Class<?> paramType, ResolvableType type, ValueResolver valueResolver) {
ResolvableType elementType = type.getNested(2);
Map<String, V> map = null;
for (String name : valueResolver.getNames()) {
if (!name.startsWith(paramPath + "[")) {
continue;
}
int startIdx = paramPath.length() + 1;
int endIdx = name.indexOf(']', startIdx);
boolean quoted = (endIdx - startIdx > 2 && name.charAt(startIdx) == '\'' && name.charAt(endIdx - 1) == '\'');
String key = (quoted ? name.substring(startIdx + 1, endIdx - 1) : name.substring(startIdx, endIdx));
if (map == null) {
map = CollectionFactory.createMap(paramType, 16);
}
String indexedPath = name.substring(0, endIdx + 1);
map.put(key, createIndexedValue(paramPath, paramType, elementType, indexedPath, valueResolver));
}
return map;
}
@SuppressWarnings("unchecked")
private <V> @Nullable V @Nullable [] createArray(
String paramPath, Class<?> paramType, ResolvableType type, ValueResolver valueResolver) {
ResolvableType elementType = type.getNested(2);
SortedSet<Integer> indexes = getIndexes(paramPath, valueResolver);
if (indexes == null) {
return null;
}
int lastIndex = Math.max(indexes.last(), 0);
int size = (lastIndex < this.autoGrowCollectionLimit ? lastIndex + 1: 0);
@Nullable V[] array = (V[]) Array.newInstance(elementType.resolve(), size);
for (int index : indexes) {
String indexedPath = paramPath + "[" + (index != NO_INDEX ? index : "") + "]";
array[Math.max(index, 0)] =
createIndexedValue(paramPath, paramType, elementType, indexedPath, valueResolver);
}
return array;
}
private static @Nullable SortedSet<Integer> getIndexes(String paramPath, ValueResolver valueResolver) {
SortedSet<Integer> indexes = null;
for (String name : valueResolver.getNames()) {
if (name.startsWith(paramPath + "[")) {
int index;
if (paramPath.length() + 2 == name.length()) {
if (!name.endsWith("[]")) {
continue;
}
index = NO_INDEX;
}
else {
int endIndex = name.indexOf(']', paramPath.length() + 2);
String indexValue = name.substring(paramPath.length() + 1, endIndex);
index = Integer.parseInt(indexValue);
}
indexes = (indexes != null ? indexes : new TreeSet<>());
indexes.add(index);
}
}
return indexes;
}
@SuppressWarnings("unchecked")
private <V> @Nullable V createIndexedValue(
String paramPath, Class<?> containerType, ResolvableType elementType,
String indexedPath, ValueResolver valueResolver) {
Object value = null;
Class<?> elementClass = elementType.resolve(Object.class);
if (List.class.isAssignableFrom(elementClass)) {
value = createList(indexedPath, elementClass, elementType, valueResolver);
}
else if (Map.class.isAssignableFrom(elementClass)) {
value = createMap(indexedPath, elementClass, elementType, valueResolver);
}
else if (elementClass.isArray()) {
value = createArray(indexedPath, elementClass, elementType, valueResolver);
}
else {
Object rawValue = valueResolver.resolveValue(indexedPath, elementClass);
if (rawValue != null) {
try {
value = convertIfNecessary(rawValue, elementClass);
}
catch (TypeMismatchException ex) {
handleTypeMismatchException(ex, paramPath, containerType, rawValue);
}
}
else {
value = createObject(elementType, indexedPath + ".", valueResolver);
}
}
return (V) value;
}
private void handleTypeMismatchException(
TypeMismatchException ex, String paramPath, Class<?> paramType, @Nullable Object value) {
ex.initPropertyName(paramPath);
getBindingResult().recordFieldValue(paramPath, paramType, value);
getBindingErrorProcessor().processPropertyAccessException(ex, getBindingResult());
}
private void validateConstructorArgument(
Class<?> constructorClass, String nestedPath, @Nullable String name, @Nullable Object value) {
Object[] hints = null;
if (this.targetType != null && this.targetType.getSource() instanceof MethodParameter parameter) {
for (Annotation ann : parameter.getParameterAnnotations()) {
hints = ValidationAnnotationUtils.determineValidationHints(ann);
if (hints != null) {
break;
}
}
}
if (hints == null) {
return;
}
for (Validator validator : getValidatorsToApply()) {
if (validator instanceof SmartValidator smartValidator) {
boolean isNested = !nestedPath.isEmpty();
if (isNested) {
getBindingResult().pushNestedPath(nestedPath.substring(0, nestedPath.length() - 1));
}
try {
smartValidator.validateValue(constructorClass, name, value, getBindingResult(), hints);
}
catch (IllegalArgumentException ex) {
// No corresponding field on the target class...
}
if (isNested) {
getBindingResult().popNestedPath();
}
}
}
}
/**
* Bind the given property values to this binder's target.
* <p>This call can create field errors, representing basic binding
* errors like a required field (code "required"), or type mismatch
* between value and bean property (code "typeMismatch").
* <p>Note that the given PropertyValues should be a throwaway instance:
* For efficiency, it will be modified to just contain allowed fields if it
* implements the MutablePropertyValues interface; else, an internal mutable
* copy will be created for this purpose. Pass in a copy of the PropertyValues
* if you want your original instance to stay unmodified in any case.
* @param pvs property values to bind
* @see #doBind(org.springframework.beans.MutablePropertyValues)
*/
public void bind(PropertyValues pvs) {
if (shouldNotBindPropertyValues()) {
return;
}
MutablePropertyValues mpvs = (pvs instanceof MutablePropertyValues mutablePropertyValues ?
mutablePropertyValues : new MutablePropertyValues(pvs));
doBind(mpvs);
}
/**
* Whether to not bind parameters to properties. Returns "true" if
* {@link #isDeclarativeBinding()} is on, and
* {@link #setAllowedFields(String...) allowedFields} are not configured.
* @since 6.1
*/
protected boolean shouldNotBindPropertyValues() {
return (isDeclarativeBinding() && ObjectUtils.isEmpty(this.allowedFields));
}
/**
* Actual implementation of the binding process, working with the
* passed-in MutablePropertyValues instance.
* @param mpvs the property values to bind,
* as MutablePropertyValues instance
* @see #checkAllowedFields
* @see #checkRequiredFields
* @see #applyPropertyValues
*/
protected void doBind(MutablePropertyValues mpvs) {
checkAllowedFields(mpvs);
checkRequiredFields(mpvs);
applyPropertyValues(mpvs);
}
/**
* Check the given property values against the allowed fields,
* removing values for fields that are not allowed.
* @param mpvs the property values to be bound (can be modified)
* @see #getAllowedFields
* @see #isAllowed(String)
*/
protected void checkAllowedFields(MutablePropertyValues mpvs) {
PropertyValue[] pvs = mpvs.getPropertyValues();
for (PropertyValue pv : pvs) {
String field = PropertyAccessorUtils.canonicalPropertyName(pv.getName());
if (!isAllowed(field)) {
mpvs.removePropertyValue(pv);
getBindingResult().recordSuppressedField(field);
if (logger.isDebugEnabled()) {
logger.debug("Field [" + field + "] has been removed from PropertyValues " +
"and will not be bound, because it has not been found in the list of allowed fields");
}
}
}
}
/**
* Determine if the given field is allowed for binding.
* <p>Invoked for each passed-in property value.
* <p>Checks for {@code "xxx*"}, {@code "*xxx"}, {@code "*xxx*"}, and
* {@code "xxx*yyy"} matches (with an arbitrary number of pattern parts),
* as well as direct equality, in the configured lists of allowed field
* patterns and disallowed field patterns.
* <p>Matching against allowed field patterns is case-sensitive; whereas,
* matching against disallowed field patterns is case-insensitive.
* <p>A field matching a disallowed pattern will not be accepted even if it
* also happens to match a pattern in the allowed list.
* <p>Can be overridden in subclasses, but care must be taken to honor the
* aforementioned contract.
* @param field the field to check
* @return {@code true} if the field is allowed
* @see #setAllowedFields
* @see #setDisallowedFields
* @see org.springframework.util.PatternMatchUtils#simpleMatch(String, String)
*/
protected boolean isAllowed(String field) {
String[] allowed = getAllowedFields();
String[] disallowed = getDisallowedFields();
if (!ObjectUtils.isEmpty(allowed) && !PatternMatchUtils.simpleMatch(allowed, field)) {
return false;
}
if (!ObjectUtils.isEmpty(disallowed)) {
return !PatternMatchUtils.simpleMatchIgnoreCase(disallowed, field);
}
return true;
}
/**
* Check the given property values against the required fields,
* generating missing field errors where appropriate.
* @param mpvs the property values to be bound (can be modified)
* @see #getRequiredFields
* @see #getBindingErrorProcessor
* @see BindingErrorProcessor#processMissingFieldError
*/
@SuppressWarnings("NullAway") // Dataflow analysis limitation
protected void checkRequiredFields(MutablePropertyValues mpvs) {
String[] requiredFields = getRequiredFields();
if (!ObjectUtils.isEmpty(requiredFields)) {
Map<String, PropertyValue> propertyValues = new HashMap<>();
PropertyValue[] pvs = mpvs.getPropertyValues();
for (PropertyValue pv : pvs) {
String canonicalName = PropertyAccessorUtils.canonicalPropertyName(pv.getName());
propertyValues.put(canonicalName, pv);
}
for (String field : requiredFields) {
PropertyValue pv = propertyValues.get(field);
boolean empty = (pv == null || pv.getValue() == null);
if (!empty) {
if (pv.getValue() instanceof String text) {
empty = !StringUtils.hasText(text);
}
else if (pv.getValue() instanceof String[] values) {
empty = (values.length == 0 || !StringUtils.hasText(values[0]));
}
}
if (empty) {
// Use bind error processor to create FieldError.
getBindingErrorProcessor().processMissingFieldError(field, getInternalBindingResult());
// Remove property from property values to bind:
// It has already caused a field error with a rejected value.
if (pv != null) {
mpvs.removePropertyValue(pv);
propertyValues.remove(field);
}
}
}
}
}
/**
* Apply given property values to the target object.
* <p>Default implementation applies all the supplied property
* values as bean property values. By default, unknown fields will
* be ignored.
* @param mpvs the property values to be bound (can be modified)
* @see #getTarget
* @see #getPropertyAccessor
* @see #isIgnoreUnknownFields
* @see #getBindingErrorProcessor
* @see BindingErrorProcessor#processPropertyAccessException
*/
protected void applyPropertyValues(MutablePropertyValues mpvs) {
try {
// Bind request parameters onto target object.
getPropertyAccessor().setPropertyValues(mpvs, isIgnoreUnknownFields(), isIgnoreInvalidFields());
}
catch (PropertyBatchUpdateException ex) {
// Use bind error processor to create FieldErrors.
for (PropertyAccessException pae : ex.getPropertyAccessExceptions()) {
getBindingErrorProcessor().processPropertyAccessException(pae, getInternalBindingResult());
}
}
}
/**
* Invoke the specified Validators, if any.
* @see #setValidator(Validator)
* @see #getBindingResult()
*/
public void validate() {
Object target = getTarget();
Assert.state(target != null, "No target to validate");
BindingResult bindingResult = getBindingResult();
// Call each validator with the same binding result
for (Validator validator : getValidatorsToApply()) {
validator.validate(target, bindingResult);
}
}
/**
* Invoke the specified Validators, if any, with the given validation hints.
* <p>Note: Validation hints may get ignored by the actual target Validator.
* @param validationHints one or more hint objects to be passed to a {@link SmartValidator}
* @since 3.1
* @see #setValidator(Validator)
* @see SmartValidator#validate(Object, Errors, Object...)
*/
public void validate(Object... validationHints) {
Object target = getTarget();
Assert.state(target != null, "No target to validate");
BindingResult bindingResult = getBindingResult();
// Call each validator with the same binding result
for (Validator validator : getValidatorsToApply()) {
if (!ObjectUtils.isEmpty(validationHints) && validator instanceof SmartValidator smartValidator) {
smartValidator.validate(target, bindingResult, validationHints);
}
else if (validator != null) {
validator.validate(target, bindingResult);
}
}
}
/**
* Close this DataBinder, which may result in throwing
* a BindException if it encountered any errors.
* @return the model Map, containing target object and Errors instance
* @throws BindException if there were any errors in the bind operation
* @see BindingResult#getModel()
*/
public Map<?, ?> close() throws BindException {
if (getBindingResult().hasErrors()) {
throw new BindException(getBindingResult());
}
return getBindingResult().getModel();
}
/**
* Strategy to determine the name of the value to bind to a method parameter.
* Supported on constructor parameters with {@link #construct constructor binding}
* which performs lookups via {@link ValueResolver#resolveValue}.
*/
public | constructor |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/web/servlet/samples/spr/RequestContextHolderTests.java | {
"start": 7270,
"end": 7421
} | class ____ {
@Autowired
private ServletRequest request;
void process() {
assertRequestAttributes(request);
}
}
static | RequestScopedService |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/annotation/method/configuration/ReactiveMethodSecurityConfigurationTests.java | {
"start": 16058,
"end": 17146
} | class ____ {
private final String id;
private final Double altitude;
private final Integer seats;
private final List<Passenger> passengers = new ArrayList<>();
Flight(String id, Double altitude, Integer seats) {
this.id = id;
this.altitude = altitude;
this.seats = seats;
}
String getId() {
return this.id;
}
@PreAuthorize("hasAuthority('airplane:read')")
Mono<Double> getAltitude() {
return Mono.just(this.altitude);
}
@PreAuthorize("hasAnyAuthority('seating:read', 'airplane:read')")
Mono<Integer> getSeats() {
return Mono.just(this.seats);
}
@PostAuthorize("hasAnyAuthority('seating:read', 'airplane:read')")
@PostFilter("@isNotKevin.apply(filterObject)")
Flux<Passenger> getPassengers() {
return Flux.fromIterable(this.passengers);
}
@PreAuthorize("hasAnyAuthority('seating:read', 'airplane:read')")
@PreFilter("filterObject.contains(' ')")
Mono<Void> board(Flux<String> passengers) {
return passengers.doOnNext((passenger) -> this.passengers.add(new Passenger(passenger))).then();
}
}
public static | Flight |
java | apache__flink | flink-connectors/flink-connector-files/src/test/java/org/apache/flink/connector/file/sink/writer/FileWriterBucketTest.java | {
"start": 16965,
"end": 18076
} | class ____ extends LocalRecoverableWriter {
private int cleanupCallCounter = 0;
TestRecoverableWriter(LocalFileSystem fs) {
super(fs);
}
int getCleanupCallCounter() {
return cleanupCallCounter;
}
@Override
public boolean requiresCleanupOfRecoverableState() {
// here we return true so that the cleanupRecoverableState() is called.
return true;
}
@Override
public boolean cleanupRecoverableState(ResumeRecoverable resumable) throws IOException {
cleanupCallCounter++;
return false;
}
@Override
public String toString() {
return "TestRecoverableWriter has called discardRecoverableState() "
+ cleanupCallCounter
+ " times.";
}
}
/**
* A test implementation of a {@link RecoverableWriter} that does not support resuming, i.e.
* keep on writing to the in-progress file at the point we were before the failure.
*/
private static | TestRecoverableWriter |
java | spring-projects__spring-framework | spring-webmvc/src/test/java/org/springframework/web/servlet/mvc/method/annotation/ServletAnnotationControllerHandlerMethodTests.java | {
"start": 137432,
"end": 137579
} | class ____ {
@RequestMapping("/")
public MySpecialArg handle() {
return new MySpecialArg("foo");
}
}
static | ModelAndViewResolverController |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/OpportunisticContainerAllocator.java | {
"start": 6668,
"end": 7445
} | class ____ {
private final Container container;
private final String resourceName;
/**
* Creates an instance of Allocation.
* @param container allocated container.
* @param resourceName location where it got allocated.
*/
public Allocation(Container container, String resourceName) {
this.container = container;
this.resourceName = resourceName;
}
/**
* Get container of the allocation.
* @return container of the allocation.
*/
public Container getContainer() {
return container;
}
/**
* Get resource name of the allocation.
* @return resource name of the allocation.
*/
public String getResourceName() {
return resourceName;
}
}
/**
* This | Allocation |
java | apache__spark | common/unsafe/src/main/java/org/apache/spark/unsafe/types/GeometryVal.java | {
"start": 926,
"end": 1012
} | class ____ the physical type for the GEOMETRY data type.
@Unstable
public final | represents |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/ext/jdk9/Java9ListsTest.java | {
"start": 529,
"end": 4074
} | class ____ extends DatabindTestUtil
{
private final ObjectMapper MAPPER = JsonMapper.builder()
.activateDefaultTypingAsProperty(
NoCheckSubTypeValidator.instance,
DefaultTyping.NON_FINAL,
"@class"
).build();
@Test
public void testUnmodifiableList() throws Exception
{
final List<String> list = Collections.unmodifiableList(Collections.singletonList("a"));
final String actualJson = MAPPER.writeValueAsString(list);
final List<?> output = MAPPER.readValue(actualJson, List.class);
assertEquals(1, output.size());
}
@Test
public void testJava9ListOf() throws Exception
{
List<String> list = List.of("a");
/* {
Class<?> cls = list.getClass();
tools.jackson.databind.JavaType type = MAPPER.constructType(cls);
System.err.println("LIST type: "+type);
System.err.println(" final? "+type.isFinal());
}
*/
ObjectWriter w = MAPPER.writerFor(List.class);
String actualJson = w.writeValueAsString(list);
List<?> output = MAPPER.readValue(actualJson, List.class);
assertEquals(1, output.size());
// and couple of alternatives:
list = List.of("a", "b");
actualJson = w.writeValueAsString(list);
output = MAPPER.readValue(actualJson, List.class);
assertEquals(2, output.size());
list = List.of("a", "b", "c");
actualJson = w.writeValueAsString(list);
output = MAPPER.readValue(actualJson, List.class);
assertEquals(3, output.size());
list = List.of();
actualJson = w.writeValueAsString(list);
output = MAPPER.readValue(actualJson, List.class);
assertEquals(0, output.size());
}
@Test
public void testJava9MapOf() throws Exception
{
ObjectWriter w = MAPPER.writerFor(Map.class);
Map<String,String> map = Map.of("key", "value");
String actualJson = w.writeValueAsString(map);
Map<?,?> output = MAPPER.readValue(actualJson, Map.class);
assertEquals(1, output.size());
// and alternatives
map = Map.of("key", "value", "foo", "bar");
actualJson = w.writeValueAsString(map);
output = MAPPER.readValue(actualJson, Map.class);
assertEquals(2, output.size());
map = Map.of("key", "value", "foo", "bar", "last", "one");
actualJson = w.writeValueAsString(map);
output = MAPPER.readValue(actualJson, Map.class);
assertEquals(3, output.size());
map = Map.of();
actualJson = w.writeValueAsString(map);
output = MAPPER.readValue(actualJson, Map.class);
assertEquals(0, output.size());
}
// [databind#3344]
@Test
public void testJava9SetOf() throws Exception
{
ObjectWriter w = MAPPER.writerFor(Set.class);
Set<?> set = Set.of("a", "b", "c");
String actualJson = w.writeValueAsString(set);
Set<?> output = MAPPER.readValue(actualJson, Set.class);
assertTrue(output instanceof Set<?>);
assertEquals(set, output);
}
@Test
public void testJava9ListWrapped() throws Exception
{
final List<String> list = Collections.unmodifiableList(List.of("a"));
final String actualJson = MAPPER.writeValueAsString(list);
final List<?> output = MAPPER.readValue(actualJson, List.class);
assertEquals(1, output.size());
}
}
| Java9ListsTest |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/aop/framework/AbstractAopProxyTests.java | {
"start": 46456,
"end": 50682
} | class ____ implements AfterReturningAdvice {
public int sum;
@Override
public void afterReturning(@Nullable Object returnValue, Method m, Object[] args, @Nullable Object target) {
sum += (Integer) returnValue;
}
}
SummingAfterAdvice aa = new SummingAfterAdvice();
@SuppressWarnings("serial")
Advisor matchesInt = new StaticMethodMatcherPointcutAdvisor(aa) {
@Override
public boolean matches(Method m, @Nullable Class<?> targetClass) {
return m.getReturnType() == int.class;
}
};
TestBean target = new TestBean();
ProxyFactory pf = new ProxyFactory(target);
pf.addAdvice(new NopInterceptor());
pf.addAdvisor(matchesInt);
assertThat(pf.getAdvisors()[1]).as("Advisor was added").isEqualTo(matchesInt);
ITestBean proxied = (ITestBean) createProxy(pf);
assertThat(aa.sum).isEqualTo(0);
int i1 = 12;
int i2 = 13;
// Won't be advised
proxied.setAge(i1);
assertThat(proxied.getAge()).isEqualTo(i1);
assertThat(aa.sum).isEqualTo(i1);
proxied.setAge(i2);
assertThat(proxied.getAge()).isEqualTo(i2);
assertThat(aa.sum).isEqualTo((i1 + i2));
assertThat(proxied.getAge()).isEqualTo(i2);
}
@Test
void afterReturningAdvisorIsNotInvokedOnException() {
CountingAfterReturningAdvice car = new CountingAfterReturningAdvice();
TestBean target = new TestBean();
ProxyFactory pf = new ProxyFactory(target);
pf.addAdvice(new NopInterceptor());
pf.addAdvice(car);
assertThat(pf.getAdvisors()[1].getAdvice()).as("Advice was wrapped in Advisor and added").isEqualTo(car);
ITestBean proxied = (ITestBean) createProxy(pf);
assertThat(car.getCalls()).isEqualTo(0);
int age = 10;
proxied.setAge(age);
assertThat(proxied.getAge()).isEqualTo(age);
assertThat(car.getCalls()).isEqualTo(2);
Exception exc = new Exception();
// On exception it won't be invoked
assertThatExceptionOfType(Throwable.class).isThrownBy(() -> proxied.exceptional(exc))
.satisfies(ex -> assertThat(ex).isSameAs(exc));
assertThat(car.getCalls()).isEqualTo(2);
}
@Test
void throwsAdvisorIsInvoked() {
// Reacts to ServletException and RemoteException
MyThrowsHandler th = new MyThrowsHandler();
@SuppressWarnings("serial")
Advisor matchesEchoInvocations = new StaticMethodMatcherPointcutAdvisor(th) {
@Override
public boolean matches(Method m, @Nullable Class<?> targetClass) {
return m.getName().startsWith("echo");
}
};
Echo target = new Echo();
target.setA(16);
ProxyFactory pf = new ProxyFactory(target);
pf.addAdvice(new NopInterceptor());
pf.addAdvisor(matchesEchoInvocations);
assertThat(pf.getAdvisors()[1]).as("Advisor was added").isEqualTo(matchesEchoInvocations);
IEcho proxied = (IEcho) createProxy(pf);
assertThat(th.getCalls()).isEqualTo(0);
assertThat(proxied.getA()).isEqualTo(target.getA());
assertThat(th.getCalls()).isEqualTo(0);
Exception ex = new Exception();
// Will be advised but doesn't match
assertThatException().isThrownBy(() -> proxied.echoException(1, ex))
.matches(ex::equals);
FileNotFoundException fex = new FileNotFoundException();
assertThatExceptionOfType(FileNotFoundException.class)
.isThrownBy(() -> proxied.echoException(1, fex))
.matches(fex::equals);
assertThat(th.getCalls("ioException")).isEqualTo(1);
}
@Test
void addThrowsAdviceWithoutAdvisor() {
// Reacts to ServletException and RemoteException
MyThrowsHandler th = new MyThrowsHandler();
Echo target = new Echo();
target.setA(16);
ProxyFactory pf = new ProxyFactory(target);
pf.addAdvice(new NopInterceptor());
pf.addAdvice(th);
IEcho proxied = (IEcho) createProxy(pf);
assertThat(th.getCalls()).isEqualTo(0);
assertThat(proxied.getA()).isEqualTo(target.getA());
assertThat(th.getCalls()).isEqualTo(0);
Exception ex = new Exception();
// Will be advised but doesn't match
assertThatException().isThrownBy(() -> proxied.echoException(1, ex))
.matches(ex::equals);
// Subclass of RemoteException
MarshalException mex = new MarshalException("");
assertThatExceptionOfType(MarshalException.class).isThrownBy(() -> proxied.echoException(1, mex))
.matches(mex::equals);
assertThat(th.getCalls("remoteException")).isEqualTo(1);
}
private static | SummingAfterAdvice |
java | apache__rocketmq | proxy/src/main/java/org/apache/rocketmq/proxy/service/admin/DefaultAdminService.java | {
"start": 1652,
"end": 5827
} | class ____ implements AdminService {
private static final Logger log = LoggerFactory.getLogger(LoggerName.PROXY_LOGGER_NAME);
private final MQClientAPIFactory mqClientAPIFactory;
public DefaultAdminService(MQClientAPIFactory mqClientAPIFactory) {
this.mqClientAPIFactory = mqClientAPIFactory;
}
@Override
public boolean topicExist(String topic) {
boolean topicExist;
TopicRouteData topicRouteData;
try {
topicRouteData = this.getTopicRouteDataDirectlyFromNameServer(topic);
topicExist = topicRouteData != null;
} catch (Throwable e) {
topicExist = false;
}
return topicExist;
}
@Override
public boolean createTopicOnTopicBrokerIfNotExist(String createTopic, String sampleTopic, int wQueueNum,
int rQueueNum, boolean examineTopic, int retryCheckCount) {
TopicRouteData curTopicRouteData = new TopicRouteData();
try {
curTopicRouteData = this.getTopicRouteDataDirectlyFromNameServer(createTopic);
} catch (Exception e) {
if (!TopicRouteHelper.isTopicNotExistError(e)) {
log.error("get cur topic route {} failed.", createTopic, e);
return false;
}
}
TopicRouteData sampleTopicRouteData = null;
try {
sampleTopicRouteData = this.getTopicRouteDataDirectlyFromNameServer(sampleTopic);
} catch (Exception e) {
log.error("create topic {} failed.", createTopic, e);
return false;
}
if (sampleTopicRouteData == null || sampleTopicRouteData.getBrokerDatas().isEmpty()) {
return false;
}
try {
return this.createTopicOnBroker(createTopic, wQueueNum, rQueueNum, curTopicRouteData.getBrokerDatas(),
sampleTopicRouteData.getBrokerDatas(), examineTopic, retryCheckCount);
} catch (Exception e) {
log.error("create topic {} failed.", createTopic, e);
}
return false;
}
@Override
public boolean createTopicOnBroker(String topic, int wQueueNum, int rQueueNum, List<BrokerData> curBrokerDataList,
List<BrokerData> sampleBrokerDataList, boolean examineTopic, int retryCheckCount) throws Exception {
Set<String> curBrokerAddr = new HashSet<>();
if (curBrokerDataList != null) {
for (BrokerData brokerData : curBrokerDataList) {
curBrokerAddr.add(brokerData.getBrokerAddrs().get(MixAll.MASTER_ID));
}
}
TopicConfig topicConfig = new TopicConfig();
topicConfig.setTopicName(topic);
topicConfig.setWriteQueueNums(wQueueNum);
topicConfig.setReadQueueNums(rQueueNum);
topicConfig.setPerm(PermName.PERM_READ | PermName.PERM_WRITE);
for (BrokerData brokerData : sampleBrokerDataList) {
String addr = brokerData.getBrokerAddrs() == null ? null : brokerData.getBrokerAddrs().get(MixAll.MASTER_ID);
if (addr == null) {
continue;
}
if (curBrokerAddr.contains(addr)) {
continue;
}
try {
this.getClient().createTopic(addr, TopicValidator.AUTO_CREATE_TOPIC_KEY_TOPIC, topicConfig, Duration.ofSeconds(3).toMillis());
} catch (Exception e) {
log.error("create topic on broker failed. topic:{}, broker:{}", topicConfig, addr, e);
}
}
if (examineTopic) {
// examine topic exist.
int count = retryCheckCount;
while (count-- > 0) {
if (this.topicExist(topic)) {
return true;
}
}
} else {
return true;
}
return false;
}
protected TopicRouteData getTopicRouteDataDirectlyFromNameServer(String topic) throws Exception {
return this.getClient().getTopicRouteInfoFromNameServer(topic, Duration.ofSeconds(3).toMillis());
}
protected MQClientAPIExt getClient() {
return this.mqClientAPIFactory.getClient();
}
}
| DefaultAdminService |
java | spring-projects__spring-boot | core/spring-boot/src/main/java/org/springframework/boot/context/properties/source/MutuallyExclusiveConfigurationPropertiesException.java | {
"start": 1303,
"end": 4877
} | class ____ extends RuntimeException {
private final Set<String> configuredNames;
private final Set<String> mutuallyExclusiveNames;
/**
* Creates a new instance for mutually exclusive configuration properties when two or
* more of those properties have been configured.
* @param configuredNames the names of the properties that have been configured
* @param mutuallyExclusiveNames the names of the properties that are mutually
* exclusive
*/
public MutuallyExclusiveConfigurationPropertiesException(Collection<String> configuredNames,
Collection<String> mutuallyExclusiveNames) {
this(asSet(configuredNames), asSet(mutuallyExclusiveNames));
}
private MutuallyExclusiveConfigurationPropertiesException(Set<String> configuredNames,
Set<String> mutuallyExclusiveNames) {
super(buildMessage(mutuallyExclusiveNames, configuredNames));
this.configuredNames = configuredNames;
this.mutuallyExclusiveNames = mutuallyExclusiveNames;
}
/**
* Return the names of the properties that have been configured.
* @return the names of the configured properties
*/
public Set<String> getConfiguredNames() {
return this.configuredNames;
}
/**
* Return the names of the properties that are mutually exclusive.
* @return the names of the mutually exclusive properties
*/
public Set<String> getMutuallyExclusiveNames() {
return this.mutuallyExclusiveNames;
}
@Contract("null -> null; !null -> !null")
private static @Nullable Set<String> asSet(@Nullable Collection<String> collection) {
return (collection != null) ? new LinkedHashSet<>(collection) : null;
}
private static String buildMessage(Set<String> mutuallyExclusiveNames, Set<String> configuredNames) {
Assert.isTrue(configuredNames != null && configuredNames.size() > 1,
"'configuredNames' must contain 2 or more names");
Assert.isTrue(mutuallyExclusiveNames != null && mutuallyExclusiveNames.size() > 1,
"'mutuallyExclusiveNames' must contain 2 or more names");
return "The configuration properties '" + String.join(", ", mutuallyExclusiveNames)
+ "' are mutually exclusive and '" + String.join(", ", configuredNames)
+ "' have been configured together";
}
/**
* Throw a new {@link MutuallyExclusiveConfigurationPropertiesException} if multiple
* non-null values are defined in a set of entries.
* @param entries a consumer used to populate the entries to check
*/
public static void throwIfMultipleNonNullValuesIn(Consumer<Map<String, @Nullable Object>> entries) {
Predicate<@Nullable Object> isNonNull = Objects::nonNull;
throwIfMultipleMatchingValuesIn(entries, isNonNull);
}
/**
* Throw a new {@link MutuallyExclusiveConfigurationPropertiesException} if multiple
* values are defined in a set of entries that match the given predicate.
* @param <V> the value type
* @param entries a consumer used to populate the entries to check
* @param predicate the predicate used to check for matching values
* @since 3.3.7
*/
public static <V> void throwIfMultipleMatchingValuesIn(Consumer<Map<String, @Nullable V>> entries,
Predicate<@Nullable V> predicate) {
Map<String, V> map = new LinkedHashMap<>();
entries.accept(map);
Set<String> configuredNames = map.entrySet()
.stream()
.filter((entry) -> predicate.test(entry.getValue()))
.map(Map.Entry::getKey)
.collect(Collectors.toCollection(LinkedHashSet::new));
if (configuredNames.size() > 1) {
throw new MutuallyExclusiveConfigurationPropertiesException(configuredNames, map.keySet());
}
}
}
| MutuallyExclusiveConfigurationPropertiesException |
java | spring-projects__spring-framework | spring-webflux/src/test/java/org/springframework/web/reactive/DispatcherHandlerTests.java | {
"start": 4634,
"end": 5316
} | class ____ implements HandlerResultHandler {
@Override
public boolean supports(HandlerResult result) {
Object value = result.getReturnValue();
return value != null && String.class.equals(value.getClass());
}
@Override
public Mono<Void> handleResult(ServerWebExchange exchange, HandlerResult result) {
Object returnValue = result.getReturnValue();
if (returnValue == null) {
return Mono.empty();
}
byte[] bytes = ((String) returnValue).getBytes(StandardCharsets.UTF_8);
DataBuffer dataBuffer = DefaultDataBufferFactory.sharedInstance.wrap(bytes);
return exchange.getResponse().writeWith(Mono.just(dataBuffer));
}
}
}
| StringHandlerResultHandler |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/bug/Bug_for_issue_273.java | {
"start": 111,
"end": 291
} | class ____ extends TestCase {
public void test_for_issue() throws Exception {
JSON.parseObject("{\"value\":\"\0x16\0x26\"}");
}
public static | Bug_for_issue_273 |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/beans/factory/xml/XmlBeanFactoryTestTypes.java | {
"start": 5676,
"end": 6452
} | class ____ {
private TestBean testBean1;
private TestBean testBean2;
private DummyFactory dummyFactory;
public DummyReferencer() {
}
public DummyReferencer(DummyFactory dummyFactory) {
this.dummyFactory = dummyFactory;
}
public void setDummyFactory(DummyFactory dummyFactory) {
this.dummyFactory = dummyFactory;
}
public DummyFactory getDummyFactory() {
return dummyFactory;
}
public void setTestBean1(TestBean testBean1) {
this.testBean1 = testBean1;
}
public TestBean getTestBean1() {
return testBean1;
}
public void setTestBean2(TestBean testBean2) {
this.testBean2 = testBean2;
}
public TestBean getTestBean2() {
return testBean2;
}
}
/**
* Fixed method replacer for String return types
* @author Rod Johnson
*/
| DummyReferencer |
java | spring-projects__spring-framework | spring-jdbc/src/main/java/org/springframework/jdbc/datasource/embedded/AbstractEmbeddedDatabaseConfigurer.java | {
"start": 1108,
"end": 1910
} | class ____ implements EmbeddedDatabaseConfigurer {
protected final Log logger = LogFactory.getLog(getClass());
@Override
public void shutdown(DataSource dataSource, String databaseName) {
Connection con = null;
try {
con = dataSource.getConnection();
if (con != null) {
try (Statement stmt = con.createStatement()) {
stmt.execute("SHUTDOWN");
}
}
}
catch (SQLException ex) {
logger.info("Could not shut down embedded database", ex);
}
finally {
if (con != null) {
try {
con.close();
}
catch (SQLException ex) {
logger.debug("Could not close JDBC Connection on shutdown", ex);
}
catch (Throwable ex) {
logger.debug("Unexpected exception on closing JDBC Connection", ex);
}
}
}
}
}
| AbstractEmbeddedDatabaseConfigurer |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StGeotile.java | {
"start": 4006,
"end": 11881
} | class ____ {
private final int precision;
private final GeoBoundingBox bbox;
Factory(int precision, GeoBoundingBox bbox) {
this.precision = checkPrecisionRange(precision);
this.bbox = bbox;
}
public GeoTileBoundedGrid get(DriverContext context) {
return new GeoTileBoundedGrid(precision, bbox);
}
}
}
/**
* For unbounded grids, we don't need to check if the tile is valid,
* just calculate the encoded long intersecting the point at that precision.
*/
public static final UnboundedGrid unboundedGrid = (point, precision) -> GeoTileUtils.longEncode(
point.getX(),
point.getY(),
checkPrecisionRange(precision)
);
@FunctionInfo(
returnType = "geotile",
preview = true,
appliesTo = { @FunctionAppliesTo(lifeCycle = FunctionAppliesToLifecycle.PREVIEW) },
description = """
Calculates the `geotile` of the supplied geo_point at the specified precision.
The result is long encoded. Use [TO_STRING](#esql-to_string) to convert the result to a string,
[TO_LONG](#esql-to_long) to convert it to a `long`, or [TO_GEOSHAPE](#esql-to_geoshape) to calculate
the `geo_shape` bounding geometry.
These functions are related to the [`geo_grid` query](/reference/query-languages/query-dsl/query-dsl-geo-grid-query.md)
and the [`geotile_grid` aggregation](/reference/aggregations/search-aggregations-bucket-geotilegrid-aggregation.md).""",
examples = @Example(file = "spatial-grid", tag = "st_geotile-grid")
)
public StGeotile(
Source source,
@Param(
name = "geometry",
type = { "geo_point" },
description = "Expression of type `geo_point`. If `null`, the function returns `null`."
) Expression field,
@Param(name = "precision", type = { "integer" }, description = """
Expression of type `integer`. If `null`, the function returns `null`.
Valid values are between [0 and 29](https://wiki.openstreetmap.org/wiki/Zoom_levels).""") Expression precision,
@Param(name = "bounds", type = { "geo_shape" }, description = """
Optional bounds to filter the grid tiles, a `geo_shape` of type `BBOX`.
Use [`ST_ENVELOPE`](#esql-st_envelope) if the `geo_shape` is of any other type.""", optional = true) Expression bounds
) {
this(source, field, precision, bounds, false);
}
private StGeotile(Source source, Expression field, Expression precision, Expression bounds, boolean spatialDocValues) {
super(source, field, precision, bounds, spatialDocValues);
}
private StGeotile(StreamInput in) throws IOException {
super(in, false);
}
@Override
public SpatialGridFunction withDocValues(boolean useDocValues) {
// Only update the docValues flags if the field is found in the attributes
boolean docValues = this.spatialDocsValues || useDocValues;
return new StGeotile(source(), spatialField, parameter, bounds, docValues);
}
@Override
public String getWriteableName() {
return ENTRY.name;
}
@Override
public DataType dataType() {
return GEOTILE;
}
@Override
protected SpatialGridFunction replaceChildren(Expression newSpatialField, Expression newParameter, Expression newBounds) {
return new StGeotile(source(), newSpatialField, newParameter, newBounds);
}
@Override
protected NodeInfo<? extends Expression> info() {
return NodeInfo.create(this, StGeotile::new, spatialField, parameter, bounds);
}
@Override
public EvalOperator.ExpressionEvaluator.Factory toEvaluator(ToEvaluator toEvaluator) {
if (parameter().foldable() == false) {
throw new IllegalArgumentException("precision must be foldable");
}
if (bounds != null) {
if (bounds.foldable() == false) {
throw new IllegalArgumentException("bounds must be foldable");
}
GeoBoundingBox bbox = asGeoBoundingBox(bounds.fold(toEvaluator.foldCtx()));
int precision = (int) parameter.fold(toEvaluator.foldCtx());
GeoTileBoundedGrid.Factory bounds = new GeoTileBoundedGrid.Factory(precision, bbox);
return spatialDocsValues
? new StGeotileFromFieldDocValuesAndLiteralAndLiteralEvaluator.Factory(
source(),
toEvaluator.apply(spatialField()),
bounds::get
)
: new StGeotileFromFieldAndLiteralAndLiteralEvaluator.Factory(source(), toEvaluator.apply(spatialField), bounds::get);
} else {
int precision = checkPrecisionRange((int) parameter.fold(toEvaluator.foldCtx()));
return spatialDocsValues
? new StGeotileFromFieldDocValuesAndLiteralEvaluator.Factory(source(), toEvaluator.apply(spatialField()), precision)
: new StGeotileFromFieldAndLiteralEvaluator.Factory(source(), toEvaluator.apply(spatialField), precision);
}
}
@Override
public Object fold(FoldContext ctx) {
var point = (BytesRef) spatialField().fold(ctx);
int precision = checkPrecisionRange((int) parameter().fold(ctx));
if (bounds() == null) {
return unboundedGrid.calculateGridId(GEO.wkbAsPoint(point), precision);
} else {
GeoBoundingBox bbox = asGeoBoundingBox(bounds().fold(ctx));
GeoTileBoundedGrid bounds = new GeoTileBoundedGrid(precision, bbox);
long gridId = bounds.calculateGridId(GEO.wkbAsPoint(point));
return gridId < 0 ? null : gridId;
}
}
@Evaluator(extraName = "FromFieldAndLiteral", warnExceptions = { IllegalArgumentException.class })
static void fromFieldAndLiteral(LongBlock.Builder results, @Position int p, BytesRefBlock wkbBlock, @Fixed int precision) {
fromWKB(results, p, wkbBlock, precision, unboundedGrid);
}
@Evaluator(extraName = "FromFieldDocValuesAndLiteral", warnExceptions = { IllegalArgumentException.class })
static void fromFieldDocValuesAndLiteral(LongBlock.Builder results, @Position int p, LongBlock encoded, @Fixed int precision) {
fromEncodedLong(results, p, encoded, precision, unboundedGrid);
}
@Evaluator(extraName = "FromFieldAndLiteralAndLiteral", warnExceptions = { IllegalArgumentException.class })
static void fromFieldAndLiteralAndLiteral(
LongBlock.Builder results,
@Position int p,
BytesRefBlock in,
@Fixed(includeInToString = false, scope = THREAD_LOCAL) GeoTileBoundedGrid bounds
) {
fromWKB(results, p, in, bounds);
}
@Evaluator(extraName = "FromFieldDocValuesAndLiteralAndLiteral", warnExceptions = { IllegalArgumentException.class })
static void fromFieldDocValuesAndLiteralAndLiteral(
LongBlock.Builder results,
@Position int p,
LongBlock encoded,
@Fixed(includeInToString = false, scope = THREAD_LOCAL) GeoTileBoundedGrid bounds
) {
fromEncodedLong(results, p, encoded, bounds);
}
public static BytesRef toBounds(long gridId) {
return fromRectangle(GeoTileUtils.toBoundingBox(gridId));
}
static BytesRef fromRectangle(Rectangle bbox) {
double[] x = new double[] { bbox.getMinX(), bbox.getMaxX(), bbox.getMaxX(), bbox.getMinX(), bbox.getMinX() };
double[] y = new double[] { bbox.getMinY(), bbox.getMinY(), bbox.getMaxY(), bbox.getMaxY(), bbox.getMinY() };
LinearRing ring = new LinearRing(x, y);
Polygon polygon = new Polygon(ring);
return SpatialCoordinateTypes.GEO.asWkb(polygon);
}
}
| Factory |
java | apache__flink | flink-formats/flink-orc/src/main/java/org/apache/flink/orc/OrcFileFormatFactory.java | {
"start": 2930,
"end": 5617
} | class ____ implements BulkReaderFormatFactory, BulkWriterFormatFactory {
public static final String IDENTIFIER = "orc";
@Override
public String factoryIdentifier() {
return IDENTIFIER;
}
@Override
public Set<ConfigOption<?>> requiredOptions() {
return new HashSet<>();
}
@Override
public Set<ConfigOption<?>> optionalOptions() {
// support "orc.*"
return new HashSet<>();
}
private static Properties getOrcProperties(ReadableConfig options) {
Properties orcProperties = new Properties();
Properties properties = new Properties();
((org.apache.flink.configuration.Configuration) options).addAllToProperties(properties);
properties.forEach((k, v) -> orcProperties.put(IDENTIFIER + "." + k, v));
return orcProperties;
}
private static Configuration getOrcConfiguration(ReadableConfig formatOptions) {
Properties properties = getOrcProperties(formatOptions);
Configuration hadoopConf = new Configuration();
properties.forEach((k, v) -> hadoopConf.set(k.toString(), v.toString()));
return hadoopConf;
}
@Override
public BulkDecodingFormat<RowData> createDecodingFormat(
DynamicTableFactory.Context context, ReadableConfig formatOptions) {
return new OrcBulkDecodingFormat(formatOptions);
}
@Override
public EncodingFormat<BulkWriter.Factory<RowData>> createEncodingFormat(
DynamicTableFactory.Context context, ReadableConfig formatOptions) {
return new EncodingFormat<BulkWriter.Factory<RowData>>() {
@Override
public BulkWriter.Factory<RowData> createRuntimeEncoder(
DynamicTableSink.Context sinkContext, DataType consumedDataType) {
RowType formatRowType = (RowType) consumedDataType.getLogicalType();
LogicalType[] orcTypes = formatRowType.getChildren().toArray(new LogicalType[0]);
TypeDescription typeDescription =
OrcSplitReaderUtil.logicalTypeToOrcType(formatRowType);
return new OrcBulkWriterFactory<>(
new RowDataVectorizer(typeDescription.toString(), orcTypes),
getOrcProperties(formatOptions),
new Configuration());
}
@Override
public ChangelogMode getChangelogMode() {
return ChangelogMode.insertOnly();
}
};
}
/** OrcBulkDecodingFormat which implements {@link FileBasedStatisticsReportableInputFormat}. */
@VisibleForTesting
public static | OrcFileFormatFactory |
java | elastic__elasticsearch | x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/TimeSeriesBareAggregationsTests.java | {
"start": 2092,
"end": 13866
} | class ____ extends AbstractLogicalPlanOptimizerTests {
private static Map<String, EsField> mappingK8s;
private static Analyzer k8sAnalyzer;
@BeforeClass
public static void initK8s() {
mappingK8s = loadMapping("k8s-mappings.json");
EsIndex k8sIndex = new EsIndex("k8s", mappingK8s, Map.of("k8s", IndexMode.TIME_SERIES), Map.of(), Map.of(), Set.of());
IndexResolution indexResolution = IndexResolution.valid(k8sIndex);
Map<IndexPattern, IndexResolution> resolutions = new HashMap<>();
resolutions.put(new IndexPattern(Source.EMPTY, indexResolution.get().name()), indexResolution);
k8sAnalyzer = new Analyzer(
new AnalyzerContext(
EsqlTestUtils.TEST_CFG,
new EsqlFunctionRegistry(),
resolutions,
defaultLookupResolution(),
enrichResolution,
emptyInferenceResolution(),
TransportVersion.minimumCompatible()
),
TEST_VERIFIER
);
}
protected LogicalPlan planK8s(String query) {
LogicalPlan analyzed = k8sAnalyzer.analyze(parser.createStatement(query));
return logicalOptimizer.optimize(analyzed);
}
/**
* Translation: TS k8s | STATS avg_over_time(field) → TS k8s | STATS VALUES(avg_over_time(field)) BY _tsid
* <br/>
* AVG_OVER_TIME translates into [Eval[[SUMOVERTIME(network.cost{f}#22,true[BOOLEAN]) / COUNTOVERTIME(network.cost{f}#22,true[BOOLEAN])
* AS avg_over_time(network.cost)#4]]]
*/
public void testBareAvgOverTime() {
assumeTrue("requires metrics command", EsqlCapabilities.Cap.METRICS_GROUP_BY_ALL.isEnabled());
LogicalPlan plan = planK8s("""
TS k8s
| STATS avg_over_time(network.cost)
""");
TimeSeriesAggregate tsa = findTimeSeriesAggregate(plan);
assertThat("Should have TimeSeriesAggregate", tsa, is(instanceOf(TimeSeriesAggregate.class)));
List<Attribute> groupings = tsa.groupings().stream().filter(g -> g instanceof Attribute).map(g -> (Attribute) g).toList();
boolean hasTsid = groupings.stream().anyMatch(g -> g.name().equals(MetadataAttribute.TSID_FIELD));
assertThat("Should group by _tsid", hasTsid, is(true));
var aggregates = tsa.aggregates();
assertThat("Should have aggregates", aggregates.isEmpty(), is(false));
List<Attribute> output = plan.output();
boolean hasTimeseries = output.stream().anyMatch(attr -> attr.name().equals(MetadataAttribute.TIMESERIES));
assertThat("Should have _timeseries in output", hasTimeseries, is(true));
Attribute timeseriesAttr = output.stream()
.filter(attr -> attr.name().equals(MetadataAttribute.TIMESERIES))
.findFirst()
.orElse(null);
assertNotNull(timeseriesAttr);
assertThat("_timeseries attribute should exist", timeseriesAttr, is(instanceOf(Attribute.class)));
assertThat("_timeseries should be KEYWORD type", timeseriesAttr.dataType(), is(DataType.KEYWORD));
}
/**
* Translation: TS k8s | STATS sum_over_time(field) → TS k8s | STATS VALUES(sum_over_time(field)) BY _tsid
*/
public void testBareSumOverTime() {
assumeTrue("requires metrics command", EsqlCapabilities.Cap.METRICS_GROUP_BY_ALL.isEnabled());
LogicalPlan plan = planK8s("""
TS k8s
| STATS sum_over_time(network.cost)
""");
TimeSeriesAggregate tsa = findTimeSeriesAggregate(plan);
assertThat("Should have TimeSeriesAggregate", tsa, is(instanceOf(TimeSeriesAggregate.class)));
List<Attribute> groupings = tsa.groupings().stream().filter(g -> g instanceof Attribute).map(g -> (Attribute) g).toList();
boolean hasTsid = groupings.stream().anyMatch(g -> g.name().equals(MetadataAttribute.TSID_FIELD));
assertThat("Should group by _tsid", hasTsid, is(true));
var aggregates = tsa.aggregates();
assertThat("Should have aggregates", aggregates.isEmpty(), is(false));
List<Attribute> output = plan.output();
boolean hasTimeseries = output.stream().anyMatch(attr -> attr.name().equals(MetadataAttribute.TIMESERIES));
assertThat("Should have _timeseries in output", hasTimeseries, is(true));
Attribute timeseriesAttr = output.stream()
.filter(attr -> attr.name().equals(MetadataAttribute.TIMESERIES))
.findFirst()
.orElse(null);
assertNotNull(timeseriesAttr);
assertThat("_timeseries attribute should exist", timeseriesAttr, is(instanceOf(Attribute.class)));
assertThat("_timeseries should be KEYWORD type", timeseriesAttr.dataType(), is(DataType.KEYWORD));
}
/**
* Translation: TS k8s | STATS sum_over_time(field) BY TBUCKET(1h)
* → TS k8s | STATS VALUES(sum_over_time(field)) BY _tsid, TBUCKET(1h)
*/
public void testSumOverTimeWithTBucket() {
assumeTrue("requires metrics command", EsqlCapabilities.Cap.METRICS_GROUP_BY_ALL.isEnabled());
LogicalPlan plan = planK8s("""
TS k8s
| STATS sum_over_time(network.cost) BY TBUCKET(1 hour)
""");
TimeSeriesAggregate tsa = findTimeSeriesAggregate(plan);
assertThat("Should have TimeSeriesAggregate", tsa, is(instanceOf(TimeSeriesAggregate.class)));
List<Attribute> groupings = tsa.groupings().stream().filter(g -> g instanceof Attribute).map(g -> (Attribute) g).toList();
assertThat(groupings.size(), is(2));
boolean hasTsid = groupings.stream().anyMatch(g -> g.name().equals(MetadataAttribute.TSID_FIELD));
assertThat("Should group by _tsid", hasTsid, is(true));
boolean hasBucket = groupings.stream().anyMatch(g -> g.name().equalsIgnoreCase("BUCKET"));
assertThat("Should group by bucket", hasBucket, is(true));
List<Attribute> output = plan.output();
boolean hasTimeseries = output.stream().anyMatch(attr -> attr.name().equals(MetadataAttribute.TIMESERIES));
assertThat("Should have _timeseries in output", hasTimeseries, is(true));
Attribute timeseriesAttr = output.stream()
.filter(attr -> attr.name().equals(MetadataAttribute.TIMESERIES))
.findFirst()
.orElse(null);
assertNotNull(timeseriesAttr);
assertThat("_timeseries attribute should exist", timeseriesAttr, is(instanceOf(Attribute.class)));
assertThat("_timeseries should be KEYWORD type", timeseriesAttr.dataType(), is(DataType.KEYWORD));
}
/**
* Translation: TS k8s | STATS rate(field) BY TBUCKET(1h)
* → TS k8s | STATS VALUES(rate(field)) BY _tsid, TBUCKET(1h)
*/
public void testRateWithTBucket() {
assumeTrue("requires metrics command", EsqlCapabilities.Cap.METRICS_GROUP_BY_ALL.isEnabled());
LogicalPlan plan = planK8s("""
TS k8s
| STATS rate(network.total_bytes_out) BY TBUCKET(1 hour)
""");
TimeSeriesAggregate tsa = findTimeSeriesAggregate(plan);
assertThat("Should have TimeSeriesAggregate", tsa, is(instanceOf(TimeSeriesAggregate.class)));
List<Attribute> groupings = tsa.groupings().stream().filter(g -> g instanceof Attribute).map(g -> (Attribute) g).toList();
assertThat(groupings.size(), is(2));
boolean hasTsid = groupings.stream().anyMatch(g -> g.name().equals(MetadataAttribute.TSID_FIELD));
assertThat("Should group by _tsid", hasTsid, is(true));
boolean hasBucket = groupings.stream().anyMatch(g -> g.name().equalsIgnoreCase("BUCKET"));
assertThat("Should group by bucket", hasBucket, is(true));
List<Attribute> output = plan.output();
boolean hasTimeseries = output.stream().anyMatch(attr -> attr.name().equals(MetadataAttribute.TIMESERIES));
assertThat("Should have _timeseries in output", hasTimeseries, is(true));
Attribute timeseriesAttr = output.stream()
.filter(attr -> attr.name().equals(MetadataAttribute.TIMESERIES))
.findFirst()
.orElse(null);
assertNotNull(timeseriesAttr);
assertThat("_timeseries attribute should exist", timeseriesAttr, is(instanceOf(Attribute.class)));
assertThat("_timeseries should be KEYWORD type", timeseriesAttr.dataType(), is(DataType.KEYWORD));
}
/**
* Wrapped _OVER_TIME functions are not translated.
*/
public void testAlreadyWrappedAggregateNotModified() {
assumeTrue("requires metrics command", EsqlCapabilities.Cap.METRICS_GROUP_BY_ALL.isEnabled());
LogicalPlan planBefore = planK8s("""
TS k8s
| STATS MAX(rate(network.total_bytes_out))
""");
assertThat("Plan should be valid", planBefore, is(instanceOf(LogicalPlan.class)));
}
public void testCountOverTime() {
assumeTrue("requires metrics command", EsqlCapabilities.Cap.METRICS_GROUP_BY_ALL.isEnabled());
LogicalPlan plan = planK8s("TS k8s | STATS count = count_over_time(network.cost)");
TimeSeriesAggregate tsa = findTimeSeriesAggregate(plan);
assertThat("Should have TimeSeriesAggregate", tsa, is(instanceOf(TimeSeriesAggregate.class)));
List<Attribute> groupings = tsa.groupings().stream().filter(g -> g instanceof Attribute).map(g -> (Attribute) g).toList();
assertThat(groupings.size(), is(1));
boolean hasTsid = groupings.stream().anyMatch(g -> g.name().equals(MetadataAttribute.TSID_FIELD));
assertThat("Should still group by _tsid", hasTsid, is(true));
List<Attribute> output = plan.output();
boolean hasTimeseries = output.stream().anyMatch(attr -> attr.name().equals(MetadataAttribute.TIMESERIES));
assertThat("Should have _timeseries in output", hasTimeseries, is(true));
Attribute timeseriesAttr = output.stream()
.filter(attr -> attr.name().equals(MetadataAttribute.TIMESERIES))
.findFirst()
.orElse(null);
assertNotNull(timeseriesAttr);
assertThat("_timeseries attribute should exist", timeseriesAttr, is(instanceOf(Attribute.class)));
assertThat("_timeseries should be KEYWORD type", timeseriesAttr.dataType(), is(DataType.KEYWORD));
}
public void testMixedBareOverTimeAndRegularAggregates() {
assumeTrue("requires metrics command", EsqlCapabilities.Cap.METRICS_GROUP_BY_ALL.isEnabled());
var error = expectThrows(EsqlIllegalArgumentException.class, () -> { planK8s("""
TS k8s
| STATS avg_over_time(network.cost), sum(network.total_bytes_in)
"""); });
assertThat(error.getMessage(), equalTo("""
Cannot mix time-series aggregate [avg_over_time(network.cost)] and \
regular aggregate [sum(network.total_bytes_in)] in the same TimeSeriesAggregate."""));
}
public void testGroupingKeyInAggregatesListPreserved() {
assumeTrue("requires metrics command", EsqlCapabilities.Cap.METRICS_GROUP_BY_ALL.isEnabled());
var error = expectThrows(EsqlIllegalArgumentException.class, () -> { planK8s("""
TS k8s
| STATS rate(network.total_bytes_out) BY region, TBUCKET(1hour)
"""); });
assertThat(error.getMessage(), equalTo("Cannot mix time-series aggregate and grouping attributes. Found [region]."));
}
private TimeSeriesAggregate findTimeSeriesAggregate(LogicalPlan plan) {
Holder<TimeSeriesAggregate> tsAggregateHolder = new Holder<>();
plan.forEachDown(TimeSeriesAggregate.class, tsAggregateHolder::set);
return tsAggregateHolder.get();
}
}
| TimeSeriesBareAggregationsTests |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/reverse/erroneous/SourceTargetMapperAmbiguous1.java | {
"start": 515,
"end": 1485
} | interface ____ {
SourceTargetMapperAmbiguous1 INSTANCE = Mappers.getMapper( SourceTargetMapperAmbiguous1.class );
@Mappings({
@Mapping(target = "stringPropY", source = "stringPropX"),
@Mapping(target = "integerPropY", source = "integerPropX"),
@Mapping(target = "propertyNotToIgnoreUpstream", source = "propertyToIgnoreDownstream")
})
Target forward(Source source);
@Mappings({
@Mapping(target = "stringPropY", source = "stringPropX"),
@Mapping(target = "integerPropY", source = "integerPropX"),
@Mapping(target = "propertyNotToIgnoreUpstream", source = "propertyToIgnoreDownstream")
})
Target forwardNotToReverse(Source source);
@InheritInverseConfiguration
@Mappings({
@Mapping(target = "someConstantDownstream", constant = "test"),
@Mapping(target = "propertyToIgnoreDownstream", ignore = true)
})
Source reverse(Target target);
}
| SourceTargetMapperAmbiguous1 |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/context/FactoryContextMethods.java | {
"start": 408,
"end": 727
} | class ____ {
public NodeDto createNodeDto(@Context FactoryContext context) {
return context.createNode();
}
@ObjectFactory
public AttributeDto createAttributeDto(Attribute source, @Context FactoryContext context) {
return context.createAttributeDto( source );
}
}
| FactoryContextMethods |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/TestExecutionListenersTests.java | {
"start": 12157,
"end": 12263
} | interface ____ {
}
@TestExecutionListeners
@Retention(RetentionPolicy.RUNTIME)
@ | MetaNonInheritedListeners |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/impl/TestEntryFileIO.java | {
"start": 2079,
"end": 11635
} | class ____ extends AbstractManifestCommitterTest {
private static final Logger LOG = LoggerFactory.getLogger(
TestEntryFileIO.class);
/**
* Entry to save.
*/
public static final FileEntry ENTRY = new FileEntry("source", "dest", 100, "etag");
/**
* Entry file instance.
*/
private EntryFileIO entryFileIO;
/**
* Path to a test entry file.
*/
private File entryFile;
/**
* Create an entry file during setup.
*/
@BeforeEach
public void setup() throws Exception {
entryFileIO = new EntryFileIO(new Configuration());
createEntryFile();
}
/**
* Teardown deletes any entry file.
* @throws Exception on any failure
*/
@AfterEach
public void teardown() throws Exception {
Thread.currentThread().setName("teardown");
if (getEntryFile() != null) {
getEntryFile().delete();
}
}
/**
* Create a temp entry file and set the entryFile field to it.
* @throws IOException creation failure
*/
private void createEntryFile() throws IOException {
setEntryFile(File.createTempFile("entry", ".seq"));
}
/**
* reference to any temp file created.
*/
private File getEntryFile() {
return entryFile;
}
private void setEntryFile(File entryFile) {
this.entryFile = entryFile;
}
/**
* Create a file with one entry, then read it back
* via all the mechanisms available.
*/
@Test
public void testCreateWriteReadFileOneEntry() throws Throwable {
final FileEntry source = ENTRY;
// do an explicit close to help isolate any failure.
SequenceFile.Writer writer = createWriter();
writer.append(NullWritable.get(), source);
writer.flush();
writer.close();
FileEntry readBack = new FileEntry();
try (SequenceFile.Reader reader = readEntryFile()) {
reader.next(NullWritable.get(), readBack);
}
Assertions.assertThat(readBack)
.describedAs("entry read back from sequence file")
.isEqualTo(source);
// now use the iterator to access it.
final RemoteIterator<FileEntry> it =
iterateOverEntryFile();
List<FileEntry> files = new ArrayList<>();
foreach(it, files::add);
Assertions.assertThat(files)
.describedAs("iteration over the entry file")
.hasSize(1)
.element(0)
.isEqualTo(source);
final EntryFileIO.EntryIterator et = (EntryFileIO.EntryIterator) it;
Assertions.assertThat(et)
.describedAs("entry iterator %s", et)
.matches(p -> p.isClosed())
.extracting(p -> p.getCount())
.isEqualTo(1);
}
/**
* Create a writer.
* @return a writer
* @throws IOException failure to create the file.
*/
private SequenceFile.Writer createWriter() throws IOException {
return entryFileIO.createWriter(getEntryFile());
}
/**
* Create an iterator over the records in the (non empty) entry file.
* @return an iterator over entries.
* @throws IOException failure to open the file
*/
private RemoteIterator<FileEntry> iterateOverEntryFile() throws IOException {
return entryFileIO.iterateOver(readEntryFile());
}
/**
* Create a reader for the (non empty) entry file.
* @return a reader.
* @throws IOException failure to open the file
*/
private SequenceFile.Reader readEntryFile() throws IOException {
assertEntryFileNonEmpty();
return entryFileIO.createReader(getEntryFile());
}
/**
* Create a file with one entry.
*/
@Test
public void testCreateEmptyFile() throws Throwable {
final File file = getEntryFile();
entryFileIO.createWriter(file).close();
// now use the iterator to access it.
List<FileEntry> files = new ArrayList<>();
Assertions.assertThat(foreach(iterateOverEntryFile(), files::add))
.describedAs("Count of iterations over entries in an entry file with no entries")
.isEqualTo(0);
}
private void assertEntryFileNonEmpty() {
Assertions.assertThat(getEntryFile().length())
.describedAs("Length of file %s", getEntryFile())
.isGreaterThan(0);
}
@Test
public void testCreateInvalidWriter() throws Throwable {
intercept(NullPointerException.class, () ->
entryFileIO.launchEntryWriter(null, 1));
}
@Test
public void testCreateInvalidWriterCapacity() throws Throwable {
intercept(IllegalStateException.class, () ->
entryFileIO.launchEntryWriter(null, 0));
}
/**
* Generate lots of data and write it.
*/
@Test
public void testLargeStreamingWrite() throws Throwable {
// list of 100 entries at a time
int listSize = 100;
// and the number of block writes
int writes = 100;
List<FileEntry> list = buildEntryList(listSize);
int total = listSize * writes;
try (EntryFileIO.EntryWriter out = entryFileIO.launchEntryWriter(createWriter(), 2)) {
Assertions.assertThat(out.isActive())
.describedAs("out.isActive in ()", out)
.isTrue();
for (int i = 0; i < writes; i++) {
Assertions.assertThat(out.enqueue(list))
.describedAs("enqueue of list")
.isTrue();
}
out.close();
out.maybeRaiseWriteException();
Assertions.assertThat(out.isActive())
.describedAs("out.isActive in ()", out)
.isFalse();
Assertions.assertThat(out.getCount())
.describedAs("total elements written")
.isEqualTo(total);
}
// now read it back
AtomicInteger count = new AtomicInteger();
foreach(iterateOverEntryFile(), e -> {
final int elt = count.getAndIncrement();
final int index = elt % listSize;
Assertions.assertThat(e)
.describedAs("element %d in file mapping to index %d", elt, index)
.isEqualTo(list.get(index));
});
Assertions.assertThat(count.get())
.describedAs("total elements read")
.isEqualTo(total);
}
/**
* Build an entry list.
* @param listSize size of the list
* @return a list of entries
*/
private static List<FileEntry> buildEntryList(final int listSize) {
List<FileEntry> list = new ArrayList<>(listSize);
for (int i = 0; i < listSize; i++) {
list.add(new FileEntry("source" + i, "dest" + i, i, "etag-" + i));
}
// just for debugging/regression testing
Assertions.assertThat(list).hasSize(listSize);
return list;
}
/**
* Write lists to the output, but the stream is going to fail after a
* configured number of records have been written.
* Verify that the (blocked) submitter is woken up
* and that the exception was preserved for rethrowing.
*/
@Test
public void testFailurePropagation() throws Throwable {
final int count = 4;
final SequenceFile.Writer writer = spyWithFailingAppend(
entryFileIO.createWriter(getEntryFile()), count);
// list of 100 entries at a time
// and the number of block writes
List<FileEntry> list = buildEntryList(1);
// small queue ensures the posting thread is blocked
try (EntryFileIO.EntryWriter out = entryFileIO.launchEntryWriter(writer, 2)) {
boolean valid = true;
for (int i = 0; valid && i < count * 2; i++) {
valid = out.enqueue(list);
}
LOG.info("queue to {} finished valid={}", out, valid);
out.close();
// verify the exception is as expected
intercept(IOException.class, "mocked", () ->
out.maybeRaiseWriteException());
// and verify the count of invocations.
Assertions.assertThat(out.getCount())
.describedAs("process count of %s", count)
.isEqualTo(count);
}
}
/**
* Spy on a writer with the append operation to fail after the given count of calls
* is reached.
* @param writer write.
* @param count number of allowed append calls.
* @return spied writer.
* @throws IOException from the signature of the append() call mocked.
*/
private static SequenceFile.Writer spyWithFailingAppend(final SequenceFile.Writer writer,
final int count)
throws IOException {
AtomicLong limit = new AtomicLong(count);
final SequenceFile.Writer spied = Mockito.spy(writer);
Mockito.doAnswer((InvocationOnMock invocation) -> {
final Writable k = invocation.getArgument(0);
final Writable v = invocation.getArgument(1);
if (limit.getAndDecrement() > 0) {
writer.append(k, v);
} else {
throw new IOException("mocked");
}
return null;
}).when(spied).append(Mockito.any(Writable.class), Mockito.any(Writable.class));
return spied;
}
/**
* Multithreaded writing.
*/
@Test
public void testParallelWrite() throws Throwable {
// list of 100 entries at a time
int listSize = 100;
// and the number of block writes
int attempts = 100;
List<FileEntry> list = buildEntryList(listSize);
int total = listSize * attempts;
try (EntryFileIO.EntryWriter out = entryFileIO.launchEntryWriter(createWriter(), 20)) {
TaskPool.foreach(rangeExcludingIterator(0, attempts))
.executeWith(getSubmitter())
.stopOnFailure()
.run(l -> {
out.enqueue(list);
});
out.close();
out.maybeRaiseWriteException();
Assertions.assertThat(out.getCount())
.describedAs("total elements written")
.isEqualTo(total);
}
// now read it back
Assertions.assertThat(foreach(iterateOverEntryFile(), e -> { }))
.describedAs("total elements read")
.isEqualTo(total);
}
}
| TestEntryFileIO |
java | elastic__elasticsearch | libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/XContentProviderImpl.java | {
"start": 1161,
"end": 3205
} | class ____ implements XContentProvider {
public XContentProviderImpl() {}
@Override
public FormatProvider getCborXContent() {
return new FormatProvider() {
@Override
public XContentBuilder getContentBuilder() throws IOException {
return CborXContentImpl.getContentBuilder();
}
@Override
public XContent XContent() {
return CborXContentImpl.cborXContent();
}
};
}
@Override
public FormatProvider getJsonXContent() {
return new FormatProvider() {
@Override
public XContentBuilder getContentBuilder() throws IOException {
return JsonXContentImpl.getContentBuilder();
}
@Override
public XContent XContent() {
return JsonXContentImpl.jsonXContent();
}
};
}
@Override
public FormatProvider getSmileXContent() {
return new FormatProvider() {
@Override
public XContentBuilder getContentBuilder() throws IOException {
return SmileXContentImpl.getContentBuilder();
}
@Override
public XContent XContent() {
return SmileXContentImpl.smileXContent();
}
};
}
@Override
public FormatProvider getYamlXContent() {
return new FormatProvider() {
@Override
public XContentBuilder getContentBuilder() throws IOException {
return YamlXContentImpl.getContentBuilder();
}
@Override
public XContent XContent() {
return YamlXContentImpl.yamlXContent();
}
};
}
@Override
public XContentParserConfiguration empty() {
return XContentParserConfigurationImpl.EMPTY;
}
@Override
public JsonStringEncoder getJsonStringEncoder() {
return JsonStringEncoderImpl.getInstance();
}
}
| XContentProviderImpl |
java | apache__dubbo | dubbo-rpc/dubbo-rpc-api/src/main/java/org/apache/dubbo/rpc/protocol/AbstractProtocol.java | {
"start": 2519,
"end": 7731
} | class ____ implements Protocol, ScopeModelAware {
protected final ErrorTypeAwareLogger logger = LoggerFactory.getErrorTypeAwareLogger(getClass());
protected final Map<String, Exporter<?>> exporterMap = new ConcurrentHashMap<>();
/**
* <host:port, ProtocolServer>
*/
protected final Map<String, ProtocolServer> serverMap = new ConcurrentHashMap<>();
// TODO SoftReference
protected final Set<Invoker<?>> invokers = new ConcurrentHashSet<>();
protected FrameworkModel frameworkModel;
private final Set<String> optimizers = new ConcurrentHashSet<>();
@Override
public void setFrameworkModel(FrameworkModel frameworkModel) {
this.frameworkModel = frameworkModel;
}
protected static String serviceKey(URL url) {
int port = url.getParameter(Constants.BIND_PORT_KEY, url.getPort());
return serviceKey(port, url.getPath(), url.getVersion(), url.getGroup());
}
protected static String serviceKey(int port, String serviceName, String serviceVersion, String serviceGroup) {
return ProtocolUtils.serviceKey(port, serviceName, serviceVersion, serviceGroup);
}
@Override
public List<ProtocolServer> getServers() {
return Collections.unmodifiableList(new ArrayList<>(serverMap.values()));
}
protected void loadServerProperties(ProtocolServer server) {
// read and hold config before destroy
int serverShutdownTimeout =
ConfigurationUtils.getServerShutdownTimeout(server.getUrl().getScopeModel());
server.getAttributes().put(SHUTDOWN_WAIT_KEY, serverShutdownTimeout);
}
protected int getServerShutdownTimeout(ProtocolServer server) {
return (int) server.getAttributes().getOrDefault(SHUTDOWN_WAIT_KEY, DEFAULT_SERVER_SHUTDOWN_TIMEOUT);
}
@Override
public void destroy() {
for (Invoker<?> invoker : invokers) {
if (invoker != null) {
try {
if (logger.isInfoEnabled()) {
logger.info("Destroy reference: " + invoker.getUrl());
}
invoker.destroy();
} catch (Throwable t) {
logger.warn(PROTOCOL_FAILED_DESTROY_INVOKER, "", "", t.getMessage(), t);
}
}
}
invokers.clear();
exporterMap.forEach((key, exporter) -> {
if (exporter != null) {
try {
if (logger.isInfoEnabled()) {
logger.info("Unexport service: " + exporter.getInvoker().getUrl());
}
exporter.unexport();
} catch (Throwable t) {
logger.warn(PROTOCOL_FAILED_DESTROY_INVOKER, "", "", t.getMessage(), t);
}
}
});
exporterMap.clear();
}
@Override
public <T> Invoker<T> refer(Class<T> type, URL url) throws RpcException {
return protocolBindingRefer(type, url);
}
@Deprecated
protected abstract <T> Invoker<T> protocolBindingRefer(Class<T> type, URL url) throws RpcException;
public Map<String, Exporter<?>> getExporterMap() {
return exporterMap;
}
public Collection<Exporter<?>> getExporters() {
return Collections.unmodifiableCollection(exporterMap.values());
}
protected void optimizeSerialization(URL url) throws RpcException {
String className = url.getParameter(OPTIMIZER_KEY, "");
if (StringUtils.isEmpty(className) || optimizers.contains(className)) {
return;
}
logger.info("Optimizing the serialization process for Kryo, FST, etc...");
try {
Class clazz = Thread.currentThread().getContextClassLoader().loadClass(className);
if (!SerializationOptimizer.class.isAssignableFrom(clazz)) {
throw new RpcException("The serialization optimizer " + className + " isn't an instance of "
+ SerializationOptimizer.class.getName());
}
SerializationOptimizer optimizer = (SerializationOptimizer) clazz.newInstance();
if (optimizer.getSerializableClasses() == null) {
return;
}
for (Class c : optimizer.getSerializableClasses()) {
SerializableClassRegistry.registerClass(c);
}
optimizers.add(className);
} catch (ClassNotFoundException e) {
throw new RpcException("Cannot find the serialization optimizer class: " + className, e);
} catch (InstantiationException | IllegalAccessException e) {
throw new RpcException("Cannot instantiate the serialization optimizer class: " + className, e);
}
}
protected String getAddr(URL url) {
String bindIp = url.getParameter(org.apache.dubbo.remoting.Constants.BIND_IP_KEY, url.getHost());
if (url.getParameter(ANYHOST_KEY, false)) {
bindIp = ANYHOST_VALUE;
}
return NetUtils.getIpByHost(bindIp) + ":"
+ url.getParameter(org.apache.dubbo.remoting.Constants.BIND_PORT_KEY, url.getPort());
}
}
| AbstractProtocol |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/streaming/api/datastream/KeyedStream.java | {
"start": 17828,
"end": 18014
} | class ____<T1, T2, KEY> {
private final KeyedStream<T1, KEY> streamOne;
private final KeyedStream<T2, KEY> streamTwo;
/**
* The time behaviour | IntervalJoin |
java | quarkusio__quarkus | integration-tests/kubernetes/quarkus-standard-way/src/test/java/io/quarkus/it/kubernetes/KnativeWithSpecifiedContainerNameTest.java | {
"start": 502,
"end": 2445
} | class ____ {
@RegisterExtension
static final QuarkusProdModeTest config = new QuarkusProdModeTest()
.withApplicationRoot((jar) -> jar.addClasses(GreetingResource.class))
.setApplicationName("knative-with-specified-container-name")
.setApplicationVersion("0.1-SNAPSHOT")
.withConfigurationResource("knative-with-specified-container-name.properties");
@ProdBuildResults
private ProdModeTestResults prodModeTestResults;
@Test
public void assertGeneratedResources() throws IOException {
Path kubernetesDir = prodModeTestResults.getBuildDir().resolve("kubernetes");
assertThat(kubernetesDir)
.isDirectoryContaining(p -> p.getFileName().endsWith("knative.json"))
.isDirectoryContaining(p -> p.getFileName().endsWith("knative.yml"))
.satisfies(p -> assertThat(p.toFile().listFiles()).hasSize(2));
List<HasMetadata> kubernetesList = DeserializationUtil
.deserializeAsList(kubernetesDir.resolve("knative.yml"));
assertThat(kubernetesList).filteredOn(i -> "Service".equals(i.getKind())).singleElement().satisfies(i -> {
assertThat(i).isInstanceOfSatisfying(Service.class, s -> {
assertThat(s.getSpec()).satisfies(spec -> {
assertThat(s.getMetadata()).satisfies(m -> {
assertThat(m.getName()).isEqualTo("kfoo");
});
assertThat(spec.getTemplate().getSpec().getContainers()).singleElement().satisfies(container -> {
assertThat(container.getCommand()).contains("my-command");
assertThat(container.getName()).isEqualTo("kbar");
assertThat(container.getArgs()).containsExactly("A", "B");
});
});
});
});
}
}
| KnativeWithSpecifiedContainerNameTest |
java | apache__flink | flink-core/src/test/java/org/apache/flink/api/common/typeutils/SerializerTestBase.java | {
"start": 16280,
"end": 17199
} | class ____ extends DataOutputStream implements DataOutputView {
public TestOutputView() {
super(new ByteArrayOutputStream(4096));
}
public TestInputView getInputView() {
ByteArrayOutputStream baos = (ByteArrayOutputStream) out;
return new TestInputView(baos.toByteArray());
}
@Override
public void skipBytesToWrite(int numBytes) throws IOException {
for (int i = 0; i < numBytes; i++) {
write(0);
}
}
@Override
public void write(DataInputView source, int numBytes) throws IOException {
byte[] buffer = new byte[numBytes];
source.readFully(buffer);
write(buffer);
}
}
/**
* Runner to test serializer duplication via concurrency.
*
* @param <T> type of the test elements.
*/
static | TestOutputView |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/mysql/param/MySqlParameterizedOutputVisitorTest_35.java | {
"start": 485,
"end": 3154
} | class ____ extends TestCase {
public void test_for_parameterize() throws Exception {
final DbType dbType = JdbcConstants.MYSQL;
String sql = "/*+TDDL({'extra':{'SOCKET_TIMEOUT':'3600000'}})*/\n" +
"select sample_table_schema, \n" +
"sample_table_name,\n" +
"sample_table_orig_size,\n" +
"sample_table_sample_size,\n" +
"col.id as id,\n" +
"sample_column_name, \n" +
"sample_column_type,\n" +
"sample_string, \n" +
"sample_column_highkey, \n" +
"sample_column_high2key, \n" +
"sample_column_lowkey,\n" +
"sample_column_low2key,\n" +
"sample_column_cardinality,\n" +
"sample_avg_length,\n" +
"sample_column_dist_type as type, \n" +
"sample_column_dist_quantileno as quantileno,\n" +
"sample_column_dist_highkey,\n" +
"sample_column_dist_lowkey, \n" +
"sample_column_dist_value,\n" +
"sample_column_dist_cardinality,\n" +
"sample_column_dist_count,\n" +
"col.gmt_create as time\n" +
"from sample_tables tab, sample_columns col, sample_column_distribution dist\n" +
"where \n" +
"tab.id = col.sample_column_table_id and \n" +
"col.id = dist.sample_column_dist_column_id and \n" +
"col.id = ( \n" +
" SELECT id FROM sample_columns col \n" +
" WHERE sample_column_name = 'gmt_modified'\n" +
" AND sample_column_table_schema = 'SC_PRODUCT_03'\n" +
" AND sample_column_table_name = 'product_0096'\n" +
" ORDER BY id DESC LIMIT 1 \n" +
" order by type, quantileno)";
SQLStatementParser parser = SQLParserUtils.createSQLStatementParser(sql, dbType);
List<SQLStatement> stmtList = parser.parseStatementList();
SQLStatement statement = stmtList.get(0);
StringBuilder out = new StringBuilder();
// List<Object> parameters = new ArrayList<Object>();
SQLASTOutputVisitor visitor = SQLUtils.createOutputVisitor(out, JdbcConstants.MYSQL);
visitor.setParameterized(true);
visitor.setParameterizedMergeInList(true);
// visitor.setParameters(parameters);
visitor.setExportTables(true);
visitor.setPrettyFormat(false);
statement.accept(visitor);
System.out.println(out);
}
}
| MySqlParameterizedOutputVisitorTest_35 |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/referencedcolumnname/Luggage.java | {
"start": 550,
"end": 1876
} | class ____ implements Serializable {
private Integer id;
private String owner;
@Column(name = "`type`")
private String type;
private Set<Clothes> hasInside = new HashSet<>();
public Luggage() {
}
public Luggage(String owner, String type) {
this.owner = owner;
this.type = type;
}
@Id
@GeneratedValue
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public String getOwner() {
return owner;
}
public void setOwner(String owner) {
this.owner = owner;
}
public String getType() {
return type;
}
public void setType(String type) {
this.type = type;
}
@OneToMany(cascade = {CascadeType.PERSIST, CascadeType.MERGE})
@JoinColumn(name = "lug_type", referencedColumnName = "type")
@JoinColumn(name = "lug_owner", referencedColumnName = "owner")
public Set<Clothes> getHasInside() {
return hasInside;
}
public void setHasInside(Set<Clothes> hasInside) {
this.hasInside = hasInside;
}
public boolean equals(Object o) {
if ( this == o ) return true;
if ( !(o instanceof Luggage luggage) ) return false;
if ( !owner.equals( luggage.owner ) ) return false;
return type.equals( luggage.type );
}
public int hashCode() {
int result;
result = owner.hashCode();
result = 29 * result + type.hashCode();
return result;
}
}
| Luggage |
java | apache__camel | components/camel-json-validator/src/main/java/org/apache/camel/component/jsonvalidator/JsonValidatorEndpoint.java | {
"start": 1938,
"end": 2681
} | class ____ extends ResourceEndpoint {
private volatile JsonSchema schema;
@UriParam(defaultValue = "true")
private boolean failOnNullBody = true;
@UriParam(defaultValue = "true")
private boolean failOnNullHeader = true;
@UriParam(description = "To validate against a header instead of the message body.")
private String headerName;
@UriParam(label = "advanced")
private JsonValidatorErrorHandler errorHandler = new DefaultJsonValidationErrorHandler();
@UriParam(label = "advanced")
private JsonUriSchemaLoader uriSchemaLoader = new DefaultJsonUriSchemaLoader();
@UriParam(label = "advanced",
description = "Comma-separated list of Jackson DeserializationFeature | JsonValidatorEndpoint |
java | elastic__elasticsearch | distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ProcessUtil.java | {
"start": 666,
"end": 1390
} | interface ____ {
void run() throws InterruptedException;
}
/**
* Runs an interruptable method, but throws an assertion if an interrupt is received.
*
* This is useful for threads which expect a no interruption policy
*/
static <T> T nonInterruptible(Interruptible<T> interruptible) {
try {
return interruptible.run();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new AssertionError(e);
}
}
static void nonInterruptibleVoid(InterruptibleVoid interruptible) {
nonInterruptible(() -> {
interruptible.run();
return null;
});
}
}
| InterruptibleVoid |
java | quarkusio__quarkus | extensions/vertx-http/deployment/src/test/java/io/quarkus/vertx/http/security/AbstractFormAuthTestCase.java | {
"start": 480,
"end": 4813
} | class ____ {
protected static final String APP_PROPS = "" +
"quarkus.http.auth.form.enabled=true\n" +
"quarkus.http.auth.form.login-page=login\n" +
"quarkus.http.auth.form.error-page=error\n" +
"quarkus.http.auth.form.landing-page=landing\n" +
"quarkus.http.auth.policy.r1.roles-allowed=admin\n" +
"quarkus.http.auth.permission.roles1.paths=/admin\n" +
"quarkus.http.auth.permission.roles1.policy=r1\n";
@BeforeAll
public static void setup() {
TestIdentityController.resetRoles()
.add("admin", "admin", "admin")
.add("test", "test", "admin");
}
@Test
public void testFormBasedAuthSuccess() {
CookieFilter cookies = new CookieFilter();
RestAssured
.given()
.filter(cookies)
.redirects().follow(false)
.when()
.get("/admin")
.then()
.assertThat()
.statusCode(302)
.header("location", containsString("/login"))
.cookie("quarkus-redirect-location",
RestAssuredMatchers.detailedCookie().value(containsString("/admin")).secured(false));
RestAssured
.given()
.filter(cookies)
.redirects().follow(false)
.when()
.formParam("j_username", "admin")
.formParam("j_password", "admin")
.post("/j_security_check")
.then()
.assertThat()
.statusCode(302)
.header("location", containsString("/admin"))
.cookie("quarkus-credential",
RestAssuredMatchers.detailedCookie().value(notNullValue()).secured(false));
RestAssured
.given()
.filter(cookies)
.redirects().follow(false)
.when()
.get("/admin")
.then()
.assertThat()
.statusCode(200)
.body(equalTo("admin:/admin"));
//now authenticate with a different user
RestAssured
.given()
.filter(cookies)
.redirects().follow(false)
.when()
.formParam("j_username", "test")
.formParam("j_password", "test")
.post("/j_security_check")
.then()
.assertThat()
.statusCode(302)
.header("location", containsString("/landing"))
.cookie("quarkus-credential",
RestAssuredMatchers.detailedCookie().value(notNullValue()).secured(false));
RestAssured
.given()
.filter(cookies)
.redirects().follow(false)
.when()
.get("/admin")
.then()
.assertThat()
.statusCode(200)
.body(equalTo("test:/admin"));
}
@Test
public void testFormBasedAuthSuccessLandingPage() {
CookieFilter cookies = new CookieFilter();
RestAssured
.given()
.filter(cookies)
.redirects().follow(false)
.when()
.formParam("j_username", "admin")
.formParam("j_password", "admin")
.post("/j_security_check")
.then()
.assertThat()
.statusCode(302)
.header("location", containsString("/landing"))
.cookie("quarkus-credential", notNullValue());
}
@Test
public void testFormAuthFailure() {
CookieFilter cookies = new CookieFilter();
RestAssured
.given()
.filter(cookies)
.redirects().follow(false)
.when()
.formParam("j_username", "admin")
.formParam("j_password", "wrongpassword")
.post("/j_security_check")
.then()
.assertThat()
.statusCode(302)
.header("location", containsString("/error"));
}
}
| AbstractFormAuthTestCase |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/io/disk/iomanager/BufferFileSegmentReader.java | {
"start": 896,
"end": 1081
} | interface ____ extends FileIOChannel {
void read() throws IOException;
void seekTo(long position) throws IOException;
boolean hasReachedEndOfFile();
}
| BufferFileSegmentReader |
java | apache__dubbo | dubbo-remoting/dubbo-remoting-http12/src/main/java/org/apache/dubbo/remoting/http12/HttpInputMessage.java | {
"start": 909,
"end": 1084
} | interface ____ extends AutoCloseable {
InputStream getBody();
@Override
default void close() throws IOException {
getBody().close();
}
}
| HttpInputMessage |
java | ReactiveX__RxJava | src/test/java/io/reactivex/rxjava3/internal/operators/observable/ObservableObserveOnTest.java | {
"start": 1883,
"end": 26329
} | class ____ extends RxJavaTest {
/**
* This is testing a no-op path since it uses Schedulers.immediate() which will not do scheduling.
*/
@Test
public void observeOn() {
Observer<Integer> observer = TestHelper.mockObserver();
Observable.just(1, 2, 3).observeOn(ImmediateThinScheduler.INSTANCE).subscribe(observer);
verify(observer, times(1)).onNext(1);
verify(observer, times(1)).onNext(2);
verify(observer, times(1)).onNext(3);
verify(observer, times(1)).onComplete();
}
@Test
public void ordering() throws InterruptedException {
// Observable<String> obs = Observable.just("one", null, "two", "three", "four");
// FIXME null values not allowed
Observable<String> obs = Observable.just("one", "null", "two", "three", "four");
Observer<String> observer = TestHelper.mockObserver();
InOrder inOrder = inOrder(observer);
TestObserverEx<String> to = new TestObserverEx<>(observer);
obs.observeOn(Schedulers.computation()).subscribe(to);
to.awaitDone(1000, TimeUnit.MILLISECONDS);
if (to.errors().size() > 0) {
for (Throwable t : to.errors()) {
t.printStackTrace();
}
fail("failed with exception");
}
inOrder.verify(observer, times(1)).onNext("one");
inOrder.verify(observer, times(1)).onNext("null");
inOrder.verify(observer, times(1)).onNext("two");
inOrder.verify(observer, times(1)).onNext("three");
inOrder.verify(observer, times(1)).onNext("four");
inOrder.verify(observer, times(1)).onComplete();
inOrder.verifyNoMoreInteractions();
}
@Test
public void threadName() throws InterruptedException {
System.out.println("Main Thread: " + Thread.currentThread().getName());
// FIXME null values not allowed
// Observable<String> obs = Observable.just("one", null, "two", "three", "four");
Observable<String> obs = Observable.just("one", "null", "two", "three", "four");
Observer<String> observer = TestHelper.mockObserver();
final String parentThreadName = Thread.currentThread().getName();
final CountDownLatch completedLatch = new CountDownLatch(1);
// assert subscribe is on main thread
obs = obs.doOnNext(new Consumer<String>() {
@Override
public void accept(String s) {
String threadName = Thread.currentThread().getName();
System.out.println("Source ThreadName: " + threadName + " Expected => " + parentThreadName);
assertEquals(parentThreadName, threadName);
}
});
// assert observe is on new thread
obs.observeOn(Schedulers.newThread()).doOnNext(new Consumer<String>() {
@Override
public void accept(String t1) {
String threadName = Thread.currentThread().getName();
boolean correctThreadName = threadName.startsWith("RxNewThreadScheduler");
System.out.println("ObserveOn ThreadName: " + threadName + " Correct => " + correctThreadName);
assertTrue(correctThreadName);
}
}).doAfterTerminate(new Action() {
@Override
public void run() {
completedLatch.countDown();
}
}).subscribe(observer);
if (!completedLatch.await(1000, TimeUnit.MILLISECONDS)) {
fail("timed out waiting");
}
verify(observer, never()).onError(any(Throwable.class));
verify(observer, times(5)).onNext(any(String.class));
verify(observer, times(1)).onComplete();
}
@Test
public void observeOnTheSameSchedulerTwice() {
Scheduler scheduler = ImmediateThinScheduler.INSTANCE;
Observable<Integer> o = Observable.just(1, 2, 3);
Observable<Integer> o2 = o.observeOn(scheduler);
Observer<Object> observer1 = TestHelper.mockObserver();
Observer<Object> observer2 = TestHelper.mockObserver();
InOrder inOrder1 = inOrder(observer1);
InOrder inOrder2 = inOrder(observer2);
o2.subscribe(observer1);
o2.subscribe(observer2);
inOrder1.verify(observer1, times(1)).onNext(1);
inOrder1.verify(observer1, times(1)).onNext(2);
inOrder1.verify(observer1, times(1)).onNext(3);
inOrder1.verify(observer1, times(1)).onComplete();
verify(observer1, never()).onError(any(Throwable.class));
inOrder1.verifyNoMoreInteractions();
inOrder2.verify(observer2, times(1)).onNext(1);
inOrder2.verify(observer2, times(1)).onNext(2);
inOrder2.verify(observer2, times(1)).onNext(3);
inOrder2.verify(observer2, times(1)).onComplete();
verify(observer2, never()).onError(any(Throwable.class));
inOrder2.verifyNoMoreInteractions();
}
@Test
public void observeSameOnMultipleSchedulers() {
TestScheduler scheduler1 = new TestScheduler();
TestScheduler scheduler2 = new TestScheduler();
Observable<Integer> o = Observable.just(1, 2, 3);
Observable<Integer> o1 = o.observeOn(scheduler1);
Observable<Integer> o2 = o.observeOn(scheduler2);
Observer<Object> observer1 = TestHelper.mockObserver();
Observer<Object> observer2 = TestHelper.mockObserver();
InOrder inOrder1 = inOrder(observer1);
InOrder inOrder2 = inOrder(observer2);
o1.subscribe(observer1);
o2.subscribe(observer2);
scheduler1.advanceTimeBy(1, TimeUnit.SECONDS);
scheduler2.advanceTimeBy(1, TimeUnit.SECONDS);
inOrder1.verify(observer1, times(1)).onNext(1);
inOrder1.verify(observer1, times(1)).onNext(2);
inOrder1.verify(observer1, times(1)).onNext(3);
inOrder1.verify(observer1, times(1)).onComplete();
verify(observer1, never()).onError(any(Throwable.class));
inOrder1.verifyNoMoreInteractions();
inOrder2.verify(observer2, times(1)).onNext(1);
inOrder2.verify(observer2, times(1)).onNext(2);
inOrder2.verify(observer2, times(1)).onNext(3);
inOrder2.verify(observer2, times(1)).onComplete();
verify(observer2, never()).onError(any(Throwable.class));
inOrder2.verifyNoMoreInteractions();
}
/**
* Confirm that running on a NewThreadScheduler uses the same thread for the entire stream.
*/
@Test
public void observeOnWithNewThreadScheduler() {
final AtomicInteger count = new AtomicInteger();
final int _multiple = 99;
Observable.range(1, 100000).map(new Function<Integer, Integer>() {
@Override
public Integer apply(Integer t1) {
return t1 * _multiple;
}
}).observeOn(Schedulers.newThread())
.blockingForEach(new Consumer<Integer>() {
@Override
public void accept(Integer t1) {
assertEquals(count.incrementAndGet() * _multiple, t1.intValue());
// FIXME toBlocking methods run on the current thread
String name = Thread.currentThread().getName();
assertFalse("Wrong thread name: " + name, name.startsWith("Rx"));
}
});
}
/**
* Confirm that running on a ThreadPoolScheduler allows multiple threads but is still ordered.
*/
@Test
public void observeOnWithThreadPoolScheduler() {
final AtomicInteger count = new AtomicInteger();
final int _multiple = 99;
Observable.range(1, 100000).map(new Function<Integer, Integer>() {
@Override
public Integer apply(Integer t1) {
return t1 * _multiple;
}
}).observeOn(Schedulers.computation())
.blockingForEach(new Consumer<Integer>() {
@Override
public void accept(Integer t1) {
assertEquals(count.incrementAndGet() * _multiple, t1.intValue());
// FIXME toBlocking methods run on the caller's thread
String name = Thread.currentThread().getName();
assertFalse("Wrong thread name: " + name, name.startsWith("Rx"));
}
});
}
/**
* Attempts to confirm that when pauses exist between events, the ScheduledObserver
* does not lose or reorder any events since the scheduler will not block, but will
* be re-scheduled when it receives new events after each pause.
*
*
* This is non-deterministic in proving success, but if it ever fails (non-deterministically)
* it is a sign of potential issues as thread-races and scheduling should not affect output.
*/
@Test
public void observeOnOrderingConcurrency() {
final AtomicInteger count = new AtomicInteger();
final int _multiple = 99;
Observable.range(1, 10000).map(new Function<Integer, Integer>() {
@Override
public Integer apply(Integer t1) {
if (randomIntFrom0to100() > 98) {
try {
Thread.sleep(2);
} catch (InterruptedException e) {
e.printStackTrace();
}
}
return t1 * _multiple;
}
}).observeOn(Schedulers.computation())
.blockingForEach(new Consumer<Integer>() {
@Override
public void accept(Integer t1) {
assertEquals(count.incrementAndGet() * _multiple, t1.intValue());
// assertTrue(name.startsWith("RxComputationThreadPool"));
// FIXME toBlocking now runs its methods on the caller thread
String name = Thread.currentThread().getName();
assertFalse("Wrong thread name: " + name, name.startsWith("Rx"));
}
});
}
@Test
public void nonBlockingOuterWhileBlockingOnNext() throws InterruptedException {
final CountDownLatch completedLatch = new CountDownLatch(1);
final CountDownLatch nextLatch = new CountDownLatch(1);
final AtomicLong completeTime = new AtomicLong();
// use subscribeOn to make async, observeOn to move
Observable.range(1, 2).subscribeOn(Schedulers.newThread()).observeOn(Schedulers.newThread()).subscribe(new DefaultObserver<Integer>() {
@Override
public void onComplete() {
System.out.println("onComplete");
completeTime.set(System.nanoTime());
completedLatch.countDown();
}
@Override
public void onError(Throwable e) {
}
@Override
public void onNext(Integer t) {
// don't let this thing finish yet
try {
if (!nextLatch.await(1000, TimeUnit.MILLISECONDS)) {
throw new RuntimeException("it shouldn't have timed out");
}
} catch (InterruptedException e) {
throw new RuntimeException("it shouldn't have failed");
}
}
});
long afterSubscribeTime = System.nanoTime();
System.out.println("After subscribe: " + completedLatch.getCount());
assertEquals(1, completedLatch.getCount());
nextLatch.countDown();
completedLatch.await(1000, TimeUnit.MILLISECONDS);
assertTrue(completeTime.get() > afterSubscribeTime);
System.out.println("onComplete nanos after subscribe: " + (completeTime.get() - afterSubscribeTime));
}
private static int randomIntFrom0to100() {
// XORShift instead of Math.random http://javamex.com/tutorials/random_numbers/xorshift.shtml
long x = System.nanoTime();
x ^= (x << 21);
x ^= (x >>> 35);
x ^= (x << 4);
return Math.abs((int) x % 100);
}
@Test
public void delayedErrorDeliveryWhenSafeSubscriberUnsubscribes() {
TestScheduler testScheduler = new TestScheduler();
Observable<Integer> source = Observable.concat(Observable.<Integer> error(new TestException()), Observable.just(1));
Observer<Integer> o = TestHelper.mockObserver();
InOrder inOrder = inOrder(o);
source.observeOn(testScheduler).subscribe(o);
inOrder.verify(o, never()).onError(any(TestException.class));
testScheduler.advanceTimeBy(1, TimeUnit.SECONDS);
inOrder.verify(o).onError(any(TestException.class));
inOrder.verify(o, never()).onNext(anyInt());
inOrder.verify(o, never()).onComplete();
}
@Test
public void afterUnsubscribeCalledThenObserverOnNextNeverCalled() {
final TestScheduler testScheduler = new TestScheduler();
final Observer<Integer> observer = TestHelper.mockObserver();
TestObserver<Integer> to = new TestObserver<>(observer);
Observable.just(1, 2, 3)
.observeOn(testScheduler)
.subscribe(to);
to.dispose();
testScheduler.advanceTimeBy(1, TimeUnit.SECONDS);
final InOrder inOrder = inOrder(observer);
inOrder.verify(observer, never()).onNext(anyInt());
inOrder.verify(observer, never()).onError(any(Exception.class));
inOrder.verify(observer, never()).onComplete();
}
@Test
public void backpressureWithTakeBefore() {
final AtomicInteger generated = new AtomicInteger();
Observable<Integer> o = Observable.fromIterable(new Iterable<Integer>() {
@Override
public Iterator<Integer> iterator() {
return new Iterator<Integer>() {
@Override
public void remove() {
}
@Override
public Integer next() {
return generated.getAndIncrement();
}
@Override
public boolean hasNext() {
return true;
}
};
}
});
TestObserver<Integer> to = new TestObserver<>();
o
.take(7)
.observeOn(Schedulers.newThread())
.subscribe(to);
to.awaitDone(5, TimeUnit.SECONDS);
to.assertValues(0, 1, 2, 3, 4, 5, 6);
assertEquals(7, generated.get());
}
@Test
public void asyncChild() {
TestObserver<Integer> to = new TestObserver<>();
Observable.range(0, 100000).observeOn(Schedulers.newThread()).observeOn(Schedulers.newThread()).subscribe(to);
to.awaitDone(5, TimeUnit.SECONDS);
to.assertNoErrors();
}
@Test
public void delayError() {
Observable.range(1, 5).concatWith(Observable.<Integer>error(new TestException()))
.observeOn(Schedulers.computation(), true)
.doOnNext(new Consumer<Integer>() {
@Override
public void accept(Integer v) throws Exception {
if (v == 1) {
Thread.sleep(100);
}
}
})
.test()
.awaitDone(5, TimeUnit.SECONDS)
.assertFailure(TestException.class, 1, 2, 3, 4, 5);
}
@Test
public void trampolineScheduler() {
Observable.just(1)
.observeOn(Schedulers.trampoline())
.test()
.assertResult(1);
}
@Test
public void dispose() {
TestHelper.checkDisposed(PublishSubject.create().observeOn(new TestScheduler()));
}
@Test
public void doubleOnSubscribe() {
TestHelper.checkDoubleOnSubscribeObservable(new Function<Observable<Object>, ObservableSource<Object>>() {
@Override
public ObservableSource<Object> apply(Observable<Object> o) throws Exception {
return o.observeOn(new TestScheduler());
}
});
}
@Test
public void badSource() {
List<Throwable> errors = TestHelper.trackPluginErrors();
try {
TestScheduler scheduler = new TestScheduler();
TestObserver<Integer> to = new Observable<Integer>() {
@Override
protected void subscribeActual(Observer<? super Integer> observer) {
observer.onSubscribe(Disposable.empty());
observer.onComplete();
observer.onNext(1);
observer.onError(new TestException());
observer.onComplete();
}
}
.observeOn(scheduler)
.test();
scheduler.triggerActions();
to.assertResult();
TestHelper.assertUndeliverable(errors, 0, TestException.class);
} finally {
RxJavaPlugins.reset();
}
}
@Test
public void inputSyncFused() {
Observable.range(1, 5)
.observeOn(Schedulers.single())
.test()
.awaitDone(5, TimeUnit.SECONDS)
.assertResult(1, 2, 3, 4, 5);
}
@Test
public void inputAsyncFused() {
UnicastSubject<Integer> us = UnicastSubject.create();
TestObserver<Integer> to = us.observeOn(Schedulers.single()).test();
TestHelper.emit(us, 1, 2, 3, 4, 5);
to
.awaitDone(5, TimeUnit.SECONDS)
.assertResult(1, 2, 3, 4, 5);
}
@Test
public void inputAsyncFusedError() {
UnicastSubject<Integer> us = UnicastSubject.create();
TestObserver<Integer> to = us.observeOn(Schedulers.single()).test();
us.onError(new TestException());
to
.awaitDone(5, TimeUnit.SECONDS)
.assertFailure(TestException.class);
}
@Test
public void inputAsyncFusedErrorDelayed() {
UnicastSubject<Integer> us = UnicastSubject.create();
TestObserver<Integer> to = us.observeOn(Schedulers.single(), true).test();
us.onError(new TestException());
to
.awaitDone(5, TimeUnit.SECONDS)
.assertFailure(TestException.class);
}
@Test
public void outputFused() {
TestObserverEx<Integer> to = new TestObserverEx<>(QueueFuseable.ANY);
Observable.range(1, 5).hide()
.observeOn(Schedulers.single())
.subscribe(to);
to.assertFusionMode(QueueFuseable.ASYNC)
.awaitDone(5, TimeUnit.SECONDS)
.assertResult(1, 2, 3, 4, 5);
}
@Test
public void outputFusedReject() {
TestObserverEx<Integer> to = new TestObserverEx<>(QueueFuseable.SYNC);
Observable.range(1, 5).hide()
.observeOn(Schedulers.single())
.subscribe(to);
to.assertFusionMode(QueueFuseable.NONE)
.awaitDone(5, TimeUnit.SECONDS)
.assertResult(1, 2, 3, 4, 5);
}
@Test
public void inputOutputAsyncFusedError() {
TestObserverEx<Integer> to = new TestObserverEx<>(QueueFuseable.ANY);
UnicastSubject<Integer> us = UnicastSubject.create();
us.observeOn(Schedulers.single())
.subscribe(to);
us.onError(new TestException());
to
.awaitDone(5, TimeUnit.SECONDS)
.assertFailure(TestException.class);
to.assertFusionMode(QueueFuseable.ASYNC)
.awaitDone(5, TimeUnit.SECONDS)
.assertFailure(TestException.class);
}
@Test
public void inputOutputAsyncFusedErrorDelayed() {
TestObserverEx<Integer> to = new TestObserverEx<>(QueueFuseable.ANY);
UnicastSubject<Integer> us = UnicastSubject.create();
us.observeOn(Schedulers.single(), true)
.subscribe(to);
us.onError(new TestException());
to
.awaitDone(5, TimeUnit.SECONDS)
.assertFailure(TestException.class);
to.assertFusionMode(QueueFuseable.ASYNC)
.awaitDone(5, TimeUnit.SECONDS)
.assertFailure(TestException.class);
}
@Test
public void outputFusedCancelReentrant() throws Exception {
final UnicastSubject<Integer> us = UnicastSubject.create();
final CountDownLatch cdl = new CountDownLatch(1);
us.observeOn(Schedulers.single())
.subscribe(new Observer<Integer>() {
Disposable upstream;
int count;
@Override
public void onSubscribe(Disposable d) {
this.upstream = d;
((QueueDisposable<?>)d).requestFusion(QueueFuseable.ANY);
}
@Override
public void onNext(Integer value) {
if (++count == 1) {
us.onNext(2);
upstream.dispose();
cdl.countDown();
}
}
@Override
public void onError(Throwable e) {
}
@Override
public void onComplete() {
}
});
us.onNext(1);
cdl.await();
}
@Test
public void nonFusedPollThrows() {
new Observable<Integer>() {
@Override
protected void subscribeActual(Observer<? super Integer> observer) {
observer.onSubscribe(Disposable.empty());
@SuppressWarnings("unchecked")
ObserveOnObserver<Integer> oo = (ObserveOnObserver<Integer>)observer;
oo.queue = new SimpleQueue<Integer>() {
@Override
public boolean offer(Integer value) {
return false;
}
@Override
public boolean offer(Integer v1, Integer v2) {
return false;
}
@Nullable
@Override
public Integer poll() throws Exception {
throw new TestException();
}
@Override
public boolean isEmpty() {
return false;
}
@Override
public void clear() {
}
};
oo.clear();
oo.schedule();
}
}
.observeOn(Schedulers.single())
.test()
.awaitDone(5, TimeUnit.SECONDS)
.assertFailure(TestException.class);
}
@Test
public void outputFusedOneSignal() {
final BehaviorSubject<Integer> bs = BehaviorSubject.createDefault(1);
bs.observeOn(ImmediateThinScheduler.INSTANCE)
.concatMap(new Function<Integer, ObservableSource<Integer>>() {
@Override
public ObservableSource<Integer> apply(Integer v)
throws Exception {
return Observable.just(v + 1);
}
})
.subscribeWith(new TestObserver<Integer>() {
@Override
public void onNext(Integer t) {
super.onNext(t);
if (t == 2) {
bs.onNext(2);
}
}
})
.assertValuesOnly(2, 3);
}
@Test
public void workerNotDisposedPrematurelyNormalInNormalOut() {
DisposeTrackingScheduler s = new DisposeTrackingScheduler();
Observable.concat(
Observable.just(1).hide().observeOn(s),
Observable.just(2)
)
.test()
.assertResult(1, 2);
assertEquals(1, s.disposedCount.get());
}
@Test
public void workerNotDisposedPrematurelySyncInNormalOut() {
DisposeTrackingScheduler s = new DisposeTrackingScheduler();
Observable.concat(
Observable.just(1).observeOn(s),
Observable.just(2)
)
.test()
.assertResult(1, 2);
assertEquals(1, s.disposedCount.get());
}
@Test
public void workerNotDisposedPrematurelyAsyncInNormalOut() {
DisposeTrackingScheduler s = new DisposeTrackingScheduler();
UnicastSubject<Integer> us = UnicastSubject.create();
us.onNext(1);
us.onComplete();
Observable.concat(
us.observeOn(s),
Observable.just(2)
)
.test()
.assertResult(1, 2);
assertEquals(1, s.disposedCount.get());
}
static final | ObservableObserveOnTest |
java | netty__netty | codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/compression/WebSocketServerCompressionHandlerTest.java | {
"start": 1754,
"end": 8806
} | class ____ {
@Test
public void testNormalSuccess() {
EmbeddedChannel ch = new EmbeddedChannel(new WebSocketServerCompressionHandler(0));
HttpRequest req = newUpgradeRequest(PERMESSAGE_DEFLATE_EXTENSION);
ch.writeInbound(req);
HttpResponse res = newUpgradeResponse(null);
ch.writeOutbound(res);
HttpResponse res2 = ch.readOutbound();
List<WebSocketExtensionData> exts = WebSocketExtensionUtil.extractExtensions(
res2.headers().get(HttpHeaderNames.SEC_WEBSOCKET_EXTENSIONS));
assertEquals(PERMESSAGE_DEFLATE_EXTENSION, exts.get(0).name());
assertTrue(exts.get(0).parameters().isEmpty());
assertNotNull(ch.pipeline().get(PerMessageDeflateDecoder.class));
assertNotNull(ch.pipeline().get(PerMessageDeflateEncoder.class));
}
@Test
public void testClientWindowSizeSuccess() {
EmbeddedChannel ch = new EmbeddedChannel(new WebSocketServerExtensionHandler(
new PerMessageDeflateServerExtensionHandshaker(6, false, 10, false, false, 0)));
HttpRequest req = newUpgradeRequest(PERMESSAGE_DEFLATE_EXTENSION + "; " + CLIENT_MAX_WINDOW);
ch.writeInbound(req);
HttpResponse res = newUpgradeResponse(null);
ch.writeOutbound(res);
HttpResponse res2 = ch.readOutbound();
List<WebSocketExtensionData> exts = WebSocketExtensionUtil.extractExtensions(
res2.headers().get(HttpHeaderNames.SEC_WEBSOCKET_EXTENSIONS));
assertEquals(PERMESSAGE_DEFLATE_EXTENSION, exts.get(0).name());
assertEquals("10", exts.get(0).parameters().get(CLIENT_MAX_WINDOW));
assertNotNull(ch.pipeline().get(PerMessageDeflateDecoder.class));
assertNotNull(ch.pipeline().get(PerMessageDeflateEncoder.class));
}
@Test
public void testClientWindowSizeUnavailable() {
EmbeddedChannel ch = new EmbeddedChannel(new WebSocketServerExtensionHandler(
new PerMessageDeflateServerExtensionHandshaker(6, false, 10, false, false, 0)));
HttpRequest req = newUpgradeRequest(PERMESSAGE_DEFLATE_EXTENSION);
ch.writeInbound(req);
HttpResponse res = newUpgradeResponse(null);
ch.writeOutbound(res);
HttpResponse res2 = ch.readOutbound();
List<WebSocketExtensionData> exts = WebSocketExtensionUtil.extractExtensions(
res2.headers().get(HttpHeaderNames.SEC_WEBSOCKET_EXTENSIONS));
assertEquals(PERMESSAGE_DEFLATE_EXTENSION, exts.get(0).name());
assertTrue(exts.get(0).parameters().isEmpty());
assertNotNull(ch.pipeline().get(PerMessageDeflateDecoder.class));
assertNotNull(ch.pipeline().get(PerMessageDeflateEncoder.class));
}
@Test
public void testServerWindowSizeSuccess() {
EmbeddedChannel ch = new EmbeddedChannel(new WebSocketServerExtensionHandler(
new PerMessageDeflateServerExtensionHandshaker(6, true, 15, false, false, 0)));
HttpRequest req = newUpgradeRequest(PERMESSAGE_DEFLATE_EXTENSION + "; " + SERVER_MAX_WINDOW + "=10");
ch.writeInbound(req);
HttpResponse res = newUpgradeResponse(null);
ch.writeOutbound(res);
HttpResponse res2 = ch.readOutbound();
List<WebSocketExtensionData> exts = WebSocketExtensionUtil.extractExtensions(
res2.headers().get(HttpHeaderNames.SEC_WEBSOCKET_EXTENSIONS));
assertEquals(PERMESSAGE_DEFLATE_EXTENSION, exts.get(0).name());
assertEquals("10", exts.get(0).parameters().get(SERVER_MAX_WINDOW));
assertNotNull(ch.pipeline().get(PerMessageDeflateDecoder.class));
assertNotNull(ch.pipeline().get(PerMessageDeflateEncoder.class));
}
@Test
public void testServerWindowSizeDisable() {
EmbeddedChannel ch = new EmbeddedChannel(new WebSocketServerExtensionHandler(
new PerMessageDeflateServerExtensionHandshaker(6, false, 15, false, false, 0)));
HttpRequest req = newUpgradeRequest(PERMESSAGE_DEFLATE_EXTENSION + "; " + SERVER_MAX_WINDOW + "=10");
ch.writeInbound(req);
HttpResponse res = newUpgradeResponse(null);
ch.writeOutbound(res);
HttpResponse res2 = ch.readOutbound();
assertFalse(res2.headers().contains(HttpHeaderNames.SEC_WEBSOCKET_EXTENSIONS));
assertNull(ch.pipeline().get(PerMessageDeflateDecoder.class));
assertNull(ch.pipeline().get(PerMessageDeflateEncoder.class));
}
@Test
public void testServerNoContext() {
EmbeddedChannel ch = new EmbeddedChannel(new WebSocketServerCompressionHandler(0));
HttpRequest req = newUpgradeRequest(PERMESSAGE_DEFLATE_EXTENSION + "; " + SERVER_NO_CONTEXT);
ch.writeInbound(req);
HttpResponse res = newUpgradeResponse(null);
ch.writeOutbound(res);
HttpResponse res2 = ch.readOutbound();
assertFalse(res2.headers().contains(HttpHeaderNames.SEC_WEBSOCKET_EXTENSIONS));
assertNull(ch.pipeline().get(PerMessageDeflateDecoder.class));
assertNull(ch.pipeline().get(PerMessageDeflateEncoder.class));
}
@Test
public void testClientNoContext() {
EmbeddedChannel ch = new EmbeddedChannel(new WebSocketServerCompressionHandler(0));
HttpRequest req = newUpgradeRequest(PERMESSAGE_DEFLATE_EXTENSION + "; " + CLIENT_NO_CONTEXT);
ch.writeInbound(req);
HttpResponse res = newUpgradeResponse(null);
ch.writeOutbound(res);
HttpResponse res2 = ch.readOutbound();
List<WebSocketExtensionData> exts = WebSocketExtensionUtil.extractExtensions(
res2.headers().get(HttpHeaderNames.SEC_WEBSOCKET_EXTENSIONS));
assertEquals(PERMESSAGE_DEFLATE_EXTENSION, exts.get(0).name());
assertTrue(exts.get(0).parameters().isEmpty());
assertNotNull(ch.pipeline().get(PerMessageDeflateDecoder.class));
assertNotNull(ch.pipeline().get(PerMessageDeflateEncoder.class));
}
@Test
public void testServerWindowSizeDisableThenFallback() {
EmbeddedChannel ch = new EmbeddedChannel(new WebSocketServerExtensionHandler(
new PerMessageDeflateServerExtensionHandshaker(6, false, 15, false, false, 0)));
HttpRequest req = newUpgradeRequest(PERMESSAGE_DEFLATE_EXTENSION + "; " + SERVER_MAX_WINDOW + "=10, " +
PERMESSAGE_DEFLATE_EXTENSION);
ch.writeInbound(req);
HttpResponse res = newUpgradeResponse(null);
ch.writeOutbound(res);
HttpResponse res2 = ch.readOutbound();
List<WebSocketExtensionData> exts = WebSocketExtensionUtil.extractExtensions(
res2.headers().get(HttpHeaderNames.SEC_WEBSOCKET_EXTENSIONS));
assertEquals(PERMESSAGE_DEFLATE_EXTENSION, exts.get(0).name());
assertTrue(exts.get(0).parameters().isEmpty());
assertNotNull(ch.pipeline().get(PerMessageDeflateDecoder.class));
assertNotNull(ch.pipeline().get(PerMessageDeflateEncoder.class));
}
}
| WebSocketServerCompressionHandlerTest |
java | quarkusio__quarkus | core/devmode-spi/src/main/java/io/quarkus/dev/console/DeploymentLinker.java | {
"start": 155,
"end": 780
} | interface ____ on a different classloader.
* <p>
* This implies all communication must go through JDK classes, so the transfer involves Maps, Functions, ...
* Yes this is awful. No there's no better solution ATM.
* Ideally we'd automate this through bytecode generation,
* but feasibility is uncertain, and we'd need a volunteer who has time for that.
* <p>
* Implementations should live in the runtime module.
* To transfer {@link #createLinkData(Object) link data} between deployment and runtime,
* see {@link DevConsoleManager#setGlobal(String, Object)} and {@link DevConsoleManager#getGlobal(String)}.
*/
public | but |
java | quarkusio__quarkus | integration-tests/maven/src/test/resources-filtered/projects/classic-noconfig/src/main/java/org/acme/HelloResource.java | {
"start": 257,
"end": 516
} | class ____ {
@Inject
@ConfigProperty(name = "greeting", defaultValue = "initialValue")
String greeting;
@GET
@Path("/greeting")
@Produces(MediaType.TEXT_PLAIN)
public String greeting() {
return greeting;
}
}
| HelloResource |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/exporter/MonitoringDoc.java | {
"start": 4137,
"end": 7379
} | class ____ implements Writeable, ToXContentObject {
private final String uuid;
private final String host;
private final String transportAddress;
private final String ip;
private final String name;
private final long timestamp;
public Node(
final String uuid,
final String host,
final String transportAddress,
final String ip,
final String name,
final long timestamp
) {
this.uuid = uuid;
this.host = host;
this.transportAddress = transportAddress;
this.ip = ip;
this.name = name;
this.timestamp = timestamp;
}
/**
* Read from a stream.
*/
public Node(StreamInput in) throws IOException {
uuid = in.readOptionalString();
host = in.readOptionalString();
transportAddress = in.readOptionalString();
ip = in.readOptionalString();
name = in.readOptionalString();
timestamp = in.readVLong();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeOptionalString(uuid);
out.writeOptionalString(host);
out.writeOptionalString(transportAddress);
out.writeOptionalString(ip);
out.writeOptionalString(name);
out.writeVLong(timestamp);
}
public String getUUID() {
return uuid;
}
public String getHost() {
return host;
}
public String getTransportAddress() {
return transportAddress;
}
public String getIp() {
return ip;
}
public String getName() {
return name;
}
public long getTimestamp() {
return timestamp;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
{
builder.field("uuid", uuid);
builder.field("host", host);
builder.field("transport_address", transportAddress);
builder.field("ip", ip);
builder.field("name", name);
builder.field("timestamp", toUTC(timestamp));
}
return builder.endObject();
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
Node node = (Node) o;
return Objects.equals(uuid, node.uuid)
&& Objects.equals(host, node.host)
&& Objects.equals(transportAddress, node.transportAddress)
&& Objects.equals(ip, node.ip)
&& Objects.equals(name, node.name)
&& Objects.equals(timestamp, node.timestamp);
}
@Override
public int hashCode() {
return Objects.hash(uuid, host, transportAddress, ip, name, timestamp);
}
}
}
| Node |
java | elastic__elasticsearch | client/rest/src/test/java/org/elasticsearch/client/RequestLoggerTests.java | {
"start": 2136,
"end": 8742
} | class ____ extends RestClientTestCase {
public void testTraceRequest() throws IOException, URISyntaxException {
HttpHost host = new HttpHost("localhost", 9200, randomBoolean() ? "http" : "https");
String expectedEndpoint = "/index/type/_api";
URI uri;
if (randomBoolean()) {
uri = new URI(expectedEndpoint);
} else {
uri = new URI("index/type/_api");
}
HttpUriRequest request = randomHttpRequest(uri);
String expected = "curl -iX " + request.getMethod() + " '" + host + expectedEndpoint + "'";
boolean hasBody = request instanceof HttpEntityEnclosingRequest && randomBoolean();
String requestBody = "{ \"field\": \"value\" }";
if (hasBody) {
expected += " -d '" + requestBody + "'";
HttpEntityEnclosingRequest enclosingRequest = (HttpEntityEnclosingRequest) request;
HttpEntity entity;
switch (randomIntBetween(0, 4)) {
case 0:
entity = new StringEntity(requestBody, ContentType.APPLICATION_JSON);
break;
case 1:
entity = new InputStreamEntity(
new ByteArrayInputStream(requestBody.getBytes(StandardCharsets.UTF_8)),
ContentType.APPLICATION_JSON
);
break;
case 2:
entity = new NStringEntity(requestBody, ContentType.APPLICATION_JSON);
break;
case 3:
entity = new NByteArrayEntity(requestBody.getBytes(StandardCharsets.UTF_8), ContentType.APPLICATION_JSON);
break;
case 4:
// Evil entity without a charset
entity = new StringEntity(requestBody, ContentType.create("application/json", (Charset) null));
break;
default:
throw new UnsupportedOperationException();
}
enclosingRequest.setEntity(entity);
}
String traceRequest = RequestLogger.buildTraceRequest(request, host);
assertThat(traceRequest, equalTo(expected));
if (hasBody) {
// check that the body is still readable as most entities are not repeatable
String body = EntityUtils.toString(((HttpEntityEnclosingRequest) request).getEntity(), StandardCharsets.UTF_8);
assertThat(body, equalTo(requestBody));
}
}
public void testTraceResponse() throws IOException {
ProtocolVersion protocolVersion = new ProtocolVersion("HTTP", 1, 1);
int statusCode = randomIntBetween(200, 599);
String reasonPhrase = "REASON";
BasicStatusLine statusLine = new BasicStatusLine(protocolVersion, statusCode, reasonPhrase);
String expected = "# " + statusLine.toString();
BasicHttpResponse httpResponse = new BasicHttpResponse(statusLine);
int numHeaders = randomIntBetween(0, 3);
for (int i = 0; i < numHeaders; i++) {
httpResponse.setHeader("header" + i, "value");
expected += "\n# header" + i + ": value";
}
expected += "\n#";
boolean hasBody = getRandom().nextBoolean();
String responseBody = "{\n \"field\": \"value\"\n}";
if (hasBody) {
expected += "\n# {";
expected += "\n# \"field\": \"value\"";
expected += "\n# }";
HttpEntity entity;
switch (randomIntBetween(0, 2)) {
case 0:
entity = new StringEntity(responseBody, ContentType.APPLICATION_JSON);
break;
case 1:
// test a non repeatable entity
entity = new InputStreamEntity(
new ByteArrayInputStream(responseBody.getBytes(StandardCharsets.UTF_8)),
ContentType.APPLICATION_JSON
);
break;
case 2:
// Evil entity without a charset
entity = new StringEntity(responseBody, ContentType.create("application/json", (Charset) null));
break;
default:
throw new UnsupportedOperationException();
}
httpResponse.setEntity(entity);
}
String traceResponse = RequestLogger.buildTraceResponse(httpResponse);
assertThat(traceResponse, equalTo(expected));
if (hasBody) {
// check that the body is still readable as most entities are not repeatable
String body = EntityUtils.toString(httpResponse.getEntity(), StandardCharsets.UTF_8);
assertThat(body, equalTo(responseBody));
}
}
public void testResponseWarnings() throws Exception {
HttpHost host = new HttpHost("localhost", 9200);
HttpUriRequest request = randomHttpRequest(new URI("/index/type/_api"));
int numWarnings = randomIntBetween(1, 5);
StringBuilder expected = new StringBuilder("request [").append(request.getMethod())
.append(" ")
.append(host)
.append("/index/type/_api] returned ")
.append(numWarnings)
.append(" warnings: ");
Header[] warnings = new Header[numWarnings];
for (int i = 0; i < numWarnings; i++) {
String warning = "this is warning number " + i;
warnings[i] = new BasicHeader("Warning", warning);
if (i > 0) {
expected.append(",");
}
expected.append("[").append(warning).append("]");
}
assertEquals(expected.toString(), RequestLogger.buildWarningMessage(request, host, warnings));
}
private static HttpUriRequest randomHttpRequest(URI uri) {
int requestType = randomIntBetween(0, 7);
switch (requestType) {
case 0:
return new HttpGetWithEntity(uri);
case 1:
return new HttpPost(uri);
case 2:
return new HttpPut(uri);
case 3:
return new HttpDeleteWithEntity(uri);
case 4:
return new HttpHead(uri);
case 5:
return new HttpTrace(uri);
case 6:
return new HttpOptions(uri);
case 7:
return new HttpPatch(uri);
default:
throw new UnsupportedOperationException();
}
}
}
| RequestLoggerTests |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/state/filesystem/AbstractFileStateBackend.java | {
"start": 1474,
"end": 1564
} | class ____ all state backends that store their metadata (and data) in files.
*
* <p>This | for |
java | quarkusio__quarkus | extensions/resteasy-classic/resteasy-client-jsonb/deployment/src/main/java/io/quarkus/restclient/jsonb/deployment/RestClientJsonbProcessor.java | {
"start": 260,
"end": 453
} | class ____ {
@BuildStep
void build(BuildProducer<FeatureBuildItem> feature) {
feature.produce(new FeatureBuildItem(Feature.RESTEASY_CLIENT_JSONB));
}
}
| RestClientJsonbProcessor |
java | spring-projects__spring-security | access/src/test/java/org/springframework/security/access/vote/AbstractAccessDecisionManagerTests.java | {
"start": 3613,
"end": 3958
} | class ____ extends AbstractAccessDecisionManager {
protected MockDecisionManagerImpl(List<AccessDecisionVoter<? extends Object>> decisionVoters) {
super(decisionVoters);
}
@Override
public void decide(Authentication authentication, Object object, Collection<ConfigAttribute> configAttributes) {
}
}
private | MockDecisionManagerImpl |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Task.java | {
"start": 1426,
"end": 1530
} | class ____ {
private static final Logger LOG =
LoggerFactory.getLogger(Task.class);
public | Task |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.