language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | apache__flink | flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/MaskUtils.java | {
"start": 1116,
"end": 3920
} | class ____ {
public static void writeMask(boolean[] mask, DataOutputView target) throws IOException {
writeMask(mask, mask.length, target);
}
@SuppressWarnings("UnusedAssignment")
public static void writeMask(boolean[] mask, int len, DataOutputView target)
throws IOException {
int b = 0x00;
int bytePos = 0;
int fieldPos = 0;
int numPos = 0;
while (fieldPos < len) {
b = 0x00;
// set bits in byte
bytePos = 0;
numPos = Math.min(8, len - fieldPos);
while (bytePos < numPos) {
b = b << 1;
// set bit if element is true
if (mask[fieldPos + bytePos]) {
b |= 0x01;
}
bytePos += 1;
}
fieldPos += numPos;
// shift bits if last byte is not completely filled
b <<= (8 - bytePos);
// write byte
target.writeByte(b);
}
}
public static void readIntoMask(DataInputView source, boolean[] mask) throws IOException {
readIntoMask(source, mask, mask.length);
}
@SuppressWarnings("UnusedAssignment")
public static void readIntoMask(DataInputView source, boolean[] mask, int len)
throws IOException {
int b = 0x00;
int bytePos = 0;
int fieldPos = 0;
int numPos = 0;
while (fieldPos < len) {
// read byte
b = source.readUnsignedByte();
bytePos = 0;
numPos = Math.min(8, len - fieldPos);
while (bytePos < numPos) {
mask[fieldPos + bytePos] = (b & 0x80) > 0;
b = b << 1;
bytePos += 1;
}
fieldPos += numPos;
}
}
public static void readIntoAndCopyMask(
DataInputView source, DataOutputView target, boolean[] mask) throws IOException {
readIntoAndCopyMask(source, target, mask, mask.length);
}
@SuppressWarnings("UnusedAssignment")
public static void readIntoAndCopyMask(
DataInputView source, DataOutputView target, boolean[] mask, int len)
throws IOException {
int b = 0x00;
int bytePos = 0;
int fieldPos = 0;
int numPos = 0;
while (fieldPos < len) {
// read byte
b = source.readUnsignedByte();
// copy byte
target.writeByte(b);
bytePos = 0;
numPos = Math.min(8, len - fieldPos);
while (bytePos < numPos) {
mask[fieldPos + bytePos] = (b & 0x80) > 0;
b = b << 1;
bytePos += 1;
}
fieldPos += numPos;
}
}
}
| MaskUtils |
java | apache__camel | components/camel-bindy/src/test/java/org/apache/camel/dataformat/bindy/csv/BindySimpleCsvUnmarshallUnicodeNextLineTest.java | {
"start": 1523,
"end": 2511
} | class ____ {
private static final String URI_MOCK_RESULT = "mock:result";
private static final String URI_DIRECT_START = "direct:start";
@Produce(URI_DIRECT_START)
protected ProducerTemplate template;
@EndpointInject(URI_MOCK_RESULT)
private MockEndpoint result;
private String record;
@Test
@DirtiesContext
public void testUnicodeNextLineCharacterParsing() throws Exception {
record = "123\u0085 Anywhere Lane,United States";
template.sendBody(record);
result.expectedMessageCount(1);
result.assertIsSatisfied();
LocationRecord data = result.getExchanges().get(0).getIn().getBody(LocationRecord.class);
assertNotNull(data);
assertEquals("123\u0085 Anywhere Lane", data.getAddress(), "Parsing error with unicode next line");
assertEquals("United States", data.getNation(), "Parsing error with unicode next line");
}
public static | BindySimpleCsvUnmarshallUnicodeNextLineTest |
java | netty__netty | codec-marshalling/src/test/java/io/netty/handler/codec/marshalling/AbstractMarshallingTest.java | {
"start": 1022,
"end": 2057
} | class ____ {
static final String SERIAL_FACTORY = "serial";
static final String RIVER_FACTORY = "river";
@BeforeAll
public static void checkSupported() throws Throwable {
Throwable error = null;
try {
checkFactorySupported(Marshalling.getProvidedMarshallerFactory(SERIAL_FACTORY));
checkFactorySupported(Marshalling.getProvidedMarshallerFactory(RIVER_FACTORY));
} catch (Throwable cause) {
// This may fail on Java 9+ depending on which command-line arguments are used when building.
if (PlatformDependent.javaVersion() < 9) {
throw cause;
}
error = cause;
}
assumeTrue(error == null, error + " was not null");
}
private static void checkFactorySupported(MarshallerFactory factory) throws IOException {
factory.createMarshaller(new MarshallingConfiguration()).close();
factory.createUnmarshaller(new MarshallingConfiguration()).close();
}
}
| AbstractMarshallingTest |
java | apache__camel | tooling/maven/camel-package-maven-plugin/src/main/java/org/apache/camel/maven/packaging/AbstractGeneratorMojo.java | {
"start": 2326,
"end": 9822
} | class ____ extends AbstractMojo {
public static final String GENERATED_MSG = "Generated by camel build tools - do NOT edit this file!";
public static final String NL = "\n";
private static final Map<String, Class<?>> KNOWN_CLASSES_CACHE = new ConcurrentHashMap<>();
private static final RuntimeInstance VELOCITY = createVelocityRuntime();
private static final Map<String, Template> VELOCITY_TEMPLATES = new ConcurrentHashMap<>();
/**
* The maven project.
*/
@Parameter(property = "project", required = true, readonly = true)
protected MavenProject project;
/**
* Maven ProjectHelper.
*/
protected final MavenProjectHelper projectHelper;
/**
* build context to check changed files and mark them for refresh (used for m2e compatibility)
*/
protected final BuildContext buildContext;
private DynamicClassLoader projectClassLoader;
static {
KNOWN_CLASSES_CACHE.put("Byte", Byte.class);
KNOWN_CLASSES_CACHE.put("Boolean", Boolean.class);
KNOWN_CLASSES_CACHE.put("Date", Date.class);
KNOWN_CLASSES_CACHE.put("Double", Double.class);
KNOWN_CLASSES_CACHE.put("Duration", Duration.class);
KNOWN_CLASSES_CACHE.put("String", String.class);
KNOWN_CLASSES_CACHE.put("Integer", Integer.class);
KNOWN_CLASSES_CACHE.put("Long", Long.class);
KNOWN_CLASSES_CACHE.put("File", File.class);
KNOWN_CLASSES_CACHE.put("Object", Object.class);
KNOWN_CLASSES_CACHE.put("int", int.class);
KNOWN_CLASSES_CACHE.put("long", long.class);
KNOWN_CLASSES_CACHE.put("boolean", boolean.class);
}
protected AbstractGeneratorMojo(MavenProjectHelper projectHelper, BuildContext buildContext) {
this.projectHelper = projectHelper;
this.buildContext = buildContext;
}
public void execute(MavenProject project) throws MojoFailureException, MojoExecutionException {
this.project = project;
execute();
}
protected void addResourceDirectory(Path path) {
projectHelper.addResource(project, path.toString(), Collections.singletonList("**/*"), Collections.emptyList());
}
public void refresh(Path file) {
refresh(buildContext, file);
}
protected String velocity(String templatePath, Map<String, Object> ctx) {
VelocityContext context = new VelocityContext(ctx);
Template template = VELOCITY_TEMPLATES.computeIfAbsent(templatePath, VELOCITY::getTemplate);
StringWriter writer = new StringWriter();
template.merge(context, writer);
return writer.toString();
}
private static RuntimeInstance createVelocityRuntime() {
Properties props = new Properties();
props.setProperty("resource.loaders", "class");
props.setProperty("resource.loader.class.class", "org.apache.velocity.runtime.resource.loader.ClasspathResourceLoader");
RuntimeInstance velocity = new RuntimeInstance();
velocity.init(props);
return velocity;
}
protected boolean updateResource(Path dir, String fileName, String data) {
boolean updated = updateResource(buildContext, dir.resolve(fileName), data);
if (!fileName.endsWith(".java")) {
Path outputDir = Paths.get(project.getBuild().getOutputDirectory());
updated |= updateResource(buildContext, outputDir.resolve(fileName), data);
}
return updated;
}
protected String createProperties(String key, String val) {
return createProperties(project, key, val);
}
public static String createProperties(MavenProject project, String key, String val) {
StringBuilder properties = new StringBuilder(256);
properties.append("# ").append(GENERATED_MSG).append(NL);
properties.append(key).append("=").append(val).append(NL);
properties.append("groupId=").append(project.getGroupId()).append(NL);
properties.append("artifactId=").append(project.getArtifactId()).append(NL);
properties.append("version=").append(project.getVersion()).append(NL);
properties.append("projectName=").append(project.getName()).append(NL);
if (project.getDescription() != null) {
properties.append("projectDescription=").append(project.getDescription()).append(NL);
}
String annotations = project.getProperties().getProperty("annotations");
if (!Strings.isNullOrEmpty(annotations)) {
properties.append("annotations=").append(annotations).append(NL);
}
return properties.toString();
}
public static void refresh(BuildContext buildContext, Path file) {
if (buildContext != null) {
buildContext.refresh(file.toFile());
}
}
public static boolean updateResource(BuildContext buildContext, Path out, String data) {
try {
if (FileUtil.updateFile(out, data)) {
refresh(buildContext, out);
return true;
}
} catch (IOException e) {
throw new IOError(e);
}
return false;
}
public static boolean haveResourcesChanged(Log log, MavenProject project, BuildContext buildContext, String suffix) {
String baseDir = project.getBasedir().getAbsolutePath();
for (Resource r : project.getBuild().getResources()) {
File file = new File(r.getDirectory());
if (file.isAbsolute()) {
file = new File(r.getDirectory().substring(baseDir.length() + 1));
}
if (log.isDebugEnabled()) {
String path = file.getPath() + "/" + suffix;
log.debug("Checking if " + path + " (" + r.getDirectory() + "/" + suffix + ") has changed.");
}
if (buildContext.hasDelta(new File(file, suffix))) {
if (log.isDebugEnabled()) {
log.debug("Indeed " + suffix + " has changed.");
}
return true;
}
}
return false;
}
protected static <T> Supplier<T> cache(Supplier<T> supplier) {
return new Supplier<>() {
T value;
@Override
public T get() {
if (value == null) {
value = supplier.get();
}
return value;
}
};
}
protected Class<?> loadClass(String loadClassName) {
return KNOWN_CLASSES_CACHE.computeIfAbsent(loadClassName, k -> doLoadClass(loadClassName));
}
private Class<?> doLoadClass(String loadClassName) {
Class<?> optionClass;
String org = loadClassName;
while (true) {
try {
optionClass = getProjectClassLoader().loadClass(loadClassName);
break;
} catch (ClassNotFoundException e) {
int dotIndex = loadClassName.lastIndexOf('.');
if (dotIndex == -1) {
if (getLog().isDebugEnabled()) {
getLog().debug("Failed to load class: " + loadClassName);
}
throw new NoClassDefFoundError(org);
} else {
loadClassName = loadClassName.substring(0, dotIndex) + "$" + loadClassName.substring(dotIndex + 1);
if (getLog().isDebugEnabled()) {
getLog().debug("Relocating previous | AbstractGeneratorMojo |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/localtime/LocalTimeAssert_isEqualTo_Test.java | {
"start": 1179,
"end": 2164
} | class ____ extends LocalTimeAssertBaseTest {
@Test
void should_pass_if_actual_is_equal_to_localTime_as_string_parameter() {
assertThat(REFERENCE).isEqualTo(REFERENCE.toString());
}
@Test
void should_fail_if_actual_is_not_equal_to_date_as_string_parameter() {
// WHEN
ThrowingCallable code = () -> assertThat(AFTER).isEqualTo(REFERENCE.toString());
// THEN
assertThatAssertionErrorIsThrownBy(code).withMessage(shouldBeEqualMessage(AFTER.toString(), REFERENCE.toString()));
}
@Test
void should_fail_if_localTime_as_string_parameter_is_null() {
// GIVEN
String otherLocalTimeAsString = null;
// WHEN
ThrowingCallable code = () -> assertThat(LocalTime.now()).isEqualTo(otherLocalTimeAsString);
// THEN
assertThatIllegalArgumentException().isThrownBy(code)
.withMessage("The String representing the LocalTime to compare actual with should not be null");
}
}
| LocalTimeAssert_isEqualTo_Test |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/LocalTemporaryTableMutationStrategyNoDropTest.java | {
"start": 4228,
"end": 6591
} | class ____ implements RuntimeModelCreationContext, GeneratorSettings {
private final SessionFactoryImplementor sessionFactory;
private final SessionFactoryScope scope;
private final JdbcServices jdbcServices;
public ModelCreationContext(SessionFactoryImplementor sessionFactory, SessionFactoryScope scope, JdbcServices jdbcServices) {
this.sessionFactory = sessionFactory;
this.scope = scope;
this.jdbcServices = jdbcServices;
}
@Override
public SessionFactoryImplementor getSessionFactory() {
return sessionFactory;
}
@Override
public BootstrapContext getBootstrapContext() {
return null;
}
@Override
public TypeConfiguration getTypeConfiguration() {
return sessionFactory.getTypeConfiguration();
}
@Override
public MetadataImplementor getBootModel() {
return scope.getMetadataImplementor();
}
@Override
public MappingMetamodelImplementor getDomainModel() {
return null;
}
@Override
public SqmFunctionRegistry getFunctionRegistry() {
return null;
}
@Override
public Map<String, Object> getSettings() {
return sessionFactory.getProperties();
}
@Override
public Dialect getDialect() {
return jdbcServices.getDialect();
}
@Override
public CacheImplementor getCache() {
return null;
}
@Override
public SessionFactoryOptions getSessionFactoryOptions() {
return sessionFactory.getSessionFactoryOptions();
}
@Override
public JdbcServices getJdbcServices() {
return jdbcServices;
}
@Override
public SqlStringGenerationContext getSqlStringGenerationContext() {
return SqlStringGenerationContextImpl.fromExplicit(
jdbcServices.getJdbcEnvironment(),
scope.getMetadataImplementor().getDatabase(),
null,
null
);
}
@Override
public org.hibernate.service.ServiceRegistry getServiceRegistry() {
return sessionFactory.getServiceRegistry();
}
@Override
public Map<String, Generator> getGenerators() {
return emptyMap();
}
@Override
public String getDefaultCatalog() {
return null;
}
@Override
public String getDefaultSchema() {
return null;
}
@Override
public GeneratorSettings getGeneratorSettings() {
return this;
}
@Override
public Generator getOrCreateIdGenerator(String rootName, PersistentClass persistentClass) {
return null;
}
}
}
| ModelCreationContext |
java | google__guava | android/guava/src/com/google/common/base/FinalizableReference.java | {
"start": 1081,
"end": 1386
} | interface ____ {
/**
* Invoked on a background thread after the referent has been garbage collected unless security
* restrictions prevented starting a background thread, in which case this method is invoked when
* new references are created.
*/
void finalizeReferent();
}
| FinalizableReference |
java | spring-projects__spring-security | webauthn/src/main/java/org/springframework/security/web/webauthn/api/Bytes.java | {
"start": 1002,
"end": 2776
} | class ____ implements Serializable {
@Serial
private static final long serialVersionUID = -3278138671365709777L;
private static final SecureRandom RANDOM = new SecureRandom();
private static final Base64.Encoder ENCODER = Base64.getUrlEncoder().withoutPadding();
private static final Base64.Decoder DECODER = Base64.getUrlDecoder();
private final byte[] bytes;
/**
* Creates a new instance
* @param bytes the raw base64UrlString that will be encoded.
*/
public Bytes(byte[] bytes) {
Assert.notNull(bytes, "bytes cannot be null");
this.bytes = bytes;
}
/**
* Gets the raw bytes.
* @return the bytes
*/
public byte[] getBytes() {
return Arrays.copyOf(this.bytes, this.bytes.length);
}
/**
* Gets the bytes as Base64 URL encoded String.
* @return
*/
public String toBase64UrlString() {
return ENCODER.encodeToString(getBytes());
}
@Override
public boolean equals(Object obj) {
if (obj instanceof Bytes that) {
return that.toBase64UrlString().equals(toBase64UrlString());
}
return false;
}
@Override
public int hashCode() {
return toBase64UrlString().hashCode();
}
public String toString() {
return "Bytes[" + toBase64UrlString() + "]";
}
/**
* Creates a secure random {@link Bytes} with random bytes and sufficient entropy.
* @return a new secure random generated {@link Bytes}
*/
public static Bytes random() {
byte[] bytes = new byte[32];
RANDOM.nextBytes(bytes);
return new Bytes(bytes);
}
/**
* Creates a new instance from a base64 url string.
* @param base64UrlString the base64 url string
* @return the {@link Bytes}
*/
public static Bytes fromBase64(@Nullable String base64UrlString) {
byte[] bytes = DECODER.decode(base64UrlString);
return new Bytes(bytes);
}
}
| Bytes |
java | quarkusio__quarkus | integration-tests/hibernate-validator-resteasy-reactive/src/main/java/io/quarkus/it/hibernate/validator/GreetingService.java | {
"start": 220,
"end": 337
} | class ____ {
public String greeting(@NotNull String name) {
return "hello " + name;
}
}
| GreetingService |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/inheritfromconfig/NotToBeUsedMapper.java | {
"start": 340,
"end": 600
} | interface ____ {
@Mappings({
@Mapping(target = "primaryKey", ignore = true),
@Mapping(target = "auditTrail", ignore = true),
@Mapping(target = "color", ignore = true)
})
CarEntity toCarEntity(CarDto carDto);
}
| NotToBeUsedMapper |
java | micronaut-projects__micronaut-core | inject/src/main/java/io/micronaut/context/ApplicationContext.java | {
"start": 12673,
"end": 13502
} | class ____ the application
* @param environments The environment to use
* @return The application context builder
*/
static @NonNull ApplicationContextBuilder builder(@NonNull Class<?> mainClass, @NonNull String... environments) {
ArgumentUtils.requireNonNull("environments", environments);
ArgumentUtils.requireNonNull("mainClass", mainClass);
return builder(environments)
.mainClass(mainClass);
}
/**
* Creates the {@link ApplicationContext} using the given {@link Environment}.
*
* @return The created {@link ApplicationContext}
* @since 5.0
*/
static @NonNull ApplicationContext create(@NonNull Environment environment) {
return new DefaultApplicationContext(new DefaultApplicationContextBuilder(), environment);
}
}
| of |
java | elastic__elasticsearch | x-pack/plugin/migrate/src/main/java/org/elasticsearch/system_indices/action/GetFeatureUpgradeStatusAction.java | {
"start": 412,
"end": 765
} | class ____ extends ActionType<GetFeatureUpgradeStatusResponse> {
public static final GetFeatureUpgradeStatusAction INSTANCE = new GetFeatureUpgradeStatusAction();
public static final String NAME = "cluster:admin/migration/get_system_feature";
private GetFeatureUpgradeStatusAction() {
super(NAME);
}
}
| GetFeatureUpgradeStatusAction |
java | netty__netty | handler/src/test/java/io/netty/handler/ssl/MockAlternativeKeyProvider.java | {
"start": 1469,
"end": 3437
} | class ____ extends Provider {
private static final String PROVIDER_NAME = "MockAlternativeKeyProvider";
private static final double PROVIDER_VERSION = 1.0;
private static final String PROVIDER_INFO = "Mock provider simulating alternative key providers";
// Track signature operations for test verification
private static final AtomicInteger signatureOperations = new AtomicInteger(0);
MockAlternativeKeyProvider() {
super(PROVIDER_NAME, PROVIDER_VERSION, PROVIDER_INFO);
// Register RSA signature algorithms
put("Signature.RSASSA-PSS", MockRsaPssSignature.class.getName());
put("Signature.SHA1withRSA", MockSha1WithRsaSignature.class.getName());
put("Signature.SHA256withRSA", MockSha256WithRsaSignature.class.getName());
put("Signature.SHA384withRSA", MockSha384WithRsaSignature.class.getName());
put("Signature.SHA512withRSA", MockSha512WithRsaSignature.class.getName());
put("Signature.MD5withRSA", MockMd5WithRsaSignature.class.getName());
// Register ECDSA signature algorithms
put("Signature.SHA1withECDSA", MockSha1WithEcdsaSignature.class.getName());
put("Signature.SHA256withECDSA", MockSha256WithEcdsaSignature.class.getName());
put("Signature.SHA384withECDSA", MockSha384WithEcdsaSignature.class.getName());
put("Signature.SHA512withECDSA", MockSha512WithEcdsaSignature.class.getName());
}
static PrivateKey wrapPrivateKey(PrivateKey realKey) {
return new AlternativePrivateKeyWrapper(realKey);
}
/**
* Reset the signature operation counter for test isolation.
*/
static void resetSignatureOperationCount() {
signatureOperations.set(0);
}
/**
* Get the number of signature operations performed by this provider.
*/
static int getSignatureOperationCount() {
return signatureOperations.get();
}
private static final | MockAlternativeKeyProvider |
java | google__guice | core/test/com/google/inject/Java8LanguageFeatureBindingTest.java | {
"start": 999,
"end": 3711
} | class ____ extends TestCase {
// Some of these tests are kind of weird.
// See https://github.com/google/guice/issues/757 for more on why they exist.
public void testBinding_lambdaToInterface() {
Injector injector =
Guice.createInjector(
new AbstractModule() {
@Override
protected void configure() {
bind(new TypeLiteral<Predicate<Object>>() {}).toInstance(o -> o != null);
}
});
Predicate<Object> predicate = injector.getInstance(new Key<Predicate<Object>>() {});
assertTrue(predicate.test(new Object()));
assertFalse(predicate.test(null));
}
public void testProviderMethod_returningLambda() throws Exception {
Injector injector =
Guice.createInjector(
new AbstractModule() {
@Provides
public Callable<String> provideCallable() {
return () -> "foo";
}
});
Callable<String> callable = injector.getInstance(new Key<Callable<String>>() {});
assertEquals("foo", callable.call());
}
public void testProviderMethod_containingLambda_throwingException() throws Exception {
Injector injector =
Guice.createInjector(
new AbstractModule() {
@Provides
public Callable<String> provideCallable() {
if (Boolean.parseBoolean("false")) { // avoid dead code warnings
return () -> "foo";
} else {
throw new RuntimeException("foo");
}
}
});
ProvisionException expected =
assertThrows(
ProvisionException.class, () -> injector.getInstance(new Key<Callable<String>>() {}));
assertTrue(expected.getCause() instanceof RuntimeException);
assertEquals("foo", expected.getCause().getMessage());
}
public void testProvider_usingJdk8Features() {
try {
Guice.createInjector(
new AbstractModule() {
@Override
protected void configure() {
bind(String.class).toProvider(StringProvider.class);
}
});
fail();
} catch (CreationException expected) {
}
UUID uuid = UUID.randomUUID();
Injector injector =
Guice.createInjector(
new AbstractModule() {
@Override
protected void configure() {
bind(UUID.class).toInstance(uuid);
bind(String.class).toProvider(StringProvider.class);
}
});
assertEquals(uuid.toString(), injector.getInstance(String.class));
}
private static final | Java8LanguageFeatureBindingTest |
java | elastic__elasticsearch | x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/overallbuckets/OverallBucketsProcessor.java | {
"start": 419,
"end": 564
} | interface ____ {
void process(List<OverallBucket> overallBuckets);
List<OverallBucket> finish();
int size();
}
| OverallBucketsProcessor |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/processor/aggregator/PredicateAggregatorCollectionTest.java | {
"start": 1167,
"end": 3211
} | class ____ extends ContextTestSupport {
@Test
public void testPredicateAggregateCollection() throws Exception {
// START SNIPPET: e2
MockEndpoint result = getMockEndpoint("mock:result");
// we only expect two messages as they have reached the completed
// predicate
// that we want 3 messages that has the same header id
result.expectedMessageCount(2);
result.expectedBodiesReceived("Message 1c", "Message 3c");
// then we sent all the message at once
template.sendBodyAndHeader("direct:start", "Message 1a", "id", "1");
template.sendBodyAndHeader("direct:start", "Message 2a", "id", "2");
template.sendBodyAndHeader("direct:start", "Message 3a", "id", "3");
template.sendBodyAndHeader("direct:start", "Message 1b", "id", "1");
template.sendBodyAndHeader("direct:start", "Message 3b", "id", "3");
template.sendBodyAndHeader("direct:start", "Message 1c", "id", "1");
template.sendBodyAndHeader("direct:start", "Message 3c", "id", "3");
template.sendBodyAndHeader("direct:start", "Message 2b", "id", "2");
template.sendBodyAndHeader("direct:start", "Message 1d", "id", "1");
template.sendBodyAndHeader("direct:start", "Message 4", "id", "4");
assertMockEndpointsSatisfied();
// END SNIPPET: e2
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
// START SNIPPET: e1
// our route is aggregating from the direct queue and sending
// the response to the mock
from("direct:start")
// we use the collection based aggregator we already have
// configured
.aggregate(header("id"), new UseLatestAggregationStrategy()).completionSize(3).to("mock:result");
// END SNIPPET: e1
}
};
}
}
| PredicateAggregatorCollectionTest |
java | quarkusio__quarkus | core/deployment/src/test/java/io/quarkus/deployment/runnerjar/BootstrapFromOriginalJarTestBase.java | {
"start": 1010,
"end": 13620
} | class ____ extends PackageAppTestBase {
private TsArtifact appJar;
private List<TsArtifact> wsModules = List.of();
private Map<String, List<TsArtifact>> profileWsModules = Map.of();
protected boolean createWorkspace() {
return false;
}
protected boolean workspaceModuleParentHierarchy() {
return false;
}
protected void addWorkspaceModule(TsArtifact a) {
if (wsModules.isEmpty()) {
wsModules = new ArrayList<>();
}
wsModules.add(a);
}
protected void addWorkspaceModuleToProfile(TsArtifact a, String profile) {
if (profileWsModules.isEmpty()) {
profileWsModules = new LinkedHashMap<>();
}
profileWsModules.computeIfAbsent(profile, k -> new ArrayList<>()).add(a);
}
@BeforeEach
public void initAppModel() throws Exception {
appJar = composeApplication();
appJar.install(repo);
}
protected abstract TsArtifact composeApplication() throws Exception;
protected QuarkusBootstrap.Builder initBootstrapBuilder() throws Exception {
final Path ws = workDir.resolve("workspace");
IoUtils.recursiveDelete(ws);
IoUtils.mkdirs(ws);
Path applicationRoot = resolver.resolve(appJar.toArtifact()).getResolvedPaths().getSinglePath();
final QuarkusBootstrap.Builder bootstrap = QuarkusBootstrap.builder()
.setApplicationRoot(applicationRoot)
.setProjectRoot(applicationRoot)
.setAppModelResolver(resolver);
switch (getBootstrapMode()) {
case PROD:
break;
case TEST:
bootstrap.setTest(true);
break;
default:
throw new IllegalArgumentException("Not supported bootstrap mode " + getBootstrapMode());
}
if (createWorkspace() || !wsModules.isEmpty()) {
System.setProperty("basedir", ws.toAbsolutePath().toString());
final Model appPom = appJar.getPomModel();
List<Dependency> bomModules = List.of();
List<Dependency> depModules = List.of();
if (createWorkspace()) {
bomModules = (appPom.getDependencyManagement() == null ? List.<Dependency> of()
: appPom.getDependencyManagement().getDependencies()).stream()
.filter(d -> "import".equals(d.getScope())
&& d.getGroupId().equals(appPom.getGroupId()))
.toList();
depModules = appPom.getDependencies().stream()
.filter(d -> d.getGroupId().equals(appPom.getGroupId()) &&
(d.getType().isEmpty() || ArtifactCoords.TYPE_JAR.equals(d.getType())))
.toList();
}
final Path appModule;
final Path appPomXml;
if (depModules.isEmpty() && bomModules.isEmpty() && wsModules.isEmpty() || appPom.getParent() != null) {
appModule = ws;
appPomXml = ws.resolve("pom.xml");
ModelUtils.persistModel(appPomXml, appPom);
} else {
Model parentPom = new Model();
parentPom.setModelVersion(appPom.getModelVersion());
parentPom.setPackaging(ArtifactCoords.TYPE_POM);
parentPom.setGroupId(appPom.getGroupId());
parentPom.setArtifactId(appPom.getArtifactId() + "-parent");
parentPom.setVersion(appPom.getVersion());
Parent parent = new Parent();
parent.setGroupId(parentPom.getGroupId());
parent.setArtifactId(parentPom.getArtifactId());
parent.setVersion(parentPom.getVersion());
// BOM modules
for (Dependency bomModule : bomModules) {
parentPom.getModules().add(bomModule.getArtifactId());
final String moduleVersion = bomModule.getVersion();
Model modulePom = ModelUtils.readModel(resolver
.resolve(ArtifactCoords.pom(bomModule.getGroupId(), bomModule.getArtifactId(), moduleVersion))
.getResolvedPaths().getSinglePath());
modulePom.setParent(parent);
final Path moduleDir = IoUtils.mkdirs(ws.resolve(modulePom.getArtifactId()));
ModelUtils.persistModel(moduleDir.resolve("pom.xml"), modulePom);
}
// APP module
parentPom.getModules().add(appPom.getArtifactId());
appModule = ws.resolve(appPom.getArtifactId());
Files.createDirectories(appModule);
appPom.setParent(parent);
appPomXml = appModule.resolve("pom.xml");
ModelUtils.persistModel(appPomXml, appPom);
// dependency modules
if (!depModules.isEmpty()) {
final Map<ArtifactKey, String> managedVersions = new HashMap<>();
collectManagedDeps(appPom, managedVersions);
for (Dependency moduleDep : depModules) {
parentPom.getModules().add(moduleDep.getArtifactId());
final String moduleVersion = moduleDep.getVersion() == null
? managedVersions.get(ArtifactKey.of(moduleDep.getGroupId(), moduleDep.getArtifactId(),
moduleDep.getClassifier(), moduleDep.getType()))
: moduleDep.getVersion();
Model modulePom = ModelUtils.readModel(resolver
.resolve(ArtifactCoords.pom(moduleDep.getGroupId(), moduleDep.getArtifactId(), moduleVersion))
.getResolvedPaths().getSinglePath());
modulePom.setParent(parent);
final Path moduleDir = IoUtils.mkdirs(ws.resolve(modulePom.getArtifactId()));
ModelUtils.persistModel(moduleDir.resolve("pom.xml"), modulePom);
final Path resolvedJar = resolver
.resolve(ArtifactCoords.of(modulePom.getGroupId(), modulePom.getArtifactId(),
moduleDep.getClassifier(), moduleDep.getType(), modulePom.getVersion()))
.getResolvedPaths()
.getSinglePath();
final Path moduleTargetDir = moduleDir.resolve("target");
ZipUtils.unzip(resolvedJar, moduleTargetDir.resolve("classes"));
IoUtils.copy(resolvedJar,
moduleTargetDir.resolve(modulePom.getArtifactId() + "-" + modulePom.getVersion() + ".jar"));
}
}
for (TsArtifact module : wsModules) {
parentPom.getModules().add(module.getArtifactId());
Model modulePom = module.getPomModel();
modulePom.setParent(parent);
final Path moduleDir = IoUtils.mkdirs(ws.resolve(modulePom.getArtifactId()));
ModelUtils.persistModel(moduleDir.resolve("pom.xml"), modulePom);
final Path resolvedJar = resolver
.resolve(ArtifactCoords.of(modulePom.getGroupId(), modulePom.getArtifactId(),
module.getClassifier(), module.getType(), modulePom.getVersion()))
.getResolvedPaths()
.getSinglePath();
final Path moduleTargetDir = moduleDir.resolve("target");
ZipUtils.unzip(resolvedJar, moduleTargetDir.resolve("classes"));
IoUtils.copy(resolvedJar,
moduleTargetDir.resolve(modulePom.getArtifactId() + "-" + modulePom.getVersion() + ".jar"));
}
for (Map.Entry<String, List<TsArtifact>> profileModules : profileWsModules.entrySet()) {
Profile profile = null;
for (Profile p : parentPom.getProfiles()) {
if (p.getId().equals(profileModules.getKey())) {
profile = p;
break;
}
}
if (profile == null) {
for (Profile p : appPom.getProfiles()) {
if (p.getId().equals(profileModules.getKey())) {
profile = p;
break;
}
}
if (profile == null) {
throw new IllegalStateException(
"Failed to locate profile " + profileModules.getKey() + " in the application POM");
}
final Profile tmp = new Profile();
tmp.setActivation(profile.getActivation());
profile = tmp;
parentPom.getProfiles().add(profile);
}
for (TsArtifact a : profileModules.getValue()) {
profile.getModules().add(a.getArtifactId());
Model modulePom = a.getPomModel();
modulePom.setParent(parent);
final Path moduleDir = IoUtils.mkdirs(ws.resolve(modulePom.getArtifactId()));
ModelUtils.persistModel(moduleDir.resolve("pom.xml"), modulePom);
final Path resolvedJar = resolver
.resolve(ArtifactCoords.of(modulePom.getGroupId(), modulePom.getArtifactId(),
a.getClassifier(), a.getType(), modulePom.getVersion()))
.getResolvedPaths()
.getSinglePath();
final Path moduleTargetDir = moduleDir.resolve("target");
ZipUtils.unzip(resolvedJar, moduleTargetDir.resolve("classes"));
IoUtils.copy(resolvedJar,
moduleTargetDir.resolve(modulePom.getArtifactId() + "-" + modulePom.getVersion() + ".jar"));
}
}
ModelUtils.persistModel(ws.resolve("pom.xml"), parentPom);
}
final Path appOutputDir = IoUtils.mkdirs(appModule.resolve("target"));
final Path appClassesDir = appOutputDir.resolve("classes");
ZipUtils.unzip(applicationRoot, appClassesDir);
final LocalProject appProject = new BootstrapMavenContext(BootstrapMavenContext.config()
.setWorkspaceDiscovery(true)
.setWorkspaceModuleParentHierarchy(workspaceModuleParentHierarchy())
.setRootProjectDir(ws)
.setUserSettings(getSettingsXml() == null ? null : getSettingsXml().toFile())
.setCurrentProject(appPomXml.toString()))
.getCurrentProject();
bootstrap.setProjectRoot(appModule)
.setTargetDirectory(appOutputDir)
.setLocalProjectDiscovery(true)
.setAppModelResolver(newAppModelResolver(appProject));
} else {
bootstrap.setTargetDirectory(IoUtils.mkdirs(ws.resolve("target")));
}
return bootstrap;
}
private void collectManagedDeps(Model appPom, Map<ArtifactKey, String> managedVersions)
throws IOException, AppModelResolverException {
final List<Dependency> managed = appPom.getDependencyManagement() == null ? List.of()
: appPom.getDependencyManagement().getDependencies();
for (Dependency d : managed) {
managedVersions.put(ArtifactKey.of(d.getGroupId(), d.getArtifactId(), d.getClassifier(), d.getType()),
d.getVersion());
if (d.getType().equals(ArtifactCoords.TYPE_POM) && d.getScope().equals("import")) {
collectManagedDeps(ModelUtils
.readModel(resolver.resolve(ArtifactCoords.pom(d.getGroupId(), d.getArtifactId(), d.getVersion()))
.getResolvedPaths().getSinglePath()),
managedVersions);
}
}
}
}
| BootstrapFromOriginalJarTestBase |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSink.java | {
"start": 2254,
"end": 4737
} | class ____ a metrics sink that uses
* {@link org.apache.hadoop.fs.FileSystem} to write the metrics logs. Every
* roll interval a new directory will be created under the path specified by the
* <code>basepath</code> property. All metrics will be logged to a file in the
* current interval's directory in a file named <hostname>.log, where
* <hostname> is the name of the host on which the metrics logging
* process is running. The base path is set by the
* <code><prefix>.sink.<instance>.basepath</code> property. The
* time zone used to create the current interval's directory name is GMT. If
* the <code>basepath</code> property isn't specified, it will default to
* "/tmp", which is the temp directory on whatever default file
* system is configured for the cluster.</p>
*
* <p>The <code><prefix>.sink.<instance>.ignore-error</code>
* property controls whether an exception is thrown when an error is encountered
* writing a log file. The default value is <code>true</code>. When set to
* <code>false</code>, file errors are quietly swallowed.</p>
*
* <p>The <code>roll-interval</code> property sets the amount of time before
* rolling the directory. The default value is 1 hour. The roll interval may
* not be less than 1 minute. The property's value should be given as
* <i>number unit</i>, where <i>number</i> is an integer value, and
* <i>unit</i> is a valid unit. Valid units are <i>minute</i>, <i>hour</i>,
* and <i>day</i>. The units are case insensitive and may be abbreviated or
* plural. If no units are specified, hours are assumed. For example,
* "2", "2h", "2 hour", and
* "2 hours" are all valid ways to specify two hours.</p>
*
* <p>The <code>roll-offset-interval-millis</code> property sets the upper
* bound on a random time interval (in milliseconds) that is used to delay
* before the initial roll. All subsequent rolls will happen an integer
* number of roll intervals after the initial roll, hence retaining the original
* offset. The purpose of this property is to insert some variance in the roll
* times so that large clusters using this sink on every node don't cause a
* performance impact on HDFS by rolling simultaneously. The default value is
* 30000 (30s). When writing to HDFS, as a rule of thumb, the roll offset in
* millis should be no less than the number of sink instances times 5.
*
* <p>The primary use of this | is |
java | quarkusio__quarkus | extensions/jackson/deployment/src/test/java/io/quarkus/jackson/deployment/JacksonIgnoreUnknownPropertiesTest.java | {
"start": 447,
"end": 923
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest();
@Inject
ObjectMapper objectMapper;
@Test
public void testIgnoreUnknownProperties() throws JsonMappingException, JsonProcessingException {
Pojo pojo = objectMapper.readValue("{\"property\": \"name\", \"unknownProperty\": \"unknown\"}", Pojo.class);
assertEquals("name", pojo.property);
}
public static | JacksonIgnoreUnknownPropertiesTest |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/leaderelection/LeaderElectionEvent.java | {
"start": 2958,
"end": 3687
} | class ____ extends LeaderElectionEvent {
private final LeaderInformationRegister leaderInformationRegister;
AllLeaderInformationChangeEvent(LeaderInformationRegister leaderInformationRegister) {
this.leaderInformationRegister = leaderInformationRegister;
}
@Override
public boolean isAllKnownLeaderInformationEvent() {
return true;
}
public LeaderInformationRegister getLeaderInformationRegister() {
return leaderInformationRegister;
}
}
/**
* A {@code LeaderElectionEvent} that's triggered by {@link
* LeaderElectionDriver.Listener#onError(Throwable)}.
*/
public static | AllLeaderInformationChangeEvent |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/configuration/interfaces/BootstrapWithTestInterface.java | {
"start": 1257,
"end": 1581
} | class ____ extends DefaultTestContextBootstrapper {
@Override
protected List<ContextCustomizerFactory> getContextCustomizerFactories() {
return singletonList((testClass, configAttributes) ->
(context,mergedConfig) -> context.getBeanFactory().registerSingleton("foo", "foo"));
}
}
}
| CustomTestContextBootstrapper |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/boot/archive/scan/spi/ClassDescriptor.java | {
"start": 635,
"end": 692
} | class ____.
*/
InputStreamAccess getStreamAccess();
}
| file |
java | apache__flink | flink-clients/src/test/java/org/apache/flink/client/cli/CliFrontendPackageProgramTest.java | {
"start": 11028,
"end": 14440
} | class ____ is only available in
* the jarfile itself (via a custom classloader)
* <li>Change the Usercode classloader of the PackagedProgram to a special classloader for
* this test
* <li>the classloader will accept the special class (and return a String.class)
* </ul>
*/
@Test
void testPlanWithExternalClass() throws Exception {
final boolean[] callme = {
false
}; // create a final object reference, to be able to change its val later
try {
String[] arguments = {
"--classpath",
"file:///tmp/foo",
"--classpath",
"file:///tmp/bar",
"-c",
TEST_JAR_CLASSLOADERTEST_CLASS,
getTestJarPath(),
"true",
"arg1",
"arg2"
};
URL[] classpath = new URL[] {new URL("file:///tmp/foo"), new URL("file:///tmp/bar")};
String[] reducedArguments = {"true", "arg1", "arg2"};
CommandLine commandLine =
CliFrontendParser.parse(CliFrontendParser.RUN_OPTIONS, arguments, true);
ProgramOptions programOptions = ProgramOptions.create(commandLine);
assertThat(programOptions.getJarFilePath()).isEqualTo(getTestJarPath());
assertThat(programOptions.getClasspaths().toArray()).isEqualTo(classpath);
assertThat(programOptions.getEntryPointClassName())
.isEqualTo(TEST_JAR_CLASSLOADERTEST_CLASS);
assertThat(programOptions.getProgramArgs()).isEqualTo(reducedArguments);
PackagedProgram prog = spy(frontend.buildProgram(programOptions));
ClassLoader testClassLoader =
new ClassLoader(prog.getUserCodeClassLoader()) {
@Override
public Class<?> loadClass(String name) throws ClassNotFoundException {
if ("org.apache.hadoop.hive.ql.io.RCFileInputFormat".equals(name)) {
callme[0] = true;
return String.class; // Intentionally return the wrong class.
} else {
return super.loadClass(name);
}
}
};
when(prog.getUserCodeClassLoader()).thenReturn(testClassLoader);
assertThat(prog.getMainClassName()).isEqualTo(TEST_JAR_CLASSLOADERTEST_CLASS);
assertThat(prog.getArguments()).isEqualTo(reducedArguments);
Configuration c = new Configuration();
// we expect this to fail with a "ClassNotFoundException"
Pipeline pipeline = PackagedProgramUtils.getPipelineFromProgram(prog, c, 666, true);
FlinkPipelineTranslationUtil.translateToJSONExecutionPlan(
prog.getUserCodeClassLoader(), pipeline);
fail("Should have failed with a ClassNotFoundException");
} catch (ProgramInvocationException e) {
if (!(e.getCause() instanceof ClassNotFoundException)) {
e.printStackTrace();
fail("Program didn't throw ClassNotFoundException");
}
if (!callme[0]) {
fail("Classloader was not called");
}
}
}
}
| which |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/bugs/_2245/TestMapper.java | {
"start": 788,
"end": 1110
} | class ____ {
private final Inner inner;
public TenantDTO(Inner inner) {
this.inner = inner;
}
public Inner getInner() {
return inner;
}
}
@Mapping(target = "id", source = "inner.id", defaultValue = "test")
Tenant map(TenantDTO tenant);
}
| TenantDTO |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/issue_1100/Issue1134.java | {
"start": 1515,
"end": 1606
} | class ____ {
public int x;
public int y;
public int z;
}
}
| BlockPos |
java | spring-projects__spring-boot | core/spring-boot/src/main/java/org/springframework/boot/logging/DeferredLogFactory.java | {
"start": 1012,
"end": 1929
} | interface ____ {
/**
* Create a new {@link DeferredLog} for the given destination.
* @param destination the ultimate log destination
* @return a deferred log instance that will switch to the destination when
* appropriate.
*/
default Log getLog(Class<?> destination) {
return getLog(() -> LogFactory.getLog(destination));
}
/**
* Create a new {@link DeferredLog} for the given destination.
* @param destination the ultimate log destination
* @return a deferred log instance that will switch to the destination when
* appropriate.
*/
default Log getLog(Log destination) {
return getLog(() -> destination);
}
/**
* Create a new {@link DeferredLog} for the given destination.
* @param destination the ultimate log destination
* @return a deferred log instance that will switch to the destination when
* appropriate.
*/
Log getLog(Supplier<Log> destination);
}
| DeferredLogFactory |
java | apache__flink | flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/sink/compactor/ConcatFileCompactor.java | {
"start": 1395,
"end": 2449
} | class ____ extends OutputStreamBasedFileCompactor {
private static final int CHUNK_SIZE = 4 * 1024 * 1024;
private final byte[] fileDelimiter;
public ConcatFileCompactor() {
this(null);
}
public ConcatFileCompactor(@Nullable byte[] fileDelimiter) {
this.fileDelimiter = fileDelimiter;
}
@Override
protected void doCompact(List<Path> inputFiles, OutputStream outputStream) throws Exception {
FileSystem fs = inputFiles.get(0).getFileSystem();
for (Path input : inputFiles) {
try (FSDataInputStream inputStream = fs.open(input)) {
copy(inputStream, outputStream);
}
if (fileDelimiter != null) {
outputStream.write(fileDelimiter);
}
}
}
private void copy(InputStream in, OutputStream out) throws IOException {
byte[] buf = new byte[CHUNK_SIZE];
int length;
while ((length = in.read(buf)) > 0) {
out.write(buf, 0, length);
}
}
}
| ConcatFileCompactor |
java | spring-projects__spring-framework | spring-messaging/src/main/java/org/springframework/messaging/converter/MessageConverter.java | {
"start": 1137,
"end": 1597
} | interface ____ {
/**
* Convert the payload of a {@link Message} from a serialized form to a typed Object
* of the specified target class. The {@link MessageHeaders#CONTENT_TYPE} header
* should indicate the MIME type to convert from.
* <p>If the converter does not support the specified media type or cannot perform
* the conversion, it should return {@code null}.
* @param message the input message
* @param targetClass the target | MessageConverter |
java | apache__camel | components/camel-knative/camel-knative-api/src/main/java/org/apache/camel/component/knative/spi/KnativeSinkBinding.java | {
"start": 912,
"end": 2010
} | class ____ {
private String name;
private Knative.Type type;
private String objectKind;
private String objectApiVersion;
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public Knative.Type getType() {
return type;
}
public void setType(Knative.Type type) {
this.type = type;
}
public String getObjectKind() {
return objectKind;
}
public void setObjectKind(String objectKind) {
this.objectKind = objectKind;
}
public String getObjectApiVersion() {
return objectApiVersion;
}
public void setObjectApiVersion(String objectApiVersion) {
this.objectApiVersion = objectApiVersion;
}
@Override
public String toString() {
return "KnativeSinkBinding{" +
"name='" + name + '\'' +
", type=" + type +
", objectKind='" + objectKind + '\'' +
", objectApiVersion='" + objectApiVersion + '\'' +
'}';
}
}
| KnativeSinkBinding |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/ser/jdk/CollectionSerializationTest.java | {
"start": 878,
"end": 1250
} | class ____
{
EnumMap<Key,String> _map;
public EnumMapBean(EnumMap<Key,String> m)
{
_map = m;
}
public EnumMap<Key,String> getMap() { return _map; }
}
/**
* Class needed for testing [JACKSON-220]
*/
@SuppressWarnings("serial")
@JsonSerialize(using=ListSerializer.class)
static | EnumMapBean |
java | google__auto | value/src/test/java/com/google/auto/value/processor/AutoBuilderCompilationTest.java | {
"start": 15719,
"end": 15793
} | class ____ {",
" @AutoBuilder",
" public | Private |
java | spring-projects__spring-framework | spring-context/src/main/java/org/springframework/context/annotation/EnableLoadTimeWeaving.java | {
"start": 2671,
"end": 3821
} | class ____ implements LoadTimeWeavingConfigurer {
*
* @Override
* public LoadTimeWeaver getLoadTimeWeaver() {
* MyLoadTimeWeaver ltw = new MyLoadTimeWeaver();
* ltw.addClassTransformer(myClassFileTransformer);
* // ...
* return ltw;
* }
* }</pre>
*
* <p>The example above can be compared to the following Spring XML configuration:
*
* <pre class="code">
* <beans>
*
* <context:load-time-weaver weaverClass="com.acme.MyLoadTimeWeaver"/>
*
* </beans>
* </pre>
*
* <p>The code example differs from the XML example in that it actually instantiates the
* {@code MyLoadTimeWeaver} type, meaning that it can also configure the instance, for example,
* calling the {@code #addClassTransformer} method. This demonstrates how the code-based
* configuration approach is more flexible through direct programmatic access.
*
* <h2>Enabling AspectJ-based weaving</h2>
* AspectJ load-time weaving may be enabled with the {@link #aspectjWeaving()}
* attribute, which will cause the {@linkplain
* org.aspectj.weaver.loadtime.ClassPreProcessorAgentAdapter AspectJ | AppConfig |
java | apache__flink | flink-tests/src/test/java/org/apache/flink/test/checkpointing/UnalignedCheckpointStressITCase.java | {
"start": 17132,
"end": 18834
} | class ____ extends AbstractRichFunction
implements ParallelSourceFunction<Record>, CheckpointedFunction {
private final int sourceIdOffset;
private long nextValue;
private ListState<Long> nextState;
private volatile boolean running = true;
public LegacySourceFunction(int sourceIdOffset) {
this.sourceIdOffset = sourceIdOffset;
}
@Override
public void run(SourceContext<Record> ctx) throws Exception {
RecordGenerator generator =
new RecordGenerator(
getRuntimeContext().getTaskInfo().getIndexOfThisSubtask()
+ sourceIdOffset);
while (running) {
Record next = generator.next(nextValue);
synchronized (ctx.getCheckpointLock()) {
nextValue++;
ctx.collect(next);
}
}
}
@Override
public void cancel() {
running = false;
}
@Override
public void snapshotState(FunctionSnapshotContext context) throws Exception {
nextState.update(Collections.singletonList(nextValue));
}
@Override
public void initializeState(FunctionInitializationContext context) throws Exception {
nextState =
context.getOperatorStateStore()
.getListState(new ListStateDescriptor<>("state", Long.class));
// We are not supporting rescaling
nextValue = requireNonNull(getOnlyElement(nextState.get(), 0L));
}
}
private static | LegacySourceFunction |
java | apache__kafka | storage/src/test/java/org/apache/kafka/server/log/remote/storage/LocalTieredStorageEvent.java | {
"start": 1309,
"end": 1473
} | interface ____ Kafka and external storage systems, through
* which all such {@link LocalTieredStorageEvent.EventType} interactions go through.
*/
public final | between |
java | junit-team__junit5 | junit-jupiter-api/src/main/java/org/junit/jupiter/api/condition/EnabledIf.java | {
"start": 963,
"end": 1508
} | class ____
* be enabled on the same condition.
*
* <p>This annotation is not {@link java.lang.annotation.Inherited @Inherited}.
* Consequently, if you wish to apply the same semantics to a subclass, this
* annotation must be redeclared on the subclass.
*
* <p>If a test method is disabled via this annotation, that prevents execution
* of the test method and method-level lifecycle callbacks such as
* {@code @BeforeEach} methods, {@code @AfterEach} methods, and corresponding
* extension APIs. However, that does not prevent the test | will |
java | quarkusio__quarkus | core/deployment/src/main/java/io/quarkus/deployment/steps/ApplicationIndexBuildStep.java | {
"start": 796,
"end": 1954
} | class ____ {
private static final Logger log = Logger.getLogger(ApplicationIndexBuildStep.class);
@BuildStep
ApplicationIndexBuildItem build(ArchiveRootBuildItem root, CurateOutcomeBuildItem curation,
ClassLoadingConfig classLoadingConfig) throws IOException {
Indexer indexer = new Indexer();
Set<String> removedApplicationClasses = removedApplicationClasses(curation, classLoadingConfig);
for (Path p : root.getRootDirectories()) {
Files.walkFileTree(p, new FileVisitor<Path>() {
@Override
public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException {
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
if (file.getFileName().toString().endsWith(".class")) {
if (isRemovedApplicationClass(file, removedApplicationClasses)) {
log.debugf("File %s will not be indexed because the | ApplicationIndexBuildStep |
java | quarkusio__quarkus | extensions/arc/deployment/src/main/java/io/quarkus/arc/deployment/BeanArchiveIndexBuildItem.java | {
"start": 651,
"end": 1681
} | class ____ extends SimpleBuildItem {
private final IndexView index;
private final IndexView immutableIndex;
private final Set<DotName> generatedClassNames;
public BeanArchiveIndexBuildItem(IndexView index, IndexView immutableIndex, Set<DotName> generatedClassNames) {
this.index = index;
this.immutableIndex = immutableIndex;
this.generatedClassNames = generatedClassNames;
}
/**
* This index is built on top of the immutable index.
*
* @return the computing index that can also index classes on demand
*/
public IndexView getIndex() {
return index;
}
/**
*
* @return an immutable index that represents the bean archive
*/
public IndexView getImmutableIndex() {
return immutableIndex;
}
/**
*
* @return the set of classes generated via {@link GeneratedBeanBuildItem}
*/
public Set<DotName> getGeneratedClassNames() {
return generatedClassNames;
}
}
| BeanArchiveIndexBuildItem |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/license/internal/MutableLicenseService.java | {
"start": 1201,
"end": 2197
} | interface ____ extends LicenseService, LifecycleComponent {
/**
* Creates or updates the current license as defined by the request.
*/
void registerLicense(PutLicenseRequest request, ActionListener<PutLicenseResponse> listener);
/**
* Removes the current license. Implementations should remove the current license and ensure that attempts to read returns
* {@link LicensesMetadata#LICENSE_TOMBSTONE} if a license was removed. Additionally the {@link XPackLicenseState} must be updated.
*/
void removeLicense(TimeValue masterNodeTimeout, TimeValue ackTimeout, ActionListener<? extends AcknowledgedResponse> listener);
/**
* Installs a basic license.
*/
void startBasicLicense(PostStartBasicRequest request, ActionListener<PostStartBasicResponse> listener);
/**
* Installs a trial license.
*/
void startTrialLicense(PostStartTrialRequest request, ActionListener<PostStartTrialResponse> listener);
}
| MutableLicenseService |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/context/annotation/ComponentScanParserTests.java | {
"start": 5919,
"end": 6299
} | class ____ {
@Autowired
@CustomAnnotation
private KustomAnnotationDependencyBean dependency;
public KustomAnnotationDependencyBean getDependency() {
return this.dependency;
}
}
/**
* Intentionally spelling "custom" with a "k" since there are numerous
* classes in this package named *Custom*.
*/
@CustomAnnotation
public static | KustomAnnotationAutowiredBean |
java | google__guava | guava/src/com/google/common/collect/Synchronized.java | {
"start": 15746,
"end": 20799
} | class ____<K extends @Nullable Object, V extends @Nullable Object>
extends SynchronizedObject implements Multimap<K, V> {
transient @Nullable Set<K> keySet;
transient @Nullable Collection<V> valuesCollection;
transient @Nullable Collection<Map.Entry<K, V>> entries;
transient @Nullable Map<K, Collection<V>> asMap;
transient @Nullable Multiset<K> keys;
@SuppressWarnings("unchecked")
@Override
Multimap<K, V> delegate() {
return (Multimap<K, V>) super.delegate();
}
SynchronizedMultimap(Multimap<K, V> delegate, @Nullable Object mutex) {
super(delegate, mutex);
}
@Override
public int size() {
synchronized (mutex) {
return delegate().size();
}
}
@Override
public boolean isEmpty() {
synchronized (mutex) {
return delegate().isEmpty();
}
}
@Override
public boolean containsKey(@Nullable Object key) {
synchronized (mutex) {
return delegate().containsKey(key);
}
}
@Override
public boolean containsValue(@Nullable Object value) {
synchronized (mutex) {
return delegate().containsValue(value);
}
}
@Override
public boolean containsEntry(@Nullable Object key, @Nullable Object value) {
synchronized (mutex) {
return delegate().containsEntry(key, value);
}
}
@Override
public Collection<V> get(@ParametricNullness K key) {
synchronized (mutex) {
return typePreservingCollection(delegate().get(key), mutex);
}
}
@Override
public boolean put(@ParametricNullness K key, @ParametricNullness V value) {
synchronized (mutex) {
return delegate().put(key, value);
}
}
@Override
public boolean putAll(@ParametricNullness K key, Iterable<? extends V> values) {
synchronized (mutex) {
return delegate().putAll(key, values);
}
}
@Override
public boolean putAll(Multimap<? extends K, ? extends V> multimap) {
synchronized (mutex) {
return delegate().putAll(multimap);
}
}
@Override
public Collection<V> replaceValues(@ParametricNullness K key, Iterable<? extends V> values) {
synchronized (mutex) {
return delegate().replaceValues(key, values); // copy not synchronized
}
}
@Override
public boolean remove(@Nullable Object key, @Nullable Object value) {
synchronized (mutex) {
return delegate().remove(key, value);
}
}
@Override
public Collection<V> removeAll(@Nullable Object key) {
synchronized (mutex) {
return delegate().removeAll(key); // copy not synchronized
}
}
@Override
public void clear() {
synchronized (mutex) {
delegate().clear();
}
}
@Override
public Set<K> keySet() {
synchronized (mutex) {
if (keySet == null) {
keySet = typePreservingSet(delegate().keySet(), mutex);
}
return keySet;
}
}
@Override
public Collection<V> values() {
synchronized (mutex) {
if (valuesCollection == null) {
valuesCollection = collection(delegate().values(), mutex);
}
return valuesCollection;
}
}
@Override
public Collection<Map.Entry<K, V>> entries() {
synchronized (mutex) {
if (entries == null) {
entries = typePreservingCollection(delegate().entries(), mutex);
}
return entries;
}
}
@Override
public void forEach(BiConsumer<? super K, ? super V> action) {
synchronized (mutex) {
delegate().forEach(action);
}
}
@Override
public Map<K, Collection<V>> asMap() {
synchronized (mutex) {
if (asMap == null) {
asMap = new SynchronizedAsMap<>(delegate().asMap(), mutex);
}
return asMap;
}
}
@Override
public Multiset<K> keys() {
synchronized (mutex) {
if (keys == null) {
keys = multiset(delegate().keys(), mutex);
}
return keys;
}
}
@Override
// A forwarding implementation can't do any better than the underlying object.
@SuppressWarnings("UndefinedEquals")
public boolean equals(@Nullable Object o) {
if (o == this) {
return true;
}
synchronized (mutex) {
return delegate().equals(o);
}
}
@Override
public int hashCode() {
synchronized (mutex) {
return delegate().hashCode();
}
}
@GwtIncompatible @J2ktIncompatible private static final long serialVersionUID = 0;
}
static <K extends @Nullable Object, V extends @Nullable Object> ListMultimap<K, V> listMultimap(
ListMultimap<K, V> multimap, @Nullable Object mutex) {
if (multimap instanceof SynchronizedListMultimap || multimap instanceof BaseImmutableMultimap) {
return multimap;
}
return new SynchronizedListMultimap<>(multimap, mutex);
}
static final | SynchronizedMultimap |
java | spring-projects__spring-framework | integration-tests/src/test/java/org/springframework/transaction/annotation/EnableTransactionManagementIntegrationTests.java | {
"start": 6755,
"end": 7319
} | class ____ implements TransactionManagementConfigurer {
@Bean
public PlatformTransactionManager txManager1() {
return new CallCountingTransactionManager();
}
@Bean
public PlatformTransactionManager txManager2() {
return new CallCountingTransactionManager();
}
@Override
public PlatformTransactionManager annotationDrivenTransactionManager() {
return txManager1();
}
@Bean
public FooRepository fooRepository() {
return new DummyFooRepository();
}
}
@Configuration
@EnableTransactionManagement
static | ExplicitTxManagerConfig |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/ColumnLastIndexNotLetterAliasTest.java | {
"start": 647,
"end": 1217
} | class ____ {
// Arbitrarily choose PostgreSQL
private static final Dialect DIALECT = new PostgreSQLDialect();
@Test
@JiraKey(value = "HHH-14720")
public void testColumnNameEndinWithNonCharacter() {
test( "aColumn1" );
test( "aColumn_" );
test( "aVeryVeryVeryLongColumnName1" );
test( "aVeryVeryVeryLongColumnName_" );
}
private void test(String columnName) {
final Column column = new Column( columnName );
final String alias = column.getAlias( DIALECT );
assertEquals( alias.toLowerCase( Locale.ROOT ), alias );
}
}
| ColumnLastIndexNotLetterAliasTest |
java | netty__netty | codec-http/src/main/java/io/netty/handler/codec/http/websocketx/PongWebSocketFrame.java | {
"start": 810,
"end": 2684
} | class ____ extends WebSocketFrame {
/**
* Creates a new empty pong frame.
*/
public PongWebSocketFrame() {
super(Unpooled.buffer(0));
}
/**
* Creates a new pong frame with the specified binary data.
*
* @param binaryData
* the content of the frame.
*/
public PongWebSocketFrame(ByteBuf binaryData) {
super(binaryData);
}
/**
* Creates a new pong frame with the specified binary data
*
* @param finalFragment
* flag indicating if this frame is the final fragment
* @param rsv
* reserved bits used for protocol extensions
* @param binaryData
* the content of the frame.
*/
public PongWebSocketFrame(boolean finalFragment, int rsv, ByteBuf binaryData) {
super(finalFragment, rsv, binaryData);
}
@Override
public PongWebSocketFrame copy() {
return (PongWebSocketFrame) super.copy();
}
@Override
public PongWebSocketFrame duplicate() {
return (PongWebSocketFrame) super.duplicate();
}
@Override
public PongWebSocketFrame retainedDuplicate() {
return (PongWebSocketFrame) super.retainedDuplicate();
}
@Override
public PongWebSocketFrame replace(ByteBuf content) {
return new PongWebSocketFrame(isFinalFragment(), rsv(), content);
}
@Override
public PongWebSocketFrame retain() {
super.retain();
return this;
}
@Override
public PongWebSocketFrame retain(int increment) {
super.retain(increment);
return this;
}
@Override
public PongWebSocketFrame touch() {
super.touch();
return this;
}
@Override
public PongWebSocketFrame touch(Object hint) {
super.touch(hint);
return this;
}
}
| PongWebSocketFrame |
java | apache__camel | tooling/camel-tooling-model/src/main/java/org/apache/camel/tooling/model/Kind.java | {
"start": 1098,
"end": 1679
} | enum ____ implements Jsonable {
component,
dataformat,
language,
transformer,
console,
other,
eip,
bean,
model;
@Override
public String toJson() {
final StringWriter writable = new StringWriter();
try {
this.toJson(writable);
} catch (final IOException caught) {
/* See java.io.StringWriter. */
}
return writable.toString();
}
@Override
public void toJson(final Writer writable) throws IOException {
writable.write(Jsoner.serialize(name()));
}
}
| Kind |
java | elastic__elasticsearch | x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/pytorch/results/ErrorResultTests.java | {
"start": 460,
"end": 998
} | class ____ extends AbstractXContentTestCase<ErrorResult> {
public static ErrorResult createRandom() {
return new ErrorResult(randomAlphaOfLength(50));
}
@Override
protected ErrorResult createTestInstance() {
return createRandom();
}
@Override
protected ErrorResult doParseInstance(XContentParser parser) throws IOException {
return ErrorResult.PARSER.parse(parser, null);
}
@Override
protected boolean supportsUnknownFields() {
return false;
}
}
| ErrorResultTests |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/GetBuiltinPrivilegesAction.java | {
"start": 446,
"end": 792
} | class ____ extends ActionType<GetBuiltinPrivilegesResponse> {
public static final GetBuiltinPrivilegesAction INSTANCE = new GetBuiltinPrivilegesAction();
public static final String NAME = "cluster:admin/xpack/security/privilege/builtin/get";
private GetBuiltinPrivilegesAction() {
super(NAME);
}
}
| GetBuiltinPrivilegesAction |
java | micronaut-projects__micronaut-core | core/src/main/java/io/micronaut/core/util/clhm/ConcurrentLinkedHashMap.java | {
"start": 4980,
"end": 42695
} | class ____ a best-effort bounding of a ConcurrentHashMap using a
* page-replacement algorithm to determine which entries to evict when the
* capacity is exceeded.
*
* The page replacement algorithm's data structures are kept eventually
* consistent with the map. An update to the map and recording of reads may
* not be immediately reflected on the algorithm's data structures. These
* structures are guarded by a lock and operations are applied in batches to
* avoid lock contention. The penalty of applying the batches is spread across
* threads so that the amortized cost is slightly higher than performing just
* the ConcurrentHashMap operation.
*
* A memento of the reads and writes that were performed on the map are
* recorded in buffers. These buffers are drained at the first opportunity
* after write or when the read buffer exceeds a threshold size. The reads
* are recorded in a lossy buffer, allowing the reordering operations to be
* discarded if the draining process cannot keep up. Due to the concurrent
* nature of the read and write operations a strict policy ordering is not
* possible, but is observably strict when single threaded.
*
* Due to a lack of a strict ordering guarantee, a task can be executed
* out-of-order, such as a removal followed by its addition. The state of the
* entry is encoded within the value's weight.
*
* Alive: The entry is in both the hash-table and the page replacement policy.
* This is represented by a positive weight.
*
* Retired: The entry is not in the hash-table and is pending removal from the
* page replacement policy. This is represented by a negative weight.
*
* Dead: The entry is not in the hash-table and is not in the page replacement
* policy. This is represented by a weight of zero.
*
* The Least Recently Used page replacement algorithm was chosen due to its
* simplicity, high hit rate, and ability to be implemented with O(1) time
* complexity.
*/
/** The number of CPUs, used to size the striped read buffers. */
static final int NCPU = Runtime.getRuntime().availableProcessors();
/** The maximum weighted capacity of the map. */
static final long MAXIMUM_CAPACITY = Long.MAX_VALUE - Integer.MAX_VALUE;
/** The number of read buffers to use (next power of two >= NCPU). */
static final int NUMBER_OF_READ_BUFFERS = ceilingNextPowerOfTwo(NCPU);
/** Mask value for indexing into the read buffers (power-of-two modulus). */
static final int READ_BUFFERS_MASK = NUMBER_OF_READ_BUFFERS - 1;
/** The number of pending read operations before attempting to drain. */
static final int READ_BUFFER_THRESHOLD = 32;
/** The maximum number of read operations to perform per amortized drain. */
static final int READ_BUFFER_DRAIN_THRESHOLD = 2 * READ_BUFFER_THRESHOLD;
/** The maximum number of pending reads per buffer; overflowing reads are overwritten (lossy). */
static final int READ_BUFFER_SIZE = 2 * READ_BUFFER_DRAIN_THRESHOLD;
/** Mask value for indexing into the read buffer (power-of-two modulus). */
static final int READ_BUFFER_INDEX_MASK = READ_BUFFER_SIZE - 1;
/** The maximum number of write operations to perform per amortized drain. */
static final int WRITE_BUFFER_DRAIN_THRESHOLD = 16;
/** A queue that discards all entries; used when no eviction listener is registered. */
static final Queue<?> DISCARDING_QUEUE = new DiscardingQueue();
// Serialized form is delegated to a proxy; see writeReplace().
private static final long serialVersionUID = 1;
// The backing data store holding the key-value associations
private final ConcurrentMap<K, Node<K, V>> data;
// Estimated number of concurrently updating threads; sizes the backing map.
private final int concurrencyLevel;
// These fields provide support to bound the map by a maximum capacity
// @GuardedBy("evictionLock")
private final long[] readBufferReadCount;
// @GuardedBy("evictionLock")
private final LinkedDeque<Node<K, V>> evictionDeque;
// @GuardedBy("evictionLock") // must write under lock
private final AtomicLong weightedSize;
// @GuardedBy("evictionLock") // must write under lock
private final AtomicLong capacity;
private final Lock evictionLock;
private final Queue<Runnable> writeBuffer;
private final AtomicLong[] readBufferWriteCount;
private final AtomicLong[] readBufferDrainAtWriteCount;
private final AtomicReference<Node<K, V>>[][] readBuffers;
private final AtomicReference<DrainStatus> drainStatus;
private final EntryWeigher<? super K, ? super V> weigher;
// These fields provide support for notifying a listener.
private final Queue<Node<K, V>> pendingNotifications;
private final EvictionListener<K, V> listener;
// Lazily created views; racy initialization is benign (views are stateless).
private transient Set<K> keySet;
private transient Collection<V> values;
private transient Set<Entry<K, V>> entrySet;
/**
 * Creates an instance based on the builder's configuration.
 */
@SuppressWarnings({"unchecked", "cast"})
private ConcurrentLinkedHashMap(Builder<K, V> builder) {
    // The data store and its maximum capacity
    concurrencyLevel = builder.concurrencyLevel;
    capacity = new AtomicLong(Math.min(builder.capacity, MAXIMUM_CAPACITY));
    data = new ConcurrentHashMap<>(builder.initialCapacity, 0.75f, concurrencyLevel);
    // The eviction support
    weigher = builder.weigher;
    evictionLock = new ReentrantLock();
    weightedSize = new AtomicLong();
    evictionDeque = new LinkedDeque<>();
    writeBuffer = new ConcurrentLinkedQueue<>();
    drainStatus = new AtomicReference<>(IDLE);
    // Per-CPU striped read buffers; generic array creation requires the
    // unchecked cast suppressed above.
    readBufferReadCount = new long[NUMBER_OF_READ_BUFFERS];
    readBufferWriteCount = new AtomicLong[NUMBER_OF_READ_BUFFERS];
    readBufferDrainAtWriteCount = new AtomicLong[NUMBER_OF_READ_BUFFERS];
    readBuffers = new AtomicReference[NUMBER_OF_READ_BUFFERS][READ_BUFFER_SIZE];
    for (int i = 0; i < NUMBER_OF_READ_BUFFERS; i++) {
        readBufferWriteCount[i] = new AtomicLong();
        readBufferDrainAtWriteCount[i] = new AtomicLong();
        readBuffers[i] = new AtomicReference[READ_BUFFER_SIZE];
        for (int j = 0; j < READ_BUFFER_SIZE; j++) {
            readBuffers[i][j] = new AtomicReference<>();
        }
    }
    // The notification queue and listener; a discarding queue avoids
    // accumulating nodes when no listener is interested.
    listener = builder.listener;
    pendingNotifications = (listener == DiscardingListener.INSTANCE)
            ? (Queue<Node<K, V>>) DISCARDING_QUEUE
            : new ConcurrentLinkedQueue<>();
}
/**
 * Ensures that the given reference is non-null.
 *
 * @param o the reference to validate
 * @throws NullPointerException if {@code o} is null
 */
private static void checkNotNull(Object o) {
    if (o != null) {
        return;
    }
    throw new NullPointerException();
}
/**
 * Returns the smallest power of two greater than or equal to {@code x}.
 * From Hacker's Delight, Chapter 3, Harry S. Warren Jr.
 *
 * @param x a positive value
 * @return the ceiling power of two
 */
private static int ceilingNextPowerOfTwo(int x) {
    final int shift = Integer.SIZE - Integer.numberOfLeadingZeros(x - 1);
    return 1 << shift;
}
/**
 * Ensures that the argument expression is true.
 *
 * @param expression a precondition on a caller-supplied argument
 * @throws IllegalArgumentException if {@code expression} is false
 */
private static void checkArgument(boolean expression) {
    if (expression) {
        return;
    }
    throw new IllegalArgumentException();
}
/**
 * Ensures that the state expression is true.
 *
 * @param expression an invariant on the object's internal state
 * @throws IllegalStateException if {@code expression} is false
 */
private static void checkState(boolean expression) {
    if (expression) {
        return;
    }
    throw new IllegalStateException();
}
/* ---------------- Eviction Support -------------- */
/**
 * Retrieves the maximum weighted capacity of the map.
 *
 * @return the maximum weighted capacity
 */
public long capacity() {
    // Plain volatile read; the field is only written while holding evictionLock.
    return capacity.get();
}
/**
 * Sets the maximum weighted capacity of the map and eagerly evicts entries
 * until it shrinks to the appropriate size.
 *
 * @param capacity the maximum weighted capacity of the map
 * @throws IllegalArgumentException if the capacity is negative
 */
public void setCapacity(long capacity) {
    checkArgument(capacity >= 0);
    evictionLock.lock();
    try {
        // lazySet suffices: the field is only ever written under evictionLock.
        this.capacity.lazySet(Math.min(capacity, MAXIMUM_CAPACITY));
        // Apply pending reads/writes so the policy is current before evicting.
        drainBuffers();
        evict();
    } finally {
        evictionLock.unlock();
    }
    // Deliver eviction notifications outside the lock.
    notifyListener();
}
/** Returns whether the map's combined weight currently exceeds its capacity. */
// @GuardedBy("evictionLock")
private boolean hasOverflowed() {
    final long used = weightedSize.get();
    final long max = capacity.get();
    return used > max;
}
// @GuardedBy("evictionLock")
private void evict() {
    // Attempts to evict entries from the map if it exceeds the maximum
    // capacity. If the eviction fails due to a concurrent removal of the
    // victim, that removal may cancel out the addition that triggered this
    // eviction. The victim is eagerly unlinked before the removal task so
    // that if an eviction is still required then a new victim will be chosen
    // for removal.
    while (hasOverflowed()) {
        // Poll the least-recently-used entry from the policy deque.
        final Node<K, V> node = evictionDeque.poll();
        // If weighted values are used, then the pending operations will adjust
        // the size to reflect the correct weight
        if (node == null) {
            return;
        }
        // Notify the listener only if the entry was evicted
        if (data.remove(node.key, node)) {
            pendingNotifications.add(node);
        }
        // Transition to dead regardless, so the weight accounting stays exact.
        makeDead(node);
    }
}
/**
 * Performs the post-processing work required after a read: records the
 * access in a striped read buffer, drains the buffers if warranted, and
 * flushes any pending eviction notifications.
 *
 * @param node the entry in the page replacement policy
 */
void afterRead(Node<K, V> node) {
    final int bufferIdx = readBufferIndex();
    final long writes = recordRead(bufferIdx, node);
    drainOnReadIfNeeded(bufferIdx, writes);
    notifyListener();
}
/**
 * Chooses a read buffer by the calling thread's id so that recorded reads
 * are spread pseudo-evenly across buffers, avoiding contention from many
 * threads appending to the same buffer for a hot entry.
 */
private static int readBufferIndex() {
    final long threadId = Thread.currentThread().getId();
    return (int) threadId & READ_BUFFERS_MASK;
}
/**
 * Records a read in the buffer and return its write count.
 *
 * @param bufferIndex the index to the chosen read buffer
 * @param node the entry in the page replacement policy
 * @return the number of writes on the chosen read buffer
 */
long recordRead(int bufferIndex, Node<K, V> node) {
    // The location in the buffer is chosen in a racy fashion as the increment
    // is not atomic with the insertion. This means that concurrent reads can
    // overlap and overwrite one another, resulting in a lossy buffer.
    final AtomicLong counter = readBufferWriteCount[bufferIndex];
    final long writeCount = counter.get();
    // lazySet: only this writer's own stripe is counted; stale reads by the
    // drainer are tolerated by design.
    counter.lazySet(writeCount + 1);
    final int index = (int) (writeCount & READ_BUFFER_INDEX_MASK);
    readBuffers[bufferIndex][index].lazySet(node);
    return writeCount;
}
/**
 * Attempts to drain the buffers if it is determined to be needed when
 * post-processing a read.
 *
 * @param bufferIndex the index to the chosen read buffer
 * @param writeCount the number of writes on the chosen read buffer
 */
void drainOnReadIfNeeded(int bufferIndex, long writeCount) {
    final long drainedAt = readBufferDrainAtWriteCount[bufferIndex].get();
    // The drain may be delayed while the backlog stays under the threshold.
    final boolean delayable = (writeCount - drainedAt) < READ_BUFFER_THRESHOLD;
    if (drainStatus.get().shouldDrainBuffers(delayable)) {
        tryToDrainBuffers();
    }
}
/**
 * Performs the post-processing work required after write.
 *
 * @param task the pending operation to be applied
 */
void afterWrite(Runnable task) {
    writeBuffer.add(task);
    // Publish the task before signalling that a drain is required.
    drainStatus.lazySet(REQUIRED);
    tryToDrainBuffers();
    notifyListener();
}
/**
 * Attempts to acquire the eviction lock and apply the pending operations, up
 * to the amortized threshold, to the page replacement policy.
 */
void tryToDrainBuffers() {
    // tryLock: if another thread is already draining, this read/write's
    // bookkeeping will be picked up by that drain or a later one.
    if (evictionLock.tryLock()) {
        try {
            drainStatus.lazySet(PROCESSING);
            drainBuffers();
        } finally {
            // Only reset to IDLE if no concurrent write re-flagged REQUIRED.
            drainStatus.compareAndSet(PROCESSING, IDLE);
            evictionLock.unlock();
        }
    }
}
/** Drains the read and write buffers up to an amortized threshold. */
// @GuardedBy("evictionLock")
void drainBuffers() {
    // Reads first so reordering hints are applied before structural writes.
    drainReadBuffers();
    drainWriteBuffer();
}
/** Drains the read buffers, each up to an amortized threshold. */
// @GuardedBy("evictionLock")
void drainReadBuffers() {
    // Start at the stripe owned by this thread to favor draining its own
    // recent reads, then sweep the remaining buffers round-robin.
    final int start = (int) Thread.currentThread().getId();
    final int end = start + NUMBER_OF_READ_BUFFERS;
    for (int i = start; i < end; i++) {
        drainReadBuffer(i & READ_BUFFERS_MASK);
    }
}
/** Drains a single read buffer, applying up to the amortized threshold of reads. */
// @GuardedBy("evictionLock")
private void drainReadBuffer(int bufferIndex) {
    // Snapshot the writer count before draining; slots written afterwards are
    // left for a future drain.
    final long writeCount = readBufferWriteCount[bufferIndex].get();
    for (int i = 0; i < READ_BUFFER_DRAIN_THRESHOLD; i++) {
        final int index = (int) (readBufferReadCount[bufferIndex] & READ_BUFFER_INDEX_MASK);
        final AtomicReference<Node<K, V>> slot = readBuffers[bufferIndex][index];
        final Node<K, V> node = slot.get();
        if (node == null) {
            // Caught up with the writers; nothing more to apply.
            break;
        }
        // Clear the slot before applying so writers can reuse it.
        slot.lazySet(null);
        applyRead(node);
        readBufferReadCount[bufferIndex]++;
    }
    // Record how far the drain got, so readers can gauge the backlog.
    readBufferDrainAtWriteCount[bufferIndex].lazySet(writeCount);
}
/**
 * Applies a recorded read by moving the entry to the most-recently-used
 * position. An entry may be scheduled for reordering despite having been
 * removed (a reader raced a writer); if it is no longer linked there is
 * nothing to do.
 */
// @GuardedBy("evictionLock")
private void applyRead(Node<K, V> node) {
    if (!evictionDeque.contains(node)) {
        return;
    }
    evictionDeque.moveToBack(node);
}
/** Drains the write buffer, running up to the amortized threshold of tasks. */
// @GuardedBy("evictionLock")
void drainWriteBuffer() {
    int applied = 0;
    Runnable task;
    while (applied < WRITE_BUFFER_DRAIN_THRESHOLD && (task = writeBuffer.poll()) != null) {
        task.run();
        applied++;
    }
}
/**
 * Attempts to transition the node from the {@code alive} state to the
 * {@code retired} state (encoded as a negated weight).
 *
 * @param node the entry in the page replacement policy
 * @param expect the expected weighted value
 * @return if successful
 */
boolean tryToRetire(Node<K, V> node, WeightedValue<V> expect) {
    if (!expect.isAlive()) {
        return false;
    }
    final WeightedValue<V> retired = new WeightedValue<>(expect.value, -expect.weight);
    return node.compareAndSet(expect, retired);
}
/**
 * Atomically transitions the node from the {@code alive} state to the
 * {@code retired} state, if a valid transition. Retries the CAS until the
 * node is observed in a non-alive state or the transition succeeds.
 *
 * @param node the entry in the page replacement policy
 */
void makeRetired(Node<K, V> node) {
    WeightedValue<V> current = node.get();
    while (current.isAlive()) {
        final WeightedValue<V> retired = new WeightedValue<>(current.value, -current.weight);
        if (node.compareAndSet(current, retired)) {
            return;
        }
        // Lost the race; re-read and re-evaluate.
        current = node.get();
    }
}
/**
 * Atomically transitions the node to the {@code dead} state (weight zero) and
 * decrements the {@code weightedSize}.
 *
 * @param node the entry in the page replacement policy
 */
// @GuardedBy("evictionLock")
void makeDead(Node<K, V> node) {
    for (;;) {
        WeightedValue<V> current = node.get();
        WeightedValue<V> dead = new WeightedValue<>(current.value, 0);
        if (node.compareAndSet(current, dead)) {
            // abs() because a retired node carries a negated weight.
            // lazySet is safe: weightedSize is only written under evictionLock.
            weightedSize.lazySet(weightedSize.get() - Math.abs(current.weight));
            return;
        }
    }
}
/** Notifies the listener of entries that were evicted, draining the pending queue. */
void notifyListener() {
    for (Node<K, V> node = pendingNotifications.poll();
            node != null;
            node = pendingNotifications.poll()) {
        listener.onEviction(node.key, node.getValue());
    }
}
/* ---------------- Concurrent Map Support -------------- */
/** Delegates to the backing map; a point-in-time answer under concurrency. */
@Override
public boolean isEmpty() {
    return data.isEmpty();
}
/** Returns the number of mappings (not the combined weight; see {@link #weightedSize()}). */
@Override
public int size() {
    return data.size();
}
/**
 * Returns the weighted size of this map. The raw counter may transiently dip
 * below zero due to pending async adjustments, so it is clamped at zero.
 *
 * @return the combined weight of the values in this map
 */
public long weightedSize() {
    final long total = weightedSize.get();
    return (total < 0) ? 0 : total;
}
/** Removes all mappings and resets the page replacement policy, under the eviction lock. */
@Override
public void clear() {
    evictionLock.lock();
    try {
        // Discard all entries
        Node<K, V> node;
        while ((node = evictionDeque.poll()) != null) {
            data.remove(node.key, node);
            makeDead(node);
        }
        // Discard all pending reads
        for (AtomicReference<Node<K, V>>[] buffer : readBuffers) {
            for (AtomicReference<Node<K, V>> slot : buffer) {
                slot.lazySet(null);
            }
        }
        // Apply all pending writes
        Runnable task;
        while ((task = writeBuffer.poll()) != null) {
            task.run();
        }
    } finally {
        evictionLock.unlock();
    }
}
/** Delegates to the backing map without touching the page replacement policy. */
@Override
public boolean containsKey(Object key) {
    return data.containsKey(key);
}
/**
 * Returns whether any entry currently maps to the given value. This is a
 * linear scan over the backing map and does not record policy reads.
 *
 * @throws NullPointerException if the value is null
 */
@Override
public boolean containsValue(Object value) {
    checkNotNull(value);
    for (Node<K, V> node : data.values()) {
        final V candidate = node.getValue();
        if (candidate.equals(value)) {
            return true;
        }
    }
    return false;
}
/**
 * Returns the value mapped to the key, recording the access with the page
 * replacement policy, or {@code null} if absent.
 */
@Override
public V get(Object key) {
    final Node<K, V> node = data.get(key);
    if (node != null) {
        afterRead(node);
        return node.getValue();
    }
    return null;
}
/**
 * Returns the value to which the specified key is mapped, or {@code null}
 * if this map contains no mapping for the key. Unlike {@link #get(Object)},
 * this does not record the operation with the page replacement policy.
 *
 * @param key the key whose associated value is to be returned
 * @return the mapped value, or {@code null} if there is no mapping
 * @throws NullPointerException if the specified key is null
 */
public V getQuietly(Object key) {
    final Node<K, V> node = data.get(key);
    if (node != null) {
        return node.getValue();
    }
    return null;
}
/** Unconditionally associates the value with the key; see {@link #put(Object, Object, boolean)}. */
@Override
public V put(K key, V value) {
    return put(key, value, false);
}
/** Associates the value only if the key has no current mapping; see {@link #put(Object, Object, boolean)}. */
@Override
public V putIfAbsent(K key, V value) {
    return put(key, value, true);
}
/**
 * Adds a node to the list and the data store. If an existing node is found,
 * then its value is updated if allowed.
 *
 * @param key key with which the specified value is to be associated
 * @param value value to be associated with the specified key
 * @param onlyIfAbsent write is performed only if the key is not already
 *     associated with a value
 * @return the prior value in the data store or null if no mapping was found
 */
private V put(K key, V value, boolean onlyIfAbsent) {
    checkNotNull(key);
    checkNotNull(value);
    final int weight = weigher.weightOf(key, value);
    final WeightedValue<V> weightedValue = new WeightedValue<>(value, weight);
    final Node<K, V> node = new Node<>(key, weightedValue);
    for (;;) {
        final Node<K, V> prior = data.putIfAbsent(node.key, node);
        if (prior == null) {
            // New mapping: schedule the policy insertion.
            afterWrite(new AddTask(node, weight));
            return null;
        }
        if (onlyIfAbsent) {
            // Existing mapping wins; record only a read.
            afterRead(prior);
            return prior.getValue();
        }
        // Replace the existing value via CAS; if the node was retired/died
        // concurrently, retry the outer loop to re-insert.
        for (;;) {
            final WeightedValue<V> oldWeightedValue = prior.get();
            if (!oldWeightedValue.isAlive()) {
                break;
            }
            if (prior.compareAndSet(oldWeightedValue, weightedValue)) {
                final int weightedDifference = weight - oldWeightedValue.weight;
                if (weightedDifference == 0) {
                    // Same weight: treat as a policy read (reorder only).
                    afterRead(prior);
                } else {
                    afterWrite(new UpdateTask(prior, weightedDifference));
                }
                return oldWeightedValue.value;
            }
        }
    }
}
/**
 * If the specified key is not already associated with a value,
 * attempts to compute its value using the given mapping function
 * and enters it into this map unless {@code null}. The entire
 * method invocation is performed atomically, so the function is
 * applied at most once per key. Some attempted update operations
 * on this map by other threads may be blocked while computation
 * is in progress, so the computation should be short and simple,
 * and must not attempt to update any other mappings of this map.
 *
 * @param key key with which the specified value is to be associated
 * @param mappingFunction the function to compute a value
 * @return the current (existing or computed) value associated with
 *     the specified key, or null if the computed value is null
 * @throws NullPointerException if the specified key or mappingFunction
 *     is null
 * @throws IllegalStateException if the computation detectably
 *     attempts a recursive update to this map that would
 *     otherwise never complete
 * @throws RuntimeException or Error if the mappingFunction does so,
 *     in which case the mapping is left unestablished
 */
@Override
public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction) {
    // Delegates to the shared compute loop with only-if-absent semantics.
    return compute(key, mappingFunction, true);
}
/**
 * Shared implementation behind {@link #computeIfAbsent}. Computes the value
 * atomically inside the backing map's {@code computeIfAbsent}, then performs
 * the policy bookkeeping (add / read / update) outside of it.
 */
private V compute(final K key, final Function<? super K, ? extends V> mappingFunction, boolean onlyIfAbsent) {
    checkNotNull(key);
    checkNotNull(mappingFunction);
    // Holder records whether the mapping function actually ran (i.e. the key
    // was absent and a new node was created by this call).
    final ObjectHolder<Node<K, V>> objectHolder = new ObjectHolder<>();
    for (;;) {
        Function<K, Node<K, V>> f = k -> {
            final V value = mappingFunction.apply(key);
            checkNotNull(value);
            final int weight = weigher.weightOf(key, value);
            final WeightedValue<V> weightedValue = new WeightedValue<>(value, weight);
            final Node<K, V> node = new Node<>(key, weightedValue);
            objectHolder.setObject(node);
            return node;
        };
        Node<K, V> prior = data.computeIfAbsent(key, f);
        Node<K, V> node = objectHolder.getObject();
        if (null == node) { // the entry is present
            // Re-wrap the existing value so the update path below can reuse it.
            V value = prior.getValue();
            final int weight = weigher.weightOf(key, value);
            final WeightedValue<V> weightedValue = new WeightedValue<>(value, weight);
            node = new Node<>(key, weightedValue);
        } else {
            // the return value of `computeIfAbsent` is different from the one of `putIfAbsent`.
            // if the key is absent in map, the return value of `computeIfAbsent` is the newly computed value, but `putIfAbsent` return null.
            // prior should keep the value with the same meaning of the return value of `putIfAbsent`, so reset it as null here.
            prior = null;
        }
        final WeightedValue<V> weightedValue = node.weightedValue;
        final int weight = weightedValue.weight;
        if (prior == null) {
            // Freshly inserted: schedule the policy insertion.
            afterWrite(new AddTask(node, weight));
            return weightedValue.value;
        }
        if (onlyIfAbsent) {
            // Existing mapping wins; record only a read.
            afterRead(prior);
            return prior.getValue();
        }
        // Replace path (unused by computeIfAbsent, which passes onlyIfAbsent=true):
        // CAS the value in; if the node died concurrently, retry the outer loop.
        for (;;) {
            final WeightedValue<V> oldWeightedValue = prior.get();
            if (!oldWeightedValue.isAlive()) {
                break;
            }
            if (prior.compareAndSet(oldWeightedValue, weightedValue)) {
                final int weightedDifference = weight - oldWeightedValue.weight;
                if (weightedDifference == 0) {
                    afterRead(prior);
                } else {
                    afterWrite(new UpdateTask(prior, weightedDifference));
                }
                return oldWeightedValue.value;
            }
        }
    }
}
/**
 * Removes the mapping for the key, retiring the node and scheduling its
 * removal from the page replacement policy.
 */
@Override
public V remove(Object key) {
    final Node<K, V> node = data.remove(key);
    if (node != null) {
        makeRetired(node);
        afterWrite(new RemovalTask(node));
        return node.getValue();
    }
    return null;
}
/** Removes the entry only if it currently maps to the given value. */
@Override
public boolean remove(Object key, Object value) {
    final Node<K, V> node = data.get(key);
    if ((node == null) || (value == null)) {
        return false;
    }
    WeightedValue<V> weightedValue = node.get();
    for (;;) {
        if (weightedValue.contains(value)) {
            // Retire first so no concurrent writer resurrects the node, then
            // remove from the map and schedule the policy removal.
            if (tryToRetire(node, weightedValue)) {
                if (data.remove(key, node)) {
                    afterWrite(new RemovalTask(node));
                    return true;
                }
            } else {
                weightedValue = node.get();
                if (weightedValue.isAlive()) {
                    // retry as an intermediate update may have replaced the value with
                    // an equal instance that has a different reference identity
                    continue;
                }
            }
        }
        return false;
    }
}
/** Replaces the entry's value only if the key is currently mapped. */
@Override
public V replace(K key, V value) {
    checkNotNull(key);
    checkNotNull(value);
    final Node<K, V> node = data.get(key);
    if (node == null) {
        return null;
    }
    final int weight = weigher.weightOf(key, value);
    final WeightedValue<V> weightedValue = new WeightedValue<>(value, weight);
    // CAS loop: retry until the swap succeeds or the node is observed dead.
    for (;;) {
        final WeightedValue<V> oldWeightedValue = node.get();
        if (!oldWeightedValue.isAlive()) {
            return null;
        }
        if (node.compareAndSet(oldWeightedValue, weightedValue)) {
            final int weightedDifference = weight - oldWeightedValue.weight;
            if (weightedDifference == 0) {
                // Same weight: policy sees it as a read (reorder only).
                afterRead(node);
            } else {
                afterWrite(new UpdateTask(node, weightedDifference));
            }
            return oldWeightedValue.value;
        }
    }
}
/** Replaces the entry's value only if it currently maps to {@code oldValue}. */
@Override
public boolean replace(K key, V oldValue, V newValue) {
    checkNotNull(key);
    checkNotNull(oldValue);
    checkNotNull(newValue);
    final Node<K, V> node = data.get(key);
    if (node == null) {
        return false;
    }
    final int weight = weigher.weightOf(key, newValue);
    final WeightedValue<V> newWeightedValue = new WeightedValue<>(newValue, weight);
    // CAS loop: retry until the swap succeeds, the node dies, or the value
    // no longer matches oldValue.
    for (;;) {
        final WeightedValue<V> weightedValue = node.get();
        if (!weightedValue.isAlive() || !weightedValue.contains(oldValue)) {
            return false;
        }
        if (node.compareAndSet(weightedValue, newWeightedValue)) {
            final int weightedDifference = weight - weightedValue.weight;
            if (weightedDifference == 0) {
                afterRead(node);
            } else {
                afterWrite(new UpdateTask(node, weightedDifference));
            }
            return true;
        }
    }
}
/**
 * Returns the key-set view, creating it lazily. The unsynchronized
 * check-then-create is benign: at worst two stateless views are built and
 * one is discarded.
 */
@Override
public Set<K> keySet() {
    final Set<K> existing = keySet;
    if (existing == null) {
        keySet = new KeySet();
        return keySet;
    }
    return existing;
}
/**
 * Returns an unmodifiable snapshot {@link Set} of the keys, ordered from the
 * entry least likely to be retained to the most likely.
 * <p>
 * Unlike {@link #keySet()}, this is <em>not</em> a constant-time operation:
 * the asynchronous policy buffers are drained and the order traversed.
 *
 * @return an ascending snapshot view of the keys in this map
 */
public Set<K> ascendingKeySet() {
    return ascendingKeySetWithLimit(Integer.MAX_VALUE);
}
/**
 * Returns an unmodifiable snapshot {@link Set} of at most {@code limit} keys,
 * ordered from the entry least likely to be retained to the most likely.
 * <p>
 * Unlike {@link #keySet()}, this is <em>not</em> a constant-time operation:
 * the asynchronous policy buffers are drained and the order traversed.
 *
 * @param limit the maximum size of the returned set
 * @return an ascending snapshot view of the keys in this map
 * @throws IllegalArgumentException if the limit is negative
 */
public Set<K> ascendingKeySetWithLimit(int limit) {
    return orderedKeySet(true, limit);
}
/**
 * Returns an unmodifiable snapshot {@link Set} of the keys, ordered from the
 * entry most likely to be retained to the least likely.
 * <p>
 * Unlike {@link #keySet()}, this is <em>not</em> a constant-time operation:
 * the asynchronous policy buffers are drained and the order traversed.
 *
 * @return a descending snapshot view of the keys in this map
 */
public Set<K> descendingKeySet() {
    return descendingKeySetWithLimit(Integer.MAX_VALUE);
}
/**
 * Returns an unmodifiable snapshot {@link Set} of at most {@code limit} keys,
 * ordered from the entry most likely to be retained to the least likely.
 * <p>
 * Unlike {@link #keySet()}, this is <em>not</em> a constant-time operation:
 * the asynchronous policy buffers are drained and the order traversed.
 *
 * @param limit the maximum size of the returned set
 * @return a descending snapshot view of the keys in this map
 * @throws IllegalArgumentException if the limit is negative
 */
public Set<K> descendingKeySetWithLimit(int limit) {
    return orderedKeySet(false, limit);
}
/* ---------------- Serialization Support -------------- */
/**
 * Serialization support: replaces this map with a proxy in the serialized
 * stream so that the transient policy state is rebuilt on deserialization.
 *
 * @return The write replacement
 */
Object writeReplace() {
    // Diamond operator for consistency with the rest of the file.
    return new SerializationProxy<>(this);
}
/**
 * Guards against direct deserialization; instances must be restored through
 * the proxy installed by {@link #writeReplace()}.
 */
private void readObject(ObjectInputStream stream) throws InvalidObjectException {
    throw new InvalidObjectException("Proxy required");
}
/**
 * Builds an unmodifiable key snapshot in eviction order (ascending or
 * descending), under the eviction lock so the deque order is stable.
 */
private Set<K> orderedKeySet(boolean ascending, int limit) {
    checkArgument(limit >= 0);
    evictionLock.lock();
    try {
        // Apply pending reads/writes so the traversal reflects recent activity.
        drainBuffers();
        final int initialCapacity = (weigher == Weighers.entrySingleton())
                ? Math.min(limit, (int) weightedSize())
                : 16;
        final Set<K> keys = new LinkedHashSet<>(initialCapacity);
        final Iterator<Node<K, V>> iterator = ascending
                ? evictionDeque.iterator()
                : evictionDeque.descendingIterator();
        while (iterator.hasNext() && (limit > keys.size())) {
            keys.add(iterator.next().key);
        }
        return unmodifiableSet(keys);
    } finally {
        evictionLock.unlock();
    }
}
/**
 * Returns the values view, creating it lazily. The unsynchronized
 * check-then-create is benign: at worst two stateless views are built and
 * one is discarded.
 */
@Override
public Collection<V> values() {
    final Collection<V> existing = values;
    if (existing == null) {
        values = new Values();
        return values;
    }
    return existing;
}
/**
 * Returns the entry-set view, creating it lazily. The unsynchronized
 * check-then-create is benign: at worst two stateless views are built and
 * one is discarded.
 */
@Override
public Set<Entry<K, V>> entrySet() {
    final Set<Entry<K, V>> existing = entrySet;
    if (existing == null) {
        entrySet = new EntrySet();
        return entrySet;
    }
    return existing;
}
/**
 * Returns an unmodifiable snapshot {@link Map} of the mappings, ordered from
 * the entry least likely to be retained to the most likely.
 * <p>
 * This is <em>not</em> a constant-time operation: the asynchronous policy
 * buffers are drained and the order traversed.
 *
 * @return an ascending snapshot view of this map
 */
public Map<K, V> ascendingMap() {
    return ascendingMapWithLimit(Integer.MAX_VALUE);
}
/**
 * Returns an unmodifiable snapshot {@link Map} of at most {@code limit}
 * mappings, ordered from the entry least likely to be retained to the most
 * likely.
 * <p>
 * This is <em>not</em> a constant-time operation: the asynchronous policy
 * buffers are drained and the order traversed.
 *
 * @param limit the maximum size of the returned map
 * @return an ascending snapshot view of this map
 * @throws IllegalArgumentException if the limit is negative
 */
public Map<K, V> ascendingMapWithLimit(int limit) {
    return orderedMap(true, limit);
}
/**
 * Returns an unmodifiable snapshot {@link Map} of the mappings, ordered from
 * the entry most likely to be retained to the least likely.
 * <p>
 * This is <em>not</em> a constant-time operation: the asynchronous policy
 * buffers are drained and the order traversed.
 *
 * @return a descending snapshot view of this map
 */
public Map<K, V> descendingMap() {
    return descendingMapWithLimit(Integer.MAX_VALUE);
}
/**
 * Returns an unmodifiable snapshot {@link Map} of at most {@code limit}
 * mappings, ordered from the entry most likely to be retained to the least
 * likely.
 * <p>
 * This is <em>not</em> a constant-time operation: the asynchronous policy
 * buffers are drained and the order traversed.
 *
 * @param limit the maximum size of the returned map
 * @return a descending snapshot view of this map
 * @throws IllegalArgumentException if the limit is negative
 */
public Map<K, V> descendingMapWithLimit(int limit) {
    return orderedMap(false, limit);
}
/**
 * Builds an unmodifiable mapping snapshot in eviction order (ascending or
 * descending), under the eviction lock so the deque order is stable.
 */
private Map<K, V> orderedMap(boolean ascending, int limit) {
    checkArgument(limit >= 0);
    evictionLock.lock();
    try {
        // Apply pending reads/writes so the traversal reflects recent activity.
        drainBuffers();
        final int initialCapacity = (weigher == Weighers.entrySingleton())
                ? Math.min(limit, (int) weightedSize())
                : 16;
        final Map<K, V> map = new LinkedHashMap<>(initialCapacity);
        final Iterator<Node<K, V>> iterator = ascending
                ? evictionDeque.iterator()
                : evictionDeque.descendingIterator();
        while (iterator.hasNext() && (limit > map.size())) {
            Node<K, V> node = iterator.next();
            map.put(node.key, node.getValue());
        }
        return unmodifiableMap(map);
    } finally {
        evictionLock.unlock();
    }
}
/** The draining status of the buffers. */
| performs |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/bean/override/BeanOverrideHandlerTests.java | {
"start": 10929,
"end": 11274
} | class ____ {
@DummyBean(beanName = "messageBean", contextName = "parent")
String parentMessageBean;
@DummyBean(beanName = "messageBean", contextName = "parent")
String parentMessageBean2;
@DummyBean(beanName = "messageBean", contextName = "child")
String childMessageBean;
}
static | MultipleAnnotationsWithSameNameInDifferentContext |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/rest/messages/JobVertexBackPressureHeaders.java | {
"start": 1155,
"end": 2630
} | class ____
implements RuntimeMessageHeaders<
EmptyRequestBody, JobVertexBackPressureInfo, JobVertexMessageParameters> {
private static final JobVertexBackPressureHeaders INSTANCE = new JobVertexBackPressureHeaders();
private static final String URL =
"/jobs/:"
+ JobIDPathParameter.KEY
+ "/vertices/:"
+ JobVertexIdPathParameter.KEY
+ "/backpressure";
@Override
public Class<EmptyRequestBody> getRequestClass() {
return EmptyRequestBody.class;
}
@Override
public Class<JobVertexBackPressureInfo> getResponseClass() {
return JobVertexBackPressureInfo.class;
}
@Override
public HttpResponseStatus getResponseStatusCode() {
return HttpResponseStatus.OK;
}
@Override
public JobVertexMessageParameters getUnresolvedMessageParameters() {
return new JobVertexMessageParameters();
}
@Override
public HttpMethodWrapper getHttpMethod() {
return HttpMethodWrapper.GET;
}
@Override
public String getTargetRestEndpointURL() {
return URL;
}
public static JobVertexBackPressureHeaders getInstance() {
return INSTANCE;
}
@Override
public String getDescription() {
return "Returns back-pressure information for a job, and may initiate back-pressure sampling if necessary.";
}
}
| JobVertexBackPressureHeaders |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/YarnConfigurationStore.java | {
"start": 2033,
"end": 2322
} | class ____ implements AutoCloseable {
public static final Logger LOG =
LoggerFactory.getLogger(YarnConfigurationStore.class);
/**
* LogMutation encapsulates the fields needed for configuration mutation
* audit logging and recovery.
*/
public static | YarnConfigurationStore |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/streaming/api/connector/sink2/CommittableMessageSerializer.java | {
"start": 1464,
"end": 4533
} | class ____<CommT>
implements SimpleVersionedSerializer<CommittableMessage<CommT>> {
@VisibleForTesting static final int VERSION = 1;
private static final int COMMITTABLE = 1;
private static final int SUMMARY = 2;
private final SimpleVersionedSerializer<CommT> committableSerializer;
public CommittableMessageSerializer(SimpleVersionedSerializer<CommT> committableSerializer) {
this.committableSerializer = checkNotNull(committableSerializer);
}
@Override
public int getVersion() {
return VERSION;
}
@Override
public byte[] serialize(CommittableMessage<CommT> obj) throws IOException {
DataOutputSerializer out = new DataOutputSerializer(256);
if (obj instanceof CommittableWithLineage) {
out.writeByte(COMMITTABLE);
SimpleVersionedSerialization.writeVersionAndSerialize(
committableSerializer,
((CommittableWithLineage<CommT>) obj).getCommittable(),
out);
out.writeLong(obj.getCheckpointIdOrEOI());
out.writeInt(obj.getSubtaskId());
} else if (obj instanceof CommittableSummary) {
out.writeByte(SUMMARY);
out.writeInt(obj.getSubtaskId());
CommittableSummary<?> committableSummary = (CommittableSummary<?>) obj;
out.writeInt(committableSummary.getNumberOfSubtasks());
out.writeLong(obj.getCheckpointIdOrEOI());
out.writeInt(committableSummary.getNumberOfCommittables());
out.writeInt(committableSummary.getNumberOfPendingCommittables());
out.writeInt(committableSummary.getNumberOfFailedCommittables());
} else {
throw new IllegalArgumentException("Unknown message: " + obj.getClass());
}
return out.getCopyOfBuffer();
}
@Override
public CommittableMessage<CommT> deserialize(int version, byte[] serialized)
throws IOException {
DataInputDeserializer in = new DataInputDeserializer(serialized);
byte messageType = in.readByte();
switch (messageType) {
case COMMITTABLE:
return new CommittableWithLineage<>(
SimpleVersionedSerialization.readVersionAndDeSerialize(
committableSerializer, in),
in.readLong(),
in.readInt());
case SUMMARY:
return new CommittableSummary<>(
in.readInt(),
in.readInt(),
in.readLong(),
in.readInt(),
in.readInt(),
in.readInt());
default:
throw new IllegalStateException(
"Unexpected message type "
+ messageType
+ " in "
+ StringUtils.byteToHexString(serialized));
}
}
}
| CommittableMessageSerializer |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/parser/deser/deny/DenyTest14.java | {
"start": 212,
"end": 550
} | class ____ extends TestCase {
public void test_deny() throws Exception {
String text = "{\"value\":{\"@type\":\"com.alibaba.json.bvt.parser.deser.deny.DenyTest14$MyException\"}}";
Model model = JSON.parseObject(text, Model.class);
assertTrue(model.value instanceof MyException);
}
public static | DenyTest14 |
java | apache__camel | components/camel-jms/src/test/java/org/apache/camel/component/jms/JmsBindingTest.java | {
"start": 1757,
"end": 4338
} | class ____ {
private final Instant instant = Instant.ofEpochMilli(1519672338000L);
@Mock
private JmsConfiguration mockJmsConfiguration;
@Mock
private JmsEndpoint mockJmsEndpoint;
private JmsBinding jmsBindingUnderTest;
@BeforeEach
public void setup() {
lenient().when(mockJmsConfiguration.isFormatDateHeadersToIso8601()).thenReturn(false);
lenient().when(mockJmsConfiguration.isMapJmsMessage()).thenReturn(true);
lenient().when(mockJmsEndpoint.getConfiguration()).thenReturn(mockJmsConfiguration);
jmsBindingUnderTest = new JmsBinding(mockJmsEndpoint);
}
@Test
public void noEndpointTest() throws Exception {
JmsBinding testBindingWithoutEndpoint = new JmsBinding();
ActiveMQTextMessage message = mock(ActiveMQTextMessage.class);
message.setText("test");
DefaultCamelContext camelContext = new DefaultCamelContext();
Exchange exchange = camelContext.getEndpoint("jms:queue:foo").createExchange();
exchange.getIn().setBody("test");
exchange.getIn().setHeader("JMSCorrelationID", null);
assertDoesNotThrow(() -> testBindingWithoutEndpoint.appendJmsProperties(message, exchange));
}
@Test
public void testExtractNullBodyFromJmsShouldReturnNull() throws JMSException {
ActiveMQTextMessage message = mock(ActiveMQTextMessage.class);
assertNull(jmsBindingUnderTest.extractBodyFromJms(null, message));
}
@Test
public void testGetValidJmsHeaderValueWithBigIntegerShouldSucceed() {
Object value = jmsBindingUnderTest.getValidJMSHeaderValue("foo", new BigInteger("12345"));
assertEquals("12345", value);
}
@Test
public void testGetValidJmsHeaderValueWithBigDecimalShouldSucceed() {
Object value = jmsBindingUnderTest.getValidJMSHeaderValue("foo", new BigDecimal("123.45"));
assertEquals("123.45", value);
}
@Test
public void testGetValidJmsHeaderValueWithDateShouldSucceed() {
Object value = jmsBindingUnderTest.getValidJMSHeaderValue("foo", Date.from(instant));
assertNotNull(value);
// We can't assert further as the returned value is bound to the machine time zone and locale
}
@Test
public void testGetValidJmsHeaderValueWithIso8601DateShouldSucceed() {
when(mockJmsConfiguration.isFormatDateHeadersToIso8601()).thenReturn(true);
Object value = jmsBindingUnderTest.getValidJMSHeaderValue("foo", Date.from(instant));
assertEquals("2018-02-26T19:12:18Z", value);
}
}
| JmsBindingTest |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/buildextension/annotations/AnnotationsTransformerInterceptorBindingTest.java | {
"start": 1563,
"end": 1694
} | class ____ {
// => add @Simple here
public int size() {
return 0;
}
}
}
| IWantToBeIntercepted |
java | spring-projects__spring-boot | module/spring-boot-http-client/src/main/java/org/springframework/boot/http/client/autoconfigure/HttpClientSettingsPropertyMapper.java | {
"start": 1161,
"end": 2450
} | class ____ {
private final @Nullable SslBundles sslBundles;
private final HttpClientSettings settings;
public HttpClientSettingsPropertyMapper(@Nullable SslBundles sslBundles, @Nullable HttpClientSettings settings) {
this.sslBundles = sslBundles;
this.settings = (settings != null) ? settings : HttpClientSettings.defaults();
}
public HttpClientSettings map(@Nullable HttpClientSettingsProperties properties) {
HttpClientSettings settings = HttpClientSettings.defaults();
if (properties != null) {
PropertyMapper map = PropertyMapper.get();
settings = map.from(properties::getRedirects).to(settings, HttpClientSettings::withRedirects);
settings = map.from(properties::getConnectTimeout).to(settings, HttpClientSettings::withConnectTimeout);
settings = map.from(properties::getReadTimeout).to(settings, HttpClientSettings::withReadTimeout);
settings = map.from(properties::getSsl)
.as(HttpClientSettingsProperties.Ssl::getBundle)
.as(this::getSslBundle)
.to(settings, HttpClientSettings::withSslBundle);
}
return settings.orElse(this.settings);
}
private SslBundle getSslBundle(String name) {
Assert.state(this.sslBundles != null, "No 'sslBundles' available");
return this.sslBundles.getBundle(name);
}
}
| HttpClientSettingsPropertyMapper |
java | hibernate__hibernate-orm | hibernate-scan-jandex/src/main/java/org/hibernate/archive/scan/internal/ScanResultCollector.java | {
"start": 679,
"end": 4158
} | class ____ {
private final ScanEnvironment environment;
private final ScanOptions options;
private final Set<ClassDescriptor> discoveredClasses;
private final Set<PackageDescriptor> discoveredPackages;
private final Set<MappingFileDescriptor> discoveredMappingFiles;
public ScanResultCollector(ScanEnvironment environment, ScanOptions options, ScanParameters parameters) {
this.environment = environment;
this.options = options;
if ( environment.getExplicitlyListedClassNames() == null ) {
throw new IllegalArgumentException( "ScanEnvironment#getExplicitlyListedClassNames should not return null" );
}
if ( environment.getExplicitlyListedMappingFiles() == null ) {
throw new IllegalArgumentException( "ScanEnvironment#getExplicitlyListedMappingFiles should not return null" );
}
this.discoveredPackages = new HashSet<>();
this.discoveredClasses = new HashSet<>();
this.discoveredMappingFiles = new HashSet<>();
}
public void handleClass(ClassDescriptor classDescriptor, boolean rootUrl) {
if ( !isListedOrDetectable( classDescriptor.getName(), rootUrl ) ) {
return;
}
discoveredClasses.add( classDescriptor );
}
protected boolean isListedOrDetectable(String name, boolean rootUrl) {
// IMPL NOTE: protect the calls to getExplicitlyListedClassNames unless needed,
// since it can take time with lots of listed classes.
if ( rootUrl ) {
// The entry comes from the root url. Allow it if either:
// 1) we are allowed to discover classes/packages in the root url
// 2) the entry was explicitly listed
return options.canDetectUnlistedClassesInRoot()
|| environment.getExplicitlyListedClassNames().contains( name );
}
else {
// The entry comes from a non-root url. Allow it if either:
// 1) we are allowed to discover classes/packages in non-root urls
// 2) the entry was explicitly listed
return options.canDetectUnlistedClassesInNonRoot()
|| environment.getExplicitlyListedClassNames().contains( name );
}
}
public void handlePackage(PackageDescriptor packageDescriptor, boolean rootUrl) {
if ( !isListedOrDetectable( packageDescriptor.getName(), rootUrl ) ) {
// not strictly needed, but helps cut down on the size of discoveredPackages
return;
}
discoveredPackages.add( packageDescriptor );
}
public void handleMappingFile(MappingFileDescriptor mappingFileDescriptor, boolean rootUrl) {
if ( acceptAsMappingFile( mappingFileDescriptor, rootUrl ) ) {
discoveredMappingFiles.add( mappingFileDescriptor );
}
}
private boolean acceptAsMappingFile(MappingFileDescriptor mappingFileDescriptor, boolean rootUrl) {
if ( mappingFileDescriptor.getName().endsWith( "hbm.xml" ) ) {
return options.canDetectHibernateMappingFiles();
}
if ( mappingFileDescriptor.getName().endsWith( "META-INF/orm.xml" ) ) {
if ( environment.getExplicitlyListedMappingFiles().contains( "META-INF/orm.xml" ) ) {
// if the user explicitly listed META-INF/orm.xml, only except the root one
//
// not sure why exactly, but this is what the old code does
return rootUrl;
}
return true;
}
return environment.getExplicitlyListedMappingFiles().contains( mappingFileDescriptor.getName() );
}
public ScanResult toScanResult() {
return new ScanResultImpl(
Collections.unmodifiableSet( discoveredPackages ),
Collections.unmodifiableSet( discoveredClasses ),
Collections.unmodifiableSet( discoveredMappingFiles )
);
}
}
| ScanResultCollector |
java | google__guava | android/guava-testlib/src/com/google/common/collect/testing/SpliteratorTester.java | {
"start": 3549,
"end": 4299
} | class ____<E extends @Nullable Object>
extends GeneralSpliterator<E> {
GeneralSpliteratorOfObject(Spliterator<E> spliterator) {
super(spliterator);
}
@Override
void forEachRemaining(Consumer<? super E> action) {
spliterator.forEachRemaining(action);
}
@Override
boolean tryAdvance(Consumer<? super E> action) {
return spliterator.tryAdvance(action);
}
@Override
@Nullable GeneralSpliterator<E> trySplit() {
Spliterator<E> split = spliterator.trySplit();
return split == null ? null : new GeneralSpliteratorOfObject<>(split);
}
}
@IgnoreJRERequirement // *should* be redundant with the annotation on SpliteratorTester
private static final | GeneralSpliteratorOfObject |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestApplicatonReport.java | {
"start": 1490,
"end": 3008
} | class ____ {
@Test
void testApplicationReport() {
long timestamp = System.currentTimeMillis();
ApplicationReport appReport1 =
createApplicationReport(1, 1, timestamp);
ApplicationReport appReport2 =
createApplicationReport(1, 1, timestamp);
ApplicationReport appReport3 =
createApplicationReport(1, 1, timestamp);
assertEquals(appReport1, appReport2);
assertEquals(appReport2, appReport3);
appReport1.setApplicationId(null);
assertNull(appReport1.getApplicationId());
assertNotSame(appReport1, appReport2);
appReport2.setCurrentApplicationAttemptId(null);
assertNull(appReport2.getCurrentApplicationAttemptId());
assertNotSame(appReport2, appReport3);
assertNull(appReport1.getAMRMToken());
}
protected static ApplicationReport createApplicationReport(
int appIdInt, int appAttemptIdInt, long timestamp) {
ApplicationId appId = ApplicationId.newInstance(timestamp, appIdInt);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, appAttemptIdInt);
ApplicationReport appReport =
ApplicationReport.newInstance(appId, appAttemptId, "user", "queue",
"appname", "host", 124, null, YarnApplicationState.FINISHED,
"diagnostics", "url", 0, 0, 0, FinalApplicationStatus.SUCCEEDED, null,
"N/A", 0.53789f, YarnConfiguration.DEFAULT_APPLICATION_TYPE, null,
null, false, Priority.newInstance(0),"","");
return appReport;
}
}
| TestApplicatonReport |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/impl/LogEventFactory.java | {
"start": 1119,
"end": 1817
} | interface ____ extends LocationAwareLogEventFactory {
LogEvent createEvent(
String loggerName,
Marker marker,
String fqcn,
Level level,
Message data,
List<Property> properties,
Throwable t);
@Override
default LogEvent createEvent(
String loggerName,
Marker marker,
String fqcn,
@SuppressWarnings("unused") StackTraceElement location,
Level level,
Message data,
List<Property> properties,
Throwable t) {
return createEvent(loggerName, marker, fqcn, level, data, properties, t);
}
}
| LogEventFactory |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/context/annotation/configuration/ConfigurationClassProcessingTests.java | {
"start": 16906,
"end": 17207
} | class ____ implements Provider<TestBean> {
static TestBean testBean = new TestBean(ConfigWithBeanWithProviderImplementation.class.getSimpleName());
@Override
@Bean("customName")
public TestBean get() {
return testBean;
}
}
@Configuration
static | ConfigWithBeanWithProviderImplementation |
java | apache__camel | components/camel-jms/src/test/java/org/apache/camel/component/jms/integration/JmsTransferExceptionIT.java | {
"start": 4662,
"end": 5374
} | class ____ extends CamelLogger {
private Throwable exception;
private String message;
private LoggingLevel loggingLevel;
@Override
public void log(String message, Throwable exception, LoggingLevel loggingLevel) {
super.log(message, exception, loggingLevel);
this.message = message;
this.exception = exception;
this.loggingLevel = loggingLevel;
}
public Throwable getException() {
return exception;
}
public String getMessage() {
return message;
}
public LoggingLevel getLoggingLevel() {
return loggingLevel;
}
}
}
| MyErrorLogger |
java | apache__flink | flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/plan/nodes/exec/stream/MiniBatchAssignerRestoreTest.java | {
"start": 1132,
"end": 1548
} | class ____ extends RestoreTestBase {
public MiniBatchAssignerRestoreTest() {
super(StreamExecMiniBatchAssigner.class);
}
@Override
public List<TableTestProgram> programs() {
return Arrays.asList(
MiniBatchAssignerTestPrograms.MINI_BATCH_ASSIGNER_ROW_TIME,
MiniBatchAssignerTestPrograms.MINI_BATCH_ASSIGNER_PROC_TIME);
}
}
| MiniBatchAssignerRestoreTest |
java | elastic__elasticsearch | x-pack/plugin/gpu/src/internalClusterTest/java/org/elasticsearch/xpack/gpu/TestCuVSServiceProvider.java | {
"start": 601,
"end": 1118
} | class ____ extends CuVSServiceProvider {
static Function<CuVSProvider, GPUInfoProvider> mockedGPUInfoProvider;
@Override
public CuVSProvider get(CuVSProvider builtin) {
if (mockedGPUInfoProvider == null) {
return builtin;
}
return new CuVSProviderDelegate(builtin) {
@Override
public GPUInfoProvider gpuInfoProvider() {
return mockedGPUInfoProvider.apply(builtin);
}
};
}
static | TestCuVSServiceProvider |
java | quarkusio__quarkus | extensions/vertx-http/deployment/src/test/java/io/quarkus/vertx/http/proxy/TrustedForwarderProxyTest.java | {
"start": 413,
"end": 4085
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(ForwardedHandlerInitializer.class)
.addAsResource(new StringAsset("quarkus.http.proxy.proxy-address-forwarding=true\n" +
"quarkus.http.proxy.allow-forwarded=true\n" +
"quarkus.http.proxy.enable-forwarded-host=true\n" +
"quarkus.http.proxy.enable-forwarded-prefix=true\n" +
"quarkus.http.proxy.trusted-proxies=localhost"),
"application.properties"));
@Test
public void testHeadersAreUsed() {
RestAssured.given()
.header("Forwarded", "proto=http;for=backend2:5555;host=somehost2")
.get("/path")
.then()
.body(Matchers.equalTo("http|somehost2|backend2:5555|/path|http://somehost2/path"));
}
@Test
public void testHeadersAreUsedWithTrustedProxyHeader() {
RestAssured.given()
.header("Forwarded", "proto=http;for=backend2:5555;host=somehost2")
.get("/path-trusted-proxy")
.then()
.body(Matchers
.equalTo("http|somehost2|backend2:5555|/path-trusted-proxy|http://somehost2/path-trusted-proxy|null"));
}
@Test
public void testWithoutTrustedProxyHeader() {
assertThat(RestAssured.get("/forward").asString()).startsWith("http|");
RestAssured.given()
.header("Forwarded", "by=proxy;for=backend:4444;host=somehost;proto=https")
.get("/trusted-proxy")
.then()
.body(Matchers.equalTo("https|somehost|backend:4444|null"));
}
@Test
public void testThatTrustedProxyHeaderCannotBeForged() {
assertThat(RestAssured.get("/forward").asString()).startsWith("http|");
RestAssured.given()
.header("Forwarded", "by=proxy;for=backend:4444;host=somehost;proto=https")
.header("X-Forwarded-Trusted-Proxy", "true")
.get("/trusted-proxy")
.then()
.body(Matchers.equalTo("https|somehost|backend:4444|null"));
RestAssured.given()
.header("Forwarded", "by=proxy;for=backend:4444;host=somehost;proto=https")
.header("X-Forwarded-Trusted-Proxy", "hello")
.get("/trusted-proxy")
.then()
.body(Matchers.equalTo("https|somehost|backend:4444|null"));
RestAssured.given()
.header("Forwarded", "by=proxy;for=backend:4444;host=somehost;proto=https")
.header("X-Forwarded-Trusted-Proxy", "false")
.get("/trusted-proxy")
.then()
.body(Matchers.equalTo("https|somehost|backend:4444|null"));
}
/**
* As described on <a href=
* "https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Forwarded">https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Forwarded</a>,
* the syntax should be case-insensitive.
* <p>
* Kong, for example, uses `Proto` instead of `proto` and `For` instead of `for`.
*/
@Test
public void testHeadersAreUsedWhenUsingCasedCharacters() {
RestAssured.given()
.header("Forwarded", "Proto=http;For=backend2:5555;Host=somehost2")
.get("/path")
.then()
.body(Matchers.equalTo("http|somehost2|backend2:5555|/path|http://somehost2/path"));
}
}
| TrustedForwarderProxyTest |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/struct/TestUnwrappedWithPrefix.java | {
"start": 1008,
"end": 1263
} | class ____ {
public int x;
public int y;
public Location() { }
protected Location(int x, int y) {
this.x = x;
this.y = y;
}
}
// Class with unwrapping using prefixes
static | Location |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/TaskSpeculationPredicate.java | {
"start": 1128,
"end": 1233
} | class ____ {
boolean canSpeculate(AppContext context, TaskId taskID) {
// This | TaskSpeculationPredicate |
java | apache__camel | components/camel-jacksonxml/src/main/java/org/apache/camel/component/jacksonxml/ListJacksonXMLDataFormat.java | {
"start": 1015,
"end": 1939
} | class ____ extends JacksonXMLDataFormat {
public ListJacksonXMLDataFormat() {
useList();
}
public ListJacksonXMLDataFormat(Class<?> unmarshalType) {
super(unmarshalType);
useList();
}
public ListJacksonXMLDataFormat(Class<?> unmarshalType, Class<?> jsonView) {
super(unmarshalType, jsonView);
useList();
}
public ListJacksonXMLDataFormat(Class<?> unmarshalType, Class<?> jsonView, boolean enableJaxbAnnotationModule) {
super(unmarshalType, jsonView, enableJaxbAnnotationModule);
useList();
}
public ListJacksonXMLDataFormat(XmlMapper mapper, Class<?> unmarshalType) {
super(mapper, unmarshalType);
useList();
}
public ListJacksonXMLDataFormat(XmlMapper mapper, Class<?> unmarshalType, Class<?> jsonView) {
super(mapper, unmarshalType, jsonView);
useList();
}
}
| ListJacksonXMLDataFormat |
java | apache__maven | compat/maven-model-builder/src/main/java/org/apache/maven/model/profile/activation/FileProfileActivator.java | {
"start": 2055,
"end": 4461
} | class ____ implements ProfileActivator {
@Inject
private ProfileActivationFilePathInterpolator profileActivationFilePathInterpolator;
public FileProfileActivator setProfileActivationFilePathInterpolator(
ProfileActivationFilePathInterpolator profileActivationFilePathInterpolator) {
this.profileActivationFilePathInterpolator = profileActivationFilePathInterpolator;
return this;
}
@Override
public boolean isActive(Profile profile, ProfileActivationContext context, ModelProblemCollector problems) {
Activation activation = profile.getActivation();
if (activation == null) {
return false;
}
ActivationFile file = activation.getFile();
if (file == null) {
return false;
}
String path;
boolean missing;
if (StringUtils.isNotEmpty(file.getExists())) {
path = file.getExists();
missing = false;
} else if (StringUtils.isNotEmpty(file.getMissing())) {
path = file.getMissing();
missing = true;
} else {
return false;
}
try {
path = profileActivationFilePathInterpolator.interpolate(path, context);
} catch (InterpolationException e) {
problems.add(new ModelProblemCollectorRequest(Severity.ERROR, Version.BASE)
.setMessage("Failed to interpolate file location " + path + " for profile " + profile.getId() + ": "
+ e.getMessage())
.setLocation(file.getLocation(missing ? "missing" : "exists"))
.setException(e));
return false;
}
if (path == null) {
return false;
}
File f = new File(path);
if (!f.isAbsolute()) {
return false;
}
boolean fileExists = f.exists();
return missing ? !fileExists : fileExists;
}
@Override
public boolean presentInConfig(Profile profile, ProfileActivationContext context, ModelProblemCollector problems) {
Activation activation = profile.getActivation();
if (activation == null) {
return false;
}
ActivationFile file = activation.getFile();
if (file == null) {
return false;
}
return true;
}
}
| FileProfileActivator |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/internal/Byte2DArraysBaseTest.java | {
"start": 888,
"end": 1106
} | class ____ testing <code>{@link Byte2DArrays}</code>.
* <p>
* Is in <code>org.assertj.core.internal</code> package to be able to set {@link Byte2DArrays#failures} appropriately.
*
* @author Maciej Wajcht
*/
public | for |
java | google__guice | core/src/com/google/inject/internal/AbstractBindingProcessor.java | {
"start": 1258,
"end": 5013
} | class ____ extends AbstractProcessor {
// It's unfortunate that we have to maintain a list of specific
// classes, but we can't easily block the whole package because of
// all our unit tests.
private static final ImmutableSet<Class<?>> FORBIDDEN_TYPES =
ImmutableSet.<Class<?>>of(
AbstractModule.class,
Binder.class,
Binding.class,
Injector.class,
Key.class,
MembersInjector.class,
Module.class,
Provider.class,
Scope.class,
Stage.class,
TypeLiteral.class);
protected final ProcessedBindingData processedBindingData;
AbstractBindingProcessor(Errors errors, ProcessedBindingData processedBindingData) {
super(errors);
this.processedBindingData = processedBindingData;
}
protected <T> UntargettedBindingImpl<T> invalidBinding(
InjectorImpl injector, Key<T> key, Object source) {
return new UntargettedBindingImpl<T>(injector, key, source);
}
protected void putBinding(BindingImpl<?> binding) {
Key<?> key = binding.getKey();
Class<?> rawType = key.getTypeLiteral().getRawType();
if (FORBIDDEN_TYPES.contains(rawType)) {
errors.cannotBindToGuiceType(rawType.getSimpleName());
return;
}
BindingImpl<?> original = injector.getExistingBinding(key);
if (original != null) {
// If it failed because of an explicit duplicate binding...
if (injector.getBindingData().getExplicitBinding(key) != null) {
try {
if (!isOkayDuplicate(original, binding, injector.getBindingData())) {
errors.bindingAlreadySet(binding, original);
return;
}
} catch (Throwable t) {
errors.errorCheckingDuplicateBinding(key, original.getSource(), t);
return;
}
} else {
// Otherwise, it failed because of a duplicate JIT binding
// in the parent
errors.jitBindingAlreadySet(key);
return;
}
}
// prevent the parent from creating a JIT binding for this key
injector
.getJitBindingData()
.banKeyInParent(key, injector.getBindingData(), binding.getSource());
injector.getBindingData().putBinding(key, binding);
}
/**
* We tolerate duplicate bindings if one exposes the other or if the two bindings are considered
* duplicates (see {@link Bindings#areDuplicates(BindingImpl, BindingImpl)}.
*
* @param original the binding in the parent injector (candidate for an exposing binding)
* @param binding the binding to check (candidate for the exposed binding)
*/
private static boolean isOkayDuplicate(
BindingImpl<?> original, BindingImpl<?> binding, InjectorBindingData bindingData) {
if (original instanceof ExposedBindingImpl) {
ExposedBindingImpl<?> exposed = (ExposedBindingImpl<?>) original;
InjectorImpl exposedFrom = (InjectorImpl) exposed.getPrivateElements().getInjector();
return (exposedFrom == binding.getInjector());
} else {
original = (BindingImpl<?>) bindingData.getExplicitBindingsThisLevel().get(binding.getKey());
// If no original at this level, the original was on a parent, and we don't
// allow deduplication between parents & children.
if (original == null) {
return false;
} else {
return original.equals(binding);
}
}
}
private <T> void validateKey(Object source, Key<T> key) {
Annotations.checkForMisplacedScopeAnnotations(
key.getTypeLiteral().getRawType(), source, errors);
}
/**
* Processor for visiting bindings. Each overridden method that wants to actually process the
* binding should call prepareBinding first.
*/
abstract | AbstractBindingProcessor |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/UngroupedOverloadsTest.java | {
"start": 21434,
"end": 21897
} | class ____ {
void foo() {
System.err.println();
}
void bar() {
System.err.println();
}
void foo(int x) {
System.err.println();
}
void bar(int x) {
System.err.println();
}
}
""")
.addOutputLines(
"out/Test.java",
"""
| Test |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/MustBeClosedCheckerTest.java | {
"start": 9114,
"end": 9253
} | class ____ {
void bar() {}
@MustBeClosed
Closeable mustBeClosedAnnotatedMethod() {
return new Closeable();
}
}
| Foo |
java | quarkusio__quarkus | integration-tests/jpa-oracle/src/main/java/io/quarkus/example/jpaoracle/LdapUrlTestEndpoint.java | {
"start": 402,
"end": 991
} | class ____ {
private final Logger LOG = Logger.getLogger(LdapUrlTestEndpoint.class.getName());
@Inject
@DataSource("ldap")
AgroalDataSource ds;
@GET
public String test() throws SQLException {
try {
ds.getConnection().close();
} catch (SQLException e) {
LOG.info("received exception: " + e);
if (e.toString().contains("java.net.UnknownHostException: oid")) {
return "OK";
}
throw e;
}
return "KO: did not get expected exception";
}
}
| LdapUrlTestEndpoint |
java | apache__avro | lang/java/mapred/src/main/java/org/apache/avro/mapred/AvroReducer.java | {
"start": 1394,
"end": 2275
} | class ____<K, V, OUT> extends Configured implements JobConfigurable, Closeable {
private Pair<K, V> outputPair;
/**
* Called with all map output values with a given key. By default, pairs key
* with each value, collecting {@link Pair} instances.
*/
@SuppressWarnings("unchecked")
public void reduce(K key, Iterable<V> values, AvroCollector<OUT> collector, Reporter reporter) throws IOException {
if (outputPair == null)
outputPair = new Pair<>(AvroJob.getOutputSchema(getConf()));
for (V value : values) {
outputPair.set(key, value);
collector.collect((OUT) outputPair);
}
}
/** Subclasses can override this as desired. */
@Override
public void close() throws IOException {
// no op
}
/** Subclasses can override this as desired. */
@Override
public void configure(JobConf jobConf) {
// no op
}
}
| AvroReducer |
java | grpc__grpc-java | android-interop-testing/src/generated/debug/grpc/io/grpc/testing/integration/ReconnectServiceGrpc.java | {
"start": 229,
"end": 6785
} | class ____ {
private ReconnectServiceGrpc() {}
public static final java.lang.String SERVICE_NAME = "grpc.testing.ReconnectService";
// Static method descriptors that strictly reflect the proto.
private static volatile io.grpc.MethodDescriptor<io.grpc.testing.integration.Messages.ReconnectParams,
io.grpc.testing.integration.EmptyProtos.Empty> getStartMethod;
@io.grpc.stub.annotations.RpcMethod(
fullMethodName = SERVICE_NAME + '/' + "Start",
requestType = io.grpc.testing.integration.Messages.ReconnectParams.class,
responseType = io.grpc.testing.integration.EmptyProtos.Empty.class,
methodType = io.grpc.MethodDescriptor.MethodType.UNARY)
public static io.grpc.MethodDescriptor<io.grpc.testing.integration.Messages.ReconnectParams,
io.grpc.testing.integration.EmptyProtos.Empty> getStartMethod() {
io.grpc.MethodDescriptor<io.grpc.testing.integration.Messages.ReconnectParams, io.grpc.testing.integration.EmptyProtos.Empty> getStartMethod;
if ((getStartMethod = ReconnectServiceGrpc.getStartMethod) == null) {
synchronized (ReconnectServiceGrpc.class) {
if ((getStartMethod = ReconnectServiceGrpc.getStartMethod) == null) {
ReconnectServiceGrpc.getStartMethod = getStartMethod =
io.grpc.MethodDescriptor.<io.grpc.testing.integration.Messages.ReconnectParams, io.grpc.testing.integration.EmptyProtos.Empty>newBuilder()
.setType(io.grpc.MethodDescriptor.MethodType.UNARY)
.setFullMethodName(generateFullMethodName(SERVICE_NAME, "Start"))
.setSampledToLocalTracing(true)
.setRequestMarshaller(io.grpc.protobuf.lite.ProtoLiteUtils.marshaller(
io.grpc.testing.integration.Messages.ReconnectParams.getDefaultInstance()))
.setResponseMarshaller(io.grpc.protobuf.lite.ProtoLiteUtils.marshaller(
io.grpc.testing.integration.EmptyProtos.Empty.getDefaultInstance()))
.build();
}
}
}
return getStartMethod;
}
private static volatile io.grpc.MethodDescriptor<io.grpc.testing.integration.EmptyProtos.Empty,
io.grpc.testing.integration.Messages.ReconnectInfo> getStopMethod;
@io.grpc.stub.annotations.RpcMethod(
fullMethodName = SERVICE_NAME + '/' + "Stop",
requestType = io.grpc.testing.integration.EmptyProtos.Empty.class,
responseType = io.grpc.testing.integration.Messages.ReconnectInfo.class,
methodType = io.grpc.MethodDescriptor.MethodType.UNARY)
public static io.grpc.MethodDescriptor<io.grpc.testing.integration.EmptyProtos.Empty,
io.grpc.testing.integration.Messages.ReconnectInfo> getStopMethod() {
io.grpc.MethodDescriptor<io.grpc.testing.integration.EmptyProtos.Empty, io.grpc.testing.integration.Messages.ReconnectInfo> getStopMethod;
if ((getStopMethod = ReconnectServiceGrpc.getStopMethod) == null) {
synchronized (ReconnectServiceGrpc.class) {
if ((getStopMethod = ReconnectServiceGrpc.getStopMethod) == null) {
ReconnectServiceGrpc.getStopMethod = getStopMethod =
io.grpc.MethodDescriptor.<io.grpc.testing.integration.EmptyProtos.Empty, io.grpc.testing.integration.Messages.ReconnectInfo>newBuilder()
.setType(io.grpc.MethodDescriptor.MethodType.UNARY)
.setFullMethodName(generateFullMethodName(SERVICE_NAME, "Stop"))
.setSampledToLocalTracing(true)
.setRequestMarshaller(io.grpc.protobuf.lite.ProtoLiteUtils.marshaller(
io.grpc.testing.integration.EmptyProtos.Empty.getDefaultInstance()))
.setResponseMarshaller(io.grpc.protobuf.lite.ProtoLiteUtils.marshaller(
io.grpc.testing.integration.Messages.ReconnectInfo.getDefaultInstance()))
.build();
}
}
}
return getStopMethod;
}
/**
* Creates a new async stub that supports all call types for the service
*/
public static ReconnectServiceStub newStub(io.grpc.Channel channel) {
io.grpc.stub.AbstractStub.StubFactory<ReconnectServiceStub> factory =
new io.grpc.stub.AbstractStub.StubFactory<ReconnectServiceStub>() {
@java.lang.Override
public ReconnectServiceStub newStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
return new ReconnectServiceStub(channel, callOptions);
}
};
return ReconnectServiceStub.newStub(factory, channel);
}
/**
* Creates a new blocking-style stub that supports all types of calls on the service
*/
public static ReconnectServiceBlockingV2Stub newBlockingV2Stub(
io.grpc.Channel channel) {
io.grpc.stub.AbstractStub.StubFactory<ReconnectServiceBlockingV2Stub> factory =
new io.grpc.stub.AbstractStub.StubFactory<ReconnectServiceBlockingV2Stub>() {
@java.lang.Override
public ReconnectServiceBlockingV2Stub newStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
return new ReconnectServiceBlockingV2Stub(channel, callOptions);
}
};
return ReconnectServiceBlockingV2Stub.newStub(factory, channel);
}
/**
* Creates a new blocking-style stub that supports unary and streaming output calls on the service
*/
public static ReconnectServiceBlockingStub newBlockingStub(
io.grpc.Channel channel) {
io.grpc.stub.AbstractStub.StubFactory<ReconnectServiceBlockingStub> factory =
new io.grpc.stub.AbstractStub.StubFactory<ReconnectServiceBlockingStub>() {
@java.lang.Override
public ReconnectServiceBlockingStub newStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
return new ReconnectServiceBlockingStub(channel, callOptions);
}
};
return ReconnectServiceBlockingStub.newStub(factory, channel);
}
/**
* Creates a new ListenableFuture-style stub that supports unary calls on the service
*/
public static ReconnectServiceFutureStub newFutureStub(
io.grpc.Channel channel) {
io.grpc.stub.AbstractStub.StubFactory<ReconnectServiceFutureStub> factory =
new io.grpc.stub.AbstractStub.StubFactory<ReconnectServiceFutureStub>() {
@java.lang.Override
public ReconnectServiceFutureStub newStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
return new ReconnectServiceFutureStub(channel, callOptions);
}
};
return ReconnectServiceFutureStub.newStub(factory, channel);
}
/**
* <pre>
* A service used to control reconnect server.
* </pre>
*/
public | ReconnectServiceGrpc |
java | alibaba__nacos | config/src/main/java/com/alibaba/nacos/config/server/service/capacity/CapacityService.java | {
"start": 1789,
"end": 23036
} | class ____ {
private static final Logger LOGGER = LoggerFactory.getLogger(CapacityService.class);
private static final Integer ZERO = 0;
private static final int INIT_PAGE_SIZE = 500;
@Autowired
private GroupCapacityPersistService groupCapacityPersistService;
@Autowired
private TenantCapacityPersistService tenantCapacityPersistService;
@Autowired
private ConfigInfoPersistService configInfoPersistService;
/**
* Init.
*/
@PostConstruct
@SuppressWarnings("PMD.ThreadPoolCreationRule")
public void init() {
// All servers have jobs that modify usage, idempotent.
ConfigExecutor.scheduleCorrectUsageTask(() -> {
LOGGER.info("[capacityManagement] start correct usage");
StopWatch watch = new StopWatch();
watch.start();
correctUsage();
watch.stop();
LOGGER.info("[capacityManagement] end correct usage, cost: {}s", watch.getTotalTimeSeconds());
}, PropertyUtil.getCorrectUsageDelay(), PropertyUtil.getCorrectUsageDelay(), TimeUnit.SECONDS);
}
public void correctUsage() {
correctGroupUsage();
correctTenantUsage();
}
/**
* Correct the usage of group capacity.
*/
private void correctGroupUsage() {
long lastId = 0;
int pageSize = 100;
while (true) {
List<GroupCapacity> groupCapacityList = groupCapacityPersistService
.getCapacityList4CorrectUsage(lastId, pageSize);
if (groupCapacityList.isEmpty()) {
break;
}
lastId = groupCapacityList.get(groupCapacityList.size() - 1).getId();
for (GroupCapacity groupCapacity : groupCapacityList) {
String group = groupCapacity.getGroupName();
groupCapacityPersistService.correctUsage(group, TimeUtils.getCurrentTime());
}
try {
Thread.sleep(100);
} catch (InterruptedException e) {
// ignore
// set the interrupted flag
Thread.currentThread().interrupt();
}
}
}
public void correctGroupUsage(String group) {
groupCapacityPersistService.correctUsage(group, TimeUtils.getCurrentTime());
}
public void correctTenantUsage(String tenant) {
tenantCapacityPersistService.correctUsage(tenant, TimeUtils.getCurrentTime());
}
/**
* Correct the usage of group capacity.
*/
private void correctTenantUsage() {
long lastId = 0;
int pageSize = 100;
while (true) {
List<NamespaceCapacity> tenantCapacityList = tenantCapacityPersistService
.getCapacityList4CorrectUsage(lastId, pageSize);
if (tenantCapacityList.isEmpty()) {
break;
}
lastId = tenantCapacityList.get(tenantCapacityList.size() - 1).getId();
try {
Thread.sleep(100);
} catch (InterruptedException ignored) {
}
for (NamespaceCapacity tenantCapacity : tenantCapacityList) {
String tenant = tenantCapacity.getNamespaceId();
tenantCapacityPersistService.correctUsage(tenant, TimeUtils.getCurrentTime());
}
}
}
public void initAllCapacity() {
initAllCapacity(false);
initAllCapacity(true);
}
private void initAllCapacity(boolean isTenant) {
int page = 1;
while (true) {
List<String> list;
if (isTenant) {
list = configInfoPersistService.getTenantIdList(page, INIT_PAGE_SIZE);
} else {
list = configInfoPersistService.getGroupIdList(page, INIT_PAGE_SIZE);
}
for (String targetId : list) {
if (isTenant) {
insertTenantCapacity(targetId);
autoExpansion(null, targetId);
} else {
insertGroupCapacity(targetId);
autoExpansion(targetId, null);
}
}
if (list.size() < INIT_PAGE_SIZE) {
break;
}
try {
Thread.sleep(100);
} catch (InterruptedException ignored) {
}
++page;
}
}
/**
* To Cluster. 1.If the capacity information does not exist, initialize the capacity information. 2.Update capacity
* usage, plus or minus one.
*
* @param counterMode increase or decrease mode.
* @param ignoreQuotaLimit ignoreQuotaLimit flag.
* @return the result of update cluster usage.
*/
public boolean insertAndUpdateClusterUsage(CounterMode counterMode, boolean ignoreQuotaLimit) {
Capacity capacity = groupCapacityPersistService.getClusterCapacity();
if (capacity == null) {
insertGroupCapacity(GroupCapacityPersistService.CLUSTER);
}
return updateGroupUsage(counterMode, GroupCapacityPersistService.CLUSTER, PropertyUtil.getDefaultClusterQuota(),
ignoreQuotaLimit);
}
public boolean updateClusterUsage(CounterMode counterMode) {
return updateGroupUsage(counterMode, GroupCapacityPersistService.CLUSTER, PropertyUtil.getDefaultClusterQuota(),
false);
}
/**
* It is used for counting when the limit check function of capacity management is turned off. 1.If the capacity
* information does not exist, initialize the capacity information. 2.Update capacity usage, plus or minus one.
*
* @param counterMode increase or decrease mode.
* @param group tenant string value.
* @param ignoreQuotaLimit ignoreQuotaLimit flag.
* @return operate successfully or not.
*/
public boolean insertAndUpdateGroupUsage(CounterMode counterMode, String group, boolean ignoreQuotaLimit) {
GroupCapacity groupCapacity = getGroupCapacity(group);
if (groupCapacity == null) {
initGroupCapacity(group, null, null, null, null);
}
return updateGroupUsage(counterMode, group, PropertyUtil.getDefaultGroupQuota(), ignoreQuotaLimit);
}
public boolean updateGroupUsage(CounterMode counterMode, String group) {
return updateGroupUsage(counterMode, group, PropertyUtil.getDefaultGroupQuota(), false);
}
private boolean updateGroupUsage(CounterMode counterMode, String group, int defaultQuota,
boolean ignoreQuotaLimit) {
final Timestamp now = TimeUtils.getCurrentTime();
GroupCapacity groupCapacity = new GroupCapacity();
groupCapacity.setGroupName(group);
groupCapacity.setQuota(defaultQuota);
groupCapacity.setGmtModified(now);
if (CounterMode.INCREMENT == counterMode) {
if (ignoreQuotaLimit) {
return groupCapacityPersistService.incrementUsage(groupCapacity);
}
// First update the quota according to the default value. In most cases, it is the default value.
// The quota field in the default value table is 0
return groupCapacityPersistService.incrementUsageWithDefaultQuotaLimit(groupCapacity)
|| groupCapacityPersistService.incrementUsageWithQuotaLimit(groupCapacity);
}
return groupCapacityPersistService.decrementUsage(groupCapacity);
}
public GroupCapacity getGroupCapacity(String group) {
return groupCapacityPersistService.getGroupCapacity(group);
}
/**
* Initialize the capacity information of the group. If the quota is reached, the capacity will be automatically
* expanded to reduce the operation and maintenance cost.
*
* @param group group string value.
* @return init result.
*/
public boolean initGroupCapacity(String group) {
return initGroupCapacity(group, null, null, null, null);
}
/**
* Initialize the capacity information of the group. If the quota is reached, the capacity will be automatically
* expanded to reduce the operation and maintenance cost.
*
* @param group group string value.
* @param quota quota int value.
* @param maxSize maxSize int value.
* @param maxAggrCount maxAggrCount int value.
* @param maxAggrSize maxAggrSize int value.
* @return init result.
*/
private boolean initGroupCapacity(String group, Integer quota, Integer maxSize, Integer maxAggrCount,
Integer maxAggrSize) {
boolean insertSuccess = insertGroupCapacity(group, quota, maxSize, maxAggrCount, maxAggrSize);
if (quota == null) {
autoExpansion(group, null);
}
return insertSuccess;
}
/**
* Expand capacity automatically.
*
* @param group group string value.
* @param tenant tenant string value.
*/
private void autoExpansion(String group, String tenant) {
Capacity capacity = getCapacity(group, tenant);
int defaultQuota = getDefaultQuota(tenant != null);
Integer usage = capacity.getUsage();
if (usage < defaultQuota) {
return;
}
// Initialize the capacity information of the group. If the quota is reached,
// the capacity will be automatically expanded to reduce the operation and maintenance cost.
int initialExpansionPercent = PropertyUtil.getInitialExpansionPercent();
if (initialExpansionPercent > 0) {
int finalQuota = (int) (usage + defaultQuota * (1.0 * initialExpansionPercent / 100));
if (tenant != null) {
tenantCapacityPersistService.updateQuota(tenant, finalQuota);
LogUtil.DEFAULT_LOG.warn("[capacityManagement] The usage({}) already reach the upper limit({}) when init the tenant({}), "
+ "automatic upgrade to ({})", usage, defaultQuota, tenant, finalQuota);
} else {
groupCapacityPersistService.updateQuota(group, finalQuota);
LogUtil.DEFAULT_LOG.warn("[capacityManagement] The usage({}) already reach the upper limit({}) when init the group({}), "
+ "automatic upgrade to ({})", usage, defaultQuota, group, finalQuota);
}
}
}
private int getDefaultQuota(boolean isTenant) {
if (isTenant) {
return PropertyUtil.getDefaultTenantQuota();
}
return PropertyUtil.getDefaultGroupQuota();
}
public Capacity getCapacity(String group, String tenant) {
if (tenant != null) {
return getTenantCapacity(tenant);
}
return getGroupCapacity(group);
}
public Capacity getCapacityWithDefault(String group, String tenant) {
Capacity capacity;
boolean isTenant = StringUtils.isNotBlank(tenant);
if (isTenant) {
capacity = getTenantCapacity(tenant);
} else {
capacity = getGroupCapacity(group);
}
if (capacity == null) {
return null;
}
Integer quota = capacity.getQuota();
if (quota == 0) {
if (isTenant) {
capacity.setQuota(PropertyUtil.getDefaultTenantQuota());
} else {
if (GroupCapacityPersistService.CLUSTER.equals(group)) {
capacity.setQuota(PropertyUtil.getDefaultClusterQuota());
} else {
capacity.setQuota(PropertyUtil.getDefaultGroupQuota());
}
}
}
Integer maxSize = capacity.getMaxSize();
if (maxSize == 0) {
capacity.setMaxSize(PropertyUtil.getDefaultMaxSize());
}
Integer maxAggrCount = capacity.getMaxAggrCount();
if (maxAggrCount == 0) {
capacity.setMaxAggrCount(PropertyUtil.getDefaultMaxAggrCount());
}
Integer maxAggrSize = capacity.getMaxAggrSize();
if (maxAggrSize == 0) {
capacity.setMaxAggrSize(PropertyUtil.getDefaultMaxAggrSize());
}
return capacity;
}
/**
* Init capacity.
*
* @param group group string value.
* @param tenant tenant string value.
* @return init result.
*/
public boolean initCapacity(String group, String tenant) {
if (StringUtils.isNotBlank(tenant)) {
return initTenantCapacity(tenant);
}
if (GroupCapacityPersistService.CLUSTER.equals(group)) {
return insertGroupCapacity(GroupCapacityPersistService.CLUSTER);
}
// Group can expand capacity automatically.
return initGroupCapacity(group);
}
private boolean insertGroupCapacity(String group) {
return insertGroupCapacity(group, null, null, null, null);
}
private boolean insertGroupCapacity(String group, Integer quota, Integer maxSize, Integer maxAggrCount,
Integer maxAggrSize) {
try {
final Timestamp now = TimeUtils.getCurrentTime();
GroupCapacity groupCapacity = new GroupCapacity();
groupCapacity.setGroupName(group);
// When adding a new quota, quota = 0 means that the quota is the default value.
// In order to update the default quota, only the Nacos configuration needs to be modified,
// and most of the data in the table need not be updated.
groupCapacity.setQuota(quota == null ? ZERO : quota);
// When adding new data, maxsize = 0 means that the size is the default value.
// In order to update the default size, you only need to modify the Nacos configuration without updating most of the data in the table.
groupCapacity.setMaxSize(maxSize == null ? ZERO : maxSize);
groupCapacity.setMaxAggrCount(maxAggrCount == null ? ZERO : maxAggrCount);
groupCapacity.setMaxAggrSize(maxAggrSize == null ? ZERO : maxAggrSize);
groupCapacity.setGmtCreate(now);
groupCapacity.setGmtModified(now);
return groupCapacityPersistService.insertGroupCapacity(groupCapacity);
} catch (DuplicateKeyException e) {
// this exception will meet when concurrent insert,ignore it
LogUtil.DEFAULT_LOG.warn("group: {}, message: {}", group, e.getMessage());
}
return false;
}
/**
* It is used for counting when the limit check function of capacity management is turned off. 1.If the capacity
* information does not exist, initialize the capacity information. 2.Update capacity usage, plus or minus one.
*
* @param counterMode increase or decrease mode.
* @param tenant tenant string value.
* @param ignoreQuotaLimit ignoreQuotaLimit flag.
* @return operate successfully or not.
*/
public boolean insertAndUpdateTenantUsage(CounterMode counterMode, String tenant, boolean ignoreQuotaLimit) {
NamespaceCapacity tenantCapacity = getTenantCapacity(tenant);
if (tenantCapacity == null) {
// Init capacity information.
initTenantCapacity(tenant);
}
return updateTenantUsage(counterMode, tenant, ignoreQuotaLimit);
}
private boolean updateTenantUsage(CounterMode counterMode, String tenant, boolean ignoreQuotaLimit) {
final Timestamp now = TimeUtils.getCurrentTime();
NamespaceCapacity tenantCapacity = new NamespaceCapacity();
tenantCapacity.setNamespaceId(tenant);
tenantCapacity.setQuota(PropertyUtil.getDefaultTenantQuota());
tenantCapacity.setGmtModified(now);
if (CounterMode.INCREMENT == counterMode) {
if (ignoreQuotaLimit) {
return tenantCapacityPersistService.incrementUsage(tenantCapacity);
}
// First update the quota according to the default value. In most cases, it is the default value.
// The quota field in the default value table is 0.
return tenantCapacityPersistService.incrementUsageWithDefaultQuotaLimit(tenantCapacity)
|| tenantCapacityPersistService.incrementUsageWithQuotaLimit(tenantCapacity);
}
return tenantCapacityPersistService.decrementUsage(tenantCapacity);
}
public boolean updateTenantUsage(CounterMode counterMode, String tenant) {
return updateTenantUsage(counterMode, tenant, false);
}
/**
* Initialize the capacity information of the tenant. If the quota is reached, the capacity will be automatically
* expanded to reduce the operation and maintenance cos.
*
* @param tenant tenant string value.
* @return init result.
*/
public boolean initTenantCapacity(String tenant) {
return initTenantCapacity(tenant, null, null, null, null);
}
/**
* Initialize the capacity information of the tenant. If the quota is reached, the capacity will be automatically
* expanded to reduce the operation and maintenance cost
*
* @param tenant tenant string value.
* @param quota quota int value.
* @param maxSize maxSize int value.
* @param maxAggrCount maxAggrCount int value.
* @param maxAggrSize maxAggrSize int value.
* @return
*/
public boolean initTenantCapacity(String tenant, Integer quota, Integer maxSize, Integer maxAggrCount,
Integer maxAggrSize) {
boolean insertSuccess = insertTenantCapacity(tenant, quota, maxSize, maxAggrCount, maxAggrSize);
if (quota != null) {
return insertSuccess;
}
autoExpansion(null, tenant);
return insertSuccess;
}
private boolean insertTenantCapacity(String tenant) {
return insertTenantCapacity(tenant, null, null, null, null);
}
private boolean insertTenantCapacity(String tenant, Integer quota, Integer maxSize, Integer maxAggrCount,
Integer maxAggrSize) {
try {
final Timestamp now = TimeUtils.getCurrentTime();
NamespaceCapacity tenantCapacity = new NamespaceCapacity();
tenantCapacity.setNamespaceId(tenant);
// When adding a new quota, quota = 0 means that the quota is the default value.
// In order to update the default quota, only the Nacos configuration needs to be modified,
// and most of the data in the table need not be updated.
tenantCapacity.setQuota(quota == null ? ZERO : quota);
// When adding new data, maxsize = 0 means that the size is the default value.
// In order to update the default size, you only need to modify the Nacos configuration without updating most of the data in the table.
tenantCapacity.setMaxSize(maxSize == null ? ZERO : maxSize);
tenantCapacity.setMaxAggrCount(maxAggrCount == null ? ZERO : maxAggrCount);
tenantCapacity.setMaxAggrSize(maxAggrSize == null ? ZERO : maxAggrSize);
tenantCapacity.setGmtCreate(now);
tenantCapacity.setGmtModified(now);
return tenantCapacityPersistService.insertTenantCapacity(tenantCapacity);
} catch (DuplicateKeyException e) {
// this exception will meet when concurrent insert,ignore it
LogUtil.DEFAULT_LOG.warn("tenant: {}, message: {}", tenant, e.getMessage());
}
return false;
}
public NamespaceCapacity getTenantCapacity(String tenant) {
return tenantCapacityPersistService.getTenantCapacity(tenant);
}
/**
* Support for API interface, Tenant: initialize if the record does not exist, and update the capacity quota or
* content size directly if it exists.
*
* @param group group string value.
* @param tenant tenant string value.
* @param quota quota int value.
* @param maxSize maxSize int value.
* @param maxAggrCount maxAggrCount int value.
* @param maxAggrSize maxAggrSize int value.
* @return operate successfully or not.
*/
public boolean insertOrUpdateCapacity(String group, String tenant, Integer quota, Integer maxSize,
Integer maxAggrCount, Integer maxAggrSize) {
if (StringUtils.isNotBlank(tenant)) {
Capacity capacity = tenantCapacityPersistService.getTenantCapacity(tenant);
if (capacity == null) {
return initTenantCapacity(tenant, quota, maxSize, maxAggrCount, maxAggrSize);
}
return tenantCapacityPersistService.updateTenantCapacity(tenant, quota, maxSize, maxAggrCount, maxAggrSize);
}
Capacity capacity = groupCapacityPersistService.getGroupCapacity(group);
if (capacity == null) {
return initGroupCapacity(group, quota, maxSize, maxAggrCount, maxAggrSize);
}
return groupCapacityPersistService.updateGroupCapacity(group, quota, maxSize, maxAggrCount, maxAggrSize);
}
}
| CapacityService |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/sql/test/TestUtils.java | {
"start": 1161,
"end": 6349
} | class ____ {
public static String outputOracle(List<SQLStatement> stmtList) {
StringBuilder out = new StringBuilder();
OracleOutputVisitor visitor = new OracleOutputVisitor(out);
for (SQLStatement stmt : stmtList) {
stmt.accept(visitor);
}
return out.toString();
}
public static String outputPg(List<SQLStatement> stmtList) {
StringBuilder out = new StringBuilder();
PGOutputVisitor visitor = new PGOutputVisitor(out);
for (SQLStatement stmt : stmtList) {
stmt.accept(visitor);
}
return out.toString();
}
public static String outputSqlServer(List<SQLStatement> stmtList) {
StringBuilder out = new StringBuilder();
SQLServerOutputVisitor visitor = new SQLServerOutputVisitor(out);
for (SQLStatement stmt : stmtList) {
stmt.accept(visitor);
}
return out.toString();
}
public static String outputOracle(SQLStatement... stmtList) {
return outputOracle(Arrays.asList(stmtList));
}
public static String outputSqlServer(SQLStatement... stmtList) {
return outputSqlServer(Arrays.asList(stmtList));
}
public static String output(SQLStatement... stmtList) {
return output(Arrays.asList(stmtList));
}
public static String output(List<SQLStatement> stmtList) {
StringBuilder out = new StringBuilder();
SQLASTOutputVisitor visitor = new SQLASTOutputVisitor(out);
for (SQLStatement stmt : stmtList) {
stmt.accept(visitor);
}
return out.toString();
}
public static long getYoungGC() {
try {
// java.lang:type=GarbageCollector,name=G1 Young Generation
// java.lang:type=GarbageCollector,name=G1 Old Generation
MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
ObjectName objectName;
if (mbeanServer.isRegistered(new ObjectName("java.lang:type=GarbageCollector,name=ParNew"))) {
objectName = new ObjectName("java.lang:type=GarbageCollector,name=ParNew");
} else if (mbeanServer.isRegistered(new ObjectName("java.lang:type=GarbageCollector,name=Copy"))) {
objectName = new ObjectName("java.lang:type=GarbageCollector,name=Copy");
} else if (mbeanServer.isRegistered(new ObjectName("java.lang:type=GarbageCollector,name=G1 Young Generation"))) {
objectName = new ObjectName("java.lang:type=GarbageCollector,name=G1 Young Generation");
} else {
objectName = new ObjectName("java.lang:type=GarbageCollector,name=PS Scavenge");
}
return (Long) mbeanServer.getAttribute(objectName, "CollectionCount");
} catch (Exception e) {
throw new RuntimeException("error");
}
}
public static long getYoungGCTime() {
try {
MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
ObjectName objectName;
if (mbeanServer.isRegistered(new ObjectName("java.lang:type=GarbageCollector,name=ParNew"))) {
objectName = new ObjectName("java.lang:type=GarbageCollector,name=ParNew");
} else if (mbeanServer.isRegistered(new ObjectName("java.lang:type=GarbageCollector,name=Copy"))) {
objectName = new ObjectName("java.lang:type=GarbageCollector,name=Copy");
} else if (mbeanServer.isRegistered(new ObjectName("java.lang:type=GarbageCollector,name=G1 Young Generation"))) {
objectName = new ObjectName("java.lang:type=GarbageCollector,name=G1 Young Generation");
} else {
objectName = new ObjectName("java.lang:type=GarbageCollector,name=PS Scavenge");
}
return (Long) mbeanServer.getAttribute(objectName, "CollectionTime");
} catch (Exception e) {
throw new RuntimeException("error", e);
}
}
public static long getFullGC() {
try {
MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
ObjectName objectName;
if (mbeanServer.isRegistered(new ObjectName("java.lang:type=GarbageCollector,name=ConcurrentMarkSweep"))) {
objectName = new ObjectName("java.lang:type=GarbageCollector,name=ConcurrentMarkSweep");
} else if (mbeanServer.isRegistered(new ObjectName("java.lang:type=GarbageCollector,name=MarkSweepCompact"))) {
objectName = new ObjectName("java.lang:type=GarbageCollector,name=MarkSweepCompact");
} else if (mbeanServer.isRegistered(new ObjectName("java.lang:type=GarbageCollector,name=G1 Old Generation"))) {
objectName = new ObjectName("java.lang:type=GarbageCollector,name=G1 Old Generation");
} else {
objectName = new ObjectName("java.lang:type=GarbageCollector,name=PS MarkSweep");
}
return (Long) mbeanServer.getAttribute(objectName, "CollectionCount");
} catch (Exception e) {
throw new RuntimeException("error");
}
}
}
| TestUtils |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/cdi/bcextensions/ChangeQualifierTest.java | {
"start": 943,
"end": 1462
} | class ____ {
@RegisterExtension
public ArcTestContainer container = ArcTestContainer.builder()
.beanClasses(MyQualifier.class, MyService.class, MyServiceConsumer.class)
.buildCompatibleExtensions(new MyExtension())
.build();
@Test
public void test() {
MyServiceConsumer myServiceConsumer = Arc.container().select(MyServiceConsumer.class).get();
assertTrue(myServiceConsumer.myService instanceof MyBarService);
}
public static | ChangeQualifierTest |
java | alibaba__nacos | core/src/main/java/com/alibaba/nacos/core/distributed/distro/exception/DistroException.java | {
"start": 739,
"end": 1166
} | class ____ extends RuntimeException {
private static final long serialVersionUID = 1711141952413139786L;
public DistroException(String message) {
super(message);
}
public DistroException(String message, Throwable cause) {
super(message, cause);
}
@Override
public String getMessage() {
return "[DISTRO-EXCEPTION]" + super.getMessage();
}
}
| DistroException |
java | apache__rocketmq | store/src/main/java/org/apache/rocketmq/store/queue/QueueOffsetOperator.java | {
"start": 1436,
"end": 5936
} | class ____ {
private static final Logger log = LoggerFactory.getLogger(LoggerName.STORE_LOGGER_NAME);
private ConcurrentMap<String, Long> topicQueueTable = new ConcurrentHashMap<>(1024);
private ConcurrentMap<String, Long> batchTopicQueueTable = new ConcurrentHashMap<>(1024);
/**
* {TOPIC}-{QUEUE_ID} --> NEXT Consume Queue Offset
*/
private ConcurrentMap<String/* topic-queue-id */, Long/* offset */> lmqTopicQueueTable = new ConcurrentHashMap<>(1024);
public long getQueueOffset(String topicQueueKey) {
return ConcurrentHashMapUtils.computeIfAbsent(this.topicQueueTable, topicQueueKey, k -> 0L);
}
public Long getTopicQueueNextOffset(String topicQueueKey) {
return this.topicQueueTable.get(topicQueueKey);
}
public void increaseQueueOffset(String topicQueueKey, short messageNum) {
Long queueOffset = ConcurrentHashMapUtils.computeIfAbsent(this.topicQueueTable, topicQueueKey, k -> 0L);
topicQueueTable.put(topicQueueKey, queueOffset + messageNum);
}
public void updateQueueOffset(String topicQueueKey, long offset) {
this.topicQueueTable.put(topicQueueKey, offset);
}
public long getBatchQueueOffset(String topicQueueKey) {
return ConcurrentHashMapUtils.computeIfAbsent(this.batchTopicQueueTable, topicQueueKey, k -> 0L);
}
public void increaseBatchQueueOffset(String topicQueueKey, short messageNum) {
Long batchQueueOffset = ConcurrentHashMapUtils.computeIfAbsent(this.batchTopicQueueTable, topicQueueKey, k -> 0L);
this.batchTopicQueueTable.put(topicQueueKey, batchQueueOffset + messageNum);
}
public long getLmqOffset(String topic, int queueId, OffsetInitializer callback) throws ConsumeQueueException {
Preconditions.checkNotNull(callback, "ConsumeQueueOffsetCallback cannot be null");
String topicQueue = topic + "-" + queueId;
if (!lmqTopicQueueTable.containsKey(topicQueue)) {
// Load from RocksDB on cache miss.
Long prev = lmqTopicQueueTable.putIfAbsent(topicQueue, callback.maxConsumeQueueOffset(topic, queueId));
if (null != prev) {
log.error("[BUG] Data racing, lmqTopicQueueTable should NOT contain key={}", topicQueue);
}
}
return lmqTopicQueueTable.get(topicQueue);
}
public void increaseLmqOffset(String topic, int queueId, short delta) throws ConsumeQueueException {
String topicQueue = topic + "-" + queueId;
if (!this.lmqTopicQueueTable.containsKey(topicQueue)) {
throw new ConsumeQueueException(String.format("Max offset of Queue[name=%s, id=%d] should have existed", topic, queueId));
}
long prev = lmqTopicQueueTable.get(topicQueue);
this.lmqTopicQueueTable.compute(topicQueue, (k, offset) -> offset + delta);
long current = lmqTopicQueueTable.get(topicQueue);
log.debug("Max offset of LMQ[{}:{}] increased: {} --> {}", topic, queueId, prev, current);
}
public long currentQueueOffset(String topicQueueKey) {
Long currentQueueOffset = this.topicQueueTable.get(topicQueueKey);
return currentQueueOffset == null ? 0L : currentQueueOffset;
}
public synchronized void remove(String topic, Integer queueId) {
String topicQueueKey = topic + "-" + queueId;
// Beware of thread-safety
this.topicQueueTable.remove(topicQueueKey);
this.batchTopicQueueTable.remove(topicQueueKey);
this.lmqTopicQueueTable.remove(topicQueueKey);
log.info("removeQueueFromTopicQueueTable OK Topic: {} QueueId: {}", topic, queueId);
}
public void setTopicQueueTable(ConcurrentMap<String, Long> topicQueueTable) {
this.topicQueueTable = topicQueueTable;
}
public void setLmqTopicQueueTable(ConcurrentMap<String, Long> lmqTopicQueueTable) {
ConcurrentMap<String, Long> table = new ConcurrentHashMap<String, Long>(1024);
for (Map.Entry<String, Long> entry : lmqTopicQueueTable.entrySet()) {
if (MixAll.isLmq(entry.getKey())) {
table.put(entry.getKey(), entry.getValue());
}
}
this.lmqTopicQueueTable = table;
}
public ConcurrentMap<String, Long> getTopicQueueTable() {
return topicQueueTable;
}
public void setBatchTopicQueueTable(ConcurrentMap<String, Long> batchTopicQueueTable) {
this.batchTopicQueueTable = batchTopicQueueTable;
}
}
| QueueOffsetOperator |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/bean/override/BeanOverrideBeanFactoryPostProcessorTests.java | {
"start": 21481,
"end": 21922
} | class ____ implements BeanFactoryPostProcessor {
@Override
@SuppressWarnings("unchecked")
public void postProcessBeanFactory(ConfigurableListableBeanFactory beanFactory) {
Map<String, BeanWrapper> cache = (Map<String, BeanWrapper>) ReflectionTestUtils.getField(beanFactory,
"factoryBeanInstanceCache");
Assert.isTrue(cache.isEmpty(), "Early initialization of factory bean triggered.");
}
}
}
| EarlyBeanInitializationDetector |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/builder/ignore/Person.java | {
"start": 236,
"end": 702
} | class ____ extends BaseEntity {
private final String name;
private final String lastName;
public Person(Builder builder) {
super( builder );
this.name = builder.name;
this.lastName = builder.lastName;
}
public String getName() {
return name;
}
public String getLastName() {
return lastName;
}
public static Builder builder() {
return new Builder();
}
public static | Person |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/deser/enums/EnumDeserFromIntJsonValueTest.java | {
"start": 759,
"end": 946
} | enum ____ {
A(-13L);
final long x;
Bean1850LongMethod(long x) { this.x = x; }
@JsonValue
public long code() { return x; }
}
| Bean1850LongMethod |
java | elastic__elasticsearch | x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/MockElasticInferenceServiceAuthorizationServer.java | {
"start": 694,
"end": 3220
} | class ____ implements TestRule {
private static final Logger logger = LogManager.getLogger(MockElasticInferenceServiceAuthorizationServer.class);
private final MockWebServer webServer = new MockWebServer();
public static MockElasticInferenceServiceAuthorizationServer enabledWithRainbowSprinklesAndElser() {
var server = new MockElasticInferenceServiceAuthorizationServer();
server.enqueueAuthorizeAllModelsResponse();
return server;
}
public void enqueueAuthorizeAllModelsResponse() {
String responseJson = """
{
"models": [
{
"model_name": "rainbow-sprinkles",
"task_types": ["chat"]
},
{
"model_name": "gp-llm-v2",
"task_types": ["chat"]
},
{
"model_name": "elser_model_2",
"task_types": ["embed/text/sparse"]
},
{
"model_name": "jina-embeddings-v3",
"task_types": ["embed/text/dense"]
},
{
"model_name": "jina-reranker-v2",
"task_types": ["rerank/text/text-similarity"]
}
]
}
""";
webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson));
}
public String getUrl() {
return format("http://%s:%s", webServer.getHostName(), webServer.getPort());
}
@Override
public Statement apply(Statement statement, Description description) {
return new Statement() {
@Override
public void evaluate() throws Throwable {
try {
logger.info("Starting mock EIS gateway");
webServer.start();
logger.info(Strings.format("Started mock EIS gateway with address: %s", getUrl()));
} catch (Exception e) {
logger.warn("Failed to start mock EIS gateway", e);
}
try {
statement.evaluate();
} finally {
logger.info(Strings.format("Stopping mock EIS gateway address: %s", getUrl()));
webServer.close();
}
}
};
}
}
| MockElasticInferenceServiceAuthorizationServer |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/testutil/WithServiceImplementation.java | {
"start": 651,
"end": 739
} | interface ____ {
/**
* @return The service implementation | WithServiceImplementation |
java | mybatis__mybatis-3 | src/test/java/org/apache/ibatis/submitted/language/VelocitySqlSourceBuilder.java | {
"start": 2083,
"end": 5836
} | class ____ extends BaseBuilder implements TokenHandler {
private final List<ParameterMapping> parameterMappings = new ArrayList<>();
private final Class<?> parameterType;
public ParameterMappingTokenHandler(Configuration configuration, Class<?> parameterType) {
super(configuration);
this.parameterType = parameterType;
}
public List<ParameterMapping> getParameterMappings() {
return parameterMappings;
}
@Override
public String handleToken(String content) {
parameterMappings.add(buildParameterMapping(content));
return "?";
}
private ParameterMapping buildParameterMapping(String content) {
Map<String, String> propertiesMap = parseParameterMapping(content);
String property = propertiesMap.get("property");
JdbcType jdbcType = resolveJdbcType(propertiesMap.get("jdbcType"));
Class<?> propertyType;
if (typeHandlerRegistry.hasTypeHandler(parameterType)) {
propertyType = parameterType;
} else if (JdbcType.CURSOR.equals(jdbcType)) {
propertyType = ResultSet.class;
} else if (property != null) {
MetaClass metaClass = MetaClass.forClass(parameterType, configuration.getReflectorFactory());
if (metaClass.hasGetter(property)) {
propertyType = metaClass.getGetterType(property);
} else {
propertyType = Object.class;
}
} else {
propertyType = Object.class;
}
ParameterMapping.Builder builder = new ParameterMapping.Builder(configuration, property, propertyType);
if (jdbcType != null) {
builder.jdbcType(jdbcType);
}
Class<?> javaType = null;
String typeHandlerAlias = null;
for (Map.Entry<String, String> entry : propertiesMap.entrySet()) {
String name = entry.getKey();
String value = entry.getValue();
if (name != null) {
switch (name) {
case "javaType":
javaType = resolveClass(value);
builder.javaType(javaType);
break;
case "mode":
builder.mode(resolveParameterMode(value));
break;
case "numericScale":
builder.numericScale(Integer.valueOf(value));
break;
case "resultMap":
builder.resultMapId(value);
break;
case "typeHandler":
typeHandlerAlias = value;
break;
case "jdbcTypeName":
builder.jdbcTypeName(value);
break;
case "property":
break;
case "expression":
builder.expression(value);
break;
default:
throw new BuilderException("An invalid property '" + name + "' was found in mapping @{" + content
+ "}. Valid properties are " + parameterProperties);
}
} else {
throw new BuilderException("An invalid property '" + name + "' was found in mapping @{" + content
+ "}. Valid properties are " + parameterProperties);
}
}
if (typeHandlerAlias != null) {
builder.typeHandler(resolveTypeHandler(propertyType, jdbcType, typeHandlerAlias));
}
return builder.build();
}
private Map<String, String> parseParameterMapping(String content) {
try {
return new ParameterExpression(content);
} catch (BuilderException ex) {
throw ex;
} catch (Exception ex) {
throw new BuilderException("Parsing error was found in mapping @{" + content
+ "}. Check syntax #{property|(expression), var1=value1, var2=value2, ...} ", ex);
}
}
}
}
| ParameterMappingTokenHandler |
java | spring-projects__spring-framework | spring-context/src/main/java/org/springframework/validation/beanvalidation/MethodValidationAdapter.java | {
"start": 19926,
"end": 20512
} | class ____ extends DefaultMessageSourceResolvable {
private final transient ConstraintViolation<Object> violation;
public ViolationMessageSourceResolvable(
String[] codes, Object[] arguments, String defaultMessage, ConstraintViolation<Object> violation) {
super(codes, arguments, defaultMessage);
this.violation = violation;
}
public ConstraintViolation<Object> getViolation() {
return this.violation;
}
}
/**
* Default algorithm to select an object name, as described in {@link #setObjectNameResolver}.
*/
private static | ViolationMessageSourceResolvable |
java | apache__camel | components/camel-vertx/camel-vertx-websocket/src/test/java/org/apache/camel/component/vertx/websocket/VertxWebsocketHandshakeHeadersTest.java | {
"start": 1695,
"end": 10099
} | class ____ extends VertxWebSocketTestSupport {
private static final Logger LOG = LoggerFactory.getLogger(VertxWebsocketHandshakeHeadersTest.class);
@Test
public void testHandshakeHeadersAsProducer() throws Exception {
CountDownLatch latch = new CountDownLatch(2);
Vertx vertx = Vertx.vertx();
Router router = Router.router(vertx);
Route route = router.route("/ws");
route.handler(new Handler<RoutingContext>() {
@Override
public void handle(RoutingContext context) {
HttpServerRequest request = context.request();
String authorizationHeader = request.getHeader("Authorization");
assertNotNull(authorizationHeader, "Authorization header is not passed in the request.");
assertFalse(authorizationHeader.isBlank(), "Authorization header is blank.");
String apiSignHeader = request.getHeader("ApiSign");
assertNotNull(apiSignHeader, "ApiSign header is not passed in the request.");
assertFalse(apiSignHeader.isBlank(), "ApiSign header is blank.");
String connectionHeader = request.headers().get(HttpHeaders.CONNECTION);
if (connectionHeader == null || !connectionHeader.toLowerCase().contains("upgrade")) {
context.response().setStatusCode(400);
context.response().end("Can \"Upgrade\" only to \"WebSocket\".");
} else {
// we're about to upgrade the connection, which means an asynchronous
// operation. We have to pause the request otherwise we will loose the
// body of the request once the upgrade completes
final boolean parseEnded = request.isEnded();
if (!parseEnded) {
request.pause();
}
// upgrade
request.toWebSocket(toWebSocket -> {
if (toWebSocket.succeeded()) {
// resume the parsing
if (!parseEnded) {
request.resume();
}
// handle the websocket session as usual
ServerWebSocket webSocket = toWebSocket.result();
webSocket.textMessageHandler(new Handler<String>() {
@Override
public void handle(String message) {
latch.countDown();
}
});
} else {
// the upgrade failed
context.fail(toWebSocket.cause());
}
});
}
}
});
HttpServerOptions options = new HttpServerOptions();
VertxWebsocketHostConfiguration configuration = new VertxWebsocketHostConfiguration(vertx, router, options, null);
VertxWebsocketHostKey key = new VertxWebsocketHostKey("localhost", 0);
VertxWebsocketHost host = new VertxWebsocketHost(context, configuration, key);
host.start();
String handshakeHeaders = "handshake.Authorization=Bearer token&handshake.ApiSign=-u-4tjFSE=";
context.addRoutes(new RouteBuilder() {
@Override
public void configure() {
from("direct:start")
.toD("vertx-websocket:localhost:${header.port}/ws?" + handshakeHeaders);
}
});
context.start();
try {
ProducerTemplate template = context.createProducerTemplate();
template.sendBodyAndHeader("direct:start", "Hello world", "port", host.getPort());
template.sendBodyAndHeader("direct:start", "Hello world after handshake", "port", host.getPort());
assertTrue(latch.await(10, TimeUnit.SECONDS));
} finally {
try {
host.stop();
} catch (Exception e) {
LOG.warn("Failed to stop Vert.x server", e);
}
context.stop();
}
}
@Test
public void testHandshakeHeadersAsConsumer() throws Exception {
CountDownLatch latch = new CountDownLatch(1);
Vertx vertx = Vertx.vertx();
Router router = Router.router(vertx);
Route route = router.route("/ws");
route.handler(new Handler<RoutingContext>() {
@Override
public void handle(RoutingContext context) {
HttpServerRequest request = context.request();
String authorizationHeader = request.getHeader("Authorization");
assertNotNull(authorizationHeader, "Authorization header is not passed in the request.");
assertFalse(authorizationHeader.isBlank(), "Authorization header is blank.");
String apiSignHeader = request.getHeader("ApiSign");
assertNotNull(apiSignHeader, "ApiSign header is not passed in the request.");
assertFalse(apiSignHeader.isBlank(), "ApiSign header is blank.");
String connectionHeader = request.headers().get(HttpHeaders.CONNECTION);
if (connectionHeader == null || !connectionHeader.toLowerCase().contains("upgrade")) {
context.response().setStatusCode(400);
context.response().end("Can \"Upgrade\" only to \"WebSocket\".");
} else {
// we're about to upgrade the connection, which means an asynchronous
// operation. We have to pause the request otherwise we will loose the
// body of the request once the upgrade completes
final boolean parseEnded = request.isEnded();
if (!parseEnded) {
request.pause();
}
// upgrade
request.toWebSocket(toWebSocket -> {
if (toWebSocket.succeeded()) {
// resume the parsing
if (!parseEnded) {
request.resume();
}
// Send a text message to consumer
ServerWebSocket webSocket = toWebSocket.result();
webSocket.writeTextMessage("Hello World");
webSocket.writeTextMessage("Ping").onComplete(event -> latch.countDown());
} else {
// the upgrade failed
context.fail(toWebSocket.cause());
}
});
}
}
});
HttpServerOptions options = new HttpServerOptions();
VertxWebsocketHostConfiguration configuration = new VertxWebsocketHostConfiguration(vertx, router, options, null);
VertxWebsocketHostKey key = new VertxWebsocketHostKey("localhost", 0);
VertxWebsocketHost host = new VertxWebsocketHost(context, configuration, key);
host.start();
String handshakeHeaders = "handshake.Authorization=Bearer token&handshake.ApiSign=-u-4tjFSE=";
context.addRoutes(new RouteBuilder() {
@Override
public void configure() {
fromF("vertx-websocket:localhost:%d/ws?consumeAsClient=true&" + handshakeHeaders, host.getPort())
.log("Consume websocket message ${body}")
.to("mock:result");
}
});
context.start();
try {
assertTrue(latch.await(10, TimeUnit.SECONDS));
MockEndpoint mockEndpoint = context.getEndpoint("mock:result", MockEndpoint.class);
mockEndpoint.expectedBodiesReceivedInAnyOrder("Hello World", "Ping");
mockEndpoint.assertIsSatisfied();
} finally {
try {
host.stop();
} catch (Exception e) {
LOG.warn("Failed to stop Vert.x server", e);
}
context.stop();
}
}
@Override
protected void startCamelContext() {
}
}
| VertxWebsocketHandshakeHeadersTest |
java | reactor__reactor-core | reactor-core/src/test/java/reactor/core/publisher/FluxBufferTest.java | {
"start": 1521,
"end": 20589
} | class ____ extends FluxOperatorTest<String, List<String>> {
@Override
protected Scenario<String, List<String>> defaultScenarioOptions(Scenario<String, List<String>> defaultOptions) {
return defaultOptions.shouldAssertPostTerminateState(false);
}
@Override
protected List<Scenario<String, List<String>>> scenarios_operatorError() {
return Arrays.asList(
scenario(f -> f.buffer(Integer.MAX_VALUE, () -> null)),
scenario(f -> f.buffer(Integer.MAX_VALUE, () -> {
throw exception();
})),
scenario(f -> f.buffer(2, 1, () -> null)),
scenario(f -> f.buffer(2, 1, () -> {
throw exception();
})),
scenario(f -> f.buffer(1, 2, () -> null)),
scenario(f -> f.buffer(1, 2, () -> {
throw exception();
}))
);
}
@Override
protected List<Scenario<String, List<String>>> scenarios_operatorSuccess() {
return Arrays.asList(
scenario(f -> f.buffer(1, 2))
.receive(s -> assertThat(s).containsExactly(item(0)),
s -> assertThat(s).containsExactly(item(2))),
scenario(f -> f.buffer(2, 1))
.receive(s -> assertThat(s).containsExactly(item(0), item(1)),
s -> assertThat(s).containsExactly(item(1), item(2)),
s -> assertThat(s).containsExactly(item(2))),
scenario(f -> f.buffer(1))
.receive(s -> assertThat(s).containsExactly(item(0)),
s -> assertThat(s).containsExactly(item(1)),
s -> assertThat(s).containsExactly(item(2))),
scenario(Flux::buffer)
.receive(s -> assertThat(s).containsExactly(item(0), item(1), item(2)))
);
}
@Override
protected List<Scenario<String, List<String>>> scenarios_errorFromUpstreamFailure() {
return Arrays.asList(
scenario(Flux::buffer),
scenario(f -> f.buffer(1, 2)),
scenario(f -> f.buffer(2, 1))
);
}
@Test
public void sourceNull() {
assertThatExceptionOfType(NullPointerException.class).isThrownBy(() -> {
new FluxBuffer<>(null, 1, ArrayList::new);
});
}
@Test
public void supplierNull() {
assertThatExceptionOfType(NullPointerException.class).isThrownBy(() -> {
Flux.never().buffer(1, 1, null);
});
}
@Test
public void sizeZero() {
assertThatExceptionOfType(IllegalArgumentException.class).isThrownBy(() -> {
Flux.never().buffer(0, 1);
});
}
@Test
public void skipZero() {
assertThatExceptionOfType(IllegalArgumentException.class).isThrownBy(() -> {
Flux.never().buffer(1, 0);
});
}
@Test
public void normalExact() {
AssertSubscriber<List<Integer>> ts = AssertSubscriber.create();
Flux.range(1, 10).buffer(2).subscribe(ts);
ts.assertValues(Arrays.asList(1, 2),
Arrays.asList(3, 4),
Arrays.asList(5, 6),
Arrays.asList(7, 8),
Arrays.asList(9, 10))
.assertComplete()
.assertNoError();
}
@Test
public void normalExactBackpressured() {
AssertSubscriber<List<Integer>> ts = AssertSubscriber.create(0);
Flux.range(1, 10).buffer(2).subscribe(ts);
ts.assertNoValues()
.assertNoError()
.assertNotComplete();
ts.request(2);
ts.assertValues(Arrays.asList(1, 2), Arrays.asList(3, 4))
.assertNoError()
.assertNotComplete();
ts.request(3);
ts.assertValues(Arrays.asList(1, 2),
Arrays.asList(3, 4),
Arrays.asList(5, 6),
Arrays.asList(7, 8),
Arrays.asList(9, 10))
.assertComplete()
.assertNoError();
}
@Test
public void largerSkip() {
AssertSubscriber<List<Integer>> ts = AssertSubscriber.create();
Flux.range(1, 10).buffer(2, 3).subscribe(ts);
ts.assertValues(Arrays.asList(1, 2),
Arrays.asList(4, 5),
Arrays.asList(7, 8),
Arrays.asList(10))
.assertComplete()
.assertNoError();
}
@Test
public void largerSkipEven() {
AssertSubscriber<List<Integer>> ts = AssertSubscriber.create();
Flux.range(1, 8).buffer(2, 3).subscribe(ts);
ts.assertValues(Arrays.asList(1, 2), Arrays.asList(4, 5), Arrays.asList(7, 8))
.assertComplete()
.assertNoError();
}
@Test
public void largerSkipEvenBackpressured() {
AssertSubscriber<List<Integer>> ts = AssertSubscriber.create(0);
Flux.range(1, 8).buffer(2, 3).subscribe(ts);
ts.assertNoValues()
.assertNoError()
.assertNotComplete();
ts.request(2);
ts.assertValues(Arrays.asList(1, 2), Arrays.asList(4, 5))
.assertNoError()
.assertNotComplete();
ts.request(2);
ts.assertValues(Arrays.asList(1, 2), Arrays.asList(4, 5), Arrays.asList(7, 8))
.assertComplete()
.assertNoError();
}
@Test
public void largerSkipBackpressured() {
AssertSubscriber<List<Integer>> ts = AssertSubscriber.create(0);
Flux.range(1, 10).buffer(2, 3).subscribe(ts);
ts.assertNoValues()
.assertNoError()
.assertNotComplete();
ts.request(2);
ts.assertValues(Arrays.asList(1, 2), Arrays.asList(4, 5))
.assertNoError()
.assertNotComplete();
ts.request(2);
ts.assertValues(Arrays.asList(1, 2),
Arrays.asList(4, 5),
Arrays.asList(7, 8),
Arrays.asList(10))
.assertComplete()
.assertNoError();
}
@Test
public void smallerSkip() {
AssertSubscriber<List<Integer>> ts = AssertSubscriber.create();
Flux.range(1, 10).buffer(2, 1).subscribe(ts);
ts.assertValues(Arrays.asList(1, 2),
Arrays.asList(2, 3),
Arrays.asList(3, 4),
Arrays.asList(4, 5),
Arrays.asList(5, 6),
Arrays.asList(6, 7),
Arrays.asList(7, 8),
Arrays.asList(8, 9),
Arrays.asList(9, 10),
Arrays.asList(10))
.assertComplete()
.assertNoError();
}
@Test
public void smallerSkipBackpressured() {
AssertSubscriber<List<Integer>> ts = AssertSubscriber.create(0);
Flux.range(1, 10).buffer(2, 1).subscribe(ts);
ts.assertNoValues()
.assertNoError()
.assertNotComplete();
ts.request(2);
ts.assertValues(Arrays.asList(1, 2), Arrays.asList(2, 3))
.assertNoError()
.assertNotComplete();
ts.request(2);
ts.assertValues(Arrays.asList(1, 2),
Arrays.asList(2, 3),
Arrays.asList(3, 4),
Arrays.asList(4, 5))
.assertNoError()
.assertNotComplete();
ts.request(5);
ts.assertValues(Arrays.asList(1, 2),
Arrays.asList(2, 3),
Arrays.asList(3, 4),
Arrays.asList(4, 5),
Arrays.asList(5, 6),
Arrays.asList(6, 7),
Arrays.asList(7, 8),
Arrays.asList(8, 9),
Arrays.asList(9, 10))
.assertNoError()
.assertNotComplete();
ts.request(1);
ts.assertValues(Arrays.asList(1, 2),
Arrays.asList(2, 3),
Arrays.asList(3, 4),
Arrays.asList(4, 5),
Arrays.asList(5, 6),
Arrays.asList(6, 7),
Arrays.asList(7, 8),
Arrays.asList(8, 9),
Arrays.asList(9, 10),
Arrays.asList(10))
.assertComplete()
.assertNoError();
}
@Test
public void smallerSkip3Backpressured() {
AssertSubscriber<List<Integer>> ts = AssertSubscriber.create(0);
Flux.range(1, 10).buffer(3, 1).subscribe(ts);
ts.assertNoValues()
.assertNoError()
.assertNotComplete();
ts.request(2);
ts.assertValues(Arrays.asList(1, 2, 3), Arrays.asList(2, 3, 4))
.assertNoError()
.assertNotComplete();
ts.request(2);
ts.assertValues(Arrays.asList(1, 2, 3),
Arrays.asList(2, 3, 4),
Arrays.asList(3, 4, 5),
Arrays.asList(4, 5, 6))
.assertNoError()
.assertNotComplete();
ts.request(4);
ts.assertValues(Arrays.asList(1, 2, 3),
Arrays.asList(2, 3, 4),
Arrays.asList(3, 4, 5),
Arrays.asList(4, 5, 6),
Arrays.asList(5, 6, 7),
Arrays.asList(6, 7, 8),
Arrays.asList(7, 8, 9),
Arrays.asList(8, 9, 10))
.assertNoError()
.assertNotComplete();
ts.request(1);
ts.assertValues(Arrays.asList(1, 2, 3),
Arrays.asList(2, 3, 4),
Arrays.asList(3, 4, 5),
Arrays.asList(4, 5, 6),
Arrays.asList(5, 6, 7),
Arrays.asList(6, 7, 8),
Arrays.asList(7, 8, 9),
Arrays.asList(8, 9, 10),
Arrays.asList(9, 10))
.assertNoError()
.assertNotComplete();
ts.request(1);
ts.assertValues(Arrays.asList(1, 2, 3),
Arrays.asList(2, 3, 4),
Arrays.asList(3, 4, 5),
Arrays.asList(4, 5, 6),
Arrays.asList(5, 6, 7),
Arrays.asList(6, 7, 8),
Arrays.asList(7, 8, 9),
Arrays.asList(8, 9, 10),
Arrays.asList(9, 10),
Arrays.asList(10))
.assertComplete()
.assertNoError();
}
@Test
public void supplierReturnsNull() {
AssertSubscriber<Object> ts = AssertSubscriber.create();
Flux.range(1, 10).buffer(2, 1, () -> null).subscribe(ts);
ts.assertNoValues()
.assertError(NullPointerException.class)
.assertNotComplete();
}
@Test
public void supplierThrows() {
AssertSubscriber<Object> ts = AssertSubscriber.create();
Flux.range(1, 10).buffer(2, 1, () -> {
throw new RuntimeException("forced failure");
}).subscribe(ts);
ts.assertNoValues()
.assertError(RuntimeException.class)
.assertErrorMessage("forced failure")
.assertNotComplete();
}
@Test
public void bufferWillSubdivideAnInputFlux() {
Flux<Integer> numbers = Flux.just(1, 2, 3, 4, 5, 6, 7, 8);
//"non overlapping buffers"
List<List<Integer>> res = numbers.buffer(2, 3)
.buffer()
.blockLast();
assertThat(res).containsExactly(Arrays.asList(1, 2),
Arrays.asList(4, 5),
Arrays.asList(7, 8));
}
@Test
public void bufferWillSubdivideAnInputFluxOverlap() {
Flux<Integer> numbers = Flux.just(1, 2, 3, 4, 5, 6, 7, 8);
//"non overlapping buffers"
List<List<Integer>> res = numbers.buffer(3, 2)
.buffer()
.blockLast();
assertThat(res).containsExactly(
Arrays.asList(1, 2, 3),
Arrays.asList(3, 4, 5),
Arrays.asList(5, 6, 7),
Arrays.asList(7, 8));
}
@Test
public void bufferWillRerouteAsManyElementAsSpecified() {
assertThat(Flux.just(1, 2, 3, 4, 5)
.buffer(2)
.collectList()
.block()).containsExactly(Arrays.asList(1, 2),
Arrays.asList(3, 4),
Arrays.asList(5));
}
@Test
public void scanOperator(){
FluxBuffer<Integer, List<Integer>> test = new FluxBuffer<>(Flux.just(1, 2, 3), 2, 1, ArrayList::new);
assertThat(test.scan(Scannable.Attr.RUN_STYLE)).isSameAs(Scannable.Attr.RunStyle.SYNC);
}
@Test
public void scanExactSubscriber() {
CoreSubscriber<? super List> actual = new LambdaSubscriber<>(null, e -> {}, null, null);
FluxBuffer.BufferExactSubscriber<String, List<String>> test = new FluxBuffer.BufferExactSubscriber<>(
actual, 23, ArrayList::new );
Subscription parent = Operators.emptySubscription();
test.onSubscribe(parent);
test.onNext("foo");
assertThat(test.scan(Scannable.Attr.CAPACITY)).isEqualTo(23);
assertThat(test.scan(Scannable.Attr.BUFFERED)).isEqualTo(1);
assertThat(test.scan(Scannable.Attr.PREFETCH)).isEqualTo(23);
assertThat(test.scan(Scannable.Attr.PARENT)).isSameAs(parent);
assertThat(test.scan(Scannable.Attr.ACTUAL)).isSameAs(actual);
assertThat(test.scan(Scannable.Attr.RUN_STYLE)).isSameAs(Scannable.Attr.RunStyle.SYNC);
assertThat(test.scan(Scannable.Attr.TERMINATED)).isFalse();
test.onError(new IllegalStateException("boom"));
assertThat(test.scan(Scannable.Attr.TERMINATED)).isTrue();
}
@Test
public void scanOverlappingSubscriber() {
CoreSubscriber<? super List> actual = new LambdaSubscriber<>(null, e -> {
}, null, null);
FluxBuffer.BufferOverlappingSubscriber<String, List<String>> test =
new FluxBuffer.BufferOverlappingSubscriber<>(actual, 23, 2, ArrayList::new);
Subscription parent = Operators.emptySubscription();
test.onSubscribe(parent);
test.onNext("foo");
test.onNext("bar");
assertThat(test.scan(Scannable.Attr.CAPACITY)).isEqualTo(23);
assertThat(test.scan(Scannable.Attr.BUFFERED)).isEqualTo(2);
test.onNext("baz");
assertThat(test.scan(Scannable.Attr.CAPACITY)).isEqualTo(46); //2 buffers
assertThat(test.scan(Scannable.Attr.BUFFERED)).isEqualTo(4); // buffered foo bar baz then baz
assertThat(test.scan(Scannable.Attr.PREFETCH)).isEqualTo(Integer.MAX_VALUE);
assertThat(test.scan(Scannable.Attr.REQUESTED_FROM_DOWNSTREAM)).isEqualTo(Long.MAX_VALUE);
assertThat(test.scan(Scannable.Attr.PARENT)).isSameAs(parent);
assertThat(test.scan(Scannable.Attr.ACTUAL)).isSameAs(actual);
assertThat(test.scan(Scannable.Attr.RUN_STYLE)).isSameAs(Scannable.Attr.RunStyle.SYNC);
assertThat(test.scan(Scannable.Attr.TERMINATED)).isFalse();
assertThat(test.scan(Scannable.Attr.CANCELLED)).isFalse();
test.onError(new IllegalStateException("boom"));
assertThat(test.scan(Scannable.Attr.TERMINATED)).isTrue();
}
@Test
public void scanOverlappingSubscriberCancelled() {
CoreSubscriber<? super List>
actual = new LambdaSubscriber<>(null, e -> {}, null, null);
FluxBuffer.BufferOverlappingSubscriber<String, List<String>> test = new FluxBuffer.BufferOverlappingSubscriber<>(
actual, 23, 5, ArrayList::new);
Subscription parent = Operators.emptySubscription();
test.onSubscribe(parent);
assertThat(test.scan(Scannable.Attr.CANCELLED)).isFalse();
test.cancel();
assertThat(test.scan(Scannable.Attr.CANCELLED)).isTrue();
}
@Test
public void scanSkipSubscriber() {
CoreSubscriber<? super List> actual = new LambdaSubscriber<>(null, e -> {}, null, null);
FluxBuffer.BufferSkipSubscriber<String, List<String>> test = new FluxBuffer.BufferSkipSubscriber<>(actual, 2, 3, ArrayList::new);
Subscription parent = Operators.emptySubscription();
test.onSubscribe(parent);
assertThat(test.scan(Scannable.Attr.BUFFERED)).isEqualTo(0);
test.onNext("foo");
assertThat(test.scan(Scannable.Attr.BUFFERED)).isEqualTo(1);
test.onNext("bar");
assertThat(test.scan(Scannable.Attr.BUFFERED)).isEqualTo(0); //buffer emitted
test.onNext("drop");
assertThat(test.scan(Scannable.Attr.BUFFERED)).isEqualTo(0); //buffer not replenished
assertThat(test.scan(Scannable.Attr.CAPACITY)).isEqualTo(2);
assertThat(test.scan(Scannable.Attr.PREFETCH)).isEqualTo(2);
assertThat(test.scan(Scannable.Attr.PARENT)).isSameAs(parent);
assertThat(test.scan(Scannable.Attr.ACTUAL)).isSameAs(actual);
assertThat(test.scan(Scannable.Attr.RUN_STYLE)).isSameAs(Scannable.Attr.RunStyle.SYNC);
assertThat(test.scan(Scannable.Attr.TERMINATED)).isFalse();
test.onError(new IllegalStateException("boom"));
assertThat(test.scan(Scannable.Attr.TERMINATED)).isTrue();
}
@Test
public void discardOnCancel() {
StepVerifier.create(Flux.just(1, 2, 3)
.concatWith(Mono.never())
.buffer(4))
.thenAwait(Duration.ofMillis(10))
.thenCancel()
.verifyThenAssertThat()
.hasDiscardedExactly(1, 2, 3);
}
@Test
public void discardOnCancelSkip() {
StepVerifier.create(Flux.just(1, 2, 3, 4, 5)
.take(2)
.concatWith(Mono.never())
.buffer(3, 4))
.thenAwait(Duration.ofMillis(10))
.thenCancel()
.verifyThenAssertThat()
.hasDiscardedExactly(1, 2);
}
@Test
public void discardOnCancelOverlap() {
StepVerifier.create(Flux.just(1, 2, 3, 4, 5, 6)
.take(2)
.concatWith(Mono.never())
.buffer(4, 2))
.thenAwait(Duration.ofMillis(10))
.thenCancel()
.verifyThenAssertThat()
.hasDiscardedExactly(1, 2);
}
@Test
public void discardOnNextSupplierError() {
Supplier<List<Integer>> bufferSupplier = () -> null;
StepVerifier.create(Flux.just(1, 2, 3)
.buffer(4, 4, bufferSupplier))
.expectErrorMessage("The bufferSupplier returned a null buffer")
.verifyThenAssertThat()
.hasDiscardedExactly(1);
}
@Test
public void discardOnNextSupplierErrorSkip() {
Supplier<List<Integer>> bufferSupplier = () -> null;
StepVerifier.create(Flux.just(1, 2, 3, 4, 5)
.buffer(3, 4, bufferSupplier))
.expectErrorMessage("The bufferSupplier returned a null buffer")
.verifyThenAssertThat()
.hasDiscardedExactly(1);
}
@Test
public void discardOnNextSupplierErrorOverlap() {
Supplier<List<Integer>> bufferSupplier = () -> null;
StepVerifier.create(Flux.just(1, 2, 3, 4, 5, 6)
.buffer(4, 2, bufferSupplier))
.expectErrorMessage("The bufferSupplier returned a null buffer")
.verifyThenAssertThat()
.hasDiscardedExactly(1);
}
@Test
public void discardOnSkippedElements() {
//the skip flavor should discard elements that are not added to any buffer
StepVerifier.create(Flux.just(1, 2, 3, 4, 5)
.buffer(2, 3)
.flatMapIterable(Function.identity()))
.expectNext(1, 2, 4, 5)
.expectComplete()
.verifyThenAssertThat()
.hasDiscardedExactly(3);
}
@Test
public void discardOnError() {
StepVerifier.create(Flux.just(1, 2, 3)
.concatWith(Mono.error(new IllegalStateException("boom")))
.buffer(4))
.expectErrorMessage("boom")
.verifyThenAssertThat()
.hasDiscardedExactly(1, 2, 3);
}
@Test
public void discardOnErrorSkip() {
StepVerifier.create(Flux.just(1, 2, 3)
.concatWith(Mono.error(new IllegalStateException("boom")))
.buffer(4, 5))
.expectErrorMessage("boom")
.verifyThenAssertThat()
.hasDiscardedExactly(1, 2, 3);
}
@Test
public void discardOnErrorOverlap() {
StepVerifier.create(Flux.just(1, 2, 3)
.concatWith(Mono.error(new IllegalStateException("boom")))
.buffer(4, 2))
.expectErrorMessage("boom")
.verifyThenAssertThat()
.hasDiscardedExactly(1, 2, 3, 3); //we already opened a 2nd buffer
}
@ParameterizedTestWithName
@CsvSource({
"1|2, 1|2, ",
"1|1|1, 1, 1|1",
"1|1|2, 1|2, 1",
"1|2|1, 1|2;1, ",
"1|2|1|3, 1|2;1|3, ",
"1|1|2|3, 1|2;3, 1",
"2|1|1|3, 2|1;1|3, "
})
public void bufferExactSupplierUsesSet(String input, String output, @Nullable String discard) {
List<Set<Object>> outputs = Arrays.stream(output.split(";"))
.map(it -> Arrays.<Object>stream(it.split("\\|")).collect(Collectors.toSet()))
.collect(Collectors.toList());
StepVerifier.Assertions assertions = Flux.just(input.split("\\|"))
.<Collection<Object>>buffer(2, HashSet::new)
.as(it -> StepVerifier.create(it, outputs.size()))
.expectNextSequence(outputs)
.expectComplete()
.verifyThenAssertThat(Duration.ofSeconds(2));
if (discard == null) {
assertions.hasNotDiscardedElements();
} else {
assertions.hasDiscardedExactly((Object[]) discard.split("\\|"));
}
}
}
| FluxBufferTest |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Application.java | {
"start": 2548,
"end": 2633
} | class ____ responsible for launching and communicating with the child
* process.
*/
| is |
java | apache__camel | dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/GoogleSecretManagerComponentBuilderFactory.java | {
"start": 1943,
"end": 4097
} | interface ____ extends ComponentBuilder<GoogleSecretManagerComponent> {
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default GoogleSecretManagerComponentBuilder lazyStartProducer(boolean lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* Whether autowiring is enabled. This is used for automatic autowiring
* options (the option must be marked as autowired) by looking up in the
* registry to find if there is a single instance of matching type,
* which then gets configured on the component. This can be used for
* automatic configuring JDBC data sources, JMS connection factories,
* AWS Clients, etc.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: advanced
*
* @param autowiredEnabled the value to set
* @return the dsl builder
*/
default GoogleSecretManagerComponentBuilder autowiredEnabled(boolean autowiredEnabled) {
doSetProperty("autowiredEnabled", autowiredEnabled);
return this;
}
}
| GoogleSecretManagerComponentBuilder |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/StartsWithEvaluator.java | {
"start": 1242,
"end": 5410
} | class ____ implements EvalOperator.ExpressionEvaluator {
private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(StartsWithEvaluator.class);
private final Source source;
private final EvalOperator.ExpressionEvaluator str;
private final EvalOperator.ExpressionEvaluator prefix;
private final DriverContext driverContext;
private Warnings warnings;
public StartsWithEvaluator(Source source, EvalOperator.ExpressionEvaluator str,
EvalOperator.ExpressionEvaluator prefix, DriverContext driverContext) {
this.source = source;
this.str = str;
this.prefix = prefix;
this.driverContext = driverContext;
}
@Override
public Block eval(Page page) {
try (BytesRefBlock strBlock = (BytesRefBlock) str.eval(page)) {
try (BytesRefBlock prefixBlock = (BytesRefBlock) prefix.eval(page)) {
BytesRefVector strVector = strBlock.asVector();
if (strVector == null) {
return eval(page.getPositionCount(), strBlock, prefixBlock);
}
BytesRefVector prefixVector = prefixBlock.asVector();
if (prefixVector == null) {
return eval(page.getPositionCount(), strBlock, prefixBlock);
}
return eval(page.getPositionCount(), strVector, prefixVector).asBlock();
}
}
}
@Override
public long baseRamBytesUsed() {
long baseRamBytesUsed = BASE_RAM_BYTES_USED;
baseRamBytesUsed += str.baseRamBytesUsed();
baseRamBytesUsed += prefix.baseRamBytesUsed();
return baseRamBytesUsed;
}
public BooleanBlock eval(int positionCount, BytesRefBlock strBlock, BytesRefBlock prefixBlock) {
try(BooleanBlock.Builder result = driverContext.blockFactory().newBooleanBlockBuilder(positionCount)) {
BytesRef strScratch = new BytesRef();
BytesRef prefixScratch = new BytesRef();
position: for (int p = 0; p < positionCount; p++) {
switch (strBlock.getValueCount(p)) {
case 0:
result.appendNull();
continue position;
case 1:
break;
default:
warnings().registerException(new IllegalArgumentException("single-value function encountered multi-value"));
result.appendNull();
continue position;
}
switch (prefixBlock.getValueCount(p)) {
case 0:
result.appendNull();
continue position;
case 1:
break;
default:
warnings().registerException(new IllegalArgumentException("single-value function encountered multi-value"));
result.appendNull();
continue position;
}
BytesRef str = strBlock.getBytesRef(strBlock.getFirstValueIndex(p), strScratch);
BytesRef prefix = prefixBlock.getBytesRef(prefixBlock.getFirstValueIndex(p), prefixScratch);
result.appendBoolean(StartsWith.process(str, prefix));
}
return result.build();
}
}
public BooleanVector eval(int positionCount, BytesRefVector strVector,
BytesRefVector prefixVector) {
try(BooleanVector.FixedBuilder result = driverContext.blockFactory().newBooleanVectorFixedBuilder(positionCount)) {
BytesRef strScratch = new BytesRef();
BytesRef prefixScratch = new BytesRef();
position: for (int p = 0; p < positionCount; p++) {
BytesRef str = strVector.getBytesRef(p, strScratch);
BytesRef prefix = prefixVector.getBytesRef(p, prefixScratch);
result.appendBoolean(p, StartsWith.process(str, prefix));
}
return result.build();
}
}
@Override
public String toString() {
return "StartsWithEvaluator[" + "str=" + str + ", prefix=" + prefix + "]";
}
@Override
public void close() {
Releasables.closeExpectNoException(str, prefix);
}
private Warnings warnings() {
if (warnings == null) {
this.warnings = Warnings.createWarnings(
driverContext.warningsMode(),
source.source().getLineNumber(),
source.source().getColumnNumber(),
source.text()
);
}
return warnings;
}
static | StartsWithEvaluator |
java | greenrobot__EventBus | EventBusTestJava/src/main/java/org/greenrobot/eventbus/EventBusInheritanceDisabledTest.java | {
"start": 4837,
"end": 4924
} | interface ____ extends MyEventInterface {
}
public static | MyEventInterfaceExtended |
java | elastic__elasticsearch | x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRule.java | {
"start": 2643,
"end": 15480
} | enum ____ {
EXCLUDE,
PINNED;
public static QueryRuleType queryRuleType(String type) {
for (QueryRuleType queryRuleType : QueryRuleType.values()) {
if (queryRuleType.name().equalsIgnoreCase(type)) {
return queryRuleType;
}
}
throw new IllegalArgumentException("Unknown QueryRuleType: " + type);
}
@Override
public String toString() {
return name().toLowerCase(Locale.ROOT);
}
}
public static final NodeFeature NUMERIC_VALIDATION = new NodeFeature("query_rules.numeric_validation", true);
/**
* Public constructor.
*
* @param id The unique identifier associated with this query rule
* @param type The {@link QueryRuleType} of this rule
* @param criteria The {@link QueryRuleCriteria} required for a query to match this rule
* @param actions The actions that should be taken if this rule is matched, dependent on the type of rule
* @param priority If specified, assigns a priority to the rule. Rules with specified priorities are applied before
* rules without specified priorities, in ascending priority order.
*/
public QueryRule(
@Nullable String id,
QueryRuleType type,
List<QueryRuleCriteria> criteria,
Map<String, Object> actions,
@Nullable Integer priority
) {
// Interstitial null state allowed during rule creation; validation occurs in CRUD API
this.id = id;
Objects.requireNonNull(type, "Query rule type cannot be null");
this.type = type;
Objects.requireNonNull(criteria, "Query rule criteria cannot be null");
if (criteria.isEmpty()) {
throw new IllegalArgumentException("Query rule criteria cannot be empty");
}
this.criteria = criteria;
Objects.requireNonNull(actions, "Query rule actions cannot be null");
if (actions.isEmpty()) {
throw new IllegalArgumentException("Query rule actions cannot be empty");
}
this.actions = actions;
this.priority = priority;
validate();
}
public QueryRule(String id, QueryRule other) {
this(id, other.type, other.criteria, other.actions, other.priority);
}
public QueryRule(StreamInput in) throws IOException {
this.id = in.readString();
this.type = QueryRuleType.queryRuleType(in.readString());
this.criteria = in.readCollectionAsList(QueryRuleCriteria::new);
this.actions = in.readGenericMap();
if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_15_0)) {
this.priority = in.readOptionalVInt();
} else {
this.priority = null;
}
validate();
}
private void validate() {
if (priority != null && (priority < MIN_PRIORITY || priority > MAX_PRIORITY)) {
throw new IllegalArgumentException("Priority was " + priority + ", must be between " + MIN_PRIORITY + " and " + MAX_PRIORITY);
}
if (Set.of(QueryRuleType.PINNED, QueryRuleType.EXCLUDE).contains(type)) {
boolean ruleContainsIds = actions.containsKey(IDS_FIELD.getPreferredName());
boolean ruleContainsDocs = actions.containsKey(DOCS_FIELD.getPreferredName());
if (ruleContainsIds ^ ruleContainsDocs) {
validateIdOrDocAction(actions.get(IDS_FIELD.getPreferredName()));
validateIdOrDocAction(actions.get(DOCS_FIELD.getPreferredName()));
} else {
throw new ElasticsearchParseException(type.toString() + " query rule actions must contain only one of either ids or docs");
}
}
criteria.forEach(criterion -> {
List<Object> values = criterion.criteriaValues();
if (values != null) {
values.forEach(criterion.criteriaType()::validateInput);
}
});
}
private void validateIdOrDocAction(Object action) {
if (action != null) {
if (action instanceof List == false) {
throw new ElasticsearchParseException(type + " query rule actions must be a list");
} else if (((List<?>) action).isEmpty()) {
throw new ElasticsearchParseException(type + " query rule actions cannot be empty");
} else if (((List<?>) action).size() > MAX_NUM_DOCS_IN_RULE) {
throw new ElasticsearchParseException(type + " documents cannot exceed " + MAX_NUM_DOCS_IN_RULE);
}
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(id);
out.writeString(type.toString());
out.writeCollection(criteria);
out.writeGenericMap(actions);
if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_15_0)) {
out.writeOptionalVInt(priority);
}
}
@SuppressWarnings("unchecked")
private static final ConstructingObjectParser<QueryRule, String> PARSER = new ConstructingObjectParser<>(
"query_rule",
false,
(params, resourceName) -> {
final String id = (String) params[0];
final QueryRuleType type = QueryRuleType.queryRuleType((String) params[1]);
final List<QueryRuleCriteria> criteria = (List<QueryRuleCriteria>) params[2];
final Map<String, Object> actions = (Map<String, Object>) params[3];
final Integer priority = (Integer) params[4];
return new QueryRule(id, type, criteria, actions, priority);
}
);
public static final ParseField ID_FIELD = new ParseField("rule_id");
public static final ParseField TYPE_FIELD = new ParseField("type");
public static final ParseField CRITERIA_FIELD = new ParseField("criteria");
public static final ParseField ACTIONS_FIELD = new ParseField("actions");
public static final ParseField PRIORITY_FIELD = new ParseField("priority");
static {
PARSER.declareStringOrNull(optionalConstructorArg(), ID_FIELD);
PARSER.declareString(constructorArg(), TYPE_FIELD);
PARSER.declareObjectArray(constructorArg(), (p, c) -> QueryRuleCriteria.fromXContent(p), CRITERIA_FIELD);
PARSER.declareObject(constructorArg(), (p, c) -> p.map(), ACTIONS_FIELD);
PARSER.declareInt(optionalConstructorArg(), PRIORITY_FIELD);
}
/**
* Parses a {@link QueryRule} from its {@param xContentType} representation in bytes.
*
* @param source The bytes that represents the {@link QueryRule}.
* @param xContentType The format of the representation.
*
* @return The parsed {@link QueryRule}.
*/
public static QueryRule fromXContentBytes(BytesReference source, XContentType xContentType) {
try (XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, source, xContentType)) {
return QueryRule.fromXContent(parser);
} catch (IOException e) {
throw new ElasticsearchParseException("Failed to parse: " + source.utf8ToString(), e);
}
}
/**
* Parses a {@link QueryRule} through the provided {@param parser}.
* @param parser The {@link XContentType} parser.
*
* @return The parsed {@link QueryRule}.
*/
public static QueryRule fromXContent(XContentParser parser) {
return PARSER.apply(parser, null);
}
/**
* Converts the {@link QueryRule} to XContent.
*
* @return The {@link XContentBuilder} containing the serialized {@link QueryRule}.
*/
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
{
builder.field(ID_FIELD.getPreferredName(), id);
builder.field(TYPE_FIELD.getPreferredName(), type);
builder.xContentList(CRITERIA_FIELD.getPreferredName(), criteria);
builder.field(ACTIONS_FIELD.getPreferredName());
builder.map(actions);
if (priority != null) {
builder.field(PRIORITY_FIELD.getPreferredName(), priority);
}
}
builder.endObject();
return builder;
}
/**
* Returns the unique ID of the {@link QueryRule}.
*
* @return The unique ID of the {@link QueryRule}.
*/
public String id() {
return id;
}
/**
* Returns the {@link QueryRuleType} of {@link QueryRule}.
*
* @return The type of the {@link QueryRule}.
*/
public QueryRuleType type() {
return type;
}
/**
* Returns the {@link QueryRuleCriteria} that causes the {@link QueryRule} to match a query.
*
* @return the {@link QueryRuleCriteria}
*/
public List<QueryRuleCriteria> criteria() {
return criteria;
}
/**
* Returns the actions that are executed when the {@link QueryRule} matches a query.
*
* @return The actions that are executed when the {@link QueryRule} matches a query.
*/
public Map<String, Object> actions() {
return actions;
}
public Integer priority() {
return priority;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
QueryRule queryRule = (QueryRule) o;
return Objects.equals(id, queryRule.id)
&& type == queryRule.type
&& Objects.equals(criteria, queryRule.criteria)
&& Objects.equals(actions, queryRule.actions)
&& Objects.equals(priority, queryRule.priority);
}
@Override
public int hashCode() {
return Objects.hash(id, type, criteria, actions, priority);
}
@Override
public String toString() {
return Strings.toString(this);
}
public AppliedQueryRules applyRule(AppliedQueryRules appliedRules, Map<String, Object> matchCriteria) {
List<SpecifiedDocument> pinnedDocs = appliedRules.pinnedDocs();
List<SpecifiedDocument> excludedDocs = appliedRules.excludedDocs();
List<SpecifiedDocument> matchingDocs = identifyMatchingDocs(matchCriteria);
switch (type) {
case PINNED -> pinnedDocs.addAll(matchingDocs);
case EXCLUDE -> excludedDocs.addAll(matchingDocs);
default -> throw new IllegalStateException("Unsupported query rule type: " + type);
}
return new AppliedQueryRules(pinnedDocs, excludedDocs);
}
public boolean isRuleMatch(Map<String, Object> matchCriteria) {
Boolean isRuleMatch = null;
for (QueryRuleCriteria criterion : criteria) {
for (String match : matchCriteria.keySet()) {
final Object matchValue = matchCriteria.get(match);
final QueryRuleCriteriaType criteriaType = criterion.criteriaType();
final String criteriaMetadata = criterion.criteriaMetadata();
if (criteriaType == ALWAYS || (criteriaMetadata != null && criteriaMetadata.equals(match))) {
boolean singleCriterionMatches = criterion.isMatch(matchValue, criteriaType, false);
isRuleMatch = (isRuleMatch == null) ? singleCriterionMatches : isRuleMatch && singleCriterionMatches;
}
}
}
return isRuleMatch != null && isRuleMatch;
}
@SuppressWarnings("unchecked")
private List<SpecifiedDocument> identifyMatchingDocs(Map<String, Object> matchCriteria) {
List<SpecifiedDocument> matchingDocs = new ArrayList<>();
if (isRuleMatch(matchCriteria)) {
if (actions.containsKey(IDS_FIELD.getPreferredName())) {
matchingDocs.addAll(
((List<String>) actions.get(IDS_FIELD.getPreferredName())).stream().map(id -> new SpecifiedDocument(null, id)).toList()
);
} else if (actions.containsKey(DOCS_FIELD.getPreferredName())) {
List<Map<String, String>> docsToPin = (List<Map<String, String>>) actions.get(DOCS_FIELD.getPreferredName());
List<SpecifiedDocument> specifiedDocuments = docsToPin.stream()
.map(
map -> new SpecifiedDocument(
map.get(INDEX_FIELD.getPreferredName()),
map.get(SpecifiedDocument.ID_FIELD.getPreferredName())
)
)
.toList();
matchingDocs.addAll(specifiedDocuments);
}
}
return matchingDocs;
}
}
| QueryRuleType |
java | google__guava | android/guava-tests/test/com/google/common/io/BaseEncodingTest.java | {
"start": 19492,
"end": 19908
} | enum ____ each assertion we want to make as a way
// of dealing with the fact that one of the assertions is @GwtIncompatible but we don't want to
// have to have duplicate @GwtIncompatible test methods just to make that assertion.
for (AssertFailsToDecodeStrategy strategy : AssertFailsToDecodeStrategy.values()) {
strategy.assertFailsToDecode(encoding, cannotDecode, expectedMessage);
}
}
| for |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationClientProtocol.java | {
"start": 15831,
"end": 18052
} | interface ____ by clients to submit a new reservation to the
* {@code ResourceManager}.
* </p>
*
* <p>
* The client packages all details of its request in a
* {@link ReservationSubmissionRequest} object. This contains information
* about the amount of capacity, temporal constraints, and concurrency needs.
* Furthermore, the reservation might be composed of multiple stages, with
* ordering dependencies among them.
* </p>
*
* <p>
* In order to respond, a new admission control component in the
* {@code ResourceManager} performs an analysis of the resources that have
* been committed over the period of time the user is requesting, verify that
* the user requests can be fulfilled, and that it respect a sharing policy
* (e.g., {@code CapacityOverTimePolicy}). Once it has positively determined
* that the ReservationSubmissionRequest is satisfiable the
* {@code ResourceManager} answers with a
* {@link ReservationSubmissionResponse} that include a non-null
* {@link ReservationId}. Upon failure to find a valid allocation the response
* is an exception with the reason.
*
* On application submission the client can use this {@link ReservationId} to
* obtain access to the reserved resources.
* </p>
*
* <p>
* The system guarantees that during the time-range specified by the user, the
* reservationID will be corresponding to a valid reservation. The amount of
* capacity dedicated to such queue can vary overtime, depending of the
* allocation that has been determined. But it is guaranteed to satisfy all
* the constraint expressed by the user in the
* {@link ReservationSubmissionRequest}.
* </p>
*
* @param request the request to submit a new Reservation
* @return response the {@link ReservationId} on accepting the submission
* @throws YarnException if the request is invalid or reservation cannot be
* created successfully
* @throws IOException io error occur.
*
*/
@Public
@Unstable
@Idempotent
public ReservationSubmissionResponse submitReservation(
ReservationSubmissionRequest request) throws YarnException, IOException;
/**
* <p>
* The | used |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/ConstantPatternCompileTest.java | {
"start": 7818,
"end": 8370
} | class ____ {
static final String MY_COOL_PATTERN = "a+";
public static void myPopularStaticMethod() {
Matcher m = SOME_PATTERN.matcher("aaaaab");
}
private static final Pattern SOME_PATTERN = Pattern.compile(MY_COOL_PATTERN);
}
""")
.doTest();
}
@Test
public void fixGeneration_multiplePatterns() {
testHelper
.addInputLines(
"in/Test.java",
"""
import java.util.regex.Pattern;
| Test |
java | apache__logging-log4j2 | log4j-api-test/src/main/java/org/apache/logging/log4j/test/junit/TestPropertySource.java | {
"start": 4253,
"end": 4832
} | class ____ implements TestProperties {
@Override
public String getProperty(final String key) {
return null;
}
@Override
public boolean containsProperty(final String key) {
return false;
}
@Override
public void setProperty(final String key, final String value) {
throw new UnsupportedOperationException();
}
@Override
public void clearProperty(final String key) {
throw new UnsupportedOperationException();
}
}
}
| EmptyTestProperties |
java | apache__dubbo | dubbo-common/src/main/java/org/apache/dubbo/common/threadpool/MemoryLimitCalculator.java | {
"start": 1592,
"end": 3883
} | class ____ {
private static volatile long maxAvailable;
private static final AtomicBoolean refreshStarted = new AtomicBoolean(false);
private static void refresh() {
maxAvailable = Runtime.getRuntime().freeMemory();
}
private static void checkAndScheduleRefresh() {
if (!refreshStarted.get()) {
// immediately refresh when first call to prevent maxAvailable from being 0
// to ensure that being refreshed before refreshStarted being set as true
// notice: refresh may be called for more than once because there is no lock
refresh();
if (refreshStarted.compareAndSet(false, true)) {
ScheduledExecutorService scheduledExecutorService =
Executors.newSingleThreadScheduledExecutor(new NamedThreadFactory("Dubbo-Memory-Calculator"));
// check every 50 ms to improve performance
scheduledExecutorService.scheduleWithFixedDelay(
MemoryLimitCalculator::refresh, 50, 50, TimeUnit.MILLISECONDS);
GlobalResourcesRepository.registerGlobalDisposable(() -> {
refreshStarted.set(false);
scheduledExecutorService.shutdown();
});
}
}
}
/**
* Get the maximum available memory of the current JVM.
*
* @return maximum available memory
*/
public static long maxAvailable() {
checkAndScheduleRefresh();
return maxAvailable;
}
/**
* Take the current JVM's maximum available memory
* as a percentage of the result as the limit.
*
* @param percentage percentage
* @return available memory
*/
public static long calculate(final float percentage) {
if (percentage <= 0 || percentage > 1) {
throw new IllegalArgumentException();
}
checkAndScheduleRefresh();
return (long) (maxAvailable() * percentage);
}
/**
* By default, it takes 80% of the maximum available memory of the current JVM.
*
* @return available memory
*/
public static long defaultLimit() {
checkAndScheduleRefresh();
return (long) (maxAvailable() * 0.8);
}
}
| MemoryLimitCalculator |
java | apache__maven | its/core-it-suite/src/test/resources/mng-8750-new-scopes/deps/test-dep/src/main/java/org/apache/maven/its/mng8750/deps/TestDep.java | {
"start": 859,
"end": 950
} | class ____ {
public String getMessage() {
return "Test dependency";
}
}
| TestDep |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.