language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
apache__camel
|
components/camel-twilio/src/generated/java/org/apache/camel/component/twilio/internal/SigningKeyApiMethod.java
|
{
"start": 656,
"end": 2663
}
|
enum ____ implements ApiMethod {
DELETER(
com.twilio.rest.api.v2010.account.SigningKeyDeleter.class,
"deleter",
arg("pathSid", String.class)),
DELETER_1(
com.twilio.rest.api.v2010.account.SigningKeyDeleter.class,
"deleter",
arg("pathAccountSid", String.class),
arg("pathSid", String.class)),
FETCHER(
com.twilio.rest.api.v2010.account.SigningKeyFetcher.class,
"fetcher",
arg("pathSid", String.class)),
FETCHER_1(
com.twilio.rest.api.v2010.account.SigningKeyFetcher.class,
"fetcher",
arg("pathAccountSid", String.class),
arg("pathSid", String.class)),
READER(
com.twilio.rest.api.v2010.account.SigningKeyReader.class,
"reader"),
READER_1(
com.twilio.rest.api.v2010.account.SigningKeyReader.class,
"reader",
arg("pathAccountSid", String.class)),
UPDATER(
com.twilio.rest.api.v2010.account.SigningKeyUpdater.class,
"updater",
arg("pathSid", String.class)),
UPDATER_1(
com.twilio.rest.api.v2010.account.SigningKeyUpdater.class,
"updater",
arg("pathAccountSid", String.class),
arg("pathSid", String.class));
private final ApiMethod apiMethod;
SigningKeyApiMethod(Class<?> resultType, String name, ApiMethodArg... args) {
this.apiMethod = new ApiMethodImpl(SigningKey.class, resultType, name, args);
}
@Override
public String getName() { return apiMethod.getName(); }
@Override
public Class<?> getResultType() { return apiMethod.getResultType(); }
@Override
public List<String> getArgNames() { return apiMethod.getArgNames(); }
@Override
public List<String> getSetterArgNames() { return apiMethod.getSetterArgNames(); }
@Override
public List<Class<?>> getArgTypes() { return apiMethod.getArgTypes(); }
@Override
public Method getMethod() { return apiMethod.getMethod(); }
}
|
SigningKeyApiMethod
|
java
|
dropwizard__dropwizard
|
dropwizard-core/src/main/java/io/dropwizard/core/setup/Bootstrap.java
|
{
"start": 1586,
"end": 5012
}
|
class ____<T extends Configuration> {
private final Application<T> application;
private final List<ConfiguredBundle<? super T>> configuredBundles;
private final List<Command> commands;
private ObjectMapper objectMapper;
private MetricRegistry metricRegistry;
@Nullable
private JmxReporter jmxReporter;
private ConfigurationSourceProvider configurationSourceProvider;
private ClassLoader classLoader;
private ConfigurationFactoryFactory<T> configurationFactoryFactory;
private ValidatorFactory validatorFactory;
private boolean metricsAreRegistered;
private HealthCheckRegistry healthCheckRegistry;
/**
* Creates a new {@link Bootstrap} for the given application.
*
* @param application a Dropwizard {@link Application}
*/
public Bootstrap(Application<T> application) {
this.application = application;
this.objectMapper = Jackson.newObjectMapper();
this.configuredBundles = new ArrayList<>();
this.commands = new ArrayList<>();
this.validatorFactory = Validators.newValidatorFactory();
this.metricRegistry = new MetricRegistry();
this.configurationSourceProvider = new FileConfigurationSourceProvider();
this.classLoader = Thread.currentThread().getContextClassLoader();
this.configurationFactoryFactory = new DefaultConfigurationFactoryFactory<>();
this.healthCheckRegistry = new HealthCheckRegistry();
}
/**
* Registers the JVM metrics to the metric registry and start to report
* the registry metrics via JMX.
*/
public void registerMetrics() {
if (metricsAreRegistered) {
return;
}
getMetricRegistry().register("jvm.attribute", new JvmAttributeGaugeSet());
getMetricRegistry().register("jvm.buffers", new BufferPoolMetricSet(ManagementFactory
.getPlatformMBeanServer()));
getMetricRegistry().register("jvm.classloader", new ClassLoadingGaugeSet());
getMetricRegistry().register("jvm.filedescriptor", new FileDescriptorRatioGauge());
getMetricRegistry().register("jvm.gc", new GarbageCollectorMetricSet());
getMetricRegistry().register("jvm.memory", new MemoryUsageGaugeSet());
getMetricRegistry().register("jvm.threads", new ThreadStatesGaugeSet());
jmxReporter = JmxReporter.forRegistry(metricRegistry).build();
jmxReporter.start();
metricsAreRegistered = true;
}
/**
* Returns the {@link JmxReporter} registered with the bootstrap's {@link MetricRegistry}.
*
* @since 2.1
*/
@Nullable
public JmxReporter getJmxReporter() {
return jmxReporter;
}
/**
* Returns the bootstrap's {@link Application}.
*/
public Application<T> getApplication() {
return application;
}
/**
* Returns the bootstrap's {@link ConfigurationSourceProvider}.
*/
public ConfigurationSourceProvider getConfigurationSourceProvider() {
return configurationSourceProvider;
}
/**
* Sets the bootstrap's {@link ConfigurationSourceProvider}.
*/
public void setConfigurationSourceProvider(ConfigurationSourceProvider provider) {
this.configurationSourceProvider = requireNonNull(provider);
}
/**
* Returns the bootstrap's
|
Bootstrap
|
java
|
assertj__assertj-core
|
assertj-core/src/main/java/org/assertj/core/error/ShouldBeGreaterOrEqual.java
|
{
"start": 995,
"end": 2505
}
|
class ____ extends BasicErrorMessageFactory {
/**
* Creates a new <code>{@link ShouldBeGreaterOrEqual}</code>.
* @param <T> guarantees that the values used in this factory have the same type.
* @param actual the actual value in the failed assertion.
* @param other the value used in the failed assertion to compare the actual value to.
* @return the created {@code ErrorMessageFactory}.
*/
public static <T> ErrorMessageFactory shouldBeGreaterOrEqual(Comparable<? super T> actual, Comparable<? super T> other) {
return new ShouldBeGreaterOrEqual(actual, other, StandardComparisonStrategy.instance());
}
/**
* Creates a new <code>{@link ShouldBeGreaterOrEqual}</code>.
* @param actual the actual value in the failed assertion.
* @param other the value used in the failed assertion to compare the actual value to.
* @param comparisonStrategy the {@link ComparisonStrategy} used to evaluate assertion.
* @return the created {@code ErrorMessageFactory}.
*/
public static ErrorMessageFactory shouldBeGreaterOrEqual(Object actual, Object other,
ComparisonStrategy comparisonStrategy) {
return new ShouldBeGreaterOrEqual(actual, other, comparisonStrategy);
}
private <T> ShouldBeGreaterOrEqual(T actual, T other, ComparisonStrategy comparisonStrategy) {
super("%nExpecting actual:%n %s%nto be greater than or equal to:%n %s%n%s", actual, other, comparisonStrategy);
}
}
|
ShouldBeGreaterOrEqual
|
java
|
apache__camel
|
components/camel-wordpress/src/main/java/org/apache/camel/component/wordpress/api/service/impl/WordpressServicePagesAdapter.java
|
{
"start": 1713,
"end": 3789
}
|
class ____ extends AbstractWordpressCrudServiceAdapter<PagesSPI, Page, PageSearchCriteria>
implements WordpressServicePages {
private static final Logger LOGGER = LoggerFactory.getLogger(WordpressServicePagesAdapter.class);
public WordpressServicePagesAdapter(String wordpressUrl, String apiVersion) {
super(wordpressUrl, apiVersion);
}
@Override
protected Class<PagesSPI> getSpiType() {
return PagesSPI.class;
}
// @formatter:off
@Override
public List<Page> list(PageSearchCriteria c) {
LOGGER.debug("Calling list pages: searchCriteria {}", c);
Objects.requireNonNull(c, "Please provide a search criteria");
return getSpi().list(this.getApiVersion(), c.getContext(), c.getPage(), c.getPerPage(), c.getSearch(), c.getAfter(),
c.getAuthor(), c.getAuthorExclude(), c.getBefore(), c.getExclude(),
c.getInclude(), c.getMenuOrder(), c.getOffset(), c.getOrder(), c.getOrderBy(), c.getParent(),
c.getParentExclude(), c.getSlug(), c.getStatus(), c.getFilter());
}
// @formatter:on
@Override
public Page retrieve(Integer pageId, Context context, String password) {
LOGGER.debug("Calling retrieve: postId {}; context: {}", pageId, context);
if (pageId <= 0) {
throw new IllegalArgumentException("Please provide a non zero post id");
}
return getSpi().retrieve(this.getApiVersion(), pageId, context, password);
}
@Override
protected Page doCreate(Page object) {
return getSpi().create(getApiVersion(), object);
}
@Override
protected Page doDelete(Integer id) {
return getSpi().delete(getApiVersion(), id, false);
}
@Override
protected Page doUpdate(Integer id, Page object) {
return getSpi().update(getApiVersion(), id, object);
}
@Override
protected Page doRetrieve(Integer entityID, Context context) {
return getSpi().retrieve(getApiVersion(), entityID, context, null);
}
}
|
WordpressServicePagesAdapter
|
java
|
spring-cloud__spring-cloud-gateway
|
spring-cloud-gateway-server-webflux/src/test/java/org/springframework/cloud/gateway/config/GatewayRedisAutoConfigurationTests.java
|
{
"start": 1596,
"end": 2276
}
|
class ____ {
// TODO: figure out why I need these
@Bean
RedisRouteDefinitionRepositoryTests.TestGatewayFilterFactory testGatewayFilterFactory() {
return new RedisRouteDefinitionRepositoryTests.TestGatewayFilterFactory();
}
@Bean
RedisRouteDefinitionRepositoryTests.TestFilterGatewayFilterFactory testFilterGatewayFilterFactory() {
return new RedisRouteDefinitionRepositoryTests.TestFilterGatewayFilterFactory();
}
@Bean
RedisRouteDefinitionRepositoryTests.TestRoutePredicateFactory testRoutePredicateFactory() {
return new RedisRouteDefinitionRepositoryTests.TestRoutePredicateFactory();
}
}
@Nested
@SpringBootTest(classes = Config.class)
|
Config
|
java
|
google__dagger
|
javatests/dagger/hilt/android/processor/internal/androidentrypoint/AndroidEntryPointProcessorTest.java
|
{
"start": 2605,
"end": 3174
}
|
class ____ extends ComponentActivity { }");
HiltCompilerTests.hiltCompiler(testActivity)
.compile(
(CompilationResultSubject subject) -> {
subject.hasErrorCount(1);
subject
.hasErrorContaining("Expected @AndroidEntryPoint to have a value.")
;
});
}
@Test
public void generatedSuperclass() {
Source annotation =
HiltCompilerTests.javaSource(
"test.GenerateAndroidActivity",
"package test;",
"",
"@
|
MyActivity
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/type/descriptor/java/CoercionHelper.java
|
{
"start": 382,
"end": 8983
}
|
class ____ {
private CoercionHelper() {
// disallow direct instantiation
}
public static Byte toByte(Short value) {
if ( value > Byte.MAX_VALUE ) {
throw new CoercionException(
String.format(
Locale.ROOT,
"Cannot coerce Short value `%s` to Byte : overflow",
value
)
);
}
if ( value < Byte.MIN_VALUE ) {
throw new CoercionException(
String.format(
Locale.ROOT,
"Cannot coerce Short value `%s` to Byte : underflow",
value
)
);
}
return value.byteValue();
}
public static Byte toByte(Integer value) {
if ( value > Byte.MAX_VALUE ) {
throw new CoercionException(
String.format(
Locale.ROOT,
"Cannot coerce Integer value `%s` to Byte : overflow",
value
)
);
}
if ( value < Byte.MIN_VALUE ) {
throw new CoercionException(
String.format(
Locale.ROOT,
"Cannot coerce Integer value `%s` to Byte : underflow",
value
)
);
}
return value.byteValue();
}
public static Byte toByte(Long value) {
if ( value > Byte.MAX_VALUE ) {
throw new CoercionException(
String.format(
Locale.ROOT,
"Cannot coerce Long value `%s` to Byte : overflow",
value
)
);
}
if ( value < Byte.MIN_VALUE ) {
throw new CoercionException(
String.format(
Locale.ROOT,
"Cannot coerce Long value `%s` to Byte : underflow",
value
)
);
}
return value.byteValue();
}
public static Byte toByte(Double value) {
if ( ! isWholeNumber( value ) ) {
throw new CoercionException(
String.format(
Locale.ROOT,
"Cannot coerce Double value `%s` to Byte : not a whole number",
value
)
);
}
if ( value > Byte.MAX_VALUE ) {
throw new CoercionException(
String.format(
Locale.ROOT,
"Cannot coerce Double value `%s` to Byte : overflow",
value
)
);
}
if ( value < Byte.MIN_VALUE ) {
throw new CoercionException(
String.format(
Locale.ROOT,
"Cannot coerce Double value `%s` to Byte : underflow",
value
)
);
}
return value.byteValue();
}
public static Byte toByte(Float value) {
if ( ! isWholeNumber( value ) ) {
throw new CoercionException(
String.format(
Locale.ROOT,
"Cannot coerce Float value `%s` to Byte : not a whole number",
value
)
);
}
if ( value > Byte.MAX_VALUE ) {
throw new CoercionException(
String.format(
Locale.ROOT,
"Cannot coerce Float value `%s` to Byte : overflow",
value
)
);
}
if ( value < Byte.MIN_VALUE ) {
throw new CoercionException(
String.format(
Locale.ROOT,
"Cannot coerce Float value `%s` to Byte : underflow",
value
)
);
}
return value.byteValue();
}
public static Byte toByte(BigInteger value) {
return coerceWrappingError( value::byteValueExact );
}
public static Byte toByte(BigDecimal value) {
return coerceWrappingError( value::byteValueExact );
}
public static Short toShort(Byte value) {
return value.shortValue();
}
public static Short toShort(Integer value) {
if ( value > Short.MAX_VALUE ) {
throw new CoercionException( "Cannot coerce Integer value `" + value + "` as Short : overflow" );
}
if ( value < Short.MIN_VALUE ) {
throw new CoercionException( "Cannot coerce Integer value `" + value + "` as Short : underflow" );
}
return value.shortValue();
}
public static Short toShort(Long value) {
if ( value > Short.MAX_VALUE ) {
throw new CoercionException( "Cannot coerce Long value `" + value + "` as Short : overflow" );
}
if ( value < Short.MIN_VALUE ) {
throw new CoercionException( "Cannot coerce Long value `" + value + "` as Short : underflow" );
}
return value.shortValue();
}
public static Short toShort(Double doubleValue) {
if ( ! isWholeNumber( doubleValue ) ) {
throw new CoercionException( "Cannot coerce Double value `" + doubleValue + "` as Short : not a whole number" );
}
return toShort( doubleValue.longValue() );
}
public static Short toShort(Float floatValue) {
if ( ! isWholeNumber( floatValue ) ) {
throw new CoercionException( "Cannot coerce Float value `" + floatValue + "` as Short : not a whole number" );
}
return toShort( floatValue.longValue() );
}
public static Short toShort(BigInteger value) {
return coerceWrappingError( value::shortValueExact );
}
public static Short toShort(BigDecimal value) {
return coerceWrappingError( value::shortValueExact );
}
public static Integer toInteger(Byte value) {
return value.intValue();
}
public static Integer toInteger(Short value) {
return value.intValue();
}
public static Integer toInteger(Long value) {
return coerceWrappingError( () -> Math.toIntExact( value ) );
}
public static Integer toInteger(Double doubleValue) {
if ( ! isWholeNumber( doubleValue ) ) {
throw new CoercionException(
String.format(
Locale.ROOT,
"Unable to coerce Double value `%s` to Integer: not a whole number",
doubleValue
)
);
}
return toInteger( doubleValue.longValue() );
}
public static Integer toInteger(Float floatValue) {
if ( ! isWholeNumber( floatValue ) ) {
throw new CoercionException(
String.format(
Locale.ROOT,
"Unable to coerce Float value `%s` to Integer: not a whole number",
floatValue
)
);
}
return toInteger( floatValue.longValue() );
}
public static Integer toInteger(BigInteger value) {
return coerceWrappingError( value::intValueExact );
}
public static Integer toInteger(BigDecimal value) {
return coerceWrappingError( value::intValueExact );
}
public static Long toLong(Byte value) {
return value.longValue();
}
public static Long toLong(Short value) {
return value.longValue();
}
public static Long toLong(Integer value) {
return value.longValue();
}
public static Long toLong(Double doubleValue) {
if ( ! isWholeNumber( doubleValue ) ) {
throw new CoercionException(
String.format(
Locale.ROOT,
"Unable to coerce Double value `%s` as Integer: not a whole number",
doubleValue
)
);
}
return doubleValue.longValue();
}
public static Long toLong(Float floatValue) {
if ( ! isWholeNumber( floatValue ) ) {
throw new CoercionException(
String.format(
Locale.ROOT,
"Unable to coerce Float value `%s` as Integer: not a whole number",
floatValue
)
);
}
return floatValue.longValue();
}
public static Long toLong(BigInteger value) {
return coerceWrappingError( value::longValueExact );
}
public static Long toLong(BigDecimal value) {
return coerceWrappingError( value::longValueExact );
}
public static BigInteger toBigInteger(Double doubleValue) {
if ( ! isWholeNumber( doubleValue ) ) {
throw new CoercionException(
String.format(
Locale.ROOT,
"Unable to coerce Double value `%s` as BigInteger: not a whole number",
doubleValue
)
);
}
return BigInteger.valueOf( doubleValue.longValue() );
}
public static BigInteger toBigInteger(Float floatValue) {
if ( ! isWholeNumber( floatValue ) ) {
throw new CoercionException(
String.format(
Locale.ROOT,
"Unable to coerce Double Float `%s` as BigInteger: not a whole number",
floatValue
)
);
}
return BigInteger.valueOf( floatValue.longValue() );
}
public static BigInteger toBigInteger(BigDecimal value) {
return coerceWrappingError( value::toBigIntegerExact );
}
public static Double toDouble(Float floatValue) {
if ( floatValue > (float) Double.MAX_VALUE ) {
throw new CoercionException(
String.format(
Locale.ROOT,
"Cannot coerce Float value `%s` to Double : overflow",
floatValue
)
);
}
if ( floatValue < (float) Double.MIN_VALUE ) {
throw new CoercionException(
String.format(
Locale.ROOT,
"Cannot coerce Float value `%s` to Double : underflow",
floatValue
)
);
}
return (double) floatValue;
}
public static Double toDouble(BigInteger value) {
return coerceWrappingError( value::doubleValue );
}
public static Double toDouble(BigDecimal value) {
return coerceWrappingError( value::doubleValue );
}
public static boolean isWholeNumber(double doubleValue) {
return doubleValue % 1 == 0;
}
public static boolean isWholeNumber(float floatValue) {
return floatValue == ( (float) (long) floatValue );
}
@FunctionalInterface
public
|
CoercionHelper
|
java
|
apache__logging-log4j2
|
log4j-core/src/main/java/org/apache/logging/log4j/core/config/arbiters/SystemPropertyArbiter.java
|
{
"start": 2178,
"end": 3400
}
|
class ____ implements org.apache.logging.log4j.core.util.Builder<SystemPropertyArbiter> {
public static final String ATTR_PROPERTY_NAME = "propertyName";
public static final String ATTR_PROPERTY_VALUE = "propertyValue";
@PluginBuilderAttribute(ATTR_PROPERTY_NAME)
private String propertyName;
@PluginBuilderAttribute(ATTR_PROPERTY_VALUE)
private String propertyValue;
/**
* Sets the Property Name.
* @param propertyName the property name.
* @return this
*/
public Builder setPropertyName(final String propertyName) {
this.propertyName = propertyName;
return asBuilder();
}
/**
* Sets the Property Value.
* @param propertyValue the property value.
* @return this
*/
public Builder setPropertyValue(final String propertyValue) {
this.propertyValue = propertyValue;
return asBuilder();
}
public Builder asBuilder() {
return this;
}
public SystemPropertyArbiter build() {
return new SystemPropertyArbiter(propertyName, propertyValue);
}
}
}
|
Builder
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/type/TypeFactoryTest.java
|
{
"start": 25092,
"end": 30269
}
|
class ____ implements Map.Entry<String,Integer> { }
@Test
public void testMapEntryResolution()
{
JavaType t = TF.constructType(StringIntMapEntry.class);
JavaType mapEntryType = t.findSuperType(Map.Entry.class);
assertNotNull(mapEntryType);
assertTrue(mapEntryType.hasGenericTypes());
assertEquals(2, mapEntryType.containedTypeCount());
assertEquals(String.class, mapEntryType.containedType(0).getRawClass());
assertEquals(Integer.class, mapEntryType.containedType(1).getRawClass());
}
/*
/**********************************************************
/* Unit tests: construction of "raw" types
/**********************************************************
*/
@Test
public void testRawCollections()
{
JavaType type = TF.constructRawCollectionType(ArrayList.class);
assertTrue(type.isContainerType());
assertEquals(TypeFactory.unknownType(), type.getContentType());
type = TF.constructRawCollectionLikeType(CollectionLike.class); // must have type vars
assertTrue(type.isCollectionLikeType());
assertEquals(TypeFactory.unknownType(), type.getContentType());
// actually, should also allow "no type vars" case
type = TF.constructRawCollectionLikeType(String.class);
assertTrue(type.isCollectionLikeType());
assertEquals(TypeFactory.unknownType(), type.getContentType());
}
@Test
public void testRawMaps()
{
JavaType type = TF.constructRawMapType(HashMap.class);
assertTrue(type.isContainerType());
assertEquals(TypeFactory.unknownType(), type.getKeyType());
assertEquals(TypeFactory.unknownType(), type.getContentType());
type = TF.constructRawMapLikeType(MapLike.class); // must have type vars
assertTrue(type.isMapLikeType());
assertEquals(TypeFactory.unknownType(), type.getKeyType());
assertEquals(TypeFactory.unknownType(), type.getContentType());
// actually, should also allow "no type vars" case
type = TF.constructRawMapLikeType(String.class);
assertTrue(type.isMapLikeType());
assertEquals(TypeFactory.unknownType(), type.getKeyType());
assertEquals(TypeFactory.unknownType(), type.getContentType());
}
/*
/**********************************************************
/* Unit tests: other
/**********************************************************
*/
@Test
public void testMoreSpecificType()
{
JavaType t1 = TF.constructCollectionType(Collection.class, Object.class);
JavaType t2 = TF.constructCollectionType(List.class, Object.class);
assertSame(t2, TF.moreSpecificType(t1, t2));
assertSame(t2, TF.moreSpecificType(t2, t1));
t1 = TF.constructType(Double.class);
t2 = TF.constructType(Number.class);
assertSame(t1, TF.moreSpecificType(t1, t2));
assertSame(t1, TF.moreSpecificType(t2, t1));
// and then unrelated, return first
t1 = TF.constructType(Double.class);
t2 = TF.constructType(String.class);
assertSame(t1, TF.moreSpecificType(t1, t2));
assertSame(t2, TF.moreSpecificType(t2, t1));
}
// [databind#489]
@Test
public void testCacheClearing()
{
TypeFactory tf = TF.withModifier(null);
assertEquals(0, tf._typeCache.size());
tf.constructType(getClass());
// 19-Oct-2015, tatu: This is pretty fragile but
assertTrue(tf._typeCache.size() > 0);
tf.clearCache();
assertEquals(0, tf._typeCache.size());
}
// for [databind#1297]
@Test
public void testRawMapType()
{
TypeFactory tf = TF.withModifier(null); // to get a new copy
JavaType type = tf.constructParametricType(Wrapper1297.class, Map.class);
assertNotNull(type);
assertEquals(Wrapper1297.class, type.getRawClass());
}
// for [databind#3443]
@Test
public void testParameterizedClassType() {
JavaType t = TF.constructType(new TypeReference<Class<? extends CharSequence>>() { });
assertEquals(SimpleType.class, t.getClass());
assertEquals(1, t.containedTypeCount());
assertEquals(CharSequence.class, t.containedType(0).getRawClass());
}
// for [databind#3876]
@Test
public void testParameterizedSimpleType() {
JavaType charSequenceClass = TF.constructType(new TypeReference<Class<? extends CharSequence>>() { });
JavaType numberClass = TF.constructType(new TypeReference<Class<? extends Number>>() { });
assertEquals(SimpleType.class, charSequenceClass.getClass());
assertEquals(SimpleType.class, numberClass.getClass());
assertNotEquals(charSequenceClass, numberClass);
assertNotEquals(
charSequenceClass.hashCode(), numberClass.hashCode(),
"hash values should be distributed");
}
private void assertEqualsAndHash(JavaType t1, JavaType t2) {
assertEquals(t1, t2);
assertEquals(t2, t1);
assertEquals(t1.hashCode(), t2.hashCode());
}
}
|
StringIntMapEntry
|
java
|
elastic__elasticsearch
|
build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/snyk/SnykDependencyGraph.java
|
{
"start": 654,
"end": 1542
}
|
class ____ {
static private final String schemaVersion = "1.2.0";
private final Map<String, Object> graph;
private final Set<SnykDependencyPkg> pkgs;
private final Map<String, String> pkgManager;
public SnykDependencyGraph(String gradleVersion, Set<SnykDependencyNode> nodes, Set<SnykDependencyPkg> pkgs) {
this.pkgs = pkgs;
this.graph = new HashMap();
graph.put("rootNodeId", "root-node");
graph.put("nodes", nodes);
this.pkgManager = Map.of("name", "gradle", "version", gradleVersion);
}
public String getSchemaVersion() {
return schemaVersion;
}
public Map<String, String> getPkgManager() {
return pkgManager;
}
public Map<String, Object> getGraph() {
return graph;
}
public Set<SnykDependencyPkg> getPkgs() {
return pkgs;
}
static
|
SnykDependencyGraph
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/JSONBytesTest2.java
|
{
"start": 133,
"end": 656
}
|
class ____ extends TestCase {
public void test_codec() throws Exception {
String text="𠜎𠜱𠝹𠱓𠱸𠲖𠳏𠳕𠴕𠵼𠵿𠸎𠸏𠹷𠺝𠺢𠻗𠻹𠻺𠼭𠼮𠽌𠾴𠾼𠿪𡁜𡁯𡁵𡁶𡁻𡃁𡃉𡇙𢃇𢞵𢫕𢭃𢯊𢱑𢱕𢳂𢴈𢵌𢵧𢺳𣲷𤓓𤶸𤷪𥄫𦉘𦟌𦧲𦧺𧨾𨅝𨈇𨋢𨳊𨳍𨳒𩶘";
byte[] bytes = JSON.toJSONBytes(text);
String text2 = (String) JSON.parse(bytes);
Assert.assertEquals(text.length(), text2.length());
for (int i = 0; i < text.length(); ++i) {
char c1 = text.charAt(i);
char c2 = text2.charAt(i);
Assert.assertEquals(c1, c2);
}
}
}
|
JSONBytesTest2
|
java
|
mapstruct__mapstruct
|
processor/src/test/java/org/mapstruct/ap/test/emptytarget/EmptyTargetMapper.java
|
{
"start": 237,
"end": 423
}
|
interface ____ {
TargetWithNoSetters mapToTargetWithSetters(Source source);
EmptyTarget mapToEmptyTarget(Source source);
Target mapToTarget(Source source);
}
|
EmptyTargetMapper
|
java
|
spring-projects__spring-framework
|
spring-context/src/test/java/org/springframework/context/annotation/configuration/ImportTests.java
|
{
"start": 8722,
"end": 8877
}
|
class ____ {
@Bean
ITestBean dataSourceA() {
return new TestBean();
}
}
@Configuration
@Import({Foo1.class, Foo2.class})
static
|
DataSourceConfig
|
java
|
apache__flink
|
flink-core/src/test/java/org/apache/flink/util/StringUtilsTest.java
|
{
"start": 1030,
"end": 2674
}
|
class ____ {
@Test
void testControlCharacters() {
String testString = "\b \t \n \f \r default";
String controlString = StringUtils.showControlCharacters(testString);
assertThat(controlString).isEqualTo("\\b \\t \\n \\f \\r default");
}
@Test
void testArrayAwareToString() {
assertThat(StringUtils.arrayAwareToString(null)).isEqualTo("null");
assertThat(StringUtils.arrayAwareToString(DayOfWeek.MONDAY)).isEqualTo("MONDAY");
assertThat(StringUtils.arrayAwareToString(new int[] {1, 2, 3})).isEqualTo("[1, 2, 3]");
assertThat(StringUtils.arrayAwareToString(new byte[][] {{4, 5, 6}, null, {}}))
.isEqualTo("[[4, 5, 6], null, []]");
assertThat(
StringUtils.arrayAwareToString(
new Object[] {new Integer[] {4, 5, 6}, null, DayOfWeek.MONDAY}))
.isEqualTo("[[4, 5, 6], null, MONDAY]");
}
@Test
void testStringToHexArray() {
String hex = "019f314a";
byte[] hexArray = StringUtils.hexStringToByte(hex);
byte[] expectedArray = new byte[] {1, -97, 49, 74};
assertThat(hexArray).isEqualTo(expectedArray);
}
@Test
void testHexArrayToString() {
byte[] byteArray = new byte[] {1, -97, 49, 74};
String hex = StringUtils.byteToHexString(byteArray);
assertThat(hex).isEqualTo("019f314a");
}
@Test
void testGenerateAlphanumeric() {
String str = StringUtils.generateRandomAlphanumericString(new Random(), 256);
assertThat(str).matches("[a-zA-Z0-9]{256}");
}
}
|
StringUtilsTest
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
|
{
"start": 1075,
"end": 1208
}
|
class ____ a FileStatus that includes a file's block locations.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public
|
defines
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/insertordering/InsertOrderingWithJoinedTableInheritance.java
|
{
"start": 3682,
"end": 4308
}
|
class ____ {
@Id
@Column(name = "ID", nullable = false)
private Integer id;
@OneToMany(orphanRemoval = true, cascade = {
CascadeType.PERSIST,
CascadeType.REMOVE
})
@JoinColumn(name = "PERSONID", referencedColumnName = "ID", nullable = false, updatable = false)
@BatchSize(size = 100)
private Set<Address> addresses = new HashSet<>();
protected Person() {
}
public Person(int id) {
this.id = id;
}
public void addAddress(Address address) {
this.addresses.add( address );
}
}
@Entity(name = "SpecialPerson")
@Access(AccessType.FIELD)
@DiscriminatorValue("2")
public static
|
Person
|
java
|
apache__camel
|
components/camel-snakeyaml/src/test/java/org/apache/camel/component/snakeyaml/SnakeYAMLTypeFilterHelper.java
|
{
"start": 1446,
"end": 2659
}
|
class ____ {
private SnakeYAMLTypeFilterHelper() {
}
static void testSafeConstructor(ProducerTemplate template) {
Exception ex = assertThrows(CamelExecutionException.class,
() -> template.sendBody(
"direct:safe-constructor",
"!!org.apache.camel.component.snakeyaml.model.TestPojo {name: Camel}"),
"As SnakeYAML uses SafeConstructor, custom types should not be allowed");
assertTrue(ex.getCause() instanceof ConstructorException);
}
static void testTypeConstructor(ProducerTemplate template) {
Object result = assertDoesNotThrow(() -> template.requestBody(
"direct:type-constructor",
"!!org.apache.camel.component.snakeyaml.model.TestPojo {name: Camel}"));
assertNotNull(result);
assertTrue(result instanceof TestPojo);
Exception ex = assertThrows(CamelExecutionException.class,
() -> template.sendBody(
"direct:type-constructor",
"!!org.apache.camel.component.snakeyaml.model.UnsafePojo {name: Camel}"),
"As SnakeYAML filters
|
SnakeYAMLTypeFilterHelper
|
java
|
apache__camel
|
dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/DebeziumPostgresComponentBuilderFactory.java
|
{
"start": 83639,
"end": 85029
}
|
interface ____ is called on each app boot to determine whether to do a
* snapshot.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: postgres
*
* @param snapshotModeCustomName the value to set
* @return the dsl builder
*/
default DebeziumPostgresComponentBuilder snapshotModeCustomName(java.lang.String snapshotModeCustomName) {
doSetProperty("snapshotModeCustomName", snapshotModeCustomName);
return this;
}
/**
* Controls query used during the snapshot.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: select_all
* Group: postgres
*
* @param snapshotQueryMode the value to set
* @return the dsl builder
*/
default DebeziumPostgresComponentBuilder snapshotQueryMode(java.lang.String snapshotQueryMode) {
doSetProperty("snapshotQueryMode", snapshotQueryMode);
return this;
}
/**
* When 'snapshot.query.mode' is set as custom, this setting must be set
* to specify a the name of the custom implementation provided in the
* 'name()' method. The implementations must implement the
* 'SnapshotterQuery'
|
and
|
java
|
spring-projects__spring-framework
|
spring-core/src/main/java/org/springframework/asm/SymbolTable.java
|
{
"start": 14311,
"end": 14396
}
|
class ____ which this symbol table belongs.
*
* @return the major version of the
|
to
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/KubernetesNodesEndpointBuilderFactory.java
|
{
"start": 37138,
"end": 46804
}
|
interface ____
extends
KubernetesNodesEndpointConsumerBuilder,
KubernetesNodesEndpointProducerBuilder {
default AdvancedKubernetesNodesEndpointBuilder advanced() {
return (AdvancedKubernetesNodesEndpointBuilder) this;
}
/**
* The Kubernetes API Version to use.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param apiVersion the value to set
* @return the dsl builder
*/
default KubernetesNodesEndpointBuilder apiVersion(String apiVersion) {
doSetProperty("apiVersion", apiVersion);
return this;
}
/**
* The dns domain, used for ServiceCall EIP.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param dnsDomain the value to set
* @return the dsl builder
*/
default KubernetesNodesEndpointBuilder dnsDomain(String dnsDomain) {
doSetProperty("dnsDomain", dnsDomain);
return this;
}
/**
* Default KubernetesClient to use if provided.
*
* The option is a:
* <code>io.fabric8.kubernetes.client.KubernetesClient</code> type.
*
* Group: common
*
* @param kubernetesClient the value to set
* @return the dsl builder
*/
default KubernetesNodesEndpointBuilder kubernetesClient(io.fabric8.kubernetes.client.KubernetesClient kubernetesClient) {
doSetProperty("kubernetesClient", kubernetesClient);
return this;
}
/**
* Default KubernetesClient to use if provided.
*
* The option will be converted to a
* <code>io.fabric8.kubernetes.client.KubernetesClient</code> type.
*
* Group: common
*
* @param kubernetesClient the value to set
* @return the dsl builder
*/
default KubernetesNodesEndpointBuilder kubernetesClient(String kubernetesClient) {
doSetProperty("kubernetesClient", kubernetesClient);
return this;
}
/**
* The namespace.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param namespace the value to set
* @return the dsl builder
*/
default KubernetesNodesEndpointBuilder namespace(String namespace) {
doSetProperty("namespace", namespace);
return this;
}
/**
* The port name, used for ServiceCall EIP.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param portName the value to set
* @return the dsl builder
*/
default KubernetesNodesEndpointBuilder portName(String portName) {
doSetProperty("portName", portName);
return this;
}
/**
* The port protocol, used for ServiceCall EIP.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: tcp
* Group: common
*
* @param portProtocol the value to set
* @return the dsl builder
*/
default KubernetesNodesEndpointBuilder portProtocol(String portProtocol) {
doSetProperty("portProtocol", portProtocol);
return this;
}
/**
* The CA Cert Data.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param caCertData the value to set
* @return the dsl builder
*/
default KubernetesNodesEndpointBuilder caCertData(String caCertData) {
doSetProperty("caCertData", caCertData);
return this;
}
/**
* The CA Cert File.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param caCertFile the value to set
* @return the dsl builder
*/
default KubernetesNodesEndpointBuilder caCertFile(String caCertFile) {
doSetProperty("caCertFile", caCertFile);
return this;
}
/**
* The Client Cert Data.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param clientCertData the value to set
* @return the dsl builder
*/
default KubernetesNodesEndpointBuilder clientCertData(String clientCertData) {
doSetProperty("clientCertData", clientCertData);
return this;
}
/**
* The Client Cert File.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param clientCertFile the value to set
* @return the dsl builder
*/
default KubernetesNodesEndpointBuilder clientCertFile(String clientCertFile) {
doSetProperty("clientCertFile", clientCertFile);
return this;
}
/**
* The Key Algorithm used by the client.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param clientKeyAlgo the value to set
* @return the dsl builder
*/
default KubernetesNodesEndpointBuilder clientKeyAlgo(String clientKeyAlgo) {
doSetProperty("clientKeyAlgo", clientKeyAlgo);
return this;
}
/**
* The Client Key data.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param clientKeyData the value to set
* @return the dsl builder
*/
default KubernetesNodesEndpointBuilder clientKeyData(String clientKeyData) {
doSetProperty("clientKeyData", clientKeyData);
return this;
}
/**
* The Client Key file.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param clientKeyFile the value to set
* @return the dsl builder
*/
default KubernetesNodesEndpointBuilder clientKeyFile(String clientKeyFile) {
doSetProperty("clientKeyFile", clientKeyFile);
return this;
}
/**
* The Client Key Passphrase.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param clientKeyPassphrase the value to set
* @return the dsl builder
*/
default KubernetesNodesEndpointBuilder clientKeyPassphrase(String clientKeyPassphrase) {
doSetProperty("clientKeyPassphrase", clientKeyPassphrase);
return this;
}
/**
* The Auth Token.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param oauthToken the value to set
* @return the dsl builder
*/
default KubernetesNodesEndpointBuilder oauthToken(String oauthToken) {
doSetProperty("oauthToken", oauthToken);
return this;
}
/**
* Password to connect to Kubernetes.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param password the value to set
* @return the dsl builder
*/
default KubernetesNodesEndpointBuilder password(String password) {
doSetProperty("password", password);
return this;
}
/**
* Define if the certs we used are trusted anyway or not.
*
* The option is a: <code>java.lang.Boolean</code> type.
*
* Default: false
* Group: security
*
* @param trustCerts the value to set
* @return the dsl builder
*/
default KubernetesNodesEndpointBuilder trustCerts(Boolean trustCerts) {
doSetProperty("trustCerts", trustCerts);
return this;
}
/**
* Define if the certs we used are trusted anyway or not.
*
* The option will be converted to a <code>java.lang.Boolean</code>
* type.
*
* Default: false
* Group: security
*
* @param trustCerts the value to set
* @return the dsl builder
*/
default KubernetesNodesEndpointBuilder trustCerts(String trustCerts) {
doSetProperty("trustCerts", trustCerts);
return this;
}
/**
* Username to connect to Kubernetes.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param username the value to set
* @return the dsl builder
*/
default KubernetesNodesEndpointBuilder username(String username) {
doSetProperty("username", username);
return this;
}
}
/**
* Advanced builder for endpoint for the Kubernetes Nodes component.
*/
public
|
KubernetesNodesEndpointBuilder
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/HamletSpec.java
|
{
"start": 23159,
"end": 24288
}
|
interface ____ {
/**
* Add a A (anchor) element.
* @return a new A element builder
*/
A a();
/**
* Add a A (anchor) element.
* @param selector the css selector in the form of (#id)*(.class)*
* @return a new A element builder
*/
A a(String selector);
/** Shortcut for <code>a().$href(href).__(anchorText).__();</code>
* @param href the URI
* @param anchorText for the URI
* @return the current element builder
*/
_Anchor a(String href, String anchorText);
/** Shortcut for <code>a(selector).$href(href).__(anchorText).__();</code>
* @param selector in the form of (#id)?(.class)*
* @param href the URI
* @param anchorText for the URI
* @return the current element builder
*/
_Anchor a(String selector, String href, String anchorText);
}
/**
* INS and DEL are unusual for HTML
* "in that they may serve as either block-level or inline elements
* (but not both)".
* <br>cf. http://www.w3.org/TR/html4/struct/text.html#h-9.4
* <br>cf. http://www.w3.org/TR/html5/edits.html#edits
*/
public
|
_Anchor
|
java
|
apache__camel
|
components/camel-jms/src/test/java/org/apache/camel/component/jms/integration/spring/tx/security/JmsToJmsTransactedSecurityIT.java
|
{
"start": 1688,
"end": 4910
}
|
class ____ extends CamelSpringTestSupport {
private static final Logger LOG = LoggerFactory.getLogger(JmsToJmsTransactedSecurityIT.class);
private static ActiveMQServer activeMQServer;
@BeforeAll
public static void before() throws Exception {
SecurityConfiguration securityConfig = new SecurityConfiguration();
securityConfig.addUser("admin", "secret");
securityConfig.addUser("scott", "tiger");
securityConfig.addRole("scott", "user");
securityConfig.addRole("admin", "admin");
securityConfig.addRole("admin", "user");
ActiveMQJAASSecurityManager securityManager
= new ActiveMQJAASSecurityManager(InVMLoginModule.class.getName(), securityConfig);
activeMQServer = ActiveMQServers.newActiveMQServer(
"org/apache/camel/component/jms/integration/spring/tx/security/artemis-security.xml", null,
securityManager);
activeMQServer.start();
}
@AfterAll
public static void after() throws Exception {
activeMQServer.stop();
}
/**
* Used by spring xml configurations
*
* @return
*/
public static String getServiceAddress() {
return "vm://999";
}
@Override
protected ClassPathXmlApplicationContext createApplicationContext() {
return new ClassPathXmlApplicationContext(
"/org/apache/camel/component/jms/integration/spring/tx/security/JmsToJmsTransactedSecurityTest.xml");
}
@Test
public void testJmsSecurityFailure() throws Exception {
context.addRoutes(new RouteBuilder() {
@Override
public void configure() {
from("activemq:queue:JmsToJmsTransactedSecurityTest")
.transacted()
.to("log:foo")
.to("activemq:queue:JmsToJmsTransactedSecurityTest.reply");
from("activemq:queue:JmsToJmsTransactedSecurityTest.reply").to("mock:bar");
}
});
context.start();
MockEndpoint mock = getMockEndpoint("mock:bar");
mock.expectedMessageCount(0);
template.sendBody("activemq:queue:JmsToJmsTransactedSecurityTest", "Hello World");
// get the message that got rolled back
Exchange exch = consumer.receive("activemq:queue:JmsToJmsTransactedSecurityTest", 250);
if (exch != null) {
LOG.info("Cleaned up orphaned message: {}", exch);
}
mock.assertIsSatisfied(3000);
}
@Test
public void testJmsSecurityOK() throws Exception {
context.addRoutes(new RouteBuilder() {
@Override
public void configure() {
from("direct:start")
.to("log:start")
.to("activemq:queue:JmsToJmsTransactedSecurityTest");
from("activemq:queue:JmsToJmsTransactedSecurityTest").to("mock:foo");
}
});
context.start();
getMockEndpoint("mock:foo").expectedBodiesReceived("Hello World");
template.sendBody("direct:start", "Hello World");
MockEndpoint.assertIsSatisfied(context);
}
}
|
JmsToJmsTransactedSecurityIT
|
java
|
elastic__elasticsearch
|
build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/EmptyDirTask.java
|
{
"start": 804,
"end": 1715
}
|
class ____ extends DefaultTask {
private File dir;
private int dirMode = 0755;
/**
* Creates an empty directory with the configured permissions.
*/
@TaskAction
public void create() {
dir.mkdirs();
getChmod().chmod(dir, dirMode);
}
@Inject
public Chmod getChmod() {
throw new UnsupportedOperationException();
}
@OutputDirectory
public File getDir() {
return dir;
}
@Input
public String getDirPath() {
return dir.getPath();
}
/**
* @param dir The directory to create
*/
public void setDir(File dir) {
this.dir = dir;
}
@Input
public int getDirMode() {
return dirMode;
}
/**
* @param dirMode The permissions to apply to the new directory
*/
public void setDirMode(int dirMode) {
this.dirMode = dirMode;
}
}
|
EmptyDirTask
|
java
|
apache__camel
|
components/camel-cxf/camel-cxf-rest/src/main/java/org/apache/camel/component/cxf/jaxrs/ChainedCxfRsConfigurer.java
|
{
"start": 2200,
"end": 2526
}
|
class ____ implements CxfRsConfigurer {
@Override
public void configure(AbstractJAXRSFactoryBean factoryBean) {
}
@Override
public void configureClient(Client client) {
}
@Override
public void configureServer(Server server) {
}
}
}
|
NullCxfRsConfigurer
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java
|
{
"start": 2686,
"end": 13670
}
|
class ____ {
private void testSkipInner(MiniDFSCluster cluster) throws IOException {
DistributedFileSystem fs = cluster.getFileSystem();
DFSClient client = fs.dfs;
Path file = new Path("/testfile");
int fileLength = 1 << 22;
byte[] fileContent = new byte[fileLength];
for (int i = 0; i < fileLength; i++) {
fileContent[i] = (byte) (i % 133);
}
FSDataOutputStream fout = fs.create(file);
fout.write(fileContent);
fout.close();
Random random = new Random();
for (int i = 3; i < 18; i++) {
DFSInputStream fin = client.open("/testfile");
for (long pos = 0; pos < fileLength;) {
long skip = random.nextInt(1 << i) + 1;
long skipped = fin.skip(skip);
if (pos + skip >= fileLength) {
assertEquals(fileLength, pos + skipped);
break;
} else {
assertEquals(skip, skipped);
pos += skipped;
int data = fin.read();
assertEquals(pos % 133, data);
pos += 1;
}
}
fin.close();
}
}
@Test
@Timeout(value = 60)
public void testSkipWithRemoteBlockReader() throws IOException {
Configuration conf = new Configuration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
try {
testSkipInner(cluster);
} finally {
cluster.shutdown();
}
}
@Test
@Timeout(value = 60)
public void testSkipWithRemoteBlockReader2() throws IOException {
Configuration conf = new Configuration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
try {
testSkipInner(cluster);
} finally {
cluster.shutdown();
}
}
@Test
@Timeout(value = 60)
public void testSkipWithLocalBlockReader() throws IOException {
assumeTrue(DomainSocket.getLoadingFailureReason() == null);
TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
DomainSocket.disableBindPathValidation();
Configuration conf = new Configuration();
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
new File(sockDir.getDir(),
"TestShortCircuitLocalRead._PORT.sock").getAbsolutePath());
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
try {
DFSInputStream.tcpReadsDisabledForTesting = true;
testSkipInner(cluster);
} finally {
DFSInputStream.tcpReadsDisabledForTesting = false;
cluster.shutdown();
sockDir.close();
}
}
@Test
@Timeout(value = 60)
public void testSeekToNewSource() throws IOException {
Configuration conf = new Configuration();
MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
DistributedFileSystem fs = cluster.getFileSystem();
Path path = new Path("/testfile");
DFSTestUtil.createFile(fs, path, 1024, (short) 3, 0);
DFSInputStream fin = fs.dfs.open("/testfile");
try {
fin.seekToNewSource(100);
assertEquals(100, fin.getPos());
DatanodeInfo firstNode = fin.getCurrentDatanode();
assertNotNull(firstNode);
fin.seekToNewSource(100);
assertEquals(100, fin.getPos());
assertFalse(firstNode.equals(fin.getCurrentDatanode()));
} finally {
fin.close();
cluster.shutdown();
}
}
@Test
@Timeout(value = 60)
public void testOpenInfo() throws IOException {
Configuration conf = new Configuration();
conf.setInt(Retry.TIMES_GET_LAST_BLOCK_LENGTH_KEY, 0);
MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
try {
DistributedFileSystem fs = cluster.getFileSystem();
int chunkSize = 512;
Random r = new Random(12345L);
byte[] data = new byte[chunkSize];
r.nextBytes(data);
Path file = new Path("/testfile");
try(FSDataOutputStream fout = fs.create(file)) {
fout.write(data);
}
DfsClientConf dcconf = new DfsClientConf(conf);
int retryTimesForGetLastBlockLength =
dcconf.getRetryTimesForGetLastBlockLength();
assertEquals(0, retryTimesForGetLastBlockLength);
try(DFSInputStream fin = fs.dfs.open("/testfile")) {
long flen = fin.getFileLength();
assertEquals(chunkSize, flen);
long lastBlockBeingWrittenLength =
fin.getlastBlockBeingWrittenLengthForTesting();
assertEquals(0, lastBlockBeingWrittenLength);
}
} finally {
cluster.shutdown();
}
}
@Test
public void testNullCheckSumWhenDNRestarted()
throws IOException, InterruptedException {
Configuration conf = new Configuration();
conf.set(HdfsClientConfigKeys.DFS_CHECKSUM_TYPE_KEY, "NULL");
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2)
.build();
cluster.waitActive();
try {
DistributedFileSystem fs = cluster.getFileSystem();
int chunkSize = 512;
Random r = new Random(12345L);
byte[] data = new byte[chunkSize];
r.nextBytes(data);
Path file = new Path("/testfile");
try (FSDataOutputStream fout = fs.create(file)) {
fout.write(data);
fout.hflush();
cluster.restartDataNode(0, true, true);
}
// wait for block to load
Thread.sleep(1000);
// fetch live DN
final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
cluster.getNameNode().getNamesystem().getBlockManager()
.getDatanodeManager().fetchDatanodes(live, null, false);
assertTrue(live.size() == 2,
"DN start should be success and live dn should be 2");
assertTrue(fs.getFileStatus(file).getLen() == chunkSize,
"File size should be " + chunkSize);
} finally {
cluster.shutdown();
}
}
@Test
public void testReadWithPreferredCachingReplica() throws IOException {
Configuration conf = new Configuration();
conf.setBoolean(DFS_CLIENT_READ_USE_CACHE_PRIORITY, true);
MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
DistributedFileSystem fs = null;
Path filePath = new Path("/testReadPreferredCachingReplica");
try {
fs = cluster.getFileSystem();
FSDataOutputStream out = fs.create(filePath, true, 4096, (short) 3, 512);
DFSInputStream dfsInputStream =
(DFSInputStream) fs.open(filePath).getWrappedStream();
LocatedBlock lb = mock(LocatedBlock.class);
when(lb.getCachedLocations()).thenReturn(DatanodeInfo.EMPTY_ARRAY);
DatanodeID nodeId = new DatanodeID("localhost", "localhost", "dn0", 1111,
1112, 1113, 1114);
DatanodeInfo dnInfo = new DatanodeDescriptor(nodeId);
when(lb.getCachedLocations()).thenReturn(new DatanodeInfo[] {dnInfo});
DatanodeInfo retDNInfo =
dfsInputStream.getBestNodeDNAddrPair(lb, null).info;
assertEquals(dnInfo, retDNInfo);
} finally {
fs.delete(filePath, true);
cluster.shutdown();
}
}
@Test
public void testReadWithoutPreferredCachingReplica() throws IOException {
Configuration conf = new Configuration();
conf.setBoolean(DFS_CLIENT_READ_USE_CACHE_PRIORITY, false);
MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
DistributedFileSystem fs = null;
Path filePath = new Path("/testReadWithoutPreferredCachingReplica");
try {
fs = cluster.getFileSystem();
FSDataOutputStream out = fs.create(filePath, true, 4096, (short) 3, 512);
DFSInputStream dfsInputStream =
(DFSInputStream) fs.open(filePath).getWrappedStream();
LocatedBlock lb = mock(LocatedBlock.class);
when(lb.getCachedLocations()).thenReturn(DatanodeInfo.EMPTY_ARRAY);
DatanodeID nodeId = new DatanodeID("localhost", "localhost", "dn0", 1111,
1112, 1113, 1114);
DatanodeInfo dnInfo = new DatanodeDescriptor(nodeId);
DatanodeInfoWithStorage dnInfoStorage =
new DatanodeInfoWithStorage(dnInfo, "DISK", StorageType.DISK);
when(lb.getLocations()).thenReturn(
new DatanodeInfoWithStorage[] {dnInfoStorage});
DatanodeInfo retDNInfo =
dfsInputStream.getBestNodeDNAddrPair(lb, null).info;
assertEquals(dnInfo, retDNInfo);
} finally {
fs.delete(filePath, true);
cluster.shutdown();
}
}
@Test
public void testCreateBlockReaderWhenInvalidBlockTokenException() throws
IOException, InterruptedException, TimeoutException {
GenericTestUtils.setLogLevel(DFSClient.LOG, Level.DEBUG);
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 64 * 1024);
conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 516);
DFSClientFaultInjector oldFaultInjector = DFSClientFaultInjector.get();
FSDataOutputStream out = null;
try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build()) {
cluster.waitActive();
DistributedFileSystem fs = cluster.getFileSystem();
// Create file which only contains one UC block.
String file = "/testfile";
Path path = new Path(file);
out = fs.create(path, (short) 3);
int bufferLen = 5120;
byte[] toWrite = new byte[bufferLen];
Random rb = new Random(0);
rb.nextBytes(toWrite);
out.write(toWrite, 0, bufferLen);
// Wait for the block length of the file to be 1.
GenericTestUtils.waitFor(() -> {
try {
return fs.getFileBlockLocations(path, 0, bufferLen).length == 1;
} catch (IOException e) {
return false;
}
}, 100, 10000);
// Set up the InjectionHandler.
DFSClientFaultInjector.set(Mockito.mock(DFSClientFaultInjector.class));
DFSClientFaultInjector injector = DFSClientFaultInjector.get();
final AtomicInteger count = new AtomicInteger(0);
Mockito.doAnswer(new Answer<Void>() {
@Override
public Void answer(InvocationOnMock invocation) throws Throwable {
// Mock access token was invalid when connecting to first datanode
// throw InvalidBlockTokenException.
if (count.getAndIncrement() == 0) {
throw new InvalidBlockTokenException("Mock InvalidBlockTokenException");
}
return null;
}
}).when(injector).failCreateBlockReader();
try (DFSInputStream in = new DFSInputStream(fs.getClient(), file,
false, null)) {
int bufLen = 1024;
byte[] buf = new byte[bufLen];
// Seek the offset to 1024 and which should be in the range (0, fileSize).
in.seek(1024);
int read = in.read(buf, 0, bufLen);
assertEquals(1024, read);
}
} finally {
DFSClientFaultInjector.set(oldFaultInjector);
IOUtils.closeStream(out);
}
}
}
|
TestDFSInputStream
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/ser/GenericTypeSerializationTest.java
|
{
"start": 531,
"end": 1340
}
|
class ____ {
private Long id;
private String name;
@JsonCreator
public Account(
@JsonProperty("name") String name,
@JsonProperty("id") Long id) {
this.id = id;
this.name = name;
}
public String getName() { return name; }
public Long getId() { return id; }
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Account account = (Account) o;
return Objects.equals(id, account.id) && Objects.equals(name, account.name);
}
@Override
public int hashCode() {
return Objects.hash(id, name);
}
}
static
|
Account
|
java
|
apache__camel
|
core/camel-core-model/src/main/java/org/apache/camel/builder/ExpressionBuilder.java
|
{
"start": 854,
"end": 961
}
|
class ____ working with <a href="http://camel.apache.org/expression.html">expressions</a>.
*/
public final
|
for
|
java
|
quarkusio__quarkus
|
extensions/agroal/deployment/src/test/java/io/quarkus/agroal/test/XaDataSourceConfigTest.java
|
{
"start": 743,
"end": 3046
}
|
class ____ {
//tag::injection[]
@Inject
@DataSource("xa")
AgroalDataSource xaRecoverDS;
@Inject
@DataSource("xaNoRecover")
AgroalDataSource xaNoRecoverDS;
//end::injection[]
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withConfigurationResource("application-recovery-datasources.properties");
@Test
@ActivateRequestContext
public void testEnlistDatasourcesWithRecovery() throws SQLException {
AgroalConnectionPoolConfiguration xaRecoverConfig = xaRecoverDS.getConfiguration().connectionPoolConfiguration();
AgroalConnectionPoolConfiguration xaNoRecoverConfig = xaNoRecoverDS.getConfiguration().connectionPoolConfiguration();
assertTrue(xaRecoverConfig.recoveryEnable(), "xaRecoverDS datasource should have recover enabled");
assertFalse(xaNoRecoverConfig.recoveryEnable(), "xaNoRecoverDS datasource should not have recover enabled");
assertInstanceOf(NarayanaTransactionIntegration.class, xaRecoverConfig.transactionIntegration(),
"Agroal transaction integration should use Narayana for xaRecoverDS");
assertInstanceOf(NarayanaTransactionIntegration.class, xaNoRecoverConfig.transactionIntegration(),
"Agroal transaction integration should use Narayana for xaNoRecoverDS");
// run a transaction and use the two datasources, ensuring that it commits ok
QuarkusTransaction.begin();
// Remark: the two datasources will have been registered with the transaction recovery system because the config
// includes quarkus.transaction-manager.enable-recovery=true
// see QuarkusRecoveryService for details of how the recovery service manages connections to datasources
try (var conn = xaRecoverDS.getConnection()) {
assertFalse(conn.getAutoCommit(), "XA connection should not have the auto commit flag set");
try (var conn2 = xaNoRecoverDS.getConnection()) {
assertFalse(conn2.getAutoCommit(), "XA connection should not have the auto commit flag set");
}
}
assertTrue(QuarkusTransaction.isActive(), "transaction should still have been active");
QuarkusTransaction.commit();
}
}
|
XaDataSourceConfigTest
|
java
|
redisson__redisson
|
redisson/src/main/java/org/redisson/api/vector/QuantizationType.java
|
{
"start": 720,
"end": 901
}
|
enum ____ {
/**
* No quantization
*/
NOQUANT,
/**
* Binary quantization
*/
BIN,
/**
* 8-bit quantization
*/
Q8
}
|
QuantizationType
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDFCachingGetSpaceUsed.java
|
{
"start": 1259,
"end": 2450
}
|
class ____ {
final static private File DF_DIR = GenericTestUtils.getTestDir("testdfspace");
public static final int FILE_SIZE = 1024;
@BeforeEach
public void setUp() {
FileUtil.fullyDelete(DF_DIR);
assertTrue(DF_DIR.mkdirs());
}
@AfterEach
public void tearDown() throws IOException {
FileUtil.fullyDelete(DF_DIR);
}
@Test
public void testCanBuildRun() throws Exception {
File file = writeFile("testCanBuild");
GetSpaceUsed instance = new CachingGetSpaceUsed.Builder()
.setPath(file)
.setInterval(50060)
.setKlass(DFCachingGetSpaceUsed.class)
.build();
assertTrue(instance instanceof DFCachingGetSpaceUsed);
assertTrue(instance.getUsed() >= FILE_SIZE - 20);
((DFCachingGetSpaceUsed) instance).close();
}
private File writeFile(String fileName) throws IOException {
File f = new File(DF_DIR, fileName);
assertTrue(f.createNewFile());
RandomAccessFile randomAccessFile = new RandomAccessFile(f, "rws");
randomAccessFile.writeUTF(RandomStringUtils.randomAlphabetic(FILE_SIZE));
randomAccessFile.getFD().sync();
randomAccessFile.close();
return f;
}
}
|
TestDFCachingGetSpaceUsed
|
java
|
google__guice
|
core/src/com/google/inject/internal/InternalFlags.java
|
{
"start": 4808,
"end": 7624
}
|
enum ____ {
AUTO {
@Override
boolean enabled() {
return System.console() != null && System.getenv("TERM") != null;
}
},
ON {
@Override
boolean enabled() {
return true;
}
},
OFF {
@Override
boolean enabled() {
return false;
}
};
abstract boolean enabled();
}
public static IncludeStackTraceOption getIncludeStackTraceOption() {
return INCLUDE_STACK_TRACES;
}
public static CustomClassLoadingOption getCustomClassLoadingOption() {
return CUSTOM_CLASS_LOADING;
}
public static NullableProvidesOption getNullableProvidesOption() {
return NULLABLE_PROVIDES;
}
public static boolean isBytecodeGenEnabled() {
return BYTECODE_GEN_OPTION == BytecodeGenOption.ENABLED;
}
public static boolean enableColorizeErrorMessages() {
return COLORIZE_OPTION.enabled();
}
public static boolean getUseMethodHandlesOption() {
return USE_METHOD_HANDLES
== UseMethodHandlesOption.YES
&& isBytecodeGenEnabled();
}
/**
* Gets the system option indicated by the specified key; runs as a privileged action.
*
* @param name of the system option
* @param defaultValue if the option is not set
* @return value of the option, defaultValue if not set
*/
private static <T extends Enum<T>> T getSystemOption(final String name, T defaultValue) {
return getSystemOption(name, defaultValue, defaultValue);
}
/**
* Gets the system option indicated by the specified key; runs as a privileged action.
*
* @param name of the system option
* @param defaultValue if the option is not set
* @param secureValue if the security manager disallows access to the option
* @return value of the option, defaultValue if not set, secureValue if no access
*/
private static <T extends Enum<T>> T getSystemOption(
final String name, T defaultValue, T secureValue) {
Class<T> enumType = defaultValue.getDeclaringClass();
String value = null;
try {
value =
AccessController.doPrivileged(
new PrivilegedAction<String>() {
@Override
public String run() {
return System.getProperty(name);
}
});
return (value != null && value.length() > 0) ? Enum.valueOf(enumType, value) : defaultValue;
} catch (SecurityException e) {
return secureValue;
} catch (IllegalArgumentException e) {
logger.warning(
value
+ " is not a valid flag value for "
+ name
+ ". "
+ " Values must be one of "
+ Arrays.asList(enumType.getEnumConstants()));
return defaultValue;
}
}
private InternalFlags() {}
}
|
ColorizeOption
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/WatsonSpeechToTextEndpointBuilderFactory.java
|
{
"start": 8426,
"end": 10972
}
|
interface ____
extends
EndpointProducerBuilder {
default WatsonSpeechToTextEndpointBuilder basic() {
return (WatsonSpeechToTextEndpointBuilder) this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default AdvancedWatsonSpeechToTextEndpointBuilder lazyStartProducer(boolean lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default AdvancedWatsonSpeechToTextEndpointBuilder lazyStartProducer(String lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
}
public
|
AdvancedWatsonSpeechToTextEndpointBuilder
|
java
|
alibaba__druid
|
core/src/main/java/com/alibaba/druid/sql/ast/SQLSubPartitionBy.java
|
{
"start": 833,
"end": 2656
}
|
class ____ extends SQLObjectImpl {
    protected SQLExpr subPartitionsCount;
    protected boolean linear;
    protected List<SQLAssignItem> options = new ArrayList<SQLAssignItem>();
    protected List<SQLSubPartition> subPartitionTemplate = new ArrayList<SQLSubPartition>();
    protected SQLIntegerExpr lifecycle;

    /** Returns the expression giving the number of subpartitions, or null if unspecified. */
    public SQLExpr getSubPartitionsCount() {
        return subPartitionsCount;
    }

    /** Sets the subpartition-count expression, re-parenting it onto this node. */
    public void setSubPartitionsCount(SQLExpr x) {
        if (x == null) {
            this.subPartitionsCount = null;
            return;
        }
        x.setParent(this);
        this.subPartitionsCount = x;
    }

    /** Whether the LINEAR keyword was present. */
    public boolean isLinear() {
        return linear;
    }

    public void setLinear(boolean linear) {
        this.linear = linear;
    }

    /** Live list of subpartition options; callers mutate it directly. */
    public List<SQLAssignItem> getOptions() {
        return options;
    }

    /** Live list of SUBPARTITION template entries; callers mutate it directly. */
    public List<SQLSubPartition> getSubPartitionTemplate() {
        return subPartitionTemplate;
    }

    /** Copies this node's state onto {@code x}; used by subclass {@link #clone()} implementations. */
    public void cloneTo(SQLSubPartitionBy x) {
        if (this.subPartitionsCount != null) {
            x.setSubPartitionsCount(this.subPartitionsCount.clone());
        }
        x.linear = this.linear;
        for (SQLAssignItem item : this.options) {
            SQLAssignItem itemCopy = item.clone();
            itemCopy.setParent(x);
            x.options.add(itemCopy);
        }
        for (SQLSubPartition template : this.subPartitionTemplate) {
            SQLSubPartition templateCopy = template.clone();
            templateCopy.setParent(x);
            x.subPartitionTemplate.add(templateCopy);
        }
        // NOTE(review): lifecycle is aliased rather than cloned, unlike the fields
        // above — confirm the shared reference is intended.
        x.lifecycle = this.lifecycle;
    }

    public SQLIntegerExpr getLifecycle() {
        return lifecycle;
    }

    // NOTE(review): unlike the other setters, this does not re-parent the
    // expression — confirm whether that is intentional.
    public void setLifecycle(SQLIntegerExpr lifecycle) {
        this.lifecycle = lifecycle;
    }

    /** Hook for subclasses; this base implementation never matches a column. */
    public boolean isPartitionByColumn(long columnNameHashCode64) {
        return false;
    }

    public abstract SQLSubPartitionBy clone();
}
|
SQLSubPartitionBy
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/repositories/UnknownTypeRepository.java
|
{
"start": 1568,
"end": 1779
}
|
class ____ a repository that could not be initialized due to unknown type.
* This could happen when a user creates a snapshot repository using a type from a plugin and then removes the plugin.
*/
public
|
represents
|
java
|
google__guava
|
android/guava/src/com/google/common/cache/LocalCache.java
|
{
"start": 55177,
"end": 107702
}
|
class ____ this class.
* It will require more memory but will reduce indirection.
*/
/*
* Segments maintain a table of entry lists that are ALWAYS kept in a consistent state, so can
* be read without locking. Next fields of nodes are immutable (final). All list additions are
* performed at the front of each bin. This makes it easy to check changes, and also fast to
* traverse. When nodes would otherwise be changed, new nodes are created to replace them. This
* works well for hash tables since the bin lists tend to be short. (The average length is less
* than two.)
*
* Read operations can thus proceed without locking, but rely on selected uses of volatiles to
* ensure that completed write operations performed by other threads are noticed. For most
* purposes, the "count" field, tracking the number of elements, serves as that volatile
* variable ensuring visibility. This is convenient because this field needs to be read in many
* read operations anyway:
*
* - All (unsynchronized) read operations must first read the "count" field, and should not look
* at table entries if it is 0.
*
* - All (synchronized) write operations should write to the "count" field after structurally
* changing any bin. The operations must not take any action that could even momentarily cause a
* concurrent read operation to see inconsistent data. This is made easier by the nature of the
* read operations in Map. For example, no operation can reveal that the table has grown but the
* threshold has not yet been updated, so there are no atomicity requirements for this with
* respect to reads.
*
* As a guide, all critical volatile reads and writes to the count field are marked in code
* comments.
*/
    /** The cache this segment is a region of. */
    @Weak final LocalCache<K, V> map;
    /** The number of live elements in this segment's region. */
    volatile int count;
    /** The weight of the live elements in this segment's region. */
    @GuardedBy("this")
    long totalWeight;
    /**
     * Number of updates that alter the size of the table. This is used during bulk-read methods to
     * make sure they see a consistent snapshot: If modCounts change during a traversal of segments
     * loading size or checking containsValue, then we might have an inconsistent view of state so
     * (usually) must retry.
     */
    int modCount;
    /**
     * The table is expanded when its size exceeds this threshold. (The value of this field is
     * always {@code (int) (capacity * 0.75)}.)
     */
    int threshold;
    /** The per-segment table. */
    volatile @Nullable AtomicReferenceArray<ReferenceEntry<K, V>> table;
    /** The maximum weight of this segment. UNSET_INT if there is no maximum. */
    final long maxSegmentWeight;
    /**
     * The key reference queue contains entries whose keys have been garbage collected, and which
     * need to be cleaned up internally.
     */
    final @Nullable ReferenceQueue<K> keyReferenceQueue;
    /**
     * The value reference queue contains value references whose values have been garbage collected,
     * and which need to be cleaned up internally.
     */
    final @Nullable ReferenceQueue<V> valueReferenceQueue;
    /**
     * The recency queue is used to record which entries were accessed for updating the access
     * list's ordering. It is drained as a batch operation when either the DRAIN_THRESHOLD is
     * crossed or a write occurs on the segment.
     */
    final Queue<ReferenceEntry<K, V>> recencyQueue;
    /**
     * A counter of the number of reads since the last write, used to drain queues on a small
     * fraction of read operations.
     */
    final AtomicInteger readCount = new AtomicInteger();
    /**
     * A queue of elements currently in the map, ordered by write time. Elements are added to the
     * tail of the queue on write.
     */
    @GuardedBy("this")
    final Queue<ReferenceEntry<K, V>> writeQueue;
    /**
     * A queue of elements currently in the map, ordered by access time. Elements are added to the
     * tail of the queue on access (note that writes count as accesses).
     */
    @GuardedBy("this")
    final Queue<ReferenceEntry<K, V>> accessQueue;
    /** Accumulates cache statistics. */
    final StatsCounter statsCounter;
    /**
     * Creates a segment backed by a table of {@code initialCapacity} buckets. The reference,
     * recency, write, and access queues are only materialized when the parent map's
     * configuration needs them; otherwise null / discarding stand-ins are used.
     */
    Segment(
        LocalCache<K, V> map,
        int initialCapacity,
        long maxSegmentWeight,
        StatsCounter statsCounter) {
      this.map = map;
      this.maxSegmentWeight = maxSegmentWeight;
      this.statsCounter = checkNotNull(statsCounter);
      initTable(newEntryArray(initialCapacity));
      keyReferenceQueue = map.usesKeyReferences() ? new ReferenceQueue<>() : null;
      valueReferenceQueue = map.usesValueReferences() ? new ReferenceQueue<>() : null;
      recencyQueue =
          map.usesAccessQueue() ? new ConcurrentLinkedQueue<>() : LocalCache.discardingQueue();
      writeQueue = map.usesWriteQueue() ? new WriteQueue<>() : LocalCache.discardingQueue();
      accessQueue = map.usesAccessQueue() ? new AccessQueue<>() : LocalCache.discardingQueue();
    }
    /** Allocates a new bucket array of the given size. */
    AtomicReferenceArray<ReferenceEntry<K, V>> newEntryArray(int size) {
      return new AtomicReferenceArray<>(size);
    }
    /** Installs {@code newTable} and recomputes the 0.75 load-factor resize threshold. */
    void initTable(AtomicReferenceArray<ReferenceEntry<K, V>> newTable) {
      this.threshold = newTable.length() * 3 / 4; // 0.75
      if (!map.customWeigher() && this.threshold == maxSegmentWeight) {
        // prevent spurious expansion before eviction
        this.threshold++;
      }
      this.table = newTable;
    }
    /** Creates a new entry for {@code key}, chained in front of {@code next}. */
    @GuardedBy("this")
    ReferenceEntry<K, V> newEntry(K key, int hash, @Nullable ReferenceEntry<K, V> next) {
      return map.entryFactory.newEntry(this, checkNotNull(key), hash, next);
    }
    /**
     * Copies {@code original} into a new entry chained to {@code newNext}. Returns the new entry,
     * or {@code null} if {@code original} was already garbage collected.
     */
    @GuardedBy("this")
    @Nullable ReferenceEntry<K, V> copyEntry(
        ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) {
      K key = original.getKey();
      if (key == null) {
        // key collected
        return null;
      }
      ValueReference<K, V> valueReference = original.getValueReference();
      V value = valueReference.get();
      if ((value == null) && valueReference.isActive()) {
        // value collected
        return null;
      }
      ReferenceEntry<K, V> newEntry = map.entryFactory.copyEntry(this, original, newNext, key);
      // Re-register the value reference against this segment's queue and the new entry.
      newEntry.setValueReference(valueReference.copyFor(this.valueReferenceQueue, value, newEntry));
      return newEntry;
    }
    /** Sets a new value of an entry. Adds newly created entries at the end of the access queue. */
    @GuardedBy("this")
    void setValue(ReferenceEntry<K, V> entry, K key, V value, long now) {
      ValueReference<K, V> previous = entry.getValueReference();
      int weight = map.weigher.weigh(key, value);
      checkState(weight >= 0, "Weights must be non-negative");
      ValueReference<K, V> valueReference =
          map.valueStrength.referenceValue(this, entry, value, weight);
      entry.setValueReference(valueReference);
      recordWrite(entry, weight, now);
      // NOTE(review): appears to let threads waiting on the previous reference observe
      // the new value — confirm against ValueReference.notifyNewValue.
      previous.notifyNewValue(value);
    }
    // loading

    /**
     * Returns the value for {@code key}, loading it via {@code loader} when no live value is
     * present. Error and RuntimeException causes of an {@link ExecutionException} are unwrapped
     * into {@link ExecutionError} / {@link UncheckedExecutionException} respectively.
     */
    @CanIgnoreReturnValue
    V get(K key, int hash, CacheLoader<? super K, V> loader) throws ExecutionException {
      checkNotNull(key);
      checkNotNull(loader);
      try {
        if (count != 0) { // read-volatile
          // don't call getLiveEntry, which would ignore loading values
          ReferenceEntry<K, V> e = getEntry(key, hash);
          if (e != null) {
            long now = map.ticker.read();
            V value = getLiveValue(e, now);
            if (value != null) {
              recordRead(e, now);
              statsCounter.recordHits(1);
              return scheduleRefresh(e, key, hash, value, now, loader);
            }
            ValueReference<K, V> valueReference = e.getValueReference();
            if (valueReference.isLoading()) {
              return waitForLoadingValue(e, key, valueReference);
            }
          }
        }
        // at this point e is either null or expired;
        return lockedGetOrLoad(key, hash, loader);
      } catch (ExecutionException ee) {
        Throwable cause = ee.getCause();
        if (cause instanceof Error) {
          throw new ExecutionError((Error) cause);
        } else if (cause instanceof RuntimeException) {
          throw new UncheckedExecutionException(cause);
        }
        throw ee;
      } finally {
        postReadCleanup();
      }
    }
    /**
     * Returns the live value for {@code key}, or null if absent/expired/collected. Unlike the
     * loading overload this never blocks, but it may return a refreshed value when one is due.
     */
    @Nullable V get(Object key, int hash) {
      try {
        if (count != 0) { // read-volatile
          long now = map.ticker.read();
          ReferenceEntry<K, V> e = getLiveEntry(key, hash, now);
          if (e == null) {
            return null;
          }
          V value = e.getValueReference().get();
          if (value != null) {
            recordRead(e, now);
            return scheduleRefresh(e, e.getKey(), hash, value, now, map.defaultLoader);
          }
          // value was collected between getLiveEntry and the read above
          tryDrainReferenceQueues();
        }
        return null;
      } finally {
        postReadCleanup();
      }
    }
    /**
     * Locks the segment and either (a) returns a value that became live concurrently,
     * (b) waits on another thread's in-flight load, or (c) installs a LoadingValueReference
     * and performs the load itself — outside the lock, synchronized on the entry to fail
     * fast on recursive loads.
     */
    V lockedGetOrLoad(K key, int hash, CacheLoader<? super K, V> loader) throws ExecutionException {
      ReferenceEntry<K, V> e;
      ValueReference<K, V> valueReference = null;
      LoadingValueReference<K, V> loadingValueReference = null;
      boolean createNewEntry = true;
      lock();
      try {
        // re-read ticker once inside the lock
        long now = map.ticker.read();
        preWriteCleanup(now);
        int newCount = this.count - 1;
        AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
        int index = hash & (table.length() - 1);
        ReferenceEntry<K, V> first = table.get(index);
        for (e = first; e != null; e = e.getNext()) {
          K entryKey = e.getKey();
          if (e.getHash() == hash
              && entryKey != null
              && map.keyEquivalence.equivalent(key, entryKey)) {
            valueReference = e.getValueReference();
            if (valueReference.isLoading()) {
              createNewEntry = false;
            } else {
              V value = valueReference.get();
              if (value == null) {
                enqueueNotification(
                    entryKey, hash, value, valueReference.getWeight(), RemovalCause.COLLECTED);
              } else if (map.isExpired(e, now)) {
                // This is a duplicate check, as preWriteCleanup already purged expired
                // entries, but let's accommodate an incorrect expiration queue.
                enqueueNotification(
                    entryKey, hash, value, valueReference.getWeight(), RemovalCause.EXPIRED);
              } else {
                recordLockedRead(e, now);
                statsCounter.recordHits(1);
                // we were concurrent with loading; don't consider refresh
                return value;
              }
              // immediately reuse invalid entries
              writeQueue.remove(e);
              accessQueue.remove(e);
              this.count = newCount; // write-volatile
            }
            break;
          }
        }
        if (createNewEntry) {
          loadingValueReference = new LoadingValueReference<>();
          if (e == null) {
            e = newEntry(key, hash, first);
            e.setValueReference(loadingValueReference);
            table.set(index, e);
          } else {
            e.setValueReference(loadingValueReference);
          }
        }
      } finally {
        unlock();
        postWriteCleanup();
      }
      if (createNewEntry) {
        try {
          // Synchronizes on the entry to allow failing fast when a recursive load is
          // detected. This may be circumvented when an entry is copied, but will fail fast most
          // of the time.
          synchronized (e) {
            return loadSync(key, hash, loadingValueReference, loader);
          }
        } finally {
          statsCounter.recordMisses(1);
        }
      } else {
        // The entry already exists. Wait for loading.
        return waitForLoadingValue(e, key, valueReference);
      }
    }
    /**
     * Blocks until another thread's in-flight load for {@code e} completes, then returns the
     * loaded value. Records a miss either way; rejects null results from the loader.
     */
    V waitForLoadingValue(ReferenceEntry<K, V> e, K key, ValueReference<K, V> valueReference)
        throws ExecutionException {
      if (!valueReference.isLoading()) {
        throw new AssertionError();
      }
      // Holding the entry lock here would mean we are the loading thread: a recursive load.
      checkState(!Thread.holdsLock(e), "Recursive load of: %s", key);
      // don't consider expiration as we're concurrent with loading
      try {
        V value = valueReference.waitForValue();
        if (value == null) {
          throw new InvalidCacheLoadException("CacheLoader returned null for key " + key + ".");
        }
        // re-read ticker now that loading has completed
        long now = map.ticker.read();
        recordRead(e, now);
        return value;
      } finally {
        statsCounter.recordMisses(1);
      }
    }
    // at most one of loadSync/loadAsync may be called for any given LoadingValueReference

    /** Performs the load on the calling thread and records load stats when it completes. */
    V loadSync(
        K key,
        int hash,
        LoadingValueReference<K, V> loadingValueReference,
        CacheLoader<? super K, V> loader)
        throws ExecutionException {
      ListenableFuture<V> loadingFuture = loadingValueReference.loadFuture(key, loader);
      return getAndRecordStats(key, hash, loadingValueReference, loadingFuture);
    }
    /**
     * Starts the load and attaches a listener that records stats (or the failure) when the
     * returned future completes; does not block the caller.
     */
    ListenableFuture<V> loadAsync(
        K key,
        int hash,
        LoadingValueReference<K, V> loadingValueReference,
        CacheLoader<? super K, V> loader) {
      ListenableFuture<V> loadingFuture = loadingValueReference.loadFuture(key, loader);
      loadingFuture.addListener(
          () -> {
            try {
              getAndRecordStats(key, hash, loadingValueReference, loadingFuture);
            } catch (Throwable t) {
              logger.log(Level.WARNING, "Exception thrown during refresh", t);
              loadingValueReference.setException(t);
            }
          },
          directExecutor());
      return loadingFuture;
    }
    /** Waits uninterruptibly for {@code newValue} to be loaded, and then records loading stats. */
    @CanIgnoreReturnValue
    V getAndRecordStats(
        K key,
        int hash,
        LoadingValueReference<K, V> loadingValueReference,
        ListenableFuture<V> newValue)
        throws ExecutionException {
      V value = null;
      try {
        value = getUninterruptibly(newValue);
        if (value == null) {
          throw new InvalidCacheLoadException("CacheLoader returned null for key " + key + ".");
        }
        statsCounter.recordLoadSuccess(loadingValueReference.elapsedNanos());
        storeLoadedValue(key, hash, loadingValueReference, value);
        return value;
      } finally {
        // value stays null on any failure path; record the exception and clean up the
        // loading reference so subsequent gets don't wait on it forever.
        if (value == null) {
          statsCounter.recordLoadException(loadingValueReference.elapsedNanos());
          removeLoadingValue(key, hash, loadingValueReference);
        }
      }
    }
    /**
     * Returns a freshly refreshed value when {@code entry} is due for refresh and no other
     * thread is already loading it; otherwise returns {@code oldValue} unchanged.
     */
    V scheduleRefresh(
        ReferenceEntry<K, V> entry,
        K key,
        int hash,
        V oldValue,
        long now,
        CacheLoader<? super K, V> loader) {
      if (map.refreshes()
          && (now - entry.getWriteTime() > map.refreshNanos)
          && !entry.getValueReference().isLoading()) {
        V newValue = refresh(key, hash, loader, true);
        // null means another thread won the refresh race or the refresh failed
        if (newValue != null) {
          return newValue;
        }
      }
      return oldValue;
    }
    /**
     * Refreshes the value associated with {@code key}, unless another thread is already doing so.
     * Returns the newly refreshed value associated with {@code key} if it was refreshed inline, or
     * {@code null} if another thread is performing the refresh or if an error occurs during
     * refresh.
     */
    @CanIgnoreReturnValue
    @Nullable V refresh(K key, int hash, CacheLoader<? super K, V> loader, boolean checkTime) {
      LoadingValueReference<K, V> loadingValueReference =
          insertLoadingValueReference(key, hash, checkTime);
      if (loadingValueReference == null) {
        // someone else is already loading/refreshing this key
        return null;
      }
      ListenableFuture<V> result = loadAsync(key, hash, loadingValueReference, loader);
      if (result.isDone()) {
        // the load completed synchronously; hand the fresh value back inline
        try {
          return Uninterruptibles.getUninterruptibly(result);
        } catch (Throwable t) {
          // don't let refresh exceptions propagate; error was already logged
        }
      }
      return null;
    }
    /**
     * Returns a newly inserted {@code LoadingValueReference}, or null if the live value reference
     * is already loading.
     */
    @Nullable LoadingValueReference<K, V> insertLoadingValueReference(
        K key, int hash, boolean checkTime) {
      ReferenceEntry<K, V> e = null;
      lock();
      try {
        long now = map.ticker.read();
        preWriteCleanup(now);
        AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
        int index = hash & (table.length() - 1);
        ReferenceEntry<K, V> first = table.get(index);
        // Look for an existing entry.
        for (e = first; e != null; e = e.getNext()) {
          K entryKey = e.getKey();
          if (e.getHash() == hash
              && entryKey != null
              && map.keyEquivalence.equivalent(key, entryKey)) {
            // We found an existing entry.
            ValueReference<K, V> valueReference = e.getValueReference();
            if (valueReference.isLoading()
                || (checkTime && (now - e.getWriteTime() < map.refreshNanos))) {
              // refresh is a no-op if loading is pending
              // if checkTime, we want to check *after* acquiring the lock if refresh still needs
              // to be scheduled
              return null;
            }
            // continue returning old value while loading
            ++modCount;
            LoadingValueReference<K, V> loadingValueReference =
                new LoadingValueReference<>(valueReference);
            e.setValueReference(loadingValueReference);
            return loadingValueReference;
          }
        }
        // No entry for the key: insert a fresh entry whose value is only the loading marker.
        ++modCount;
        LoadingValueReference<K, V> loadingValueReference = new LoadingValueReference<>();
        e = newEntry(key, hash, first);
        e.setValueReference(loadingValueReference);
        table.set(index, e);
        return loadingValueReference;
      } finally {
        unlock();
        postWriteCleanup();
      }
    }
// reference queues, for garbage collection cleanup
/** Cleanup collected entries when the lock is available. */
void tryDrainReferenceQueues() {
if (tryLock()) {
try {
drainReferenceQueues();
} finally {
unlock();
}
}
}
    /**
     * Drain the key and value reference queues, cleaning up internal entries containing garbage
     * collected keys or values.
     */
    @GuardedBy("this")
    void drainReferenceQueues() {
      // The two queues are independent; a segment may use either, both, or neither.
      if (map.usesKeyReferences()) {
        drainKeyReferenceQueue();
      }
      if (map.usesValueReferences()) {
        drainValueReferenceQueue();
      }
    }
@GuardedBy("this")
void drainKeyReferenceQueue() {
Reference<? extends K> ref;
int i = 0;
while ((ref = keyReferenceQueue.poll()) != null) {
@SuppressWarnings("unchecked")
ReferenceEntry<K, V> entry = (ReferenceEntry<K, V>) ref;
map.reclaimKey(entry);
if (++i == DRAIN_MAX) {
break;
}
}
}
@GuardedBy("this")
void drainValueReferenceQueue() {
Reference<? extends V> ref;
int i = 0;
while ((ref = valueReferenceQueue.poll()) != null) {
@SuppressWarnings("unchecked")
ValueReference<K, V> valueReference = (ValueReference<K, V>) ref;
map.reclaimValue(valueReference);
if (++i == DRAIN_MAX) {
break;
}
}
}
    /** Clears all entries from the key and value reference queues. */
    void clearReferenceQueues() {
      // Unlike the drain methods, pending references are discarded without reclaiming.
      if (map.usesKeyReferences()) {
        clearKeyReferenceQueue();
      }
      if (map.usesValueReferences()) {
        clearValueReferenceQueue();
      }
    }
void clearKeyReferenceQueue() {
while (keyReferenceQueue.poll() != null) {}
}
void clearValueReferenceQueue() {
while (valueReferenceQueue.poll() != null) {}
}
    // recency queue, shared by expiration and eviction

    /**
     * Records the relative order in which this read was performed by adding {@code entry} to the
     * recency queue. At write-time, or when the queue is full past the threshold, the queue will be
     * drained and the entries therein processed.
     *
     * <p>Note: locked reads should use {@link #recordLockedRead}.
     */
    void recordRead(ReferenceEntry<K, V> entry, long now) {
      if (map.recordsAccess()) {
        entry.setAccessTime(now);
      }
      // buffered here; applied to the access queue in batch under the segment lock
      recencyQueue.add(entry);
    }
    /**
     * Updates the eviction metadata that {@code entry} was just read. This currently amounts to
     * adding {@code entry} to relevant eviction lists.
     *
     * <p>Note: this method should only be called under lock, as it directly manipulates the
     * eviction queues. Unlocked reads should use {@link #recordRead}.
     */
    @GuardedBy("this")
    void recordLockedRead(ReferenceEntry<K, V> entry, long now) {
      if (map.recordsAccess()) {
        entry.setAccessTime(now);
      }
      // safe to touch accessQueue directly: the caller holds the segment lock
      accessQueue.add(entry);
    }
    /**
     * Updates eviction metadata that {@code entry} was just written. This currently amounts to
     * adding {@code entry} to relevant eviction lists.
     */
    @GuardedBy("this")
    void recordWrite(ReferenceEntry<K, V> entry, int weight, long now) {
      // we are already under lock, so drain the recency queue immediately
      drainRecencyQueue();
      totalWeight += weight;
      if (map.recordsAccess()) {
        entry.setAccessTime(now);
      }
      if (map.recordsWrite()) {
        entry.setWriteTime(now);
      }
      // writes count as accesses, so the entry joins both queues
      accessQueue.add(entry);
      writeQueue.add(entry);
    }
    /**
     * Drains the recency queue, updating eviction metadata that the entries therein were read in
     * the specified relative order. This currently amounts to adding them to relevant eviction
     * lists (accounting for the fact that they could have been removed from the map since being
     * added to the recency queue).
     */
    @GuardedBy("this")
    void drainRecencyQueue() {
      ReferenceEntry<K, V> e;
      while ((e = recencyQueue.poll()) != null) {
        // An entry may be in the recency queue despite it being removed from
        // the map . This can occur when the entry was concurrently read while a
        // writer is removing it from the segment or after a clear has removed
        // all the segment's entries.
        if (accessQueue.contains(e)) {
          // re-adding moves the entry to the tail (most-recently-accessed position)
          accessQueue.add(e);
        }
      }
    }
// expiration
/** Cleanup expired entries when the lock is available. */
void tryExpireEntries(long now) {
if (tryLock()) {
try {
expireEntries(now);
} finally {
unlock();
// don't call postWriteCleanup as we're in a read
}
}
}
    /** Removes every expired entry from both time-ordered queues (caller holds the lock). */
    @GuardedBy("this")
    void expireEntries(long now) {
      // flush buffered accesses first so the access queue's head order is current
      drainRecencyQueue();
      ReferenceEntry<K, V> e;
      while ((e = writeQueue.peek()) != null && map.isExpired(e, now)) {
        if (!removeEntry(e, e.getHash(), RemovalCause.EXPIRED)) {
          throw new AssertionError();
        }
      }
      while ((e = accessQueue.peek()) != null && map.isExpired(e, now)) {
        if (!removeEntry(e, e.getHash(), RemovalCause.EXPIRED)) {
          throw new AssertionError();
        }
      }
    }
    // eviction

    /**
     * Deducts {@code weight} from the segment total and publishes a removal notification,
     * unless the map was configured with a discarding notification queue.
     */
    @GuardedBy("this")
    void enqueueNotification(
        @Nullable K key, int hash, @Nullable V value, int weight, RemovalCause cause) {
      totalWeight -= weight;
      if (cause.wasEvicted()) {
        statsCounter.recordEviction();
      }
      if (map.removalNotificationQueue != DISCARDING_QUEUE) {
        RemovalNotification<K, V> notification = RemovalNotification.create(key, value, cause);
        map.removalNotificationQueue.offer(notification);
      }
    }
    /**
     * Performs eviction if the segment is over capacity. Avoids flushing the entire cache if the
     * newest entry exceeds the maximum weight all on its own.
     *
     * @param newest the most recently added entry
     */
    @GuardedBy("this")
    void evictEntries(ReferenceEntry<K, V> newest) {
      if (!map.evictsBySize()) {
        return;
      }
      drainRecencyQueue();
      // If the newest entry by itself is too heavy for the segment, don't bother evicting
      // anything else, just that
      if (newest.getValueReference().getWeight() > maxSegmentWeight) {
        if (!removeEntry(newest, newest.getHash(), RemovalCause.SIZE)) {
          throw new AssertionError();
        }
      }
      // keep evicting entries from the access queue until back under the weight cap
      while (totalWeight > maxSegmentWeight) {
        ReferenceEntry<K, V> e = getNextEvictable();
        if (!removeEntry(e, e.getHash(), RemovalCause.SIZE)) {
          throw new AssertionError();
        }
      }
    }
// TODO(fry): instead implement this with an eviction head
@GuardedBy("this")
ReferenceEntry<K, V> getNextEvictable() {
for (ReferenceEntry<K, V> e : accessQueue) {
int weight = e.getValueReference().getWeight();
if (weight > 0) {
return e;
}
}
throw new AssertionError();
}
/** Returns first entry of bin for given hash. */
ReferenceEntry<K, V> getFirst(int hash) {
// read this volatile field only once
AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
return table.get(hash & (table.length() - 1));
}
// Specialized implementations of map methods
@Nullable ReferenceEntry<K, V> getEntry(Object key, int hash) {
for (ReferenceEntry<K, V> e = getFirst(hash); e != null; e = e.getNext()) {
if (e.getHash() != hash) {
continue;
}
K entryKey = e.getKey();
if (entryKey == null) {
tryDrainReferenceQueues();
continue;
}
if (map.keyEquivalence.equivalent(key, entryKey)) {
return e;
}
}
return null;
}
@Nullable ReferenceEntry<K, V> getLiveEntry(Object key, int hash, long now) {
ReferenceEntry<K, V> e = getEntry(key, hash);
if (e == null) {
return null;
} else if (map.isExpired(e, now)) {
tryExpireEntries(now);
return null;
}
return e;
}
/**
* Gets the value from an entry. Returns null if the entry is invalid, partially-collected,
* loading, or expired.
*/
V getLiveValue(ReferenceEntry<K, V> entry, long now) {
if (entry.getKey() == null) {
tryDrainReferenceQueues();
return null;
}
V value = entry.getValueReference().get();
if (value == null) {
tryDrainReferenceQueues();
return null;
}
if (map.isExpired(entry, now)) {
tryExpireEntries(now);
return null;
}
return value;
}
boolean containsKey(Object key, int hash) {
try {
if (count != 0) { // read-volatile
long now = map.ticker.read();
ReferenceEntry<K, V> e = getLiveEntry(key, hash, now);
if (e == null) {
return false;
}
return e.getValueReference().get() != null;
}
return false;
} finally {
postReadCleanup();
}
}
    /**
     * This method is a convenience for testing. Code should call {@link LocalCache#containsValue}
     * directly.
     */
    @VisibleForTesting
    boolean containsValue(Object value) {
      try {
        // full table scan; the volatile 'count' read lets an empty segment exit early
        if (count != 0) { // read-volatile
          long now = map.ticker.read();
          AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
          int length = table.length();
          for (int i = 0; i < length; ++i) {
            for (ReferenceEntry<K, V> e = table.get(i); e != null; e = e.getNext()) {
              V entryValue = getLiveValue(e, now);
              if (entryValue == null) {
                continue;
              }
              if (map.valueEquivalence.equivalent(value, entryValue)) {
                return true;
              }
            }
          }
        }
        return false;
      } finally {
        postReadCleanup();
      }
    }
    /**
     * Associates {@code key} with {@code value}. When {@code onlyIfAbsent} is set, an existing
     * live value wins and is returned unchanged. Returns the replaced value, or null when there
     * was no live previous value.
     */
    @CanIgnoreReturnValue
    @Nullable V put(K key, int hash, V value, boolean onlyIfAbsent) {
      lock();
      try {
        long now = map.ticker.read();
        preWriteCleanup(now);
        int newCount = this.count + 1;
        if (newCount > this.threshold) { // ensure capacity
          expand();
          newCount = this.count + 1;
        }
        AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
        int index = hash & (table.length() - 1);
        ReferenceEntry<K, V> first = table.get(index);
        // Look for an existing entry.
        for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) {
          K entryKey = e.getKey();
          if (e.getHash() == hash
              && entryKey != null
              && map.keyEquivalence.equivalent(key, entryKey)) {
            // We found an existing entry.
            ValueReference<K, V> valueReference = e.getValueReference();
            V entryValue = valueReference.get();
            if (entryValue == null) {
              ++modCount;
              if (valueReference.isActive()) {
                enqueueNotification(
                    key, hash, entryValue, valueReference.getWeight(), RemovalCause.COLLECTED);
                setValue(e, key, value, now);
                newCount = this.count; // count remains unchanged
              } else {
                setValue(e, key, value, now);
                newCount = this.count + 1;
              }
              this.count = newCount; // write-volatile
              evictEntries(e);
              return null;
            } else if (onlyIfAbsent) {
              // Mimic
              // "if (!map.containsKey(key)) ...
              // else return map.get(key);
              recordLockedRead(e, now);
              return entryValue;
            } else {
              // clobber existing entry, count remains unchanged
              ++modCount;
              enqueueNotification(
                  key, hash, entryValue, valueReference.getWeight(), RemovalCause.REPLACED);
              setValue(e, key, value, now);
              evictEntries(e);
              return entryValue;
            }
          }
        }
        // Create a new entry.
        ++modCount;
        ReferenceEntry<K, V> newEntry = newEntry(key, hash, first);
        setValue(newEntry, key, value, now);
        table.set(index, newEntry);
        newCount = this.count + 1;
        this.count = newCount; // write-volatile
        evictEntries(newEntry);
        return null;
      } finally {
        unlock();
        postWriteCleanup();
      }
    }
    /** Expands the table if possible. */
    @GuardedBy("this")
    void expand() {
      AtomicReferenceArray<ReferenceEntry<K, V>> oldTable = table;
      int oldCapacity = oldTable.length();
      if (oldCapacity >= MAXIMUM_CAPACITY) {
        return;
      }
      /*
       * Reclassify nodes in each list to new Map. Because we are using power-of-two expansion, the
       * elements from each bin must either stay at same index, or move with a power of two offset.
       * We eliminate unnecessary node creation by catching cases where old nodes can be reused
       * because their next fields won't change. Statistically, at the default threshold, only about
       * one-sixth of them need cloning when a table doubles. The nodes they replace will be garbage
       * collectable as soon as they are no longer referenced by any reader thread that may be in
       * the midst of traversing table right now.
       */
      int newCount = count;
      AtomicReferenceArray<ReferenceEntry<K, V>> newTable = newEntryArray(oldCapacity << 1);
      threshold = newTable.length() * 3 / 4;
      int newMask = newTable.length() - 1;
      for (int oldIndex = 0; oldIndex < oldCapacity; ++oldIndex) {
        // We need to guarantee that any existing reads of old Map can
        // proceed. So we cannot yet null out each bin.
        ReferenceEntry<K, V> head = oldTable.get(oldIndex);
        if (head != null) {
          ReferenceEntry<K, V> next = head.getNext();
          int headIndex = head.getHash() & newMask;
          // Single node on list
          if (next == null) {
            newTable.set(headIndex, head);
          } else {
            // Reuse the consecutive sequence of nodes with the same target
            // index from the end of the list. tail points to the first
            // entry in the reusable list.
            ReferenceEntry<K, V> tail = head;
            int tailIndex = headIndex;
            for (ReferenceEntry<K, V> e = next; e != null; e = e.getNext()) {
              int newIndex = e.getHash() & newMask;
              if (newIndex != tailIndex) {
                // The index changed. We'll need to copy the previous entry.
                tailIndex = newIndex;
                tail = e;
              }
            }
            newTable.set(tailIndex, tail);
            // Clone nodes leading up to the tail.
            for (ReferenceEntry<K, V> e = head; e != tail; e = e.getNext()) {
              int newIndex = e.getHash() & newMask;
              ReferenceEntry<K, V> newNext = newTable.get(newIndex);
              ReferenceEntry<K, V> newFirst = copyEntry(e, newNext);
              if (newFirst != null) {
                newTable.set(newIndex, newFirst);
              } else {
                // entry was collected mid-transfer; drop it from the new table's count
                removeCollectedEntry(e);
                newCount--;
              }
            }
          }
        }
      }
      table = newTable;
      this.count = newCount;
    }
    /**
     * Replaces the mapping for {@code key} only if it is currently mapped to {@code oldValue};
     * returns whether the replacement happened.
     */
    boolean replace(K key, int hash, V oldValue, V newValue) {
      lock();
      try {
        long now = map.ticker.read();
        preWriteCleanup(now);
        AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
        int index = hash & (table.length() - 1);
        ReferenceEntry<K, V> first = table.get(index);
        for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) {
          K entryKey = e.getKey();
          if (e.getHash() == hash
              && entryKey != null
              && map.keyEquivalence.equivalent(key, entryKey)) {
            ValueReference<K, V> valueReference = e.getValueReference();
            V entryValue = valueReference.get();
            if (entryValue == null) {
              if (valueReference.isActive()) {
                // If the value disappeared, this entry is partially collected.
                int newCount = this.count - 1;
                ++modCount;
                ReferenceEntry<K, V> newFirst =
                    removeValueFromChain(
                        first,
                        e,
                        entryKey,
                        hash,
                        entryValue,
                        valueReference,
                        RemovalCause.COLLECTED);
                newCount = this.count - 1;
                table.set(index, newFirst);
                this.count = newCount; // write-volatile
              }
              return false;
            }
            if (map.valueEquivalence.equivalent(oldValue, entryValue)) {
              ++modCount;
              enqueueNotification(
                  key, hash, entryValue, valueReference.getWeight(), RemovalCause.REPLACED);
              setValue(e, key, newValue, now);
              evictEntries(e);
              return true;
            } else {
              // Mimic
              // "if (map.containsKey(key) && map.get(key).equals(oldValue))..."
              recordLockedRead(e, now);
              return false;
            }
          }
        }
        return false;
      } finally {
        unlock();
        postWriteCleanup();
      }
    }
    /**
     * Unconditionally replaces a live mapping for {@code key}; returns the previous value, or
     * null when there was no live mapping to replace.
     */
    @Nullable V replace(K key, int hash, V newValue) {
      lock();
      try {
        long now = map.ticker.read();
        preWriteCleanup(now);
        AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
        int index = hash & (table.length() - 1);
        ReferenceEntry<K, V> first = table.get(index);
        for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) {
          K entryKey = e.getKey();
          if (e.getHash() == hash
              && entryKey != null
              && map.keyEquivalence.equivalent(key, entryKey)) {
            ValueReference<K, V> valueReference = e.getValueReference();
            V entryValue = valueReference.get();
            if (entryValue == null) {
              if (valueReference.isActive()) {
                // If the value disappeared, this entry is partially collected.
                int newCount = this.count - 1;
                ++modCount;
                ReferenceEntry<K, V> newFirst =
                    removeValueFromChain(
                        first,
                        e,
                        entryKey,
                        hash,
                        entryValue,
                        valueReference,
                        RemovalCause.COLLECTED);
                newCount = this.count - 1;
                table.set(index, newFirst);
                this.count = newCount; // write-volatile
              }
              return null;
            }
            ++modCount;
            enqueueNotification(
                key, hash, entryValue, valueReference.getWeight(), RemovalCause.REPLACED);
            setValue(e, key, newValue, now);
            evictEntries(e);
            return entryValue;
          }
        }
        return null;
      } finally {
        unlock();
        postWriteCleanup();
      }
    }
    /** Removes the entry for {@code key}; returns the removed value, or null if none was live. */
    @Nullable V remove(Object key, int hash) {
      lock();
      try {
        long now = map.ticker.read();
        preWriteCleanup(now);
        int newCount = this.count - 1;
        AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
        int index = hash & (table.length() - 1);
        ReferenceEntry<K, V> first = table.get(index);
        for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) {
          K entryKey = e.getKey();
          if (e.getHash() == hash
              && entryKey != null
              && map.keyEquivalence.equivalent(key, entryKey)) {
            ValueReference<K, V> valueReference = e.getValueReference();
            V entryValue = valueReference.get();
            RemovalCause cause;
            if (entryValue != null) {
              cause = RemovalCause.EXPLICIT;
            } else if (valueReference.isActive()) {
              cause = RemovalCause.COLLECTED;
            } else {
              // currently loading
              return null;
            }
            ++modCount;
            ReferenceEntry<K, V> newFirst =
                removeValueFromChain(first, e, entryKey, hash, entryValue, valueReference, cause);
            newCount = this.count - 1;
            table.set(index, newFirst);
            this.count = newCount; // write-volatile
            return entryValue;
          }
        }
        return null;
      } finally {
        unlock();
        postWriteCleanup();
      }
    }
boolean remove(Object key, int hash, Object value) {
lock();
try {
long now = map.ticker.read();
preWriteCleanup(now);
int newCount = this.count - 1;
AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
int index = hash & (table.length() - 1);
ReferenceEntry<K, V> first = table.get(index);
for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) {
K entryKey = e.getKey();
if (e.getHash() == hash
&& entryKey != null
&& map.keyEquivalence.equivalent(key, entryKey)) {
ValueReference<K, V> valueReference = e.getValueReference();
V entryValue = valueReference.get();
RemovalCause cause;
if (map.valueEquivalence.equivalent(value, entryValue)) {
cause = RemovalCause.EXPLICIT;
} else if (entryValue == null && valueReference.isActive()) {
cause = RemovalCause.COLLECTED;
} else {
// currently loading
return false;
}
++modCount;
ReferenceEntry<K, V> newFirst =
removeValueFromChain(first, e, entryKey, hash, entryValue, valueReference, cause);
newCount = this.count - 1;
table.set(index, newFirst);
this.count = newCount; // write-volatile
return (cause == RemovalCause.EXPLICIT);
}
}
return false;
} finally {
unlock();
postWriteCleanup();
}
}
@CanIgnoreReturnValue
boolean storeLoadedValue(
K key, int hash, LoadingValueReference<K, V> oldValueReference, V newValue) {
lock();
try {
long now = map.ticker.read();
preWriteCleanup(now);
int newCount = this.count + 1;
if (newCount > this.threshold) { // ensure capacity
expand();
newCount = this.count + 1;
}
AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
int index = hash & (table.length() - 1);
ReferenceEntry<K, V> first = table.get(index);
for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) {
K entryKey = e.getKey();
if (e.getHash() == hash
&& entryKey != null
&& map.keyEquivalence.equivalent(key, entryKey)) {
ValueReference<K, V> valueReference = e.getValueReference();
V entryValue = valueReference.get();
// replace the old LoadingValueReference if it's live, otherwise
// perform a putIfAbsent
if (oldValueReference == valueReference
|| (entryValue == null && valueReference != UNSET)) {
++modCount;
if (oldValueReference.isActive()) {
RemovalCause cause =
(entryValue == null) ? RemovalCause.COLLECTED : RemovalCause.REPLACED;
enqueueNotification(key, hash, entryValue, oldValueReference.getWeight(), cause);
newCount--;
}
setValue(e, key, newValue, now);
this.count = newCount; // write-volatile
evictEntries(e);
return true;
}
// the loaded value was already clobbered
enqueueNotification(key, hash, newValue, 0, RemovalCause.REPLACED);
return false;
}
}
++modCount;
ReferenceEntry<K, V> newEntry = newEntry(key, hash, first);
setValue(newEntry, key, newValue, now);
table.set(index, newEntry);
this.count = newCount; // write-volatile
evictEntries(newEntry);
return true;
} finally {
unlock();
postWriteCleanup();
}
}
void clear() {
if (count != 0) { // read-volatile
lock();
try {
long now = map.ticker.read();
preWriteCleanup(now);
AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
for (int i = 0; i < table.length(); ++i) {
for (ReferenceEntry<K, V> e = table.get(i); e != null; e = e.getNext()) {
// Loading references aren't actually in the map yet.
if (e.getValueReference().isActive()) {
K key = e.getKey();
V value = e.getValueReference().get();
RemovalCause cause =
(key == null || value == null) ? RemovalCause.COLLECTED : RemovalCause.EXPLICIT;
enqueueNotification(
key, e.getHash(), value, e.getValueReference().getWeight(), cause);
}
}
}
for (int i = 0; i < table.length(); ++i) {
table.set(i, null);
}
clearReferenceQueues();
writeQueue.clear();
accessQueue.clear();
readCount.set(0);
++modCount;
count = 0; // write-volatile
} finally {
unlock();
postWriteCleanup();
}
}
}
@GuardedBy("this")
@Nullable ReferenceEntry<K, V> removeValueFromChain(
ReferenceEntry<K, V> first,
ReferenceEntry<K, V> entry,
@Nullable K key,
int hash,
V value,
ValueReference<K, V> valueReference,
RemovalCause cause) {
enqueueNotification(key, hash, value, valueReference.getWeight(), cause);
writeQueue.remove(entry);
accessQueue.remove(entry);
if (valueReference.isLoading()) {
valueReference.notifyNewValue(null);
return first;
} else {
return removeEntryFromChain(first, entry);
}
}
@GuardedBy("this")
@Nullable ReferenceEntry<K, V> removeEntryFromChain(
ReferenceEntry<K, V> first, ReferenceEntry<K, V> entry) {
int newCount = count;
ReferenceEntry<K, V> newFirst = entry.getNext();
for (ReferenceEntry<K, V> e = first; e != entry; e = e.getNext()) {
ReferenceEntry<K, V> next = copyEntry(e, newFirst);
if (next != null) {
newFirst = next;
} else {
removeCollectedEntry(e);
newCount--;
}
}
this.count = newCount;
return newFirst;
}
@GuardedBy("this")
void removeCollectedEntry(ReferenceEntry<K, V> entry) {
enqueueNotification(
entry.getKey(),
entry.getHash(),
entry.getValueReference().get(),
entry.getValueReference().getWeight(),
RemovalCause.COLLECTED);
writeQueue.remove(entry);
accessQueue.remove(entry);
}
/** Removes an entry whose key has been garbage collected. */
@CanIgnoreReturnValue
boolean reclaimKey(ReferenceEntry<K, V> entry, int hash) {
lock();
try {
int newCount = count - 1;
AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
int index = hash & (table.length() - 1);
ReferenceEntry<K, V> first = table.get(index);
for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) {
if (e == entry) {
++modCount;
ReferenceEntry<K, V> newFirst =
removeValueFromChain(
first,
e,
e.getKey(),
hash,
e.getValueReference().get(),
e.getValueReference(),
RemovalCause.COLLECTED);
newCount = this.count - 1;
table.set(index, newFirst);
this.count = newCount; // write-volatile
return true;
}
}
return false;
} finally {
unlock();
postWriteCleanup();
}
}
/** Removes an entry whose value has been garbage collected. */
@CanIgnoreReturnValue
boolean reclaimValue(K key, int hash, ValueReference<K, V> valueReference) {
lock();
try {
int newCount = this.count - 1;
AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
int index = hash & (table.length() - 1);
ReferenceEntry<K, V> first = table.get(index);
for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) {
K entryKey = e.getKey();
if (e.getHash() == hash
&& entryKey != null
&& map.keyEquivalence.equivalent(key, entryKey)) {
ValueReference<K, V> v = e.getValueReference();
if (v == valueReference) {
++modCount;
ReferenceEntry<K, V> newFirst =
removeValueFromChain(
first,
e,
entryKey,
hash,
valueReference.get(),
valueReference,
RemovalCause.COLLECTED);
newCount = this.count - 1;
table.set(index, newFirst);
this.count = newCount; // write-volatile
return true;
}
return false;
}
}
return false;
} finally {
unlock();
if (!isHeldByCurrentThread()) { // don't clean up inside of put
postWriteCleanup();
}
}
}
@CanIgnoreReturnValue
boolean removeLoadingValue(K key, int hash, LoadingValueReference<K, V> valueReference) {
lock();
try {
AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
int index = hash & (table.length() - 1);
ReferenceEntry<K, V> first = table.get(index);
for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) {
K entryKey = e.getKey();
if (e.getHash() == hash
&& entryKey != null
&& map.keyEquivalence.equivalent(key, entryKey)) {
ValueReference<K, V> v = e.getValueReference();
if (v == valueReference) {
if (valueReference.isActive()) {
e.setValueReference(valueReference.getOldValue());
} else {
ReferenceEntry<K, V> newFirst = removeEntryFromChain(first, e);
table.set(index, newFirst);
}
return true;
}
return false;
}
}
return false;
} finally {
unlock();
postWriteCleanup();
}
}
@VisibleForTesting
@GuardedBy("this")
@CanIgnoreReturnValue
boolean removeEntry(ReferenceEntry<K, V> entry, int hash, RemovalCause cause) {
int newCount = this.count - 1;
AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
int index = hash & (table.length() - 1);
ReferenceEntry<K, V> first = table.get(index);
for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) {
if (e == entry) {
++modCount;
ReferenceEntry<K, V> newFirst =
removeValueFromChain(
first,
e,
e.getKey(),
hash,
e.getValueReference().get(),
e.getValueReference(),
cause);
newCount = this.count - 1;
table.set(index, newFirst);
this.count = newCount; // write-volatile
return true;
}
}
return false;
}
/**
* Performs routine cleanup following a read. Normally cleanup happens during writes. If cleanup
* is not observed after a sufficient number of reads, try cleaning up from the read thread.
*/
void postReadCleanup() {
if ((readCount.incrementAndGet() & DRAIN_THRESHOLD) == 0) {
cleanUp();
}
}
/**
* Performs routine cleanup prior to executing a write. This should be called every time a write
* thread acquires the segment lock, immediately after acquiring the lock.
*
* <p>Post-condition: expireEntries has been run.
*/
@GuardedBy("this")
void preWriteCleanup(long now) {
runLockedCleanup(now);
}
/** Performs routine cleanup following a write. */
void postWriteCleanup() {
runUnlockedCleanup();
}
void cleanUp() {
long now = map.ticker.read();
runLockedCleanup(now);
runUnlockedCleanup();
}
void runLockedCleanup(long now) {
if (tryLock()) {
try {
drainReferenceQueues();
expireEntries(now); // calls drainRecencyQueue
readCount.set(0);
} finally {
unlock();
}
}
}
void runUnlockedCleanup() {
// locked cleanup may generate notifications we can send unlocked
if (!isHeldByCurrentThread()) {
map.processPendingNotifications();
}
}
}
static
|
into
|
java
|
netty__netty
|
transport-classes-io_uring/src/main/java/io/netty/channel/uring/IoUringSocketChannelConfig.java
|
{
"start": 1180,
"end": 22045
}
|
class ____ extends IoUringStreamChannelConfig implements SocketChannelConfig {
private volatile boolean allowHalfClosure;
private volatile boolean tcpFastopen;
static final int DISABLE_WRITE_ZERO_COPY = -1;
private volatile int writeZeroCopyThreshold = DISABLE_WRITE_ZERO_COPY;
IoUringSocketChannelConfig(AbstractIoUringChannel channel) {
super(channel);
if (PlatformDependent.canEnableTcpNoDelayByDefault()) {
setTcpNoDelay(true);
}
}
@Override
public Map<ChannelOption<?>, Object> getOptions() {
return getOptions(
super.getOptions(),
SO_RCVBUF, SO_SNDBUF, TCP_NODELAY, SO_KEEPALIVE, SO_REUSEADDR, SO_LINGER, IP_TOS,
ALLOW_HALF_CLOSURE, IoUringChannelOption.TCP_CORK, IoUringChannelOption.TCP_NOTSENT_LOWAT,
IoUringChannelOption.TCP_KEEPCNT, IoUringChannelOption.TCP_KEEPIDLE, IoUringChannelOption.TCP_KEEPINTVL,
IoUringChannelOption.TCP_QUICKACK, IoUringChannelOption.IP_TRANSPARENT,
ChannelOption.TCP_FASTOPEN_CONNECT, IoUringChannelOption.IO_URING_WRITE_ZERO_COPY_THRESHOLD);
}
@SuppressWarnings("unchecked")
@Override
public <T> T getOption(ChannelOption<T> option) {
if (option == SO_RCVBUF) {
return (T) Integer.valueOf(getReceiveBufferSize());
}
if (option == SO_SNDBUF) {
return (T) Integer.valueOf(getSendBufferSize());
}
if (option == TCP_NODELAY) {
return (T) Boolean.valueOf(isTcpNoDelay());
}
if (option == SO_KEEPALIVE) {
return (T) Boolean.valueOf(isKeepAlive());
}
if (option == SO_REUSEADDR) {
return (T) Boolean.valueOf(isReuseAddress());
}
if (option == SO_LINGER) {
return (T) Integer.valueOf(getSoLinger());
}
if (option == IP_TOS) {
return (T) Integer.valueOf(getTrafficClass());
}
if (option == ALLOW_HALF_CLOSURE) {
return (T) Boolean.valueOf(isAllowHalfClosure());
}
if (option == IoUringChannelOption.TCP_CORK) {
return (T) Boolean.valueOf(isTcpCork());
}
if (option == IoUringChannelOption.TCP_NOTSENT_LOWAT) {
return (T) Long.valueOf(getTcpNotSentLowAt());
}
if (option == IoUringChannelOption.TCP_KEEPIDLE) {
return (T) Integer.valueOf(getTcpKeepIdle());
}
if (option == IoUringChannelOption.TCP_KEEPINTVL) {
return (T) Integer.valueOf(getTcpKeepIntvl());
}
if (option == IoUringChannelOption.TCP_KEEPCNT) {
return (T) Integer.valueOf(getTcpKeepCnt());
}
if (option == IoUringChannelOption.TCP_USER_TIMEOUT) {
return (T) Integer.valueOf(getTcpUserTimeout());
}
if (option == IoUringChannelOption.TCP_QUICKACK) {
return (T) Boolean.valueOf(isTcpQuickAck());
}
if (option == IoUringChannelOption.IP_TRANSPARENT) {
return (T) Boolean.valueOf(isIpTransparent());
}
if (option == ChannelOption.TCP_FASTOPEN_CONNECT) {
return (T) Boolean.valueOf(isTcpFastOpenConnect());
}
if (option == IoUringChannelOption.IO_URING_WRITE_ZERO_COPY_THRESHOLD) {
return (T) Integer.valueOf(getWriteZeroCopyThreshold());
}
return super.getOption(option);
}
@Override
public <T> boolean setOption(ChannelOption<T> option, T value) {
validate(option, value);
if (option == SO_RCVBUF) {
setReceiveBufferSize((Integer) value);
} else if (option == SO_SNDBUF) {
setSendBufferSize((Integer) value);
} else if (option == TCP_NODELAY) {
setTcpNoDelay((Boolean) value);
} else if (option == SO_KEEPALIVE) {
setKeepAlive((Boolean) value);
} else if (option == SO_REUSEADDR) {
setReuseAddress((Boolean) value);
} else if (option == SO_LINGER) {
setSoLinger((Integer) value);
} else if (option == IP_TOS) {
setTrafficClass((Integer) value);
} else if (option == ALLOW_HALF_CLOSURE) {
setAllowHalfClosure((Boolean) value);
} else if (option == IoUringChannelOption.TCP_CORK) {
setTcpCork((Boolean) value);
} else if (option == IoUringChannelOption.TCP_NOTSENT_LOWAT) {
setTcpNotSentLowAt((Long) value);
} else if (option == IoUringChannelOption.TCP_KEEPIDLE) {
setTcpKeepIdle((Integer) value);
} else if (option == IoUringChannelOption.TCP_KEEPCNT) {
setTcpKeepCnt((Integer) value);
} else if (option == IoUringChannelOption.TCP_KEEPINTVL) {
setTcpKeepIntvl((Integer) value);
} else if (option == IoUringChannelOption.TCP_USER_TIMEOUT) {
setTcpUserTimeout((Integer) value);
} else if (option == IoUringChannelOption.IP_TRANSPARENT) {
setIpTransparent((Boolean) value);
} else if (option == IoUringChannelOption.TCP_QUICKACK) {
setTcpQuickAck((Boolean) value);
} else if (option == ChannelOption.TCP_FASTOPEN_CONNECT) {
setTcpFastOpenConnect((Boolean) value);
} else if (option == IoUringChannelOption.IO_URING_WRITE_ZERO_COPY_THRESHOLD) {
setWriteZeroCopyThreshold((Integer) value);
} else {
return super.setOption(option, value);
}
return true;
}
@Override
public int getSendBufferSize() {
try {
return ((IoUringSocketChannel) channel).socket.getSendBufferSize();
} catch (IOException e) {
throw new ChannelException(e);
}
}
@Override
public int getSoLinger() {
try {
return ((IoUringSocketChannel) channel).socket.getSoLinger();
} catch (IOException e) {
throw new ChannelException(e);
}
}
@Override
public int getTrafficClass() {
try {
return ((IoUringSocketChannel) channel).socket.getTrafficClass();
} catch (IOException e) {
throw new ChannelException(e);
}
}
@Override
public boolean isKeepAlive() {
try {
return ((IoUringSocketChannel) channel).socket.isKeepAlive();
} catch (IOException e) {
throw new ChannelException(e);
}
}
@Override
public boolean isReuseAddress() {
try {
return ((IoUringSocketChannel) channel).socket.isReuseAddress();
} catch (IOException e) {
throw new ChannelException(e);
}
}
@Override
public boolean isTcpNoDelay() {
try {
return ((IoUringSocketChannel) channel).socket.isTcpNoDelay();
} catch (IOException e) {
throw new ChannelException(e);
}
}
/**
* Get the {@code TCP_CORK} option on the socket. See {@code man 7 tcp} for more details.
*/
public boolean isTcpCork() {
try {
return ((IoUringSocketChannel) channel).socket.isTcpCork();
} catch (IOException e) {
throw new ChannelException(e);
}
}
/**
* Get the {@code SO_BUSY_POLL} option on the socket. See {@code man 7 tcp} for more details.
*/
public int getSoBusyPoll() {
try {
return ((IoUringSocketChannel) channel).socket.getSoBusyPoll();
} catch (IOException e) {
throw new ChannelException(e);
}
}
/**
* Get the {@code TCP_NOTSENT_LOWAT} option on the socket. See {@code man 7 tcp} for more details.
*
* @return value is a uint32_t
*/
public long getTcpNotSentLowAt() {
try {
return ((IoUringSocketChannel) channel).socket.getTcpNotSentLowAt();
} catch (IOException e) {
throw new ChannelException(e);
}
}
/**
* Get the {@code TCP_KEEPIDLE} option on the socket. See {@code man 7 tcp} for more details.
*/
public int getTcpKeepIdle() {
try {
return ((IoUringSocketChannel) channel).socket.getTcpKeepIdle();
} catch (IOException e) {
throw new ChannelException(e);
}
}
/**
* Get the {@code TCP_KEEPINTVL} option on the socket. See {@code man 7 tcp} for more details.
*/
public int getTcpKeepIntvl() {
try {
return ((IoUringSocketChannel) channel).socket.getTcpKeepIntvl();
} catch (IOException e) {
throw new ChannelException(e);
}
}
/**
* Get the {@code TCP_KEEPCNT} option on the socket. See {@code man 7 tcp} for more details.
*/
public int getTcpKeepCnt() {
try {
return ((IoUringSocketChannel) channel).socket.getTcpKeepCnt();
} catch (IOException e) {
throw new ChannelException(e);
}
}
/**
* Get the {@code TCP_USER_TIMEOUT} option on the socket. See {@code man 7 tcp} for more details.
*/
public int getTcpUserTimeout() {
try {
return ((IoUringSocketChannel) channel).socket.getTcpUserTimeout();
} catch (IOException e) {
throw new ChannelException(e);
}
}
@Override
public IoUringSocketChannelConfig setKeepAlive(boolean keepAlive) {
try {
((IoUringSocketChannel) channel).socket.setKeepAlive(keepAlive);
return this;
} catch (IOException e) {
throw new ChannelException(e);
}
}
@Override
public IoUringSocketChannelConfig setPerformancePreferences(
int connectionTime, int latency, int bandwidth) {
return this;
}
@Override
public IoUringSocketChannelConfig setReceiveBufferSize(int receiveBufferSize) {
try {
((IoUringSocketChannel) channel).socket.setReceiveBufferSize(receiveBufferSize);
return this;
} catch (IOException e) {
throw new ChannelException(e);
}
}
@Override
public IoUringSocketChannelConfig setReuseAddress(boolean reuseAddress) {
try {
((IoUringSocketChannel) channel).socket.setReuseAddress(reuseAddress);
return this;
} catch (IOException e) {
throw new ChannelException(e);
}
}
@Override
public IoUringSocketChannelConfig setSendBufferSize(int sendBufferSize) {
try {
((IoUringSocketChannel) channel).socket.setSendBufferSize(sendBufferSize);
return this;
} catch (IOException e) {
throw new ChannelException(e);
}
}
@Override
public int getReceiveBufferSize() {
try {
return ((IoUringSocketChannel) channel).socket.getReceiveBufferSize();
} catch (IOException e) {
throw new ChannelException(e);
}
}
@Override
public IoUringSocketChannelConfig setSoLinger(int soLinger) {
try {
((IoUringSocketChannel) channel).socket.setSoLinger(soLinger);
return this;
} catch (IOException e) {
throw new ChannelException(e);
}
}
@Override
public IoUringSocketChannelConfig setTcpNoDelay(boolean tcpNoDelay) {
try {
((IoUringSocketChannel) channel).socket.setTcpNoDelay(tcpNoDelay);
return this;
} catch (IOException e) {
throw new ChannelException(e);
}
}
/**
* Set the {@code TCP_CORK} option on the socket. See {@code man 7 tcp} for more details.
*/
public IoUringSocketChannelConfig setTcpCork(boolean tcpCork) {
try {
((IoUringSocketChannel) channel).socket.setTcpCork(tcpCork);
return this;
} catch (IOException e) {
throw new ChannelException(e);
}
}
/**
* Set the {@code SO_BUSY_POLL} option on the socket. See {@code man 7 tcp} for more details.
*/
public IoUringSocketChannelConfig setSoBusyPoll(int loopMicros) {
try {
((IoUringSocketChannel) channel).socket.setSoBusyPoll(loopMicros);
return this;
} catch (IOException e) {
throw new ChannelException(e);
}
}
/**
* Set the {@code TCP_NOTSENT_LOWAT} option on the socket. See {@code man 7 tcp} for more details.
*
* @param tcpNotSentLowAt is a uint32_t
*/
public IoUringSocketChannelConfig setTcpNotSentLowAt(long tcpNotSentLowAt) {
try {
((IoUringSocketChannel) channel).socket.setTcpNotSentLowAt(tcpNotSentLowAt);
return this;
} catch (IOException e) {
throw new ChannelException(e);
}
}
@Override
public IoUringSocketChannelConfig setTrafficClass(int trafficClass) {
try {
((IoUringSocketChannel) channel).socket.setTrafficClass(trafficClass);
return this;
} catch (IOException e) {
throw new ChannelException(e);
}
}
/**
* Set the {@code TCP_KEEPIDLE} option on the socket. See {@code man 7 tcp} for more details.
*/
public IoUringSocketChannelConfig setTcpKeepIdle(int seconds) {
try {
((IoUringSocketChannel) channel).socket.setTcpKeepIdle(seconds);
return this;
} catch (IOException e) {
throw new ChannelException(e);
}
}
/**
* Set the {@code TCP_KEEPINTVL} option on the socket. See {@code man 7 tcp} for more details.
*/
public IoUringSocketChannelConfig setTcpKeepIntvl(int seconds) {
try {
((IoUringSocketChannel) channel).socket.setTcpKeepIntvl(seconds);
return this;
} catch (IOException e) {
throw new ChannelException(e);
}
}
/**
* @deprecated use {@link #setTcpKeepCnt(int)}
*/
@Deprecated
public IoUringSocketChannelConfig setTcpKeepCntl(int probes) {
return setTcpKeepCnt(probes);
}
/**
* Set the {@code TCP_KEEPCNT} option on the socket. See {@code man 7 tcp} for more details.
*/
public IoUringSocketChannelConfig setTcpKeepCnt(int probes) {
try {
((IoUringSocketChannel) channel).socket.setTcpKeepCnt(probes);
return this;
} catch (IOException e) {
throw new ChannelException(e);
}
}
/**
* Set the {@code TCP_USER_TIMEOUT} option on the socket. See {@code man 7 tcp} for more details.
*/
public IoUringSocketChannelConfig setTcpUserTimeout(int milliseconds) {
try {
((IoUringSocketChannel) channel).socket.setTcpUserTimeout(milliseconds);
return this;
} catch (IOException e) {
throw new ChannelException(e);
}
}
/**
* Returns {@code true} if <a href="https://man7.org/linux/man-pages/man7/ip.7.html">IP_TRANSPARENT</a> is enabled,
* {@code false} otherwise.
*/
public boolean isIpTransparent() {
try {
return ((IoUringSocketChannel) channel).socket.isIpTransparent();
} catch (IOException e) {
throw new ChannelException(e);
}
}
/**
* If {@code true} is used <a href="https://man7.org/linux/man-pages/man7/ip.7.html">IP_TRANSPARENT</a> is enabled,
* {@code false} for disable it. Default is disabled.
*/
public IoUringSocketChannelConfig setIpTransparent(boolean transparent) {
try {
((IoUringSocketChannel) channel).socket.setIpTransparent(transparent);
return this;
} catch (IOException e) {
throw new ChannelException(e);
}
}
// /**
// * Set the {@code TCP_MD5SIG} option on the socket. See {@code linux/tcp.h} for more details. Keys can only be set
// * on, not read to prevent a potential leak, as they are confidential. Allowing them being read would mean anyone
// * with access to the channel could get them.
// */
// public IOUringSocketChannelConfig setTcpMd5Sig(Map<InetAddress, byte[]> keys) {
// try {
// ((IOUringSocketChannel) channel).setTcpMd5Sig(keys);
// return this;
// } catch (IOException e) {
// throw new ChannelException(e);
// }
// }
/**
* Set the {@code TCP_QUICKACK} option on the socket. See <a href="https://linux.die.net/man/7/tcp">TCP_QUICKACK</a>
* for more details.
*/
public IoUringSocketChannelConfig setTcpQuickAck(boolean quickAck) {
try {
((IoUringSocketChannel) channel).socket.setTcpQuickAck(quickAck);
return this;
} catch (IOException e) {
throw new ChannelException(e);
}
}
/**
* Returns {@code true} if <a href="https://linux.die.net/man/7/tcp">TCP_QUICKACK</a> is enabled, {@code false}
* otherwise.
*/
public boolean isTcpQuickAck() {
try {
return ((IoUringSocketChannel) channel).socket.isTcpQuickAck();
} catch (IOException e) {
throw new ChannelException(e);
}
}
/**
* Enables client TCP fast open. See this <a href="https://lwn.net/Articles/508865/">LWN article</a> for more info.
*/
public IoUringSocketChannelConfig setTcpFastOpenConnect(boolean fastOpenConnect) {
this.tcpFastopen = fastOpenConnect;
return this;
}
/**
* Returns {@code true} if {@code TCP_FASTOPEN_CONNECT} is enabled, {@code false} otherwise.
*/
public boolean isTcpFastOpenConnect() {
return tcpFastopen;
}
@Override
public boolean isAllowHalfClosure() {
return allowHalfClosure;
}
@Override
public IoUringSocketChannelConfig setAllowHalfClosure(boolean allowHalfClosure) {
this.allowHalfClosure = allowHalfClosure;
return this;
}
@Override
public IoUringSocketChannelConfig setConnectTimeoutMillis(int connectTimeoutMillis) {
super.setConnectTimeoutMillis(connectTimeoutMillis);
return this;
}
@Override
@Deprecated
public IoUringSocketChannelConfig setMaxMessagesPerRead(int maxMessagesPerRead) {
super.setMaxMessagesPerRead(maxMessagesPerRead);
return this;
}
@Override
public IoUringSocketChannelConfig setWriteSpinCount(int writeSpinCount) {
super.setWriteSpinCount(writeSpinCount);
return this;
}
@Override
public IoUringSocketChannelConfig setAllocator(ByteBufAllocator allocator) {
super.setAllocator(allocator);
return this;
}
@Override
public IoUringSocketChannelConfig setRecvByteBufAllocator(RecvByteBufAllocator allocator) {
super.setRecvByteBufAllocator(allocator);
return this;
}
@Override
public IoUringSocketChannelConfig setAutoRead(boolean autoRead) {
super.setAutoRead(autoRead);
return this;
}
@Override
public IoUringSocketChannelConfig setAutoClose(boolean autoClose) {
super.setAutoClose(autoClose);
return this;
}
@Override
@Deprecated
public IoUringSocketChannelConfig setWriteBufferHighWaterMark(int writeBufferHighWaterMark) {
super.setWriteBufferHighWaterMark(writeBufferHighWaterMark);
return this;
}
@Override
@Deprecated
public IoUringSocketChannelConfig setWriteBufferLowWaterMark(int writeBufferLowWaterMark) {
super.setWriteBufferLowWaterMark(writeBufferLowWaterMark);
return this;
}
@Override
public IoUringSocketChannelConfig setWriteBufferWaterMark(WriteBufferWaterMark writeBufferWaterMark) {
super.setWriteBufferWaterMark(writeBufferWaterMark);
return this;
}
@Override
public IoUringSocketChannelConfig setMessageSizeEstimator(MessageSizeEstimator estimator) {
super.setMessageSizeEstimator(estimator);
return this;
}
private int getWriteZeroCopyThreshold() {
return writeZeroCopyThreshold;
}
IoUringSocketChannelConfig setWriteZeroCopyThreshold(int setWriteZeroCopyThreshold) {
if (setWriteZeroCopyThreshold == DISABLE_WRITE_ZERO_COPY) {
this.writeZeroCopyThreshold = DISABLE_WRITE_ZERO_COPY;
} else {
this.writeZeroCopyThreshold =
ObjectUtil.checkPositiveOrZero(setWriteZeroCopyThreshold, "setWriteZeroCopyThreshold");
}
return this;
}
boolean shouldWriteZeroCopy(int amount) {
// This can reduce one read operation on a volatile field.
int threshold = this.getWriteZeroCopyThreshold();
return threshold != DISABLE_WRITE_ZERO_COPY && amount >= threshold;
}
}
|
IoUringSocketChannelConfig
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java
|
{
"start": 2876,
"end": 5123
}
|
class ____ {
private static final Logger logger = LogManager.getLogger(MetadataUpdateSettingsService.class);
private final AllocationService allocationService;
private final IndexScopedSettings indexScopedSettings;
private final IndicesService indicesService;
private final ShardLimitValidator shardLimitValidator;
private final MasterServiceTaskQueue<UpdateSettingsTask> taskQueue;
public MetadataUpdateSettingsService(
ClusterService clusterService,
AllocationService allocationService,
IndexScopedSettings indexScopedSettings,
IndicesService indicesService,
ShardLimitValidator shardLimitValidator,
ThreadPool threadPool
) {
this.allocationService = allocationService;
this.indexScopedSettings = indexScopedSettings;
this.indicesService = indicesService;
this.shardLimitValidator = shardLimitValidator;
this.taskQueue = clusterService.createTaskQueue("update-settings", Priority.URGENT, batchExecutionContext -> {
var listener = new AllocationActionMultiListener<AcknowledgedResponse>(threadPool.getThreadContext());
var state = batchExecutionContext.initialState();
for (final var taskContext : batchExecutionContext.taskContexts()) {
try {
final var task = taskContext.getTask();
try (var ignored = taskContext.captureResponseHeaders()) {
state = task.execute(state);
}
taskContext.success(task.getAckListener(listener));
} catch (Exception e) {
taskContext.onFailure(e);
}
}
if (state != batchExecutionContext.initialState()) {
// reroute in case things change that require it (like number of replicas)
try (var ignored = batchExecutionContext.dropHeadersContext()) {
state = allocationService.reroute(state, "settings update", listener.reroute());
}
} else {
listener.noRerouteNeeded();
}
return state;
});
}
private final
|
MetadataUpdateSettingsService
|
java
|
spring-cloud__spring-cloud-gateway
|
spring-cloud-gateway-server-webmvc/src/main/java/org/springframework/cloud/gateway/server/mvc/common/KeyValues.java
|
{
"start": 913,
"end": 1473
}
|
class ____ {
private List<KeyValue> keyValues = new ArrayList<>();
public List<KeyValue> getKeyValues() {
return keyValues;
}
public void setKeyValues(List<KeyValue> keyValues) {
this.keyValues = keyValues;
}
public static KeyValues valueOf(String s) {
String[] tokens = StringUtils.tokenizeToStringArray(s, ",", true, true);
List<KeyValue> parsedKeyValues = Arrays.stream(tokens).map(KeyValue::valueOf).toList();
KeyValues keyValues = new KeyValues();
keyValues.setKeyValues(parsedKeyValues);
return keyValues;
}
public static
|
KeyValues
|
java
|
micronaut-projects__micronaut-core
|
core-processor/src/main/java/io/micronaut/inject/writer/DispatchWriter.java
|
{
"start": 28599,
"end": 30949
}
|
class ____ extends AbstractDispatchTarget {
private final TypedElement declaringType;
private final MethodElement methodElement;
private final int methodIndex;
private final boolean useOneDispatch;
private MethodReflectionDispatchTarget(TypedElement declaringType,
MethodElement methodElement,
int methodIndex,
boolean useOneDispatch) {
this.declaringType = declaringType;
this.methodElement = methodElement;
this.methodIndex = methodIndex;
this.useOneDispatch = useOneDispatch;
}
@Override
public boolean supportsDispatchOne() {
return useOneDispatch;
}
@Override
public boolean supportsDispatchMulti() {
return !useOneDispatch;
}
@Override
public TypedElement getDeclaringType() {
return declaringType;
}
@Override
public MethodElement getMethodElement() {
return methodElement;
}
@Override
public ExpressionDef dispatchMultiExpression(ExpressionDef target, ExpressionDef valuesArray) {
return TYPE_REFLECTION_UTILS.invokeStatic(
METHOD_INVOKE_METHOD,
methodElement.isStatic() ? ExpressionDef.nullValue() : target,
new VariableDef.This().invoke(GET_ACCESSIBLE_TARGET_METHOD, ExpressionDef.constant(methodIndex)),
valuesArray
);
}
@Override
public ExpressionDef dispatchOneExpression(ExpressionDef target, ExpressionDef value) {
return TYPE_REFLECTION_UTILS.invokeStatic(
METHOD_INVOKE_METHOD,
methodElement.isStatic() ? ExpressionDef.nullValue() : target,
new VariableDef.This().invoke(GET_ACCESSIBLE_TARGET_METHOD, ExpressionDef.constant(methodIndex)),
methodElement.getSuspendParameters().length > 0 ? TypeDef.OBJECT.array().instantiate(value) : TypeDef.OBJECT.array().instantiate()
);
}
}
/**
* Interceptable method invocation dispatch target.
*/
@Internal
public static final
|
MethodReflectionDispatchTarget
|
java
|
quarkusio__quarkus
|
core/deployment/src/main/java/io/quarkus/deployment/builditem/nativeimage/NativeImageResourceBuildItem.java
|
{
"start": 1134,
"end": 2335
}
|
class ____ extends MultiBuildItem {
private final List<String> resources;
/**
* Builds a {@code NativeImageResourceBuildItem} for the given artifact and path
*
* @param dependencies the resolved dependencies of the build
* @param artifactCoordinates the coordinates of the artifact containing the resources
* @param resourceFilter the filter for the resources in glob syntax (see {@link GlobUtil})
* @return
*/
public static NativeImageResourceBuildItem ofDependencyResources(
Collection<ResolvedDependency> dependencies,
ArtifactCoords artifactCoordinates,
PathFilter resourceFilter) {
var resolver = ArtifactResourceResolver.of(dependencies, artifactCoordinates);
return new NativeImageResourceBuildItem(resolver.resourceList(resourceFilter));
}
public NativeImageResourceBuildItem(String... resources) {
this.resources = Arrays.asList(resources);
}
public NativeImageResourceBuildItem(List<String> resources) {
this.resources = new ArrayList<>(resources);
}
public List<String> getResources() {
return resources;
}
}
|
NativeImageResourceBuildItem
|
java
|
spring-projects__spring-framework
|
spring-aspects/src/test/java/org/springframework/scheduling/aspectj/AnnotationDrivenBeanDefinitionParserTests.java
|
{
"start": 1192,
"end": 2480
}
|
/**
 * Tests for the {@code annotation-driven} XML configuration, verifying that the
 * AspectJ async execution aspect is registered and wired with the configured
 * executor and exception handler.
 */
class AnnotationDrivenBeanDefinitionParserTests {

    private ConfigurableApplicationContext context;

    @BeforeEach
    void setup() {
        this.context = new ClassPathXmlApplicationContext(
                "annotationDrivenContext.xml", AnnotationDrivenBeanDefinitionParserTests.class);
    }

    @AfterEach
    void after() {
        if (this.context != null) {
            this.context.close();
        }
    }

    @Test
    void asyncAspectRegistered() {
        assertThat(context.containsBean(TaskManagementConfigUtils.ASYNC_EXECUTION_ASPECT_BEAN_NAME)).isTrue();
    }

    @Test
    @SuppressWarnings("rawtypes")
    void asyncPostProcessorExecutorReference() {
        Object executor = context.getBean("testExecutor");
        Object aspect = context.getBean(TaskManagementConfigUtils.ASYNC_EXECUTION_ASPECT_BEAN_NAME);
        // The aspect stores the executor lazily behind a Supplier.
        assertThat(((Supplier) new DirectFieldAccessor(aspect).getPropertyValue("defaultExecutor")).get()).isSameAs(executor);
    }

    @Test
    @SuppressWarnings("rawtypes")
    void asyncPostProcessorExceptionHandlerReference() {
        Object exceptionHandler = context.getBean("testExceptionHandler");
        Object aspect = context.getBean(TaskManagementConfigUtils.ASYNC_EXECUTION_ASPECT_BEAN_NAME);
        assertThat(((Supplier) new DirectFieldAccessor(aspect).getPropertyValue("exceptionHandler")).get()).isSameAs(exceptionHandler);
    }
}
|
AnnotationDrivenBeanDefinitionParserTests
|
java
|
apache__kafka
|
connect/runtime/src/main/java/org/apache/kafka/connect/runtime/AbstractHerder.java
|
{
"start": 19294,
"end": 19957
}
|
class ____ the plugin type
* in a connector config (e.g., {@link ConnectorConfig#KEY_CONVERTER_CLASS_CONFIG});
* may not be null
* @param defaultProperties any default properties to include in the configuration that will be used for
* the plugin; may be null
* @return a {@link ConfigInfos} object containing validation results for the plugin in the connector config,
* or null if either no custom validation was performed (possibly because no custom plugin was defined in the
* connector config), or if custom validation failed
* @param <T> the plugin
|
for
|
java
|
apache__camel
|
components/camel-mail/src/test/java/org/apache/camel/component/mail/RawMailMessageTest.java
|
{
"start": 1752,
"end": 7895
}
|
/**
 * Tests access to the raw {@link Message} (jakarta.mail) from a Camel mail exchange,
 * for both raw ({@code mapMailMessage=false}) and mapped consumers over POP3 and IMAP.
 */
class RawMailMessageTest extends CamelTestSupport {

    private static final MailboxUser jonesPop3 = Mailbox.getOrCreateUser("jonesPop3", "secret");
    private static final MailboxUser jonesRawPop3 = Mailbox.getOrCreateUser("jonesRawPop3", "secret");
    private static final MailboxUser jonesImap = Mailbox.getOrCreateUser("jonesImap", "secret");
    private static final MailboxUser jonesRawImap = Mailbox.getOrCreateUser("jonesRawImap", "secret");
    private static final MailboxUser davsclaus = Mailbox.getOrCreateUser("davsclaus", "secret");

    @Override
    public void doPreSetup() throws Exception {
        Mailbox.clearAll();
        prepareMailbox(jonesPop3);
        prepareMailbox(jonesRawPop3);
        prepareMailbox(jonesImap);
        prepareMailbox(jonesRawImap);
    }

    @Test
    public void testGetRawJavaMailMessage() throws Exception {
        Mailbox.clearAll();
        Map<String, Object> map = new HashMap<>();
        map.put("To", davsclaus.getEmail());
        map.put("From", "jstrachan@apache.org");
        map.put("Subject", "Camel rocks");
        String body = "Hello Claus.\nYes it does.\n\nRegards James.";
        getMockEndpoint("mock:mail").expectedMessageCount(1);
        template.sendBodyAndHeaders(
                "smtp://davsclaus@localhost:" + Mailbox.getPort(Protocol.smtp) + "?password=" + davsclaus.getPassword(), body,
                map);
        MockEndpoint.assertIsSatisfied(context);
        Exchange exchange = getMockEndpoint("mock:mail").getReceivedExchanges().get(0);
        // START SNIPPET: e1
        // get access to the raw jakarta.mail.Message as shown below
        Message javaMailMessage = exchange.getIn(MailMessage.class).getMessage();
        assertNotNull(javaMailMessage, "The mail message should not be null");
        assertEquals("Camel rocks", javaMailMessage.getSubject());
        // END SNIPPET: e1
    }

    @Test
    public void testRawMessageConsumerPop3() throws Exception {
        testRawMessageConsumer("Pop3", jonesRawPop3);
    }

    @Test
    public void testRawMessageConsumerImap() throws Exception {
        testRawMessageConsumer("Imap", jonesRawImap);
    }

    private void testRawMessageConsumer(String type, MailboxUser user) throws Exception {
        Mailbox mailboxRaw = user.getInbox();
        assertEquals(1, mailboxRaw.getMessageCount(), "expected 1 message in the mailbox");
        MockEndpoint mock = getMockEndpoint("mock://rawMessage" + type);
        mock.expectedMessageCount(1);
        mock.message(0).body().isNotNull();
        MockEndpoint.assertIsSatisfied(context);
        Message mailMessage = mock.getExchanges().get(0).getIn().getBody(Message.class);
        // FIX: arguments were in JUnit 4 order (message first), which made this
        // assertion always pass because the non-null String literal was asserted.
        assertNotNull(mailMessage.getSubject(), "mail subject should not be null");
        assertEquals("hurz", mailMessage.getSubject(), "mail subject should be hurz");
        Map<String, Object> headers = mock.getExchanges().get(0).getIn().getHeaders();
        assertNotNull(headers, "headers should not be null");
        assertFalse(headers.isEmpty(), "headers should not be empty");
    }

    @Test
    public void testNormalMessageConsumerPop3() throws Exception {
        testNormalMessageConsumer("Pop3", jonesPop3);
    }

    @Test
    public void testNormalMessageConsumerImap() throws Exception {
        testNormalMessageConsumer("Imap", jonesImap);
    }

    private void testNormalMessageConsumer(String type, MailboxUser user) throws Exception {
        Mailbox mailbox = user.getInbox();
        assertEquals(1, mailbox.getMessageCount(), "expected 1 message in the mailbox");
        MockEndpoint mock = getMockEndpoint("mock://normalMessage" + type);
        mock.expectedMessageCount(1);
        mock.message(0).body().isNotNull();
        MockEndpoint.assertIsSatisfied(context);
        String body = mock.getExchanges().get(0).getIn().getBody(String.class);
        MimeMessage mm = new MimeMessage(null, new ByteArrayInputStream(body.getBytes()));
        String subject = mm.getSubject();
        assertNull(subject, "mail subject should not be available");
        Map<String, Object> headers = mock.getExchanges().get(0).getIn().getHeaders();
        assertNotNull(headers, "headers should not be null");
        assertFalse(headers.isEmpty(), "headers should not be empty");
    }

    private void prepareMailbox(MailboxUser user) throws Exception {
        // connect to mailbox
        JavaMailSender sender = new DefaultJavaMailSender();
        Store store = sender.getSession().getStore("imap");
        store.connect("localhost", Mailbox.getPort(Protocol.imap), user.getLogin(), user.getPassword());
        Folder folder = store.getFolder("INBOX");
        folder.open(Folder.READ_WRITE);
        folder.expunge();
        InputStream is = getClass().getResourceAsStream("/SignedMailTestCaseHurz.txt");
        Message hurzMsg = new MimeMessage(sender.getSession(), is);
        Message[] messages = new Message[] { hurzMsg };
        // insert one signed message
        folder.appendMessages(messages);
        folder.close(true);
    }

    @Override
    protected RouteBuilder createRouteBuilder() {
        return new RouteBuilder() {
            public void configure() {
                from(davsclaus.uriPrefix(Protocol.pop3) + "&closeFolder=false").to("mock:mail");
                from(jonesRawPop3.uriPrefix(Protocol.pop3)
                        + "&closeFolder=false&initialDelay=100&delay=100&delete=true&mapMailMessage=false")
                        .to("mock://rawMessagePop3");
                from(jonesImap.uriPrefix(Protocol.imap)
                        + "&closeFolder=false&initialDelay=100&delay=100&delete=true&mapMailMessage=false")
                        .to("mock://rawMessageImap");
                from(jonesPop3.uriPrefix(Protocol.pop3) + "&closeFolder=false&initialDelay=100&delay=100&delete=true")
                        .to("mock://normalMessagePop3");
                from(jonesImap.uriPrefix(Protocol.imap) + "&closeFolder=false&initialDelay=100&delay=100&delete=true")
                        .to("mock://normalMessageImap");
            }
        };
    }
}
|
RawMailMessageTest
|
java
|
apache__avro
|
lang/java/tools/src/test/compiler/output/Player.java
|
{
"start": 7940,
"end": 20346
}
|
class ____ extends org.apache.avro.specific.SpecificRecordBuilderBase<Player>
implements org.apache.avro.data.RecordBuilder<Player> {
/** The number of the player */
private int number;
private java.lang.CharSequence first_name;
private java.lang.CharSequence last_name;
private java.util.List<avro.examples.baseball.Position> position;
/** Creates a new Builder */
private Builder() {
super(SCHEMA$, MODEL$);
}
/**
* Creates a Builder by copying an existing Builder.
* @param other The existing Builder to copy.
*/
private Builder(avro.examples.baseball.Player.Builder other) {
super(other);
if (isValidValue(fields()[0], other.number)) {
this.number = data().deepCopy(fields()[0].schema(), other.number);
fieldSetFlags()[0] = other.fieldSetFlags()[0];
}
if (isValidValue(fields()[1], other.first_name)) {
this.first_name = data().deepCopy(fields()[1].schema(), other.first_name);
fieldSetFlags()[1] = other.fieldSetFlags()[1];
}
if (isValidValue(fields()[2], other.last_name)) {
this.last_name = data().deepCopy(fields()[2].schema(), other.last_name);
fieldSetFlags()[2] = other.fieldSetFlags()[2];
}
if (isValidValue(fields()[3], other.position)) {
this.position = data().deepCopy(fields()[3].schema(), other.position);
fieldSetFlags()[3] = other.fieldSetFlags()[3];
}
}
/**
* Creates a Builder by copying an existing Player instance
* @param other The existing instance to copy.
*/
private Builder(avro.examples.baseball.Player other) {
super(SCHEMA$, MODEL$);
if (isValidValue(fields()[0], other.number)) {
this.number = data().deepCopy(fields()[0].schema(), other.number);
fieldSetFlags()[0] = true;
}
if (isValidValue(fields()[1], other.first_name)) {
this.first_name = data().deepCopy(fields()[1].schema(), other.first_name);
fieldSetFlags()[1] = true;
}
if (isValidValue(fields()[2], other.last_name)) {
this.last_name = data().deepCopy(fields()[2].schema(), other.last_name);
fieldSetFlags()[2] = true;
}
if (isValidValue(fields()[3], other.position)) {
this.position = data().deepCopy(fields()[3].schema(), other.position);
fieldSetFlags()[3] = true;
}
}
/**
* Gets the value of the 'number' field.
* The number of the player
* @return The value.
*/
public int getNumber() {
return number;
}
/**
* Sets the value of the 'number' field.
* The number of the player
* @param value The value of 'number'.
* @return This builder.
*/
public avro.examples.baseball.Player.Builder setNumber(int value) {
validate(fields()[0], value);
this.number = value;
fieldSetFlags()[0] = true;
return this;
}
/**
* Checks whether the 'number' field has been set.
* The number of the player
* @return True if the 'number' field has been set, false otherwise.
*/
public boolean hasNumber() {
return fieldSetFlags()[0];
}
/**
* Clears the value of the 'number' field.
* The number of the player
* @return This builder.
*/
public avro.examples.baseball.Player.Builder clearNumber() {
fieldSetFlags()[0] = false;
return this;
}
/**
* Gets the value of the 'first_name' field.
* @return The value.
*/
public java.lang.CharSequence getFirstName() {
return first_name;
}
/**
* Sets the value of the 'first_name' field.
* @param value The value of 'first_name'.
* @return This builder.
*/
public avro.examples.baseball.Player.Builder setFirstName(java.lang.CharSequence value) {
validate(fields()[1], value);
this.first_name = value;
fieldSetFlags()[1] = true;
return this;
}
/**
* Checks whether the 'first_name' field has been set.
* @return True if the 'first_name' field has been set, false otherwise.
*/
public boolean hasFirstName() {
return fieldSetFlags()[1];
}
/**
* Clears the value of the 'first_name' field.
* @return This builder.
*/
public avro.examples.baseball.Player.Builder clearFirstName() {
first_name = null;
fieldSetFlags()[1] = false;
return this;
}
/**
* Gets the value of the 'last_name' field.
* @return The value.
*/
public java.lang.CharSequence getLastName() {
return last_name;
}
/**
* Sets the value of the 'last_name' field.
* @param value The value of 'last_name'.
* @return This builder.
*/
public avro.examples.baseball.Player.Builder setLastName(java.lang.CharSequence value) {
validate(fields()[2], value);
this.last_name = value;
fieldSetFlags()[2] = true;
return this;
}
/**
* Checks whether the 'last_name' field has been set.
* @return True if the 'last_name' field has been set, false otherwise.
*/
public boolean hasLastName() {
return fieldSetFlags()[2];
}
/**
* Clears the value of the 'last_name' field.
* @return This builder.
*/
public avro.examples.baseball.Player.Builder clearLastName() {
last_name = null;
fieldSetFlags()[2] = false;
return this;
}
/**
* Gets the value of the 'position' field.
* @return The value.
*/
public java.util.List<avro.examples.baseball.Position> getPosition() {
return position;
}
/**
* Sets the value of the 'position' field.
* @param value The value of 'position'.
* @return This builder.
*/
public avro.examples.baseball.Player.Builder setPosition(java.util.List<avro.examples.baseball.Position> value) {
validate(fields()[3], value);
this.position = value;
fieldSetFlags()[3] = true;
return this;
}
/**
* Checks whether the 'position' field has been set.
* @return True if the 'position' field has been set, false otherwise.
*/
public boolean hasPosition() {
return fieldSetFlags()[3];
}
/**
* Clears the value of the 'position' field.
* @return This builder.
*/
public avro.examples.baseball.Player.Builder clearPosition() {
position = null;
fieldSetFlags()[3] = false;
return this;
}
@Override
@SuppressWarnings("unchecked")
public Player build() {
try {
Player record = new Player();
record.number = fieldSetFlags()[0] ? this.number : (java.lang.Integer) defaultValue(fields()[0]);
record.first_name = fieldSetFlags()[1] ? this.first_name : (java.lang.CharSequence) defaultValue(fields()[1]);
record.last_name = fieldSetFlags()[2] ? this.last_name : (java.lang.CharSequence) defaultValue(fields()[2]);
record.position = fieldSetFlags()[3] ? this.position : (java.util.List<avro.examples.baseball.Position>) defaultValue(fields()[3]);
return record;
} catch (org.apache.avro.AvroMissingFieldException e) {
throw e;
} catch (java.lang.Exception e) {
throw new org.apache.avro.AvroRuntimeException(e);
}
}
}
@SuppressWarnings("unchecked")
private static final org.apache.avro.io.DatumWriter<Player>
    WRITER$ = (org.apache.avro.io.DatumWriter<Player>)MODEL$.createDatumWriter(SCHEMA$);

/** Serializes this record via Java externalization using the Avro datum writer. */
@Override public void writeExternal(java.io.ObjectOutput out)
    throws java.io.IOException {
    WRITER$.write(this, SpecificData.getEncoder(out));
}

@SuppressWarnings("unchecked")
private static final org.apache.avro.io.DatumReader<Player>
    READER$ = (org.apache.avro.io.DatumReader<Player>)MODEL$.createDatumReader(SCHEMA$);

/** Deserializes this record via Java externalization using the Avro datum reader. */
@Override public void readExternal(java.io.ObjectInput in)
    throws java.io.IOException {
    READER$.read(this, SpecificData.getDecoder(in));
}
// Signals that this record provides hand-written encode/decode implementations
// (customEncode/customDecode) instead of generic reflection-based coding.
@Override protected boolean hasCustomCoders() { return true; }

/** Writes the record's fields in schema order directly to the encoder. */
@Override public void customEncode(org.apache.avro.io.Encoder out)
    throws java.io.IOException
{
    out.writeInt(this.number);
    out.writeString(this.first_name);
    out.writeString(this.last_name);
    long size0 = this.position.size();
    out.writeArrayStart();
    out.setItemCount(size0);
    long actualSize0 = 0;
    for (avro.examples.baseball.Position e0: this.position) {
        actualSize0++;
        out.startItem();
        out.writeEnum(e0.ordinal());
    }
    out.writeArrayEnd();
    // Guard against the list being mutated while it was being written.
    if (actualSize0 != size0)
        throw new java.util.ConcurrentModificationException("Array-size written was " + size0 + ", but element count was " + actualSize0 + ".");
}
/**
 * Reads the record's fields from the decoder. The fast path (fieldOrder == null)
 * assumes the writer's field order matches the reader's schema; otherwise each
 * field is dispatched by its resolved position.
 */
@Override public void customDecode(org.apache.avro.io.ResolvingDecoder in)
    throws java.io.IOException
{
    org.apache.avro.Schema.Field[] fieldOrder = in.readFieldOrderIfDiff();
    if (fieldOrder == null) {
        this.number = in.readInt();
        this.first_name = in.readString(this.first_name instanceof Utf8 ? (Utf8)this.first_name : null);
        this.last_name = in.readString(this.last_name instanceof Utf8 ? (Utf8)this.last_name : null);
        long size0 = in.readArrayStart();
        // Reuse the existing list when possible to avoid reallocation.
        java.util.List<avro.examples.baseball.Position> a0 = this.position;
        if (a0 == null) {
            a0 = new SpecificData.Array<avro.examples.baseball.Position>((int)size0, SCHEMA$.getField("position").schema());
            this.position = a0;
        } else a0.clear();
        SpecificData.Array<avro.examples.baseball.Position> ga0 = (a0 instanceof SpecificData.Array ? (SpecificData.Array<avro.examples.baseball.Position>)a0 : null);
        // Avro arrays arrive in blocks; the outer loop advances block by block.
        for ( ; 0 < size0; size0 = in.arrayNext()) {
            for ( ; size0 != 0; size0--) {
                avro.examples.baseball.Position e0 = (ga0 != null ? ga0.peek() : null);
                e0 = avro.examples.baseball.Position.values()[in.readEnum()];
                a0.add(e0);
            }
        }
    } else {
        for (int i = 0; i < 4; i++) {
            switch (fieldOrder[i].pos()) {
            case 0:
                this.number = in.readInt();
                break;
            case 1:
                this.first_name = in.readString(this.first_name instanceof Utf8 ? (Utf8)this.first_name : null);
                break;
            case 2:
                this.last_name = in.readString(this.last_name instanceof Utf8 ? (Utf8)this.last_name : null);
                break;
            case 3:
                long size0 = in.readArrayStart();
                java.util.List<avro.examples.baseball.Position> a0 = this.position;
                if (a0 == null) {
                    a0 = new SpecificData.Array<avro.examples.baseball.Position>((int)size0, SCHEMA$.getField("position").schema());
                    this.position = a0;
                } else a0.clear();
                SpecificData.Array<avro.examples.baseball.Position> ga0 = (a0 instanceof SpecificData.Array ? (SpecificData.Array<avro.examples.baseball.Position>)a0 : null);
                for ( ; 0 < size0; size0 = in.arrayNext()) {
                    for ( ; size0 != 0; size0--) {
                        avro.examples.baseball.Position e0 = (ga0 != null ? ga0.peek() : null);
                        e0 = avro.examples.baseball.Position.values()[in.readEnum()];
                        a0.add(e0);
                    }
                }
                break;
            default:
                throw new java.io.IOException("Corrupt ResolvingDecoder.");
            }
        }
    }
}
@Override
public int hashCode() {
    int result = 1;
    result = 31 * result + Integer.hashCode(this.number);
    result = 31 * result + (this.first_name == null ? 0 : this.first_name.hashCode());
    result = 31 * result + (this.last_name == null ? 0 : this.last_name.hashCode());
    result = 31 * result + (this.position == null ? 0 : this.position.hashCode());
    return result;
}

@Override
public boolean equals(Object o) {
    if (this == o) {
        return true;
    }
    if (!(o instanceof Player)) {
        return false;
    }
    Player other = (Player) o;
    if (this.number != other.number) {
        return false;
    }
    // NOTE(review): equals compares name fields by character content
    // (Utf8.compareSequences), while hashCode uses the CharSequence's own
    // hashCode — presumably Utf8 and String hash identically for equal
    // content here; verify against the Avro Utf8 contract.
    if (Utf8.compareSequences(this.first_name, other.first_name) != 0) {
        return false;
    }
    if (Utf8.compareSequences(this.last_name, other.last_name) != 0) {
        return false;
    }
    if (!java.util.Objects.equals(this.position, other.position)) {
        return false;
    }
    return true;
}
}
|
Builder
|
java
|
apache__camel
|
components/camel-netty-http/src/test/java/org/apache/camel/component/netty/http/NettyHttpAccessHttpRequestAndResponseBeanTest.java
|
{
"start": 1555,
"end": 3312
}
|
/**
 * Verifies that a bean method can accept the raw Netty {@link FullHttpRequest}
 * and return a raw {@link HttpResponse} on a netty-http route.
 */
class NettyHttpAccessHttpRequestAndResponseBeanTest extends BaseNettyTest {

    @Test
    public void testRawHttpRequestAndResponseInBean() throws Exception {
        getMockEndpoint("mock:input").expectedBodiesReceived("World", "Camel");
        String out = template.requestBody("netty-http:http://localhost:{{port}}/foo", "World", String.class);
        await().atMost(3, TimeUnit.SECONDS).untilAsserted(() -> assertEquals("Bye World", out));
        String out2 = template.requestBody("netty-http:http://localhost:{{port}}/foo", "Camel", String.class);
        await().atMost(3, TimeUnit.SECONDS).untilAsserted(() -> assertEquals("Bye Camel", out2));
        MockEndpoint.assertIsSatisfied(context);
    }

    @Override
    protected RouteBuilder createRouteBuilder() {
        return new RouteBuilder() {
            @Override
            public void configure() {
                from("netty-http:http://0.0.0.0:{{port}}/foo")
                        .to("mock:input")
                        .transform().method(NettyHttpAccessHttpRequestAndResponseBeanTest.class, "myTransformer");
            }
        };
    }

    /**
     * We can use both a netty http request and response type for transformation
     */
    public static HttpResponse myTransformer(FullHttpRequest request) {
        String in = request.content().toString(StandardCharsets.UTF_8);
        String reply = "Bye " + in;
        request.content().release();
        // FIX: encode once as UTF-8 and use the BYTE length for Content-Length.
        // Previously the body used platform-default getBytes() while the header
        // used reply.length() (character count) — mismatched for non-ASCII input.
        byte[] payload = reply.getBytes(StandardCharsets.UTF_8);
        HttpResponse response = new DefaultFullHttpResponse(
                HttpVersion.HTTP_1_1, HttpResponseStatus.OK,
                NettyConverter.toByteBuffer(payload));
        response.headers().set(HttpHeaderNames.CONTENT_LENGTH.toString(), payload.length);
        return response;
    }
}
|
NettyHttpAccessHttpRequestAndResponseBeanTest
|
java
|
apache__flink
|
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/StateHandleTransferSpec.java
|
{
"start": 1117,
"end": 2043
}
|
class ____ {
/** The state handle to transfer. */
private final IncrementalRemoteKeyedStateHandle stateHandle;
/** The path to which the content of the state handle shall be transferred. */
private final Path transferDestination;
public StateHandleTransferSpec(
IncrementalRemoteKeyedStateHandle stateHandle, Path transferDestination) {
this.stateHandle = stateHandle;
this.transferDestination = transferDestination;
}
public IncrementalRemoteKeyedStateHandle getStateHandle() {
return stateHandle;
}
public Path getTransferDestination() {
return transferDestination;
}
@Override
public String toString() {
return "StateHandleTransferSpec(transferDestination = ["
+ transferDestination
+ "] stateHandle = ["
+ stateHandle
+ "])";
}
}
|
StateHandleTransferSpec
|
java
|
apache__camel
|
components/camel-aws/camel-aws2-ecs/src/test/java/org/apache/camel/component/aws2/ecs/ECS2ProducerSpringTest.java
|
{
"start": 1618,
"end": 5492
}
|
/**
 * Spring-XML-configured producer tests for the AWS2 ECS component,
 * exercising list/create/describe/delete cluster operations against a mock.
 */
class ECS2ProducerSpringTest extends CamelSpringTestSupport {

    @EndpointInject("mock:result")
    private MockEndpoint mock;

    @Test
    public void ecsListClustersTest() throws Exception {
        mock.expectedMessageCount(1);
        Exchange exchange = template.request("direct:listClusters", new Processor() {
            @Override
            public void process(Exchange exchange) {
                exchange.getIn().setHeader(ECS2Constants.OPERATION, ECS2Operations.listClusters);
            }
        });
        MockEndpoint.assertIsSatisfied(context);
        // Consistency: use the typed getBody(Class) accessor everywhere instead of raw casts.
        ListClustersResponse resultGet = exchange.getIn().getBody(ListClustersResponse.class);
        assertEquals(1, resultGet.clusterArns().size());
        assertEquals("Test", resultGet.clusterArns().get(0));
    }

    @Test
    public void ecsListClustersPojoTest() throws Exception {
        mock.expectedMessageCount(1);
        Exchange exchange = template.request("direct:listClustersPojo", new Processor() {
            @Override
            public void process(Exchange exchange) {
                exchange.getIn().setHeader(ECS2Constants.OPERATION, ECS2Operations.listClusters);
                exchange.getIn().setBody(ListClustersRequest.builder().maxResults(10).build());
            }
        });
        MockEndpoint.assertIsSatisfied(context);
        ListClustersResponse resultGet = exchange.getIn().getBody(ListClustersResponse.class);
        assertEquals(1, resultGet.clusterArns().size());
        assertEquals("Test", resultGet.clusterArns().get(0));
    }

    @Test
    public void ecsCreateClusterTest() throws Exception {
        mock.expectedMessageCount(1);
        Exchange exchange = template.request("direct:createCluster", new Processor() {
            @Override
            public void process(Exchange exchange) {
                exchange.getIn().setHeader(ECS2Constants.OPERATION, ECS2Operations.createCluster);
                exchange.getIn().setHeader(ECS2Constants.CLUSTER_NAME, "Test");
            }
        });
        MockEndpoint.assertIsSatisfied(context);
        CreateClusterResponse resultGet = exchange.getIn().getBody(CreateClusterResponse.class);
        assertEquals("Test", resultGet.cluster().clusterName());
    }

    @Test
    public void ecsDescribeClusterTest() throws Exception {
        // Renamed from eksDescribeClusterTest: this is an ECS test, not EKS.
        mock.expectedMessageCount(1);
        Exchange exchange = template.request("direct:describeCluster", new Processor() {
            @Override
            public void process(Exchange exchange) {
                exchange.getIn().setHeader(ECS2Constants.OPERATION, ECS2Operations.describeCluster);
                exchange.getIn().setHeader(ECS2Constants.CLUSTER_NAME, "Test");
            }
        });
        MockEndpoint.assertIsSatisfied(context);
        DescribeClustersResponse resultGet = exchange.getIn().getBody(DescribeClustersResponse.class);
        assertEquals("Test", resultGet.clusters().get(0).clusterName());
    }

    @Test
    public void ecsDeleteClusterTest() throws Exception {
        // Renamed from eksDeleteClusterTest: this is an ECS test, not EKS.
        mock.expectedMessageCount(1);
        Exchange exchange = template.request("direct:deleteCluster", new Processor() {
            @Override
            public void process(Exchange exchange) {
                exchange.getIn().setHeader(ECS2Constants.OPERATION, ECS2Operations.deleteCluster);
                exchange.getIn().setHeader(ECS2Constants.CLUSTER_NAME, "Test");
            }
        });
        MockEndpoint.assertIsSatisfied(context);
        DeleteClusterResponse resultGet = exchange.getIn().getBody(DeleteClusterResponse.class);
        assertEquals("Test", resultGet.cluster().clusterName());
    }

    @Override
    protected ClassPathXmlApplicationContext createApplicationContext() {
        return new ClassPathXmlApplicationContext("org/apache/camel/component/aws2/ecs/ECSComponentSpringTest-context.xml");
    }
}
|
ECS2ProducerSpringTest
|
java
|
eclipse-vertx__vert.x
|
vertx-core/src/main/java/io/vertx/core/spi/context/storage/AccessMode.java
|
{
"start": 641,
"end": 1698
}
|
interface ____ {
/**
* This access mode provides concurrent access to context local storage with thread safety and atomicity.
*/
AccessMode CONCURRENT = ConcurrentAccessMode.INSTANCE;
/**
* Return the object at index {@code idx} in the {@code locals} array.
* @param locals the array
* @param idx the index
* @return the object at {@code index}
*/
Object get(Object[] locals, int idx);
/**
* Put {@code value} in the {@code locals} array at index {@code idx}
* @param locals the array
* @param idx the index
* @param value the value
*/
void put(Object[] locals, int idx, Object value);
/**
* Get or create the object at index {@code index} in the {@code locals} array. When the object
* does not exist, {@code initialValueSupplier} must be called to obtain this value.
*
* @param locals the array
* @param idx the index
* @param initialValueSupplier the supplier of the initial value
*/
Object getOrCreate(Object[] locals, int idx, Supplier<Object> initialValueSupplier);
}
|
AccessMode
|
java
|
ReactiveX__RxJava
|
src/main/java/io/reactivex/rxjava3/schedulers/Schedulers.java
|
{
"start": 3093,
"end": 3383
}
|
class ____ {
@NonNull
static final Scheduler SINGLE;
@NonNull
static final Scheduler COMPUTATION;
@NonNull
static final Scheduler IO;
@NonNull
static final Scheduler TRAMPOLINE;
@NonNull
static final Scheduler NEW_THREAD;
static final
|
Schedulers
|
java
|
junit-team__junit5
|
platform-tests/src/test/java/org/junit/platform/engine/support/descriptor/AbstractTestDescriptorTests.java
|
{
"start": 7246,
"end": 7440
}
|
class ____ extends AbstractTestDescriptor {
DemoDescriptor(String displayName) {
super(mock(), displayName);
}
@Override
public Type getType() {
return Type.CONTAINER;
}
}
|
DemoDescriptor
|
java
|
assertj__assertj-core
|
assertj-core/src/test/java/org/assertj/core/api/doublearray/DoubleArrayAssert_hasSize_Test.java
|
{
"start": 905,
"end": 1226
}
|
/**
 * Verifies that {@code DoubleArrayAssert#hasSize(int)} delegates to the
 * internal {@code DoubleArrays#assertHasSize} with the assertion's info and actual.
 */
class DoubleArrayAssert_hasSize_Test extends DoubleArrayAssertBaseTest {

    @Override
    protected DoubleArrayAssert invoke_api_method() {
        return assertions.hasSize(6);
    }

    @Override
    protected void verify_internal_effects() {
        verify(arrays).assertHasSize(getInfo(assertions), getActual(assertions), 6);
    }
}
|
DoubleArrayAssert_hasSize_Test
|
java
|
alibaba__druid
|
core/src/main/java/com/alibaba/druid/sql/dialect/gaussdb/ast/GaussDbObjectImpl.java
|
{
"start": 241,
"end": 419
}
|
/**
 * Base implementation for GaussDB-dialect SQL AST nodes.
 */
class GaussDbObjectImpl extends SQLObjectImpl implements GaussDbObject {

    // Generic visitor entry point is a no-op; GaussDB nodes are visited through
    // the dialect-specific visitor below.
    public void accept0(SQLASTVisitor v) {
    }

    // Dialect-specific hook; concrete node classes override this to dispatch.
    public void accept0(GaussDbASTVisitor visitor) {
    }
}
|
GaussDbObjectImpl
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java
|
{
"start": 1155,
"end": 1425
}
|
class ____ allows a DataNode to communicate information about all
* its disks that appear to be slow.
*
* The wire representation of this structure is a list of
* SlowDiskReportProto messages.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public final
|
that
|
java
|
assertj__assertj-core
|
assertj-tests/assertj-integration-tests/assertj-guava-tests/src/test/java/org/assertj/tests/guava/api/RangeSetAssert_enclosesAnyRangesOf_with_Iterable_Test.java
|
{
"start": 1560,
"end": 3825
}
|
/**
 * Tests for {@code RangeSetAssert#enclosesAnyRangesOf(Iterable)}: null/empty
 * argument handling, failure on no enclosed range, and success cases.
 */
class RangeSetAssert_enclosesAnyRangesOf_with_Iterable_Test {

    @Test
    void should_fail_if_actual_is_null() {
        // GIVEN
        RangeSet<Integer> actual = null;
        Iterable<Range<Integer>> ranges = emptySet();
        // WHEN
        var error = expectAssertionError(() -> assertThat(actual).enclosesAnyRangesOf(ranges));
        // THEN
        then(error).hasMessage(actualIsNull());
    }

    @Test
    void should_fail_if_ranges_is_null() {
        // GIVEN
        RangeSet<Integer> actual = ImmutableRangeSet.of();
        Iterable<Range<Integer>> ranges = null;
        // WHEN
        Throwable thrown = catchThrowable(() -> assertThat(actual).enclosesAnyRangesOf(ranges));
        // THEN
        then(thrown).isInstanceOf(NullPointerException.class)
                    .hasMessage(shouldNotBeNull("ranges").create());
    }

    @Test
    void should_fail_if_ranges_is_empty() {
        // GIVEN
        RangeSet<Integer> actual = ImmutableRangeSet.of(closed(0, 1));
        Iterable<Range<Integer>> ranges = emptySet();
        // WHEN
        Throwable thrown = catchThrowable(() -> assertThat(actual).enclosesAnyRangesOf(ranges));
        // THEN
        then(thrown).isInstanceOf(IllegalArgumentException.class)
                    .hasMessage("Expecting ranges not to be empty");
    }

    @Test
    void should_fail_if_actual_does_not_enclose_ranges() {
        // GIVEN
        RangeSet<Integer> actual = ImmutableRangeSet.of(open(0, 100));
        Iterable<Range<Integer>> ranges = asList(closed(0, 10), open(90, 110));
        // WHEN
        var error = expectAssertionError(() -> assertThat(actual).enclosesAnyRangesOf(ranges));
        // THEN
        then(error).hasMessage(shouldEncloseAnyOf(actual, ranges).create());
    }

    @Test
    void should_pass_if_both_actual_and_ranges_are_empty() {
        // GIVEN
        RangeSet<Integer> actual = create();
        Iterable<Range<Integer>> ranges = emptySet();
        // WHEN/THEN
        assertThat(actual).enclosesAnyRangesOf(ranges);
    }

    @Test
    void should_pass_if_actual_encloses_ranges() {
        // GIVEN
        RangeSet<Integer> actual = ImmutableRangeSet.of(closed(0, 100));
        Iterable<Range<Integer>> ranges = asList(open(0, 10),
                                                 open(50, 60),
                                                 open(90, 110));
        // WHEN/THEN
        assertThat(actual).enclosesAnyRangesOf(ranges);
    }
}
|
RangeSetAssert_enclosesAnyRangesOf_with_Iterable_Test
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/matchers/AnnotationHasArgumentWithValueTest.java
|
{
"start": 1289,
"end": 1484
}
|
interface ____ {
String stuff();
}
""");
}
@Test
public void matches() {
writeFile(
"A.java",
"""
@Thing(stuff = "y")
public
|
Thing
|
java
|
micronaut-projects__micronaut-core
|
test-suite/src/test/java/io/micronaut/docs/replaces/qualifiers/named/beans/SomeInterfaceReplaceNamedImplOne.java
|
{
"start": 156,
"end": 293
}
|
/** No-op implementation of {@code SomeInterfaceReplaceNamed} used in the docs test suite. */
class SomeInterfaceReplaceNamedImplOne implements SomeInterfaceReplaceNamed
{
    @Override
    public void doSomething()
    {
        // intentionally empty: presence of the bean is what the test asserts
    }
}
|
SomeInterfaceReplaceNamedImplOne
|
java
|
apache__hadoop
|
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskStatus.java
|
{
"start": 2036,
"end": 17294
}
|
// Run-state of a task attempt (referenced below as the type of runState).
enum State {RUNNING, SUCCEEDED, FAILED, UNASSIGNED, KILLED,
            COMMIT_PENDING, FAILED_UNCLEAN, KILLED_UNCLEAN, PREEMPTED}
// Identity of the task attempt this status describes; immutable after construction.
private final TaskAttemptID taskid;
// Fraction of work completed, in [0, 1] — TODO confirm range against callers.
private float progress;
// volatile: read by other threads without holding a lock.
private volatile State runState;
private String diagnosticInfo;
private String stateString;
private String taskTracker;
private int numSlots;
private long startTime; //in ms
private long finishTime;
private long outputSize = -1L;
private volatile Phase phase = Phase.STARTING;
private Counters counters;
private boolean includeAllCounters;
private SortedRanges.Range nextRecordRange = new SortedRanges.Range();

// max task-status string size
static final int MAX_STRING_SIZE = 1024;

/**
 * Testcases can override {@link #getMaxStringSize()} to control the max-size
 * of strings in {@link TaskStatus}. Note that the {@link TaskStatus} is never
 * exposed to clients or users (i.e Map or Reduce) and hence users cannot
 * override this api to pass large strings in {@link TaskStatus}.
 */
protected int getMaxStringSize() {
    return MAX_STRING_SIZE;
}
// No-arg constructor for Writable deserialization: fields are populated later.
public TaskStatus() {
    taskid = new TaskAttemptID();
    numSlots = 0;
}

/**
 * Full constructor; diagnostic info and state string are routed through their
 * setters so the MAX_STRING_SIZE truncation applies from the start.
 */
public TaskStatus(TaskAttemptID taskid, float progress, int numSlots,
                  State runState, String diagnosticInfo,
                  String stateString, String taskTracker,
                  Phase phase, Counters counters) {
    this.taskid = taskid;
    this.progress = progress;
    this.numSlots = numSlots;
    this.runState = runState;
    setDiagnosticInfo(diagnosticInfo);
    setStateString(stateString);
    this.taskTracker = taskTracker;
    this.phase = phase;
    this.counters = counters;
    this.includeAllCounters = true;
}
public TaskAttemptID getTaskID() { return taskid; }

// Subclasses report whether this status belongs to a map task.
public abstract boolean getIsMap();

public int getNumSlots() {
    return numSlots;
}

public float getProgress() { return progress; }
public void setProgress(float progress) {
    this.progress = progress;
}

public State getRunState() { return runState; }
public String getTaskTracker() {return taskTracker;}
public void setTaskTracker(String tracker) { this.taskTracker = tracker;}
public void setRunState(State runState) { this.runState = runState; }
public String getDiagnosticInfo() { return diagnosticInfo; }
public void setDiagnosticInfo(String info) {
// if the diag-info has already reached its max then log and return
if (diagnosticInfo != null
&& diagnosticInfo.length() == getMaxStringSize()) {
LOG.info("task-diagnostic-info for task " + taskid + " : " + info);
return;
}
diagnosticInfo =
((diagnosticInfo == null) ? info : diagnosticInfo.concat(info));
// trim the string to MAX_STRING_SIZE if needed
if (diagnosticInfo != null
&& diagnosticInfo.length() > getMaxStringSize()) {
LOG.info("task-diagnostic-info for task " + taskid + " : "
+ diagnosticInfo);
diagnosticInfo = diagnosticInfo.substring(0, getMaxStringSize());
}
}
public String getStateString() { return stateString; }
/**
* Set the state of the {@link TaskStatus}.
*/
public void setStateString(String stateString) {
if (stateString != null) {
if (stateString.length() <= getMaxStringSize()) {
this.stateString = stateString;
} else {
// log it
LOG.info("state-string for task " + taskid + " : " + stateString);
// trim the state string
this.stateString = stateString.substring(0, getMaxStringSize());
}
}
}
/**
* Get the next record range which is going to be processed by Task.
* @return nextRecordRange
*/
public SortedRanges.Range getNextRecordRange() {
return nextRecordRange;
}
/**
* Set the next record range which is going to be processed by Task.
* @param nextRecordRange
*/
public void setNextRecordRange(SortedRanges.Range nextRecordRange) {
this.nextRecordRange = nextRecordRange;
}
/**
* Get task finish time. if shuffleFinishTime and sortFinishTime
* are not set before, these are set to finishTime. It takes care of
* the case when shuffle, sort and finish are completed with in the
* heartbeat interval and are not reported separately. if task state is
* TaskStatus.FAILED then finish time represents when the task failed.
* @return finish time of the task.
*/
public long getFinishTime() {
return finishTime;
}
/**
* Sets finishTime for the task status if and only if the
* start time is set and passed finish time is greater than
* zero.
*
* @param finishTime finish time of task.
*/
void setFinishTime(long finishTime) {
if(this.getStartTime() > 0 && finishTime > 0) {
this.finishTime = finishTime;
} else {
//Using String utils to get the stack trace.
LOG.error("Trying to set finish time for task " + taskid +
" when no start time is set, stackTrace is : " +
StringUtils.stringifyException(new Exception()));
}
}
/**
* Get shuffle finish time for the task. If shuffle finish time was
* not set due to shuffle/sort/finish phases ending within same
* heartbeat interval, it is set to finish time of next phase i.e. sort
* or task finish when these are set.
* @return 0 if shuffleFinishTime, sortFinishTime and finish time are not set. else
* it returns approximate shuffle finish time.
*/
public long getShuffleFinishTime() {
return 0;
}
/**
* Set shuffle finish time.
* @param shuffleFinishTime
*/
void setShuffleFinishTime(long shuffleFinishTime) {}
/**
* Get map phase finish time for the task. If map finsh time was
* not set due to sort phase ending within same heartbeat interval,
* it is set to finish time of next phase i.e. sort phase
* when it is set.
* @return 0 if mapFinishTime, sortFinishTime are not set. else
* it returns approximate map finish time.
*/
public long getMapFinishTime() {
return 0;
}
/**
* Set map phase finish time.
* @param mapFinishTime
*/
void setMapFinishTime(long mapFinishTime) {}
/**
* Get sort finish time for the task,. If sort finish time was not set
* due to sort and reduce phase finishing in same heartebat interval, it is
* set to finish time, when finish time is set.
* @return 0 if sort finish time and finish time are not set, else returns sort
* finish time if that is set, else it returns finish time.
*/
public long getSortFinishTime() {
return 0;
}
/**
* Sets sortFinishTime, if shuffleFinishTime is not set before
* then its set to sortFinishTime.
* @param sortFinishTime
*/
void setSortFinishTime(long sortFinishTime) {}
/**
* Get start time of the task.
* @return 0 is start time is not set, else returns start time.
*/
public long getStartTime() {
return startTime;
}
/**
* Set startTime of the task if start time is greater than zero.
* @param startTime start time
*/
void setStartTime(long startTime) {
//Making the assumption of passed startTime to be a positive
//long value explicit.
if (startTime > 0) {
this.startTime = startTime;
} else {
//Using String utils to get the stack trace.
LOG.error("Trying to set illegal startTime for task : " + taskid +
".Stack trace is : " +
StringUtils.stringifyException(new Exception()));
}
}
/**
* Get current phase of this task. Phase.Map in case of map tasks,
* for reduce one of Phase.SHUFFLE, Phase.SORT or Phase.REDUCE.
* @return .
*/
public Phase getPhase(){
return this.phase;
}
/**
* Set current phase of this task.
* @param phase phase of this task
*/
public void setPhase(Phase phase){
TaskStatus.Phase oldPhase = getPhase();
if (oldPhase != phase){
// sort phase started
if (phase == TaskStatus.Phase.SORT){
if (oldPhase == TaskStatus.Phase.MAP) {
setMapFinishTime(System.currentTimeMillis());
}
else {
setShuffleFinishTime(System.currentTimeMillis());
}
}else if (phase == TaskStatus.Phase.REDUCE){
setSortFinishTime(System.currentTimeMillis());
}
this.phase = phase;
}
}
boolean inTaskCleanupPhase() {
return (this.phase == TaskStatus.Phase.CLEANUP &&
(this.runState == TaskStatus.State.FAILED_UNCLEAN ||
this.runState == TaskStatus.State.KILLED_UNCLEAN));
}
public boolean getIncludeAllCounters() {
return includeAllCounters;
}
public void setIncludeAllCounters(boolean send) {
includeAllCounters = send;
counters.setWriteAllCounters(send);
}
/**
* Get task's counters.
*/
public Counters getCounters() {
return counters;
}
/**
* Set the task's counters.
* @param counters
*/
public void setCounters(Counters counters) {
this.counters = counters;
}
/**
* Returns the number of bytes of output from this map.
*/
public long getOutputSize() {
return outputSize;
}
/**
* Set the size on disk of this task's output.
* @param l the number of map output bytes
*/
void setOutputSize(long l) {
outputSize = l;
}
/**
* Get the list of maps from which output-fetches failed.
*
* @return the list of maps from which output-fetches failed.
*/
public List<TaskAttemptID> getFetchFailedMaps() {
return null;
}
/**
* Add to the list of maps from which output-fetches failed.
*
* @param mapTaskId map from which fetch failed
*/
public abstract void addFetchFailedMap(TaskAttemptID mapTaskId);
/**
* Update the status of the task.
*
* This update is done by ping thread before sending the status.
*
* @param progress
* @param state
* @param counters
*/
synchronized void statusUpdate(float progress,
String state,
Counters counters) {
setProgress(progress);
setStateString(state);
setCounters(counters);
}
/**
* Update the status of the task.
*
* @param status updated status
*/
synchronized void statusUpdate(TaskStatus status) {
setProgress (status.getProgress());
this.runState = status.getRunState();
setStateString(status.getStateString());
this.nextRecordRange = status.getNextRecordRange();
setDiagnosticInfo(status.getDiagnosticInfo());
if (status.getStartTime() > 0) {
this.setStartTime(status.getStartTime());
}
if (status.getFinishTime() > 0) {
this.setFinishTime(status.getFinishTime());
}
this.phase = status.getPhase();
this.counters = status.getCounters();
this.outputSize = status.outputSize;
}
/**
* Update specific fields of task status
*
* This update is done in JobTracker when a cleanup attempt of task
* reports its status. Then update only specific fields, not all.
*
* @param runState
* @param progress
* @param state
* @param phase
* @param finishTime
*/
synchronized void statusUpdate(State runState,
float progress,
String state,
Phase phase,
long finishTime) {
setRunState(runState);
setProgress(progress);
setStateString(state);
setPhase(phase);
if (finishTime > 0) {
setFinishTime(finishTime);
}
}
/**
* Clear out transient information after sending out a status-update
* from either the {@link Task} to the {@link TaskTracker} or from the
* {@link TaskTracker} to the {@link JobTracker}.
*/
synchronized void clearStatus() {
// Clear diagnosticInfo
diagnosticInfo = "";
}
@Override
public Object clone() {
try {
return super.clone();
} catch (CloneNotSupportedException cnse) {
// Shouldn't happen since we do implement Clonable
throw new InternalError(cnse.toString());
}
}
//////////////////////////////////////////////
// Writable
//////////////////////////////////////////////
public void write(DataOutput out) throws IOException {
taskid.write(out);
out.writeFloat(progress);
out.writeInt(numSlots);
WritableUtils.writeEnum(out, runState);
Text.writeString(out, diagnosticInfo);
Text.writeString(out, stateString);
WritableUtils.writeEnum(out, phase);
out.writeLong(startTime);
out.writeLong(finishTime);
out.writeBoolean(includeAllCounters);
out.writeLong(outputSize);
counters.write(out);
nextRecordRange.write(out);
}
public void readFields(DataInput in) throws IOException {
this.taskid.readFields(in);
setProgress(in.readFloat());
this.numSlots = in.readInt();
this.runState = WritableUtils.readEnum(in, State.class);
setDiagnosticInfo(StringInterner.weakIntern(Text.readString(in)));
setStateString(StringInterner.weakIntern(Text.readString(in)));
this.phase = WritableUtils.readEnum(in, Phase.class);
this.startTime = in.readLong();
this.finishTime = in.readLong();
counters = new Counters();
this.includeAllCounters = in.readBoolean();
this.outputSize = in.readLong();
counters.readFields(in);
nextRecordRange.readFields(in);
}
//////////////////////////////////////////////////////////////////////////////
// Factory-like methods to create/read/write appropriate TaskStatus objects
//////////////////////////////////////////////////////////////////////////////
static TaskStatus createTaskStatus(DataInput in, TaskAttemptID taskId,
float progress, int numSlots,
State runState, String diagnosticInfo,
String stateString, String taskTracker,
Phase phase, Counters counters)
throws IOException {
boolean isMap = in.readBoolean();
return createTaskStatus(isMap, taskId, progress, numSlots, runState,
diagnosticInfo, stateString, taskTracker, phase,
counters);
}
static TaskStatus createTaskStatus(boolean isMap, TaskAttemptID taskId,
float progress, int numSlots,
State runState, String diagnosticInfo,
String stateString, String taskTracker,
Phase phase, Counters counters) {
return (isMap) ? new MapTaskStatus(taskId, progress, numSlots, runState,
diagnosticInfo, stateString, taskTracker,
phase, counters) :
new ReduceTaskStatus(taskId, progress, numSlots, runState,
diagnosticInfo, stateString,
taskTracker, phase, counters);
}
static TaskStatus createTaskStatus(boolean isMap) {
return (isMap) ? new MapTaskStatus() : new ReduceTaskStatus();
}
}
|
State
|
java
|
apache__camel
|
components/camel-github/src/test/java/org/apache/camel/component/github/consumer/CommitConsumerLastTest.java
|
{
"start": 2676,
"end": 3238
}
|
class ____ implements Processor {
@Override
public void process(Exchange exchange) {
String author = exchange.getMessage().getHeader(GitHubConstants.GITHUB_COMMIT_AUTHOR, String.class);
String sha = exchange.getMessage().getHeader(GitHubConstants.GITHUB_COMMIT_SHA, String.class);
if (log.isDebugEnabled()) {
log.debug("Commit SHA: {}", sha);
log.debug("Got commit with author: {}: SHA {}", author, sha);
}
}
}
private static final
|
GitHubCommitProcessor
|
java
|
quarkusio__quarkus
|
extensions/websockets-next/deployment/src/test/java/io/quarkus/websockets/next/test/codec/FindBinary.java
|
{
"start": 512,
"end": 769
}
|
class ____ extends AbstractFind {
// There's no binary codec available out of the box so the codecs below are needed
@OnBinaryMessage
Item find(List<Item> items) {
return super.find(items);
}
@Singleton
public static
|
FindBinary
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/AbstractHeapPriorityQueueElement.java
|
{
"start": 930,
"end": 1361
}
|
class ____ implements HeapPriorityQueueElement {
private int internalIndex;
public AbstractHeapPriorityQueueElement() {
this.internalIndex = HeapPriorityQueueElement.NOT_CONTAINED;
}
@Override
public int getInternalIndex() {
return internalIndex;
}
@Override
public void setInternalIndex(int newIndex) {
this.internalIndex = newIndex;
}
}
|
AbstractHeapPriorityQueueElement
|
java
|
quarkusio__quarkus
|
extensions/resteasy-reactive/rest-client/deployment/src/test/java/io/quarkus/rest/client/reactive/multipart/MultipartProgrammaticTest.java
|
{
"start": 3716,
"end": 3874
}
|
class ____ {
@FormParam("fileFormName")
public FileUpload file;
@FormParam("otherFormName")
public String other;
}
}
|
FormData
|
java
|
spring-projects__spring-data-jpa
|
spring-data-jpa/src/test/java/org/springframework/data/jpa/repository/support/JpaMetamodelEntityInformationIntegrationTests.java
|
{
"start": 11037,
"end": 11172
}
|
class ____ extends Identifiable {
}
@Entity
@Access(AccessType.FIELD)
@IdClass(EntityWithNestedIdClassPK.class)
public static
|
Sample
|
java
|
quarkusio__quarkus
|
independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/clientproxy/finalmethod/FinalMethodIllegalWhenNotInjectedTest.java
|
{
"start": 854,
"end": 936
}
|
class ____ {
final int getVal() {
return -1;
}
}
}
|
Moo
|
java
|
assertj__assertj-core
|
assertj-core/src/main/java/org/assertj/core/internal/Failures.java
|
{
"start": 1360,
"end": 6658
}
|
class ____ {
private static final String LINE_SEPARATOR = System.lineSeparator();
private static final Failures INSTANCE = new Failures();
private final AssertionErrorCreator assertionErrorCreator = new AssertionErrorCreator();
/**
* flag indicating that in case of a failure a thread dump is printed out.
*/
private boolean printThreadDump = false;
/**
* Returns the singleton instance of this class.
*
* @return the singleton instance of this class.
*/
public static Failures instance() {
return INSTANCE;
}
/**
* flag indicating whether or not we remove elements related to AssertJ from assertion error stack trace.
*/
private boolean removeAssertJRelatedElementsFromStackTrace = Configuration.REMOVE_ASSERTJ_RELATED_ELEMENTS_FROM_STACK_TRACE;
/**
* Sets whether we remove elements related to AssertJ from assertion error stack trace.
*
* @param removeAssertJRelatedElementsFromStackTrace flag
*/
public void setRemoveAssertJRelatedElementsFromStackTrace(boolean removeAssertJRelatedElementsFromStackTrace) {
ConfigurationProvider.loadRegisteredConfiguration();
this.removeAssertJRelatedElementsFromStackTrace = removeAssertJRelatedElementsFromStackTrace;
}
/**
* Returns whether or not we remove elements related to AssertJ from assertion error stack trace.
* @return whether or not we remove elements related to AssertJ from assertion error stack trace.
*/
public boolean isRemoveAssertJRelatedElementsFromStackTrace() {
return removeAssertJRelatedElementsFromStackTrace;
}
private Failures() {}
public AssertionError failure(AssertionInfo info, ShouldBeEqual shouldBeEqual) {
AssertionError error = failureIfErrorMessageIsOverridden(info);
if (error != null) return error;
printThreadDumpIfNeeded();
return shouldBeEqual.toAssertionError(info.description(), info.representation());
}
/**
* Creates a <code>{@link AssertionError}</code> following this pattern:
* <ol>
* <li>creates a <code>{@link AssertionError}</code> using <code>{@link AssertionInfo#overridingErrorMessage()}</code>
* as the error message if such value is not {@code null}, or</li>
* <li>uses the given <code>{@link ErrorMessageFactory}</code> to create the detail message of the
* <code>{@link AssertionError}</code>, prepending the value of <code>{@link AssertionInfo#description()}</code> to
* the error message</li>
* </ol>
*
* @param info contains information about the failed assertion.
* @param messageFactory knows how to create detail messages for {@code AssertionError}s.
* @return the created <code>{@link AssertionError}</code>.
*/
public AssertionError failure(AssertionInfo info, ErrorMessageFactory messageFactory) {
AssertionError error = failureIfErrorMessageIsOverridden(info);
if (error != null) return error;
String assertionErrorMessage = assertionErrorMessage(info, messageFactory);
AssertionError assertionError = assertionErrorCreator.assertionError(assertionErrorMessage);
removeAssertJRelatedElementsFromStackTraceIfNeeded(assertionError);
printThreadDumpIfNeeded();
return assertionError;
}
public AssertionError failure(AssertionInfo info, ErrorMessageFactory messageFactory, Object actual, Object expected) {
String assertionErrorMessage = assertionErrorMessage(info, messageFactory);
AssertionError assertionError = assertionErrorCreator.assertionError(assertionErrorMessage, actual, expected,
info.representation());
removeAssertJRelatedElementsFromStackTraceIfNeeded(assertionError);
printThreadDumpIfNeeded();
return assertionError;
}
protected String assertionErrorMessage(AssertionInfo info, ErrorMessageFactory messageFactory) {
String overridingErrorMessage = info.overridingErrorMessage();
return isNullOrEmpty(overridingErrorMessage)
? messageFactory.create(info.description(), info.representation())
: MessageFormatter.instance().format(info.description(), info.representation(), overridingErrorMessage);
}
public AssertionError failureIfErrorMessageIsOverridden(AssertionInfo info) {
String overridingErrorMessage = info.overridingErrorMessage();
return isNullOrEmpty(overridingErrorMessage) ? null
: failure(MessageFormatter.instance().format(info.description(), info.representation(),
overridingErrorMessage));
}
/**
* Creates a <code>{@link AssertionError}</code> using the given {@code String} as message.
* <p>
* It filters the AssertionError stack trace by default, to have full stack trace use
* {@link #setRemoveAssertJRelatedElementsFromStackTrace(boolean)}.
*
* @param message the message of the {@code AssertionError} to create.
* @return the created <code>{@link AssertionError}</code>.
*/
public AssertionError failure(String message) {
AssertionError assertionError = assertionErrorCreator.assertionError(message);
removeAssertJRelatedElementsFromStackTraceIfNeeded(assertionError);
printThreadDumpIfNeeded();
return assertionError;
}
/**
* Creates a <code>{@link AssertionError}</code> for a {@link Throwable}
|
Failures
|
java
|
quarkusio__quarkus
|
independent-projects/arc/processor/src/main/java/io/quarkus/arc/processor/ObserverConfigurator.java
|
{
"start": 4193,
"end": 4652
}
|
class ____ be set!");
}
if (observedType == null) {
throw new IllegalStateException("Observed type must be set!");
}
if (notifyConsumer == null) {
throw new IllegalStateException("Bytecode generator for notify() method must be set!");
}
consumer.accept(this);
}
@Override
public void accept(AnnotationInstance qualifier) {
addQualifier(qualifier);
}
public
|
must
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-registry/src/test/java/org/apache/hadoop/registry/client/binding/TestRegistryOperationUtils.java
|
{
"start": 1052,
"end": 1989
}
|
class ____ extends Assertions {
@Test
public void testUsernameExtractionEnvVarOverrride() throws Throwable {
String whoami = RegistryUtils.getCurrentUsernameUnencoded("drwho");
assertEquals("drwho", whoami);
}
@Test
public void testUsernameExtractionCurrentuser() throws Throwable {
String whoami = RegistryUtils.getCurrentUsernameUnencoded("");
String ugiUser = UserGroupInformation.getCurrentUser().getShortUserName();
assertEquals(ugiUser, whoami);
}
@Test
public void testShortenUsername() throws Throwable {
assertEquals("hbase",
RegistryUtils.convertUsername("hbase@HADOOP.APACHE.ORG"));
assertEquals("hbase",
RegistryUtils.convertUsername("hbase/localhost@HADOOP.APACHE.ORG"));
assertEquals("hbase",
RegistryUtils.convertUsername("hbase"));
assertEquals("hbase user",
RegistryUtils.convertUsername("hbase user"));
}
}
|
TestRegistryOperationUtils
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/bvt/filter/wall/WallReadOnlyTest.java
|
{
"start": 836,
"end": 1943
}
|
class ____ extends TestCase {
private WallConfig config = new WallConfig();
protected void setUp() throws Exception {
config.addReadOnlyTable("members");
}
private String sql = "SELECT F1, F2 members";
private String insert_sql = "INSERT INTO members (FID, FNAME) VALUES (?, ?)";
private String update_sql = "UPDATE members SET FNAME = ? WHERe FID = ?";
private String delete_sql = "DELETE members WHERE FID = ?";
public void testMySql() throws Exception {
assertTrue(WallUtils.isValidateMySql(sql, config));
assertFalse(WallUtils.isValidateMySql(insert_sql, config));
assertFalse(WallUtils.isValidateMySql(update_sql, config));
assertFalse(WallUtils.isValidateMySql(delete_sql, config));
}
public void testORACLE() throws Exception {
assertTrue(WallUtils.isValidateOracle(sql, config));
assertFalse(WallUtils.isValidateOracle(insert_sql, config));
assertFalse(WallUtils.isValidateOracle(update_sql, config));
assertFalse(WallUtils.isValidateOracle(delete_sql, config));
}
}
|
WallReadOnlyTest
|
java
|
apache__camel
|
core/camel-core-model/src/main/java/org/apache/camel/model/language/PythonExpression.java
|
{
"start": 1287,
"end": 1954
}
|
class ____ extends TypedExpressionDefinition {
public PythonExpression() {
}
protected PythonExpression(PythonExpression source) {
super(source);
}
public PythonExpression(String expression) {
super(expression);
}
private PythonExpression(Builder builder) {
super(builder);
}
@Override
public PythonExpression copyDefinition() {
return new PythonExpression(this);
}
@Override
public String getLanguage() {
return "python";
}
/**
* {@code Builder} is a specific builder for {@link PythonExpression}.
*/
@XmlTransient
public static
|
PythonExpression
|
java
|
apache__camel
|
components/camel-ai/camel-langchain4j-embeddings/src/main/java/org/apache/camel/component/langchain4j/embeddings/LangChain4jEmbeddingsConverter.java
|
{
"start": 1141,
"end": 1542
}
|
class ____ {
@Converter
public static TextSegment toTextSegment(String value) {
return TextSegment.from(value);
}
@Converter
public static Embedding toEmbedding(float[] value) {
return Embedding.from(value);
}
@Converter
public static Embedding toEmbedding(List<Float> value) {
return Embedding.from(value);
}
}
|
LangChain4jEmbeddingsConverter
|
java
|
elastic__elasticsearch
|
libs/x-content/src/test/java/org/elasticsearch/xcontent/InstantiatingObjectParserTests.java
|
{
"start": 9013,
"end": 9866
}
|
class ____ {
private int intField;
DoubleFieldDeclaration(int intField) {
this.intField = intField;
}
}
InstantiatingObjectParser.Builder<DoubleFieldDeclaration, Void> builder = InstantiatingObjectParser.builder(
"double_declaration",
DoubleFieldDeclaration.class
);
builder.declareInt(constructorArg(), new ParseField("name"));
IllegalArgumentException exception = expectThrows(
IllegalArgumentException.class,
() -> builder.declareInt(constructorArg(), new ParseField("name"))
);
assertThat(exception, instanceOf(IllegalArgumentException.class));
assertThat(exception.getMessage(), startsWith("Parser already registered for name=[name]"));
}
public static
|
DoubleFieldDeclaration
|
java
|
apache__flink
|
flink-tests/src/test/java/org/apache/flink/test/streaming/runtime/SideOutputITCase.java
|
{
"start": 41205,
"end": 41749
}
|
class ____
implements WatermarkStrategyWithPunctuatedWatermarks<Integer> {
private static final long serialVersionUID = 1L;
@Nullable
@Override
public Watermark checkAndGetNextWatermark(Integer lastElement, long extractedTimestamp) {
return new Watermark(extractedTimestamp);
}
@Override
public long extractTimestamp(Integer element, long previousElementTimestamp) {
return Long.valueOf(element);
}
}
private static
|
TestWatermarkAssigner
|
java
|
apache__flink
|
flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/CatalogTable.java
|
{
"start": 4609,
"end": 6189
}
|
class ____ {
private @Nullable Schema schema;
private @Nullable String comment;
private List<String> partitionKeys = Collections.emptyList();
private Map<String, String> options = Collections.emptyMap();
private @Nullable Long snapshot;
private @Nullable TableDistribution distribution;
private Builder() {}
public Builder schema(Schema schema) {
this.schema = Preconditions.checkNotNull(schema, "Schema must not be null.");
return this;
}
public Builder comment(@Nullable String comment) {
this.comment = comment;
return this;
}
public Builder partitionKeys(List<String> partitionKeys) {
this.partitionKeys =
Preconditions.checkNotNull(partitionKeys, "Partition keys must not be null.");
return this;
}
public Builder options(Map<String, String> options) {
this.options = Preconditions.checkNotNull(options, "Options must not be null.");
return this;
}
public Builder snapshot(@Nullable Long snapshot) {
this.snapshot = snapshot;
return this;
}
public Builder distribution(@Nullable TableDistribution distribution) {
this.distribution = distribution;
return this;
}
public CatalogTable build() {
return new DefaultCatalogTable(
schema, comment, partitionKeys, options, snapshot, distribution);
}
}
}
|
Builder
|
java
|
apache__hadoop
|
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMiniMRChildTask.java
|
{
"start": 3280,
"end": 6126
}
|
class ____ extends MapReduceBase
implements Mapper<LongWritable, Text, Text, IntWritable> {
Path tmpDir;
public void map (LongWritable key, Text value,
OutputCollector<Text, IntWritable> output,
Reporter reporter) throws IOException {
if (localFs.exists(tmpDir)) {
} else {
fail("Temp directory " + tmpDir +" doesnt exist.");
}
File tmpFile = File.createTempFile("test", ".tmp");
}
public void configure(JobConf job) {
tmpDir = new Path(System.getProperty("java.io.tmpdir"));
try {
localFs = FileSystem.getLocal(job);
} catch (IOException ioe) {
ioe.printStackTrace();
fail("IOException in getting localFS");
}
}
}
// configure a job
private void configure(JobConf conf, Path inDir, Path outDir, String input,
Class<? extends Mapper> map,
Class<? extends Reducer> reduce)
throws IOException {
// set up the input file system and write input text.
FileSystem inFs = inDir.getFileSystem(conf);
FileSystem outFs = outDir.getFileSystem(conf);
outFs.delete(outDir, true);
if (!inFs.mkdirs(inDir)) {
throw new IOException("Mkdirs failed to create " + inDir.toString());
}
{
// write input into input file
DataOutputStream file = inFs.create(new Path(inDir, "part-0"));
file.writeBytes(input);
file.close();
}
// configure the mapred Job which creates a tempfile in map.
conf.setJobName("testmap");
conf.setMapperClass(map);
conf.setReducerClass(reduce);
conf.setNumMapTasks(1);
conf.setNumReduceTasks(0);
FileInputFormat.setInputPaths(conf, inDir);
FileOutputFormat.setOutputPath(conf, outDir);
String TEST_ROOT_DIR = new Path(System.getProperty("test.build.data",
"/tmp")).toString().replace(' ', '+');
conf.set("test.build.data", TEST_ROOT_DIR);
}
private static void checkEnv(String envName, String expValue, String mode) {
String envValue = System.getenv(envName);
if ("append".equals(mode)) {
if (envValue == null || !envValue.contains(File.pathSeparator)) {
throw new RuntimeException("Missing env variable");
} else {
String[] parts = envValue.trim().split(File.pathSeparator);
// check if the value is appended
if (!parts[parts.length - 1].equals(expValue)) {
throw new RuntimeException("Wrong env variable in append mode");
}
}
} else {
if (envValue == null || !envValue.trim().equals(expValue)) {
throw new RuntimeException("Wrong env variable in noappend mode");
}
}
}
// Mappers that simply checks if the desired user env are present or not
private static
|
MapClass
|
java
|
netty__netty
|
transport-native-epoll/src/test/java/io/netty/channel/epoll/EpollDatagramMulticastIpv6WithIpv4AddrTest.java
|
{
"start": 796,
"end": 1141
}
|
class ____ extends DatagramMulticastTest {
@Override
protected SocketProtocolFamily groupInternetProtocalFamily() {
return SocketProtocolFamily.INET;
}
@Override
protected SocketProtocolFamily socketInternetProtocalFamily() {
return SocketProtocolFamily.INET6;
}
}
|
EpollDatagramMulticastIpv6WithIpv4AddrTest
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/StreamGroupedReduceAsyncStateOperatorTest.java
|
{
"start": 5031,
"end": 5988
}
|
class ____ extends RichReduceFunction<Integer> {
private static final long serialVersionUID = 1L;
public static boolean openCalled = false;
public static boolean closeCalled = false;
@Override
public void open(OpenContext openContext) throws Exception {
super.open(openContext);
assertThat(closeCalled).as("Close called before open.").isFalse();
openCalled = true;
}
@Override
public void close() throws Exception {
super.close();
assertThat(openCalled).as("Open was not called before close.").isTrue();
closeCalled = true;
}
@Override
public Integer reduce(Integer in1, Integer in2) throws Exception {
assertThat(openCalled).as("Open was not called before run.").isTrue();
return in1 + in2;
}
}
// Utilities
private static
|
TestOpenCloseReduceFunction
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HtmlPage.java
|
{
"start": 1482,
"end": 2787
}
|
class ____ extends Hamlet {
Page(PrintWriter out) {
super(out, 0, false);
}
@Override
protected void subView(Class<? extends SubView> cls) {
context().set(nestLevel(), wasInline());
render(cls);
setWasInline(context().wasInline());
}
public HTML<HtmlPage.__> html() {
return new HTML<HtmlPage.__>("html", null, EnumSet.of(EOpt.ENDTAG));
}
}
public static final String DOCTYPE =
"<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML 4.01//EN\""+
" \"http://www.w3.org/TR/html4/strict.dtd\">";
private Page page;
private Page page() {
if (page == null) {
page = new Page(writer());
}
return page;
}
protected HtmlPage() {
this(null);
}
protected HtmlPage(ViewContext ctx) {
super(ctx, MimeType.HTML);
}
@Override
public void render() {
putWithoutEscapeHtml(DOCTYPE);
render(page().html().meta_http("X-UA-Compatible", "IE=8")
.meta_http("Content-type", MimeType.HTML));
if (page().nestLevel() != 0) {
throw new WebAppException("Error rendering page: nestLevel="+
page().nestLevel());
}
}
/**
* Render the HTML page.
* @param html the page to render data to.
*/
protected abstract void render(Page.HTML<__> html);
}
|
Page
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/jdk7/PathTest.java
|
{
"start": 194,
"end": 1037
}
|
class ____ extends TestCase {
public void test_for_path() throws Exception {
Model model = new Model();
model.path = Paths.get("/root/fastjson");
String text = JSON.toJSONString(model);
System.out.println(text);
//windows下,输出为
//Assert.assertEquals("{\"path\":\"\\root\\fastjson\"}", text);
//linux ,mac
//Assert.assertEquals("{\"path\":\"/root/fastjson\"}", text);
Model model2 = JSON.parseObject(text, Model.class);
Assert.assertEquals(model.path.toString(), model2.path.toString());
}
public void test_for_null() throws Exception {
String text = "{\"path\":null}";
Model model2 = JSON.parseObject(text, Model.class);
Assert.assertNull(model2.path);
}
public static
|
PathTest
|
java
|
spring-projects__spring-boot
|
buildSrc/src/main/java/org/springframework/boot/build/classpath/CheckClasspathForProhibitedDependencies.java
|
{
"start": 1297,
"end": 3371
}
|
class ____ extends DefaultTask {
private static final Set<String> PROHIBITED_GROUPS = Set.of("org.codehaus.groovy", "org.eclipse.jetty.toolchain",
"org.apache.geronimo.specs", "com.sun.activation");
private static final Set<String> PERMITTED_JAVAX_GROUPS = Set.of("javax.batch", "javax.cache", "javax.money");
private Configuration classpath;
public CheckClasspathForProhibitedDependencies() {
getOutputs().upToDateWhen((task) -> true);
}
@Input
public abstract SetProperty<String> getPermittedGroups();
public void setClasspath(Configuration classpath) {
this.classpath = classpath;
}
@Classpath
public FileCollection getClasspath() {
return this.classpath;
}
@TaskAction
public void checkForProhibitedDependencies() {
TreeSet<String> prohibited = this.classpath.getResolvedConfiguration()
.getResolvedArtifacts()
.stream()
.map((artifact) -> artifact.getModuleVersion().getId())
.filter(this::prohibited)
.map((id) -> id.getGroup() + ":" + id.getName())
.collect(Collectors.toCollection(TreeSet::new));
if (!prohibited.isEmpty()) {
StringBuilder message = new StringBuilder(String.format("Found prohibited dependencies:%n"));
for (String dependency : prohibited) {
message.append(String.format(" %s%n", dependency));
}
throw new GradleException(message.toString());
}
}
private boolean prohibited(ModuleVersionIdentifier id) {
return (!getPermittedGroups().get().contains(id.getGroup())) && (PROHIBITED_GROUPS.contains(id.getGroup())
|| prohibitedJavax(id) || prohibitedSlf4j(id) || prohibitedJbossSpec(id));
}
private boolean prohibitedSlf4j(ModuleVersionIdentifier id) {
return id.getGroup().equals("org.slf4j") && id.getName().equals("jcl-over-slf4j");
}
private boolean prohibitedJbossSpec(ModuleVersionIdentifier id) {
return id.getGroup().startsWith("org.jboss.spec");
}
private boolean prohibitedJavax(ModuleVersionIdentifier id) {
return id.getGroup().startsWith("javax.") && !PERMITTED_JAVAX_GROUPS.contains(id.getGroup());
}
}
|
CheckClasspathForProhibitedDependencies
|
java
|
spring-projects__spring-framework
|
spring-web/src/test/java/org/springframework/http/codec/json/CustomizedJacksonJsonEncoderTests.java
|
{
"start": 3213,
"end": 3515
}
|
class ____ extends JacksonJsonEncoder {
@Override
protected ObjectWriter customizeWriter(
ObjectWriter writer, MimeType mimeType, ResolvableType elementType, Map<String, Object> hints) {
return writer.with(EnumFeature.WRITE_ENUMS_USING_TO_STRING);
}
}
}
|
JacksonJsonEncoderWithCustomization
|
java
|
quarkusio__quarkus
|
extensions/oidc-client-filter/deployment/src/test/java/io/quarkus/oidc/client/filter/OidcClientFilterRevokedAccessTokenDevModeTest.java
|
{
"start": 4916,
"end": 7074
}
|
class ____ extends MyClientResource {
@Inject
@RestClient
MyDefaultClient myDefaultClient;
@Inject
@RestClient
MyNamedClient myNamedClient;
@Inject
@RestClient
MyDefaultClientWithoutRefresh myDefaultClientWithoutRefresh;
@Inject
@RestClient
MyNamedClientWithoutRefresh myNamedClientWithoutRefresh;
@Inject
@RestClient
MyDefaultClient_AnnotationOnMethod myDefaultClientAnnotationOnMethod;
@Inject
@RestClient
MyNamedClient_AnnotationOnMethod myNamedClientAnnotationOnMethod;
@Inject
@RestClient
MyClient_MultipleMethods myClientMultipleMethods;
@Override
protected MyClient myDefaultClient() {
return myDefaultClient;
}
@Override
protected MyClient myNamedClient() {
return myNamedClient;
}
@Override
protected MyClient myDefaultClientWithoutRefresh() {
return myDefaultClientWithoutRefresh;
}
@Override
protected MyClient myNamedClientWithoutRefresh() {
return myNamedClientWithoutRefresh;
}
@Override
protected String myDefaultClient_AnnotationOnMethod(String named) {
return myDefaultClientAnnotationOnMethod.revokeAccessTokenAndRespond(named);
}
@Override
protected String myNamedClient_AnnotationOnMethod(String named) {
return myNamedClientAnnotationOnMethod.revokeAccessTokenAndRespond(named);
}
@Override
protected String myDefaultClient_MultipleMethods(String named) {
return myClientMultipleMethods.revokeAccessTokenAndRespond_DefaultClient(named);
}
@Override
protected String myNamedClient_MultipleMethods(String named) {
return myClientMultipleMethods.revokeAccessTokenAndRespond_NamedClient(named);
}
@Override
protected String multipleMethods_noAccessToken() {
return myClientMultipleMethods.noAccessToken();
}
}
}
|
MyClientResourceImpl
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/search/crossproject/CrossProjectIndexResolutionValidator.java
|
{
"start": 1515,
"end": 1605
}
|
class ____ validating index resolution results in cross-project operations.
* <p>
* This
|
for
|
java
|
quarkusio__quarkus
|
extensions/micrometer/deployment/src/test/java/io/quarkus/micrometer/deployment/binder/VertxConnectionMetricsTest.java
|
{
"start": 4142,
"end": 4454
}
|
class ____ {
public void start(@Observes StartupEvent ev, Router router, io.vertx.core.Vertx vertx) {
router.get("/ok").handler(rc -> {
// Keep the connection open for 100ms.
vertx.setTimer(250, l -> rc.endAndForget("ok"));
});
}
}
}
|
App
|
java
|
ReactiveX__RxJava
|
src/test/java/io/reactivex/rxjava3/internal/operators/flowable/FlowableSerializeTest.java
|
{
"start": 10877,
"end": 15030
}
|
class ____ implements Publisher<String> {
final String[] values;
Thread t;
AtomicInteger threadsRunning = new AtomicInteger();
AtomicInteger maxConcurrentThreads = new AtomicInteger();
ExecutorService threadPool;
TestMultiThreadedObservable(String... values) {
this.values = values;
this.threadPool = Executors.newCachedThreadPool();
}
@Override
public void subscribe(final Subscriber<? super String> subscriber) {
subscriber.onSubscribe(new BooleanSubscription());
System.out.println("TestMultiThreadedObservable subscribed to ...");
final NullPointerException npe = new NullPointerException();
t = new Thread(new Runnable() {
@Override
public void run() {
try {
System.out.println("running TestMultiThreadedObservable thread");
for (final String s : values) {
threadPool.execute(new Runnable() {
@Override
public void run() {
threadsRunning.incrementAndGet();
try {
// perform onNext call
if (s == null) {
System.out.println("TestMultiThreadedObservable onNext: null");
// force an error
throw npe;
} else {
try {
Thread.sleep(10);
} catch (InterruptedException ex) {
// ignored
}
System.out.println("TestMultiThreadedObservable onNext: " + s);
}
subscriber.onNext(s);
// capture 'maxThreads'
int concurrentThreads = threadsRunning.get();
int maxThreads = maxConcurrentThreads.get();
if (concurrentThreads > maxThreads) {
maxConcurrentThreads.compareAndSet(maxThreads, concurrentThreads);
}
} catch (Throwable e) {
subscriber.onError(e);
} finally {
threadsRunning.decrementAndGet();
}
}
});
}
// we are done spawning threads
threadPool.shutdown();
} catch (Throwable e) {
throw new RuntimeException(e);
}
// wait until all threads are done, then mark it as COMPLETED
try {
// wait for all the threads to finish
threadPool.awaitTermination(2, TimeUnit.SECONDS);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
subscriber.onComplete();
}
});
System.out.println("starting TestMultiThreadedObservable thread");
t.start();
System.out.println("done starting TestMultiThreadedObservable thread");
}
public void waitToFinish() {
try {
t.join();
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
}
private static
|
TestMultiThreadedObservable
|
java
|
spring-projects__spring-boot
|
core/spring-boot/src/main/java/org/springframework/boot/logging/logback/LogstashStructuredLogFormatter.java
|
{
"start": 1919,
"end": 4480
}
|
class ____ extends JsonWriterStructuredLogFormatter<ILoggingEvent> {
private static final PairExtractor<KeyValuePair> keyValuePairExtractor = PairExtractor.of((pair) -> pair.key,
(pair) -> pair.value);
LogstashStructuredLogFormatter(@Nullable StackTracePrinter stackTracePrinter, ContextPairs contextPairs,
ThrowableProxyConverter throwableProxyConverter,
@Nullable StructuredLoggingJsonMembersCustomizer<?> customizer) {
super((members) -> jsonMembers(stackTracePrinter, contextPairs, throwableProxyConverter, members), customizer);
}
private static void jsonMembers(@Nullable StackTracePrinter stackTracePrinter, ContextPairs contextPairs,
ThrowableProxyConverter throwableProxyConverter, JsonWriter.Members<ILoggingEvent> members) {
Extractor extractor = new Extractor(stackTracePrinter, throwableProxyConverter);
members.add("@timestamp", ILoggingEvent::getInstant).as(LogstashStructuredLogFormatter::asTimestamp);
members.add("@version", "1");
members.add("message", ILoggingEvent::getFormattedMessage);
members.add("logger_name", ILoggingEvent::getLoggerName);
members.add("thread_name", ILoggingEvent::getThreadName);
members.add("level", ILoggingEvent::getLevel);
members.add("level_value", ILoggingEvent::getLevel).as(Level::toInt);
members.add().usingPairs(contextPairs.flat("_", (pairs) -> {
pairs.addMapEntries(ILoggingEvent::getMDCPropertyMap);
pairs.add(ILoggingEvent::getKeyValuePairs, keyValuePairExtractor);
}));
members.add("tags", ILoggingEvent::getMarkerList)
.whenNotNull()
.as(LogstashStructuredLogFormatter::getMarkers)
.whenNotEmpty();
Function<@Nullable ILoggingEvent, @Nullable Object> getThrowableProxy = (event) -> (event != null)
? event.getThrowableProxy() : null;
members.add("stack_trace", (event) -> event).whenNotNull(getThrowableProxy).as(extractor::stackTrace);
}
private static String asTimestamp(Instant instant) {
OffsetDateTime offsetDateTime = OffsetDateTime.ofInstant(instant, ZoneId.systemDefault());
return DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(offsetDateTime);
}
private static Set<String> getMarkers(List<Marker> markers) {
Set<String> result = new LinkedHashSet<>();
addMarkers(result, markers.iterator());
return result;
}
private static void addMarkers(Set<String> result, Iterator<Marker> iterator) {
while (iterator.hasNext()) {
Marker marker = iterator.next();
result.add(marker.getName());
if (marker.hasReferences()) {
addMarkers(result, marker.iterator());
}
}
}
}
|
LogstashStructuredLogFormatter
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/SameNameButDifferentTest.java
|
{
"start": 4636,
"end": 4911
}
|
class ____ extends A {
A.Supplier supplier2 = new A.Supplier();
}
}
""")
.doTest();
}
@Test
public void neverShadowing() {
helper
.addSourceLines(
"A.java",
"""
|
C
|
java
|
apache__flink
|
flink-streaming-java/src/test/java/org/apache/flink/streaming/api/functions/sink/TransactionHolderTest.java
|
{
"start": 1185,
"end": 1498
}
|
class ____ {
@Test
void testElapsedTime() {
final long elapsedTime =
new TransactionHolder<>(new Object(), 0)
.elapsedTime(Clock.fixed(Instant.ofEpochMilli(1000), ZoneOffset.UTC));
assertThat(elapsedTime).isEqualTo(1000L);
}
}
|
TransactionHolderTest
|
java
|
apache__kafka
|
streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableTransformValuesTest.java
|
{
"start": 22342,
"end": 23048
}
|
class ____ implements ValueTransformerWithKey<Object, String, String> {
private final List<String> expectedStoredNames;
ExclamationValueTransformer(final List<String> expectedStoredNames) {
this.expectedStoredNames = expectedStoredNames;
}
@Override
public void init(final ProcessorContext context) {
throwIfStoresNotAvailable(context, expectedStoredNames);
}
@Override
public String transform(final Object readOnlyKey, final String value) {
return readOnlyKey.toString() + "->" + value + "!";
}
@Override
public void close() {}
}
private static
|
ExclamationValueTransformer
|
java
|
grpc__grpc-java
|
api/src/testFixtures/java/io/grpc/ForwardingTestUtil.java
|
{
"start": 1133,
"end": 1189
}
|
class ____ help test forwarding classes.
*/
public final
|
to
|
java
|
redisson__redisson
|
redisson/src/main/java/org/redisson/redisnode/SentinelRedisNode.java
|
{
"start": 1725,
"end": 11722
}
|
class ____ implements RedisSentinel, RedisSentinelAsync {
private final RedisClient client;
private final CommandAsyncExecutor commandAsyncService;
public SentinelRedisNode(RedisClient client, CommandAsyncExecutor commandAsyncService) {
super();
this.client = client;
this.commandAsyncService = commandAsyncService;
}
public RedisClient getClient() {
return client;
}
@Override
public InetSocketAddress getAddr() {
return client.getAddr();
}
@Override
public Map<String, String> getMemoryStatistics() {
return getMemoryStatisticsAsync().toCompletableFuture().join();
}
@Override
public RFuture<Map<String, String>> getMemoryStatisticsAsync() {
return executeAsync(null, StringCodec.INSTANCE, -1, RedisCommands.MEMORY_STATS);
}
@Override
public RFuture<Boolean> pingAsync() {
return pingAsync(1, TimeUnit.SECONDS);
}
@Override
public RFuture<Boolean> pingAsync(long timeout, TimeUnit timeUnit) {
return executeAsync(false, null, timeUnit.toMillis(timeout), RedisCommands.PING_BOOL);
}
@Override
public boolean ping() {
return pingAsync().toCompletableFuture().join();
}
@Override
public boolean ping(long timeout, TimeUnit timeUnit) {
return pingAsync(timeout, timeUnit).toCompletableFuture().join();
}
@Override
@SuppressWarnings("AvoidInlineConditionals")
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((client == null) ? 0 : client.getAddr().hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
SentinelRedisNode other = (SentinelRedisNode) obj;
if (client == null) {
if (other.client != null)
return false;
} else if (!client.getAddr().equals(other.client.getAddr()))
return false;
return true;
}
private <T> RFuture<T> executeAsync(T defaultValue, Codec codec, long timeout, RedisCommand<T> command, Object... params) {
CompletableFuture<RedisConnection> connectionFuture = client.connectAsync().toCompletableFuture();
CompletableFuture<Object> f = connectionFuture.thenCompose(connection -> {
return connection.async(timeout, codec, command, params);
}).handle((r, e) -> {
if (connectionFuture.isDone() && !connectionFuture.isCompletedExceptionally()) {
connectionFuture.getNow(null).closeAsync();
}
if (e != null) {
if (defaultValue != null) {
return defaultValue;
}
throw new CompletionException(e);
}
return r;
});
return new CompletableFutureWrapper<T>((CompletionStage<T>) f);
}
@Override
public RFuture<Time> timeAsync() {
return executeAsync(null, LongCodec.INSTANCE, -1, RedisCommands.TIME);
}
@Override
public Time time() {
return timeAsync().toCompletableFuture().join();
}
@Override
public String toString() {
return this.getClass().toString() + " [client=" + client + "]";
}
@Override
public Map<String, String> info(InfoSection section) {
return infoAsync(section).toCompletableFuture().join();
}
@Override
public RFuture<Map<String, String>> infoAsync(InfoSection section) {
if (section == InfoSection.ALL) {
return executeAsync(null, StringCodec.INSTANCE, -1, RedisCommands.INFO_ALL);
} else if (section == InfoSection.DEFAULT) {
return executeAsync(null, StringCodec.INSTANCE, -1, RedisCommands.INFO_DEFAULT);
} else if (section == InfoSection.SERVER) {
return executeAsync(null, StringCodec.INSTANCE, -1, RedisCommands.INFO_SERVER);
} else if (section == InfoSection.CLIENTS) {
return executeAsync(null, StringCodec.INSTANCE, -1, RedisCommands.INFO_CLIENTS);
} else if (section == InfoSection.MEMORY) {
return executeAsync(null, StringCodec.INSTANCE, -1, RedisCommands.INFO_MEMORY);
} else if (section == InfoSection.PERSISTENCE) {
return executeAsync(null, StringCodec.INSTANCE, -1, RedisCommands.INFO_PERSISTENCE);
} else if (section == InfoSection.STATS) {
return executeAsync(null, StringCodec.INSTANCE, -1, RedisCommands.INFO_STATS);
} else if (section == InfoSection.REPLICATION) {
return executeAsync(null, StringCodec.INSTANCE, -1, RedisCommands.INFO_REPLICATION);
} else if (section == InfoSection.CPU) {
return executeAsync(null, StringCodec.INSTANCE, -1, RedisCommands.INFO_CPU);
} else if (section == InfoSection.COMMANDSTATS) {
return executeAsync(null, StringCodec.INSTANCE, -1, RedisCommands.INFO_COMMANDSTATS);
} else if (section == InfoSection.CLUSTER) {
return executeAsync(null, StringCodec.INSTANCE, -1, RedisCommands.INFO_CLUSTER);
} else if (section == InfoSection.KEYSPACE) {
return executeAsync(null, StringCodec.INSTANCE, -1, RedisCommands.INFO_KEYSPACE);
}
throw new IllegalStateException();
}
@Override
public RedisURI getMasterAddr(String masterName) {
return commandAsyncService.get(getMasterAddrAsync(masterName));
}
@Override
public List<Map<String, String>> getSentinels(String masterName) {
return commandAsyncService.get(getSentinelsAsync(masterName));
}
@Override
public List<Map<String, String>> getMasters() {
return commandAsyncService.get(getMastersAsync());
}
@Override
public List<Map<String, String>> getSlaves(String masterName) {
return commandAsyncService.get(getSlavesAsync(masterName));
}
@Override
public Map<String, String> getMaster(String masterName) {
return commandAsyncService.get(getMasterAsync(masterName));
}
@Override
public void failover(String masterName) {
commandAsyncService.get(failoverAsync(masterName));
}
@Override
public RFuture<RedisURI> getMasterAddrAsync(String masterName) {
RedisStrictCommand<RedisURI> masterHostCommand = new RedisStrictCommand<>("SENTINEL", "GET-MASTER-ADDR-BY-NAME",
new RedisURIDecoder(client.getConfig().getAddress().getScheme()));
return executeAsync(null, StringCodec.INSTANCE, -1, masterHostCommand, masterName);
}
@Override
public RFuture<List<Map<String, String>>> getSentinelsAsync(String masterName) {
return executeAsync(null, StringCodec.INSTANCE, -1, RedisCommands.SENTINEL_SENTINELS, masterName);
}
@Override
public RFuture<List<Map<String, String>>> getMastersAsync() {
return executeAsync(null, StringCodec.INSTANCE, -1, RedisCommands.SENTINEL_MASTERS);
}
@Override
public RFuture<List<Map<String, String>>> getSlavesAsync(String masterName) {
return executeAsync(null, StringCodec.INSTANCE, -1, RedisCommands.SENTINEL_SLAVES, masterName);
}
@Override
public RFuture<Map<String, String>> getMasterAsync(String masterName) {
return executeAsync(null, StringCodec.INSTANCE, -1, RedisCommands.SENTINEL_MASTER, masterName);
}
@Override
public RFuture<Void> failoverAsync(String masterName) {
return executeAsync(null, StringCodec.INSTANCE, -1, RedisCommands.SENTINEL_FAILOVER, masterName);
}
@Override
public Map<String, String> getConfig(String parameter) {
return getConfigAsync(parameter).toCompletableFuture().join();
}
@Override
public void setConfig(String parameter, String value) {
setConfigAsync(parameter, value).toCompletableFuture().join();
}
@Override
public RFuture<Map<String, String>> getConfigAsync(String parameter) {
return executeAsync(null, StringCodec.INSTANCE, -1, RedisCommands.CONFIG_GET_MAP, parameter);
}
@Override
public RFuture<Void> setConfigAsync(String parameter, String value) {
return executeAsync(null, StringCodec.INSTANCE, -1, RedisCommands.CONFIG_SET, parameter, value);
}
@Override
public void bgSave() {
commandAsyncService.get(bgSaveAsync());
}
@Override
public void scheduleBgSave() {
commandAsyncService.get(scheduleBgSaveAsync());
}
@Override
public void save() {
commandAsyncService.get(saveAsync());
}
@Override
public Instant getLastSaveTime() {
return commandAsyncService.get(getLastSaveTimeAsync());
}
@Override
public RFuture<Void> bgSaveAsync() {
return executeAsync(null, StringCodec.INSTANCE, -1, RedisCommands.BGSAVE);
}
@Override
public RFuture<Void> scheduleBgSaveAsync() {
return executeAsync(null, StringCodec.INSTANCE, -1, RedisCommands.BGSAVE, "SCHEDULE");
}
@Override
public RFuture<Void> saveAsync() {
return executeAsync(null, StringCodec.INSTANCE, -1, RedisCommands.SAVE);
}
@Override
public RFuture<Instant> getLastSaveTimeAsync() {
return executeAsync(null, StringCodec.INSTANCE, -1, RedisCommands.LASTSAVE_INSTANT);
}
@Override
public void bgRewriteAOF() {
commandAsyncService.get(bgRewriteAOFAsync());
}
@Override
public RFuture<Void> bgRewriteAOFAsync() {
return executeAsync(null, StringCodec.INSTANCE, -1, RedisCommands.BGREWRITEAOF);
}
@Override
public long size() {
return commandAsyncService.get(sizeAsync());
}
@Override
public RFuture<Long> sizeAsync() {
return executeAsync(null, StringCodec.INSTANCE, -1, RedisCommands.DBSIZE);
}
}
|
SentinelRedisNode
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/ErrorProneJavacPluginTest.java
|
{
"start": 4790,
"end": 4968
}
|
class ____ implements Runnable {", //
" public void run() {}",
"}"),
UTF_8);
Files.write(
fileB,
ImmutableList.of(
"
|
A
|
java
|
mybatis__mybatis-3
|
src/test/java/org/apache/ibatis/submitted/global_variables_defaults/AnnotationMapperTest.java
|
{
"start": 3507,
"end": 3643
}
|
interface ____ {
@Select("${ping.sql:SELECT 'Hello' FROM INFORMATION_SCHEMA.SYSTEM_USERS}")
String ping();
}
}
|
AnnotationMapper
|
java
|
spring-projects__spring-framework
|
spring-context/src/test/java/org/springframework/cache/CacheReproTests.java
|
{
"start": 22109,
"end": 22453
}
|
class ____ {
@Cacheable(value = "itemCache", sync = true)
public Optional<TestBean> findById(String id) {
return Optional.of(new TestBean(id));
}
@CachePut(cacheNames = "itemCache", key = "#item.name")
public TestBean insertItem(TestBean item) {
return item;
}
}
@Configuration
@EnableCaching
public static
|
Spr14853Service
|
java
|
spring-projects__spring-security
|
web/src/main/java/org/springframework/security/web/authentication/rememberme/PersistentTokenBasedRememberMeServices.java
|
{
"start": 2554,
"end": 8356
}
|
class ____ extends AbstractRememberMeServices {
private PersistentTokenRepository tokenRepository = new InMemoryTokenRepositoryImpl();
private SecureRandom random;
public static final int DEFAULT_SERIES_LENGTH = 16;
public static final int DEFAULT_TOKEN_LENGTH = 16;
private int seriesLength = DEFAULT_SERIES_LENGTH;
private int tokenLength = DEFAULT_TOKEN_LENGTH;
public PersistentTokenBasedRememberMeServices(String key, UserDetailsService userDetailsService,
PersistentTokenRepository tokenRepository) {
super(key, userDetailsService);
this.random = new SecureRandom();
this.tokenRepository = tokenRepository;
}
/**
* Locates the presented cookie data in the token repository, using the series id. If
* the data compares successfully with that in the persistent store, a new token is
* generated and stored with the same series. The corresponding cookie value is set on
* the response.
* @param cookieTokens the series and token values
* @throws RememberMeAuthenticationException if there is no stored token corresponding
* to the submitted cookie, or if the token in the persistent store has expired.
* @throws InvalidCookieException if the cookie doesn't have two tokens as expected.
* @throws CookieTheftException if a presented series value is found, but the stored
* token is different from the one presented.
*/
@Override
protected UserDetails processAutoLoginCookie(String[] cookieTokens, HttpServletRequest request,
HttpServletResponse response) {
if (cookieTokens.length != 2) {
throw new InvalidCookieException("Cookie token did not contain " + 2 + " tokens, but contained '"
+ Arrays.asList(cookieTokens) + "'");
}
String presentedSeries = cookieTokens[0];
String presentedToken = cookieTokens[1];
PersistentRememberMeToken token = this.tokenRepository.getTokenForSeries(presentedSeries);
if (token == null) {
// No series match, so we can't authenticate using this cookie
throw new RememberMeAuthenticationException("No persistent token found for series id: " + presentedSeries);
}
// We have a match for this user/series combination
if (!presentedToken.equals(token.getTokenValue())) {
// Token doesn't match series value. Delete all logins for this user and throw
// an exception to warn them.
this.tokenRepository.removeUserTokens(token.getUsername());
throw new CookieTheftException(this.messages.getMessage(
"PersistentTokenBasedRememberMeServices.cookieStolen",
"Invalid remember-me token (Series/token) mismatch. Implies previous cookie theft attack."));
}
if (token.getDate().getTime() + getTokenValiditySeconds() * 1000L < System.currentTimeMillis()) {
throw new RememberMeAuthenticationException("Remember-me login has expired");
}
// Token also matches, so login is valid. Update the token value, keeping the
// *same* series number.
this.logger.debug(LogMessage.format("Refreshing persistent login token for user '%s', series '%s'",
token.getUsername(), token.getSeries()));
PersistentRememberMeToken newToken = new PersistentRememberMeToken(token.getUsername(), token.getSeries(),
generateTokenData(), new Date());
try {
this.tokenRepository.updateToken(newToken.getSeries(), newToken.getTokenValue(), newToken.getDate());
addCookie(newToken, request, response);
}
catch (Exception ex) {
this.logger.error("Failed to update token: ", ex);
throw new RememberMeAuthenticationException("Autologin failed due to data access problem");
}
return getUserDetailsService().loadUserByUsername(token.getUsername());
}
/**
* Creates a new persistent login token with a new series number, stores the data in
* the persistent token repository and adds the corresponding cookie to the response.
*
*/
@Override
protected void onLoginSuccess(HttpServletRequest request, HttpServletResponse response,
Authentication successfulAuthentication) {
String username = successfulAuthentication.getName();
this.logger.debug(LogMessage.format("Creating new persistent login for user %s", username));
PersistentRememberMeToken persistentToken = new PersistentRememberMeToken(username, generateSeriesData(),
generateTokenData(), new Date());
try {
this.tokenRepository.createNewToken(persistentToken);
addCookie(persistentToken, request, response);
}
catch (Exception ex) {
this.logger.error("Failed to save persistent token ", ex);
}
}
@Override
public void logout(HttpServletRequest request, HttpServletResponse response,
@Nullable Authentication authentication) {
super.logout(request, response, authentication);
if (authentication != null) {
this.tokenRepository.removeUserTokens(authentication.getName());
}
}
protected String generateSeriesData() {
byte[] newSeries = new byte[this.seriesLength];
this.random.nextBytes(newSeries);
return new String(Base64.getEncoder().encode(newSeries));
}
protected String generateTokenData() {
byte[] newToken = new byte[this.tokenLength];
this.random.nextBytes(newToken);
return new String(Base64.getEncoder().encode(newToken));
}
private void addCookie(PersistentRememberMeToken token, HttpServletRequest request, HttpServletResponse response) {
setCookie(new String[] { token.getSeries(), token.getTokenValue() }, getTokenValiditySeconds(), request,
response);
}
public void setSeriesLength(int seriesLength) {
this.seriesLength = seriesLength;
}
public void setTokenLength(int tokenLength) {
this.tokenLength = tokenLength;
}
@Override
public void setTokenValiditySeconds(int tokenValiditySeconds) {
Assert.isTrue(tokenValiditySeconds > 0, "tokenValiditySeconds must be positive for this implementation");
super.setTokenValiditySeconds(tokenValiditySeconds);
}
}
|
PersistentTokenBasedRememberMeServices
|
java
|
apache__flink
|
flink-python/src/main/java/org/apache/beam/runners/fnexecution/control/DefaultJobBundleFactory.java
|
{
"start": 5438,
"end": 10006
}
|
class ____ implements JobBundleFactory {
private static final Logger LOG = LoggerFactory.getLogger(DefaultJobBundleFactory.class);
private static final IdGenerator factoryIdGenerator = IdGenerators.incrementingLongs();
private final String factoryId = factoryIdGenerator.getId();
private final ImmutableList<EnvironmentCacheAndLock> environmentCaches;
private final AtomicInteger stageBundleFactoryCount = new AtomicInteger();
private final Map<String, EnvironmentFactory.Provider> environmentFactoryProviderMap;
private final ExecutorService executor;
private final MapControlClientPool clientPool;
private final IdGenerator stageIdGenerator;
private final int environmentExpirationMillis;
private final Semaphore availableCachesSemaphore;
private final LinkedBlockingDeque<EnvironmentCacheAndLock> availableCaches;
private final boolean loadBalanceBundles;
/**
* Clients which were evicted due to environment expiration but still had pending references.
*/
private final Set<WrappedSdkHarnessClient> evictedActiveClients;
private boolean closed;
public static DefaultJobBundleFactory create(JobInfo jobInfo) {
PipelineOptions pipelineOptions =
PipelineOptionsTranslation.fromProto(jobInfo.pipelineOptions());
Map<String, EnvironmentFactory.Provider> environmentFactoryProviderMap =
ImmutableMap.of(
BeamUrns.getUrn(StandardEnvironments.Environments.DOCKER),
new DockerEnvironmentFactory.Provider(pipelineOptions),
BeamUrns.getUrn(StandardEnvironments.Environments.PROCESS),
new ProcessEnvironmentFactory.Provider(pipelineOptions),
BeamUrns.getUrn(StandardEnvironments.Environments.EXTERNAL),
new ExternalEnvironmentFactory.Provider(),
Environments.ENVIRONMENT_EMBEDDED, // Non Public urn for testing.
new EmbeddedEnvironmentFactory.Provider(pipelineOptions));
return new DefaultJobBundleFactory(jobInfo, environmentFactoryProviderMap);
}
public static DefaultJobBundleFactory create(
JobInfo jobInfo,
Map<String, EnvironmentFactory.Provider> environmentFactoryProviderMap) {
return new DefaultJobBundleFactory(jobInfo, environmentFactoryProviderMap);
}
DefaultJobBundleFactory(
JobInfo jobInfo, Map<String, EnvironmentFactory.Provider> environmentFactoryMap) {
IdGenerator stageIdSuffixGenerator = IdGenerators.incrementingLongs();
this.environmentFactoryProviderMap = environmentFactoryMap;
this.executor = Executors.newCachedThreadPool();
this.clientPool = MapControlClientPool.create();
this.stageIdGenerator = () -> factoryId + "-" + stageIdSuffixGenerator.getId();
this.environmentExpirationMillis = getEnvironmentExpirationMillis(jobInfo);
this.loadBalanceBundles = shouldLoadBalanceBundles(jobInfo);
this.environmentCaches =
createEnvironmentCaches(
serverFactory -> createServerInfo(jobInfo, serverFactory),
getMaxEnvironmentClients(jobInfo));
this.availableCachesSemaphore = new Semaphore(environmentCaches.size(), true);
this.availableCaches = new LinkedBlockingDeque<>(environmentCaches);
this.evictedActiveClients = Sets.newConcurrentHashSet();
}
@VisibleForTesting
DefaultJobBundleFactory(
JobInfo jobInfo,
Map<String, EnvironmentFactory.Provider> environmentFactoryMap,
IdGenerator stageIdGenerator,
ServerInfo serverInfo) {
this.environmentFactoryProviderMap = environmentFactoryMap;
this.executor = Executors.newCachedThreadPool();
this.clientPool = MapControlClientPool.create();
this.stageIdGenerator = stageIdGenerator;
this.environmentExpirationMillis = getEnvironmentExpirationMillis(jobInfo);
this.loadBalanceBundles = shouldLoadBalanceBundles(jobInfo);
this.environmentCaches =
createEnvironmentCaches(
serverFactory -> serverInfo, getMaxEnvironmentClients(jobInfo));
this.availableCachesSemaphore = new Semaphore(environmentCaches.size(), true);
this.availableCaches = new LinkedBlockingDeque<>(environmentCaches);
this.evictedActiveClients = Sets.newConcurrentHashSet();
}
private static
|
DefaultJobBundleFactory
|
java
|
quarkusio__quarkus
|
extensions/resteasy-reactive/rest/deployment/src/test/java/io/quarkus/resteasy/reactive/server/test/resource/basic/resource/CovariantReturnSubresourceLocatorsRootProxy.java
|
{
"start": 142,
"end": 311
}
|
interface ____ {
@Path("sub/{path}")
CovariantReturnSubresourceLocatorsSubProxy getSub(@PathParam("path") String path);
}
|
CovariantReturnSubresourceLocatorsRootProxy
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/streaming/runtime/tasks/TestBoundedOneInputStreamOperator.java
|
{
"start": 1193,
"end": 2482
}
|
class ____ extends AbstractStreamOperator<String>
implements OneInputStreamOperator<String, String>, BoundedOneInput {
private static final long serialVersionUID = 1L;
private final String name;
private static volatile boolean inputEnded = false;
public TestBoundedOneInputStreamOperator() {
this("test");
}
public TestBoundedOneInputStreamOperator(String name) {
this.name = name;
inputEnded = false;
}
@Override
public void processElement(StreamRecord<String> element) {
output.collect(element);
}
@Override
public void endInput() {
inputEnded = true;
output("[" + name + "]: End of input");
}
@Override
public void finish() throws Exception {
ProcessingTimeService timeService = getProcessingTimeService();
timeService.registerTimer(
timeService.getCurrentProcessingTime(),
t -> output("[" + name + "]: Timer registered in finish"));
output("[" + name + "]: Finish");
super.finish();
}
private void output(String record) {
output.collect(new StreamRecord<>(record));
}
public static boolean isInputEnded() {
return inputEnded;
}
}
|
TestBoundedOneInputStreamOperator
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/resourcemanager/ResourceManagerGateway.java
|
{
"start": 2935,
"end": 13359
}
|
interface ____
extends FencedRpcGateway<ResourceManagerId>, ClusterPartitionManager, BlocklistListener {
/**
* Register a {@link JobMaster} at the resource manager.
*
* @param jobMasterId The fencing token for the JobMaster leader
* @param jobMasterResourceId The resource ID of the JobMaster that registers
* @param jobMasterAddress The address of the JobMaster that registers
* @param jobId The Job ID of the JobMaster that registers
* @param timeout Timeout for the future to complete
* @return Future registration response
*/
CompletableFuture<RegistrationResponse> registerJobMaster(
JobMasterId jobMasterId,
ResourceID jobMasterResourceId,
String jobMasterAddress,
JobID jobId,
@RpcTimeout Duration timeout);
/**
* Declares the absolute resource requirements for a job.
*
* @param jobMasterId id of the JobMaster
* @param resourceRequirements resource requirements
* @return The confirmation that the requirements have been processed
*/
CompletableFuture<Acknowledge> declareRequiredResources(
JobMasterId jobMasterId,
ResourceRequirements resourceRequirements,
@RpcTimeout Duration timeout);
/**
* Register a {@link TaskExecutor} at the resource manager.
*
* @param taskExecutorRegistration the task executor registration.
* @param timeout The timeout for the response.
* @return The future to the response by the ResourceManager.
*/
CompletableFuture<RegistrationResponse> registerTaskExecutor(
TaskExecutorRegistration taskExecutorRegistration, @RpcTimeout Duration timeout);
/**
* Sends the given {@link SlotReport} to the ResourceManager.
*
* @param taskManagerResourceId The resource ID of the sending TaskManager
* @param taskManagerRegistrationId id identifying the sending TaskManager
* @param slotReport which is sent to the ResourceManager
* @param timeout for the operation
* @return Future which is completed with {@link Acknowledge} once the slot report has been
* received.
*/
CompletableFuture<Acknowledge> sendSlotReport(
ResourceID taskManagerResourceId,
InstanceID taskManagerRegistrationId,
SlotReport slotReport,
@RpcTimeout Duration timeout);
/**
* Sent by the TaskExecutor to notify the ResourceManager that a slot has become available.
*
* @param instanceId TaskExecutor's instance id
* @param slotID The SlotID of the freed slot
* @param oldAllocationId to which the slot has been allocated
*/
void notifySlotAvailable(InstanceID instanceId, SlotID slotID, AllocationID oldAllocationId);
/**
* Deregister Flink from the underlying resource management system.
*
* @param finalStatus final status with which to deregister the Flink application
* @param diagnostics additional information for the resource management system, can be {@code
* null}
*/
CompletableFuture<Acknowledge> deregisterApplication(
final ApplicationStatus finalStatus, @Nullable final String diagnostics);
/**
* Gets the currently registered number of TaskManagers.
*
* @return The future to the number of registered TaskManagers.
*/
CompletableFuture<Integer> getNumberOfRegisteredTaskManagers();
/**
* Sends the heartbeat to resource manager from task manager.
*
* @param heartbeatOrigin unique id of the task manager
* @param heartbeatPayload payload from the originating TaskManager
* @return future which is completed exceptionally if the operation fails
*/
CompletableFuture<Void> heartbeatFromTaskManager(
final ResourceID heartbeatOrigin, final TaskExecutorHeartbeatPayload heartbeatPayload);
/**
* Sends the heartbeat to resource manager from job manager.
*
* @param heartbeatOrigin unique id of the job manager
* @return future which is completed exceptionally if the operation fails
*/
CompletableFuture<Void> heartbeatFromJobManager(final ResourceID heartbeatOrigin);
/**
* Disconnects a TaskManager specified by the given resourceID from the {@link ResourceManager}.
*
* @param resourceID identifying the TaskManager to disconnect
* @param cause for the disconnection of the TaskManager
*/
void disconnectTaskManager(ResourceID resourceID, Exception cause);
/**
* Disconnects a JobManager specified by the given resourceID from the {@link ResourceManager}.
*
* @param jobId JobID for which the JobManager was the leader
* @param jobStatus status of the job at the time of disconnection
* @param cause for the disconnection of the JobManager
*/
void disconnectJobManager(JobID jobId, JobStatus jobStatus, Exception cause);
/**
* Requests information about the registered {@link TaskExecutor}.
*
* @param timeout of the request
* @return Future collection of TaskManager information
*/
CompletableFuture<Collection<TaskManagerInfo>> requestTaskManagerInfo(
@RpcTimeout Duration timeout);
/**
* Requests detail information about the given {@link TaskExecutor}.
*
* @param taskManagerId identifying the TaskExecutor for which to return information
* @param timeout of the request
* @return Future TaskManager information and its allocated slots
*/
CompletableFuture<TaskManagerInfoWithSlots> requestTaskManagerDetailsInfo(
ResourceID taskManagerId, @RpcTimeout Duration timeout);
/**
* Requests the resource overview. The resource overview provides information about the
* connected TaskManagers, the total number of slots and the number of available slots.
*
* @param timeout of the request
* @return Future containing the resource overview
*/
CompletableFuture<ResourceOverview> requestResourceOverview(@RpcTimeout Duration timeout);
/**
* Requests the paths for the TaskManager's {@link MetricQueryService} to query.
*
* @param timeout for the asynchronous operation
* @return Future containing the collection of resource ids and the corresponding metric query
* service path
*/
CompletableFuture<Collection<Tuple2<ResourceID, String>>>
requestTaskManagerMetricQueryServiceAddresses(@RpcTimeout Duration timeout);
/**
* Request the file upload from the given {@link TaskExecutor} to the cluster's {@link
* BlobServer}. The corresponding {@link TransientBlobKey} is returned.
*
* @param taskManagerId identifying the {@link TaskExecutor} to upload the specified file
* @param fileType type of the file to upload
* @param timeout for the asynchronous operation
* @return Future which is completed with the {@link TransientBlobKey} after uploading the file
* to the {@link BlobServer}.
*/
CompletableFuture<TransientBlobKey> requestTaskManagerFileUploadByType(
ResourceID taskManagerId, FileType fileType, @RpcTimeout Duration timeout);
/**
* Request the file upload from the given {@link TaskExecutor} to the cluster's {@link
* BlobServer}. The corresponding {@link TransientBlobKey} is returned.
*
* @param taskManagerId identifying the {@link TaskExecutor} to upload the specified file
* @param fileName name of the file to upload
* @param fileType type of the file to upload
* @param timeout for the asynchronous operation
* @return Future which is completed with the {@link TransientBlobKey} after uploading the file
* to the {@link BlobServer}.
*/
CompletableFuture<TransientBlobKey> requestTaskManagerFileUploadByNameAndType(
ResourceID taskManagerId,
String fileName,
FileType fileType,
@RpcTimeout Duration timeout);
/**
* Request log list from the given {@link TaskExecutor}.
*
* @param taskManagerId identifying the {@link TaskExecutor} to get log list from
* @param timeout for the asynchronous operation
* @return Future which is completed with the historical log list
*/
CompletableFuture<Collection<LogInfo>> requestTaskManagerLogList(
ResourceID taskManagerId, @RpcTimeout Duration timeout);
/**
* Requests the thread dump from the given {@link TaskExecutor}.
*
* @param taskManagerId taskManagerId identifying the {@link TaskExecutor} to get the thread
* dump from
* @param timeout timeout of the asynchronous operation
* @return Future containing the thread dump information
*/
CompletableFuture<ThreadDumpInfo> requestThreadDump(
ResourceID taskManagerId, @RpcTimeout Duration timeout);
/**
* Requests the {@link TaskExecutorGateway}.
*
* @param taskManagerId identifying the {@link TaskExecutor}.
* @return Future containing the task executor gateway.
*/
CompletableFuture<TaskExecutorThreadInfoGateway> requestTaskExecutorThreadInfoGateway(
ResourceID taskManagerId, @RpcTimeout Duration timeout);
/**
* Request profiling list from the given {@link TaskExecutor}.
*
* @param taskManagerId identifying the {@link TaskExecutor} to get profiling list from
* @param timeout for the asynchronous operation
* @return Future which is completed with the historical profiling list
*/
CompletableFuture<Collection<ProfilingInfo>> requestTaskManagerProfilingList(
ResourceID taskManagerId, @RpcTimeout Duration timeout);
/**
* Requests the profiling instance from the given {@link TaskExecutor}.
*
* @param taskManagerId taskManagerId identifying the {@link TaskExecutor} to get the profiling
* from
* @param duration profiling duration
* @param mode profiling mode {@link ProfilingMode}
* @param timeout timeout of the asynchronous operation
* @return Future containing the created profiling information
*/
CompletableFuture<ProfilingInfo> requestProfiling(
ResourceID taskManagerId,
int duration,
ProfilingInfo.ProfilingMode mode,
@RpcTimeout Duration timeout);
}
|
ResourceManagerGateway
|
java
|
spring-projects__spring-framework
|
spring-web/src/main/java/org/springframework/web/client/HttpClientErrorException.java
|
{
"start": 10370,
"end": 10953
}
|
class ____ extends HttpClientErrorException {
private Gone(String statusText, HttpHeaders headers, byte @Nullable [] body, @Nullable Charset charset) {
super(HttpStatus.GONE, statusText, headers, body, charset);
}
private Gone(String message, String statusText, HttpHeaders headers, byte @Nullable [] body, @Nullable Charset charset) {
super(message, HttpStatus.GONE, statusText, headers, body, charset);
}
}
/**
* {@link HttpClientErrorException} for status HTTP 415 Unsupported Media Type.
* @since 5.1
*/
@SuppressWarnings("serial")
public static final
|
Gone
|
java
|
spring-projects__spring-framework
|
spring-test/src/test/java/org/springframework/test/context/support/GenericXmlContextLoaderResourceLocationsTests.java
|
{
"start": 4674,
"end": 4755
}
|
class ____ {
}
@ContextConfiguration
|
ClasspathNonExistentDefaultLocationsTestCase
|
java
|
spring-projects__spring-boot
|
module/spring-boot-actuator/src/test/java/org/springframework/boot/actuate/endpoint/invoke/convert/IsoOffsetDateTimeConverterTests.java
|
{
"start": 981,
"end": 1555
}
|
class ____ {
@Test
void convertShouldConvertIsoDate() {
IsoOffsetDateTimeConverter converter = new IsoOffsetDateTimeConverter();
OffsetDateTime time = converter.convert("2011-12-03T10:15:30+01:00");
assertThat(time).isNotNull();
}
@Test
void registerConverterShouldRegister() {
DefaultConversionService service = new DefaultConversionService();
IsoOffsetDateTimeConverter.registerConverter(service);
OffsetDateTime time = service.convert("2011-12-03T10:15:30+01:00", OffsetDateTime.class);
assertThat(time).isNotNull();
}
}
|
IsoOffsetDateTimeConverterTests
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.