language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | netty__netty | codec-http2/src/test/java/io/netty/handler/codec/http2/DecoratingHttp2ConnectionEncoderTest.java | {
"start": 987,
"end": 2134
} | class ____ {
@Test
public void testConsumeReceivedSettingsThrows() {
Http2ConnectionEncoder encoder = mock(Http2ConnectionEncoder.class);
final DecoratingHttp2ConnectionEncoder decoratingHttp2ConnectionEncoder =
new DecoratingHttp2ConnectionEncoder(encoder);
assertThrows(IllegalStateException.class, new Executable() {
@Override
public void execute() {
decoratingHttp2ConnectionEncoder.consumeReceivedSettings(Http2Settings.defaultSettings());
}
});
}
@Test
public void testConsumeReceivedSettingsDelegate() {
TestHttp2ConnectionEncoder encoder = mock(TestHttp2ConnectionEncoder.class);
DecoratingHttp2ConnectionEncoder decoratingHttp2ConnectionEncoder =
new DecoratingHttp2ConnectionEncoder(encoder);
Http2Settings settings = Http2Settings.defaultSettings();
decoratingHttp2ConnectionEncoder.consumeReceivedSettings(Http2Settings.defaultSettings());
verify(encoder, times(1)).consumeReceivedSettings(eq(settings));
}
private | DecoratingHttp2ConnectionEncoderTest |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/EsqlArithmeticOperation.java | {
"start": 1757,
"end": 1949
} | enum ____ to fit the super constructor that expects a BinaryOperation which is
* used just for its symbol.
* The rest of the methods should not be triggered hence the UOE.
*/
| is |
java | google__dagger | javatests/dagger/functional/producers/builder/TestComponentWithBuilder.java | {
"start": 1023,
"end": 1187
} | interface ____ {
Builder depComponent(DepComponent depComponent);
Builder strModule(StringModule strModule);
TestComponentWithBuilder build();
}
}
| Builder |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/float_/FloatAssert_isNotEqualTo_float_primitive_Test.java | {
"start": 1467,
"end": 2942
} | class ____ extends FloatAssertBaseTest {
@SuppressWarnings({ "unchecked", "rawtypes" })
@Override
protected FloatAssert invoke_api_method() {
// trick to simulate a custom comparator
given(floats.getComparator()).willReturn((Comparator) ALWAY_EQUAL_FLOAT);
return assertions.isNotEqualTo(8f);
}
@Override
protected void verify_internal_effects() {
verify(floats).getComparator();
verify(floats).assertNotEqual(getInfo(assertions), getActual(assertions), 8f);
verifyNoMoreInteractions(floats);
}
@ParameterizedTest
@CsvSource({ "1.0f, -1.0f", "NaN, NaN" })
void should_pass_using_primitive_comparison(float actual, float expected) {
assertThat(actual).isNotEqualTo(expected);
}
@Test
void should_honor_user_specified_comparator() {
// GIVEN
final float one = 1.0f;
// THEN
assertThat(one).usingComparator(ALWAY_DIFFERENT)
.isNotEqualTo(one);
}
@Test
void should_fail_if_floats_are_equal() {
// GIVEN
float actual = 0.0f;
float expected = -0.0f;
// WHEN
var assertionError = expectAssertionError(() -> assertThat(actual).isNotEqualTo(expected));
// THEN
then(assertionError).hasMessage(format("%nExpecting actual:%n" +
" 0.0f%n" +
"not to be equal to:%n" +
" -0.0f%n"));
}
}
| FloatAssert_isNotEqualTo_float_primitive_Test |
java | apache__flink | flink-state-backends/flink-statebackend-forst/src/test/java/org/apache/flink/state/forst/ForStOperationsUtilsTest.java | {
"start": 1457,
"end": 3208
} | class ____ {
@ClassRule public static final TemporaryFolder TMP_DIR = new TemporaryFolder();
@BeforeClass
public static void loadRocksLibrary() throws Exception {
NativeLibraryLoader.getInstance().loadLibrary(TMP_DIR.newFolder().getAbsolutePath());
}
@Test
public void testPathExceptionOnWindows() throws Exception {
assumeTrue(OperatingSystem.isWindows());
final File folder = TMP_DIR.newFolder();
final File rocksDir =
new File(folder, getLongString(247 - folder.getAbsolutePath().length()));
Files.createDirectories(rocksDir.toPath());
try (DBOptions dbOptions = new DBOptions().setCreateIfMissing(true);
ColumnFamilyOptions colOptions = new ColumnFamilyOptions()) {
RocksDB rocks =
ForStOperationUtils.openDB(
rocksDir.getAbsolutePath(),
Collections.emptyList(),
Collections.emptyList(),
colOptions,
dbOptions);
rocks.close();
// do not provoke a test failure if this passes, because some setups may actually
// support long paths, in which case: great!
} catch (IOException e) {
assertThat(
e.getMessage(),
containsString("longer than the directory path length limit for Windows"));
}
}
private static String getLongString(int numChars) {
final StringBuilder builder = new StringBuilder();
for (int i = numChars; i > 0; --i) {
builder.append('a');
}
return builder.toString();
}
}
| ForStOperationsUtilsTest |
java | micronaut-projects__micronaut-core | test-suite/src/test/java/io/micronaut/docs/config/properties/EngineImpl.java | {
"start": 832,
"end": 1428
} | class ____ implements Engine {
private final EngineConfig config;
public EngineImpl(EngineConfig config) { // <1>
this.config = config;
}
@Override
public int getCylinders() {
return config.getCylinders();
}
@Override
public String start() {// <2>
return getConfig().getManufacturer() + " Engine Starting V" + getConfig().getCylinders() +
" [rodLength=" + getConfig().getCrankShaft().getRodLength().orElse(6d) + "]";
}
public final EngineConfig getConfig() {
return config;
}
}
// end::class[]
| EngineImpl |
java | junit-team__junit5 | platform-tests/src/test/java/org/junit/platform/suite/engine/testcases/MultipleTestsTestCase.java | {
"start": 432,
"end": 517
} | class ____ {
@Test
void test() {
}
@Test
void test2() {
}
}
| MultipleTestsTestCase |
java | google__guice | extensions/servlet/test/com/google/inject/servlet/ServletDispatchIntegrationTest.java | {
"start": 1632,
"end": 4939
} | class ____ extends TestCase {
private static int inits, services, destroys, doFilters;
@Override
public void setUp() {
inits = 0;
services = 0;
destroys = 0;
doFilters = 0;
GuiceFilter.reset();
}
public final void testDispatchRequestToManagedPipelineServlets()
throws ServletException, IOException {
final Injector injector =
Guice.createInjector(
new ServletModule() {
@Override
protected void configureServlets() {
serve("/*").with(TestServlet.class);
// These servets should never fire... (ordering test)
serve("*.html").with(NeverServlet.class);
serve("/test/*").with(Key.get(NeverServlet.class));
serve("/index/*").with(Key.get(NeverServlet.class));
serve("*.jsp").with(Key.get(NeverServlet.class));
}
});
final FilterPipeline pipeline = injector.getInstance(FilterPipeline.class);
pipeline.initPipeline(null);
// create ourselves a mock request with test URI
HttpServletRequest requestMock = mock(HttpServletRequest.class);
when(requestMock.getRequestURI()).thenReturn("/index.html");
when(requestMock.getContextPath()).thenReturn("");
// dispatch request
pipeline.dispatch(requestMock, null, mock(FilterChain.class));
pipeline.destroyPipeline();
assertTrue(
"lifecycle states did not fire correct number of times-- inits: "
+ inits
+ "; dos: "
+ services
+ "; destroys: "
+ destroys,
inits == 2 && services == 1 && destroys == 2);
}
public final void testDispatchRequestToManagedPipelineWithFilter()
throws ServletException, IOException {
final Injector injector =
Guice.createInjector(
new ServletModule() {
@Override
protected void configureServlets() {
filter("/*").through(TestFilter.class);
serve("/*").with(TestServlet.class);
// These servets should never fire...
serve("*.html").with(NeverServlet.class);
serve("/test/*").with(Key.get(NeverServlet.class));
serve("/index/*").with(Key.get(NeverServlet.class));
serve("*.jsp").with(Key.get(NeverServlet.class));
}
});
final FilterPipeline pipeline = injector.getInstance(FilterPipeline.class);
pipeline.initPipeline(null);
// create ourselves a mock request with test URI
HttpServletRequest requestMock = mock(HttpServletRequest.class);
when(requestMock.getRequestURI()).thenReturn("/index.html");
when(requestMock.getContextPath()).thenReturn("");
// dispatch request
pipeline.dispatch(requestMock, null, mock(FilterChain.class));
pipeline.destroyPipeline();
assertTrue(
"lifecycle states did not fire correct number of times-- inits: "
+ inits
+ "; dos: "
+ services
+ "; destroys: "
+ destroys
+ "; doFilters: "
+ doFilters,
inits == 3 && services == 1 && destroys == 3 && doFilters == 1);
}
@Singleton
public static | ServletDispatchIntegrationTest |
java | apache__camel | components/camel-test/camel-test-junit5/src/test/java/org/apache/camel/test/junit5/patterns/AsyncSendMockTest.java | {
"start": 1161,
"end": 2139
} | class ____ extends CamelTestSupport {
private static final Logger LOG = LoggerFactory.getLogger(AsyncSendMockTest.class);
@Override
public String isMockEndpoints() {
return "seda*";
}
@Test
public void testMakeAsyncApiCall() {
try {
getMockEndpoint("mock:seda:start").expectedHeaderReceived("username", "admin123");
getMockEndpoint("mock:seda:start").expectedBodiesReceived("Hello");
DefaultExchange dfex = new DefaultExchange(context);
dfex.getIn().setHeader("username", "admin123");
dfex.getIn().setHeader("password", "admin");
dfex.getIn().setBody("Hello");
template.asyncSend("seda:start", dfex);
MockEndpoint.assertIsSatisfied(context);
} catch (Exception e) {
LOG.warn("Failed to make async call to api: {}", e.getMessage(), e);
fail("Failed to make async call to api");
}
}
}
| AsyncSendMockTest |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/streaming/runtime/translators/BatchExecutionUtils.java | {
"start": 1786,
"end": 4670
} | class ____ {
private static final Logger LOG = LoggerFactory.getLogger(BatchExecutionUtils.class);
public static void applyBatchExecutionSettings(
int transformationId,
TransformationTranslator.Context context,
StreamConfig.InputRequirement... inputRequirements) {
StreamNode node = context.getStreamGraph().getStreamNode(transformationId);
boolean sortInputs = context.getGraphGeneratorConfig().get(ExecutionOptions.SORT_INPUTS);
boolean isInputSelectable = isInputSelectable(node);
adjustChainingStrategy(node);
checkState(
!isInputSelectable || !sortInputs,
"Batch state backend and sorting inputs are not supported in graphs with an InputSelectable operator.");
if (sortInputs) {
LOG.debug("Applying sorting/pass-through input requirements for operator {}.", node);
for (int i = 0; i < inputRequirements.length; i++) {
node.addInputRequirement(i, inputRequirements[i]);
}
Map<ManagedMemoryUseCase, Integer> operatorScopeUseCaseWeights = new HashMap<>();
operatorScopeUseCaseWeights.put(
ManagedMemoryUseCase.OPERATOR,
deriveMemoryWeight(context.getGraphGeneratorConfig()));
node.setManagedMemoryUseCaseWeights(
operatorScopeUseCaseWeights, Collections.emptySet());
}
}
private static int deriveMemoryWeight(ReadableConfig configuration) {
return Math.max(1, configuration.get(ExecutionOptions.SORTED_INPUTS_MEMORY).getMebiBytes());
}
@SuppressWarnings("rawtypes")
private static boolean isInputSelectable(StreamNode node) {
ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
Class<? extends StreamOperator> operatorClass =
Preconditions.checkNotNull(node.getOperatorFactory())
.getStreamOperatorClass(classLoader);
return InputSelectable.class.isAssignableFrom(operatorClass);
}
private static void adjustChainingStrategy(StreamNode node) {
StreamOperatorFactory<?> operatorFactory =
Preconditions.checkNotNull(node.getOperatorFactory());
ChainingStrategy currentChainingStrategy = operatorFactory.getChainingStrategy();
switch (currentChainingStrategy) {
case ALWAYS:
case HEAD_WITH_SOURCES:
LOG.debug(
"Setting chaining strategy to HEAD for operator {}, because of the BATCH execution mode.",
node);
operatorFactory.setChainingStrategy(ChainingStrategy.HEAD);
break;
case NEVER:
case HEAD:
break;
}
}
private BatchExecutionUtils() {}
}
| BatchExecutionUtils |
java | spring-projects__spring-boot | module/spring-boot-kafka/src/test/java/org/springframework/boot/kafka/autoconfigure/KafkaAutoConfigurationTests.java | {
"start": 54579,
"end": 54901
} | class ____ {
@SuppressWarnings("unchecked")
private final ConsumerFactory<String, Object> consumerFactory = mock(ConsumerFactory.class);
@Bean
ConsumerFactory<String, Object> myConsumerFactory() {
return this.consumerFactory;
}
}
@Configuration(proxyBeanMethods = false)
static | ConsumerFactoryConfiguration |
java | lettuce-io__lettuce-core | src/test/java/io/lettuce/core/vector/RedisVectorSetAdvancedIntegrationTests.java | {
"start": 1737,
"end": 21754
} | class ____ {
private static final String POINTS_KEY = "points";
protected static RedisClient client;
protected static RedisCommands<String, String> redis;
public RedisVectorSetAdvancedIntegrationTests() {
RedisURI redisURI = RedisURI.Builder.redis("127.0.0.1").withPort(16379).build();
client = RedisClient.create(redisURI);
StatefulRedisConnection<String, String> connection = client.connect();
redis = connection.sync();
}
@BeforeEach
public void prepare() {
assumeTrue(RedisConditions.of(redis).hasVersionGreaterOrEqualsTo("8.0"));
redis.flushall();
// Add the example points from the Redis documentation
// A: (1.0, 1.0), B: (-1.0, -1.0), C: (-1.0, 1.0), D: (1.0, -1.0), and E: (1.0, 0)
redis.vadd(POINTS_KEY, "pt:A", 1.0, 1.0);
redis.vadd(POINTS_KEY, "pt:B", -1.0, -1.0);
redis.vadd(POINTS_KEY, "pt:C", -1.0, 1.0);
redis.vadd(POINTS_KEY, "pt:D", 1.0, -1.0);
redis.vadd(POINTS_KEY, "pt:E", 1.0, 0.0);
}
@AfterAll
static void teardown() {
if (client != null) {
client.shutdown();
}
}
/**
* Test basic vector set operations as shown in the Redis documentation.
*/
@Test
void testBasicOperations() {
// Check the type of the vector set
String type = redis.type(POINTS_KEY);
assertThat(type).isEqualTo("vectorset");
// Check the cardinality of the vector set
Long count = redis.vcard(POINTS_KEY);
assertThat(count).isEqualTo(5);
// Check the dimensionality of the vectors
Long dim = redis.vdim(POINTS_KEY);
assertThat(dim).isEqualTo(2);
// Retrieve the vectors for each point
List<Double> vectorA = redis.vemb(POINTS_KEY, "pt:A");
assertThat(vectorA).hasSize(2);
assertThat(vectorA.get(0)).isCloseTo(1.0, within(0.001));
assertThat(vectorA.get(1)).isCloseTo(1.0, within(0.001));
List<Double> vectorB = redis.vemb(POINTS_KEY, "pt:B");
assertThat(vectorB).hasSize(2);
assertThat(vectorB.get(0)).isCloseTo(-1.0, within(0.001));
assertThat(vectorB.get(1)).isCloseTo(-1.0, within(0.001));
List<Double> vectorC = redis.vemb(POINTS_KEY, "pt:C");
assertThat(vectorC).hasSize(2);
assertThat(vectorC.get(0)).isCloseTo(-1.0, within(0.001));
assertThat(vectorC.get(1)).isCloseTo(1.0, within(0.001));
List<Double> vectorD = redis.vemb(POINTS_KEY, "pt:D");
assertThat(vectorD).hasSize(2);
assertThat(vectorD.get(0)).isCloseTo(1.0, within(0.001));
assertThat(vectorD.get(1)).isCloseTo(-1.0, within(0.001));
List<Double> vectorE = redis.vemb(POINTS_KEY, "pt:E");
assertThat(vectorE).hasSize(2);
assertThat(vectorE.get(0)).isCloseTo(1.0, within(0.001));
assertThat(vectorE.get(1)).isCloseTo(0.0, within(0.001));
}
/**
* Test attribute operations on vector set elements.
*/
@Test
void testAttributeOperations() {
// Set attributes for point A
String attributes = "{\"name\":\"Point A\",\"description\":\"First point added\"}";
Boolean result = redis.vsetattr(POINTS_KEY, "pt:A", attributes);
assertThat(result).isTrue();
// Get attributes for point A
String retrievedAttributes = redis.vgetattr(POINTS_KEY, "pt:A");
assertThat(retrievedAttributes).contains("\"name\":\"Point A\"");
assertThat(retrievedAttributes).contains("\"description\":\"First point added\"");
// Delete attributes by setting an empty string
result = redis.vsetattr(POINTS_KEY, "pt:A", "");
assertThat(result).isTrue();
// Verify attributes are deleted
retrievedAttributes = redis.vgetattr(POINTS_KEY, "pt:A");
assertThat(retrievedAttributes).isNull();
}
/**
* Test adding and removing elements from a vector set.
*/
@Test
void testAddAndRemoveElements() {
// Add a new point F at (0, 0)
Boolean result = redis.vadd(POINTS_KEY, "pt:F", 0.0, 0.0);
assertThat(result).isTrue();
// Check the updated cardinality
Long count = redis.vcard(POINTS_KEY);
assertThat(count).isEqualTo(6);
// Remove point F
result = redis.vrem(POINTS_KEY, "pt:F");
assertThat(result).isTrue();
// Check the cardinality after removal
count = redis.vcard(POINTS_KEY);
assertThat(count).isEqualTo(5);
}
/**
* Test vector similarity search as shown in the Redis documentation.
*/
@Test
void testVectorSimilaritySearch() {
// Search for vectors similar to (0.9, 0.1)
List<String> similar = redis.vsim(POINTS_KEY, 0.9, 0.1);
assertThat(similar).isNotEmpty();
// The expected order based on similarity to (0.9, 0.1) should be:
// E (1.0, 0.0), A (1.0, 1.0), D (1.0, -1.0), C (-1.0, 1.0), B (-1.0, -1.0)
assertThat(similar.get(0)).isEqualTo("pt:E");
assertThat(similar.get(1)).isEqualTo("pt:A");
assertThat(similar.get(2)).isEqualTo("pt:D");
assertThat(similar.get(3)).isEqualTo("pt:C");
assertThat(similar.get(4)).isEqualTo("pt:B");
// Search for vectors similar to point A with scores
Map<String, Double> similarWithScores = redis.vsimWithScore(POINTS_KEY, "pt:A");
assertThat(similarWithScores).isNotEmpty();
// Point A should have a perfect similarity score of 1.0 with itself
assertThat(similarWithScores.get("pt:A")).isCloseTo(1.0, within(0.001));
// Limit the number of results to 4
VSimArgs args = new VSimArgs();
args.count(4L);
similar = redis.vsim(POINTS_KEY, args, "pt:A");
assertThat(similar).hasSize(4);
}
/**
* Test filtered vector similarity search as shown in the Redis documentation.
*/
@Test
void testFilteredVectorSimilaritySearch() {
// Set attributes for all points
redis.vsetattr(POINTS_KEY, "pt:A", "{\"size\":\"large\",\"price\":18.99}");
redis.vsetattr(POINTS_KEY, "pt:B", "{\"size\":\"large\",\"price\":35.99}");
redis.vsetattr(POINTS_KEY, "pt:C", "{\"size\":\"large\",\"price\":25.99}");
redis.vsetattr(POINTS_KEY, "pt:D", "{\"size\":\"small\",\"price\":21.00}");
redis.vsetattr(POINTS_KEY, "pt:E", "{\"size\":\"small\",\"price\":17.75}");
// Filter by size = "large"
VSimArgs args = new VSimArgs();
args.filter(".size == \"large\"");
List<String> similar = redis.vsim(POINTS_KEY, args, "pt:A");
assertThat(similar).hasSize(3);
assertThat(similar).containsExactly("pt:A", "pt:C", "pt:B");
// Filter by size = "large" AND price > 20.00
args = new VSimArgs();
args.filter(".size == \"large\" && .price > 20.00");
similar = redis.vsim(POINTS_KEY, args, "pt:A");
assertThat(similar).hasSize(2);
assertThat(similar).containsExactly("pt:C", "pt:B");
}
/**
* Test advanced filter expressions for vector similarity search.
*
* @see <a href="https://redis.io/docs/latest/develop/data-types/vector-sets/filtered-search/">Redis Vector Sets Filter
* Expressions</a>
*/
@Test
void testAdvancedFilterExpressions() {
// Create a vector set with movie data
String moviesKey = "movies";
// Add movie vectors with attributes
VAddArgs args = new VAddArgs();
args.attributes(
"{\"title\":\"The Godfather\",\"year\":1972,\"rating\":9.2,\"genre\":\"drama\",\"director\":\"Coppola\"}");
redis.vadd(moviesKey, "movie1", args, 0.1, 0.2, 0.3);
args = new VAddArgs();
args.attributes("{\"title\":\"Star Wars\",\"year\":1977,\"rating\":8.6,\"genre\":\"sci-fi\",\"director\":\"Lucas\"}");
redis.vadd(moviesKey, "movie2", args, 0.2, 0.3, 0.4);
args = new VAddArgs();
args.attributes(
"{\"title\":\"Jurassic Park\",\"year\":1993,\"rating\":8.1,\"genre\":\"adventure\",\"director\":\"Spielberg\"}");
redis.vadd(moviesKey, "movie3", args, 0.3, 0.4, 0.5);
args = new VAddArgs();
args.attributes(
"{\"title\":\"The Dark Knight\",\"year\":2008,\"rating\":9.0,\"genre\":\"action\",\"director\":\"Nolan\"}");
redis.vadd(moviesKey, "movie4", args, 0.4, 0.5, 0.6);
args = new VAddArgs();
args.attributes("{\"title\":\"Inception\",\"year\":2010,\"rating\":8.8,\"genre\":\"sci-fi\",\"director\":\"Nolan\"}");
redis.vadd(moviesKey, "movie5", args, 0.5, 0.6, 0.7);
args = new VAddArgs();
args.attributes(
"{\"title\":\"Interstellar\",\"year\":2014,\"rating\":8.6,\"genre\":\"sci-fi\",\"director\":\"Nolan\"}");
redis.vadd(moviesKey, "movie6", args, 0.6, 0.7, 0.8);
args = new VAddArgs();
args.attributes("{\"title\":\"E.T.\",\"year\":1982,\"rating\":7.8,\"genre\":\"sci-fi\",\"director\":\"Spielberg\"}");
redis.vadd(moviesKey, "movie7", args, 0.7, 0.8, 0.9);
// Test filter by year range
VSimArgs vSimArgs = new VSimArgs();
vSimArgs.filter(".year >= 1980 and .year < 1990");
List<String> similar = redis.vsim(moviesKey, vSimArgs, 0.5, 0.6, 0.7);
assertThat(similar).hasSize(1);
assertThat(similar.get(0)).isEqualTo("movie7"); // E.T. (1982)
// Test filter by genre and rating
vSimArgs = new VSimArgs();
vSimArgs.filter(".genre == \"sci-fi\" and .rating > 8.5");
similar = redis.vsim(moviesKey, vSimArgs, 0.5, 0.6, 0.7);
assertThat(similar).hasSize(3);
// Should include Star Wars, Inception, and Interstellar
assertThat(similar).contains("movie2", "movie5", "movie6");
// Test IN operator with array
vSimArgs = new VSimArgs();
vSimArgs.filter(".director in [\"Spielberg\", \"Nolan\"]");
similar = redis.vsim(moviesKey, vSimArgs, 0.5, 0.6, 0.7);
assertThat(similar).hasSize(5);
// Should include Jurassic Park, The Dark Knight, Inception, Interstellar, and E.T.
assertThat(similar).contains("movie3", "movie4", "movie5", "movie6", "movie7");
// Test mathematical expressions
vSimArgs = new VSimArgs();
vSimArgs.filter("(.year - 2000) ** 2 < 100 and .rating / 2 > 4");
similar = redis.vsim(moviesKey, vSimArgs, 0.5, 0.6, 0.7);
assertThat(similar).hasSize(2);
// Should include The Dark Knight (2008) and Inception (2010)
assertThat(similar).contains("movie4", "movie3");
}
/**
* Test the FILTER-EF option for controlling how many candidate nodes are inspected.
*
* @see <a href="https://redis.io/docs/latest/develop/data-types/vector-sets/filtered-search/">Redis Vector Sets Filter
* Expressions</a>
*/
@Test
void testFilterEfOption() {
// Create a vector set with 100 elements
String largeSetKey = "large_set";
// Add 100 vectors with different attributes
for (int i = 0; i < 100; i++) {
VAddArgs args = new VAddArgs();
// Only 10% of elements have category = "rare"
String category = (i % 10 == 0) ? "rare" : "common";
// Prices range from 10 to 109
double price = 10 + i;
// Years range from 1950 to 2049
int year = 1950 + i;
args.attributes(
String.format("{\"id\":%d,\"category\":\"%s\",\"price\":%.2f,\"year\":%d}", i, category, price, year));
// Create vectors with slightly different values
double x = 0.1 + (i * 0.01);
double y = 0.2 + (i * 0.01);
double z = 0.3 + (i * 0.01);
redis.vadd(largeSetKey, "item" + i, args, x, y, z);
}
// Test with default FILTER-EF (COUNT * 100)
VSimArgs args = new VSimArgs();
args.count(3L);
args.filter(".category == \"rare\"");
List<String> similar = redis.vsim(largeSetKey, args, 0.5, 0.6, 0.7);
assertThat(similar).hasSize(3);
// Test with explicit FILTER-EF
args = new VSimArgs();
args.count(5L);
args.filter(".category == \"rare\"");
args.filterEfficiency(500L);
similar = redis.vsim(largeSetKey, args, 0.5, 0.6, 0.7);
assertThat(similar).hasSize(5);
// Test with FILTER-EF = 0 (scan as many as needed)
args = new VSimArgs();
args.count(10L);
args.filter(".category == \"rare\" and .year > 2000");
// args.filterEfficiency(0L);
similar = redis.vsim(largeSetKey, args, 0.5, 0.6, 0.7);
// Should find all rare items with year > 2000
assertThat(similar).isNotEmpty();
// Verify all results match the filter
for (String item : similar) {
String attributes = redis.vgetattr(largeSetKey, item);
assertThat(attributes).contains("\"category\":\"rare\"");
// Extract the year from the attributes
int startIndex = attributes.indexOf("\"year\":") + 7;
int endIndex = attributes.indexOf(",", startIndex);
if (endIndex == -1) {
endIndex = attributes.indexOf("}", startIndex);
}
int year = Integer.parseInt(attributes.substring(startIndex, endIndex));
assertThat(year).isGreaterThan(2000);
}
}
/**
* Test handling of missing attributes and edge cases in filter expressions.
*
* @see <a href="https://redis.io/docs/latest/develop/data-types/vector-sets/filtered-search/">Redis Vector Sets Filter
* Expressions</a>
*/
@Test
void testMissingAttributesAndEdgeCases() {
// Create a vector set with elements having different attributes
String edgeCaseKey = "edge_cases";
// Element with complete attributes
VAddArgs args = new VAddArgs();
args.attributes("{\"category\":\"electronics\",\"price\":100.0,\"inStock\":true,\"tags\":[\"laptop\",\"computer\"]}");
redis.vadd(edgeCaseKey, "complete", args, 0.1, 0.2, 0.3);
// Element with missing price attribute
args = new VAddArgs();
args.attributes("{\"category\":\"electronics\",\"inStock\":false,\"tags\":[\"phone\"]}");
redis.vadd(edgeCaseKey, "missing_price", args, 0.2, 0.3, 0.4);
// Element with missing category attribute
args = new VAddArgs();
args.attributes("{\"price\":50.0,\"inStock\":true}");
redis.vadd(edgeCaseKey, "missing_category", args, 0.3, 0.4, 0.5);
// Element with boolean attributes
args = new VAddArgs();
args.attributes("{\"category\":\"clothing\",\"price\":25.0,\"inStock\":true,\"onSale\":true}");
redis.vadd(edgeCaseKey, "boolean_attrs", args, 0.4, 0.5, 0.6);
// Element with no attributes
redis.vadd(edgeCaseKey, "no_attrs", 0.5, 0.6, 0.7);
// Element with empty attributes
args = new VAddArgs();
args.attributes("{}");
redis.vadd(edgeCaseKey, "empty_attrs", args, 0.6, 0.7, 0.8);
// Test filtering on missing attributes
VSimArgs vSimArgs = new VSimArgs();
vSimArgs.filter(".price > 50");
List<String> similar = redis.vsim(edgeCaseKey, vSimArgs, 0.3, 0.4, 0.5);
// Should only include "complete" as it's the only one with price > 50
assertThat(similar).hasSize(1);
assertThat(similar.get(0)).isEqualTo("complete");
// Test filtering with boolean attributes
vSimArgs = new VSimArgs();
vSimArgs.filter(".inStock");
similar = redis.vsim(edgeCaseKey, vSimArgs, 0.3, 0.4, 0.5);
// Should include "complete", "missing_category", and "boolean_attrs"
assertThat(similar).hasSize(3);
assertThat(similar).contains("complete", "missing_category", "boolean_attrs");
// Test filtering with the IN operator and arrays
vSimArgs = new VSimArgs();
vSimArgs.filter("\"laptop\" in .tags");
similar = redis.vsim(edgeCaseKey, vSimArgs, 0.3, 0.4, 0.5);
// Should only include "complete"
assertThat(similar).hasSize(1);
assertThat(similar.get(0)).isEqualTo("complete");
}
/**
* Test different quantization types as shown in the Redis documentation.
*/
@Test
void testQuantizationTypes() {
// Test Q8 quantization
VAddArgs q8Args = new VAddArgs();
q8Args.quantizationType(QuantizationType.Q8);
redis.vadd("quantSetQ8", "quantElement", q8Args, 1.262185, 1.958231);
List<Double> q8Vector = redis.vemb("quantSetQ8", "quantElement");
assertThat(q8Vector).hasSize(2);
// Values will be slightly different due to quantization
// Test NOQUANT (no quantization)
VAddArgs noQuantArgs = new VAddArgs();
noQuantArgs.quantizationType(QuantizationType.NO_QUANTIZATION);
redis.vadd("quantSetNoQ", "quantElement", noQuantArgs, 1.262185, 1.958231);
List<Double> noQuantVector = redis.vemb("quantSetNoQ", "quantElement");
assertThat(noQuantVector).hasSize(2);
assertThat(noQuantVector.get(0)).isCloseTo(1.262185, within(0.0001));
assertThat(noQuantVector.get(1)).isCloseTo(1.958231, within(0.0001));
// Test BIN (binary) quantization
VAddArgs binArgs = new VAddArgs();
binArgs.quantizationType(QuantizationType.BINARY);
redis.vadd("quantSetBin", "quantElement", binArgs, 1.262185, 1.958231);
List<Double> binVector = redis.vemb("quantSetBin", "quantElement");
assertThat(binVector).hasSize(2);
// Binary quantization will convert values to either 1 or -1
assertThat(binVector.get(0)).isIn(1.0, -1.0);
assertThat(binVector.get(1)).isIn(1.0, -1.0);
}
/**
* Test dimensionality reduction as shown in the Redis documentation.
*/
@Test
void testDimensionalityReduction() {
// Create a vector with 300 dimensions
Double[] values = new Double[300];
for (int i = 0; i < 300; i++) {
values[i] = (double) i / 299;
}
// Add the vector without dimensionality reduction
redis.vadd("setNotReduced", "element", values);
Long dim = redis.vdim("setNotReduced");
assertThat(dim).isEqualTo(300);
redis.vadd("setReduced", 100, "element", values);
dim = redis.vdim("setReduced");
assertThat(dim).isEqualTo(100);
}
/**
* Test vector set metadata information.
*/
@Test
void testVectorSetMetadata() {
VectorMetadata info = redis.vinfo(POINTS_KEY);
assertThat(info).isNotNull();
assertThat(info.getDimensionality()).isEqualTo(2);
assertThat(info.getSize()).isEqualTo(5);
assertThat(info.getType()).isNotNull();
}
/**
* Test random sampling from a vector set.
*/
@Test
void testRandomSampling() {
// Get a single random element
String randomElement = redis.vrandmember(POINTS_KEY);
assertThat(randomElement).isNotNull();
assertThat(randomElement).startsWith("pt:");
// Get multiple random elements
List<String> randomElements = redis.vrandmember(POINTS_KEY, 3);
assertThat(randomElements).hasSize(3);
for (String element : randomElements) {
assertThat(element).startsWith("pt:");
}
}
/**
* Test HNSW graph links.
*/
@Test
void testHnswGraphLinks() {
// Get links for point A
List<String> links = redis.vlinks(POINTS_KEY, "pt:A");
assertThat(links).isNotEmpty();
// Get links with scores
Map<String, Double> linksWithScores = redis.vlinksWithScores(POINTS_KEY, "pt:A");
assertThat(linksWithScores).isNotEmpty();
}
}
| RedisVectorSetAdvancedIntegrationTests |
java | apache__flink | flink-table/flink-table-api-java-bridge/src/test/java/org/apache/flink/table/factories/BlackHoleSinkFactoryTest.java | {
"start": 1647,
"end": 3077
} | class ____ {
private static final ResolvedSchema SCHEMA =
ResolvedSchema.of(
Column.physical("f0", DataTypes.STRING()),
Column.physical("f1", DataTypes.BIGINT()),
Column.physical("f2", DataTypes.BIGINT()));
@Test
void testBlackHole() {
Map<String, String> properties = new HashMap<>();
properties.put("connector", "blackhole");
List<String> partitionKeys = Arrays.asList("f0", "f1");
DynamicTableSink sink = createTableSink(SCHEMA, partitionKeys, properties);
assertThat(sink.asSummaryString()).isEqualTo("BlackHole");
assertThat(sink).isInstanceOf(SupportsPartitioning.class);
}
@Test
void testWrongKey() {
try {
Map<String, String> properties = new HashMap<>();
properties.put("connector", "blackhole");
properties.put("unknown-key", "1");
createTableSink(SCHEMA, properties);
} catch (ValidationException e) {
Throwable cause = e.getCause();
assertThat(cause).as(cause.toString()).isInstanceOf(ValidationException.class);
assertThat(cause.getMessage())
.as(cause.getMessage())
.contains("Unsupported options:\n\nunknown-key");
return;
}
fail("Should fail by ValidationException.");
}
}
| BlackHoleSinkFactoryTest |
java | redisson__redisson | redisson/src/main/java/org/redisson/api/RShardedTopic.java | {
"start": 794,
"end": 857
} | interface ____ extends RTopic, RShardedTopicAsync {
}
| RShardedTopic |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/mysql/createTable/MySqlCreateTableTest69.java | {
"start": 974,
"end": 2386
} | class ____ extends MysqlTest {
@Test
public void test_one() throws Exception {
String sql = "CREATE TABLE t1 ("
+ " s1 INT,"
+ " s2 INT AS (EXP(s1)) STORED"
+ ")"
+ "PARTITION BY LIST (s2) ("
+ " PARTITION p1 VALUES IN (1)"
+ ");";
MySqlStatementParser parser = new MySqlStatementParser(sql);
SQLStatement stmt = parser.parseCreateTable();
MySqlSchemaStatVisitor visitor = new MySqlSchemaStatVisitor();
stmt.accept(visitor);
{
String output = SQLUtils.toMySqlString(stmt);
assertEquals("CREATE TABLE t1 (\n" +
"\ts1 INT,\n" +
"\ts2 INT AS (EXP(s1)) STORED\n" +
")\n" +
"PARTITION BY LIST (s2) (\n" +
"\tPARTITION p1 VALUES IN (1)\n" +
")", output);
}
{
String output = SQLUtils.toMySqlString(stmt, SQLUtils.DEFAULT_LCASE_FORMAT_OPTION);
assertEquals("create table t1 (\n" +
"\ts1 INT,\n" +
"\ts2 INT as (EXP(s1)) stored\n" +
")\n" +
"partition by list (s2) (\n" +
"\tpartition p1 values in (1)\n" +
")", output);
}
}
}
| MySqlCreateTableTest69 |
java | apache__flink | flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/collector/TableFunctionResultFuture.java | {
"start": 1172,
"end": 2576
} | class ____<T> extends AbstractRichFunction
implements ResultFuture<T> {
private static final long serialVersionUID = 1L;
private Object input;
private ResultFuture<?> resultFuture;
/**
* Sets the input row from left table, which will be used to cross join with the result of right
* table.
*/
public void setInput(Object input) {
this.input = input;
}
/**
* Gets the input value from left table, which will be used to cross join with the result of
* right table.
*/
public Object getInput() {
return input;
}
/** Sets the current collector, which used to emit the final row. */
public void setResultFuture(ResultFuture<?> resultFuture) {
this.resultFuture = resultFuture;
}
/** Gets the internal collector which used to emit the final row. */
public ResultFuture<?> getResultFuture() {
return this.resultFuture;
}
@Override
public void completeExceptionally(Throwable error) {
this.resultFuture.completeExceptionally(error);
}
/**
* Unsupported, because the containing classes are AsyncFunctions which don't have access to the
* mailbox to invoke from the caller thread.
*/
@Override
public void complete(CollectionSupplier<T> supplier) {
throw new UnsupportedOperationException();
}
}
| TableFunctionResultFuture |
java | apache__camel | components/camel-spring-parent/camel-spring-xml/src/test/java/org/apache/camel/spring/processor/SpringCustomExpressionTest.java | {
"start": 1750,
"end": 2137
} | class ____ extends org.apache.camel.support.ExpressionAdapter {
@Override
public Object evaluate(Exchange exchange) {
String body = exchange.getIn().getBody(String.class);
if (body.contains("Camel")) {
return "Yes Camel rocks";
} else {
return "Hello " + body;
}
}
}
}
| MyExpression |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/bean/override/mockito/MockitoBeanContextCustomizerEqualityTests.java | {
"start": 3501,
"end": 3586
} | class ____ {
@MockitoSpyBean
private String exampleService;
}
static | Case4ByType |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/tofix/ExternalTypeCustomResolver1288Test.java | {
"start": 6922,
"end": 8297
} | class ____ implements Builder {
private String cardHolderFirstName;
private String cardHolderLastName;
private String number;
private int csc;
@Override
public CreditCardDetails build() {
return new CreditCardDetails (cardHolderFirstName, cardHolderLastName, number, csc,
"COMPANY CREDIT CARD");
}
public CompanyCreditCardDetailsBuilder cardHolderFirstName(final String cardHolderFirstName) {
this.cardHolderFirstName = cardHolderFirstName;
return this;
}
public CompanyCreditCardDetailsBuilder cardHolderLastName(final String cardHolderLastName) {
this.cardHolderLastName = cardHolderLastName;
return this;
}
public CompanyCreditCardDetailsBuilder csc(final int csc) {
this.csc = csc;
return this;
}
public CompanyCreditCardDetailsBuilder number(final String number) {
this.number = number;
return this;
}
}
@JsonPOJOBuilder (withPrefix = "")
public static | CompanyCreditCardDetailsBuilder |
java | spring-projects__spring-framework | spring-beans/src/main/java/org/springframework/beans/factory/support/InstanceSupplier.java | {
"start": 1442,
"end": 5053
} | interface ____<T> extends ThrowingSupplier<T> {
@Override
default T getWithException() {
throw new IllegalStateException("No RegisteredBean parameter provided");
}
/**
* Get the supplied instance.
* @param registeredBean the registered bean requesting the instance
* @return the supplied instance
* @throws Exception on error
*/
T get(RegisteredBean registeredBean) throws Exception;
/**
* Return the factory method that this supplier uses to create the
* instance, or {@code null} if it is not known or this supplier uses
* another means.
* @return the factory method used to create the instance, or {@code null}
*/
default @Nullable Method getFactoryMethod() {
return null;
}
/**
* Return a composed instance supplier that first obtains the instance from
* this supplier and then applies the {@code after} function to obtain the
* result.
* @param <V> the type of output of the {@code after} function, and of the
* composed function
* @param after the function to apply after the instance is obtained
* @return a composed instance supplier
*/
default <V> InstanceSupplier<V> andThen(
ThrowingBiFunction<RegisteredBean, ? super T, ? extends V> after) {
Assert.notNull(after, "'after' function must not be null");
return new InstanceSupplier<>() {
@Override
public V get(RegisteredBean registeredBean) throws Exception {
return after.applyWithException(registeredBean, InstanceSupplier.this.get(registeredBean));
}
@Override
public @Nullable Method getFactoryMethod() {
return InstanceSupplier.this.getFactoryMethod();
}
};
}
/**
* Factory method to create an {@link InstanceSupplier} from a
* {@link ThrowingSupplier}.
* @param <T> the type of instance supplied by this supplier
* @param supplier the source supplier
* @return a new {@link InstanceSupplier}
*/
static <T> InstanceSupplier<T> using(ThrowingSupplier<T> supplier) {
Assert.notNull(supplier, "Supplier must not be null");
if (supplier instanceof InstanceSupplier<T> instanceSupplier) {
return instanceSupplier;
}
return registeredBean -> supplier.getWithException();
}
/**
* Factory method to create an {@link InstanceSupplier} from a
* {@link ThrowingSupplier}.
* @param <T> the type of instance supplied by this supplier
* @param factoryMethod the factory method being used
* @param supplier the source supplier
* @return a new {@link InstanceSupplier}
*/
static <T> InstanceSupplier<T> using(@Nullable Method factoryMethod, ThrowingSupplier<T> supplier) {
Assert.notNull(supplier, "Supplier must not be null");
if (supplier instanceof InstanceSupplier<T> instanceSupplier &&
instanceSupplier.getFactoryMethod() == factoryMethod) {
return instanceSupplier;
}
return new InstanceSupplier<>() {
@Override
public T get(RegisteredBean registeredBean) throws Exception {
return supplier.getWithException();
}
@Override
public @Nullable Method getFactoryMethod() {
return factoryMethod;
}
};
}
/**
* Lambda friendly method that can be used to create an
* {@link InstanceSupplier} and add post processors in a single call. For
* example: {@code InstanceSupplier.of(registeredBean -> ...).andThen(...)}.
* @param <T> the type of instance supplied by this supplier
* @param instanceSupplier the source instance supplier
* @return a new {@link InstanceSupplier}
*/
static <T> InstanceSupplier<T> of(InstanceSupplier<T> instanceSupplier) {
Assert.notNull(instanceSupplier, "InstanceSupplier must not be null");
return instanceSupplier;
}
}
| InstanceSupplier |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/adaptive/ExecutingTest.java | {
"start": 5937,
"end": 33554
} | class ____ {
private static final Logger log = LoggerFactory.getLogger(ExecutingTest.class);
@RegisterExtension
private static final TestExecutorExtension<ScheduledExecutorService> EXECUTOR_EXTENSION =
TestingUtils.defaultExecutorExtension();
@Test
void testExecutionGraphDeploymentOnEnter() throws Exception {
try (MockExecutingContext ctx = new MockExecutingContext()) {
MockExecutionJobVertex mockExecutionJobVertex =
new MockExecutionJobVertex(MockExecutionVertex::new);
MockExecutionVertex mockExecutionVertex =
(MockExecutionVertex) mockExecutionJobVertex.getMockExecutionVertex();
mockExecutionVertex.setMockedExecutionState(ExecutionState.CREATED);
ExecutionGraph executionGraph =
new MockExecutionGraph(() -> Collections.singletonList(mockExecutionJobVertex));
Executing exec =
new ExecutingStateBuilder().setExecutionGraph(executionGraph).build(ctx);
assertThat(mockExecutionVertex.isDeployCalled()).isTrue();
assertThat(executionGraph.getState()).isEqualTo(JobStatus.RUNNING);
}
}
@Test
void testNoDeploymentCallOnEnterWhenVertexRunning() throws Exception {
try (MockExecutingContext ctx = new MockExecutingContext()) {
MockExecutionJobVertex mockExecutionJobVertex =
new MockExecutionJobVertex(MockExecutionVertex::new);
ExecutionGraph executionGraph =
new MockExecutionGraph(() -> Collections.singletonList(mockExecutionJobVertex));
executionGraph.transitionToRunning();
final MockExecutionVertex mockExecutionVertex =
(MockExecutionVertex) mockExecutionJobVertex.getMockExecutionVertex();
mockExecutionVertex.setMockedExecutionState(ExecutionState.RUNNING);
new Executing(
executionGraph,
getExecutionGraphHandler(executionGraph, ctx.getMainThreadExecutor()),
new TestingOperatorCoordinatorHandler(),
log,
ctx,
ClassLoader.getSystemClassLoader(),
new ArrayList<>(),
(context) -> TestingStateTransitionManager.withNoOp(),
1);
assertThat(mockExecutionVertex.isDeployCalled()).isFalse();
}
}
@Test
void testIllegalStateExceptionOnNotRunningExecutionGraph() {
assertThatThrownBy(
() -> {
try (MockExecutingContext ctx = new MockExecutingContext()) {
ExecutionGraph notRunningExecutionGraph =
new StateTrackingMockExecutionGraph();
assertThat(notRunningExecutionGraph.getState())
.isNotEqualTo(JobStatus.RUNNING);
new Executing(
notRunningExecutionGraph,
getExecutionGraphHandler(
notRunningExecutionGraph,
ctx.getMainThreadExecutor()),
new TestingOperatorCoordinatorHandler(),
log,
ctx,
ClassLoader.getSystemClassLoader(),
new ArrayList<>(),
context -> TestingStateTransitionManager.withNoOp(),
1);
}
})
.isInstanceOf(IllegalStateException.class);
}
@Test
public void testTriggerRescaleOnCompletedCheckpoint() throws Exception {
final AtomicBoolean rescaleTriggered = new AtomicBoolean();
final Function<StateTransitionManager.Context, StateTransitionManager>
stateTransitionManagerFactory =
context ->
TestingStateTransitionManager.withOnTriggerEventOnly(
() -> rescaleTriggered.set(true));
try (MockExecutingContext ctx = new MockExecutingContext()) {
final Executing testInstance =
new ExecutingStateBuilder()
.setStateTransitionManagerFactory(stateTransitionManagerFactory)
.build(ctx);
assertThat(rescaleTriggered).isFalse();
testInstance.onCompletedCheckpoint();
assertThat(rescaleTriggered).isTrue();
}
}
@Test
public void testTriggerRescaleOnFailedCheckpoint() throws Exception {
final AtomicInteger rescaleTriggerCount = new AtomicInteger();
final Function<StateTransitionManager.Context, StateTransitionManager>
stateTransitionManagerFactory =
context ->
TestingStateTransitionManager.withOnTriggerEventOnly(
rescaleTriggerCount::incrementAndGet);
final int rescaleOnFailedCheckpointsCount = 3;
try (MockExecutingContext ctx = new MockExecutingContext()) {
final Executing testInstance =
new ExecutingStateBuilder()
.setStateTransitionManagerFactory(stateTransitionManagerFactory)
.setRescaleOnFailedCheckpointCount(rescaleOnFailedCheckpointsCount)
.build(ctx);
// do multiple rescale iterations to verify that subsequent failed checkpoints after a
// rescale result in the expected behavior
for (int rescaleIteration = 1; rescaleIteration <= 3; rescaleIteration++) {
// trigger an initial failed checkpoint event to show that the counting only starts
// with the subsequent change event
testInstance.onFailedCheckpoint();
// trigger change
testInstance.onNewResourceRequirements();
for (int i = 0; i < rescaleOnFailedCheckpointsCount; i++) {
assertThat(rescaleTriggerCount)
.as(
"No rescale operation should have been triggered for iteration #%d, yet.",
rescaleIteration)
.hasValue(rescaleIteration - 1);
testInstance.onFailedCheckpoint();
}
assertThat(rescaleTriggerCount)
.as(
"The rescale operation for iteration #%d should have been properly triggered.",
rescaleIteration)
.hasValue(rescaleIteration);
}
}
}
@Test
public void testOnCompletedCheckpointResetsFailedCheckpointCount() throws Exception {
final AtomicInteger rescaleTriggeredCount = new AtomicInteger();
final Function<StateTransitionManager.Context, StateTransitionManager>
stateTransitionManagerFactory =
context ->
TestingStateTransitionManager.withOnTriggerEventOnly(
rescaleTriggeredCount::incrementAndGet);
final int rescaleOnFailedCheckpointsCount = 3;
try (MockExecutingContext ctx = new MockExecutingContext()) {
final Executing testInstance =
new ExecutingStateBuilder()
.setStateTransitionManagerFactory(stateTransitionManagerFactory)
.setRescaleOnFailedCheckpointCount(rescaleOnFailedCheckpointsCount)
.build(ctx);
// trigger an initial failed checkpoint event to show that the counting only starts with
// the subsequent change event
testInstance.onFailedCheckpoint();
// trigger change
testInstance.onNewResourcesAvailable();
IntStream.range(0, rescaleOnFailedCheckpointsCount - 1)
.forEach(ignored -> testInstance.onFailedCheckpoint());
assertThat(rescaleTriggeredCount)
.as("No rescaling should have been trigger, yet.")
.hasValue(0);
testInstance.onCompletedCheckpoint();
// trigger change
testInstance.onNewResourceRequirements();
assertThat(rescaleTriggeredCount)
.as("The completed checkpoint should have triggered a rescale.")
.hasValue(1);
IntStream.range(0, rescaleOnFailedCheckpointsCount - 1)
.forEach(ignored -> testInstance.onFailedCheckpoint());
assertThat(rescaleTriggeredCount)
.as(
"No additional rescaling should have been trigger by any subsequent failed checkpoint, yet.")
.hasValue(1);
testInstance.onFailedCheckpoint();
assertThat(rescaleTriggeredCount)
.as("The previous failed checkpoint should have triggered the rescale.")
.hasValue(2);
}
}
@Test
void testDisposalOfOperatorCoordinatorsOnLeaveOfStateWithExecutionGraph() throws Exception {
try (MockExecutingContext ctx = new MockExecutingContext()) {
TestingOperatorCoordinatorHandler operatorCoordinator =
new TestingOperatorCoordinatorHandler();
Executing exec =
new ExecutingStateBuilder()
.setOperatorCoordinatorHandler(operatorCoordinator)
.build(ctx);
exec.onLeave(MockState.class);
assertThat(operatorCoordinator.isDisposed()).isTrue();
}
}
@Test
void testUnrecoverableGlobalFailureTransitionsToFailingState() throws Exception {
final String failureMsg = "test exception";
try (MockExecutingContext ctx = new MockExecutingContext()) {
Executing exec = new ExecutingStateBuilder().build(ctx);
ctx.setExpectFailing(
failingArguments -> {
assertThat(failingArguments.getExecutionGraph()).isNotNull();
assertThat(failingArguments.getFailureCause().getMessage())
.isEqualTo(failureMsg);
});
ctx.setHowToHandleFailure(FailureResult::canNotRestart);
exec.handleGlobalFailure(
new RuntimeException(failureMsg), FailureEnricherUtils.EMPTY_FAILURE_LABELS);
}
}
@Test
void testRecoverableGlobalFailureTransitionsToRestarting() throws Exception {
final Duration duration = Duration.ZERO;
try (MockExecutingContext ctx = new MockExecutingContext()) {
Executing exec = new ExecutingStateBuilder().build(ctx);
ctx.setExpectRestarting(
restartingArguments -> {
assertThat(restartingArguments.getBackoffTime()).isEqualTo(duration);
assertThat(restartingArguments.getRestartWithParallelism()).isEmpty();
});
ctx.setHowToHandleFailure(f -> FailureResult.canRestart(f, duration));
exec.handleGlobalFailure(
new RuntimeException("Recoverable error"),
FailureEnricherUtils.EMPTY_FAILURE_LABELS);
}
}
@Test
void testCancelTransitionsToCancellingState() throws Exception {
try (MockExecutingContext ctx = new MockExecutingContext()) {
Executing exec = new ExecutingStateBuilder().build(ctx);
ctx.setExpectCancelling(assertNonNull());
exec.cancel();
}
}
@Test
void testTransitionToFinishedOnFailedExecutionGraph() throws Exception {
try (MockExecutingContext ctx = new MockExecutingContext()) {
Executing exec = new ExecutingStateBuilder().build(ctx);
ctx.setExpectFinished(
archivedExecutionGraph ->
assertThat(archivedExecutionGraph.getState())
.isEqualTo(JobStatus.FAILED));
// transition EG into terminal state, which will notify the Executing state about the
// failure (async via the supplied executor)
exec.getExecutionGraph()
.failJob(new RuntimeException("test failure"), System.currentTimeMillis());
}
}
@Test
void testTransitionToFinishedOnSuspend() throws Exception {
try (MockExecutingContext ctx = new MockExecutingContext()) {
Executing exec = new ExecutingStateBuilder().build(ctx);
ctx.setExpectFinished(
archivedExecutionGraph ->
assertThat(archivedExecutionGraph.getState())
.isEqualTo(JobStatus.SUSPENDED));
exec.suspend(new RuntimeException("suspend"));
}
}
@Test
void testFailureReportedViaUpdateTaskExecutionStateCausesFailingOnNoRestart() throws Exception {
try (MockExecutingContext ctx = new MockExecutingContext()) {
StateTrackingMockExecutionGraph returnsFailedStateExecutionGraph =
new StateTrackingMockExecutionGraph();
Executing exec =
new ExecutingStateBuilder()
.setExecutionGraph(returnsFailedStateExecutionGraph)
.build(ctx);
ctx.setHowToHandleFailure(FailureResult::canNotRestart);
ctx.setExpectFailing(assertNonNull());
Exception exception = new RuntimeException();
TestingAccessExecution execution =
TestingAccessExecution.newBuilder()
.withExecutionState(ExecutionState.FAILED)
.withErrorInfo(new ErrorInfo(exception, System.currentTimeMillis()))
.build();
returnsFailedStateExecutionGraph.registerExecution(execution);
TaskExecutionStateTransition taskExecutionStateTransition =
createFailingStateTransition(execution.getAttemptId(), exception);
exec.updateTaskExecutionState(
taskExecutionStateTransition, FailureEnricherUtils.EMPTY_FAILURE_LABELS);
}
}
@Test
void testFailureReportedViaUpdateTaskExecutionStateCausesRestart() throws Exception {
try (MockExecutingContext ctx = new MockExecutingContext()) {
StateTrackingMockExecutionGraph returnsFailedStateExecutionGraph =
new StateTrackingMockExecutionGraph();
Executing exec =
new ExecutingStateBuilder()
.setExecutionGraph(returnsFailedStateExecutionGraph)
.build(ctx);
ctx.setHowToHandleFailure(failure -> FailureResult.canRestart(failure, Duration.ZERO));
ctx.setExpectRestarting(
restartingArguments -> {
assertThat(restartingArguments).isNotNull();
assertThat(restartingArguments.getRestartWithParallelism()).isEmpty();
});
Exception exception = new RuntimeException();
TestingAccessExecution execution =
TestingAccessExecution.newBuilder()
.withExecutionState(ExecutionState.FAILED)
.withErrorInfo(new ErrorInfo(exception, System.currentTimeMillis()))
.build();
returnsFailedStateExecutionGraph.registerExecution(execution);
TaskExecutionStateTransition taskExecutionStateTransition =
createFailingStateTransition(execution.getAttemptId(), exception);
exec.updateTaskExecutionState(
taskExecutionStateTransition, FailureEnricherUtils.EMPTY_FAILURE_LABELS);
}
}
@Test
void testFalseReportsViaUpdateTaskExecutionStateAreIgnored() throws Exception {
try (MockExecutingContext ctx = new MockExecutingContext()) {
MockExecutionGraph returnsFailedStateExecutionGraph =
new MockExecutionGraph(false, Collections::emptyList);
Executing exec =
new ExecutingStateBuilder()
.setExecutionGraph(returnsFailedStateExecutionGraph)
.build(ctx);
Exception exception = new RuntimeException();
TestingAccessExecution execution =
TestingAccessExecution.newBuilder()
.withExecutionState(ExecutionState.FAILED)
.withErrorInfo(new ErrorInfo(exception, System.currentTimeMillis()))
.build();
returnsFailedStateExecutionGraph.registerExecution(execution);
TaskExecutionStateTransition taskExecutionStateTransition =
createFailingStateTransition(execution.getAttemptId(), exception);
exec.updateTaskExecutionState(
taskExecutionStateTransition, FailureEnricherUtils.EMPTY_FAILURE_LABELS);
ctx.assertNoStateTransition();
}
}
@Test
void testExecutionVertexMarkedAsFailedOnDeploymentFailure() throws Exception {
try (MockExecutingContext ctx = new MockExecutingContext()) {
MockExecutionJobVertex mejv =
new MockExecutionJobVertex(FailOnDeployMockExecutionVertex::new);
ExecutionGraph executionGraph =
new MockExecutionGraph(() -> Collections.singletonList(mejv));
Executing exec =
new ExecutingStateBuilder().setExecutionGraph(executionGraph).build(ctx);
assertThat(
((FailOnDeployMockExecutionVertex) mejv.getMockExecutionVertex())
.getMarkedFailure())
.isInstanceOf(JobException.class);
}
}
@Test
void testTransitionToStopWithSavepointState() throws Exception {
try (MockExecutingContext ctx = new MockExecutingContext()) {
CheckpointCoordinator coordinator =
new CheckpointCoordinatorTestingUtils.CheckpointCoordinatorBuilder()
.build(EXECUTOR_EXTENSION.getExecutor());
StateTrackingMockExecutionGraph mockedExecutionGraphWithCheckpointCoordinator =
new StateTrackingMockExecutionGraph() {
@Nullable
@Override
public CheckpointCoordinator getCheckpointCoordinator() {
return coordinator;
}
};
Executing exec =
new ExecutingStateBuilder()
.setExecutionGraph(mockedExecutionGraphWithCheckpointCoordinator)
.build(ctx);
ctx.setExpectStopWithSavepoint(assertNonNull());
exec.stopWithSavepoint("file:///tmp/target", true, SavepointFormatType.CANONICAL);
}
}
@Test
void testCheckpointSchedulerIsStoppedOnStopWithSavepoint() throws Exception {
try (MockExecutingContext ctx = new MockExecutingContext()) {
CheckpointCoordinator coordinator =
new CheckpointCoordinatorTestingUtils.CheckpointCoordinatorBuilder()
.build(EXECUTOR_EXTENSION.getExecutor());
StateTrackingMockExecutionGraph mockedExecutionGraphWithCheckpointCoordinator =
new StateTrackingMockExecutionGraph() {
@Nullable
@Override
public CheckpointCoordinator getCheckpointCoordinator() {
return coordinator;
}
};
Executing exec =
new ExecutingStateBuilder()
.setExecutionGraph(mockedExecutionGraphWithCheckpointCoordinator)
.build(ctx);
coordinator.startCheckpointScheduler();
// we assume checkpointing to be enabled
assertThat(coordinator.isPeriodicCheckpointingStarted()).isTrue();
ctx.setExpectStopWithSavepoint(assertNonNull());
exec.stopWithSavepoint("file:///tmp/target", true, SavepointFormatType.CANONICAL);
assertThat(coordinator.isPeriodicCheckpointingStarted()).isFalse();
}
}
@Test
void testJobInformationMethods() throws Exception {
try (MockExecutingContext ctx = new MockExecutingContext()) {
Executing exec = new ExecutingStateBuilder().build(ctx);
final JobID jobId = exec.getExecutionGraph().getJobID();
assertThat(exec.getJob()).isInstanceOf(ArchivedExecutionGraph.class);
assertThat(exec.getJob().getJobID()).isEqualTo(jobId);
assertThat(exec.getJobStatus()).isEqualTo(JobStatus.RUNNING);
}
}
@Test
void testStateDoesNotExposeGloballyTerminalExecutionGraph() throws Exception {
try (MockExecutingContext ctx = new MockExecutingContext()) {
final FinishingMockExecutionGraph finishingMockExecutionGraph =
new FinishingMockExecutionGraph();
Executing executing =
new ExecutingStateBuilder()
.setExecutionGraph(finishingMockExecutionGraph)
.build(ctx);
// ideally we'd delay the async call to #onGloballyTerminalState instead, but the
// context does not support that
ctx.setExpectFinished(eg -> {});
finishingMockExecutionGraph.completeTerminationFuture(JobStatus.FINISHED);
// this is just a sanity check for the test
assertThat(executing.getExecutionGraph().getState()).isEqualTo(JobStatus.FINISHED);
assertThat(executing.getJobStatus()).isEqualTo(JobStatus.RUNNING);
assertThat(executing.getJob().getState()).isEqualTo(JobStatus.RUNNING);
assertThat(executing.getJob().getStatusTimestamp(JobStatus.FINISHED)).isZero();
}
}
@Test
void testExecutingChecksForNewResourcesWhenBeingCreated() throws Exception {
final String onChangeEventLabel = "onChange";
final String onTriggerEventLabel = "onTrigger";
final Queue<String> actualEvents = new ArrayDeque<>();
try (MockExecutingContext ctx = new MockExecutingContext()) {
new ExecutingStateBuilder()
.setStateTransitionManagerFactory(
context ->
new TestingStateTransitionManager(
() -> actualEvents.add(onChangeEventLabel),
() -> actualEvents.add(onTriggerEventLabel)))
.build(ctx);
ctx.triggerExecutors();
assertThat(actualEvents.poll()).isEqualTo(onChangeEventLabel);
assertThat(actualEvents.poll()).isEqualTo(onTriggerEventLabel);
assertThat(actualEvents.isEmpty()).isTrue();
}
}
@Test
public void testOmitsWaitingForResourcesStateWhenRestarting() throws Exception {
try (MockExecutingContext ctx = new MockExecutingContext()) {
final Executing testInstance = new ExecutingStateBuilder().build(ctx);
final VertexParallelism vertexParallelism =
new VertexParallelism(Collections.singletonMap(new JobVertexID(), 2));
ctx.setVertexParallelism(vertexParallelism);
ctx.setExpectRestarting(
restartingArguments ->
assertThat(restartingArguments.getRestartWithParallelism())
.hasValue(vertexParallelism));
testInstance.transitionToSubsequentState();
}
}
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void testInternalParallelismChangeBehavior(boolean parallelismChanged) throws Exception {
try (MockExecutingContext adaptiveSchedulerCtx = new MockExecutingContext()) {
final AtomicBoolean onChangeCalled = new AtomicBoolean();
final Function<StateTransitionManager.Context, StateTransitionManager>
stateTransitionManagerFactory =
transitionCtx ->
TestingStateTransitionManager.withOnChangeEventOnly(
() -> {
assertThat(transitionCtx.hasDesiredResources())
.isEqualTo(parallelismChanged);
assertThat(transitionCtx.hasSufficientResources())
.isEqualTo(parallelismChanged);
onChangeCalled.set(true);
});
final MockExecutionJobVertex mockExecutionJobVertex =
new MockExecutionJobVertex(MockExecutionVertex::new);
final ExecutionGraph executionGraph =
new MockExecutionGraph(() -> Collections.singletonList(mockExecutionJobVertex));
adaptiveSchedulerCtx.setHasDesiredResources(() -> true);
adaptiveSchedulerCtx.setHasSufficientResources(() -> true);
adaptiveSchedulerCtx.setVertexParallelism(
new VertexParallelism(
executionGraph.getAllVertices().values().stream()
.collect(
Collectors.toMap(
AccessExecutionJobVertex::getJobVertexId,
v ->
parallelismChanged
? 1 + v.getParallelism()
: v.getParallelism()))));
final Executing exec =
new ExecutingStateBuilder()
.setStateTransitionManagerFactory(stateTransitionManagerFactory)
.setExecutionGraph(executionGraph)
.build(adaptiveSchedulerCtx);
exec.onNewResourcesAvailable();
assertThat(onChangeCalled.get()).isTrue();
}
}
public static TaskExecutionStateTransition createFailingStateTransition(
ExecutionAttemptID attemptId, Exception exception) throws JobException {
return new TaskExecutionStateTransition(
new TaskExecutionState(attemptId, ExecutionState.FAILED, exception));
}
private final | ExecutingTest |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/impl/DefaultComponentAutowiredFalseTest.java | {
"start": 6414,
"end": 6635
} | class ____ implements ContentHandlerFactory {
@Override
public ContentHandler createContentHandler(String mimetype) {
return null;
}
}
private static final | MyContentHandlerFactory |
java | quarkusio__quarkus | integration-tests/gradle/src/main/resources/basic-multi-module-project-test-setup/application/src/main/java/com/demo/application/Application.java | {
"start": 188,
"end": 276
} | class ____ {
@Inject
BaseEntity model;
void schedule() {
}
}
| Application |
java | micronaut-projects__micronaut-core | core-processor/src/main/java/io/micronaut/inject/writer/BeanDefinitionWriter.java | {
"start": 247941,
"end": 248835
} | class ____ {
final TypedElement memberBeanType;
final String memberPropertyName;
final MethodElement memberPropertyGetter;
final String requiredValue;
final String notEqualsValue;
public AnnotationVisitData(TypedElement memberBeanType,
String memberPropertyName,
MethodElement memberPropertyGetter,
@Nullable String requiredValue,
@Nullable String notEqualsValue) {
this.memberBeanType = memberBeanType;
this.memberPropertyName = memberPropertyName;
this.memberPropertyGetter = memberPropertyGetter;
this.requiredValue = requiredValue;
this.notEqualsValue = notEqualsValue;
}
}
@Internal
private static final | AnnotationVisitData |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/inheritance/embeddable/EmbeddableInheritanceDiscriminatorFormulaTest.java | {
"start": 4158,
"end": 4410
} | class ____ {
protected String kind;
public FormulaEmbeddable() {
}
public FormulaEmbeddable(String kind) {
this.kind = kind;
}
}
@Embeddable
@DiscriminatorValue( "1" )
@Imported( rename = "ChildOneEmbeddable" )
static | FormulaEmbeddable |
java | netty__netty | codec-http/src/main/java/io/netty/handler/codec/http/HttpResponseStatus.java | {
"start": 19008,
"end": 20327
} | class ____ this {@link HttpResponseStatus}
*/
public HttpStatusClass codeClass() {
return this.codeClass;
}
@Override
public int hashCode() {
return code();
}
/**
* Equality of {@link HttpResponseStatus} only depends on {@link #code()}. The
* reason phrase is not considered for equality.
*/
@Override
public boolean equals(Object o) {
if (!(o instanceof HttpResponseStatus)) {
return false;
}
return code() == ((HttpResponseStatus) o).code();
}
/**
* Equality of {@link HttpResponseStatus} only depends on {@link #code()}. The
* reason phrase is not considered for equality.
*/
@Override
public int compareTo(HttpResponseStatus o) {
return code() - o.code();
}
@Override
public String toString() {
return new StringBuilder(reasonPhrase.length() + 4)
.append(codeAsText)
.append(' ')
.append(reasonPhrase)
.toString();
}
void encode(ByteBuf buf) {
if (bytes == null) {
ByteBufUtil.copy(codeAsText, buf);
buf.writeByte(SP);
buf.writeCharSequence(reasonPhrase, CharsetUtil.US_ASCII);
} else {
buf.writeBytes(bytes);
}
}
}
| of |
java | apache__kafka | streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/KafkaStreamsTelemetryIntegrationTest.java | {
"start": 32813,
"end": 33798
} | class ____<K, V> extends KafkaConsumer<K, V> implements TestingMetricsInterceptor {
private final List<KafkaMetric> passedMetrics = new ArrayList<>();
public TestingMetricsInterceptingConsumer(final Map<String, Object> configs, final Deserializer<K> keyDeserializer, final Deserializer<V> valueDeserializer) {
super(configs, keyDeserializer, valueDeserializer);
}
@Override
public void registerMetricForSubscription(final KafkaMetric metric) {
passedMetrics.add(metric);
super.registerMetricForSubscription(metric);
}
@Override
public void unregisterMetricFromSubscription(final KafkaMetric metric) {
passedMetrics.remove(metric);
super.unregisterMetricFromSubscription(metric);
}
@Override
public List<KafkaMetric> passedMetrics() {
return passedMetrics;
}
}
public static | TestingMetricsInterceptingConsumer |
java | apache__camel | components/camel-ftp/src/test/java/org/apache/camel/component/file/remote/integration/FtpProducerDisconnectOnBatchCompleteIT.java | {
"start": 1343,
"end": 2860
} | class ____ extends FtpServerTestSupport {
@Override
public void doPostSetup() throws Exception {
// ask the singleton FtpEndpoint to make use of a custom FTPClient
// so that we can hold a reference on it inside the test below
FtpEndpoint<?> endpoint = context.getEndpoint(getFtpUrl(), FtpEndpoint.class);
endpoint.setFtpClient(new FTPClient());
}
private String getFtpUrl() {
return "ftp://admin@localhost:{{ftp.server.port}}/done?password=admin&disconnectOnBatchComplete=true";
}
@Test
public void testDisconnectOnBatchComplete() {
sendFile(getFtpUrl(), "Hello World", "claus.txt");
FtpEndpoint<?> endpoint = context.getEndpoint(getFtpUrl(), FtpEndpoint.class);
await().atMost(2, TimeUnit.SECONDS)
.untilAsserted(() -> assertFalse(endpoint.getFtpClient().isConnected(),
"The FTPClient should be already disconnected"));
assertTrue(endpoint.isDisconnectOnBatchComplete(), "The FtpEndpoint should be configured to disconnect");
}
@Override
public void sendFile(String url, Object body, String fileName) {
template.send(url, new Processor() {
@Override
public void process(Exchange exchange) {
exchange.getIn().setHeader(Exchange.FILE_NAME, new SimpleExpression(fileName));
exchange.setProperty(Exchange.BATCH_COMPLETE, true);
}
});
}
}
| FtpProducerDisconnectOnBatchCompleteIT |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MountVolumeMap.java | {
"start": 1546,
"end": 3774
} | class ____ {
private final ConcurrentMap<String, MountVolumeInfo>
mountVolumeMapping;
private final Configuration conf;
MountVolumeMap(Configuration conf) {
mountVolumeMapping = new ConcurrentHashMap<>();
this.conf = conf;
}
FsVolumeReference getVolumeRefByMountAndStorageType(String mount,
StorageType storageType) {
if (mountVolumeMapping.containsKey(mount)) {
return mountVolumeMapping
.get(mount).getVolumeRef(storageType);
}
return null;
}
/**
* Return capacity ratio.
* If not exists, return 1 to use full capacity.
*/
double getCapacityRatioByMountAndStorageType(String mount,
StorageType storageType) {
if (mountVolumeMapping.containsKey(mount)) {
return mountVolumeMapping.get(mount).getCapacityRatio(storageType);
}
return 1;
}
void addVolume(FsVolumeImpl volume) {
String mount = volume.getMount();
if (!mount.isEmpty()) {
MountVolumeInfo info;
if (mountVolumeMapping.containsKey(mount)) {
info = mountVolumeMapping.get(mount);
} else {
info = new MountVolumeInfo(conf);
mountVolumeMapping.put(mount, info);
}
info.addVolume(volume);
}
}
void removeVolume(FsVolumeImpl target) {
String mount = target.getMount();
if (!mount.isEmpty()) {
MountVolumeInfo info = mountVolumeMapping.get(mount);
info.removeVolume(target);
if (info.size() == 0) {
mountVolumeMapping.remove(mount);
}
}
}
void setCapacityRatio(FsVolumeImpl target, double capacityRatio)
throws IOException {
String mount = target.getMount();
if (!mount.isEmpty()) {
MountVolumeInfo info = mountVolumeMapping.get(mount);
if (!info.setCapacityRatio(
target.getStorageType(), capacityRatio)) {
throw new IOException(
"Not enough capacity ratio left on mount: "
+ mount + ", for " + target + ": capacity ratio: "
+ capacityRatio + ". Sum of the capacity"
+ " ratio of on same disk mount should be <= 1");
}
}
}
public boolean hasMount(String mount) {
return mountVolumeMapping.containsKey(mount);
}
}
| MountVolumeMap |
java | quarkusio__quarkus | extensions/tls-registry/runtime/src/main/java/io/quarkus/tls/runtime/config/PemCertsConfig.java | {
"start": 860,
"end": 1100
} | interface ____ {
/**
* The path to the key file (in PEM format).
*/
Path key();
/**
* The path to the certificate file (in PEM format).
*/
Path cert();
}
}
| KeyCertConfig |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/query/hql/ImplicitJoinInOnClauseTest.java | {
"start": 2250,
"end": 2402
} | class ____ {
@Id
private Long id;
private String name;
}
@Entity(name = "SecondLevelReferencedEntityB")
public static | SecondLevelReferencedEntityA |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/OperationModeUpdateTask.java | {
"start": 1531,
"end": 6276
} | class ____ extends ClusterStateUpdateTask {
private static final Logger logger = LogManager.getLogger(OperationModeUpdateTask.class);
private final ProjectId projectId;
@Nullable
private final OperationMode ilmMode;
@Nullable
private final OperationMode slmMode;
public static AckedClusterStateUpdateTask wrap(
OperationModeUpdateTask task,
AcknowledgedRequest<?> request,
ActionListener<AcknowledgedResponse> listener
) {
return new AckedClusterStateUpdateTask(task.priority(), request, listener) {
@Override
public ClusterState execute(ClusterState currentState) {
return task.execute(currentState);
}
};
}
private OperationModeUpdateTask(Priority priority, ProjectId projectId, OperationMode ilmMode, OperationMode slmMode) {
super(priority);
this.projectId = projectId;
this.ilmMode = ilmMode;
this.slmMode = slmMode;
}
public static OperationModeUpdateTask ilmMode(ProjectId projectId, OperationMode mode) {
return new OperationModeUpdateTask(getPriority(mode), projectId, mode, null);
}
public static OperationModeUpdateTask slmMode(OperationMode mode) {
@FixForMultiProject // Use non-default ID when SLM has been made project-aware
final var projectId = ProjectId.DEFAULT;
return new OperationModeUpdateTask(getPriority(mode), projectId, null, mode);
}
private static Priority getPriority(OperationMode mode) {
if (mode == OperationMode.STOPPED || mode == OperationMode.STOPPING) {
return Priority.IMMEDIATE;
} else {
return Priority.NORMAL;
}
}
public OperationMode getILMOperationMode() {
return ilmMode;
}
public OperationMode getSLMOperationMode() {
return slmMode;
}
@Override
public ClusterState execute(ClusterState currentState) {
ProjectMetadata oldProject = currentState.metadata().getProject(projectId);
ProjectMetadata newProject = updateILMState(oldProject);
newProject = updateSLMState(newProject);
if (newProject == oldProject) {
return currentState;
}
return ClusterState.builder(currentState).putProjectMetadata(newProject).build();
}
private ProjectMetadata updateILMState(final ProjectMetadata currentProject) {
if (ilmMode == null) {
return currentProject;
}
final OperationMode currentMode = currentILMMode(currentProject);
if (currentMode.equals(ilmMode)) {
// No need for a new state
return currentProject;
}
final OperationMode newMode;
if (currentMode.isValidChange(ilmMode)) {
newMode = ilmMode;
} else {
// The transition is invalid, return the current state
return currentProject;
}
logger.info("updating ILM operation mode to {}", newMode);
final var updatedMetadata = new LifecycleOperationMetadata(newMode, currentSLMMode(currentProject));
return currentProject.copyAndUpdate(b -> b.putCustom(LifecycleOperationMetadata.TYPE, updatedMetadata));
}
private ProjectMetadata updateSLMState(final ProjectMetadata currentProject) {
if (slmMode == null) {
return currentProject;
}
final OperationMode currentMode = currentSLMMode(currentProject);
if (currentMode.equals(slmMode)) {
// No need for a new state
return currentProject;
}
final OperationMode newMode;
if (currentMode.isValidChange(slmMode)) {
newMode = slmMode;
} else {
// The transition is invalid, return the current state
return currentProject;
}
logger.info("updating SLM operation mode to {}", newMode);
final var updatedMetadata = new LifecycleOperationMetadata(currentILMMode(currentProject), newMode);
return currentProject.copyAndUpdate(b -> b.putCustom(LifecycleOperationMetadata.TYPE, updatedMetadata));
}
@Override
public void onFailure(Exception e) {
logger.error(
() -> Strings.format("unable to update lifecycle metadata with new ilm mode [%s], slm mode [%s]", ilmMode, slmMode),
e
);
}
@Override
public void clusterStateProcessed(ClusterState oldState, ClusterState newState) {
if (ilmMode != null) {
logger.info("ILM operation mode updated to {}", ilmMode);
}
if (slmMode != null) {
logger.info("SLM operation mode updated to {}", slmMode);
}
}
}
| OperationModeUpdateTask |
java | elastic__elasticsearch | x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockModel.java | {
"start": 935,
"end": 3225
} | class ____ extends Model {
protected String region;
protected String model;
protected AmazonBedrockProvider provider;
protected RateLimitSettings rateLimitSettings;
protected AmazonBedrockModel(ModelConfigurations modelConfigurations, ModelSecrets secrets) {
super(modelConfigurations, secrets);
setPropertiesFromServiceSettings((AmazonBedrockServiceSettings) modelConfigurations.getServiceSettings());
}
protected AmazonBedrockModel(Model model, TaskSettings taskSettings) {
super(model, taskSettings);
if (model instanceof AmazonBedrockModel bedrockModel) {
setPropertiesFromServiceSettings(bedrockModel.getServiceSettings());
}
}
protected AmazonBedrockModel(Model model, ServiceSettings serviceSettings) {
super(model, serviceSettings);
if (serviceSettings instanceof AmazonBedrockServiceSettings bedrockServiceSettings) {
setPropertiesFromServiceSettings(bedrockServiceSettings);
}
}
protected AmazonBedrockModel(ModelConfigurations modelConfigurations) {
super(modelConfigurations);
setPropertiesFromServiceSettings((AmazonBedrockServiceSettings) modelConfigurations.getServiceSettings());
}
public String region() {
return region;
}
public String model() {
return model;
}
public AmazonBedrockProvider provider() {
return provider;
}
public RateLimitSettings rateLimitSettings() {
return rateLimitSettings;
}
private void setPropertiesFromServiceSettings(AmazonBedrockServiceSettings serviceSettings) {
this.region = serviceSettings.region();
this.model = serviceSettings.modelId();
this.provider = serviceSettings.provider();
this.rateLimitSettings = serviceSettings.rateLimitSettings();
}
public abstract ExecutableAction accept(AmazonBedrockActionVisitor creator, Map<String, Object> taskSettings);
@Override
public AmazonBedrockServiceSettings getServiceSettings() {
return (AmazonBedrockServiceSettings) super.getServiceSettings();
}
@Override
public AwsSecretSettings getSecretSettings() {
return (AwsSecretSettings) super.getSecretSettings();
}
}
| AmazonBedrockModel |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/NettyEndpointBuilderFactory.java | {
"start": 137936,
"end": 160732
} | interface ____
extends
NettyEndpointConsumerBuilder,
NettyEndpointProducerBuilder {
default AdvancedNettyEndpointBuilder advanced() {
return (AdvancedNettyEndpointBuilder) this;
}
/**
* Whether or not to disconnect(close) from Netty Channel right after
* use.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: common
*
* @param disconnect the value to set
* @return the dsl builder
*/
default NettyEndpointBuilder disconnect(boolean disconnect) {
doSetProperty("disconnect", disconnect);
return this;
}
/**
* Whether or not to disconnect(close) from Netty Channel right after
* use.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: common
*
* @param disconnect the value to set
* @return the dsl builder
*/
default NettyEndpointBuilder disconnect(String disconnect) {
doSetProperty("disconnect", disconnect);
return this;
}
/**
* Setting to ensure socket is not closed due to inactivity.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: common
*
* @param keepAlive the value to set
* @return the dsl builder
*/
default NettyEndpointBuilder keepAlive(boolean keepAlive) {
doSetProperty("keepAlive", keepAlive);
return this;
}
/**
* Setting to ensure socket is not closed due to inactivity.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: common
*
* @param keepAlive the value to set
* @return the dsl builder
*/
default NettyEndpointBuilder keepAlive(String keepAlive) {
doSetProperty("keepAlive", keepAlive);
return this;
}
/**
* Setting to facilitate socket multiplexing.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: common
*
* @param reuseAddress the value to set
* @return the dsl builder
*/
default NettyEndpointBuilder reuseAddress(boolean reuseAddress) {
doSetProperty("reuseAddress", reuseAddress);
return this;
}
/**
* Setting to facilitate socket multiplexing.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: common
*
* @param reuseAddress the value to set
* @return the dsl builder
*/
default NettyEndpointBuilder reuseAddress(String reuseAddress) {
doSetProperty("reuseAddress", reuseAddress);
return this;
}
/**
* This option allows producers and consumers (in client mode) to reuse
* the same Netty Channel for the lifecycle of processing the Exchange.
* This is useful if you need to call a server multiple times in a Camel
* route and want to use the same network connection. When using this,
* the channel is not returned to the connection pool until the Exchange
* is done; or disconnected if the disconnect option is set to true. The
* reused Channel is stored on the Exchange as an exchange property with
* the key CamelNettyChannel which allows you to obtain the channel
* during routing and use it as well.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: common
*
* @param reuseChannel the value to set
* @return the dsl builder
*/
default NettyEndpointBuilder reuseChannel(boolean reuseChannel) {
doSetProperty("reuseChannel", reuseChannel);
return this;
}
/**
* This option allows producers and consumers (in client mode) to reuse
* the same Netty Channel for the lifecycle of processing the Exchange.
* This is useful if you need to call a server multiple times in a Camel
* route and want to use the same network connection. When using this,
* the channel is not returned to the connection pool until the Exchange
* is done; or disconnected if the disconnect option is set to true. The
* reused Channel is stored on the Exchange as an exchange property with
* the key CamelNettyChannel which allows you to obtain the channel
* during routing and use it as well.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: common
*
* @param reuseChannel the value to set
* @return the dsl builder
*/
default NettyEndpointBuilder reuseChannel(String reuseChannel) {
doSetProperty("reuseChannel", reuseChannel);
return this;
}
/**
* Setting to set endpoint as one-way (false) or request-response
* (true).
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: common
*
* @param sync the value to set
* @return the dsl builder
*/
default NettyEndpointBuilder sync(boolean sync) {
doSetProperty("sync", sync);
return this;
}
/**
* Setting to set endpoint as one-way (false) or request-response
* (true).
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: common
*
* @param sync the value to set
* @return the dsl builder
*/
default NettyEndpointBuilder sync(String sync) {
doSetProperty("sync", sync);
return this;
}
/**
* Setting to improve TCP protocol performance.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: common
*
* @param tcpNoDelay the value to set
* @return the dsl builder
*/
default NettyEndpointBuilder tcpNoDelay(boolean tcpNoDelay) {
doSetProperty("tcpNoDelay", tcpNoDelay);
return this;
}
/**
* Setting to improve TCP protocol performance.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: common
*
* @param tcpNoDelay the value to set
* @return the dsl builder
*/
default NettyEndpointBuilder tcpNoDelay(String tcpNoDelay) {
doSetProperty("tcpNoDelay", tcpNoDelay);
return this;
}
/**
* The netty component installs a default codec if both, encoder/decoder
* is null and textline is false. Setting allowDefaultCodec to false
* prevents the netty component from installing a default codec as the
* first element in the filter chain.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: codec
*
* @param allowDefaultCodec the value to set
* @return the dsl builder
*/
default NettyEndpointBuilder allowDefaultCodec(boolean allowDefaultCodec) {
doSetProperty("allowDefaultCodec", allowDefaultCodec);
return this;
}
/**
* The netty component installs a default codec if both, encoder/decoder
* is null and textline is false. Setting allowDefaultCodec to false
* prevents the netty component from installing a default codec as the
* first element in the filter chain.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: codec
*
* @param allowDefaultCodec the value to set
* @return the dsl builder
*/
default NettyEndpointBuilder allowDefaultCodec(String allowDefaultCodec) {
doSetProperty("allowDefaultCodec", allowDefaultCodec);
return this;
}
/**
* Whether or not to auto append missing end delimiter when sending
* using the textline codec.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: codec
*
* @param autoAppendDelimiter the value to set
* @return the dsl builder
*/
default NettyEndpointBuilder autoAppendDelimiter(boolean autoAppendDelimiter) {
doSetProperty("autoAppendDelimiter", autoAppendDelimiter);
return this;
}
/**
* Whether or not to auto append missing end delimiter when sending
* using the textline codec.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: codec
*
* @param autoAppendDelimiter the value to set
* @return the dsl builder
*/
default NettyEndpointBuilder autoAppendDelimiter(String autoAppendDelimiter) {
doSetProperty("autoAppendDelimiter", autoAppendDelimiter);
return this;
}
/**
* The max line length to use for the textline codec.
*
* The option is a: <code>int</code> type.
*
* Default: 1024
* Group: codec
*
* @param decoderMaxLineLength the value to set
* @return the dsl builder
*/
default NettyEndpointBuilder decoderMaxLineLength(int decoderMaxLineLength) {
doSetProperty("decoderMaxLineLength", decoderMaxLineLength);
return this;
}
/**
* The max line length to use for the textline codec.
*
* The option will be converted to a <code>int</code> type.
*
* Default: 1024
* Group: codec
*
* @param decoderMaxLineLength the value to set
* @return the dsl builder
*/
default NettyEndpointBuilder decoderMaxLineLength(String decoderMaxLineLength) {
doSetProperty("decoderMaxLineLength", decoderMaxLineLength);
return this;
}
/**
* A list of decoders to be used. You can use a String which have values
* separated by comma, and have the values be looked up in the Registry.
* Just remember to prefix the value with # so Camel knows it should
* lookup.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: codec
*
* @param decoders the value to set
* @return the dsl builder
*/
default NettyEndpointBuilder decoders(String decoders) {
doSetProperty("decoders", decoders);
return this;
}
/**
* The delimiter to use for the textline codec. Possible values are LINE
* and NULL.
*
* The option is a:
* <code>org.apache.camel.component.netty.TextLineDelimiter</code> type.
*
* Default: LINE
* Group: codec
*
* @param delimiter the value to set
* @return the dsl builder
*/
default NettyEndpointBuilder delimiter(org.apache.camel.component.netty.TextLineDelimiter delimiter) {
doSetProperty("delimiter", delimiter);
return this;
}
/**
* The delimiter to use for the textline codec. Possible values are LINE
* and NULL.
*
* The option will be converted to a
* <code>org.apache.camel.component.netty.TextLineDelimiter</code> type.
*
* Default: LINE
* Group: codec
*
* @param delimiter the value to set
* @return the dsl builder
*/
default NettyEndpointBuilder delimiter(String delimiter) {
doSetProperty("delimiter", delimiter);
return this;
}
/**
* A list of encoders to be used. You can use a String which have values
* separated by comma, and have the values be looked up in the Registry.
* Just remember to prefix the value with # so Camel knows it should
* lookup.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: codec
*
* @param encoders the value to set
* @return the dsl builder
*/
default NettyEndpointBuilder encoders(String encoders) {
doSetProperty("encoders", encoders);
return this;
}
/**
* The encoding (a charset name) to use for the textline codec. If not
* provided, Camel will use the JVM default Charset.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: codec
*
* @param encoding the value to set
* @return the dsl builder
*/
default NettyEndpointBuilder encoding(String encoding) {
doSetProperty("encoding", encoding);
return this;
}
/**
* Only used for TCP. If no codec is specified, you can use this flag to
* indicate a text line based codec; if not specified or the value is
* false, then Object Serialization is assumed over TCP - however only
* Strings are allowed to be serialized by default.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: codec
*
* @param textline the value to set
* @return the dsl builder
*/
default NettyEndpointBuilder textline(boolean textline) {
doSetProperty("textline", textline);
return this;
}
/**
* Only used for TCP. If no codec is specified, you can use this flag to
* indicate a text line based codec; if not specified or the value is
* false, then Object Serialization is assumed over TCP - however only
* Strings are allowed to be serialized by default.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: codec
*
* @param textline the value to set
* @return the dsl builder
*/
default NettyEndpointBuilder textline(String textline) {
doSetProperty("textline", textline);
return this;
}
/**
* Which protocols to enable when using SSL.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: TLSv1.2,TLSv1.3
* Group: security
*
* @param enabledProtocols the value to set
* @return the dsl builder
*/
default NettyEndpointBuilder enabledProtocols(String enabledProtocols) {
doSetProperty("enabledProtocols", enabledProtocols);
return this;
}
/**
* To enable/disable hostname verification on SSLEngine.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param hostnameVerification the value to set
* @return the dsl builder
*/
default NettyEndpointBuilder hostnameVerification(boolean hostnameVerification) {
doSetProperty("hostnameVerification", hostnameVerification);
return this;
}
/**
* To enable/disable hostname verification on SSLEngine.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param hostnameVerification the value to set
* @return the dsl builder
*/
default NettyEndpointBuilder hostnameVerification(String hostnameVerification) {
doSetProperty("hostnameVerification", hostnameVerification);
return this;
}
/**
* Keystore format to be used for payload encryption. Defaults to JKS if
* not set.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param keyStoreFormat the value to set
* @return the dsl builder
*/
default NettyEndpointBuilder keyStoreFormat(String keyStoreFormat) {
doSetProperty("keyStoreFormat", keyStoreFormat);
return this;
}
/**
* Client side certificate keystore to be used for encryption. Is loaded
* by default from classpath, but you can prefix with classpath:, file:,
* or http: to load the resource from different systems.
*
* This option can also be loaded from an existing file, by prefixing
* with file: or classpath: followed by the location of the file.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param keyStoreResource the value to set
* @return the dsl builder
*/
default NettyEndpointBuilder keyStoreResource(String keyStoreResource) {
doSetProperty("keyStoreResource", keyStoreResource);
return this;
}
/**
* Password to use for the keyStore and trustStore. The same password
* must be configured for both resources.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param passphrase the value to set
* @return the dsl builder
*/
default NettyEndpointBuilder passphrase(String passphrase) {
doSetProperty("passphrase", passphrase);
return this;
}
/**
* Security provider to be used for payload encryption. Defaults to
* SunX509 if not set.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param securityProvider the value to set
* @return the dsl builder
*/
default NettyEndpointBuilder securityProvider(String securityProvider) {
doSetProperty("securityProvider", securityProvider);
return this;
}
/**
* Setting to specify whether SSL encryption is applied to this
* endpoint.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param ssl the value to set
* @return the dsl builder
*/
default NettyEndpointBuilder ssl(boolean ssl) {
doSetProperty("ssl", ssl);
return this;
}
/**
* Setting to specify whether SSL encryption is applied to this
* endpoint.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param ssl the value to set
* @return the dsl builder
*/
default NettyEndpointBuilder ssl(String ssl) {
doSetProperty("ssl", ssl);
return this;
}
/**
* When enabled and in SSL mode, then the Netty consumer will enrich the
* Camel Message with headers having information about the client
* certificate such as subject name, issuer name, serial number, and the
* valid date range.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param sslClientCertHeaders the value to set
* @return the dsl builder
*/
default NettyEndpointBuilder sslClientCertHeaders(boolean sslClientCertHeaders) {
doSetProperty("sslClientCertHeaders", sslClientCertHeaders);
return this;
}
/**
* When enabled and in SSL mode, then the Netty consumer will enrich the
* Camel Message with headers having information about the client
* certificate such as subject name, issuer name, serial number, and the
* valid date range.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param sslClientCertHeaders the value to set
* @return the dsl builder
*/
default NettyEndpointBuilder sslClientCertHeaders(String sslClientCertHeaders) {
doSetProperty("sslClientCertHeaders", sslClientCertHeaders);
return this;
}
/**
* To configure security using SSLContextParameters.
*
* The option is a:
* <code>org.apache.camel.support.jsse.SSLContextParameters</code> type.
*
* Group: security
*
* @param sslContextParameters the value to set
* @return the dsl builder
*/
default NettyEndpointBuilder sslContextParameters(org.apache.camel.support.jsse.SSLContextParameters sslContextParameters) {
doSetProperty("sslContextParameters", sslContextParameters);
return this;
}
/**
* To configure security using SSLContextParameters.
*
* The option will be converted to a
* <code>org.apache.camel.support.jsse.SSLContextParameters</code> type.
*
* Group: security
*
* @param sslContextParameters the value to set
* @return the dsl builder
*/
default NettyEndpointBuilder sslContextParameters(String sslContextParameters) {
doSetProperty("sslContextParameters", sslContextParameters);
return this;
}
/**
* Reference to a | NettyEndpointBuilder |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/lob/LobAsLastValueEntity.java | {
"start": 228,
"end": 1430
} | class ____ implements Serializable {
private Integer id;
private String name;
private String details;
private String title;
public LobAsLastValueEntity() {
}
public LobAsLastValueEntity(String name, String details, String title) {
this.name = name;
this.details = details;
this.title = title;
}
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getDetails() {
return details;
}
public void setDetails(String details) {
this.details = details;
}
public String getTitle() {
return title;
}
public void setTitle(String title) {
this.title = title;
}
@Override
public boolean equals(Object o) {
if ( this == o ) {
return true;
}
if ( o == null || getClass() != o.getClass() ) {
return false;
}
LobAsLastValueEntity that = (LobAsLastValueEntity) o;
return Objects.equals( id, that.id ) &&
Objects.equals( details, that.details ) &&
Objects.equals( title, that.title );
}
@Override
public int hashCode() {
return Objects.hash( id, details, title );
}
}
| LobAsLastValueEntity |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/inject/AssistedInjectAndInjectOnConstructorsTest.java | {
"start": 1445,
"end": 1720
} | class ____ {
/**
* Class has a constructor annotated with @javax.inject.Inject and another constructor annotated
* with @AssistedInject.
*/
// BUG: Diagnostic contains: AssistedInjectAndInjectOnConstructors
public | AssistedInjectAndInjectOnConstructorsPositiveCases |
java | spring-projects__spring-framework | spring-beans/src/jmh/java/org/springframework/beans/factory/ConcurrentBeanFactoryBenchmark.java | {
"start": 2409,
"end": 2572
} | class ____ {
private Date date;
public Date getDate() {
return this.date;
}
public void setDate(Date date) {
this.date = date;
}
}
}
| ConcurrentBean |
java | spring-projects__spring-security | core/src/test/java/org/springframework/security/authorization/AuthorizationAdvisorProxyFactoryTests.java | {
"start": 2463,
"end": 18289
} | class ____ {
private final Authentication user = TestAuthentication.authenticatedUser();
private final Authentication admin = TestAuthentication.authenticatedAdmin();
private final Flight flight = new Flight();
private final User alan = new User("alan", "alan", "turing");
@Test
public void proxyWhenPreAuthorizeThenHonors() {
SecurityContextHolder.getContext().setAuthentication(this.user);
AuthorizationAdvisorProxyFactory factory = AuthorizationAdvisorProxyFactory.withDefaults();
Flight flight = new Flight();
assertThat(flight.getAltitude()).isEqualTo(35000d);
Flight secured = proxy(factory, flight);
assertThatExceptionOfType(AccessDeniedException.class).isThrownBy(secured::getAltitude);
SecurityContextHolder.clearContext();
}
@Test
public void proxyWhenPreAuthorizeOnInterfaceThenHonors() {
SecurityContextHolder.getContext().setAuthentication(this.user);
AuthorizationAdvisorProxyFactory factory = AuthorizationAdvisorProxyFactory.withDefaults();
assertThat(this.alan.getFirstName()).isEqualTo("alan");
User secured = proxy(factory, this.alan);
assertThatExceptionOfType(AccessDeniedException.class).isThrownBy(secured::getFirstName);
SecurityContextHolder.getContext().setAuthentication(authenticated("alan"));
assertThat(secured.getFirstName()).isEqualTo("alan");
SecurityContextHolder.getContext().setAuthentication(this.admin);
assertThat(secured.getFirstName()).isEqualTo("alan");
SecurityContextHolder.clearContext();
}
@Test
public void proxyWhenPreAuthorizeOnRecordThenHonors() {
SecurityContextHolder.getContext().setAuthentication(this.user);
AuthorizationAdvisorProxyFactory factory = AuthorizationAdvisorProxyFactory.withDefaults();
HasSecret repo = new Repository("secret");
assertThat(repo.secret()).isEqualTo("secret");
HasSecret secured = proxy(factory, repo);
assertThatExceptionOfType(AccessDeniedException.class).isThrownBy(secured::secret);
SecurityContextHolder.getContext().setAuthentication(this.user);
assertThat(repo.secret()).isEqualTo("secret");
SecurityContextHolder.clearContext();
}
@Test
public void proxyWhenImmutableListThenReturnsSecuredImmutableList() {
SecurityContextHolder.getContext().setAuthentication(this.user);
AuthorizationAdvisorProxyFactory factory = AuthorizationAdvisorProxyFactory.withDefaults();
List<Flight> flights = List.of(this.flight);
List<Flight> secured = proxy(factory, flights);
secured.forEach(
(flight) -> assertThatExceptionOfType(AccessDeniedException.class).isThrownBy(flight::getAltitude));
assertThatExceptionOfType(UnsupportedOperationException.class).isThrownBy(secured::clear);
SecurityContextHolder.clearContext();
}
@Test
public void proxyWhenImmutableSetThenReturnsSecuredImmutableSet() {
SecurityContextHolder.getContext().setAuthentication(this.user);
AuthorizationAdvisorProxyFactory factory = AuthorizationAdvisorProxyFactory.withDefaults();
Set<Flight> flights = Set.of(this.flight);
Set<Flight> secured = proxy(factory, flights);
secured.forEach(
(flight) -> assertThatExceptionOfType(AccessDeniedException.class).isThrownBy(flight::getAltitude));
assertThatExceptionOfType(UnsupportedOperationException.class).isThrownBy(secured::clear);
SecurityContextHolder.clearContext();
}
@Test
public void proxyWhenQueueThenReturnsSecuredQueue() {
SecurityContextHolder.getContext().setAuthentication(this.user);
AuthorizationAdvisorProxyFactory factory = AuthorizationAdvisorProxyFactory.withDefaults();
Queue<Flight> flights = new LinkedList<>(List.of(this.flight));
Queue<Flight> secured = proxy(factory, flights);
assertThat(flights.size()).isEqualTo(secured.size());
secured.forEach(
(flight) -> assertThatExceptionOfType(AccessDeniedException.class).isThrownBy(flight::getAltitude));
SecurityContextHolder.clearContext();
}
@Test
public void proxyWhenImmutableSortedSetThenReturnsSecuredImmutableSortedSet() {
SecurityContextHolder.getContext().setAuthentication(this.user);
AuthorizationAdvisorProxyFactory factory = AuthorizationAdvisorProxyFactory.withDefaults();
SortedSet<User> users = Collections.unmodifiableSortedSet(new TreeSet<>(Set.of(this.alan)));
SortedSet<User> secured = proxy(factory, users);
secured
.forEach((user) -> assertThatExceptionOfType(AccessDeniedException.class).isThrownBy(user::getFirstName));
assertThatExceptionOfType(UnsupportedOperationException.class).isThrownBy(secured::clear);
SecurityContextHolder.clearContext();
}
@Test
public void proxyWhenImmutableSortedMapThenReturnsSecuredImmutableSortedMap() {
SecurityContextHolder.getContext().setAuthentication(this.user);
AuthorizationAdvisorProxyFactory factory = AuthorizationAdvisorProxyFactory.withDefaults();
SortedMap<String, User> users = Collections
.unmodifiableSortedMap(new TreeMap<>(Map.of(this.alan.getId(), this.alan)));
SortedMap<String, User> secured = proxy(factory, users);
secured.forEach(
(id, user) -> assertThatExceptionOfType(AccessDeniedException.class).isThrownBy(user::getFirstName));
assertThatExceptionOfType(UnsupportedOperationException.class).isThrownBy(secured::clear);
SecurityContextHolder.clearContext();
}
@Test
public void proxyWhenImmutableMapThenReturnsSecuredImmutableMap() {
SecurityContextHolder.getContext().setAuthentication(this.user);
AuthorizationAdvisorProxyFactory factory = AuthorizationAdvisorProxyFactory.withDefaults();
Map<String, User> users = Map.of(this.alan.getId(), this.alan);
Map<String, User> secured = proxy(factory, users);
secured.forEach(
(id, user) -> assertThatExceptionOfType(AccessDeniedException.class).isThrownBy(user::getFirstName));
assertThatExceptionOfType(UnsupportedOperationException.class).isThrownBy(secured::clear);
SecurityContextHolder.clearContext();
}
@Test
public void proxyWhenMutableListThenReturnsSecuredMutableList() {
SecurityContextHolder.getContext().setAuthentication(this.user);
AuthorizationAdvisorProxyFactory factory = AuthorizationAdvisorProxyFactory.withDefaults();
List<Flight> flights = new ArrayList<>(List.of(this.flight));
List<Flight> secured = proxy(factory, flights);
secured.forEach(
(flight) -> assertThatExceptionOfType(AccessDeniedException.class).isThrownBy(flight::getAltitude));
secured.clear();
SecurityContextHolder.clearContext();
}
@Test
public void proxyWhenMutableSetThenReturnsSecuredMutableSet() {
SecurityContextHolder.getContext().setAuthentication(this.user);
AuthorizationAdvisorProxyFactory factory = AuthorizationAdvisorProxyFactory.withDefaults();
Set<Flight> flights = new HashSet<>(Set.of(this.flight));
Set<Flight> secured = proxy(factory, flights);
secured.forEach(
(flight) -> assertThatExceptionOfType(AccessDeniedException.class).isThrownBy(flight::getAltitude));
secured.clear();
SecurityContextHolder.clearContext();
}
@Test
public void proxyWhenMutableSortedSetThenReturnsSecuredMutableSortedSet() {
SecurityContextHolder.getContext().setAuthentication(this.user);
AuthorizationAdvisorProxyFactory factory = AuthorizationAdvisorProxyFactory.withDefaults();
SortedSet<User> users = new TreeSet<>(Set.of(this.alan));
SortedSet<User> secured = proxy(factory, users);
secured.forEach((u) -> assertThatExceptionOfType(AccessDeniedException.class).isThrownBy(u::getFirstName));
secured.clear();
SecurityContextHolder.clearContext();
}
@Test
public void proxyWhenMutableSortedMapThenReturnsSecuredMutableSortedMap() {
SecurityContextHolder.getContext().setAuthentication(this.user);
AuthorizationAdvisorProxyFactory factory = AuthorizationAdvisorProxyFactory.withDefaults();
SortedMap<String, User> users = new TreeMap<>(Map.of(this.alan.getId(), this.alan));
SortedMap<String, User> secured = proxy(factory, users);
secured.forEach((id, u) -> assertThatExceptionOfType(AccessDeniedException.class).isThrownBy(u::getFirstName));
secured.clear();
SecurityContextHolder.clearContext();
}
@Test
public void proxyWhenMutableMapThenReturnsSecuredMutableMap() {
SecurityContextHolder.getContext().setAuthentication(this.user);
AuthorizationAdvisorProxyFactory factory = AuthorizationAdvisorProxyFactory.withDefaults();
Map<String, User> users = new HashMap<>(Map.of(this.alan.getId(), this.alan));
Map<String, User> secured = proxy(factory, users);
secured.forEach((id, u) -> assertThatExceptionOfType(AccessDeniedException.class).isThrownBy(u::getFirstName));
secured.clear();
SecurityContextHolder.clearContext();
}
@Test
public void proxyWhenPreAuthorizeForOptionalThenHonors() {
SecurityContextHolder.getContext().setAuthentication(this.user);
AuthorizationAdvisorProxyFactory factory = AuthorizationAdvisorProxyFactory.withDefaults();
Optional<Flight> flights = Optional.of(this.flight);
assertThat(flights.get().getAltitude()).isEqualTo(35000d);
Optional<Flight> secured = proxy(factory, flights);
assertThatExceptionOfType(AccessDeniedException.class).isThrownBy(() -> secured.ifPresent(Flight::getAltitude));
SecurityContextHolder.clearContext();
}
@Test
public void proxyWhenPreAuthorizeForSupplierThenHonors() {
SecurityContextHolder.getContext().setAuthentication(this.user);
AuthorizationAdvisorProxyFactory factory = AuthorizationAdvisorProxyFactory.withDefaults();
Supplier<Flight> flights = () -> this.flight;
assertThat(flights.get().getAltitude()).isEqualTo(35000d);
Supplier<Flight> secured = proxy(factory, flights);
assertThatExceptionOfType(AccessDeniedException.class).isThrownBy(() -> secured.get().getAltitude());
SecurityContextHolder.clearContext();
}
@Test
public void proxyWhenPreAuthorizeForStreamThenHonors() {
SecurityContextHolder.getContext().setAuthentication(this.user);
AuthorizationAdvisorProxyFactory factory = AuthorizationAdvisorProxyFactory.withDefaults();
Stream<Flight> flights = Stream.of(this.flight);
Stream<Flight> secured = proxy(factory, flights);
assertThatExceptionOfType(AccessDeniedException.class).isThrownBy(() -> secured.forEach(Flight::getAltitude));
SecurityContextHolder.clearContext();
}
@Test
public void proxyWhenPreAuthorizeForArrayThenHonors() {
SecurityContextHolder.getContext().setAuthentication(this.user);
AuthorizationAdvisorProxyFactory factory = AuthorizationAdvisorProxyFactory.withDefaults();
Flight[] flights = { this.flight };
Flight[] secured = proxy(factory, flights);
assertThatExceptionOfType(AccessDeniedException.class).isThrownBy(secured[0]::getAltitude);
SecurityContextHolder.clearContext();
}
@Test
public void proxyWhenPreAuthorizeForIteratorThenHonors() {
SecurityContextHolder.getContext().setAuthentication(this.user);
AuthorizationAdvisorProxyFactory factory = AuthorizationAdvisorProxyFactory.withDefaults();
Iterator<Flight> flights = List.of(this.flight).iterator();
Iterator<Flight> secured = proxy(factory, flights);
assertThatExceptionOfType(AccessDeniedException.class).isThrownBy(() -> secured.next().getAltitude());
SecurityContextHolder.clearContext();
}
@Test
public void proxyWhenPreAuthorizeForIterableThenHonors() {
SecurityContextHolder.getContext().setAuthentication(this.user);
AuthorizationAdvisorProxyFactory factory = AuthorizationAdvisorProxyFactory.withDefaults();
Iterable<User> users = new UserRepository();
Iterable<User> secured = proxy(factory, users);
assertThatExceptionOfType(AccessDeniedException.class).isThrownBy(() -> secured.forEach(User::getFirstName));
SecurityContextHolder.clearContext();
}
@Test
public void proxyWhenPreAuthorizeForClassThenHonors() {
AuthorizationAdvisorProxyFactory factory = AuthorizationAdvisorProxyFactory.withDefaults();
Class<Flight> clazz = proxy(factory, Flight.class);
assertThat(clazz.getSimpleName()).contains("SpringCGLIB$$");
Flight secured = proxy(factory, this.flight);
assertThat(secured.getClass()).isSameAs(clazz);
SecurityContextHolder.getContext().setAuthentication(this.user);
assertThatExceptionOfType(AccessDeniedException.class).isThrownBy(secured::getAltitude);
SecurityContextHolder.clearContext();
}
@Test
public void setAdvisorsWhenProxyThenVisits() {
AuthorizationAdvisor advisor = mock(AuthorizationAdvisor.class);
given(advisor.getAdvice()).willReturn(advisor);
given(advisor.getPointcut()).willReturn(Pointcut.TRUE);
AuthorizationAdvisorProxyFactory factory = AuthorizationAdvisorProxyFactory.withDefaults();
factory.setAdvisors(advisor);
Flight flight = proxy(factory, this.flight);
flight.getAltitude();
verify(advisor, atLeastOnce()).getPointcut();
}
@Test
public void setTargetVisitorThenUses() {
TargetVisitor visitor = mock(TargetVisitor.class);
AuthorizationAdvisorProxyFactory factory = AuthorizationAdvisorProxyFactory.withDefaults();
factory.setTargetVisitor(visitor);
factory.proxy(new Flight());
verify(visitor).visit(any(), any());
}
@Test
public void setTargetVisitorIgnoreValueTypesThenIgnores() {
AuthorizationAdvisorProxyFactory factory = AuthorizationAdvisorProxyFactory.withDefaults();
assertThatExceptionOfType(ClassCastException.class).isThrownBy(() -> factory.proxy(35).intValue());
factory.setTargetVisitor(TargetVisitor.defaultsSkipValueTypes());
assertThat(factory.proxy(35)).isEqualTo(35);
}
// TODO Find why callbacks property is serialized with Jackson 3, not with Jackson 2
// FIXME: https://github.com/spring-projects/spring-security/issues/18077
@Disabled("callbacks property is serialized with Jackson 3, not with Jackson 2")
@Test
public void serializeWhenAuthorizationProxyObjectThenOnlyIncludesProxiedProperties() {
SecurityContextHolder.getContext().setAuthentication(this.admin);
AuthorizationAdvisorProxyFactory factory = AuthorizationAdvisorProxyFactory.withDefaults();
User user = proxy(factory, this.alan);
JsonMapper mapper = new JsonMapper();
String serialized = mapper.writeValueAsString(user);
Map<String, Object> properties = mapper.readValue(serialized, Map.class);
assertThat(properties).hasSize(3).containsKeys("id", "firstName", "lastName");
}
@Test
public void proxyWhenDefaultsThenInstanceOfAuthorizationProxy() {
AuthorizationAdvisorProxyFactory factory = AuthorizationAdvisorProxyFactory.withDefaults();
Flight flight = proxy(factory, this.flight);
assertThat(flight).isInstanceOf(AuthorizationProxy.class);
Flight target = (Flight) ((AuthorizationProxy) flight).toAuthorizedTarget();
assertThat(target).isSameAs(this.flight);
}
// gh-16819
@Test
void advisorsWhenWithDefaultsThenAreSorted() {
AuthorizationAdvisorProxyFactory proxyFactory = AuthorizationAdvisorProxyFactory.withDefaults();
AnnotationAwareOrderComparator comparator = AnnotationAwareOrderComparator.INSTANCE;
AuthorizationAdvisor previous = null;
for (AuthorizationAdvisor advisor : proxyFactory) {
boolean ordered = previous == null || comparator.compare(previous, advisor) < 0;
assertThat(ordered).isTrue();
previous = advisor;
}
}
// gh-16819
@Test
void advisorsWhenWithReactiveDefaultsThenAreSorted() {
AuthorizationAdvisorProxyFactory proxyFactory = AuthorizationAdvisorProxyFactory.withReactiveDefaults();
AnnotationAwareOrderComparator comparator = AnnotationAwareOrderComparator.INSTANCE;
AuthorizationAdvisor previous = null;
for (AuthorizationAdvisor advisor : proxyFactory) {
boolean ordered = previous == null || comparator.compare(previous, advisor) < 0;
assertThat(ordered).isTrue();
previous = advisor;
}
}
private Authentication authenticated(String user, String... authorities) {
return TestAuthentication.authenticated(TestAuthentication.withUsername(user).authorities(authorities).build());
}
private <T> T proxy(AuthorizationProxyFactory factory, Object target) {
return (T) factory.proxy(target);
}
static | AuthorizationAdvisorProxyFactoryTests |
java | apache__rocketmq | common/src/main/java/org/apache/rocketmq/common/ControllerConfig.java | {
"start": 956,
"end": 11146
} | class ____ {
private String rocketmqHome = MixAll.ROCKETMQ_HOME_DIR;
private String configStorePath = System.getProperty("user.home") + File.separator + "controller" + File.separator + "controller.properties";
public static final String DLEDGER_CONTROLLER = "DLedger";
public static final String JRAFT_CONTROLLER = "jRaft";
private JraftConfig jraftConfig = new JraftConfig();
private String controllerType = DLEDGER_CONTROLLER;
/**
* Interval of periodic scanning for non-active broker;
* Unit: millisecond
*/
private long scanNotActiveBrokerInterval = 5 * 1000;
/**
* Indicates the nums of thread to handle broker or operation requests, like REGISTER_BROKER.
*/
private int controllerThreadPoolNums = 16;
/**
* Indicates the capacity of queue to hold client requests.
*/
private int controllerRequestThreadPoolQueueCapacity = 50000;
private String controllerDLegerGroup;
private String controllerDLegerPeers;
private String controllerDLegerSelfId;
private int mappedFileSize = 1024 * 1024 * 1024;
private String controllerStorePath = "";
/**
* Max retry count for electing master when failed because of network or system error.
*/
private int electMasterMaxRetryCount = 3;
/**
* Whether the controller can elect a master which is not in the syncStateSet.
*/
private boolean enableElectUncleanMaster = false;
/**
* Whether process read event
*/
private boolean isProcessReadEvent = false;
/**
* Whether notify broker when its role changed
*/
private volatile boolean notifyBrokerRoleChanged = true;
/**
* Interval of periodic scanning for non-active master in each broker-set;
* Unit: millisecond
*/
private long scanInactiveMasterInterval = 5 * 1000;
private MetricsExporterType metricsExporterType = MetricsExporterType.DISABLE;
private String metricsGrpcExporterTarget = "";
private String metricsGrpcExporterHeader = "";
private long metricGrpcExporterTimeOutInMills = 3 * 1000;
private long metricGrpcExporterIntervalInMills = 60 * 1000;
private long metricLoggingExporterIntervalInMills = 10 * 1000;
private int metricsPromExporterPort = 5557;
private String metricsPromExporterHost = "";
// Label pairs in CSV. Each label follows pattern of Key:Value. eg: instance_id:xxx,uid:xxx
private String metricsLabel = "";
private boolean metricsInDelta = false;
/**
* Config in this black list will be not allowed to update by command.
* Try to update this config black list by restart process.
* Try to update configures in black list by restart process.
*/
private String configBlackList = "configBlackList;configStorePath";
public String getConfigBlackList() {
return configBlackList;
}
public void setConfigBlackList(String configBlackList) {
this.configBlackList = configBlackList;
}
public String getRocketmqHome() {
return rocketmqHome;
}
public void setRocketmqHome(String rocketmqHome) {
this.rocketmqHome = rocketmqHome;
}
public String getConfigStorePath() {
return configStorePath;
}
public void setConfigStorePath(String configStorePath) {
this.configStorePath = configStorePath;
}
public long getScanNotActiveBrokerInterval() {
return scanNotActiveBrokerInterval;
}
public void setScanNotActiveBrokerInterval(long scanNotActiveBrokerInterval) {
this.scanNotActiveBrokerInterval = scanNotActiveBrokerInterval;
}
public int getControllerThreadPoolNums() {
return controllerThreadPoolNums;
}
public void setControllerThreadPoolNums(int controllerThreadPoolNums) {
this.controllerThreadPoolNums = controllerThreadPoolNums;
}
public int getControllerRequestThreadPoolQueueCapacity() {
return controllerRequestThreadPoolQueueCapacity;
}
public void setControllerRequestThreadPoolQueueCapacity(int controllerRequestThreadPoolQueueCapacity) {
this.controllerRequestThreadPoolQueueCapacity = controllerRequestThreadPoolQueueCapacity;
}
public String getControllerDLegerGroup() {
return controllerDLegerGroup;
}
public void setControllerDLegerGroup(String controllerDLegerGroup) {
this.controllerDLegerGroup = controllerDLegerGroup;
}
public String getControllerDLegerPeers() {
return controllerDLegerPeers;
}
public void setControllerDLegerPeers(String controllerDLegerPeers) {
this.controllerDLegerPeers = controllerDLegerPeers;
}
public String getControllerDLegerSelfId() {
return controllerDLegerSelfId;
}
public void setControllerDLegerSelfId(String controllerDLegerSelfId) {
this.controllerDLegerSelfId = controllerDLegerSelfId;
}
public int getMappedFileSize() {
return mappedFileSize;
}
public void setMappedFileSize(int mappedFileSize) {
this.mappedFileSize = mappedFileSize;
}
public String getControllerStorePath() {
if (controllerStorePath.isEmpty()) {
controllerStorePath = System.getProperty("user.home") + File.separator + controllerType + "Controller";
}
return controllerStorePath;
}
public void setControllerStorePath(String controllerStorePath) {
this.controllerStorePath = controllerStorePath;
}
public boolean isEnableElectUncleanMaster() {
return enableElectUncleanMaster;
}
public void setEnableElectUncleanMaster(boolean enableElectUncleanMaster) {
this.enableElectUncleanMaster = enableElectUncleanMaster;
}
public boolean isProcessReadEvent() {
return isProcessReadEvent;
}
public void setProcessReadEvent(boolean processReadEvent) {
isProcessReadEvent = processReadEvent;
}
public boolean isNotifyBrokerRoleChanged() {
return notifyBrokerRoleChanged;
}
public void setNotifyBrokerRoleChanged(boolean notifyBrokerRoleChanged) {
this.notifyBrokerRoleChanged = notifyBrokerRoleChanged;
}
public long getScanInactiveMasterInterval() {
return scanInactiveMasterInterval;
}
public void setScanInactiveMasterInterval(long scanInactiveMasterInterval) {
this.scanInactiveMasterInterval = scanInactiveMasterInterval;
}
public String getDLedgerAddress() {
return Arrays.stream(this.controllerDLegerPeers.split(";"))
.filter(x -> this.controllerDLegerSelfId.equals(x.split("-")[0]))
.map(x -> x.split("-")[1]).findFirst().get();
}
public MetricsExporterType getMetricsExporterType() {
return metricsExporterType;
}
public void setMetricsExporterType(MetricsExporterType metricsExporterType) {
this.metricsExporterType = metricsExporterType;
}
public void setMetricsExporterType(int metricsExporterType) {
this.metricsExporterType = MetricsExporterType.valueOf(metricsExporterType);
}
public void setMetricsExporterType(String metricsExporterType) {
this.metricsExporterType = MetricsExporterType.valueOf(metricsExporterType);
}
public String getMetricsGrpcExporterTarget() {
return metricsGrpcExporterTarget;
}
public void setMetricsGrpcExporterTarget(String metricsGrpcExporterTarget) {
this.metricsGrpcExporterTarget = metricsGrpcExporterTarget;
}
public String getMetricsGrpcExporterHeader() {
return metricsGrpcExporterHeader;
}
public void setMetricsGrpcExporterHeader(String metricsGrpcExporterHeader) {
this.metricsGrpcExporterHeader = metricsGrpcExporterHeader;
}
public long getMetricGrpcExporterTimeOutInMills() {
return metricGrpcExporterTimeOutInMills;
}
public void setMetricGrpcExporterTimeOutInMills(long metricGrpcExporterTimeOutInMills) {
this.metricGrpcExporterTimeOutInMills = metricGrpcExporterTimeOutInMills;
}
public long getMetricGrpcExporterIntervalInMills() {
return metricGrpcExporterIntervalInMills;
}
public void setMetricGrpcExporterIntervalInMills(long metricGrpcExporterIntervalInMills) {
this.metricGrpcExporterIntervalInMills = metricGrpcExporterIntervalInMills;
}
public long getMetricLoggingExporterIntervalInMills() {
return metricLoggingExporterIntervalInMills;
}
public void setMetricLoggingExporterIntervalInMills(long metricLoggingExporterIntervalInMills) {
this.metricLoggingExporterIntervalInMills = metricLoggingExporterIntervalInMills;
}
public int getMetricsPromExporterPort() {
return metricsPromExporterPort;
}
public void setMetricsPromExporterPort(int metricsPromExporterPort) {
this.metricsPromExporterPort = metricsPromExporterPort;
}
public String getMetricsPromExporterHost() {
return metricsPromExporterHost;
}
public void setMetricsPromExporterHost(String metricsPromExporterHost) {
this.metricsPromExporterHost = metricsPromExporterHost;
}
public String getMetricsLabel() {
return metricsLabel;
}
public void setMetricsLabel(String metricsLabel) {
this.metricsLabel = metricsLabel;
}
public boolean isMetricsInDelta() {
return metricsInDelta;
}
public void setMetricsInDelta(boolean metricsInDelta) {
this.metricsInDelta = metricsInDelta;
}
public String getControllerType() {
return controllerType;
}
public void setControllerType(String controllerType) {
this.controllerType = controllerType;
}
public JraftConfig getJraftConfig() {
return jraftConfig;
}
public void setJraftConfig(JraftConfig jraftConfig) {
this.jraftConfig = jraftConfig;
}
public int getElectMasterMaxRetryCount() {
return this.electMasterMaxRetryCount;
}
public void setElectMasterMaxRetryCount(int electMasterMaxRetryCount) {
this.electMasterMaxRetryCount = electMasterMaxRetryCount;
}
}
| ControllerConfig |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/conditional/propertyname/sourcepropertyname/ConditionalMethodWithSourcePropertyNameInContextMapper.java | {
"start": 1588,
"end": 2169
} | class ____ {
Set<String> visited = new LinkedHashSet<>();
@Condition
public boolean isNotBlank(String value, @SourcePropertyName String propName) {
visited.add( propName );
return value != null && !value.trim().isEmpty();
}
}
@Mapping(target = "country", source = "originCountry")
@Mapping(target = "addresses", source = "originAddresses")
Employee map(EmployeeDto employee, @Context PresenceUtilsAllProps utils);
Address map(AddressDto addressDto, @Context PresenceUtilsAllProps utils);
| PresenceUtils |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/UnicodeEscapeTest.java | {
"start": 3240,
"end": 3499
} | class ____ {\rprivate static final String FOO = null;",
"}")
.doTest(TEXT_MATCH);
}
@Test
public void nonPrintableAsciiCharacter_noFinding() {
helper
.addSourceLines(
"Test.java",
"""
| Test |
java | mybatis__mybatis-3 | src/test/java/org/apache/ibatis/type/FloatTypeHandlerTest.java | {
"start": 922,
"end": 2798
} | class ____ extends BaseTypeHandlerTest {
private static final TypeHandler<Float> TYPE_HANDLER = new FloatTypeHandler();
@Override
@Test
public void shouldSetParameter() throws Exception {
TYPE_HANDLER.setParameter(ps, 1, 100f, null);
verify(ps).setFloat(1, 100f);
}
@Override
@Test
public void shouldGetResultFromResultSetByName() throws Exception {
when(rs.getFloat("column")).thenReturn(100f, 0f);
assertEquals(Float.valueOf(100f), TYPE_HANDLER.getResult(rs, "column"));
assertEquals(Float.valueOf(0f), TYPE_HANDLER.getResult(rs, "column"));
}
@Override
@Test
public void shouldGetResultNullFromResultSetByName() throws Exception {
when(rs.getFloat("column")).thenReturn(0f);
when(rs.wasNull()).thenReturn(true);
assertNull(TYPE_HANDLER.getResult(rs, "column"));
}
@Override
@Test
public void shouldGetResultFromResultSetByPosition() throws Exception {
when(rs.getFloat(1)).thenReturn(100f, 0f);
assertEquals(Float.valueOf(100f), TYPE_HANDLER.getResult(rs, 1));
assertEquals(Float.valueOf(0f), TYPE_HANDLER.getResult(rs, 1));
}
@Override
@Test
public void shouldGetResultNullFromResultSetByPosition() throws Exception {
when(rs.getFloat(1)).thenReturn(0f);
when(rs.wasNull()).thenReturn(true);
assertNull(TYPE_HANDLER.getResult(rs, 1));
}
@Override
@Test
public void shouldGetResultFromCallableStatement() throws Exception {
when(cs.getFloat(1)).thenReturn(100f, 0f);
assertEquals(Float.valueOf(100f), TYPE_HANDLER.getResult(cs, 1));
assertEquals(Float.valueOf(0f), TYPE_HANDLER.getResult(cs, 1));
}
@Override
@Test
public void shouldGetResultNullFromCallableStatement() throws Exception {
when(cs.getFloat(1)).thenReturn(0f);
when(cs.wasNull()).thenReturn(true);
assertNull(TYPE_HANDLER.getResult(cs, 1));
}
}
| FloatTypeHandlerTest |
java | greenrobot__greendao | tests/DaoTestBase/src/main/java/org/greenrobot/greendao/daotest/DateEntityDao.java | {
"start": 855,
"end": 5295
} | class ____ {
public final static Property Id = new Property(0, Long.class, "id", true, "_id");
public final static Property Date = new Property(1, java.util.Date.class, "date", false, "DATE");
public final static Property DateNotNull = new Property(2, java.util.Date.class, "dateNotNull", false, "DATE_NOT_NULL");
}
private Query<DateEntity> toManyEntity_DateEntityListQuery;
public DateEntityDao(DaoConfig config) {
super(config);
}
public DateEntityDao(DaoConfig config, DaoSession daoSession) {
super(config, daoSession);
}
/** Creates the underlying database table. */
public static void createTable(Database db, boolean ifNotExists) {
String constraint = ifNotExists? "IF NOT EXISTS ": "";
db.execSQL("CREATE TABLE " + constraint + "\"DATE_ENTITY\" (" + //
"\"_id\" INTEGER PRIMARY KEY ," + // 0: id
"\"DATE\" INTEGER," + // 1: date
"\"DATE_NOT_NULL\" INTEGER NOT NULL );"); // 2: dateNotNull
}
/** Drops the underlying database table. */
public static void dropTable(Database db, boolean ifExists) {
String sql = "DROP TABLE " + (ifExists ? "IF EXISTS " : "") + "\"DATE_ENTITY\"";
db.execSQL(sql);
}
@Override
protected final void bindValues(DatabaseStatement stmt, DateEntity entity) {
stmt.clearBindings();
Long id = entity.getId();
if (id != null) {
stmt.bindLong(1, id);
}
java.util.Date date = entity.getDate();
if (date != null) {
stmt.bindLong(2, date.getTime());
}
stmt.bindLong(3, entity.getDateNotNull().getTime());
}
@Override
protected final void bindValues(SQLiteStatement stmt, DateEntity entity) {
stmt.clearBindings();
Long id = entity.getId();
if (id != null) {
stmt.bindLong(1, id);
}
java.util.Date date = entity.getDate();
if (date != null) {
stmt.bindLong(2, date.getTime());
}
stmt.bindLong(3, entity.getDateNotNull().getTime());
}
@Override
public Long readKey(Cursor cursor, int offset) {
return cursor.isNull(offset + 0) ? null : cursor.getLong(offset + 0);
}
@Override
public DateEntity readEntity(Cursor cursor, int offset) {
DateEntity entity = new DateEntity( //
cursor.isNull(offset + 0) ? null : cursor.getLong(offset + 0), // id
cursor.isNull(offset + 1) ? null : new java.util.Date(cursor.getLong(offset + 1)), // date
new java.util.Date(cursor.getLong(offset + 2)) // dateNotNull
);
return entity;
}
@Override
public void readEntity(Cursor cursor, DateEntity entity, int offset) {
entity.setId(cursor.isNull(offset + 0) ? null : cursor.getLong(offset + 0));
entity.setDate(cursor.isNull(offset + 1) ? null : new java.util.Date(cursor.getLong(offset + 1)));
entity.setDateNotNull(new java.util.Date(cursor.getLong(offset + 2)));
}
@Override
protected final Long updateKeyAfterInsert(DateEntity entity, long rowId) {
entity.setId(rowId);
return rowId;
}
@Override
public Long getKey(DateEntity entity) {
if(entity != null) {
return entity.getId();
} else {
return null;
}
}
@Override
public boolean hasKey(DateEntity entity) {
return entity.getId() != null;
}
@Override
protected final boolean isEntityUpdateable() {
return true;
}
/** Internal query to resolve the "dateEntityList" to-many relationship of ToManyEntity. */
public List<DateEntity> _queryToManyEntity_DateEntityList(Long idToMany) {
synchronized (this) {
if (toManyEntity_DateEntityListQuery == null) {
QueryBuilder<DateEntity> queryBuilder = queryBuilder();
queryBuilder.join(JoinManyToDateEntity.class, JoinManyToDateEntityDao.Properties.IdDate)
.where(JoinManyToDateEntityDao.Properties.IdToMany.eq(idToMany));
toManyEntity_DateEntityListQuery = queryBuilder.build();
}
}
Query<DateEntity> query = toManyEntity_DateEntityListQuery.forCurrentThread();
query.setParameter(0, idToMany);
return query.list();
}
}
| Properties |
java | apache__camel | components/camel-test/camel-test-main-junit5/src/test/java/org/apache/camel/test/main/junit5/annotation/ConfigureTest.java | {
"start": 1793,
"end": 2169
} | class
____.addConfiguration(MyConfiguration.class);
}
@Test
void shouldConfigureTheCamelContext() throws Exception {
mock.expectedBodiesReceived("Hello Will!");
String result = template.requestBody((Object) null, String.class);
mock.assertIsSatisfied();
assertEquals("Hello Will!", result);
}
@Nested
| configuration |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractBulkDeleteTest.java | {
"start": 1956,
"end": 12161
} | class ____ extends AbstractFSContractTestBase {
private static final Logger LOG =
LoggerFactory.getLogger(AbstractContractBulkDeleteTest.class);
/**
* Page size for bulk delete. This is calculated based
* on the store implementation.
*/
protected int pageSize;
/**
* Base path for the bulk delete tests.
* All the paths to be deleted should be under this base path.
*/
protected Path basePath;
/**
* Test file system.
*/
protected FileSystem fs;
/**
* Reflection support.
*/
private DynamicWrappedIO dynamicWrappedIO;
@BeforeEach
@Override
public void setup() throws Exception {
super.setup();
fs = getFileSystem();
basePath = path(getClass().getName());
dynamicWrappedIO = new DynamicWrappedIO();
pageSize = dynamicWrappedIO.bulkDelete_pageSize(fs, basePath);
fs.mkdirs(basePath);
}
public Path getBasePath() {
return basePath;
}
protected int getExpectedPageSize() {
return 1;
}
/**
* Validate the page size for bulk delete operation. Different stores can have different
* implementations for bulk delete operation thus different page size.
*/
@Test
public void validatePageSize() throws Exception {
Assertions.assertThat(pageSize)
.describedAs("Page size should be 1 by default for all stores")
.isEqualTo(getExpectedPageSize());
}
@Test
public void testPathsSizeEqualsPageSizePrecondition() throws Exception {
List<Path> listOfPaths = createListOfPaths(pageSize, basePath);
// Bulk delete call should pass with no exception.
bulkDelete_delete(getFileSystem(), basePath, listOfPaths);
}
@Test
public void testPathsSizeGreaterThanPageSizePrecondition() throws Exception {
List<Path> listOfPaths = createListOfPaths(pageSize + 1, basePath);
intercept(IllegalArgumentException.class, () ->
dynamicWrappedIO.bulkDelete_delete(getFileSystem(), basePath, listOfPaths));
}
@Test
public void testPathsSizeLessThanPageSizePrecondition() throws Exception {
List<Path> listOfPaths = createListOfPaths(pageSize - 1, basePath);
// Bulk delete call should pass with no exception.
dynamicWrappedIO.bulkDelete_delete(getFileSystem(), basePath, listOfPaths);
}
@Test
public void testBulkDeleteSuccessful() throws Exception {
runBulkDelete(false);
}
@Test
public void testBulkDeleteSuccessfulUsingDirectFS() throws Exception {
runBulkDelete(true);
}
private void runBulkDelete(boolean useDirectFS) throws IOException {
List<Path> listOfPaths = createListOfPaths(pageSize, basePath);
for (Path path : listOfPaths) {
touch(fs, path);
}
FileStatus[] fileStatuses = fs.listStatus(basePath);
Assertions.assertThat(fileStatuses)
.describedAs("File count after create")
.hasSize(pageSize);
if (useDirectFS) {
assertSuccessfulBulkDelete(
fs.createBulkDelete(basePath).bulkDelete(listOfPaths));
} else {
// Using WrappedIO to call bulk delete.
assertSuccessfulBulkDelete(
bulkDelete_delete(getFileSystem(), basePath, listOfPaths));
}
FileStatus[] fileStatusesAfterDelete = fs.listStatus(basePath);
Assertions.assertThat(fileStatusesAfterDelete)
.describedAs("File statuses should be empty after delete")
.isEmpty();
}
@Test
public void validatePathCapabilityDeclared() throws Exception {
Assertions.assertThat(fs.hasPathCapability(basePath, CommonPathCapabilities.BULK_DELETE))
.describedAs("Path capability BULK_DELETE should be declared")
.isTrue();
}
/**
* This test should fail as path is not under the base path.
*/
@Test
public void testDeletePathsNotUnderBase() throws Exception {
List<Path> paths = new ArrayList<>();
Path pathNotUnderBase = path("not-under-base");
paths.add(pathNotUnderBase);
intercept(IllegalArgumentException.class,
() -> bulkDelete_delete(getFileSystem(), basePath, paths));
}
/**
* We should be able to delete the base path itself
* using bulk delete operation.
*/
@Test
public void testDeletePathSameAsBasePath() throws Exception {
assertSuccessfulBulkDelete(bulkDelete_delete(getFileSystem(),
basePath,
Arrays.asList(basePath)));
}
/**
* This test should fail as path is not absolute.
*/
@Test
public void testDeletePathsNotAbsolute() throws Exception {
List<Path> paths = new ArrayList<>();
Path pathNotAbsolute = new Path("not-absolute");
paths.add(pathNotAbsolute);
intercept(IllegalArgumentException.class,
() -> bulkDelete_delete(getFileSystem(), basePath, paths));
}
@Test
public void testDeletePathsNotExists() throws Exception {
List<Path> paths = new ArrayList<>();
Path pathNotExists = new Path(basePath, "not-exists");
paths.add(pathNotExists);
// bulk delete call doesn't verify if a path exist or not before deleting.
assertSuccessfulBulkDelete(bulkDelete_delete(getFileSystem(), basePath, paths));
}
@Test
public void testDeletePathsDirectory() throws Exception {
List<Path> paths = new ArrayList<>();
Path dirPath = new Path(basePath, "dir");
paths.add(dirPath);
Path filePath = new Path(dirPath, "file");
paths.add(filePath);
pageSizePreconditionForTest(paths.size());
fs.mkdirs(dirPath);
touch(fs, filePath);
// Outcome is undefined. But call shouldn't fail.
assertSuccessfulBulkDelete(bulkDelete_delete(getFileSystem(), basePath, paths));
}
@Test
public void testBulkDeleteParentDirectoryWithDirectories() throws Exception {
List<Path> paths = new ArrayList<>();
Path dirPath = new Path(basePath, "dir");
fs.mkdirs(dirPath);
Path subDir = new Path(dirPath, "subdir");
fs.mkdirs(subDir);
// adding parent directory to the list of paths.
paths.add(dirPath);
List<Map.Entry<Path, String>> entries = bulkDelete_delete(getFileSystem(), basePath, paths);
Assertions.assertThat(entries)
.describedAs("Parent non empty directory should not be deleted")
.hasSize(1);
// During the bulk delete operation, the non-empty directories are not deleted in default implementation.
assertIsDirectory(dirPath);
}
@Test
public void testBulkDeleteParentDirectoryWithFiles() throws Exception {
List<Path> paths = new ArrayList<>();
Path dirPath = new Path(basePath, "dir");
fs.mkdirs(dirPath);
Path file = new Path(dirPath, "file");
touch(fs, file);
// adding parent directory to the list of paths.
paths.add(dirPath);
List<Map.Entry<Path, String>> entries = bulkDelete_delete(getFileSystem(), basePath, paths);
Assertions.assertThat(entries)
.describedAs("Parent non empty directory should not be deleted")
.hasSize(1);
// During the bulk delete operation, the non-empty directories are not deleted in default implementation.
assertIsDirectory(dirPath);
}
@Test
public void testDeleteEmptyDirectory() throws Exception {
List<Path> paths = new ArrayList<>();
Path emptyDirPath = new Path(basePath, "empty-dir");
fs.mkdirs(emptyDirPath);
paths.add(emptyDirPath);
// Should pass as empty directory.
assertSuccessfulBulkDelete(bulkDelete_delete(getFileSystem(), basePath, paths));
}
@Test
public void testDeleteEmptyList() throws Exception {
List<Path> paths = new ArrayList<>();
// Empty list should pass.
assertSuccessfulBulkDelete(bulkDelete_delete(getFileSystem(), basePath, paths));
}
@Test
public void testDeleteSamePathsMoreThanOnce() throws Exception {
List<Path> paths = new ArrayList<>();
Path path = new Path(basePath, "file");
paths.add(path);
paths.add(path);
Path another = new Path(basePath, "another-file");
paths.add(another);
pageSizePreconditionForTest(paths.size());
touch(fs, path);
touch(fs, another);
assertSuccessfulBulkDelete(bulkDelete_delete(getFileSystem(), basePath, paths));
}
/**
* Skip test if paths size is greater than page size.
*/
protected void pageSizePreconditionForTest(int size) {
if (size > pageSize) {
skip("Test requires paths size less than or equal to page size: "
+ pageSize
+ "; actual size is " + size);
}
}
/**
* This test validates that files to be deleted don't have
* to be direct children of the base path.
*/
@Test
public void testDeepDirectoryFilesDelete() throws Exception {
List<Path> paths = new ArrayList<>();
Path dir1 = new Path(basePath, "dir1");
Path dir2 = new Path(dir1, "dir2");
Path dir3 = new Path(dir2, "dir3");
fs.mkdirs(dir3);
Path file1 = new Path(dir3, "file1");
touch(fs, file1);
paths.add(file1);
assertSuccessfulBulkDelete(bulkDelete_delete(getFileSystem(), basePath, paths));
}
@Test
public void testChildPaths() throws Exception {
List<Path> paths = new ArrayList<>();
Path dirPath = new Path(basePath, "dir");
fs.mkdirs(dirPath);
paths.add(dirPath);
Path filePath = new Path(dirPath, "file");
touch(fs, filePath);
paths.add(filePath);
pageSizePreconditionForTest(paths.size());
// Should pass as both paths are under the base path.
assertSuccessfulBulkDelete(bulkDelete_delete(getFileSystem(), basePath, paths));
}
/**
* Assert on returned entries after bulk delete operation.
* Entries should be empty after successful delete.
*/
public static void assertSuccessfulBulkDelete(List<Map.Entry<Path, String>> entries) {
Assertions.assertThat(entries)
.describedAs("Bulk delete failed, " +
"return entries should be empty after successful delete")
.isEmpty();
}
/**
* Create a list of paths with the given count
* under the given base path.
*/
private List<Path> createListOfPaths(int count, Path basePath) {
List<Path> paths = new ArrayList<>();
for (int i = 0; i < count; i++) {
Path path = new Path(basePath, "file-" + i);
paths.add(path);
}
return paths;
}
}
| AbstractContractBulkDeleteTest |
java | netty__netty | microbench/src/main/java/io/netty/handler/codec/AdvancedCodecOutputListBenchmark.java | {
"start": 986,
"end": 1991
} | class ____ extends AbstractMicrobenchmark {
private static final Object ELEMENT = new Object();
@Param({ "1", "4" })
public int elements;
@Benchmark
public boolean codecOutListAllocRecycle() {
return benchmark(elements, CodecOutputList.newInstance(), CodecOutputList.newInstance(),
CodecOutputList.newInstance(), CodecOutputList.newInstance());
}
private static boolean benchmark(int elements, CodecOutputList list1, CodecOutputList list2,
CodecOutputList list3, CodecOutputList list4) {
return (benchmark(elements, list1) == benchmark(elements, list2)) ==
(benchmark(elements, list3) == benchmark(elements, list4));
}
private static boolean benchmark(int elements, CodecOutputList list) {
for (int i = 0; i < elements; ++i) {
list.add(ELEMENT);
}
list.recycle();
return list.insertSinceRecycled();
}
}
| AdvancedCodecOutputListBenchmark |
java | hibernate__hibernate-orm | hibernate-spatial/src/main/java/org/hibernate/spatial/dialect/oracle/SDOGeometryValueBinder.java | {
"start": 819,
"end": 2688
} | class ____<J> implements ValueBinder<J> {
private static final String SQL_TYPE_NAME = "MDSYS.SDO_GEOMETRY";
private final OracleJDBCTypeFactory typeFactory;
private final JavaType<J> javaType;
public SDOGeometryValueBinder(
JavaType<J> javaType,
JdbcType jdbcType,
OracleJDBCTypeFactory typeFactory) {
this.javaType = javaType;
this.typeFactory = typeFactory;
}
@Override
public void bind(PreparedStatement st, J value, int index, WrapperOptions options) throws SQLException {
if ( value == null ) {
st.setNull( index, Types.STRUCT, SQL_TYPE_NAME );
}
else {
final Geometry geometry = javaType.unwrap( value, Geometry.class, options );
final Object dbGeom = toNative( geometry, st.getConnection() );
st.setObject( index, dbGeom );
}
}
@Override
public void bind(
CallableStatement st, J value, String name, WrapperOptions options) throws SQLException {
if ( value == null ) {
st.setNull( name, Types.STRUCT, SQL_TYPE_NAME );
}
else {
final Geometry geometry = javaType.unwrap( value, Geometry.class, options );
final Object dbGeom = toNative( geometry, st.getConnection() );
st.setObject( name, dbGeom );
}
}
public Object store(SDOGeometry geom, Connection conn) throws SQLException {
return typeFactory.createStruct( geom, conn );
}
private Object toNative(Geometry geom, Connection connection) {
try {
final SDOGeometry sdoGeom = Encoders.encode( geom );
return store( sdoGeom, connection );
}
catch (SQLException e) {
throw new HibernateException( "Problem during conversion from JTS to SDOGeometry", e );
}
catch (IllegalArgumentException e) {
//we get here if the type of geometry is unsupported by geolatte encoders
throw new HibernateException( e.getMessage() );
}
catch (Exception e) {
throw new HibernateException( e );
}
}
}
| SDOGeometryValueBinder |
java | apache__camel | components/camel-jms/src/main/java/org/apache/camel/component/jms/reply/CorrelationTimeoutMap.java | {
"start": 1422,
"end": 2960
} | class ____ extends DefaultTimeoutMap<String, ReplyHandler> {
private final BiConsumer<ReplyHandler, String> evictionTask;
CorrelationTimeoutMap(ScheduledExecutorService executor, long requestMapPollTimeMillis, ExecutorService executorService) {
super(executor, requestMapPollTimeMillis);
// Support synchronous or asynchronous handling of evictions
evictionTask = executorService == null
? ReplyHandler::onTimeout
: (handler, key) -> executorService.submit(() -> handler.onTimeout(key));
addListener(this::listener);
}
private static long encode(long timeoutMillis) {
return timeoutMillis > 0 ? timeoutMillis : Integer.MAX_VALUE; // TODO why not Long.MAX_VALUE!
}
private void listener(Listener.Type type, String key, ReplyHandler handler) {
if (type == Put) {
log.trace("Added correlationID: {}", key);
} else if (type == Remove) {
log.trace("Removed correlationID: {}", key);
} else if (type == Evict) {
evictionTask.accept(handler, key);
log.trace("Evicted correlationID: {}", key);
}
}
@Override
public ReplyHandler put(String key, ReplyHandler value, long timeoutMillis) {
return super.put(key, value, encode(timeoutMillis));
}
@Override
public ReplyHandler putIfAbsent(String key, ReplyHandler value, long timeoutMillis) {
return super.putIfAbsent(key, value, encode(timeoutMillis));
}
}
| CorrelationTimeoutMap |
java | spring-projects__spring-boot | module/spring-boot-artemis/src/dockerTest/java/org/springframework/boot/artemis/testcontainers/ArtemisContainerConnectionDetailsFactoryIntegrationTests.java | {
"start": 1941,
"end": 2611
} | class ____ {
@Container
@ServiceConnection
static final ArtemisContainer artemis = TestImage.container(ArtemisContainer.class);
@Autowired
private JmsClient jmsClient;
@Autowired
private TestListener listener;
@Test
void connectionCanBeMadeToActiveMQContainer() {
this.jmsClient.destination("sample.queue").send("message");
Awaitility.waitAtMost(Duration.ofMinutes(1))
.untilAsserted(() -> assertThat(this.listener.messages).containsExactly("message"));
}
@Configuration(proxyBeanMethods = false)
@ImportAutoConfiguration({ ArtemisAutoConfiguration.class, JmsAutoConfiguration.class })
static | ArtemisContainerConnectionDetailsFactoryIntegrationTests |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/interceptors/aroundconstruct/AroundConstructAppliedViaConstructorTest.java | {
"start": 2562,
"end": 2744
} | class ____ {
@MyTransactional
public SimpleBean_NoArgsConstructor() {
}
}
@Singleton
@MyOtherTransactional
static | SimpleBean_NoArgsConstructor |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/appender/mom/kafka/KafkaManager.java | {
"start": 8788,
"end": 9150
} | class ____ implements ManagerFactory<KafkaManager, FactoryData> {
@Override
public KafkaManager createManager(final String name, final FactoryData data) {
return new KafkaManager(
data.loggerContext, name, data.topic, data.syncSend, data.sendTimestamp, data.properties, data.key);
}
}
}
| KafkaManagerFactory |
java | grpc__grpc-java | xds/src/main/java/io/grpc/xds/TlsContextManager.java | {
"start": 845,
"end": 2275
} | interface ____ {
/** Creates a SslContextProvider. Used for retrieving a server-side SslContext. */
SslContextProvider findOrCreateServerSslContextProvider(
DownstreamTlsContext downstreamTlsContext);
/** Creates a SslContextProvider. Used for retrieving a client-side SslContext. */
SslContextProvider findOrCreateClientSslContextProvider(
UpstreamTlsContext upstreamTlsContext);
/**
* Releases an instance of the given client-side {@link SslContextProvider}.
*
* <p>The instance must have been obtained from {@link #findOrCreateClientSslContextProvider}.
* Otherwise will throw IllegalArgumentException.
*
* <p>Caller must not release a reference more than once. It's advised that you clear the
* reference to the instance with the null returned by this method.
*/
SslContextProvider releaseClientSslContextProvider(SslContextProvider sslContextProvider);
/**
* Releases an instance of the given server-side {@link SslContextProvider}.
*
* <p>The instance must have been obtained from {@link #findOrCreateServerSslContextProvider}.
* Otherwise will throw IllegalArgumentException.
*
* <p>Caller must not release a reference more than once. It's advised that you clear the
* reference to the instance with the null returned by this method.
*/
SslContextProvider releaseServerSslContextProvider(SslContextProvider sslContextProvider);
}
| TlsContextManager |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/bytecode/enhancement/merge/MergeUnsavedEntitiesTest.java | {
"start": 1358,
"end": 3375
} | class ____ {
public static final String CHILD_NAME = "first child";
@Test
public void testMerge(SessionFactoryScope scope) {
scope.inTransaction(
session -> {
Parent parent = new Parent( 1l, 2l );
parent = session.merge( parent );
Child child = new Child( 2l, CHILD_NAME );
child = session.merge( child );
parent.addChild( child );
parent.getId();
}
);
scope.inTransaction(
session -> {
Parent parent = session.find( Parent.class, 1l );
assertThat( parent.getChildren().size() ).isEqualTo( 1 );
Child child = parent.getChildren().get( 0 );
assertThat( child.getName() ).isEqualTo( CHILD_NAME );
}
);
scope.inTransaction(
session -> {
Parent parent = session.find( Parent.class, 1l );
session.merge( parent );
}
);
scope.inTransaction(
session -> {
Parent parent = session.find( Parent.class, 1l );
assertThat( parent.getChildren().size() ).isEqualTo( 1 );
Child child = parent.getChildren().get( 0 );
assertThat( child.getName() ).isEqualTo( CHILD_NAME );
}
);
}
@Test
public void testMergeParentWithoutChildren(SessionFactoryScope scope) {
scope.inTransaction(
session -> {
Parent parent = new Parent( 1l, 2l );
session.merge( parent );
}
);
scope.inTransaction(
session -> {
Parent parent = session.find( Parent.class, 1l );
assertThat( parent.getChildren()).isEmpty();
}
);
}
@Test
@Jira("HHH-18177")
public void testMergeTransientInstanceWithGeneratedId(SessionFactoryScope scope) {
Book merged = scope.fromTransaction(
session -> {
Book book = new Book( "9788806257231" );
return session.merge( book );
}
);
scope.inTransaction(
session -> {
Book book = session.get( Book.class, merged.getId() );
assertThat( book ).isNotNull();
assertThat( book.getBookNotes() ).isEmpty();
}
);
}
@Entity(name = "Parent")
@Table(name = "parent")
public static | MergeUnsavedEntitiesTest |
java | reactor__reactor-core | reactor-core/src/test/java/reactor/util/function/Tuple7Test.java | {
"start": 929,
"end": 5655
} | class ____ {
private Tuple7<Integer, Integer, Integer, Integer, Integer, Integer, Integer> full =
new Tuple7<>(1, 2, 3, 4, 5, 6, 7);
@Test
public void nullT7Rejected() {
assertThatExceptionOfType(NullPointerException.class)
.isThrownBy(() -> new Tuple7<>(1, 2, 3, 4, 5, 6, null))
.withMessage("t7");
}
@Test
public void mapT1() {
Tuple7<String, Integer, Integer, Integer, Integer, Integer, Integer> base =
Tuples.of("Foo", 200, 300, 400, 500, 600, 700);
Tuple2<?,?> mapped = base.mapT1(String::length);
assertThat(mapped).isNotSameAs(base)
.hasSize(7)
.containsExactly(3, base.getT2(), base.getT3(), base.getT4(),
base.getT5(), base.getT6(), base.getT7());
}
@Test
public void mapT2() {
Tuple7<Integer, String, Integer, Integer, Integer, Integer, Integer> base =
Tuples.of(100, "Foo", 300, 400, 500, 600, 700);
Tuple2<?,?> mapped = base.mapT2(String::length);
assertThat(mapped).isNotSameAs(base)
.hasSize(7)
.containsExactly(base.getT1(), 3, base.getT3(), base.getT4(),
base.getT5(), base.getT6(), base.getT7());
}
@Test
public void mapT3() {
Tuple7<Integer, Integer, String, Integer, Integer, Integer, Integer> base =
Tuples.of(100, 200, "Foo", 400, 500, 600, 700);
Tuple2<?,?> mapped = base.mapT3(String::length);
assertThat(mapped).isNotSameAs(base)
.hasSize(7)
.containsExactly(base.getT1(), base.getT2(), 3, base.getT4(),
base.getT5(), base.getT6(), base.getT7());
}
@Test
public void mapT4() {
Tuple7<Integer, Integer, Integer, String, Integer, Integer, Integer> base =
Tuples.of(100, 200, 300, "Foo", 500, 600, 700);
Tuple2<?,?> mapped = base.mapT4(String::length);
assertThat(mapped).isNotSameAs(base)
.hasSize(7)
.containsExactly(base.getT1(), base.getT2(), base.getT3(), 3,
base.getT5(), base.getT6(), base.getT7());
}
@Test
public void mapT5() {
Tuple7<Integer, Integer, Integer, Integer, String, Integer, Integer> base =
Tuples.of(100, 200, 300, 400, "Foo", 600, 700);
Tuple2<?,?> mapped = base.mapT5(String::length);
assertThat(mapped).isNotSameAs(base)
.hasSize(7)
.containsExactly(base.getT1(), base.getT2(), base.getT3(), base.getT4(),
3, base.getT6(), base.getT7());
}
@Test
public void mapT6() {
Tuple7<Integer, Integer, Integer, Integer, Integer, String, Integer> base =
Tuples.of(100, 200, 300, 400, 500, "Foo", 700);
Tuple2<?,?> mapped = base.mapT6(String::length);
assertThat(mapped).isNotSameAs(base)
.hasSize(7)
.containsExactly(base.getT1(), base.getT2(), base.getT3(), base.getT4(),
base.getT5(), 3, base.getT7());
}
@Test
public void mapT7() {
Tuple7<Integer, Integer, Integer, Integer, Integer, Integer, String> base =
Tuples.of(100, 200, 300, 400, 500, 600, "Foo");
Tuple2<?,?> mapped = base.mapT7(String::length);
assertThat(mapped).isNotSameAs(base)
.hasSize(7)
.containsExactly(base.getT1(), base.getT2(), base.getT3(), base.getT4(),
base.getT5(), base.getT6(), 3);
}
@Test
public void mapT7Null() {
assertThatNullPointerException().isThrownBy(() ->
Tuples.of(1, 2, 3, 4, 5, 6, 7)
.mapT7(i -> null)
).withMessage("t7");
}
@Test
public void getNegativeIndex() {
assertThat(full.get(-1)).isNull();
}
@Test
public void getTooLargeIndex() {
assertThat(full.get(10)).isNull();
}
@Test
public void getAllValuesCorrespondToArray() {
Object[] array = full.toArray();
for (int i = 0; i < array.length; i++) {
assertThat(full.get(i)).as("element at %d", i).isEqualTo(array[i]);
}
}
@Test
public void equalityOfSameReference() {
assertThat(full).isEqualTo(full);
}
@Test
public void equalityOfNullOrWrongClass() {
assertThat(full).isNotEqualTo(null)
.isNotEqualTo("foo");
}
@Test
public void t7Combinations() {
assertThat(new Tuple7<>(1, 2, 3, 4, 5, 6, 7))
.isNotEqualTo(new Tuple7<>(1, 2, 3, 4, 5, 6, 10))
.isEqualTo(new Tuple7<>(1, 2, 3, 4, 5, 6, 7));
}
@Test
public void sanityTestHashcode() {
Tuple7<Integer, Integer, Integer, Integer, Integer, Integer, Integer> same = new Tuple7<>(1, 2, 3, 4, 5, 6, 7);
Tuple7<Integer, Integer, Integer, Integer, Integer, Integer, Integer> different = new Tuple7<>(1, 2, 3, 4, 5, 6,1);
assertThat(full.hashCode())
.isEqualTo(same.hashCode())
.isNotEqualTo(different.hashCode());
}
}
| Tuple7Test |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/selection/generics/ErroneousSource3.java | {
"start": 206,
"end": 660
} | class ____ {
private WildCardExtendsMBWrapper<TypeB> fooWildCardExtendsMBTypeBFailure;
public WildCardExtendsMBWrapper<TypeB> getFooWildCardExtendsMBTypeBFailure() {
return fooWildCardExtendsMBTypeBFailure;
}
public void setFooWildCardExtendsMBTypeBFailure(WildCardExtendsMBWrapper<TypeB> fooWildCardExtendsMBTypeBFailure) {
this.fooWildCardExtendsMBTypeBFailure = fooWildCardExtendsMBTypeBFailure;
}
}
| ErroneousSource3 |
java | apache__maven | impl/maven-impl/src/test/java/org/apache/maven/impl/model/DefaultModelObjectPoolTest.java | {
"start": 1358,
"end": 8774
} | class ____ {
@Test
void testServiceLoading() {
// Test that the static method works
String testString = "test";
String result = ModelObjectProcessor.processObject(testString);
assertNotNull(result);
assertEquals(testString, result);
}
@Test
void testDependencyPooling() {
ModelObjectProcessor processor = new DefaultModelObjectPool();
// Create two identical dependencies
// Note: Due to the static processor being active, these may already be pooled
Dependency dep1 = Dependency.newBuilder()
.groupId("org.apache.maven")
.artifactId("maven-core")
.version("4.0.0")
.build();
Dependency dep2 = Dependency.newBuilder()
.groupId("org.apache.maven")
.artifactId("maven-core")
.version("4.0.0")
.build();
// Due to static processing, they may already be the same instance
// This is actually the expected behavior - pooling is working!
// Process them through our specific processor instance
Dependency pooled1 = processor.process(dep1);
Dependency pooled2 = processor.process(dep2);
// They should be the same instance after processing
assertSame(pooled1, pooled2);
// The pooled instances should be semantically equal to the originals
assertTrue(dependenciesEqual(dep1, pooled1));
assertTrue(dependenciesEqual(dep2, pooled2));
}
/**
* Helper method to check complete equality of dependencies.
*/
private boolean dependenciesEqual(Dependency dep1, Dependency dep2) {
return Objects.equals(dep1.getGroupId(), dep2.getGroupId())
&& Objects.equals(dep1.getArtifactId(), dep2.getArtifactId())
&& Objects.equals(dep1.getVersion(), dep2.getVersion())
&& Objects.equals(dep1.getType(), dep2.getType())
&& Objects.equals(dep1.getClassifier(), dep2.getClassifier())
&& Objects.equals(dep1.getScope(), dep2.getScope())
&& Objects.equals(dep1.getSystemPath(), dep2.getSystemPath())
&& Objects.equals(dep1.getExclusions(), dep2.getExclusions())
&& Objects.equals(dep1.getOptional(), dep2.getOptional())
&& Objects.equals(dep1.getLocationKeys(), dep2.getLocationKeys())
&& locationsEqual(dep1, dep2)
&& Objects.equals(dep1.getImportedFrom(), dep2.getImportedFrom());
}
/**
* Helper method to check locations equality.
*/
private boolean locationsEqual(Dependency dep1, Dependency dep2) {
var keys1 = dep1.getLocationKeys();
var keys2 = dep2.getLocationKeys();
if (!Objects.equals(keys1, keys2)) {
return false;
}
for (Object key : keys1) {
if (!Objects.equals(dep1.getLocation(key), dep2.getLocation(key))) {
return false;
}
}
return true;
}
@Test
void testNonDependencyObjects() {
ModelObjectProcessor processor = new DefaultModelObjectPool();
String testString = "test";
String result = processor.process(testString);
// Non-dependency objects should be returned as-is
assertSame(testString, result);
}
@Test
void testConfigurableReferenceType() {
// Test that the reference type can be configured via system property
String originalValue = System.getProperty(Constants.MAVEN_MODEL_PROCESSOR_REFERENCE_TYPE);
try {
// Set a different reference type
System.setProperty(Constants.MAVEN_MODEL_PROCESSOR_REFERENCE_TYPE, "SOFT");
// Create a new processor (this would use the new setting in a real scenario)
ModelObjectProcessor processor = new DefaultModelObjectPool();
// Test that it still works (the actual reference type is used internally)
Dependency dep = Dependency.newBuilder()
.groupId("test")
.artifactId("test")
.version("1.0")
.build();
Dependency result = processor.process(dep);
assertNotNull(result);
assertEquals(dep, result);
} finally {
// Restore original value
if (originalValue != null) {
System.setProperty(Constants.MAVEN_MODEL_PROCESSOR_REFERENCE_TYPE, originalValue);
} else {
System.clearProperty(Constants.MAVEN_MODEL_PROCESSOR_REFERENCE_TYPE);
}
}
}
@Test
void testConfigurablePooledTypes() {
// Configure to only pool Dependencies
ModelObjectProcessor processor =
new DefaultModelObjectPool(Map.of(Constants.MAVEN_MODEL_PROCESSOR_POOLED_TYPES, "Dependency"));
// Dependencies should be pooled
Dependency dep1 = Dependency.newBuilder()
.groupId("test")
.artifactId("test")
.version("1.0")
.build();
Dependency dep2 = Dependency.newBuilder()
.groupId("test")
.artifactId("test")
.version("1.0")
.build();
Dependency result1 = processor.process(dep1);
Dependency result2 = processor.process(dep2);
// Should be the same instance due to pooling
assertSame(result1, result2);
// Non-dependency objects should not be pooled (pass through)
String str1 = "test";
String str2 = processor.process(str1);
assertSame(str1, str2); // Same instance because it's not pooled
}
@Test
void testPerTypeReferenceType() {
// Set default to WEAK and Dependency-specific to HARD
ModelObjectProcessor processor = new DefaultModelObjectPool(Map.of(
Constants.MAVEN_MODEL_PROCESSOR_REFERENCE_TYPE,
"WEAK",
Constants.MAVEN_MODEL_PROCESSOR_REFERENCE_TYPE_PREFIX + "Dependency",
"HARD"));
// Test that dependencies still work with per-type configuration
Dependency dep = Dependency.newBuilder()
.groupId("test")
.artifactId("test")
.version("1.0")
.build();
Dependency result = processor.process(dep);
assertNotNull(result);
assertEquals(dep, result);
}
@Test
void testStatistics() {
ModelObjectProcessor processor = new DefaultModelObjectPool();
// Process some dependencies
for (int i = 0; i < 5; i++) {
Dependency dep = Dependency.newBuilder()
.groupId("test")
.artifactId("test-" + (i % 2)) // Create some duplicates
.version("1.0")
.build();
processor.process(dep);
}
// Check that statistics are available
String stats = DefaultModelObjectPool.getStatistics(Dependency.class);
assertNotNull(stats);
assertTrue(stats.contains("Dependency"));
String allStats = DefaultModelObjectPool.getAllStatistics();
assertNotNull(allStats);
assertTrue(allStats.contains("ModelObjectPool Statistics"));
}
}
| DefaultModelObjectPoolTest |
java | google__error-prone | core/src/test/java/com/google/errorprone/matchers/AnnotationHasArgumentWithValueTest.java | {
"start": 2782,
"end": 3424
} | class ____ {}
""");
assertCompiles(
annotationMatches(
/* shouldMatch= */ true,
new AnnotationHasArgumentWithValue("value", stringLiteral("unchecked"))));
}
private Scanner annotationMatches(boolean shouldMatch, AnnotationHasArgumentWithValue toMatch) {
return new Scanner() {
@Override
public Void visitAnnotation(AnnotationTree node, VisitorState visitorState) {
assertWithMessage(node.toString())
.that(!shouldMatch ^ toMatch.matches(node, visitorState))
.isTrue();
return super.visitAnnotation(node, visitorState);
}
};
}
}
| A |
java | playframework__playframework | web/play-java-forms/src/test/java/play/data/RepeatableConstraintsForm.java | {
"start": 287,
"end": 655
} | class ____ {
@ValidateWith(BlueValidator.class)
@ValidateWith(GreenValidator.class)
@Pattern(value = "[a-c]", message = "Should be a - c")
@Pattern(value = "[c-h]", message = "Should be c - h")
private String name;
public String getName() {
return this.name;
}
public void setName(String name) {
this.name = name;
}
}
| RepeatableConstraintsForm |
java | apache__camel | components/camel-sql/src/test/java/org/apache/camel/processor/aggregate/jdbc/JdbcAggregateConcurrentSameGroupTest.java | {
"start": 1074,
"end": 2388
} | class ____ extends AbstractJdbcAggregationTestSupport {
@Test
public void testNoConcurrentProducers() throws Exception {
doSendMessages(1, 1);
}
@Test
public void testConcurrentProducers() throws Exception {
doSendMessages(10, 5);
}
@Test
public void testMoreConcurrentProducers() throws Exception {
doSendMessages(50, 10);
}
private void doSendMessages(int files, int poolSize) throws Exception {
MockEndpoint mock = getMockEndpoint("mock:aggregated");
mock.setResultWaitTime(30 * 1000L);
mock.expectedMessageCount(1);
ExecutorService executor = Executors.newFixedThreadPool(poolSize);
for (int i = 0; i < files; i++) {
final int index = i;
executor.submit(new Callable<Object>() {
public Object call() throws Exception {
template.sendBodyAndHeader("direct:start", index, "id", 123);
// simulate a little delay
Thread.sleep(3);
return null;
}
});
}
MockEndpoint.assertIsSatisfied(context);
executor.shutdownNow();
}
@Override
long getCompletionInterval() {
return 1000;
}
}
| JdbcAggregateConcurrentSameGroupTest |
java | apache__flink | flink-datastream/src/main/java/org/apache/flink/datastream/impl/operators/ProcessOperator.java | {
"start": 2983,
"end": 9785
} | class ____<IN, OUT>
extends AbstractAsyncStateUdfStreamOperator<OUT, OneInputStreamProcessFunction<IN, OUT>>
implements OneInputStreamOperator<IN, OUT>, BoundedOneInput {
protected transient DefaultRuntimeContext context;
protected transient DefaultPartitionedContext<OUT> partitionedContext;
protected transient NonPartitionedContext<OUT> nonPartitionedContext;
protected transient TimestampCollector<OUT> outputCollector;
protected transient Map<String, AbstractInternalWatermarkDeclaration<?>>
watermarkDeclarationMap;
// {@link EventTimeWatermarkHandler} will be used to process event time related watermarks
protected transient EventTimeWatermarkHandler eventTimeWatermarkHandler;
public ProcessOperator(OneInputStreamProcessFunction<IN, OUT> userFunction) {
super(userFunction);
}
@Override
public void open() throws Exception {
super.open();
StreamingRuntimeContext operatorContext = getRuntimeContext();
TaskInfo taskInfo = operatorContext.getTaskInfo();
context =
new DefaultRuntimeContext(
operatorContext.getJobInfo().getJobName(),
operatorContext.getJobType(),
taskInfo.getNumberOfParallelSubtasks(),
taskInfo.getMaxNumberOfParallelSubtasks(),
taskInfo.getTaskName(),
taskInfo.getIndexOfThisSubtask(),
taskInfo.getAttemptNumber(),
operatorContext.getMetricGroup());
outputCollector = getOutputCollector();
watermarkDeclarationMap =
config.getWatermarkDeclarations(getUserCodeClassloader()).stream()
.collect(
Collectors.toMap(
AbstractInternalWatermarkDeclaration::getIdentifier,
Function.identity()));
partitionedContext =
new DefaultPartitionedContext<>(
context,
this::currentKey,
getProcessorWithKey(),
getProcessingTimeManager(),
operatorContext,
getOperatorStateBackend());
outputCollector = getOutputCollector();
nonPartitionedContext = getNonPartitionedContext();
partitionedContext.setNonPartitionedContext(nonPartitionedContext);
this.eventTimeWatermarkHandler =
new EventTimeWatermarkHandler(1, output, timeServiceManager);
// Initialize event time extension related ProcessFunction
if (userFunction instanceof ExtractEventTimeProcessFunction) {
((ExtractEventTimeProcessFunction<IN>) userFunction)
.initEventTimeExtension(
getExecutionConfig(),
partitionedContext.getNonPartitionedContext().getWatermarkManager(),
getProcessingTimeService());
} else if (userFunction instanceof EventTimeWrappedOneInputStreamProcessFunction) {
// note that the {@code initEventTimeExtension} in EventTimeWrappedProcessFunction
// should be invoked before the {@code open}.
((EventTimeWrappedOneInputStreamProcessFunction<IN, OUT>) userFunction)
.initEventTimeExtension(
getTimerService(), getEventTimeSupplier(), eventTimeWatermarkHandler);
}
userFunction.open(nonPartitionedContext);
}
@Override
public void processElement(StreamRecord<IN> element) throws Exception {
outputCollector.setTimestampFromStreamRecord(element);
userFunction.processRecord(element.getValue(), outputCollector, partitionedContext);
}
@Override
public void processWatermarkInternal(WatermarkEvent watermark) throws Exception {
WatermarkHandlingResult watermarkHandlingResultByUserFunction =
userFunction.onWatermark(
watermark.getWatermark(), outputCollector, nonPartitionedContext);
if (watermarkHandlingResultByUserFunction == WatermarkHandlingResult.PEEK
&& watermarkDeclarationMap
.get(watermark.getWatermark().getIdentifier())
.getDefaultHandlingStrategy()
== WatermarkHandlingStrategy.FORWARD) {
if (EventTimeExtensionImpl.isEventTimeExtensionWatermark(watermark.getWatermark())) {
// if the watermark is event time related watermark, process them to advance event
// time
eventTimeWatermarkHandler.processWatermark(watermark.getWatermark(), 0);
} else {
output.emitWatermark(watermark);
}
}
}
protected TimestampCollector<OUT> getOutputCollector() {
return new OutputCollector<>(output);
}
@Override
public void endInput() throws Exception {
userFunction.endInput(nonPartitionedContext);
}
protected Object currentKey() {
throw new UnsupportedOperationException("The key is only defined for keyed operator");
}
protected BiConsumer<Runnable, Object> getProcessorWithKey() {
if (isAsyncKeyOrderedProcessingEnabled()) {
return (r, k) -> asyncProcessWithKey(k, r::run);
} else {
return (r, k) -> {
Object oldKey = currentKey();
setCurrentKey(k);
try {
r.run();
} finally {
setCurrentKey(oldKey);
}
};
}
}
protected ProcessingTimeManager getProcessingTimeManager() {
return UnsupportedProcessingTimeManager.INSTANCE;
}
protected NonPartitionedContext<OUT> getNonPartitionedContext() {
return new DefaultNonPartitionedContext<>(
context,
partitionedContext,
outputCollector,
false,
null,
output,
watermarkDeclarationMap);
}
@Override
public void close() throws Exception {
super.close();
userFunction.close();
}
@Override
public boolean isAsyncKeyOrderedProcessingEnabled() {
// For non-keyed operators, we disable async state processing.
return false;
}
protected InternalTimerService<VoidNamespace> getTimerService() {
return null;
}
protected Supplier<Long> getEventTimeSupplier() {
return () -> eventTimeWatermarkHandler.getLastEmitWatermark();
}
}
| ProcessOperator |
java | spring-projects__spring-framework | spring-beans/src/test/java/org/springframework/beans/factory/DefaultListableBeanFactoryTests.java | {
"start": 137009,
"end": 137377
} | class ____ implements FactoryBean<Object> {
public ConstructorDependencyFactoryBean(String dependency) {
}
@Override
public Object getObject() {
return "test";
}
@Override
public Class<?> getObjectType() {
return String.class;
}
@Override
public boolean isSingleton() {
return true;
}
}
public static | ConstructorDependencyFactoryBean |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/support/AbstractContextConfigurationUtilsTests.java | {
"start": 2162,
"end": 2333
} | class ____ tests involving {@link ContextLoaderUtils},
* {@link BootstrapTestUtils}, and {@link ActiveProfilesUtils}.
*
* @author Sam Brannen
* @since 3.1
*/
abstract | for |
java | apache__camel | components/camel-netty-http/src/main/java/org/apache/camel/component/netty/http/InboundStreamHttpRequest.java | {
"start": 937,
"end": 1322
} | class ____ {
private final HttpRequest request;
private final InputStream in;
public InboundStreamHttpRequest(HttpRequest request, InputStream in) {
this.request = request;
this.in = in;
}
public InputStream getInputStream() {
return in;
}
public HttpRequest getHttpRequest() {
return request;
}
}
| InboundStreamHttpRequest |
java | apache__camel | components/camel-smpp/src/test/java/org/apache/camel/component/smpp/SmppQuerySmCommandTest.java | {
"start": 1503,
"end": 3902
} | class ____ {
private SMPPSession session;
private SmppConfiguration config;
private SmppQuerySmCommand command;
@BeforeEach
public void setUp() {
session = mock(SMPPSession.class);
config = new SmppConfiguration();
command = new SmppQuerySmCommand(session, config);
}
@Test
public void executeWithConfigurationData() throws Exception {
Exchange exchange = new DefaultExchange(new DefaultCamelContext(), ExchangePattern.InOut);
exchange.getIn().setHeader(SmppConstants.COMMAND, "QuerySm");
exchange.getIn().setHeader(SmppConstants.ID, "1");
when(session.queryShortMessage("1", TypeOfNumber.UNKNOWN, NumberingPlanIndicator.UNKNOWN, "1616"))
.thenReturn(new QuerySmResult("-300101010000004+", MessageState.DELIVERED, (byte) 0));
command.execute(exchange);
assertEquals("1", exchange.getMessage().getHeader(SmppConstants.ID));
assertEquals("DELIVERED", exchange.getMessage().getHeader(SmppConstants.MESSAGE_STATE));
assertEquals((byte) 0, exchange.getMessage().getHeader(SmppConstants.ERROR));
assertNotNull(exchange.getMessage().getHeader(SmppConstants.FINAL_DATE));
}
@Test
public void execute() throws Exception {
Exchange exchange = new DefaultExchange(new DefaultCamelContext(), ExchangePattern.InOut);
exchange.getIn().setHeader(SmppConstants.COMMAND, "QuerySm");
exchange.getIn().setHeader(SmppConstants.ID, "1");
exchange.getIn().setHeader(SmppConstants.SOURCE_ADDR_TON, TypeOfNumber.NATIONAL.value());
exchange.getIn().setHeader(SmppConstants.SOURCE_ADDR_NPI, NumberingPlanIndicator.NATIONAL.value());
exchange.getIn().setHeader(SmppConstants.SOURCE_ADDR, "1818");
when(session.queryShortMessage("1", TypeOfNumber.NATIONAL, NumberingPlanIndicator.NATIONAL, "1818"))
.thenReturn(new QuerySmResult("-300101010000004+", MessageState.DELIVERED, (byte) 0));
command.execute(exchange);
assertEquals("1", exchange.getMessage().getHeader(SmppConstants.ID));
assertEquals("DELIVERED", exchange.getMessage().getHeader(SmppConstants.MESSAGE_STATE));
assertEquals((byte) 0, exchange.getMessage().getHeader(SmppConstants.ERROR));
assertNotNull(exchange.getMessage().getHeader(SmppConstants.FINAL_DATE));
}
}
| SmppQuerySmCommandTest |
java | apache__hadoop | hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/JobHistoryUtils.java | {
"start": 1430,
"end": 6325
} | class ____ {
private static String applyParser(String fileName, Pattern pattern) {
Matcher matcher = pattern.matcher(fileName);
if (!matcher.matches()) {
return null;
}
return matcher.group(1);
}
/**
* Extracts jobID string from the given job history log file name or
* job history configuration file name.
* @param fileName name of job history file or job history configuration file
* @return a valid jobID String, parsed out of the file name. Otherwise,
* [especially for .crc files] returns null.
*/
static String extractJobID(String fileName) {
// Get jobID if fileName is a config file name.
String jobId = extractJobIDFromConfFileName(fileName);
if (jobId == null) {
// Get JobID if fileName is a job history file name
jobId = extractJobIDFromHistoryFileName(fileName);
}
return jobId;
}
/**
* Extracts job id from the current hadoop version's job history file name.
* @param fileName job history file name from which job id is to be extracted
* @return job id if the history file name format is same as that of the
* current hadoop version. Returns null otherwise.
*/
private static String extractJobIDFromCurrentHistoryFile(String fileName) {
JobID id = null;
if (org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils
.isValidJobHistoryFileName(fileName)) {
try {
id = org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils
.getJobIDFromHistoryFilePath(fileName);
} catch (IOException e) {
// Ignore this exception and go ahead with getting of jobID assuming
// older hadoop verison's history file
}
}
if (id != null) {
return id.toString();
}
return null;
}
/**
* Extracts jobID string from the given job history file name.
* @param fileName name of the job history file
* @return JobID if the given <code>fileName</code> is a valid job history
* file name, <code>null</code> otherwise.
*/
private static String extractJobIDFromHistoryFileName(String fileName) {
// History file name could be in one of the following formats
// (1) old pre21 job history file name format
// (2) new pre21 job history file name format
// (3) current job history file name format i.e. 0.22
// Try to get the jobID assuming that the history file is from the current
// hadoop version
String jobID = extractJobIDFromCurrentHistoryFile(fileName);
if (jobID != null) {
return jobID;//history file is of current hadoop version
}
// History file could be of older hadoop versions
String pre21JobID = applyParser(fileName,
Pre21JobHistoryConstants.JOBHISTORY_FILENAME_REGEX_V1);
if (pre21JobID == null) {
pre21JobID = applyParser(fileName,
Pre21JobHistoryConstants.JOBHISTORY_FILENAME_REGEX_V2);
}
return pre21JobID;
}
/**
* Extracts jobID string from the given job conf xml file name.
* @param fileName name of the job conf xml file
* @return job id if the given <code>fileName</code> is a valid job conf xml
* file name, <code>null</code> otherwise.
*/
private static String extractJobIDFromConfFileName(String fileName) {
// History conf file name could be in one of the following formats
// (1) old pre21 job history file name format
// (2) new pre21 job history file name format
// (3) current job history file name format i.e. 0.22
String pre21JobID = applyParser(fileName,
Pre21JobHistoryConstants.CONF_FILENAME_REGEX_V1);
if (pre21JobID == null) {
pre21JobID = applyParser(fileName,
Pre21JobHistoryConstants.CONF_FILENAME_REGEX_V2);
}
if (pre21JobID != null) {
return pre21JobID;
}
return applyParser(fileName, JobHistory.CONF_FILENAME_REGEX);
}
/**
* Checks if the given <code>fileName</code> is a valid job conf xml file name
* @param fileName name of the file to be validated
* @return <code>true</code> if the given <code>fileName</code> is a valid
* job conf xml file name.
*/
static boolean isJobConfXml(String fileName) {
String jobId = extractJobIDFromConfFileName(fileName);
return jobId != null;
}
/**
* Extract/Add counters into the Map from the given JhCounters object.
* @param counters the counters to be extracted from
* @return the map of counters
*/
static Map<String, Long> extractCounters(JhCounters counters) {
Map<String, Long> countersMap = new HashMap<String, Long>();
if (counters != null) {
for (JhCounterGroup group : counters.getGroups()) {
for (JhCounter counter : group.getCounts()) {
countersMap.put(counter.getName().toString(), counter.getValue());
}
}
}
return countersMap;
}
}
| JobHistoryUtils |
java | spring-projects__spring-boot | smoke-test/spring-boot-smoke-test-ant/src/main/java/smoketest/ant/SampleAntApplication.java | {
"start": 852,
"end": 1117
} | class ____ implements CommandLineRunner {
@Override
public void run(String... args) {
System.out.println("Spring Boot Ant Example");
}
public static void main(String[] args) {
SpringApplication.run(SampleAntApplication.class, args);
}
}
| SampleAntApplication |
java | lettuce-io__lettuce-core | src/test/java/io/lettuce/core/cluster/RedisClusterReadFromIntegrationTests.java | {
"start": 1550,
"end": 4244
} | class ____ extends TestSupport {
private final RedisClusterClient clusterClient;
private StatefulRedisClusterConnection<String, String> connection;
private RedisAdvancedClusterCommands<String, String> sync;
@Inject
RedisClusterReadFromIntegrationTests(RedisClusterClient clusterClient) {
this.clusterClient = clusterClient;
}
@BeforeEach
void before() {
connection = clusterClient.connect();
sync = connection.sync();
}
@AfterEach
void after() {
connection.close();
}
@Test
void defaultTest() {
assertThat(connection.getReadFrom()).isEqualTo(ReadFrom.UPSTREAM);
}
@Test
void readWriteMaster() {
connection.setReadFrom(ReadFrom.UPSTREAM);
sync.set(key, value);
assertThat(sync.get(key)).isEqualTo(value);
}
@Test
void readWriteMasterPreferred() {
connection.setReadFrom(ReadFrom.UPSTREAM_PREFERRED);
sync.set(key, value);
assertThat(sync.get(key)).isEqualTo(value);
}
@Test
void readWriteReplica() {
connection.setReadFrom(ReadFrom.REPLICA);
sync.set(key, "value1");
connection.getConnection(ClusterTestSettings.host, ClusterTestSettings.port2).sync().waitForReplication(1, 1000);
assertThat(sync.get(key)).isEqualTo("value1");
}
@Test
void readWriteReplicaPreferred() {
connection.setReadFrom(ReadFrom.REPLICA_PREFERRED);
sync.set(key, "value1");
connection.getConnection(ClusterTestSettings.host, ClusterTestSettings.port2).sync().waitForReplication(1, 1000);
assertThat(sync.get(key)).isEqualTo("value1");
}
@Test
void readWriteNearest() {
connection.setReadFrom(ReadFrom.NEAREST);
sync.set(key, "value1");
connection.getConnection(ClusterTestSettings.host, ClusterTestSettings.port2).sync().waitForReplication(1, 1000);
assertThat(sync.get(key)).isEqualTo("value1");
}
@Test
void readWriteSubnet() {
connection.setReadFrom(ReadFrom.subnet("0.0.0.0/0", "::/0"));
sync.set(key, "value1");
connection.getConnection(ClusterTestSettings.host, ClusterTestSettings.port2).sync().waitForReplication(1, 1000);
assertThat(sync.get(key)).isEqualTo("value1");
}
@Test
void readWriteRegex() {
connection.setReadFrom(ReadFrom.regex(Pattern.compile(".*")));
sync.set(key, "value1");
connection.getConnection(ClusterTestSettings.host, ClusterTestSettings.port2).sync().waitForReplication(1, 1000);
assertThat(sync.get(key)).isEqualTo("value1");
}
}
| RedisClusterReadFromIntegrationTests |
java | alibaba__nacos | console/src/test/java/com/alibaba/nacos/console/handler/impl/remote/naming/ServiceRemoteHandlerTest.java | {
"start": 1773,
"end": 6854
} | class ____ extends AbstractRemoteHandlerTest {
private static final String TEST_NAMESPACE_ID = "testNamespaceId";
private static final String TEST_GROUP_NAME = "testGroupName";
private static final String TEST_SERVICE_NAME = "testServiceName";
ServiceRemoteHandler serviceRemoteHandler;
@BeforeEach
void setUp() {
super.setUpWithNaming();
serviceRemoteHandler = new ServiceRemoteHandler(clientHolder);
}
@AfterEach
void tearDown() {
}
@Test
void createService() throws Exception {
ServiceForm serviceForm = new ServiceForm();
serviceForm.setServiceName(TEST_SERVICE_NAME);
serviceForm.setGroupName(TEST_GROUP_NAME);
serviceForm.setNamespaceId(TEST_NAMESPACE_ID);
serviceForm.validate();
ServiceMetadata serviceMetadata = new ServiceMetadata();
serviceRemoteHandler.createService(serviceForm, serviceMetadata);
verify(namingMaintainerService).createService(any(Service.class));
}
@Test
void deleteService() throws Exception {
serviceRemoteHandler.deleteService(TEST_NAMESPACE_ID, TEST_SERVICE_NAME, TEST_GROUP_NAME);
verify(namingMaintainerService).removeService(TEST_NAMESPACE_ID, TEST_GROUP_NAME, TEST_SERVICE_NAME);
}
@Test
void updateService() throws Exception {
ServiceForm serviceForm = new ServiceForm();
serviceForm.setServiceName(TEST_SERVICE_NAME);
serviceForm.setGroupName(TEST_GROUP_NAME);
serviceForm.setNamespaceId(TEST_NAMESPACE_ID);
serviceForm.validate();
ServiceMetadata serviceMetadata = new ServiceMetadata();
serviceRemoteHandler.updateService(serviceForm, serviceMetadata);
verify(namingMaintainerService).updateService(any(Service.class));
}
@Test
void getSelectorTypeList() throws NacosException {
List<String> selectorTypeList = Collections.singletonList("Mock");
when(namingMaintainerService.listSelectorTypes()).thenReturn(selectorTypeList);
List<String> actual = serviceRemoteHandler.getSelectorTypeList();
assertEquals(selectorTypeList, actual);
}
@Test
void getSubscribers() throws Exception {
Page<SubscriberInfo> mockPage = new Page<>();
mockPage.setTotalCount(1);
mockPage.setPagesAvailable(1);
mockPage.setPageNumber(1);
mockPage.setPageItems(Collections.singletonList(new SubscriberInfo()));
when(namingMaintainerService.getSubscribers(TEST_NAMESPACE_ID, TEST_GROUP_NAME, TEST_SERVICE_NAME, 1, 1,
true)).thenReturn(mockPage);
Page<SubscriberInfo> actual = serviceRemoteHandler.getSubscribers(1, 1, TEST_NAMESPACE_ID, TEST_SERVICE_NAME,
TEST_GROUP_NAME, true);
assertEquals(mockPage.getPageNumber(), actual.getPageNumber());
assertEquals(mockPage.getPagesAvailable(), actual.getPagesAvailable());
assertEquals(mockPage.getTotalCount(), actual.getTotalCount());
assertEquals(mockPage.getPageItems().size(), actual.getPageItems().size());
}
@Test
void getServiceListWithInstances() throws NacosException {
Page<ServiceDetailInfo> mockPage = new Page<>();
when(namingMaintainerService.listServicesWithDetail(TEST_NAMESPACE_ID, TEST_GROUP_NAME, TEST_SERVICE_NAME, 1,
1)).thenReturn(mockPage);
Page<ServiceDetailInfo> actual = (Page<ServiceDetailInfo>) serviceRemoteHandler.getServiceList(true,
TEST_NAMESPACE_ID, 1, 1, TEST_SERVICE_NAME, TEST_GROUP_NAME, false);
assertEquals(mockPage, actual);
}
@Test
void getServiceListWithoutInstance() throws NacosException {
Page<ServiceView> mockPage = new Page<>();
when(namingMaintainerService.listServices(TEST_NAMESPACE_ID, TEST_GROUP_NAME, TEST_SERVICE_NAME, false, 1,
1)).thenReturn(mockPage);
Page<ServiceDetailInfo> actual = (Page<ServiceDetailInfo>) serviceRemoteHandler.getServiceList(false,
TEST_NAMESPACE_ID, 1, 1, TEST_SERVICE_NAME, TEST_GROUP_NAME, false);
assertEquals(mockPage, actual);
}
@Test
void getServiceDetail() throws NacosException {
ServiceDetailInfo mockServiceDetailInfo = new ServiceDetailInfo();
when(namingMaintainerService.getServiceDetail(TEST_NAMESPACE_ID, TEST_GROUP_NAME,
TEST_SERVICE_NAME)).thenReturn(mockServiceDetailInfo);
ServiceDetailInfo actual = serviceRemoteHandler.getServiceDetail(TEST_NAMESPACE_ID, TEST_SERVICE_NAME,
TEST_GROUP_NAME);
assertEquals(mockServiceDetailInfo, actual);
}
@Test
void updateClusterMetadata() throws Exception {
serviceRemoteHandler.updateClusterMetadata(TEST_NAMESPACE_ID, TEST_GROUP_NAME, TEST_SERVICE_NAME,
Constants.DEFAULT_CLUSTER_NAME, new ClusterMetadata());
verify(namingMaintainerService).updateCluster(any(Service.class), any(ClusterInfo.class));
}
} | ServiceRemoteHandlerTest |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/jpa/cascade/multilevel/MultiLevelCascadeCollectionEmbeddableTest.java | {
"start": 8367,
"end": 9550
} | class ____ implements Serializable {
@Column(name = "ID_NUM", insertable = false, updatable = false)
private Long idNum;
@Column(name = "PERSON", insertable = false, updatable = false)
private String person;
@Column(name = "SOURCE_CODE")
private String sourceCode;
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
AnotherSubSubEntityId that = (AnotherSubSubEntityId) o;
return Objects.equals(idNum, that.idNum) &&
Objects.equals(person, that.person) &&
Objects.equals(sourceCode, that.sourceCode);
}
@Override
public int hashCode() {
return Objects.hash(idNum, person, sourceCode);
}
public Long getIdNum() {
return idNum;
}
public void setIdNum(Long idNum) {
this.idNum = idNum;
}
public String getPerson() {
return person;
}
public void setPerson(String person) {
this.person = person;
}
public String getSourceCode() {
return sourceCode;
}
public void setSourceCode(String sourceCode) {
this.sourceCode = sourceCode;
}
}
@Entity
@Table(name = "SUB_SUB_TABLE")
public static | AnotherSubSubEntityId |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/embeddable/NestedEmbeddableWithLockingDeletionTest.java | {
"start": 3952,
"end": 4492
} | class ____ {
@Id
private String productId;
private String description;
@Embedded
private Benefits benefits;
public Product() {
}
public Product(String productId) {
this.productId = productId;
}
public Product(String productId, Benefits benefits) {
this.productId = productId;
this.benefits = benefits;
}
public Product(String productId, Benefits benefits, String description) {
this.productId = productId;
this.benefits = benefits;
this.description = description;
}
}
public static | Product |
java | junit-team__junit5 | jupiter-tests/src/test/java/org/junit/jupiter/engine/descriptor/TestInstanceLifecycleUtilsTests.java | {
"start": 4351,
"end": 4421
} | interface ____ {
}
@PerClassLifeCycle
private static | PerClassLifeCycle |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/state/OperatorStateRestoreOperation.java | {
"start": 1676,
"end": 13146
} | class ____ implements RestoreOperation<Void> {
private final CloseableRegistry closeStreamOnCancelRegistry;
private final ClassLoader userClassloader;
private final Map<String, PartitionableListState<?>> registeredOperatorStates;
private final Map<String, BackendWritableBroadcastState<?, ?>> registeredBroadcastStates;
private final Collection<OperatorStateHandle> stateHandles;
public OperatorStateRestoreOperation(
CloseableRegistry closeStreamOnCancelRegistry,
ClassLoader userClassloader,
Map<String, PartitionableListState<?>> registeredOperatorStates,
Map<String, BackendWritableBroadcastState<?, ?>> registeredBroadcastStates,
@Nonnull Collection<OperatorStateHandle> stateHandles) {
this.closeStreamOnCancelRegistry = closeStreamOnCancelRegistry;
this.userClassloader = userClassloader;
this.registeredOperatorStates = registeredOperatorStates;
this.registeredBroadcastStates = registeredBroadcastStates;
this.stateHandles = stateHandles;
}
@Override
public Void restore() throws Exception {
if (stateHandles.isEmpty()) {
return null;
}
for (OperatorStateHandle stateHandle : stateHandles) {
if (stateHandle == null
|| stateHandle instanceof EmptyFileMergingOperatorStreamStateHandle) {
continue;
}
FSDataInputStream in = stateHandle.openInputStream();
closeStreamOnCancelRegistry.registerCloseable(in);
ClassLoader restoreClassLoader = Thread.currentThread().getContextClassLoader();
try {
Thread.currentThread().setContextClassLoader(userClassloader);
OperatorBackendSerializationProxy backendSerializationProxy =
new OperatorBackendSerializationProxy(userClassloader);
backendSerializationProxy.read(new DataInputViewStreamWrapper(in));
List<StateMetaInfoSnapshot> restoredOperatorMetaInfoSnapshots =
backendSerializationProxy.getOperatorStateMetaInfoSnapshots();
// Recreate all PartitionableListStates from the meta info
for (StateMetaInfoSnapshot restoredSnapshot : restoredOperatorMetaInfoSnapshots) {
final RegisteredOperatorStateBackendMetaInfo<?> restoredMetaInfo =
new RegisteredOperatorStateBackendMetaInfo<>(restoredSnapshot);
if (restoredMetaInfo.getPartitionStateSerializer()
instanceof UnloadableDummyTypeSerializer) {
// must fail now if the previous typeSerializer cannot be restored because
// there is no typeSerializer
// capable of reading previous state
// TODO when eager state registration is in place, we can try to get a
// convert deserializer
// TODO from the newly registered typeSerializer instead of simply failing
// here
throw new IOException(
"Unable to restore operator state ["
+ restoredSnapshot.getName()
+ "]."
+ " The previous typeSerializer of the operator state must be present; the typeSerializer could"
+ " have been removed from the classpath, or its implementation have changed and could"
+ " not be loaded. This is a temporary restriction that will be fixed in future versions.");
}
PartitionableListState<?> listState =
registeredOperatorStates.get(restoredSnapshot.getName());
if (null == listState) {
listState = new PartitionableListState<>(restoredMetaInfo);
registeredOperatorStates.put(
listState.getStateMetaInfo().getName(), listState);
} else {
// TODO with eager state registration in place, check here for
// typeSerializer migration strategies
}
}
// ... and then get back the broadcast state.
List<StateMetaInfoSnapshot> restoredBroadcastMetaInfoSnapshots =
backendSerializationProxy.getBroadcastStateMetaInfoSnapshots();
for (StateMetaInfoSnapshot restoredSnapshot : restoredBroadcastMetaInfoSnapshots) {
final RegisteredBroadcastStateBackendMetaInfo<?, ?> restoredMetaInfo =
new RegisteredBroadcastStateBackendMetaInfo<>(restoredSnapshot);
if (restoredMetaInfo.getKeySerializer() instanceof UnloadableDummyTypeSerializer
|| restoredMetaInfo.getValueSerializer()
instanceof UnloadableDummyTypeSerializer) {
// must fail now if the previous typeSerializer cannot be restored because
// there is no typeSerializer
// capable of reading previous state
// TODO when eager state registration is in place, we can try to get a
// convert deserializer
// TODO from the newly registered typeSerializer instead of simply failing
// here
throw new IOException(
"Unable to restore broadcast state ["
+ restoredSnapshot.getName()
+ "]."
+ " The previous key and value serializers of the state must be present; the serializers could"
+ " have been removed from the classpath, or their implementations have changed and could"
+ " not be loaded. This is a temporary restriction that will be fixed in future versions.");
}
BackendWritableBroadcastState<?, ?> broadcastState =
registeredBroadcastStates.get(restoredSnapshot.getName());
if (broadcastState == null) {
broadcastState = new HeapBroadcastState<>(restoredMetaInfo);
registeredBroadcastStates.put(
broadcastState.getStateMetaInfo().getName(), broadcastState);
} else {
// TODO with eager state registration in place, check here for
// typeSerializer migration strategies
}
}
// Restore states in the order in which they were written. Operator states come
// before Broadcast states.
final List<String> toRestore = new ArrayList<>();
restoredOperatorMetaInfoSnapshots.forEach(
stateName -> toRestore.add(stateName.getName()));
restoredBroadcastMetaInfoSnapshots.forEach(
stateName -> toRestore.add(stateName.getName()));
final StreamCompressionDecorator compressionDecorator =
backendSerializationProxy.isUsingStateCompression()
? SnappyStreamCompressionDecorator.INSTANCE
: UncompressedStreamCompressionDecorator.INSTANCE;
try (final CompressibleFSDataInputStream compressedIn =
new CompressibleFSDataInputStream(
in,
compressionDecorator)) { // closes only the outer compression stream
for (String stateName : toRestore) {
final OperatorStateHandle.StateMetaInfo offsets =
stateHandle.getStateNameToPartitionOffsets().get(stateName);
PartitionableListState<?> listStateForName =
registeredOperatorStates.get(stateName);
if (listStateForName == null) {
BackendWritableBroadcastState<?, ?> broadcastStateForName =
registeredBroadcastStates.get(stateName);
Preconditions.checkState(
broadcastStateForName != null,
"Found state without "
+ "corresponding meta info: "
+ stateName);
deserializeBroadcastStateValues(
broadcastStateForName, compressedIn, offsets);
} else {
deserializeOperatorStateValues(listStateForName, compressedIn, offsets);
}
}
}
} finally {
Thread.currentThread().setContextClassLoader(restoreClassLoader);
if (closeStreamOnCancelRegistry.unregisterCloseable(in)) {
IOUtils.closeQuietly(in);
}
}
}
return null;
}
private <S> void deserializeOperatorStateValues(
PartitionableListState<S> stateListForName,
FSDataInputStream in,
OperatorStateHandle.StateMetaInfo metaInfo)
throws IOException {
if (null != metaInfo) {
long[] offsets = metaInfo.getOffsets();
if (null != offsets) {
DataInputView div = new DataInputViewStreamWrapper(in);
TypeSerializer<S> serializer =
stateListForName.getStateMetaInfo().getPartitionStateSerializer();
for (long offset : offsets) {
in.seek(offset);
stateListForName.add(serializer.deserialize(div));
}
}
}
}
private <K, V> void deserializeBroadcastStateValues(
final BackendWritableBroadcastState<K, V> broadcastStateForName,
final FSDataInputStream in,
final OperatorStateHandle.StateMetaInfo metaInfo)
throws Exception {
if (metaInfo != null) {
long[] offsets = metaInfo.getOffsets();
if (offsets != null) {
TypeSerializer<K> keySerializer =
broadcastStateForName.getStateMetaInfo().getKeySerializer();
TypeSerializer<V> valueSerializer =
broadcastStateForName.getStateMetaInfo().getValueSerializer();
in.seek(offsets[0]);
DataInputView div = new DataInputViewStreamWrapper(in);
int size = div.readInt();
for (int i = 0; i < size; i++) {
broadcastStateForName.put(
keySerializer.deserialize(div), valueSerializer.deserialize(div));
}
}
}
}
}
| OperatorStateRestoreOperation |
java | elastic__elasticsearch | x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsServiceSettings.java | {
"start": 2642,
"end": 12863
} | class ____ extends FilteredXContentObject implements ServiceSettings, OpenAiRateLimitServiceSettings {
public static final String NAME = "openai_service_settings";
public static final String DIMENSIONS_SET_BY_USER = "dimensions_set_by_user";
// The rate limit for usage tier 1 is 3000 request per minute for the text embedding models
// To find this information you need to access your account's limits https://platform.openai.com/account/limits
// 3000 requests per minute
public static final RateLimitSettings DEFAULT_RATE_LIMIT_SETTINGS = new RateLimitSettings(3000);
public static OpenAiEmbeddingsServiceSettings fromMap(Map<String, Object> map, ConfigurationParseContext context) {
return switch (context) {
case REQUEST -> fromRequestMap(map);
case PERSISTENT -> fromPersistentMap(map);
};
}
private static OpenAiEmbeddingsServiceSettings fromPersistentMap(Map<String, Object> map) {
// Reading previously persisted config, assume the validation
// passed at that time and never throw.
ValidationException validationException = new ValidationException();
var commonFields = fromMap(map, validationException, ConfigurationParseContext.PERSISTENT);
Boolean dimensionsSetByUser = removeAsType(map, DIMENSIONS_SET_BY_USER, Boolean.class);
if (dimensionsSetByUser == null) {
// Setting added in 8.13, default to false for configs created prior
dimensionsSetByUser = Boolean.FALSE;
}
return new OpenAiEmbeddingsServiceSettings(commonFields, dimensionsSetByUser);
}
private static OpenAiEmbeddingsServiceSettings fromRequestMap(Map<String, Object> map) {
ValidationException validationException = new ValidationException();
var commonFields = fromMap(map, validationException, ConfigurationParseContext.REQUEST);
if (validationException.validationErrors().isEmpty() == false) {
throw validationException;
}
return new OpenAiEmbeddingsServiceSettings(commonFields, commonFields.dimensions != null);
}
private static CommonFields fromMap(
Map<String, Object> map,
ValidationException validationException,
ConfigurationParseContext context
) {
String url = extractOptionalString(map, URL, ModelConfigurations.SERVICE_SETTINGS, validationException);
String organizationId = extractOptionalString(map, ORGANIZATION, ModelConfigurations.SERVICE_SETTINGS, validationException);
SimilarityMeasure similarity = extractSimilarity(map, ModelConfigurations.SERVICE_SETTINGS, validationException);
Integer maxInputTokens = extractOptionalPositiveInteger(
map,
MAX_INPUT_TOKENS,
ModelConfigurations.SERVICE_SETTINGS,
validationException
);
Integer dims = extractOptionalPositiveInteger(map, DIMENSIONS, ModelConfigurations.SERVICE_SETTINGS, validationException);
URI uri = convertToUri(url, URL, ModelConfigurations.SERVICE_SETTINGS, validationException);
String modelId = extractRequiredString(map, MODEL_ID, ModelConfigurations.SERVICE_SETTINGS, validationException);
RateLimitSettings rateLimitSettings = RateLimitSettings.of(
map,
DEFAULT_RATE_LIMIT_SETTINGS,
validationException,
OpenAiService.NAME,
context
);
return new CommonFields(modelId, uri, organizationId, similarity, maxInputTokens, dims, rateLimitSettings);
}
private record CommonFields(
String modelId,
@Nullable URI uri,
@Nullable String organizationId,
@Nullable SimilarityMeasure similarity,
@Nullable Integer maxInputTokens,
@Nullable Integer dimensions,
RateLimitSettings rateLimitSettings
) {}
private final String modelId;
private final URI uri;
private final String organizationId;
private final SimilarityMeasure similarity;
private final Integer dimensions;
private final Integer maxInputTokens;
private final Boolean dimensionsSetByUser;
private final RateLimitSettings rateLimitSettings;
public OpenAiEmbeddingsServiceSettings(
String modelId,
@Nullable URI uri,
@Nullable String organizationId,
@Nullable SimilarityMeasure similarity,
@Nullable Integer dimensions,
@Nullable Integer maxInputTokens,
Boolean dimensionsSetByUser,
@Nullable RateLimitSettings rateLimitSettings
) {
this.uri = uri;
this.modelId = Objects.requireNonNull(modelId);
this.organizationId = organizationId;
this.similarity = similarity;
this.dimensions = dimensions;
this.maxInputTokens = maxInputTokens;
this.dimensionsSetByUser = Objects.requireNonNull(dimensionsSetByUser);
this.rateLimitSettings = Objects.requireNonNullElse(rateLimitSettings, DEFAULT_RATE_LIMIT_SETTINGS);
}
OpenAiEmbeddingsServiceSettings(
String modelId,
@Nullable String uri,
@Nullable String organizationId,
@Nullable SimilarityMeasure similarity,
@Nullable Integer dimensions,
@Nullable Integer maxInputTokens,
Boolean dimensionsSetByUser,
@Nullable RateLimitSettings rateLimitSettings
) {
this(
modelId,
createOptionalUri(uri),
organizationId,
similarity,
dimensions,
maxInputTokens,
dimensionsSetByUser,
rateLimitSettings
);
}
public OpenAiEmbeddingsServiceSettings(StreamInput in) throws IOException {
uri = createOptionalUri(in.readOptionalString());
organizationId = in.readOptionalString();
similarity = in.readOptionalEnum(SimilarityMeasure.class);
dimensions = in.readOptionalVInt();
maxInputTokens = in.readOptionalVInt();
dimensionsSetByUser = in.readBoolean();
modelId = in.readString();
rateLimitSettings = new RateLimitSettings(in);
}
private OpenAiEmbeddingsServiceSettings(CommonFields fields, Boolean dimensionsSetByUser) {
this(
fields.modelId,
fields.uri,
fields.organizationId,
fields.similarity,
fields.dimensions,
fields.maxInputTokens,
dimensionsSetByUser,
fields.rateLimitSettings
);
}
@Override
public RateLimitSettings rateLimitSettings() {
return rateLimitSettings;
}
@Override
public URI uri() {
return uri;
}
@Override
public String organizationId() {
return organizationId;
}
@Override
public SimilarityMeasure similarity() {
return similarity;
}
@Override
public Integer dimensions() {
return dimensions;
}
@Override
public Boolean dimensionsSetByUser() {
return dimensionsSetByUser;
}
public Integer maxInputTokens() {
return maxInputTokens;
}
@Override
public String modelId() {
return modelId;
}
@Override
public DenseVectorFieldMapper.ElementType elementType() {
return DenseVectorFieldMapper.ElementType.FLOAT;
}
@Override
public String getWriteableName() {
return NAME;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
toXContentFragmentOfExposedFields(builder, params);
if (dimensionsSetByUser != null) {
builder.field(DIMENSIONS_SET_BY_USER, dimensionsSetByUser);
}
builder.endObject();
return builder;
}
@Override
protected XContentBuilder toXContentFragmentOfExposedFields(XContentBuilder builder, Params params) throws IOException {
builder.field(MODEL_ID, modelId);
if (uri != null) {
builder.field(URL, uri.toString());
}
if (organizationId != null) {
builder.field(ORGANIZATION, organizationId);
}
if (similarity != null) {
builder.field(SIMILARITY, similarity);
}
if (dimensions != null) {
builder.field(DIMENSIONS, dimensions);
}
if (maxInputTokens != null) {
builder.field(MAX_INPUT_TOKENS, maxInputTokens);
}
rateLimitSettings.toXContent(builder, params);
return builder;
}
@Override
public TransportVersion getMinimalSupportedVersion() {
return TransportVersions.V_8_12_0;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
var uriToWrite = uri != null ? uri.toString() : null;
out.writeOptionalString(uriToWrite);
out.writeOptionalString(organizationId);
out.writeOptionalEnum(SimilarityMeasure.translateSimilarity(similarity, out.getTransportVersion()));
out.writeOptionalVInt(dimensions);
out.writeOptionalVInt(maxInputTokens);
out.writeBoolean(dimensionsSetByUser);
out.writeString(modelId);
rateLimitSettings.writeTo(out);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
OpenAiEmbeddingsServiceSettings that = (OpenAiEmbeddingsServiceSettings) o;
return Objects.equals(uri, that.uri)
&& Objects.equals(modelId, that.modelId)
&& Objects.equals(organizationId, that.organizationId)
&& Objects.equals(similarity, that.similarity)
&& Objects.equals(dimensions, that.dimensions)
&& Objects.equals(maxInputTokens, that.maxInputTokens)
&& Objects.equals(dimensionsSetByUser, that.dimensionsSetByUser)
&& Objects.equals(rateLimitSettings, that.rateLimitSettings);
}
@Override
public int hashCode() {
return Objects.hash(uri, modelId, organizationId, similarity, dimensions, maxInputTokens, dimensionsSetByUser, rateLimitSettings);
}
}
| OpenAiEmbeddingsServiceSettings |
java | eclipse-vertx__vert.x | vertx-core/src/test/java/io/vertx/it/eventexecutor/CustomThread.java | {
"start": 459,
"end": 558
} | class ____ extends Thread {
public CustomThread(Runnable task) {
super(task);
}
}
| CustomThread |
java | elastic__elasticsearch | x-pack/plugin/security/qa/security-disabled/src/javaRestTest/java/org/elasticsearch/xpack/security/SetSecurityUserProcessorWithSecurityDisabledIT.java | {
"start": 1036,
"end": 2948
} | class ____ extends ESRestTestCase {
@ClassRule
public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
.nodes(2)
.distribution(DistributionType.DEFAULT)
.setting("xpack.ml.enabled", "false")
// We run with a trial license, but explicitly disable security.
// This means the security plugin is loaded and all feature are permitted, but they are not enabled
.setting("xpack.license.self_generated.type", "trial")
.setting("xpack.security.enabled", "false")
.build();
@Override
protected String getTestRestCluster() {
return cluster.getHttpAddresses();
}
public void testDefineAndUseProcessor() throws Exception {
final String pipeline = "pipeline-" + getTestName();
final String index = "index-" + getTestName();
{
final Request putPipeline = new Request("PUT", "/_ingest/pipeline/" + pipeline);
putPipeline.setJsonEntity(Strings.format("""
{
"description": "Test pipeline (%s)",
"processors": [ { "set_security_user": { "field": "user" } } ]
}""", getTestName()));
final Response response = client().performRequest(putPipeline);
assertOK(response);
}
{
final Request ingest = new Request("PUT", "/" + index + "/_doc/1?pipeline=" + pipeline);
ingest.setJsonEntity("{\"field\":\"value\"}");
final ResponseException ex = expectThrows(ResponseException.class, () -> client().performRequest(ingest));
final Response response = ex.getResponse();
assertThat(
EntityUtils.toString(response.getEntity()),
containsString("Security (authentication) is not enabled on this cluster")
);
}
}
}
| SetSecurityUserProcessorWithSecurityDisabledIT |
java | spring-projects__spring-framework | spring-expression/src/test/java/org/springframework/expression/spel/SpelReproTests.java | {
"start": 25154,
"end": 26651
} | class ____ {
public int getX(int i) {
return 10;
}
public int getX(Number i) {
return 20;
}
}
final Integer INTEGER = 7;
EvaluationContext emptyEvalContext = new StandardEvaluationContext();
List<TypeDescriptor> args = new ArrayList<>();
args.add(TypeDescriptor.forObject(42));
ConversionPriority1 target = new ConversionPriority1();
MethodExecutor me = new ReflectiveMethodResolver(true).resolve(emptyEvalContext, target, "getX", args);
// MethodInvoker chooses getX(int i) when passing Integer
final int actual = (Integer) me.execute(emptyEvalContext, target, 42).getValue();
// Compiler chooses getX(Number i) when passing Integer
final int compiler = target.getX(INTEGER);
// Fails!
assertThat(actual).isEqualTo(compiler);
ConversionPriority2 target2 = new ConversionPriority2();
MethodExecutor me2 = new ReflectiveMethodResolver(true).resolve(emptyEvalContext, target2, "getX", args);
// MethodInvoker chooses getX(int i) when passing Integer
int actual2 = (Integer) me2.execute(emptyEvalContext, target2, 42).getValue();
// Compiler chooses getX(Number i) when passing Integer
int compiler2 = target2.getX(INTEGER);
// Fails!
assertThat(actual2).isEqualTo(compiler2);
}
/**
* Test whether {@link ReflectiveMethodResolver} handles Widening Primitive Conversion. That's passing an 'int' to a
* method accepting 'long' is ok.
*/
@Test
void wideningPrimitiveConversion_SPR8224() throws Exception {
| ConversionPriority2 |
java | google__error-prone | core/src/test/java/com/google/errorprone/matchers/MethodReturnsTest.java | {
"start": 2098,
"end": 2294
} | class ____ {
static int foo() {
return 0;
}
}
""");
assertCompiles(fooReturnsType(/* shouldMatch= */ true, "int"));
}
private abstract static | A |
java | spring-projects__spring-framework | spring-context/src/main/java/org/springframework/cache/interceptor/AbstractFallbackCacheOperationSource.java | {
"start": 1803,
"end": 1993
} | interface ____ the invoked method
* has been called through (in case of a JDK proxy) will be checked.
*
* @author Costin Leau
* @author Juergen Hoeller
* @since 3.1
*/
public abstract | that |
java | apache__kafka | connect/api/src/main/java/org/apache/kafka/connect/connector/policy/ConnectorClientConfigRequest.java | {
"start": 4213,
"end": 4263
} | class ____ the Connector.
*
* @return the | of |
java | apache__camel | components/camel-aws/camel-aws2-ec2/src/test/java/org/apache/camel/component/aws2/ec2/EC2ComponentSpringTest.java | {
"start": 2213,
"end": 10789
} | class ____ extends CamelSpringTestSupport {
@Test
public void createAndRunInstances() {
Exchange exchange = template.request("direct:createAndRun", new Processor() {
public void process(Exchange exchange) {
exchange.getIn().setHeader(AWS2EC2Constants.OPERATION, AWS2EC2Operations.createAndRunInstances);
exchange.getIn().setHeader(AWS2EC2Constants.IMAGE_ID, "test-1");
exchange.getIn().setHeader(AWS2EC2Constants.INSTANCE_TYPE, InstanceType.T2_MICRO);
exchange.getIn().setHeader(AWS2EC2Constants.INSTANCE_MIN_COUNT, 1);
exchange.getIn().setHeader(AWS2EC2Constants.INSTANCE_MAX_COUNT, 1);
}
});
RunInstancesResponse resultGet = (RunInstancesResponse) exchange.getMessage().getBody();
assertEquals("test-1", resultGet.instances().get(0).imageId());
assertEquals(InstanceType.T2_MICRO, resultGet.instances().get(0).instanceType());
assertEquals("instance-1", resultGet.instances().get(0).instanceId());
}
@Test
public void ec2CreateAndRunTestWithKeyPair() {
Exchange exchange = template.request("direct:createAndRun", new Processor() {
@Override
public void process(Exchange exchange) {
exchange.getIn().setHeader(AWS2EC2Constants.OPERATION, AWS2EC2Operations.createAndRunInstances);
exchange.getIn().setHeader(AWS2EC2Constants.IMAGE_ID, "test-1");
exchange.getIn().setHeader(AWS2EC2Constants.INSTANCE_TYPE, InstanceType.T2_MICRO);
exchange.getIn().setHeader(AWS2EC2Constants.INSTANCE_MIN_COUNT, 1);
exchange.getIn().setHeader(AWS2EC2Constants.INSTANCE_MAX_COUNT, 1);
exchange.getIn().setHeader(AWS2EC2Constants.INSTANCES_KEY_PAIR, "keypair-1");
}
});
RunInstancesResponse resultGet = (RunInstancesResponse) exchange.getMessage().getBody();
assertEquals("test-1", resultGet.instances().get(0).imageId());
assertEquals(InstanceType.T2_MICRO, resultGet.instances().get(0).instanceType());
assertEquals("instance-1", resultGet.instances().get(0).instanceId());
assertEquals(2, resultGet.instances().get(0).securityGroups().size());
assertEquals("id-3", resultGet.instances().get(0).securityGroups().get(0).groupId());
assertEquals("id-4", resultGet.instances().get(0).securityGroups().get(1).groupId());
}
@Test
public void startInstances() {
Exchange exchange = template.request("direct:start", new Processor() {
@Override
public void process(Exchange exchange) {
Collection<String> l = new ArrayList<>();
l.add("test-1");
exchange.getIn().setHeader(AWS2EC2Constants.INSTANCES_IDS, l);
}
});
StartInstancesResponse resultGet = (StartInstancesResponse) exchange.getMessage().getBody();
assertEquals("test-1", resultGet.startingInstances().get(0).instanceId());
assertEquals(InstanceStateName.STOPPED, resultGet.startingInstances().get(0).previousState().name());
assertEquals(InstanceStateName.RUNNING, resultGet.startingInstances().get(0).currentState().name());
}
@Test
public void stopInstances() {
Exchange exchange = template.request("direct:stop", new Processor() {
@Override
public void process(Exchange exchange) {
Collection<String> l = new ArrayList<>();
l.add("test-1");
exchange.getIn().setHeader(AWS2EC2Constants.INSTANCES_IDS, l);
}
});
StopInstancesResponse resultGet = (StopInstancesResponse) exchange.getMessage().getBody();
assertEquals("test-1", resultGet.stoppingInstances().get(0).instanceId());
assertEquals(InstanceStateName.RUNNING, resultGet.stoppingInstances().get(0).previousState().name());
assertEquals(InstanceStateName.STOPPED, resultGet.stoppingInstances().get(0).currentState().name());
}
@Test
public void terminateInstances() {
Exchange exchange = template.request("direct:terminate", new Processor() {
@Override
public void process(Exchange exchange) {
Collection<String> l = new ArrayList<>();
l.add("test-1");
exchange.getIn().setHeader(AWS2EC2Constants.INSTANCES_IDS, l);
}
});
TerminateInstancesResponse resultGet = (TerminateInstancesResponse) exchange.getMessage().getBody();
assertEquals("test-1", resultGet.terminatingInstances().get(0).instanceId());
assertEquals(InstanceStateName.RUNNING, resultGet.terminatingInstances().get(0).previousState().name());
assertEquals(InstanceStateName.TERMINATED, resultGet.terminatingInstances().get(0).currentState().name());
}
@Test
public void ec2DescribeSpecificInstancesTest() {
Exchange exchange = template.request("direct:describe", new Processor() {
@Override
public void process(Exchange exchange) {
Collection<String> l = new ArrayList<>();
l.add("instance-1");
exchange.getIn().setHeader(AWS2EC2Constants.INSTANCES_IDS, l);
}
});
DescribeInstancesResponse resultGet = (DescribeInstancesResponse) exchange.getMessage().getBody();
assertEquals(1, resultGet.reservations().size());
assertEquals(1, resultGet.reservations().get(0).instances().size());
}
@Test
public void ec2DescribeStatusSpecificInstancesTest() throws Exception {
Exchange exchange = template.request("direct:describeStatus", new Processor() {
@Override
public void process(Exchange exchange) {
Collection<String> l = new ArrayList<>();
l.add("test-1");
exchange.getIn().setHeader(AWS2EC2Constants.INSTANCES_IDS, l);
}
});
MockEndpoint.assertIsSatisfied(context);
DescribeInstanceStatusResponse resultGet = (DescribeInstanceStatusResponse) exchange.getMessage().getBody();
assertEquals(1, resultGet.instanceStatuses().size());
assertEquals(InstanceStateName.RUNNING, resultGet.instanceStatuses().get(0).instanceState().name());
}
@Test
public void ec2RebootInstancesTest() {
assertDoesNotThrow(() -> issueReboot());
}
private void issueReboot() {
template.request("direct:reboot", exchange -> {
Collection<String> l = new ArrayList<>();
l.add("test-1");
exchange.getIn().setHeader(AWS2EC2Constants.INSTANCES_IDS, l);
});
}
@Test
public void ec2MonitorInstancesTest() {
Exchange exchange = template.request("direct:monitor", new Processor() {
@Override
public void process(Exchange exchange) {
Collection<String> l = new ArrayList<>();
l.add("test-1");
exchange.getIn().setHeader(AWS2EC2Constants.INSTANCES_IDS, l);
}
});
MonitorInstancesResponse resultGet = (MonitorInstancesResponse) exchange.getMessage().getBody();
assertEquals(1, resultGet.instanceMonitorings().size());
assertEquals("test-1", resultGet.instanceMonitorings().get(0).instanceId());
assertEquals(MonitoringState.ENABLED, resultGet.instanceMonitorings().get(0).monitoring().state());
}
@Test
public void ec2UnmonitorInstancesTest() {
Exchange exchange = template.request("direct:unmonitor", new Processor() {
@Override
public void process(Exchange exchange) {
Collection<String> l = new ArrayList<>();
l.add("test-1");
exchange.getIn().setHeader(AWS2EC2Constants.INSTANCES_IDS, l);
}
});
UnmonitorInstancesResponse resultGet = (UnmonitorInstancesResponse) exchange.getMessage().getBody();
assertEquals(1, resultGet.instanceMonitorings().size());
assertEquals("test-1", resultGet.instanceMonitorings().get(0).instanceId());
assertEquals(MonitoringState.DISABLED, resultGet.instanceMonitorings().get(0).monitoring().state());
}
@Override
protected AbstractApplicationContext createApplicationContext() {
return new ClassPathXmlApplicationContext("org/apache/camel/component/aws2/ec2/EC2ComponentSpringTest-context.xml");
}
}
| EC2ComponentSpringTest |
java | alibaba__druid | core/src/main/java/com/alibaba/druid/sql/dialect/sqlserver/parser/SQLServerSelectParser.java | {
"start": 1022,
"end": 7354
} | class ____ extends SQLSelectParser {
public SQLServerSelectParser(String sql) {
super(new SQLServerExprParser(sql));
}
public SQLServerSelectParser(SQLExprParser exprParser) {
super(exprParser);
}
public SQLServerSelectParser(SQLExprParser exprParser, SQLSelectListCache selectListCache) {
super(exprParser, selectListCache);
}
public SQLSelect select() {
SQLSelect select = new SQLSelect();
if (lexer.token() == Token.WITH) {
SQLWithSubqueryClause with = this.parseWith();
select.setWithSubQuery(with);
}
select.setQuery(query());
select.setOrderBy(parseOrderBy());
if (select.getOrderBy() == null) {
select.setOrderBy(parseOrderBy());
}
if (lexer.token() == Token.FOR) {
lexer.nextToken();
if (lexer.identifierEquals("BROWSE")) {
lexer.nextToken();
select.setForBrowse(true);
} else if (lexer.identifierEquals("XML")) {
lexer.nextToken();
for (; ; ) {
if (lexer.identifierEquals("AUTO") //
|| lexer.identifierEquals("TYPE") //
|| lexer.identifierEquals("XMLSCHEMA") //
) {
select.getForXmlOptions().add(lexer.stringVal());
lexer.nextToken();
} else if (lexer.identifierEquals("ELEMENTS")) {
lexer.nextToken();
if (lexer.identifierEquals("XSINIL")) {
lexer.nextToken();
select.getForXmlOptions().add("ELEMENTS XSINIL");
} else {
select.getForXmlOptions().add("ELEMENTS");
}
} else if (lexer.identifierEquals("PATH")) {
SQLExpr xmlPath = this.exprParser.expr();
select.setXmlPath(xmlPath);
} else {
break;
}
if (lexer.token() == Token.COMMA) {
lexer.nextToken();
continue;
} else {
break;
}
}
} else {
throw new ParserException("syntax error, not support option : " + lexer.token() + ", " + lexer.info());
}
}
if (lexer.identifierEquals("OFFSET")) {
lexer.nextToken();
SQLExpr offset = this.expr();
acceptIdentifier("ROWS");
select.setOffset(offset);
if (lexer.token() == Token.FETCH) {
lexer.nextToken();
acceptIdentifier("NEXT");
SQLExpr rowCount = expr();
acceptIdentifier("ROWS");
acceptIdentifier("ONLY");
select.setRowCount(rowCount);
}
}
return select;
}
public SQLSelectQuery query(SQLObject parent, boolean acceptUnion) {
if (lexer.token() == Token.LPAREN) {
lexer.nextToken();
SQLSelectQuery select = query();
accept(Token.RPAREN);
return queryRest(select, acceptUnion);
}
SQLServerSelectQueryBlock queryBlock = new SQLServerSelectQueryBlock();
if (lexer.token() == Token.SELECT) {
lexer.nextToken();
if (lexer.token() == Token.COMMENT) {
lexer.nextToken();
}
if (lexer.token() == Token.DISTINCT) {
queryBlock.setDistionOption(SQLSetQuantifier.DISTINCT);
lexer.nextToken();
} else if (lexer.token() == Token.ALL) {
queryBlock.setDistionOption(SQLSetQuantifier.ALL);
lexer.nextToken();
}
if (lexer.token() == Token.TOP) {
SQLTop top = this.createExprParser().parseTop();
queryBlock.setTop(top);
}
parseSelectList(queryBlock);
}
if (lexer.token() == Token.INTO) {
lexer.nextToken();
SQLTableSource into = this.parseTableSource();
queryBlock.setInto((SQLExprTableSource) into);
}
parseFrom(queryBlock);
parseWhere(queryBlock);
parseGroupBy(queryBlock);
queryBlock.setOrderBy(this.exprParser.parseOrderBy());
parseFetchClause(queryBlock);
return queryRest(queryBlock, acceptUnion);
}
protected SQLServerExprParser createExprParser() {
return new SQLServerExprParser(lexer);
}
public SQLTableSource parseTableSourceRest(SQLTableSource tableSource) {
if (lexer.token() == Token.WITH) {
lexer.nextToken();
accept(Token.LPAREN);
for (; ; ) {
SQLExpr expr = this.expr();
SQLExprHint hint = new SQLExprHint(expr);
hint.setParent(tableSource);
tableSource.getHints().add(hint);
if (lexer.token() == Token.COMMA) {
lexer.nextToken();
continue;
} else {
break;
}
}
accept(Token.RPAREN);
}
return super.parseTableSourceRest(tableSource);
}
@Override
protected void afterParseFetchClause(SQLSelectQueryBlock queryBlock) {
if (queryBlock instanceof SQLServerSelectQueryBlock) {
SQLServerSelectQueryBlock sqlServerSelectQueryBlock = (SQLServerSelectQueryBlock) queryBlock;
if (lexer.token() == Token.OPTION) {
lexer.nextToken();
accept(Token.LPAREN);
for (; ; ) {
SQLAssignItem item = this.exprParser.parseAssignItem();
sqlServerSelectQueryBlock.getOptions().add(item);
if (lexer.token() == Token.COMMA) {
lexer.nextToken();
continue;
}
break;
}
accept(Token.RPAREN);
}
}
}
}
| SQLServerSelectParser |
java | elastic__elasticsearch | x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsProcessConfig.java | {
"start": 3192,
"end": 3965
} | class ____ implements ToXContentObject {
private final DataFrameAnalysis analysis;
private final ExtractedFields extractedFields;
private DataFrameAnalysisWrapper(DataFrameAnalysis analysis, ExtractedFields extractedFields) {
this.analysis = analysis;
this.extractedFields = extractedFields;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field("name", analysis.getWriteableName());
builder.field("parameters", analysis.getParams(new AnalysisFieldInfo(extractedFields)));
builder.endObject();
return builder;
}
}
}
| DataFrameAnalysisWrapper |
java | apache__hadoop | hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/GridmixTestUtils.java | {
"start": 1531,
"end": 4426
} | class ____ {
private static final Logger LOG = LoggerFactory.getLogger(GridmixTestUtils.class);
static final Path DEST = new Path("/gridmix");
static FileSystem dfs = null;
static MiniDFSCluster dfsCluster = null;
static MiniMRClientCluster mrvl = null;
protected static final String GRIDMIX_USE_QUEUE_IN_TRACE =
"gridmix.job-submission.use-queue-in-trace";
protected static final String GRIDMIX_DEFAULT_QUEUE =
"gridmix.job-submission.default-queue";
public static void initCluster(Class<?> caller) throws IOException {
Configuration conf = new Configuration();
// conf.set("mapred.queue.names", "default,q1,q2");
conf.set("mapred.queue.names", "default");
conf.set(PREFIX + "root.queues", "default");
conf.set(PREFIX + "root.default.capacity", "100.0");
conf.setBoolean(GRIDMIX_USE_QUEUE_IN_TRACE, false);
conf.set(GRIDMIX_DEFAULT_QUEUE, "default");
dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true)
.build();// MiniDFSCluster(conf, 3, true, null);
dfs = dfsCluster.getFileSystem();
conf.set(JTConfig.JT_RETIREJOBS, "false");
mrvl = MiniMRClientClusterFactory.create(caller, 2, conf);
conf = mrvl.getConfig();
String[] files = conf.getStrings(MRJobConfig.CACHE_FILES);
if (files != null) {
String[] timestamps = new String[files.length];
for (int i = 0; i < files.length; i++) {
timestamps[i] = Long.toString(System.currentTimeMillis());
}
conf.setStrings(MRJobConfig.CACHE_FILE_TIMESTAMPS, timestamps);
}
}
public static void shutdownCluster() throws IOException {
if (mrvl != null) {
mrvl.stop();
}
if (dfsCluster != null) {
dfsCluster.shutdown();
}
}
/**
* Methods to generate the home directory for dummy users.
*
* @param conf
*/
public static void createHomeAndStagingDirectory(String user,
Configuration conf) {
try {
FileSystem fs = dfsCluster.getFileSystem();
String path = "/user/" + user;
Path homeDirectory = new Path(path);
if (!fs.exists(homeDirectory)) {
LOG.info("Creating Home directory : " + homeDirectory);
fs.mkdirs(homeDirectory);
changePermission(user, homeDirectory, fs);
}
changePermission(user, homeDirectory, fs);
Path stagingArea = new Path(
conf.get("mapreduce.jobtracker.staging.root.dir",
"/tmp/hadoop/mapred/staging"));
LOG.info("Creating Staging root directory : " + stagingArea);
fs.mkdirs(stagingArea);
fs.setPermission(stagingArea, new FsPermission((short) 0777));
} catch (IOException ioe) {
ioe.printStackTrace();
}
}
static void changePermission(String user, Path homeDirectory, FileSystem fs)
throws IOException {
fs.setOwner(homeDirectory, user, "");
}
}
| GridmixTestUtils |
java | spring-projects__spring-boot | module/spring-boot-hibernate/src/main/java/org/springframework/boot/hibernate/autoconfigure/HibernateJpaConfiguration.java | {
"start": 10366,
"end": 11287
} | class ____ implements HibernatePropertiesCustomizer {
private final @Nullable PhysicalNamingStrategy physicalNamingStrategy;
private final @Nullable ImplicitNamingStrategy implicitNamingStrategy;
NamingStrategiesHibernatePropertiesCustomizer(@Nullable PhysicalNamingStrategy physicalNamingStrategy,
@Nullable ImplicitNamingStrategy implicitNamingStrategy) {
this.physicalNamingStrategy = physicalNamingStrategy;
this.implicitNamingStrategy = implicitNamingStrategy;
}
@Override
public void customize(Map<String, Object> hibernateProperties) {
if (this.physicalNamingStrategy != null) {
hibernateProperties.put("hibernate.physical_naming_strategy", this.physicalNamingStrategy);
}
if (this.implicitNamingStrategy != null) {
hibernateProperties.put("hibernate.implicit_naming_strategy", this.implicitNamingStrategy);
}
}
}
static | NamingStrategiesHibernatePropertiesCustomizer |
java | resilience4j__resilience4j | resilience4j-circuitbreaker/src/main/java/io/github/resilience4j/circuitbreaker/event/CircuitBreakerOnErrorEvent.java | {
"start": 788,
"end": 1735
} | class ____ extends AbstractCircuitBreakerEvent {
private final Throwable throwable;
private final Duration elapsedDuration;
public CircuitBreakerOnErrorEvent(String circuitBreakerName, Duration elapsedDuration,
Throwable throwable) {
super(circuitBreakerName);
this.throwable = throwable;
this.elapsedDuration = elapsedDuration;
}
public Throwable getThrowable() {
return throwable;
}
public Duration getElapsedDuration() {
return elapsedDuration;
}
@Override
public Type getEventType() {
return Type.ERROR;
}
@Override
public String toString() {
return String.format("%s: CircuitBreaker '%s' recorded an error: '%s'. Elapsed time: %s ms",
getCreationTime(),
getCircuitBreakerName(),
getThrowable().toString(),
getElapsedDuration().toMillis());
}
}
| CircuitBreakerOnErrorEvent |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/legacy/Detail.java | {
"start": 219,
"end": 862
} | class ____ implements Serializable {
private Root root;
private int i;
private Set details = new HashSet();
private int x;
public int getX() {
return x;
}
public void setX(int x) {
this.x = x;
}
public Root getRoot() {
return root;
}
public void setRoot(Root root) {
this.root = root;
}
public int getI() {
return i;
}
public void setI(int i) {
this.i = i;
}
/**
* Returns the details.
* @return Set
*/
public Set getSubDetails() {
return details;
}
/**
* Sets the details.
* @param details The details to set
*/
public void setSubDetails(Set details) {
this.details = details;
}
}
| Detail |
java | apache__flink | flink-table/flink-table-runtime/src/test/java/org/apache/flink/table/runtime/typeutils/InternalTypeInfoTest.java | {
"start": 1216,
"end": 2130
} | class ____ extends TypeInformationTestBase<InternalTypeInfo<?>> {
@Override
protected InternalTypeInfo<?>[] getTestData() {
return new InternalTypeInfo<?>[] {
InternalTypeInfo.of(DataTypes.INT().getLogicalType()),
InternalTypeInfo.of(
DataTypes.RAW(
DayOfWeek.class,
new KryoSerializer<>(
DayOfWeek.class, new SerializerConfigImpl()))
.getLogicalType()),
InternalTypeInfo.of(
DataTypes.RAW(
ByteBuffer.class,
new KryoSerializer<>(
ByteBuffer.class, new SerializerConfigImpl()))
.getLogicalType()),
};
}
}
| InternalTypeInfoTest |
java | apache__flink | flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/snapshot/ForStIncrementalSnapshotStrategy.java | {
"start": 2927,
"end": 9677
} | class ____<K> extends ForStNativeFullSnapshotStrategy<K> {
private static final Logger LOG =
LoggerFactory.getLogger(ForStIncrementalSnapshotStrategy.class);
private static final String DESCRIPTION = "Asynchronous incremental ForSt snapshot";
/**
* Stores the {@link StreamStateHandle} and corresponding local path of uploaded SST files that
* build the incremental history. Once the checkpoint is confirmed by JM, they can be reused for
* incremental checkpoint.
*/
@Nonnull private final SortedMap<Long, Collection<HandleAndLocalPath>> uploadedSstFiles;
/** The identifier of the last completed checkpoint. */
private long lastCompletedCheckpointId;
public ForStIncrementalSnapshotStrategy(
@Nonnull RocksDB db,
@Nonnull ResourceGuard forstResourceGuard,
@Nonnull ForStResourceContainer resourceContainer,
@Nonnull TypeSerializer<K> keySerializer,
@Nonnull LinkedHashMap<String, ForStOperationUtils.ForStKvStateInfo> kvStateInformation,
@Nonnull KeyGroupRange keyGroupRange,
@Nonnegative int keyGroupPrefixBytes,
@Nonnull UUID backendUID,
@Nonnull SortedMap<Long, Collection<HandleAndLocalPath>> uploadedStateHandles,
@Nonnull ForStStateDataTransfer stateTransfer,
long lastCompletedCheckpointId) {
super(
DESCRIPTION,
db,
forstResourceGuard,
resourceContainer,
keySerializer,
kvStateInformation,
keyGroupRange,
keyGroupPrefixBytes,
backendUID,
stateTransfer);
this.uploadedSstFiles = new TreeMap<>(uploadedStateHandles);
this.lastCompletedCheckpointId = lastCompletedCheckpointId;
}
@Override
public SnapshotResultSupplier<KeyedStateHandle> asyncSnapshot(
ForStNativeSnapshotResources snapshotResources,
long checkpointId,
long timestamp,
@Nonnull CheckpointStreamFactory checkpointStreamFactory,
@Nonnull CheckpointOptions checkpointOptions) {
if (snapshotResources.stateMetaInfoSnapshots.isEmpty()) {
if (LOG.isDebugEnabled()) {
LOG.debug(
"Asynchronous ForSt snapshot performed on empty keyed state at {}. Returning null.",
timestamp);
}
return registry -> SnapshotResult.empty();
}
final CheckpointType.SharingFilesStrategy sharingFilesStrategy =
checkpointOptions.getCheckpointType().getSharingFilesStrategy();
switch (sharingFilesStrategy) {
case FORWARD_BACKWARD:
// incremental checkpoint, use origin PreviousSnapshot
break;
case NO_SHARING:
// savepoint, use empty PreviousSnapshot
snapshotResources.setPreviousSnapshot(EMPTY_PREVIOUS_SNAPSHOT);
break;
case FORWARD:
// Full checkpoint for IncrementalSnapshotStrategy is not supported, except for the
// first one.
if (snapshotResources.previousSnapshot.isEmpty()) {
break;
} else {
throw new IllegalArgumentException(
"Triggering a full checkpoint for IncrementalSnapshotStrategy is not supported.");
}
default:
throw new IllegalArgumentException(
String.format(
"Unsupported sharing files strategy for %s : %s",
this.getClass().getName(), sharingFilesStrategy));
}
return new ForStIncrementalSnapshotOperation(
checkpointId, snapshotResources, checkpointStreamFactory, sharingFilesStrategy);
}
@Override
public void notifyCheckpointComplete(long completedCheckpointId) {
synchronized (uploadedSstFiles) {
LOG.info("Backend:{} checkpoint:{} complete.", backendUID, completedCheckpointId);
// FLINK-23949: materializedSstFiles.keySet().contains(completedCheckpointId) make sure
// the notified checkpointId is not a savepoint, otherwise next checkpoint will
// degenerate into a full checkpoint
if (completedCheckpointId > lastCompletedCheckpointId
&& uploadedSstFiles.containsKey(completedCheckpointId)) {
uploadedSstFiles
.keySet()
.removeIf(checkpointId -> checkpointId < completedCheckpointId);
lastCompletedCheckpointId = completedCheckpointId;
}
}
}
@Override
public void notifyCheckpointAborted(long abortedCheckpointId) {
synchronized (uploadedSstFiles) {
LOG.info("Backend:{} checkpoint:{} aborted.", backendUID, abortedCheckpointId);
uploadedSstFiles.keySet().remove(abortedCheckpointId);
}
}
@Override
public void close() {
stateTransfer.close();
}
@Override
protected PreviousSnapshot snapshotMetaData(
long checkpointId, @Nonnull List<StateMetaInfoSnapshot> stateMetaInfoSnapshots) {
final long lastCompletedCheckpoint;
final SortedMap<Long, Collection<HandleAndLocalPath>> currentUploadedSstFiles;
// use the last completed checkpoint as the comparison base.
synchronized (uploadedSstFiles) {
lastCompletedCheckpoint = lastCompletedCheckpointId;
currentUploadedSstFiles =
new TreeMap<>(uploadedSstFiles.tailMap(lastCompletedCheckpoint));
}
PreviousSnapshot previousSnapshot =
new PreviousSnapshot(currentUploadedSstFiles, lastCompletedCheckpoint);
LOG.trace(
"Taking incremental snapshot for checkpoint {}. Snapshot is based on last completed checkpoint {} "
+ "assuming the following (shared) confirmed files as base: {}.",
checkpointId,
lastCompletedCheckpoint,
previousSnapshot);
// snapshot meta data to save
for (Map.Entry<String, ForStOperationUtils.ForStKvStateInfo> stateMetaInfoEntry :
kvStateInformation.entrySet()) {
stateMetaInfoSnapshots.add(stateMetaInfoEntry.getValue().metaInfo.snapshot());
}
return previousSnapshot;
}
/** Encapsulates the process to perform an incremental snapshot of a ForStKeyedStateBackend. */
private final | ForStIncrementalSnapshotStrategy |
java | apache__rocketmq | store/src/main/java/org/apache/rocketmq/store/dledger/DLedgerCommitLog.java | {
"start": 50045,
"end": 50776
} | class ____ extends SelectMappedBufferResult {
private SelectMmapBufferResult sbr;
public DLedgerSelectMappedBufferResult(SelectMmapBufferResult sbr) {
super(sbr.getStartOffset(), sbr.getByteBuffer(), sbr.getSize(), null);
this.sbr = sbr;
}
@Override
public synchronized void release() {
super.release();
if (sbr != null) {
sbr.release();
}
}
}
public DLedgerServer getdLedgerServer() {
return dLedgerServer;
}
public int getId() {
return id;
}
public long getDividedCommitlogOffset() {
return dividedCommitlogOffset;
}
}
| DLedgerSelectMappedBufferResult |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/snapshots/SnapshotState.java | {
"start": 572,
"end": 2466
/**
 * Lifecycle state of a snapshot. Each state carries a stable byte {@link #value()}
 * used for serialization, plus flags describing whether the snapshot process has
 * finished and whether its data can be restored from.
 */
enum ____ {
    /**
     * Snapshot process has started
     */
    IN_PROGRESS((byte) 0, false, false),
    /**
     * Snapshot process completed successfully
     */
    SUCCESS((byte) 1, true, true),
    /**
     * Snapshot failed
     */
    FAILED((byte) 2, true, false),
    /**
     * Snapshot was partial successful
     */
    PARTIAL((byte) 3, true, true),
    /**
     * Snapshot is incompatible with the current version of the cluster
     */
    INCOMPATIBLE((byte) 4, true, false);

    // Stable wire/storage code for this state; never renumber or reuse values.
    private final byte value;
    // Whether the snapshot process has finished (successfully or not).
    private final boolean completed;
    // Whether the snapshot can be restored from, at least partially.
    private final boolean restorable;

    SnapshotState(byte value, boolean completed, boolean restorable) {
        this.value = value;
        this.completed = completed;
        this.restorable = restorable;
    }

    /**
     * Returns code that represents the snapshot state
     *
     * @return code for the state
     */
    public byte value() {
        return value;
    }

    /**
     * Returns true if snapshot completed (successfully or not)
     *
     * @return true if snapshot completed, false otherwise
     */
    public boolean completed() {
        return completed;
    }

    /**
     * Returns true if snapshot can be restored (at least partially)
     *
     * @return true if snapshot can be restored, false otherwise
     */
    public boolean restorable() {
        return restorable;
    }

    /**
     * Generate snapshot state from code
     *
     * @param value the state code
     * @return state
     * @throws IllegalArgumentException if {@code value} maps to no known state
     */
    public static SnapshotState fromValue(byte value) {
        return switch (value) {
            case 0 -> IN_PROGRESS;
            case 1 -> SUCCESS;
            case 2 -> FAILED;
            case 3 -> PARTIAL;
            case 4 -> INCOMPATIBLE;
            default -> throw new IllegalArgumentException("No snapshot state for value [" + value + "]");
        };
    }
}
| SnapshotState |
java | apache__dubbo | dubbo-common/src/main/java/org/apache/dubbo/common/config/configcenter/nop/NopDynamicConfigurationFactory.java | {
"start": 1077,
"end": 1305
class ____ extends AbstractDynamicConfigurationFactory {

    /**
     * Always produces a {@link NopDynamicConfiguration} — a do-nothing dynamic
     * configuration, presumably used as the fallback when no real config-center
     * backend is configured (confirm against the extension wiring).
     */
    @Override
    protected DynamicConfiguration createDynamicConfiguration(URL url) {
        return new NopDynamicConfiguration(url);
    }
}
| NopDynamicConfigurationFactory |
java | apache__kafka | streams/src/main/java/org/apache/kafka/streams/processor/internals/assignment/LegacyTaskAssignor.java | {
"start": 1104,
"end": 1553
interface ____ {
    /**
     * Computes a task assignment for the given clients.
     *
     * @param clients               per-process client states; implementations are expected to
     *                              record the computed assignment here (confirm with implementations)
     * @param allTaskIds            all tasks (stateful and stateless) that must be assigned
     * @param statefulTaskIds       the subset of {@code allTaskIds} that is stateful
     * @param rackAwareTaskAssignor helper for rack-aware placement optimization
     * @param configs               assignment configuration knobs
     * @return whether the generated assignment requires a followup probing rebalance to satisfy all conditions
     */
    boolean assign(final Map<ProcessId, ClientState> clients,
                   final Set<TaskId> allTaskIds,
                   final Set<TaskId> statefulTaskIds,
                   final RackAwareTaskAssignor rackAwareTaskAssignor,
                   final AssignmentConfigs configs);
}
| LegacyTaskAssignor |
java | ReactiveX__RxJava | src/test/java/io/reactivex/rxjava3/internal/operators/flowable/FlowableTakeTest.java | {
"start": 8214,
"end": 16349
/**
 * Test Publisher that emits the given values from a dedicated thread and then
 * completes. It hands out a {@link BooleanSubscription} but never consults it,
 * so it ignores both backpressure and cancellation — tests that use it must
 * tolerate unrequested emissions.
 */
class ____ implements Publisher<String> {

    final String[] values;
    // The emitting thread; created anew on each subscribe() call.
    Thread t;

    TestFlowableFunc(String... values) {
        this.values = values;
    }

    @Override
    public void subscribe(final Subscriber<? super String> subscriber) {
        subscriber.onSubscribe(new BooleanSubscription());
        System.out.println("TestFlowable subscribed to ...");
        t = new Thread(new Runnable() {

            @Override
            public void run() {
                try {
                    System.out.println("running TestFlowable thread");
                    // Emit every value unconditionally, then complete.
                    for (String s : values) {
                        System.out.println("TestFlowable onNext: " + s);
                        subscriber.onNext(s);
                    }
                    subscriber.onComplete();
                } catch (Throwable e) {
                    throw new RuntimeException(e);
                }
            }

        });
        System.out.println("starting TestFlowable thread");
        t.start();
        System.out.println("done starting TestFlowable thread");
    }
}
// Emits increasing longs as fast as possible until the subscription is cancelled
// (checked once per iteration). It ignores requests entirely, so tests pair it
// with an explicit backpressure strategy such as onBackpressureDrop().
private static Flowable<Long> INFINITE_OBSERVABLE = Flowable.unsafeCreate(new Publisher<Long>() {

    @Override
    public void subscribe(Subscriber<? super Long> op) {
        BooleanSubscription bs = new BooleanSubscription();
        op.onSubscribe(bs);
        long l = 1;
        while (!bs.isCancelled()) {
            op.onNext(l++);
        }
        op.onComplete();
    }

});
// take(1) must cancel the (unbounded) upstream after the first element, even
// across an observeOn boundary: exactly one value is delivered, then onComplete.
@Test
public void takeObserveOn() {
    Subscriber<Object> subscriber = TestHelper.mockSubscriber();
    TestSubscriber<Object> ts = new TestSubscriber<>(subscriber);

    INFINITE_OBSERVABLE.onBackpressureDrop()
    .observeOn(Schedulers.newThread()).take(1).subscribe(ts);

    ts.awaitDone(5, TimeUnit.SECONDS);
    ts.assertNoErrors();

    verify(subscriber).onNext(1L);
    verify(subscriber, never()).onNext(2L);
    verify(subscriber).onComplete();
    verify(subscriber, never()).onError(any(Throwable.class));
}
// When the downstream request (3) equals the take limit (3), the request is
// passed through to the upstream unchanged.
@Test
public void producerRequestThroughTake() {
    TestSubscriber<Integer> ts = new TestSubscriber<>(3);

    final AtomicLong requested = new AtomicLong();

    Flowable.unsafeCreate(new Publisher<Integer>() {

        @Override
        public void subscribe(Subscriber<? super Integer> s) {
            s.onSubscribe(new Subscription() {

                @Override
                public void request(long n) {
                    // Record what take() actually requested from upstream.
                    requested.set(n);
                }

                @Override
                public void cancel() {

                }
            });
        }
    }).take(3).subscribe(ts);

    assertEquals(3, requested.get());
}
// When the downstream requests more (3) than the take limit (1), take() switches
// to its fast path and only requests the limit from upstream.
@Test
public void producerRequestThroughTakeIsModified() {
    TestSubscriber<Integer> ts = new TestSubscriber<>(3);

    final AtomicLong requested = new AtomicLong();

    Flowable.unsafeCreate(new Publisher<Integer>() {

        @Override
        public void subscribe(Subscriber<? super Integer> s) {
            s.onSubscribe(new Subscription() {

                @Override
                public void request(long n) {
                    requested.set(n);
                }

                @Override
                public void cancel() {

                }
            });
        }
    }).take(1).subscribe(ts);

    //FIXME take triggers fast path if downstream requests more than the limit
    assertEquals(1, requested.get());
}
// A consumer that sleeps after take(1) completes must not be interrupted by the
// upstream cancellation: the sleep finishes normally and no exception is captured.
@Test
public void interrupt() throws InterruptedException {
    final AtomicReference<Object> exception = new AtomicReference<>();
    final CountDownLatch latch = new CountDownLatch(1);
    Flowable.just(1).subscribeOn(Schedulers.computation()).take(1)
    .subscribe(new Consumer<Integer>() {

        @Override
        public void accept(Integer t1) {
            try {
                Thread.sleep(100);
            } catch (Exception e) {
                // Would capture an InterruptedException if cancellation interrupted us.
                exception.set(e);
                e.printStackTrace();
            } finally {
                latch.countDown();
            }
        }

    });

    latch.await();
    assertNull(exception.get());
}
// Even though the downstream requests three times, take(2) must forward at most
// a total of 2 to the upstream interval source.
@Test
public void doesntRequestMoreThanNeededFromUpstream() throws InterruptedException {
    final AtomicLong requests = new AtomicLong();
    TestSubscriber<Long> ts = new TestSubscriber<>(0L);
    Flowable.interval(100, TimeUnit.MILLISECONDS)
        //
        .doOnRequest(new LongConsumer() {
            @Override
            public void accept(long n) {
                System.out.println(n);
                // Sum all requests that reach the upstream.
                requests.addAndGet(n);
            }})
        //
        .take(2)
        //
        .subscribe(ts);
    Thread.sleep(50);
    ts.request(1);
    ts.request(1);
    ts.request(1);
    ts.awaitDone(5, TimeUnit.SECONDS);
    ts.assertComplete();
    ts.assertNoErrors();
    assertEquals(2, requests.get());
}
// If the subscriber's onNext throws on the final value, the failure must surface
// as onError (via safeSubscribe) with no value recorded and no completion.
@Test
public void takeFinalValueThrows() {
    Flowable<Integer> source = Flowable.just(1).take(1);

    TestSubscriber<Integer> ts = new TestSubscriber<Integer>() {
        @Override
        public void onNext(Integer t) {
            // Throw before the base class records the value, hence assertNoValues below.
            throw new TestException();
        }
    };

    source.safeSubscribe(ts);

    ts.assertNoValues();
    ts.assertError(TestException.class);
    ts.assertNotComplete();
}
// A reentrant onNext (emitting from within doOnNext) after take(1) has reached
// its limit must be ignored: only the first value is delivered.
@Test
public void reentrantTake() {
    final PublishProcessor<Integer> source = PublishProcessor.create();

    TestSubscriber<Integer> ts = new TestSubscriber<>();

    source.take(1).doOnNext(new Consumer<Integer>() {
        @Override
        public void accept(Integer v) {
            // Re-enter the processor while the first value is still being delivered.
            source.onNext(2);
        }
    }).subscribe(ts);

    source.onNext(1);

    ts.assertValue(1);
    ts.assertNoErrors();
    ts.assertComplete();
}
// take() must reject a negative count eagerly, at assembly time.
@Test
public void takeNegative() {
    IllegalArgumentException error = null;
    try {
        Flowable.just(1).take(-99);
    } catch (IllegalArgumentException ex) {
        error = ex;
    }
    if (error == null) {
        fail("Should have thrown");
    }
    assertEquals("count >= 0 required but it was -99", error.getMessage());
}
// take(0) must yield an empty, immediately-completing sequence.
@Test
public void takeZero() {
    Flowable<Integer> empty = Flowable.just(1).take(0);

    empty.test().assertResult();
}
// Standard check: the take() subscriber handles disposal correctly.
@Test
public void dispose() {
    Flowable<Integer> source = PublishProcessor.<Integer>create().take(2);

    TestHelper.checkDisposed(source);
}
// Standard check: take() must honor the Reactive Streams rule that a second
// onSubscribe is rejected.
@Test
public void doubleOnSubscribe() {
    TestHelper.checkDoubleOnSubscribeFlowable(new Function<Flowable<Object>, Flowable<Object>>() {
        @Override
        public Flowable<Object> apply(Flowable<Object> f) throws Exception {
            return f.take(2);
        }
    });
}
// Standard check: an invalid (non-positive) request is reported, not silently accepted.
@Test
public void badRequest() {
    Flowable<Object> source = Flowable.never().take(1);

    TestHelper.assertBadRequestReported(source);
}
// Two threads racing request(1) against take(2) must still deliver exactly the
// two values, once each — requests may not be lost or double-counted.
@Test
public void requestRace() {
    for (int i = 0; i < TestHelper.RACE_LONG_LOOPS; i++) {
        final TestSubscriber<Integer> ts = Flowable.range(1, 2).take(2).test(0L);

        Runnable r1 = new Runnable() {
            @Override
            public void run() {
                ts.request(1);
            }
        };

        TestHelper.race(r1, r1);

        ts.assertResult(1, 2);
    }
}
// With take(0) the limit is reached immediately, so the upstream error cannot be
// delivered downstream; it must be routed to the plugin handler as undeliverable.
@Test
public void errorAfterLimitReached() {
    List<Throwable> errors = TestHelper.trackPluginErrors();
    try {
        Flowable.error(new TestException())
        .take(0)
        .test()
        .assertResult();

        TestHelper.assertUndeliverable(errors, 0, TestException.class);
    } finally {
        RxJavaPlugins.reset();
    }
}
}
| TestFlowableFunc |
java | apache__kafka | raft/src/main/java/org/apache/kafka/raft/LeaderState.java | {
"start": 2782,
"end": 40869
} | class ____<T> implements EpochState {
static final long OBSERVER_SESSION_TIMEOUT_MS = 300_000L;
static final double CHECK_QUORUM_TIMEOUT_FACTOR = 1.5;

// This node's voter entry (id, directory id, endpoints). The constructor
// guarantees it has a directory id and is a member of the epoch-start voter set.
private final VoterSet.VoterNode localVoterNode;
// The leader epoch this state represents; fixed for the lifetime of this object.
private final int epoch;
// Offset of the first record written in this epoch; the high watermark may only
// advance past it (see maybeUpdateHighWatermark).
private final long epochStartOffset;
// Ids of the voters that granted their vote in the election that produced this leader.
private final Set<Integer> grantingVoters;
private final VoterSet voterSetAtEpochStart;
// This field is non-empty if the voter set at epoch start came from a snapshot or log segment
private final OptionalLong offsetOfVotersAtEpochStart;
private final KRaftVersion kraftVersionAtEpochStart;

// Highest offset known to be replicated to a majority; empty until first computed.
private Optional<LogOffsetMetadata> highWatermark = Optional.empty();
// Replication progress per voter id (includes the leader's own state).
private Map<Integer, ReplicaState> voterStates = new HashMap<>();
// At most one add/remove-voter operation may be in flight at a time.
private Optional<AddVoterHandlerState> addVoterHandlerState = Optional.empty();
private Optional<RemoveVoterHandlerState> removeVoterHandlerState = Optional.empty();
// Replication progress for non-voters, keyed by full replica key.
private final Map<ReplicaKey, ReplicaState> observerStates = new HashMap<>();
private final Logger log;
private final BatchAccumulator<T> accumulator;

// The set includes all the followers voters that FETCH or FETCH_SNAPSHOT during the current checkQuorumTimer interval.
private final Set<Integer> fetchedVoters = new HashSet<>();
private final Timer checkQuorumTimer;
private final int checkQuorumTimeoutMs;
private final Timer beginQuorumEpochTimer;
private final int beginQuorumEpochTimeoutMs;
private final KafkaRaftMetrics kafkaRaftMetrics;

// This is volatile because resignation can be requested from an external thread.
private volatile boolean resignRequested = false;

/* Used to coordinate the upgrade of the kraft.version from 0 to 1. The upgrade is triggered by
 * the clients to RaftClient.
 * 1. if the kraft version is 0, the starting state is the Voters type. The voter set is the voters in
 *    the static voter set with the leader updated. See KRaftVersionUpgrade for details on the
 *    Voters type.
 * 2. as the leader receives UpdateRaftVoter requests, it updates the associated Voters type. Only
 *    after all of the voters have been updated will an upgrade successfully complete.
 * 3. a client of RaftClient triggers the upgrade and transition this state to the Version
 *    type. See KRaftVersionUpgrade for details on the Version type.
 *
 * All transition are coordinated using optimistic locking by always calling AtomicReference#compareAndSet
 */
private final AtomicReference<KRaftVersionUpgrade> kraftVersionUpgradeState = new AtomicReference<>(
    KRaftVersionUpgrade.empty()
);
/**
 * Creates leader state for {@code epoch}.
 *
 * Validates that the local node has a known directory id and is a member of the
 * epoch-start voter set, seeds per-voter replication state, and sizes the
 * check-quorum and begin-quorum timers off the fetch timeout. When the kraft
 * version does not support reconfiguration, the in-memory voter set is
 * pre-populated with the leader's up-to-date information for a later upgrade.
 *
 * @throws IllegalArgumentException if the local directory id is missing or the
 *                                  local node is not a voter at epoch start
 */
protected LeaderState(
    Time time,
    VoterSet.VoterNode localVoterNode,
    int epoch,
    long epochStartOffset,
    VoterSet voterSetAtEpochStart,
    OptionalLong offsetOfVotersAtEpochStart,
    KRaftVersion kraftVersionAtEpochStart,
    Set<Integer> grantingVoters,
    BatchAccumulator<T> accumulator,
    int fetchTimeoutMs,
    LogContext logContext,
    KafkaRaftMetrics kafkaRaftMetrics
) {
    if (localVoterNode.voterKey().directoryId().isEmpty()) {
        throw new IllegalArgumentException(
            String.format("Unknown local replica directory id: %s", localVoterNode)
        );
    } else if (!voterSetAtEpochStart.isVoter(localVoterNode.voterKey())) {
        throw new IllegalArgumentException(
            String.format(
                "Local replica %s is not a voter in %s",
                localVoterNode,
                voterSetAtEpochStart
            )
        );
    }
    this.localVoterNode = localVoterNode;
    this.epoch = epoch;
    this.epochStartOffset = epochStartOffset;

    for (VoterSet.VoterNode voterNode: voterSetAtEpochStart.voterNodes()) {
        // Only the leader's own entry starts as having acknowledged the leader.
        boolean hasAcknowledgedLeader = voterNode.isVoter(localVoterNode.voterKey());
        this.voterStates.put(
            voterNode.voterKey().id(),
            new ReplicaState(voterNode.voterKey(), hasAcknowledgedLeader, voterNode.listeners())
        );
    }
    this.grantingVoters = Set.copyOf(grantingVoters);
    this.log = logContext.logger(LeaderState.class);
    this.accumulator = Objects.requireNonNull(accumulator, "accumulator must be non-null");

    // use the 1.5x of fetch timeout to tolerate some network transition time or other IO time.
    this.checkQuorumTimeoutMs = (int) (fetchTimeoutMs * CHECK_QUORUM_TIMEOUT_FACTOR);
    this.checkQuorumTimer = time.timer(checkQuorumTimeoutMs);
    this.beginQuorumEpochTimeoutMs = fetchTimeoutMs / 2;
    this.beginQuorumEpochTimer = time.timer(0);

    this.voterSetAtEpochStart = voterSetAtEpochStart;
    this.offsetOfVotersAtEpochStart = offsetOfVotersAtEpochStart;
    this.kraftVersionAtEpochStart = kraftVersionAtEpochStart;

    kafkaRaftMetrics.addLeaderMetrics();
    this.kafkaRaftMetrics = kafkaRaftMetrics;

    if (!kraftVersionAtEpochStart.isReconfigSupported()) {
        var updatedVoters = voterSetAtEpochStart
            .updateVoterIgnoringDirectoryId(localVoterNode)
            .orElseThrow(
                () -> new IllegalStateException(
                    String.format(
                        "Unable to update voter set %s with the latest leader information %s",
                        voterSetAtEpochStart,
                        localVoterNode
                    )
                )
            );
        kraftVersionUpgradeState.set(new KRaftVersionUpgrade.Voters(updatedVoters));
    }
}
/** Advances the begin-quorum-epoch timer to {@code currentTimeMs} and returns its remaining time. */
public long timeUntilBeginQuorumEpochTimerExpires(long currentTimeMs) {
    beginQuorumEpochTimer.update(currentTimeMs);
    return beginQuorumEpochTimer.remainingMs();
}
/** Restarts the begin-quorum-epoch timer for another {@code beginQuorumEpochTimeoutMs} interval. */
public void resetBeginQuorumEpochTimer(long currentTimeMs) {
    beginQuorumEpochTimer.update(currentTimeMs);
    beginQuorumEpochTimer.reset(beginQuorumEpochTimeoutMs);
}
/**
 * Determines the set of replicas that should receive a {@code BeginQuorumEpoch} request
 * based on the elapsed time since their last fetch.
 * <p>
 * For each remote voter (excluding the local node), if the time since the last
 * fetch exceeds the configured {@code beginQuorumEpochTimeoutMs}, the replica
 * is considered to need a new quorum epoch request.
 *
 * @param currentTimeMs the current system time in milliseconds
 * @return an unmodifiable set of {@link ReplicaKey} objects representing replicas
 *         that need to receive a {@code BeginQuorumEpoch} request
 */
public Set<ReplicaKey> needToSendBeginQuorumRequests(long currentTimeMs) {
    Set<ReplicaKey> staleVoters = new HashSet<>();
    for (ReplicaState state : voterStates.values()) {
        boolean isRemoteVoter = state.replicaKey.id() != localVoterNode.voterKey().id();
        boolean fetchIsStale = currentTimeMs - state.lastFetchTimestamp >= beginQuorumEpochTimeoutMs;
        if (isRemoteVoter && fetchIsStale) {
            staleVoters.add(state.replicaKey);
        }
    }
    return Set.copyOf(staleVoters);
}
/**
 * Get the remaining time in milliseconds until the checkQuorumTimer expires.
 *
 * This will happen if we didn't receive a valid fetch/fetchSnapshot request from the majority
 * of the voters within checkQuorumTimeoutMs.
 *
 * @param currentTimeMs the current timestamp in millisecond
 * @return the remainingMs before the checkQuorumTimer expired
 */
public long timeUntilCheckQuorumExpires(long currentTimeMs) {
    // if there's only 1 voter, it should never get expired.
    if (voterStates.size() == 1) {
        return Long.MAX_VALUE;
    }
    checkQuorumTimer.update(currentTimeMs);
    long remainingMs = checkQuorumTimer.remainingMs();
    if (remainingMs == 0) {
        // Log the voters we did hear from so operators can tell which followers went quiet.
        log.info(
            "Did not receive fetch request from the majority of the voters within {}ms. " +
            "Current fetched voters are {}, and voters are {}",
            checkQuorumTimeoutMs,
            fetchedVoters,
            voterStates.values()
                .stream()
                .map(voter -> voter.replicaKey)
                .collect(Collectors.toUnmodifiableSet())
        );
    }
    return remainingMs;
}
/**
 * Reset the checkQuorumTimer if we've received fetch/fetchSnapshot request from the majority of the voter
 *
 * @param replicaKey the replica key of the voter
 * @param currentTimeMs the current timestamp in millisecond
 */
public void updateCheckQuorumForFollowingVoter(ReplicaKey replicaKey, long currentTimeMs) {
    updateFetchedVoters(replicaKey);
    // The majority number of the voters. Ex: 2 for 3 voters, 3 for 4 voters... etc.
    int majority = (voterStates.size() / 2) + 1;
    // If the leader is in the voter set, it should be implicitly counted as part of the
    // majority, but the leader will never be a member of the fetchedVoters.
    // If the leader is not in the voter set, it is not in the majority. Then, the
    // majority can only be composed of fetched voters.
    if (voterStates.containsKey(localVoterNode.voterKey().id())) {
        majority = majority - 1;
    }

    // Once enough distinct voters have fetched, restart the window and start counting afresh.
    if (fetchedVoters.size() >= majority) {
        fetchedVoters.clear();
        checkQuorumTimer.update(currentTimeMs);
        checkQuorumTimer.reset(checkQuorumTimeoutMs);
    }
}
/**
 * Records that {@code replicaKey} fetched during the current check-quorum window.
 * Only counted when the key matches a known voter; the leader itself must never call this.
 */
private void updateFetchedVoters(ReplicaKey replicaKey) {
    if (replicaKey.id() == localVoterNode.voterKey().id()) {
        throw new IllegalArgumentException("Received a FETCH/FETCH_SNAPSHOT request from the leader itself.");
    }

    ReplicaState state = voterStates.get(replicaKey.id());
    if (state != null && state.matchesKey(replicaKey)) {
        fetchedVoters.add(replicaKey.id());
    }
}
/** Returns the batch accumulator used to stage records for this leader epoch. */
public BatchAccumulator<T> accumulator() {
    return this.accumulator;
}
/** Returns the in-flight add-voter operation, if any. */
public Optional<AddVoterHandlerState> addVoterHandlerState() {
    return addVoterHandlerState;
}
/**
 * Completes any in-flight add-voter operation with the given error/message and
 * replaces it with {@code state} (possibly empty), updating the pending-change metric.
 */
public void resetAddVoterHandlerState(
    Errors error,
    String message,
    Optional<AddVoterHandlerState> state
) {
    addVoterHandlerState.ifPresent(
        handlerState -> handlerState
            .future()
            .complete(RaftUtil.addVoterResponse(error, message))
    );
    addVoterHandlerState = state;

    updateUncommittedVoterChangeMetric();
}
/** Returns the in-flight remove-voter operation, if any. */
public Optional<RemoveVoterHandlerState> removeVoterHandlerState() {
    return removeVoterHandlerState;
}
/**
 * Completes any in-flight remove-voter operation with the given error/message and
 * replaces it with {@code state} (possibly empty), updating the pending-change metric.
 */
public void resetRemoveVoterHandlerState(
    Errors error,
    String message,
    Optional<RemoveVoterHandlerState> state
) {
    removeVoterHandlerState.ifPresent(
        handlerState -> handlerState
            .future()
            .complete(RaftUtil.removeVoterResponse(error, message))
    );
    removeVoterHandlerState = state;

    updateUncommittedVoterChangeMetric();
}
/** Reflects in the metrics whether any voter add/remove operation is currently pending. */
private void updateUncommittedVoterChangeMetric() {
    kafkaRaftMetrics.updateUncommittedVoterChange(
        addVoterHandlerState.isPresent() || removeVoterHandlerState.isPresent()
    );
}
/**
 * Fails any pending add/remove-voter operation whose deadline has passed with
 * {@code REQUEST_TIMED_OUT}, then returns the time until the next pending
 * operation (if any) expires; {@code Long.MAX_VALUE} when none is pending.
 */
public long maybeExpirePendingOperation(long currentTimeMs) {
    // First abort any expired operations
    long timeUntilAddVoterExpiration = addVoterHandlerState()
        .map(state -> state.timeUntilOperationExpiration(currentTimeMs))
        .orElse(Long.MAX_VALUE);

    if (timeUntilAddVoterExpiration == 0) {
        resetAddVoterHandlerState(Errors.REQUEST_TIMED_OUT, null, Optional.empty());
    }

    long timeUntilRemoveVoterExpiration = removeVoterHandlerState()
        .map(state -> state.timeUntilOperationExpiration(currentTimeMs))
        .orElse(Long.MAX_VALUE);

    if (timeUntilRemoveVoterExpiration == 0) {
        resetRemoveVoterHandlerState(Errors.REQUEST_TIMED_OUT, null, Optional.empty());
    }

    // Reread the timeouts and return the smaller of them
    return Math.min(
        addVoterHandlerState()
            .map(state -> state.timeUntilOperationExpiration(currentTimeMs))
            .orElse(Long.MAX_VALUE),
        removeVoterHandlerState()
            .map(state -> state.timeUntilOperationExpiration(currentTimeMs))
            .orElse(Long.MAX_VALUE)
    );
}
/** True if an add/remove-voter operation is still pending after expiring stale ones. */
public boolean isOperationPending(long currentTimeMs) {
    maybeExpirePendingOperation(currentTimeMs);
    return addVoterHandlerState.isPresent() || removeVoterHandlerState.isPresent();
}
/** Converts a set of voter ids into {@code LeaderChangeMessage.Voter} entries. */
private static List<Voter> convertToVoters(Set<Integer> voterIds) {
    List<Voter> voters = new ArrayList<>(voterIds.size());
    for (int voterId : voterIds) {
        voters.add(new Voter().setVoterId(voterId));
    }
    return voters;
}
/**
 * Builds a {@code MemoryRecordsBuilder} configured for a control batch
 * (leader-change / kraft-version / voters records) at the given base offset and epoch.
 * The write limit is capped at the supplied buffer's capacity.
 */
private static MemoryRecordsBuilder createControlRecordsBuilder(
    long baseOffset,
    int epoch,
    Compression compression,
    ByteBuffer buffer,
    long currentTimeMs
) {
    return new MemoryRecordsBuilder(
        buffer,
        RecordBatch.CURRENT_MAGIC_VALUE,
        compression,
        TimestampType.CREATE_TIME,
        baseOffset,
        currentTimeMs,
        RecordBatch.NO_PRODUCER_ID,
        RecordBatch.NO_PRODUCER_EPOCH,
        RecordBatch.NO_SEQUENCE,
        false, // isTransactional
        true, // isControlBatch
        epoch,
        buffer.capacity()
    );
}
/**
 * Appends the control records that open this leader epoch: always a
 * {@code LeaderChangeMessage}; additionally, when reconfiguration is supported,
 * a kraft-version record plus an updated voters record if the leader's listener
 * information changed or no voters record has ever been written.
 */
public void appendStartOfEpochControlRecords(long currentTimeMs) {
    List<Voter> voters = convertToVoters(voterStates.keySet());
    List<Voter> grantingVoters = convertToVoters(this.grantingVoters());

    LeaderChangeMessage leaderChangeMessage = new LeaderChangeMessage()
        .setVersion(ControlRecordUtils.LEADER_CHANGE_CURRENT_VERSION)
        .setLeaderId(this.election().leaderId())
        .setVoters(voters)
        .setGrantingVoters(grantingVoters);

    accumulator.appendControlMessages((baseOffset, epoch, compression, buffer) -> {
        try (MemoryRecordsBuilder builder = createControlRecordsBuilder(
                baseOffset,
                epoch,
                compression,
                buffer,
                currentTimeMs
            )
        ) {
            builder.appendLeaderChangeMessage(currentTimeMs, leaderChangeMessage);
            if (kraftVersionAtEpochStart.isReconfigSupported()) {
                // With reconfig support the voter set must be backed by the log/snapshot.
                long offset = offsetOfVotersAtEpochStart.orElseThrow(
                    () -> new IllegalStateException(
                        String.format(
                            "The %s is %s but there is no voter set in the log or " +
                            "checkpoint %s",
                            KRaftVersion.FEATURE_NAME,
                            kraftVersionAtEpochStart,
                            voterSetAtEpochStart
                        )
                    )
                );

                // The leader should write the latest voters record if its local listeners are different
                // or it has never written a voters record to the log before.
                if (offset == -1 || voterSetAtEpochStart.voterNodeNeedsUpdate(localVoterNode)) {
                    VoterSet updatedVoterSet = voterSetAtEpochStart
                        .updateVoter(localVoterNode)
                        .orElseThrow(
                            () -> new IllegalStateException(
                                String.format(
                                    "Update expected for leader node %s and voter set %s",
                                    localVoterNode,
                                    voterSetAtEpochStart
                                )
                            )
                        );
                    builder.appendKRaftVersionMessage(
                        currentTimeMs,
                        new KRaftVersionRecord()
                            .setVersion(kraftVersionAtEpochStart.kraftVersionRecordVersion())
                            .setKRaftVersion(kraftVersionAtEpochStart.featureLevel())
                    );
                    builder.appendVotersMessage(
                        currentTimeMs,
                        updatedVoterSet.toVotersRecord(
                            kraftVersionAtEpochStart.votersRecordVersion()
                        )
                    );
                }
            }

            return builder.build();
        }
    });
}
/** Appends a voters control record for {@code voters}; returns the offset reported by the accumulator. */
public long appendVotersRecord(VoterSet voters, long currentTimeMs) {
    return accumulator.appendVotersRecord(
        voters.toVotersRecord(ControlRecordUtils.KRAFT_VOTERS_CURRENT_VERSION),
        currentTimeMs
    );
}
/**
 * Optimistically swaps the in-memory voter set used during a kraft.version upgrade.
 * Returns false if the state changed concurrently (caller should re-read and retry).
 */
public boolean compareAndSetVolatileVoters(
    KRaftVersionUpgrade.Voters oldVoters,
    KRaftVersionUpgrade.Voters newVoters
) {
    return kraftVersionUpgradeState.compareAndSet(oldVoters, newVoters);
}
/** Returns the in-memory voter set if the upgrade state currently holds one. */
public Optional<KRaftVersionUpgrade.Voters> volatileVoters() {
    return kraftVersionUpgradeState.get().toVoters();
}
/** Returns the requested kraft version if an upgrade has been initiated. */
public Optional<KRaftVersionUpgrade.Version> requestedKRaftVersion() {
    return kraftVersionUpgradeState.get().toVersion();
}
/** True once {@link #requestResign()} has been called (possibly from another thread). */
public boolean isResignRequested() {
    return resignRequested;
}
/**
 * Heuristic used for add-voter: an observer is "caught up" if it has both caught
 * up to the leader at some point and fetched within the last hour.
 */
public boolean isReplicaCaughtUp(ReplicaKey replicaKey, long currentTimeMs) {
    // In summary, let's consider a replica caught up for add voter, if they
    // have fetched within the last hour
    long anHourInMs = TimeUnit.HOURS.toMillis(1);
    return Optional.ofNullable(observerStates.get(replicaKey))
        .map(state ->
            state.lastCaughtUpTimestamp > 0 &&
            state.lastFetchTimestamp > 0 &&
            state.lastFetchTimestamp > currentTimeMs - anHourInMs
        )
        .orElse(false);
}
/** Flags this leader for resignation; safe to call from outside the raft IO thread. */
public void requestResign() {
    this.resignRequested = true;
}
/**
 * Upgrade the kraft version.
 *
 * This methods upgrades the kraft version to {@code newVersion}. If the version is already
 * {@code newVersion}, this is a noop operation.
 *
 * KRaft only supports upgrades, so {@code newVersion} must be greater than or equal to curent
 * kraft version {@code persistedVersion}.
 *
 * For the upgrade to succeed all of the voters in the voter set must support the new kraft
 * version. The upgrade from kraft version 0 to kraft version 1 generate one control batch
 * with one control record setting the kraft version to 1 and one voters record setting the
 * updated voter set.
 *
 * When {@code validateOnly} is true only the validation is perform and the control records are
 * not generated.
 *
 * @param currentEpoch the current epoch
 * @param newVersion the new kraft version
 * @param persistedVersion the kraft version persisted to disk
 * @param persistedVoters the set of voters persisted to disk
 * @param validateOnly determine if only validation should be performed
 * @param currentTimeMs the current time
 * @return true if an upgrade was (or, for {@code validateOnly}, would be) performed;
 *         false if the version is already {@code newVersion}
 * @throws InvalidUpdateVersionException if the upgrade is a downgrade, conflicts with a
 *         concurrent upgrade, or not all voters support {@code newVersion}
 */
public boolean maybeAppendUpgradedKRaftVersion(
    int currentEpoch,
    KRaftVersion newVersion,
    KRaftVersion persistedVersion,
    VoterSet persistedVoters,
    boolean validateOnly,
    long currentTimeMs
) {
    validateEpoch(currentEpoch);

    var pendingVersion = kraftVersionUpgradeState.get().toVersion();
    if (pendingVersion.isPresent()) {
        if (pendingVersion.get().kraftVersion().equals(newVersion)) {
            // The version match; upgrade is a noop
            return false;
        } else {
            throw new InvalidUpdateVersionException(
                String.format(
                    "Invalid concurrent upgrade of %s from version %s to %s",
                    KRaftVersion.FEATURE_NAME,
                    pendingVersion.get(),
                    newVersion
                )
            );
        }
    } else if (persistedVersion.equals(newVersion)) {
        // Already at the requested version; nothing to do.
        return false;
    } else if (persistedVersion.isMoreThan(newVersion)) {
        throw new InvalidUpdateVersionException(
            String.format(
                "Invalid upgrade of %s from version %s to %s because the new version is a downgrade",
                KRaftVersion.FEATURE_NAME,
                persistedVersion,
                newVersion
            )
        );
    }

    // Upgrade to kraft.verion 1 is only supported; this needs to change when kraft.version 2 is added
    var inMemoryVoters = kraftVersionUpgradeState.get().toVoters().orElseThrow(() ->
        new InvalidUpdateVersionException(
            String.format(
                "Invalid upgrade of %s from version %s to %s",
                KRaftVersion.FEATURE_NAME,
                persistedVersion,
                newVersion
            )
        )
    );
    if (!inMemoryVoters.voters().voterIds().equals(persistedVoters.voterIds())) {
        // The in-memory and on-disk voter sets must agree before an upgrade is attempted.
        throw new IllegalStateException(
            String.format(
                "Unable to update %s to %s due to missing voters %s compared to %s",
                KRaftVersion.FEATURE_NAME,
                newVersion,
                inMemoryVoters.voters().voterIds(),
                persistedVoters.voterIds()
            )
        );
    } else if (!inMemoryVoters.voters().supportsVersion(newVersion)) {
        log.info("Not all voters support kraft version {}: {}", newVersion, inMemoryVoters.voters());
        throw new InvalidUpdateVersionException(
            String.format(
                "Invalid upgrade of %s to %s because not all of the voters support it",
                KRaftVersion.FEATURE_NAME,
                newVersion
            )
        );
    } else if (
        inMemoryVoters
            .voters()
            .voterKeys()
            .stream()
            .anyMatch(voterKey -> voterKey.directoryId().isEmpty())
    ) {
        throw new IllegalStateException(
            String.format(
                "Directory id must be known for all of the voters: %s",
                inMemoryVoters.voters()
            )
        );
    }

    if (!validateOnly) {
        /* Note that this only supports upgrades from kraft.version 0 to kraft.version 1. When
         * kraft.version 2 is added, this logic needs to be revisited
         */
        var successful = kraftVersionUpgradeState.compareAndSet(
            inMemoryVoters,
            new KRaftVersionUpgrade.Version(newVersion)
        );
        if (!successful) {
            // A concurrent UpdateRaftVoter changed the voter set between validation and commit.
            throw new InvalidUpdateVersionException(
                String.format(
                    "Unable to upgrade version for %s to %s due to changing voters",
                    KRaftVersion.FEATURE_NAME,
                    newVersion
                )
            );
        }

        // All of the validations succeeded; create control records for the upgrade
        accumulator.appendControlMessages((baseOffset, batchEpoch, compression, buffer) -> {
            try (MemoryRecordsBuilder builder = createControlRecordsBuilder(
                    baseOffset,
                    batchEpoch,
                    compression,
                    buffer,
                    currentTimeMs
                )
            ) {
                log.info("Appended kraft.version {} to the batch accumulator", newVersion);
                builder.appendKRaftVersionMessage(
                    currentTimeMs,
                    new KRaftVersionRecord()
                        .setVersion(newVersion.kraftVersionRecordVersion())
                        .setKRaftVersion(newVersion.featureLevel())
                );

                if (!inMemoryVoters.voters().equals(persistedVoters)) {
                    log.info("Appended voter set {} to the batch accumulator", inMemoryVoters.voters());
                    builder.appendVotersMessage(
                        currentTimeMs,
                        inMemoryVoters.voters().toVotersRecord(newVersion.votersRecordVersion())
                    );
                }

                return builder.build();
            }
        });
    }

    return true;
}
/**
 * Ensures {@code currentEpoch} matches this leader's epoch: a stale epoch means the
 * caller is no longer leader ({@code NotLeaderException}); a larger epoch is a bug
 * ({@code IllegalArgumentException}).
 */
private void validateEpoch(int currentEpoch) {
    if (currentEpoch < epoch()) {
        throw new NotLeaderException(
            String.format(
                "Upgrade kraft version failed because the given epoch %s is stale. Current leader epoch is %s",
                currentEpoch,
                epoch()
            )
        );
    } else if (currentEpoch > epoch()) {
        throw new IllegalArgumentException(
            String.format(
                "Attempt to append from epoch %s which is larger than the current epoch of %s",
                currentEpoch,
                epoch()
            )
        );
    }
}
/** The current high watermark, or empty if none has been established this epoch yet. */
@Override
public Optional<LogOffsetMetadata> highWatermark() {
    return highWatermark;
}
/** Election state naming the local node as the elected leader for this epoch. */
@Override
public ElectionState election() {
    return ElectionState.withElectedLeader(epoch, localVoterNode.voterKey().id(), Optional.empty(), voterStates.keySet());
}
/** The leader epoch this state represents. */
@Override
public int epoch() {
    return epoch;
}
/** The local (leader) node's advertised listener endpoints. */
@Override
public Endpoints leaderEndpoints() {
    return localVoterNode.listeners();
}
// Package-private view of per-voter replication state (includes the leader's own entry).
Map<Integer, ReplicaState> voterStates() {
    return voterStates;
}
// Package-private view of observer replication state; prunes inactive observers first.
Map<ReplicaKey, ReplicaState> observerStates(final long currentTimeMs) {
    clearInactiveObservers(currentTimeMs);
    return observerStates;
}
/** Ids of the voters that granted their vote in the election that produced this leader. */
public Set<Integer> grantingVoters() {
    return this.grantingVoters;
}
// visible for testing
// Returns the voters that have not yet acknowledged this node as leader.
Set<ReplicaKey> nonAcknowledgingVoters() {
    return voterStates
        .values()
        .stream()
        .filter(state -> !state.hasAcknowledgedLeader)
        .map(state -> state.replicaKey)
        .collect(Collectors.toCollection(HashSet::new));
}
/**
 * Recomputes the high watermark as the largest offset replicated to a majority
 * of voters (the leader counts itself). Returns true iff the high watermark
 * changed. The high watermark only becomes visible once it passes the epoch
 * start offset, and it is never moved backwards.
 */
private boolean maybeUpdateHighWatermark() {
    // Find the largest offset which is replicated to a majority of replicas (the leader counts)
    ArrayList<ReplicaState> followersByDescendingFetchOffset = followersByDescendingFetchOffset()
        .collect(Collectors.toCollection(ArrayList::new));

    // With states sorted descending by end offset, the entry at the majority index
    // is the highest offset held by at least a majority.
    int indexOfHw = voterStates.size() / 2;
    Optional<LogOffsetMetadata> highWatermarkUpdateOpt = followersByDescendingFetchOffset.get(indexOfHw).endOffset;

    if (highWatermarkUpdateOpt.isPresent()) {

        // The KRaft protocol requires an extra condition on commitment after a leader
        // election. The leader must commit one record from its own epoch before it is
        // allowed to expose records from any previous epoch. This guarantees that its
        // log will contain the largest record (in terms of epoch/offset) in any log
        // which ensures that any future leader will have replicated this record as well
        // as all records from previous epochs that the current leader has committed.

        LogOffsetMetadata highWatermarkUpdateMetadata = highWatermarkUpdateOpt.get();
        long highWatermarkUpdateOffset = highWatermarkUpdateMetadata.offset();

        if (highWatermarkUpdateOffset > epochStartOffset) {
            if (highWatermark.isPresent()) {
                LogOffsetMetadata currentHighWatermarkMetadata = highWatermark.get();
                if (highWatermarkUpdateOffset > currentHighWatermarkMetadata.offset()
                    || (highWatermarkUpdateOffset == currentHighWatermarkMetadata.offset() &&
                        !highWatermarkUpdateMetadata.metadata().equals(currentHighWatermarkMetadata.metadata()))) {
                    Optional<LogOffsetMetadata> oldHighWatermark = highWatermark;
                    highWatermark = highWatermarkUpdateOpt;
                    logHighWatermarkUpdate(
                        oldHighWatermark,
                        highWatermarkUpdateMetadata,
                        indexOfHw,
                        followersByDescendingFetchOffset
                    );
                    return true;
                } else if (highWatermarkUpdateOffset < currentHighWatermarkMetadata.offset()) {
                    // Never move the high watermark backwards; just log the anomaly.
                    log.info("The latest computed high watermark {} is smaller than the current " +
                            "value {}, which should only happen when voter set membership changes. If the voter " +
                            "set has not changed this suggests that one of the voters has lost committed data. " +
                            "Full voter replication state: {}", highWatermarkUpdateOffset,
                        currentHighWatermarkMetadata.offset(), voterStates.values());
                    return false;
                } else {
                    return false;
                }
            } else {
                Optional<LogOffsetMetadata> oldHighWatermark = highWatermark;
                highWatermark = highWatermarkUpdateOpt;
                logHighWatermarkUpdate(
                    oldHighWatermark,
                    highWatermarkUpdateMetadata,
                    indexOfHw,
                    followersByDescendingFetchOffset
                );
                return true;
            }
        }
    }
    return false;
}
/**
 * Logs a high-watermark change: debug for routine advances, info the first time
 * a high watermark is established in this epoch.
 */
private void logHighWatermarkUpdate(
    Optional<LogOffsetMetadata> oldHighWatermark,
    LogOffsetMetadata newHighWatermark,
    int indexOfHw,
    List<ReplicaState> followersByDescendingFetchOffset
) {
    if (oldHighWatermark.isPresent()) {
        log.debug(
            "High watermark set to {} from {} based on indexOfHw {} and voters {}",
            newHighWatermark,
            oldHighWatermark.get(),
            indexOfHw,
            followersByDescendingFetchOffset
        );
    } else {
        log.info(
            "High watermark set to {} for the first time for epoch {} based on indexOfHw {} and voters {}",
            newHighWatermark,
            epoch,
            indexOfHw,
            followersByDescendingFetchOffset
        );
    }
}
/**
 * Update the local replica state.
 *
 * @param endOffsetMetadata updated log end offset of local replica
 * @param lastVoterSet the up-to-date voter set
 * @return true if the high watermark is updated as a result of this call
 * @throws IllegalStateException if the local end offset would move backwards
 */
public boolean updateLocalState(
    LogOffsetMetadata endOffsetMetadata,
    VoterSet lastVoterSet
) {
    ReplicaState state = getOrCreateReplicaState(localVoterNode.voterKey());
    state.endOffset.ifPresent(currentEndOffset -> {
        // The local log end offset must be monotonically increasing.
        if (currentEndOffset.offset() > endOffsetMetadata.offset()) {
            throw new IllegalStateException("Detected non-monotonic update of local " +
                "end offset: " + currentEndOffset.offset() + " -> " + endOffsetMetadata.offset());
        }
    });
    state.updateLeaderEndOffset(endOffsetMetadata);
    updateVoterAndObserverStates(lastVoterSet);
    return maybeUpdateHighWatermark();
}
/**
 * Update the replica state in terms of fetch time and log end offsets.
 *
 * @param replicaKey replica key
 * @param currentTimeMs current time in milliseconds
 * @param fetchOffsetMetadata new log offset and metadata
 * @return true if the high watermark is updated as a result of this call
 * @throws IllegalStateException if {@code replicaKey} identifies the local leader
 */
public boolean updateReplicaState(
    ReplicaKey replicaKey,
    long currentTimeMs,
    LogOffsetMetadata fetchOffsetMetadata
) {
    // Ignore fetches from negative replica id, as it indicates
    // the fetch is from non-replica. For example, a consumer.
    if (replicaKey.id() < 0) {
        return false;
    } else if (replicaKey.id() == localVoterNode.voterKey().id()) {
        throw new IllegalStateException(
            String.format("Remote replica ID %s matches the local leader ID", replicaKey)
        );
    }

    ReplicaState state = getOrCreateReplicaState(replicaKey);

    state.endOffset.ifPresent(currentEndOffset -> {
        // Unlike the local state, a remote regression is only logged, not fatal.
        if (currentEndOffset.offset() > fetchOffsetMetadata.offset()) {
            log.warn("Detected non-monotonic update of fetch offset from nodeId {}: {} -> {}",
                state.replicaKey, currentEndOffset.offset(), fetchOffsetMetadata.offset());
        }
    });

    Optional<LogOffsetMetadata> leaderEndOffsetOpt = getOrCreateReplicaState(localVoterNode.voterKey()).endOffset;

    state.updateFollowerState(
        currentTimeMs,
        fetchOffsetMetadata,
        leaderEndOffsetOpt
    );
    updateCheckQuorumForFollowingVoter(replicaKey, currentTimeMs);

    // Only voter progress can move the high watermark; observers never count.
    return isVoter(state.replicaKey) && maybeUpdateHighWatermark();
}
/**
 * Returns the keys of all voters except the local leader, ordered by descending
 * fetch offset.
 */
public List<ReplicaKey> nonLeaderVotersByDescendingFetchOffset() {
    ReplicaKey leaderKey = localVoterNode.voterKey();
    return followersByDescendingFetchOffset()
        .filter(replica -> !replica.matchesKey(leaderKey))
        .map(replica -> replica.replicaKey)
        .collect(Collectors.toList());
}
// Streams all voter states in their natural (sorted) order; per the callers this is
// descending fetch offset.
private Stream<ReplicaState> followersByDescendingFetchOffset() {
    return voterStates.values().stream().sorted();
}
/**
 * Records that the given voter has acknowledged this leader.
 *
 * @throws IllegalArgumentException if the node id is not a current voter
 */
public void addAcknowledgementFrom(int remoteNodeId) {
    ensureValidVoter(remoteNodeId).hasAcknowledgedLeader = true;
}
// Looks up the voter state for the given node id, rejecting ids that are not voters.
private ReplicaState ensureValidVoter(int remoteNodeId) {
    ReplicaState voter = voterStates.get(remoteNodeId);
    if (voter != null) {
        return voter;
    }
    throw new IllegalArgumentException("Unexpected acknowledgement from non-voter " + remoteNodeId);
}
/**
 * Returns the start offset recorded for this leader's epoch.
 */
public long epochStartOffset() {
    return epochStartOffset;
}
/**
 * Returns the tracked state for the given replica, creating an observer entry when the
 * replica is not a current voter.
 *
 * <p>A replica counts as a voter only when both its id and its key match the entry in
 * {@code voterStates}; an id match with a non-matching key falls through to the
 * observer map.
 */
private ReplicaState getOrCreateReplicaState(ReplicaKey replicaKey) {
    ReplicaState state = voterStates.get(replicaKey.id());
    if (state == null || !state.matchesKey(replicaKey)) {
        // putIfAbsent preserves any existing observer state; the observer-count metric
        // is refreshed unconditionally since the map size may have changed.
        observerStates.putIfAbsent(replicaKey, new ReplicaState(replicaKey, false, Endpoints.empty()));
        kafkaRaftMetrics.updateNumObservers(observerStates.size());
        return observerStates.get(replicaKey);
    }
    return state;
}
/**
 * Looks up the state tracked for the given replica key, checking voters first and
 * falling back to observers. Returns empty when the replica is unknown.
 */
public Optional<ReplicaState> getReplicaState(ReplicaKey replicaKey) {
    ReplicaState voter = voterStates.get(replicaKey.id());
    if (voter != null && voter.matchesKey(replicaKey)) {
        return Optional.of(voter);
    }
    return Optional.ofNullable(observerStates.get(replicaKey));
}
/**
 * Clear observer states that have not been active for a while and are not the leader.
 */
private void clearInactiveObservers(final long currentTimeMs) {
    observerStates.entrySet().removeIf(entry -> {
        boolean sessionExpired =
            currentTimeMs - entry.getValue().lastFetchTimestamp >= OBSERVER_SESSION_TIMEOUT_MS;
        // The local leader's own entry is never evicted.
        return sessionExpired && !entry.getKey().equals(localVoterNode.voterKey());
    });
    kafkaRaftMetrics.updateNumObservers(observerStates.size());
}
// True only when both the id and the full replica key match a current voter entry.
private boolean isVoter(ReplicaKey remoteReplicaKey) {
    ReplicaState voter = voterStates.get(remoteReplicaKey.id());
    if (voter == null) {
        return false;
    }
    return voter.matchesKey(remoteReplicaKey);
}
/**
 * Reconciles the voter and observer maps with the latest voter set.
 *
 * <p>For every voter in {@code lastVoterSet}, previously tracked state (voter or
 * observer) is reused so fetch progress is not lost, its replica key and listeners are
 * refreshed, and it is placed in the rebuilt voter map. Voters no longer in the set are
 * demoted to observers with their listeners cleared.
 */
private void updateVoterAndObserverStates(VoterSet lastVoterSet) {
    Map<Integer, ReplicaState> newVoterStates = new HashMap<>();
    // Snapshot of the current voters; entries still present after the loop are the
    // voters that were removed from the set.
    Map<Integer, ReplicaState> oldVoterStates = new HashMap<>(voterStates);

    // Compute the new voter states map
    for (VoterSet.VoterNode voterNode : lastVoterSet.voterNodes()) {
        // NOTE(review): orElseGet would avoid eagerly constructing a ReplicaState when
        // one already exists — assumes the constructor is side-effect free; confirm.
        ReplicaState state = getReplicaState(voterNode.voterKey())
            .orElse(new ReplicaState(voterNode.voterKey(), false, voterNode.listeners()));

        // Remove the voter from the previous data structures
        oldVoterStates.remove(voterNode.voterKey().id());
        observerStates.remove(voterNode.voterKey());

        // Make sure that the replica key in the replica state matches the voter's
        state.setReplicaKey(voterNode.voterKey());
        // Make sure that the listeners are updated
        state.updateListeners(voterNode.listeners());
        newVoterStates.put(state.replicaKey.id(), state);
    }
    voterStates = newVoterStates;

    // Move any of the remaining old voters to observerStates
    for (ReplicaState replicaStateEntry : oldVoterStates.values()) {
        replicaStateEntry.clearListeners();
        observerStates.putIfAbsent(replicaStateEntry.replicaKey, replicaStateEntry);
    }
    kafkaRaftMetrics.updateNumObservers(observerStates.size());
}
public static | LeaderState |
java | resilience4j__resilience4j | resilience4j-bulkhead/src/test/java/io/github/resilience4j/bulkhead/ThreadPoolBulkheadConfigTest.java | {
"start": 8831,
"end": 9224
} | class ____ implements ContextPropagator<Object> {
@Override
public Supplier<Optional<Object>> retrieve() {
return null;
}
@Override
public Consumer<Optional<Object>> copy() {
return null;
}
@Override
public Consumer<Optional<Object>> clear() {
return null;
}
}
}
| TestCtxPropagator2 |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/internal/doublearrays/DoubleArrays_assertEmpty_Test.java | {
"start": 1321,
"end": 2036
} | class ____ extends DoubleArraysBaseTest {
@Test
void should_fail_if_actual_is_null() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> arrays.assertEmpty(someInfo(), null))
.withMessage(actualIsNull());
}
@Test
void should_fail_if_actual_is_not_empty() {
double[] actual = { 6d, 8d };
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> arrays.assertEmpty(someInfo(), actual))
.withMessage(shouldBeEmpty(actual).create());
}
@Test
void should_pass_if_actual_is_empty() {
arrays.assertEmpty(someInfo(), emptyArray());
}
}
| DoubleArrays_assertEmpty_Test |
java | google__dagger | javatests/dagger/android/support/functional/UsesGeneratedModulesApplication.java | {
"start": 3673,
"end": 3856
} | class ____ {
@Provides
@IntoSet
static Class<?> addDummyValueToComponentHierarchy() {
return DummyActivitySubcomponent.class;
}
}
}
| AddToHierarchy |
java | quarkusio__quarkus | integration-tests/spring-boot-properties/src/test/java/io/quarkus/it/spring/boot/BeanPropertiesIT.java | {
"start": 114,
"end": 168
} | class ____ extends BeanPropertiesTest {
}
| BeanPropertiesIT |
java | elastic__elasticsearch | x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Analyzer.java | {
"start": 41702,
"end": 42986
} | class ____ extends AnalyzerRule<Filter> {
@Override
protected LogicalPlan rule(Filter f) {
if (f.child() instanceof Project p) {
for (Expression n : p.projections()) {
n = Alias.unwrap(n);
// no literal or aggregates - it's a 'regular' projection
if (n.foldable() == false && Functions.isAggregate(n) == false
// folding might not work (it might wait for the optimizer)
// so check whether any column is referenced
&& n.anyMatch(FieldAttribute.class::isInstance)) {
return f;
}
}
if (containsAggregate(f.condition())) {
return f.with(new Aggregate(p.source(), p.child(), emptyList(), p.projections()), f.condition());
}
}
return f;
}
@Override
protected boolean skipResolved() {
return false;
}
}
//
// Handle aggs in HAVING. To help folding any aggs not found in Aggregation
// will be pushed down to the Aggregate and then projected. This also simplifies the Verifier's job.
//
private static | HavingOverProject |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/util/Arrays_isArrayTypePrimitive_Test.java | {
"start": 806,
"end": 1807
} | class ____ {
@Test
void should_return_true_if_object_is_a_primitive_array() {
// GIVEN
int[] o = new int[0];
// WHEN
boolean isArrayTypePrimitive = isArrayTypePrimitive(o);
// THEN
then(isArrayTypePrimitive).isTrue();
}
@Test
void should_return_false_if_object_is_an_object_array() {
// GIVEN
Object[] o = new Object[0];
// WHEN
boolean isArrayTypePrimitive = isArrayTypePrimitive(o);
// THEN
then(isArrayTypePrimitive).isFalse();
}
@Test
void should_return_false_if_object_is_null() {
// GIVEN
Object o = null;
// WHEN
boolean isArrayTypePrimitive = isArrayTypePrimitive(o);
// THEN
then(isArrayTypePrimitive).isFalse();
}
@Test
void should_return_false_if_object_is_not_an_array() {
// GIVEN
String string = "I'm not an array";
// WHEN
boolean isArrayTypePrimitive = isArrayTypePrimitive(string);
// THEN
then(isArrayTypePrimitive).isFalse();
}
}
| Arrays_isArrayTypePrimitive_Test |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.