language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | redisson__redisson | redisson/src/main/java/org/redisson/PubSubMessageListener.java | {
"start": 878,
"end": 2495
} | class ____<V> implements RedisPubSubListener<Object> {
private final MessageListener<V> listener;
private final Set<String> names;
private final Class<V> type;
private Runnable callback;
public PubSubMessageListener(Class<V> type, MessageListener<V> listener, Set<String> names) {
super();
this.type = type;
this.listener = listener;
this.names = names;
}
public PubSubMessageListener(Class<V> type, MessageListener<V> listener, Set<String> names, Runnable callback) {
super();
this.type = type;
this.listener = listener;
this.names = names;
this.callback = callback;
}
public MessageListener<V> getListener() {
return listener;
}
@Override
public void onMessage(CharSequence channel, Object message) {
// could be subscribed to multiple channels
if (names.contains(channel.toString()) && type.isInstance(message)) {
listener.onMessage(channel, (V) message);
if (callback != null) {
callback.run();
}
}
}
@Override
public void onPatternMessage(CharSequence pattern, CharSequence channel, Object message) {
// could be subscribed to multiple channels
if (names.contains(pattern.toString()) && type.isInstance(message)) {
listener.onMessage(channel, (V) message);
if (callback != null) {
callback.run();
}
}
}
@Override
public void onStatus(PubSubType type, CharSequence channel) {
}
}
| PubSubMessageListener |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/processor/ClaimCheckEipPushPopTest.java | {
"start": 1024,
"end": 1895
} | class ____ extends ContextTestSupport {
@Test
public void testPushPop() throws Exception {
getMockEndpoint("mock:a").expectedBodiesReceived("Hello World");
getMockEndpoint("mock:b").expectedBodiesReceived("Bye World");
getMockEndpoint("mock:c").expectedBodiesReceived("Hello World");
template.sendBody("direct:start", "Hello World");
assertMockEndpointsSatisfied();
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start").to("mock:a").claimCheck(ClaimCheckOperation.Push).transform().constant("Bye World")
.to("mock:b").claimCheck(ClaimCheckOperation.Pop)
.to("mock:c");
}
};
}
}
| ClaimCheckEipPushPopTest |
java | grpc__grpc-java | xds/src/generated/thirdparty/grpc/io/envoyproxy/envoy/service/discovery/v3/AggregatedDiscoveryServiceGrpc.java | {
"start": 20697,
"end": 20903
} | class ____
extends AggregatedDiscoveryServiceBaseDescriptorSupplier {
AggregatedDiscoveryServiceFileDescriptorSupplier() {}
}
private static final | AggregatedDiscoveryServiceFileDescriptorSupplier |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/component/bean/BeanInvokeSingleMethodNoBodyTest.java | {
"start": 978,
"end": 1583
} | class ____ extends ContextTestSupport {
@Test
public void testBeanInvokeSingleMethodNoBody() throws Exception {
getMockEndpoint("mock:result").expectedBodiesReceived("Hello");
template.sendBody("direct:start", "Hi");
assertMockEndpointsSatisfied();
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start").bean(MySingleMethodNoBodyBean.class).to("mock:result");
}
};
}
}
| BeanInvokeSingleMethodNoBodyTest |
java | google__guice | core/test/com/google/inject/BinderTestSuite.java | {
"start": 11460,
"end": 11531
} | enum ____ {
NONE,
EAGER,
LAZY
}
public static | CreationTime |
java | quarkusio__quarkus | extensions/smallrye-health/deployment/src/test/java/io/quarkus/smallrye/health/test/ui/LegacyDisabledTest.java | {
"start": 266,
"end": 676
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addAsResource(new StringAsset("quarkus.smallrye-health.ui.enable=false"), "application.properties"));
@Test
void shouldUseDefaultConfig() {
RestAssured.when().get("/q/health-ui").then().statusCode(404);
}
}
| LegacyDisabledTest |
java | elastic__elasticsearch | libs/gpu-codec/src/main/java/org/elasticsearch/gpu/codec/ES92GpuHnswVectorsWriter.java | {
"start": 37797,
"end": 39305
} | class ____ extends KnnFieldVectorsWriter<float[]> {
private static final long SHALLOW_SIZE = RamUsageEstimator.shallowSizeOfInstance(FieldWriter.class);
private final FieldInfo fieldInfo;
private int lastDocID = -1;
private final FlatFieldVectorsWriter<float[]> flatFieldVectorsWriter;
FieldWriter(FlatFieldVectorsWriter<float[]> flatFieldVectorsWriter, FieldInfo fieldInfo) {
this.fieldInfo = fieldInfo;
this.flatFieldVectorsWriter = Objects.requireNonNull(flatFieldVectorsWriter);
}
@Override
public void addValue(int docID, float[] vectorValue) throws IOException {
if (docID == lastDocID) {
throw new IllegalArgumentException(
"VectorValuesField \""
+ fieldInfo.name
+ "\" appears more than once in this document (only one value is allowed per field)"
);
}
flatFieldVectorsWriter.addValue(docID, vectorValue);
lastDocID = docID;
}
@Override
public float[] copyValue(float[] vectorValue) {
throw new UnsupportedOperationException();
}
@Override
public long ramBytesUsed() {
return SHALLOW_SIZE + flatFieldVectorsWriter.ramBytesUsed();
}
public DocsWithFieldSet getDocsWithFieldSet() {
return flatFieldVectorsWriter.getDocsWithFieldSet();
}
}
}
| FieldWriter |
java | elastic__elasticsearch | x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/LegacySemanticSparseVectorQueryRewriteInterceptor.java | {
"start": 764,
"end": 6235
} | class ____ extends LegacySemanticQueryRewriteInterceptor {
public static final NodeFeature SEMANTIC_SPARSE_VECTOR_QUERY_REWRITE_INTERCEPTION_SUPPORTED = new NodeFeature(
"search.semantic_sparse_vector_query_rewrite_interception_supported"
);
public LegacySemanticSparseVectorQueryRewriteInterceptor() {}
@Override
protected String getFieldName(QueryBuilder queryBuilder) {
assert (queryBuilder instanceof SparseVectorQueryBuilder);
SparseVectorQueryBuilder sparseVectorQueryBuilder = (SparseVectorQueryBuilder) queryBuilder;
return sparseVectorQueryBuilder.getFieldName();
}
@Override
protected String getQuery(QueryBuilder queryBuilder) {
assert (queryBuilder instanceof SparseVectorQueryBuilder);
SparseVectorQueryBuilder sparseVectorQueryBuilder = (SparseVectorQueryBuilder) queryBuilder;
return sparseVectorQueryBuilder.getQuery();
}
@Override
protected QueryBuilder buildInferenceQuery(QueryBuilder queryBuilder, InferenceIndexInformationForField indexInformation) {
Map<String, List<String>> inferenceIdsIndices = indexInformation.getInferenceIdsIndices();
QueryBuilder finalQueryBuilder;
if (inferenceIdsIndices.size() == 1) {
// Simple case, everything uses the same inference ID
String searchInferenceId = inferenceIdsIndices.keySet().iterator().next();
finalQueryBuilder = buildNestedQueryFromSparseVectorQuery(queryBuilder, searchInferenceId);
} else {
// Multiple inference IDs, construct a boolean query
finalQueryBuilder = buildInferenceQueryWithMultipleInferenceIds(queryBuilder, inferenceIdsIndices);
}
finalQueryBuilder.queryName(queryBuilder.queryName());
finalQueryBuilder.boost(queryBuilder.boost());
return finalQueryBuilder;
}
private QueryBuilder buildInferenceQueryWithMultipleInferenceIds(
QueryBuilder queryBuilder,
Map<String, List<String>> inferenceIdsIndices
) {
BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder();
for (String inferenceId : inferenceIdsIndices.keySet()) {
boolQueryBuilder.should(
createSubQueryForIndices(
inferenceIdsIndices.get(inferenceId),
buildNestedQueryFromSparseVectorQuery(queryBuilder, inferenceId)
)
);
}
return boolQueryBuilder;
}
@Override
protected QueryBuilder buildCombinedInferenceAndNonInferenceQuery(
QueryBuilder queryBuilder,
InferenceIndexInformationForField indexInformation
) {
assert (queryBuilder instanceof SparseVectorQueryBuilder);
SparseVectorQueryBuilder sparseVectorQueryBuilder = (SparseVectorQueryBuilder) queryBuilder;
Map<String, List<String>> inferenceIdsIndices = indexInformation.getInferenceIdsIndices();
BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder();
boolQueryBuilder.should(
createSubQueryForIndices(
indexInformation.nonInferenceIndices(),
new SparseVectorQueryBuilder(
sparseVectorQueryBuilder.getFieldName(),
sparseVectorQueryBuilder.getQueryVectors(),
sparseVectorQueryBuilder.getInferenceId(),
sparseVectorQueryBuilder.getQuery(),
sparseVectorQueryBuilder.shouldPruneTokens(),
sparseVectorQueryBuilder.getTokenPruningConfig()
)
)
);
// We always perform nested subqueries on semantic_text fields, to support
// sparse_vector queries using query vectors.
for (String inferenceId : inferenceIdsIndices.keySet()) {
boolQueryBuilder.should(
createSubQueryForIndices(
inferenceIdsIndices.get(inferenceId),
buildNestedQueryFromSparseVectorQuery(sparseVectorQueryBuilder, inferenceId)
)
);
}
boolQueryBuilder.boost(queryBuilder.boost());
boolQueryBuilder.queryName(queryBuilder.queryName());
return boolQueryBuilder;
}
private QueryBuilder buildNestedQueryFromSparseVectorQuery(QueryBuilder queryBuilder, String searchInferenceId) {
assert (queryBuilder instanceof SparseVectorQueryBuilder);
SparseVectorQueryBuilder sparseVectorQueryBuilder = (SparseVectorQueryBuilder) queryBuilder;
return QueryBuilders.nestedQuery(
SemanticTextField.getChunksFieldName(sparseVectorQueryBuilder.getFieldName()),
new SparseVectorQueryBuilder(
SemanticTextField.getEmbeddingsFieldName(sparseVectorQueryBuilder.getFieldName()),
sparseVectorQueryBuilder.getQueryVectors(),
(sparseVectorQueryBuilder.getInferenceId() == null && sparseVectorQueryBuilder.getQuery() != null)
? searchInferenceId
: sparseVectorQueryBuilder.getInferenceId(),
sparseVectorQueryBuilder.getQuery(),
sparseVectorQueryBuilder.shouldPruneTokens(),
sparseVectorQueryBuilder.getTokenPruningConfig()
),
ScoreMode.Max
);
}
@Override
public String getQueryName() {
return SparseVectorQueryBuilder.NAME;
}
}
| LegacySemanticSparseVectorQueryRewriteInterceptor |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/TestSecretManager.java | {
"start": 1238,
"end": 3899
} | class ____ {
private final String defaultAlgorithm =
CommonConfigurationKeysPublic.HADOOP_SECURITY_SECRET_MANAGER_KEY_GENERATOR_ALGORITHM_DEFAULT;
private final int defaultLength =
CommonConfigurationKeysPublic.HADOOP_SECURITY_SECRET_MANAGER_KEY_LENGTH_DEFAULT;
private final String strongAlgorithm = "HmacSHA256";
private final int strongLength = 256;
private SecretManager<TokenIdentifier> secretManager;
@Test
public void testDefaults() {
assertKey(secretManager.generateSecret(), defaultAlgorithm, defaultLength);
}
@Test
public void testUpdate() {
SecretManager.update(createConfiguration(strongAlgorithm, strongLength));
assertKey(secretManager.generateSecret(), strongAlgorithm, strongLength);
}
@Test
public void testUnknownAlgorithm() {
SecretManager.update(createConfiguration("testUnknownAlgorithm_NO_ALG", strongLength));
assertThrows(IllegalArgumentException.class, secretManager::generateSecret);
}
@Test
public void testUpdateAfterInitialisation() {
SecretKey oldSecretKey = secretManager.generateSecret();
SecretManager.update(createConfiguration(strongAlgorithm, strongLength));
SecretKey newSecretKey = secretManager.generateSecret();
assertKey(oldSecretKey, defaultAlgorithm, defaultLength);
assertKey(newSecretKey, defaultAlgorithm, defaultLength);
}
@BeforeEach
public void setUp() {
secretManager = new SecretManager<TokenIdentifier>() {
@Override
protected byte[] createPassword(TokenIdentifier identifier) {
return new byte[0];
}
@Override
public byte[] retrievePassword(TokenIdentifier identifier) throws InvalidToken {
return new byte[0];
}
@Override
public TokenIdentifier createIdentifier() {
return null;
}
};
}
@AfterEach
public void tearDown() {
SecretManager.update(createConfiguration(defaultAlgorithm, defaultLength));
}
private void assertKey(SecretKey secretKey, String algorithm, int length) {
assertEquals(algorithm, secretKey.getAlgorithm(),
"Algorithm of created key is not as expected.");
assertEquals(length, secretKey.getEncoded().length * 8,
"Length of created key is not as expected.");
}
private Configuration createConfiguration(String algorithm, int length) {
Configuration conf = new Configuration();
conf.set(
CommonConfigurationKeysPublic.HADOOP_SECURITY_SECRET_MANAGER_KEY_GENERATOR_ALGORITHM_KEY,
algorithm);
conf.setInt(CommonConfigurationKeysPublic.HADOOP_SECURITY_SECRET_MANAGER_KEY_LENGTH_KEY,
length);
return conf;
}
}
| TestSecretManager |
java | apache__camel | components/camel-chunk/src/test/java/org/apache/camel/component/chunk/ChunkDifferentThemeFolderSubfolderAndExtensionTest.java | {
"start": 1265,
"end": 2492
} | class ____ extends CamelTestSupport {
@EndpointInject("mock:endSimple")
protected MockEndpoint endSimpleMock;
@Produce("direct:startSimple")
protected ProducerTemplate startSimpleProducerTemplate;
/**
* Test using themeFolder, themeSubfolder and extension parameters
*/
@Test
void testChunkSubfolder() throws Exception {
// Prepare
Exchange exchange = createExchangeWithBody("The Body");
exchange.getIn().setHeader("name", "Andrew");
endSimpleMock.expectedMessageCount(1);
endSimpleMock.expectedBodiesReceived("Earth to Andrew. Come in, Andrew.\n");
// Act
startSimpleProducerTemplate.send(exchange);
// Verify
MockEndpoint.assertIsSatisfied(context);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:startSimple")
.to("chunk:subfile_example?themeFolder=folderexample&themeSubfolder=subfolderexample&extension=file")
.to("mock:endSimple");
}
};
}
}
| ChunkDifferentThemeFolderSubfolderAndExtensionTest |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/propertyref/basic/Person.java | {
"start": 276,
"end": 1574
} | class ____ {
private Long id;
private String name;
private Address address;
private String userId;
private Set accounts = new HashSet();
private List systems = new ArrayList();
/**
* @return Returns the userId.
*/
public String getUserId() {
return userId;
}
/**
* @param userId The userId to set.
*/
public void setUserId(String userId) {
this.userId = userId;
}
/**
* @return Returns the address.
*/
public Address getAddress() {
return address;
}
/**
* @param address The address to set.
*/
public void setAddress(Address address) {
this.address = address;
}
/**
* @return Returns the id.
*/
public Long getId() {
return id;
}
/**
* @param id The id to set.
*/
public void setId(Long id) {
this.id = id;
}
/**
* @return Returns the name.
*/
public String getName() {
return name;
}
/**
* @param name The name to set.
*/
public void setName(String name) {
this.name = name;
}
/**
* @return Returns the accounts.
*/
public Set getAccounts() {
return accounts;
}
/**
* @param accounts The accounts to set.
*/
public void setAccounts(Set accounts) {
this.accounts = accounts;
}
public List getSystems() {
return systems;
}
public void setSystems(List systems) {
this.systems = systems;
}
}
| Person |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/ServiceLauncher.java | {
"start": 20454,
"end": 25283
} | class ____ init; here this is picked up on.
if (!service.isInState(Service.STATE.INITED)) {
service.init(configuration);
}
int exitCode;
try {
// start the service
service.start();
exitCode = EXIT_SUCCESS;
if (execute && service.isInState(Service.STATE.STARTED)) {
if (launchableService != null) {
// assume that runnable services are meant to run from here
try {
exitCode = launchableService.execute();
LOG.debug("Service {} execution returned exit code {}",
name, exitCode);
} finally {
// then stop the service
service.stop();
}
} else {
//run the service until it stops or an interrupt happens
// on a different thread.
LOG.debug("waiting for service threads to terminate");
service.waitForServiceToStop(0);
}
}
} finally {
if (shutdownHook != null) {
shutdownHook.unregister();
}
}
return exitCode;
}
/**
* @return Instantiate the service defined in {@code serviceClassName}.
*
* Sets the {@code configuration} field
* to the the value of {@code conf},
* and the {@code service} field to the service created.
*
* @param conf configuration to use
*/
@SuppressWarnings("unchecked")
public Service instantiateService(Configuration conf) {
Preconditions.checkArgument(conf != null, "null conf");
Preconditions.checkArgument(serviceClassName != null,
"null service classname");
Preconditions.checkArgument(!serviceClassName.isEmpty(),
"undefined service classname");
configuration = conf;
// Instantiate the class. this requires the service to have a public
// zero-argument or string-argument constructor
Object instance;
try {
Class<?> serviceClass = getClassLoader().loadClass(serviceClassName);
try {
instance = serviceClass.getConstructor().newInstance();
} catch (NoSuchMethodException noEmptyConstructor) {
// no simple constructor, fall back to a string
LOG.debug("No empty constructor {}", noEmptyConstructor,
noEmptyConstructor);
instance = serviceClass.getConstructor(String.class)
.newInstance(serviceClassName);
}
} catch (Exception e) {
throw serviceCreationFailure(e);
}
if (!(instance instanceof Service)) {
//not a service
throw new ServiceLaunchException(
LauncherExitCodes.EXIT_SERVICE_CREATION_FAILURE,
"Not a service class: \"%s\"", serviceClassName);
}
// cast to the specific instance type of this ServiceLauncher
service = (S) instance;
return service;
}
/**
* Convert an exception to an {@code ExitException}.
*
* This process may just be a simple pass through, otherwise a new
* exception is created with an exit code, the text of the supplied
* exception, and the supplied exception as an inner cause.
*
* <ol>
* <li>If is already the right type, pass it through.</li>
* <li>If it implements {@link ExitCodeProvider#getExitCode()},
* the exit code is extracted and used in the new exception.</li>
* <li>Otherwise, the exit code
* {@link LauncherExitCodes#EXIT_EXCEPTION_THROWN} is used.</li>
* </ol>
*
* @param thrown the exception thrown
* @return an {@code ExitException} with a status code
*/
protected static ExitUtil.ExitException convertToExitException(
Throwable thrown) {
ExitUtil.ExitException exitException;
// get the exception message
String message = thrown.toString();
int exitCode;
if (thrown instanceof ExitCodeProvider) {
// the exception provides a status code -extract it
exitCode = ((ExitCodeProvider) thrown).getExitCode();
message = thrown.getMessage();
if (message == null) {
// some exceptions do not have a message; fall back
// to the string value.
message = thrown.toString();
}
} else {
// no exception code: use the default
exitCode = EXIT_EXCEPTION_THROWN;
}
// construct the new exception with the original message and
// an exit code
exitException = new ServiceLaunchException(exitCode, thrown, message);
return exitException;
}
/**
* Generate an exception announcing a failure to create the service.
* @param exception inner exception.
* @return a new exception, with the exit code
* {@link LauncherExitCodes#EXIT_SERVICE_CREATION_FAILURE}
*/
protected ServiceLaunchException serviceCreationFailure(Exception exception) {
return new ServiceLaunchException(EXIT_SERVICE_CREATION_FAILURE, exception);
}
/**
* Override point: register this | constructors |
java | micronaut-projects__micronaut-core | inject-java/src/test/groovy/io/micronaut/inject/failures/ctorexception/MyClassA.java | {
"start": 827,
"end": 890
} | class ____ {
public MyClassA(MyClassC propC) {
}
}
| MyClassA |
java | greenrobot__greendao | tests/DaoTestPerformance/src/androidTest/java/org/greenrobot/greendao/performance/ReflectionPerformanceTest.java | {
"start": 978,
"end": 4843
} | class ____ // extends TestCase
{
int intValue;
String stringValue;
public int getIntValue() {
return intValue;
}
public void setIntValue(int inValue) {
this.intValue = inValue;
}
public String getStringValue() {
return stringValue;
}
public void setStringValue(String stringValue) {
this.stringValue = stringValue;
}
public void testSetIntPerf() throws SecurityException, NoSuchMethodException, IllegalArgumentException,
IllegalAccessException, InvocationTargetException {
int count = 100000;
long start = System.currentTimeMillis();
for (int i = 0; i < count; i++) {
setIntValue(i);
}
long time = System.currentTimeMillis() - start;
Method method = getClass().getMethod("setIntValue", int.class);
long start2 = System.currentTimeMillis();
for (int i = 0; i < count; i++) {
method.invoke(this, i);
}
long time2 = System.currentTimeMillis() - start2;
DaoLog.d("set int: normal=" + time + "ms, reflected=" + time2 + "ms, " + 1000 * count / time2
+ " refelected ops/s, slower=" + ((float) time2) / time);
}
public void testGetIntPerf() throws SecurityException, NoSuchMethodException, IllegalArgumentException,
IllegalAccessException, InvocationTargetException {
int count = 100000;
long start = System.currentTimeMillis();
for (int i = 0; i < count; i++) {
int x = getIntValue();
}
long time = System.currentTimeMillis() - start;
Method method = getClass().getMethod("getIntValue");
long start2 = System.currentTimeMillis();
for (int i = 0; i < count; i++) {
int x = (Integer) method.invoke(this);
}
long time2 = System.currentTimeMillis() - start2;
DaoLog.d("get int: normal=" + time + "ms, reflected=" + time2 + "ms, " + 1000 * count / time2
+ " refelected ops/s, slower=" + ((float) time2) / time);
}
public void testSetStringPerf() throws SecurityException, NoSuchMethodException, IllegalArgumentException,
IllegalAccessException, InvocationTargetException {
int count = 100000;
long start = System.currentTimeMillis();
for (int i = 0; i < count; i++) {
setStringValue("the quick brown fox");
}
long time = System.currentTimeMillis() - start;
Method method = getClass().getMethod("setStringValue", String.class);
long start2 = System.currentTimeMillis();
for (int i = 0; i < count; i++) {
method.invoke(this, "the quick brown fox");
}
long time2 = System.currentTimeMillis() - start2;
DaoLog.d("set String: normal=" + time + "ms, reflected=" + time2 + "ms, " + 1000 * count / time2
+ " refelected ops/s, slower=" + ((float) time2) / time);
}
public void testGetStringPerf() throws SecurityException, NoSuchMethodException, IllegalArgumentException,
IllegalAccessException, InvocationTargetException {
int count = 100000;
long start = System.currentTimeMillis();
for (int i = 0; i < count; i++) {
String x = getStringValue();
}
long time = System.currentTimeMillis() - start;
Method method = getClass().getMethod("getStringValue");
long start2 = System.currentTimeMillis();
for (int i = 0; i < count; i++) {
String x = (String) method.invoke(this);
}
long time2 = System.currentTimeMillis() - start2;
DaoLog.d("get String: normal=" + time + "ms, reflected=" + time2 + "ms, " + 1000 * count / time2
+ " refelected ops/s, slower=" + ((float) time2) / time);
}
}
| ReflectionPerformanceTest |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/entitygraph/EntityGraphWithInheritanceTest.java | {
"start": 6267,
"end": 6363
} | class ____ {
@Id
Long id;
String name;
}
@Entity(name = "FreeCourse")
public static | Course |
java | spring-projects__spring-framework | spring-test/src/main/java/org/springframework/test/web/servlet/setup/MockMvcConfigurer.java | {
"start": 1556,
"end": 2400
} | interface ____ {
/**
* Invoked immediately when this {@code MockMvcConfigurer} is added via
* {@link ConfigurableMockMvcBuilder#apply}.
* @param builder the builder for the MockMvc
*/
default void afterConfigurerAdded(ConfigurableMockMvcBuilder<?> builder) {
}
/**
* Invoked when the MockMvc instance is about to be created with the MockMvc
* builder and the Spring WebApplicationContext that will be passed to the
* {@code DispatcherServlet}.
* @param builder the builder for the MockMvc
* @param context the Spring configuration
* @return a post processor to be applied to every request performed
* through the {@code MockMvc} instance.
*/
default @Nullable RequestPostProcessor beforeMockMvcCreated(
ConfigurableMockMvcBuilder<?> builder, WebApplicationContext context) {
return null;
}
}
| MockMvcConfigurer |
java | apache__flink | flink-core/src/test/java/org/apache/flink/api/common/typeutils/ComparatorTestBase.java | {
"start": 17705,
"end": 18507
} | class ____ extends DataOutputStream implements DataOutputView {
public TestOutputView() {
super(new ByteArrayOutputStream(4096));
}
public TestInputView getInputView() {
ByteArrayOutputStream baos = (ByteArrayOutputStream) out;
return new TestInputView(baos.toByteArray());
}
@Override
public void skipBytesToWrite(int numBytes) throws IOException {
for (int i = 0; i < numBytes; i++) {
write(0);
}
}
@Override
public void write(DataInputView source, int numBytes) throws IOException {
byte[] buffer = new byte[numBytes];
source.readFully(buffer);
write(buffer);
}
}
public static final | TestOutputView |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/event/injection/invalid/InitMethodEventRawTypeTest.java | {
"start": 919,
"end": 1047
} | class ____ {
// raw event type
@Inject
public void initMethod(Event event) {
}
}
}
| InvalidBean |
java | hibernate__hibernate-orm | local-build-plugins/src/main/java/org/hibernate/build/xjc/SchemaDescriptor.java | {
"start": 457,
"end": 1729
} | class ____ implements Named {
private final String name;
private final Project project;
private final RegularFileProperty xsdFile;
private final RegularFileProperty xjcBindingFile;
private final SetProperty<String> xjcPlugins;
public SchemaDescriptor(String name, Project project) {
this.name = name;
this.project = project;
xsdFile = project.getObjects().fileProperty();
xjcBindingFile = project.getObjects().fileProperty();
xjcPlugins = project.getObjects().setProperty( String.class );
}
@Override
public final String getName() {
return name;
}
@InputFile
public RegularFileProperty getXsdFile() {
return xsdFile;
}
public void setXsdFile(Object reference) {
xsdFile.set( project.file( reference ) );
}
public void xsdFile(Object reference) {
setXsdFile( reference );
}
@InputFile
public RegularFileProperty getXjcBindingFile() {
return xjcBindingFile;
}
public void setXjcBindingFile(Object reference) {
xjcBindingFile.set( project.file( reference ) );
}
public void xjcBindingFile(Object reference) {
setXjcBindingFile( reference );
}
@Input
public SetProperty<String> getXjcPlugins() {
return xjcPlugins;
}
public void xjcPlugins(String... plugins) {
xjcPlugins.addAll( plugins );
}
}
| SchemaDescriptor |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/repositories/InvalidRepository.java | {
"start": 1712,
"end": 4633
} | class ____ extends AbstractLifecycleComponent implements Repository {
private final ProjectId projectId;
private final RepositoryMetadata repositoryMetadata;
private final RepositoryException creationException;
public InvalidRepository(ProjectId projectId, RepositoryMetadata repositoryMetadata, RepositoryException creationException) {
this.projectId = projectId;
this.repositoryMetadata = repositoryMetadata;
this.creationException = creationException;
}
private RepositoryException createCreationException() {
return new RepositoryException(
repositoryMetadata.name(),
"repository type [" + repositoryMetadata.type() + "] failed to create on current node",
creationException
);
}
@Override
public ProjectId getProjectId() {
return projectId;
}
@Override
public RepositoryMetadata getMetadata() {
return repositoryMetadata;
}
@Override
public void getSnapshotInfo(
Collection<SnapshotId> snapshotIds,
boolean abortOnFailure,
BooleanSupplier isCancelled,
CheckedConsumer<SnapshotInfo, Exception> consumer,
ActionListener<Void> listener
) {
listener.onFailure(createCreationException());
}
@Override
public Metadata getSnapshotGlobalMetadata(SnapshotId snapshotId, boolean fromProjectMetadata) {
throw createCreationException();
}
@Override
public IndexMetadata getSnapshotIndexMetaData(RepositoryData repositoryData, SnapshotId snapshotId, IndexId index) throws IOException {
throw createCreationException();
}
@Override
public void getRepositoryData(Executor responseExecutor, ActionListener<RepositoryData> listener) {
listener.onFailure(createCreationException());
}
@Override
public void finalizeSnapshot(FinalizeSnapshotContext finalizeSnapshotContext) {
finalizeSnapshotContext.onFailure(createCreationException());
}
@Override
public void deleteSnapshots(
Collection<SnapshotId> snapshotIds,
long repositoryDataGeneration,
IndexVersion minimumNodeVersion,
ActionListener<RepositoryData> repositoryDataUpdateListener,
Runnable onCompletion
) {
repositoryDataUpdateListener.onFailure(createCreationException());
}
@Override
public String startVerification() {
throw createCreationException();
}
@Override
public void endVerification(String verificationToken) {
throw createCreationException();
}
@Override
public void verify(String verificationToken, DiscoveryNode localNode) {
throw createCreationException();
}
@Override
public boolean isReadOnly() {
// this repository is assumed writable to bypass read-only check and fail with exception produced by this | InvalidRepository |
java | apache__rocketmq | filter/src/main/java/org/apache/rocketmq/filter/expression/UnaryExpression.java | {
"start": 1130,
"end": 1411
} | class ____ taken from ActiveMQ org.apache.activemq.filter.UnaryExpression,
* but:
* 1. remove XPath and XQuery expression;
* 2. Add constant UnaryType to distinguish different unary expression;
* 3. Extract UnaryInExpression to an independent class.
* </p>
*/
public abstract | was |
java | elastic__elasticsearch | x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/predicate/operator/comparison/BinaryComparisonProcessor.java | {
"start": 961,
"end": 2841
} | enum ____ implements PredicateBiFunction<Object, Object, Boolean> {
EQ(Comparisons::eq, "=="),
NULLEQ(Comparisons::nulleq, "<=>"),
NEQ(Comparisons::neq, "!="),
GT(Comparisons::gt, ">"),
GTE(Comparisons::gte, ">="),
LT(Comparisons::lt, "<"),
LTE(Comparisons::lte, "<=");
private final BiFunction<Object, Object, Boolean> process;
private final String symbol;
BinaryComparisonOperation(BiFunction<Object, Object, Boolean> process, String symbol) {
this.process = process;
this.symbol = symbol;
}
@Override
public String symbol() {
return symbol;
}
@Override
public Boolean apply(Object left, Object right) {
if (this != NULLEQ && (left == null || right == null)) {
return null;
}
return doApply(left, right);
}
@Override
public final Boolean doApply(Object left, Object right) {
return process.apply(left, right);
}
@Override
public String toString() {
return symbol;
}
}
public static final String NAME = "cb";
public BinaryComparisonProcessor(Processor left, Processor right, BinaryComparisonOperation operation) {
super(left, right, operation);
}
public BinaryComparisonProcessor(StreamInput in) throws IOException {
super(in, i -> i.readEnum(BinaryComparisonOperation.class));
}
@Override
public String getWriteableName() {
return NAME;
}
@Override
public Object process(Object input) {
if (function() == BinaryComparisonOperation.NULLEQ) {
return doProcess(left().process(input), right().process(input));
}
return super.process(input);
}
}
| BinaryComparisonOperation |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/masterreplica/CompletableEventLatchSupport.java | {
"start": 1692,
"end": 4641
} | class ____ {@code expectedCount} notifications.
*
* @param expectedCount
*/
public CompletableEventLatchSupport(int expectedCount) {
this.expectedCount = expectedCount;
}
public final int getExpectedCount() {
return expectedCount;
}
/**
* Notification callback method accepting a connection for a value. Triggers emission if the gate is open and the current
* call to this method is the last expected notification.
*/
public final void accept(T value) {
if (GATE_UPDATER.get(this) == GATE_CLOSED) {
onDrop(value);
return;
}
onAccept(value);
onNotification();
}
/**
* Notification callback method accepting a connection error. Triggers emission if the gate is open and the current call to
* this method is the last expected notification.
*/
public final void accept(Throwable throwable) {
if (GATE_UPDATER.get(this) == GATE_CLOSED) {
onDrop(throwable);
return;
}
onError(throwable);
onNotification();
}
private void onNotification() {
if (NOTIFICATIONS_UPDATER.incrementAndGet(this) == expectedCount) {
ScheduledFuture<?> timeoutScheduleFuture = this.timeoutScheduleFuture;
this.timeoutScheduleFuture = null;
if (timeoutScheduleFuture != null) {
timeoutScheduleFuture.cancel(false);
}
emit();
}
}
private void emit() {
if (GATE_UPDATER.compareAndSet(this, GATE_OPEN, GATE_CLOSED)) {
onEmit(new Emission<V>() {
@Override
public void success(V value) {
selfFuture.complete(value);
}
@Override
public void error(Throwable exception) {
selfFuture.completeExceptionally(exception);
}
});
}
}
// Callback hooks
protected void onAccept(T value) {
}
protected void onError(Throwable value) {
}
protected void onDrop(T value) {
}
protected void onDrop(Throwable value) {
}
protected void onEmit(Emission<V> emission) {
}
/**
* Retrieve a {@link CompletionStage} that is notified upon completion or timeout.
*
* @param timeout
* @param timeoutExecutor
* @return
*/
public final CompletionStage<V> getOrTimeout(Duration timeout, ScheduledExecutorService timeoutExecutor) {
if (GATE_UPDATER.get(this) == GATE_OPEN && timeoutScheduleFuture == null) {
this.timeoutScheduleFuture = timeoutExecutor.schedule(this::emit, timeout.toNanos(), TimeUnit.NANOSECONDS);
}
return selfFuture;
}
/**
* Interface to signal emission of a value or an {@link Exception}.
*
* @param <T>
*/
public | expecting |
java | google__guava | android/guava/src/com/google/common/reflect/Invokable.java | {
"start": 18359,
"end": 18888
} | class ____ within a non-static context
// and the first parameter is the hidden 'this'.
return declaringClass.getEnclosingClass() != null
&& !Modifier.isStatic(declaringClass.getModifiers());
}
}
}
private static final boolean ANNOTATED_TYPE_EXISTS = initAnnotatedTypeExists();
private static boolean initAnnotatedTypeExists() {
try {
Class.forName("java.lang.reflect.AnnotatedType");
} catch (ClassNotFoundException e) {
return false;
}
return true;
}
}
| is |
java | square__moshi | moshi/src/test/java/com/squareup/moshi/JsonQualifiersTest.java | {
"start": 3145,
"end": 3225
} | class ____ {
String a;
@FooPrefix String b;
}
static | StringAndFooString |
java | google__guava | android/guava/src/com/google/common/util/concurrent/AbstractScheduledService.java | {
"start": 2337,
"end": 3509
} | class ____ the {@link ScheduledExecutorService} returned from {@link #executor} to run
* the {@link #startUp} and {@link #shutDown} methods and also uses that service to schedule the
* {@link #runOneIteration} that will be executed periodically as specified by its {@link
* Scheduler}. When this service is asked to stop via {@link #stopAsync} it will cancel the periodic
* task (but not interrupt it) and wait for it to stop before running the {@link #shutDown} method.
*
* <p>Subclasses are guaranteed that the life cycle methods ({@link #runOneIteration}, {@link
* #startUp} and {@link #shutDown}) will never run concurrently. Notably, if any execution of {@link
* #runOneIteration} takes longer than its schedule defines, then subsequent executions may start
* late. Also, all life cycle methods are executed with a lock held, so subclasses can safely modify
* shared state without additional synchronization necessary for visibility to later executions of
* the life cycle methods.
*
* <h3>Usage Example</h3>
*
* <p>Here is a sketch of a service which crawls a website and uses the scheduling capabilities to
* rate limit itself.
*
* {@snippet :
* | uses |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/fetch/subphase/FieldAndFormat.java | {
"start": 1205,
"end": 4615
} | class ____ implements Writeable, ToXContentObject {
public static final ParseField FIELD_FIELD = new ParseField("field");
public static final ParseField FORMAT_FIELD = new ParseField("format");
public static final ParseField INCLUDE_UNMAPPED_FIELD = new ParseField("include_unmapped");
private static final ConstructingObjectParser<FieldAndFormat, Void> PARSER = new ConstructingObjectParser<>(
"fetch_field_and_format",
a -> new FieldAndFormat((String) a[0], (String) a[1], (Boolean) a[2])
);
static {
PARSER.declareString(ConstructingObjectParser.constructorArg(), FIELD_FIELD);
PARSER.declareStringOrNull(ConstructingObjectParser.optionalConstructorArg(), FORMAT_FIELD);
PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), INCLUDE_UNMAPPED_FIELD);
}
/**
* Parse a {@link FieldAndFormat} from some {@link XContent}.
*/
public static FieldAndFormat fromXContent(XContentParser parser) throws IOException {
XContentParser.Token token = parser.currentToken();
if (token.isValue()) {
return new FieldAndFormat(parser.text(), null);
} else {
return PARSER.apply(parser, null);
}
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(FIELD_FIELD.getPreferredName(), field);
if (format != null) {
builder.field(FORMAT_FIELD.getPreferredName(), format);
}
if (this.includeUnmapped != null) {
builder.field(INCLUDE_UNMAPPED_FIELD.getPreferredName(), includeUnmapped);
}
builder.endObject();
return builder;
}
/** The name of the field. */
public final String field;
/** The format of the field, or {@code null} if defaults should be used. */
public final String format;
/** Whether to include unmapped fields or not. */
public final Boolean includeUnmapped;
public FieldAndFormat(String field, @Nullable String format) {
this(field, format, null);
}
public FieldAndFormat(String field, @Nullable String format, @Nullable Boolean includeUnmapped) {
this.field = Objects.requireNonNull(field);
this.format = format;
this.includeUnmapped = includeUnmapped;
}
/** Serialization constructor. */
public FieldAndFormat(StreamInput in) throws IOException {
this.field = in.readString();
format = in.readOptionalString();
this.includeUnmapped = in.readOptionalBoolean();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(field);
out.writeOptionalString(format);
out.writeOptionalBoolean(this.includeUnmapped);
}
@Override
public int hashCode() {
int h = field.hashCode();
h = 31 * h + Objects.hashCode(format);
h = 31 * h + Objects.hashCode(includeUnmapped);
return h;
}
@Override
public boolean equals(Object obj) {
if (obj == null || getClass() != obj.getClass()) {
return false;
}
FieldAndFormat other = (FieldAndFormat) obj;
return field.equals(other.field) && Objects.equals(format, other.format) && Objects.equals(includeUnmapped, other.includeUnmapped);
}
}
| FieldAndFormat |
java | google__dagger | javatests/dagger/functional/assisted/AssistedFactoryTest.java | {
"start": 1560,
"end": 1778
} | class
____ abstractFooFactory();
// Factory without any assisted parameters
NoAssistedParametersFooFactory noAssistedParametersFooFactory();
// Test injecting the factories from another | AbstractFooFactory |
java | apache__camel | components/camel-keycloak/src/test/java/org/apache/camel/component/keycloak/KeycloakProducerIT.java | {
"start": 3495,
"end": 17124
} | class ____ extends CamelTestSupport {
private static final Logger log = LoggerFactory.getLogger(KeycloakProducerIT.class);
private static String keycloakUrl;
private static String realm;
private static String username;
private static String password;
// Test data - use unique names to avoid conflicts
private static final String TEST_REALM_NAME = "test-realm-" + UUID.randomUUID().toString().substring(0, 8);
private static final String TEST_USER_NAME = "test-user-" + UUID.randomUUID().toString().substring(0, 8);
private static final String TEST_ROLE_NAME = "test-role-" + UUID.randomUUID().toString().substring(0, 8);
static {
// Load configuration from system properties
keycloakUrl = System.getProperty("keycloak.server.url");
realm = System.getProperty("keycloak.realm");
username = System.getProperty("keycloak.username");
password = System.getProperty("keycloak.password");
}
@Override
protected RoutesBuilder createRouteBuilder() throws Exception {
return new RouteBuilder() {
@Override
public void configure() throws Exception {
// For admin operations, always authenticate against master realm
// but operations will be performed on the target realm specified in headers
String keycloakEndpoint = String.format(
"keycloak:admin?serverUrl=%s&realm=master&username=%s&password=%s",
keycloakUrl, username, password);
// Realm operations
from("direct:createRealm")
.to(keycloakEndpoint + "&operation=createRealm");
from("direct:getRealm")
.to(keycloakEndpoint + "&operation=getRealm");
from("direct:deleteRealm")
.to(keycloakEndpoint + "&operation=deleteRealm");
// User operations
from("direct:createUser")
.to(keycloakEndpoint + "&operation=createUser");
from("direct:getUser")
.to(keycloakEndpoint + "&operation=getUser");
from("direct:listUsers")
.to(keycloakEndpoint + "&operation=listUsers");
from("direct:deleteUser")
.to(keycloakEndpoint + "&operation=deleteUser");
// Role operations
from("direct:createRole")
.to(keycloakEndpoint + "&operation=createRole");
from("direct:getRole")
.to(keycloakEndpoint + "&operation=getRole");
from("direct:listRoles")
.to(keycloakEndpoint + "&operation=listRoles");
from("direct:deleteRole")
.to(keycloakEndpoint + "&operation=deleteRole");
// User-Role operations
from("direct:assignRoleToUser")
.to(keycloakEndpoint + "&operation=assignRoleToUser");
from("direct:removeRoleFromUser")
.to(keycloakEndpoint + "&operation=removeRoleFromUser");
// POJO-based operations
from("direct:createRealmPojo")
.to(keycloakEndpoint + "&operation=createRealm&pojoRequest=true");
from("direct:createUserPojo")
.to(keycloakEndpoint + "&operation=createUser&pojoRequest=true");
from("direct:createRolePojo")
.to(keycloakEndpoint + "&operation=createRole&pojoRequest=true");
}
};
}
@Test
@Order(1)
void testKeycloakProducerConfiguration() {
// Verify that the configuration is properly set up
assertNotNull(keycloakUrl);
assertNotNull(realm);
assertNotNull(username);
assertNotNull(password);
assertTrue(keycloakUrl.startsWith("http://") || keycloakUrl.startsWith("https://"));
log.info("Testing Keycloak at: {} with realm: {}", keycloakUrl, realm);
}
@Test
@Order(2)
void testCreateRealmWithHeaders() {
Exchange exchange = createExchangeWithBody(null);
exchange.getIn().setHeader(KeycloakConstants.REALM_NAME, TEST_REALM_NAME);
Exchange result = template.send("direct:createRealm", exchange);
assertNotNull(result);
assertNull(result.getException());
String body = result.getIn().getBody(String.class);
assertEquals("Realm created successfully", body);
log.info("Created realm: {}", TEST_REALM_NAME);
}
@Test
@Order(4)
void testGetRealm() {
Exchange exchange = createExchangeWithBody(null);
exchange.getIn().setHeader(KeycloakConstants.REALM_NAME, TEST_REALM_NAME);
Exchange result = template.send("direct:getRealm", exchange);
assertNotNull(result);
assertNull(result.getException());
RealmRepresentation realmRep = result.getIn().getBody(RealmRepresentation.class);
assertNotNull(realmRep);
assertEquals(TEST_REALM_NAME, realmRep.getRealm());
assertTrue(realmRep.isEnabled());
log.info("Retrieved realm: {} - enabled: {}", realmRep.getRealm(), realmRep.isEnabled());
}
@Test
@Order(5)
void testCreateUserWithHeaders() {
Exchange exchange = createExchangeWithBody(null);
exchange.getIn().setHeader(KeycloakConstants.REALM_NAME, TEST_REALM_NAME);
exchange.getIn().setHeader(KeycloakConstants.USERNAME, TEST_USER_NAME);
exchange.getIn().setHeader(KeycloakConstants.USER_EMAIL, TEST_USER_NAME + "@test.com");
exchange.getIn().setHeader(KeycloakConstants.USER_FIRST_NAME, "Test");
exchange.getIn().setHeader(KeycloakConstants.USER_LAST_NAME, "User");
Exchange result = template.send("direct:createUser", exchange);
assertNotNull(result);
assertNull(result.getException());
// The result should be a Response object
Object body = result.getIn().getBody();
assertNotNull(body);
assertTrue(body instanceof Response);
log.info("Created user: {} in realm: {}", TEST_USER_NAME, TEST_REALM_NAME);
}
@Test
@Order(6)
void testCreateUserWithPojo() {
String pojoUserName = TEST_USER_NAME + "-pojo";
UserRepresentation user = new UserRepresentation();
user.setUsername(pojoUserName);
user.setEmail(pojoUserName + "@test.com");
user.setFirstName("Test");
user.setLastName("User POJO");
user.setEnabled(true);
Exchange exchange = createExchangeWithBody(user);
exchange.getIn().setHeader(KeycloakConstants.REALM_NAME, TEST_REALM_NAME);
Exchange result = template.send("direct:createUserPojo", exchange);
assertNotNull(result);
assertNull(result.getException());
Object body = result.getIn().getBody();
assertNotNull(body);
assertTrue(body instanceof Response);
log.info("Created user via POJO: {} in realm: {}", pojoUserName, TEST_REALM_NAME);
}
@Test
@Order(7)
void testListUsers() {
Exchange exchange = createExchangeWithBody(null);
exchange.getIn().setHeader(KeycloakConstants.REALM_NAME, TEST_REALM_NAME);
Exchange result = template.send("direct:listUsers", exchange);
assertNotNull(result);
assertNull(result.getException());
@SuppressWarnings("unchecked")
List<UserRepresentation> users = result.getIn().getBody(List.class);
assertNotNull(users);
assertTrue(users.size() >= 2); // At least our two test users
log.info("Found {} users in realm: {}", users.size(), TEST_REALM_NAME);
}
@Test
@Order(8)
void testCreateRoleWithHeaders() {
Exchange exchange = createExchangeWithBody(null);
exchange.getIn().setHeader(KeycloakConstants.REALM_NAME, TEST_REALM_NAME);
exchange.getIn().setHeader(KeycloakConstants.ROLE_NAME, TEST_ROLE_NAME);
exchange.getIn().setHeader(KeycloakConstants.ROLE_DESCRIPTION, "Test role for integration testing");
Exchange result = template.send("direct:createRole", exchange);
assertNotNull(result);
assertNull(result.getException());
String body = result.getIn().getBody(String.class);
assertEquals("Role created successfully", body);
log.info("Created role: {} in realm: {}", TEST_ROLE_NAME, TEST_REALM_NAME);
}
@Test
@Order(9)
void testCreateRoleWithPojo() {
String pojoRoleName = TEST_ROLE_NAME + "-pojo";
RoleRepresentation role = new RoleRepresentation();
role.setName(pojoRoleName);
role.setDescription("Test role created via POJO");
Exchange exchange = createExchangeWithBody(role);
exchange.getIn().setHeader(KeycloakConstants.REALM_NAME, TEST_REALM_NAME);
Exchange result = template.send("direct:createRolePojo", exchange);
assertNotNull(result);
assertNull(result.getException());
String body = result.getIn().getBody(String.class);
assertEquals("Role created successfully", body);
log.info("Created role via POJO: {} in realm: {}", pojoRoleName, TEST_REALM_NAME);
}
@Test
@Order(10)
void testGetRole() {
Exchange exchange = createExchangeWithBody(null);
exchange.getIn().setHeader(KeycloakConstants.REALM_NAME, TEST_REALM_NAME);
exchange.getIn().setHeader(KeycloakConstants.ROLE_NAME, TEST_ROLE_NAME);
Exchange result = template.send("direct:getRole", exchange);
assertNotNull(result);
assertNull(result.getException());
RoleRepresentation roleRep = result.getIn().getBody(RoleRepresentation.class);
assertNotNull(roleRep);
assertEquals(TEST_ROLE_NAME, roleRep.getName());
assertEquals("Test role for integration testing", roleRep.getDescription());
log.info("Retrieved role: {} - description: {}", roleRep.getName(), roleRep.getDescription());
}
@Test
@Order(11)
void testListRoles() {
Exchange exchange = createExchangeWithBody(null);
exchange.getIn().setHeader(KeycloakConstants.REALM_NAME, TEST_REALM_NAME);
Exchange result = template.send("direct:listRoles", exchange);
assertNotNull(result);
assertNull(result.getException());
@SuppressWarnings("unchecked")
List<RoleRepresentation> roles = result.getIn().getBody(List.class);
assertNotNull(roles);
assertTrue(roles.size() >= 2); // At least our test roles + default roles
log.info("Found {} roles in realm: {}", roles.size(), TEST_REALM_NAME);
}
@Test
@Order(12)
void testErrorHandling() {
// Test with missing realm name
CamelExecutionException ex = assertThrows(CamelExecutionException.class, () -> {
template.sendBody("direct:createUser", null);
});
assertTrue(ex.getCause().getMessage().contains("Realm name must be specified"));
// Test with non-existent realm
Exchange exchange = createExchangeWithBody(null);
exchange.getIn().setHeader(KeycloakConstants.REALM_NAME, "non-existent-realm");
exchange.getIn().setHeader(KeycloakConstants.USERNAME, "testuser");
log.info("Error handling tests completed successfully");
}
@Test
@Order(98)
void testCleanupRoles() {
// Delete test roles
String[] rolesToDelete = { TEST_ROLE_NAME, TEST_ROLE_NAME + "-pojo" };
for (String roleName : rolesToDelete) {
try {
Exchange exchange = createExchangeWithBody(null);
exchange.getIn().setHeader(KeycloakConstants.REALM_NAME, TEST_REALM_NAME);
exchange.getIn().setHeader(KeycloakConstants.ROLE_NAME, roleName);
Exchange result = template.send("direct:deleteRole", exchange);
if (result.getException() == null) {
String body = result.getIn().getBody(String.class);
assertEquals("Role deleted successfully", body);
log.info("Deleted role: {}", roleName);
}
} catch (Exception e) {
log.warn("Failed to delete role {}: {}", roleName, e.getMessage());
}
}
}
@Test
@Order(99)
void testCleanupRealm() {
// Delete the test realm (this will also delete all users and roles in it)
Exchange exchange = createExchangeWithBody(null);
exchange.getIn().setHeader(KeycloakConstants.REALM_NAME, TEST_REALM_NAME);
Exchange result = template.send("direct:deleteRealm", exchange);
assertNotNull(result);
assertNull(result.getException());
String body = result.getIn().getBody(String.class);
assertEquals("Realm deleted successfully", body);
log.info("Deleted test realm: {}", TEST_REALM_NAME);
}
@Test
@Order(100)
void testVerifyRealmDeleted() {
CamelExecutionException ex = assertThrows(CamelExecutionException.class, () -> {
// Verify that the realm was actually deleted
Map<String, Object> headers = new HashMap<>();
headers.put(KeycloakConstants.REALM_NAME, TEST_REALM_NAME);
template.requestBodyAndHeaders("direct:getRealm", null, headers);
});
assertNotNull(ex.getCause());
log.info("Verified that test realm was deleted");
}
}
| KeycloakProducerIT |
java | apache__flink | flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/reader/SourceReaderBaseTest.java | {
"start": 26720,
"end": 27936
} | class ____<E, SplitT extends SourceSplit>
extends SingleThreadFetcherManager<E, SplitT> {
private final CompletableFuture<Void> inShutdownSplitFetcherFuture;
public BlockingShutdownSplitFetcherManager(
Supplier<SplitReader<E, SplitT>> splitReaderSupplier, Configuration configuration) {
super(splitReaderSupplier, configuration);
this.inShutdownSplitFetcherFuture = new CompletableFuture<>();
}
@Override
public boolean maybeShutdownFinishedFetchers() {
shutdownAllSplitFetcher();
return true;
}
public CompletableFuture<Void> getInShutdownSplitFetcherFuture() {
return inShutdownSplitFetcherFuture;
}
private void shutdownAllSplitFetcher() {
inShutdownSplitFetcherFuture.complete(null);
while (!super.maybeShutdownFinishedFetchers()) {
try {
// avoid tight loop
Thread.sleep(1);
} catch (InterruptedException e) {
e.printStackTrace();
}
}
}
}
private static | BlockingShutdownSplitFetcherManager |
java | apache__flink | flink-kubernetes/src/main/java/org/apache/flink/kubernetes/kubeclient/decorators/PodTemplateMountDecorator.java | {
"start": 2502,
"end": 7034
} | class ____ extends AbstractKubernetesStepDecorator {
private final AbstractKubernetesParameters kubernetesComponentConf;
private final String podTemplateConfigMapName;
public PodTemplateMountDecorator(AbstractKubernetesParameters kubernetesComponentConf) {
this.kubernetesComponentConf = checkNotNull(kubernetesComponentConf);
this.podTemplateConfigMapName =
Constants.POD_TEMPLATE_CONFIG_MAP_PREFIX + kubernetesComponentConf.getClusterId();
}
@Override
public FlinkPod decorateFlinkPod(FlinkPod flinkPod) {
if (!getTaskManagerPodTemplateFile().isPresent()) {
return flinkPod;
}
final Pod mountedPod = decoratePod(flinkPod.getPodWithoutMainContainer());
final Container mountedMainContainer =
new ContainerBuilder(flinkPod.getMainContainer())
.addNewVolumeMount()
.withName(POD_TEMPLATE_VOLUME)
.withMountPath(POD_TEMPLATE_DIR_IN_POD)
.endVolumeMount()
.build();
return new FlinkPod.Builder(flinkPod)
.withPod(mountedPod)
.withMainContainer(mountedMainContainer)
.build();
}
private Pod decoratePod(Pod pod) {
final List<KeyToPath> keyToPaths = new ArrayList<>();
keyToPaths.add(
new KeyToPathBuilder()
.withKey(TASK_MANAGER_POD_TEMPLATE_FILE_NAME)
.withPath(TASK_MANAGER_POD_TEMPLATE_FILE_NAME)
.build());
final Volume podTemplateVolume =
new VolumeBuilder()
.withName(POD_TEMPLATE_VOLUME)
.withNewConfigMap()
.withName(podTemplateConfigMapName)
.withItems(keyToPaths)
.endConfigMap()
.build();
return new PodBuilder(pod)
.editSpec()
.addNewVolumeLike(podTemplateVolume)
.endVolume()
.endSpec()
.build();
}
@Override
public List<HasMetadata> buildAccompanyingKubernetesResources() throws IOException {
return getTaskManagerPodTemplateFile()
.map(
FunctionUtils.uncheckedFunction(
file -> {
final Map<String, String> data = new HashMap<>();
data.put(
TASK_MANAGER_POD_TEMPLATE_FILE_NAME,
Files.toString(file, StandardCharsets.UTF_8));
final HasMetadata flinkConfConfigMap =
new ConfigMapBuilder()
.withApiVersion(Constants.API_VERSION)
.withNewMetadata()
.withName(podTemplateConfigMapName)
.withLabels(
kubernetesComponentConf
.getCommonLabels())
.endMetadata()
.addToData(data)
.build();
return Collections.singletonList(flinkConfConfigMap);
}))
.orElse(Collections.emptyList());
}
private Optional<File> getTaskManagerPodTemplateFile() {
return kubernetesComponentConf
.getFlinkConfiguration()
.getOptional(KubernetesConfigOptions.TASK_MANAGER_POD_TEMPLATE)
.map(
file -> {
final File podTemplateFile = new File(file);
if (!podTemplateFile.exists()) {
throw new FlinkRuntimeException(
String.format(
"Pod template file %s does not exist.", file));
}
return podTemplateFile;
});
}
}
| PodTemplateMountDecorator |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/SshEndpointBuilderFactory.java | {
"start": 72106,
"end": 74009
} | class ____ {
/**
* The internal instance of the builder used to access to all the
* methods representing the name of headers.
*/
private static final SshHeaderNameBuilder INSTANCE = new SshHeaderNameBuilder();
/**
* The user name.
*
* The option is a: {@code String} type.
*
* Group: common
*
* @return the name of the header {@code SshUsername}.
*/
public String sshUsername() {
return "CamelSshUsername";
}
/**
* The password.
*
* The option is a: {@code String} type.
*
* Group: common
*
* @return the name of the header {@code SshPassword}.
*/
public String sshPassword() {
return "CamelSshPassword";
}
/**
* The value of this header is a InputStream with the standard error
* stream of the executable.
*
* The option is a: {@code InputStream} type.
*
* Group: common
*
* @return the name of the header {@code SshStderr}.
*/
public String sshStderr() {
return "CamelSshStderr";
}
/**
* The value of this header is the exit value that is returned, after
* the execution. By convention a non-zero status exit value indicates
* abnormal termination. Note that the exit value is OS dependent.
*
* The option is a: {@code Integer} type.
*
* Group: common
*
* @return the name of the header {@code SshExitValue}.
*/
public String sshExitValue() {
return "CamelSshExitValue";
}
}
static SshEndpointBuilder endpointBuilder(String componentName, String path) {
| SshHeaderNameBuilder |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/cluster/coordination/AtomicRegisterCoordinatorTests.java | {
"start": 13730,
"end": 18886
} | class ____ extends ElectionStrategy {
private final AtomicRegister register;
private final BooleanSupplier disruptElectionsSupplier;
private final BooleanSupplier disruptPublicationsSupplier;
AtomicRegisterElectionStrategy(
AtomicRegister register,
BooleanSupplier disruptElectionsSupplier,
BooleanSupplier disruptPublicationsSupplier
) {
this.register = register;
this.disruptElectionsSupplier = disruptElectionsSupplier;
this.disruptPublicationsSupplier = disruptPublicationsSupplier;
}
@Override
protected boolean satisfiesAdditionalQuorumConstraints(
DiscoveryNode localNode,
long localCurrentTerm,
long localAcceptedTerm,
long localAcceptedVersion,
CoordinationMetadata.VotingConfiguration lastCommittedConfiguration,
CoordinationMetadata.VotingConfiguration lastAcceptedConfiguration,
CoordinationState.VoteCollection joinVotes
) {
return true;
}
@Override
public boolean isElectionQuorum(
DiscoveryNode localNode,
long localCurrentTerm,
long localAcceptedTerm,
long localAcceptedVersion,
CoordinationMetadata.VotingConfiguration lastCommittedConfiguration,
CoordinationMetadata.VotingConfiguration lastAcceptedConfiguration,
CoordinationState.VoteCollection joinVotes
) {
assert lastCommittedConfiguration.isEmpty() == false;
assert lastAcceptedConfiguration.isEmpty() == false;
// Safety is guaranteed by the blob store CAS which guaranteed that we only create one StartJoinRequest per term, so elect as
// the master the current node as soon as it has voted for itself.
return joinVotes.containsVoteFor(localNode);
}
@Override
public boolean isPublishQuorum(
CoordinationState.VoteCollection voteCollection,
CoordinationMetadata.VotingConfiguration lastCommittedConfiguration,
CoordinationMetadata.VotingConfiguration latestPublishedConfiguration
) {
assert latestPublishedConfiguration.getNodeIds().size() == 1;
return voteCollection.isQuorum(latestPublishedConfiguration);
}
@Override
public void onNewElection(DiscoveryNode localNode, long proposedTerm, ActionListener<StartJoinRequest> listener) {
if (disruptElectionsSupplier.getAsBoolean()) {
listener.onFailure(new IOException("simulating failure to acquire term during election"));
return;
}
register.readCurrentTerm(listener.delegateFailure((l1, currentTerm) -> {
final var electionTerm = Math.max(proposedTerm, currentTerm + 1);
register.compareAndExchange(
currentTerm,
electionTerm,
l1.delegateFailure((l2, witness) -> ActionListener.completeWith(l2, () -> {
if (witness.equals(currentTerm)) {
return new StartJoinRequest(localNode, electionTerm);
} else {
throw new CoordinationStateRejectedException("couldn't claim " + electionTerm + ", current term is " + witness);
}
}))
);
}));
}
@Override
public boolean isInvalidReconfiguration(
ClusterState clusterState,
CoordinationMetadata.VotingConfiguration lastAcceptedConfiguration,
CoordinationMetadata.VotingConfiguration lastCommittedConfiguration
) {
// TODO: Move into a fixed dummy VotingConfiguration
return false;
}
@Override
public void beforeCommit(long term, long version, ActionListener<Void> listener) {
// TODO: add a test to ensure that this gets called
if (disruptPublicationsSupplier.getAsBoolean()) {
listener.onFailure(new IOException("simulating failure to verify term during publication"));
return;
}
register.readCurrentTerm(listener.delegateFailure((l, currentTerm) -> ActionListener.completeWith(l, () -> {
if (currentTerm == term) {
return null;
} else {
assert term < currentTerm : term + " vs " + currentTerm;
throw new CoordinationStateRejectedException(
Strings.format(
"could not commit cluster state version %d in term %d, current term is now %d",
version,
term,
currentTerm
)
);
}
})));
}
}
record PersistentClusterState(long term, long version, Metadata state) {}
private static | AtomicRegisterElectionStrategy |
java | spring-projects__spring-framework | spring-core/src/test/java/org/springframework/aot/hint/BindingReflectionHintsRegistrarTests.java | {
"start": 15497,
"end": 15614
} | class ____ {
public ResolvableType getResolvableType() {
return null;
}
}
static | SampleClassWithResolvableType |
java | apache__flink | flink-models/flink-model-openai/src/main/java/org/apache/flink/model/openai/AbstractOpenAIModelFunction.java | {
"start": 10198,
"end": 10990
} | enum ____ implements DescribedEnum {
RETRY("Retry sending the request."),
FAILOVER("Throw exceptions and fail the Flink job."),
IGNORE(
"Ignore the input that caused the error and continue. The error itself would be recorded in log.");
private final String description;
ErrorHandlingStrategy(String description) {
this.description = description;
}
@Override
public InlineElement getDescription() {
return text(description);
}
}
/**
* The fallback strategy for when retry attempts are exhausted. It should be identical to {@link
* ErrorHandlingStrategy} except that it does not support {@link ErrorHandlingStrategy#RETRY}.
*/
public | ErrorHandlingStrategy |
java | apache__camel | components/camel-asterisk/src/test/java/org/apache/camel/component/asterisk/AsteriskConsumerTest.java | {
"start": 1341,
"end": 2606
} | class ____ extends CamelTestSupport {
private static final Logger LOG = LoggerFactory.getLogger(AsteriskConsumerTest.class);
private String hostname = "192.168.0.254";
private String username = "username";
private String password = "password";
@Disabled
@Test
void testReceiveTraps() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedMinimumMessageCount(1);
mock.assertIsSatisfied();
List<Exchange> events = mock.getExchanges();
if (LOG.isInfoEnabled()) {
for (Exchange e : events) {
LOG.info("ASTERISK EVENTS: {}", e.getIn().getBody(String.class));
}
}
}
@Test
void testStartRoute() {
// do nothing here , just make sure the camel route can started.
assertTrue(context.isStarted());
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
from("asterisk:myVoIP?hostname=" + hostname + "&username=" + username + "&password=" + password).id("route1")
.transform(body().convertToString()).to("mock:result");
}
};
}
}
| AsteriskConsumerTest |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/MulIntsEvaluator.java | {
"start": 1125,
"end": 5091
} | class ____ implements EvalOperator.ExpressionEvaluator {
private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(MulIntsEvaluator.class);
private final Source source;
private final EvalOperator.ExpressionEvaluator lhs;
private final EvalOperator.ExpressionEvaluator rhs;
private final DriverContext driverContext;
private Warnings warnings;
public MulIntsEvaluator(Source source, EvalOperator.ExpressionEvaluator lhs,
EvalOperator.ExpressionEvaluator rhs, DriverContext driverContext) {
this.source = source;
this.lhs = lhs;
this.rhs = rhs;
this.driverContext = driverContext;
}
@Override
public Block eval(Page page) {
try (IntBlock lhsBlock = (IntBlock) lhs.eval(page)) {
try (IntBlock rhsBlock = (IntBlock) rhs.eval(page)) {
IntVector lhsVector = lhsBlock.asVector();
if (lhsVector == null) {
return eval(page.getPositionCount(), lhsBlock, rhsBlock);
}
IntVector rhsVector = rhsBlock.asVector();
if (rhsVector == null) {
return eval(page.getPositionCount(), lhsBlock, rhsBlock);
}
return eval(page.getPositionCount(), lhsVector, rhsVector);
}
}
}
@Override
public long baseRamBytesUsed() {
long baseRamBytesUsed = BASE_RAM_BYTES_USED;
baseRamBytesUsed += lhs.baseRamBytesUsed();
baseRamBytesUsed += rhs.baseRamBytesUsed();
return baseRamBytesUsed;
}
public IntBlock eval(int positionCount, IntBlock lhsBlock, IntBlock rhsBlock) {
try(IntBlock.Builder result = driverContext.blockFactory().newIntBlockBuilder(positionCount)) {
position: for (int p = 0; p < positionCount; p++) {
switch (lhsBlock.getValueCount(p)) {
case 0:
result.appendNull();
continue position;
case 1:
break;
default:
warnings().registerException(new IllegalArgumentException("single-value function encountered multi-value"));
result.appendNull();
continue position;
}
switch (rhsBlock.getValueCount(p)) {
case 0:
result.appendNull();
continue position;
case 1:
break;
default:
warnings().registerException(new IllegalArgumentException("single-value function encountered multi-value"));
result.appendNull();
continue position;
}
int lhs = lhsBlock.getInt(lhsBlock.getFirstValueIndex(p));
int rhs = rhsBlock.getInt(rhsBlock.getFirstValueIndex(p));
try {
result.appendInt(Mul.processInts(lhs, rhs));
} catch (ArithmeticException e) {
warnings().registerException(e);
result.appendNull();
}
}
return result.build();
}
}
public IntBlock eval(int positionCount, IntVector lhsVector, IntVector rhsVector) {
try(IntBlock.Builder result = driverContext.blockFactory().newIntBlockBuilder(positionCount)) {
position: for (int p = 0; p < positionCount; p++) {
int lhs = lhsVector.getInt(p);
int rhs = rhsVector.getInt(p);
try {
result.appendInt(Mul.processInts(lhs, rhs));
} catch (ArithmeticException e) {
warnings().registerException(e);
result.appendNull();
}
}
return result.build();
}
}
@Override
public String toString() {
return "MulIntsEvaluator[" + "lhs=" + lhs + ", rhs=" + rhs + "]";
}
@Override
public void close() {
Releasables.closeExpectNoException(lhs, rhs);
}
private Warnings warnings() {
if (warnings == null) {
this.warnings = Warnings.createWarnings(
driverContext.warningsMode(),
source.source().getLineNumber(),
source.source().getColumnNumber(),
source.text()
);
}
return warnings;
}
static | MulIntsEvaluator |
java | spring-projects__spring-framework | spring-jms/src/main/java/org/springframework/jms/core/DefaultJmsClient.java | {
"start": 1410,
"end": 2942
} | class ____ implements JmsClient {
private final JmsOperations jmsTemplate;
private @Nullable MessageConverter messageConverter;
private @Nullable MessagePostProcessor messagePostProcessor;
public DefaultJmsClient(ConnectionFactory connectionFactory) {
Assert.notNull(connectionFactory, "ConnectionFactory must not be null");
this.jmsTemplate = new JmsTemplate(connectionFactory);
}
public DefaultJmsClient(JmsOperations jmsTemplate) {
Assert.notNull(jmsTemplate, "JmsTemplate must not be null");
this.jmsTemplate = jmsTemplate;
}
void setMessageConverter(MessageConverter messageConverter) {
Assert.notNull(messageConverter, "MessageConverter must not be null");
this.messageConverter = messageConverter;
}
void setMessagePostProcessor(MessagePostProcessor messagePostProcessor) {
Assert.notNull(messagePostProcessor, "MessagePostProcessor must not be null");
this.messagePostProcessor = messagePostProcessor;
}
@Override
public OperationSpec destination(Destination destination) {
return new DefaultOperationSpec(destination);
}
@Override
public OperationSpec destination(String destinationName) {
return new DefaultOperationSpec(destinationName);
}
private JmsMessagingTemplate newDelegate() {
JmsMessagingTemplate delegate = new JmsMessagingTemplate(DefaultJmsClient.this.jmsTemplate);
MessageConverter converter = DefaultJmsClient.this.messageConverter;
if (converter != null) {
delegate.setMessageConverter(converter);
}
return delegate;
}
private | DefaultJmsClient |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/boot/model/internal/AnnotationBinder.java | {
"start": 10525,
"end": 11460
} | class ____ generators
// GeneratorBinder.registerGlobalGenerators( classDetails, context );
if ( context.getMetadataCollector().getClassType( classDetails ) == ENTITY ) {
bindEntityClass( classDetails, inheritanceStatePerClass, context );
}
}
private static void handleImport(ClassDetails annotatedClass, MetadataBuildingContext context) {
if ( annotatedClass.hasDirectAnnotationUsage( Imported.class ) ) {
final String qualifiedName = annotatedClass.getName();
final String name = unqualify( qualifiedName );
final String rename = annotatedClass.getDirectAnnotationUsage( Imported.class ).rename();
context.getMetadataCollector().addImport( rename.isBlank() ? name : rename, qualifiedName );
}
}
private static void detectMappedSuperclassProblems(ClassDetails annotatedClass) {
if ( annotatedClass.hasDirectAnnotationUsage( MappedSuperclass.class ) ) {
// @Entity and @MappedSuperclass on the same | level |
java | apache__camel | core/camel-api/src/main/java/org/apache/camel/spi/CamelLogger.java | {
"start": 1228,
"end": 7466
} | class ____ {
private Logger log;
private LoggingLevel level;
private Marker marker;
public CamelLogger() {
this(LoggerFactory.getLogger(CamelLogger.class));
}
public CamelLogger(Logger log) {
this(log, LoggingLevel.INFO);
}
public CamelLogger(Logger log, LoggingLevel level) {
this(log, level, null);
}
public CamelLogger(Logger log, LoggingLevel level, String marker) {
this.log = log;
setLevel(level);
setMarker(marker);
}
public CamelLogger(String logName) {
this(LoggerFactory.getLogger(logName));
}
public CamelLogger(String logName, LoggingLevel level) {
this(logName, level, null);
}
public CamelLogger(String logName, LoggingLevel level, String marker) {
this(LoggerFactory.getLogger(logName), level, marker);
}
@Override
public String toString() {
return "Logger[" + log + "]";
}
public void log(String message, LoggingLevel loggingLevel) {
LoggingLevel oldLogLevel = getLevel();
setLevel(loggingLevel);
log(message);
setLevel(oldLogLevel);
}
/**
* Logs the message <b>with</b> checking the {@link #shouldLog()} method first.
*
* @param message the message to log, if {@link #shouldLog()} returned <tt>true</tt>
*/
public void log(String message) {
if (shouldLog(log, level)) {
if (marker != null) {
log(log, level, marker, message);
} else {
log(log, level, message);
}
}
}
/**
* Logs the message <b>without</b> checking the {@link #shouldLog()} method first.
*
* @param message the message to log
*/
public void doLog(String message) {
if (marker != null) {
log(log, level, marker, message);
} else {
log(log, level, message);
}
}
public void log(String message, Throwable exception, LoggingLevel loggingLevel) {
log(log, loggingLevel, marker, message, exception);
}
public void log(String message, Throwable exception) {
if (shouldLog(log, level)) {
log(log, level, marker, message, exception);
}
}
public Logger getLog() {
return log;
}
public void setLog(Logger log) {
this.log = log;
}
public LoggingLevel getLevel() {
return level;
}
public void setLevel(LoggingLevel level) {
if (level == null) {
throw new IllegalArgumentException("Log level may not be null");
}
this.level = level;
}
public void setLogName(String logName) {
this.log = LoggerFactory.getLogger(logName);
}
public Marker getMarker() {
return marker;
}
public void setMarker(Marker marker) {
this.marker = marker;
}
public void setMarker(String marker) {
if (ObjectHelper.isNotEmpty(marker)) {
this.marker = MarkerFactory.getMarker(marker);
} else {
this.marker = null;
}
}
public static void log(Logger log, LoggingLevel level, String message) {
switch (level) {
case DEBUG:
log.debug(message);
break;
case ERROR:
log.error(message);
break;
case INFO:
log.info(message);
break;
case TRACE:
log.trace(message);
break;
case WARN:
log.warn(message);
break;
default:
}
}
public static void log(Logger log, LoggingLevel level, Marker marker, String message) {
switch (level) {
case DEBUG:
log.debug(marker, message);
break;
case ERROR:
log.error(marker, message);
break;
case INFO:
log.info(marker, message);
break;
case TRACE:
log.trace(marker, message);
break;
case WARN:
log.warn(marker, message);
break;
default:
}
}
public static void log(Logger log, LoggingLevel level, String message, Throwable th) {
switch (level) {
case DEBUG:
log.debug(message, th);
break;
case ERROR:
log.error(message, th);
break;
case INFO:
log.info(message, th);
break;
case TRACE:
log.trace(message, th);
break;
case WARN:
log.warn(message, th);
break;
default:
}
}
public static void log(Logger log, LoggingLevel level, Marker marker, String message, Throwable th) {
if (marker == null) {
log(log, level, message, th);
return;
}
// marker must be provided
switch (level) {
case DEBUG:
log.debug(marker, message, th);
break;
case ERROR:
log.error(marker, message, th);
break;
case INFO:
log.info(marker, message, th);
break;
case TRACE:
log.trace(marker, message, th);
break;
case WARN:
log.warn(marker, message, th);
break;
default:
}
}
public boolean shouldLog() {
return CamelLogger.shouldLog(log, level);
}
public static boolean shouldLog(Logger log, LoggingLevel level) {
switch (level) {
case DEBUG:
return log.isDebugEnabled();
case ERROR:
return log.isErrorEnabled();
case INFO:
return log.isInfoEnabled();
case TRACE:
return log.isTraceEnabled();
case WARN:
return log.isWarnEnabled();
default:
}
return false;
}
}
| CamelLogger |
java | junit-team__junit5 | jupiter-tests/src/test/java/org/junit/jupiter/engine/MultipleTestableAnnotationsTests.java | {
"start": 952,
"end": 1649
} | class ____ extends AbstractJupiterTestEngineTests {
@Test
void testAndRepeatedTest() throws Exception {
var results = discoverTestsForClass(TestCase.class);
var discoveryIssue = getOnlyElement(results.getDiscoveryIssues());
assertThat(discoveryIssue.severity()) //
.isEqualTo(Severity.WARNING);
assertThat(discoveryIssue.message()) //
.matches("Possible configuration error: method .+ resulted in multiple TestDescriptors .+");
assertThat(discoveryIssue.source()) //
.contains(
MethodSource.from(TestCase.class.getDeclaredMethod("testAndRepeatedTest", RepetitionInfo.class)));
}
@SuppressWarnings("JUnitMalformedDeclaration")
static | MultipleTestableAnnotationsTests |
java | apache__kafka | metadata/src/main/java/org/apache/kafka/image/node/MetadataLeafNode.java | {
"start": 911,
"end": 1261
} | class ____ implements MetadataNode {
private final String string;
public MetadataLeafNode(String string) {
this.string = string;
}
@Override
public boolean isDirectory() {
return false;
}
@Override
public void print(MetadataNodePrinter printer) {
printer.output(string);
}
}
| MetadataLeafNode |
java | micronaut-projects__micronaut-core | core/src/main/java/io/micronaut/core/reflect/InstantiationUtils.java | {
"start": 5739,
"end": 8454
} | class ____ the most optimal strategy first trying the {@link io.micronaut.core.beans.BeanIntrospector} and
* if no bean is present falling back to reflection.
*
* @param type The type
* @param <T> The generic type
* @return The instantiated instance or {@link Optional#empty()}
*/
public static @NonNull <T> Optional<T> tryInstantiate(@NonNull Class<T> type) {
ArgumentUtils.requireNonNull("type", type);
final Supplier<T> reflectionFallback = () -> {
final Logger logger = ClassUtils.REFLECTION_LOGGER;
if (logger.isDebugEnabled()) {
logger.debug("Cannot instantiate type [{}] without reflection. Attempting reflective instantiation", type);
}
try {
T bean = type.getDeclaredConstructor().newInstance();
if (type.isInstance(bean)) {
return bean;
}
return null;
} catch (Throwable e) {
try {
Constructor<T> defaultConstructor = type.getDeclaredConstructor();
defaultConstructor.setAccessible(true);
return tryInstantiate(defaultConstructor).orElse(null);
} catch (Throwable e1) {
Logger log = LoggerFactory.getLogger(InstantiationUtils.class);
if (log.isDebugEnabled()) {
log.debug("Tried, but could not instantiate type: {}", type, e);
}
return null;
}
}
};
final T result = BeanIntrospector.SHARED.findIntrospection(type).map(introspection -> {
try {
return introspection.instantiate();
} catch (InstantiationException e) {
return reflectionFallback.get();
}
}).orElseGet(reflectionFallback);
return Optional.ofNullable(result);
}
/**
* Try to instantiate the given class.
*
* @param type The type
* @param args The arguments to the constructor
* @param <T> The generic type
* @return The instantiated instance or {@link Optional#empty()}
*/
public static @NonNull <T> Optional<T> tryInstantiate(@NonNull Constructor<T> type, Object... args) {
try {
return Optional.of(type.newInstance(args));
} catch (Throwable e) {
Logger log = ClassUtils.REFLECTION_LOGGER;
if (log.isDebugEnabled()) {
log.debug("Tried, but could not instantiate type: {}", type, e);
}
return Optional.empty();
}
}
/**
* Instantiate the given | using |
java | apache__maven | impl/maven-cli/src/test/java/org/apache/maven/cling/transfer/FileSizeFormatTest.java | {
"start": 1474,
"end": 15292
} | class ____ {
static Locale original;
@BeforeAll
static void beforeAll() {
original = Locale.getDefault();
Locale.setDefault(Locale.US);
}
@AfterAll
static void afterAll() {
Locale.setDefault(original);
}
@Test
void testNegativeSize() {
FileSizeFormat format = new FileSizeFormat();
assertThrows(IllegalArgumentException.class, () -> format.format(-100L));
}
static Stream<Arguments> sizeTestData() {
return Stream.of(
Arguments.of(0L, "0 B"),
Arguments.of(5L, "5 B"),
Arguments.of(10L, "10 B"),
Arguments.of(15L, "15 B"),
Arguments.of(999L, "999 B"),
Arguments.of(1000L, "1.0 kB"),
Arguments.of(5500L, "5.5 kB"),
Arguments.of(10L * 1000L, "10 kB"),
Arguments.of(15L * 1000L, "15 kB"),
Arguments.of(999L * 1000L, "999 kB"),
Arguments.of(1000L * 1000L, "1.0 MB"),
Arguments.of(5500L * 1000L, "5.5 MB"),
Arguments.of(10L * 1000L * 1000L, "10 MB"),
Arguments.of(15L * 1000L * 1000L, "15 MB"),
Arguments.of(999L * 1000L * 1000L, "999 MB"),
Arguments.of(1000L * 1000L * 1000L, "1.0 GB"),
Arguments.of(5500L * 1000L * 1000L, "5.5 GB"),
Arguments.of(10L * 1000L * 1000L * 1000L, "10 GB"),
Arguments.of(15L * 1000L * 1000L * 1000L, "15 GB"),
Arguments.of(1000L * 1000L * 1000L * 1000L, "1000 GB"));
}
@ParameterizedTest
@MethodSource("sizeTestData")
void testSize(long input, String expected) {
FileSizeFormat format = new FileSizeFormat();
assertEquals(expected, format.format(input));
}
static Stream<Arguments> sizeWithScaleUnitTestData() {
return Stream.of(
// 0 bytes
Arguments.of(0L, null, "0 B"),
Arguments.of(0L, ScaleUnit.BYTE, "0 B"),
Arguments.of(0L, ScaleUnit.KILOBYTE, "0 kB"),
Arguments.of(0L, ScaleUnit.MEGABYTE, "0 MB"),
Arguments.of(0L, ScaleUnit.GIGABYTE, "0 GB"),
// 5 bytes
Arguments.of(5L, null, "5 B"),
Arguments.of(5L, ScaleUnit.BYTE, "5 B"),
Arguments.of(5L, ScaleUnit.KILOBYTE, "0 kB"),
Arguments.of(5L, ScaleUnit.MEGABYTE, "0 MB"),
Arguments.of(5L, ScaleUnit.GIGABYTE, "0 GB"),
// 49 bytes
Arguments.of(49L, null, "49 B"),
Arguments.of(49L, ScaleUnit.BYTE, "49 B"),
Arguments.of(49L, ScaleUnit.KILOBYTE, "0 kB"),
Arguments.of(49L, ScaleUnit.MEGABYTE, "0 MB"),
Arguments.of(49L, ScaleUnit.GIGABYTE, "0 GB"),
// 50 bytes
Arguments.of(50L, null, "50 B"),
Arguments.of(50L, ScaleUnit.BYTE, "50 B"),
Arguments.of(50L, ScaleUnit.KILOBYTE, "0.1 kB"),
Arguments.of(50L, ScaleUnit.MEGABYTE, "0 MB"),
Arguments.of(50L, ScaleUnit.GIGABYTE, "0 GB"),
// 999 bytes
Arguments.of(999L, null, "999 B"),
Arguments.of(999L, ScaleUnit.BYTE, "999 B"),
Arguments.of(999L, ScaleUnit.KILOBYTE, "1.0 kB"),
Arguments.of(999L, ScaleUnit.MEGABYTE, "0 MB"),
Arguments.of(999L, ScaleUnit.GIGABYTE, "0 GB"),
// 1000 bytes
Arguments.of(1000L, null, "1.0 kB"),
Arguments.of(1000L, ScaleUnit.BYTE, "1000 B"),
Arguments.of(1000L, ScaleUnit.KILOBYTE, "1.0 kB"),
Arguments.of(1000L, ScaleUnit.MEGABYTE, "0 MB"),
Arguments.of(1000L, ScaleUnit.GIGABYTE, "0 GB"),
// 49 kilobytes
Arguments.of(49L * 1000L, null, "49 kB"),
Arguments.of(49L * 1000L, ScaleUnit.BYTE, "49000 B"),
Arguments.of(49L * 1000L, ScaleUnit.KILOBYTE, "49 kB"),
Arguments.of(49L * 1000L, ScaleUnit.MEGABYTE, "0 MB"),
Arguments.of(49L * 1000L, ScaleUnit.GIGABYTE, "0 GB"),
// 50 kilobytes
Arguments.of(50L * 1000L, null, "50 kB"),
Arguments.of(50L * 1000L, ScaleUnit.BYTE, "50000 B"),
Arguments.of(50L * 1000L, ScaleUnit.KILOBYTE, "50 kB"),
Arguments.of(50L * 1000L, ScaleUnit.MEGABYTE, "0.1 MB"),
Arguments.of(50L * 1000L, ScaleUnit.GIGABYTE, "0 GB"),
// 999 kilobytes
Arguments.of(999L * 1000L, null, "999 kB"),
Arguments.of(999L * 1000L, ScaleUnit.BYTE, "999000 B"),
Arguments.of(999L * 1000L, ScaleUnit.KILOBYTE, "999 kB"),
Arguments.of(999L * 1000L, ScaleUnit.MEGABYTE, "1.0 MB"),
Arguments.of(999L * 1000L, ScaleUnit.GIGABYTE, "0 GB"),
// 1000 kilobytes
Arguments.of(1000L * 1000L, null, "1.0 MB"),
Arguments.of(1000L * 1000L, ScaleUnit.BYTE, "1000000 B"),
Arguments.of(1000L * 1000L, ScaleUnit.KILOBYTE, "1000 kB"),
Arguments.of(1000L * 1000L, ScaleUnit.MEGABYTE, "1.0 MB"),
Arguments.of(1000L * 1000L, ScaleUnit.GIGABYTE, "0 GB"),
// 49 megabytes
Arguments.of(49L * 1000L * 1000L, null, "49 MB"),
Arguments.of(49L * 1000L * 1000L, ScaleUnit.BYTE, "49000000 B"),
Arguments.of(49L * 1000L * 1000L, ScaleUnit.KILOBYTE, "49000 kB"),
Arguments.of(49L * 1000L * 1000L, ScaleUnit.MEGABYTE, "49 MB"),
Arguments.of(49L * 1000L * 1000L, ScaleUnit.GIGABYTE, "0 GB"),
// 50 megabytes
Arguments.of(50L * 1000L * 1000L, null, "50 MB"),
Arguments.of(50L * 1000L * 1000L, ScaleUnit.BYTE, "50000000 B"),
Arguments.of(50L * 1000L * 1000L, ScaleUnit.KILOBYTE, "50000 kB"),
Arguments.of(50L * 1000L * 1000L, ScaleUnit.MEGABYTE, "50 MB"),
Arguments.of(50L * 1000L * 1000L, ScaleUnit.GIGABYTE, "0.1 GB"),
// 999 megabytes
Arguments.of(999L * 1000L * 1000L, null, "999 MB"));
}
@ParameterizedTest
@MethodSource("sizeWithScaleUnitTestData")
void testSizeWithSelectedScaleUnit(long input, ScaleUnit unit, String expected) {
FileSizeFormat format = new FileSizeFormat();
if (unit == null) {
assertEquals(expected, format.format(input));
} else {
assertEquals(expected, format.format(input, unit));
}
}
@Test
void testNegativeProgressedSize() {
FileSizeFormat format = new FileSizeFormat();
long negativeProgressedSize = -100L;
assertThrows(IllegalArgumentException.class, () -> format.formatProgress(negativeProgressedSize, 10L));
}
@Test
void testNegativeProgressedSizeBiggerThanSize() {
FileSizeFormat format = new FileSizeFormat();
assertThrows(IllegalArgumentException.class, () -> format.formatProgress(100L, 10L));
}
static Stream<Arguments> progressedSizeWithoutSizeTestData() {
return Stream.of(
Arguments.of(0L, "0 B"),
Arguments.of(1000L, "1.0 kB"),
Arguments.of(1000L * 1000L, "1.0 MB"),
Arguments.of(1000L * 1000L * 1000L, "1.0 GB"));
}
@ParameterizedTest
@MethodSource("progressedSizeWithoutSizeTestData")
void testProgressedSizeWithoutSize(long progressedSize, String expected) {
FileSizeFormat format = new FileSizeFormat();
assertEquals(expected, format.formatProgress(progressedSize, -1L));
}
static Stream<Arguments> progressedSizeWithSizeTestData() {
return Stream.of(
// Zero test
Arguments.of(0L, 0L, "0 B"),
// Bytes tests
Arguments.of(0L, 800L, "0/800 B"),
Arguments.of(400L, 800L, "400/800 B"),
Arguments.of(800L, 800L, "800 B"),
// Kilobytes tests
Arguments.of(0L, 8000L, "0/8.0 kB"),
Arguments.of(400L, 8000L, "0.4/8.0 kB"),
Arguments.of(4000L, 8000L, "4.0/8.0 kB"),
Arguments.of(8000L, 8000L, "8.0 kB"),
Arguments.of(8000L, 50000L, "8.0/50 kB"),
Arguments.of(16000L, 50000L, "16/50 kB"),
Arguments.of(50000L, 50000L, "50 kB"),
// Megabytes tests
Arguments.of(0L, 5000000L, "0/5.0 MB"),
Arguments.of(500000L, 5000000L, "0.5/5.0 MB"),
Arguments.of(1000000L, 5000000L, "1.0/5.0 MB"),
Arguments.of(5000000L, 5000000L, "5.0 MB"),
Arguments.of(5000000L, 15000000L, "5.0/15 MB"),
Arguments.of(15000000L, 15000000L, "15 MB"),
// Gigabytes tests
Arguments.of(0L, 500000000L, "0/500 MB"),
Arguments.of(1000000000L, 5000000000L, "1.0/5.0 GB"),
Arguments.of(5000000000L, 5000000000L, "5.0 GB"),
Arguments.of(5000000000L, 15000000000L, "5.0/15 GB"),
Arguments.of(15000000000L, 15000000000L, "15 GB"));
}
@ParameterizedTest
@MethodSource("progressedSizeWithSizeTestData")
void testProgressedSizeWithSize(long progressedSize, long totalSize, String expected) {
FileSizeFormat format = new FileSizeFormat();
assertEquals(expected, format.formatProgress(progressedSize, totalSize));
}
@Test
void testFormatRate() {
FileSizeFormat format = new FileSizeFormat();
// Test bytes per second
MessageBuilder builder = new DefaultMessageBuilder();
format.formatRate(builder, 5.0);
assertEquals("5.0 B/s", builder.build());
// Test kilobytes per second
builder = new DefaultMessageBuilder();
format.formatRate(builder, 5500.0);
assertEquals("5.5 kB/s", builder.build());
// Test megabytes per second
builder = new DefaultMessageBuilder();
format.formatRate(builder, 5500000.0);
assertEquals("5.5 MB/s", builder.build());
// Test gigabytes per second
builder = new DefaultMessageBuilder();
format.formatRate(builder, 5500000000.0);
assertEquals("5.5 GB/s", builder.build());
}
@Test
void testFormatRateThresholds() {
FileSizeFormat format = new FileSizeFormat();
// Test value less than 0.05
// Test exact unit thresholds
MessageBuilder builder = new DefaultMessageBuilder();
format.formatRate(builder, 45.0); // 45 B/s
assertEquals("45.0 B/s", builder.build());
// Test value greater than or equal to 10
builder = new DefaultMessageBuilder();
format.formatRate(builder, 15000.0); // 15 kB/s
assertEquals("15.0 kB/s", builder.build());
// Test value between 0.05 and 10
builder = new DefaultMessageBuilder();
format.formatRate(builder, 5500.0); // 5.5 kB/s
assertEquals("5.5 kB/s", builder.build());
// Test exact unit thresholds
builder = new DefaultMessageBuilder();
format.formatRate(builder, 1000.0); // 1 kB/s
assertEquals("1.0 kB/s", builder.build());
builder = new DefaultMessageBuilder();
format.formatRate(builder, 1000000.0); // 1 MB/s
assertEquals("1.0 MB/s", builder.build());
builder = new DefaultMessageBuilder();
format.formatRate(builder, 1000000000.0); // 1 GB/s
assertEquals("1.0 GB/s", builder.build());
}
@Test
void testFormatRateEdgeCases() {
FileSizeFormat format = new FileSizeFormat();
// Test zero rate
MessageBuilder builder = new DefaultMessageBuilder();
format.formatRate(builder, 0.0);
assertEquals("0.0 B/s", builder.build());
// Test rate at exactly 1000 (1 kB/s)
builder = new DefaultMessageBuilder();
format.formatRate(builder, 1000.0);
assertEquals("1.0 kB/s", builder.build());
// Test rate at exactly 1000000 (1 MB/s)
builder = new DefaultMessageBuilder();
format.formatRate(builder, 1000000.0);
assertEquals("1.0 MB/s", builder.build());
}
@Test
void testFormatRateLargeValues() {
FileSizeFormat format = new FileSizeFormat();
// Test large but valid rates
MessageBuilder builder = new DefaultMessageBuilder();
format.formatRate(builder, 5e12); // 5 TB/s
assertEquals("5000.0 GB/s", builder.build());
// Test very large rate
builder = new DefaultMessageBuilder();
format.formatRate(builder, 1e15); // 1 PB/s
assertEquals("1000000.0 GB/s", builder.build());
}
@Test
void testFormatRateInvalidValues() {
FileSizeFormat format = new FileSizeFormat();
// Test negative rate
MessageBuilder builder = new DefaultMessageBuilder();
format.formatRate(builder, -1.0);
assertEquals("? B/s", builder.build());
// Test NaN
builder = new DefaultMessageBuilder();
format.formatRate(builder, Double.NaN);
assertEquals("? B/s", builder.build());
// Test Infinity
builder = new DefaultMessageBuilder();
format.formatRate(builder, Double.POSITIVE_INFINITY);
assertEquals("? B/s", builder.build());
builder = new DefaultMessageBuilder();
format.formatRate(builder, Double.NEGATIVE_INFINITY);
assertEquals("? B/s", builder.build());
}
}
| FileSizeFormatTest |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/bugs/_3673/Animal.java | {
"start": 198,
"end": 423
} | class ____ {
private AnimalDetails details;
public AnimalDetails getDetails() {
return details;
}
public void setDetails(AnimalDetails details) {
this.details = details;
}
public | Animal |
java | elastic__elasticsearch | x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/GeoTileGridTiler.java | {
"start": 9056,
"end": 11658
} | class ____ extends GeoTileGridTiler {
private final GeoTileBoundedPredicate predicate;
BoundedGeoTileGridTiler(int precision, GeoBoundingBox bbox) {
super(precision);
this.predicate = new GeoTileBoundedPredicate(precision, bbox);
}
@Override
protected boolean validTile(int x, int y, int z) {
return predicate.validTile(x, y, z);
}
@Override
protected long getMaxCells() {
return predicate.getMaxTiles();
}
@Override
protected int setValuesForFullyContainedTile(int xTile, int yTile, int zTile, GeoShapeCellValues values, int valuesIndex) {
// For every level we go down, we half each dimension. The total number of splits is equal to 1 << (levelEnd - levelStart)
final int splits = 1 << precision - zTile;
// The start value of a dimension is calculated by multiplying the value of that dimension at the start level
// by the number of splits. Choose the max value with respect to the bounding box.
final int minY = Math.max(predicate.minY(), yTile * splits);
// The end value of a dimension is calculated by adding to the start value the number of splits.
// Choose the min value with respect to the bounding box.
final int maxY = Math.min(predicate.maxY(), yTile * splits + splits);
// Do the same for the X dimension taking into account that the bounding box might cross the dateline.
if (predicate.crossesDateline()) {
final int westMinX = Math.max(predicate.leftX(), xTile * splits);
final int westMaxX = xTile * splits + splits;
valuesIndex = setValues(values, valuesIndex, minY, maxY, westMinX, westMaxX);
// when the left and right box land in the same tile, we need to make sure we don't count then twice
final int eastMaxX = Math.min(westMinX, Math.min(predicate.rightX(), xTile * splits + splits));
final int eastMinX = xTile * splits;
return setValues(values, valuesIndex, minY, maxY, eastMinX, eastMaxX);
} else {
final int minX = Math.max(predicate.leftX(), xTile * splits);
final int maxX = Math.min(predicate.rightX(), xTile * splits + splits);
return setValues(values, valuesIndex, minY, maxY, minX, maxX);
}
}
}
/**
* Unbounded geotile aggregation. It accepts any tile.
*/
private static | BoundedGeoTileGridTiler |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/annotation/web/configurers/NamespaceHttpBasicTests.java | {
"start": 10650,
"end": 11183
} | class ____ {
AuthenticationEntryPoint authenticationEntryPoint = (request, response, ex) -> response.setStatus(999);
@Bean
SecurityFilterChain filterChain(HttpSecurity http) throws Exception {
// @formatter:off
http
.authorizeHttpRequests((requests) -> requests
.anyRequest().hasRole("USER"))
.httpBasic((basic) -> basic
.authenticationEntryPoint(this.authenticationEntryPoint));
return http.build();
// @formatter:on
}
}
@Configuration
@EnableWebSecurity
static | EntryPointRefHttpBasicConfig |
java | spring-projects__spring-framework | spring-context/src/main/java/org/springframework/validation/beanvalidation/CustomValidatorBean.java | {
"start": 1297,
"end": 2970
} | class ____ extends SpringValidatorAdapter implements Validator, InitializingBean {
private @Nullable ValidatorFactory validatorFactory;
private @Nullable MessageInterpolator messageInterpolator;
private @Nullable TraversableResolver traversableResolver;
/**
* Set the ValidatorFactory to obtain the target Validator from.
* <p>Default is {@link jakarta.validation.Validation#buildDefaultValidatorFactory()}.
*/
public void setValidatorFactory(ValidatorFactory validatorFactory) {
this.validatorFactory = validatorFactory;
}
/**
* Specify a custom MessageInterpolator to use for this Validator.
*/
public void setMessageInterpolator(MessageInterpolator messageInterpolator) {
this.messageInterpolator = messageInterpolator;
}
/**
* Specify a custom TraversableResolver to use for this Validator.
*/
public void setTraversableResolver(TraversableResolver traversableResolver) {
this.traversableResolver = traversableResolver;
}
@Override
public void afterPropertiesSet() {
if (this.validatorFactory == null) {
this.validatorFactory = Validation.buildDefaultValidatorFactory();
}
ValidatorContext validatorContext = this.validatorFactory.usingContext();
MessageInterpolator targetInterpolator = this.messageInterpolator;
if (targetInterpolator == null) {
targetInterpolator = this.validatorFactory.getMessageInterpolator();
}
validatorContext.messageInterpolator(new LocaleContextMessageInterpolator(targetInterpolator));
if (this.traversableResolver != null) {
validatorContext.traversableResolver(this.traversableResolver);
}
setTargetValidator(validatorContext.getValidator());
}
}
| CustomValidatorBean |
java | spring-projects__spring-framework | spring-aop/src/main/java/org/springframework/aop/aspectj/annotation/AspectJBeanFactoryInitializationAotProcessor.java | {
"start": 2209,
"end": 2603
} | class ____ {
private static @Nullable AspectContribution processAheadOfTime(ConfigurableListableBeanFactory beanFactory) {
BeanFactoryAspectJAdvisorsBuilder builder = new BeanFactoryAspectJAdvisorsBuilder(beanFactory);
List<Advisor> advisors = builder.buildAspectJAdvisors();
return (advisors.isEmpty() ? null : new AspectContribution(advisors));
}
}
private static | AspectDelegate |
java | apache__rocketmq | test/src/main/java/org/apache/rocketmq/test/lmq/benchmark/BenchLmqStore.java | {
"start": 2622,
"end": 16747
} | class ____ {
private static Logger logger = LoggerFactory.getLogger(BenchLmqStore.class);
private static String namesrv = System.getProperty("namesrv", "127.0.0.1:9876");
private static String lmqTopic = System.getProperty("lmqTopic", "lmqTestTopic");
private static boolean enableSub = Boolean.parseBoolean(System.getProperty("enableSub", "true"));
private static String queuePrefix = System.getProperty("queuePrefix", "lmqTest");
private static int tps = Integer.parseInt(System.getProperty("tps", "1"));
private static int lmqNum = Integer.parseInt(System.getProperty("lmqNum", "1"));
private static int sendThreadNum = Integer.parseInt(System.getProperty("sendThreadNum", "64"));
private static int consumerThreadNum = Integer.parseInt(System.getProperty("consumerThreadNum", "64"));
private static String brokerName = System.getProperty("brokerName", "broker-a");
private static int size = Integer.parseInt(System.getProperty("size", "128"));
private static int suspendTime = Integer.parseInt(System.getProperty("suspendTime", "2000"));
private static final boolean RETRY_NO_MATCHED_MSG = Boolean.parseBoolean(System.getProperty("retry_no_matched_msg", "false"));
private static boolean benchOffset = Boolean.parseBoolean(System.getProperty("benchOffset", "false"));
private static int benchOffsetNum = Integer.parseInt(System.getProperty("benchOffsetNum", "1"));
private static Map<MessageQueue, Long> offsetMap = new ConcurrentHashMap<>(256);
private static Map<MessageQueue, Boolean> pullStatus = new ConcurrentHashMap<>(256);
private static Map<Integer, Map<MessageQueue, Long>> pullEvent = new ConcurrentHashMap<>(256);
public static DefaultMQProducer defaultMQProducer;
private static int pullConsumerNum = Integer.parseInt(System.getProperty("pullConsumerNum", "8"));
public static DefaultMQPullConsumer[] defaultMQPullConsumers = new DefaultMQPullConsumer[pullConsumerNum];
private static AtomicLong rid = new AtomicLong();
private static final String LMQ_PREFIX = "%LMQ%";
    /**
     * Entry point. Starts one producer and {@code pullConsumerNum} pull consumers,
     * then either runs the offset benchmark ({@code -DbenchOffset=true}) or the
     * send/pull benchmark: a pool of consumer-scheduler threads continuously drains
     * {@code pullEvent} shards while {@link #doSend()} floods messages in.
     *
     * NOTE(review): {@code namesrv} and {@code lmqTopic} are fields declared above
     * this excerpt; their values are assumed to point at a reachable cluster.
     */
    public static void main(String[] args) throws InterruptedException, MQClientException, MQBrokerException,
        RemotingException {
        defaultMQProducer = new DefaultMQProducer();
        defaultMQProducer.setProducerGroup("PID_LMQ_TEST");
        defaultMQProducer.setVipChannelEnabled(false);
        defaultMQProducer.setNamesrvAddr(namesrv);
        defaultMQProducer.start();
        //defaultMQProducer.createTopic(lmqTopic, lmqTopic, 8);
        // Start the fixed pool of pull consumers; doPull routes work to them by event id.
        for (int i = 0; i < pullConsumerNum; i++) {
            DefaultMQPullConsumer defaultMQPullConsumer = new DefaultMQPullConsumer();
            defaultMQPullConsumers[i] = defaultMQPullConsumer;
            defaultMQPullConsumer.setNamesrvAddr(namesrv);
            defaultMQPullConsumer.setVipChannelEnabled(false);
            defaultMQPullConsumer.setConsumerGroup("CID_RMQ_SYS_LMQ_TEST_" + i);
            defaultMQPullConsumer.setInstanceName("CID_RMQ_SYS_LMQ_TEST_" + i);
            defaultMQPullConsumer.setRegisterTopics(new HashSet<>(Collections.singletonList(lmqTopic)));
            defaultMQPullConsumer.setBrokerSuspendMaxTimeMillis(suspendTime);
            // Client-side timeout must exceed the broker suspend time, hence the +1000ms.
            defaultMQPullConsumer.setConsumerTimeoutMillisWhenSuspend(suspendTime + 1000);
            defaultMQPullConsumer.start();
        }
        // Give clients time to fetch routes before issuing any requests.
        Thread.sleep(3000L);
        if (benchOffset) {
            doBenchOffset();
            return;
        }
        // Consumer schedulers: each thread owns one pullEvent shard (keyed by its index)
        // and repeatedly tries to pull every queue in that shard that has no pull in flight.
        ScheduledThreadPoolExecutor consumerPool = new ScheduledThreadPoolExecutor(consumerThreadNum, new ThreadFactoryImpl("test"));
        for (int i = 0; i < consumerThreadNum; i++) {
            final int idx = i;
            consumerPool.scheduleWithFixedDelay(() -> {
                try {
                    Map<MessageQueue, Long> map = pullEvent.get(idx);
                    if (map == null) {
                        return;
                    }
                    for (Map.Entry<MessageQueue, Long> entry : map.entrySet()) {
                        try {
                            Boolean status = pullStatus.get(entry.getKey());
                            if (Boolean.TRUE.equals(status)) {
                                // A pull for this queue is already in flight; skip it this round.
                                continue;
                            }
                            doPull(map, entry.getKey(), entry.getValue());
                        } catch (Exception e) {
                            logger.error("pull broker msg error", e);
                        }
                    }
                } catch (Exception e) {
                    logger.error("exec doPull task error", e);
                }
            }, 1, 1, TimeUnit.MILLISECONDS);
        }
        // Pre-seed one pull event per LMQ queue so consumers start polling even
        // before the first message of each queue is sent.
        if (enableSub && lmqNum > 0 && StringUtils.isNotBlank(brokerName)) {
            for (int i = 0; i < lmqNum; i++) {
                long idx = rid.incrementAndGet();
                String queue = LMQ_PREFIX + queuePrefix + LongMath.mod(idx, lmqNum);
                // LMQ queues always use queue id 0 on the configured broker.
                MessageQueue mq = new MessageQueue(queue, brokerName, 0);
                int queueHash = IntMath.mod(queue.hashCode(), consumerThreadNum);
                pullEvent.putIfAbsent(queueHash, new ConcurrentHashMap<>());
                pullEvent.get(queueHash).put(mq, idx);
            }
        }
        Thread.sleep(5000L);
        doSend();
    }
public static void doSend() {
StringBuilder sb = new StringBuilder();
for (int j = 0; j < size; j += 10) {
sb.append("hello baby");
}
byte[] body = sb.toString().getBytes(StandardCharsets.UTF_8);
String pubKey = "pub";
ExecutorService sendPool = Executors.newFixedThreadPool(sendThreadNum);
for (int i = 0; i < sendThreadNum; i++) {
sendPool.execute(() -> {
while (true) {
if (StatUtil.isOverFlow(pubKey, tps)) {
try {
Thread.sleep(100L);
} catch (InterruptedException e) {
e.printStackTrace();
}
}
long start = System.currentTimeMillis();
try {
long idx = rid.incrementAndGet();
Message message = new Message(lmqTopic, body);
String queue = lmqTopic;
if (lmqNum > 0) {
queue = LMQ_PREFIX + queuePrefix + idx % lmqNum;
message.putUserProperty("INNER_MULTI_DISPATCH", queue);
}
SendResult sendResult = defaultMQProducer.send(message);
StatUtil.addInvoke(pubKey, System.currentTimeMillis() - start);
if (StatUtil.nowTps(pubKey) < 10) {
logger.warn("pub: {} ", sendResult.getMsgId());
}
if (enableSub && null != sendResult.getMessageQueue()) {
MessageQueue mq = new MessageQueue(queue, sendResult.getMessageQueue().getBrokerName(),
lmqNum > 0 ? 0 : sendResult.getMessageQueue().getQueueId());
int queueHash = IntMath.mod(queue.hashCode(), consumerThreadNum);
pullEvent.putIfAbsent(queueHash, new ConcurrentHashMap<>());
pullEvent.get(queueHash).put(mq, idx);
}
} catch (Exception e) {
logger.error("", e);
StatUtil.addInvoke(pubKey, System.currentTimeMillis() - start, false);
}
}
});
}
}
    /**
     * Issues one asynchronous long-poll for {@code mq} if no pull is already in
     * flight for it, then (in the callback) removes the triggering event and
     * advances the cached offset.
     *
     * @param eventMap the pullEvent shard this event came from; the (mq, eventId)
     *                 entry is removed once the pull completes or fails
     * @param mq       queue to pull from
     * @param eventId  event id used both for entry removal (remove(mq, eventId) only
     *                 removes if unchanged) and to pick the consumer instance
     *
     * NOTE(review): pullStatus.putIfAbsent is the mutual-exclusion gate, but the
     * maxOffset lookup above it runs outside that gate — two threads may both
     * resolve the offset before one wins the putIfAbsent race. Looks benign
     * (offsetMap.put is idempotent here) — confirm before restructuring.
     */
    public static void doPull(Map<MessageQueue, Long> eventMap, MessageQueue mq,
        Long eventId) throws RemotingException, InterruptedException, MQClientException {
        if (!enableSub) {
            // Subscription disabled: just drain the event and clear any stale status.
            eventMap.remove(mq, eventId);
            pullStatus.remove(mq);
            return;
        }
        // Route this event to a consumer instance deterministically by event id.
        DefaultMQPullConsumer defaultMQPullConsumer = defaultMQPullConsumers[(int) (eventId % pullConsumerNum)];
        Long offset = offsetMap.get(mq);
        if (offset == null) {
            // First pull for this queue: start from the current max offset (tail),
            // i.e. only consume messages produced after the benchmark started.
            long start = System.currentTimeMillis();
            offset = defaultMQPullConsumer.maxOffset(mq);
            StatUtil.addInvoke("maxOffset", System.currentTimeMillis() - start);
            offsetMap.put(mq, offset);
        }
        long start = System.currentTimeMillis();
        // Gate: only one pull in flight per queue. A non-null return means we lost the race.
        if (null != pullStatus.putIfAbsent(mq, true)) {
            return;
        }
        defaultMQPullConsumer.pullBlockIfNotFound(
            mq, "*", offset, 32,
            new PullCallback() {
                @Override
                public void onSuccess(PullResult pullResult) {
                    StatUtil.addInvoke(pullResult.getPullStatus().name(), System.currentTimeMillis() - start);
                    // Release the gate and consume the event before touching offsets.
                    eventMap.remove(mq, eventId);
                    pullStatus.remove(mq);
                    offsetMap.put(mq, pullResult.getNextBeginOffset());
                    StatUtil.addInvoke("doPull", System.currentTimeMillis() - start);
                    if (PullStatus.NO_MATCHED_MSG.equals(pullResult.getPullStatus()) && RETRY_NO_MATCHED_MSG) {
                        // Optionally re-arm the queue with a fresh event id so it gets pulled again.
                        long idx = rid.incrementAndGet();
                        eventMap.put(mq, idx);
                    }
                    List<MessageExt> list = pullResult.getMsgFoundList();
                    if (list == null || list.isEmpty()) {
                        StatUtil.addInvoke("NoMsg", System.currentTimeMillis() - start);
                        return;
                    }
                    for (MessageExt messageExt : list) {
                        // End-to-end latency: now minus the message's born timestamp.
                        StatUtil.addInvoke("sub", System.currentTimeMillis() - messageExt.getBornTimestamp());
                        if (StatUtil.nowTps("sub") < 10) {
                            logger.warn("sub: {}", messageExt.getMsgId());
                        }
                    }
                }
                @Override
                public void onException(Throwable e) {
                    // Release the gate so the scheduler can retry this queue later.
                    eventMap.remove(mq, eventId);
                    pullStatus.remove(mq);
                    logger.error("", e);
                    StatUtil.addInvoke("doPull", System.currentTimeMillis() - start, false);
                }
            });
    }
public static void doBenchOffset() throws RemotingException, InterruptedException, MQClientException {
ExecutorService sendPool = Executors.newFixedThreadPool(sendThreadNum);
Map<String, Long> offsetMap = new ConcurrentHashMap<>();
String statKey = "benchOffset";
TopicRouteData topicRouteData = defaultMQPullConsumers[0].getDefaultMQPullConsumerImpl().
getRebalanceImpl().getmQClientFactory().getMQClientAPIImpl().
getTopicRouteInfoFromNameServer(lmqTopic, 3000);
HashMap<Long, String> brokerMap = topicRouteData.getBrokerDatas().get(0).getBrokerAddrs();
if (brokerMap == null || brokerMap.isEmpty()) {
return;
}
String brokerAddress = brokerMap.get(MixAll.MASTER_ID);
for (int i = 0; i < sendThreadNum; i++) {
final int flag = i;
sendPool.execute(new Runnable() {
@Override
public void run() {
while (true) {
try {
if (StatUtil.isOverFlow(statKey, tps)) {
Thread.sleep(100L);
}
long start = System.currentTimeMillis();
long id = rid.incrementAndGet();
int index = (Integer.MAX_VALUE & (int) id) % defaultMQPullConsumers.length;
DefaultMQPullConsumer defaultMQPullConsumer = defaultMQPullConsumers[index];
String lmq = LMQ_PREFIX + queuePrefix + id % benchOffsetNum;
String lmqCid = LMQ_PREFIX + "GID_LMQ@@c" + flag + "-" + id % benchOffsetNum;
offsetMap.putIfAbsent(lmq, 0L);
long newOffset1 = offsetMap.get(lmq) + 1;
UpdateConsumerOffsetRequestHeader updateHeader = new UpdateConsumerOffsetRequestHeader();
updateHeader.setTopic(lmq);
updateHeader.setConsumerGroup(lmqCid);
updateHeader.setQueueId(0);
updateHeader.setCommitOffset(newOffset1);
defaultMQPullConsumer
.getDefaultMQPullConsumerImpl()
.getRebalanceImpl()
.getmQClientFactory()
.getMQClientAPIImpl().updateConsumerOffset(brokerAddress, updateHeader, 1000);
QueryConsumerOffsetRequestHeader queryHeader = new QueryConsumerOffsetRequestHeader();
queryHeader.setTopic(lmq);
queryHeader.setConsumerGroup(lmqCid);
queryHeader.setQueueId(0);
long newOffset2 = defaultMQPullConsumer
.getDefaultMQPullConsumerImpl()
.getRebalanceImpl()
.getmQClientFactory()
.getMQClientAPIImpl()
.queryConsumerOffset(brokerAddress, queryHeader, 1000);
offsetMap.put(lmq, newOffset2);
if (newOffset1 != newOffset2) {
StatUtil.addInvoke("ErrorOffset", 1);
}
StatUtil.addInvoke(statKey, System.currentTimeMillis() - start);
} catch (Exception e) {
logger.error("", e);
}
}
}
});
}
}
}
| BenchLmqStore |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/schema/BaseSchemaGeneratorTest.java | {
"start": 1659,
"end": 2263
} | class ____ {
@Id
private Long id;
private String name;
@OneToMany(mappedBy = "author")
private List<Book> books = new ArrayList<>();
//Getters and setters are omitted for brevity
//end::schema-generation-domain-model-example[]
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public List<Book> getBooks() {
return books;
}
//tag::schema-generation-domain-model-example[]
}
@Entity(name = "Book")
public static | Person |
java | elastic__elasticsearch | x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Last.java | {
"start": 592,
"end": 1067
} | class ____ extends TopHits {
public Last(Source source, Expression field, Expression sortField) {
super(source, field, sortField);
}
@Override
protected NodeInfo<Last> info() {
return NodeInfo.create(this, Last::new, field(), orderField());
}
@Override
public Last replaceChildren(List<Expression> newChildren) {
return new Last(source(), newChildren.get(0), newChildren.size() > 1 ? newChildren.get(1) : null);
}
}
| Last |
java | quarkusio__quarkus | extensions/cache/deployment/src/test/java/io/quarkus/cache/test/runtime/UniReturnTypeTest.java | {
"start": 677,
"end": 6875
} | class ____ {
private static final String CACHE_NAME_1 = "test-cache-1";
private static final String CACHE_NAME_2 = "test-cache-2";
private static final String KEY_1 = "key-1";
private static final String KEY_2 = "key-2";
@RegisterExtension
static final QuarkusUnitTest TEST = new QuarkusUnitTest().withApplicationRoot((jar) -> jar.addClass(CachedService.class));
@Inject
CachedService cachedService;
@Test
void testCacheResult() {
// STEP 1
// Action: a method annotated with @CacheResult and returning a Uni is called.
// Expected effect: the method is not invoked, as Uni is lazy.
// Verified by: invocations counter.
Uni<String> uni1 = cachedService.cacheResult1(KEY_1);
assertEquals(0, cachedService.getCacheResultInvocations());
// STEP 2
// Action: same call as STEP 1.
// Expected effect: same as STEP 1 with a different Uni instance returned.
// Verified by: invocations counter and different objects references between STEPS 1 AND 2 results.
Uni<String> uni2 = cachedService.cacheResult1(KEY_1);
assertEquals(0, cachedService.getCacheResultInvocations());
assertNotSame(uni1, uni2);
// STEP 3
// Action: the Uni returned in STEP 1 is subscribed to and we wait for an item event to be fired.
// Expected effect: the method from STEP 1 is invoked and its result is cached.
// Verified by: invocations counter and STEP 4.
String emittedItem1 = uni1.await().indefinitely();
assertEquals(1, cachedService.getCacheResultInvocations());
// STEP 4
// Action: the Uni returned in STEP 2 is subscribed to and we wait for an item event to be fired.
// Expected effect: the method from STEP 2 is not invoked and the value cached in STEP 3 is returned.
// Verified by: invocations counter and same object reference between STEPS 3 and 4 emitted items.
String emittedItem2 = uni2.await().indefinitely();
assertEquals(1, cachedService.getCacheResultInvocations());
assertSame(emittedItem1, emittedItem2);
// STEP 5
// Action: same call as STEP 2 with a different key and an immediate subscription.
// Expected effect: the method is invoked and a new item is emitted (also cached).
// Verified by: invocations counter and different objects references between STEPS 2 and 3 emitted items.
String emittedItem3 = cachedService.cacheResult1("another-key").await().indefinitely();
assertEquals(2, cachedService.getCacheResultInvocations());
assertNotSame(emittedItem2, emittedItem3);
}
@Test
void testCacheInvalidate() {
// First, let's put some data into the caches.
String value1 = cachedService.cacheResult1(KEY_1).await().indefinitely();
Object value2 = cachedService.cacheResult2(KEY_1).await().indefinitely();
Object value3 = cachedService.cacheResult2(KEY_2).await().indefinitely();
// We will invalidate some data (only KEY_1) in all caches later.
Uni<Void> invalidateUni = cachedService.cacheInvalidate(KEY_1);
// For now, the method that will invalidate the data should not be invoked, as Uni is lazy.
assertEquals(0, cachedService.getCacheInvalidateInvocations());
// The data should still be cached at this point.
String value4 = cachedService.cacheResult1(KEY_1).await().indefinitely();
Object value5 = cachedService.cacheResult2(KEY_1).await().indefinitely();
Object value6 = cachedService.cacheResult2(KEY_2).await().indefinitely();
assertSame(value1, value4);
assertSame(value2, value5);
assertSame(value3, value6);
// It's time to perform the data invalidation.
invalidateUni.await().indefinitely();
// The method annotated with @CacheInvalidate should have been invoked now.
assertEquals(1, cachedService.getCacheInvalidateInvocations());
// Let's call the methods annotated with @CacheResult again.
String value7 = cachedService.cacheResult1(KEY_1).await().indefinitely();
Object value8 = cachedService.cacheResult2(KEY_1).await().indefinitely();
Object value9 = cachedService.cacheResult2(KEY_2).await().indefinitely();
// The objects references should be different for the invalidated key.
assertNotSame(value4, value7);
assertNotSame(value5, value8);
// The object reference should remain unchanged for the key that was not invalidated.
assertSame(value6, value9);
}
@Test
void testCacheInvalidateAll() {
// First, let's put some data into the caches.
String value1 = cachedService.cacheResult1(KEY_1).await().indefinitely();
Object value2 = cachedService.cacheResult2(KEY_2).await().indefinitely();
// We will invalidate all the data in all caches later.
Uni<Void> invalidateAllUni = cachedService.cacheInvalidateAll();
// For now, the method that will invalidate the data should not be invoked, as Uni is lazy.
assertEquals(0, cachedService.getCacheInvalidateAllInvocations());
// The data should still be cached at this point.
String value3 = cachedService.cacheResult1(KEY_1).await().indefinitely();
Object value4 = cachedService.cacheResult2(KEY_2).await().indefinitely();
assertSame(value1, value3);
assertSame(value2, value4);
// It's time to perform the data invalidation.
invalidateAllUni.await().indefinitely();
// The method annotated with @CacheInvalidateAll should have been invoked now.
assertEquals(1, cachedService.getCacheInvalidateAllInvocations());
// Let's call the methods annotated with @CacheResult again.
String value5 = cachedService.cacheResult1(KEY_1).await().indefinitely();
Object value6 = cachedService.cacheResult2(KEY_2).await().indefinitely();
// All objects references should be different.
assertNotSame(value1, value5);
assertNotSame(value2, value6);
}
@ApplicationScoped
static | UniReturnTypeTest |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/BadAnnotationImplementationTest.java | {
"start": 8121,
"end": 8208
} | class ____ extends BaseAnnotation {}
""")
.doTest();
}
}
| MyAnnotation |
java | apache__flink | flink-core/src/main/java/org/apache/flink/api/common/accumulators/IntCounter.java | {
"start": 992,
"end": 2670
} | class ____ implements SimpleAccumulator<Integer> {
private static final long serialVersionUID = 1L;
private int localValue = 0;
public IntCounter() {}
public IntCounter(int value) {
this.localValue = value;
}
// ------------------------------------------------------------------------
// Accumulator
// ------------------------------------------------------------------------
/** Consider using {@link #add(int)} instead for primitive int values */
@Override
public void add(Integer value) {
localValue += value;
}
@Override
public Integer getLocalValue() {
return localValue;
}
@Override
public void merge(Accumulator<Integer, Integer> other) {
this.localValue += other.getLocalValue();
}
@Override
public void resetLocal() {
this.localValue = 0;
}
@Override
public IntCounter clone() {
IntCounter result = new IntCounter();
result.localValue = localValue;
return result;
}
// ------------------------------------------------------------------------
// Primitive Specializations
// ------------------------------------------------------------------------
public void add(int value) {
localValue += value;
}
public int getLocalValuePrimitive() {
return this.localValue;
}
// ------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------
@Override
public String toString() {
return "IntCounter " + this.localValue;
}
}
| IntCounter |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/EqualsIncompatibleTypeTest.java | {
"start": 13242,
"end": 13819
} | class ____ implements Entity<E2, EK1, Month, DayOfWeek>, RandomInterface {}
void testMultilayer(Class<? extends Entity<?, ?, ?, ?>> eClazz, Class<? extends E2> e2Clazz) {
if (Objects.equals(eClazz, E1.class)) {
System.out.println("yay");
}
if (Objects.equals(eClazz, E2.class)) {
System.out.println("yay");
}
if (Objects.equals(e2Clazz, E2.class)) {
System.out.println("yay");
}
// BUG: Diagnostic contains: E2 and E1 are incompatible.
if (Objects.equals(e2Clazz, E1.class)) {
System.out.println("boo");
}
}
| E2 |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/RestrictedApiCheckerTest.java | {
"start": 12337,
"end": 12612
} | interface ____ {}
}
""")
.doTest();
}
// https://github.com/google/error-prone/issues/2099
@Test
public void i2099() {
helper
.addSourceLines(
"T.java",
"""
package t;
| Allowlist |
java | spring-projects__spring-framework | spring-webflux/src/main/java/org/springframework/web/reactive/function/client/WebClientResponseException.java | {
"start": 17339,
"end": 17790
} | class ____ extends WebClientResponseException {
InternalServerError(
String statusText, HttpHeaders headers, byte[] body, @Nullable Charset charset,
@Nullable HttpRequest request) {
super(HttpStatus.INTERNAL_SERVER_ERROR, statusText, headers, body, charset, request);
}
}
/**
* {@link WebClientResponseException} for status HTTP 501 Not Implemented.
* @since 5.1
*/
@SuppressWarnings("serial")
public static | InternalServerError |
java | hibernate__hibernate-orm | hibernate-envers/src/main/java/org/hibernate/envers/internal/entities/mapper/id/AbstractIdMapper.java | {
"start": 466,
"end": 608
} | class ____ for identifier mappers.
*
* @author Adam Warski (adam at warski dot org)
* @author Chris Cranford
*/
public abstract | implementation |
java | google__error-prone | check_api/src/test/java/com/google/errorprone/util/FindIdentifiersTest.java | {
"start": 12506,
"end": 12658
} | class ____ {
String s1;
}
""")
.addSourceLines(
"Outer.java",
"""
| ToExtend |
java | elastic__elasticsearch | x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CheckTargetShardsCountStepTests.java | {
"start": 681,
"end": 4043
} | class ____ extends AbstractStepTestCase<CheckTargetShardsCountStep> {
@Override
protected CheckTargetShardsCountStep createRandomInstance() {
return new CheckTargetShardsCountStep(randomStepKey(), randomStepKey(), null);
}
@Override
protected CheckTargetShardsCountStep mutateInstance(CheckTargetShardsCountStep instance) {
StepKey key = instance.getKey();
StepKey nextKey = instance.getNextStepKey();
switch (between(0, 1)) {
case 0 -> key = new StepKey(key.phase(), key.action(), key.name() + randomAlphaOfLength(5));
case 1 -> nextKey = new StepKey(nextKey.phase(), nextKey.action(), nextKey.name() + randomAlphaOfLength(5));
default -> throw new AssertionError("Illegal randomisation branch");
}
return new CheckTargetShardsCountStep(key, nextKey, null);
}
@Override
protected CheckTargetShardsCountStep copyInstance(CheckTargetShardsCountStep instance) {
return new CheckTargetShardsCountStep(instance.getKey(), instance.getNextStepKey(), instance.getNumberOfShards());
}
public void testStepCompleteIfTargetShardsCountIsValid() {
String policyName = "test-ilm-policy";
IndexMetadata indexMetadata = IndexMetadata.builder(randomAlphaOfLength(10))
.settings(settings(IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_NAME, policyName))
.numberOfShards(10)
.numberOfReplicas(randomIntBetween(0, 5))
.build();
ProjectState state = projectStateFromProject(ProjectMetadata.builder(randomProjectIdOrDefault()).put(indexMetadata, false));
CheckTargetShardsCountStep checkTargetShardsCountStep = new CheckTargetShardsCountStep(randomStepKey(), randomStepKey(), 2);
ClusterStateWaitStep.Result result = checkTargetShardsCountStep.isConditionMet(indexMetadata.getIndex(), state);
assertThat(result.complete(), is(true));
}
public void testStepIncompleteIfTargetShardsCountNotValid() {
String indexName = randomAlphaOfLength(10);
String policyName = "test-ilm-policy";
IndexMetadata indexMetadata = IndexMetadata.builder(indexName)
.settings(settings(IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_NAME, policyName))
.numberOfShards(10)
.numberOfReplicas(randomIntBetween(0, 5))
.build();
ProjectState state = projectStateFromProject(ProjectMetadata.builder(randomProjectIdOrDefault()).put(indexMetadata, false));
CheckTargetShardsCountStep checkTargetShardsCountStep = new CheckTargetShardsCountStep(randomStepKey(), randomStepKey(), 3);
ClusterStateWaitStep.Result result = checkTargetShardsCountStep.isConditionMet(indexMetadata.getIndex(), state);
assertThat(result.complete(), is(false));
SingleMessageFieldInfo info = (SingleMessageFieldInfo) result.informationContext();
assertThat(
info.message(),
is(
"lifecycle action of policy ["
+ policyName
+ "] for index ["
+ indexName
+ "] cannot make progress because the target shards count [3] must be a factor of the source index's shards count [10]"
)
);
}
}
| CheckTargetShardsCountStepTests |
java | google__auto | value/src/it/functional/src/test/java/com/google/auto/value/AutoAnnotationTest.java | {
"start": 9612,
"end": 15854
} | class ____,
// fabricate an instance using newEverything that is supposed to be equal to it, and
// fabricate another instance using newEverything that is supposed to be different.
private static final Everything EVERYTHING_FROM_REFLECTION =
AnnotatedWithEverything.class.getAnnotation(Everything.class);
private static final Everything EVERYTHING_FROM_AUTO =
newEverything(
(byte) 1,
(short) 2,
3,
-4,
Float.NaN,
Double.NaN,
'#',
true,
"maybe\nmaybe not\n",
RetentionPolicy.RUNTIME,
newStringValues(new String[] {"whatever"}),
String.class,
new byte[] {5, 6},
new short[] {},
new int[] {7},
new long[] {8, 9},
new float[] {10, 11},
new double[] {Double.NEGATIVE_INFINITY, -12.0, Double.POSITIVE_INFINITY},
new char[] {'?', '!', '\n'},
new boolean[] {false, true, false},
new String[] {"ver", "vers", "vert", "verre", "vair"},
new RetentionPolicy[] {RetentionPolicy.CLASS, RetentionPolicy.RUNTIME},
new StringValues[] {
newStringValues(new String[] {}), newStringValues(new String[] {"foo", "bar"}),
},
String.class,
StringBuilder.class);
private static final Everything EVERYTHING_FROM_AUTO_COLLECTIONS =
newEverythingCollections(
(byte) 1,
(short) 2,
3,
-4,
Float.NaN,
Double.NaN,
'#',
true,
"maybe\nmaybe not\n",
RetentionPolicy.RUNTIME,
newStringValues(new String[] {"whatever"}),
String.class,
Arrays.asList((byte) 5, (byte) 6),
Collections.<Short>emptyList(),
new ArrayList<Integer>(Collections.singleton(7)),
ImmutableSet.of(8L, 9L),
ImmutableSortedSet.of(10f, 11f),
new TreeSet<Double>(
ImmutableList.of(Double.NEGATIVE_INFINITY, -12.0, Double.POSITIVE_INFINITY)),
new LinkedHashSet<Character>(ImmutableList.of('?', '!', '\n')),
ImmutableList.of(false, true, false),
ImmutableList.of("ver", "vers", "vert", "verre", "vair"),
ImmutableSet.of(RetentionPolicy.CLASS, RetentionPolicy.RUNTIME),
ImmutableSet.of(
newStringValues(new String[] {}), newStringValues(new String[] {"foo", "bar"})),
ImmutableList.of(String.class.asSubclass(CharSequence.class), StringBuilder.class));
// .asSubclass because of pre-Java8, where otherwise we get a compilation error because
// the inferred type is <Class<? extends CharSequence & Serializable>>.
private static final Everything EVERYTHING_ELSE_FROM_AUTO =
newEverything(
(byte) 0,
(short) 0,
0,
0,
0,
0,
'0',
false,
"",
RetentionPolicy.SOURCE,
newStringValues(new String[] {""}),
String.class,
new byte[0],
new short[0],
new int[0],
new long[0],
new float[0],
new double[0],
new char[0],
new boolean[0],
new String[0],
new RetentionPolicy[0],
new StringValues[0]);
private static final Everything EVERYTHING_ELSE_FROM_AUTO_COLLECTIONS =
newEverythingCollections(
(byte) 0,
(short) 0,
0,
0,
0,
0,
'0',
false,
"",
RetentionPolicy.SOURCE,
newStringValues(new String[] {""}),
String.class,
ImmutableList.<Byte>of(),
Collections.<Short>emptyList(),
new ArrayList<Integer>(),
Collections.<Long>emptySet(),
ImmutableSortedSet.<Float>of(),
new TreeSet<Double>(),
new LinkedHashSet<Character>(),
ImmutableSet.<Boolean>of(),
ImmutableList.<String>of(),
ImmutableSet.<RetentionPolicy>of(),
Collections.<StringValues>emptySet(),
Collections.<Class<? extends CharSequence>>emptyList());
@Test
public void testEqualsAndHashCode() {
new EqualsTester()
.addEqualityGroup(
EVERYTHING_FROM_REFLECTION, EVERYTHING_FROM_AUTO, EVERYTHING_FROM_AUTO_COLLECTIONS)
.addEqualityGroup(EVERYTHING_ELSE_FROM_AUTO, EVERYTHING_ELSE_FROM_AUTO_COLLECTIONS)
.testEquals();
}
@Test
public void testSerialization() {
Annotation[] instances = {EVERYTHING_FROM_AUTO, EVERYTHING_FROM_AUTO_COLLECTIONS};
for (Annotation instance : instances) {
SerializableTester.reserializeAndAssert(instance);
}
}
@Test
@SuppressWarnings("GetClassOnAnnotation") // yes, we really do want the implementation classes
public void testSerialVersionUid() {
Class<? extends Everything> everythingImpl = EVERYTHING_FROM_AUTO.getClass();
Class<? extends Everything> everythingFromCollectionsImpl =
EVERYTHING_FROM_AUTO_COLLECTIONS.getClass();
assertThat(everythingImpl).isNotEqualTo(everythingFromCollectionsImpl);
long everythingUid = ObjectStreamClass.lookup(everythingImpl).getSerialVersionUID();
long everythingFromCollectionsUid =
ObjectStreamClass.lookup(everythingFromCollectionsImpl).getSerialVersionUID();
// Two different implementations of the same annotation with the same members being provided
// (not defaulted) should have the same serialVersionUID. They won't be serial-compatible, of
// course, because their classes are different. So we're really just checking that the
// serialVersionUID depends only on the names and types of those members.
assertThat(everythingFromCollectionsUid).isEqualTo(everythingUid);
Class<? extends StringValues> stringValuesImpl = newStringValues(new String[0]).getClass();
long stringValuesUid = ObjectStreamClass.lookup(stringValuesImpl).getSerialVersionUID();
// The previous assertion would be vacuously true if every implementation had the same
// serialVersionUID, so check that that's not true.
assertThat(stringValuesUid).isNotEqualTo(everythingUid);
}
public static | AnnotatedWithEverything |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java | {
"start": 1327,
"end": 2170
} | class ____ the include and exclude files for HDFS.
* <p>
* These files control which DataNodes the NameNode expects to see in the
* cluster. Loosely speaking, the include file, if it exists and is not
* empty, is a list of everything we expect to see. The exclude file is
* a list of everything we want to ignore if we do see it.
* <p>
* Entries may or may not specify a port. If they don't, we consider
* them to apply to every DataNode on that host. The code canonicalizes the
* entries into IP addresses.
* <p>
* The code ignores all entries that the DNS fails to resolve their IP
* addresses. This is okay because by default the NN rejects the registrations
* of DNs when it fails to do a forward and reverse lookup. Note that DNS
* resolutions are only done during the loading time to minimize the latency.
*/
public | manages |
java | apache__maven | compat/maven-model-builder/src/main/java/org/apache/maven/model/profile/activation/JdkVersionProfileActivator.java | {
"start": 6415,
"end": 6730
} | class ____ {
private String value;
private boolean closed;
RangeValue(String value, boolean closed) {
this.value = value.trim();
this.closed = closed;
}
@Override
public String toString() {
return value;
}
}
}
| RangeValue |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/index/mapper/blockloader/LongFieldBlockLoaderTests.java | {
"start": 674,
"end": 986
} | class ____ extends NumberFieldBlockLoaderTestCase<Long> {
public LongFieldBlockLoaderTests(Params params) {
super(FieldType.LONG, params);
}
@Override
protected Long convert(Number value, Map<String, Object> fieldMapping) {
return value.longValue();
}
}
| LongFieldBlockLoaderTests |
java | apache__dubbo | dubbo-plugin/dubbo-qos/src/main/java/org/apache/dubbo/qos/command/DefaultCommandExecutor.java | {
"start": 1618,
"end": 5218
} | class ____ implements CommandExecutor {
private static final Logger logger = LoggerFactory.getLogger(DefaultCommandExecutor.class);
private final FrameworkModel frameworkModel;
public DefaultCommandExecutor(FrameworkModel frameworkModel) {
this.frameworkModel = frameworkModel;
}
@Override
public String execute(CommandContext commandContext) throws NoSuchCommandException, PermissionDenyException {
String remoteAddress = Optional.ofNullable(commandContext.getRemote())
.map(Channel::remoteAddress)
.map(Objects::toString)
.orElse("unknown");
logger.info("[Dubbo QoS] Command Process start. Command: " + commandContext.getCommandName() + ", Args: "
+ Arrays.toString(commandContext.getArgs()) + ", Remote Address: " + remoteAddress);
BaseCommand command = null;
try {
command =
frameworkModel.getExtensionLoader(BaseCommand.class).getExtension(commandContext.getCommandName());
} catch (Throwable throwable) {
// can't find command
}
if (command == null) {
logger.info("[Dubbo QoS] Command Not found. Command: " + commandContext.getCommandName()
+ ", Remote Address: " + remoteAddress);
throw new NoSuchCommandException(commandContext.getCommandName());
}
// check permission when configs allow anonymous access
if (commandContext.isAllowAnonymousAccess()) {
PermissionChecker permissionChecker = DefaultAnonymousAccessPermissionChecker.INSTANCE;
try {
permissionChecker = frameworkModel
.getExtensionLoader(PermissionChecker.class)
.getExtension(QosConstants.QOS_PERMISSION_CHECKER);
} catch (Throwable throwable) {
// can't find valid custom permissionChecker
}
final Cmd cmd = command.getClass().getAnnotation(Cmd.class);
final PermissionLevel cmdRequiredPermissionLevel = cmd.requiredPermissionLevel();
if (!permissionChecker.access(commandContext, cmdRequiredPermissionLevel)) {
logger.info(
"[Dubbo QoS] Command Deny to access. Command: " + commandContext.getCommandName() + ", Args: "
+ Arrays.toString(commandContext.getArgs()) + ", Required Permission Level: "
+ cmdRequiredPermissionLevel + ", Remote Address: "
+ remoteAddress);
throw new PermissionDenyException(commandContext.getCommandName());
}
}
try {
String result = command.execute(commandContext, commandContext.getArgs());
if (command.logResult()) {
logger.info("[Dubbo QoS] Command Process success. Command: " + commandContext.getCommandName()
+ ", Args: "
+ Arrays.toString(commandContext.getArgs()) + ", Result: " + result + ", Remote Address: "
+ remoteAddress);
}
return result;
} catch (Throwable t) {
logger.info(
"[Dubbo QoS] Command Process Failed. Command: " + commandContext.getCommandName() + ", Args: "
+ Arrays.toString(commandContext.getArgs()) + ", Remote Address: "
+ remoteAddress,
t);
throw t;
}
}
}
| DefaultCommandExecutor |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/formula/JoinFormulaManyToOneLazyFetchingTest.java | {
"start": 4102,
"end": 4141
} | enum ____ {
TYPE_A, TYPE_B
}
}
| CodeType |
java | elastic__elasticsearch | x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/EclatMapReducerTests.java | {
"start": 1325,
"end": 20241
} | class ____ extends ESTestCase {
private static Supplier<Boolean> doNotCancelSupplier = () -> false;
public void testSimple() throws IOException {
Field field1 = createKeywordFieldTestInstance("keyword1", 0);
Field field2 = createKeywordFieldTestInstance("keyword2", 1);
Field field3 = createKeywordFieldTestInstance("keyword3", 2);
EclatMapReducer eclat = new EclatMapReducer(getTestName(), 0.1, 2, 10, true);
HashBasedTransactionStore transactionStore = eclat.mapInit(mockBigArrays());
eclat.map(mockOneDocument(List.of(tuple(field1, "f1-a"), tuple(field2, "f2-1"), tuple(field3, "f3-A"))), transactionStore);
eclat.map(mockOneDocument(List.of(tuple(field1, "f1-a"), tuple(field2, "f2-1"), tuple(field3, "f3-B"))), transactionStore);
eclat.map(mockOneDocument(List.of(tuple(field1, "f1-a"), tuple(field2, "f2-1"), tuple(field3, "f3-C"))), transactionStore);
eclat.map(mockOneDocument(List.of(tuple(field1, "f1-a"), tuple(field2, "f2-1"), tuple(field3, "f3-D"))), transactionStore);
eclat.map(mockOneDocument(List.of(tuple(field1, "f1-a"), tuple(field2, "f2-1"), tuple(field3, "f3-E"))), transactionStore);
eclat.map(mockOneDocument(List.of(tuple(field1, "f1-b"), tuple(field2, "f2-1"), tuple(field3, "f3-F"))), transactionStore);
eclat.map(mockOneDocument(List.of(tuple(field1, "f1-b"), tuple(field2, "f2-1"), tuple(field3, "f3-G"))), transactionStore);
eclat.map(mockOneDocument(List.of(tuple(field1, "f1-c"), tuple(field2, "f2-1"), tuple(field3, "f3-H"))), transactionStore);
eclat.map(mockOneDocument(List.of(tuple(field1, "f1-d"), tuple(field2, "f2-1"), tuple(field3, "f3-I"))), transactionStore);
eclat.map(mockOneDocument(List.of(tuple(field1, "f1-b"), tuple(field2, "f2-1"), tuple(field3, "f3-J"))), transactionStore);
eclat.map(mockOneDocument(List.of(tuple(field1, "f1-f"), tuple(field2, "f2-1"), tuple(field3, "f3-K"))), transactionStore);
eclat.map(mockOneDocument(List.of(tuple(field1, "f1-a"), tuple(field2, "f2-1"), tuple(field3, "f3-L"))), transactionStore);
EclatMapReducer.EclatResult result = runEclat(eclat, List.of(field1, field2, field3), transactionStore);
assertThat(result.getFrequentItemSets().length, equalTo(2));
assertThat(result.getFrequentItemSets()[0].getSupport(), equalTo(0.5));
assertThat(result.getFrequentItemSets()[1].getSupport(), equalTo(0.25));
assertThat(result.getProfilingInfo().get("unique_items_after_reduce"), equalTo(18L));
assertThat(result.getProfilingInfo().get("total_transactions_after_reduce"), equalTo(12L));
assertThat(result.getProfilingInfo().get("total_items_after_reduce"), equalTo(36L));
}
public void testPruneToNextMainBranch() throws IOException {
Field field1 = createKeywordFieldTestInstance("keyword1", 0);
Field field2 = createKeywordFieldTestInstance("keyword2", 1);
Field field3 = createKeywordFieldTestInstance("keyword3", 2);
// create 3 fields that "follow" field 2
Field field2a = createKeywordFieldTestInstance("keyword2a", 3);
Field field2b = createKeywordFieldTestInstance("keyword2b", 4);
Field field2c = createKeywordFieldTestInstance("keyword2c", 5);
EclatMapReducer eclat = new EclatMapReducer(getTestName(), 0.1, 2, 10, true);
HashBasedTransactionStore transactionStore = eclat.mapInit(mockBigArrays());
eclat.map(
mockOneDocument(
List.of(
tuple(field1, "f1-a"),
tuple(field2, "f2-1"),
tuple(field3, "f3-A"),
tuple(field2a, "f2a-1"),
tuple(field2b, "f2b-1"),
tuple(field2c, "f2c-1")
)
),
transactionStore
);
eclat.map(
mockOneDocument(
List.of(
tuple(field1, "f1-a"),
tuple(field2, "f2-1"),
tuple(field3, "f3-B"),
tuple(field2a, "f2a-1"),
tuple(field2b, "f2b-1"),
tuple(field2c, "f2c-1")
)
),
transactionStore
);
eclat.map(
mockOneDocument(
List.of(
tuple(field1, "f1-a"),
tuple(field2, "f2-1"),
tuple(field3, "f3-C"),
tuple(field2a, "f2a-1"),
tuple(field2b, "f2b-1"),
tuple(field2c, "f2c-1")
)
),
transactionStore
);
eclat.map(
mockOneDocument(
List.of(
tuple(field1, "f1-a"),
tuple(field2, "f2-1"),
tuple(field3, "f3-D"),
tuple(field2a, "f2a-1"),
tuple(field2b, "f2b-1"),
tuple(field2c, "f2c-1")
)
),
transactionStore
);
eclat.map(
mockOneDocument(
List.of(
tuple(field1, "f1-a"),
tuple(field2, "f2-1"),
tuple(field3, "f3-E"),
tuple(field2a, "f2a-1"),
tuple(field2b, "f2b-1"),
tuple(field2c, "f2c-1")
)
),
transactionStore
);
eclat.map(
mockOneDocument(
List.of(
tuple(field1, "f1-b"),
tuple(field2, "f2-1"),
tuple(field3, "f3-F"),
tuple(field2a, "f2a-1"),
tuple(field2b, "f2b-1"),
tuple(field2c, "f2c-1")
)
),
transactionStore
);
eclat.map(
mockOneDocument(
List.of(
tuple(field1, "f1-b"),
tuple(field2, "f2-1"),
tuple(field3, "f3-G"),
tuple(field2a, "f2a-1"),
tuple(field2b, "f2b-1"),
tuple(field2c, "f2c-1")
)
),
transactionStore
);
eclat.map(
mockOneDocument(
List.of(
tuple(field1, "f1-c"),
tuple(field2, "f2-1"),
tuple(field3, "f3-H"),
tuple(field2a, "f2a-1"),
tuple(field2b, "f2b-1"),
tuple(field2c, "f2c-1")
)
),
transactionStore
);
eclat.map(
mockOneDocument(
List.of(
tuple(field1, "f1-d"),
tuple(field2, "f2-1"),
tuple(field3, "f3-I"),
tuple(field2a, "f2a-1"),
tuple(field2b, "f2b-1"),
tuple(field2c, "f2c-1")
)
),
transactionStore
);
eclat.map(
mockOneDocument(
List.of(
tuple(field1, "f1-b"),
tuple(field2, "f2-1"),
tuple(field3, "f3-J"),
tuple(field2a, "f2a-1"),
tuple(field2b, "f2b-1"),
tuple(field2c, "f2c-1")
)
),
transactionStore
);
eclat.map(
mockOneDocument(
List.of(
tuple(field1, "f1-f"),
tuple(field2, "f2-1"),
tuple(field3, "f3-K"),
tuple(field2a, "f2a-1"),
tuple(field2b, "f2b-1"),
tuple(field2c, "f2c-1")
)
),
transactionStore
);
eclat.map(
mockOneDocument(
List.of(
tuple(field1, "f1-a"),
tuple(field2, "f2-1"),
tuple(field3, "f3-L"),
tuple(field2a, "f2a-1"),
tuple(field2b, "f2b-1"),
tuple(field2c, "f2c-1")
)
),
transactionStore
);
EclatMapReducer.EclatResult result = runEclat(eclat, List.of(field1, field2, field3, field2a, field2b, field2c), transactionStore);
assertThat(result.getFrequentItemSets().length, equalTo(3));
assertThat(result.getFrequentItemSets()[0].getSupport(), equalTo(1.0));
assertThat(result.getFrequentItemSets()[1].getSupport(), equalTo(0.5));
assertThat(result.getFrequentItemSets()[2].getSupport(), equalTo(0.25));
assertThat(result.getProfilingInfo().get("unique_items_after_reduce"), equalTo(21L));
assertThat(result.getProfilingInfo().get("total_transactions_after_reduce"), equalTo(12L));
assertThat(result.getProfilingInfo().get("total_items_after_reduce"), equalTo(72L));
assertThat(result.getProfilingInfo().get("item_sets_checked_eclat"), equalTo(47L));
}
public void testPruneToNextMainBranchAfterMinCountPrune() throws IOException {
Field field1 = createKeywordFieldTestInstance("keyword1", 0);
Field field2 = createKeywordFieldTestInstance("keyword2", 1);
Field field3 = createKeywordFieldTestInstance("keyword3", 2);
// create 3 fields that "follow" field 2
Field field2a = createKeywordFieldTestInstance("keyword2a", 3);
Field field2b = createKeywordFieldTestInstance("keyword2b", 4);
Field field2c = createKeywordFieldTestInstance("keyword2c", 5);
// create another field to enforce min count pruning
Field field4 = createKeywordFieldTestInstance("keyword4", 6);
Field field4a = createKeywordFieldTestInstance("keyword4a", 7);
EclatMapReducer eclat = new EclatMapReducer(getTestName(), 0.1, 2, 10, true);
HashBasedTransactionStore transactionStore = eclat.mapInit(mockBigArrays());
eclat.map(
mockOneDocument(
List.of(
tuple(field1, "f1-a"),
tuple(field2, "f2-1"),
tuple(field3, "f3-A"),
tuple(field2a, "f2a-1"),
tuple(field2b, "f2b-1"),
tuple(field2c, "f2c-1"),
tuple(field4, "f4-1"),
tuple(field4a, "f4a-1")
)
),
transactionStore
);
eclat.map(
mockOneDocument(
List.of(
tuple(field1, "f1-a"),
tuple(field2, "f2-1"),
tuple(field3, "f3-B"),
tuple(field2a, "f2a-1"),
tuple(field2b, "f2b-1"),
tuple(field2c, "f2c-1"),
tuple(field4, "f4-2"),
tuple(field4a, "f4a-2")
)
),
transactionStore
);
eclat.map(
mockOneDocument(
List.of(
tuple(field1, "f1-a"),
tuple(field2, "f2-1"),
tuple(field3, "f3-C"),
tuple(field2a, "f2a-1"),
tuple(field2b, "f2b-1"),
tuple(field2c, "f2c-1"),
tuple(field4, "f4-3"),
tuple(field4a, "f4a-3")
)
),
transactionStore
);
eclat.map(
mockOneDocument(
List.of(
tuple(field1, "f1-a"),
tuple(field2, "f2-1"),
tuple(field3, "f3-D"),
tuple(field2a, "f2a-1"),
tuple(field2b, "f2b-1"),
tuple(field2c, "f2c-1"),
tuple(field4, "f4-4"),
tuple(field4a, "f4a-4")
)
),
transactionStore
);
eclat.map(
mockOneDocument(
List.of(
tuple(field1, "f1-a"),
tuple(field2, "f2-1"),
tuple(field3, "f3-E"),
tuple(field2a, "f2a-1"),
tuple(field2b, "f2b-1"),
tuple(field2c, "f2c-1"),
tuple(field4, "f4-5"),
tuple(field4a, "f4a-5")
)
),
transactionStore
);
eclat.map(
mockOneDocument(
List.of(
tuple(field1, "f1-b"),
tuple(field2, "f2-1"),
tuple(field3, "f3-F"),
tuple(field2a, "f2a-1"),
tuple(field2b, "f2b-1"),
tuple(field2c, "f2c-1"),
tuple(field4, "f4-1"),
tuple(field4a, "f4a-1")
)
),
transactionStore
);
eclat.map(
mockOneDocument(
List.of(
tuple(field1, "f1-b"),
tuple(field2, "f2-1"),
tuple(field3, "f3-G"),
tuple(field2a, "f2a-1"),
tuple(field2b, "f2b-1"),
tuple(field2c, "f2c-1"),
tuple(field4, "f4-1"),
tuple(field4a, "f4a-1")
)
),
transactionStore
);
eclat.map(
mockOneDocument(
List.of(
tuple(field1, "f1-c"),
tuple(field2, "f2-1"),
tuple(field3, "f3-H"),
tuple(field2a, "f2a-1"),
tuple(field2b, "f2b-1"),
tuple(field2c, "f2c-1"),
tuple(field4, "f4-6"),
tuple(field4a, "f4a-6")
)
),
transactionStore
);
eclat.map(
mockOneDocument(
List.of(
tuple(field1, "f1-d"),
tuple(field2, "f2-1"),
tuple(field3, "f3-I"),
tuple(field2a, "f2a-1"),
tuple(field2b, "f2b-1"),
tuple(field2c, "f2c-1"),
tuple(field4, "f4-7"),
tuple(field4a, "f4a-7")
)
),
transactionStore
);
eclat.map(
mockOneDocument(
List.of(
tuple(field1, "f1-b"),
tuple(field2, "f2-1"),
tuple(field3, "f3-J"),
tuple(field2a, "f2a-1"),
tuple(field2b, "f2b-1"),
tuple(field2c, "f2c-1"),
tuple(field4, "f4-8"),
tuple(field4a, "f4a-8")
)
),
transactionStore
);
eclat.map(
mockOneDocument(
List.of(
tuple(field1, "f1-f"),
tuple(field2, "f2-1"),
tuple(field3, "f3-K"),
tuple(field2a, "f2a-1"),
tuple(field2b, "f2b-1"),
tuple(field2c, "f2c-1"),
tuple(field4, "f4-3"),
tuple(field4a, "f4a-3")
)
),
transactionStore
);
eclat.map(
mockOneDocument(
List.of(
tuple(field1, "f1-a"),
tuple(field2, "f2-1"),
tuple(field3, "f3-L"),
tuple(field2a, "f2a-1"),
tuple(field2b, "f2b-1"),
tuple(field2c, "f2c-1"),
tuple(field4, "f4-10"),
tuple(field4a, "f4a-10")
)
),
transactionStore
);
EclatMapReducer.EclatResult result = runEclat(
eclat,
List.of(field1, field2, field3, field2a, field2b, field2c, field4, field4a),
transactionStore
);
assertThat(result.getFrequentItemSets().length, equalTo(6));
assertThat(result.getFrequentItemSets()[0].getSupport(), equalTo(1.0));
assertThat(result.getFrequentItemSets()[1].getSupport(), equalTo(0.5));
assertThat(result.getFrequentItemSets()[2].getSupport(), equalTo(0.25));
assertThat(result.getProfilingInfo().get("unique_items_after_reduce"), equalTo(39L));
assertThat(result.getProfilingInfo().get("unique_items_after_prune"), equalTo(10L));
assertThat(result.getProfilingInfo().get("total_transactions_after_reduce"), equalTo(12L));
assertThat(result.getProfilingInfo().get("total_items_after_reduce"), equalTo(96L));
assertThat(result.getProfilingInfo().get("total_items_after_prune"), equalTo(96L));
// the number can vary depending on order, so we can only check a range, which is still much lower than without
// that optimization
assertThat((Long) result.getProfilingInfo().get("item_sets_checked_eclat"), greaterThanOrEqualTo(294L));
assertThat((Long) result.getProfilingInfo().get("item_sets_checked_eclat"), lessThan(310L));
}
private static BigArrays mockBigArrays() {
return new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService());
}
private static Tuple<Field, List<Object>> mockOneField(Field field, String... fieldValues) {
return tuple(field, Arrays.stream(fieldValues).map(v -> new BytesRef(v)).collect(Collectors.toList()));
}
private static Stream<Tuple<Field, List<Object>>> mockOneDocument(List<Tuple<Field, String>> fieldsAndValues) {
return fieldsAndValues.stream().map(fieldAndValue -> mockOneField(fieldAndValue.v1(), fieldAndValue.v2()));
}
private static EclatMapReducer.EclatResult runEclat(
EclatMapReducer eclat,
List<Field> fields,
HashBasedTransactionStore... transactionStores
) throws IOException {
HashBasedTransactionStore transactionStoreForReduce = eclat.reduceInit(mockBigArrays());
for (HashBasedTransactionStore transactionStore : transactionStores) {
ImmutableTransactionStore transactionStoreAfterFinalizing = eclat.mapFinalize(transactionStore, null);
List<ImmutableTransactionStore> allPartitions = List.of(transactionStoreAfterFinalizing);
eclat.reduce(allPartitions.stream(), transactionStoreForReduce, doNotCancelSupplier);
}
return eclat.reduceFinalize(transactionStoreForReduce, fields, doNotCancelSupplier);
}
}
| EclatMapReducerTests |
java | quarkusio__quarkus | extensions/smallrye-health/deployment/src/test/java/io/quarkus/smallrye/health/test/HealthOpenAPITest.java | {
"start": 343,
"end": 2200
} | class ____ {
private static final String OPEN_API_PATH = "/q/openapi";
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(BasicHealthCheck.class, OpenApiRoute.class)
.addAsResource(new StringAsset("quarkus.smallrye-health.openapi.included=true\n"
+ "quarkus.smallrye-openapi.store-schema-directory=target"), "application.properties")
.addAsManifestResource(EmptyAsset.INSTANCE, "beans.xml"));
@Test
void testOpenApiPathAccessResource() {
RestAssured.given().header("Accept", "application/json")
.when().get(OPEN_API_PATH)
.then()
.header("Content-Type", "application/json;charset=UTF-8")
.body("paths", Matchers.hasKey("/q/health/ready"))
.body("paths", Matchers.hasKey("/q/health/live"))
.body("paths", Matchers.hasKey("/q/health/started"))
.body("paths", Matchers.hasKey("/q/health"))
.body("components.schemas.HealthResponse.type", Matchers.equalTo("object"))
.body("components.schemas.HealthResponse.properties.status.type", Matchers.equalTo("string"))
.body("components.schemas.HealthResponse.properties.checks.type", Matchers.equalTo("array"))
.body("components.schemas.HealthCheck.type", Matchers.equalTo("object"))
.body("components.schemas.HealthCheck.properties.status.type", Matchers.equalTo("string"))
.body("components.schemas.HealthCheck.properties.name.type", Matchers.equalTo("string"))
.body("components.schemas.HealthCheck.properties.data.type", Matchers.contains("object", "null"));
}
}
| HealthOpenAPITest |
java | apache__camel | components/camel-hl7/src/generated/java/org/apache/camel/component/hl7/HL721ConverterLoader.java | {
"start": 879,
"end": 35049
} | class ____ implements TypeConverterLoader, CamelContextAware {
private CamelContext camelContext;
public HL721ConverterLoader() {
}
@Override
public void setCamelContext(CamelContext camelContext) {
this.camelContext = camelContext;
}
@Override
public CamelContext getCamelContext() {
return camelContext;
}
@Override
public void load(TypeConverterRegistry registry) throws TypeConverterLoaderException {
try {
registerConverters(registry);
} catch (Throwable e) {
// ignore on load error
}
}
private void registerConverters(TypeConverterRegistry registry) {
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ACK.class, byte[].class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toACK((byte[]) value, exchange);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ACK.class, java.lang.String.class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toACK((java.lang.String) value);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ADR_A19.class, byte[].class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toAdrA19((byte[]) value, exchange);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ADR_A19.class, java.lang.String.class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toAdrA19((java.lang.String) value);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ADT_A01.class, byte[].class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toAdtA01((byte[]) value, exchange);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ADT_A01.class, java.lang.String.class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toAdtA01((java.lang.String) value);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ADT_A02.class, byte[].class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toAdtA02((byte[]) value, exchange);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ADT_A02.class, java.lang.String.class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toAdtA02((java.lang.String) value);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ADT_A03.class, byte[].class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toAdtA03((byte[]) value, exchange);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ADT_A03.class, java.lang.String.class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toAdtA03((java.lang.String) value);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ADT_A04.class, byte[].class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toAdtA04((byte[]) value, exchange);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ADT_A04.class, java.lang.String.class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toAdtA04((java.lang.String) value);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ADT_A05.class, byte[].class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toAdtA05((byte[]) value, exchange);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ADT_A05.class, java.lang.String.class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toAdtA05((java.lang.String) value);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ADT_A06.class, byte[].class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toAdtA06((byte[]) value, exchange);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ADT_A06.class, java.lang.String.class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toAdtA06((java.lang.String) value);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ADT_A07.class, byte[].class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toAdtA07((byte[]) value, exchange);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ADT_A07.class, java.lang.String.class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toAdtA07((java.lang.String) value);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ADT_A08.class, byte[].class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toAdtA08((byte[]) value, exchange);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ADT_A08.class, java.lang.String.class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toAdtA08((java.lang.String) value);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ADT_A09.class, byte[].class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toAdtA09((byte[]) value, exchange);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ADT_A09.class, java.lang.String.class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toAdtA09((java.lang.String) value);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ADT_A10.class, byte[].class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toAdtA10((byte[]) value, exchange);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ADT_A10.class, java.lang.String.class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toAdtA10((java.lang.String) value);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ADT_A11.class, byte[].class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toAdtA11((byte[]) value, exchange);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ADT_A11.class, java.lang.String.class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toAdtA11((java.lang.String) value);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ADT_A12.class, byte[].class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toAdtA12((byte[]) value, exchange);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ADT_A12.class, java.lang.String.class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toAdtA12((java.lang.String) value);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ADT_A13.class, byte[].class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toAdtA13((byte[]) value, exchange);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ADT_A13.class, java.lang.String.class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toAdtA13((java.lang.String) value);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ADT_A14.class, byte[].class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toAdtA14((byte[]) value, exchange);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ADT_A14.class, java.lang.String.class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toAdtA14((java.lang.String) value);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ADT_A15.class, byte[].class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toAdtA15((byte[]) value, exchange);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ADT_A15.class, java.lang.String.class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toAdtA15((java.lang.String) value);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ADT_A16.class, byte[].class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toAdtA16((byte[]) value, exchange);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ADT_A16.class, java.lang.String.class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toAdtA16((java.lang.String) value);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ADT_A17.class, byte[].class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toAdtA17((byte[]) value, exchange);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ADT_A17.class, java.lang.String.class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toAdtA17((java.lang.String) value);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ADT_A18.class, byte[].class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toAdtA18((byte[]) value, exchange);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ADT_A18.class, java.lang.String.class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toAdtA18((java.lang.String) value);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ADT_A20.class, byte[].class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toAdtA20((byte[]) value, exchange);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ADT_A20.class, java.lang.String.class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toAdtA20((java.lang.String) value);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ADT_A21.class, byte[].class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toAdtA21((byte[]) value, exchange);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ADT_A21.class, java.lang.String.class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toAdtA21((java.lang.String) value);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ADT_A22.class, byte[].class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toAdtA22((byte[]) value, exchange);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ADT_A22.class, java.lang.String.class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toAdtA22((java.lang.String) value);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ADT_A23.class, byte[].class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toAdtA23((byte[]) value, exchange);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ADT_A23.class, java.lang.String.class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toAdtA23((java.lang.String) value);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ADT_A24.class, byte[].class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toAdtA24((byte[]) value, exchange);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ADT_A24.class, java.lang.String.class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toAdtA24((java.lang.String) value);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ADT_AXX.class, byte[].class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toAdtAXX((byte[]) value, exchange);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ADT_AXX.class, java.lang.String.class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toAdtAXX((java.lang.String) value);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.BAR_P01.class, byte[].class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toBarP01((byte[]) value, exchange);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.BAR_P01.class, java.lang.String.class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toBarP01((java.lang.String) value);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.BAR_P02.class, byte[].class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toBarP02((byte[]) value, exchange);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.BAR_P02.class, java.lang.String.class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toBarP02((java.lang.String) value);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.DFT_P03.class, byte[].class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toDftP03((byte[]) value, exchange);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.DFT_P03.class, java.lang.String.class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toDftP03((java.lang.String) value);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.DSR_Q01.class, byte[].class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toDsrQ01((byte[]) value, exchange);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.DSR_Q01.class, java.lang.String.class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toDsrQ01((java.lang.String) value);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.DSR_Q03.class, byte[].class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toDsrQ03((byte[]) value, exchange);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.DSR_Q03.class, java.lang.String.class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toDsrQ03((java.lang.String) value);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.MCF_Q02.class, byte[].class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toMcfQ02((byte[]) value, exchange);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.MCF_Q02.class, java.lang.String.class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toMcfQ02((java.lang.String) value);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ORM_O01.class, byte[].class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toOrmO01((byte[]) value, exchange);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ORM_O01.class, java.lang.String.class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toOrmO01((java.lang.String) value);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ORR_O02.class, byte[].class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toOrrO02((byte[]) value, exchange);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ORR_O02.class, java.lang.String.class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toOrrO02((java.lang.String) value);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ORU_R01.class, byte[].class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toOruR01((byte[]) value, exchange);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ORU_R01.class, java.lang.String.class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toOruR01((java.lang.String) value);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ORU_R03.class, byte[].class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toOruR03((byte[]) value, exchange);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.ORU_R03.class, java.lang.String.class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toOruR03((java.lang.String) value);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.QRY_A19.class, byte[].class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toQryA19((byte[]) value, exchange);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.QRY_A19.class, java.lang.String.class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toQryA19((java.lang.String) value);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.QRY_Q01.class, byte[].class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toQryQ01((byte[]) value, exchange);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.QRY_Q01.class, java.lang.String.class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toQryQ01((java.lang.String) value);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.QRY_Q02.class, byte[].class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toQryQ02((byte[]) value, exchange);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.QRY_Q02.class, java.lang.String.class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toQryQ02((java.lang.String) value);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.UDM_Q05.class, byte[].class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toUdmQ05((byte[]) value, exchange);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
addTypeConverter(registry, ca.uhn.hl7v2.model.v21.message.UDM_Q05.class, java.lang.String.class, false,
(type, exchange, value) -> {
Object answer = org.apache.camel.component.hl7.HL721Converter.toUdmQ05((java.lang.String) value);
if (false && answer == null) {
answer = Void.class;
}
return answer;
});
}
private static void addTypeConverter(TypeConverterRegistry registry, Class<?> toType, Class<?> fromType, boolean allowNull, SimpleTypeConverter.ConversionMethod method) {
registry.addTypeConverter(toType, fromType, new SimpleTypeConverter(allowNull, method));
}
}
| HL721ConverterLoader |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamFailureStoreTests.java | {
"start": 774,
"end": 2748
} | class ____ extends AbstractXContentSerializingTestCase<DataStreamFailureStore> {
@Override
protected Writeable.Reader<DataStreamFailureStore> instanceReader() {
return DataStreamFailureStore::new;
}
@Override
protected DataStreamFailureStore createTestInstance() {
return randomFailureStore();
}
@Override
protected DataStreamFailureStore mutateInstance(DataStreamFailureStore instance) {
var enabled = instance.enabled();
var lifecycle = instance.lifecycle();
switch (randomIntBetween(0, 1)) {
case 0 -> enabled = enabled != null && lifecycle != null && randomBoolean() ? null : Boolean.FALSE.equals(enabled);
case 1 -> lifecycle = lifecycle != null && enabled != null && randomBoolean()
? null
: randomValueOtherThan(lifecycle, DataStreamLifecycleTests::randomFailuresLifecycle);
default -> throw new IllegalArgumentException("illegal randomisation branch");
}
return new DataStreamFailureStore(enabled, lifecycle);
}
@Override
protected DataStreamFailureStore doParseInstance(XContentParser parser) throws IOException {
return DataStreamFailureStore.fromXContent(parser);
}
static DataStreamFailureStore randomFailureStore() {
boolean enabledDefined = randomBoolean();
boolean lifecycleDefined = enabledDefined == false || randomBoolean();
return new DataStreamFailureStore(
enabledDefined ? randomBoolean() : null,
lifecycleDefined ? DataStreamLifecycleTests.randomFailuresLifecycle() : null
);
}
public void testInvalidEmptyConfiguration() {
IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> new DataStreamFailureStore(null, null));
assertThat(exception.getMessage(), containsString("at least one non-null configuration value"));
}
}
| DataStreamFailureStoreTests |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/query/resultmapping/ResultMappingAssociationIdClassTest.java | {
"start": 3241,
"end": 3499
} | class ____ {
@Id
@GeneratedValue
private Long id;
private String name;
public ItemEntity() {
}
public ItemEntity(String name) {
this.name = name;
}
}
@Entity( name = "ItemOrder" )
@Table( name = "item_orders" )
public static | ItemEntity |
java | quarkusio__quarkus | integration-tests/smallrye-graphql-client/src/test/java/io/quarkus/it/smallrye/graphql/client/DynamicClientTest.java | {
"start": 418,
"end": 1854
} | class ____ {
@TestHTTPResource
URL url;
@Test
public void testDynamicClientSingleResultOperationOverHttp() {
when()
.get("/dynamic-single-http/" + url.toString())
.then()
.log().everything()
.statusCode(204);
}
@Test
public void testDynamicClientSingleResultOperationOverWebSocket() {
when()
.get("/dynamic-single-websocket/" + url.toString())
.then()
.log().everything()
.statusCode(204);
}
@Test
public void testDynamicClientSubscription() throws Exception {
when()
.get("/dynamic-subscription/" + url.toString())
.then()
.log().everything()
.statusCode(204);
}
@Test
@DisabledOnIntegrationTest(forArtifactTypes = DisabledOnIntegrationTest.ArtifactType.NATIVE_BINARY)
public void testDynamicClientAutowiredUrl() throws Exception {
when()
.get("/autowired-dynamic/")
.then()
.log().everything()
.statusCode(204);
}
@Test
public void testDynamicClientDirective() throws Exception {
when()
.get("/dynamic-directive/" + url.toString())
.then()
.log().everything()
.statusCode(204);
}
}
| DynamicClientTest |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/type/AbstractType.java | {
"start": 480,
"end": 3476
} | class ____ implements Type {
@Override
public boolean isAssociationType() {
return false;
}
@Override
public boolean isCollectionType() {
return false;
}
@Override
public boolean isComponentType() {
return false;
}
@Override
public boolean isEntityType() {
return false;
}
@Override @SuppressWarnings({"rawtypes", "unchecked"})
public int compare(Object x, Object y) {
return ( (Comparable) x ).compareTo(y);
}
@Override
public Serializable disassemble(Object value, SharedSessionContractImplementor session, Object owner)
throws HibernateException {
return value == null ? null : (Serializable) deepCopy( value, session.getFactory() );
}
@Override
public Serializable disassemble(Object value, SessionFactoryImplementor sessionFactory)
throws HibernateException {
return value == null ? null : (Serializable) deepCopy( value, sessionFactory );
}
@Override
public Object assemble(Serializable cached, SharedSessionContractImplementor session, Object owner)
throws HibernateException {
return cached == null ? null : deepCopy( cached, session.getFactory() );
}
@Override
public boolean isDirty(Object old, Object current, SharedSessionContractImplementor session)
throws HibernateException {
return !isSame( old, current );
}
@Override
public boolean isAnyType() {
return false;
}
@Override
public boolean isModified(Object old, Object current, boolean[] checkable, SharedSessionContractImplementor session)
throws HibernateException {
return isDirty( old, current, session );
}
@Override
public boolean isSame(Object x, Object y) throws HibernateException {
return isEqual(x, y );
}
@Override
public boolean isEqual(Object x, Object y) {
return Objects.equals( x, y );
}
@Override
public int getHashCode(Object x) {
return x.hashCode();
}
@Override
public boolean isEqual(Object x, Object y, SessionFactoryImplementor factory) {
return isEqual( x, y );
}
@Override
public int getHashCode(Object x, SessionFactoryImplementor factory) {
return getHashCode( x );
}
@Override
public Object replace(
Object original,
Object target,
SharedSessionContractImplementor session,
Object owner,
Map<Object, Object> copyCache,
ForeignKeyDirection foreignKeyDirection)
throws HibernateException {
return needsReplacement( foreignKeyDirection ) ? replace( original, target, session, owner, copyCache ) : target;
}
private boolean needsReplacement(ForeignKeyDirection foreignKeyDirection) {
// Collection and OneToOne are the only associations that could be TO_PARENT
if ( this instanceof CollectionType || this instanceof OneToOneType ) {
final var associationType = (AssociationType) this;
return associationType.getForeignKeyDirection() == foreignKeyDirection;
}
else {
return ForeignKeyDirection.FROM_PARENT == foreignKeyDirection;
}
}
@Override
public void beforeAssemble(Serializable cached, SharedSessionContractImplementor session) {}
}
| AbstractType |
java | spring-projects__spring-boot | buildpack/spring-boot-buildpack-platform/src/test/java/org/springframework/boot/buildpack/platform/docker/type/ImageArchiveManifestTests.java | {
"start": 1022,
"end": 2328
} | class ____ extends AbstractJsonTests {
@Test
void getLayersReturnsLayers() {
String content = getContentAsString("image-archive-manifest.json");
ImageArchiveManifest manifest = getManifest(content);
List<String> expectedLayers = new ArrayList<>();
for (int blankLayersCount = 0; blankLayersCount < 46; blankLayersCount++) {
expectedLayers.add("blank_" + blankLayersCount);
}
expectedLayers.add("bb09e17fd1bd2ee47155f1349645fcd9fff31e1247c7ed99cad469f1c16a4216.tar");
assertThat(manifest.getEntries()).hasSize(1);
assertThat(manifest.getEntries().get(0).getLayers()).hasSize(47);
assertThat(manifest.getEntries().get(0).getLayers()).isEqualTo(expectedLayers);
}
@Test
void getLayersWithNoLayersReturnsEmptyList() {
String content = "[{\"Layers\": []}]";
ImageArchiveManifest manifest = getManifest(content);
assertThat(manifest.getEntries()).hasSize(1);
assertThat(manifest.getEntries().get(0).getLayers()).isEmpty();
}
@Test
void getLayersWithEmptyManifestReturnsEmptyList() {
String content = "[]";
ImageArchiveManifest manifest = getManifest(content);
assertThat(manifest.getEntries()).isEmpty();
}
private ImageArchiveManifest getManifest(String content) {
return new ImageArchiveManifest(getJsonMapper().readTree(content));
}
}
| ImageArchiveManifestTests |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/inheritance/ManyToManyTreatJoinTest.java | {
"start": 10159,
"end": 10281
} | class ____ extends UnionBase {
public UnionSub2() {
}
public UnionSub2(Integer id) {
super( id );
}
}
}
| UnionSub2 |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/NodeAttributesProvider.java | {
"start": 1033,
"end": 1194
} | class ____
extends AbstractNodeDescriptorsProvider<NodeAttribute> {
public NodeAttributesProvider(String name) {
super(name);
}
}
| NodeAttributesProvider |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/PathOutputCommitterFactory.java | {
"start": 2138,
"end": 6546
} | class ____ extends Configured {
private static final Logger LOG =
LoggerFactory.getLogger(PathOutputCommitterFactory.class);
/**
* Name of the configuration option used to configure the
* output committer factory to use unless there is a specific
* one for a schema.
*/
public static final String COMMITTER_FACTORY_CLASS =
"mapreduce.outputcommitter.factory.class";
/**
* Scheme prefix for per-filesystem scheme committers.
*/
public static final String COMMITTER_FACTORY_SCHEME =
"mapreduce.outputcommitter.factory.scheme";
/**
* String format pattern for per-filesystem scheme committers.
*/
public static final String COMMITTER_FACTORY_SCHEME_PATTERN =
COMMITTER_FACTORY_SCHEME + ".%s";
/**
* The {@link FileOutputCommitter} factory.
*/
public static final String FILE_COMMITTER_FACTORY =
"org.apache.hadoop.mapreduce.lib.output.FileOutputCommitterFactory";
/**
* The {@link FileOutputCommitter} factory.
*/
public static final String NAMED_COMMITTER_FACTORY =
"org.apache.hadoop.mapreduce.lib.output.NamedCommitterFactory";
/**
* The named output committer.
* Creates any committer listed in
*/
public static final String NAMED_COMMITTER_CLASS =
"mapreduce.outputcommitter.named.classname";
/**
* Default committer factory name: {@value}.
*/
public static final String COMMITTER_FACTORY_DEFAULT =
FILE_COMMITTER_FACTORY;
/**
* Create an output committer for a task attempt.
* @param outputPath output path. This may be null.
* @param context context
* @return a new committer
* @throws IOException problems instantiating the committer
*/
public PathOutputCommitter createOutputCommitter(
Path outputPath,
TaskAttemptContext context) throws IOException {
return createFileOutputCommitter(outputPath, context);
}
/**
* Create an instance of the default committer, a {@link FileOutputCommitter}
* for a task.
* @param outputPath the task's output path, or or null if no output path
* has been defined.
* @param context the task attempt context
* @return the committer to use
* @throws IOException problems instantiating the committer
*/
protected final PathOutputCommitter createFileOutputCommitter(
Path outputPath,
TaskAttemptContext context) throws IOException {
LOG.debug("Creating FileOutputCommitter for path {} and context {}",
outputPath, context);
return new FileOutputCommitter(outputPath, context);
}
/**
* Get the committer factory for a configuration.
* @param outputPath the job's output path. If null, it means that the
* schema is unknown and a per-schema factory cannot be determined.
* @param conf configuration
* @return an instantiated committer factory
*/
public static PathOutputCommitterFactory getCommitterFactory(
Path outputPath,
Configuration conf) {
// determine which key to look up the overall one or a schema-specific
// key
LOG.debug("Looking for committer factory for path {}", outputPath);
String key = COMMITTER_FACTORY_CLASS;
if (StringUtils.isEmpty(conf.getTrimmed(key)) && outputPath != null) {
// there is no explicit factory and there's an output path
// Get the scheme of the destination
String scheme = outputPath.toUri().getScheme();
// and see if it has a key
String schemeKey = String.format(COMMITTER_FACTORY_SCHEME_PATTERN,
scheme);
if (StringUtils.isNotEmpty(conf.getTrimmed(schemeKey))) {
// it does, so use that key in the classname lookup
LOG.info("Using schema-specific factory for {}", outputPath);
key = schemeKey;
} else {
LOG.debug("No scheme-specific factory defined in {}", schemeKey);
}
}
// create the factory. Before using Configuration.getClass, check
// for an empty configuration value, as that raises ClassNotFoundException.
Class<? extends PathOutputCommitterFactory> factory;
String trimmedValue = conf.getTrimmed(key, "");
if (StringUtils.isEmpty(trimmedValue)) {
// empty/null value, use default
LOG.info("No output committer factory defined,"
+ " defaulting to FileOutputCommitterFactory");
factory = FileOutputCommitterFactory.class;
} else {
// key is set, get the | PathOutputCommitterFactory |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/FlatpackEndpointBuilderFactory.java | {
"start": 1594,
"end": 26492
} | interface ____
extends
EndpointConsumerBuilder {
default AdvancedFlatpackEndpointConsumerBuilder advanced() {
return (AdvancedFlatpackEndpointConsumerBuilder) this;
}
/**
* Allows for lines to be shorter than expected and ignores the extra
* characters.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: common
*
* @param allowShortLines the value to set
* @return the dsl builder
*/
default FlatpackEndpointConsumerBuilder allowShortLines(boolean allowShortLines) {
doSetProperty("allowShortLines", allowShortLines);
return this;
}
/**
* Allows for lines to be shorter than expected and ignores the extra
* characters.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: common
*
* @param allowShortLines the value to set
* @return the dsl builder
*/
default FlatpackEndpointConsumerBuilder allowShortLines(String allowShortLines) {
doSetProperty("allowShortLines", allowShortLines);
return this;
}
/**
* The default character delimiter for delimited files.
*
* The option is a: <code>char</code> type.
*
* Default: ,
* Group: common
*
* @param delimiter the value to set
* @return the dsl builder
*/
default FlatpackEndpointConsumerBuilder delimiter(char delimiter) {
doSetProperty("delimiter", delimiter);
return this;
}
/**
* The default character delimiter for delimited files.
*
* The option will be converted to a <code>char</code> type.
*
* Default: ,
* Group: common
*
* @param delimiter the value to set
* @return the dsl builder
*/
default FlatpackEndpointConsumerBuilder delimiter(String delimiter) {
doSetProperty("delimiter", delimiter);
return this;
}
/**
* Allows for lines to be longer than expected and ignores the extra
* characters.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: common
*
* @param ignoreExtraColumns the value to set
* @return the dsl builder
*/
default FlatpackEndpointConsumerBuilder ignoreExtraColumns(boolean ignoreExtraColumns) {
doSetProperty("ignoreExtraColumns", ignoreExtraColumns);
return this;
}
/**
* Allows for lines to be longer than expected and ignores the extra
* characters.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: common
*
* @param ignoreExtraColumns the value to set
* @return the dsl builder
*/
default FlatpackEndpointConsumerBuilder ignoreExtraColumns(String ignoreExtraColumns) {
doSetProperty("ignoreExtraColumns", ignoreExtraColumns);
return this;
}
/**
* Whether the first line is ignored for delimited files (for the column
* headers).
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: common
*
* @param ignoreFirstRecord the value to set
* @return the dsl builder
*/
default FlatpackEndpointConsumerBuilder ignoreFirstRecord(boolean ignoreFirstRecord) {
doSetProperty("ignoreFirstRecord", ignoreFirstRecord);
return this;
}
/**
* Whether the first line is ignored for delimited files (for the column
* headers).
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: common
*
* @param ignoreFirstRecord the value to set
* @return the dsl builder
*/
default FlatpackEndpointConsumerBuilder ignoreFirstRecord(String ignoreFirstRecord) {
doSetProperty("ignoreFirstRecord", ignoreFirstRecord);
return this;
}
/**
* Sets the Component to send each row as a separate exchange once
* parsed.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: common
*
* @param splitRows the value to set
* @return the dsl builder
*/
default FlatpackEndpointConsumerBuilder splitRows(boolean splitRows) {
doSetProperty("splitRows", splitRows);
return this;
}
/**
* Sets the Component to send each row as a separate exchange once
* parsed.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: common
*
* @param splitRows the value to set
* @return the dsl builder
*/
default FlatpackEndpointConsumerBuilder splitRows(String splitRows) {
doSetProperty("splitRows", splitRows);
return this;
}
/**
* The text qualifier for delimited files.
*
* The option is a: <code>char</code> type.
*
* Group: common
*
* @param textQualifier the value to set
* @return the dsl builder
*/
default FlatpackEndpointConsumerBuilder textQualifier(char textQualifier) {
doSetProperty("textQualifier", textQualifier);
return this;
}
/**
* The text qualifier for delimited files.
*
* The option will be converted to a <code>char</code> type.
*
* Group: common
*
* @param textQualifier the value to set
* @return the dsl builder
*/
default FlatpackEndpointConsumerBuilder textQualifier(String textQualifier) {
doSetProperty("textQualifier", textQualifier);
return this;
}
/**
* If the polling consumer did not poll any files, you can enable this
* option to send an empty message (no body) instead.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer
*
* @param sendEmptyMessageWhenIdle the value to set
* @return the dsl builder
*/
default FlatpackEndpointConsumerBuilder sendEmptyMessageWhenIdle(boolean sendEmptyMessageWhenIdle) {
doSetProperty("sendEmptyMessageWhenIdle", sendEmptyMessageWhenIdle);
return this;
}
/**
* If the polling consumer did not poll any files, you can enable this
* option to send an empty message (no body) instead.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: consumer
*
* @param sendEmptyMessageWhenIdle the value to set
* @return the dsl builder
*/
default FlatpackEndpointConsumerBuilder sendEmptyMessageWhenIdle(String sendEmptyMessageWhenIdle) {
doSetProperty("sendEmptyMessageWhenIdle", sendEmptyMessageWhenIdle);
return this;
}
/**
* The number of subsequent error polls (failed due some error) that
* should happen before the backoffMultipler should kick-in.
*
* The option is a: <code>int</code> type.
*
* Group: scheduler
*
* @param backoffErrorThreshold the value to set
* @return the dsl builder
*/
default FlatpackEndpointConsumerBuilder backoffErrorThreshold(int backoffErrorThreshold) {
doSetProperty("backoffErrorThreshold", backoffErrorThreshold);
return this;
}
/**
* The number of subsequent error polls (failed due some error) that
* should happen before the backoffMultipler should kick-in.
*
* The option will be converted to a <code>int</code> type.
*
* Group: scheduler
*
* @param backoffErrorThreshold the value to set
* @return the dsl builder
*/
default FlatpackEndpointConsumerBuilder backoffErrorThreshold(String backoffErrorThreshold) {
doSetProperty("backoffErrorThreshold", backoffErrorThreshold);
return this;
}
/**
* The number of subsequent idle polls that should happen before the
* backoffMultipler should kick-in.
*
* The option is a: <code>int</code> type.
*
* Group: scheduler
*
* @param backoffIdleThreshold the value to set
* @return the dsl builder
*/
default FlatpackEndpointConsumerBuilder backoffIdleThreshold(int backoffIdleThreshold) {
doSetProperty("backoffIdleThreshold", backoffIdleThreshold);
return this;
}
/**
* The number of subsequent idle polls that should happen before the
* backoffMultipler should kick-in.
*
* The option will be converted to a <code>int</code> type.
*
* Group: scheduler
*
* @param backoffIdleThreshold the value to set
* @return the dsl builder
*/
default FlatpackEndpointConsumerBuilder backoffIdleThreshold(String backoffIdleThreshold) {
doSetProperty("backoffIdleThreshold", backoffIdleThreshold);
return this;
}
/**
* To let the scheduled polling consumer backoff if there has been a
* number of subsequent idles/errors in a row. The multiplier is then
* the number of polls that will be skipped before the next actual
* attempt is happening again. When this option is in use then
* backoffIdleThreshold and/or backoffErrorThreshold must also be
* configured.
*
* The option is a: <code>int</code> type.
*
* Group: scheduler
*
* @param backoffMultiplier the value to set
* @return the dsl builder
*/
default FlatpackEndpointConsumerBuilder backoffMultiplier(int backoffMultiplier) {
doSetProperty("backoffMultiplier", backoffMultiplier);
return this;
}
/**
* To let the scheduled polling consumer backoff if there has been a
* number of subsequent idles/errors in a row. The multiplier is then
* the number of polls that will be skipped before the next actual
* attempt is happening again. When this option is in use then
* backoffIdleThreshold and/or backoffErrorThreshold must also be
* configured.
*
* The option will be converted to a <code>int</code> type.
*
* Group: scheduler
*
* @param backoffMultiplier the value to set
* @return the dsl builder
*/
default FlatpackEndpointConsumerBuilder backoffMultiplier(String backoffMultiplier) {
doSetProperty("backoffMultiplier", backoffMultiplier);
return this;
}
/**
* Milliseconds before the next poll.
*
* The option is a: <code>long</code> type.
*
* Default: 500
* Group: scheduler
*
* @param delay the value to set
* @return the dsl builder
*/
default FlatpackEndpointConsumerBuilder delay(long delay) {
doSetProperty("delay", delay);
return this;
}
/**
* Milliseconds before the next poll.
*
* The option will be converted to a <code>long</code> type.
*
* Default: 500
* Group: scheduler
*
* @param delay the value to set
* @return the dsl builder
*/
default FlatpackEndpointConsumerBuilder delay(String delay) {
doSetProperty("delay", delay);
return this;
}
/**
* If greedy is enabled, then the ScheduledPollConsumer will run
* immediately again, if the previous run polled 1 or more messages.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: scheduler
*
* @param greedy the value to set
* @return the dsl builder
*/
default FlatpackEndpointConsumerBuilder greedy(boolean greedy) {
doSetProperty("greedy", greedy);
return this;
}
/**
* If greedy is enabled, then the ScheduledPollConsumer will run
* immediately again, if the previous run polled 1 or more messages.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: scheduler
*
* @param greedy the value to set
* @return the dsl builder
*/
default FlatpackEndpointConsumerBuilder greedy(String greedy) {
doSetProperty("greedy", greedy);
return this;
}
/**
* Milliseconds before the first poll starts.
*
* The option is a: <code>long</code> type.
*
* Default: 1000
* Group: scheduler
*
* @param initialDelay the value to set
* @return the dsl builder
*/
default FlatpackEndpointConsumerBuilder initialDelay(long initialDelay) {
doSetProperty("initialDelay", initialDelay);
return this;
}
/**
* Milliseconds before the first poll starts.
*
* The option will be converted to a <code>long</code> type.
*
* Default: 1000
* Group: scheduler
*
* @param initialDelay the value to set
* @return the dsl builder
*/
default FlatpackEndpointConsumerBuilder initialDelay(String initialDelay) {
doSetProperty("initialDelay", initialDelay);
return this;
}
/**
* Specifies a maximum limit of number of fires. So if you set it to 1,
* the scheduler will only fire once. If you set it to 5, it will only
* fire five times. A value of zero or negative means fire forever.
*
* The option is a: <code>long</code> type.
*
* Default: 0
* Group: scheduler
*
* @param repeatCount the value to set
* @return the dsl builder
*/
default FlatpackEndpointConsumerBuilder repeatCount(long repeatCount) {
doSetProperty("repeatCount", repeatCount);
return this;
}
/**
* Specifies a maximum limit of number of fires. So if you set it to 1,
* the scheduler will only fire once. If you set it to 5, it will only
* fire five times. A value of zero or negative means fire forever.
*
* The option will be converted to a <code>long</code> type.
*
* Default: 0
* Group: scheduler
*
* @param repeatCount the value to set
* @return the dsl builder
*/
default FlatpackEndpointConsumerBuilder repeatCount(String repeatCount) {
doSetProperty("repeatCount", repeatCount);
return this;
}
/**
 * The consumer logs a start/complete log line when it polls. This
 * option allows you to configure the logging level for that.
 *
 * The option is a: <code>org.apache.camel.LoggingLevel</code> type.
 *
 * Default: TRACE
 * Group: scheduler
 *
 * @param runLoggingLevel the value to set
 * @return the dsl builder
 */
default FlatpackEndpointConsumerBuilder runLoggingLevel(org.apache.camel.LoggingLevel runLoggingLevel) {
    // Record the option under its property key "runLoggingLevel"; fluent return.
    doSetProperty("runLoggingLevel", runLoggingLevel);
    return this;
}
/**
 * The consumer logs a start/complete log line when it polls. This
 * option allows you to configure the logging level for that.
 *
 * The option will be converted to a
 * <code>org.apache.camel.LoggingLevel</code> type.
 *
 * Default: TRACE
 * Group: scheduler
 *
 * @param runLoggingLevel the value to set
 * @return the dsl builder
 */
default FlatpackEndpointConsumerBuilder runLoggingLevel(String runLoggingLevel) {
    // String overload: stored as-is; converted to LoggingLevel later per the javadoc.
    doSetProperty("runLoggingLevel", runLoggingLevel);
    return this;
}
/**
 * Allows for configuring a custom/shared thread pool to use for the
 * consumer. By default each consumer has its own single threaded thread
 * pool.
 *
 * The option is a:
 * <code>java.util.concurrent.ScheduledExecutorService</code> type.
 *
 * Group: scheduler
 *
 * @param scheduledExecutorService the value to set
 * @return the dsl builder
 */
default FlatpackEndpointConsumerBuilder scheduledExecutorService(ScheduledExecutorService scheduledExecutorService) {
    // Record the option under its property key "scheduledExecutorService"; fluent return.
    doSetProperty("scheduledExecutorService", scheduledExecutorService);
    return this;
}
/**
 * Allows for configuring a custom/shared thread pool to use for the
 * consumer. By default each consumer has its own single threaded thread
 * pool.
 *
 * The option will be converted to a
 * <code>java.util.concurrent.ScheduledExecutorService</code> type.
 *
 * Group: scheduler
 *
 * @param scheduledExecutorService the value to set
 * @return the dsl builder
 */
default FlatpackEndpointConsumerBuilder scheduledExecutorService(String scheduledExecutorService) {
    // String overload: stored as-is (e.g. a bean reference); converted later per the javadoc.
    doSetProperty("scheduledExecutorService", scheduledExecutorService);
    return this;
}
/**
 * To use a cron scheduler from either camel-spring or camel-quartz
 * component. Use value spring or quartz for built in scheduler.
 *
 * The option is a: <code>java.lang.Object</code> type.
 *
 * Default: none
 * Group: scheduler
 *
 * @param scheduler the value to set
 * @return the dsl builder
 */
default FlatpackEndpointConsumerBuilder scheduler(Object scheduler) {
    // Record the option under its property key "scheduler"; fluent return.
    doSetProperty("scheduler", scheduler);
    return this;
}
/**
 * To use a cron scheduler from either camel-spring or camel-quartz
 * component. Use value spring or quartz for built in scheduler.
 *
 * The option will be converted to a <code>java.lang.Object</code> type.
 *
 * Default: none
 * Group: scheduler
 *
 * @param scheduler the value to set
 * @return the dsl builder
 */
default FlatpackEndpointConsumerBuilder scheduler(String scheduler) {
    // String overload: stored as-is (e.g. "spring" or "quartz"); resolved later per the javadoc.
    doSetProperty("scheduler", scheduler);
    return this;
}
/**
 * To configure additional properties when using a custom scheduler or
 * any of the Quartz, Spring based scheduler. This is a multi-value
 * option with prefix: scheduler.
 *
 * The option is a: <code>java.util.Map&lt;java.lang.String,
 * java.lang.Object&gt;</code> type.
 * The option is multivalued, and you can use the
 * schedulerProperties(String, Object) method to add a value (call the
 * method multiple times to set more values).
 *
 * Group: scheduler
 *
 * @param key the option key
 * @param value the option value
 * @return the dsl builder
 */
default FlatpackEndpointConsumerBuilder schedulerProperties(String key, Object value) {
    // Each call adds one entry; the key is namespaced with the "scheduler." prefix.
    doSetMultiValueProperty("schedulerProperties", "scheduler." + key, value);
    return this;
}
/**
 * To configure additional properties when using a custom scheduler or
 * any of the Quartz, Spring based scheduler. This is a multi-value
 * option with prefix: scheduler.
 *
 * The option is a: <code>java.util.Map&lt;java.lang.String,
 * java.lang.Object&gt;</code> type.
 * The option is multivalued, and you can use the
 * schedulerProperties(String, Object) method to add a value (call the
 * method multiple times to set more values).
 *
 * Group: scheduler
 *
 * @param values the values
 * @return the dsl builder
 */
default FlatpackEndpointConsumerBuilder schedulerProperties(Map values) {
    // NOTE(review): raw Map type — presumably Map<String, Object> per the javadoc;
    // the signature is generated and cannot be narrowed without breaking callers.
    // All entries are added under the "scheduler." prefix.
    doSetMultiValueProperties("schedulerProperties", "scheduler.", values);
    return this;
}
/**
 * Whether the scheduler should be auto started.
 *
 * The option is a: <code>boolean</code> type.
 *
 * Default: true
 * Group: scheduler
 *
 * @param startScheduler the value to set
 * @return the dsl builder
 */
default FlatpackEndpointConsumerBuilder startScheduler(boolean startScheduler) {
    // Record the option under its property key "startScheduler"; fluent return.
    doSetProperty("startScheduler", startScheduler);
    return this;
}
/**
 * Whether the scheduler should be auto started.
 *
 * The option will be converted to a <code>boolean</code> type.
 *
 * Default: true
 * Group: scheduler
 *
 * @param startScheduler the value to set
 * @return the dsl builder
 */
default FlatpackEndpointConsumerBuilder startScheduler(String startScheduler) {
    // String overload: stored as-is; converted to boolean later per the javadoc.
    doSetProperty("startScheduler", startScheduler);
    return this;
}
/**
 * Time unit for initialDelay and delay options.
 *
 * The option is a: <code>java.util.concurrent.TimeUnit</code> type.
 *
 * Default: MILLISECONDS
 * Group: scheduler
 *
 * @param timeUnit the value to set
 * @return the dsl builder
 */
default FlatpackEndpointConsumerBuilder timeUnit(TimeUnit timeUnit) {
    // Record the option under its property key "timeUnit"; fluent return.
    doSetProperty("timeUnit", timeUnit);
    return this;
}
/**
 * Time unit for initialDelay and delay options.
 *
 * The option will be converted to a
 * <code>java.util.concurrent.TimeUnit</code> type.
 *
 * Default: MILLISECONDS
 * Group: scheduler
 *
 * @param timeUnit the value to set
 * @return the dsl builder
 */
default FlatpackEndpointConsumerBuilder timeUnit(String timeUnit) {
    // String overload: stored as-is; converted to TimeUnit later per the javadoc.
    doSetProperty("timeUnit", timeUnit);
    return this;
}
/**
 * Controls if fixed delay or fixed rate is used. See
 * ScheduledExecutorService in JDK for details.
 *
 * The option is a: <code>boolean</code> type.
 *
 * Default: true
 * Group: scheduler
 *
 * @param useFixedDelay the value to set
 * @return the dsl builder
 */
default FlatpackEndpointConsumerBuilder useFixedDelay(boolean useFixedDelay) {
    // Record the option under its property key "useFixedDelay"; fluent return.
    doSetProperty("useFixedDelay", useFixedDelay);
    return this;
}
/**
 * Controls if fixed delay or fixed rate is used. See
 * ScheduledExecutorService in JDK for details.
 *
 * The option will be converted to a <code>boolean</code> type.
 *
 * Default: true
 * Group: scheduler
 *
 * @param useFixedDelay the value to set
 * @return the dsl builder
 */
default FlatpackEndpointConsumerBuilder useFixedDelay(String useFixedDelay) {
    // String overload: stored as-is; converted to boolean later per the javadoc.
    doSetProperty("useFixedDelay", useFixedDelay);
    return this;
}
}
/**
* Advanced builder for endpoint consumers for the Flatpack component.
*/
public | FlatpackEndpointConsumerBuilder |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/type/TypeFactoryWithRecursiveTypesTest.java | {
"start": 468,
"end": 503
} | interface ____<T> { }
static | IFace |
java | reactor__reactor-core | reactor-core/src/test/java/reactor/core/publisher/scenarios/FluxTests.java | {
"start": 35060,
"end": 35179
} | class ____ extends Sample {
public OuterSample(Point point) {
super(point);
}
}
private static final | OuterSample |
java | spring-projects__spring-boot | documentation/spring-boot-docs/src/main/java/org/springframework/boot/docs/howto/dataaccess/usemultipleentitymanagers/MyAdditionalEntityManagerFactoryConfiguration.java | {
"start": 1458,
"end": 2966
} | class ____ {
@Qualifier("second")
@Bean(defaultCandidate = false)
@ConfigurationProperties("app.jpa")
public JpaProperties secondJpaProperties() {
return new JpaProperties();
}
@Qualifier("second")
@Bean(defaultCandidate = false)
public LocalContainerEntityManagerFactoryBean secondEntityManagerFactory(@Qualifier("second") DataSource dataSource,
@Qualifier("second") JpaProperties jpaProperties) {
EntityManagerFactoryBuilder builder = createEntityManagerFactoryBuilder(jpaProperties);
return builder.dataSource(dataSource).packages(Order.class).persistenceUnit("second").build();
}
private EntityManagerFactoryBuilder createEntityManagerFactoryBuilder(JpaProperties jpaProperties) {
JpaVendorAdapter jpaVendorAdapter = createJpaVendorAdapter(jpaProperties);
Function<DataSource, Map<String, ?>> jpaPropertiesFactory = (dataSource) -> createJpaProperties(dataSource,
jpaProperties.getProperties());
return new EntityManagerFactoryBuilder(jpaVendorAdapter, jpaPropertiesFactory, null);
}
private JpaVendorAdapter createJpaVendorAdapter(JpaProperties jpaProperties) {
// ... map JPA properties as needed
return new HibernateJpaVendorAdapter();
}
private Map<String, ?> createJpaProperties(DataSource dataSource, Map<String, ?> existingProperties) {
Map<String, ?> jpaProperties = new LinkedHashMap<>(existingProperties);
// ... map JPA properties that require the DataSource (e.g. DDL flags)
return jpaProperties;
}
}
| MyAdditionalEntityManagerFactoryConfiguration |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/deser/jdk/MapDeser2757Test.java | {
"start": 489,
"end": 1231
} | class ____ extends LinkedHashMap<String, String> {
public MyMap() { }
public void setValue(StringWrapper w) { }
public void setValue(IntWrapper w) { }
public long getValue() { return 0L; }
}
// [databind#2757]: should allow deserialization as Map despite conflicting setters
@Test
public void testCanDeserializeMap() throws Exception
{
final ObjectMapper mapper = jsonMapperBuilder()
.build();
MyMap input = new MyMap();
input.put("a", "b");
final String json = mapper.writeValueAsString(input);
MyMap x = mapper.readValue(json, MyMap.class);
assertEquals(1, x.size());
assertEquals("b", input.get("a"));
}
}
| MyMap |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/EmptyIOStatisticsStore.java | {
"start": 1200,
"end": 3926
} | class ____ implements IOStatisticsStore {
/**
* The sole instance of this class.
*/
private static final EmptyIOStatisticsStore INSTANCE =
new EmptyIOStatisticsStore();
/**
* Get the single instance of this class.
* @return a shared, empty instance.
*/
static IOStatisticsStore getInstance() {
return INSTANCE;
}
private EmptyIOStatisticsStore() {
}
@Override
public Map<String, Long> counters() {
return emptyMap();
}
@Override
public Map<String, Long> gauges() {
return emptyMap();
}
@Override
public Map<String, Long> minimums() {
return emptyMap();
}
@Override
public Map<String, Long> maximums() {
return emptyMap();
}
@Override
public Map<String, MeanStatistic> meanStatistics() {
return emptyMap();
}
@Override
public boolean aggregate(@Nullable final IOStatistics statistics) {
return false;
}
@Override
public long incrementCounter(final String key, final long value) {
return 0;
}
@Override
public void setCounter(final String key, final long value) {
}
@Override
public void setGauge(final String key, final long value) {
}
@Override
public long incrementGauge(final String key, final long value) {
return 0;
}
@Override
public void setMaximum(final String key, final long value) {
}
@Override
public long incrementMaximum(final String key, final long value) {
return 0;
}
@Override
public void setMinimum(final String key, final long value) {
}
@Override
public long incrementMinimum(final String key, final long value) {
return 0;
}
@Override
public void addMinimumSample(final String key, final long value) {
}
@Override
public void addMaximumSample(final String key, final long value) {
}
@Override
public void setMeanStatistic(final String key, final MeanStatistic value) {
}
@Override
public void addMeanStatisticSample(final String key, final long value) {
}
@Override
public void reset() {
}
@Override
public AtomicLong getCounterReference(final String key) {
return null;
}
@Override
public AtomicLong getMaximumReference(final String key) {
return null;
}
@Override
public AtomicLong getMinimumReference(final String key) {
return null;
}
@Override
public AtomicLong getGaugeReference(final String key) {
return null;
}
@Override
public MeanStatistic getMeanStatistic(final String key) {
return null;
}
@Override
public void addTimedOperation(final String prefix,
final long durationMillis) {
}
@Override
public void addTimedOperation(final String prefix, final Duration duration) {
}
}
| EmptyIOStatisticsStore |
java | alibaba__nacos | plugin/datasource/src/main/java/com/alibaba/nacos/plugin/datasource/impl/derby/ConfigInfoTagsRelationMapperByDerby.java | {
"start": 1332,
"end": 5959
} | class ____ extends AbstractMapperByDerby implements ConfigTagsRelationMapper {
@Override
public MapperResult findConfigInfo4PageFetchRows(MapperContext context) {
final String appName = (String) context.getWhereParameter(FieldConstant.APP_NAME);
final String dataId = (String) context.getWhereParameter(FieldConstant.DATA_ID);
final String group = (String) context.getWhereParameter(FieldConstant.GROUP_ID);
final String content = (String) context.getWhereParameter(FieldConstant.CONTENT);
final String tenantId = (String) context.getWhereParameter(FieldConstant.TENANT_ID);
final String[] tagArr = (String[]) context.getWhereParameter(FieldConstant.TAG_ARR);
List<Object> paramList = new ArrayList<>();
StringBuilder where = new StringBuilder(" WHERE ");
// 增强 SELECT 子句,包含 desc 字段,但不包含 configTags(Derby 不支持 GROUP_CONCAT)
final String baseSql =
"SELECT a.id,a.data_id,a.group_id,a.tenant_id,a.app_name,a.content,a.md5,a.type,a.encrypted_data_key,a.c_desc "
+ "FROM config_info a LEFT JOIN "
+ "config_tags_relation b ON a.id=b.id";
where.append(" a.tenant_id=? ");
paramList.add(tenantId);
if (StringUtils.isNotBlank(dataId)) {
where.append(" AND a.data_id=? ");
paramList.add(dataId);
}
if (StringUtils.isNotBlank(group)) {
where.append(" AND a.group_id=? ");
paramList.add(group);
}
if (StringUtils.isNotBlank(appName)) {
where.append(" AND a.app_name=? ");
paramList.add(appName);
}
if (!StringUtils.isBlank(content)) {
where.append(" AND a.content LIKE ? ");
paramList.add(content);
}
where.append(" AND b.tag_name IN (");
for (int i = 0; i < tagArr.length; i++) {
if (i != 0) {
where.append(", ");
}
where.append('?');
paramList.add(tagArr[i]);
}
where.append(") ");
String sql = baseSql + where + " OFFSET " + context.getStartRow() + " ROWS FETCH NEXT " + context.getPageSize()
+ " ROWS ONLY";
return new MapperResult(sql, paramList);
}
@Override
public MapperResult findConfigInfoLike4PageFetchRows(MapperContext context) {
final String appName = (String) context.getWhereParameter(FieldConstant.APP_NAME);
final String dataId = (String) context.getWhereParameter(FieldConstant.DATA_ID);
final String group = (String) context.getWhereParameter(FieldConstant.GROUP_ID);
final String content = (String) context.getWhereParameter(FieldConstant.CONTENT);
final String tenantId = (String) context.getWhereParameter(FieldConstant.TENANT_ID);
final String[] tagArr = (String[]) context.getWhereParameter(FieldConstant.TAG_ARR);
final String[] types = (String[]) context.getWhereParameter(FieldConstant.TYPE);
// 增强 SELECT 子句,包含 desc 字段,但不包含 configTags(Derby 不支持 GROUP_CONCAT)
WhereBuilder where = new WhereBuilder(
"SELECT a.ID,a.data_id,a.group_id,a.tenant_id,a.app_name,a.content,a.md5,a.encrypted_data_key,a.type,a.c_desc "
+ "FROM config_info a LEFT JOIN "
+ "config_tags_relation b ON a.id=b.id");
where.like("a.tenant_id", tenantId);
if (StringUtils.isNotBlank(dataId)) {
where.and().like("a.data_id", dataId);
}
if (StringUtils.isNotBlank(group)) {
where.and().like("a.group_id", group);
}
if (StringUtils.isNotBlank(appName)) {
where.and().eq("a.app_name", appName);
}
if (StringUtils.isNotBlank(content)) {
where.and().like("a.content", content);
}
if (!ArrayUtils.isEmpty(tagArr)) {
where.and().startParentheses();
for (int i = 0; i < tagArr.length; i++) {
if (i != 0) {
where.or();
}
where.like("b.tag_name", tagArr[i]);
}
where.endParentheses();
}
if (!ArrayUtils.isEmpty(types)) {
where.and().in("a.type", types);
}
where.offset(context.getStartRow(), context.getPageSize());
return where.build();
}
@Override
public String getDataSource() {
return DataSourceConstant.DERBY;
}
}
| ConfigInfoTagsRelationMapperByDerby |
java | elastic__elasticsearch | x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Round.java | {
"start": 1104,
"end": 1777
} | class ____ extends BinaryOptionalNumericFunction implements OptionalArgument {
public Round(Source source, Expression left, Expression right) {
super(source, left, right);
}
@Override
protected NodeInfo<? extends Expression> info() {
return NodeInfo.create(this, Round::new, left(), right());
}
@Override
protected BinaryOptionalMathOperation operation() {
return BinaryOptionalMathOperation.ROUND;
}
@Override
protected final Expression replacedChildrenInstance(List<Expression> newChildren) {
return new Round(source(), newChildren.get(0), right() == null ? null : newChildren.get(1));
}
}
| Round |
java | spring-projects__spring-boot | module/spring-boot-actuator-autoconfigure/src/main/java/org/springframework/boot/actuate/autoconfigure/env/EnvironmentEndpointAutoConfiguration.java | {
"start": 1953,
"end": 2759
} | class ____ {
@Bean
@ConditionalOnMissingBean
EnvironmentEndpoint environmentEndpoint(Environment environment, EnvironmentEndpointProperties properties,
ObjectProvider<SanitizingFunction> sanitizingFunctions) {
return new EnvironmentEndpoint(environment, sanitizingFunctions.orderedStream().toList(),
properties.getShowValues());
}
@Bean
@ConditionalOnMissingBean
@ConditionalOnBean(EnvironmentEndpoint.class)
@ConditionalOnAvailableEndpoint(exposure = EndpointExposure.WEB)
EnvironmentEndpointWebExtension environmentEndpointWebExtension(EnvironmentEndpoint environmentEndpoint,
EnvironmentEndpointProperties properties) {
return new EnvironmentEndpointWebExtension(environmentEndpoint, properties.getShowValues(),
properties.getRoles());
}
}
| EnvironmentEndpointAutoConfiguration |
java | apache__flink | flink-state-backends/flink-statebackend-rocksdb/src/main/java/org/apache/flink/state/rocksdb/restore/RocksDBIncrementalRestoreOperation.java | {
"start": 4527,
"end": 44848
} | class ____<K> implements RocksDBRestoreOperation {
private static final Logger logger =
LoggerFactory.getLogger(RocksDBIncrementalRestoreOperation.class);
@SuppressWarnings("unchecked")
private static final Class<? extends IncrementalKeyedStateHandle>[]
EXPECTED_STATE_HANDLE_CLASSES =
new Class[] {
IncrementalRemoteKeyedStateHandle.class,
IncrementalLocalKeyedStateHandle.class
};
private final String operatorIdentifier;
private final SortedMap<Long, Collection<HandleAndLocalPath>> restoredSstFiles;
private final RocksDBHandle rocksHandle;
private final Collection<IncrementalKeyedStateHandle> restoreStateHandles;
/**
* This registry will be closed after restore and should only contain Closeables that are closed
* by the end of the restore operation.
*/
private final CloseableRegistry cancelStreamRegistryForRestore;
/**
* This registry will only be closed when the created backend is closed and should be used for
* all Closeables that are closed at some later point after restore.
*/
private final CloseableRegistry cancelRegistryForBackend;
private final KeyGroupRange keyGroupRange;
private final File instanceBasePath;
private final int numberOfTransferringThreads;
private final int keyGroupPrefixBytes;
private final StateSerializerProvider<K> keySerializerProvider;
private final ClassLoader userCodeClassLoader;
private final CustomInitializationMetrics customInitializationMetrics;
private final ResourceGuard dbResourceGuard;
private long lastCompletedCheckpointId;
private UUID backendUID;
private final long writeBatchSize;
private final double overlapFractionThreshold;
private boolean isKeySerializerCompatibilityChecked;
private final boolean useIngestDbRestoreMode;
private final boolean asyncCompactAfterRescale;
private final boolean useDeleteFilesInRange;
private final ExecutorService ioExecutor;
private final AsyncExceptionHandler asyncExceptionHandler;
public RocksDBIncrementalRestoreOperation(
String operatorIdentifier,
KeyGroupRange keyGroupRange,
int keyGroupPrefixBytes,
int numberOfTransferringThreads,
ResourceGuard dbResourceGuard,
CloseableRegistry cancelStreamRegistryForRestore,
CloseableRegistry cancelRegistryForBackend,
ClassLoader userCodeClassLoader,
Map<String, RocksDbKvStateInfo> kvStateInformation,
StateSerializerProvider<K> keySerializerProvider,
File instanceBasePath,
File instanceRocksDBPath,
DBOptions dbOptions,
Function<String, ColumnFamilyOptions> columnFamilyOptionsFactory,
RocksDBNativeMetricOptions nativeMetricOptions,
MetricGroup metricGroup,
CustomInitializationMetrics customInitializationMetrics,
@Nonnull Collection<IncrementalKeyedStateHandle> restoreStateHandles,
@Nonnull RocksDbTtlCompactFiltersManager ttlCompactFiltersManager,
@Nonnegative long writeBatchSize,
Long writeBufferManagerCapacity,
double overlapFractionThreshold,
boolean useIngestDbRestoreMode,
boolean asyncCompactAfterRescale,
boolean useDeleteFilesInRange,
ExecutorService ioExecutor,
AsyncExceptionHandler asyncExceptionHandler) {
this.rocksHandle =
new RocksDBHandle(
kvStateInformation,
instanceRocksDBPath,
dbOptions,
columnFamilyOptionsFactory,
nativeMetricOptions,
metricGroup,
ttlCompactFiltersManager,
writeBufferManagerCapacity);
this.operatorIdentifier = operatorIdentifier;
this.restoredSstFiles = new TreeMap<>();
this.lastCompletedCheckpointId = -1L;
this.backendUID = UUID.randomUUID();
this.writeBatchSize = writeBatchSize;
this.overlapFractionThreshold = overlapFractionThreshold;
this.customInitializationMetrics = customInitializationMetrics;
this.restoreStateHandles = restoreStateHandles;
this.dbResourceGuard = dbResourceGuard;
this.cancelStreamRegistryForRestore = cancelStreamRegistryForRestore;
this.cancelRegistryForBackend = cancelRegistryForBackend;
this.keyGroupRange = keyGroupRange;
this.instanceBasePath = instanceBasePath;
this.numberOfTransferringThreads = numberOfTransferringThreads;
this.keyGroupPrefixBytes = keyGroupPrefixBytes;
this.keySerializerProvider = keySerializerProvider;
this.userCodeClassLoader = userCodeClassLoader;
this.useIngestDbRestoreMode = useIngestDbRestoreMode;
this.asyncCompactAfterRescale = asyncCompactAfterRescale;
this.useDeleteFilesInRange = useDeleteFilesInRange;
this.ioExecutor = ioExecutor;
this.asyncExceptionHandler = asyncExceptionHandler;
}
/**
* Root method that branches for different implementations of {@link
* IncrementalKeyedStateHandle}.
*/
@Override
public RocksDBRestoreResult restore() throws Exception {
if (restoreStateHandles == null || restoreStateHandles.isEmpty()) {
return null;
}
logger.info(
"Starting RocksDB incremental recovery in operator {} "
+ "target key-group range {}. Use IngestDB={}, Use AsyncCompaction={}, State Handles={}",
operatorIdentifier,
keyGroupRange.prettyPrintInterval(),
useIngestDbRestoreMode,
asyncCompactAfterRescale,
restoreStateHandles);
final List<StateHandleDownloadSpec> allDownloadSpecs =
new ArrayList<>(restoreStateHandles.size());
final List<IncrementalLocalKeyedStateHandle> localKeyedStateHandles =
new ArrayList<>(restoreStateHandles.size());
final Path absolutInstanceBasePath = instanceBasePath.getAbsoluteFile().toPath();
try {
runAndReportDuration(
() ->
makeAllStateHandlesLocal(
absolutInstanceBasePath,
localKeyedStateHandles,
allDownloadSpecs),
DOWNLOAD_STATE_DURATION);
runAndReportDuration(
() -> restoreFromLocalState(localKeyedStateHandles), RESTORE_STATE_DURATION);
logger.info(
"Finished RocksDB incremental recovery in operator {} with "
+ "target key-group range range {}.",
operatorIdentifier,
keyGroupRange.prettyPrintInterval());
return new RocksDBRestoreResult(
this.rocksHandle.getDb(),
this.rocksHandle.getDefaultColumnFamilyHandle(),
this.rocksHandle.getNativeMetricMonitor(),
lastCompletedCheckpointId,
backendUID,
restoredSstFiles,
createAsyncCompactionTask());
} finally {
// Cleanup all download directories
allDownloadSpecs.stream()
.map(StateHandleDownloadSpec::getDownloadDestination)
.forEach(this::cleanUpPathQuietly);
}
}
@Nullable
private Runnable createAsyncCompactionTask() {
if (!asyncCompactAfterRescale) {
return null;
}
return () -> {
long t = System.currentTimeMillis();
logger.info(
"Starting async compaction after restore for backend {} in operator {}",
backendUID,
operatorIdentifier);
try {
RunnableWithException asyncRangeCompactionTask =
RocksDBIncrementalCheckpointUtils.createAsyncRangeCompactionTask(
rocksHandle.getDb(),
rocksHandle.getColumnFamilyHandles(),
keyGroupPrefixBytes,
keyGroupRange,
dbResourceGuard,
// This task will be owned by the backend's lifecycle because it
// continues to exist after restore is completed.
cancelRegistryForBackend);
runAndReportDuration(asyncRangeCompactionTask, RESTORE_ASYNC_COMPACTION_DURATION);
logger.info(
"Completed async compaction after restore for backend {} in operator {} after {} ms.",
backendUID,
operatorIdentifier,
System.currentTimeMillis() - t);
} catch (Throwable throwable) {
asyncExceptionHandler.handleAsyncException(
String.format(
"Failed async compaction after restore for backend {} in operator {} after {} ms.",
backendUID,
operatorIdentifier,
System.currentTimeMillis() - t),
throwable);
}
};
}
private void restoreFromLocalState(
List<IncrementalLocalKeyedStateHandle> localKeyedStateHandles) throws Exception {
if (localKeyedStateHandles.size() == 1) {
// This happens if we don't rescale and for some scale out scenarios.
initBaseDBFromSingleStateHandle(localKeyedStateHandles.get(0));
} else {
// This happens for all scale ins and some scale outs.
restoreFromMultipleStateHandles(localKeyedStateHandles);
}
}
/**
* Downloads and converts all {@link IncrementalRemoteKeyedStateHandle}s to {@link
* IncrementalLocalKeyedStateHandle}s.
*
* @param absolutInstanceBasePath the base path of the restoring DB instance as absolute path.
* @param localKeyedStateHandlesOut the output parameter for the created {@link
* IncrementalLocalKeyedStateHandle}s.
* @param allDownloadSpecsOut output parameter for the created download specs.
* @throws Exception if an unexpected state handle type is passed as argument.
*/
private void makeAllStateHandlesLocal(
Path absolutInstanceBasePath,
List<IncrementalLocalKeyedStateHandle> localKeyedStateHandlesOut,
List<StateHandleDownloadSpec> allDownloadSpecsOut)
throws Exception {
// Prepare and collect all the download request to pull remote state to a local directory
for (IncrementalKeyedStateHandle stateHandle : restoreStateHandles) {
if (stateHandle instanceof IncrementalRemoteKeyedStateHandle) {
StateHandleDownloadSpec downloadRequest =
new StateHandleDownloadSpec(
(IncrementalRemoteKeyedStateHandle) stateHandle,
absolutInstanceBasePath.resolve(UUID.randomUUID().toString()));
allDownloadSpecsOut.add(downloadRequest);
} else if (stateHandle instanceof IncrementalLocalKeyedStateHandle) {
localKeyedStateHandlesOut.add((IncrementalLocalKeyedStateHandle) stateHandle);
} else {
throw unexpectedStateHandleException(
EXPECTED_STATE_HANDLE_CLASSES, stateHandle.getClass());
}
}
allDownloadSpecsOut.stream()
.map(StateHandleDownloadSpec::createLocalStateHandleForDownloadedState)
.forEach(localKeyedStateHandlesOut::add);
transferRemoteStateToLocalDirectory(allDownloadSpecsOut);
}
/**
* Initializes the base DB that we restore from a single local state handle.
*
* @param stateHandle the state handle to restore the base DB from.
* @throws Exception on any error during restore.
*/
private void initBaseDBFromSingleStateHandle(IncrementalLocalKeyedStateHandle stateHandle)
throws Exception {
logger.info(
"Starting opening base RocksDB instance in operator {} with target key-group range {} from state handle {}.",
operatorIdentifier,
keyGroupRange.prettyPrintInterval(),
stateHandle);
// Restore base DB from selected initial handle
restoreBaseDBFromLocalState(stateHandle);
KeyGroupRange stateHandleKeyGroupRange = stateHandle.getKeyGroupRange();
// Check if the key-groups range has changed.
if (Objects.equals(stateHandleKeyGroupRange, keyGroupRange)) {
// This is the case if we didn't rescale, so we can restore all the info from the
// previous backend instance (backend id and incremental checkpoint history).
restorePreviousIncrementalFilesStatus(stateHandle);
} else {
// If the key-groups don't match, this was a scale out, and we need to clip the
// key-groups range of the db to the target range for this backend.
try {
RocksDBIncrementalCheckpointUtils.clipDBWithKeyGroupRange(
this.rocksHandle.getDb(),
this.rocksHandle.getColumnFamilyHandles(),
keyGroupRange,
stateHandleKeyGroupRange,
keyGroupPrefixBytes,
useDeleteFilesInRange);
} catch (RocksDBException e) {
String errMsg = "Failed to clip DB after initialization.";
logger.error(errMsg, e);
throw new BackendBuildingException(errMsg, e);
}
}
logger.info(
"Finished opening base RocksDB instance in operator {} with target key-group range {}.",
operatorIdentifier,
keyGroupRange.prettyPrintInterval());
}
/**
* Initializes the base DB that we restore from a list of multiple local state handles.
*
* @param localKeyedStateHandles the list of state handles to restore the base DB from.
* @throws Exception on any error during restore.
*/
private void restoreFromMultipleStateHandles(
List<IncrementalLocalKeyedStateHandle> localKeyedStateHandles) throws Exception {
logger.info(
"Starting to restore backend with range {} in operator {} from multiple state handles {} with useIngestDbRestoreMode = {}.",
keyGroupRange.prettyPrintInterval(),
operatorIdentifier,
localKeyedStateHandles,
useIngestDbRestoreMode);
byte[] startKeyGroupPrefixBytes = new byte[keyGroupPrefixBytes];
CompositeKeySerializationUtils.serializeKeyGroup(
keyGroupRange.getStartKeyGroup(), startKeyGroupPrefixBytes);
byte[] stopKeyGroupPrefixBytes = new byte[keyGroupPrefixBytes];
CompositeKeySerializationUtils.serializeKeyGroup(
keyGroupRange.getEndKeyGroup() + 1, stopKeyGroupPrefixBytes);
if (useIngestDbRestoreMode) {
// Optimized path for merging multiple handles with Ingest/Clip
mergeStateHandlesWithClipAndIngest(
localKeyedStateHandles, startKeyGroupPrefixBytes, stopKeyGroupPrefixBytes);
} else {
// Optimized path for single handle and legacy path for merging multiple handles.
mergeStateHandlesWithCopyFromTemporaryInstance(
localKeyedStateHandles, startKeyGroupPrefixBytes, stopKeyGroupPrefixBytes);
}
logger.info(
"Completed restoring backend with range {} in operator {} from multiple state handles with useIngestDbRestoreMode = {}.",
keyGroupRange.prettyPrintInterval(),
operatorIdentifier,
useIngestDbRestoreMode);
}
/**
 * Restores the base DB by merging multiple state handles into one. This method first checks if
 * all data to import is in the expected key-groups range and then uses import/export.
 * Otherwise, this method falls back to copying the data using a temporary DB.
 *
 * @param localKeyedStateHandles the list of state handles to restore the base DB from.
 * @param startKeyGroupPrefixBytes the min/start key of the key groups range as bytes.
 * @param stopKeyGroupPrefixBytes the max+1/end key of the key groups range as bytes.
 * @throws Exception on any restore error.
 */
private void mergeStateHandlesWithClipAndIngest(
        List<IncrementalLocalKeyedStateHandle> localKeyedStateHandles,
        byte[] startKeyGroupPrefixBytes,
        byte[] stopKeyGroupPrefixBytes)
        throws Exception {

    // All exports are written below a dedicated sub-directory of the instance base path.
    final Path absolutInstanceBasePath = instanceBasePath.getAbsoluteFile().toPath();
    final Path exportCfBasePath = absolutInstanceBasePath.resolve("export-cfs");
    Files.createDirectories(exportCfBasePath);

    // Export metadata per state meta info key; the values hold native RocksDB objects that
    // must be closed in the finally block below.
    final Map<RegisteredStateMetaInfoBase.Key, List<ExportImportFilesMetaData>>
            exportedColumnFamilyMetaData = new HashMap<>(localKeyedStateHandles.size());

    // Handles whose data exceeded the proclaimed key-groups range; these cannot be imported
    // directly and are merged by copying through temporary DB instances instead.
    final List<IncrementalLocalKeyedStateHandle> notImportableHandles =
            new ArrayList<>(localKeyedStateHandles.size());

    try {
        KeyGroupRange exportedSstKeyGroupsRange =
                exportColumnFamiliesWithSstDataInKeyGroupsRange(
                        exportCfBasePath,
                        localKeyedStateHandles,
                        exportedColumnFamilyMetaData,
                        notImportableHandles);

        if (exportedColumnFamilyMetaData.isEmpty()) {
            // Nothing could be exported, so we fall back to
            // #mergeStateHandlesWithCopyFromTemporaryInstance
            mergeStateHandlesWithCopyFromTemporaryInstance(
                    notImportableHandles, startKeyGroupPrefixBytes, stopKeyGroupPrefixBytes);
        } else {
            // We initialize the base DB by importing all the exported data.
            initBaseDBFromColumnFamilyImports(
                    exportedColumnFamilyMetaData, exportedSstKeyGroupsRange);
            // Copy data from handles that we couldn't directly import using temporary
            // instances.
            copyToBaseDBUsingTempDBs(
                    notImportableHandles, startKeyGroupPrefixBytes, stopKeyGroupPrefixBytes);
        }
    } finally {
        // Close native RocksDB objects
        exportedColumnFamilyMetaData.values().forEach(IOUtils::closeAllQuietly);
        // Cleanup export base directory
        cleanUpPathQuietly(exportCfBasePath);
    }
}
/**
 * Prepares the data for importing by exporting from temporary RocksDB instances. We can only
 * import data that does not exceed the target key-groups range and skip state handles that
 * exceed their range.
 *
 * @param exportCfBasePath the base path for the export files.
 * @param localKeyedStateHandles the state handles to prepare for import.
 * @param exportedColumnFamiliesOut output parameter for the metadata of completed exports.
 * @param skipped output parameter for state handles that could not be exported because the data
 *     exceeded the proclaimed range.
 * @return the total key-groups range of the exported data.
 * @throws Exception on any export error.
 */
private KeyGroupRange exportColumnFamiliesWithSstDataInKeyGroupsRange(
        Path exportCfBasePath,
        List<IncrementalLocalKeyedStateHandle> localKeyedStateHandles,
        Map<RegisteredStateMetaInfoBase.Key, List<ExportImportFilesMetaData>>
                exportedColumnFamiliesOut,
        List<IncrementalLocalKeyedStateHandle> skipped)
        throws Exception {
    logger.info(
            "Starting restore export for backend with range {} in operator {}.",
            keyGroupRange.prettyPrintInterval(),
            operatorIdentifier);

    // Running min/max over the key groups of all successfully exported handles; initialized
    // so that "no exports" leaves min > max and yields the empty range below.
    int minExportKeyGroup = Integer.MAX_VALUE;
    int maxExportKeyGroup = Integer.MIN_VALUE;
    int index = 0;
    for (IncrementalLocalKeyedStateHandle stateHandle : localKeyedStateHandles) {

        KeyedBackendSerializationProxy<K> serializationProxy =
                readMetaData(stateHandle.getMetaDataStateHandle());
        List<StateMetaInfoSnapshot> stateMetaInfoSnapshots =
                serializationProxy.getStateMetaInfoSnapshots();

        // Use Helper to encapsulate single stateHandle processing
        try (DistributeStateHandlerHelper helper =
                new DistributeStateHandlerHelper(
                        stateHandle,
                        stateMetaInfoSnapshots,
                        rocksHandle.getColumnFamilyOptionsFactory(),
                        rocksHandle.getDbOptions(),
                        rocksHandle.getTtlCompactFiltersManager(),
                        rocksHandle.getWriteBufferManagerCapacity(),
                        keyGroupPrefixBytes,
                        keyGroupRange,
                        operatorIdentifier,
                        index)) {

            // Left: the key-group range actually exported; Right: the handle was skipped
            // because its data exceeds the target range.
            Either<KeyGroupRange, IncrementalLocalKeyedStateHandle> result =
                    helper.tryDistribute(exportCfBasePath, exportedColumnFamiliesOut);

            // Handle the result and collect skipped handles
            if (result.isLeft()) {
                KeyGroupRange exportedRange = result.left();
                minExportKeyGroup =
                        Math.min(minExportKeyGroup, exportedRange.getStartKeyGroup());
                maxExportKeyGroup = Math.max(maxExportKeyGroup, exportedRange.getEndKeyGroup());
            } else {
                skipped.add(result.right());
            }
        }
        ++index;
    }
    KeyGroupRange exportedKeyGroupsRange =
            minExportKeyGroup <= maxExportKeyGroup
                    ? new KeyGroupRange(minExportKeyGroup, maxExportKeyGroup)
                    : KeyGroupRange.EMPTY_KEY_GROUP_RANGE;
    logger.info(
            "Completed restore export for backend with range {} in operator {}. {} exported handles with overall exported range {}. {} Skipped handles: {}.",
            keyGroupRange.prettyPrintInterval(),
            operatorIdentifier,
            localKeyedStateHandles.size() - skipped.size(),
            exportedKeyGroupsRange.prettyPrintInterval(),
            skipped.size(),
            skipped);
    return exportedKeyGroupsRange;
}
/**
 * Helper method that merges the data from multiple state handles into the restoring base DB by
 * the help of copying through temporary RocksDB instances.
 *
 * @param localKeyedStateHandles the state handles to merge into the base DB.
 * @param startKeyGroupPrefixBytes the min/start key of the key groups range as bytes.
 * @param stopKeyGroupPrefixBytes the max+1/end key of the key groups range as bytes.
 * @throws Exception on any merge error.
 */
private void mergeStateHandlesWithCopyFromTemporaryInstance(
        List<IncrementalLocalKeyedStateHandle> localKeyedStateHandles,
        byte[] startKeyGroupPrefixBytes,
        byte[] stopKeyGroupPrefixBytes)
        throws Exception {

    logger.info(
            "Starting to merge state for backend with range {} in operator {} from multiple state handles using temporary instances.",
            keyGroupRange.prettyPrintInterval(),
            operatorIdentifier);

    // Choose the best state handle for the initial DB and remove it from the list
    // (NOTE: this mutates localKeyedStateHandles; the remaining entries are copied below).
    final IncrementalLocalKeyedStateHandle selectedInitialHandle =
            localKeyedStateHandles.remove(
                    RocksDBIncrementalCheckpointUtils.findTheBestStateHandleForInitial(
                            localKeyedStateHandles, keyGroupRange, overlapFractionThreshold));

    Preconditions.checkNotNull(selectedInitialHandle);

    // Init the base DB instance with the initial state
    initBaseDBFromSingleStateHandle(selectedInitialHandle);

    // Copy remaining handles using temporary RocksDB instances
    copyToBaseDBUsingTempDBs(
            localKeyedStateHandles, startKeyGroupPrefixBytes, stopKeyGroupPrefixBytes);

    logger.info(
            "Completed merging state for backend with range {} in operator {} from multiple state handles using temporary instances.",
            keyGroupRange.prettyPrintInterval(),
            operatorIdentifier);
}
/**
 * Initializes the base DB by importing from previously exported data.
 *
 * @param exportedColumnFamilyMetaData the export (meta) data.
 * @param exportKeyGroupRange the total key-groups range of the exported data.
 * @throws Exception on import error.
 */
private void initBaseDBFromColumnFamilyImports(
        Map<RegisteredStateMetaInfoBase.Key, List<ExportImportFilesMetaData>>
                exportedColumnFamilyMetaData,
        KeyGroupRange exportKeyGroupRange)
        throws Exception {

    // We initialize the base DB by importing all the exported data.
    logger.info(
            "Starting to import exported state handles for backend with range {} in operator {} using Clip/Ingest DB with exported range {}.",
            keyGroupRange.prettyPrintInterval(),
            operatorIdentifier,
            exportKeyGroupRange.prettyPrintInterval());
    rocksHandle.openDB();
    // One column family per registered state, created directly from the export files.
    for (Map.Entry<RegisteredStateMetaInfoBase.Key, List<ExportImportFilesMetaData>> entry :
            exportedColumnFamilyMetaData.entrySet()) {
        rocksHandle.registerStateColumnFamilyHandleWithImport(
                entry.getKey(), entry.getValue(), cancelStreamRegistryForRestore);
    }

    // Use Range delete to clip the temp db to the target range of the backend
    RocksDBIncrementalCheckpointUtils.clipDBWithKeyGroupRange(
            rocksHandle.getDb(),
            rocksHandle.getColumnFamilyHandles(),
            keyGroupRange,
            exportKeyGroupRange,
            keyGroupPrefixBytes,
            useDeleteFilesInRange);

    logger.info(
            "Completed importing exported state handles for backend with range {} in operator {} using Clip/Ingest DB.",
            keyGroupRange.prettyPrintInterval(),
            operatorIdentifier);
}
/**
 * Restores the checkpointing status and state for this backend. This can only be done if the
 * backend was not rescaled and is therefore identical to the source backend in the previous
 * run.
 *
 * @param localKeyedStateHandle the single state handle from which the backend is restored.
 */
private void restorePreviousIncrementalFilesStatus(
        IncrementalKeyedStateHandle localKeyedStateHandle) {
    // Adopt the previous backend's identity so that shared-state bookkeeping continues
    // seamlessly from the restored checkpoint.
    backendUID = localKeyedStateHandle.getBackendIdentifier();
    restoredSstFiles.put(
            localKeyedStateHandle.getCheckpointId(),
            localKeyedStateHandle.getSharedStateHandles());
    lastCompletedCheckpointId = localKeyedStateHandle.getCheckpointId();
    logger.info(
            "Restored previous incremental files status in backend with range {} in operator {}: backend uuid {}, last checkpoint id {}.",
            keyGroupRange.prettyPrintInterval(),
            operatorIdentifier,
            backendUID,
            lastCompletedCheckpointId);
}
/**
 * Restores the base DB from local state of a single state handle.
 *
 * @param localKeyedStateHandle the state handle to restore from.
 * @throws Exception on any restore error.
 */
private void restoreBaseDBFromLocalState(IncrementalLocalKeyedStateHandle localKeyedStateHandle)
        throws Exception {
    // Read the backend's meta data to learn which column families (states) must be opened.
    KeyedBackendSerializationProxy<K> serializationProxy =
            readMetaData(localKeyedStateHandle.getMetaDataStateHandle());
    List<StateMetaInfoSnapshot> stateMetaInfoSnapshots =
            serializationProxy.getStateMetaInfoSnapshots();

    Path restoreSourcePath = localKeyedStateHandle.getDirectoryStateHandle().getDirectory();

    // Open the base DB directly on top of the local restore directory.
    this.rocksHandle.openDB(
            RestoredDBInstance.createColumnFamilyDescriptors(
                    stateMetaInfoSnapshots,
                    rocksHandle.getColumnFamilyOptionsFactory(),
                    rocksHandle.getTtlCompactFiltersManager(),
                    rocksHandle.getWriteBufferManagerCapacity(),
                    true),
            stateMetaInfoSnapshots,
            restoreSourcePath,
            cancelStreamRegistryForRestore);
}
/**
 * Helper method to download files, as specified in the given download specs, to the local
 * directory.
 *
 * @param downloadSpecs specifications of files to download.
 * @throws Exception On any download error.
 */
private void transferRemoteStateToLocalDirectory(
        Collection<StateHandleDownloadSpec> downloadSpecs) throws Exception {
    logger.info(
            "Start downloading remote state to local directory in operator {} for target key-group range {}.",
            operatorIdentifier,
            keyGroupRange.prettyPrintInterval());
    // The downloader (and its transfer threads, if a dedicated number was specified) is
    // closed by try-with-resources once all transfers have finished.
    try (RocksDBStateDownloader rocksDBStateDownloader =
            new RocksDBStateDownloader(
                    RocksDBStateDataTransferHelper.forThreadNumIfSpecified(
                            numberOfTransferringThreads, ioExecutor))) {
        rocksDBStateDownloader.transferAllStateDataToDirectory(
                downloadSpecs, cancelStreamRegistryForRestore);
        logger.info(
                "Finished downloading remote state to local directory in operator {} for target key-group range {}.",
                operatorIdentifier,
                keyGroupRange.prettyPrintInterval());
    }
}
/**
 * Helper method to copy all data from the given local state handles to the base DB by using
 * temporary DB instances.
 *
 * @param toImport the state handles to import.
 * @param startKeyGroupPrefixBytes the min/start key of the key groups range as bytes.
 * @param stopKeyGroupPrefixBytes the max+1/end key of the key groups range as bytes.
 * @throws Exception on any copy error.
 */
private void copyToBaseDBUsingTempDBs(
        List<IncrementalLocalKeyedStateHandle> toImport,
        byte[] startKeyGroupPrefixBytes,
        byte[] stopKeyGroupPrefixBytes)
        throws Exception {

    if (toImport.isEmpty()) {
        return;
    }

    logger.info(
            "Starting to copy state handles for backend with range {} in operator {} using temporary instances.",
            keyGroupRange.prettyPrintInterval(),
            operatorIdentifier);

    // A single write batch against the base DB is shared across all handles; it is
    // registered with the cancel registry so a cancelled restore can abort the writes.
    try (RocksDBWriteBatchWrapper writeBatchWrapper =
                    new RocksDBWriteBatchWrapper(this.rocksHandle.getDb(), writeBatchSize);
            Closeable ignored =
                    cancelStreamRegistryForRestore.registerCloseableTemporarily(
                            writeBatchWrapper.getCancelCloseable())) {
        for (IncrementalLocalKeyedStateHandle handleToCopy : toImport) {
            KeyedBackendSerializationProxy<K> serializationProxy =
                    readMetaData(handleToCopy.getMetaDataStateHandle());
            List<StateMetaInfoSnapshot> stateMetaInfoSnapshots =
                    serializationProxy.getStateMetaInfoSnapshots();
            // Each handle is opened as a short-lived, read-only temporary DB instance that
            // is closed as soon as its data has been copied into the base DB.
            try (RestoredDBInstance restoredDbInstance =
                    RestoredDBInstance.restoreTempDBInstanceFromLocalState(
                            handleToCopy,
                            stateMetaInfoSnapshots,
                            rocksHandle.getColumnFamilyOptionsFactory(),
                            rocksHandle.getDbOptions(),
                            rocksHandle.getTtlCompactFiltersManager(),
                            rocksHandle.getWriteBufferManagerCapacity())) {
                copyTempDbIntoBaseDb(
                        restoredDbInstance,
                        writeBatchWrapper,
                        startKeyGroupPrefixBytes,
                        stopKeyGroupPrefixBytes);
            }
        }
    }

    logger.info(
            "Completed copying state handles for backend with range {} in operator {} using temporary instances.",
            keyGroupRange.prettyPrintInterval(),
            operatorIdentifier);
}
/**
 * Helper method to copy all data from an open temporary DB to the base DB.
 *
 * @param tmpRestoreDBInfo the temporary instance.
 * @param writeBatchWrapper write batch wrapper for writes against the base DB.
 * @param startKeyGroupPrefixBytes the min/start key of the key groups range as bytes.
 * @param stopKeyGroupPrefixBytes the max+1/end key of the key groups range as bytes.
 * @throws Exception on any copy error.
 */
private void copyTempDbIntoBaseDb(
        RestoredDBInstance tmpRestoreDBInfo,
        RocksDBWriteBatchWrapper writeBatchWrapper,
        byte[] startKeyGroupPrefixBytes,
        byte[] stopKeyGroupPrefixBytes)
        throws Exception {

    logger.debug(
            "Starting copy of state handle {} for backend with range {} in operator {} to base DB using temporary instance.",
            tmpRestoreDBInfo.srcStateHandle,
            keyGroupRange.prettyPrintInterval(),
            operatorIdentifier);

    List<ColumnFamilyDescriptor> tmpColumnFamilyDescriptors =
            tmpRestoreDBInfo.columnFamilyDescriptors;
    List<ColumnFamilyHandle> tmpColumnFamilyHandles = tmpRestoreDBInfo.columnFamilyHandles;

    // iterating only the requested descriptors automatically skips the default
    // column family handle
    for (int descIdx = 0; descIdx < tmpColumnFamilyDescriptors.size(); ++descIdx) {
        ColumnFamilyHandle tmpColumnFamilyHandle = tmpColumnFamilyHandles.get(descIdx);

        // Look up (or lazily create) the matching column family in the base DB.
        ColumnFamilyHandle targetColumnFamilyHandle =
                this.rocksHandle.getOrRegisterStateColumnFamilyHandle(
                                null,
                                tmpRestoreDBInfo.stateMetaInfoSnapshots.get(descIdx),
                                cancelStreamRegistryForRestore)
                        .columnFamilyHandle;

        try (RocksIteratorWrapper iterator =
                RocksDBOperationUtils.getRocksIterator(
                        tmpRestoreDBInfo.db,
                        tmpColumnFamilyHandle,
                        tmpRestoreDBInfo.readOptions)) {

            // Copy only keys within [startKeyGroupPrefixBytes, stopKeyGroupPrefixBytes).
            iterator.seek(startKeyGroupPrefixBytes);

            while (iterator.isValid()) {
                if (RocksDBIncrementalCheckpointUtils.beforeThePrefixBytes(
                        iterator.key(), stopKeyGroupPrefixBytes)) {
                    writeBatchWrapper.put(
                            targetColumnFamilyHandle, iterator.key(), iterator.value());
                } else {
                    // Since the iterator visits records in sorted order, we can just
                    // break here once the end of the range is reached.
                    break;
                }
                iterator.next();
            }
        } // releases native iterator resources
    }
    logger.debug(
            "Finished copy of state handle {} for backend with range {} in operator {} using temporary instance.",
            tmpRestoreDBInfo.srcStateHandle,
            keyGroupRange.prettyPrintInterval(),
            operatorIdentifier);
}
/** Best-effort recursive deletion of {@code path}; failures are logged and never thrown. */
private void cleanUpPathQuietly(@Nonnull Path path) {
    try {
        FileUtils.deleteDirectory(path.toFile());
    } catch (IOException deletionFailure) {
        // Cleanup is non-critical for restore correctness, so we only warn.
        logger.warn("Failed to clean up path {}", path, deletionFailure);
    }
}
/**
 * Runs the given action and reports its wall-clock duration (in milliseconds) under the given
 * metric name via {@code customInitializationMetrics}.
 *
 * <p>Note: the duration is only reported when the action completes normally; if it throws, the
 * exception propagates without a metric being recorded.
 *
 * @param runnable the action to execute and time.
 * @param metricName the name under which the duration is reported.
 * @throws Exception whatever the action throws.
 */
private void runAndReportDuration(RunnableWithException runnable, String metricName)
        throws Exception {
    final SystemClock clock = SystemClock.getInstance();
    final long startTime = clock.relativeTimeMillis();
    runnable.run();
    customInitializationMetrics.addMetric(metricName, clock.relativeTimeMillis() - startTime);
}
/**
 * Reads Flink's state meta data file from the state handle.
 *
 * <p>The input stream is registered with the cancel registry so that a cancelled restore can
 * close it and abort a blocking read; the register/unregister pattern (instead of plain
 * try-with-resources) ensures the stream is closed exactly once, by whichever side gets to it
 * first.
 */
private KeyedBackendSerializationProxy<K> readMetaData(StreamStateHandle metaStateHandle)
        throws Exception {

    InputStream inputStream = null;

    try {
        inputStream = metaStateHandle.openInputStream();
        cancelStreamRegistryForRestore.registerCloseable(inputStream);
        DataInputView in = new DataInputViewStreamWrapper(inputStream);
        return readMetaData(in);
    } finally {
        // unregisterCloseable returns false if the registry already closed the stream
        // (e.g. on cancellation), in which case we must not close it a second time.
        if (cancelStreamRegistryForRestore.unregisterCloseable(inputStream)) {
            inputStream.close();
        }
    }
}
/**
 * Deserializes the keyed backend meta data from the given input view and checks key-serializer
 * compatibility (once per restore).
 *
 * @param dataInputView the input to read the serialization proxy from.
 * @return the deserialized serialization proxy.
 * @throws IOException on read errors.
 * @throws StateMigrationException if the new key serializer is incompatible with (or requires
 *     migration from) the previous one.
 */
KeyedBackendSerializationProxy<K> readMetaData(DataInputView dataInputView)
        throws IOException, StateMigrationException {
    // isSerializerPresenceRequired flag is set to false, since for the RocksDB state backend,
    // deserialization of state happens lazily during runtime; we depend on the fact that the
    // new serializer for states could be compatible, and therefore the restore can continue
    // without old serializers required to be present.
    KeyedBackendSerializationProxy<K> serializationProxy =
            new KeyedBackendSerializationProxy<>(userCodeClassLoader);
    serializationProxy.read(dataInputView);
    if (!isKeySerializerCompatibilityChecked) {
        // fetch current serializer now because if it is incompatible, we can't access
        // it anymore to improve the error message
        TypeSerializer<K> currentSerializer = keySerializerProvider.currentSchemaSerializer();
        // check for key serializer compatibility; this also reconfigures the
        // key serializer to be compatible, if it is required and is possible
        TypeSerializerSchemaCompatibility<K> keySerializerSchemaCompat =
                keySerializerProvider.setPreviousSerializerSnapshotForRestoredState(
                        serializationProxy.getKeySerializerSnapshot());
        if (keySerializerSchemaCompat.isCompatibleAfterMigration()
                || keySerializerSchemaCompat.isIncompatible()) {
            throw new StateMigrationException(
                    "The new key serializer ("
                            + currentSerializer
                            + ") must be compatible with the previous key serializer ("
                            + keySerializerProvider.previousSchemaSerializer()
                            + ").");
        }

        isKeySerializerCompatibilityChecked = true;
    }
    return serializationProxy;
}
/** Closes this restore operation by releasing the underlying RocksDB native resources. */
@Override
public void close() throws Exception {
    this.rocksHandle.close();
}
}
| RocksDBIncrementalRestoreOperation |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/deser/creators/TestCreators3.java | {
"start": 2415,
"end": 3037
} | class ____ extends JacksonAnnotationIntrospector
{
@Override
public String findImplicitPropertyName(MapperConfig<?> config, AnnotatedMember param) {
if (param instanceof AnnotatedParameter ap) {
switch (ap.getIndex()) {
case 0: return "a";
case 1: return "b";
case 2: return "c";
default:
return "param"+ap.getIndex();
}
}
return super.findImplicitPropertyName(config, param);
}
}
// [databind#1853]
public static | MyParamIntrospector |
java | quarkusio__quarkus | extensions/qute/deployment/src/test/java/io/quarkus/qute/deployment/TypesTest.java | {
"start": 4661,
"end": 4856
} | class ____ implements ValueResolver {
@Override
public CompletionStage<Object> resolve(EvalContext context) {
return null;
}
}
public static | MyResolver1 |
java | apache__camel | components/camel-spring-parent/camel-spring-ldap/src/main/java/org/apache/camel/component/springldap/LdapOperation.java | {
"start": 1289,
"end": 1407
} | enum ____ {
SEARCH,
BIND,
UNBIND,
AUTHENTICATE,
MODIFY_ATTRIBUTES,
FUNCTION_DRIVEN
}
| LdapOperation |
java | apache__kafka | shell/src/main/java/org/apache/kafka/shell/node/RootShellNode.java | {
"start": 1101,
"end": 1720
} | class ____ implements MetadataNode {
private final MetadataImage image;
public RootShellNode(MetadataImage image) {
this.image = image;
}
@Override
public Collection<String> childNames() {
return List.of(LocalShellNode.NAME, MetadataImageNode.NAME);
}
@Override
public MetadataNode child(String name) {
if (name.equals(LocalShellNode.NAME)) {
return new LocalShellNode();
} else if (name.equals(MetadataImageNode.NAME)) {
return new MetadataImageNode(image);
} else {
return null;
}
}
}
| RootShellNode |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/HazelcastRingbufferEndpointBuilderFactory.java | {
"start": 10545,
"end": 11332
} | class ____ {
/**
* The internal instance of the builder used to access to all the
* methods representing the name of headers.
*/
private static final HazelcastRingbufferHeaderNameBuilder INSTANCE = new HazelcastRingbufferHeaderNameBuilder();
/**
* The operation to perform.
*
* The option is a: {@code String} type.
*
* Group: producer
*
* @return the name of the header {@code HazelcastOperationType}.
*/
public String hazelcastOperationType() {
return "CamelHazelcastOperationType";
}
}
static HazelcastRingbufferEndpointBuilder endpointBuilder(String componentName, String path) {
| HazelcastRingbufferHeaderNameBuilder |
java | apache__camel | components/camel-zipfile/src/main/java/org/apache/camel/dataformat/zipfile/ZipIterator.java | {
"start": 1740,
"end": 6906
} | class ____ implements Iterator<Message>, Closeable {
private static final Logger LOG = LoggerFactory.getLogger(ZipIterator.class);
private final Exchange exchange;
private boolean allowEmptyDirectory;
private volatile ZipArchiveInputStream zipInputStream;
private volatile ZipArchiveEntry currentEntry;
private volatile Message parent;
private volatile boolean first;
public ZipIterator(Exchange exchange, InputStream inputStream) {
this.exchange = exchange;
this.allowEmptyDirectory = false;
Objects.requireNonNull(inputStream);
if (inputStream instanceof ZipArchiveInputStream) {
zipInputStream = (ZipArchiveInputStream) inputStream;
} else {
try {
ArchiveInputStream input = new ArchiveStreamFactory().createArchiveInputStream(ArchiveStreamFactory.ZIP,
new BufferedInputStream(inputStream));
zipInputStream = (ZipArchiveInputStream) input;
} catch (ArchiveException e) {
throw new RuntimeException(e.getMessage(), e);
}
}
parent = null;
first = true;
}
@Override
public boolean hasNext() {
boolean answer = doHasNext();
LOG.trace("hasNext: {}", answer);
return answer;
}
protected boolean doHasNext() {
try {
if (zipInputStream == null) {
return false;
}
boolean availableDataInCurrentEntry = currentEntry != null;
if (!availableDataInCurrentEntry) {
// advance to the next entry.
parent = getNextElement();
if (parent == null) {
zipInputStream.close();
} else {
availableDataInCurrentEntry = true;
}
}
return availableDataInCurrentEntry;
} catch (IOException exception) {
throw new RuntimeCamelException(exception);
}
}
@Override
public Message next() {
Message answer = doNext();
LOG.trace("next: {}", answer);
return answer;
}
protected Message doNext() {
if (parent == null) {
parent = getNextElement();
}
Message answer = parent;
parent = null;
currentEntry = null;
if (first && answer == null) {
throw new IllegalStateException("Unable to unzip the file, it may be corrupted.");
}
first = false;
checkNullAnswer(answer);
return answer;
}
private Message getNextElement() {
if (zipInputStream == null) {
return null;
}
try {
currentEntry = getNextEntry();
if (currentEntry != null) {
LOG.debug("read zipEntry {}", currentEntry.getName());
Message answer = new DefaultMessage(exchange.getContext());
answer.getHeaders().putAll(exchange.getIn().getHeaders());
answer.setHeader("zipFileName", currentEntry.getName());
answer.setHeader(Exchange.FILE_NAME, currentEntry.getName());
if (currentEntry.isDirectory()) {
if (allowEmptyDirectory) {
answer.setBody(new ByteArrayInputStream(new byte[0]));
} else {
return getNextElement(); // skip directory
}
} else {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
IOHelper.copy(zipInputStream, baos);
byte[] data = baos.toByteArray();
answer.setBody(new ByteArrayInputStream(data));
}
return answer;
} else {
LOG.trace("close zipInputStream");
return null;
}
} catch (IOException exception) {
throw new RuntimeCamelException(exception);
}
}
public void checkNullAnswer(Message answer) {
if (answer == null && zipInputStream != null) {
IOHelper.close(zipInputStream);
zipInputStream = null;
}
}
private ZipArchiveEntry getNextEntry() throws IOException {
ZipArchiveEntry entry;
while ((entry = zipInputStream.getNextEntry()) != null) {
if (!entry.isDirectory()) {
return entry;
} else {
if (allowEmptyDirectory) {
return entry;
}
}
}
return null;
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
@Override
public void close() throws IOException {
IOHelper.close(zipInputStream);
zipInputStream = null;
currentEntry = null;
}
public boolean isSupportIteratorForEmptyDirectory() {
return allowEmptyDirectory;
}
public void setAllowEmptyDirectory(boolean allowEmptyDirectory) {
this.allowEmptyDirectory = allowEmptyDirectory;
}
}
| ZipIterator |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/checkreturnvalue/NoCanIgnoreReturnValueOnClassesTest.java | {
"start": 8152,
"end": 8356
} | class ____ {
@CanIgnoreReturnValue
public Client getValue() {
return this;
}
@CheckReturnValue
public static final | Client |
java | spring-projects__spring-boot | documentation/spring-boot-actuator-docs/src/test/java/org/springframework/boot/actuate/docs/beans/BeansEndpointDocumentationTests.java | {
"start": 1904,
"end": 3326
} | class ____ extends MockMvcEndpointDocumentationTests {
@Test
void beans() {
List<FieldDescriptor> beanFields = List.of(fieldWithPath("aliases").description("Names of any aliases."),
fieldWithPath("scope").description("Scope of the bean."),
fieldWithPath("type").description("Fully qualified type of the bean."),
fieldWithPath("resource").description("Resource in which the bean was defined, if any.")
.optional()
.type(JsonFieldType.STRING),
fieldWithPath("dependencies").description("Names of any dependencies."));
ResponseFieldsSnippet responseFields = responseFields(
fieldWithPath("contexts").description("Application contexts keyed by id."), parentIdField(),
fieldWithPath("contexts.*.beans").description("Beans in the application context keyed by name."))
.andWithPrefix("contexts.*.beans.*.", beanFields);
assertThat(this.mvc.get().uri("/actuator/beans")).hasStatusOk()
.apply(document("beans",
preprocessResponse(
limit(this::isIndependentBean, "contexts", getApplicationContext().getId(), "beans")),
responseFields));
}
private boolean isIndependentBean(Entry<String, Map<String, Object>> bean) {
return CollectionUtils.isEmpty((Collection<?>) bean.getValue().get("aliases"))
&& CollectionUtils.isEmpty((Collection<?>) bean.getValue().get("dependencies"));
}
@Configuration(proxyBeanMethods = false)
static | BeansEndpointDocumentationTests |
java | google__guava | android/guava-tests/test/com/google/common/cache/CacheTesting.java | {
"start": 12590,
"end": 17445
} | interface ____<T> {
void accept(@Nullable T object);
}
/**
* Assuming the given cache has maximum size {@code maxSize}, this method populates the cache (by
* getting a bunch of different keys), then makes sure all the items in the cache are also in the
* eviction queue. It will invoke the given {@code operation} on the first element in the eviction
* queue, and then reverify that all items in the cache are in the eviction queue, and verify that
* the head of the eviction queue has changed as a result of the operation.
*/
static void checkRecency(
LoadingCache<Integer, Integer> cache,
int maxSize,
Receiver<ReferenceEntry<Integer, Integer>> operation) {
checkNotNull(operation);
if (hasLocalCache(cache)) {
warmUp(cache, 0, 2 * maxSize);
LocalCache<Integer, Integer> cchm = toLocalCache(cache);
Segment<?, ?> segment = cchm.segments[0];
drainRecencyQueue(segment);
assertThat(accessQueueSize(cache)).isEqualTo(maxSize);
assertThat(cache.size()).isEqualTo(maxSize);
ReferenceEntry<?, ?> originalHead = segment.accessQueue.peek();
@SuppressWarnings("unchecked")
ReferenceEntry<Integer, Integer> entry = (ReferenceEntry<Integer, Integer>) originalHead;
operation.accept(entry);
drainRecencyQueue(segment);
assertThat(segment.accessQueue.peek()).isNotSameInstanceAs(originalHead);
assertThat(accessQueueSize(cache)).isEqualTo(cache.size());
}
}
/** Warms the given cache by getting all values in {@code [start, end)}, in order. */
static void warmUp(LoadingCache<Integer, Integer> map, int start, int end) {
checkNotNull(map);
for (int i = start; i < end; i++) {
map.getUnchecked(i);
}
}
static void expireEntries(Cache<?, ?> cache, long expiringTime, FakeTicker ticker) {
checkNotNull(ticker);
expireEntries(toLocalCache(cache), expiringTime, ticker);
}
static void expireEntries(LocalCache<?, ?> cchm, long expiringTime, FakeTicker ticker) {
for (Segment<?, ?> segment : cchm.segments) {
drainRecencyQueue(segment);
}
ticker.advance(2 * expiringTime, MILLISECONDS);
long now = ticker.read();
for (Segment<?, ?> segment : cchm.segments) {
expireEntries(segment, now);
assertWithMessage("Expiration queue must be empty by now")
.that(writeQueueSize(segment))
.isEqualTo(0);
assertWithMessage("Expiration queue must be empty by now")
.that(accessQueueSize(segment))
.isEqualTo(0);
assertWithMessage("Segments must be empty by now").that(segmentSize(segment)).isEqualTo(0);
}
cchm.processPendingNotifications();
}
static void expireEntries(Segment<?, ?> segment, long now) {
segment.lock();
try {
segment.expireEntries(now);
segment.cleanUp();
} finally {
segment.unlock();
}
}
static void checkEmpty(Cache<?, ?> cache) {
assertThat(cache.size()).isEqualTo(0);
assertThat(cache.asMap().containsKey(null)).isFalse();
assertThat(cache.asMap().containsKey(6)).isFalse();
assertThat(cache.asMap().containsValue(null)).isFalse();
assertThat(cache.asMap().containsValue(6)).isFalse();
checkEmpty(cache.asMap());
}
static void checkEmpty(ConcurrentMap<?, ?> map) {
checkEmpty(map.keySet());
checkEmpty(map.values());
checkEmpty(map.entrySet());
assertThat(map).isEqualTo(ImmutableMap.of());
assertThat(map.hashCode()).isEqualTo(ImmutableMap.of().hashCode());
assertThat(map.toString()).isEqualTo(ImmutableMap.of().toString());
if (map instanceof LocalCache) {
LocalCache<?, ?> cchm = (LocalCache<?, ?>) map;
checkValidState(cchm);
assertThat(cchm.isEmpty()).isTrue();
assertThat(cchm).isEmpty();
for (LocalCache.Segment<?, ?> segment : cchm.segments) {
assertThat(segment.count).isEqualTo(0);
assertThat(segmentSize(segment)).isEqualTo(0);
assertThat(segment.writeQueue.isEmpty()).isTrue();
assertThat(segment.accessQueue.isEmpty()).isTrue();
}
}
}
static void checkEmpty(Collection<?> collection) {
assertThat(collection.isEmpty()).isTrue();
assertThat(collection).isEmpty();
assertThat(collection.iterator().hasNext()).isFalse();
assertThat(collection.toArray()).isEmpty();
assertThat(collection.toArray(new Object[0])).isEmpty();
if (collection instanceof Set) {
new EqualsTester()
.addEqualityGroup(ImmutableSet.of(), collection)
.addEqualityGroup(ImmutableSet.of(""))
.testEquals();
} else if (collection instanceof List) {
new EqualsTester()
.addEqualityGroup(ImmutableList.of(), collection)
.addEqualityGroup(ImmutableList.of(""))
.testEquals();
}
}
private CacheTesting() {}
}
| Receiver |
java | spring-projects__spring-framework | spring-test/src/main/java/org/springframework/test/context/junit4/SpringRunner.java | {
"start": 1224,
"end": 1687
} | class ____ JUnit 4.12 or higher.
*
* @author Sam Brannen
* @since 4.3
* @see SpringJUnit4ClassRunner
* @see org.springframework.test.context.junit4.rules.SpringClassRule
* @see org.springframework.test.context.junit4.rules.SpringMethodRule
* @deprecated since Spring Framework 7.0 in favor of the
* {@link org.springframework.test.context.junit.jupiter.SpringExtension SpringExtension}
* and JUnit Jupiter
*/
@Deprecated(since = "7.0")
public final | requires |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/annotation/web/configurers/oauth2/server/authorization/JwkSetTests.java | {
"start": 6381,
"end": 6690
} | class ____ extends AuthorizationServerConfiguration {
@Bean
AuthorizationServerSettings authorizationServerSettings() {
return AuthorizationServerSettings.builder()
.jwkSetEndpoint("/test/jwks")
.multipleIssuersAllowed(true)
.build();
}
}
}
| AuthorizationServerConfigurationCustomEndpoints |
java | quarkusio__quarkus | extensions/virtual-threads/runtime/src/main/java/io/quarkus/virtual/threads/DelegatingExecutorService.java | {
"start": 327,
"end": 1423
} | class ____ extends ForwardingExecutorService {
private final ExecutorService delegate;
DelegatingExecutorService(final ExecutorService delegate) {
this.delegate = delegate;
}
@Override
protected ExecutorService delegate() {
return delegate;
}
public boolean isShutdown() {
// container managed executors are never shut down from the application's perspective
return false;
}
public boolean isTerminated() {
// container managed executors are never shut down from the application's perspective
return false;
}
public boolean awaitTermination(final long timeout, final TimeUnit unit) {
return false;
}
public void shutdown() {
throw new UnsupportedOperationException("shutdown not allowed on managed executor service");
}
public List<Runnable> shutdownNow() {
throw new UnsupportedOperationException("shutdownNow not allowed on managed executor service");
}
public String toString() {
return delegate.toString();
}
}
| DelegatingExecutorService |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.