language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/checkpoint/channel/ChannelStateWriteRequestExecutorImplTest.java | {
"start": 22778,
"end": 23726
} | class ____ extends ArrayDeque<ChannelStateWriteRequest> {
private ChannelStateWriteRequestExecutor worker;
@Override
public boolean add(@Nonnull ChannelStateWriteRequest request) {
boolean add = super.add(request);
try {
worker.releaseSubtask(JOB_VERTEX_ID, SUBTASK_INDEX);
} catch (IOException e) {
ExceptionUtils.rethrow(e);
}
return add;
}
@Override
public void addFirst(@Nonnull ChannelStateWriteRequest request) {
super.addFirst(request);
try {
worker.releaseSubtask(JOB_VERTEX_ID, SUBTASK_INDEX);
} catch (IOException e) {
ExceptionUtils.rethrow(e);
}
}
public void setWorker(ChannelStateWriteRequestExecutor worker) {
this.worker = worker;
}
}
private static | WorkerClosingDeque |
java | spring-projects__spring-security | core/src/main/java/org/springframework/security/aot/hint/SecurityHintsAotProcessor.java | {
"start": 1372,
"end": 1950
} | class ____
implements BeanFactoryInitializationAotContribution {
private final ConfigurableListableBeanFactory beanFactory;
private AuthorizationProxyFactoryAotContribution(ConfigurableListableBeanFactory beanFactory) {
this.beanFactory = beanFactory;
}
@Override
public void applyTo(GenerationContext context, BeanFactoryInitializationCode code) {
this.beanFactory.getBeanProvider(SecurityHintsRegistrar.class)
.forEach((provider) -> provider.registerHints(context.getRuntimeHints(), this.beanFactory));
}
}
}
| AuthorizationProxyFactoryAotContribution |
java | apache__flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/operations/converters/SqlProcedureCallConverter.java | {
"start": 2596,
"end": 7087
} | class ____ implements SqlNodeConverter<SqlNode> {
@Override
public Optional<EnumSet<SqlKind>> supportedSqlKinds() {
return Optional.of(EnumSet.of(SqlKind.PROCEDURE_CALL));
}
@Override
public Operation convertSqlNode(SqlNode sqlNode, ConvertContext context) {
SqlCall callProcedure = (SqlCall) ((SqlCall) sqlNode).getOperandList().get(0);
BridgingSqlProcedure sqlProcedure = (BridgingSqlProcedure) callProcedure.getOperator();
SqlValidator sqlValidator = context.getSqlValidator();
ProcedureDefinition procedureDefinition =
new ProcedureDefinition(sqlProcedure.getContextResolveProcedure().getProcedure());
FlinkSqlCallBinding sqlCallBinding =
new FlinkSqlCallBinding(
context.getSqlValidator(),
((SqlValidatorImpl) context.getSqlValidator()).getEmptyScope(),
callProcedure);
List<RexNode> reducedOperands = reduceOperands(sqlCallBinding, context);
SqlOperatorBinding sqlOperatorBinding =
new ExplicitOperatorBinding(
context.getSqlValidator().getTypeFactory(),
sqlProcedure,
reducedOperands.stream()
.map(RexNode::getType)
.collect(Collectors.toList()));
OperatorBindingCallContext bindingCallContext =
new OperatorBindingCallContext(
context.getCatalogManager().getDataTypeFactory(),
procedureDefinition,
sqlOperatorBinding,
sqlValidator.getValidatedNodeType(callProcedure));
// run type inference to infer the type including types of input args
// and output
TypeInferenceUtil.Result typeInferResult =
TypeInferenceUtil.runTypeInference(
procedureDefinition.getTypeInference(
context.getCatalogManager().getDataTypeFactory()),
bindingCallContext,
null);
List<DataType> argumentTypes = typeInferResult.getExpectedArgumentTypes();
int argumentCount = argumentTypes.size();
DataType[] inputTypes = new DataType[argumentCount];
Object[] params = new Object[argumentCount];
for (int i = 0; i < argumentCount; i++) {
inputTypes[i] = argumentTypes.get(i);
RexNode reducedOperand = reducedOperands.get(i);
if (!(reducedOperand instanceof RexLiteral)) {
throw new ValidationException(
String.format(
"The argument at position %s %s for calling procedure can't be converted to "
+ "literal.",
i, context.toQuotedSqlString(callProcedure.operand(i))));
}
// convert the literal to Flink internal representation
RexLiteral literalOperand = (RexLiteral) reducedOperand;
Object internalValue =
RexLiteralUtil.toFlinkInternalValue(
literalOperand.getValueAs(Comparable.class),
inputTypes[i].getLogicalType());
params[i] = internalValue;
}
return new PlannerCallProcedureOperation(
sqlProcedure.getContextResolveProcedure().getIdentifier().getIdentifier().get(),
sqlProcedure.getContextResolveProcedure().getProcedure(),
params,
inputTypes,
typeInferResult.getOutputDataType());
}
private List<RexNode> reduceOperands(
FlinkSqlCallBinding sqlCallBinding, ConvertContext context) {
// we don't really care about the input row type while converting to RexNode
// since call procedure shouldn't refer any inputs.
// so, construct an empty row for it.
RelDataType inputRowType =
toRelDataType(
DataTypes.ROW().getLogicalType(),
context.getSqlValidator().getTypeFactory());
return context.reduceRexNodes(
sqlCallBinding.operands().stream()
.map(node -> context.toRexNode(node, inputRowType, null))
.collect(Collectors.toList()));
}
}
| SqlProcedureCallConverter |
java | apache__camel | components/camel-ical/src/main/java/org/apache/camel/component/ical/ICalDataFormat.java | {
"start": 1348,
"end": 2454
} | class ____ extends ServiceSupport implements DataFormat, DataFormatName {
private boolean validating;
private CalendarOutputter outputer;
private CalendarBuilder builder;
@Override
public String getDataFormatName() {
return "ical";
}
@Override
public void marshal(Exchange exchange, Object graph, OutputStream stream) throws Exception {
Calendar calendar = exchange.getContext().getTypeConverter().convertTo(Calendar.class, graph);
outputer.output(calendar, stream);
}
@Override
public Object unmarshal(Exchange exchange, InputStream stream) throws Exception {
return builder.build(stream);
}
@Override
protected void doStart() throws Exception {
outputer = new CalendarOutputter(validating);
builder = new CalendarBuilder();
}
@Override
protected void doStop() throws Exception {
// noop
}
public void setValidating(boolean validating) {
this.validating = validating;
}
public boolean isValidating() {
return validating;
}
}
| ICalDataFormat |
java | apache__flink | flink-formats/flink-parquet/src/main/java/org/apache/flink/formats/parquet/utils/ParquetFormatStatisticsReportUtil.java | {
"start": 14929,
"end": 16057
} | class ____ implements Callable<FileParquetStatistics> {
private final Configuration hadoopConfig;
private final Path file;
public ParquetFileRowCountCalculator(
Configuration hadoopConfig,
Path file,
Map<String, Statistics<?>> columnStatisticsMap) {
this.hadoopConfig = hadoopConfig;
this.file = file;
}
@Override
public FileParquetStatistics call() throws Exception {
org.apache.hadoop.fs.Path hadoopPath = new org.apache.hadoop.fs.Path(file.toUri());
ParquetMetadata metadata = ParquetFileReader.readFooter(hadoopConfig, hadoopPath);
MessageType schema = metadata.getFileMetaData().getSchema();
List<String> columns =
schema.asGroupType().getFields().stream()
.map(Type::getName)
.collect(Collectors.toList());
List<BlockMetaData> blocks = metadata.getBlocks();
return new FileParquetStatistics(columns, blocks);
}
}
}
| ParquetFileRowCountCalculator |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/inheritance/discriminator/MappedSuperclassExtendsEntityTest.java | {
"start": 2617,
"end": 2932
} | class ____ extends GrandParent {
@ManyToMany(mappedBy = "parents")
private List<TestEntity> entities;
public List<TestEntity> getEntities() {
return entities;
}
public void setEntities(List<TestEntity> entities) {
this.entities = entities;
}
}
@Entity(name = "TestEntity")
public static | Parent |
java | quarkusio__quarkus | core/runtime/src/main/java/io/quarkus/runtime/graal/GraalVM.java | {
"start": 11934,
"end": 12054
} | class ____ {
}
@Delete
@TargetClass(GraalVM.VersionParseHelper.class)
final | Target_io_quarkus_runtime_graal_GraalVM_Version |
java | junit-team__junit5 | junit-platform-launcher/src/main/java/org/junit/platform/launcher/jfr/FlightRecordingExecutionListener.java | {
"start": 1284,
"end": 3896
} | class ____ implements TestExecutionListener {
private final Map<org.junit.platform.engine.UniqueId, TestExecutionEvent> testExecutionEvents = new ConcurrentHashMap<>();
private @Nullable TestPlanExecutionEvent testPlanExecutionEvent;
@Override
public void testPlanExecutionStarted(TestPlan plan) {
var event = new TestPlanExecutionEvent();
if (event.isEnabled()) {
event.begin();
this.testPlanExecutionEvent = event;
}
}
@Override
public void testPlanExecutionFinished(TestPlan plan) {
var event = this.testPlanExecutionEvent;
this.testPlanExecutionEvent = null;
if (event != null && event.shouldCommit()) {
event.containsTests = plan.containsTests();
event.engineNames = plan.getRoots().stream().map(TestIdentifier::getDisplayName).collect(
Collectors.joining(", "));
event.commit();
}
}
@Override
public void executionSkipped(TestIdentifier test, String reason) {
var event = new SkippedTestEvent();
if (event.shouldCommit()) {
event.initialize(test);
event.reason = reason;
event.commit();
}
}
@Override
public void executionStarted(TestIdentifier test) {
var event = new TestExecutionEvent();
if (event.isEnabled()) {
event.begin();
this.testExecutionEvents.put(test.getUniqueIdObject(), event);
}
}
@Override
public void executionFinished(TestIdentifier test, TestExecutionResult result) {
TestExecutionEvent event = this.testExecutionEvents.remove(test.getUniqueIdObject());
if (event != null && event.shouldCommit()) {
event.end();
event.initialize(test);
event.result = result.getStatus().toString();
Optional<Throwable> throwable = result.getThrowable();
event.exceptionClass = throwable.map(Throwable::getClass).orElse(null);
event.exceptionMessage = throwable.map(Throwable::getMessage).orElse(null);
event.commit();
}
}
@Override
public void reportingEntryPublished(TestIdentifier test, ReportEntry reportEntry) {
for (var entry : reportEntry.getKeyValuePairs().entrySet()) {
var event = new ReportEntryEvent();
if (event.shouldCommit()) {
event.uniqueId = test.getUniqueId();
event.key = entry.getKey();
event.value = entry.getValue();
event.commit();
}
}
}
@Override
public void fileEntryPublished(TestIdentifier testIdentifier, FileEntry file) {
var event = new FileEntryEvent();
if (event.shouldCommit()) {
event.uniqueId = testIdentifier.getUniqueId();
event.path = file.getPath().toAbsolutePath().toString();
event.commit();
}
}
@Category({ "JUnit", "Execution" })
@StackTrace(false)
abstract static | FlightRecordingExecutionListener |
java | elastic__elasticsearch | x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/XLMRobertaTokenizationResult.java | {
"start": 439,
"end": 874
} | class ____ extends RobertaTokenizationResult {
protected XLMRobertaTokenizationResult(List<String> vocab, List<Tokens> tokenizations, int padTokenId) {
super(vocab, tokenizations, padTokenId);
}
@Override
public String decode(String token) {
if (token.startsWith(PREFIX)) {
return token.substring(PREFIX.length());
}
return token;
}
static | XLMRobertaTokenizationResult |
java | spring-projects__spring-boot | module/spring-boot-cassandra/src/dockerTest/java/org/springframework/boot/cassandra/autoconfigure/CassandraAutoConfigurationWithPasswordAuthenticationIntegrationTests.java | {
"start": 4485,
"end": 5280
} | class ____ extends AbstractWaitStrategy {
@Override
protected void waitUntilReady() {
try {
Unreliables.retryUntilSuccess((int) this.startupTimeout.getSeconds(), TimeUnit.SECONDS, () -> {
getRateLimiter().doWhenReady(() -> cqlSessionBuilder().build());
return true;
});
}
catch (TimeoutException ex) {
throw new ContainerLaunchException(
"Timed out waiting for Cassandra to be accessible for query execution");
}
}
private CqlSessionBuilder cqlSessionBuilder() {
return CqlSession.builder()
.addContactPoint(new InetSocketAddress(this.waitStrategyTarget.getHost(),
this.waitStrategyTarget.getFirstMappedPort()))
.withLocalDatacenter("datacenter1")
.withAuthCredentials("cassandra", "cassandra");
}
}
}
| CassandraWaitStrategy |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/deser/enums/EnumSameName4302Test.java | {
"start": 540,
"end": 767
} | enum ____ {
BAR("bar");
public String bar;
Getter4302Enum(String bar) {
this.bar = bar;
}
public String getBar() {
return "bar";
}
}
| Getter4302Enum |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java | {
"start": 21258,
"end": 29169
} | class ____ implements Answer<LocatedBlocks> {
private int failuresLeft;
private final NamenodeProtocols realNN;
public FailNTimesAnswer(NamenodeProtocols preSpyNN, int timesToFail) {
failuresLeft = timesToFail;
this.realNN = preSpyNN;
}
@Override
public LocatedBlocks answer(InvocationOnMock invocation) throws IOException {
Object args[] = invocation.getArguments();
LocatedBlocks realAnswer = realNN.getBlockLocations(
(String)args[0],
(Long)args[1],
(Long)args[2]);
if (failuresLeft-- > 0) {
NameNode.LOG.info("FailNTimesAnswer injecting failure.");
return makeBadBlockList(realAnswer);
}
NameNode.LOG.info("FailNTimesAnswer no longer failing.");
return realAnswer;
}
private LocatedBlocks makeBadBlockList(LocatedBlocks goodBlockList) {
LocatedBlock goodLocatedBlock = goodBlockList.get(0);
LocatedBlock badLocatedBlock = new LocatedBlock(
goodLocatedBlock.getBlock(),
new DatanodeInfo[] {
DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234)
});
badLocatedBlock.setStartOffset(goodLocatedBlock.getStartOffset());
List<LocatedBlock> badBlocks = new ArrayList<LocatedBlock>();
badBlocks.add(badLocatedBlock);
return new LocatedBlocks(goodBlockList.getFileLength(), false,
badBlocks, null, true,
null, null);
}
}
/**
* Test that a DFSClient waits for random time before retry on busy blocks.
*/
@Test
public void testDFSClientRetriesOnBusyBlocks() throws IOException {
System.out.println("Testing DFSClient random waiting on busy blocks.");
//
// Test settings:
//
// xcievers fileLen #clients timeWindow #retries
// ======== ======= ======== ========== ========
// Test 1: 2 6 MB 50 300 ms 3
// Test 2: 2 6 MB 50 300 ms 50
// Test 3: 2 6 MB 50 1000 ms 3
// Test 4: 2 6 MB 50 1000 ms 50
//
// Minimum xcievers is 2 since 1 thread is reserved for registry.
// Test 1 & 3 may fail since # retries is low.
// Test 2 & 4 should never fail since (#threads)/(xcievers-1) is the upper
// bound for guarantee to not throw BlockMissingException.
//
int xcievers = 2;
int fileLen = 6*1024*1024;
int threads = 50;
int retries = 3;
int timeWin = 300;
//
// Test 1: might fail
//
long timestamp = Time.now();
boolean pass = busyTest(xcievers, threads, fileLen, timeWin, retries);
long timestamp2 = Time.now();
if ( pass ) {
LOG.info("Test 1 succeeded! Time spent: " + (timestamp2-timestamp)/1000.0 + " sec.");
} else {
LOG.warn("Test 1 failed, but relax. Time spent: " + (timestamp2-timestamp)/1000.0 + " sec.");
}
//
// Test 2: should never fail
//
retries = 50;
timestamp = Time.now();
pass = busyTest(xcievers, threads, fileLen, timeWin, retries);
timestamp2 = Time.now();
assertTrue(pass, "Something wrong! Test 2 got Exception with maxmum retries!");
LOG.info("Test 2 succeeded! Time spent: " + (timestamp2-timestamp)/1000.0 + " sec.");
//
// Test 3: might fail
//
retries = 3;
timeWin = 1000;
timestamp = Time.now();
pass = busyTest(xcievers, threads, fileLen, timeWin, retries);
timestamp2 = Time.now();
if ( pass ) {
LOG.info("Test 3 succeeded! Time spent: " + (timestamp2-timestamp)/1000.0 + " sec.");
} else {
LOG.warn("Test 3 failed, but relax. Time spent: " + (timestamp2-timestamp)/1000.0 + " sec.");
}
//
// Test 4: should never fail
//
retries = 50;
timeWin = 1000;
timestamp = Time.now();
pass = busyTest(xcievers, threads, fileLen, timeWin, retries);
timestamp2 = Time.now();
assertTrue(pass, "Something wrong! Test 4 got Exception with maxmum retries!");
LOG.info("Test 4 succeeded! Time spent: " + (timestamp2-timestamp)/1000.0 + " sec.");
}
private boolean busyTest(int xcievers, int threads, int fileLen, int timeWin, int retries)
throws IOException {
boolean ret = true;
short replicationFactor = 1;
long blockSize = 128*1024*1024; // DFS block size
int bufferSize = 4096;
int originalXcievers = conf.getInt(
DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY,
DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_DEFAULT);
conf.setInt(DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY,
xcievers);
conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY,
retries);
conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, timeWin);
// Disable keepalive
conf.setInt(DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY, 0);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(replicationFactor).build();
cluster.waitActive();
FileSystem fs = cluster.getFileSystem();
Path file1 = new Path("test_data.dat");
file1 = file1.makeQualified(fs.getUri(), fs.getWorkingDirectory()); // make URI hdfs://
try {
FSDataOutputStream stm = fs.create(file1, true,
bufferSize,
replicationFactor,
blockSize);
// verify that file exists in FS namespace
assertTrue(fs.getFileStatus(file1).isFile(),
file1 + " should be a file");
System.out.println("Path : \"" + file1 + "\"");
LOG.info("Path : \"" + file1 + "\"");
// write 1 block to file
byte[] buffer = AppendTestUtil.randomBytes(Time.now(), fileLen);
stm.write(buffer, 0, fileLen);
stm.close();
// verify that file size has changed to the full size
long len = fs.getFileStatus(file1).getLen();
assertTrue(len == fileLen, file1 +
" should be of size " + fileLen +
" but found to be of size " + len);
// read back and check data integrigy
byte[] read_buf = new byte[fileLen];
InputStream in = fs.open(file1, fileLen);
IOUtils.readFully(in, read_buf, 0, fileLen);
assert(Arrays.equals(buffer, read_buf));
in.close();
read_buf = null; // GC it if needed
// compute digest of the content to reduce memory space
MessageDigest m = MessageDigest.getInstance("SHA");
m.update(buffer, 0, fileLen);
byte[] hash_sha = m.digest();
// spawn multiple threads and all trying to access the same block
Thread[] readers = new Thread[threads];
Counter counter = new Counter(0);
for (int i = 0; i < threads; ++i ) {
DFSClientReader reader = new DFSClientReader(file1, cluster, hash_sha, fileLen, counter);
readers[i] = new SubjectInheritingThread(reader);
readers[i].start();
}
// wait for them to exit
for (int i = 0; i < threads; ++i ) {
readers[i].join();
}
if ( counter.get() == threads )
ret = true;
else
ret = false;
} catch (InterruptedException e) {
System.out.println("Thread got InterruptedException.");
e.printStackTrace();
ret = false;
} catch (Exception e) {
e.printStackTrace();
ret = false;
} finally {
conf.setInt(DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY,
originalXcievers);
fs.delete(file1, false);
cluster.shutdown();
}
return ret;
}
private void verifyEmptyLease(LeaseRenewer leaseRenewer) throws Exception {
GenericTestUtils.waitFor(() -> leaseRenewer.isEmpty(), 100, 10000);
}
| FailNTimesAnswer |
java | quarkusio__quarkus | extensions/smallrye-openapi/deployment/src/test/java/io/quarkus/smallrye/openapi/deployment/filter/SecurityConfigFilterTest.java | {
"start": 2510,
"end": 7695
} | class ____ implements SmallRyeOpenApiConfig {
private SecurityScheme securityScheme;
private String apiKeyParameterName;
private String apiKeyParameterIn;
DummySmallRyeOpenApiConfig(SecurityScheme securityScheme,
String apiKeyParameterName,
String apiKeyParameterIn) {
this.securityScheme = securityScheme;
this.apiKeyParameterName = apiKeyParameterName;
this.apiKeyParameterIn = apiKeyParameterIn;
}
@Override
public String path() {
return null;
}
@Override
public Optional<Path> storeSchemaDirectory() {
return Optional.empty();
}
@Override
public String storeSchemaFileName() {
return null;
}
@Override
public boolean alwaysRunFilter() {
return false;
}
@Override
public boolean ignoreStaticDocument() {
return false;
}
@Override
public boolean managementEnabled() {
return false;
}
@Override
public Optional<List<Path>> additionalDocsDirectory() {
return Optional.empty();
}
@Override
public Optional<SecurityScheme> securityScheme() {
return Optional.ofNullable(securityScheme);
}
@Override
public String securitySchemeName() {
return null;
}
@Override
public String securitySchemeDescription() {
return null;
}
@Override
public boolean autoAddSecurityRequirement() {
return false;
}
@Override
public boolean autoAddTags() {
return false;
}
@Override
public boolean autoAddBadRequestResponse() {
return false;
}
@Override
public boolean autoAddOperationSummary() {
return false;
}
@Override
public Optional<Boolean> autoAddServer() {
return Optional.empty();
}
@Override
public boolean autoAddSecurity() {
return false;
}
@Override
public boolean autoAddOpenApiEndpoint() {
return false;
}
@Override
public Optional<String> apiKeyParameterIn() {
return Optional.ofNullable(apiKeyParameterIn);
}
@Override
public Optional<String> apiKeyParameterName() {
return Optional.ofNullable(apiKeyParameterName);
}
@Override
public String basicSecuritySchemeValue() {
return null;
}
@Override
public String jwtSecuritySchemeValue() {
return null;
}
@Override
public String jwtBearerFormat() {
return null;
}
@Override
public String oauth2SecuritySchemeValue() {
return null;
}
@Override
public String oauth2BearerFormat() {
return null;
}
@Override
public Optional<String> oidcOpenIdConnectUrl() {
return Optional.empty();
}
@Override
public Optional<String> oauth2ImplicitRefreshUrl() {
return Optional.empty();
}
@Override
public Optional<String> oauth2ImplicitAuthorizationUrl() {
return Optional.empty();
}
@Override
public Optional<String> oauth2ImplicitTokenUrl() {
return Optional.empty();
}
@Override
public Optional<String> openApiVersion() {
return Optional.empty();
}
@Override
public Optional<String> infoTitle() {
return Optional.empty();
}
@Override
public Optional<String> infoVersion() {
return Optional.empty();
}
@Override
public Optional<String> infoDescription() {
return Optional.empty();
}
@Override
public Optional<String> infoTermsOfService() {
return Optional.empty();
}
@Override
public Optional<String> infoContactEmail() {
return Optional.empty();
}
@Override
public Optional<String> infoContactName() {
return Optional.empty();
}
@Override
public Optional<String> infoContactUrl() {
return Optional.empty();
}
@Override
public Optional<String> infoLicenseName() {
return Optional.empty();
}
@Override
public Optional<String> infoLicenseUrl() {
return Optional.empty();
}
@Override
public Optional<String> operationIdStrategy() {
return Optional.empty();
}
@Override
public Map<String, String> securitySchemeExtensions() {
return Map.of();
}
@Override
public boolean mergeSchemaExamples() {
return true;
}
}
}
| DummySmallRyeOpenApiConfig |
java | spring-projects__spring-boot | core/spring-boot-test/src/test/java/org/springframework/boot/test/context/SpringBootTestWebEnvironmentMockWithWebAppConfigurationTests.java | {
"start": 1574,
"end": 1902
} | class ____ {
@Autowired
private ServletContext servletContext;
@Test
void resourcePath() {
assertThat(this.servletContext).hasFieldOrPropertyWithValue("resourceBasePath", "src/mymain/mywebapp");
}
@Configuration(proxyBeanMethods = false)
@EnableWebMvc
static | SpringBootTestWebEnvironmentMockWithWebAppConfigurationTests |
java | quarkusio__quarkus | extensions/qute/runtime/src/main/java/io/quarkus/qute/runtime/extensions/StringTemplateExtensions.java | {
"start": 299,
"end": 4429
} | class ____ {
static final String STR = "str";
/**
* E.g. {@code strVal.fmt(name,surname)}. The priority must be lower than
* {@link #fmtInstance(String, String, Locale, Object...)}.
*
* @param format
* @param ignoredPropertyName
* @param args
* @return the formatted value
*/
@TemplateExtension(matchNames = { "fmt", "format" }, priority = 2)
static String fmtInstance(String format, String ignoredPropertyName, Object... args) {
return String.format(format, args);
}
/**
* E.g. {@code strVal.format(locale,name)}. The priority must be higher than
* {@link #fmtInstance(String, String, Object...)}.
*
* @param format
* @param ignoredPropertyName
* @param locale
* @param args
* @return the formatted value
*/
@TemplateExtension(matchNames = { "fmt", "format" }, priority = 3)
static String fmtInstance(String format, String ignoredPropertyName, Locale locale, Object... args) {
return String.format(locale, format, args);
}
/**
* E.g. {@cde str:fmt("Hello %s",name)}. The priority must be lower than {@link #fmt(String, Locale, String, Object...)}.
*
* @param ignoredPropertyName
* @param format
* @param args
* @return the formatted value
*/
@TemplateExtension(namespace = STR, matchNames = { "fmt", "format" }, priority = 2)
static String fmt(String ignoredPropertyName, String format, Object... args) {
return String.format(format, args);
}
/**
* E.g. {@code str:fmt(locale,"Hello %s",name)}. The priority must be higher than {@link #fmt(String, String, Object...)}.
*
* @param ignoredPropertyName
* @param locale
* @param format
* @param args
* @return the formatted value
*/
@TemplateExtension(namespace = STR, matchNames = { "fmt", "format" }, priority = 3)
static String fmt(String ignoredPropertyName, Locale locale, String format, Object... args) {
return String.format(locale, format, args);
}
@TemplateExtension(matchName = "+")
static String plus(String str, Object val) {
return str + val;
}
/**
* E.g. {@code str:concat("Hello ",name)}. The priority must be lower than {@link #fmt(String, String, Object...)}.
*
* @param args
*/
@TemplateExtension(namespace = STR, priority = 1)
static String concat(Object... args) {
StringBuilder b = new StringBuilder(args.length * 10);
for (Object obj : args) {
b.append(obj.toString());
}
return b.toString();
}
/**
* E.g. {@code str:join("_", "Hello",name)}. The priority must be lower than {@link #concat(Object...)}.
*
* @param delimiter
* @param args
*/
@TemplateExtension(namespace = STR, priority = 0)
static String join(String delimiter, Object... args) {
CharSequence[] elements = new CharSequence[args.length];
for (int i = 0; i < args.length; i++) {
elements[i] = args[i].toString();
}
return String.join(delimiter, elements);
}
/**
* E.g. {@code str:builder}. The priority must be lower than {@link #join(String, Object...)}.
*/
@TemplateExtension(namespace = STR, priority = -1)
static StringBuilder builder() {
return new StringBuilder();
}
/**
* E.g. {@code str:builder('Hello')}. The priority must be lower than {@link #builder()}.
*/
@TemplateExtension(namespace = STR, priority = -2)
static StringBuilder builder(Object val) {
return new StringBuilder(Objects.toString(val));
}
/**
* E.g. {@code str:['Foo and bar']}. The priority must be lower than any other {@code str:} resolver.
*
* @param name
*/
@TemplateExtension(namespace = STR, priority = -10, matchName = ANY)
static String self(String name) {
return name;
}
@TemplateExtension(matchName = "+")
static StringBuilder plus(StringBuilder builder, Object val) {
return builder.append(val);
}
}
| StringTemplateExtensions |
java | apache__camel | components/camel-netty-http/src/main/java/org/apache/camel/component/netty/http/NettyChannelBufferStreamCache.java | {
"start": 1659,
"end": 3726
} | class ____ extends InputStream implements StreamCache {
private final ByteBuf buffer;
public NettyChannelBufferStreamCache(ByteBuf buffer) {
// retain the buffer so we keep it in use until we release it when we are done
this.buffer = buffer.retain();
this.buffer.markReaderIndex();
}
@Override
public boolean markSupported() {
return true;
}
@Override
public int read() {
return buffer.readByte();
}
@Override
public int read(byte[] b) throws IOException {
return read(b, 0, b.length);
}
@Override
public int read(byte[] b, int off, int len) {
// are we at end, then return -1
if (buffer.readerIndex() == buffer.capacity()) {
return -1;
}
// ensure we don't read more than what we have in the buffer
int before = buffer.readerIndex();
int max = buffer.capacity() - before;
len = Math.min(max, len);
buffer.readBytes(b, off, len);
return buffer.readerIndex() - before;
}
@Override
public synchronized void reset() {
buffer.resetReaderIndex();
}
@Override
public void writeTo(OutputStream os) throws IOException {
// must remember current index so we can reset back to it after the copy
int idx = buffer.readerIndex();
try {
buffer.resetReaderIndex();
IOHelper.copy(this, os);
} finally {
buffer.readerIndex(idx);
}
}
@Override
public StreamCache copy(Exchange exchange) {
return new NettyChannelBufferStreamCache(buffer.copy());
}
@Override
public boolean inMemory() {
return true;
}
@Override
public long length() {
return buffer.readableBytes();
}
@Override
public long position() {
return buffer.readerIndex();
}
/**
* Release the buffer when we are done using it.
*/
public void release() {
buffer.release();
}
}
| NettyChannelBufferStreamCache |
java | apache__kafka | streams/test-utils/src/main/java/org/apache/kafka/streams/TestInputTopic.java | {
"start": 1907,
"end": 10870
} | class ____<K, V> {
private final TopologyTestDriver driver;
private final String topic;
private final Serializer<K> keySerializer;
private final Serializer<V> valueSerializer;
//Timing
private Instant currentTime;
private final Duration advanceDuration;
TestInputTopic(final TopologyTestDriver driver,
final String topicName,
final Serializer<K> keySerializer,
final Serializer<V> valueSerializer,
final Instant startTimestamp,
final Duration autoAdvance) {
Objects.requireNonNull(driver, "TopologyTestDriver cannot be null");
Objects.requireNonNull(topicName, "topicName cannot be null");
Objects.requireNonNull(keySerializer, "keySerializer cannot be null");
Objects.requireNonNull(valueSerializer, "valueSerializer cannot be null");
Objects.requireNonNull(startTimestamp, "startTimestamp cannot be null");
Objects.requireNonNull(autoAdvance, "autoAdvance cannot be null");
this.driver = driver;
this.topic = topicName;
this.keySerializer = keySerializer;
this.valueSerializer = valueSerializer;
this.currentTime = startTimestamp;
if (autoAdvance.isNegative()) {
throw new IllegalArgumentException("autoAdvance must be positive");
}
this.advanceDuration = autoAdvance;
}
/**
* Advances the internally tracked event time of this input topic.
* Each time a record without explicitly defined timestamp is piped,
* the current topic event time is used as record timestamp.
* <p>
* Note: advancing the event time on the input topic, does not advance the tracked stream time in
* {@link TopologyTestDriver} as long as no new input records are piped.
* Furthermore, it does not advance the wall-clock time of {@link TopologyTestDriver}.
*
* @param advance the duration of time to advance
*/
public void advanceTime(final Duration advance) {
if (advance.isNegative()) {
throw new IllegalArgumentException("advance must be positive");
}
currentTime = currentTime.plus(advance);
}
private Instant getTimestampAndAdvance() {
final Instant timestamp = currentTime;
currentTime = currentTime.plus(advanceDuration);
return timestamp;
}
/**
* Send an input record with the given record on the topic and then commit the records.
* May auto advance topic time.
*
* @param record the record to sent
*/
public void pipeInput(final TestRecord<K, V> record) {
//if record timestamp not set get timestamp and advance
final Instant timestamp = (record.getRecordTime() == null) ? getTimestampAndAdvance() : record.getRecordTime();
driver.pipeRecord(topic, record, keySerializer, valueSerializer, timestamp);
}
/**
* Send an input record with the given value on the topic and then commit the records.
* May auto advance topic time.
*
* @param value the record value
*/
public void pipeInput(final V value) {
pipeInput(new TestRecord<>(value));
}
/**
* Send an input record with the given key and value on the topic and then commit the records.
* May auto advance topic time
*
* @param key the record key
* @param value the record value
*/
public void pipeInput(final K key,
final V value) {
pipeInput(new TestRecord<>(key, value));
}
/**
* Send an input record with the given value and timestamp on the topic and then commit the records.
* Does not auto advance internally tracked time.
*
* @param value the record value
* @param timestamp the record timestamp
*/
public void pipeInput(final V value,
final Instant timestamp) {
pipeInput(new TestRecord<>(null, value, timestamp));
}
/**
* Send an input record with the given key, value and timestamp on the topic and then commit the records.
* Does not auto advance internally tracked time.
*
* @param key the record key
* @param value the record value
* @param timestampMs the record timestamp
*/
public void pipeInput(final K key,
final V value,
final long timestampMs) {
pipeInput(new TestRecord<>(key, value, null, timestampMs));
}
/**
* Send an input record with the given key, value and timestamp on the topic and then commit the records.
* Does not auto advance internally tracked time.
*
* @param key the record key
* @param value the record value
* @param timestamp the record timestamp
*/
public void pipeInput(final K key,
final V value,
final Instant timestamp) {
pipeInput(new TestRecord<>(key, value, timestamp));
}
/**
* Send input records with the given KeyValue list on the topic then commit each record individually.
* The timestamp will be generated based on the constructor provided start time and time will auto advance.
*
* @param records the list of TestRecord records
*/
public void pipeRecordList(final List<? extends TestRecord<K, V>> records) {
for (final TestRecord<K, V> record : records) {
pipeInput(record);
}
}
/**
* Send input records with the given KeyValue list on the topic then commit each record individually.
* The timestamp will be generated based on the constructor provided start time and time will auto advance based on
* {@link #TestInputTopic(TopologyTestDriver, String, Serializer, Serializer, Instant, Duration) autoAdvance} setting.
*
* @param keyValues the {@link List} of {@link KeyValue} records
*/
public void pipeKeyValueList(final List<KeyValue<K, V>> keyValues) {
for (final KeyValue<K, V> keyValue : keyValues) {
pipeInput(keyValue.key, keyValue.value);
}
}
/**
* Send input records with the given value list on the topic then commit each record individually.
* The timestamp will be generated based on the constructor provided start time and time will auto advance based on
* {@link #TestInputTopic(TopologyTestDriver, String, Serializer, Serializer, Instant, Duration) autoAdvance} setting.
*
* @param values the {@link List} of {@link KeyValue} records
*/
public void pipeValueList(final List<V> values) {
for (final V value : values) {
pipeInput(value);
}
}
/**
* Send input records with the given {@link KeyValue} list on the topic then commit each record individually.
* Does not auto advance internally tracked time.
*
* @param keyValues the {@link List} of {@link KeyValue} records
* @param startTimestamp the timestamp for the first generated record
* @param advance the time difference between two consecutive generated records
*/
public void pipeKeyValueList(final List<KeyValue<K, V>> keyValues,
final Instant startTimestamp,
final Duration advance) {
Instant recordTime = startTimestamp;
for (final KeyValue<K, V> keyValue : keyValues) {
pipeInput(keyValue.key, keyValue.value, recordTime);
recordTime = recordTime.plus(advance);
}
}
/**
* Send input records with the given value list on the topic then commit each record individually.
* The timestamp will be generated based on the constructor provided start time and time will auto advance based on
* {@link #TestInputTopic(TopologyTestDriver, String, Serializer, Serializer, Instant, Duration) autoAdvance} setting.
*
* @param values the {@link List} of values
* @param startTimestamp the timestamp for the first generated record
* @param advance the time difference between two consecutive generated records
*/
public void pipeValueList(final List<V> values,
final Instant startTimestamp,
final Duration advance) {
Instant recordTime = startTimestamp;
for (final V value : values) {
pipeInput(value, recordTime);
recordTime = recordTime.plus(advance);
}
}
@Override
public String toString() {
return new StringJoiner(", ", TestInputTopic.class.getSimpleName() + "[", "]")
.add("topic='" + topic + "'")
.add("keySerializer=" + keySerializer.getClass().getSimpleName())
.add("valueSerializer=" + valueSerializer.getClass().getSimpleName())
.toString();
}
}
| TestInputTopic |
java | spring-projects__spring-security | web/src/main/java/org/springframework/security/web/http/SecurityHeaders.java | {
"start": 910,
"end": 1438
} | class ____ {
private SecurityHeaders() {
}
/**
* Sets the provided value as a Bearer token in a header with the name of
* {@link HttpHeaders#AUTHORIZATION}
* @param bearerTokenValue the bear token value
* @return a {@link Consumer} that sets the header.
*/
public static Consumer<HttpHeaders> bearerToken(String bearerTokenValue) {
Assert.hasText(bearerTokenValue, "bearerTokenValue cannot be null");
return (headers) -> headers.set(HttpHeaders.AUTHORIZATION, "Bearer " + bearerTokenValue);
}
}
| SecurityHeaders |
java | elastic__elasticsearch | x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/RestoreModelSnapshotIT.java | {
"start": 1554,
"end": 4091
} | class ____ extends MlNativeAutodetectIntegTestCase {
@After
public void tearDownData() {
cleanUp();
}
public void test() throws Exception {
TimeValue bucketSpan = TimeValue.timeValueHours(1);
int bucketCount = 72;
List<String> data = new ArrayList<>();
long now = System.currentTimeMillis();
long timestamp = now - bucketCount * bucketSpan.getMillis();
for (int i = 0; i < bucketCount; i++) {
Map<String, Object> record = new HashMap<>();
record.put("time", timestamp);
data.add(createJsonRecord(record));
timestamp += bucketSpan.getMillis();
}
// Create the job, post the data and close the job
Job.Builder job = buildAndRegisterJob("restore-model-snapshot-job", bucketSpan);
openJob(job.getId());
// Forecast should fail when the model has seen no data, ie model state not initialized
expectThrows(ElasticsearchStatusException.class, () -> forecast(job.getId(), TimeValue.timeValueHours(3), null));
postData(job.getId(), data.stream().collect(Collectors.joining()));
closeJob(job.getId());
// Reopen the job and check forecast works
openJob(job.getId());
String forecastId = forecast(job.getId(), TimeValue.timeValueHours(3), null);
waitForecastToFinish(job.getId(), forecastId);
// In a multi-node cluster the replica may not be up to date
// so wait for the change
assertBusy(() -> {
ForecastRequestStats forecastStats = getForecastStats(job.getId(), forecastId);
assertThat(forecastStats.getMessages(), anyOf(nullValue(), empty()));
assertThat(forecastStats.getMemoryUsage(), greaterThan(0L));
assertThat(forecastStats.getRecordCount(), equalTo(3L));
});
closeJob(job.getId());
}
private Job.Builder buildAndRegisterJob(String jobId, TimeValue bucketSpan) throws Exception {
Detector.Builder detector = new Detector.Builder("count", null);
AnalysisConfig.Builder analysisConfig = new AnalysisConfig.Builder(Collections.singletonList(detector.build()));
analysisConfig.setBucketSpan(bucketSpan);
Job.Builder job = new Job.Builder(jobId);
job.setAnalysisConfig(analysisConfig);
DataDescription.Builder dataDescription = new DataDescription.Builder();
job.setDataDescription(dataDescription);
putJob(job);
return job;
}
}
| RestoreModelSnapshotIT |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/writeAsArray/WriteAsArray_Object_2_public.java | {
"start": 898,
"end": 1273
} | class ____ {
private int id;
private A value;
public int getId() {
return id;
}
public void setId(int id) {
this.id = id;
}
public A getValue() {
return value;
}
public void setValue(A value) {
this.value = value;
}
}
public static | VO |
java | spring-projects__spring-framework | spring-core/src/test/java/org/springframework/core/SerializableTypeWrapperTests.java | {
"start": 4698,
"end": 4770
} | class ____ {
public Constructors(List<String> p) {
}
}
}
| Constructors |
java | spring-projects__spring-security | acl/src/main/java/org/springframework/security/acls/model/AclDataAccessException.java | {
"start": 697,
"end": 788
} | class ____ Acl data operations.
*
* @author Luke Taylor
* @since 3.0
*/
public abstract | for |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java | {
"start": 39009,
"end": 40010
} | class ____ extends MeteredBlobStoreRepository {
private static final String TYPE = "type-a";
private static final RepositoryStats STATS = new RepositoryStats(Map.of("GET", new BlobStoreActionStats(10, 13)));
private MeteredRepositoryTypeA(ProjectId projectId, RepositoryMetadata metadata, ClusterService clusterService) {
super(
projectId,
metadata,
mock(NamedXContentRegistry.class),
clusterService,
MockBigArrays.NON_RECYCLING_INSTANCE,
mock(RecoverySettings.class),
BlobPath.EMPTY,
Map.of("bucket", "bucket-a"),
SnapshotMetrics.NOOP
);
}
@Override
protected BlobStore createBlobStore() {
return mock(BlobStore.class);
}
@Override
public RepositoryStats stats() {
return STATS;
}
}
private static | MeteredRepositoryTypeA |
java | apache__camel | components/camel-influxdb2/src/main/java/org/apache/camel/component/influxdb2/data/Record.java | {
"start": 896,
"end": 1279
} | class ____ {
private String recordObj;
public Record(String recordObj) {
this.recordObj = recordObj;
}
public static Record fromString(String record) {
return new Record(record);
}
public String getInfluxRecord() {
return recordObj;
}
public void setRecord(String recordObj) {
this.recordObj = recordObj;
}
}
| Record |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestTopCLI.java | {
"start": 1801,
"end": 6286
} | class ____ {
private static final String RM1_NODE_ID = "rm1";
private static final String RM2_NODE_ID = "rm2";
private static List<String> dummyHostNames =
Arrays.asList("host1", "host2", "host3");
private static Map<String, String> savedStaticResolution = new HashMap<>();
private PrintStream stdout;
private PrintStream stderr;
@BeforeAll
public static void initializeDummyHostnameResolution() throws Exception {
String previousIpAddress;
for (String hostName : dummyHostNames) {
previousIpAddress = NetUtils.getStaticResolution(hostName);
if (null != previousIpAddress) {
savedStaticResolution.put(hostName, previousIpAddress);
}
NetUtils.addStaticResolution(hostName, "10.20.30.40");
}
}
@AfterAll
public static void restoreDummyHostnameResolution() throws Exception {
for (Map.Entry<String, String> hostnameToIpEntry : savedStaticResolution
.entrySet()) {
NetUtils.addStaticResolution(hostnameToIpEntry.getKey(),
hostnameToIpEntry.getValue());
}
}
@BeforeEach
public void before() {
this.stdout = System.out;
this.stderr = System.err;
}
@AfterEach
public void after() {
System.setOut(this.stdout);
System.setErr(this.stderr);
}
@Test
public void testHAClusterInfoURL() throws IOException, InterruptedException {
TopCLI topcli = new TopCLI();
// http
String rm1Address = "host2:8088";
String rm2Address = "host3:8088";
Configuration conf = topcli.getConf();
conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS + "." + RM1_NODE_ID,
rm1Address);
topcli.getConf().set(
YarnConfiguration.RM_WEBAPP_ADDRESS + "." + RM2_NODE_ID, rm2Address);
topcli.getConf().setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
topcli.getConf().set(YarnConfiguration.RM_HA_IDS,
RM1_NODE_ID + "," + RM2_NODE_ID);
URL clusterUrl = topcli.getHAClusterUrl(conf, RM1_NODE_ID);
assertEquals("http", clusterUrl.getProtocol());
assertEquals(rm1Address, clusterUrl.getAuthority());
clusterUrl = topcli.getHAClusterUrl(conf, RM2_NODE_ID);
assertEquals("http", clusterUrl.getProtocol());
assertEquals(rm2Address, clusterUrl.getAuthority());
// https
rm1Address = "host2:9088";
rm2Address = "host3:9088";
conf = topcli.getConf();
conf.set(YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS + "." + RM1_NODE_ID,
rm1Address);
conf.set(YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS + "." + RM2_NODE_ID,
rm2Address);
conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
conf.set(YarnConfiguration.RM_HA_IDS, RM1_NODE_ID + "," + RM2_NODE_ID);
conf.set(YarnConfiguration.YARN_HTTP_POLICY_KEY, "HTTPS_ONLY");
clusterUrl = topcli.getHAClusterUrl(conf, RM1_NODE_ID);
assertEquals("https", clusterUrl.getProtocol());
assertEquals(rm1Address, clusterUrl.getAuthority());
}
@Test
public void testHeaderNodeManagers() throws Exception {
YarnClusterMetrics ymetrics = mock(YarnClusterMetrics.class);
when(ymetrics.getNumNodeManagers()).thenReturn(0);
when(ymetrics.getNumDecommissioningNodeManagers()).thenReturn(1);
when(ymetrics.getNumDecommissionedNodeManagers()).thenReturn(2);
when(ymetrics.getNumActiveNodeManagers()).thenReturn(3);
when(ymetrics.getNumLostNodeManagers()).thenReturn(4);
when(ymetrics.getNumUnhealthyNodeManagers()).thenReturn(5);
when(ymetrics.getNumRebootedNodeManagers()).thenReturn(6);
when(ymetrics.getNumShutdownNodeManagers()).thenReturn(7);
YarnClient client = mock(YarnClient.class);
when(client.getYarnClusterMetrics()).thenReturn(ymetrics);
TopCLI topcli = new TopCLI() {
@Override protected void createAndStartYarnClient() {
}
};
topcli.setClient(client);
topcli.terminalWidth = 200;
String actual;
try (ByteArrayOutputStream outStream = new ByteArrayOutputStream();
PrintStream out = new PrintStream(outStream)) {
System.setOut(out);
System.setErr(out);
topcli.showTopScreen();
out.flush();
actual = outStream.toString(StandardCharsets.UTF_8.name());
}
String expected = "NodeManager(s)"
+ ": 0 total, 3 active, 5 unhealthy, 1 decommissioning,"
+ " 2 decommissioned, 4 lost, 6 rebooted, 7 shutdown";
assertTrue(actual.contains(expected),
String.format("Expected output to contain [%s], actual output was [%s].",
expected, actual));
}
}
| TestTopCLI |
java | apache__rocketmq | tieredstore/src/test/java/org/apache/rocketmq/tieredstore/file/FlatAppendFileTest.java | {
"start": 1868,
"end": 9818
} | class ____ {
private final String storePath = MessageStoreUtilTest.getRandomStorePath();
private MessageQueue queue;
private MetadataStore metadataStore;
private MessageStoreConfig storeConfig;
private FlatFileFactory flatFileFactory;
@Before
public void init() throws ClassNotFoundException, NoSuchMethodException {
storeConfig = new MessageStoreConfig();
storeConfig.setBrokerName("brokerName");
storeConfig.setStorePathRootDir(storePath);
storeConfig.setTieredStoreFilePath(storePath);
storeConfig.setTieredBackendServiceProvider(PosixFileSegment.class.getName());
storeConfig.setTieredStoreCommitLogMaxSize(2000L);
storeConfig.setTieredStoreConsumeQueueMaxSize(2000L);
queue = new MessageQueue("TieredFlatFileTest", storeConfig.getBrokerName(), 0);
metadataStore = new DefaultMetadataStore(storeConfig);
flatFileFactory = new FlatFileFactory(metadataStore, storeConfig);
}
@After
public void shutdown() throws IOException {
MessageStoreUtilTest.deleteStoreDirectory(storePath);
}
public ByteBuffer allocateBuffer(int size) {
byte[] byteArray = new byte[size];
ByteBuffer buffer = ByteBuffer.wrap(byteArray);
Arrays.fill(byteArray, (byte) 0);
return buffer;
}
@Test
public void recoverFileSizeTest() {
String filePath = MessageStoreUtil.toFilePath(queue);
FlatAppendFile flatFile = flatFileFactory.createFlatFileForConsumeQueue(filePath);
flatFile.rollingNewFile(500L);
FileSegment fileSegment = flatFile.getFileToWrite();
flatFile.append(allocateBuffer(1000), 1L);
flatFile.commitAsync().join();
flatFile.flushFileSegmentMeta(fileSegment);
}
@Test
public void testRecoverFile() {
String filePath = MessageStoreUtil.toFilePath(queue);
FlatAppendFile flatFile = flatFileFactory.createFlatFileForConsumeQueue(filePath);
flatFile.rollingNewFile(500L);
FileSegment fileSegment = flatFile.getFileToWrite();
flatFile.append(allocateBuffer(1000), 1L);
flatFile.commitAsync().join();
flatFile.flushFileSegmentMeta(fileSegment);
FileSegmentMetadata metadata =
metadataStore.getFileSegment(filePath, FileSegmentType.CONSUME_QUEUE, 500L);
Assert.assertEquals(fileSegment.getPath(), metadata.getPath());
Assert.assertEquals(FileSegmentType.CONSUME_QUEUE, FileSegmentType.valueOf(metadata.getType()));
Assert.assertEquals(500L, metadata.getBaseOffset());
Assert.assertEquals(1000L, metadata.getSize());
Assert.assertEquals(0L, metadata.getSealTimestamp());
fileSegment.close();
flatFile.rollingNewFile(flatFile.getAppendOffset());
flatFile.append(allocateBuffer(200), 1L);
flatFile.commitAsync().join();
flatFile.flushFileSegmentMeta(fileSegment);
Assert.assertEquals(2, flatFile.getFileSegmentList().size());
flatFile.getFileToWrite().close();
metadata = metadataStore.getFileSegment(filePath, FileSegmentType.CONSUME_QUEUE, 1500L);
Assert.assertEquals(fileSegment.getPath(), metadata.getPath());
Assert.assertEquals(FileSegmentType.CONSUME_QUEUE, FileSegmentType.valueOf(metadata.getType()));
Assert.assertEquals(1500L, metadata.getBaseOffset());
Assert.assertEquals(200L, metadata.getSize());
Assert.assertEquals(0L, metadata.getSealTimestamp());
// reference same file
flatFile = flatFileFactory.createFlatFileForConsumeQueue(filePath);
Assert.assertEquals(2, flatFile.fileSegmentTable.size());
metadata = metadataStore.getFileSegment(filePath, FileSegmentType.CONSUME_QUEUE, 1500L);
Assert.assertEquals(fileSegment.getPath(), metadata.getPath());
Assert.assertEquals(FileSegmentType.CONSUME_QUEUE, FileSegmentType.valueOf(metadata.getType()));
Assert.assertEquals(1500L, metadata.getBaseOffset());
Assert.assertEquals(200L, metadata.getSize());
Assert.assertEquals(0L, metadata.getSealTimestamp());
flatFile.destroy();
}
@Test
public void testFileSegment() {
String filePath = MessageStoreUtil.toFilePath(queue);
FlatAppendFile flatFile = flatFileFactory.createFlatFileForConsumeQueue(filePath);
Assert.assertThrows(IllegalStateException.class, flatFile::getFileToWrite);
flatFile.commitAsync().join();
flatFile.rollingNewFile(0L);
Assert.assertEquals(0L, flatFile.getMinOffset());
Assert.assertEquals(0L, flatFile.getCommitOffset());
Assert.assertEquals(0L, flatFile.getAppendOffset());
flatFile.append(allocateBuffer(1000), 1L);
Assert.assertEquals(0L, flatFile.getMinOffset());
Assert.assertEquals(0L, flatFile.getCommitOffset());
Assert.assertEquals(1000L, flatFile.getAppendOffset());
Assert.assertEquals(1L, flatFile.getMinTimestamp());
Assert.assertEquals(1L, flatFile.getMaxTimestamp());
flatFile.commitAsync().join();
Assert.assertEquals(filePath, flatFile.getFilePath());
Assert.assertEquals(FileSegmentType.CONSUME_QUEUE, flatFile.getFileType());
Assert.assertEquals(0L, flatFile.getMinOffset());
Assert.assertEquals(1000L, flatFile.getCommitOffset());
Assert.assertEquals(1000L, flatFile.getAppendOffset());
Assert.assertEquals(1L, flatFile.getMinTimestamp());
Assert.assertEquals(1L, flatFile.getMaxTimestamp());
// file full
flatFile.append(allocateBuffer(1000), 1L);
flatFile.append(allocateBuffer(1000), 1L);
flatFile.commitAsync().join();
Assert.assertEquals(2, flatFile.fileSegmentTable.size());
flatFile.destroy();
}
@Test
public void testAppendAndRead() {
FlatAppendFile flatFile = flatFileFactory.createFlatFileForConsumeQueue(MessageStoreUtil.toFilePath(queue));
flatFile.rollingNewFile(500L);
Assert.assertEquals(500L, flatFile.getCommitOffset());
Assert.assertEquals(500L, flatFile.getAppendOffset());
flatFile.append(allocateBuffer(1000), 1L);
// no commit
CompletionException exception = Assert.assertThrows(
CompletionException.class, () -> flatFile.readAsync(500, 200).join());
Assert.assertTrue(exception.getCause() instanceof TieredStoreException);
Assert.assertEquals(TieredStoreErrorCode.ILLEGAL_PARAM,
((TieredStoreException) exception.getCause()).getErrorCode());
flatFile.commitAsync().join();
Assert.assertEquals(200, flatFile.readAsync(500, 200).join().remaining());
// 500-1500, 1500-3000
flatFile.append(allocateBuffer(1500), 1L);
flatFile.commitAsync().join();
Assert.assertEquals(2, flatFile.fileSegmentTable.size());
Assert.assertEquals(1000, flatFile.readAsync(1000, 1000).join().remaining());
flatFile.destroy();
}
@Test
public void testCleanExpiredFile() {
FlatAppendFile flatFile = flatFileFactory.createFlatFileForConsumeQueue(MessageStoreUtil.toFilePath(queue));
flatFile.destroyExpiredFile(1);
flatFile.rollingNewFile(500L);
flatFile.append(allocateBuffer(1000), 2L);
flatFile.commitAsync().join();
Assert.assertEquals(1, flatFile.fileSegmentTable.size());
flatFile.destroyExpiredFile(1);
Assert.assertEquals(1, flatFile.fileSegmentTable.size());
flatFile.destroyExpiredFile(3);
Assert.assertEquals(0, flatFile.fileSegmentTable.size());
flatFile.rollingNewFile(1500L);
flatFile.append(allocateBuffer(1000), 2L);
flatFile.append(allocateBuffer(1000), 2L);
flatFile.commitAsync().join();
flatFile.destroy();
Assert.assertEquals(0, flatFile.fileSegmentTable.size());
}
}
| FlatAppendFileTest |
java | quarkusio__quarkus | devtools/maven/src/main/java/io/quarkus/maven/InfoMojo.java | {
"start": 574,
"end": 1399
} | class ____ extends QuarkusProjectStateMojoBase {
@Override
protected void validateParameters() throws MojoExecutionException {
getLog().warn("quarkus:info goal is experimental, its options and output may change in future versions");
super.validateParameters();
}
@Override
protected void processProjectState(QuarkusProject quarkusProject) throws MojoExecutionException {
final ProjectInfo invoker = new ProjectInfo(quarkusProject);
invoker.perModule(perModule);
invoker.appModel(resolveApplicationModel());
QuarkusCommandOutcome outcome;
try {
outcome = invoker.execute();
} catch (QuarkusCommandException e) {
throw new MojoExecutionException("Failed to resolve the available updates", e);
}
}
}
| InfoMojo |
java | apache__camel | core/camel-support/src/main/java/org/apache/camel/support/RandomUuidGenerator.java | {
"start": 1084,
"end": 1241
} | class ____ implements UuidGenerator {
@Override
public String generateUuid() {
return UUID.randomUUID().toString();
}
}
| RandomUuidGenerator |
java | spring-projects__spring-boot | module/spring-boot-reactor-netty/src/main/java/org/springframework/boot/reactor/netty/SslServerCustomizer.java | {
"start": 1749,
"end": 4734
} | class ____ implements NettyServerCustomizer {
private static final Log logger = LogFactory.getLog(SslServerCustomizer.class);
private final @Nullable Http2 http2;
private final ClientAuth clientAuth;
private volatile SslProvider sslProvider;
private final Map<String, SslProvider> serverNameSslProviders;
public SslServerCustomizer(@Nullable Http2 http2, Ssl.@Nullable ClientAuth clientAuth, SslBundle sslBundle,
Map<String, SslBundle> serverNameSslBundles) {
this.http2 = http2;
this.clientAuth = Ssl.ClientAuth.map(clientAuth, ClientAuth.NONE, ClientAuth.OPTIONAL, ClientAuth.REQUIRE);
this.sslProvider = createSslProvider(sslBundle);
this.serverNameSslProviders = createServerNameSslProviders(serverNameSslBundles);
updateSslBundle(null, sslBundle);
}
@Override
public HttpServer apply(HttpServer server) {
return server.secure(this::applySecurity);
}
private void applySecurity(SslContextSpec spec) {
spec.sslContext(this.sslProvider.getSslContext()).setSniAsyncMappings((serverName, promise) -> {
SslProvider provider = (serverName != null) ? this.serverNameSslProviders.get(serverName)
: this.sslProvider;
return promise.setSuccess(provider);
});
}
void updateSslBundle(@Nullable String serverName, SslBundle sslBundle) {
logger.debug("SSL Bundle has been updated, reloading SSL configuration");
if (serverName == null) {
this.sslProvider = createSslProvider(sslBundle);
}
else {
this.serverNameSslProviders.put(serverName, createSslProvider(sslBundle));
}
}
private Map<String, SslProvider> createServerNameSslProviders(Map<String, SslBundle> serverNameSslBundles) {
Map<String, SslProvider> serverNameSslProviders = new HashMap<>();
serverNameSslBundles
.forEach((serverName, sslBundle) -> serverNameSslProviders.put(serverName, createSslProvider(sslBundle)));
return serverNameSslProviders;
}
private SslProvider createSslProvider(SslBundle sslBundle) {
return SslProvider.builder().sslContext((GenericSslContextSpec<?>) createSslContextSpec(sslBundle)).build();
}
/**
* Create an {@link AbstractProtocolSslContextSpec} for a given {@link SslBundle}.
* @param sslBundle the {@link SslBundle} to use
* @return an {@link AbstractProtocolSslContextSpec} instance
*/
protected final AbstractProtocolSslContextSpec<?> createSslContextSpec(SslBundle sslBundle) {
AbstractProtocolSslContextSpec<?> sslContextSpec = (this.http2 != null && this.http2.isEnabled())
? Http2SslContextSpec.forServer(sslBundle.getManagers().getKeyManagerFactory())
: Http11SslContextSpec.forServer(sslBundle.getManagers().getKeyManagerFactory());
return sslContextSpec.configure((builder) -> {
builder.trustManager(sslBundle.getManagers().getTrustManagerFactory());
SslOptions options = sslBundle.getOptions();
builder.protocols(options.getEnabledProtocols());
builder.ciphers(SslOptions.asSet(options.getCiphers()));
builder.clientAuth(this.clientAuth);
});
}
}
| SslServerCustomizer |
java | apache__kafka | trogdor/src/main/java/org/apache/kafka/trogdor/workload/Histogram.java | {
"start": 3492,
"end": 6640
} | class ____ {
/**
* The fraction of samples which are less than or equal to the value of this percentile.
*/
private final float fraction;
/**
* The value of this percentile.
*/
private final int value;
PercentileSummary(float fraction, int value) {
this.fraction = fraction;
this.value = value;
}
public float fraction() {
return fraction;
}
public int value() {
return value;
}
}
public Summary summarize() {
return summarize(new float[0]);
}
public Summary summarize(float[] percentiles) {
int[] countsCopy = new int[counts.length];
synchronized (this) {
System.arraycopy(counts, 0, countsCopy, 0, counts.length);
}
// Verify that the percentiles array is sorted and positive.
float prev = 0f;
for (float percentile : percentiles) {
if (percentile < prev) {
throw new RuntimeException("Invalid percentiles fraction array. Bad element " +
percentile + ". The array must be sorted and non-negative.");
}
if (percentile > 1.0f) {
throw new RuntimeException("Invalid percentiles fraction array. Bad element " +
percentile + ". Elements must be less than or equal to 1.");
}
}
// Find out how many total samples we have, and what the average is.
long numSamples = 0;
float total = 0f;
for (int i = 0; i < countsCopy.length; i++) {
long count = countsCopy[i];
numSamples = numSamples + count;
total = total + (i * count);
}
float average = (numSamples == 0) ? 0.0f : (total / numSamples);
List<PercentileSummary> percentileSummaries =
summarizePercentiles(countsCopy, percentiles, numSamples);
return new Summary(numSamples, average, percentileSummaries);
}
private List<PercentileSummary> summarizePercentiles(int[] countsCopy, float[] percentiles,
long numSamples) {
if (percentiles.length == 0) {
return List.of();
}
List<PercentileSummary> summaries = new ArrayList<>(percentiles.length);
int i = 0, j = 0;
long seen = 0, next = (long) (numSamples * percentiles[0]);
while (true) {
if (i == countsCopy.length - 1) {
for (; j < percentiles.length; j++) {
summaries.add(new PercentileSummary(percentiles[j], i));
}
return summaries;
}
seen += countsCopy[i];
while (seen >= next) {
summaries.add(new PercentileSummary(percentiles[j], i));
j++;
if (j == percentiles.length) {
return summaries;
}
next = (long) (numSamples * percentiles[j]);
}
i++;
}
}
}
| PercentileSummary |
java | spring-projects__spring-boot | module/spring-boot-webflux/src/test/java/org/springframework/boot/webflux/actuate/web/exchanges/HttpExchangesWebFilterIntegrationTests.java | {
"start": 2120,
"end": 3891
} | class ____ {
private final ReactiveWebApplicationContextRunner contextRunner = new ReactiveWebApplicationContextRunner()
.withUserConfiguration(Config.class);
@Test
void exchangeForNotFoundResponseHas404Status() {
this.contextRunner.run((context) -> {
WebTestClient.bindToApplicationContext(context)
.build()
.get()
.uri("/")
.exchange()
.expectStatus()
.isNotFound();
HttpExchangeRepository repository = context.getBean(HttpExchangeRepository.class);
assertThat(repository.findAll()).hasSize(1);
assertThat(repository.findAll().get(0).getResponse().getStatus()).isEqualTo(404);
});
}
@Test
void exchangeForMonoErrorWithRuntimeExceptionHas500Status() {
this.contextRunner.run((context) -> {
WebTestClient.bindToApplicationContext(context)
.build()
.get()
.uri("/mono-error")
.exchange()
.expectStatus()
.isEqualTo(HttpStatus.INTERNAL_SERVER_ERROR);
HttpExchangeRepository repository = context.getBean(HttpExchangeRepository.class);
assertThat(repository.findAll()).hasSize(1);
assertThat(repository.findAll().get(0).getResponse().getStatus()).isEqualTo(500);
});
}
@Test
void exchangeForThrownRuntimeExceptionHas500Status() {
this.contextRunner.run((context) -> {
WebTestClient.bindToApplicationContext(context)
.build()
.get()
.uri("/thrown")
.exchange()
.expectStatus()
.isEqualTo(HttpStatus.INTERNAL_SERVER_ERROR);
HttpExchangeRepository repository = context.getBean(HttpExchangeRepository.class);
assertThat(repository.findAll()).hasSize(1);
assertThat(repository.findAll().get(0).getResponse().getStatus()).isEqualTo(500);
});
}
@Configuration(proxyBeanMethods = false)
@EnableWebFlux
static | HttpExchangesWebFilterIntegrationTests |
java | spring-projects__spring-security | core/src/main/java/org/springframework/security/authorization/method/Jsr250AuthorizationManager.java | {
"start": 2048,
"end": 3571
} | class ____ implements AuthorizationManager<MethodInvocation> {
private final Jsr250AuthorizationManagerRegistry registry = new Jsr250AuthorizationManagerRegistry();
private AuthorizationManager<Collection<String>> authoritiesAuthorizationManager = new AuthoritiesAuthorizationManager();
private String rolePrefix = "ROLE_";
/**
* Sets an {@link AuthorizationManager} that accepts a collection of authority
* strings.
* @param authoritiesAuthorizationManager the {@link AuthorizationManager} that
* accepts a collection of authority strings to use
* @since 6.2
*/
public void setAuthoritiesAuthorizationManager(
AuthorizationManager<Collection<String>> authoritiesAuthorizationManager) {
Assert.notNull(authoritiesAuthorizationManager, "authoritiesAuthorizationManager cannot be null");
this.authoritiesAuthorizationManager = authoritiesAuthorizationManager;
}
/**
* Sets the role prefix. Defaults to "ROLE_".
* @param rolePrefix the role prefix to use
*/
public void setRolePrefix(String rolePrefix) {
Assert.notNull(rolePrefix, "rolePrefix cannot be null");
this.rolePrefix = rolePrefix;
}
/**
* {@inheritDoc}
*/
@Override
public @Nullable AuthorizationResult authorize(Supplier<? extends @Nullable Authentication> authentication,
MethodInvocation methodInvocation) {
AuthorizationManager<MethodInvocation> delegate = this.registry.getManager(methodInvocation);
return delegate.authorize(authentication, methodInvocation);
}
private final | Jsr250AuthorizationManager |
java | quarkusio__quarkus | independent-projects/qute/debug/src/test/java/io/quarkus/qute/debug/client/DebuggerUtils.java | {
"start": 105,
"end": 603
} | class ____ {
public static int findAvailableSocketPort() throws IOException {
try (ServerSocket serverSocket = new ServerSocket(0)) {
int port = serverSocket.getLocalPort();
synchronized (serverSocket) {
try {
serverSocket.wait(1L);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
}
return port;
}
}
}
| DebuggerUtils |
java | apache__dubbo | dubbo-common/src/test/java/org/apache/dubbo/common/extension/ext6_inject/impl/Ext6Impl1.java | {
"start": 1123,
"end": 1531
} | class ____ implements Ext6 {
public Dao obj;
SimpleExt ext1;
public void setDao(Dao obj) {
Assertions.assertNotNull(obj, "inject extension instance can not be null");
Assertions.fail();
}
public void setExt1(SimpleExt ext1) {
this.ext1 = ext1;
}
public String echo(URL url, String s) {
return "Ext6Impl1-echo-" + ext1.echo(url, s);
}
}
| Ext6Impl1 |
java | apache__kafka | clients/src/main/java/org/apache/kafka/clients/consumer/internals/SubscriptionState.java | {
"start": 57094,
"end": 58515
} | class ____ {
public final long offset;
final Optional<Integer> offsetEpoch;
final Metadata.LeaderAndEpoch currentLeader;
FetchPosition(long offset) {
this(offset, Optional.empty(), Metadata.LeaderAndEpoch.noLeaderOrEpoch());
}
public FetchPosition(long offset, Optional<Integer> offsetEpoch, Metadata.LeaderAndEpoch currentLeader) {
this.offset = offset;
this.offsetEpoch = Objects.requireNonNull(offsetEpoch);
this.currentLeader = Objects.requireNonNull(currentLeader);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
FetchPosition that = (FetchPosition) o;
return offset == that.offset &&
offsetEpoch.equals(that.offsetEpoch) &&
currentLeader.equals(that.currentLeader);
}
@Override
public int hashCode() {
return Objects.hash(offset, offsetEpoch, currentLeader);
}
@Override
public String toString() {
return "FetchPosition{" +
"offset=" + offset +
", offsetEpoch=" + offsetEpoch +
", currentLeader=" + currentLeader +
'}';
}
}
public static | FetchPosition |
java | google__truth | core/src/main/java/com/google/common/truth/IntStreamSubject.java | {
"start": 2194,
"end": 11014
} | class ____ extends Subject {
private final Supplier<@Nullable List<?>> listSupplier;
private IntStreamSubject(FailureMetadata metadata, @Nullable IntStream actual) {
super(metadata, actual);
// For discussion of when we collect(), see the Javadoc and also StreamSubject.
this.listSupplier = memoize(listCollector(actual));
}
@Override
protected String actualCustomStringRepresentation() {
List<?> asList;
try {
asList = listSupplier.get();
} catch (IllegalStateException e) {
return "Stream that has already been operated upon or closed: "
+ actualForPackageMembersToCall();
}
return String.valueOf(asList);
}
/**
* Obsolete factory instance. This factory was previously necessary for assertions like {@code
* assertWithMessage(...).about(intStreams()).that(stream)....}. Now, you can perform assertions
* like that without the {@code about(...)} call.
*
* @deprecated Instead of {@code about(intStreams()).that(...)}, use just {@code that(...)}.
* Similarly, instead of {@code assertAbout(intStreams()).that(...)}, use just {@code
* assertThat(...)}.
*/
@Deprecated
@SuppressWarnings("InlineMeSuggester") // We want users to remove the surrounding call entirely.
public static Factory<IntStreamSubject, IntStream> intStreams() {
return IntStreamSubject::new;
}
/** Checks that the actual stream is empty. */
public void isEmpty() {
checkThatContentsList().isEmpty();
}
/** Checks that the actual stream is not empty. */
public void isNotEmpty() {
checkThatContentsList().isNotEmpty();
}
/**
* Checks that the actual stream has the given size.
*
* <p>If you'd like to check that your stream contains more than {@link Integer#MAX_VALUE}
* elements, use {@code assertThat(stream.count()).isEqualTo(...)}.
*/
public void hasSize(int size) {
checkThatContentsList().hasSize(size);
}
/** Checks that the actual stream contains the given element. */
public void contains(int element) {
checkThatContentsList().contains(element);
}
/** Checks that the actual stream does not contain the given element. */
public void doesNotContain(int element) {
checkThatContentsList().doesNotContain(element);
}
/** Checks that the actual stream does not contain duplicate elements. */
public void containsNoDuplicates() {
checkThatContentsList().containsNoDuplicates();
}
/** Checks that the actual stream contains at least one of the given elements. */
public void containsAnyOf(int first, int second, int... rest) {
checkThatContentsList().containsAnyOf(first, second, box(rest));
}
/** Checks that the actual stream contains at least one of the given elements. */
public void containsAnyIn(@Nullable Iterable<?> expected) {
checkThatContentsList().containsAnyIn(expected);
}
/**
* Checks that the actual stream contains all of the given elements. If an element appears more
* than once in the given elements, then it must appear at least that number of times in the
* actual elements.
*
* <p>To also test that the contents appear in the given order, make a call to {@code inOrder()}
* on the object returned by this method. The expected elements must appear in the given order
* within the actual elements, but they are not required to be consecutive.
*/
@CanIgnoreReturnValue
public Ordered containsAtLeast(int first, int second, int... rest) {
return checkThatContentsList().containsAtLeast(first, second, box(rest));
}
/**
* Checks that the actual stream contains all of the given elements. If an element appears more
* than once in the given elements, then it must appear at least that number of times in the
* actual elements.
*
* <p>To also test that the contents appear in the given order, make a call to {@code inOrder()}
* on the object returned by this method. The expected elements must appear in the given order
* within the actual elements, but they are not required to be consecutive.
*/
@CanIgnoreReturnValue
public Ordered containsAtLeastElementsIn(@Nullable Iterable<?> expected) {
return checkThatContentsList().containsAtLeastElementsIn(expected);
}
/**
* Checks that the actual stream contains exactly the given elements.
*
* <p>Multiplicity is respected. For example, an object duplicated exactly 3 times in the
* parameters asserts that the object must likewise be duplicated exactly 3 times in the actual
* stream.
*
* <p>To also test that the contents appear in the given order, make a call to {@code inOrder()}
* on the object returned by this method.
*/
@CanIgnoreReturnValue
public Ordered containsExactly(int @Nullable ... expected) {
if (expected == null) {
failWithoutActual(
simpleFact("could not perform containment check because expected array was null"),
actualContents());
return ALREADY_FAILED;
}
return checkThatContentsList().containsExactlyElementsIn(box(expected));
}
/**
* Checks that the actual stream contains exactly the given elements.
*
* <p>Multiplicity is respected. For example, an object duplicated exactly 3 times in the
* parameters asserts that the object must likewise be duplicated exactly 3 times in the actual
* stream.
*
* <p>To also test that the contents appear in the given order, make a call to {@code inOrder()}
* on the object returned by this method.
*/
@CanIgnoreReturnValue
public Ordered containsExactlyElementsIn(@Nullable Iterable<?> expected) {
return checkThatContentsList().containsExactlyElementsIn(expected);
}
/** Checks that the actual stream does not contain any of the given elements. */
public void containsNoneOf(int first, int second, int... rest) {
checkThatContentsList().containsNoneOf(first, second, box(rest));
}
/** Checks that the actual stream does not contain any of the given elements. */
public void containsNoneIn(@Nullable Iterable<?> excluded) {
checkThatContentsList().containsNoneIn(excluded);
}
/**
* Checks that the actual stream is strictly ordered, according to the natural ordering of its
* elements. Strictly ordered means that each element in the stream is <i>strictly</i> greater
* than the element that preceded it.
*
* @throws ClassCastException if any pair of elements is not mutually Comparable
* @throws NullPointerException if any element is null
*/
public void isInStrictOrder() {
checkThatContentsList().isInStrictOrder();
}
/**
* Checks that the actual stream is strictly ordered, according to the given comparator. Strictly
* ordered means that each element in the stream is <i>strictly</i> greater than the element that
* preceded it.
*
* @throws ClassCastException if any pair of elements is not mutually Comparable
*/
public void isInStrictOrder(Comparator<? super Integer> comparator) {
checkThatContentsList().isInStrictOrder(comparator);
}
/**
* Checks that the actual stream is ordered, according to the natural ordering of its elements.
* Ordered means that each element in the stream is greater than or equal to the element that
* preceded it.
*
* @throws ClassCastException if any pair of elements is not mutually Comparable
* @throws NullPointerException if any element is null
*/
public void isInOrder() {
checkThatContentsList().isInOrder();
}
/**
* Checks that the actual stream is ordered, according to the given comparator. Ordered means that
* each element in the stream is greater than or equal to the element that preceded it.
*
* @throws ClassCastException if any pair of elements is not mutually Comparable
*/
public void isInOrder(Comparator<? super Integer> comparator) {
checkThatContentsList().isInOrder(comparator);
}
/** Be careful with using this, as documented on {@link Subject#substituteCheck}. */
private IterableSubject checkThatContentsList() {
return substituteCheck().that(listSupplier.get());
}
private static Supplier<@Nullable List<?>> listCollector(@Nullable IntStream actual) {
return () -> actual == null ? null : actual.boxed().collect(toCollection(ArrayList::new));
}
private static Object[] box(int[] rest) {
return IntStream.of(rest).boxed().toArray(Integer[]::new);
}
private Fact actualContents() {
return actualValue("actual contents");
}
/** Ordered implementation that does nothing because an earlier check already caused a failure. */
private static final Ordered ALREADY_FAILED = () -> {};
// TODO: b/246961366 - Do we want to override + deprecate isEqualTo/isNotEqualTo?
// TODO(user): Do we want to support comparingElementsUsing() on StreamSubject?
}
| IntStreamSubject |
java | square__retrofit | samples/src/main/java/com/example/retrofit/SimpleMockService.java | {
"start": 843,
"end": 4430
} | class ____ implements GitHub {
private final BehaviorDelegate<GitHub> delegate;
private final Map<String, Map<String, List<Contributor>>> ownerRepoContributors;
MockGitHub(BehaviorDelegate<GitHub> delegate) {
this.delegate = delegate;
ownerRepoContributors = new LinkedHashMap<>();
// Seed some mock data.
addContributor("square", "retrofit", "John Doe", 12);
addContributor("square", "retrofit", "Bob Smith", 2);
addContributor("square", "retrofit", "Big Bird", 40);
addContributor("square", "picasso", "Proposition Joe", 39);
addContributor("square", "picasso", "Keiser Soze", 152);
}
@Override
public Call<List<Contributor>> contributors(String owner, String repo) {
List<Contributor> response = Collections.emptyList();
Map<String, List<Contributor>> repoContributors = ownerRepoContributors.get(owner);
if (repoContributors != null) {
List<Contributor> contributors = repoContributors.get(repo);
if (contributors != null) {
response = contributors;
}
}
return delegate.returningResponse(response).contributors(owner, repo);
}
void addContributor(String owner, String repo, String name, int contributions) {
Map<String, List<Contributor>> repoContributors = ownerRepoContributors.get(owner);
if (repoContributors == null) {
repoContributors = new LinkedHashMap<>();
ownerRepoContributors.put(owner, repoContributors);
}
List<Contributor> contributors = repoContributors.get(repo);
if (contributors == null) {
contributors = new ArrayList<>();
repoContributors.put(repo, contributors);
}
contributors.add(new Contributor(name, contributions));
}
}
public static void main(String... args) throws IOException {
// Create a very simple Retrofit adapter which points the GitHub API.
Retrofit retrofit = new Retrofit.Builder().baseUrl(SimpleService.API_URL).build();
// Create a MockRetrofit object with a NetworkBehavior which manages the fake behavior of calls.
NetworkBehavior behavior = NetworkBehavior.create();
MockRetrofit mockRetrofit =
new MockRetrofit.Builder(retrofit).networkBehavior(behavior).build();
BehaviorDelegate<GitHub> delegate = mockRetrofit.create(GitHub.class);
MockGitHub gitHub = new MockGitHub(delegate);
// Query for some contributors for a few repositories.
printContributors(gitHub, "square", "retrofit");
printContributors(gitHub, "square", "picasso");
// Using the mock-only methods, add some additional data.
System.out.println("Adding more mock data...\n");
gitHub.addContributor("square", "retrofit", "Foo Bar", 61);
gitHub.addContributor("square", "picasso", "Kit Kat", 53);
// Reduce the delay to make the next calls complete faster.
behavior.setDelay(500, TimeUnit.MILLISECONDS);
// Query for the contributors again so we can see the mock data that was added.
printContributors(gitHub, "square", "retrofit");
printContributors(gitHub, "square", "picasso");
}
private static void printContributors(GitHub gitHub, String owner, String repo)
throws IOException {
System.out.println(String.format("== Contributors for %s/%s ==", owner, repo));
Call<List<Contributor>> contributors = gitHub.contributors(owner, repo);
for (Contributor contributor : contributors.execute().body()) {
System.out.println(contributor.login + " (" + contributor.contributions + ")");
}
System.out.println();
}
}
| MockGitHub |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/bug/Bug_for_stv_liu.java | {
"start": 1184,
"end": 1563
} | class ____ extends BaseEntity {
private String username;
/**
* @return the username
*/
public String getUsername() {
return username;
}
/**
* @param username the username to set
*/
public void setUsername(String username) {
this.username = username;
}
}
}
| User |
java | quarkusio__quarkus | extensions/hibernate-validator/deployment/src/test/java/io/quarkus/hibernate/validator/test/AllowMultipleCascadedValidationOnReturnValuesTest.java | {
"start": 1549,
"end": 1918
} | class ____
implements InterfaceWithNoConstraints {
/**
* Adds @Valid to an un-constrained method from a super-type, which is not allowed.
*/
@Override
@Valid
public String foo(String s) {
return "Hello Valid World";
}
}
private static | RealizationWithValidConstraintOnMethodParameter |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/YodaConditionTest.java | {
"start": 3631,
"end": 4076
} | class ____ {",
" boolean yoda(boolean a) {",
// NOTE: this is a broken fix! We could detect this if it turns out to be an issue in
// practice.
" return a.equals(Boolean.TRUE);",
" }",
"}")
.allowBreakingChanges()
.doTest();
}
@Test
public void enums() {
refactoring
.addInputLines(
"E.java",
"""
| Test |
java | apache__flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/processor/utils/InputPriorityConflictResolver.java | {
"start": 1845,
"end": 1962
} | class ____ conflicts by inserting a {@link BatchExecExchange} into the conflicting
* input.
*/
@Internal
public | resolve |
java | redisson__redisson | redisson/src/main/java/org/redisson/RedissonAtomicLong.java | {
"start": 1257,
"end": 7990
} | class ____ extends RedissonExpirable implements RAtomicLong {
public RedissonAtomicLong(CommandAsyncExecutor commandExecutor, String name) {
super(commandExecutor, name);
}
@Override
public long addAndGet(long delta) {
return get(addAndGetAsync(delta));
}
@Override
public RFuture<Long> addAndGetAsync(long delta) {
return commandExecutor.writeAsync(getRawName(), StringCodec.INSTANCE, RedisCommands.INCRBY, getRawName(), delta);
}
@Override
public boolean compareAndSet(long expect, long update) {
return get(compareAndSetAsync(expect, update));
}
@Override
public RFuture<Boolean> compareAndSetAsync(long expect, long update) {
return commandExecutor.evalWriteAsync(getRawName(), StringCodec.INSTANCE, RedisCommands.EVAL_BOOLEAN,
"local currValue = redis.call('get', KEYS[1]); "
+ "if currValue == ARGV[1] "
+ "or (tonumber(ARGV[1]) == 0 and currValue == false) then "
+ "redis.call('set', KEYS[1], ARGV[2]); "
+ "return 1 "
+ "else "
+ "return 0 "
+ "end",
Collections.<Object>singletonList(getRawName()), expect, update);
}
@Override
public long getAndDelete() {
return get(getAndDeleteAsync());
}
@Override
public RFuture<Long> getAndDeleteAsync() {
return commandExecutor.evalWriteAsync(getRawName(), StringCodec.INSTANCE, RedisCommands.EVAL_LONG_SAFE,
"local currValue = redis.call('get', KEYS[1]); "
+ "redis.call('del', KEYS[1]); "
+ "return currValue; ",
Collections.<Object>singletonList(getRawName()));
}
@Override
public long decrementAndGet() {
return get(decrementAndGetAsync());
}
@Override
public RFuture<Long> decrementAndGetAsync() {
return commandExecutor.writeAsync(getRawName(), StringCodec.INSTANCE, RedisCommands.DECR, getRawName());
}
@Override
public long get() {
return get(getAsync());
}
@Override
public RFuture<Long> getAsync() {
return commandExecutor.writeAsync(getRawName(), StringCodec.INSTANCE, RedisCommands.GET_LONG, getRawName());
}
@Override
public long getAndAdd(long delta) {
return get(getAndAddAsync(delta));
}
@Override
public RFuture<Long> getAndAddAsync(final long delta) {
return commandExecutor.writeAsync(getRawName(), StringCodec.INSTANCE, new RedisStrictCommand<Long>("INCRBY", new Convertor<Long>() {
@Override
public Long convert(Object obj) {
return ((Long) obj) - delta;
}
}), getRawName(), delta);
}
@Override
public long getAndSet(long newValue) {
return get(getAndSetAsync(newValue));
}
@Override
public RFuture<Long> getAndSetAsync(long newValue) {
return commandExecutor.writeAsync(getRawName(), LongCodec.INSTANCE, RedisCommands.GETSET_LONG, getRawName(), newValue);
}
@Override
public long incrementAndGet() {
return get(incrementAndGetAsync());
}
@Override
public RFuture<Long> incrementAndGetAsync() {
return commandExecutor.writeAsync(getRawName(), StringCodec.INSTANCE, RedisCommands.INCR, getRawName());
}
@Override
public long getAndIncrement() {
return getAndAdd(1);
}
@Override
public RFuture<Long> getAndIncrementAsync() {
return getAndAddAsync(1);
}
@Override
public long getAndDecrement() {
return getAndAdd(-1);
}
@Override
public RFuture<Long> getAndDecrementAsync() {
return getAndAddAsync(-1);
}
@Override
public void set(long newValue) {
get(setAsync(newValue));
}
@Override
public RFuture<Void> setAsync(long newValue) {
return commandExecutor.writeAsync(getRawName(), StringCodec.INSTANCE, RedisCommands.SET, getRawName(), newValue);
}
@Override
public boolean setIfLess(long less, long value) {
return get(setIfLessAsync(less, value));
}
@Override
public RFuture<Boolean> setIfLessAsync(long less, long value) {
return commandExecutor.evalWriteAsync(getRawName(), StringCodec.INSTANCE, RedisCommands.EVAL_BOOLEAN,
"local currValue = redis.call('get', KEYS[1]); "
+ "currValue = currValue == false and 0 or tonumber(currValue);"
+ "if currValue < tonumber(ARGV[1]) then "
+ "redis.call('set', KEYS[1], ARGV[2]); "
+ "return 1;"
+ "end; "
+ "return 0;",
Collections.<Object>singletonList(getRawName()), less, value);
}
@Override
public boolean setIfGreater(long greater, long value) {
return get(setIfGreaterAsync(greater, value));
}
@Override
public RFuture<Boolean> setIfGreaterAsync(long greater, long value) {
return commandExecutor.evalWriteAsync(getRawName(), StringCodec.INSTANCE, RedisCommands.EVAL_BOOLEAN,
"local currValue = redis.call('get', KEYS[1]); "
+ "currValue = currValue == false and 0 or tonumber(currValue);"
+ "if currValue > tonumber(ARGV[1]) then "
+ "redis.call('set', KEYS[1], ARGV[2]); "
+ "return 1;"
+ "end; "
+ "return 0;",
Collections.<Object>singletonList(getRawName()), greater, value);
}
public String toString() {
return Long.toString(get());
}
@Override
public int addListener(ObjectListener listener) {
if (listener instanceof IncrByListener) {
return addListener("__keyevent@*:incrby", (IncrByListener) listener, IncrByListener::onChange);
}
return super.addListener(listener);
}
@Override
public RFuture<Integer> addListenerAsync(ObjectListener listener) {
if (listener instanceof IncrByListener) {
return addListenerAsync("__keyevent@*:incrby", (IncrByListener) listener, IncrByListener::onChange);
}
return super.addListenerAsync(listener);
}
@Override
public void removeListener(int listenerId) {
removeListener(listenerId, "__keyevent@*:incrby");
super.removeListener(listenerId);
}
@Override
public RFuture<Void> removeListenerAsync(int listenerId) {
return removeListenerAsync(listenerId, "__keyevent@*:incrby");
}
}
| RedissonAtomicLong |
java | spring-projects__spring-security | config/src/main/java/org/springframework/security/config/method/MethodSecurityBeanDefinitionParser.java | {
"start": 19988,
"end": 21532
} | class ____
implements FactoryBean<AuthorizationManagerAfterMethodInterceptor> {
private SecurityContextHolderStrategy securityContextHolderStrategy = SecurityContextHolder
.getContextHolderStrategy();
private ObservationRegistry observationRegistry = ObservationRegistry.NOOP;
private final PostAuthorizeAuthorizationManager manager = new PostAuthorizeAuthorizationManager();
@Override
public AuthorizationManagerAfterMethodInterceptor getObject() {
AuthorizationManager<MethodInvocationResult> manager = this.manager;
if (!this.observationRegistry.isNoop()) {
manager = new ObservationAuthorizationManager<>(this.observationRegistry, this.manager);
}
AuthorizationManagerAfterMethodInterceptor interceptor = AuthorizationManagerAfterMethodInterceptor
.postAuthorize(manager);
interceptor.setSecurityContextHolderStrategy(this.securityContextHolderStrategy);
return interceptor;
}
@Override
public Class<?> getObjectType() {
return AuthorizationManagerAfterMethodInterceptor.class;
}
public void setSecurityContextHolderStrategy(SecurityContextHolderStrategy securityContextHolderStrategy) {
this.securityContextHolderStrategy = securityContextHolderStrategy;
}
public void setExpressionHandler(MethodSecurityExpressionHandler expressionHandler) {
this.manager.setExpressionHandler(expressionHandler);
}
public void setObservationRegistry(ObservationRegistry registry) {
this.observationRegistry = registry;
}
}
static | PostAuthorizeAuthorizationMethodInterceptor |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java | {
"start": 46457,
"end": 47555
} | class ____ parameter takes values from
* @param acceptedValues the set of values that the parameter can take
*/
public static <T extends Enum<T>> Parameter<T> restrictedEnumParam(
String name,
boolean updateable,
Function<FieldMapper, T> initializer,
T defaultValue,
Class<T> enumClass,
Set<T> acceptedValues
) {
return restrictedEnumParam(name, updateable, initializer, (Supplier<T>) () -> defaultValue, enumClass, acceptedValues);
}
/**
* Defines a parameter that takes one of a restricted set of values from an enumeration.
*
* @param name the parameter name
* @param updateable whether the parameter can be changed by a mapping update
* @param initializer a function that reads the parameter value from an existing mapper
* @param defaultValue a supplier for the default value, to be used if the parameter is undefined in a mapping
* @param enumClass the enumeration | the |
java | apache__flink | flink-tests/src/test/java/org/apache/flink/test/checkpointing/UnalignedCheckpointRescaleITCase.java | {
"start": 32776,
"end": 32925
} | class ____ extends VerifyingSinkStateBase {
private final BitSet encounteredNumbers = new BitSet();
}
}
private static | State |
java | mybatis__mybatis-3 | src/test/java/org/apache/ibatis/submitted/primitive_array/PrimitiveArrayTest.java | {
"start": 1079,
"end": 2046
} | class ____ {
private static SqlSessionFactory sqlSessionFactory;
@BeforeAll
static void setUp() throws Exception {
// create an SqlSessionFactory
try (Reader reader = Resources
.getResourceAsReader("org/apache/ibatis/submitted/primitive_array/mybatis-config.xml")) {
sqlSessionFactory = new SqlSessionFactoryBuilder().build(reader);
}
// populate in-memory database
BaseDataTest.runScript(sqlSessionFactory.getConfiguration().getEnvironment().getDataSource(),
"org/apache/ibatis/submitted/primitive_array/CreateDB.sql");
}
@Test
void shouldGetAUser() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
Mapper mapper = sqlSession.getMapper(Mapper.class);
User user = mapper.getUser(1);
Assertions.assertEquals("User1", user.getName());
Assertions.assertEquals(2, user.getNum().length);
Assertions.assertEquals(100, user.getNum()[0]);
}
}
}
| PrimitiveArrayTest |
java | junit-team__junit5 | junit-jupiter-api/src/main/java/org/junit/jupiter/api/parallel/ResourceLocksProvider.java | {
"start": 2545,
"end": 2837
} | class ____ an analogous
* {@code @ResourceLock(value, mode)} declaration.
*
* @implNote The classes supplied as {@code enclosingInstanceTypes} may
* differ from the classes returned from invocations of
* {@link Class#getEnclosingClass()} — for example, when a nested test
* | with |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/action/datastreams/lifecycle/ErrorEntryTests.java | {
"start": 617,
"end": 1295
} | class ____ extends ESTestCase {
public void testIncrementRetryCount() {
long now = System.currentTimeMillis();
ErrorEntry existingRecord = new ErrorEntry(now, "error message", now, 0);
long newOccurenceTimestamp = now + 2L;
ErrorEntry newEntry = ErrorEntry.incrementRetryCount(existingRecord, () -> newOccurenceTimestamp);
assertThat(newEntry.firstOccurrenceTimestamp(), is(existingRecord.firstOccurrenceTimestamp()));
assertThat(newEntry.error(), is(existingRecord.error()));
assertThat(newEntry.recordedTimestamp(), is(newOccurenceTimestamp));
assertThat(newEntry.retryCount(), is(1));
}
}
| ErrorEntryTests |
java | hibernate__hibernate-orm | hibernate-envers/src/test/java/org/hibernate/orm/test/envers/integration/basic/UnversionedProperty.java | {
"start": 739,
"end": 1910
} | class ____ {
private Integer id1;
@BeforeClassTemplate
public void initData(EntityManagerFactoryScope scope) {
// Rev 1
scope.inTransaction( em -> {
UnversionedEntity ue1 = new UnversionedEntity( "a1", "b1" );
em.persist( ue1 );
id1 = ue1.getId();
} );
// Rev 2
scope.inTransaction( em -> {
UnversionedEntity ue1 = em.find( UnversionedEntity.class, id1 );
ue1.setData1( "a2" );
ue1.setData2( "b2" );
} );
}
@Test
public void testRevisionsCounts(EntityManagerFactoryScope scope) {
scope.inEntityManager( em -> {
assertEquals( Arrays.asList( 1, 2 ),
AuditReaderFactory.get( em ).getRevisions( UnversionedEntity.class, id1 ) );
} );
}
@Test
public void testHistoryOfId1(EntityManagerFactoryScope scope) {
scope.inEntityManager( em -> {
final var auditReader = AuditReaderFactory.get( em );
UnversionedEntity rev1 = new UnversionedEntity( id1, "a1", null );
UnversionedEntity rev2 = new UnversionedEntity( id1, "a2", null );
assertEquals( rev1, auditReader.find( UnversionedEntity.class, id1, 1 ) );
assertEquals( rev2, auditReader.find( UnversionedEntity.class, id1, 2 ) );
} );
}
}
| UnversionedProperty |
java | apache__camel | components/camel-avro-rpc/camel-avro-rpc-component/src/test/java/org/apache/camel/component/avro/AvroHttpProducerTest.java | {
"start": 1188,
"end": 2597
} | class ____ extends AvroProducerTestSupport {
@Override
protected void initializeServer() throws IOException {
if (server == null) {
server = new HttpServer(new SpecificResponder(KeyValueProtocol.PROTOCOL, keyValue), avroPort);
server.start();
}
if (serverReflection == null) {
serverReflection = new HttpServer(
new ReflectResponder(TestReflection.class, testReflection),
avroPortReflection);
serverReflection.start();
}
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
//In Only
from("direct:in")
.to("avro:http:localhost:" + avroPort
+ "?protocolClassName=org.apache.camel.avro.generated.KeyValueProtocol");
//In Only with message in route
from("direct:in-message-name")
.errorHandler(deadLetterChannel("mock:in-message-name-error"))
.to("avro:http:localhost:" + avroPort
+ "/put?protocolClassName=org.apache.camel.avro.generated.KeyValueProtocol")
.to("mock:result-in-message-name");
//In Only with existing | AvroHttpProducerTest |
java | netty__netty | codec-socks/src/main/java/io/netty/handler/codec/socksx/v5/DefaultSocks5PasswordAuthRequest.java | {
"start": 874,
"end": 2295
} | class ____ extends AbstractSocks5Message implements Socks5PasswordAuthRequest {
private final String username;
private final String password;
public DefaultSocks5PasswordAuthRequest(String username, String password) {
ObjectUtil.checkNotNull(username, "username");
ObjectUtil.checkNotNull(password, "password");
if (username.length() > 255) {
throw new IllegalArgumentException("username: **** (expected: less than 256 chars)");
}
if (password.length() > 255) {
throw new IllegalArgumentException("password: **** (expected: less than 256 chars)");
}
this.username = username;
this.password = password;
}
@Override
public String username() {
return username;
}
@Override
public String password() {
return password;
}
@Override
public String toString() {
StringBuilder buf = new StringBuilder(StringUtil.simpleClassName(this));
DecoderResult decoderResult = decoderResult();
if (!decoderResult.isSuccess()) {
buf.append("(decoderResult: ");
buf.append(decoderResult);
buf.append(", username: ");
} else {
buf.append("(username: ");
}
buf.append(username());
buf.append(", password: ****)");
return buf.toString();
}
}
| DefaultSocks5PasswordAuthRequest |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/any/discriminator/implicit/Order.java | {
"start": 830,
"end": 1956
} | class ____ {
@Id
public Integer id;
@Basic
public String name;
//tag::associations-any-implicit-discriminator-example[]
@Any
@AnyKeyJavaClass( Integer.class )
@JoinColumn(name = "implicit_fk")
@Column(name = "implicit_type")
public Payment paymentImplicit;
//end::associations-any-implicit-discriminator-example[]
//tag::associations-any-implicit-discriminator-full-example[]
@Any
@AnyKeyJavaClass( Integer.class )
@JoinColumn(name = "implicit_full_fk")
@Column(name = "implicit_full_type")
@AnyDiscriminatorImplicitValues(FULL_NAME)
public Payment paymentImplicitFullName;
//end::associations-any-implicit-discriminator-full-example[]
//tag::associations-any-implicit-discriminator-short-example[]
@Any
@AnyKeyJavaClass( Integer.class )
@JoinColumn(name = "implicit_short_fk")
@Column(name = "implicit_short_type")
@AnyDiscriminatorImplicitValues(SHORT_NAME)
public Payment paymentImplicitShortName;
//end::associations-any-implicit-discriminator-short-example[]
protected Order() {
// for Hibernate use
}
public Order(Integer id, String name) {
this.id = id;
this.name = name;
}
}
| Order |
java | spring-projects__spring-security | core/src/main/java/org/springframework/security/core/annotation/UniqueSecurityAnnotationScanner.java | {
"start": 2017,
"end": 2196
} | interface ____ annotated with
* `@PreAuthorize("hasRole('ADMIN')")` and `@PreAuthorize("hasRole('USER')")`
* respectively, it's not clear which of these should apply, and so this | is |
java | micronaut-projects__micronaut-core | http/src/main/java/io/micronaut/http/HttpHeaders.java | {
"start": 1336,
"end": 18119
} | interface ____ extends Headers {
/**
* {@code "Accept"}.
*/
String ACCEPT = "Accept";
/**
* {@code "Accept-CH"}.
*/
String ACCEPT_CH = "Accept-CH";
/**
* {@code "Accept-CH"}.
*/
String ACCEPT_CH_LIFETIME = "Accept-CH-Lifetime";
/**
* {@code "Accept-Charset"}.
*/
String ACCEPT_CHARSET = "Accept-Charset";
/**
* {@code "Accept-Encoding"}.
*/
String ACCEPT_ENCODING = "Accept-Encoding";
/**
* {@code "Accept-Language"}.
*/
String ACCEPT_LANGUAGE = "Accept-Language";
/**
* {@code "Accept-Ranges"}.
*/
String ACCEPT_RANGES = "Accept-Ranges";
/**
* {@code "Accept-Patch"}.
*/
String ACCEPT_PATCH = "Accept-Patch";
/**
* {@code "Access-Control-Allow-Credentials"}.
*/
String ACCESS_CONTROL_ALLOW_CREDENTIALS = "Access-Control-Allow-Credentials";
/**
* {@code "Access-Control-Allow-Headers"}.
*/
String ACCESS_CONTROL_ALLOW_HEADERS = "Access-Control-Allow-Headers";
/**
* {@code "Access-Control-Allow-Methods"}.
*/
String ACCESS_CONTROL_ALLOW_METHODS = "Access-Control-Allow-Methods";
/**
* {@code "Access-Control-Allow-Origin"}.
*/
String ACCESS_CONTROL_ALLOW_ORIGIN = "Access-Control-Allow-Origin";
/**
* {@code "Access-Control-Allow-Private-Network"}.
* @see <a href="https://developer.chrome.com/blog/private-network-access-preflight">Private Network Access</a>
* @since 4.3.0
*/
String ACCESS_CONTROL_ALLOW_PRIVATE_NETWORK = "Access-Control-Allow-Private-Network";
/**
* {@code "Access-Control-Expose-Headers"}.
*/
String ACCESS_CONTROL_EXPOSE_HEADERS = "Access-Control-Expose-Headers";
/**
* {@code "Access-Control-Max-Age"}.
*/
String ACCESS_CONTROL_MAX_AGE = "Access-Control-Max-Age";
/**
* {@code "Access-Control-Request-Headers"}.
*/
String ACCESS_CONTROL_REQUEST_HEADERS = "Access-Control-Request-Headers";
/**
* {@code "Access-Control-Request-Method"}.
*/
String ACCESS_CONTROL_REQUEST_METHOD = "Access-Control-Request-Method";
/**
* {@code "Access-Control-Request-Private-Network"}.
* @see <a href="https://developer.chrome.com/blog/private-network-access-preflight">Private Network Access</a>
* @since 4.3.0
*/
String ACCESS_CONTROL_REQUEST_PRIVATE_NETWORK = "Access-Control-Request-Private-Network";
/**
* {@code "Age"}.
*/
String AGE = "Age";
/**
* {@code "Allow"}.
*/
String ALLOW = "Allow";
/**
* {@code "Authorization"}.
*/
String AUTHORIZATION = "Authorization";
/**
* {@code "Authorization"}.
*/
String AUTHORIZATION_INFO = "Authorization-Info";
/**
* {@code "Cache-Control"}.
*/
String CACHE_CONTROL = "Cache-Control";
/**
* {@code "Connection"}.
*/
String CONNECTION = "Connection";
/**
* {@code "Content-Base"}.
*/
String CONTENT_BASE = "Content-Base";
/**
* {@code "Content-Disposition"}.
*/
String CONTENT_DISPOSITION = "Content-Disposition";
/**
* {@code "Content-DPR"}.
*/
String CONTENT_DPR = "Content-DPR";
/**
* {@code "Content-Encoding"}.
*/
String CONTENT_ENCODING = "Content-Encoding";
/**
* {@code "Content-Language"}.
*/
String CONTENT_LANGUAGE = "Content-Language";
/**
* {@code "Content-Length"}.
*/
String CONTENT_LENGTH = "Content-Length";
/**
* {@code "Content-Location"}.
*/
String CONTENT_LOCATION = "Content-Location";
/**
* {@code "Content-Transfer-Encoding"}.
*/
String CONTENT_TRANSFER_ENCODING = "Content-Transfer-Encoding";
/**
* {@code "Content-MD5"}.
*/
String CONTENT_MD5 = "Content-MD5";
/**
* {@code "Content-Range"}.
*/
String CONTENT_RANGE = "Content-Range";
/**
* {@code "Content-Type"}.
*/
String CONTENT_TYPE = "Content-Type";
/**
* {@code "Cookie"}.
*/
String COOKIE = "Cookie";
/**
* {@code "Cross-Origin-Resource-Policy"}.
*/
String CROSS_ORIGIN_RESOURCE_POLICY = "Cross-Origin-Resource-Policy";
/**
* {@code "Date"}.
*/
String DATE = "Date";
/**
* {@code "Device-Memory"}.
*/
String DEVICE_MEMORY = "Device-Memory";
/**
* {@code "Downlink"}.
*/
String DOWNLINK = "Downlink";
/**
* {@code "DPR"}.
*/
String DPR = "DPR";
/**
* {@code "ECT"}.
*/
String ECT = "ECT";
/**
* {@code "ETag"}.
*/
String ETAG = "ETag";
/**
* {@code "Expect"}.
*/
String EXPECT = "Expect";
/**
* {@code "Expires"}.
*/
String EXPIRES = "Expires";
/**
* {@code "Feature-Policy"}.
*/
String FEATURE_POLICY = "Feature-Policy";
/**
* {@code "Forwarded"}.
*/
String FORWARDED = "Forwarded";
/**
* {@code "From"}.
*/
String FROM = "From";
/**
* {@code "Host"}.
*/
String HOST = "Host";
/**
* {@code "If-Match"}.
*/
String IF_MATCH = "If-Match";
/**
* {@code "If-Modified-Since"}.
*/
String IF_MODIFIED_SINCE = "If-Modified-Since";
/**
* {@code "If-None-Match"}.
*/
String IF_NONE_MATCH = "If-None-Match";
/**
* {@code "If-Range"}.
*/
String IF_RANGE = "If-Range";
/**
* {@code "If-Unmodified-Since"}.
*/
String IF_UNMODIFIED_SINCE = "If-Unmodified-Since";
/**
* {@code "Last-Modified"}.
*/
String LAST_MODIFIED = "Last-Modified";
/**
* {@code "Link"}.
*/
String LINK = "Link";
/**
* {@code "Location"}.
*/
String LOCATION = "Location";
/**
* {@code "Max-Forwards"}.
*/
String MAX_FORWARDS = "Max-Forwards";
/**
* {@code "Origin"}.
*/
String ORIGIN = "Origin";
/**
* {@code "Pragma"}.
*/
String PRAGMA = "Pragma";
/**
* {@code "Proxy-Authenticate"}.
*/
String PROXY_AUTHENTICATE = "Proxy-Authenticate";
/**
* {@code "Proxy-Authorization"}.
*/
String PROXY_AUTHORIZATION = "Proxy-Authorization";
/**
* {@code "Range"}.
*/
String RANGE = "Range";
/**
* {@code "Referer"}.
*/
String REFERER = "Referer";
/**
* {@code "Referrer-Policy"}.
*/
String REFERRER_POLICY = "Referrer-Policy";
/**
* {@code "Retry-After"}.
*/
String RETRY_AFTER = "Retry-After";
/**
* {@code "RTT"}.
*/
String RTT = "RTT";
/**
* {@code "Save-Data"}.
*/
String SAVE_DATA = "Save-Data";
/**
* {@code "Sec-WebSocket-Key1"}.
*/
String SEC_WEBSOCKET_KEY1 = "Sec-WebSocket-Key1";
/**
* {@code "Sec-WebSocket-Key2"}.
*/
String SEC_WEBSOCKET_KEY2 = "Sec-WebSocket-Key2";
/**
* {@code "Sec-WebSocket-Location"}.
*/
String SEC_WEBSOCKET_LOCATION = "Sec-WebSocket-Location";
/**
* {@code "Sec-WebSocket-Origin"}.
*/
String SEC_WEBSOCKET_ORIGIN = "Sec-WebSocket-Origin";
/**
* {@code "Sec-WebSocket-Protocol"}.
*/
String SEC_WEBSOCKET_PROTOCOL = "Sec-WebSocket-Protocol";
/**
* {@code "Sec-WebSocket-Version"}.
*/
String SEC_WEBSOCKET_VERSION = "Sec-WebSocket-Version";
/**
* {@code "Sec-WebSocket-Key"}.
*/
String SEC_WEBSOCKET_KEY = "Sec-WebSocket-Key";
/**
* {@code "Sec-WebSocket-Accept"}.
*/
String SEC_WEBSOCKET_ACCEPT = "Sec-WebSocket-Accept";
/**
* {@code "Server"}.
*/
String SERVER = "Server";
/**
* {@code "Set-Cookie"}.
*/
String SET_COOKIE = "Set-Cookie";
/**
* {@code "Set-Cookie2"}.
*/
String SET_COOKIE2 = "Set-Cookie2";
/**
* {@code "Source-Map"}.
*/
String SOURCE_MAP = "SourceMap";
/**
* {@code "TE"}.
*/
String TE = "TE";
/**
* {@code "Trailer"}.
*/
String TRAILER = "Trailer";
/**
* {@code "Transfer-Encoding"}.
*/
String TRANSFER_ENCODING = "Transfer-Encoding";
/**
* {@code "Upgrade"}.
*/
String UPGRADE = "Upgrade";
/**
* {@code "User-Agent"}.
*/
String USER_AGENT = "User-Agent";
/**
* {@code "Vary"}.
*/
String VARY = "Vary";
/**
* {@code "Via"}.
*/
String VIA = "Via";
/**
* {@code "Viewport-Width"}.
*/
String VIEWPORT_WIDTH = "Viewport-Width";
/**
* {@code "Warning"}.
*/
String WARNING = "Warning";
/**
* {@code "WebSocket-Location"}.
*/
String WEBSOCKET_LOCATION = "WebSocket-Location";
/**
* {@code "WebSocket-Origin"}.
*/
String WEBSOCKET_ORIGIN = "WebSocket-Origin";
/**
* {@code "WebSocket-Protocol"}.
*/
String WEBSOCKET_PROTOCOL = "WebSocket-Protocol";
/**
* {@code "Width"}.
*/
String WIDTH = "Width";
/**
* {@code "WWW-Authenticate"}.
*/
String WWW_AUTHENTICATE = "WWW-Authenticate";
/**
* {@code "X-Auth-Token"}.
*/
String X_AUTH_TOKEN = "X-Auth-Token";
/**
* Unmodifiable List of every header constant defined in {@link HttpHeaders}.
*/
List<String> STANDARD_HEADERS = Collections.unmodifiableList(Arrays.asList(
ACCEPT,
ACCEPT,
ACCEPT_CH,
ACCEPT_CH_LIFETIME,
ACCEPT_CHARSET,
ACCEPT_ENCODING,
ACCEPT_LANGUAGE,
ACCEPT_RANGES,
ACCEPT_PATCH,
ACCESS_CONTROL_ALLOW_CREDENTIALS,
ACCESS_CONTROL_ALLOW_HEADERS,
ACCESS_CONTROL_ALLOW_METHODS,
ACCESS_CONTROL_ALLOW_ORIGIN,
ACCESS_CONTROL_EXPOSE_HEADERS,
ACCESS_CONTROL_MAX_AGE,
ACCESS_CONTROL_REQUEST_HEADERS,
ACCESS_CONTROL_REQUEST_METHOD,
ACCESS_CONTROL_REQUEST_PRIVATE_NETWORK,
AGE,
ALLOW,
AUTHORIZATION,
AUTHORIZATION_INFO,
CACHE_CONTROL,
CONNECTION,
CONTENT_BASE,
CONTENT_DISPOSITION,
CONTENT_DPR,
CONTENT_ENCODING,
CONTENT_LANGUAGE,
CONTENT_LENGTH,
CONTENT_LOCATION,
CONTENT_TRANSFER_ENCODING,
CONTENT_MD5,
CONTENT_RANGE,
CONTENT_TYPE,
COOKIE,
CROSS_ORIGIN_RESOURCE_POLICY,
DATE,
DEVICE_MEMORY,
DOWNLINK,
DPR,
ECT,
ETAG,
EXPECT,
EXPIRES,
FEATURE_POLICY,
FORWARDED,
FROM,
HOST,
IF_MATCH,
IF_MODIFIED_SINCE,
IF_NONE_MATCH,
IF_RANGE,
IF_UNMODIFIED_SINCE,
LAST_MODIFIED,
LINK,
LOCATION,
MAX_FORWARDS,
ORIGIN,
PRAGMA,
PROXY_AUTHENTICATE,
PROXY_AUTHORIZATION,
RANGE,
REFERER,
REFERRER_POLICY,
RETRY_AFTER,
RTT,
SAVE_DATA,
SEC_WEBSOCKET_KEY1,
SEC_WEBSOCKET_KEY2,
SEC_WEBSOCKET_LOCATION,
SEC_WEBSOCKET_ORIGIN,
SEC_WEBSOCKET_PROTOCOL,
SEC_WEBSOCKET_VERSION,
SEC_WEBSOCKET_KEY,
SEC_WEBSOCKET_ACCEPT,
SERVER,
SET_COOKIE,
SET_COOKIE2,
SOURCE_MAP,
TE,
TRAILER,
TRANSFER_ENCODING,
UPGRADE,
USER_AGENT,
VARY,
VIA,
VIEWPORT_WIDTH,
WARNING,
WEBSOCKET_LOCATION,
WEBSOCKET_ORIGIN,
WEBSOCKET_PROTOCOL,
WIDTH,
WWW_AUTHENTICATE,
X_AUTH_TOKEN
));
/**
* Whether the given key is contained within these values.
*
* @param name The key name
* @return True if it is
* @since 4.8.0
*/
default boolean contains(CharSequence name) {
return contains(name.toString());
}
/**
* Obtain the date header.
*
* @param name The header name
* @return The date header as a {@link ZonedDateTime} otherwise if it is not present or cannot be parsed
* {@link Optional#empty()}
*/
default Optional<ZonedDateTime> findDate(CharSequence name) {
try {
return findFirst(name).map(str -> {
LocalDateTime localDateTime = LocalDateTime.parse(str, DateTimeFormatter.RFC_1123_DATE_TIME);
return ZonedDateTime.of(localDateTime, ZoneId.of("GMT"));
}
);
} catch (DateTimeParseException e) {
return Optional.empty();
}
}
/**
* Obtain the date header.
*
* @param name The header name
* @return The date header as a {@link ZonedDateTime} otherwise if it is not present or cannot be parsed null
*/
default ZonedDateTime getDate(CharSequence name) {
return findDate(name).orElse(null);
}
/**
* Obtain an integer header.
*
* @param name The header name
* @return The date header as a {@link ZonedDateTime} otherwise if it is not present or cannot be parsed null
*/
default Integer getInt(CharSequence name) {
return findInt(name).orElse(null);
}
/**
* Find an integer header.
*
* @param name The name of the header
* @return An {@link Optional} of {@link Integer}
*/
default Optional<Integer> findInt(CharSequence name) {
return get(name, ConversionContext.INT);
}
/**
* Get the first value of the given header.
*
* @param name The header name
* @return The first value or null if it is present
*/
default Optional<String> findFirst(CharSequence name) {
return getFirst(name, ConversionContext.STRING);
}
/**
* The request or response content type.
*
* @return The content type
*/
default Optional<MediaType> contentType() {
return getFirst(HttpHeaders.CONTENT_TYPE, MediaType.CONVERSION_CONTEXT);
}
/**
* The request or response content type.
*
* @return The content type
*/
default OptionalLong contentLength() {
final Long aLong = getFirst(HttpHeaders.CONTENT_LENGTH, ConversionContext.LONG).orElse(null);
if (aLong != null) {
return OptionalLong.of(aLong);
} else {
return OptionalLong.empty();
}
}
/**
* A list of accepted {@link MediaType} instances.
*
* @return A list of zero or many {@link MediaType} instances
*/
default List<MediaType> accept() {
return MediaType.orderedOf(getAll(HttpHeaders.ACCEPT));
}
/**
* The {@code Accept-Charset} header, or {@code null} if unset.
*
* @return The {@code Accept-Charset} header
* @since 4.0.0
*/
@Nullable
default Charset acceptCharset() {
return findAcceptCharset().orElse(null);
}
/**
* The {@code Accept-Charset} header, or empty if unset.
*
* @return The {@code Accept-Charset} header
* @since 4.3.0
*/
default Optional<Charset> findAcceptCharset() {
return findFirst(HttpHeaders.ACCEPT_CHARSET)
.map(HttpHeadersUtil::parseAcceptCharset);
}
/**
* The {@code Accept-Language} header, or {@code null} if unset.
*
* @return The {@code Accept-Language} header
* @since 4.0.0
*/
@Nullable
default Locale acceptLanguage() {
return findAcceptLanguage().orElse(null);
}
/**
* The {@code Accept-Language} header, or empty if unset.
*
* @return The {@code Accept-Language} header
* @since 4.3.0
*/
default Optional<Locale> findAcceptLanguage() {
return findFirst(HttpHeaders.ACCEPT_LANGUAGE)
.map(text -> {
String part = HttpHeadersUtil.splitAcceptHeader(text);
return part == null ? Locale.getDefault() : Locale.forLanguageTag(part);
});
}
/**
* @return Whether the {@link HttpHeaders#CONNECTION} header is set to Keep-Alive
*/
default boolean isKeepAlive() {
return findFirst(CONNECTION)
.map(val -> val.equalsIgnoreCase(HttpHeaderValues.CONNECTION_KEEP_ALIVE)).orElse(false);
}
/**
* @return The {@link #ORIGIN} header
*/
default Optional<String> getOrigin() {
return findFirst(ORIGIN);
}
/**
* @return The {@link #AUTHORIZATION} header
*/
default Optional<String> getAuthorization() {
return findFirst(AUTHORIZATION);
}
/**
* @return The {@link #CONTENT_TYPE} header
*/
default Optional<String> getContentType() {
return findFirst(CONTENT_TYPE);
}
}
| HttpHeaders |
java | apache__spark | common/utils-java/src/main/java/org/apache/spark/api/java/function/FlatMapFunction.java | {
"start": 1019,
"end": 1118
} | interface ____<T, R> extends Serializable {
Iterator<R> call(T t) throws Exception;
}
| FlatMapFunction |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/action/support/replication/ReplicationResponse.java | {
"start": 8951,
"end": 14121
} | class ____ extends ShardOperationFailedException implements ToXContentObject {
private static final String _INDEX = "_index";
private static final String _SHARD = "_shard";
private static final String _NODE = "_node";
private static final String REASON = "reason";
private static final String STATUS = "status";
private static final String PRIMARY = "primary";
private final ShardId shardId;
private final String nodeId;
private final boolean primary;
public Failure(StreamInput in) throws IOException {
shardId = new ShardId(in);
super.shardId = shardId.getId();
index = shardId.getIndexName();
nodeId = in.readOptionalString();
cause = in.readException();
status = RestStatus.readFrom(in);
primary = in.readBoolean();
}
public Failure(ShardId shardId, @Nullable String nodeId, Exception cause, RestStatus status, boolean primary) {
super(shardId.getIndexName(), shardId.getId(), ExceptionsHelper.stackTrace(cause), status, cause);
this.shardId = shardId;
this.nodeId = nodeId;
this.primary = primary;
}
public ShardId fullShardId() {
return shardId;
}
/**
* @return On what node the failure occurred.
*/
@Nullable
public String nodeId() {
return nodeId;
}
/**
* @return Whether this failure occurred on a primary shard.
* (this only reports true for delete by query)
*/
public boolean primary() {
return primary;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
shardId.writeTo(out);
out.writeOptionalString(nodeId);
out.writeException(cause);
RestStatus.writeTo(out, status);
out.writeBoolean(primary);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(_INDEX, shardId.getIndexName());
builder.field(_SHARD, shardId.id());
builder.field(_NODE, nodeId);
builder.field(REASON);
builder.startObject();
ElasticsearchException.generateThrowableXContent(builder, params, cause);
builder.endObject();
builder.field(STATUS, status);
builder.field(PRIMARY, primary);
builder.endObject();
return builder;
}
public static Failure fromXContent(XContentParser parser) throws IOException {
XContentParser.Token token = parser.currentToken();
ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser);
String shardIndex = null, nodeId = null;
int shardId = -1;
boolean primary = false;
RestStatus status = null;
ElasticsearchException reason = null;
String currentFieldName = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token.isValue()) {
if (_INDEX.equals(currentFieldName)) {
shardIndex = parser.text();
} else if (_SHARD.equals(currentFieldName)) {
shardId = parser.intValue();
} else if (_NODE.equals(currentFieldName)) {
nodeId = parser.text();
} else if (STATUS.equals(currentFieldName)) {
status = RestStatus.valueOf(parser.text());
} else if (PRIMARY.equals(currentFieldName)) {
primary = parser.booleanValue();
}
} else if (token == XContentParser.Token.START_OBJECT) {
if (REASON.equals(currentFieldName)) {
reason = ElasticsearchException.fromXContent(parser);
} else {
parser.skipChildren(); // skip potential inner objects for forward compatibility
}
} else if (token == XContentParser.Token.START_ARRAY) {
parser.skipChildren(); // skip potential inner arrays for forward compatibility
}
}
return new Failure(new ShardId(shardIndex, IndexMetadata.INDEX_UUID_NA_VALUE, shardId), nodeId, reason, status, primary);
}
}
}
}
| Failure |
java | spring-projects__spring-framework | spring-beans/src/test/java/org/springframework/beans/factory/FactoryBeanTests.java | {
"start": 8114,
"end": 8290
} | class ____ {
private BeanImpl1 impl1;
public BeanImpl1 getImpl1() {
return impl1;
}
public void setImpl1(BeanImpl1 impl1) {
this.impl1 = impl1;
}
}
}
| BeanImpl2 |
java | mapstruct__mapstruct | processor/src/test/resources/fixtures/org/mapstruct/ap/test/value/enum2enum/OrderMapperImpl.java | {
"start": 535,
"end": 1647
} | class ____ implements OrderMapper {
@Override
public OrderDto orderEntityToDto(OrderEntity order) {
if ( order == null ) {
return null;
}
OrderDto orderDto = new OrderDto();
orderDto.setOrderType( orderTypeToExternalOrderType( order.getOrderType() ) );
return orderDto;
}
@Override
public ExternalOrderType orderTypeToExternalOrderType(OrderType orderType) {
if ( orderType == null ) {
return null;
}
ExternalOrderType externalOrderType;
switch ( orderType ) {
case EXTRA: externalOrderType = ExternalOrderType.SPECIAL;
break;
case STANDARD: externalOrderType = ExternalOrderType.DEFAULT;
break;
case NORMAL: externalOrderType = ExternalOrderType.DEFAULT;
break;
case RETAIL: externalOrderType = ExternalOrderType.RETAIL;
break;
case B2B: externalOrderType = ExternalOrderType.B2B;
break;
default: throw new IllegalArgumentException( "Unexpected | OrderMapperImpl |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/processor/FailoverRoundRobinGoodBadTest.java | {
"start": 973,
"end": 1895
} | class ____ extends ContextTestSupport {
@Test
public void testFailoverRoundRobin() throws Exception {
getMockEndpoint("mock:good").expectedBodiesReceived("Hello World", "Bye World");
getMockEndpoint("mock:bad").expectedBodiesReceived("Bye World");
template.sendBody("direct:start", "Hello World");
template.sendBody("direct:start", "Bye World");
assertMockEndpointsSatisfied();
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start").loadBalance().failover(1, true, true).to("direct:good", "direct:bad");
from("direct:good").to("mock:good");
from("direct:bad").to("mock:bad").throwException(new IllegalArgumentException("Damn"));
}
};
}
}
| FailoverRoundRobinGoodBadTest |
java | dropwizard__dropwizard | dropwizard-jersey/src/test/java/io/dropwizard/other/RestInterface.java | {
"start": 884,
"end": 1116
} | interface ____ {
@POST
@Path("repr")
@Valid
ValidRepresentation repr(@NotNull @Valid ValidRepresentation representation,
@NotNull @QueryParam("interfaceVariable") String xer);
}
| RestInterface |
java | dropwizard__dropwizard | dropwizard-jersey/src/main/java/io/dropwizard/jersey/validation/JerseyViolationExceptionMapper.java | {
"start": 411,
"end": 1413
} | class ____ implements ExceptionMapper<JerseyViolationException> {
private static final Logger LOGGER = LoggerFactory.getLogger(JerseyViolationExceptionMapper.class);
@Override
public Response toResponse(final JerseyViolationException exception) {
// Provide a way to log if desired, Issue #2128, PR #2129
LOGGER.debug("Object validation failure", exception);
final Set<ConstraintViolation<?>> violations = exception.getConstraintViolations();
final Invocable invocable = exception.getInvocable();
final List<String> errors = exception.getConstraintViolations().stream()
.map(violation -> ConstraintMessage.getMessage(violation, invocable))
.collect(Collectors.toList());
final int status = ConstraintMessage.determineStatus(violations, invocable);
return Response.status(status)
.entity(new ValidationErrorMessage(errors))
.build();
}
}
| JerseyViolationExceptionMapper |
java | apache__rocketmq | namesrv/src/test/java/org/apache/rocketmq/namesrv/processor/RequestProcessorTest.java | {
"start": 2940,
"end": 33603
} | class ____ {
private DefaultRequestProcessor defaultRequestProcessor;
private ClientRequestProcessor clientRequestProcessor;
private NamesrvController namesrvController;
private NamesrvConfig namesrvConfig;
private NettyServerConfig nettyServerConfig;
private RouteInfoManager routeInfoManager;
private Logger logger;
@Before
public void init() throws Exception {
namesrvConfig = new NamesrvConfig();
namesrvConfig.setEnableAllTopicList(true);
nettyServerConfig = new NettyServerConfig();
routeInfoManager = new RouteInfoManager(new NamesrvConfig(), null);
namesrvController = new NamesrvController(namesrvConfig, nettyServerConfig);
Field field = NamesrvController.class.getDeclaredField("routeInfoManager");
field.setAccessible(true);
field.set(namesrvController, routeInfoManager);
defaultRequestProcessor = new DefaultRequestProcessor(namesrvController);
clientRequestProcessor = new ClientRequestProcessor(namesrvController);
registerRouteInfoManager();
logger = mock(Logger.class);
setFinalStatic(DefaultRequestProcessor.class.getDeclaredField("log"), logger);
}
@Test
public void testProcessRequest_PutKVConfig() throws RemotingCommandException {
PutKVConfigRequestHeader header = new PutKVConfigRequestHeader();
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.PUT_KV_CONFIG,
header);
request.addExtField("namespace", "namespace");
request.addExtField("key", "key");
request.addExtField("value", "value");
RemotingCommand response = defaultRequestProcessor.processRequest(null, request);
assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);
assertThat(response.getRemark()).isNull();
assertThat(namesrvController.getKvConfigManager().getKVConfig("namespace", "key"))
.isEqualTo("value");
}
@Test
public void testProcessRequest_GetKVConfigReturnNotNull() throws RemotingCommandException {
namesrvController.getKvConfigManager().putKVConfig("namespace", "key", "value");
GetKVConfigRequestHeader header = new GetKVConfigRequestHeader();
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.GET_KV_CONFIG,
header);
request.addExtField("namespace", "namespace");
request.addExtField("key", "key");
RemotingCommand response = defaultRequestProcessor.processRequest(null, request);
assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);
assertThat(response.getRemark()).isNull();
GetKVConfigResponseHeader responseHeader = (GetKVConfigResponseHeader) response
.readCustomHeader();
assertThat(responseHeader.getValue()).isEqualTo("value");
}
@Test
public void testProcessRequest_GetKVConfigReturnNull() throws RemotingCommandException {
GetKVConfigRequestHeader header = new GetKVConfigRequestHeader();
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.GET_KV_CONFIG,
header);
request.addExtField("namespace", "namespace");
request.addExtField("key", "key");
RemotingCommand response = defaultRequestProcessor.processRequest(null, request);
assertThat(response.getCode()).isEqualTo(ResponseCode.QUERY_NOT_FOUND);
assertThat(response.getRemark()).isEqualTo("No config item, Namespace: namespace Key: key");
GetKVConfigResponseHeader responseHeader = (GetKVConfigResponseHeader) response
.readCustomHeader();
assertThat(responseHeader.getValue()).isNull();
}
@Test
public void testProcessRequest_DeleteKVConfig() throws RemotingCommandException {
namesrvController.getKvConfigManager().putKVConfig("namespace", "key", "value");
DeleteKVConfigRequestHeader header = new DeleteKVConfigRequestHeader();
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.DELETE_KV_CONFIG,
header);
request.addExtField("namespace", "namespace");
request.addExtField("key", "key");
RemotingCommand response = defaultRequestProcessor.processRequest(null, request);
assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);
assertThat(response.getRemark()).isNull();
assertThat(namesrvController.getKvConfigManager().getKVConfig("namespace", "key"))
.isNull();
}
@Test
public void testProcessRequest_UnSupportedRequest() throws RemotingCommandException {
final RemotingCommand unSupportedRequest = RemotingCommand.createRequestCommand(99999, null);
final RemotingCommand response = defaultRequestProcessor.processRequest(null, unSupportedRequest);
assertThat(response).isNotNull();
assertThat(response.getCode()).isEqualTo(ResponseCode.REQUEST_CODE_NOT_SUPPORTED);
}
@Test
public void testProcessRequest_UpdateConfigPath() throws RemotingCommandException {
final RemotingCommand updateConfigRequest = RemotingCommand.createRequestCommand(RequestCode.UPDATE_NAMESRV_CONFIG, null);
Properties properties = new Properties();
// Update allowed value
properties.setProperty("enableTopicList", "true");
updateConfigRequest.setBody(MixAll.properties2String(properties).getBytes(StandardCharsets.UTF_8));
RemotingCommand response = defaultRequestProcessor.processRequest(null, updateConfigRequest);
assertThat(response).isNotNull();
assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);
//update disallowed value
properties.clear();
properties.setProperty("configStorePath", "test/path");
updateConfigRequest.setBody(MixAll.properties2String(properties).getBytes(StandardCharsets.UTF_8));
response = defaultRequestProcessor.processRequest(null, updateConfigRequest);
assertThat(response).isNotNull();
assertThat(response.getCode()).isEqualTo(ResponseCode.NO_PERMISSION);
assertThat(response.getRemark()).contains("Can not update config in black list.");
//update disallowed values
properties.clear();
properties.setProperty("kvConfigPath", "test/path");
updateConfigRequest.setBody(MixAll.properties2String(properties).getBytes(StandardCharsets.UTF_8));
response = defaultRequestProcessor.processRequest(null, updateConfigRequest);
assertThat(response).isNotNull();
assertThat(response.getCode()).isEqualTo(ResponseCode.NO_PERMISSION);
assertThat(response.getRemark()).contains("Can not update config in black list");
//update disallowed values
properties.clear();
properties.setProperty("configBlackList", "test;path");
updateConfigRequest.setBody(MixAll.properties2String(properties).getBytes(StandardCharsets.UTF_8));
response = defaultRequestProcessor.processRequest(null, updateConfigRequest);
assertThat(response).isNotNull();
assertThat(response.getCode()).isEqualTo(ResponseCode.NO_PERMISSION);
assertThat(response.getRemark()).contains("Can not update config in black list");
}
@Test
public void testProcessRequest_RegisterBroker() throws RemotingCommandException,
NoSuchFieldException, IllegalAccessException {
RemotingCommand request = genSampleRegisterCmd(true);
ChannelHandlerContext ctx = mock(ChannelHandlerContext.class);
when(ctx.channel()).thenReturn(null);
RemotingCommand response = defaultRequestProcessor.processRequest(ctx, request);
assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);
assertThat(response.getRemark()).isNull();
RouteInfoManager routes = namesrvController.getRouteInfoManager();
Field brokerAddrTable = RouteInfoManager.class.getDeclaredField("brokerAddrTable");
brokerAddrTable.setAccessible(true);
BrokerData broker = new BrokerData();
broker.setBrokerName("broker");
broker.setBrokerAddrs((HashMap) Maps.newHashMap(new Long(2333), "10.10.1.1"));
assertThat((Map) brokerAddrTable.get(routes))
.contains(new HashMap.SimpleEntry("broker", broker));
}
/*@Test
public void testProcessRequest_RegisterBrokerLogicalQueue() throws Exception {
String cluster = "cluster";
String broker1Name = "broker1";
String broker1Addr = "10.10.1.1";
String broker2Name = "broker2";
String broker2Addr = "10.10.1.2";
String topic = "foobar";
LogicalQueueRouteData queueRouteData1 = new LogicalQueueRouteData(0, 0, new MessageQueue(topic, broker1Name, 0), MessageQueueRouteState.ReadOnly, 0, 10, 100, 100, broker1Addr);
{
RegisterBrokerRequestHeader header = new RegisterBrokerRequestHeader();
header.setBrokerName(broker1Name);
RemotingCommand request = RemotingCommand.createRequestCommand(
RequestCode.REGISTER_BROKER, header);
request.addExtField("brokerName", broker1Name);
request.addExtField("brokerAddr", broker1Addr);
request.addExtField("clusterName", cluster);
request.addExtField("haServerAddr", "10.10.2.1");
request.addExtField("brokerId", String.valueOf(MixAll.MASTER_ID));
request.setVersion(MQVersion.CURRENT_VERSION);
TopicConfigSerializeWrapper topicConfigSerializeWrapper = new TopicConfigSerializeWrapper();
topicConfigSerializeWrapper.setTopicConfigTable(new ConcurrentHashMap<>(Collections.singletonMap(topic, new TopicConfig(topic))));
topicConfigSerializeWrapper.setLogicalQueuesInfoMap(Maps.newHashMap(topic, new LogicalQueuesInfo(Collections.singletonMap(0, Lists.newArrayList(
queueRouteData1
)))));
topicConfigSerializeWrapper.setDataVersion(new DataVersion());
RegisterBrokerBody requestBody = new RegisterBrokerBody();
requestBody.setTopicConfigSerializeWrapper(topicConfigSerializeWrapper);
requestBody.setFilterServerList(Lists.<String>newArrayList());
request.setBody(requestBody.encode());
ChannelHandlerContext ctx = mock(ChannelHandlerContext.class);
when(ctx.channel()).thenReturn(null);
RemotingCommand response = defaultRequestProcessor.processRequest(ctx, request);
assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);
assertThat(response.getRemark()).isNull();
}
LogicalQueueRouteData queueRouteData2 = new LogicalQueueRouteData(0, 100, new MessageQueue(topic, broker2Name, 0), MessageQueueRouteState.Normal, 0, -1, -1, -1, broker2Addr);
LogicalQueueRouteData queueRouteData3 = new LogicalQueueRouteData(1, 100, new MessageQueue(topic, broker2Name, 0), MessageQueueRouteState.Normal, 0, -1, -1, -1, broker2Addr);
{
RegisterBrokerRequestHeader header = new RegisterBrokerRequestHeader();
header.setBrokerName(broker2Name);
RemotingCommand request = RemotingCommand.createRequestCommand(
RequestCode.REGISTER_BROKER, header);
request.addExtField("brokerName", broker2Name);
request.addExtField("brokerAddr", broker2Addr);
request.addExtField("clusterName", cluster);
request.addExtField("haServerAddr", "10.10.2.1");
request.addExtField("brokerId", String.valueOf(MixAll.MASTER_ID));
request.setVersion(MQVersion.CURRENT_VERSION);
TopicConfigSerializeWrapper topicConfigSerializeWrapper = new TopicConfigSerializeWrapper();
topicConfigSerializeWrapper.setTopicConfigTable(new ConcurrentHashMap<>(Collections.singletonMap(topic, new TopicConfig(topic))));
topicConfigSerializeWrapper.setLogicalQueuesInfoMap(Maps.newHashMap(topic, new LogicalQueuesInfo(ImmutableMap.of(
0, Collections.singletonList(queueRouteData2),
1, Collections.singletonList(queueRouteData3)
))));
topicConfigSerializeWrapper.setDataVersion(new DataVersion());
RegisterBrokerBody requestBody = new RegisterBrokerBody();
requestBody.setTopicConfigSerializeWrapper(topicConfigSerializeWrapper);
requestBody.setFilterServerList(Lists.<String>newArrayList());
request.setBody(requestBody.encode());
ChannelHandlerContext ctx = mock(ChannelHandlerContext.class);
when(ctx.channel()).thenReturn(null);
RemotingCommand response = defaultRequestProcessor.processRequest(ctx, request);
assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);
assertThat(response.getRemark()).isNull();
}
{
GetRouteInfoRequestHeader header = new GetRouteInfoRequestHeader();
header.setTopic(topic);
header.setSysFlag(MessageSysFlag.LOGICAL_QUEUE_FLAG);
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.GET_ROUTEINFO_BY_TOPIC, header);
request.makeCustomHeaderToNet();
ChannelHandlerContext ctx = mock(ChannelHandlerContext.class);
when(ctx.channel()).thenReturn(null);
RemotingCommand response = defaultRequestProcessor.processRequest(ctx, request);
assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);
TopicRouteDataNameSrv topicRouteDataNameSrv = JSON.parseObject(response.getBody(), TopicRouteDataNameSrv.class);
assertThat(topicRouteDataNameSrv).isNotNull();
LogicalQueuesInfoUnordered logicalQueuesInfoUnordered = new LogicalQueuesInfoUnordered();
logicalQueuesInfoUnordered.put(0, ImmutableMap.of(
new LogicalQueuesInfoUnordered.Key(queueRouteData1.getBrokerName(), queueRouteData1.getQueueId(), queueRouteData1.getOffsetDelta()), queueRouteData1,
new LogicalQueuesInfoUnordered.Key(queueRouteData2.getBrokerName(), queueRouteData2.getQueueId(), queueRouteData2.getOffsetDelta()), queueRouteData2
));
logicalQueuesInfoUnordered.put(1, ImmutableMap.of(new LogicalQueuesInfoUnordered.Key(queueRouteData3.getBrokerName(), queueRouteData3.getQueueId(), queueRouteData3.getOffsetDelta()), queueRouteData3));
assertThat(topicRouteDataNameSrv.getLogicalQueuesInfoUnordered()).isEqualTo(logicalQueuesInfoUnordered);
}
}
*/
/**
 * Registers a broker using a request version new enough (>= V3_0_11) that the
 * name server also processes the filter-server list, then verifies the broker
 * appears in {@code RouteInfoManager}'s private {@code brokerAddrTable}.
 */
@Test
public void testProcessRequest_RegisterBrokerWithFilterServer() throws RemotingCommandException,
    NoSuchFieldException, IllegalAccessException {
    RemotingCommand request = genSampleRegisterCmd(true);
    // version >= MQVersion.Version.V3_0_11.ordinal() to register with filter server
    request.setVersion(100);
    ChannelHandlerContext ctx = mock(ChannelHandlerContext.class);
    when(ctx.channel()).thenReturn(null);
    RemotingCommand response = defaultRequestProcessor.processRequest(ctx, request);
    assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);
    assertThat(response.getRemark()).isNull();
    // Reach into the private broker table via reflection to confirm registration.
    RouteInfoManager routes = namesrvController.getRouteInfoManager();
    Field brokerAddrTable = RouteInfoManager.class.getDeclaredField("brokerAddrTable");
    brokerAddrTable.setAccessible(true);
    BrokerData broker = new BrokerData();
    broker.setBrokerName("broker");
    // Use a long literal instead of the deprecated new Long(...) constructor;
    // autoboxing produces an equal Long key, so map equality is unchanged.
    broker.setBrokerAddrs((HashMap) Maps.newHashMap(2333L, "10.10.1.1"));
    assertThat((Map) brokerAddrTable.get(routes))
        .contains(new HashMap.SimpleEntry("broker", broker));
}
/**
 * Registers a broker and then sends an UNREGISTER_BROKER request, asserting
 * the unregister call itself is answered with SUCCESS.
 */
@Test
public void testProcessRequest_UnregisterBroker() throws RemotingCommandException, NoSuchFieldException, IllegalAccessException {
    ChannelHandlerContext ctx = mock(ChannelHandlerContext.class);
    when(ctx.channel()).thenReturn(null);
    //Register broker
    RemotingCommand regRequest = genSampleRegisterCmd(true);
    defaultRequestProcessor.processRequest(ctx, regRequest);
    //Unregister broker
    RemotingCommand unregRequest = genSampleRegisterCmd(false);
    RemotingCommand unregResponse = defaultRequestProcessor.processRequest(ctx, unregRequest);
    assertThat(unregResponse.getCode()).isEqualTo(ResponseCode.SUCCESS);
    assertThat(unregResponse.getRemark()).isNull();
    // Inspect the private broker table via reflection.
    RouteInfoManager routes = namesrvController.getRouteInfoManager();
    Field brokerAddrTable = RouteInfoManager.class.getDeclaredField("brokerAddrTable");
    brokerAddrTable.setAccessible(true);
    // NOTE(review): this asserts the table is still NON-empty after unregistering —
    // presumably removal is deferred or batched by RouteInfoManager; confirm this
    // is the intended post-condition rather than an inverted assertion.
    assertThat((Map) brokerAddrTable.get(routes)).isNotEmpty();
}
/**
 * GET_ALL_TOPIC_LIST_FROM_NAMESERVER succeeds while enableAllTopicList is on
 * and is rejected with SYSTEM_ERROR once the switch is turned off.
 */
@Test
public void testGetAllTopicList() throws RemotingCommandException {
    Channel mockChannel = mock(Channel.class);
    when(mockChannel.remoteAddress()).thenReturn(null);
    ChannelHandlerContext mockCtx = mock(ChannelHandlerContext.class);
    when(mockCtx.channel()).thenReturn(mockChannel);
    RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.GET_ALL_TOPIC_LIST_FROM_NAMESERVER, null);

    // Enabled: the topic list is served.
    namesrvController.getNamesrvConfig().setEnableAllTopicList(true);
    RemotingCommand enabledResponse = defaultRequestProcessor.processRequest(mockCtx, request);
    assertThat(enabledResponse.getCode()).isEqualTo(ResponseCode.SUCCESS);
    assertThat(enabledResponse.getRemark()).isNull();

    // Disabled: the identical request is refused.
    namesrvController.getNamesrvConfig().setEnableAllTopicList(false);
    RemotingCommand disabledResponse = defaultRequestProcessor.processRequest(mockCtx, request);
    assertThat(disabledResponse.getCode()).isEqualTo(ResponseCode.SYSTEM_ERROR);
}
/**
 * Route lookup succeeds for the pre-registered topic and reports
 * TOPIC_NOT_EXIST for an unknown one.
 */
@Test
public void testGetRouteInfoByTopic() throws Exception {
    ChannelHandlerContext mockCtx = mock(ChannelHandlerContext.class);
    when(mockCtx.channel()).thenReturn(null);
    RemotingCommand request = getRemotingCommand(RequestCode.GET_ROUTEINFO_BY_TOPIC);

    // "unit-test0" (set by getRemotingCommand) is registered, so lookup succeeds.
    assertThat(clientRequestProcessor.processRequest(mockCtx, request).getCode())
        .isEqualTo(ResponseCode.SUCCESS);

    // Re-point the same request at an unregistered topic.
    request.getExtFields().put("topic", "test");
    assertThat(clientRequestProcessor.processRequest(mockCtx, request).getCode())
        .isEqualTo(ResponseCode.TOPIC_NOT_EXIST);
}
/** GET_BROKER_CLUSTER_INFO is answered with SUCCESS. */
@Test
public void testGetBrokerClusterInfo() throws RemotingCommandException {
    // Channel-less mock context is sufficient for this request type.
    ChannelHandlerContext mockCtx = mock(ChannelHandlerContext.class);
    when(mockCtx.channel()).thenReturn(null);
    RemotingCommand response = defaultRequestProcessor.processRequest(
        mockCtx, getRemotingCommand(RequestCode.GET_BROKER_CLUSTER_INFO));
    assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);
}
/** QUERY_DATA_VERSION is answered with SUCCESS. */
@Test
public void testQueryDataVersion() throws RemotingCommandException {
    ChannelHandlerContext mockCtx = mock(ChannelHandlerContext.class);
    when(mockCtx.channel()).thenReturn(null);
    RemotingCommand response = defaultRequestProcessor.processRequest(
        mockCtx, getRemotingCommand(RequestCode.QUERY_DATA_VERSION));
    assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);
}
/** GET_BROKER_MEMBER_GROUP is answered with SUCCESS. */
@Test
public void testGetBrokerMemberBroker() throws RemotingCommandException {
    ChannelHandlerContext mockCtx = mock(ChannelHandlerContext.class);
    when(mockCtx.channel()).thenReturn(null);
    RemotingCommand response = defaultRequestProcessor.processRequest(
        mockCtx, getRemotingCommand(RequestCode.GET_BROKER_MEMBER_GROUP));
    assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);
}
/** BROKER_HEARTBEAT is answered with SUCCESS. */
@Test
public void testBrokerHeartBeat() throws RemotingCommandException {
    ChannelHandlerContext mockCtx = mock(ChannelHandlerContext.class);
    when(mockCtx.channel()).thenReturn(null);
    RemotingCommand response = defaultRequestProcessor.processRequest(
        mockCtx, getRemotingCommand(RequestCode.BROKER_HEARTBEAT));
    assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);
}
/** ADD_WRITE_PERM_OF_BROKER is answered with SUCCESS. */
@Test
public void testAddWritePermOfBroker() throws RemotingCommandException {
    ChannelHandlerContext mockCtx = mock(ChannelHandlerContext.class);
    when(mockCtx.channel()).thenReturn(null);
    RemotingCommand response = defaultRequestProcessor.processRequest(
        mockCtx, getRemotingCommand(RequestCode.ADD_WRITE_PERM_OF_BROKER));
    assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);
}
/** WIPE_WRITE_PERM_OF_BROKER is answered with SUCCESS. */
@Test
public void testWipeWritePermOfBroker() throws RemotingCommandException {
    ChannelHandlerContext mockCtx = mock(ChannelHandlerContext.class);
    when(mockCtx.channel()).thenReturn(null);
    RemotingCommand response = defaultRequestProcessor.processRequest(
        mockCtx, getRemotingCommand(RequestCode.WIPE_WRITE_PERM_OF_BROKER));
    assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);
}
/**
 * GET_ALL_TOPIC_LIST_FROM_NAMESERVER succeeds when the context carries a
 * channel with a resolvable remote address.
 */
@Test
public void testGetAllTopicListFromNameserver() throws RemotingCommandException {
    // This handler reads the peer address, so the mock channel must supply one.
    Channel mockChannel = mock(Channel.class);
    when(mockChannel.remoteAddress()).thenReturn(new InetSocketAddress(123));
    ChannelHandlerContext mockCtx = mock(ChannelHandlerContext.class);
    when(mockCtx.channel()).thenReturn(mockChannel);
    RemotingCommand response = defaultRequestProcessor.processRequest(
        mockCtx, getRemotingCommand(RequestCode.GET_ALL_TOPIC_LIST_FROM_NAMESERVER));
    assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);
}
/** DELETE_TOPIC_IN_NAMESRV is answered with SUCCESS. */
@Test
public void testDeleteTopicInNamesrv() throws RemotingCommandException {
    ChannelHandlerContext mockCtx = mock(ChannelHandlerContext.class);
    when(mockCtx.channel()).thenReturn(null);
    RemotingCommand response = defaultRequestProcessor.processRequest(
        mockCtx, getRemotingCommand(RequestCode.DELETE_TOPIC_IN_NAMESRV));
    assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);
}
/**
 * GET_KVLIST_BY_NAMESPACE reports QUERY_NOT_FOUND for an empty namespace and
 * SUCCESS once a key/value pair has been put into that namespace.
 */
@Test
public void testGetKVListByNamespace() throws RemotingCommandException {
    ChannelHandlerContext ctx = mock(ChannelHandlerContext.class);
    when(ctx.channel()).thenReturn(null);
    RemotingCommand request = getRemotingCommand(RequestCode.GET_KVLIST_BY_NAMESPACE);
    request.addExtField("namespace", "default-namespace-1");
    // No config exists yet in this namespace.
    RemotingCommand remotingCommand = defaultRequestProcessor.processRequest(ctx, request);
    assertThat(remotingCommand.getCode()).isEqualTo(ResponseCode.QUERY_NOT_FOUND);
    // Seed one entry, then the same request succeeds.
    namesrvController.getKvConfigManager().putKVConfig("default-namespace-1", "key", "value");
    RemotingCommand remotingCommandSuccess = defaultRequestProcessor.processRequest(ctx, request);
    assertThat(remotingCommandSuccess.getCode()).isEqualTo(ResponseCode.SUCCESS);
}
/** GET_TOPICS_BY_CLUSTER is answered with SUCCESS for "default-cluster". */
@Test
public void testGetTopicsByCluster() throws RemotingCommandException {
    ChannelHandlerContext mockCtx = mock(ChannelHandlerContext.class);
    when(mockCtx.channel()).thenReturn(null);
    RemotingCommand request = getRemotingCommand(RequestCode.GET_TOPICS_BY_CLUSTER);
    request.addExtField("cluster", "default-cluster");
    assertThat(defaultRequestProcessor.processRequest(mockCtx, request).getCode())
        .isEqualTo(ResponseCode.SUCCESS);
}
/** GET_SYSTEM_TOPIC_LIST_FROM_NS is answered with SUCCESS for "default-cluster". */
@Test
public void testGetSystemTopicListFromNs() throws RemotingCommandException {
    ChannelHandlerContext mockCtx = mock(ChannelHandlerContext.class);
    when(mockCtx.channel()).thenReturn(null);
    RemotingCommand request = getRemotingCommand(RequestCode.GET_SYSTEM_TOPIC_LIST_FROM_NS);
    request.addExtField("cluster", "default-cluster");
    assertThat(defaultRequestProcessor.processRequest(mockCtx, request).getCode())
        .isEqualTo(ResponseCode.SUCCESS);
}
/** GET_UNIT_TOPIC_LIST is answered with SUCCESS for "default-cluster". */
@Test
public void testGetUnitTopicList() throws RemotingCommandException {
    ChannelHandlerContext mockCtx = mock(ChannelHandlerContext.class);
    when(mockCtx.channel()).thenReturn(null);
    RemotingCommand request = getRemotingCommand(RequestCode.GET_UNIT_TOPIC_LIST);
    request.addExtField("cluster", "default-cluster");
    assertThat(defaultRequestProcessor.processRequest(mockCtx, request).getCode())
        .isEqualTo(ResponseCode.SUCCESS);
}
/** GET_HAS_UNIT_SUB_TOPIC_LIST is answered with SUCCESS for "default-cluster". */
@Test
public void testGetHasUnitSubTopicList() throws RemotingCommandException {
    ChannelHandlerContext mockCtx = mock(ChannelHandlerContext.class);
    when(mockCtx.channel()).thenReturn(null);
    RemotingCommand request = getRemotingCommand(RequestCode.GET_HAS_UNIT_SUB_TOPIC_LIST);
    request.addExtField("cluster", "default-cluster");
    assertThat(defaultRequestProcessor.processRequest(mockCtx, request).getCode())
        .isEqualTo(ResponseCode.SUCCESS);
}
/** GET_HAS_UNIT_SUB_UNUNIT_TOPIC_LIST is answered with SUCCESS for "default-cluster". */
@Test
public void testGetHasUnitSubUnUnitTopicList() throws RemotingCommandException {
    ChannelHandlerContext mockCtx = mock(ChannelHandlerContext.class);
    when(mockCtx.channel()).thenReturn(null);
    RemotingCommand request = getRemotingCommand(RequestCode.GET_HAS_UNIT_SUB_UNUNIT_TOPIC_LIST);
    request.addExtField("cluster", "default-cluster");
    assertThat(defaultRequestProcessor.processRequest(mockCtx, request).getCode())
        .isEqualTo(ResponseCode.SUCCESS);
}
/**
 * UPDATE_NAMESRV_CONFIG with a single key/value body is answered with SUCCESS.
 */
@Test
public void testUpdateConfig() throws RemotingCommandException {
    ChannelHandlerContext ctx = mock(ChannelHandlerContext.class);
    when(ctx.channel()).thenReturn(null);
    RemotingCommand request = getRemotingCommand(RequestCode.UPDATE_NAMESRV_CONFIG);
    request.addExtField("cluster", "default-cluster");
    Map<String, String> propertiesMap = new HashMap<>();
    propertiesMap.put("key", "value");
    // NOTE(review): Map.toString() produces "{key=value}", which is not standard
    // java.util.Properties text, and getBytes() uses the platform default charset.
    // Presumably the config parser tolerates both — confirm against the processor.
    request.setBody(propertiesMap.toString().getBytes());
    RemotingCommand remotingCommand = defaultRequestProcessor.processRequest(ctx, request);
    assertThat(remotingCommand.getCode()).isEqualTo(ResponseCode.SUCCESS);
}
/** GET_NAMESRV_CONFIG is answered with SUCCESS. */
@Test
public void testGetConfig() throws RemotingCommandException {
    ChannelHandlerContext mockCtx = mock(ChannelHandlerContext.class);
    when(mockCtx.channel()).thenReturn(null);
    RemotingCommand request = getRemotingCommand(RequestCode.GET_NAMESRV_CONFIG);
    request.addExtField("cluster", "default-cluster");
    assertThat(defaultRequestProcessor.processRequest(mockCtx, request).getCode())
        .isEqualTo(ResponseCode.SUCCESS);
}
/**
 * Builds a request of the given {@code code} whose header names the sample
 * broker and whose ext fields describe broker "broker" in cluster "cluster"
 * with topic "unit-test0".
 */
private RemotingCommand getRemotingCommand(int code) {
    RegisterBrokerRequestHeader header = new RegisterBrokerRequestHeader();
    header.setBrokerName("broker");
    RemotingCommand command = RemotingCommand.createRequestCommand(code, header);
    command.addExtField("brokerName", "broker");
    command.addExtField("brokerAddr", "10.10.1.1");
    command.addExtField("clusterName", "cluster");
    command.addExtField("haServerAddr", "10.10.2.1");
    command.addExtField("brokerId", "2333");
    command.addExtField("topic", "unit-test0");
    return command;
}
/**
 * Builds a sample REGISTER_BROKER (when {@code reg} is true) or
 * UNREGISTER_BROKER request for broker "broker" in cluster "cluster".
 * The register variant carries a body with two topic configs plus its CRC32.
 */
private static RemotingCommand genSampleRegisterCmd(boolean reg) {
    RegisterBrokerRequestHeader header = new RegisterBrokerRequestHeader();
    byte[] body = null;
    if (reg) {
        // Register requests ship a topic-config body; unregister requests have none.
        TopicConfigAndMappingSerializeWrapper topicConfigWrapper = new TopicConfigAndMappingSerializeWrapper();
        topicConfigWrapper.getTopicConfigTable().put("unit-test1", new TopicConfig());
        topicConfigWrapper.getTopicConfigTable().put("unit-test2", new TopicConfig());
        RegisterBrokerBody requestBody = new RegisterBrokerBody();
        requestBody.setTopicConfigSerializeWrapper(topicConfigWrapper);
        body = requestBody.encode(false);
        // The name server validates the body against this checksum.
        final int bodyCrc32 = UtilAll.crc32(body);
        header.setBodyCrc32(bodyCrc32);
    }
    header.setBrokerName("broker");
    RemotingCommand request = RemotingCommand.createRequestCommand(
        reg ? RequestCode.REGISTER_BROKER : RequestCode.UNREGISTER_BROKER, header);
    request.addExtField("brokerName", "broker");
    request.addExtField("brokerAddr", "10.10.1.1");
    request.addExtField("clusterName", "cluster");
    request.addExtField("haServerAddr", "10.10.2.1");
    request.addExtField("brokerId", "2333");
    request.setBody(body);
    return request;
}
/**
 * Overwrites a static final field via reflection by stripping its FINAL
 * modifier flag first.
 *
 * NOTE(review): mutating {@code Field.modifiers} reflectively is rejected on
 * JDK 12+ (the field is filtered from reflection) — confirm the build still
 * targets a JDK where this trick works, or migrate the test off it.
 */
private static void setFinalStatic(Field field, Object newValue) throws Exception {
    field.setAccessible(true);
    Field modifiersField = Field.class.getDeclaredField("modifiers");
    modifiersField.setAccessible(true);
    // Clear FINAL so field.set(...) below is permitted.
    modifiersField.setInt(field, field.getModifiers() & ~Modifier.FINAL);
    // null receiver: the target field is static.
    field.set(null, newValue);
}
/**
 * Seeds the RouteInfoManager with broker "default-broker" in
 * "default-cluster" carrying two topics (unit-test0, unit-test1), each with
 * 8 read/write queues and read+write permission, so route queries have data.
 */
private void registerRouteInfoManager() {
    TopicConfigSerializeWrapper topicConfigSerializeWrapper = new TopicConfigSerializeWrapper();
    ConcurrentHashMap<String, TopicConfig> topicConfigConcurrentHashMap = new ConcurrentHashMap<>();
    for (int i = 0; i < 2; i++) {
        TopicConfig topicConfig = new TopicConfig();
        topicConfig.setWriteQueueNums(8);
        topicConfig.setTopicName("unit-test" + i);
        topicConfig.setPerm(6);
        topicConfig.setReadQueueNums(8);
        topicConfig.setOrder(false);
        topicConfigConcurrentHashMap.put(topicConfig.getTopicName(), topicConfig);
    }
    topicConfigSerializeWrapper.setTopicConfigTable(topicConfigConcurrentHashMap);
    Channel channel = mock(Channel.class);
    // Invoked for its side effect only; the previously captured
    // RegisterBrokerResult local was never read, so it has been dropped.
    routeInfoManager.registerBroker("default-cluster", "127.0.0.1:10911", "default-broker", 1234, "127.0.0.1:1001", "",
        null, topicConfigSerializeWrapper, new ArrayList<>(), channel);
}
}
| RequestProcessorTest |
java | netty__netty | codec-compression/src/main/java/io/netty/handler/codec/compression/ZlibUtil.java | {
"start": 869,
"end": 2367
} | class ____ {
static void fail(Inflater z, String message, int resultCode) {
throw inflaterException(z, message, resultCode);
}
static void fail(Deflater z, String message, int resultCode) {
throw deflaterException(z, message, resultCode);
}
static DecompressionException inflaterException(Inflater z, String message, int resultCode) {
return new DecompressionException(message + " (" + resultCode + ')' + (z.msg != null? ": " + z.msg : ""));
}
static CompressionException deflaterException(Deflater z, String message, int resultCode) {
return new CompressionException(message + " (" + resultCode + ')' + (z.msg != null? ": " + z.msg : ""));
}
static JZlib.WrapperType convertWrapperType(ZlibWrapper wrapper) {
switch (wrapper) {
case NONE:
return JZlib.W_NONE;
case ZLIB:
return JZlib.W_ZLIB;
case GZIP:
return JZlib.W_GZIP;
case ZLIB_OR_NONE:
return JZlib.W_ANY;
default:
throw new Error("Unexpected wrapper type: " + wrapper);
}
}
static int wrapperOverhead(ZlibWrapper wrapper) {
switch (wrapper) {
case NONE:
return 0;
case ZLIB:
case ZLIB_OR_NONE:
return 2;
case GZIP:
return 10;
default:
throw new Error("Unexpected wrapper type: " + wrapper);
}
}
private ZlibUtil() {
}
}
| ZlibUtil |
java | micronaut-projects__micronaut-core | inject/src/main/java/io/micronaut/inject/annotation/EnvironmentAnnotationMetadata.java | {
"start": 3290,
"end": 3466
} | class ____ and optionally map its value.
* @param annotation The annotation
* @param member The member
* @param valueMapper The value mapper
* @return The | value |
java | quarkusio__quarkus | integration-tests/jaxb/src/main/java/io/quarkus/it/jaxb/Response.java | {
"start": 1369,
"end": 1983
} | class ____
extends ExtensionOfBaseObj {
@XmlElement(required = true)
protected String evenMoreZeep;
/**
* Gets the value of the evenMoreZeep property.
*
* @return
* possible object is
* {@link String }
*
*/
public String getEvenMoreZeep() {
return evenMoreZeep;
}
/**
* Sets the value of the evenMoreZeep property.
*
* @param value
* allowed object is
* {@link String }
*
*/
public void setEvenMoreZeep(String value) {
this.evenMoreZeep = value;
}
}
| Response |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/hql/Mammal.java | {
"start": 288,
"end": 1180
} | class ____ extends Animal {
private boolean pregnant;
private Date birthdate;
public boolean isPregnant() {
return pregnant;
}
public void setPregnant(boolean pregnant) {
this.pregnant = pregnant;
}
public Date getBirthdate() {
return birthdate;
}
public void setBirthdate(Date birthdate) {
this.birthdate = birthdate;
}
@Override
public boolean equals(Object o) {
if ( this == o ) {
return true;
}
if ( !( o instanceof Mammal ) ) {
return false;
}
Mammal mammal = ( Mammal ) o;
if ( pregnant != mammal.pregnant ) {
return false;
}
if ( birthdate != null ? !birthdate.equals( mammal.birthdate ) : mammal.birthdate != null ) {
return false;
}
return true;
}
@Override
public int hashCode() {
int result = ( pregnant ? 1 : 0 );
result = 31 * result + ( birthdate != null ? birthdate.hashCode() : 0 );
return result;
}
}
| Mammal |
java | apache__kafka | metadata/src/main/java/org/apache/kafka/controller/QuorumController.java | {
"start": 9371,
"end": 9865
} | class ____ implements Controller {
/**
* The default maximum records that the controller will write in a single batch.
*/
private static final int DEFAULT_MAX_RECORDS_PER_BATCH = 10000;
/**
* The maximum records any user-initiated operation is allowed to generate.
*
* For now, this is set to the maximum records in a single batch.
*/
static final int MAX_RECORDS_PER_USER_OP = DEFAULT_MAX_RECORDS_PER_BATCH;
/**
* A builder | QuorumController |
java | apache__camel | components/camel-aws/camel-aws-xray/src/main/java/org/apache/camel/component/aws/xray/decorators/MongoDBSegmentDecorator.java | {
"start": 1002,
"end": 2004
} | class ____ extends AbstractSegmentDecorator {
@Override
public String getComponent() {
return "mongodb";
}
@Override
public String getOperationName(Exchange exchange, Endpoint endpoint) {
Map<String, String> queryParameters = toQueryParameters(endpoint.getEndpointUri());
String opName = queryParameters.get("operation");
return opName != null ? opName : super.getOperationName(exchange, endpoint);
}
@Override
public void pre(Entity segment, Exchange exchange, Endpoint endpoint) {
super.pre(segment, exchange, endpoint);
segment.putMetadata("db.type", getComponent());
Map<String, String> queryParameters = toQueryParameters(endpoint.getEndpointUri());
String database = queryParameters.get("database");
if (null != database) {
segment.putMetadata("db.instance", database);
}
segment.putSql("db.statement", queryParameters.toString());
}
}
| MongoDBSegmentDecorator |
java | quarkusio__quarkus | extensions/smallrye-reactive-messaging/deployment/src/test/java/io/quarkus/smallrye/reactivemessaging/wiring/ConnectorAttachmentIncomingTest.java | {
"start": 1929,
"end": 2203
} | class ____ {
private final List<Integer> items = new CopyOnWriteArrayList<>();
@Incoming("my-sink")
public void sink(int l) {
items.add(l);
}
public List<Integer> items() {
return items;
}
}
}
| MySink |
java | apache__camel | components/camel-cassandraql/src/test/java/org/apache/camel/component/cassandra/integration/BaseCassandra.java | {
"start": 1964,
"end": 4625
} | class ____ implements ConfigurableRoute, CamelTestSupportHelper {
@Order(1)
@RegisterExtension
public static CassandraService service = CassandraServiceFactory.createLocalService("initScript.cql");
@Order(2)
@RegisterExtension
public static CamelContextExtension camelContextExtension = new DefaultCamelContextExtension();
public static final String KEYSPACE_NAME = "camel_ks";
public static final String DATACENTER_NAME = "datacenter1";
protected CamelContext context = camelContextExtension.getContext();
private CqlSession session;
@BeforeEach
public void executeScript() throws Exception {
executeScript("BasicDataSet.cql");
}
public void executeScript(String pathToScript) throws IOException {
String s = IOHelper.stripLineComments(Paths.get("src/test/resources/" + pathToScript), "--", true);
String[] statements = s.split(";");
for (int i = 0; i < statements.length; i++) {
if (!statements[i].isBlank()) {
executeCql(statements[i]);
}
}
}
public void executeCql(String cql) {
getSession().execute(cql);
}
@AfterEach
protected void doPostTearDown() throws Exception {
try {
if (session != null) {
session.close();
session = null;
}
} catch (Exception e) {
// ignored
}
}
public CqlSession getSession() {
if (session == null) {
InetSocketAddress endpoint
= new InetSocketAddress(service.getCassandraHost(), service.getCQL3Port());
//create a new session
session = CqlSession.builder()
.withLocalDatacenter(DATACENTER_NAME)
.withKeyspace(KEYSPACE_NAME)
.withConfigLoader(DriverConfigLoader.programmaticBuilder()
.withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(5)).build())
.addContactPoint(endpoint).build();
}
return session;
}
public String getUrl() {
return service.getCQL3Endpoint();
}
protected abstract RouteBuilder createRouteBuilder();
@Override
@RouteFixture
public void createRouteBuilder(CamelContext context) throws Exception {
final RouteBuilder routeBuilder = createRouteBuilder();
if (routeBuilder != null) {
context.addRoutes(routeBuilder);
}
}
@Override
public CamelContextExtension getCamelContextExtension() {
return camelContextExtension;
}
}
| BaseCassandra |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/embeddable/EmbeddableReusedColumnInheritanceTest.java | {
"start": 1352,
"end": 3116
} | class ____ {
@BeforeAll
public void setUp(SessionFactoryScope scope) {
scope.inTransaction( session -> {
final SolidFood solid = new SolidFood();
solid.setId( 1L );
solid.setWeight( 42.0D );
session.persist( solid );
final LiquidFood liquid = new LiquidFood();
liquid.setId( 2L );
liquid.setWeightAndVolume( new WeightAndVolume( 1.0D, 2.0D ) );
session.persist( liquid );
} );
}
@AfterAll
public void tearDown(SessionFactoryScope scope) {
scope.inTransaction( session -> session.createMutationQuery( "delete from Food" ).executeUpdate() );
}
@Test
public void testSolidFood(SessionFactoryScope scope) {
scope.inSession( s -> {
final Food food = s.getReference( Food.class, 1L );
assertThat( Hibernate.isInitialized( food ) ).isFalse();
final Object unproxied = Hibernate.unproxy( food );
assertThat( unproxied ).isInstanceOf( SolidFood.class );
assertThat( ( (SolidFood) unproxied ).getWeight() ).isEqualTo( 42.0D );
} );
}
@Test
public void testLiquidFood(SessionFactoryScope scope) {
scope.inSession( s -> {
final Food food = s.getReference( Food.class, 2L );
assertThat( Hibernate.isInitialized( food ) ).isFalse();
final Object unproxied = Hibernate.unproxy( food );
assertThat( unproxied ).isInstanceOf( LiquidFood.class );
assertThat( ( (LiquidFood) unproxied ).getWeightAndVolume().getWeight() ).isEqualTo( 1.0D );
assertThat( ( (LiquidFood) unproxied ).getWeightAndVolume().getVolume() ).isEqualTo( 2.0D );
} );
}
@Entity( name = "Food" )
@Inheritance( strategy = InheritanceType.SINGLE_TABLE )
@DiscriminatorColumn( discriminatorType = DiscriminatorType.STRING, name = "type" )
@DiscriminatorValue( value = "FOOD" )
public static | EmbeddableReusedColumnInheritanceTest |
java | apache__kafka | clients/src/test/java/org/apache/kafka/common/requests/EnvelopeResponseTest.java | {
"start": 1273,
"end": 2347
} | class ____ {
@Test
public void testToSend() {
for (short version : ApiKeys.ENVELOPE.allVersions()) {
ByteBuffer responseData = ByteBuffer.wrap("foobar".getBytes());
EnvelopeResponse response = new EnvelopeResponse(responseData, Errors.NONE);
short headerVersion = ApiKeys.ENVELOPE.responseHeaderVersion(version);
ResponseHeader header = new ResponseHeader(15, headerVersion);
Send send = response.toSend(header, version);
ByteBuffer buffer = TestUtils.toBuffer(send);
assertEquals(send.size() - 4, buffer.getInt());
ResponseHeader parsedHeader = ResponseHeader.parse(buffer, headerVersion);
assertEquals(header.size(), parsedHeader.size());
assertEquals(header, parsedHeader);
EnvelopeResponseData parsedResponseData = new EnvelopeResponseData();
parsedResponseData.read(new ByteBufferAccessor(buffer), version);
assertEquals(response.data(), parsedResponseData);
}
}
}
| EnvelopeResponseTest |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgDoubleEvaluator.java | {
"start": 845,
"end": 927
} | class ____ generated. Edit {@code MvEvaluatorImplementer} instead.
*/
public final | is |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/ReactiveStreamsEndpointBuilderFactory.java | {
"start": 9081,
"end": 14981
} | interface ____
extends
EndpointConsumerBuilder {
default ReactiveStreamsEndpointConsumerBuilder basic() {
return (ReactiveStreamsEndpointConsumerBuilder) this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions (if possible) occurred while the Camel
* consumer is trying to pickup incoming messages, or the likes, will
* now be processed as a message and handled by the routing Error
* Handler. Important: This is only possible if the 3rd party component
* allows Camel to be alerted if an exception was thrown. Some
* components handle this internally only, and therefore
* bridgeErrorHandler is not possible. In other situations we may
* improve the Camel component to hook into the 3rd party component and
* make this possible for future releases. By default the consumer will
* use the org.apache.camel.spi.ExceptionHandler to deal with
* exceptions, that will be logged at WARN or ERROR level and ignored.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer (advanced)
*
* @param bridgeErrorHandler the value to set
* @return the dsl builder
*/
default AdvancedReactiveStreamsEndpointConsumerBuilder bridgeErrorHandler(boolean bridgeErrorHandler) {
doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions (if possible) occurred while the Camel
* consumer is trying to pickup incoming messages, or the likes, will
* now be processed as a message and handled by the routing Error
* Handler. Important: This is only possible if the 3rd party component
* allows Camel to be alerted if an exception was thrown. Some
* components handle this internally only, and therefore
* bridgeErrorHandler is not possible. In other situations we may
* improve the Camel component to hook into the 3rd party component and
* make this possible for future releases. By default the consumer will
* use the org.apache.camel.spi.ExceptionHandler to deal with
* exceptions, that will be logged at WARN or ERROR level and ignored.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: consumer (advanced)
*
* @param bridgeErrorHandler the value to set
* @return the dsl builder
*/
default AdvancedReactiveStreamsEndpointConsumerBuilder bridgeErrorHandler(String bridgeErrorHandler) {
doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* To let the consumer use a custom ExceptionHandler. Notice if the
* option bridgeErrorHandler is enabled then this option is not in use.
* By default the consumer will deal with exceptions, that will be
* logged at WARN or ERROR level and ignored.
*
* The option is a: <code>org.apache.camel.spi.ExceptionHandler</code>
* type.
*
* Group: consumer (advanced)
*
* @param exceptionHandler the value to set
* @return the dsl builder
*/
default AdvancedReactiveStreamsEndpointConsumerBuilder exceptionHandler(org.apache.camel.spi.ExceptionHandler exceptionHandler) {
doSetProperty("exceptionHandler", exceptionHandler);
return this;
}
/**
* To let the consumer use a custom ExceptionHandler. Notice if the
* option bridgeErrorHandler is enabled then this option is not in use.
* By default the consumer will deal with exceptions, that will be
* logged at WARN or ERROR level and ignored.
*
* The option will be converted to a
* <code>org.apache.camel.spi.ExceptionHandler</code> type.
*
* Group: consumer (advanced)
*
* @param exceptionHandler the value to set
* @return the dsl builder
*/
default AdvancedReactiveStreamsEndpointConsumerBuilder exceptionHandler(String exceptionHandler) {
doSetProperty("exceptionHandler", exceptionHandler);
return this;
}
/**
* Sets the exchange pattern when the consumer creates an exchange.
*
* The option is a: <code>org.apache.camel.ExchangePattern</code> type.
*
* Group: consumer (advanced)
*
* @param exchangePattern the value to set
* @return the dsl builder
*/
default AdvancedReactiveStreamsEndpointConsumerBuilder exchangePattern(org.apache.camel.ExchangePattern exchangePattern) {
doSetProperty("exchangePattern", exchangePattern);
return this;
}
/**
* Sets the exchange pattern when the consumer creates an exchange.
*
* The option will be converted to a
* <code>org.apache.camel.ExchangePattern</code> type.
*
* Group: consumer (advanced)
*
* @param exchangePattern the value to set
* @return the dsl builder
*/
default AdvancedReactiveStreamsEndpointConsumerBuilder exchangePattern(String exchangePattern) {
doSetProperty("exchangePattern", exchangePattern);
return this;
}
}
/**
* Builder for endpoint producers for the Reactive Streams component.
*/
public | AdvancedReactiveStreamsEndpointConsumerBuilder |
java | apache__camel | components/camel-mvel/src/generated/java/org/apache/camel/component/mvel/MvelEndpointConfigurer.java | {
"start": 731,
"end": 3231
} | class ____ extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
MvelEndpoint target = (MvelEndpoint) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "allowcontextmapall":
case "allowContextMapAll": target.setAllowContextMapAll(property(camelContext, boolean.class, value)); return true;
case "allowtemplatefromheader":
case "allowTemplateFromHeader": target.setAllowTemplateFromHeader(property(camelContext, boolean.class, value)); return true;
case "contentcache":
case "contentCache": target.setContentCache(property(camelContext, boolean.class, value)); return true;
case "encoding": target.setEncoding(property(camelContext, java.lang.String.class, value)); return true;
case "lazystartproducer":
case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
default: return false;
}
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "allowcontextmapall":
case "allowContextMapAll": return boolean.class;
case "allowtemplatefromheader":
case "allowTemplateFromHeader": return boolean.class;
case "contentcache":
case "contentCache": return boolean.class;
case "encoding": return java.lang.String.class;
case "lazystartproducer":
case "lazyStartProducer": return boolean.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
MvelEndpoint target = (MvelEndpoint) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "allowcontextmapall":
case "allowContextMapAll": return target.isAllowContextMapAll();
case "allowtemplatefromheader":
case "allowTemplateFromHeader": return target.isAllowTemplateFromHeader();
case "contentcache":
case "contentCache": return target.isContentCache();
case "encoding": return target.getEncoding();
case "lazystartproducer":
case "lazyStartProducer": return target.isLazyStartProducer();
default: return null;
}
}
}
| MvelEndpointConfigurer |
java | spring-projects__spring-framework | spring-aop/src/main/java/org/springframework/aop/framework/AdvisorChainFactory.java | {
"start": 1303,
"end": 1560
} | class ____ the next best option)
* @return a List of MethodInterceptors (may also include InterceptorAndDynamicMethodMatchers)
*/
List<Object> getInterceptorsAndDynamicInterceptionAdvice(Advised config, Method method, @Nullable Class<?> targetClass);
}
| is |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationReportResponsePBImpl.java | {
"start": 1565,
"end": 4206
} | class ____ extends GetApplicationReportResponse {
GetApplicationReportResponseProto proto = GetApplicationReportResponseProto.getDefaultInstance();
GetApplicationReportResponseProto.Builder builder = null;
boolean viaProto = false;
private ApplicationReport applicationReport = null;
public GetApplicationReportResponsePBImpl() {
builder = GetApplicationReportResponseProto.newBuilder();
}
public GetApplicationReportResponsePBImpl(GetApplicationReportResponseProto proto) {
this.proto = proto;
viaProto = true;
}
public GetApplicationReportResponseProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
@Override
public int hashCode() {
return getProto().hashCode();
}
@Override
public boolean equals(Object other) {
if (other == null)
return false;
if (other.getClass().isAssignableFrom(this.getClass())) {
return this.getProto().equals(this.getClass().cast(other).getProto());
}
return false;
}
@Override
public String toString() {
return TextFormat.shortDebugString(getProto());
}
private void mergeLocalToBuilder() {
if (this.applicationReport != null) {
builder.setApplicationReport(convertToProtoFormat(this.applicationReport));
}
}
private void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = GetApplicationReportResponseProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public ApplicationReport getApplicationReport() {
GetApplicationReportResponseProtoOrBuilder p = viaProto ? proto : builder;
if (this.applicationReport != null) {
return this.applicationReport;
}
if (!p.hasApplicationReport()) {
return null;
}
this.applicationReport = convertFromProtoFormat(p.getApplicationReport());
return this.applicationReport;
}
@Override
public void setApplicationReport(ApplicationReport applicationMaster) {
maybeInitBuilder();
if (applicationMaster == null)
builder.clearApplicationReport();
this.applicationReport = applicationMaster;
}
private ApplicationReportPBImpl convertFromProtoFormat(ApplicationReportProto p) {
return new ApplicationReportPBImpl(p);
}
private ApplicationReportProto convertToProtoFormat(ApplicationReport t) {
return ((ApplicationReportPBImpl)t).getProto();
}
}
| GetApplicationReportResponsePBImpl |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/mapper/blockloader/docvalues/fn/MvMinBooleansBlockLoader.java | {
"start": 1488,
"end": 2899
} | class ____ extends BlockDocValuesReader {
private final SortedNumericDocValues numericDocValues;
MvMinSorted(SortedNumericDocValues numericDocValues) {
this.numericDocValues = numericDocValues;
}
@Override
public Block read(BlockFactory factory, Docs docs, int offset, boolean nullsFiltered) throws IOException {
try (BooleanBuilder builder = factory.booleansFromDocValues(docs.count() - offset)) {
for (int i = offset; i < docs.count(); i++) {
int doc = docs.get(i);
read(doc, builder);
}
return builder.build();
}
}
@Override
public void read(int docId, StoredFields storedFields, Builder builder) throws IOException {
read(docId, (BooleanBuilder) builder);
}
private void read(int doc, BooleanBuilder builder) throws IOException {
if (false == numericDocValues.advanceExact(doc)) {
builder.appendNull();
return;
}
builder.appendBoolean(numericDocValues.nextValue() != 0);
}
@Override
public int docId() {
return numericDocValues.docID();
}
@Override
public String toString() {
return "MvMinBooleansFromDocValues.Sorted";
}
}
}
| MvMinSorted |
java | apache__flink | flink-core/src/test/java/org/apache/flink/core/io/PostVersionedIOReadableWritableTest.java | {
"start": 5345,
"end": 6223
} | class ____ extends PostVersionedIOReadableWritable {
private static final int VERSION = 1;
private final byte[] data;
TestPostVersionedReadableWritable(int len) {
this.data = new byte[len];
}
TestPostVersionedReadableWritable(byte[] data) {
this.data = data;
}
@Override
public int getVersion() {
return VERSION;
}
@Override
public void write(DataOutputView out) throws IOException {
super.write(out);
out.write(data);
}
@Override
protected void read(DataInputView in, boolean wasVersioned) throws IOException {
in.readFully(data);
assertEmpty(in);
}
public byte[] getData() {
return data;
}
}
static | TestPostVersionedReadableWritable |
java | apache__kafka | clients/src/test/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumerTest.java | {
"start": 38632,
"end": 94851
} | class ____ implements OffsetCommitCallback {
public int invoked = 0;
public Exception exception = null;
public String completionThread;
@Override
public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception) {
invoked++;
this.completionThread = Thread.currentThread().getName();
this.exception = exception;
}
}
@Test
public void testAssign() {
consumer = newConsumer();
final TopicPartition tp = new TopicPartition("foo", 3);
completeAssignmentChangeEventSuccessfully();
consumer.assign(singleton(tp));
assertTrue(consumer.subscription().isEmpty());
assertTrue(consumer.assignment().contains(tp));
verify(applicationEventHandler).addAndGet(any(AssignmentChangeEvent.class));
}
@Test
public void testAssignOnNullTopicPartition() {
consumer = newConsumer();
assertThrows(IllegalArgumentException.class, () -> consumer.assign(null));
}
@Test
public void testAssignOnEmptyTopicPartition() {
consumer = newConsumer();
completeUnsubscribeApplicationEventSuccessfully();
consumer.assign(Collections.emptyList());
assertTrue(consumer.subscription().isEmpty());
assertTrue(consumer.assignment().isEmpty());
}
@Test
public void testAssignOnNullTopicInPartition() {
consumer = newConsumer();
assertThrows(IllegalArgumentException.class, () -> consumer.assign(singleton(new TopicPartition(null, 0))));
}
@Test
public void testAssignOnEmptyTopicInPartition() {
consumer = newConsumer();
assertThrows(IllegalArgumentException.class, () -> consumer.assign(singleton(new TopicPartition(" ", 0))));
}
@Test
public void testBeginningOffsetsFailsIfNullPartitions() {
consumer = newConsumer();
assertThrows(NullPointerException.class, () -> consumer.beginningOffsets(null,
Duration.ofMillis(1)));
}
@Test
public void testBeginningOffsets() {
consumer = newConsumer();
Map<TopicPartition, OffsetAndTimestampInternal> expectedOffsets = mockOffsetAndTimestamp();
when(applicationEventHandler.addAndGet(any(ListOffsetsEvent.class))).thenAnswer(invocation -> {
ListOffsetsEvent event = invocation.getArgument(0);
Timer timer = time.timer(event.deadlineMs() - time.milliseconds());
if (timer.remainingMs() == 0) {
fail("Timer duration should not be zero.");
}
return expectedOffsets;
});
Map<TopicPartition, Long> result = assertDoesNotThrow(() -> consumer.beginningOffsets(expectedOffsets.keySet(), Duration.ofMillis(1)));
expectedOffsets.forEach((key, value) -> {
assertTrue(result.containsKey(key));
assertEquals(value.offset(), result.get(key));
});
verify(applicationEventHandler).addAndGet(any(ListOffsetsEvent.class));
}
@Test
public void testBeginningOffsetsThrowsKafkaExceptionForUnderlyingExecutionFailure() {
consumer = newConsumer();
Set<TopicPartition> partitions = mockTopicPartitionOffset().keySet();
Throwable eventProcessingFailure = new KafkaException("Unexpected failure " +
"processing List Offsets event");
doThrow(eventProcessingFailure).when(applicationEventHandler).addAndGet(
any(ListOffsetsEvent.class));
Throwable consumerError = assertThrows(KafkaException.class,
() -> consumer.beginningOffsets(partitions,
Duration.ofMillis(1)));
assertEquals(eventProcessingFailure, consumerError);
verify(applicationEventHandler).addAndGet(ArgumentMatchers.isA(ListOffsetsEvent.class));
}
@Test
public void testBeginningOffsetsTimeoutOnEventProcessingTimeout() {
consumer = newConsumer();
doThrow(new TimeoutException()).when(applicationEventHandler).addAndGet(any());
assertThrows(TimeoutException.class,
() -> consumer.beginningOffsets(
Collections.singletonList(new TopicPartition("t1", 0)),
Duration.ofMillis(1)));
verify(applicationEventHandler).addAndGet(ArgumentMatchers.isA(ListOffsetsEvent.class));
}
@Test
public void testOffsetsForTimesOnNullPartitions() {
consumer = newConsumer();
assertThrows(NullPointerException.class, () -> consumer.offsetsForTimes(null,
Duration.ofMillis(1)));
}
@Test
public void testOffsetsForTimesFailsOnNegativeTargetTimes() {
consumer = newConsumer();
assertThrows(IllegalArgumentException.class,
() -> consumer.offsetsForTimes(Collections.singletonMap(new TopicPartition(
"topic1", 1), ListOffsetsRequest.EARLIEST_TIMESTAMP),
Duration.ofMillis(1)));
assertThrows(IllegalArgumentException.class,
() -> consumer.offsetsForTimes(Collections.singletonMap(new TopicPartition(
"topic1", 1), ListOffsetsRequest.LATEST_TIMESTAMP),
Duration.ofMillis(1)));
assertThrows(IllegalArgumentException.class,
() -> consumer.offsetsForTimes(Collections.singletonMap(new TopicPartition(
"topic1", 1), ListOffsetsRequest.MAX_TIMESTAMP),
Duration.ofMillis(1)));
}
@Test
public void testOffsetsForTimes() {
consumer = newConsumer();
Map<TopicPartition, OffsetAndTimestampInternal> expectedResult = mockOffsetAndTimestamp();
Map<TopicPartition, Long> timestampToSearch = mockTimestampToSearch();
doReturn(expectedResult).when(applicationEventHandler).addAndGet(any());
Map<TopicPartition, OffsetAndTimestamp> result =
assertDoesNotThrow(() -> consumer.offsetsForTimes(timestampToSearch, Duration.ofMillis(1)));
expectedResult.forEach((key, value) -> {
OffsetAndTimestamp expected = value.buildOffsetAndTimestamp();
assertEquals(expected, result.get(key));
});
verify(applicationEventHandler).addAndGet(ArgumentMatchers.isA(ListOffsetsEvent.class));
}
@Test
public void testOffsetsForTimesTimeoutException() {
consumer = newConsumer();
long timeout = 100;
doThrow(new TimeoutException("Event did not complete in time and was expired by the reaper"))
.when(applicationEventHandler).addAndGet(any());
Throwable t = assertThrows(
TimeoutException.class,
() -> consumer.offsetsForTimes(mockTimestampToSearch(), Duration.ofMillis(timeout)));
assertEquals("Failed to get offsets by times in " + timeout + "ms", t.getMessage());
}
@Test
public void testBeginningOffsetsTimeoutException() {
consumer = newConsumer();
long timeout = 100;
doThrow(new TimeoutException("Event did not complete in time and was expired by the reaper"))
.when(applicationEventHandler).addAndGet(any());
Throwable t = assertThrows(
TimeoutException.class,
() -> consumer.beginningOffsets(Collections.singleton(new TopicPartition("topic", 5)),
Duration.ofMillis(timeout)));
assertEquals("Failed to get offsets by times in " + timeout + "ms", t.getMessage());
}
@Test
public void testEndOffsetsTimeoutException() {
consumer = newConsumer();
long timeout = 100;
doThrow(new TimeoutException("Event did not complete in time and was expired by the reaper"))
.when(applicationEventHandler).addAndGet(any());
Throwable t = assertThrows(
TimeoutException.class,
() -> consumer.endOffsets(Collections.singleton(new TopicPartition("topic", 5)),
Duration.ofMillis(timeout)));
assertEquals("Failed to get offsets by times in " + timeout + "ms", t.getMessage());
}
// This test ensures same behaviour as the current consumer when offsetsForTimes is called
// with 0 timeout. It should return map with all requested partitions as keys, with null
// OffsetAndTimestamp as value.
@Test
public void testBeginningOffsetsWithZeroTimeout() {
consumer = newConsumer();
TopicPartition tp = new TopicPartition("topic1", 0);
Map<TopicPartition, Long> result =
assertDoesNotThrow(() -> consumer.beginningOffsets(Collections.singletonList(tp), Duration.ZERO));
assertNotNull(result);
assertEquals(0, result.size());
verify(applicationEventHandler).add(ArgumentMatchers.isA(ListOffsetsEvent.class));
}
@Test
public void testOffsetsForTimesWithZeroTimeout() {
consumer = newConsumer();
TopicPartition tp = new TopicPartition("topic1", 0);
Map<TopicPartition, OffsetAndTimestamp> expectedResult = Collections.singletonMap(tp, null);
Map<TopicPartition, Long> timestampToSearch = Collections.singletonMap(tp, 5L);
Map<TopicPartition, OffsetAndTimestamp> result =
assertDoesNotThrow(() -> consumer.offsetsForTimes(timestampToSearch, Duration.ZERO));
assertEquals(expectedResult, result);
verify(applicationEventHandler, never()).addAndGet(ArgumentMatchers.isA(ListOffsetsEvent.class));
}
@Test
public void testWakeupCommitted() {
consumer = newConsumer();
final Map<TopicPartition, OffsetAndMetadata> offsets = mockTopicPartitionOffset();
doAnswer(invocation -> {
CompletableApplicationEvent<?> event = invocation.getArgument(0);
assertInstanceOf(FetchCommittedOffsetsEvent.class, event);
assertTrue(event.future().isCompletedExceptionally());
return ConsumerUtils.getResult(event.future());
})
.when(applicationEventHandler)
.addAndGet(any(FetchCommittedOffsetsEvent.class));
consumer.wakeup();
assertThrows(WakeupException.class, () -> consumer.committed(offsets.keySet()));
assertNull(consumer.wakeupTrigger().getPendingTask());
}
@Test
public void testNoWakeupInCloseCommit() {
TopicPartition tp = new TopicPartition("topic1", 0);
Properties props = requiredConsumerConfigAndGroupId("consumer-group");
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true);
consumer = newConsumer(props);
completeAssignmentChangeEventSuccessfully();
consumer.assign(Collections.singleton(tp));
doReturn(LeaderAndEpoch.noLeaderOrEpoch()).when(metadata).currentLeader(any());
completeSeekUnvalidatedEventSuccessfully();
consumer.seek(tp, 10);
consumer.wakeup();
AtomicReference<SyncCommitEvent> capturedEvent = new AtomicReference<>();
doAnswer(invocation -> {
ApplicationEvent event = invocation.getArgument(0);
if (event instanceof SyncCommitEvent) {
capturedEvent.set((SyncCommitEvent) event);
((SyncCommitEvent) event).markOffsetsReady();
}
return null;
}).when(applicationEventHandler).add(any());
completeUnsubscribeApplicationEventSuccessfully();
consumer.close(CloseOptions.timeout(Duration.ZERO));
// A commit was triggered and not completed exceptionally by the wakeup
assertNotNull(capturedEvent.get());
assertFalse(capturedEvent.get().future().isCompletedExceptionally());
}
@Test
public void testCloseAwaitPendingAsyncCommitIncomplete() {
time = new MockTime(1);
consumer = newConsumer();
// Commit async (incomplete)
doReturn(LeaderAndEpoch.noLeaderOrEpoch()).when(metadata).currentLeader(any());
final TopicPartition tp = new TopicPartition("foo", 0);
completeAssignmentChangeEventSuccessfully();
consumer.assign(Collections.singleton(tp));
completeSeekUnvalidatedEventSuccessfully();
consumer.seek(tp, 20);
markOffsetsReadyForCommitEvent();
consumer.commitAsync();
Exception e = assertThrows(KafkaException.class, () -> consumer.close(CloseOptions.timeout(Duration.ofMillis(10))));
assertInstanceOf(TimeoutException.class, e.getCause());
}
@Test
public void testCloseAwaitPendingAsyncCommitComplete() {
time = new MockTime(1);
consumer = newConsumer();
MockCommitCallback cb = new MockCommitCallback();
// Commit async (complete)
doReturn(LeaderAndEpoch.noLeaderOrEpoch()).when(metadata).currentLeader(any());
final TopicPartition tp = new TopicPartition("foo", 0);
completeAssignmentChangeEventSuccessfully();
consumer.assign(Collections.singleton(tp));
completeSeekUnvalidatedEventSuccessfully();
consumer.seek(tp, 20);
completeCommitAsyncApplicationEventSuccessfully();
consumer.commitAsync(cb);
completeUnsubscribeApplicationEventSuccessfully();
assertDoesNotThrow(() -> consumer.close(CloseOptions.timeout(Duration.ofMillis(10))));
assertEquals(1, cb.invoked);
}
@Test
public void testInterceptorAutoCommitOnClose() {
Properties props = requiredConsumerConfigAndGroupId("test-id");
props.setProperty(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, MockConsumerInterceptor.class.getName());
props.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
consumer = newConsumer(props);
assertEquals(1, MockConsumerInterceptor.INIT_COUNT.get());
completeCommitSyncApplicationEventSuccessfully();
completeUnsubscribeApplicationEventSuccessfully();
consumer.close(CloseOptions.timeout(Duration.ZERO));
assertEquals(1, MockConsumerInterceptor.ON_COMMIT_COUNT.get());
assertEquals(1, MockConsumerInterceptor.CLOSE_COUNT.get());
}
@Test
public void testInterceptorCommitSync() {
Properties props = requiredConsumerConfigAndGroupId("test-id");
props.setProperty(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, MockConsumerInterceptor.class.getName());
props.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
consumer = newConsumer(props);
assertEquals(1, MockConsumerInterceptor.INIT_COUNT.get());
completeCommitSyncApplicationEventSuccessfully();
consumer.commitSync(mockTopicPartitionOffset());
assertEquals(1, MockConsumerInterceptor.ON_COMMIT_COUNT.get());
}
@Test
public void testNoInterceptorCommitSyncFailed() {
Properties props = requiredConsumerConfigAndGroupId("test-id");
props.setProperty(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, MockConsumerInterceptor.class.getName());
props.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
consumer = newConsumer(props);
assertEquals(1, MockConsumerInterceptor.INIT_COUNT.get());
KafkaException expected = new KafkaException("Test exception");
completeCommitSyncApplicationEventExceptionally(expected);
KafkaException actual = assertThrows(KafkaException.class, () -> consumer.commitSync(mockTopicPartitionOffset()));
assertEquals(expected, actual);
assertEquals(0, MockConsumerInterceptor.ON_COMMIT_COUNT.get());
}
@Test
public void testInterceptorCommitAsync() {
Properties props = requiredConsumerConfigAndGroupId("test-id");
props.setProperty(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, MockConsumerInterceptor.class.getName());
props.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
consumer = newConsumer(props);
assertEquals(1, MockConsumerInterceptor.INIT_COUNT.get());
completeCommitAsyncApplicationEventSuccessfully();
consumer.commitAsync(mockTopicPartitionOffset(), new MockCommitCallback());
assertEquals(0, MockConsumerInterceptor.ON_COMMIT_COUNT.get());
forceCommitCallbackInvocation();
assertEquals(1, MockConsumerInterceptor.ON_COMMIT_COUNT.get());
}
@Test
public void testNoInterceptorCommitAsyncFailed() {
Properties props = requiredConsumerConfigAndGroupId("test-id");
props.setProperty(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, MockConsumerInterceptor.class.getName());
props.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
consumer = newConsumer(props);
assertEquals(1, MockConsumerInterceptor.INIT_COUNT.get());
completeCommitAsyncApplicationEventExceptionally(new KafkaException("Test exception"));
consumer.commitAsync(mockTopicPartitionOffset(), new MockCommitCallback());
assertEquals(0, MockConsumerInterceptor.ON_COMMIT_COUNT.get());
forceCommitCallbackInvocation();
assertEquals(0, MockConsumerInterceptor.ON_COMMIT_COUNT.get());
}
@Test
public void testSubscribeGeneratesEvent() {
consumer = newConsumer();
String topic = "topic1";
completeTopicSubscriptionChangeEventSuccessfully();
consumer.subscribe(singletonList(topic));
assertEquals(singleton(topic), consumer.subscription());
assertTrue(consumer.assignment().isEmpty());
verify(applicationEventHandler).addAndGet(ArgumentMatchers.isA(TopicSubscriptionChangeEvent.class));
}
@Test
public void testSubscribePatternGeneratesEvent() {
consumer = newConsumer();
Pattern pattern = Pattern.compile("topic.*");
completeTopicPatternSubscriptionChangeEventSuccessfully();
consumer.subscribe(pattern);
verify(applicationEventHandler).addAndGet(ArgumentMatchers.isA(TopicPatternSubscriptionChangeEvent.class));
}
@Test
public void testUnsubscribeGeneratesUnsubscribeEvent() {
consumer = newConsumer();
completeUnsubscribeApplicationEventSuccessfully();
consumer.unsubscribe();
assertTrue(consumer.subscription().isEmpty());
assertTrue(consumer.assignment().isEmpty());
ArgumentCaptor<UnsubscribeEvent> eventCaptor = ArgumentCaptor.forClass(UnsubscribeEvent.class);
verify(applicationEventHandler).add(eventCaptor.capture());
// check the deadline is set to the default API timeout
long deadline = time.milliseconds() + (int) ConsumerConfig.configDef().defaultValues().get(ConsumerConfig.DEFAULT_API_TIMEOUT_MS_CONFIG);
assertTrue(eventCaptor.getValue().deadlineMs() <= deadline);
}
@Test
public void testSubscribeToEmptyListActsAsUnsubscribe() {
consumer = newConsumer();
completeUnsubscribeApplicationEventSuccessfully();
consumer.subscribe(Collections.emptyList());
assertTrue(consumer.subscription().isEmpty());
assertTrue(consumer.assignment().isEmpty());
verify(applicationEventHandler).add(ArgumentMatchers.isA(UnsubscribeEvent.class));
}
@Test
public void testSubscribeToNullTopicCollection() {
consumer = newConsumer();
assertThrows(IllegalArgumentException.class, () -> consumer.subscribe((List<String>) null));
}
@Test
public void testSubscriptionOnNullTopic() {
consumer = newConsumer();
assertThrows(IllegalArgumentException.class, () -> consumer.subscribe(singletonList(null)));
}
@Test
public void testSubscriptionOnEmptyTopic() {
consumer = newConsumer();
String emptyTopic = " ";
assertThrows(IllegalArgumentException.class, () -> consumer.subscribe(singletonList(emptyTopic)));
}
@Test
public void testGroupMetadataAfterCreationWithGroupIdIsNull() {
final Properties props = requiredConsumerConfig();
final ConsumerConfig config = new ConsumerConfig(props);
consumer = newConsumer(config);
assertFalse(config.unused().contains(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG));
assertFalse(config.unused().contains(THROW_ON_FETCH_STABLE_OFFSET_UNSUPPORTED));
final Throwable exception = assertThrows(InvalidGroupIdException.class, consumer::groupMetadata);
assertEquals(
"To use the group management or offset commit APIs, you must " +
"provide a valid " + ConsumerConfig.GROUP_ID_CONFIG + " in the consumer configuration.",
exception.getMessage()
);
}
@Test
public void testGroupMetadataAfterCreationWithGroupIdIsNotNull() {
final String groupId = "consumerGroupA";
consumer = newConsumer(requiredConsumerConfigAndGroupId(groupId));
final ConsumerGroupMetadata groupMetadata = consumer.groupMetadata();
assertEquals(groupId, groupMetadata.groupId());
assertEquals(Optional.empty(), groupMetadata.groupInstanceId());
assertEquals(JoinGroupRequest.UNKNOWN_GENERATION_ID, groupMetadata.generationId());
assertEquals(JoinGroupRequest.UNKNOWN_MEMBER_ID, groupMetadata.memberId());
}
@Test
public void testGroupMetadataAfterCreationWithGroupIdIsNotNullAndGroupInstanceIdSet() {
final String groupId = "consumerGroupA";
final String groupInstanceId = "groupInstanceId1";
final Properties props = requiredConsumerConfigAndGroupId(groupId);
props.put(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG, groupInstanceId);
consumer = newConsumer(props);
final ConsumerGroupMetadata groupMetadata = consumer.groupMetadata();
assertEquals(groupId, groupMetadata.groupId());
assertEquals(Optional.of(groupInstanceId), groupMetadata.groupInstanceId());
assertEquals(JoinGroupRequest.UNKNOWN_GENERATION_ID, groupMetadata.generationId());
assertEquals(JoinGroupRequest.UNKNOWN_MEMBER_ID, groupMetadata.memberId());
}
private MemberStateListener captureGroupMetadataUpdateListener(final MockedStatic<RequestManagers> requestManagers) {
ArgumentCaptor<MemberStateListener> applicationThreadMemberStateListener = ArgumentCaptor.forClass(MemberStateListener.class);
requestManagers.verify(() -> RequestManagers.supplier(
any(),
any(),
any(),
any(),
any(),
any(),
any(),
any(),
any(),
any(),
any(),
any(),
any(),
any(),
applicationThreadMemberStateListener.capture(),
any(),
any()
));
return applicationThreadMemberStateListener.getValue();
}
@Test
public void testGroupMetadataUpdate() {
final String groupId = "consumerGroupA";
try (final MockedStatic<RequestManagers> requestManagers = mockStatic(RequestManagers.class)) {
consumer = newConsumer(requiredConsumerConfigAndGroupId(groupId));
final ConsumerGroupMetadata oldGroupMetadata = consumer.groupMetadata();
final MemberStateListener groupMetadataUpdateListener = captureGroupMetadataUpdateListener(requestManagers);
final int expectedMemberEpoch = 42;
final String expectedMemberId = "memberId";
groupMetadataUpdateListener.onMemberEpochUpdated(
Optional.of(expectedMemberEpoch),
expectedMemberId
);
final ConsumerGroupMetadata newGroupMetadata = consumer.groupMetadata();
assertEquals(oldGroupMetadata.groupId(), newGroupMetadata.groupId());
assertEquals(expectedMemberId, newGroupMetadata.memberId());
assertEquals(expectedMemberEpoch, newGroupMetadata.generationId());
assertEquals(oldGroupMetadata.groupInstanceId(), newGroupMetadata.groupInstanceId());
}
}
@SuppressWarnings("removal")
@Test
public void testGroupMetadataIsResetAfterUnsubscribe() {
final String groupId = "consumerGroupA";
try (final MockedStatic<RequestManagers> requestManagers = mockStatic(RequestManagers.class)) {
consumer = newConsumer(requiredConsumerConfigAndGroupId(groupId));
final MemberStateListener groupMetadataUpdateListener = captureGroupMetadataUpdateListener(requestManagers);
consumer.subscribe(singletonList("topic"));
final int memberEpoch = 42;
final String memberId = "memberId";
groupMetadataUpdateListener.onMemberEpochUpdated(Optional.of(memberEpoch), memberId);
final ConsumerGroupMetadata groupMetadata = consumer.groupMetadata();
assertNotEquals(JoinGroupRequest.UNKNOWN_GENERATION_ID, groupMetadata.generationId());
assertNotEquals(JoinGroupRequest.UNKNOWN_MEMBER_ID, groupMetadata.memberId());
}
completeUnsubscribeApplicationEventSuccessfully();
consumer.unsubscribe();
final ConsumerGroupMetadata groupMetadataAfterUnsubscribe = new ConsumerGroupMetadata(
groupId,
JoinGroupRequest.UNKNOWN_GENERATION_ID,
JoinGroupRequest.UNKNOWN_MEMBER_ID,
Optional.empty()
);
assertEquals(groupMetadataAfterUnsubscribe, consumer.groupMetadata());
}
private Optional<StreamsRebalanceData> captureStreamRebalanceData(final MockedStatic<RequestManagers> requestManagers) {
ArgumentCaptor<Optional<StreamsRebalanceData>> streamRebalanceData = ArgumentCaptor.forClass(Optional.class);
requestManagers.verify(() -> RequestManagers.supplier(
any(),
any(),
any(),
any(),
any(),
any(),
any(),
any(),
any(),
any(),
any(),
any(),
any(),
any(),
any(),
streamRebalanceData.capture(),
any()
));
return streamRebalanceData.getValue();
}
@Test
public void testEmptyStreamRebalanceData() {
final String groupId = "consumerGroupA";
try (final MockedStatic<RequestManagers> requestManagers = mockStatic(RequestManagers.class)) {
consumer = newConsumer(requiredConsumerConfigAndGroupId(groupId));
final Optional<StreamsRebalanceData> groupMetadataUpdateListener = captureStreamRebalanceData(requestManagers);
assertTrue(groupMetadataUpdateListener.isEmpty());
}
}
@Test
public void testStreamRebalanceData() {
final String groupId = "consumerGroupA";
try (final MockedStatic<RequestManagers> requestManagers = mockStatic(RequestManagers.class)) {
StreamsRebalanceData streamsRebalanceData = new StreamsRebalanceData(UUID.randomUUID(), Optional.empty(), Map.of(), Map.of());
consumer = newConsumerWithStreamRebalanceData(requiredConsumerConfigAndGroupId(groupId), streamsRebalanceData);
final Optional<StreamsRebalanceData> groupMetadataUpdateListener = captureStreamRebalanceData(requestManagers);
assertTrue(groupMetadataUpdateListener.isPresent());
assertEquals(streamsRebalanceData, groupMetadataUpdateListener.get());
}
}
/**
* Tests that the consumer correctly invokes the callbacks for {@link ConsumerRebalanceListener} that was
* specified. We don't go through the full effort to emulate heartbeats and correct group management here. We're
* simply exercising the background {@link EventProcessor} does the correct thing when
* {@link AsyncKafkaConsumer#poll(Duration)} is called.
*
* Note that we test {@link ConsumerRebalanceListener} that throws errors in its different callbacks. Failed
* callback execution does <em>not</em> immediately errors. Instead, those errors are forwarded to the
* application event thread for the {@link ConsumerMembershipManager} to handle.
*/
@ParameterizedTest
@MethodSource("listenerCallbacksInvokeSource")
public void testListenerCallbacksInvoke(List<ConsumerRebalanceListenerMethodName> methodNames,
Optional<RuntimeException> revokedError,
Optional<RuntimeException> assignedError,
Optional<RuntimeException> lostError,
int expectedRevokedCount,
int expectedAssignedCount,
int expectedLostCount,
Optional<RuntimeException> expectedException
) {
consumer = newConsumer();
CounterConsumerRebalanceListener consumerRebalanceListener = new CounterConsumerRebalanceListener(
revokedError,
assignedError,
lostError
);
doReturn(Fetch.empty()).when(fetchCollector).collectFetch(any(FetchBuffer.class));
completeTopicSubscriptionChangeEventSuccessfully();
consumer.subscribe(Collections.singletonList("topic"), consumerRebalanceListener);
SortedSet<TopicPartition> partitions = Collections.emptySortedSet();
for (ConsumerRebalanceListenerMethodName methodName : methodNames) {
CompletableBackgroundEvent<Void> e = new ConsumerRebalanceListenerCallbackNeededEvent(methodName, partitions);
backgroundEventQueue.add(e);
}
completeAsyncPollEventSuccessfully();
// This will trigger the background event queue to process our background event message.
// If any error is happening inside the rebalance callbacks, we expect the first exception to be thrown from poll.
if (expectedException.isPresent()) {
Exception exception = assertThrows(expectedException.get().getClass(), () -> consumer.poll(Duration.ZERO));
assertEquals(expectedException.get().getMessage(), exception.getMessage());
assertEquals(expectedException.get().getCause(), exception.getCause());
} else {
assertDoesNotThrow(() -> consumer.poll(Duration.ZERO));
}
assertEquals(expectedRevokedCount, consumerRebalanceListener.revokedCount());
assertEquals(expectedAssignedCount, consumerRebalanceListener.assignedCount());
assertEquals(expectedLostCount, consumerRebalanceListener.lostCount());
}
private static Stream<Arguments> listenerCallbacksInvokeSource() {
Optional<RuntimeException> empty = Optional.empty();
Optional<RuntimeException> error = Optional.of(new RuntimeException("Intentional error"));
Optional<RuntimeException> kafkaException = Optional.of(new KafkaException("Intentional error"));
Optional<RuntimeException> wrappedException = Optional.of(new KafkaException("User rebalance callback throws an error", error.get()));
return Stream.of(
// Tests if we don't have an event, the listener doesn't get called.
Arguments.of(Collections.emptyList(), empty, empty, empty, 0, 0, 0, empty),
// Tests if we get an event for a revocation, that we invoke our listener.
Arguments.of(Collections.singletonList(ON_PARTITIONS_REVOKED), empty, empty, empty, 1, 0, 0, empty),
// Tests if we get an event for an assignment, that we invoke our listener.
Arguments.of(Collections.singletonList(ON_PARTITIONS_ASSIGNED), empty, empty, empty, 0, 1, 0, empty),
// Tests that we invoke our listener even if it encounters an exception.
Arguments.of(Collections.singletonList(ON_PARTITIONS_LOST), empty, empty, empty, 0, 0, 1, empty),
// Tests that we invoke our listener even if it encounters an exception.
Arguments.of(Collections.singletonList(ON_PARTITIONS_REVOKED), error, empty, empty, 1, 0, 0, wrappedException),
// Tests that we invoke our listener even if it encounters an exception.
Arguments.of(Collections.singletonList(ON_PARTITIONS_ASSIGNED), empty, error, empty, 0, 1, 0, wrappedException),
// Tests that we invoke our listener even if it encounters an exception.
Arguments.of(Collections.singletonList(ON_PARTITIONS_LOST), empty, empty, error, 0, 0, 1, wrappedException),
// Tests that we invoke our listener even if it encounters an exception. Special case to test that a kafka exception is not wrapped.
Arguments.of(Collections.singletonList(ON_PARTITIONS_REVOKED), kafkaException, empty, empty, 1, 0, 0, kafkaException),
Arguments.of(Collections.singletonList(ON_PARTITIONS_ASSIGNED), empty, kafkaException, empty, 0, 1, 0, kafkaException),
Arguments.of(Collections.singletonList(ON_PARTITIONS_LOST), empty, empty, kafkaException, 0, 0, 1, kafkaException),
// Tests if we get separate events for revocation and then assignment--AND our revocation throws an error--
// we still invoke the listeners correctly and throw the error.
Arguments.of(Arrays.asList(ON_PARTITIONS_REVOKED, ON_PARTITIONS_ASSIGNED), error, empty, empty, 1, 1, 0, wrappedException),
// Tests if we get separate events for revocation and then assignment--AND both throws an error--
// we still invoke the listeners correctly and throw the first error.
Arguments.of(Arrays.asList(ON_PARTITIONS_REVOKED, ON_PARTITIONS_ASSIGNED), kafkaException, error, empty, 1, 1, 0, kafkaException)
);
}
@Test
public void testBackgroundError() {
final String groupId = "consumerGroupA";
consumer = newConsumer(requiredConsumerConfigAndGroupId(groupId));
final KafkaException expectedException = new KafkaException("Nobody expects the Spanish Inquisition");
final ErrorEvent errorEvent = new ErrorEvent(expectedException);
backgroundEventQueue.add(errorEvent);
completeAssignmentChangeEventSuccessfully();
consumer.assign(singletonList(new TopicPartition("topic", 0)));
completeAsyncPollEventSuccessfully();
final KafkaException exception = assertThrows(KafkaException.class, () -> consumer.poll(Duration.ZERO));
assertEquals(expectedException.getMessage(), exception.getMessage());
}
@Test
public void testMultipleBackgroundErrors() {
final String groupId = "consumerGroupA";
consumer = newConsumer(requiredConsumerConfigAndGroupId(groupId));
final KafkaException expectedException1 = new KafkaException("Nobody expects the Spanish Inquisition");
final ErrorEvent errorEvent1 = new ErrorEvent(expectedException1);
backgroundEventQueue.add(errorEvent1);
final KafkaException expectedException2 = new KafkaException("Spam, Spam, Spam");
final ErrorEvent errorEvent2 = new ErrorEvent(expectedException2);
backgroundEventQueue.add(errorEvent2);
completeAssignmentChangeEventSuccessfully();
consumer.assign(singletonList(new TopicPartition("topic", 0)));
completeAsyncPollEventSuccessfully();
final KafkaException exception = assertThrows(KafkaException.class, () -> consumer.poll(Duration.ZERO));
assertEquals(expectedException1.getMessage(), exception.getMessage());
assertTrue(backgroundEventQueue.isEmpty());
}
@Test
public void testGroupRemoteAssignorUnusedIfGroupIdUndefined() {
final Properties props = requiredConsumerConfig();
props.put(ConsumerConfig.GROUP_REMOTE_ASSIGNOR_CONFIG, "someAssignor");
props.put(ConsumerConfig.GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT));
final ConsumerConfig config = new ConsumerConfig(props);
consumer = newConsumer(config);
assertTrue(config.unused().contains(ConsumerConfig.GROUP_REMOTE_ASSIGNOR_CONFIG));
}
@Test
public void testGroupRemoteAssignorInClassicProtocol() {
final Properties props = requiredConsumerConfig();
props.put(ConsumerConfig.GROUP_ID_CONFIG, "consumerGroupA");
props.put(ConsumerConfig.GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT));
props.put(ConsumerConfig.GROUP_REMOTE_ASSIGNOR_CONFIG, "someAssignor");
assertThrows(ConfigException.class, () -> new ConsumerConfig(props));
}
@Test
public void testGroupRemoteAssignorUsedInConsumerProtocol() {
final Properties props = requiredConsumerConfig();
props.put(ConsumerConfig.GROUP_ID_CONFIG, "consumerGroupA");
props.put(ConsumerConfig.GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT));
props.put(ConsumerConfig.GROUP_REMOTE_ASSIGNOR_CONFIG, "someAssignor");
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
final ConsumerConfig config = new ConsumerConfig(props);
consumer = newConsumer(config);
assertFalse(config.unused().contains(ConsumerConfig.GROUP_REMOTE_ASSIGNOR_CONFIG));
}
@Test
public void testGroupIdNull() {
final Properties props = requiredConsumerConfig();
props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, 10000);
props.put(THROW_ON_FETCH_STABLE_OFFSET_UNSUPPORTED, true);
final ConsumerConfig config = new ConsumerConfig(props);
consumer = newConsumer(config);
assertFalse(config.unused().contains(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG));
assertFalse(config.unused().contains(THROW_ON_FETCH_STABLE_OFFSET_UNSUPPORTED));
}
@Test
public void testGroupIdNotNullAndValid() {
final Properties props = requiredConsumerConfigAndGroupId("consumerGroupA");
props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, 10000);
props.put(THROW_ON_FETCH_STABLE_OFFSET_UNSUPPORTED, true);
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
final ConsumerConfig config = new ConsumerConfig(props);
consumer = newConsumer(config);
assertTrue(config.unused().contains(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG));
assertTrue(config.unused().contains(THROW_ON_FETCH_STABLE_OFFSET_UNSUPPORTED));
}
@Test
public void testEnsurePollEventSentOnConsumerPoll() {
SubscriptionState subscriptions = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.NONE);
consumer = newConsumer(
mock(FetchBuffer.class),
new ConsumerInterceptors<>(Collections.emptyList(), metrics),
mock(ConsumerRebalanceListenerInvoker.class),
subscriptions);
final TopicPartition tp = new TopicPartition("topic", 0);
final List<ConsumerRecord<String, String>> records = singletonList(
new ConsumerRecord<>("topic", 0, 2, "key1", "value1"));
doAnswer(invocation -> Fetch.forPartition(tp, records, true, new OffsetAndMetadata(3, Optional.of(0), "")))
.when(fetchCollector)
.collectFetch(Mockito.any(FetchBuffer.class));
completeTopicSubscriptionChangeEventSuccessfully();
consumer.subscribe(singletonList("topic1"));
completeAsyncPollEventSuccessfully();
consumer.poll(Duration.ofMillis(100));
verify(applicationEventHandler, atLeastOnce()).add(any(AsyncPollEvent.class));
}
private Properties requiredConsumerConfigAndGroupId(final String groupId) {
final Properties props = requiredConsumerConfig();
props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
return props;
}
@Test
public void testLongPollWaitIsLimited() {
consumer = newConsumer();
String topicName = "topic1";
completeTopicSubscriptionChangeEventSuccessfully();
consumer.subscribe(singletonList(topicName));
assertEquals(singleton(topicName), consumer.subscription());
assertTrue(consumer.assignment().isEmpty());
final int partition = 3;
final TopicPartition tp = new TopicPartition(topicName, partition);
final List<ConsumerRecord<String, String>> records = asList(
new ConsumerRecord<>(topicName, partition, 2, "key1", "value1"),
new ConsumerRecord<>(topicName, partition, 3, "key2", "value2")
);
final OffsetAndMetadata nextOffsetAndMetadata = new OffsetAndMetadata(4, Optional.of(0), "");
// On the first iteration, return no data; on the second, return two records
Set<TopicPartition> partitions = singleton(tp);
doAnswer(invocation -> {
// Mock the subscription being assigned as the first fetch is collected
consumer.subscriptions().assignFromSubscribed(partitions);
consumer.setGroupAssignmentSnapshot(partitions);
return Fetch.empty();
}).doAnswer(invocation ->
Fetch.forPartition(tp, records, true, nextOffsetAndMetadata)
).when(fetchCollector).collectFetch(any(FetchBuffer.class));
completeAsyncPollEventSuccessfully();
// And then poll for up to 10000ms, which should return 2 records without timing out
ConsumerRecords<?, ?> returnedRecords = consumer.poll(Duration.ofMillis(10000));
assertEquals(2, returnedRecords.count());
assertEquals(4, returnedRecords.nextOffsets().get(tp).offset());
assertEquals(Optional.of(0), returnedRecords.nextOffsets().get(tp).leaderEpoch());
assertEquals(singleton(topicName), consumer.subscription());
assertEquals(partitions, consumer.assignment());
}
/**
* Tests {@link AsyncKafkaConsumer#processBackgroundEvents(Future, Timer, Predicate) processBackgroundEvents}
* handles the case where the {@link Future} takes a bit of time to complete, but does within the timeout.
*/
@Test
public void testProcessBackgroundEventsWithInitialDelay() throws Exception {
consumer = newConsumer();
Timer timer = time.timer(1000);
CompletableFuture<?> future = mock(CompletableFuture.class);
CountDownLatch latch = new CountDownLatch(3);
// Mock our call to Future.get(timeout) so that it mimics a delay of 200 milliseconds. Keep in mind that
// the incremental timeout inside processBackgroundEvents is 100 seconds for each pass. Our first two passes
// will exceed the incremental timeout, but the third will return.
doAnswer(invocation -> {
latch.countDown();
if (latch.getCount() > 0) {
long timeout = invocation.getArgument(0, Long.class);
timer.sleep(timeout);
throw new java.util.concurrent.TimeoutException("Intentional timeout");
}
future.complete(null);
return null;
}).when(future).get(any(Long.class), any(TimeUnit.class));
consumer.processBackgroundEvents(future, timer, e -> false);
// 800 is the 1000 ms timeout (above) minus the 200 ms delay for the two incremental timeouts/retries.
assertEquals(800, timer.remainingMs());
}
/**
* Tests {@link AsyncKafkaConsumer#processBackgroundEvents(Future, Timer, Predicate) processBackgroundEvents}
* handles the case where the {@link Future} is already complete when invoked, so it doesn't have to wait.
*/
@Test
public void testProcessBackgroundEventsWithoutDelay() {
consumer = newConsumer();
Timer timer = time.timer(1000);
// Create a future that is already completed.
CompletableFuture<?> future = CompletableFuture.completedFuture(null);
consumer.processBackgroundEvents(future, timer, e -> false);
// Because we didn't need to perform a timed get, we should still have every last millisecond
// of our initial timeout.
assertEquals(1000, timer.remainingMs());
}
/**
* Tests {@link AsyncKafkaConsumer#processBackgroundEvents(Future, Timer, Predicate) processBackgroundEvents}
* handles the case where the {@link Future} does not complete within the timeout.
*/
@Test
public void testProcessBackgroundEventsTimesOut() throws Exception {
consumer = newConsumer();
Timer timer = time.timer(1000);
CompletableFuture<?> future = mock(CompletableFuture.class);
doAnswer(invocation -> {
long timeout = invocation.getArgument(0, Long.class);
timer.sleep(timeout);
throw new java.util.concurrent.TimeoutException("Intentional timeout");
}).when(future).get(any(Long.class), any(TimeUnit.class));
assertThrows(TimeoutException.class, () -> consumer.processBackgroundEvents(future, timer, e -> false));
// Because we forced our mocked future to continuously time out, we should have no time remaining.
assertEquals(0, timer.remainingMs());
}
/**
* Tests that calling {@link Thread#interrupt()} before {@link KafkaConsumer#poll(Duration)}
* causes {@link InterruptException} to be thrown.
*/
@Test
public void testPollThrowsInterruptExceptionIfInterrupted() {
consumer = newConsumer();
final String topicName = "foo";
final int partition = 3;
final TopicPartition tp = new TopicPartition(topicName, partition);
doReturn(Fetch.empty()).when(fetchCollector).collectFetch(any(FetchBuffer.class));
doReturn(LeaderAndEpoch.noLeaderOrEpoch()).when(metadata).currentLeader(any());
completeAssignmentChangeEventSuccessfully();
consumer.assign(singleton(tp));
// interrupt the thread and call poll
try {
Thread.currentThread().interrupt();
completeAsyncPollEventSuccessfully();
assertThrows(InterruptException.class, () -> consumer.poll(Duration.ZERO));
} finally {
// clear interrupted state again since this thread may be reused by JUnit
Thread.interrupted();
}
assertDoesNotThrow(() -> consumer.poll(Duration.ZERO));
}
@Test
void testReaperInvokedInClose() {
consumer = newConsumer();
completeUnsubscribeApplicationEventSuccessfully();
consumer.close();
verify(backgroundEventReaper).reap(backgroundEventQueue);
}
@Test
void testReaperInvokedInUnsubscribe() {
consumer = newConsumer();
completeUnsubscribeApplicationEventSuccessfully();
consumer.unsubscribe();
verify(backgroundEventReaper).reap(time.milliseconds());
}
@Test
void testReaperInvokedInPoll() {
consumer = newConsumer();
doReturn(Fetch.empty()).when(fetchCollector).collectFetch(any(FetchBuffer.class));
completeTopicSubscriptionChangeEventSuccessfully();
consumer.subscribe(Collections.singletonList("topic"));
completeAsyncPollEventSuccessfully();
consumer.poll(Duration.ZERO);
verify(backgroundEventReaper).reap(time.milliseconds());
}
@Test
public void testUnsubscribeWithoutGroupId() {
consumer = newConsumerWithoutGroupId();
completeUnsubscribeApplicationEventSuccessfully();
consumer.unsubscribe();
verify(applicationEventHandler).add(ArgumentMatchers.isA(UnsubscribeEvent.class));
}
@Test
public void testSeekToBeginning() {
Collection<TopicPartition> topics = Collections.singleton(new TopicPartition("test", 0));
consumer = newConsumer();
consumer.seekToBeginning(topics);
CompletableApplicationEvent<Void> event = addAndGetLastEnqueuedEvent();
ResetOffsetEvent resetOffsetEvent = assertInstanceOf(ResetOffsetEvent.class, event);
assertEquals(topics, new HashSet<>(resetOffsetEvent.topicPartitions()));
assertEquals(AutoOffsetResetStrategy.EARLIEST, resetOffsetEvent.offsetResetStrategy());
}
@Test
public void testSeekToBeginningWithException() {
Collection<TopicPartition> topics = Collections.singleton(new TopicPartition("test", 0));
consumer = newConsumer();
completeResetOffsetEventExceptionally(new TimeoutException());
assertThrows(TimeoutException.class, () -> consumer.seekToBeginning(topics));
}
@Test
public void testSeekToEndWithException() {
Collection<TopicPartition> topics = Collections.singleton(new TopicPartition("test", 0));
consumer = newConsumer();
completeResetOffsetEventExceptionally(new TimeoutException());
assertThrows(TimeoutException.class, () -> consumer.seekToEnd(topics));
}
@Test
public void testSeekToEnd() {
Collection<TopicPartition> topics = Collections.singleton(new TopicPartition("test", 0));
consumer = newConsumer();
consumer.seekToEnd(topics);
CompletableApplicationEvent<Void> event = addAndGetLastEnqueuedEvent();
ResetOffsetEvent resetOffsetEvent = assertInstanceOf(ResetOffsetEvent.class, event);
assertEquals(topics, new HashSet<>(resetOffsetEvent.topicPartitions()));
assertEquals(AutoOffsetResetStrategy.LATEST, resetOffsetEvent.offsetResetStrategy());
}
@Test
public void testSubscribeToRe2JPatternValidation() {
consumer = newConsumer();
Throwable t = assertThrows(IllegalArgumentException.class, () -> consumer.subscribe((SubscriptionPattern) null));
assertEquals("Topic pattern to subscribe to cannot be null", t.getMessage());
t = assertThrows(IllegalArgumentException.class, () -> consumer.subscribe(new SubscriptionPattern("")));
assertEquals("Topic pattern to subscribe to cannot be empty", t.getMessage());
assertDoesNotThrow(() -> consumer.subscribe(new SubscriptionPattern("t*")));
assertThrows(IllegalArgumentException.class, () -> consumer.subscribe(new SubscriptionPattern("t*"), null));
assertDoesNotThrow(() -> consumer.subscribe(new SubscriptionPattern("t*"), mock(ConsumerRebalanceListener.class)));
}
@Test
public void testSubscribeToRe2JPatternThrowsIfNoGroupId() {
consumer = newConsumer(requiredConsumerConfig());
assertThrows(InvalidGroupIdException.class, () -> consumer.subscribe(new SubscriptionPattern("t*")));
assertThrows(InvalidGroupIdException.class, () -> consumer.subscribe(new SubscriptionPattern("t*"),
mock(ConsumerRebalanceListener.class)));
}
@Test
public void testSubscribeToRe2JPatternGeneratesEvent() {
consumer = newConsumer();
completeTopicRe2JPatternSubscriptionChangeEventSuccessfully();
consumer.subscribe(new SubscriptionPattern("t*"));
verify(applicationEventHandler).addAndGet(ArgumentMatchers.isA(TopicRe2JPatternSubscriptionChangeEvent.class));
clearInvocations(applicationEventHandler);
consumer.subscribe(new SubscriptionPattern("t*"), mock(ConsumerRebalanceListener.class));
verify(applicationEventHandler).addAndGet(ArgumentMatchers.isA(TopicRe2JPatternSubscriptionChangeEvent.class));
}
// SubscriptionPattern is supported as of ConsumerGroupHeartbeatRequest v1. Clients using subscribe
// (SubscribePattern) against older broker versions should get UnsupportedVersionException on poll after subscribe
@Test
public void testSubscribePatternAgainstBrokerNotSupportingRegex() throws InterruptedException {
final Properties props = requiredConsumerConfig();
props.put(ConsumerConfig.GROUP_ID_CONFIG, "group-id");
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
final ConsumerConfig config = new ConsumerConfig(props);
ConsumerMetadata metadata = new ConsumerMetadata(0, 0, Long.MAX_VALUE, false, false,
mock(SubscriptionState.class), new LogContext(), new ClusterResourceListeners());
MockClient client = new MockClient(time, metadata);
MetadataResponse initialMetadata = RequestTestUtils.metadataUpdateWithIds(1, Map.of("topic1", 2),
Map.of("topic1", Uuid.randomUuid()));
client.updateMetadata(initialMetadata);
// ConsumerGroupHeartbeat v0 does not support broker-side regex resolution
client.setNodeApiVersions(NodeApiVersions.create(ApiKeys.CONSUMER_GROUP_HEARTBEAT.id, (short) 0, (short) 0));
// Mock response to find coordinator
Node node = metadata.fetch().nodes().get(0);
client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, "group-id", node), node);
// Mock HB response (needed so that the MockClient builds the request)
ConsumerGroupHeartbeatResponse result =
new ConsumerGroupHeartbeatResponse(new ConsumerGroupHeartbeatResponseData()
.setMemberId("")
.setMemberEpoch(0));
Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());
client.prepareResponseFrom(result, coordinator);
SubscriptionState subscriptionState = mock(SubscriptionState.class);
consumer = new AsyncKafkaConsumer<>(
new LogContext(),
time,
config,
new StringDeserializer(),
new StringDeserializer(),
client,
subscriptionState,
metadata
);
completeTopicRe2JPatternSubscriptionChangeEventSuccessfully();
SubscriptionPattern pattern = new SubscriptionPattern("t*");
consumer.subscribe(pattern);
when(subscriptionState.subscriptionPattern()).thenReturn(pattern);
TestUtils.waitForCondition(() -> {
try {
// The request is generated in the background thread so allow for that
// async operation to happen to detect the failure.
consumer.poll(Duration.ZERO);
return false;
} catch (UnsupportedVersionException e) {
return true;
}
}, "Consumer did not throw the expected UnsupportedVersionException on poll");
}
@Test
public void testRecordBackgroundEventQueueSizeAndBackgroundEventQueueTime() {
consumer = newConsumer(
mock(FetchBuffer.class),
mock(ConsumerInterceptors.class),
mock(ConsumerRebalanceListenerInvoker.class),
mock(SubscriptionState.class));
Metrics metrics = consumer.metricsRegistry();
AsyncConsumerMetrics asyncConsumerMetrics = consumer.asyncConsumerMetrics();
ConsumerRebalanceListenerCallbackNeededEvent event = new ConsumerRebalanceListenerCallbackNeededEvent(ON_PARTITIONS_REVOKED, Collections.emptySortedSet());
event.setEnqueuedMs(time.milliseconds());
backgroundEventQueue.add(event);
asyncConsumerMetrics.recordBackgroundEventQueueSize(1);
time.sleep(10);
consumer.processBackgroundEvents();
assertEquals(0, (double) metrics.metric(metrics.metricName("background-event-queue-size", CONSUMER_METRIC_GROUP)).metricValue());
assertEquals(10, (double) metrics.metric(metrics.metricName("background-event-queue-time-avg", CONSUMER_METRIC_GROUP)).metricValue());
assertEquals(10, (double) metrics.metric(metrics.metricName("background-event-queue-time-max", CONSUMER_METRIC_GROUP)).metricValue());
}
@Test
public void testFailConstructor() {
final Properties props = requiredConsumerConfig();
props.put(ConsumerConfig.GROUP_ID_CONFIG, "group-id");
props.put(ConsumerConfig.METRIC_REPORTER_CLASSES_CONFIG, "an.invalid.class");
final ConsumerConfig config = new ConsumerConfig(props);
try (LogCaptureAppender appender = LogCaptureAppender.createAndRegister()) {
KafkaException ce = assertThrows(
KafkaException.class,
() -> newConsumer(config));
assertTrue(ce.getMessage().contains("Failed to construct kafka consumer"), "Unexpected exception message: " + ce.getMessage());
assertTrue(ce.getCause().getMessage().contains("Class an.invalid. | MockCommitCallback |
java | reactor__reactor-core | reactor-core/src/main/java/reactor/core/publisher/FluxMetrics.java | {
"start": 1787,
"end": 2625
} | class ____<T> extends InternalFluxOperator<T, T> {
final String name;
final Tags tags;
//Note: meters and tag names are normalized by micrometer on the basis that the word
// separator is the dot, not camelCase...
final MeterRegistry registryCandidate;
FluxMetrics(Flux<? extends T> flux) {
super(flux);
this.name = resolveName(flux);
this.tags = resolveTags(flux, DEFAULT_TAGS_FLUX);
this.registryCandidate = MicrometerConfiguration.getRegistry();
}
@Override
public CoreSubscriber<? super T> subscribeOrReturn(CoreSubscriber<? super T> actual) {
return new MetricsSubscriber<>(actual, registryCandidate, Clock.SYSTEM, this.name, this.tags);
}
@Override
public @Nullable Object scanUnsafe(Attr key) {
if (key == Attr.RUN_STYLE) return Attr.RunStyle.SYNC;
return super.scanUnsafe(key);
}
static | FluxMetrics |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/boot/jaxb/hbm/transform/ColumnDefaultsInsertableNonUpdateableImpl.java | {
"start": 176,
"end": 836
} | class ____ implements ColumnDefaults {
/**
* Singleton access
*/
public static final ColumnDefaultsInsertableNonUpdateableImpl INSTANCE = new ColumnDefaultsInsertableNonUpdateableImpl();
@Override
public Boolean isNullable() {
return null;
}
@Override
public Integer getLength() {
return null;
}
@Override
public Integer getScale() {
return null;
}
@Override
public Integer getPrecision() {
return null;
}
@Override
public Boolean isUnique() {
return null;
}
@Override
public Boolean isInsertable() {
return true;
}
@Override
public Boolean isUpdatable() {
return false;
}
}
| ColumnDefaultsInsertableNonUpdateableImpl |
java | FasterXML__jackson-core | src/test/java/tools/jackson/core/unittest/util/ByteArrayBuilderTest.java | {
"start": 546,
"end": 3892
} | class ____ extends JacksonCoreTestBase
{
public void testSimple() throws Exception
{
ByteArrayBuilder b = new ByteArrayBuilder(null, 20);
assertArrayEquals(new byte[0], b.toByteArray());
b.write((byte) 0);
b.append(1);
byte[] foo = new byte[98];
for (int i = 0; i < foo.length; ++i) {
foo[i] = (byte) (2 + i);
}
b.write(foo);
byte[] result = b.toByteArray();
assertEquals(100, result.length);
for (int i = 0; i < 100; ++i) {
assertEquals(i, (int) result[i]);
}
b.release();
b.close();
}
public void testAppendFourBytesWithPositive() {
BufferRecycler bufferRecycler = new BufferRecycler();
ByteArrayBuilder byteArrayBuilder = new ByteArrayBuilder(bufferRecycler);
assertEquals(0, byteArrayBuilder.size());
byteArrayBuilder.appendFourBytes(2);
assertEquals(4, byteArrayBuilder.size());
assertEquals(0, byteArrayBuilder.toByteArray()[0]);
assertEquals(0, byteArrayBuilder.toByteArray()[1]);
assertEquals(0, byteArrayBuilder.toByteArray()[2]);
assertEquals(2, byteArrayBuilder.toByteArray()[3]);
byteArrayBuilder.close();
}
public void testAppendTwoBytesWithZero() {
ByteArrayBuilder byteArrayBuilder = new ByteArrayBuilder(0);
assertEquals(0, byteArrayBuilder.size());
byteArrayBuilder.appendTwoBytes(0);
assertEquals(2, byteArrayBuilder.size());
assertEquals(0, byteArrayBuilder.toByteArray()[0]);
byteArrayBuilder.close();
}
public void testFinishCurrentSegment() {
BufferRecycler bufferRecycler = new BufferRecycler();
ByteArrayBuilder byteArrayBuilder = new ByteArrayBuilder(bufferRecycler, 2);
byteArrayBuilder.appendThreeBytes(2);
assertEquals(3, byteArrayBuilder.getCurrentSegmentLength());
/*byte[] byteArray =*/ byteArrayBuilder.finishCurrentSegment();
assertEquals(0, byteArrayBuilder.getCurrentSegmentLength());
byteArrayBuilder.close();
}
// [core#1195]: Try to verify that BufferRecycler instance is indeed reused
public void testBufferRecyclerReuse() throws Exception
{
JsonFactory f = new JsonFactory();
BufferRecycler br = new BufferRecycler()
// need to link with some pool
.withPool(JsonRecyclerPools.newBoundedPool(3));
ByteArrayBuilder bab = new ByteArrayBuilder(br, 20);
assertSame(br, bab.bufferRecycler());
JsonGenerator g = f.createGenerator(ObjectWriteContext.empty(), bab);
IOContext ioCtxt = ((GeneratorBase) g).ioContext();
assertSame(br, ioCtxt.bufferRecycler());
assertTrue(ioCtxt.bufferRecycler().isLinkedWithPool());
g.writeStartArray();
g.writeEndArray();
g.close();
// Generator.close() should NOT release buffer recycler
assertTrue(br.isLinkedWithPool());
byte[] result = bab.getClearAndRelease();
assertEquals("[]", new String(result, StandardCharsets.UTF_8));
// Nor accessing contents
assertTrue(br.isLinkedWithPool());
// only explicit release does
br.releaseToPool();
assertFalse(br.isLinkedWithPool());
}
} | ByteArrayBuilderTest |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/inheritance/discriminator/SingleTableNotNullDiscriminatorSuperClassTest.java | {
"start": 3320,
"end": 3438
} | class ____ extends NotNullEntity {
}
@Entity(name = "val2_ent")
@DiscriminatorValue("val2")
public static | Val1Entity |
java | spring-projects__spring-security | core/src/main/java/org/springframework/security/authorization/method/AuthorizationAdvisorProxyFactory.java | {
"start": 11889,
"end": 12759
} | class ____
*/
static TargetVisitor defaultsSkipValueTypes() {
return AuthorizationAdvisorProxyFactory.DEFAULT_VISITOR_SKIP_VALUE_TYPES;
}
/**
* Compose a set of visitors. This is helpful when you are customizing for a given
* type and still want the defaults applied for the remaining types.
*
* <p>
* The resulting visitor will execute the first visitor that returns a non-null
* value.
* @param visitors the set of visitors
* @return a composite that executes the first visitor that returns a non-null
* value
*/
static TargetVisitor of(TargetVisitor... visitors) {
return (proxyFactory, target) -> {
for (TargetVisitor visitor : visitors) {
Object result = visitor.visit(proxyFactory, target);
if (result != null) {
return result;
}
}
return null;
};
}
}
private static final | level |
java | apache__camel | components/camel-exec/src/main/java/org/apache/camel/component/exec/impl/DefaultExecBinding.java | {
"start": 1722,
"end": 6355
} | class ____ implements ExecBinding {
private static final Logger LOG = LoggerFactory.getLogger(DefaultExecBinding.class);
@Override
@SuppressWarnings("unchecked")
public ExecCommand readInput(Exchange exchange, ExecEndpoint endpoint) {
ObjectHelper.notNull(exchange, "exchange");
ObjectHelper.notNull(endpoint, "endpoint");
// do not convert args as we do that manually later
Object args = exchange.getIn().removeHeader(EXEC_COMMAND_ARGS);
String cmd = getAndRemoveHeader(exchange.getIn(), EXEC_COMMAND_EXECUTABLE, endpoint.getExecutable(), String.class);
String dir = getAndRemoveHeader(exchange.getIn(), EXEC_COMMAND_WORKING_DIR, endpoint.getWorkingDir(), String.class);
long timeout = getAndRemoveHeader(exchange.getIn(), EXEC_COMMAND_TIMEOUT, endpoint.getTimeout(), Long.class);
String exitValuesString
= getAndRemoveHeader(exchange.getIn(), EXEC_COMMAND_EXIT_VALUES, endpoint.getExitValues(), String.class);
String outFilePath = getAndRemoveHeader(exchange.getIn(), EXEC_COMMAND_OUT_FILE, endpoint.getOutFile(), String.class);
boolean useStderrOnEmptyStdout = getAndRemoveHeader(exchange.getIn(), EXEC_USE_STDERR_ON_EMPTY_STDOUT,
endpoint.isUseStderrOnEmptyStdout(), Boolean.class);
LoggingLevel commandLogLevel = getAndRemoveHeader(exchange.getIn(), EXEC_COMMAND_LOG_LEVEL,
endpoint.getCommandLogLevel(), LoggingLevel.class);
InputStream input = exchange.getIn().getBody(InputStream.class);
// If the args is a list of strings already..
List<String> argsList = null;
if (isListOfStrings(args)) {
argsList = (List<String>) args;
}
if (argsList == null) {
// no we could not do that, then parse it as a string to a list
String s = endpoint.getArgs();
if (args != null) {
// use args from header instead from endpoint
s = exchange.getContext().getTypeConverter().convertTo(String.class, exchange, args);
}
LOG.debug("Parsing argument String to a List: {}", s);
argsList = splitToWhiteSpaceSeparatedTokens(s);
}
Set<Integer> exitValues = new HashSet<>();
if (exitValuesString != null && exitValuesString.length() > 0) {
exitValues = new HashSet<>(splitCommaSeparatedToListOfInts(exitValuesString));
}
File outFile = outFilePath == null ? null : new File(outFilePath);
return new ExecCommand(
cmd, argsList, dir, timeout, exitValues, input, outFile, useStderrOnEmptyStdout, commandLogLevel);
}
private boolean isListOfStrings(Object o) {
if (o == null) {
return false;
}
if (!(o instanceof List)) {
return false;
}
@SuppressWarnings("rawtypes")
List argsList = (List) o;
for (Object s : argsList) {
if (s.getClass() != String.class) {
return false;
}
}
return true;
}
@Override
public void writeOutput(Exchange exchange, ExecResult result) {
ObjectHelper.notNull(exchange, "exchange");
ObjectHelper.notNull(result, "result");
if (exchange.getPattern().isOutCapable()) {
writeOutputInMessage(exchange.getOut(), result);
exchange.getOut().getHeaders().putAll(exchange.getIn().getHeaders());
} else {
writeOutputInMessage(exchange.getIn(), result);
}
}
/**
* Write the {@link ExecResult} in the message body. Write the stderr and the exit value for convenience in the
* message headers. <br>
* The stdout and/or resultFile should be accessible using a converter or using the result object directly.
*
* @param message a Camel message
* @param result an {@link ExecResult} instance
*/
protected void writeOutputInMessage(Message message, ExecResult result) {
message.setHeader(EXEC_STDERR, result.getStderr());
message.setHeader(EXEC_EXIT_VALUE, result.getExitValue());
message.setBody(result);
}
/**
* Gets and removes the <code> <code>headerName</code> header form the input <code>message</code> (the header will
* not be propagated)
*/
protected <T> T getAndRemoveHeader(Message message, String headerName, T defaultValue, Class<T> headerType) {
T h = message.getHeader(headerName, defaultValue, headerType);
message.removeHeader(headerName);
return h;
}
}
| DefaultExecBinding |
java | apache__flink | flink-core/src/main/java/org/apache/flink/util/concurrent/ExponentialBackoffRetryStrategy.java | {
"start": 1044,
"end": 2827
} | class ____ implements RetryStrategy {
private final int remainingRetries;
private final Duration currentRetryDelay;
private final Duration maxRetryDelay;
/**
* @param remainingRetries number of times to retry
* @param currentRetryDelay the current delay between retries
* @param maxRetryDelay the max delay between retries
*/
public ExponentialBackoffRetryStrategy(
int remainingRetries, Duration currentRetryDelay, Duration maxRetryDelay) {
Preconditions.checkArgument(
remainingRetries >= 0, "The number of retries must be greater or equal to 0.");
this.remainingRetries = remainingRetries;
Preconditions.checkArgument(
currentRetryDelay.toMillis() >= 0, "The currentRetryDelay must be positive");
this.currentRetryDelay = currentRetryDelay;
Preconditions.checkArgument(
maxRetryDelay.toMillis() >= 0, "The maxRetryDelay must be positive");
this.maxRetryDelay = maxRetryDelay;
}
@Override
public int getNumRemainingRetries() {
return remainingRetries;
}
@Override
public Duration getRetryDelay() {
return currentRetryDelay;
}
@Override
public RetryStrategy getNextRetryStrategy() {
int nextRemainingRetries = remainingRetries - 1;
Preconditions.checkState(
nextRemainingRetries >= 0, "The number of remaining retries must not be negative");
long nextRetryDelayMillis =
Math.min(2 * currentRetryDelay.toMillis(), maxRetryDelay.toMillis());
return new ExponentialBackoffRetryStrategy(
nextRemainingRetries, Duration.ofMillis(nextRetryDelayMillis), maxRetryDelay);
}
}
| ExponentialBackoffRetryStrategy |
java | apache__spark | sql/catalyst/src/main/java/org/apache/spark/sql/connector/distributions/Distribution.java | {
"start": 917,
"end": 1027
} | interface ____ defines how data is distributed across partitions.
*
* @since 3.2.0
*/
@Experimental
public | that |
java | micronaut-projects__micronaut-core | context/src/main/java/io/micronaut/scheduling/annotation/Schedules.java | {
"start": 1069,
"end": 1182
} | interface ____ {
/**
* @return The {@link Scheduled} instances
*/
Scheduled[] value();
}
| Schedules |
java | spring-projects__spring-framework | spring-core/src/test/java/org/springframework/util/ReflectionUtilsTests.java | {
"start": 12755,
"end": 12949
} | class ____ extends TestObject {
@SuppressWarnings("unused")
private final String foo = "will break naive copy that doesn't exclude statics";
}
private static | TestObjectSubclassWithFinalField |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/utils/DaemonThreadFactoryTest.java | {
"start": 130,
"end": 538
} | class ____ extends TestCase {
public void test_0() throws Exception {
Runnable task = new Runnable() {
public void run() {
}
};
DaemonThreadFactory factory = new DaemonThreadFactory("test");
assertEquals("[test-1]", factory.newThread(task).getName());
assertEquals("[test-2]", factory.newThread(task).getName());
}
}
| DaemonThreadFactoryTest |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/core/convert/support/MapToMapConverter.java | {
"start": 1520,
"end": 4582
} | class ____ implements ConditionalGenericConverter {
private final ConversionService conversionService;
public MapToMapConverter(ConversionService conversionService) {
this.conversionService = conversionService;
}
@Override
public Set<ConvertiblePair> getConvertibleTypes() {
return Collections.singleton(new ConvertiblePair(Map.class, Map.class));
}
@Override
public boolean matches(TypeDescriptor sourceType, TypeDescriptor targetType) {
return canConvertKey(sourceType, targetType) && canConvertValue(sourceType, targetType);
}
@Override
public @Nullable Object convert(@Nullable Object source, TypeDescriptor sourceType, TypeDescriptor targetType) {
if (source == null) {
return null;
}
@SuppressWarnings("unchecked")
Map<Object, Object> sourceMap = (Map<Object, Object>) source;
// Shortcut if possible...
boolean copyRequired = !targetType.getType().isInstance(source);
if (!copyRequired && sourceMap.isEmpty()) {
return sourceMap;
}
TypeDescriptor keyDesc = targetType.getMapKeyTypeDescriptor();
TypeDescriptor valueDesc = targetType.getMapValueTypeDescriptor();
List<MapEntry> targetEntries = new ArrayList<>(sourceMap.size());
for (Map.Entry<Object, Object> entry : sourceMap.entrySet()) {
Object sourceKey = entry.getKey();
Object sourceValue = entry.getValue();
Object targetKey = convertKey(sourceKey, sourceType, keyDesc);
Object targetValue = convertValue(sourceValue, sourceType, valueDesc);
targetEntries.add(new MapEntry(targetKey, targetValue));
if (sourceKey != targetKey || sourceValue != targetValue) {
copyRequired = true;
}
}
if (!copyRequired) {
return sourceMap;
}
Map<Object, Object> targetMap = CollectionFactory.createMap(targetType.getType(),
(keyDesc != null ? keyDesc.getType() : null), sourceMap.size());
for (MapEntry entry : targetEntries) {
entry.addToMap(targetMap);
}
return targetMap;
}
// internal helpers
private boolean canConvertKey(TypeDescriptor sourceType, TypeDescriptor targetType) {
return ConversionUtils.canConvertElements(sourceType.getMapKeyTypeDescriptor(),
targetType.getMapKeyTypeDescriptor(), this.conversionService);
}
private boolean canConvertValue(TypeDescriptor sourceType, TypeDescriptor targetType) {
return ConversionUtils.canConvertElements(sourceType.getMapValueTypeDescriptor(),
targetType.getMapValueTypeDescriptor(), this.conversionService);
}
private @Nullable Object convertKey(Object sourceKey, TypeDescriptor sourceType, @Nullable TypeDescriptor targetType) {
if (targetType == null) {
return sourceKey;
}
return this.conversionService.convert(sourceKey, sourceType.getMapKeyTypeDescriptor(sourceKey), targetType);
}
private @Nullable Object convertValue(Object sourceValue, TypeDescriptor sourceType, @Nullable TypeDescriptor targetType) {
if (targetType == null) {
return sourceValue;
}
return this.conversionService.convert(sourceValue, sourceType.getMapValueTypeDescriptor(sourceValue), targetType);
}
private static | MapToMapConverter |
java | quarkusio__quarkus | integration-tests/micrometer-prometheus/src/test/java/io/quarkus/it/micrometer/prometheus/OtelOnProfile.java | {
"start": 152,
"end": 413
} | class ____ implements QuarkusTestProfile {
@Override
public Map<String, String> getConfigOverrides() {
Map<String, String> config = new HashMap<>(Map.of(
"quarkus.otel.enabled", "true"));
return config;
}
}
| OtelOnProfile |
java | apache__kafka | streams/src/test/java/org/apache/kafka/streams/state/internals/metrics/RocksDBMetricsRecordingTriggerTest.java | {
"start": 1496,
"end": 3387
} | class ____ {
private static final String STORE_NAME1 = "store-name1";
private static final String STORE_NAME2 = "store-name2";
private static final TaskId TASK_ID1 = new TaskId(1, 2);
private static final TaskId TASK_ID2 = new TaskId(2, 4);
@Mock
private RocksDBMetricsRecorder recorder1;
@Mock
private RocksDBMetricsRecorder recorder2;
private final Time time = new MockTime();
private final RocksDBMetricsRecordingTrigger recordingTrigger = new RocksDBMetricsRecordingTrigger(time);
private void setUp() {
when(recorder1.storeName()).thenReturn(STORE_NAME1);
when(recorder1.taskId()).thenReturn(TASK_ID1);
when(recorder2.storeName()).thenReturn(STORE_NAME2);
when(recorder2.taskId()).thenReturn(TASK_ID2);
}
@Test
public void shouldTriggerAddedMetricsRecorders() {
setUp();
recordingTrigger.addMetricsRecorder(recorder1);
recordingTrigger.addMetricsRecorder(recorder2);
doNothing().when(recorder1).record(time.milliseconds());
doNothing().when(recorder2).record(time.milliseconds());
recordingTrigger.run();
}
@Test
public void shouldThrowIfRecorderToAddHasBeenAlreadyAdded() {
when(recorder1.storeName()).thenReturn(STORE_NAME1);
when(recorder1.taskId()).thenReturn(TASK_ID1);
recordingTrigger.addMetricsRecorder(recorder1);
assertThrows(
IllegalStateException.class,
() -> recordingTrigger.addMetricsRecorder(recorder1)
);
}
@Test
public void shouldThrowIfRecorderToRemoveCouldNotBeFound() {
setUp();
recordingTrigger.addMetricsRecorder(recorder1);
assertThrows(
IllegalStateException.class,
() -> recordingTrigger.removeMetricsRecorder(recorder2)
);
}
}
| RocksDBMetricsRecordingTriggerTest |
java | spring-projects__spring-framework | spring-beans/src/testFixtures/java/org/springframework/beans/testfixture/beans/factory/aot/CustomPropertyValue.java | {
"start": 1048,
"end": 1363
} | class ____ implements Delegate {
@Override
public CodeBlock generateCode(ValueCodeGenerator valueCodeGenerator, Object value) {
if (value instanceof CustomPropertyValue data) {
return CodeBlock.of("new $T($S)", CustomPropertyValue.class, data.value);
}
return null;
}
}
}
| ValueCodeGeneratorDelegate |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/src/main/java/org/apache/hadoop/yarn/appcatalog/controller/AppListController.java | {
"start": 1645,
"end": 6005
} | class ____ {
public AppListController() {
}
/**
* Get Application List.
*
* @apiGroup AppListController
* @apiName get
* @api {get} /app_list Get list of deployed applications.
* @apiSuccess {Object[]} List<AppEntry> List of deployed Applications.
* @apiSuccessExample {json} Success-Response:
* HTTP/1.1 200 OK
* [
* {
* "id":"howita-man",
* "name":"howita-man",
* "app":"Jenkins-ci.org/Jenkins",
* "yarnfile":{
* "name":"howita_man",
* "lifetime":3600,
* "containers":[
* ],
* "components":[
* {
* "name":"jenkins",
* "dependencies":[
* ],
* "artifact":{
* "id":"eyang-1.openstacklocal:5000/jenkins:latest",
* "type":"DOCKER"
* },
* "launch_command":"",
* "resource":{
* "uri":null,
* "profile":null,
* "cpus":1,
* "memory":"2048"
* },
* "number_of_containers":1,
* "run_privileged_container":false,
* "configuration":{
* "properties":{
* },
* "env":{
* },
* "files":[
* ]
* },
* "quicklinks":[
* ],
* "containers":[
* ]
* }
* ],
* "configuration":{
* "properties":{
* },
* "env":{
* },
* "files":[
* ]
* },
* "quicklinks":{
* "Jenkins UI":"http://jenkins.${SERVICE_NAME}.${USER}.${DOMAIN}:8080/"
* }
* }
* },
* {
* ...
* }
* ]
* @return - Active application deployed by current user.
*/
@GET
@Produces(MediaType.APPLICATION_JSON)
public List<AppEntry> getList() {
AppCatalogSolrClient sc = new AppCatalogSolrClient();
return sc.listAppEntries();
}
/**
* Delete an application.
*
* @apiGroup AppListController
* @apiName delete
* @api {delete} /app_list Delete one instance of application.
* @apiParam {String} id Application name to delete.
* @apiSuccess {String} text Delete request accepted
* @param id - application ID
* @param name - application name
* @return Web response
*/
@DELETE
@Path("{id}/{name}")
@Produces(MediaType.APPLICATION_JSON)
public Response delete(@PathParam("id") String id,
@PathParam("name") String name) {
AppCatalogSolrClient sc = new AppCatalogSolrClient();
sc.deleteApp(id);
YarnServiceClient yc = new YarnServiceClient();
yc.deleteApp(name);
return Response.status(Status.ACCEPTED).build();
}
/**
* Deploy an application.
*
* @apiGroup AppListController
* @apiName deploy
* @api {post} /app_list/{id} Deploy one instance of application.
* @apiParam {String} id Application ID to deploy.
* @apiSuccess {String} text Give deployment status
* @apiError BadRequest Unable to deploy requested application.
* @param id - application ID
* @return Web response
*/
@POST
@Path("{id}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public Response deploy(@PathParam("id") String id, Service service) {
AppCatalogSolrClient sc = new AppCatalogSolrClient();
try {
sc.deployApp(id, service);
} catch (SolrServerException | IOException e) {
return Response.status(Status.BAD_REQUEST).entity(e.toString()).build();
}
YarnServiceClient yc = new YarnServiceClient();
yc.createApp(service);
String output = "{\"status\":\"Application deployed.\",\"id\":\"" +
service.getName() + "\"}";
return Response.status(Status.ACCEPTED).entity(output).build();
}
}
| AppListController |
java | junit-team__junit5 | jupiter-tests/src/test/java/org/junit/jupiter/engine/extension/TempDirectoryCleanupTests.java | {
"start": 7600,
"end": 7894
} | class ____ {
@TempDir(cleanup = ON_SUCCESS)
Path onSuccessPassingFieldDir;
@Test
void testOnSuccessPassingField() {
TempDirFieldTests.onSuccessPassingFieldDir = onSuccessPassingFieldDir;
}
}
@SuppressWarnings("JUnitMalformedDeclaration")
static | OnSuccessPassingFieldCase |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/DropboxEndpointBuilderFactory.java | {
"start": 24236,
"end": 29034
} | interface ____
extends
DropboxEndpointConsumerBuilder,
DropboxEndpointProducerBuilder {
default AdvancedDropboxEndpointBuilder advanced() {
return (AdvancedDropboxEndpointBuilder) this;
}
/**
* Name of the app registered to make API requests.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param clientIdentifier the value to set
* @return the dsl builder
*/
default DropboxEndpointBuilder clientIdentifier(String clientIdentifier) {
doSetProperty("clientIdentifier", clientIdentifier);
return this;
}
/**
* A space-separated list of sub-strings to search for. A file matches
* only if it contains all the sub-strings. If this option is not set,
* all files will be matched.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param query the value to set
* @return the dsl builder
*/
default DropboxEndpointBuilder query(String query) {
doSetProperty("query", query);
return this;
}
/**
* Original file or folder to move.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param remotePath the value to set
* @return the dsl builder
*/
default DropboxEndpointBuilder remotePath(String remotePath) {
doSetProperty("remotePath", remotePath);
return this;
}
/**
* The access token to make API requests for a specific Dropbox user.
*
* The option is a: <code>java.lang.String</code> type.
*
* Required: true
* Group: security
*
* @param accessToken the value to set
* @return the dsl builder
*/
default DropboxEndpointBuilder accessToken(String accessToken) {
doSetProperty("accessToken", accessToken);
return this;
}
/**
* The apiKey to make API requests for a specific Dropbox user.
*
* The option is a: <code>java.lang.String</code> type.
*
* Required: true
* Group: security
*
* @param apiKey the value to set
* @return the dsl builder
*/
default DropboxEndpointBuilder apiKey(String apiKey) {
doSetProperty("apiKey", apiKey);
return this;
}
/**
* The apiSecret to make API requests for a specific Dropbox user.
*
* The option is a: <code>java.lang.String</code> type.
*
* Required: true
* Group: security
*
* @param apiSecret the value to set
* @return the dsl builder
*/
default DropboxEndpointBuilder apiSecret(String apiSecret) {
doSetProperty("apiSecret", apiSecret);
return this;
}
/**
* The expire time to access token for a specific Dropbox user.
*
* The option is a: <code>java.lang.Long</code> type.
*
* Required: true
* Group: security
*
* @param expireIn the value to set
* @return the dsl builder
*/
default DropboxEndpointBuilder expireIn(Long expireIn) {
doSetProperty("expireIn", expireIn);
return this;
}
/**
* The expire time to access token for a specific Dropbox user.
*
* The option will be converted to a <code>java.lang.Long</code> type.
*
* Required: true
* Group: security
*
* @param expireIn the value to set
* @return the dsl builder
*/
default DropboxEndpointBuilder expireIn(String expireIn) {
doSetProperty("expireIn", expireIn);
return this;
}
/**
* The refresh token to refresh the access token for a specific Dropbox
* user.
*
* The option is a: <code>java.lang.String</code> type.
*
* Required: true
* Group: security
*
* @param refreshToken the value to set
* @return the dsl builder
*/
default DropboxEndpointBuilder refreshToken(String refreshToken) {
doSetProperty("refreshToken", refreshToken);
return this;
}
}
/**
* Advanced builder for endpoint for the Dropbox component.
*/
public | DropboxEndpointBuilder |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/EqualsIncompatibleTypeTest.java | {
"start": 11609,
"end": 11658
} | class ____ implements J {}
abstract | F1 |
java | apache__camel | dsl/camel-jbang/camel-jbang-core/src/main/java/org/apache/camel/dsl/jbang/core/commands/action/CamelReceiveAction.java | {
"start": 35296,
"end": 35631
} | class ____ {
JsonObject original;
Pid parent;
String pid;
String name;
long uid;
long timestamp;
JsonObject endpoint;
JsonObject endpointService;
JsonObject message;
Row(Pid parent) {
this.parent = parent;
}
}
private static | Row |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.