language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java | {
"start": 66248,
"end": 66485
} | class ____ extends Options.IntegerOption
implements Option {
private BufferSizeOption(int value) {
super(value);
}
}
// only used directly
private static | BufferSizeOption |
java | apache__hadoop | hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/ITestAbfsJobThroughManifestCommitter.java | {
"start": 2067,
"end": 3834
} | class ____
extends TestJobThroughManifestCommitter {
private final ABFSContractTestBinding binding;
public ITestAbfsJobThroughManifestCommitter() throws Exception {
binding = new ABFSContractTestBinding();
}
@BeforeEach
@Override
public void setup() throws Exception {
binding.setup();
super.setup();
}
@Override
protected Configuration createConfiguration() {
return enableManifestCommitter(prepareTestConfiguration(binding));
}
@Override
protected AbstractFSContract createContract(final Configuration conf) {
return new AbfsFileSystemContract(conf, binding.isSecureMode());
}
@Override
protected boolean shouldDeleteTestRootAtEndOfTestRun() {
return true;
}
/**
* Add read of manifest and validate of output's etags.
* @param attemptId attempt ID
* @param files files which were created.
* @param manifest manifest
* @throws IOException failure
*/
@Override
protected void validateTaskAttemptManifest(String attemptId,
List<Path> files,
TaskManifest manifest) throws IOException {
super.validateTaskAttemptManifest(attemptId, files, manifest);
final List<FileEntry> commit = manifest.getFilesToCommit();
final ManifestStoreOperations operations = getStoreOperations();
for (FileEntry entry : commit) {
Assertions.assertThat(entry.getEtag())
.describedAs("Etag of %s", entry)
.isNotEmpty();
final FileStatus sourceStatus = operations.getFileStatus(entry.getSourcePath());
final String etag = ManifestCommitterSupport.getEtag(sourceStatus);
Assertions.assertThat(etag)
.describedAs("Etag of %s", sourceStatus)
.isEqualTo(entry.getEtag());
}
}
}
| ITestAbfsJobThroughManifestCommitter |
java | spring-projects__spring-boot | core/spring-boot/src/test/java/org/springframework/boot/context/properties/bind/BindableTests.java | {
"start": 9539,
"end": 9808
} | class ____ {
private String property;
JavaBeanOrValueObject(String property) {
this.property = property;
}
String getProperty() {
return this.property;
}
void setProperty(String property) {
this.property = property;
}
}
}
| JavaBeanOrValueObject |
java | apache__camel | components/camel-mybatis/src/main/java/org/apache/camel/component/mybatis/MyBatisConsumer.java | {
"start": 1550,
"end": 7584
} | class ____ {
Exchange exchange;
Object data;
DataHolder() {
}
}
private static final Logger LOG = LoggerFactory.getLogger(MyBatisConsumer.class);
protected volatile ShutdownRunningTask shutdownRunningTask;
protected volatile int pendingExchanges;
private String onConsume;
private boolean useIterator = true;
private boolean routeEmptyResultSet;
public MyBatisConsumer(MyBatisEndpoint endpoint, Processor processor) {
super(endpoint, processor);
}
@Override
public MyBatisEndpoint getEndpoint() {
return (MyBatisEndpoint) super.getEndpoint();
}
/**
* Polls the database
*/
@Override
protected int poll() throws Exception {
// must reset for each poll
shutdownRunningTask = null;
pendingExchanges = 0;
// poll data from the database
MyBatisEndpoint endpoint = getEndpoint();
LOG.trace("Polling: {}", endpoint);
List<?> data = endpoint.getProcessingStrategy().poll(this, getEndpoint());
// okay we have some response from MyBatis so lets mark the consumer as ready
forceConsumerAsReady();
// create a list of exchange objects with the data
Queue<DataHolder> answer = new LinkedList<>();
if (useIterator) {
for (Object item : data) {
Exchange exchange = createExchange(item);
DataHolder holder = new DataHolder();
holder.exchange = exchange;
holder.data = item;
answer.add(holder);
}
} else {
if (!data.isEmpty() || routeEmptyResultSet) {
Exchange exchange = createExchange(data);
DataHolder holder = new DataHolder();
holder.exchange = exchange;
holder.data = data;
answer.add(holder);
}
}
// process all the exchanges in this batch
return processBatch(CastUtils.cast(answer));
}
@Override
public int processBatch(Queue<Object> exchanges) throws Exception {
final MyBatisEndpoint endpoint = getEndpoint();
int total = exchanges.size();
// limit if needed
if (maxMessagesPerPoll > 0 && total > maxMessagesPerPoll) {
LOG.debug("Limiting to maximum messages to poll {} as there were {} messages in this poll.",
maxMessagesPerPoll, total);
total = maxMessagesPerPoll;
}
for (int index = 0; index < total && isBatchAllowed(); index++) {
// only loop if we are started (allowed to run)
DataHolder holder = ObjectHelper.cast(DataHolder.class, exchanges.poll());
Exchange exchange = holder.exchange;
Object data = holder.data;
// add current index and total as properties
exchange.setProperty(ExchangePropertyKey.BATCH_INDEX, index);
exchange.setProperty(ExchangePropertyKey.BATCH_SIZE, total);
exchange.setProperty(ExchangePropertyKey.BATCH_COMPLETE, index == total - 1);
// update pending number of exchanges
pendingExchanges = total - index - 1;
Exception cause = null;
try {
getProcessor().process(exchange);
if (onConsume != null) {
endpoint.getProcessingStrategy().commit(endpoint, exchange, data, onConsume);
}
} catch (Exception e) {
handleException(e);
}
if (getEndpoint().isTransacted() && exchange.isFailed()) {
// break out as we are transacted and should rollback
cause = exchange.getException();
if (cause == null) {
cause = new RollbackExchangeException("Rollback transaction due error processing exchange", null);
}
}
releaseExchange(exchange, false);
if (cause != null) {
throw cause;
}
}
return total;
}
private Exchange createExchange(Object data) {
final MyBatisEndpoint endpoint = getEndpoint();
final Exchange exchange = createExchange(false);
exchange.setPattern(ExchangePattern.InOnly);
final String outputHeader = getEndpoint().getOutputHeader();
Message msg = exchange.getIn();
if (outputHeader != null) {
msg.setHeader(outputHeader, data);
} else {
msg.setBody(data);
}
msg.setHeader(MyBatisConstants.MYBATIS_STATEMENT_NAME, endpoint.getStatement());
return exchange;
}
/**
* Gets the statement(s) to run after successful processing. Use comma to separate multiple statements.
*/
public String getOnConsume() {
return onConsume;
}
/**
* Sets the statement to run after successful processing. Use comma to separate multiple statements.
*/
public void setOnConsume(String onConsume) {
this.onConsume = onConsume;
}
/**
* Indicates how resultset should be delivered to the route
*/
public boolean isUseIterator() {
return useIterator;
}
/**
* Sets how resultset should be delivered to route. Indicates delivery as either a list or individual object.
* defaults to true.
*/
public void setUseIterator(boolean useIterator) {
this.useIterator = useIterator;
}
/**
* Indicates whether empty resultset should be allowed to be sent to the next hop or not
*/
public boolean isRouteEmptyResultSet() {
return routeEmptyResultSet;
}
/**
* Sets whether empty resultset should be allowed to be sent to the next hop. defaults to false. So the empty
* resultset will be filtered out.
*/
public void setRouteEmptyResultSet(boolean routeEmptyResultSet) {
this.routeEmptyResultSet = routeEmptyResultSet;
}
}
| DataHolder |
java | spring-projects__spring-framework | spring-web/src/main/java/org/springframework/web/util/UriComponentsBuilder.java | {
"start": 22566,
"end": 23137
} | enum ____ {
/**
* This parser type expects URI's to conform to RFC 3986 syntax.
*/
RFC,
/**
* This parser follows the
* <a href="https://url.spec.whatwg.org/#url-parsing">URL parsing algorithm</a>
* in the WhatWG URL Living standard that browsers implement to align on
* lenient handling of user typed URL's that may not conform to RFC syntax.
* @see <a href="https://url.spec.whatwg.org">URL Living Standard</a>
* @see <a href="https://github.com/web-platform-tests/wpt/tree/master/url">URL tests</a>
*/
WHAT_WG
}
private | ParserType |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/metrics/TestAMRMClientRelayerMetrics.java | {
"start": 3095,
"end": 20144
} | class ____
implements ApplicationMasterProtocol {
private boolean failover = false;
private boolean exception = false;
private List<ResourceRequest> lastAsk;
private List<ContainerId> lastRelease;
private List<UpdateContainerRequest> lastUpdates;
private List<String> lastBlacklistAdditions;
private List<String> lastBlacklistRemovals;
private AllocateResponse response = AllocateResponse
.newInstance(0, null, null, new ArrayList<NodeReport>(),
Resource.newInstance(0, 0), null, 0, null, null);
@Override
public RegisterApplicationMasterResponse registerApplicationMaster(
RegisterApplicationMasterRequest request)
throws YarnException, IOException {
return null;
}
@Override
public FinishApplicationMasterResponse finishApplicationMaster(
FinishApplicationMasterRequest request)
throws YarnException, IOException {
if (this.failover) {
this.failover = false;
throw new ApplicationMasterNotRegisteredException("Mock RM restarted");
}
return null;
}
@Override
public AllocateResponse allocate(AllocateRequest request)
throws YarnException, IOException {
if (this.failover) {
this.failover = false;
throw new ApplicationMasterNotRegisteredException("Mock RM restarted");
}
if(this.exception){
this.exception = false;
throw new YarnException("Mock RM encountered exception");
}
this.lastAsk = request.getAskList();
this.lastRelease = request.getReleaseList();
this.lastUpdates = request.getUpdateRequests();
this.lastBlacklistAdditions =
request.getResourceBlacklistRequest().getBlacklistAdditions();
this.lastBlacklistRemovals =
request.getResourceBlacklistRequest().getBlacklistRemovals();
return response;
}
public void setFailoverFlag() {
this.failover = true;
}
}
private Configuration conf;
private MockApplicationMasterService mockAMS;
private String homeID = "home";
private AMRMClientRelayer homeRelayer;
private String uamID = "uam";
private AMRMClientRelayer uamRelayer;
private List<ResourceRequest> asks = new ArrayList<>();
private List<ContainerId> releases = new ArrayList<>();
private List<UpdateContainerRequest> updates = new ArrayList<>();
private List<String> blacklistAdditions = new ArrayList<>();
private List<String> blacklistRemoval = new ArrayList<>();
@BeforeEach
public void setup() throws YarnException, IOException {
this.conf = new Configuration();
this.mockAMS = new MockApplicationMasterService();
this.homeRelayer = new AMRMClientRelayer(this.mockAMS,
ApplicationId.newInstance(0, 0), this.homeID, conf);
this.homeRelayer.registerApplicationMaster(
RegisterApplicationMasterRequest.newInstance("", 0, ""));
this.uamRelayer = new AMRMClientRelayer(this.mockAMS,
ApplicationId.newInstance(0, 0), this.uamID, conf);
this.uamRelayer.registerApplicationMaster(
RegisterApplicationMasterRequest.newInstance("", 0, ""));
clearAllocateRequestLists();
AMRMClientRelayerMetrics.getInstance()
.setClientPending(homeID, RequestType.Guaranteed, 0);
AMRMClientRelayerMetrics.getInstance()
.setClientPending(homeID, RequestType.Opportunistic, 0);
AMRMClientRelayerMetrics.getInstance()
.setClientPending(homeID, RequestType.Promote, 0);
AMRMClientRelayerMetrics.getInstance()
.setClientPending(homeID, RequestType.Demote, 0);
AMRMClientRelayerMetrics.getInstance()
.setClientPending(uamID, RequestType.Guaranteed, 0);
AMRMClientRelayerMetrics.getInstance()
.setClientPending(uamID, RequestType.Opportunistic, 0);
AMRMClientRelayerMetrics.getInstance()
.setClientPending(uamID, RequestType.Promote, 0);
AMRMClientRelayerMetrics.getInstance()
.setClientPending(uamID, RequestType.Demote, 0);
}
private AllocateRequest getAllocateRequest() {
// Need to create a new one every time because rather than directly
// referring the lists, the protobuf impl makes a copy of the lists
return AllocateRequest.newBuilder()
.responseId(0)
.progress(0).askList(asks)
.releaseList(new ArrayList<>(this.releases))
.resourceBlacklistRequest(ResourceBlacklistRequest.newInstance(
new ArrayList<>(this.blacklistAdditions),
new ArrayList<>(this.blacklistRemoval)))
.updateRequests(new ArrayList<>(this.updates))
.build();
}
private void clearAllocateRequestLists() {
this.asks.clear();
this.releases.clear();
this.updates.clear();
this.blacklistAdditions.clear();
this.blacklistRemoval.clear();
}
private static UpdateContainerRequest createPromote(int id){
return UpdateContainerRequest.newInstance(0, createContainerId(id),
ContainerUpdateType.PROMOTE_EXECUTION_TYPE, Resource.newInstance(0, 0),
ExecutionType.GUARANTEED);
}
private static UpdateContainerRequest createDemote(int id){
return UpdateContainerRequest.newInstance(0, createContainerId(id),
ContainerUpdateType.DEMOTE_EXECUTION_TYPE, Resource.newInstance(0, 0),
ExecutionType.OPPORTUNISTIC);
}
private static ContainerId createContainerId(int id) {
return ContainerId.newContainerId(
ApplicationAttemptId.newInstance(ApplicationId.newInstance(1, 1), 1),
id);
}
public ResourceRequest createResourceRequest(long id, String resource,
int memory, int vCores, int priority, ExecutionType execType,
int containers) {
ResourceRequest req = Records.newRecord(ResourceRequest.class);
req.setAllocationRequestId(id);
req.setResourceName(resource);
req.setCapability(Resource.newInstance(memory, vCores));
req.setPriority(Priority.newInstance(priority));
req.setExecutionTypeRequest(ExecutionTypeRequest.newInstance(execType));
req.setNumContainers(containers);
return req;
}
@Test
public void testGPending() throws YarnException, IOException {
// Ask for two containers, one with location preference
this.asks.add(
createResourceRequest(0, "node", 2048, 1, 1, ExecutionType.GUARANTEED,
1));
this.asks.add(
createResourceRequest(0, "rack", 2048, 1, 1, ExecutionType.GUARANTEED,
1));
this.asks.add(createResourceRequest(0, ResourceRequest.ANY, 2048, 1, 1,
ExecutionType.GUARANTEED, 2));
this.homeRelayer.allocate(getAllocateRequest());
assertEquals(2, AMRMClientRelayerMetrics.getInstance()
.getPendingMetric(homeID, RequestType.Guaranteed).value());
assertEquals(0, AMRMClientRelayerMetrics.getInstance()
.getPendingMetric(uamID, RequestType.Guaranteed).value());
// Ask from the uam
this.uamRelayer.allocate(getAllocateRequest());
assertEquals(2, AMRMClientRelayerMetrics.getInstance()
.getPendingMetric(homeID, RequestType.Guaranteed).value());
assertEquals(2, AMRMClientRelayerMetrics.getInstance()
.getPendingMetric(uamID, RequestType.Guaranteed).value());
// Update the any to ask for an extra container
this.asks.get(2).setNumContainers(3);
this.homeRelayer.allocate(getAllocateRequest());
assertEquals(3, AMRMClientRelayerMetrics.getInstance()
.getPendingMetric(homeID, RequestType.Guaranteed).value());
assertEquals(2, AMRMClientRelayerMetrics.getInstance()
.getPendingMetric(uamID, RequestType.Guaranteed).value());
// Update the any to ask to pretend a container was allocated
this.asks.get(2).setNumContainers(2);
this.homeRelayer.allocate(getAllocateRequest());
assertEquals(2, AMRMClientRelayerMetrics.getInstance()
.getPendingMetric(homeID, RequestType.Guaranteed).value());
assertEquals(2, AMRMClientRelayerMetrics.getInstance()
.getPendingMetric(uamID, RequestType.Guaranteed).value());
}
@Test
public void testPromotePending() throws YarnException, IOException {
// Ask to promote 3 containers
this.updates.add(createPromote(1));
this.updates.add(createPromote(2));
this.updates.add(createPromote(3));
this.homeRelayer.allocate(getAllocateRequest());
assertEquals(3, AMRMClientRelayerMetrics.getInstance()
.getPendingMetric(homeID, RequestType.Promote).value());
// Demote 2 containers, one of which is pending promote
this.updates.remove(createPromote(3));
this.updates.add(createDemote(3));
this.updates.add(createDemote(4));
this.homeRelayer.allocate(getAllocateRequest());
assertEquals(2, AMRMClientRelayerMetrics.getInstance()
.getPendingMetric(homeID, RequestType.Promote).value());
// Let the RM respond with two successful promotions, one of which
// was pending promote
List<UpdatedContainer> updated = new ArrayList<>();
updated.add(UpdatedContainer
.newInstance(ContainerUpdateType.PROMOTE_EXECUTION_TYPE, Container
.newInstance(createContainerId(2), null, null, null,
null, null)));
updated.add(UpdatedContainer
.newInstance(ContainerUpdateType.PROMOTE_EXECUTION_TYPE, Container
.newInstance(createContainerId(5), null, null, null,
null, null)));
this.mockAMS.response.setUpdatedContainers(updated);
this.homeRelayer.allocate(getAllocateRequest());
assertEquals(1, AMRMClientRelayerMetrics.getInstance()
.getPendingMetric(homeID, RequestType.Promote).value());
// Remove the promoted container and clean up response
this.mockAMS.response.getUpdatedContainers().clear();
this.updates.remove(createPromote(2));
// Let the RM respond with two completed containers, one of which was
// pending promote
List<ContainerStatus> completed = new ArrayList<>();
completed
.add(ContainerStatus.newInstance(createContainerId(1), null, "", 0));
completed
.add(ContainerStatus.newInstance(createContainerId(6), null, "", 0));
this.mockAMS.response.setCompletedContainersStatuses(completed);
this.homeRelayer.allocate(getAllocateRequest());
assertEquals(0, AMRMClientRelayerMetrics.getInstance()
.getPendingMetric(homeID, RequestType.Promote).value());
}
@Test
public void testCleanUpOnFinish() throws YarnException, IOException {
// Ask for two containers, one with location preference
this.asks.add(
createResourceRequest(0, "node", 2048, 1, 1, ExecutionType.GUARANTEED,
1));
this.asks.add(
createResourceRequest(0, "rack", 2048, 1, 1, ExecutionType.GUARANTEED,
1));
this.asks.add(createResourceRequest(0, ResourceRequest.ANY, 2048, 1, 1,
ExecutionType.GUARANTEED, 2));
// Ask to promote 3 containers
this.updates.add(createPromote(1));
this.updates.add(createPromote(2));
this.updates.add(createPromote(3));
// Run the allocate call to start tracking pending
this.homeRelayer.allocate(getAllocateRequest());
// After finish, the metrics should reset to zero
this.homeRelayer.shutdown();
assertEquals(0, AMRMClientRelayerMetrics.getInstance()
.getPendingMetric(homeID, RequestType.Guaranteed).value());
assertEquals(0, AMRMClientRelayerMetrics.getInstance()
.getPendingMetric(homeID, RequestType.Promote).value());
}
@Test
public void testFailover() throws YarnException, IOException {
// Ask for two containers, one with location preference
this.asks.add(
createResourceRequest(0, "node", 2048, 1, 1, ExecutionType.GUARANTEED,
1));
this.asks.add(
createResourceRequest(0, "rack", 2048, 1, 1, ExecutionType.GUARANTEED,
1));
this.asks.add(createResourceRequest(0, ResourceRequest.ANY, 2048, 1, 1,
ExecutionType.GUARANTEED, 2));
long previousSuccess = AMRMClientRelayerMetrics.getInstance()
.getHeartbeatSuccessMetric(homeID).value();
long previousFailover = AMRMClientRelayerMetrics.getInstance()
.getRMMasterSlaveSwitchMetric(homeID).value();
// Set failover to trigger
mockAMS.failover = true;
this.homeRelayer.allocate(getAllocateRequest());
// The failover metric should be incremented
assertEquals(++previousFailover,
AMRMClientRelayerMetrics.getInstance()
.getRMMasterSlaveSwitchMetric(homeID).value());
// The success metric should be incremented once
assertEquals(++previousSuccess,
AMRMClientRelayerMetrics.getInstance()
.getHeartbeatSuccessMetric(homeID).value());
assertEquals(2, AMRMClientRelayerMetrics.getInstance()
.getPendingMetric(homeID, RequestType.Guaranteed).value());
assertEquals(0, AMRMClientRelayerMetrics.getInstance()
.getPendingMetric(uamID, RequestType.Guaranteed).value());
// Ask from the uam
this.uamRelayer.allocate(getAllocateRequest());
assertEquals(2, AMRMClientRelayerMetrics.getInstance()
.getPendingMetric(homeID, RequestType.Guaranteed).value());
assertEquals(2, AMRMClientRelayerMetrics.getInstance()
.getPendingMetric(uamID, RequestType.Guaranteed).value());
// Update the any to ask for an extra container
this.asks.get(2).setNumContainers(3);
mockAMS.failover = true;
this.homeRelayer.allocate(getAllocateRequest());
// The failover metric should be incremented
assertEquals(++previousFailover,
AMRMClientRelayerMetrics.getInstance()
.getRMMasterSlaveSwitchMetric(homeID).value());
// The success metric should be incremented once
assertEquals(++previousSuccess,
AMRMClientRelayerMetrics.getInstance()
.getHeartbeatSuccessMetric(homeID).value());
assertEquals(3, AMRMClientRelayerMetrics.getInstance()
.getPendingMetric(homeID, RequestType.Guaranteed).value());
assertEquals(2, AMRMClientRelayerMetrics.getInstance()
.getPendingMetric(uamID, RequestType.Guaranteed).value());
// Update the any to ask to pretend a container was allocated
this.asks.get(2).setNumContainers(2);
mockAMS.failover = true;
this.homeRelayer.allocate(getAllocateRequest());
// The failover metric should be incremented
assertEquals(++previousFailover,
AMRMClientRelayerMetrics.getInstance()
.getRMMasterSlaveSwitchMetric(homeID).value());
// The success metric should be incremented once
assertEquals(++previousSuccess,
AMRMClientRelayerMetrics.getInstance()
.getHeartbeatSuccessMetric(homeID).value());
assertEquals(2, AMRMClientRelayerMetrics.getInstance()
.getPendingMetric(homeID, RequestType.Guaranteed).value());
assertEquals(2, AMRMClientRelayerMetrics.getInstance()
.getPendingMetric(uamID, RequestType.Guaranteed).value());
long previousFailure = AMRMClientRelayerMetrics.getInstance()
.getHeartbeatFailureMetric(homeID).value();
mockAMS.exception = true;
try{
this.homeRelayer.allocate(getAllocateRequest());
fail();
} catch (YarnException e){
}
// The failover metric should not be incremented
assertEquals(previousFailover,
AMRMClientRelayerMetrics.getInstance()
.getRMMasterSlaveSwitchMetric(homeID).value());
// The success metric should not be incremented
assertEquals(previousSuccess,
AMRMClientRelayerMetrics.getInstance()
.getHeartbeatSuccessMetric(homeID).value());
// The failure metric should be incremented
assertEquals(++previousFailure,
AMRMClientRelayerMetrics.getInstance()
.getHeartbeatFailureMetric(homeID).value());
mockAMS.failover = true;
mockAMS.exception = true;
try{
this.homeRelayer.allocate(getAllocateRequest());
fail();
} catch (YarnException e){
}
// The failover metric should be incremented
assertEquals(++previousFailover,
AMRMClientRelayerMetrics.getInstance()
.getRMMasterSlaveSwitchMetric(homeID).value());
// The success metric should not be incremented
assertEquals(previousSuccess,
AMRMClientRelayerMetrics.getInstance()
.getHeartbeatSuccessMetric(homeID).value());
// The failure metric should be incremented
assertEquals(++previousFailure,
AMRMClientRelayerMetrics.getInstance()
.getHeartbeatFailureMetric(homeID).value());
}
@Test
public void testNewEmptyRequest()
throws YarnException, IOException {
// Ask for zero containers
this.asks.add(createResourceRequest(1, ResourceRequest.ANY, 2048, 1, 1,
ExecutionType.GUARANTEED, 0));
this.homeRelayer.allocate(getAllocateRequest());
assertEquals(0, AMRMClientRelayerMetrics.getInstance()
.getPendingMetric(homeID, RequestType.Guaranteed).value());
assertEquals(0, AMRMClientRelayerMetrics.getInstance()
.getPendingMetric(uamID, RequestType.Guaranteed).value());
}
}
| MockApplicationMasterService |
java | google__error-prone | core/src/test/java/com/google/errorprone/ErrorProneJavacPluginTest.java | {
"start": 4968,
"end": 5988
} | class ____ implements Runnable {", //
" public void run() {}",
"}"),
UTF_8);
JavacFileManager fileManager = new JavacFileManager(new Context(), false, UTF_8);
DiagnosticCollector<JavaFileObject> diagnosticCollector = new DiagnosticCollector<>();
JavacTask task =
JavacTool.create()
.getTask(
null,
fileManager,
diagnosticCollector,
ImmutableList.of(
"-Xplugin:ErrorProne"
+ " -XepPatchChecks:MissingOverride -XepPatchLocation:IN_PLACE",
"-XDcompilePolicy=byfile",
"--should-stop=ifError=FLOW"),
ImmutableList.of(),
fileManager.getJavaFileObjects(fileA, fileB));
assertWithMessage(Joiner.on('\n').join(diagnosticCollector.getDiagnostics()))
.that(task.call())
.isTrue();
assertThat(Files.readAllLines(fileA, UTF_8))
.containsExactly(
" | B |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournal.java | {
"start": 2938,
"end": 20982
} | class ____ {
private static final NamespaceInfo FAKE_NSINFO = new NamespaceInfo(
12345, "mycluster", "my-bp", 0L);
private static final NamespaceInfo FAKE_NSINFO_2 = new NamespaceInfo(
6789, "mycluster", "my-bp", 0L);
private static final String JID = "test-journal";
private static final File TEST_LOG_DIR = new File(
new File(MiniDFSCluster.getBaseDirectory()), "TestJournal");
private final StorageErrorReporter mockErrorReporter = Mockito.mock(
StorageErrorReporter.class);
private Configuration conf;
private Journal journal;
@BeforeEach
public void setup() throws Exception {
FileUtil.fullyDelete(TEST_LOG_DIR);
conf = new Configuration();
// Enable fetching edits via RPC
conf.setBoolean(DFSConfigKeys.DFS_HA_TAILEDITS_INPROGRESS_KEY, true);
journal = new Journal(conf, TEST_LOG_DIR, JID, StartupOption.REGULAR,
mockErrorReporter);
journal.format(FAKE_NSINFO, false);
}
@AfterEach
public void verifyNoStorageErrors() throws Exception{
Mockito.verify(mockErrorReporter, Mockito.never())
.reportErrorOnFile(Mockito.<File>any());
}
@AfterEach
public void cleanup() {
IOUtils.closeStream(journal);
}
/**
* Test whether JNs can correctly handle editlog that cannot be decoded.
*/
@Test
public void testScanEditLog() throws Exception {
// use a future layout version
journal.startLogSegment(makeRI(1), 1,
NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION - 1);
// in the segment we write garbage editlog, which can be scanned but
// cannot be decoded
final int numTxns = 5;
byte[] ops = QJMTestUtil.createGabageTxns(1, 5);
journal.journal(makeRI(2), 1, 1, numTxns, ops);
// verify the in-progress editlog segment
SegmentStateProto segmentState = journal.getSegmentInfo(1);
assertTrue(segmentState.getIsInProgress());
assertEquals(numTxns, segmentState.getEndTxId());
assertEquals(1, segmentState.getStartTxId());
// finalize the segment and verify it again
journal.finalizeLogSegment(makeRI(3), 1, numTxns);
segmentState = journal.getSegmentInfo(1);
assertFalse(segmentState.getIsInProgress());
assertEquals(numTxns, segmentState.getEndTxId());
assertEquals(1, segmentState.getStartTxId());
}
/**
* Test for HDFS-14557 to ensure that a edit file that failed to fully
* allocate and has a header byte of -1 is moved aside to allow startup
* to progress.
*/
@Test
public void testEmptyEditsInProgressMovedAside() throws Exception {
// First, write 5 transactions to the journal
journal.startLogSegment(makeRI(1), 1,
NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION - 1);
final int numTxns = 5;
byte[] ops = QJMTestUtil.createTxnData(1, 5);
journal.journal(makeRI(2), 1, 1, numTxns, ops);
// Now close the segment
journal.finalizeLogSegment(makeRI(3), 1, numTxns);
// Create a new segment creating a new edits_inprogress file
journal.startLogSegment(makeRI(4), 6,
NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION - 1);
ops = QJMTestUtil.createTxnData(6, 5);
journal.journal(makeRI(5), 6, 6, numTxns, ops);
File eip = journal.getStorage().getInProgressEditLog(6);
// Now stop the journal without finalizing the segment
journal.close();
// Now "zero out" the EIP file with -1 bytes, similar to how it would
// appear if the pre-allocation failed
RandomAccessFile rwf = new RandomAccessFile(eip, "rw");
for (int i=0; i<rwf.length(); i++) {
rwf.write(-1);
}
rwf.close();
// Finally start the Journal again, and ensure the "zeroed out" file
// is renamed with a .empty extension
journal = new Journal(conf, TEST_LOG_DIR, JID, StartupOption.REGULAR,
mockErrorReporter);
File movedTo = new File(eip.getAbsolutePath()+".empty");
assertTrue(movedTo.exists());
}
@Test
@Timeout(value = 10)
public void testEpochHandling() throws Exception {
assertEquals(0, journal.getLastPromisedEpoch());
NewEpochResponseProto newEpoch =
journal.newEpoch(FAKE_NSINFO, 1);
assertFalse(newEpoch.hasLastSegmentTxId());
assertEquals(1, journal.getLastPromisedEpoch());
journal.newEpoch(FAKE_NSINFO, 3);
assertFalse(newEpoch.hasLastSegmentTxId());
assertEquals(3, journal.getLastPromisedEpoch());
try {
journal.newEpoch(FAKE_NSINFO, 3);
fail("Should have failed to promise same epoch twice");
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains(
"Proposed epoch 3 <= last promise 3", ioe);
}
try {
journal.startLogSegment(makeRI(1), 12345L,
NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
fail("Should have rejected call from prior epoch");
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains(
"epoch 1 is less than the last promised epoch 3", ioe);
}
try {
journal.journal(makeRI(1), 12345L, 100L, 0, new byte[0]);
fail("Should have rejected call from prior epoch");
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains(
"epoch 1 is less than the last promised epoch 3", ioe);
}
}
@Test
@Timeout(value = 10)
public void testMaintainCommittedTxId() throws Exception {
journal.newEpoch(FAKE_NSINFO, 1);
journal.startLogSegment(makeRI(1), 1,
NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
// Send txids 1-3, with a request indicating only 0 committed
journal.journal(new RequestInfo(JID, null, 1, 2, 0), 1, 1, 3,
QJMTestUtil.createTxnData(1, 3));
assertEquals(0, journal.getCommittedTxnId());
// Send 4-6, with request indicating that through 3 is committed.
journal.journal(new RequestInfo(JID, null, 1, 3, 3), 1, 4, 3,
QJMTestUtil.createTxnData(4, 6));
assertEquals(3, journal.getCommittedTxnId());
}
@Test
@Timeout(value = 10)
public void testRestartJournal() throws Exception {
journal.newEpoch(FAKE_NSINFO, 1);
journal.startLogSegment(makeRI(1), 1,
NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
journal.journal(makeRI(2), 1, 1, 2,
QJMTestUtil.createTxnData(1, 2));
// Don't finalize.
String storageString = journal.getStorage().toColonSeparatedString();
System.err.println("storage string: " + storageString);
journal.close(); // close to unlock the storage dir
// Now re-instantiate, make sure history is still there
journal = new Journal(conf, TEST_LOG_DIR, JID, StartupOption.REGULAR,
mockErrorReporter);
// The storage info should be read, even if no writer has taken over.
assertEquals(storageString,
journal.getStorage().toColonSeparatedString());
assertEquals(1, journal.getLastPromisedEpoch());
NewEpochResponseProtoOrBuilder newEpoch = journal.newEpoch(FAKE_NSINFO, 2);
assertEquals(1, newEpoch.getLastSegmentTxId());
}
@Test
@Timeout(value = 10)
public void testFormatResetsCachedValues() throws Exception {
journal.newEpoch(FAKE_NSINFO, 12345L);
journal.startLogSegment(new RequestInfo(JID, null, 12345L, 1L, 0L), 1L,
NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
assertEquals(12345L, journal.getLastPromisedEpoch());
assertEquals(12345L, journal.getLastWriterEpoch());
assertTrue(journal.isFormatted());
// Close the journal in preparation for reformatting it.
journal.close();
// Clear the storage directory before reformatting it
journal.getStorage().getJournalManager()
.getStorageDirectory().clearDirectory();
journal.format(FAKE_NSINFO_2, false);
assertEquals(0, journal.getLastPromisedEpoch());
assertEquals(0, journal.getLastWriterEpoch());
assertTrue(journal.isFormatted());
}
/**
 * Test that, if the writer crashes at the very beginning of a segment,
 * before any transactions are written, that the next newEpoch() call
 * returns the prior segment txid as its most recent segment.
 */
@Test
@Timeout(value = 10)
public void testNewEpochAtBeginningOfSegment() throws Exception {
journal.newEpoch(FAKE_NSINFO, 1);
// Write and finalize a first segment covering txns 1-2.
journal.startLogSegment(makeRI(1), 1,
NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
journal.journal(makeRI(2), 1, 1, 2,
QJMTestUtil.createTxnData(1, 2));
journal.finalizeLogSegment(makeRI(3), 1, 2);
// Start a new segment at txid 3 but write nothing into it,
// simulating a writer crash right after startLogSegment().
journal.startLogSegment(makeRI(4), 3,
NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
// The empty in-progress segment should be ignored; the finalized
// segment starting at txid 1 is the most recent one reported.
NewEpochResponseProto resp = journal.newEpoch(FAKE_NSINFO, 2);
assertEquals(1, resp.getLastSegmentTxId());
}
/**
 * Test that the journal's storage directory is exclusively locked: a second
 * Journal over the same directory must fail to construct while the first is
 * open, and must succeed once the first has been closed (releasing the lock).
 * Skipped on platforms where directory locking is unsupported.
 */
@Test
@Timeout(value = 10)
public void testJournalLocking() throws Exception {
assumeTrue(journal.getStorage().getStorageDir(0).isLockSupported());
StorageDirectory sd = journal.getStorage().getStorageDir(0);
File lockFile = new File(sd.getRoot(), Storage.STORAGE_FILE_LOCK);
// Journal should be locked, since the format() call locks it.
GenericTestUtils.assertExists(lockFile);
journal.newEpoch(FAKE_NSINFO, 1);
try {
new Journal(conf, TEST_LOG_DIR, JID, StartupOption.REGULAR,
mockErrorReporter);
fail("Did not fail to create another journal in same dir");
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains(
"Cannot lock storage", ioe);
}
journal.close();
// Journal should no longer be locked after the close() call.
// Hence, should be able to create a new Journal in the same dir.
Journal journal2 = new Journal(conf, TEST_LOG_DIR, JID,
StartupOption.REGULAR, mockErrorReporter);
journal2.newEpoch(FAKE_NSINFO, 2);
journal2.close();
}
/**
 * Test finalizing a segment after some batch of edits were missed.
 * This should fail, since we validate the log before finalization.
 */
@Test
@Timeout(value = 10)
public void testFinalizeWhenEditsAreMissed() throws Exception {
journal.newEpoch(FAKE_NSINFO, 1);
journal.startLogSegment(makeRI(1), 1,
NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
journal.journal(makeRI(2), 1, 1, 3,
QJMTestUtil.createTxnData(1, 3));
// Try to finalize up to txn 6, even though we only wrote up to txn 3.
try {
journal.finalizeLogSegment(makeRI(3), 1, 6);
fail("did not fail to finalize");
} catch (JournalOutOfSyncException e) {
// The in-memory highest-written txid (3) blocks the finalization.
GenericTestUtils.assertExceptionContains(
"but only written up to txid 3", e);
}
// Check that, even if we re-construct the journal by scanning the
// disk, we don't allow finalizing incorrectly.
journal.close();
journal = new Journal(conf, TEST_LOG_DIR, JID, StartupOption.REGULAR,
mockErrorReporter);
try {
journal.finalizeLogSegment(makeRI(4), 1, 6);
fail("did not fail to finalize");
} catch (JournalOutOfSyncException e) {
// After a restart the check is based on what is actually on disk,
// hence the different error message.
GenericTestUtils.assertExceptionContains(
"disk only contains up to txid 3", e);
}
}
/**
 * Ensure that finalizing a segment which doesn't exist throws the
 * appropriate exception.
 */
@Test
@Timeout(value = 10)
public void testFinalizeMissingSegment() throws Exception {
journal.newEpoch(FAKE_NSINFO, 1);
// No segment was ever started at txid 1000, so finalization must fail.
try {
journal.finalizeLogSegment(makeRI(1), 1000, 1001);
fail("did not fail to finalize");
} catch (JournalOutOfSyncException e) {
GenericTestUtils.assertExceptionContains(
"No log file to finalize at transaction ID 1000", e);
}
}
/**
 * Assume that a client is writing to a journal, but loses its connection
 * in the middle of a segment. Thus, any future journal() calls in that
 * segment may fail, because some txns were missed while the connection was
 * down.
 *
 * Eventually, the connection comes back, and the NN tries to start a new
 * segment at a higher txid. This should abort the old one and succeed.
 */
@Test
@Timeout(value = 10)
public void testAbortOldSegmentIfFinalizeIsMissed() throws Exception {
journal.newEpoch(FAKE_NSINFO, 1);
// Start a segment at txid 1, and write a batch of 3 txns.
journal.startLogSegment(makeRI(1), 1,
NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
journal.journal(makeRI(2), 1, 1, 3,
QJMTestUtil.createTxnData(1, 3));
GenericTestUtils.assertExists(
journal.getStorage().getInProgressEditLog(1));
// Try to start new segment at txid 6, this should abort old segment and
// then succeed, allowing us to write txid 6-9.
journal.startLogSegment(makeRI(3), 6,
NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
journal.journal(makeRI(4), 6, 6, 3,
QJMTestUtil.createTxnData(6, 3));
// The old segment should *not* be finalized.
// Both segments remain on disk in their in-progress state.
GenericTestUtils.assertExists(
journal.getStorage().getInProgressEditLog(1));
GenericTestUtils.assertExists(
journal.getStorage().getInProgressEditLog(6));
}
/**
 * Test behavior of startLogSegment() when a segment with the
 * same transaction ID already exists.
 */
@Test
@Timeout(value = 10)
public void testStartLogSegmentWhenAlreadyExists() throws Exception {
journal.newEpoch(FAKE_NSINFO, 1);
// Start a segment at txid 1, and write just 1 transaction. This
// would normally be the START_LOG_SEGMENT transaction.
journal.startLogSegment(makeRI(1), 1,
NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
journal.journal(makeRI(2), 1, 1, 1,
QJMTestUtil.createTxnData(1, 1));
// Try to start new segment at txid 1, this should succeed, because
// we are allowed to re-start a segment if we only ever had the
// START_LOG_SEGMENT transaction logged.
journal.startLogSegment(makeRI(3), 1,
NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
journal.journal(makeRI(4), 1, 1, 1,
QJMTestUtil.createTxnData(1, 1));
// This time through, write more transactions afterwards, simulating
// real user transactions.
journal.journal(makeRI(5), 1, 2, 3,
QJMTestUtil.createTxnData(2, 3));
// Now that the segment holds real transactions, re-starting it at the
// same txid must be rejected.
try {
journal.startLogSegment(makeRI(6), 1,
NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
fail("Did not fail to start log segment which would overwrite " +
"an existing one");
} catch (IllegalStateException ise) {
GenericTestUtils.assertExceptionContains(
"seems to contain valid transactions", ise);
}
journal.finalizeLogSegment(makeRI(7), 1, 4);
// Ensure that we cannot overwrite a finalized segment
try {
journal.startLogSegment(makeRI(8), 1,
NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
fail("Did not fail to start log segment which would overwrite " +
"an existing one");
} catch (IllegalStateException ise) {
GenericTestUtils.assertExceptionContains(
"have a finalized segment", ise);
}
}
/**
 * Build a {@link RequestInfo} for the test journal with the given IPC
 * serial number. The epoch is pinned to 1 and the committed txid to 0,
 * which is what most of the tests in this class expect.
 *
 * @param ipcSerialNumber the per-request serial number
 * @return a request descriptor for epoch 1 of journal {@code JID}
 */
private static RequestInfo makeRI(int ipcSerialNumber) {
  // Fields: journalId, nameServiceId (unused here), epoch, serial, committedTxId.
  return new RequestInfo(JID, null, 1, ipcSerialNumber, 0);
}
/**
 * Test that newEpoch() verifies the caller's namespace info against the
 * namespace the journal was formatted with, and rejects a mismatch.
 */
@Test
@Timeout(value = 10)
public void testNamespaceVerification() throws Exception {
journal.newEpoch(FAKE_NSINFO, 1);
// A second newEpoch() with a different namespace must be rejected.
try {
journal.newEpoch(FAKE_NSINFO_2, 2);
fail("Did not fail newEpoch() when namespaces mismatched");
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains(
"Incompatible namespaceID", ioe);
}
}
/**
 * Test that format() without the force flag refuses to reformat a storage
 * directory that already contains data (the setup has already formatted
 * the journal once).
 */
@Test
public void testFormatNonEmptyStorageDirectories() throws Exception {
try {
// Format again here and to format the non-empty directories in
// journal node.
journal.format(FAKE_NSINFO, false);
fail("Did not fail to format non-empty directories in journal node.");
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains(
"Can't format the storage directory because the current "
+ "directory is not empty.", ioe);
}
}
/**
 * Test that getJournaledEdits() serves edits back from the journaled-edits
 * cache, both for a partial range and for a request larger than what has
 * been written (which is truncated to the available txns), and that the
 * response uses the layout version of the segment being read.
 */
@Test
public void testReadFromCache() throws Exception {
journal.newEpoch(FAKE_NSINFO, 1);
journal.startLogSegment(makeRI(1), 1,
NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
// Write txns 1-15 in three batches of 5.
journal.journal(makeRI(2), 1, 1, 5, QJMTestUtil.createTxnData(1, 5));
journal.journal(makeRI(3), 1, 6, 5, QJMTestUtil.createTxnData(6, 5));
journal.journal(makeRI(4), 1, 11, 5, QJMTestUtil.createTxnData(11, 5));
// Ask for 7 txns starting at 1: get exactly txns 1-7.
assertJournaledEditsTxnCountAndContents(1, 7, 7,
NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
// Ask for 30 txns starting at 1: only 15 exist, so get txns 1-15.
assertJournaledEditsTxnCountAndContents(1, 30, 15,
NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
journal.finalizeLogSegment(makeRI(5), 1, 15);
// Start a new segment under an older layout version; reads from it
// must carry that layout version in the serialized header.
int newLayoutVersion = NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION - 1;
journal.startLogSegment(makeRI(6), 16, newLayoutVersion);
journal.journal(makeRI(7), 16, 16, 5, QJMTestUtil.createTxnData(16, 5));
assertJournaledEditsTxnCountAndContents(16, 10, 20, newLayoutVersion);
}
/**
 * Fetch journaled edits via {@code getJournaledEdits(startTxn,
 * requestedMaxTxns)} and assert that the response contains exactly the
 * txns {@code startTxn..expectedEndTxn}, serialized as an edit-log file
 * header (for {@code layoutVersion}) followed by the expected txn data.
 *
 * @param startTxn first txid requested
 * @param requestedMaxTxns maximum number of txns requested
 * @param expectedEndTxn last txid expected in the response
 * @param layoutVersion layout version expected in the serialized header
 * @throws Exception on RPC or serialization failure
 */
private void assertJournaledEditsTxnCountAndContents(int startTxn,
    int requestedMaxTxns, int expectedEndTxn, int layoutVersion)
    throws Exception {
  // Serialize the edit-log file header the response is expected to
  // prepend to the returned edits.
  ByteArrayOutputStream header = new ByteArrayOutputStream();
  EditLogFileOutputStream.writeHeader(layoutVersion,
      new DataOutputStream(header));
  int txnCount = expectedEndTxn - startTxn + 1;
  GetJournaledEditsResponseProto response =
      journal.getJournaledEdits(startTxn, requestedMaxTxns);
  assertEquals(txnCount, response.getTxnCount());
  byte[] expectedBytes = Bytes.concat(header.toByteArray(),
      QJMTestUtil.createTxnData(startTxn, txnCount));
  assertArrayEquals(expectedBytes, response.getEditLog().toByteArray());
}
/**
 * Test that format() with the force flag succeeds even though the storage
 * directory is non-empty (the setup has already formatted the journal).
 * This is the force-flag counterpart of
 * {@code testFormatNonEmptyStorageDirectories}.
 */
@Test
public void testFormatNonEmptyStorageDirectoriesWhenforceOptionIsTrue()
    throws Exception {
  // Format again here and to format the non-empty directories in
  // journal node. With force=true this must not throw; letting any
  // IOException propagate (rather than catching it and calling fail())
  // preserves the full stack trace in the test report.
  journal.format(FAKE_NSINFO, true);
}
}
| TestJournal |
java | elastic__elasticsearch | x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlTopNShardManagementIT.java | {
"start": 1467,
"end": 4662
} | class ____ extends AbstractPausableIntegTestCase {
private static List<SearchContext> searchContexts = new ArrayList<>();
private static final int SHARD_COUNT = 10;
@Override
protected Class<? extends Plugin> pausableFieldPluginClass() {
return TopNPausableFieldPlugin.class;
}
@Override
protected int shardCount() {
return SHARD_COUNT;
}
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return CollectionUtils.appendToCopy(super.nodePlugins(), MockSearchService.TestPlugin.class);
}
@Before
public void setupMockService() {
searchContexts.clear();
for (SearchService service : internalCluster().getInstances(SearchService.class)) {
((MockSearchService) service).setOnCreateSearchContext(ctx -> {
searchContexts.add(ctx);
scriptPermits.release();
});
}
}
public void testTopNOperatorReleasesContexts() throws Exception {
try (var initialResponse = sendAsyncQuery()) {
assertThat(
"Async query has finished, but it should have waited for semaphore release",
initialResponse.asyncExecutionId().isPresent(),
equalTo(true)
);
var getResultsRequest = new GetAsyncResultRequest(initialResponse.asyncExecutionId().get());
scriptPermits.release(numberOfDocs());
getResultsRequest.setWaitForCompletionTimeout(timeValueSeconds(10));
var result = client().execute(EsqlAsyncGetResultAction.INSTANCE, getResultsRequest).get();
assertThat(result.isRunning(), equalTo(false));
assertThat(result.isPartial(), equalTo(false));
result.close();
}
}
private static EsqlQueryResponse sendAsyncQuery() {
scriptPermits.drainPermits();
// Ensures there is no TopN pushdown to lucene, and that the pause happens after the TopN operator has been applied.
return client().execute(
EsqlQueryAction.INSTANCE,
asyncEsqlQueryRequest("from test | sort foo + 1 | limit 1 | where pause_me + 1 < 42 | stats sum(pause_me)").pragmas(
new QueryPragmas(
Settings.builder()
// Configured to ensure that there is only one worker handling all the shards, so that we can assert the correct
// expected behavior.
.put(QueryPragmas.MAX_CONCURRENT_NODES_PER_CLUSTER.getKey(), 1)
.put(QueryPragmas.MAX_CONCURRENT_SHARDS_PER_NODE.getKey(), SHARD_COUNT)
.put(QueryPragmas.TASK_CONCURRENCY.getKey(), 1)
.build()
)
)
).actionGet(1, TimeUnit.MINUTES);
}
@Override
protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
return Settings.builder()
.put(super.nodeSettings(nodeOrdinal, otherSettings))
.put(PlannerSettings.REDUCTION_LATE_MATERIALIZATION.getKey(), randomBoolean())
.build();
}
public static | EsqlTopNShardManagementIT |
java | spring-projects__spring-boot | documentation/spring-boot-docs/src/test/java/org/springframework/boot/docs/testing/utilities/testresttemplate/MySpringBootTestsTests.java | {
"start": 779,
"end": 839
} | class ____ extends MySpringBootTests {
}
| MySpringBootTestsTests |
java | alibaba__nacos | common/src/main/java/com/alibaba/nacos/common/remote/client/RpcConstants.java | {
"start": 3373,
"end": 3541
} | enum ____ represents a specific
* configuration attribute suffix. This allows for the construction of complete configuration property keys.
*/
public | constant |
java | spring-projects__spring-boot | module/spring-boot-graphql/src/main/java/org/springframework/boot/graphql/autoconfigure/rsocket/GraphQlRSocketAutoConfiguration.java | {
"start": 3659,
"end": 3943
} | interface ____ {
Encoder<?> jsonEncoder();
}
@Configuration(proxyBeanMethods = false)
@ConditionalOnBean(JsonMapper.class)
@ConditionalOnProperty(name = "spring.graphql.rsocket.preferred-json-mapper", havingValue = "jackson",
matchIfMissing = true)
static | JsonEncoderSupplier |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/CredentialsTestJob.java | {
"start": 1644,
"end": 1855
} | class ____ testing transport of keys via Credentials .
* Client passes a list of keys in the Credentials object.
* The mapper and reducer checks whether it can access the keys
* from Credentials.
*/
public | for |
java | alibaba__druid | core/src/main/java/com/alibaba/druid/sql/dialect/mysql/visitor/MySqlEvalVisitorImpl.java | {
"start": 987,
"end": 4168
} | class ____ extends MySqlASTVisitorAdapter implements SQLEvalVisitor {
private Map<String, Function> functions = new HashMap<String, Function>();
private List<Object> parameters = new ArrayList<Object>();
private int variantIndex = -1;
private boolean markVariantIndex = true;
public MySqlEvalVisitorImpl() {
this(new ArrayList<Object>(1));
}
public MySqlEvalVisitorImpl(List<Object> parameters) {
this.parameters = parameters;
}
public List<Object> getParameters() {
return parameters;
}
public void setParameters(List<Object> parameters) {
this.parameters = parameters;
}
public boolean visit(SQLCharExpr x) {
return SQLEvalVisitorUtils.visit(this, x);
}
public int incrementAndGetVariantIndex() {
return ++variantIndex;
}
public int getVariantIndex() {
return variantIndex;
}
public boolean visit(SQLVariantRefExpr x) {
return SQLEvalVisitorUtils.visit(this, x);
}
public boolean visit(SQLBinaryOpExpr x) {
return SQLEvalVisitorUtils.visit(this, x);
}
public boolean visit(SQLUnaryExpr x) {
return SQLEvalVisitorUtils.visit(this, x);
}
public boolean visit(SQLIntegerExpr x) {
return SQLEvalVisitorUtils.visit(this, x);
}
public boolean visit(SQLNumberExpr x) {
return SQLEvalVisitorUtils.visit(this, x);
}
public boolean visit(SQLHexExpr x) {
return SQLEvalVisitorUtils.visit(this, x);
}
@Override
public boolean visit(SQLBinaryExpr x) {
return SQLEvalVisitorUtils.visit(this, x);
}
@Override
public boolean visit(SQLCaseExpr x) {
return SQLEvalVisitorUtils.visit(this, x);
}
@Override
public boolean visit(SQLBetweenExpr x) {
return SQLEvalVisitorUtils.visit(this, x);
}
@Override
public boolean visit(SQLInListExpr x) {
return SQLEvalVisitorUtils.visit(this, x);
}
@Override
public boolean visit(SQLNullExpr x) {
return SQLEvalVisitorUtils.visit(this, x);
}
@Override
public boolean visit(SQLMethodInvokeExpr x) {
return SQLEvalVisitorUtils.visit(this, x);
}
@Override
public boolean visit(SQLQueryExpr x) {
return SQLEvalVisitorUtils.visit(this, x);
}
public boolean isMarkVariantIndex() {
return markVariantIndex;
}
public void setMarkVariantIndex(boolean markVariantIndex) {
this.markVariantIndex = markVariantIndex;
}
@Override
public Function getFunction(String funcName) {
return functions.get(funcName);
}
@Override
public void registerFunction(String funcName, Function function) {
functions.put(funcName, function);
}
public boolean visit(SQLIdentifierExpr x) {
return SQLEvalVisitorUtils.visit(this, x);
}
@Override
public void unregisterFunction(String funcName) {
functions.remove(funcName);
}
@Override
public boolean visit(SQLBooleanExpr x) {
x.getAttributes().put(EVAL_VALUE, x.getBooleanValue());
return false;
}
}
| MySqlEvalVisitorImpl |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/FlowableEndpointBuilderFactory.java | {
"start": 1954,
"end": 7783
} | interface ____
extends
EndpointConsumerBuilder {
default FlowableEndpointConsumerBuilder basic() {
return (FlowableEndpointConsumerBuilder) this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions (if possible) occurred while the Camel
* consumer is trying to pickup incoming messages, or the likes, will
* now be processed as a message and handled by the routing Error
* Handler. Important: This is only possible if the 3rd party component
* allows Camel to be alerted if an exception was thrown. Some
* components handle this internally only, and therefore
* bridgeErrorHandler is not possible. In other situations we may
* improve the Camel component to hook into the 3rd party component and
* make this possible for future releases. By default the consumer will
* use the org.apache.camel.spi.ExceptionHandler to deal with
* exceptions, that will be logged at WARN or ERROR level and ignored.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer (advanced)
*
* @param bridgeErrorHandler the value to set
* @return the dsl builder
*/
default AdvancedFlowableEndpointConsumerBuilder bridgeErrorHandler(boolean bridgeErrorHandler) {
doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions (if possible) occurred while the Camel
* consumer is trying to pickup incoming messages, or the likes, will
* now be processed as a message and handled by the routing Error
* Handler. Important: This is only possible if the 3rd party component
* allows Camel to be alerted if an exception was thrown. Some
* components handle this internally only, and therefore
* bridgeErrorHandler is not possible. In other situations we may
* improve the Camel component to hook into the 3rd party component and
* make this possible for future releases. By default the consumer will
* use the org.apache.camel.spi.ExceptionHandler to deal with
* exceptions, that will be logged at WARN or ERROR level and ignored.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: consumer (advanced)
*
* @param bridgeErrorHandler the value to set
* @return the dsl builder
*/
default AdvancedFlowableEndpointConsumerBuilder bridgeErrorHandler(String bridgeErrorHandler) {
doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* To let the consumer use a custom ExceptionHandler. Notice if the
* option bridgeErrorHandler is enabled then this option is not in use.
* By default the consumer will deal with exceptions, that will be
* logged at WARN or ERROR level and ignored.
*
* The option is a: <code>org.apache.camel.spi.ExceptionHandler</code>
* type.
*
* Group: consumer (advanced)
*
* @param exceptionHandler the value to set
* @return the dsl builder
*/
default AdvancedFlowableEndpointConsumerBuilder exceptionHandler(org.apache.camel.spi.ExceptionHandler exceptionHandler) {
doSetProperty("exceptionHandler", exceptionHandler);
return this;
}
/**
* To let the consumer use a custom ExceptionHandler. Notice if the
* option bridgeErrorHandler is enabled then this option is not in use.
* By default the consumer will deal with exceptions, that will be
* logged at WARN or ERROR level and ignored.
*
* The option will be converted to a
* <code>org.apache.camel.spi.ExceptionHandler</code> type.
*
* Group: consumer (advanced)
*
* @param exceptionHandler the value to set
* @return the dsl builder
*/
default AdvancedFlowableEndpointConsumerBuilder exceptionHandler(String exceptionHandler) {
doSetProperty("exceptionHandler", exceptionHandler);
return this;
}
/**
* Sets the exchange pattern when the consumer creates an exchange.
*
* The option is a: <code>org.apache.camel.ExchangePattern</code> type.
*
* Group: consumer (advanced)
*
* @param exchangePattern the value to set
* @return the dsl builder
*/
default AdvancedFlowableEndpointConsumerBuilder exchangePattern(org.apache.camel.ExchangePattern exchangePattern) {
doSetProperty("exchangePattern", exchangePattern);
return this;
}
/**
* Sets the exchange pattern when the consumer creates an exchange.
*
* The option will be converted to a
* <code>org.apache.camel.ExchangePattern</code> type.
*
* Group: consumer (advanced)
*
* @param exchangePattern the value to set
* @return the dsl builder
*/
default AdvancedFlowableEndpointConsumerBuilder exchangePattern(String exchangePattern) {
doSetProperty("exchangePattern", exchangePattern);
return this;
}
}
/**
* Builder for endpoint producers for the Flowable component.
*/
public | AdvancedFlowableEndpointConsumerBuilder |
java | spring-projects__spring-security | access/src/main/java/org/springframework/security/access/method/MapBasedMethodSecurityMetadataSource.java | {
"start": 10105,
"end": 10999
} | class ____ {
private final Method method;
private final Class<?> registeredJavaType;
RegisteredMethod(Method method, Class<?> registeredJavaType) {
Assert.notNull(method, "Method required");
Assert.notNull(registeredJavaType, "Registered Java Type required");
this.method = method;
this.registeredJavaType = registeredJavaType;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj instanceof RegisteredMethod rhs) {
return this.method.equals(rhs.method) && this.registeredJavaType.equals(rhs.registeredJavaType);
}
return false;
}
@Override
public int hashCode() {
return this.method.hashCode() * this.registeredJavaType.hashCode();
}
@Override
public String toString() {
return "RegisteredMethod[" + this.registeredJavaType.getName() + "; " + this.method + "]";
}
}
}
| RegisteredMethod |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/jdk8/OptionalDouble_Test.java | {
"start": 172,
"end": 599
} | class ____ extends TestCase {
public void test_optional() throws Exception {
Model model = new Model();
model.value = OptionalDouble.empty();
String text = JSON.toJSONString(model);
Assert.assertEquals("{\"value\":null}", text);
Model model2 = JSON.parseObject(text, Model.class);
Assert.assertEquals(model2.value, model.value);
}
public static | OptionalDouble_Test |
java | apache__camel | components/camel-microprofile/camel-microprofile-fault-tolerance/src/main/java/org/apache/camel/component/microprofile/faulttolerance/FaultToleranceConstants.java | {
"start": 875,
"end": 1002
} | interface ____ {
String DEFAULT_FAULT_TOLERANCE_CONFIGURATION_ID = "fault-tolerance-configuration";
}
| FaultToleranceConstants |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/RestrictedApiCheckerTest.java | {
"start": 11257,
"end": 11924
} | class ____ {
@Allowlist
void foo(RestrictedApiMethods m) {
m.restrictedMethod();
m.accept(m::restrictedMethod);
}
}
""")
.expectResult(Result.OK)
.doTest();
}
// Regression test for b/36160747
@Test
public void allowAllDefinitionsInFile() {
helper
.addSourceLines(
"Testcase.java",
"""
package separate.test;
import com.google.errorprone.annotations.RestrictedApi;
import java.lang.annotation.ElementType;
import java.lang.annotation.Target;
| Testcase |
java | alibaba__nacos | plugin/datasource/src/main/java/com/alibaba/nacos/plugin/datasource/enums/derby/TrustedDerbylFunctionEnum.java | {
"start": 758,
"end": 1026
} | enum ____ is used to enumerate and manage a list of trusted built-in SQL functions.
* By using this enum, you can verify whether a given SQL function is part of the trusted functions list
* to avoid potential SQL injection risks.
*
* @author blake.qiu
*/
public | class |
java | spring-projects__spring-boot | module/spring-boot-actuator-autoconfigure/src/main/java/org/springframework/boot/actuate/autoconfigure/endpoint/jackson/JacksonEndpointAutoConfiguration.java | {
"start": 1442,
"end": 1876
} | class ____ {
@Bean
@ConditionalOnBooleanProperty(name = "management.endpoints.jackson.isolated-json-mapper", matchIfMissing = true)
EndpointJsonMapper endpointJsonMapper() {
JsonMapper jsonMapper = JsonMapper.builder()
.changeDefaultPropertyInclusion(
(value) -> value.withValueInclusion(Include.NON_NULL).withContentInclusion(Include.NON_NULL))
.build();
return () -> jsonMapper;
}
}
| JacksonEndpointAutoConfiguration |
java | spring-projects__spring-security | oauth2/oauth2-jose/src/main/java/org/springframework/security/oauth2/jose/JwaAlgorithm.java | {
"start": 1291,
"end": 1412
} | interface ____ {
/**
* Returns the algorithm name.
* @return the algorithm name
*/
String getName();
}
| JwaAlgorithm |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestName.java | {
"start": 1099,
"end": 1398
} | class ____ implements BeforeEachCallback {
private volatile String name;
@Override
public void beforeEach(ExtensionContext extensionContext) throws Exception {
name = extensionContext.getTestMethod().get().getName();
}
public String getMethodName() {
return this.name;
}
} | TestName |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/deser/jdk/MapWithGenericValuesDeserTest.java | {
"start": 1988,
"end": 6109
} | class ____ {
protected String value;
private KeyTypeFactory(String v, boolean foo) { value = v; }
@JsonCreator
public static KeyTypeFactory create(String str) {
return new KeyTypeFactory(str, true);
}
}
/*
/**********************************************************
/* Test methods for sub-classing
/**********************************************************
*/
/**
* Verifying that sub-classing works ok wrt generics information
*/
@Test
public void testMapSubClass() throws Exception
{
ObjectMapper mapper = new ObjectMapper();
MapSubClass result = mapper.readValue
("{\"a\":true }", MapSubClass.class);
assertEquals(1, result.size());
Object value = result.get("a");
assertEquals(BooleanWrapper.class, value.getClass());
BooleanWrapper bw = (BooleanWrapper) value;
assertEquals(Boolean.TRUE, bw.b);
}
@Test
public void testMapWrapper() throws Exception
{
StringMap value = new ObjectMapper().readValue
("{\"entries\":{\"a\":9} }", StringMap.class);
assertNotNull(value.getEntries());
assertEquals(1, value.getEntries().size());
assertEquals(Long.valueOf(9), value.getEntries().get("a"));
}
@Test
public void testIntermediateTypes() throws Exception
{
StringStringWrapperMap result = new ObjectMapper().readValue
("{\"a\":\"b\"}", StringStringWrapperMap.class);
assertEquals(1, result.size());
Object value = result.get("a");
assertNotNull(value);
assertEquals(value.getClass(), StringWrapper.class);
assertEquals("b", ((StringWrapper) value).str);
}
/*
/**********************************************************
/* Test methods for sub-classing for annotation handling
/**********************************************************
*/
/**
* Verifying that sub-classing works ok wrt generics information
*/
@Test
public void testAnnotatedMap() throws Exception
{
ObjectMapper mapper = new ObjectMapper();
AnnotatedMap result = mapper.readValue
("{\"a\":true }", AnnotatedMap.class);
assertEquals(1, result.size());
Map.Entry<Object,Object> en = result.entrySet().iterator().next();
assertEquals(StringWrapper.class, en.getKey().getClass());
assertEquals(BooleanWrapper.class, en.getValue().getClass());
assertEquals("a", ((StringWrapper) en.getKey()).str);
assertEquals(Boolean.TRUE, ((BooleanWrapper) en.getValue()).b);
}
/*
/**********************************************************
/* Test methods for ensuring @JsonCreator works for keys
/**********************************************************
*/
@Test
public void testKeyViaCtor() throws Exception
{
ObjectMapper mapper = new ObjectMapper();
Map<KeyTypeCtor,Integer> map = mapper.readValue("{\"a\":123}",
defaultTypeFactory().constructMapType(HashMap.class, KeyTypeCtor.class, Integer.class));
assertEquals(1, map.size());
Map.Entry<?,?> entry = map.entrySet().iterator().next();
assertEquals(Integer.valueOf(123), entry.getValue());
Object key = entry.getKey();
assertEquals(KeyTypeCtor.class, key.getClass());
assertEquals("a", ((KeyTypeCtor) key).value);
}
@Test
public void testKeyViaFactory() throws Exception
{
ObjectMapper mapper = new ObjectMapper();
Map<KeyTypeCtor,Integer> map = mapper.readValue("{\"a\":123}",
defaultTypeFactory().constructMapType(HashMap.class, KeyTypeFactory.class, Integer.class));
assertEquals(1, map.size());
Map.Entry<?,?> entry = map.entrySet().iterator().next();
assertEquals(Integer.valueOf(123), entry.getValue());
Object key = entry.getKey();
assertEquals(KeyTypeFactory.class, key.getClass());
assertEquals("a", ((KeyTypeFactory) key).value);
}
}
| KeyTypeFactory |
java | alibaba__nacos | plugin/auth/src/main/java/com/alibaba/nacos/plugin/auth/exception/AccessException.java | {
"start": 849,
"end": 1184
} | class ____ extends NacosException {
private static final long serialVersionUID = -2926344920552803270L;
public AccessException() {
}
public AccessException(int code) {
this.setErrCode(code);
}
public AccessException(String msg) {
this.setErrMsg(msg);
}
}
| AccessException |
java | mybatis__mybatis-3 | src/test/java/org/apache/ibatis/submitted/parent_childs/ParentChildTest.java | {
"start": 1100,
"end": 3038
} | class ____ {
private static SqlSessionFactory sqlSessionFactory;
@BeforeAll
static void setUp() throws Exception {
// create a SqlSessionFactory
try (
Reader reader = Resources.getResourceAsReader("org/apache/ibatis/submitted/parent_childs/mybatis-config.xml")) {
sqlSessionFactory = new SqlSessionFactoryBuilder().build(reader);
}
// populate in-memory database
BaseDataTest.runScript(sqlSessionFactory.getConfiguration().getEnvironment().getDataSource(),
"org/apache/ibatis/submitted/parent_childs/CreateDB.sql");
}
@Test
void shouldGet2Parents() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
Mapper mapper = sqlSession.getMapper(Mapper.class);
List<Parent> parents = mapper.getParents();
Assertions.assertEquals(2, parents.size());
Parent firstParent = parents.get(0);
Assertions.assertEquals("Jose", firstParent.getName());
Assertions.assertEquals(2, firstParent.getChilds().size());
Parent secondParent = parents.get(1);
Assertions.assertEquals("Juan", secondParent.getName());
Assertions.assertEquals(0, secondParent.getChilds().size()); // note an empty list is inyected
}
}
// issue #1848
@Test
void shouldGet2ParentsWithConstructor() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
Mapper mapper = sqlSession.getMapper(Mapper.class);
List<Parent> parents = mapper.getParentsWithConstructor();
Assertions.assertEquals(2, parents.size());
Parent firstParent = parents.get(0);
Assertions.assertEquals("Jose", firstParent.getName());
Assertions.assertEquals(2, firstParent.getChilds().size());
Parent secondParent = parents.get(1);
Assertions.assertEquals("Juan", secondParent.getName());
Assertions.assertEquals(0, secondParent.getChilds().size()); // note an empty list is inyected
}
}
}
| ParentChildTest |
java | elastic__elasticsearch | build-tools/src/testFixtures/java/org/elasticsearch/gradle/internal/test/StableApiJarMocks.java | {
"start": 899,
"end": 1946
} | class ____ {
private StableApiJarMocks() {}
public static Path createExtensibleApiJar(Path jar) throws IOException {
jar = jar.resolve("plugin-extensible-api.jar");
DynamicType.Unloaded<ExtensibleInterface> extensible = new ByteBuddy().decorate(ExtensibleInterface.class).make();
DynamicType.Unloaded<ExtensibleClass> extensibleClass = new ByteBuddy().decorate(ExtensibleClass.class).make();
extensible.toJar(jar.toFile());
extensibleClass.inject(jar.toFile());
return jar;
}
public static Path createPluginApiJar(Path jar) throws IOException {
jar = jar.resolve("plugin-api.jar");
DynamicType.Unloaded<Extensible> extensible = new ByteBuddy().decorate(Extensible.class).make();
extensible.toJar(jar.toFile());
DynamicType.Unloaded<NamedComponent> namedComponent = new ByteBuddy().decorate(NamedComponent.class).make();
extensible.toJar(jar.toFile());
namedComponent.inject(jar.toFile());
return jar;
}
}
| StableApiJarMocks |
java | apache__thrift | lib/javame/src/org/apache/thrift/protocol/TProtocol.java | {
"start": 943,
"end": 988
} | interface ____.
*
*/
public abstract | definition |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ResetMlComponentsAction.java | {
"start": 950,
"end": 1294
} | class ____ extends ActionType<ResetMlComponentsAction.Response> {
public static final ResetMlComponentsAction INSTANCE = new ResetMlComponentsAction();
public static final String NAME = "cluster:internal/xpack/ml/auditor/reset";
private ResetMlComponentsAction() {
super(NAME);
}
public static | ResetMlComponentsAction |
java | apache__flink | flink-core/src/test/java/org/apache/flink/api/java/typeutils/runtime/TupleComparatorILDC3Test.java | {
"start": 1516,
"end": 3490
} | class ____ extends TupleComparatorTestBase<Tuple3<Integer, Long, Double>> {
@SuppressWarnings("unchecked")
Tuple3<Integer, Long, Double>[] dataISD =
new Tuple3[] {
new Tuple3<Integer, Long, Double>(4, Long.valueOf(4), 20.0),
new Tuple3<Integer, Long, Double>(5, Long.valueOf(1), 20.0),
new Tuple3<Integer, Long, Double>(5, Long.valueOf(2), 20.0),
new Tuple3<Integer, Long, Double>(5, Long.valueOf(10), 23.0),
new Tuple3<Integer, Long, Double>(5, Long.valueOf(19), 24.0),
new Tuple3<Integer, Long, Double>(5, Long.valueOf(20), 24.0),
new Tuple3<Integer, Long, Double>(5, Long.valueOf(24), 25.0),
new Tuple3<Integer, Long, Double>(5, Long.valueOf(25), 25.0)
};
@Override
protected TupleComparator<Tuple3<Integer, Long, Double>> createComparator(boolean ascending) {
return new TupleComparator<Tuple3<Integer, Long, Double>>(
new int[] {2, 0, 1},
new TypeComparator[] {
new DoubleComparator(ascending),
new IntComparator(ascending),
new LongComparator(ascending)
},
new TypeSerializer[] {
IntSerializer.INSTANCE, LongSerializer.INSTANCE, DoubleSerializer.INSTANCE
});
}
@SuppressWarnings("unchecked")
@Override
protected TupleSerializer<Tuple3<Integer, Long, Double>> createSerializer() {
return new TupleSerializer<Tuple3<Integer, Long, Double>>(
(Class<Tuple3<Integer, Long, Double>>) (Class<?>) Tuple3.class,
new TypeSerializer[] {
new IntSerializer(), new LongSerializer(), new DoubleSerializer()
});
}
@Override
protected Tuple3<Integer, Long, Double>[] getSortedTestData() {
return dataISD;
}
}
| TupleComparatorILDC3Test |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/serializer/SerializeWriterTest_14.java | {
"start": 303,
"end": 1796
} | class ____ extends TestCase {
@SuppressWarnings("rawtypes")
public void test_writer_1() throws Exception {
StringWriter strOut = new StringWriter();
SerializeWriter out = new SerializeWriter(strOut, 1);
try {
JSONSerializer serializer = new JSONSerializer(out);
Map map = Collections.singletonMap("", "a");
serializer.write(map);
} finally {
out.close();
}
Assert.assertEquals("{\"\":\"a\"}", strOut.toString());
}
public void test_writer_2() throws Exception {
StringWriter strOut = new StringWriter();
SerializeWriter out = new SerializeWriter(strOut, 1);
try {
JSONSerializer serializer = new JSONSerializer(out);
Map map = Collections.singletonMap("ab", "a");
serializer.write(map);
} finally {
out.close();
}
Assert.assertEquals("{ab:\"a\"}", strOut.toString());
}
public void test_writer_3() throws Exception {
StringWriter strOut = new StringWriter();
SerializeWriter out = new SerializeWriter(strOut, 1);
try {
JSONSerializer serializer = new JSONSerializer(out);
Map map = Collections.singletonMap("ab\t<", "a");
serializer.write(map);
} finally {
out.close();
}
Assert.assertEquals("{\"ab\\t<\":\"a\"}", strOut.toString());
}
}
| SerializeWriterTest_14 |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/AuthorizationEngine.java | {
"start": 5858,
"end": 15592
} | interface ____ {
/**
* Asynchronously resolves the information necessary to authorize the given request, which has
* already been authenticated. This could include retrieval of permissions from an index or external system.
* See also {@link #resolveAuthorizationInfo(Subject, ActionListener)}, for which this method is the more
* specific sibling. This returns the specific {@code AuthorizationInfo} used to authorize only the specified request.
*
* @param requestInfo object containing the request and associated information such as the action name
* and associated user(s)
* @param listener the listener to be notified of success using {@link ActionListener#onResponse(Object)}
* or failure using {@link ActionListener#onFailure(Exception)}
*/
void resolveAuthorizationInfo(RequestInfo requestInfo, ActionListener<AuthorizationInfo> listener);
/**
* Asynchronously resolves the information necessary to authorize requests in the context of the given {@code Subject}.
* This could include retrieval of permissions from an index or external system.
* See also {@link #resolveAuthorizationInfo(RequestInfo, ActionListener)}, for which this method is the more general
* sibling. This returns the {@code AuthorizationInfo} that is used for access checks outside the context of
* authorizing a specific request, i.e.
* {@link #checkPrivileges(AuthorizationInfo, PrivilegesToCheck, Collection, ActionListener)}
*
* @param subject object representing the effective user
* @param listener the listener to be notified of success using {@link ActionListener#onResponse(Object)}
* or failure using {@link ActionListener#onFailure(Exception)}
*/
void resolveAuthorizationInfo(Subject subject, ActionListener<AuthorizationInfo> listener);
/**
* Asynchronously authorizes an attempt for a user to run as another user.
*
* @param requestInfo object contain the request and associated information such as the action
* and associated user(s)
* @param authorizationInfo information needed from authorization that was previously retrieved
* from {@link #resolveAuthorizationInfo(RequestInfo, ActionListener)}
* @param listener the listener to be notified of the authorization result
*/
void authorizeRunAs(RequestInfo requestInfo, AuthorizationInfo authorizationInfo, ActionListener<AuthorizationResult> listener);
/**
* Asynchronously authorizes a cluster action.
*
* @param requestInfo object contain the request and associated information such as the action
* and associated user(s)
* @param authorizationInfo information needed from authorization that was previously retrieved
* from {@link #resolveAuthorizationInfo(RequestInfo, ActionListener)}
* @param listener the listener to be notified of the authorization result
*/
void authorizeClusterAction(RequestInfo requestInfo, AuthorizationInfo authorizationInfo, ActionListener<AuthorizationResult> listener);
/**
* Asynchronously authorizes an action that operates on an index. The indices and aliases that
* the request is attempting to operate on can be retrieved using the {@link AsyncSupplier} for
* {@link ResolvedIndices}. The resolved indices will contain the exact list of indices and aliases
* that the request is attempting to take action on; in other words this supplier handles wildcard
* expansion and datemath expressions.
*
* @param requestInfo object contain the request and associated information such as the action
* and associated user(s)
* @param authorizationInfo information needed from authorization that was previously retrieved
* from {@link #resolveAuthorizationInfo(RequestInfo, ActionListener)}
* @param indicesAsyncSupplier the asynchronous supplier for the indices that this request is
* attempting to operate on
* @param metadata a map of a string name to the cluster metadata specific to that
* alias or index
* @return a listener to be notified of the authorization result
*/
SubscribableListener<IndexAuthorizationResult> authorizeIndexAction(
RequestInfo requestInfo,
AuthorizationInfo authorizationInfo,
AsyncSupplier<ResolvedIndices> indicesAsyncSupplier,
ProjectMetadata metadata
);
/**
* Asynchronously loads a set of alias and index names for which the user is authorized
* to execute the requested action.
*
* @param requestInfo object contain the request and associated information such as the action
* and associated user(s)
* @param authorizationInfo information needed from authorization that was previously retrieved
* from {@link #resolveAuthorizationInfo(RequestInfo, ActionListener)}
* @param indicesLookup a map of a string name to the cluster metadata specific to that
* alias or index
* @param listener the listener to be notified of the authorization result
*/
void loadAuthorizedIndices(
RequestInfo requestInfo,
AuthorizationInfo authorizationInfo,
Map<String, IndexAbstraction> indicesLookup,
ActionListener<AuthorizationEngine.AuthorizedIndices> listener
);
/**
* Asynchronously checks that the permissions a user would have for a given list of names do
* not exceed their permissions for a given name. This is used to ensure that a user cannot
* perform operations that would escalate their privileges over the data. Some examples include
* adding an alias to gain more permissions to a given index and/or resizing an index in order
* to gain more privileges on the data since the index name changes.
*
* @param requestInfo object contain the request and associated information such as the action
* and associated user(s)
* @param authorizationInfo information needed from authorization that was previously retrieved
* from {@link #resolveAuthorizationInfo(RequestInfo, ActionListener)}
* @param indexNameToNewNames A map of an existing index/alias name to a one or more names of
* an index/alias that the user is requesting to create. The method
* should validate that none of the names have more permissions than
* the name in the key would have.
* @param listener the listener to be notified of the authorization result
*/
void validateIndexPermissionsAreSubset(
RequestInfo requestInfo,
AuthorizationInfo authorizationInfo,
Map<String, List<String>> indexNameToNewNames,
ActionListener<AuthorizationResult> listener
);
/**
* Checks the privileges from the provided authorization information against those that are being
* requested to be checked. This provides a way for a client application to ask if a Subject has
* permission to perform an action, before actually trying to perform the action,
* or if the subject has privileges to an application resource.
*
* @param authorizationInfo information used for authorization, for a specific Subject, that was previously retrieved
* using {@link #resolveAuthorizationInfo(Subject, ActionListener)}
* @param privilegesToCheck the object that contains the privileges to check for the Subject
* @param applicationPrivilegeDescriptors a collection of application privilege descriptors
* @param listener the listener to be notified of the check privileges response
*/
void checkPrivileges(
AuthorizationInfo authorizationInfo,
PrivilegesToCheck privilegesToCheck,
Collection<ApplicationPrivilegeDescriptor> applicationPrivilegeDescriptors,
ActionListener<PrivilegesCheckResult> listener
);
/**
* Retrieve the privileges, from the provided authorization information, in a standard format that can be rendered via an
* API for a client application to understand the privileges that the Subject has.
*
* @param authorizationInfo information used from authorization, for a specific Subject, that was previously retrieved
* from {@link #resolveAuthorizationInfo(Subject, ActionListener)}
* @param listener the listener to be notified of the get privileges response
*/
void getUserPrivileges(AuthorizationInfo authorizationInfo, ActionListener<GetUserPrivilegesResponse> listener);
/**
* Retrieve privileges towards a remote cluster, from the provided authorization information, to be sent together
* with a cross-cluster request (e.g. CCS) from an originating cluster to the target cluster.
*/
default void getRoleDescriptorsIntersectionForRemoteCluster(
final String remoteClusterAlias,
final TransportVersion remoteClusterVersion,
final AuthorizationInfo authorizationInfo,
final ActionListener<RoleDescriptorsIntersection> listener
) {
throw new UnsupportedOperationException(
"retrieving role descriptors for remote cluster is not supported by this authorization engine"
);
}
/**
* Interface for objects that contains the information needed to authorize a request
*/
| AuthorizationEngine |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ClientSCMProtocol.java | {
"start": 1686,
"end": 2629
} | interface ____ by clients to claim a resource with the
* <code>SharedCacheManager.</code> The client uses a checksum to identify the
* resource and an {@link ApplicationId} to identify which application will be
* using the resource.
* </p>
*
* <p>
* The <code>SharedCacheManager</code> responds with whether or not the
* resource exists in the cache. If the resource exists, a <code>Path</code>
* to the resource in the shared cache is returned. If the resource does not
* exist, the response is empty.
* </p>
*
* @param request request to claim a resource in the shared cache
* @return response indicating if the resource is already in the cache
* @throws YarnException exceptions from yarn servers.
* @throws IOException io error occur.
*/
public UseSharedCacheResourceResponse use(
UseSharedCacheResourceRequest request) throws YarnException, IOException;
/**
* <p>
* The | used |
java | netty__netty | transport-native-epoll/src/test/java/io/netty/channel/epoll/EpollDomainSocketStringEchoTest.java | {
"start": 935,
"end": 1339
} | class ____ extends SocketStringEchoTest {
@Override
protected SocketAddress newSocketAddress() {
return EpollSocketTestPermutation.newDomainSocketAddress();
}
@Override
protected List<TestsuitePermutation.BootstrapComboFactory<ServerBootstrap, Bootstrap>> newFactories() {
return EpollSocketTestPermutation.INSTANCE.domainSocket();
}
}
| EpollDomainSocketStringEchoTest |
java | apache__flink | flink-core/src/test/java/org/apache/flink/api/java/typeutils/runtime/PojoSerializerTest.java | {
"start": 2429,
"end": 4410
} | class ____ extends SerializerTestBase<PojoSerializerTest.TestUserClass> {
private final TypeInformation<TestUserClass> type =
TypeExtractor.getForClass(TestUserClass.class);
@Override
protected TypeSerializer<TestUserClass> createSerializer() {
TypeSerializer<TestUserClass> serializer =
type.createSerializer(new SerializerConfigImpl());
assert (serializer instanceof PojoSerializer);
return serializer;
}
@Override
protected int getLength() {
return -1;
}
@Override
protected Class<TestUserClass> getTypeClass() {
return TestUserClass.class;
}
@Override
protected TestUserClass[] getTestData() {
Random rnd = new Random(874597969123412341L);
return new TestUserClass[] {
new TestUserClass(
rnd.nextInt(),
"foo",
rnd.nextDouble(),
new int[] {1, 2, 3},
new Date(),
new NestedTestUserClass(
rnd.nextInt(), "foo@boo", rnd.nextDouble(), new int[] {10, 11, 12})),
new TestUserClass(
rnd.nextInt(),
"bar",
rnd.nextDouble(),
new int[] {4, 5, 6},
null,
new NestedTestUserClass(
rnd.nextInt(), "bar@bas", rnd.nextDouble(), new int[] {20, 21, 22})),
new TestUserClass(rnd.nextInt(), null, rnd.nextDouble(), null, null, null),
new TestUserClass(
rnd.nextInt(),
"bar",
rnd.nextDouble(),
new int[] {4, 5, 6},
new Date(),
new NestedTestUserClass(
rnd.nextInt(), "bar@bas", rnd.nextDouble(), new int[] {20, 21, 22}))
};
}
// User code | PojoSerializerTest |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestSubViews.java | {
"start": 1269,
"end": 1553
} | class ____ extends HtmlPage {
@Override
public void render(Page.HTML<__> html) {
html.
body().
div().
__(Sub1.class).__().
div().
i("inline text").
__(Sub2.class).__().__().__();
}
}
static public | MainView |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentile.java | {
"start": 2290,
"end": 6159
} | class ____ extends EsqlScalarFunction {
public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(
Expression.class,
"MvPercentile",
MvPercentile::new
);
/**
* 2^52 is the smallest integer where it and all smaller integers can be represented exactly as double
*/
private static final double MAX_SAFE_LONG_DOUBLE = Double.longBitsToDouble(0x4330000000000000L);
private final Expression field;
private final Expression percentile;
@FunctionInfo(
returnType = { "double", "integer", "long" },
description = "Converts a multivalued field into a single valued field containing "
+ "the value at which a certain percentage of observed values occur.",
examples = @Example(file = "mv_percentile", tag = "example")
)
public MvPercentile(
Source source,
@Param(name = "number", type = { "double", "integer", "long" }, description = "Multivalue expression.") Expression field,
@Param(
name = "percentile",
type = { "double", "integer", "long" },
description = "The percentile to calculate. Must be a number between 0 and 100. "
+ "Numbers out of range will return a null instead."
) Expression percentile
) {
super(source, List.of(field, percentile));
this.field = field;
this.percentile = percentile;
}
private MvPercentile(StreamInput in) throws IOException {
this(Source.readFrom((PlanStreamInput) in), in.readNamedWriteable(Expression.class), in.readNamedWriteable(Expression.class));
}
@Override
public String getWriteableName() {
return ENTRY.name;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
source().writeTo(out);
out.writeNamedWriteable(field);
out.writeNamedWriteable(percentile);
}
@Override
protected Expression.TypeResolution resolveType() {
if (childrenResolved() == false) {
return new TypeResolution("Unresolved children");
}
return isType(field, dt -> dt.isNumeric() && dt != UNSIGNED_LONG, sourceText(), FIRST, "numeric except unsigned_long").and(
isType(percentile, dt -> dt.isNumeric() && dt != UNSIGNED_LONG, sourceText(), SECOND, "numeric except unsigned_long")
);
}
@Override
public boolean foldable() {
return field.foldable() && percentile.foldable();
}
public final Expression field() {
return field;
}
@Override
public DataType dataType() {
return field.dataType();
}
@Override
public final ExpressionEvaluator.Factory toEvaluator(ToEvaluator toEvaluator) {
var fieldEval = toEvaluator.apply(field);
var percentileEval = Cast.cast(source(), percentile.dataType(), DOUBLE, toEvaluator.apply(percentile));
return switch (PlannerUtils.toElementType(field.dataType())) {
case INT -> new MvPercentileIntegerEvaluator.Factory(source(), fieldEval, percentileEval, (d) -> new IntSortingScratch());
case LONG -> new MvPercentileLongEvaluator.Factory(source(), fieldEval, percentileEval, (d) -> new LongSortingScratch());
case DOUBLE -> new MvPercentileDoubleEvaluator.Factory(source(), fieldEval, percentileEval, (d) -> new DoubleSortingScratch());
default -> throw EsqlIllegalArgumentException.illegalDataType(field.dataType());
};
}
@Override
public Expression replaceChildren(List<Expression> newChildren) {
return new MvPercentile(source(), newChildren.get(0), newChildren.get(1));
}
@Override
protected NodeInfo<? extends Expression> info() {
return NodeInfo.create(this, MvPercentile::new, field, percentile);
}
static | MvPercentile |
java | elastic__elasticsearch | libs/x-content/src/main/java/org/elasticsearch/xcontent/FlatteningXContentParser.java | {
"start": 696,
"end": 2010
} | class ____ extends XContentSubParser {
private final String parentName;
private static final char DELIMITER = '.';
/**
* Constructs a FlatteningXContentParser with the given parent name and wraps an existing XContentParser.
*
* @param parser The XContentParser to be wrapped and extended with flattening functionality.
* @param parentName The parent name to be used as a prefix for immediate children.
*/
public FlatteningXContentParser(XContentParser parser, String parentName) {
super(parser);
this.parentName = parentName;
}
/**
* Retrieves the name of the current field being parsed. If the current parsing level is 1,
* the returned field name will be constructed by prepending the parent name to the
* delegate's currentFieldName, otherwise just delegate.
*
* @return The current field name, potentially modified by prepending the parent name as a prefix.
* @throws IOException If an I/O error occurs during parsing.
*/
@Override
public String currentName() throws IOException {
if (level() == 1) {
return new StringBuilder(parentName).append(DELIMITER).append(delegate().currentName()).toString();
}
return delegate().currentName();
}
}
| FlatteningXContentParser |
java | google__dagger | javatests/dagger/functional/subcomponent/multibindings/SubcomponentBuilderMultibindingsTest.java | {
"start": 2443,
"end": 2529
} | interface ____ {
Foo getFoo();
@Subcomponent.Builder
public | FloatingSub |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/context/annotation/AnnotationBeanNameGeneratorTests.java | {
"start": 6859,
"end": 6922
} | class ____ {
}
@Component(" ")
private static | ComponentWithName |
java | assertj__assertj-core | assertj-tests/assertj-integration-tests/assertj-guava-tests/src/test/java/org/assertj/tests/guava/api/MultimapAssert_hasSize_Test.java | {
"start": 1009,
"end": 2113
} | class ____ extends MultimapAssertBaseTest {
@Test
void should_pass_if_size_of_actual_is_equal_to_expected_size() {
// WHEN/THEN
assertThat(actual).hasSize(9);
}
@Test
void should_fail_if_actual_is_null() {
// GIVEN
actual = null;
// WHEN
Throwable throwable = catchThrowable(() -> assertThat(actual).hasSize(2));
// THEN
then(throwable).isInstanceOf(AssertionError.class)
.hasMessage(actualIsNull());
}
@Test
void should_fail_if_size_of_actual_is_not_equal_to_expected_size() {
// WHEN
Throwable throwable = catchThrowable(() -> assertThat(actual).hasSize(3));
// THEN
then(throwable).isInstanceOf(AssertionError.class)
.hasMessage(format("%n" +
"Expected size: 3 but was: 9 in:%n" +
"{Lakers=[Kobe Bryant, Magic Johnson, Kareem Abdul Jabbar], Bulls=[Michael Jordan, " +
"Scottie Pippen, Derrick Rose], Spurs=[Tony Parker, Tim Duncan, Manu Ginobili]}"));
}
}
| MultimapAssert_hasSize_Test |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/AsyncResultsService.java | {
"start": 1222,
"end": 8555
} | class ____<Task extends AsyncTask, Response extends AsyncResponse<Response>> {
private static final Logger logger = LogManager.getLogger(AsyncResultsService.class);
private final Class<? extends Task> asyncTaskClass;
private final TaskManager taskManager;
private final ClusterService clusterService;
private final AsyncTaskIndexService<Response> store;
private final boolean updateInitialResultsInStore;
private final TriFunction<Task, ActionListener<Response>, TimeValue, Boolean> addCompletionListener;
/**
* Creates async results service
*
* @param store AsyncTaskIndexService for the response we are working with
* @param updateInitialResultsInStore true if initial results are stored (Async Search) or false otherwise (EQL Search)
* @param asyncTaskClass async task class
* @param addCompletionListener function that registers a completion listener with the task
* @param taskManager task manager
* @param clusterService cluster service
*/
public AsyncResultsService(
AsyncTaskIndexService<Response> store,
boolean updateInitialResultsInStore,
Class<? extends Task> asyncTaskClass,
TriFunction<Task, ActionListener<Response>, TimeValue, Boolean> addCompletionListener,
TaskManager taskManager,
ClusterService clusterService
) {
this.updateInitialResultsInStore = updateInitialResultsInStore;
this.asyncTaskClass = asyncTaskClass;
this.addCompletionListener = addCompletionListener;
this.taskManager = taskManager;
this.clusterService = clusterService;
this.store = store;
}
public DiscoveryNode getNode(String id) {
AsyncExecutionId searchId = AsyncExecutionId.decode(id);
return clusterService.state().nodes().get(searchId.getTaskId().getNodeId());
}
public boolean isLocalNode(DiscoveryNode node) {
return Objects.requireNonNull(node).equals(clusterService.localNode());
}
public void retrieveResult(GetAsyncResultRequest request, ActionListener<Response> listener) {
try {
long nowInMillis = System.currentTimeMillis();
AsyncExecutionId searchId = AsyncExecutionId.decode(request.getId());
long expirationTime;
if (request.getKeepAlive() != null && request.getKeepAlive().getMillis() > 0) {
expirationTime = nowInMillis + request.getKeepAlive().getMillis();
} else {
expirationTime = -1;
}
// EQL doesn't store initial or intermediate results so we only need to update expiration time in store for only in case of
// async search
if (updateInitialResultsInStore & expirationTime > 0) {
updateExpirationTime(
searchId,
expirationTime,
listener.delegateFailure((l, unused) -> getSearchResponseFromTask(searchId, request, nowInMillis, expirationTime, l))
);
} else {
getSearchResponseFromTask(searchId, request, nowInMillis, expirationTime, listener);
}
} catch (Exception exc) {
listener.onFailure(exc);
}
}
private void getSearchResponseFromTask(
AsyncExecutionId searchId,
GetAsyncResultRequest request,
long nowInMillis,
long expirationTimeMillis,
ActionListener<Response> listener
) {
try {
final Task task = store.getTaskAndCheckAuthentication(taskManager, searchId, asyncTaskClass);
if (task == null || (updateInitialResultsInStore && task.isCancelled())) {
getSearchResponseFromIndexAndUpdateExpiration(searchId, request, nowInMillis, expirationTimeMillis, listener);
return;
}
if (expirationTimeMillis != -1) {
task.setExpirationTime(expirationTimeMillis);
}
boolean added = addCompletionListener.apply(
task,
listener.delegateFailure((l, response) -> sendFinalResponse(request, response, nowInMillis, l)),
request.getWaitForCompletionTimeout()
);
if (added == false) {
// the task must have completed, since we cannot add a completion listener
assert store.getTaskAndCheckAuthentication(taskManager, searchId, asyncTaskClass) == null;
getSearchResponseFromIndexAndUpdateExpiration(searchId, request, nowInMillis, expirationTimeMillis, listener);
}
} catch (Exception exc) {
listener.onFailure(exc);
}
}
private void getSearchResponseFromIndexAndUpdateExpiration(
AsyncExecutionId searchId,
GetAsyncResultRequest request,
long nowInMillis,
long expirationTime,
ActionListener<Response> outListener
) {
var updateListener = outListener.delegateFailure((listener, unused) -> {
store.getResponse(searchId, true, listener.delegateFailure((l, response) -> {
try {
sendFinalResponse(request, response, nowInMillis, l);
} finally {
if (response instanceof StoredAsyncResponse<?> storedAsyncResponse
&& storedAsyncResponse.getResponse() instanceof RefCounted refCounted) {
refCounted.decRef();
}
}
}));
});
// If updateInitialResultsInStore=false, we can't update expiration while the task is running since the document doesn't exist yet.
// So let's update the expiration here when the task has been completed.
if (updateInitialResultsInStore == false && expirationTime != -1) {
updateExpirationTime(searchId, expirationTime, updateListener.map(unused -> null));
} else {
updateListener.onResponse(null);
}
}
private void sendFinalResponse(GetAsyncResultRequest request, Response response, long nowInMillis, ActionListener<Response> listener) {
// check if the result has expired
if (response.getExpirationTime() < nowInMillis) {
listener.onFailure(new ResourceNotFoundException(request.getId()));
return;
}
listener.onResponse(response);
}
private void updateExpirationTime(AsyncExecutionId searchId, long expirationTime, ActionListener<UpdateResponse> listener) {
store.updateExpirationTime(searchId.getDocId(), expirationTime, listener.delegateResponse((l, e) -> {
RestStatus status = ExceptionsHelper.status(ExceptionsHelper.unwrapCause(e));
if (status != RestStatus.NOT_FOUND) {
logger.error(() -> format("failed to update expiration time for async-search [%s]", searchId.getEncoded()), e);
l.onFailure(e);
} else {
// the async search document or its index is not found.
// That can happen if an invalid/deleted search id is provided.
l.onFailure(new ResourceNotFoundException(searchId.getEncoded()));
}
}));
}
}
| AsyncResultsService |
java | apache__flink | flink-metrics/flink-metrics-dropwizard/src/test/java/org/apache/flink/dropwizard/ScheduledDropwizardReporterTest.java | {
"start": 5842,
"end": 6143
} | class ____ extends ScheduledDropwizardReporter {
@Override
public ScheduledReporter getReporter(MetricConfig config) {
return null;
}
@Override
public void close() {
// don't do anything
}
}
}
| TestingScheduledDropwizardReporter |
java | google__guava | android/guava/src/com/google/common/graph/EndpointPairIterator.java | {
"start": 986,
"end": 1089
} | class ____ facilitate the set returned by {@link Graph#edges()}.
*
* @author James Sexton
*/
abstract | to |
java | elastic__elasticsearch | x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/registry/ClearInferenceEndpointCacheAction.java | {
"start": 7090,
"end": 9753
} | class ____ extends AbstractNamedDiffable<Metadata.ProjectCustom> implements Metadata.ProjectCustom {
public static final String NAME = "inference-endpoint-cache-metadata";
private static final InvalidateCacheMetadata EMPTY = new InvalidateCacheMetadata(0L);
private static final ParseField VERSION_FIELD = new ParseField("version");
@SuppressWarnings("unchecked")
private static final ConstructingObjectParser<InvalidateCacheMetadata, Void> PARSER = new ConstructingObjectParser<>(
NAME,
true,
args -> new InvalidateCacheMetadata((long) args[0])
);
static {
PARSER.declareLong(ConstructingObjectParser.constructorArg(), VERSION_FIELD);
}
public static InvalidateCacheMetadata fromXContent(XContentParser parser) {
return PARSER.apply(parser, null);
}
public static InvalidateCacheMetadata fromMetadata(ProjectMetadata projectMetadata) {
InvalidateCacheMetadata metadata = projectMetadata.custom(NAME);
return metadata == null ? EMPTY : metadata;
}
private final long version;
private InvalidateCacheMetadata(long version) {
this.version = version;
}
public InvalidateCacheMetadata(StreamInput in) throws IOException {
this(in.readVLong());
}
public InvalidateCacheMetadata bumpVersion() {
return new InvalidateCacheMetadata(version < Long.MAX_VALUE ? version + 1 : 1L);
}
@Override
public EnumSet<Metadata.XContentContext> context() {
return Metadata.ALL_CONTEXTS;
}
@Override
public TransportVersion getMinimalSupportedVersion() {
return ML_INFERENCE_ENDPOINT_CACHE;
}
@Override
public String getWriteableName() {
return NAME;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVLong(version);
}
@Override
public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params ignored) {
return Iterators.single(((builder, params) -> builder.field(VERSION_FIELD.getPreferredName(), version)));
}
@Override
public int hashCode() {
return Objects.hashCode(version);
}
@Override
public boolean equals(Object other) {
if (other == this) return true;
return other instanceof InvalidateCacheMetadata that && that.version == this.version;
}
}
private static | InvalidateCacheMetadata |
java | spring-projects__spring-data-jpa | spring-data-jpa/src/test/java/org/springframework/data/jpa/repository/query/JpaQueryExecutionUnitTests.java | {
"start": 10785,
"end": 10972
} | class ____ extends JpaQueryExecution {
@Override
protected Object doExecute(AbstractJpaQuery query, JpaParametersParameterAccessor accessor) {
return null;
}
}
}
| StubQueryExecution |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/onetoone/OneToOneJoinTableNonOptionalTest.java | {
"start": 2540,
"end": 2954
} | class ____ {
@Id
@Column(name = "ID")
@GeneratedValue
private Integer id;
@OneToOne(mappedBy = "description", cascade = CascadeType.ALL, optional = false)
private Show show;
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public Show getShow() {
return show;
}
public void setShow(Show show) {
this.show = show;
}
}
}
| ShowDescription |
java | google__guava | guava/src/com/google/common/collect/Maps.java | {
"start": 38358,
"end": 39307
} | class ____ extends EntrySet<K, V> {
@Override
Map<K, V> map() {
return AsMapView.this;
}
@Override
public Iterator<Entry<K, V>> iterator() {
return asMapEntryIterator(backingSet(), function);
}
}
return new EntrySetImpl();
}
@Override
public void forEach(BiConsumer<? super K, ? super V> action) {
checkNotNull(action);
// avoids allocation of entries
backingSet().forEach(k -> action.accept(k, function.apply(k)));
}
}
static <K extends @Nullable Object, V extends @Nullable Object>
Iterator<Entry<K, V>> asMapEntryIterator(Set<K> set, Function<? super K, V> function) {
return new TransformedIterator<K, Entry<K, V>>(set.iterator()) {
@Override
Entry<K, V> transform(@ParametricNullness K key) {
return immutableEntry(key, function.apply(key));
}
};
}
private static final | EntrySetImpl |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/UnusedVariableTest.java | {
"start": 4370,
"end": 5274
} | class ____ {
public List<String> makeList(List<String> input) {
List<String> output = new ArrayList<>();
for (final String unused : input) {
output.add("a string");
}
return output;
}
public List<String> listData(List<List<String>> input) {
List<String> output = new ArrayList<>();
for (List<String> unused : input) {
output.add("a string");
}
return output;
}
}
""")
.doTest();
}
@Test
public void unusedField() {
helper
.addSourceLines(
"UnusedField.java",
"""
package unusedvars;
import java.util.ArrayList;
import java.util.List;
public | UnusedEnhancedForLoop |
java | spring-projects__spring-framework | spring-context/src/main/java/org/springframework/format/datetime/DateFormatterRegistrar.java | {
"start": 3636,
"end": 3812
} | class ____ implements Converter<Calendar, Date> {
@Override
public Date convert(Calendar source) {
return source.getTime();
}
}
private static | CalendarToDateConverter |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest/deployment/src/test/java/io/quarkus/resteasy/reactive/server/test/security/CustomExceptionMapperTest.java | {
"start": 966,
"end": 1109
} | class ____ {
@GET
public String hello() {
return "hello world";
}
}
public static final | HelloResource |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/type/descriptor/jdbc/internal/JdbcTypeBaseline.java | {
"start": 2316,
"end": 5060
} | interface ____ {
void addDescriptor(JdbcType descriptor);
void addDescriptor(int code, JdbcType descriptor);
}
public static void prime(BaselineTarget target) {
target.addDescriptor( BooleanJdbcType.INSTANCE );
// ResultSetMetaData might report BIT on some DBs, so we need to register the boolean type descriptor for that code
target.addDescriptor( Types.BIT, BooleanJdbcType.INSTANCE );
target.addDescriptor( BigIntJdbcType.INSTANCE );
target.addDescriptor( DecimalJdbcType.INSTANCE );
target.addDescriptor( DoubleJdbcType.INSTANCE );
target.addDescriptor( SqlTypes.DOUBLE, DoubleJdbcType.INSTANCE );
target.addDescriptor( FloatJdbcType.INSTANCE );
target.addDescriptor( IntegerJdbcType.INSTANCE );
target.addDescriptor( NumericJdbcType.INSTANCE );
target.addDescriptor( RealJdbcType.INSTANCE );
target.addDescriptor( SmallIntJdbcType.INSTANCE );
target.addDescriptor( TinyIntJdbcType.INSTANCE );
target.addDescriptor( InstantJdbcType.INSTANCE );
target.addDescriptor( LocalDateTimeJdbcType.INSTANCE );
target.addDescriptor( LocalDateJdbcType.INSTANCE );
target.addDescriptor( LocalTimeJdbcType.INSTANCE );
target.addDescriptor( OffsetDateTimeJdbcType.INSTANCE );
target.addDescriptor( OffsetTimeJdbcType.INSTANCE );
target.addDescriptor( ZonedDateTimeJdbcType.INSTANCE );
target.addDescriptor( DateJdbcType.INSTANCE );
target.addDescriptor( TimestampJdbcType.INSTANCE );
target.addDescriptor( TimestampWithTimeZoneJdbcType.INSTANCE );
target.addDescriptor( TimeJdbcType.INSTANCE );
target.addDescriptor( TimeWithTimeZoneJdbcType.INSTANCE );
target.addDescriptor( DurationJdbcType.INSTANCE );
target.addDescriptor( BinaryJdbcType.INSTANCE );
target.addDescriptor( VarbinaryJdbcType.INSTANCE );
target.addDescriptor( LongVarbinaryJdbcType.INSTANCE );
target.addDescriptor( new LongVarbinaryJdbcType( SqlTypes.LONG32VARBINARY) );
target.addDescriptor( CharJdbcType.INSTANCE );
target.addDescriptor( VarcharJdbcType.INSTANCE );
target.addDescriptor( LongVarcharJdbcType.INSTANCE );
target.addDescriptor( new LongVarcharJdbcType( SqlTypes.LONG32VARCHAR) );
target.addDescriptor( BlobJdbcType.DEFAULT );
target.addDescriptor( ClobJdbcType.DEFAULT );
// Assume `NationalizationSupport#IMPLICIT`. Dialects needing the
// explicit type will map them.
target.addDescriptor( Types.NCHAR, CharJdbcType.INSTANCE );
target.addDescriptor( Types.NVARCHAR, VarcharJdbcType.INSTANCE );
target.addDescriptor( Types.LONGNVARCHAR, LongVarcharJdbcType.INSTANCE );
target.addDescriptor( Types.NCLOB, ClobJdbcType.DEFAULT );
target.addDescriptor( new LongVarcharJdbcType( SqlTypes.LONG32NVARCHAR) );
target.addDescriptor( RowIdJdbcType.INSTANCE );
}
}
| BaselineTarget |
java | google__guava | android/guava/src/com/google/common/hash/Hashing.java | {
"start": 10730,
"end": 11052
} | class ____ {
static final HashFunction SHA_384 =
new MessageDigestHashFunction("SHA-384", "Hashing.sha384()");
}
/** Returns a hash function implementing the SHA-512 algorithm (512 hash bits). */
public static HashFunction sha512() {
return Sha512Holder.SHA_512;
}
private static final | Sha384Holder |
java | micronaut-projects__micronaut-core | http/src/main/java/io/micronaut/http/annotation/RequestAttribute.java | {
"start": 1241,
"end": 1758
} | class ____ will always be applied
* <i>Note: Request attributes are intended for internal data sharing only, and are not attached to the outbound request.</i></p>
*
* <p>The following example demonstrates usage at the type level to declare default values to pass in the request when using the {@code Client} annotation;</p>
*
* <pre class="code">
* @RequestAttribute(name = "X-Username", value = "Freddy"),
* @RequestAttribute(name = "X-MyParam", value = "${foo.bar}")
* @Client('/users')
* | that |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/annotation/web/configurers/HttpSecurityRequestMatchersTests.java | {
"start": 8225,
"end": 9344
} | class ____ {
@Bean
PathPatternRequestMatcherBuilderFactoryBean requestMatcherBuilder() {
return new PathPatternRequestMatcherBuilderFactoryBean();
}
@Bean
@Order(Ordered.HIGHEST_PRECEDENCE)
SecurityFilterChain first(HttpSecurity http, PathPatternRequestMatcher.Builder builder) throws Exception {
// @formatter:off
http
.securityMatchers((security) -> security
.requestMatchers(builder.matcher("/test-1"))
.requestMatchers(builder.matcher("/test-2"))
.requestMatchers(builder.matcher("/test-3")))
.authorizeHttpRequests((requests) -> requests
.anyRequest().denyAll())
.httpBasic(withDefaults());
// @formatter:on
return http.build();
}
@Bean
SecurityFilterChain second(HttpSecurity http, PathPatternRequestMatcher.Builder builder) throws Exception {
// @formatter:off
http
.securityMatchers((security) -> security
.requestMatchers(builder.matcher("/test-1")))
.authorizeHttpRequests((requests) -> requests
.anyRequest().permitAll());
// @formatter:on
return http.build();
}
@RestController
static | MultiMvcMatcherConfig |
java | google__guava | android/guava-tests/benchmark/com/google/common/collect/IteratorBenchmark.java | {
"start": 993,
"end": 3568
} | class ____ {
@Param({"0", "1", "16", "256", "4096", "65536"})
int size;
// use concrete classes to remove any possible polymorphic overhead?
Object[] array;
ArrayList<Object> arrayList;
LinkedList<Object> linkedList;
@BeforeExperiment
void setUp() {
array = new Object[size];
arrayList = Lists.newArrayListWithCapacity(size);
linkedList = new LinkedList<>();
for (int i = 0; i < size; i++) {
Object value = new Object();
array[i] = value;
arrayList.add(value);
linkedList.add(value);
}
}
@Benchmark
int arrayIndexed(int reps) {
int sum = 0;
for (int i = 0; i < reps; i++) {
for (int index = 0; index < size; index++) {
sum += array[index].hashCode();
}
}
return sum;
}
@Benchmark
int arrayIndexedLength(int reps) {
int sum = 0;
for (int i = 0; i < reps; i++) {
for (int index = 0; index < array.length; index++) {
sum += array[index].hashCode();
}
}
return sum;
}
@Benchmark
int arrayFor(int reps) {
int sum = 0;
for (int i = 0; i < reps; i++) {
for (Object value : array) {
sum += value.hashCode();
}
}
return sum;
}
@Benchmark
int arrayListIndexed(int reps) {
int sum = 0;
for (int i = 0; i < reps; i++) {
for (int index = 0; index < size; index++) {
sum += arrayList.get(index).hashCode();
}
}
return sum;
}
@Benchmark
int arrayListIndexedLength(int reps) {
int sum = 0;
for (int i = 0; i < reps; i++) {
for (int index = 0; index < arrayList.size(); index++) {
sum += arrayList.get(index).hashCode();
}
}
return sum;
}
@Benchmark
int arrayListFor(int reps) {
int sum = 0;
for (int i = 0; i < reps; i++) {
for (Object value : arrayList) {
sum += value.hashCode();
}
}
return sum;
}
@Benchmark
int arrayListToArrayFor(int reps) {
int sum = 0;
for (int i = 0; i < reps; i++) {
for (Object value : arrayList.toArray()) {
sum += value.hashCode();
}
}
return sum;
}
@Benchmark
int linkedListFor(int reps) {
int sum = 0;
for (int i = 0; i < reps; i++) {
for (Object value : linkedList) {
sum += value.hashCode();
}
}
return sum;
}
@Benchmark
int linkedListToArrayFor(int reps) {
int sum = 0;
for (int i = 0; i < reps; i++) {
for (Object value : linkedList.toArray()) {
sum += value.hashCode();
}
}
return sum;
}
}
| IteratorBenchmark |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/issue_1200/Issue1276.java | {
"start": 665,
"end": 1969
} | class ____ extends RuntimeException{
private static final long serialVersionUID = 7815426752583648734L;
private long code;
public MyException() {
super();
}
public MyException(String message, Throwable cause) {
super(message, cause);
}
public MyException(String message) {
super(message);
}
public MyException(Throwable cause) {
super(cause);
}
public MyException(long code) {
super();
this.code = code;
}
public MyException(long code, String message, Throwable cause) {
super(message, cause);
this.code = code;
}
public MyException(long code, String message) {
super(message);
this.code = code;
}
public MyException(long code, Throwable cause) {
super(cause);
this.code = code;
}
public void setCode(long code) {
this.code = code;
}
public long getCode() {
return code;
}
@Override
public String toString() {
return "MyException{" +
"code=" + code +
'}';
}
}
}
| MyException |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java | {
"start": 10572,
"end": 10763
} | enum ____ {
STORE,
UPDATE,
REMOVE
};
/**
* Encapsulates znode path and corresponding split index for hierarchical
* znode layouts.
*/
private final static | AppAttemptOp |
java | apache__camel | components/camel-metrics/src/main/java/org/apache/camel/component/metrics/MetricsEndpoint.java | {
"start": 1668,
"end": 4984
} | class ____ extends DefaultEndpoint {
protected final MetricRegistry registry;
@UriPath(description = "Type of metrics")
@Metadata(required = true)
protected final MetricsType metricsType;
@UriPath(description = "Name of metrics")
@Metadata(required = true)
protected final String metricsName;
@UriParam(description = "Action when using timer type")
private MetricsTimerAction action;
@UriParam(description = "Mark when using meter type")
private Long mark;
@UriParam(description = "Value value when using histogram type")
private Long value;
@UriParam(description = "Increment value when using counter type")
private Long increment;
@UriParam(description = "Decrement value when using counter type")
private Long decrement;
@UriParam(description = "Subject value when using gauge type")
private Object subject;
public MetricsEndpoint(String uri, Component component, MetricRegistry registry, MetricsType metricsType,
String metricsName) {
super(uri, component);
this.registry = registry;
this.metricsType = metricsType;
this.metricsName = metricsName;
}
@Override
public boolean isRemote() {
return false;
}
@Override
public Consumer createConsumer(Processor processor) throws Exception {
throw new RuntimeCamelException("Cannot consume from " + getClass().getSimpleName() + ": " + getEndpointUri());
}
@Override
public Producer createProducer() throws Exception {
if (metricsType == MetricsType.COUNTER) {
return new CounterProducer(this);
} else if (metricsType == MetricsType.HISTOGRAM) {
return new HistogramProducer(this);
} else if (metricsType == MetricsType.METER) {
return new MeterProducer(this);
} else if (metricsType == MetricsType.TIMER) {
return new TimerProducer(this);
} else if (metricsType == MetricsType.GAUGE) {
return new GaugeProducer(this);
} else {
throw new IllegalArgumentException("Metrics type " + metricsType + " is not supported");
}
}
public MetricRegistry getRegistry() {
return registry;
}
public String getMetricsName() {
return metricsName;
}
public MetricsType getMetricsType() {
return metricsType;
}
public MetricsTimerAction getAction() {
return action;
}
public void setAction(MetricsTimerAction action) {
this.action = action;
}
public Long getMark() {
return mark;
}
public void setMark(Long mark) {
this.mark = mark;
}
public Long getValue() {
return value;
}
public void setValue(Long value) {
this.value = value;
}
public Long getIncrement() {
return increment;
}
public void setIncrement(Long increment) {
this.increment = increment;
}
public Long getDecrement() {
return decrement;
}
public void setDecrement(Long decrement) {
this.decrement = decrement;
}
public Object getSubject() {
return subject;
}
public void setSubject(Object subject) {
this.subject = subject;
}
}
| MetricsEndpoint |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ByteArrayManager.java | {
"start": 4431,
"end": 7099
} | class ____ {
private final int byteArrayLength;
private final int maxAllocated;
private final Queue<byte[]> freeQueue = new LinkedList<>();
private int numAllocated = 0;
FixedLengthManager(int arrayLength, int maxAllocated) {
this.byteArrayLength = arrayLength;
this.maxAllocated = maxAllocated;
}
/**
* Allocate a byte array.
*
* If the number of allocated arrays >= maximum, the current thread is
* blocked until the number of allocated arrays drops to below the maximum.
*
* The byte array allocated by this method must be returned for recycling
* via the {@link FixedLengthManager#recycle(byte[])} method.
*/
synchronized byte[] allocate() throws InterruptedException {
if (LOG.isDebugEnabled()) {
DEBUG_MESSAGE.get().append(", ").append(this);
}
for(; numAllocated >= maxAllocated;) {
if (LOG.isDebugEnabled()) {
DEBUG_MESSAGE.get().append(": wait ...");
logDebugMessage();
}
wait();
if (LOG.isDebugEnabled()) {
DEBUG_MESSAGE.get().append("wake up: ").append(this);
}
}
numAllocated++;
final byte[] array = freeQueue.poll();
if (LOG.isDebugEnabled()) {
DEBUG_MESSAGE.get().append(", recycled? ").append(array != null);
}
return array != null? array : new byte[byteArrayLength];
}
/**
* Recycle the given byte array, which must have the same length as the
* array length managed by this object.
*
* The byte array may or may not be allocated
* by the {@link FixedLengthManager#allocate()} method.
*/
synchronized int recycle(byte[] array) {
Preconditions.checkNotNull(array);
Preconditions.checkArgument(array.length == byteArrayLength);
if (LOG.isDebugEnabled()) {
DEBUG_MESSAGE.get().append(", ").append(this);
}
notify();
numAllocated--;
if (numAllocated < 0) {
// it is possible to drop below 0 since
// some byte arrays may not be created by the allocate() method.
numAllocated = 0;
}
if (freeQueue.size() < maxAllocated - numAllocated) {
if (LOG.isDebugEnabled()) {
DEBUG_MESSAGE.get().append(", freeQueue.offer");
}
freeQueue.offer(array);
}
return freeQueue.size();
}
@Override
public synchronized String toString() {
return "[" + byteArrayLength + ": " + numAllocated + "/"
+ maxAllocated + ", free=" + freeQueue.size() + "]";
}
}
/** A map from array lengths to byte array managers. */
static | FixedLengthManager |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/ParameterNameTest.java | {
"start": 16646,
"end": 17118
} | class ____ {",
" void test() {",
" AnnotatedParametersTestClass.target(/* foo= */ 1);",
" }",
"}")
.doTest();
}
@Ignore // TODO(b/67993065): remove @Ignore after the issue is fixed.
@Test
public void externalAnnotatedParameterPositive() {
testHelper
.addSourceLines(
"Test.java",
"import " + AnnotatedParametersTestClass.class.getCanonicalName() + ";",
" | Test |
java | apache__logging-log4j2 | log4j-jpa/src/test/java/org/apache/logging/log4j/core/appender/db/jpa/converter/StackTraceElementAttributeConverterTest.java | {
"start": 3510,
"end": 4706
} | class ____ is not correct.");
assertEquals("testConvert02", reversed.getMethodName(), "The method name is not correct.");
assertEquals("TestWithPackage.java", reversed.getFileName(), "The file name is not correct.");
assertEquals(-1, reversed.getLineNumber(), "The line number is not correct.");
assertFalse(reversed.isNativeMethod(), "The native flag should be false.");
}
@Test
void testConvert03() {
final StackTraceElement element =
new StackTraceElement("org.apache.logging.TestNoSource", "testConvert03", null, -1);
final String converted = this.converter.convertToDatabaseColumn(element);
assertNotNull(converted, "The converted value should not be null.");
assertEquals(
"org.apache.logging.TestNoSource.testConvert03(Unknown Source)",
converted,
"The converted value is not correct.");
final StackTraceElement reversed = this.converter.convertToEntityAttribute(converted);
assertNotNull(reversed, "The reversed value should not be null.");
assertEquals("org.apache.logging.TestNoSource", reversed.getClassName(), "The | name |
java | apache__camel | components/camel-telegram/src/test/java/org/apache/camel/component/telegram/TelegramConsumerEmptyResponseTest.java | {
"start": 1445,
"end": 2927
} | class ____ extends TelegramTestSupport {
@EndpointInject("mock:telegram")
private MockEndpoint endpoint;
@Test
public void testBehaviourWithEmptyUpdates() {
/* First make sure the message containing zero updates was sent by the API */
Awaitility.await().atMost(5, TimeUnit.SECONDS)
.until(() -> getMockRoutes().getMock("getUpdates").getRecordedMessages().size() >= 1);
endpoint.setResultWaitTime(500L);
endpoint.expectedMinimumMessageCount(1);
/* Then make sure that the consumer has sent zero exchanges to the route */
assertThrows(AssertionError.class, () -> {
endpoint.assertIsSatisfied();
});
}
@Override
protected RoutesBuilder[] createRouteBuilders() {
return new RoutesBuilder[] {
getMockRoutes(),
new RouteBuilder() {
@Override
public void configure() {
from("telegram:bots?authorizationToken=mock-token").to("mock:telegram");
}
} };
}
@Override
protected TelegramMockRoutes createMockRoutes() {
return new TelegramMockRoutes(port)
.addEndpoint(
"getUpdates",
"GET",
String.class,
TelegramTestUtil.stringResource("messages/updates-empty.json"));
}
}
| TelegramConsumerEmptyResponseTest |
java | apache__kafka | connect/runtime/src/test/java/org/apache/kafka/connect/runtime/SampleConverterWithHeaders.java | {
"start": 1452,
"end": 3185
} | class ____ implements Converter, Versioned {
private static final String HEADER_ENCODING = "encoding";
@Override
public String version() {
return AppInfoParser.getVersion();
}
@Override
public void configure(Map<String, ?> configs, boolean isKey) {
}
@Override
public SchemaAndValue toConnectData(String topic, Headers headers, byte[] value) {
String encoding = extractEncoding(headers);
try {
return new SchemaAndValue(Schema.STRING_SCHEMA, new String(value, encoding));
} catch (UnsupportedEncodingException e) {
throw new DataException("Unsupported encoding: " + encoding, e);
}
}
@Override
public byte[] fromConnectData(String topic, Headers headers, Schema schema, Object value) {
String encoding = extractEncoding(headers);
try {
return ((String) value).getBytes(encoding);
} catch (UnsupportedEncodingException e) {
throw new DataException("Unsupported encoding: " + encoding, e);
}
}
private String extractEncoding(Headers headers) {
Header header = headers.lastHeader(HEADER_ENCODING);
if (header == null) {
throw new DataException("Header '" + HEADER_ENCODING + "' is required!");
}
return new String(header.value());
}
@Override
public SchemaAndValue toConnectData(String topic, byte[] value) {
throw new DataException("Headers are required for this converter!");
}
@Override
public byte[] fromConnectData(String topic, Schema schema, Object value) {
throw new DataException("Headers are required for this converter!");
}
}
| SampleConverterWithHeaders |
java | quarkusio__quarkus | independent-projects/tools/devtools-common/src/main/java/io/quarkus/devtools/project/extensions/ExtensionInstallPlan.java | {
"start": 250,
"end": 3371
} | class ____ {
public static final ExtensionInstallPlan EMPTY = new ExtensionInstallPlan(
Collections.emptySet(),
Collections.emptySet(),
Collections.emptySet(),
Collections.emptySet(),
Collections.emptySet());
private final Set<ArtifactCoords> platforms;
private final Set<ArtifactCoords> managedExtensions;
private final Set<ArtifactCoords> independentExtensions;
private final Collection<String> unmatchedKeywords;
private final Collection<String> invalidKeywords;
private ExtensionInstallPlan(Set<ArtifactCoords> platforms,
Set<ArtifactCoords> managedExtensions,
Set<ArtifactCoords> independentExtensions,
Collection<String> unmatchedKeywords,
Collection<String> invalidKeywords) {
this.platforms = platforms;
this.managedExtensions = managedExtensions;
this.independentExtensions = independentExtensions;
this.unmatchedKeywords = unmatchedKeywords;
this.invalidKeywords = invalidKeywords;
}
public boolean isNotEmpty() {
return !this.platforms.isEmpty() || !this.managedExtensions.isEmpty()
|| !this.independentExtensions.isEmpty();
}
public boolean isInstallable() {
return isNotEmpty() && unmatchedKeywords.isEmpty();
}
/**
* @return a {@link Collection} of all extensions contained in this object
*/
public Collection<ArtifactCoords> toCollection() {
Set<ArtifactCoords> result = new LinkedHashSet<>();
result.addAll(getPlatforms());
result.addAll(getManagedExtensions());
result.addAll(getIndependentExtensions());
return result;
}
/**
* @return Platforms (BOMs) to be added to the build descriptor
*/
public Collection<ArtifactCoords> getPlatforms() {
return platforms;
}
/**
* @return Extensions that are included in the platforms returned in {@link #getPlatforms()},
* therefore setting the version is not required.
*/
public Collection<ArtifactCoords> getManagedExtensions() {
return managedExtensions;
}
/**
* @return Extensions that do not exist in any platform, the version MUST be set in the build descriptor
*/
public Collection<ArtifactCoords> getIndependentExtensions() {
return independentExtensions;
}
public Collection<String> getUnmatchedKeywords() {
return unmatchedKeywords;
}
public Collection<String> getInvalidKeywords() {
return invalidKeywords;
}
@Override
public String toString() {
return "InstallRequest{" +
"platforms=" + platforms +
", managedExtensions=" + managedExtensions +
", independentExtensions=" + independentExtensions +
", unmatchedKeywords=" + unmatchedKeywords +
", invalidKeywords=" + invalidKeywords +
'}';
}
public static Builder builder() {
return new Builder();
}
public static | ExtensionInstallPlan |
java | apache__flink | flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/catalog/JavaCatalogTableTest.java | {
"start": 10470,
"end": 13136
} | class ____ implements CatalogView {
private final CatalogView origin;
public CustomView(CatalogView table) {
this.origin = table;
}
@Override
public String getOriginalQuery() {
return origin.getOriginalQuery();
}
@Override
public String getExpandedQuery() {
return origin.getExpandedQuery();
}
@Override
public Map<String, String> getOptions() {
return origin.getOptions();
}
@Override
public Schema getUnresolvedSchema() {
Schema originalSchema = origin.getUnresolvedSchema();
return Schema.newBuilder()
.fromColumns(
originalSchema.getColumns().stream()
.map(
c -> {
if (c instanceof UnresolvedPhysicalColumn) {
DataType dataType =
(DataType)
((UnresolvedPhysicalColumn) c)
.getDataType();
String stringType =
dataType.getLogicalType()
.asSerializableString();
return new UnresolvedPhysicalColumn(
c.getName(), DataTypes.of(stringType));
}
throw new UnsupportedOperationException(
"Unexpected column type");
})
.collect(Collectors.toList()))
.build();
}
@Override
public String getComment() {
return origin.getComment();
}
@Override
public CatalogBaseTable copy() {
return new CustomView((CatalogView) origin.copy());
}
@Override
public Optional<String> getDescription() {
return origin.getDescription();
}
@Override
public Optional<String> getDetailedDescription() {
return origin.getDetailedDescription();
}
}
private static | CustomView |
java | netty__netty | common/src/test/java/io/netty/util/RecyclerTest.java | {
"start": 2066,
"end": 22802
} | enum ____ {
NONE,
PINNED,
FAST_THREAD_LOCAL,
}
public static Stream<Arguments> ownerTypeAndUnguarded() {
return Arrays.stream(OwnerType.values())
.flatMap(owner -> Stream.of(true, false)
.map(unguarded -> Arguments.of(owner, unguarded)));
}
public static Stream<Arguments> notNoneOwnerAndUnguarded() {
return ownerTypeAndUnguarded().filter(args -> args.get()[0] != OwnerType.NONE);
}
protected static Recycler<HandledObject> newRecycler(OwnerType ownerType, boolean unguarded,
int maxCapacityPerThread) {
return newRecycler(ownerType, unguarded, maxCapacityPerThread, null);
}
protected static Recycler<HandledObject> newRecycler(OwnerType ownerType, boolean unguarded,
int maxCapacityPerThread,
Consumer<HandledObject> onNewObject) {
switch (ownerType) {
case NONE:
return new Recycler<HandledObject>(maxCapacityPerThread, unguarded) {
@Override
protected HandledObject newObject(Handle<HandledObject> handle) {
HandledObject newObj = new HandledObject(handle);
if (onNewObject != null) {
onNewObject.accept(newObj);
}
return newObj;
}
};
case PINNED:
return new Recycler<HandledObject>(maxCapacityPerThread >> 1, maxCapacityPerThread, Thread.currentThread(),
unguarded) {
@Override
protected HandledObject newObject(
Recycler.Handle<HandledObject> handle) {
HandledObject newObj = new HandledObject(handle);
if (onNewObject != null) {
onNewObject.accept(newObj);
}
return newObj;
}
};
case FAST_THREAD_LOCAL:
return new Recycler<HandledObject>(maxCapacityPerThread >> 1, maxCapacityPerThread, unguarded) {
@Override
protected HandledObject newObject(Handle<HandledObject> handle) {
HandledObject newObj = new HandledObject(handle);
if (onNewObject != null) {
onNewObject.accept(newObj);
}
return newObj;
}
};
default:
throw new Error();
}
}
protected static Recycler<HandledObject> newRecycler(boolean unguarded, int maxCapacityPerThread) {
return new Recycler<HandledObject>(maxCapacityPerThread >> 1, maxCapacityPerThread, unguarded) {
@Override
protected HandledObject newObject(
Recycler.Handle<HandledObject> handle) {
return new HandledObject(handle);
}
};
}
protected static Recycler<HandledObject> newRecycler(int maxCapacityPerThread) {
return newRecycler(OwnerType.FAST_THREAD_LOCAL, false, maxCapacityPerThread, 8, maxCapacityPerThread >> 1);
}
protected static Recycler<HandledObject> newRecycler(OwnerType ownerType, boolean unguarded,
int maxCapacityPerThread, int ratio, int chunkSize) {
// NOTE: ratio and chunk size will be ignored for NONE owner type!
switch (ownerType) {
case NONE:
return new Recycler<HandledObject>(maxCapacityPerThread, unguarded) {
@Override
protected HandledObject newObject(Handle<HandledObject> handle) {
return new HandledObject(handle);
}
};
case PINNED:
return new Recycler<HandledObject>(maxCapacityPerThread, ratio, chunkSize, Thread.currentThread(),
unguarded) {
@Override
protected HandledObject newObject(
Recycler.Handle<HandledObject> handle) {
return new HandledObject(handle);
}
};
case FAST_THREAD_LOCAL:
return new Recycler<HandledObject>(maxCapacityPerThread, ratio, chunkSize, unguarded) {
@Override
protected HandledObject newObject(
Recycler.Handle<HandledObject> handle) {
return new HandledObject(handle);
}
};
default:
throw new Error();
}
}
@NotNull
protected Thread newThread(Runnable runnable) {
return new Thread(runnable);
}
@ParameterizedTest
@Timeout(value = 5000, unit = TimeUnit.MILLISECONDS)
@MethodSource("ownerTypeAndUnguarded")
public void testThreadCanBeCollectedEvenIfHandledObjectIsReferenced(OwnerType ownerType, boolean unguarded)
throws Exception {
final AtomicBoolean collected = new AtomicBoolean();
final AtomicReference<HandledObject> reference = new AtomicReference<HandledObject>();
Thread thread = new Thread(new Runnable() {
@Override
public void run() {
final Recycler<HandledObject> recycler = newRecycler(ownerType, unguarded, 1024);
HandledObject object = recycler.get();
// Store a reference to the HandledObject to ensure it is not collected when the run method finish.
reference.set(object);
Recycler.unpinOwner(recycler);
}
}) {
@Override
protected void finalize() throws Throwable {
super.finalize();
collected.set(true);
}
};
assertFalse(collected.get());
thread.start();
thread.join();
// Null out so it can be collected.
thread = null;
// Loop until the Thread was collected. If we can not collect it the Test will fail due of a timeout.
while (!collected.get()) {
System.gc();
System.runFinalization();
Thread.sleep(50);
}
// Now call recycle after the Thread was collected to ensure this still works...
if (reference.get() != null) {
reference.getAndSet(null).recycle();
}
}
@ParameterizedTest
@MethodSource("ownerTypeAndUnguarded")
public void verySmallRecycer(OwnerType ownerType, boolean unguarded) {
newRecycler(ownerType, unguarded, 2, 0, 1).get();
}
@ParameterizedTest
@EnumSource(OwnerType.class)
public void testMultipleRecycle(OwnerType ownerType) {
// This test makes only sense for guarded recyclers
Recycler<HandledObject> recycler = newRecycler(ownerType, false, 1024);
final HandledObject object = recycler.get();
object.recycle();
assertThrows(IllegalStateException.class, new Executable() {
@Override
public void execute() {
object.recycle();
}
});
}
@Test
public void testUnguardedMultipleRecycle() {
Recycler<HandledObject> recycler = newRecycler(true, 1024);
final HandledObject object = recycler.get();
object.recycle();
object.recycle();
}
@ParameterizedTest
@EnumSource(OwnerType.class)
public void testMultipleRecycleAtDifferentThread(OwnerType ownerType)
throws InterruptedException {
// This test makes only sense for guarded recyclers
Recycler<HandledObject> recycler = newRecycler(ownerType, false, 1024);
final HandledObject object = recycler.get();
final AtomicReference<IllegalStateException> exceptionStore = new AtomicReference<IllegalStateException>();
final Thread thread1 = newThread(new Runnable() {
@Override
public void run() {
object.recycle();
}
});
thread1.start();
thread1.join();
final Thread thread2 = newThread(new Runnable() {
@Override
public void run() {
try {
object.recycle();
} catch (IllegalStateException e) {
exceptionStore.set(e);
}
}
});
thread2.start();
thread2.join();
HandledObject a = recycler.get();
HandledObject b = recycler.get();
assertNotSame(a, b);
IllegalStateException exception = exceptionStore.get();
assertNotNull(exception);
}
@ParameterizedTest
@EnumSource(OwnerType.class)
public void testMultipleRecycleAtDifferentThreadRacing(OwnerType ownerType)
throws InterruptedException {
// This test makes only sense for guarded recyclers
Recycler<HandledObject> recycler = newRecycler(ownerType, false, 1024);
final HandledObject object = recycler.get();
final AtomicReference<IllegalStateException> exceptionStore = new AtomicReference<IllegalStateException>();
final CountDownLatch countDownLatch = new CountDownLatch(2);
final Thread thread1 = newThread(new Runnable() {
@Override
public void run() {
try {
object.recycle();
} catch (IllegalStateException e) {
Exception x = exceptionStore.getAndSet(e);
if (x != null) {
e.addSuppressed(x);
}
} finally {
countDownLatch.countDown();
}
}
});
thread1.start();
final Thread thread2 = newThread(new Runnable() {
@Override
public void run() {
try {
object.recycle();
} catch (IllegalStateException e) {
Exception x = exceptionStore.getAndSet(e);
if (x != null) {
e.addSuppressed(x);
}
} finally {
countDownLatch.countDown();
}
}
});
thread2.start();
try {
countDownLatch.await();
HandledObject a = recycler.get();
HandledObject b = recycler.get();
assertNotSame(a, b);
IllegalStateException exception = exceptionStore.get();
if (exception != null) {
assertThat(exception).hasMessageContaining("recycled already");
assertEquals(0, exception.getSuppressed().length);
}
} finally {
thread1.join(1000);
thread2.join(1000);
}
}
@ParameterizedTest
@EnumSource(OwnerType.class)
public void testMultipleRecycleRacing(OwnerType ownerType) throws InterruptedException {
// This test makes only sense for guarded recyclers
Recycler<HandledObject> recycler = newRecycler(ownerType, false, 1024);
final HandledObject object = recycler.get();
final AtomicReference<IllegalStateException> exceptionStore = new AtomicReference<IllegalStateException>();
final CountDownLatch countDownLatch = new CountDownLatch(1);
final Thread thread1 = newThread(new Runnable() {
@Override
public void run() {
try {
object.recycle();
} catch (IllegalStateException e) {
Exception x = exceptionStore.getAndSet(e);
if (x != null) {
e.addSuppressed(x);
}
} finally {
countDownLatch.countDown();
}
}
});
thread1.start();
try {
object.recycle();
} catch (IllegalStateException e) {
Exception x = exceptionStore.getAndSet(e);
if (x != null) {
e.addSuppressed(x);
}
}
try {
countDownLatch.await();
HandledObject a = recycler.get();
HandledObject b = recycler.get();
assertNotSame(a, b);
IllegalStateException exception = exceptionStore.get();
assertNotNull(exception); // Object got recycled twice, so at least one of the calls must throw.
} finally {
thread1.join(1000);
}
}
@ParameterizedTest
@MethodSource("ownerTypeAndUnguarded")
public void testRecycle(OwnerType ownerType, boolean unguarded) {
Recycler<HandledObject> recycler = newRecycler(ownerType, unguarded, 1024);
HandledObject object = recycler.get();
object.recycle();
HandledObject object2 = recycler.get();
assertSame(object, object2);
object2.recycle();
}
@ParameterizedTest
@MethodSource("ownerTypeAndUnguarded")
public void testRecycleDisable(OwnerType ownerType, boolean unguarded) {
Recycler<HandledObject> recycler = newRecycler(ownerType, unguarded, -1);
HandledObject object = recycler.get();
object.recycle();
HandledObject object2 = recycler.get();
assertNotSame(object, object2);
object2.recycle();
}
@ParameterizedTest
@MethodSource("ownerTypeAndUnguarded")
public void testRecycleDisableDrop(OwnerType ownerType, boolean unguarded) {
Recycler<HandledObject> recycler = newRecycler(ownerType, unguarded, 1024, 0, 16);
HandledObject object = recycler.get();
object.recycle();
HandledObject object2 = recycler.get();
assertSame(object, object2);
object2.recycle();
HandledObject object3 = recycler.get();
assertSame(object, object3);
object3.recycle();
}
/**
* Test to make sure bug #2848 never happens again
* https://github.com/netty/netty/issues/2848
*/
@ParameterizedTest
@MethodSource("ownerTypeAndUnguarded")
public void testMaxCapacity(OwnerType ownerType, boolean unguarded) {
testMaxCapacity(ownerType, unguarded, 300);
Random rand = new Random();
for (int i = 0; i < 50; i++) {
testMaxCapacity(ownerType, unguarded, rand.nextInt(1000) + 256); // 256 - 1256
}
}
private static void testMaxCapacity(OwnerType ownerType, boolean unguarded, int maxCapacity) {
Recycler<HandledObject> recycler = newRecycler(ownerType, unguarded, maxCapacity);
HandledObject[] objects = new HandledObject[maxCapacity * 3];
for (int i = 0; i < objects.length; i++) {
objects[i] = recycler.get();
}
for (int i = 0; i < objects.length; i++) {
objects[i].recycle();
objects[i] = null;
}
assertTrue(MathUtil.findNextPositivePowerOfTwo(maxCapacity) >= recycler.threadLocalSize(),
"The threadLocalSize (" + recycler.threadLocalSize() + ") must be <= maxCapacity ("
+ maxCapacity + ") as we not pool all new handles internally");
}
@ParameterizedTest
@MethodSource("notNoneOwnerAndUnguarded")
public void testRecycleAtDifferentThread(OwnerType ownerType, boolean unguarded) throws Exception {
final Recycler<HandledObject> recycler = newRecycler(ownerType, unguarded, 256, 2, 16);
final HandledObject o = recycler.get();
final HandledObject o2 = recycler.get();
final Thread thread = newThread(new Runnable() {
@Override
public void run() {
o.recycle();
o2.recycle();
}
});
thread.start();
thread.join();
assertSame(recycler.get(), o);
assertNotSame(recycler.get(), o2);
}
@ParameterizedTest
@MethodSource("ownerTypeAndUnguarded")
public void testRecycleAtTwoThreadsMulti(OwnerType ownerType, boolean unguarded) throws Exception {
final Recycler<HandledObject> recycler = newRecycler(ownerType, unguarded, 256);
final HandledObject o = recycler.get();
ExecutorService single = Executors.newSingleThreadExecutor(new ThreadFactory() {
@Override
public Thread newThread(@NotNull Runnable r) {
return RecyclerTest.this.newThread(r);
}
});
final CountDownLatch latch1 = new CountDownLatch(1);
single.execute(new Runnable() {
@Override
public void run() {
o.recycle();
latch1.countDown();
}
});
assertTrue(latch1.await(100, TimeUnit.MILLISECONDS));
final HandledObject o2 = recycler.get();
// Always recycler the first object, that is Ok
assertSame(o2, o);
final CountDownLatch latch2 = new CountDownLatch(1);
single.execute(new Runnable() {
@Override
public void run() {
//The object should be recycled
o2.recycle();
latch2.countDown();
}
});
assertTrue(latch2.await(100, TimeUnit.MILLISECONDS));
// It should be the same object, right?
final HandledObject o3 = recycler.get();
assertSame(o3, o);
single.shutdown();
}
@ParameterizedTest
@MethodSource("notNoneOwnerAndUnguarded")
public void testMaxCapacityWithRecycleAtDifferentThread(OwnerType ownerType, boolean unguarded) throws Exception {
final int maxCapacity = 4;
final Recycler<HandledObject> recycler = newRecycler(ownerType, unguarded, maxCapacity, 4, 4);
// Borrow 2 * maxCapacity objects.
// Return the half from the same thread.
// Return the other half from the different thread.
final HandledObject[] array = new HandledObject[maxCapacity * 3];
for (int i = 0; i < array.length; i ++) {
array[i] = recycler.get();
}
for (int i = 0; i < maxCapacity; i ++) {
array[i].recycle();
}
final Thread thread = newThread(new Runnable() {
@Override
public void run() {
for (int i1 = maxCapacity; i1 < array.length; i1++) {
array[i1].recycle();
}
}
});
thread.start();
thread.join();
assertEquals(maxCapacity * 3 / 4, recycler.threadLocalSize());
for (int i = 0; i < array.length; i ++) {
recycler.get();
}
assertEquals(0, recycler.threadLocalSize());
}
@ParameterizedTest
@MethodSource("notNoneOwnerAndUnguarded")
public void testDiscardingExceedingElementsWithRecycleAtDifferentThread(OwnerType ownerType, boolean unguarded)
throws Exception {
final int maxCapacity = 32;
final AtomicInteger instancesCount = new AtomicInteger(0);
final Recycler<HandledObject> recycler = newRecycler(ownerType, unguarded, maxCapacity, ignore ->
instancesCount.incrementAndGet());
// Borrow 2 * maxCapacity objects.
final HandledObject[] array = new HandledObject[maxCapacity * 2];
for (int i = 0; i < array.length; i++) {
array[i] = recycler.get();
}
assertEquals(array.length, instancesCount.get());
// Reset counter.
instancesCount.set(0);
// Recycle from other thread.
final Thread thread = newThread(new Runnable() {
@Override
public void run() {
for (HandledObject object: array) {
object.recycle();
}
}
});
thread.start();
thread.join();
assertEquals(0, instancesCount.get());
// Borrow 2 * maxCapacity objects. Half of them should come from
// the recycler queue, the other half should be freshly allocated.
for (int i = 0; i < array.length; i++) {
recycler.get();
}
// The implementation uses maxCapacity / 2 as limit per WeakOrderQueue
assertTrue(array.length - maxCapacity / 2 <= instancesCount.get(),
"The instances count (" + instancesCount.get() + ") must be <= array.length (" + array.length
+ ") - maxCapacity (" + maxCapacity + ") / 2 as we not pool all new handles" +
" internally");
}
static final | OwnerType |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitorResourceChange.java | {
"start": 3906,
"end": 5552
} | class ____ extends ContainerExecutor {
@Override
public void init(Context nmContext) throws IOException {
}
@Override
public void startLocalizer(LocalizerStartContext ctx)
throws IOException, InterruptedException {
}
@Override
public int launchContainer(ContainerStartContext ctx) throws
IOException, ConfigurationException {
return 0;
}
@Override
public int relaunchContainer(ContainerStartContext ctx) throws
IOException, ConfigurationException {
return 0;
}
@Override
public boolean signalContainer(ContainerSignalContext ctx)
throws IOException {
return true;
}
@Override
public boolean reapContainer(ContainerReapContext ctx)
throws IOException {
return true;
}
@Override
public IOStreamPair execContainer(ContainerExecContext ctx)
throws ContainerExecutionException {
return new IOStreamPair(null, null);
}
@Override
public void deleteAsUser(DeletionAsUserContext ctx)
throws IOException, InterruptedException {
}
@Override
public void symLink(String target, String symlink)
throws IOException {
}
@Override
public String getProcessId(ContainerId containerId) {
return String.valueOf(containerId.getContainerId());
}
@Override
public boolean isContainerAlive(ContainerLivenessContext ctx)
throws IOException {
return true;
}
@Override
public void updateYarnSysFS(Context ctx, String user, String appId,
String spec) throws IOException {
}
}
private static | MockExecutor |
java | quarkusio__quarkus | extensions/hibernate-orm/runtime/src/main/java/io/quarkus/hibernate/orm/runtime/observers/SessionFactoryObserverForSchemaExport.java | {
"start": 446,
"end": 1404
} | class ____ implements SessionFactoryObserver {
private final MetadataImplementor metadata;
private DelayedDropAction delayedDropAction;
public SessionFactoryObserverForSchemaExport(MetadataImplementor metadata) {
this.metadata = metadata;
}
@Override
public void sessionFactoryCreated(SessionFactory factory) {
SchemaManagementToolCoordinator.process(
metadata,
getRegistry(factory),
factory.getProperties(),
action -> delayedDropAction = action);
}
@Override
public void sessionFactoryClosing(SessionFactory factory) {
if (delayedDropAction != null) {
delayedDropAction.perform(getRegistry(factory));
}
}
private static ServiceRegistryImplementor getRegistry(SessionFactory factory) {
return ((SessionFactoryImplementor) factory).getServiceRegistry();
}
}
| SessionFactoryObserverForSchemaExport |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/KubernetesSecretsEndpointBuilderFactory.java | {
"start": 15373,
"end": 18471
} | interface ____ {
/**
* Kubernetes Secrets (camel-kubernetes)
* Perform operations on Kubernetes Secrets.
*
* Category: container,cloud
* Since: 2.17
* Maven coordinates: org.apache.camel:camel-kubernetes
*
* @return the dsl builder for the headers' name.
*/
default KubernetesSecretsHeaderNameBuilder kubernetesSecrets() {
return KubernetesSecretsHeaderNameBuilder.INSTANCE;
}
/**
* Kubernetes Secrets (camel-kubernetes)
* Perform operations on Kubernetes Secrets.
*
* Category: container,cloud
* Since: 2.17
* Maven coordinates: org.apache.camel:camel-kubernetes
*
* Syntax: <code>kubernetes-secrets:masterUrl</code>
*
* Path parameter: masterUrl (required)
* URL to a remote Kubernetes API server. This should only be used when
* your Camel application is connecting from outside Kubernetes. If you
* run your Camel application inside Kubernetes, then you can use local
* or client as the URL to tell Camel to run in local mode. If you
* connect remotely to Kubernetes, then you may also need some of the
* many other configuration options for secured connection with
* certificates, etc.
*
* @param path masterUrl
* @return the dsl builder
*/
default KubernetesSecretsEndpointBuilder kubernetesSecrets(String path) {
return KubernetesSecretsEndpointBuilderFactory.endpointBuilder("kubernetes-secrets", path);
}
/**
* Kubernetes Secrets (camel-kubernetes)
* Perform operations on Kubernetes Secrets.
*
* Category: container,cloud
* Since: 2.17
* Maven coordinates: org.apache.camel:camel-kubernetes
*
* Syntax: <code>kubernetes-secrets:masterUrl</code>
*
* Path parameter: masterUrl (required)
* URL to a remote Kubernetes API server. This should only be used when
* your Camel application is connecting from outside Kubernetes. If you
* run your Camel application inside Kubernetes, then you can use local
* or client as the URL to tell Camel to run in local mode. If you
* connect remotely to Kubernetes, then you may also need some of the
* many other configuration options for secured connection with
* certificates, etc.
*
* @param componentName to use a custom component name for the endpoint
* instead of the default name
* @param path masterUrl
* @return the dsl builder
*/
default KubernetesSecretsEndpointBuilder kubernetesSecrets(String componentName, String path) {
return KubernetesSecretsEndpointBuilderFactory.endpointBuilder(componentName, path);
}
}
/**
* The builder of headers' name for the Kubernetes Secrets component.
*/
public static | KubernetesSecretsBuilders |
java | spring-projects__spring-boot | core/spring-boot-testcontainers/src/main/java/org/springframework/boot/testcontainers/lifecycle/TestcontainersStartup.java | {
"start": 3726,
"end": 4678
} | class ____ {
private final Map<Startable, SingleStartable> adapters = new HashMap<>();
SingleStartable getOrCreate(Startable startable) {
return this.adapters.computeIfAbsent(startable, this::create);
}
private SingleStartable create(Startable startable) {
return new SingleStartable(this, startable);
}
record SingleStartable(SingleStartables singleStartables, Startable startable) implements Startable {
@Override
public Set<Startable> getDependencies() {
Set<Startable> dependencies = this.startable.getDependencies();
if (dependencies.isEmpty()) {
return dependencies;
}
return dependencies.stream()
.map(this.singleStartables::getOrCreate)
.collect(Collectors.toCollection(LinkedHashSet::new));
}
@Override
public void start() {
TestcontainersStartup.start(this.startable);
}
@Override
public void stop() {
this.startable.stop();
}
}
}
}
| SingleStartables |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/state/v2/adaptor/ReducingStateAdaptor.java | {
"start": 1240,
"end": 3017
} | class ____<K, N, V> extends MergingStateAdaptor<K, N, V, V, V, V>
implements InternalReducingState<K, N, V> {
public ReducingStateAdaptor(
org.apache.flink.runtime.state.internal.InternalReducingState<K, N, V> reducingState) {
super(reducingState);
}
@Override
public StateFuture<V> asyncGet() {
try {
return StateFutureUtils.completedFuture(delegatedState.get());
} catch (Exception e) {
throw new RuntimeException("Error while get value from raw ReducingState", e);
}
}
@Override
public StateFuture<V> asyncGetInternal() {
try {
return StateFutureUtils.completedFuture(delegatedState.getInternal());
} catch (Exception e) {
throw new RuntimeException("Error while get value from raw ReducingState", e);
}
}
@Override
public StateFuture<Void> asyncUpdateInternal(V valueToStore) {
try {
delegatedState.updateInternal(valueToStore);
return StateFutureUtils.completedVoidFuture();
} catch (Exception e) {
throw new RuntimeException("Error while update value to raw ReducingState", e);
}
}
@Override
public V getInternal() {
try {
return delegatedState.getInternal();
} catch (Exception e) {
throw new RuntimeException("Error while get internal value from raw ReducingState", e);
}
}
@Override
public void updateInternal(V valueToStore) {
try {
delegatedState.updateInternal(valueToStore);
} catch (Exception e) {
throw new RuntimeException("Error while update internal value to raw ReducingState", e);
}
}
}
| ReducingStateAdaptor |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SetFile.java | {
"start": 1201,
"end": 1361
} | class ____ extends MapFile {
protected SetFile() {} // no public ctor
/**
* Write a new set file.
*/
public static | SetFile |
java | alibaba__druid | core/src/main/java/com/alibaba/druid/pool/ha/selector/DataSourceSelectorFactory.java | {
"start": 807,
"end": 1352
} | class ____ {
/**
* Get a new instance of the given selector name.
*
* @return null if the given name do not represent a DataSourceSelector
*/
public static DataSourceSelector getSelector(String name, HighAvailableDataSource highAvailableDataSource) {
for (DataSourceSelectorEnum e : DataSourceSelectorEnum.values()) {
if (e.getName().equalsIgnoreCase(name)) {
return e.newInstance(highAvailableDataSource);
}
}
return null;
}
}
| DataSourceSelectorFactory |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/index/mapper/DoubleFieldScriptTests.java | {
"start": 1333,
"end": 4917
} | class ____ extends FieldScriptTestCase<DoubleFieldScript.Factory> {
public static final DoubleFieldScript.Factory DUMMY = (fieldName, params, lookup, onScriptError) -> ctx -> new DoubleFieldScript(
fieldName,
params,
lookup,
OnScriptError.FAIL,
ctx
) {
@Override
public void execute() {
emit(1.0);
}
};
@Override
protected ScriptContext<DoubleFieldScript.Factory> context() {
return DoubleFieldScript.CONTEXT;
}
@Override
protected DoubleFieldScript.Factory dummyScript() {
return DUMMY;
}
@Override
protected DoubleFieldScript.Factory fromSource() {
return DoubleFieldScript.PARSE_FROM_SOURCE;
}
public void testTooManyValues() throws IOException {
try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) {
iw.addDocument(List.of(new StoredField("_source", new BytesRef("{}"))));
try (DirectoryReader reader = iw.getReader()) {
DoubleFieldScript script = new DoubleFieldScript(
"test",
Map.of(),
new SearchLookup(field -> null, (ft, lookup, fdt) -> null, (ctx, doc) -> null),
OnScriptError.FAIL,
reader.leaves().get(0)
) {
@Override
public void execute() {
for (int i = 0; i <= AbstractFieldScript.MAX_VALUES; i++) {
new Emit(this).emit(1.0);
}
}
};
Exception e = expectThrows(IllegalArgumentException.class, script::execute);
assertThat(
e.getMessage(),
equalTo("Runtime field [test] is emitting [101] values while the maximum number of values allowed is [100]")
);
}
}
}
public final void testFromSourceDoesNotEnforceValuesLimit() throws IOException {
try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) {
int numValues = AbstractFieldScript.MAX_VALUES + randomIntBetween(1, 100);
XContentBuilder builder = JsonXContent.contentBuilder();
builder.startObject();
builder.startArray("field");
for (int i = 0; i < numValues; i++) {
builder.value(i + 0.1);
}
builder.endArray();
builder.endObject();
iw.addDocument(List.of(new StoredField("_source", new BytesRef(Strings.toString(builder)))));
try (DirectoryReader reader = iw.getReader()) {
DoubleFieldScript.LeafFactory leafFactory = fromSource().newFactory(
"field",
Collections.emptyMap(),
new SearchLookup(
field -> null,
(ft, lookup, fdt) -> null,
SourceProvider.fromLookup(MappingLookup.EMPTY, null, SourceFieldMetrics.NOOP)
),
OnScriptError.FAIL
);
DoubleFieldScript doubleFieldScript = leafFactory.newInstance(reader.leaves().get(0));
List<Double> results = new ArrayList<>();
doubleFieldScript.runForDoc(0, results::add);
assertEquals(numValues, results.size());
}
}
}
}
| DoubleFieldScriptTests |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/annotation/web/configurers/FormLoginConfigurerTests.java | {
"start": 23605,
"end": 24087
} | class ____ {
@Bean
SecurityFilterChain filterChain(HttpSecurity http) throws Exception {
// @formatter:off
http
.authorizeHttpRequests((authorize) -> authorize
.anyRequest().hasRole("USER")
)
.formLogin((formLogin) -> formLogin
.loginPage("/authenticate")
.permitAll()
)
.logout(LogoutConfigurer::permitAll);
return http.build();
// @formatter:on
}
}
@Configuration
@EnableWebSecurity
static | FormLoginDefaultsInLambdaConfig |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/component/file/NewFileConsumerTest.java | {
"start": 1166,
"end": 2164
} | class ____ extends ContextTestSupport {
private MyFileEndpoint myFile;
@Test
public void testNewFileConsumer() throws Exception {
getMockEndpoint("mock:result").expectedMessageCount(1);
template.sendBodyAndHeader(fileUri(), "Hello World", Exchange.FILE_NAME, "hello.txt");
assertMockEndpointsSatisfied();
oneExchangeDone.matchesWaitTime();
await("postPollCheck invocation").atMost(1, TimeUnit.SECONDS).until(myFile::isPost);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
myFile = new MyFileEndpoint();
myFile.setCamelContext(context);
myFile.setFile(testDirectory().toFile());
myFile.setDelay(10);
myFile.setInitialDelay(0);
from(myFile).to("mock:result");
}
};
}
private static | NewFileConsumerTest |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/fairness/StaticRouterRpcFairnessPolicyController.java | {
"start": 1762,
"end": 5410
} | class ____ extends
AbstractRouterRpcFairnessPolicyController {
private static final Logger LOG =
LoggerFactory.getLogger(StaticRouterRpcFairnessPolicyController.class);
public static final String ERROR_MSG = "Configured handlers "
+ DFS_ROUTER_HANDLER_COUNT_KEY + '='
+ " %d is less than the minimum required handlers %d";
public StaticRouterRpcFairnessPolicyController(Configuration conf) {
init(conf);
}
public void init(Configuration conf) throws IllegalArgumentException {
super.init(conf);
// Total handlers configured to process all incoming Rpc.
int handlerCount = conf.getInt(DFS_ROUTER_HANDLER_COUNT_KEY, DFS_ROUTER_HANDLER_COUNT_DEFAULT);
LOG.info("Handlers available for fairness assignment {} ", handlerCount);
// Get all name services configured
Set<String> allConfiguredNS = FederationUtil.getAllConfiguredNS(conf);
// Set to hold name services that are not
// configured with dedicated handlers.
Set<String> unassignedNS = new HashSet<>();
// Insert the concurrent nameservice into the set to process together
allConfiguredNS.add(CONCURRENT_NS);
validateHandlersCount(conf, handlerCount, allConfiguredNS);
for (String nsId : allConfiguredNS) {
int dedicatedHandlers = conf.getInt(DFS_ROUTER_FAIR_HANDLER_COUNT_KEY_PREFIX + nsId, 0);
LOG.info("Dedicated handlers {} for ns {} ", dedicatedHandlers, nsId);
if (dedicatedHandlers > 0) {
handlerCount -= dedicatedHandlers;
insertNameServiceWithPermits(nsId, dedicatedHandlers);
logAssignment(nsId, dedicatedHandlers);
} else {
unassignedNS.add(nsId);
}
}
// Assign remaining handlers equally to remaining name services and
// general pool if applicable.
if (!unassignedNS.isEmpty()) {
LOG.info("Unassigned ns {}", unassignedNS);
int handlersPerNS = handlerCount / unassignedNS.size();
LOG.info("Handlers available per ns {}", handlersPerNS);
for (String nsId : unassignedNS) {
insertNameServiceWithPermits(nsId, handlersPerNS);
logAssignment(nsId, handlersPerNS);
}
}
// Assign remaining handlers if any to fan out calls.
int leftOverHandlers = unassignedNS.isEmpty() ? handlerCount :
handlerCount % unassignedNS.size();
int existingPermits = getAvailablePermits(CONCURRENT_NS);
if (leftOverHandlers > 0) {
LOG.info("Assigned extra {} handlers to commons pool", leftOverHandlers);
insertNameServiceWithPermits(CONCURRENT_NS, existingPermits + leftOverHandlers);
}
LOG.info("Final permit allocation for concurrent ns: {}", getAvailablePermits(CONCURRENT_NS));
}
private static void logAssignment(String nsId, int count) {
LOG.info("Assigned {} handlers to nsId {} ", count, nsId);
}
private void validateHandlersCount(Configuration conf,
int handlerCount, Set<String> allConfiguredNS) {
int totalDedicatedHandlers = 0;
for (String nsId : allConfiguredNS) {
int dedicatedHandlers = conf.getInt(DFS_ROUTER_FAIR_HANDLER_COUNT_KEY_PREFIX + nsId, 0);
if (dedicatedHandlers > 0) {
// Total handlers should not be less than sum of dedicated handlers.
totalDedicatedHandlers += dedicatedHandlers;
} else {
// Each NS should have at least one handler assigned.
totalDedicatedHandlers++;
}
}
if (totalDedicatedHandlers > handlerCount) {
String msg = String.format(ERROR_MSG, handlerCount, totalDedicatedHandlers);
LOG.error(msg);
throw new IllegalArgumentException(msg);
}
}
}
| StaticRouterRpcFairnessPolicyController |
java | hibernate__hibernate-orm | hibernate-envers/src/test/java/org/hibernate/orm/test/envers/integration/modifiedflags/AbstractModifiedFlagsEntityTest.java | {
"start": 511,
"end": 2497
} | class ____ {
protected static List queryForPropertyHasChanged(
AuditReader auditReader, Class<?> clazz, Object id,
String... propertyNames) {
AuditQuery query = createForRevisionsQuery( auditReader, clazz, id, false );
addHasChangedProperties( query, propertyNames );
return query.getResultList();
}
protected static List queryForPropertyHasChangedWithDeleted(
AuditReader auditReader, Class<?> clazz, Object id,
String... propertyNames) {
AuditQuery query = createForRevisionsQuery( auditReader, clazz, id, true );
addHasChangedProperties( query, propertyNames );
return query.getResultList();
}
protected static List queryForPropertyHasNotChanged(
AuditReader auditReader, Class<?> clazz, Object id,
String... propertyNames) {
AuditQuery query = createForRevisionsQuery( auditReader, clazz, id, false );
addHasNotChangedProperties( query, propertyNames );
return query.getResultList();
}
protected static List queryForPropertyHasNotChangedWithDeleted(
AuditReader auditReader, Class<?> clazz, Object id,
String... propertyNames) {
AuditQuery query = createForRevisionsQuery( auditReader, clazz, id, true );
addHasNotChangedProperties( query, propertyNames );
return query.getResultList();
}
private static void addHasChangedProperties(
AuditQuery query,
String[] propertyNames) {
for ( String propertyName : propertyNames ) {
query.add( AuditEntity.property( propertyName ).hasChanged() );
}
}
private static void addHasNotChangedProperties(
AuditQuery query,
String[] propertyNames) {
for ( String propertyName : propertyNames ) {
query.add( AuditEntity.property( propertyName ).hasNotChanged() );
}
}
private static AuditQuery createForRevisionsQuery(AuditReader auditReader, Class<?> clazz, Object id, boolean withDeleted) {
return auditReader.createQuery()
.forRevisionsOfEntity( clazz, false, withDeleted )
.add( AuditEntity.id().eq( id ) );
}
}
| AbstractModifiedFlagsEntityTest |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/TestRollingLevelDBTimelineStore.java | {
"start": 2464,
"end": 17693
} | class ____ extends TimelineStoreTestUtils {
private FileContext fsContext;
private File fsPath;
private Configuration config = new YarnConfiguration();
@BeforeEach
public void setup() throws Exception {
fsContext = FileContext.getLocalFSFileContext();
fsPath = new File("target", this.getClass().getSimpleName() +
"-tmpDir").getAbsoluteFile();
fsContext.delete(new Path(fsPath.getAbsolutePath()), true);
config.set(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_PATH,
fsPath.getAbsolutePath());
config.setBoolean(YarnConfiguration.TIMELINE_SERVICE_TTL_ENABLE, false);
store = new RollingLevelDBTimelineStore();
store.init(config);
store.start();
loadTestEntityData();
loadVerificationEntityData();
loadTestDomainData();
}
@AfterEach
public void tearDown() throws Exception {
store.stop();
fsContext.delete(new Path(fsPath.getAbsolutePath()), true);
}
@Test
void testRootDirPermission() throws IOException {
FileSystem fs = FileSystem.getLocal(new YarnConfiguration());
FileStatus file = fs.getFileStatus(new Path(fsPath.getAbsolutePath(),
RollingLevelDBTimelineStore.FILENAME));
assertNotNull(file);
assertEquals(RollingLevelDBTimelineStore.LEVELDB_DIR_UMASK,
file.getPermission());
}
@Test
public void testGetSingleEntity() throws IOException {
super.testGetSingleEntity();
((RollingLevelDBTimelineStore)store).clearStartTimeCache();
super.testGetSingleEntity();
loadTestEntityData();
}
@Test
public void testGetEntities() throws IOException {
super.testGetEntities();
}
@Test
public void testGetEntitiesWithFromId() throws IOException {
super.testGetEntitiesWithFromId();
}
@Test
public void testGetEntitiesWithFromTs() throws IOException {
// feature not supported
}
@Test
public void testGetEntitiesWithPrimaryFilters() throws IOException {
super.testGetEntitiesWithPrimaryFilters();
}
@Test
public void testGetEntitiesWithSecondaryFilters() throws IOException {
super.testGetEntitiesWithSecondaryFilters();
}
@Test
public void testGetEvents() throws IOException {
super.testGetEvents();
}
@Test
void testCacheSizes() {
Configuration conf = new Configuration();
assertEquals(10000,
RollingLevelDBTimelineStore.getStartTimeReadCacheSize(conf));
assertEquals(10000,
RollingLevelDBTimelineStore.getStartTimeWriteCacheSize(conf));
conf.setInt(
YarnConfiguration.TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE,
10001);
assertEquals(10001,
RollingLevelDBTimelineStore.getStartTimeReadCacheSize(conf));
conf = new Configuration();
conf.setInt(
YarnConfiguration.TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE,
10002);
assertEquals(10002,
RollingLevelDBTimelineStore.getStartTimeWriteCacheSize(conf));
}
@Test
void testCheckVersion() throws IOException {
RollingLevelDBTimelineStore dbStore = (RollingLevelDBTimelineStore) store;
// default version
Version defaultVersion = dbStore.getCurrentVersion();
assertEquals(defaultVersion, dbStore.loadVersion());
// compatible version
Version compatibleVersion =
Version.newInstance(defaultVersion.getMajorVersion(),
defaultVersion.getMinorVersion() + 2);
dbStore.storeVersion(compatibleVersion);
assertEquals(compatibleVersion, dbStore.loadVersion());
restartTimelineStore();
dbStore = (RollingLevelDBTimelineStore) store;
// overwrite the compatible version
assertEquals(defaultVersion, dbStore.loadVersion());
// incompatible version
Version incompatibleVersion =
Version.newInstance(defaultVersion.getMajorVersion() + 1,
defaultVersion.getMinorVersion());
dbStore.storeVersion(incompatibleVersion);
try {
restartTimelineStore();
fail("Incompatible version, should expect fail here.");
} catch (ServiceStateException e) {
assertTrue(e.getMessage().contains("Incompatible version for timeline store"),
"Exception message mismatch");
}
}
@Test
void testValidateConfig() throws IOException {
Configuration copyConfig = new YarnConfiguration(config);
try {
Configuration newConfig = new YarnConfiguration(copyConfig);
newConfig.setLong(YarnConfiguration.TIMELINE_SERVICE_TTL_MS, 0);
config = newConfig;
restartTimelineStore();
fail();
} catch (IllegalArgumentException e) {
assertTrue(e.getMessage().contains(
YarnConfiguration.TIMELINE_SERVICE_TTL_MS));
}
try {
Configuration newConfig = new YarnConfiguration(copyConfig);
newConfig.setLong(
YarnConfiguration.TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS, 0);
config = newConfig;
restartTimelineStore();
fail();
} catch (IllegalArgumentException e) {
assertTrue(e.getMessage().contains(
YarnConfiguration.TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS));
}
try {
Configuration newConfig = new YarnConfiguration(copyConfig);
newConfig.setLong(
YarnConfiguration.TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE, -1);
config = newConfig;
restartTimelineStore();
fail();
} catch (IllegalArgumentException e) {
assertTrue(e.getMessage().contains(
YarnConfiguration.TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE));
}
try {
Configuration newConfig = new YarnConfiguration(copyConfig);
newConfig.setLong(
YarnConfiguration.TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE,
0);
config = newConfig;
restartTimelineStore();
fail();
} catch (IllegalArgumentException e) {
assertTrue(e.getMessage().contains(
YarnConfiguration
.TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE));
}
try {
Configuration newConfig = new YarnConfiguration(copyConfig);
newConfig.setLong(
YarnConfiguration
.TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE,
0);
config = newConfig;
restartTimelineStore();
fail();
} catch (IllegalArgumentException e) {
assertTrue(e.getMessage().contains(
YarnConfiguration
.TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE));
}
config = copyConfig;
restartTimelineStore();
}
private void restartTimelineStore() throws IOException {
// need to close so leveldb releases database lock
if (store != null) {
store.close();
}
store = new RollingLevelDBTimelineStore();
store.init(config);
store.start();
}
@Test
public void testGetDomain() throws IOException {
super.testGetDomain();
}
@Test
public void testGetDomains() throws IOException {
super.testGetDomains();
}
@Test
void testRelatingToNonExistingEntity() throws IOException {
TimelineEntity entityToStore = new TimelineEntity();
entityToStore.setEntityType("TEST_ENTITY_TYPE_1");
entityToStore.setEntityId("TEST_ENTITY_ID_1");
entityToStore.setDomainId(TimelineDataManager.DEFAULT_DOMAIN_ID);
entityToStore.addRelatedEntity("TEST_ENTITY_TYPE_2", "TEST_ENTITY_ID_2");
TimelineEntities entities = new TimelineEntities();
entities.addEntity(entityToStore);
store.put(entities);
TimelineEntity entityToGet =
store.getEntity("TEST_ENTITY_ID_2", "TEST_ENTITY_TYPE_2", null);
assertNotNull(entityToGet);
assertEquals("DEFAULT", entityToGet.getDomainId());
assertEquals("TEST_ENTITY_TYPE_1",
entityToGet.getRelatedEntities().keySet().iterator().next());
assertEquals("TEST_ENTITY_ID_1",
entityToGet.getRelatedEntities().values().iterator().next()
.iterator().next());
}
@Test
void testRelatingToEntityInSamePut() throws IOException {
TimelineEntity entityToRelate = new TimelineEntity();
entityToRelate.setEntityType("TEST_ENTITY_TYPE_2");
entityToRelate.setEntityId("TEST_ENTITY_ID_2");
entityToRelate.setDomainId("TEST_DOMAIN");
TimelineEntity entityToStore = new TimelineEntity();
entityToStore.setEntityType("TEST_ENTITY_TYPE_1");
entityToStore.setEntityId("TEST_ENTITY_ID_1");
entityToStore.setDomainId("TEST_DOMAIN");
entityToStore.addRelatedEntity("TEST_ENTITY_TYPE_2", "TEST_ENTITY_ID_2");
TimelineEntities entities = new TimelineEntities();
entities.addEntity(entityToStore);
entities.addEntity(entityToRelate);
store.put(entities);
TimelineEntity entityToGet =
store.getEntity("TEST_ENTITY_ID_2", "TEST_ENTITY_TYPE_2", null);
assertNotNull(entityToGet);
assertEquals("TEST_DOMAIN", entityToGet.getDomainId());
assertEquals("TEST_ENTITY_TYPE_1",
entityToGet.getRelatedEntities().keySet().iterator().next());
assertEquals("TEST_ENTITY_ID_1",
entityToGet.getRelatedEntities().values().iterator().next()
.iterator().next());
}
@Test
void testRelatingToOldEntityWithoutDomainId() throws IOException {
// New entity is put in the default domain
TimelineEntity entityToStore = new TimelineEntity();
entityToStore.setEntityType("NEW_ENTITY_TYPE_1");
entityToStore.setEntityId("NEW_ENTITY_ID_1");
entityToStore.setDomainId(TimelineDataManager.DEFAULT_DOMAIN_ID);
entityToStore.addRelatedEntity("OLD_ENTITY_TYPE_1", "OLD_ENTITY_ID_1");
TimelineEntities entities = new TimelineEntities();
entities.addEntity(entityToStore);
store.put(entities);
TimelineEntity entityToGet =
store.getEntity("OLD_ENTITY_ID_1", "OLD_ENTITY_TYPE_1", null);
assertNotNull(entityToGet);
assertEquals("DEFAULT", entityToGet.getDomainId());
assertEquals("NEW_ENTITY_TYPE_1",
entityToGet.getRelatedEntities().keySet().iterator().next());
assertEquals("NEW_ENTITY_ID_1",
entityToGet.getRelatedEntities().values().iterator().next()
.iterator().next());
// New entity is not put in the default domain
entityToStore = new TimelineEntity();
entityToStore.setEntityType("NEW_ENTITY_TYPE_2");
entityToStore.setEntityId("NEW_ENTITY_ID_2");
entityToStore.setDomainId("NON_DEFAULT");
entityToStore.addRelatedEntity("OLD_ENTITY_TYPE_1", "OLD_ENTITY_ID_1");
entities = new TimelineEntities();
entities.addEntity(entityToStore);
TimelinePutResponse response = store.put(entities);
assertEquals(1, response.getErrors().size());
assertEquals(TimelinePutError.FORBIDDEN_RELATION,
response.getErrors().get(0).getErrorCode());
entityToGet =
store.getEntity("OLD_ENTITY_ID_1", "OLD_ENTITY_TYPE_1", null);
assertNotNull(entityToGet);
assertEquals("DEFAULT", entityToGet.getDomainId());
// Still have one related entity
assertEquals(1, entityToGet.getRelatedEntities().keySet().size());
assertEquals(1, entityToGet.getRelatedEntities().values()
.iterator().next().size());
}
public void testStorePerformance() throws IOException {
TimelineEntity entityToStorePrep = new TimelineEntity();
entityToStorePrep.setEntityType("TEST_ENTITY_TYPE_PREP");
entityToStorePrep.setEntityId("TEST_ENTITY_ID_PREP");
entityToStorePrep.setDomainId("TEST_DOMAIN");
entityToStorePrep.addRelatedEntity("TEST_ENTITY_TYPE_2",
"TEST_ENTITY_ID_2");
entityToStorePrep.setStartTime(0L);
TimelineEntities entitiesPrep = new TimelineEntities();
entitiesPrep.addEntity(entityToStorePrep);
store.put(entitiesPrep);
long start = System.currentTimeMillis();
int num = 1000000;
Log.getLog().info("Start test for " + num);
final String tezTaskAttemptId = "TEZ_TA";
final String tezEntityId = "attempt_1429158534256_0001_1_00_000000_";
final String tezTaskId = "TEZ_T";
final String tezDomainId = "Tez_ATS_application_1429158534256_0001";
TimelineEntity entityToStore = new TimelineEntity();
TimelineEvent startEvt = new TimelineEvent();
entityToStore.setEntityType(tezTaskAttemptId);
startEvt.setEventType("TASK_ATTEMPT_STARTED");
startEvt.setTimestamp(0);
entityToStore.addEvent(startEvt);
entityToStore.setDomainId(tezDomainId);
entityToStore.addPrimaryFilter("status", "SUCCEEDED");
entityToStore.addPrimaryFilter("applicationId",
"application_1429158534256_0001");
entityToStore.addPrimaryFilter("TEZ_VERTEX_ID",
"vertex_1429158534256_0001_1_00");
entityToStore.addPrimaryFilter("TEZ_DAG_ID", "dag_1429158534256_0001_1");
entityToStore.addPrimaryFilter("TEZ_TASK_ID",
"task_1429158534256_0001_1_00_000000");
entityToStore.setStartTime(0L);
entityToStore.addOtherInfo("startTime", 0);
entityToStore.addOtherInfo("inProgressLogsURL",
"localhost:8042/inProgressLogsURL");
entityToStore.addOtherInfo("completedLogsURL", "");
entityToStore.addOtherInfo("nodeId", "localhost:54450");
entityToStore.addOtherInfo("nodeHttpAddress", "localhost:8042");
entityToStore.addOtherInfo("containerId",
"container_1429158534256_0001_01_000002");
entityToStore.addOtherInfo("status", "RUNNING");
entityToStore.addRelatedEntity(tezTaskId, "TEZ_TASK_ID_1");
TimelineEntities entities = new TimelineEntities();
entities.addEntity(entityToStore);
for (int i = 0; i < num; ++i) {
entityToStore.setEntityId(tezEntityId + i);
store.put(entities);
}
long duration = System.currentTimeMillis() - start;
Log.getLog().info("Duration for " + num + ": " + duration);
}
/**
* Test that RollingLevelDb repair is attempted at least once during
* serviceInit for RollingLeveldbTimelineStore in case open fails the
* first time.
*/ @Test
void testLevelDbRepair() throws IOException {
RollingLevelDBTimelineStore store = new RollingLevelDBTimelineStore();
JniDBFactory factory = Mockito.mock(JniDBFactory.class);
Mockito.when(factory.open(Mockito.any(File.class), Mockito.any(Options.class)))
.thenThrow(new IOException()).thenCallRealMethod();
store.setFactory(factory);
//Create the LevelDb in a different location
File path = new File("target", this.getClass().getSimpleName() + "-tmpDir2").getAbsoluteFile();
Configuration conf = new Configuration(this.config);
conf.set(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_PATH, path.getAbsolutePath());
try {
store.init(conf);
Mockito.verify(factory, Mockito.times(1))
.repair(Mockito.any(File.class), Mockito.any(Options.class));
FilenameFilter fileFilter = WildcardFileFilter.builder()
.setWildcards("*" + RollingLevelDBTimelineStore.BACKUP_EXT + "*")
.get();
assertTrue(new File(path.getAbsolutePath(), RollingLevelDBTimelineStore.FILENAME)
.list(fileFilter).length > 0);
} finally {
store.close();
fsContext.delete(new Path(path.getAbsolutePath()), true);
}
}
public static void main(String[] args) throws Exception {
TestRollingLevelDBTimelineStore store =
new TestRollingLevelDBTimelineStore();
store.setup();
store.testStorePerformance();
store.tearDown();
}
} | TestRollingLevelDBTimelineStore |
java | apache__camel | components/camel-mail/src/test/java/org/apache/camel/component/mail/MailAttachmentRedeliveryTest.java | {
"start": 1753,
"end": 5361
} | class ____ extends CamelTestSupport {
private static final MailboxUser james = Mailbox.getOrCreateUser("james", "secret");
private final List<String> names = new ArrayList<>();
@Test
public void testSendAndReceiveMailWithAttachmentsRedelivery() throws Exception {
// clear mailbox
Mailbox.clearAll();
// create an exchange with a normal body and attachment to be produced as email
Endpoint endpoint = context.getEndpoint(james.uriPrefix(Protocol.smtp));
// create the exchange with the mail message that is multipart with a file and a Hello World text/plain message.
Exchange exchange = endpoint.createExchange();
AttachmentMessage in = exchange.getIn(AttachmentMessage.class);
in.setBody("Hello World");
in.addAttachment("logo.jpeg", new DataHandler(new FileDataSource("src/test/data/logo.jpeg")));
// create a producer that can produce the exchange (= send the mail)
Producer producer = endpoint.createProducer();
// start the producer
producer.start();
// and let it go (processes the exchange by sending the email)
producer.process(exchange);
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedMessageCount(1);
mock.assertIsSatisfied();
Exchange out = mock.assertExchangeReceived(0);
// plain text
assertEquals("Hello World", out.getIn().getBody(String.class));
// attachment
Map<String, DataHandler> attachments = out.getIn(AttachmentMessage.class).getAttachments();
assertNotNull(attachments, "Should have attachments");
assertEquals(1, attachments.size());
DataHandler handler = out.getIn(AttachmentMessage.class).getAttachment("logo.jpeg");
assertNotNull(handler, "The logo should be there");
// content type should match
boolean match1 = "image/jpeg; name=logo.jpeg".equals(handler.getContentType());
boolean match2 = "application/octet-stream; name=logo.jpeg".equals(handler.getContentType());
assertTrue(match1 || match2, "Should match 1 or 2");
assertEquals("logo.jpeg", handler.getName(), "Handler name should be the file name");
producer.stop();
assertEquals(3, names.size());
assertEquals("logo.jpeg", names.get(0));
assertEquals("logo.jpeg", names.get(1));
assertEquals("logo.jpeg", names.get(2));
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
onException(IllegalArgumentException.class).maximumRedeliveries(3).redeliveryDelay(0);
from(james.uriPrefix(Protocol.pop3) + "&initialDelay=100&delay=100")
.process(new Processor() {
private int counter;
@Override
public void process(Exchange exchange) {
Map<String, DataHandler> map = exchange.getIn(AttachmentMessage.class).getAttachments();
assertNotNull(map);
assertEquals(1, map.size());
names.add(map.keySet().iterator().next());
if (counter++ < 2) {
throw new IllegalArgumentException("Forced");
}
}
}).to("mock:result");
}
};
}
}
| MailAttachmentRedeliveryTest |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/legacy/Circular.java | {
"start": 141,
"end": 1257
} | class ____ {
private String id;
private Class clazz;
private Circular other;
private Object anyEntity;
/**
* Constructor for Circular.
*/
public Circular() {
super();
}
/**
* Returns the clazz.
* @return Class
*/
public Class getClazz() {
return clazz;
}
/**
* Returns the id.
* @return String
*/
public String getId() {
return id;
}
/**
* Sets the clazz.
* @param clazz The clazz to set
*/
public void setClazz(Class clazz) {
this.clazz = clazz;
}
/**
* Sets the id.
* @param id The id to set
*/
public void setId(String id) {
this.id = id;
}
/**
* Returns the other.
* @return Circular
*/
public Circular getOther() {
return other;
}
/**
* Sets the other.
* @param other The other to set
*/
public void setOther(Circular other) {
this.other = other;
}
/**
* Returns the anyEntity.
* @return Object
*/
public Object getAnyEntity() {
return anyEntity;
}
/**
* Sets the anyEntity.
* @param anyEntity The anyEntity to set
*/
public void setAnyEntity(Object anyEntity) {
this.anyEntity = anyEntity;
}
}
| Circular |
java | apache__flink | flink-core/src/main/java/org/apache/flink/core/asyncprocessing/AsyncFuture.java | {
"start": 1447,
"end": 1700
} | interface ____ a public API.
*
* <p>TODO: Rename {@link StateFuture} to {@link AsyncFuture} and remove this interface. If we
* decided to expose {@link AsyncFuture} as a public API.
*
* @param <T> The return type of this future.
*/
@Internal
public | as |
java | mockito__mockito | mockito-core/src/main/java/org/mockito/internal/util/MockUtil.java | {
"start": 1113,
"end": 1903
} | class ____ {
private static final MockMaker defaultMockMaker = Plugins.getMockMaker();
private static final Map<Class<? extends MockMaker>, MockMaker> mockMakers =
new ConcurrentHashMap<>(
Collections.singletonMap(defaultMockMaker.getClass(), defaultMockMaker));
private MockUtil() {}
public static MockMaker getMockMaker(String mockMaker) {
if (mockMaker == null) {
return defaultMockMaker;
}
String typeName;
if (DefaultMockitoPlugins.MOCK_MAKER_ALIASES.contains(mockMaker)) {
typeName = DefaultMockitoPlugins.getDefaultPluginClass(mockMaker);
} else {
typeName = mockMaker;
}
Class<? extends MockMaker> type;
// Using the context | MockUtil |
java | quarkusio__quarkus | devtools/cli/src/test/java/io/quarkus/cli/config/RemoveConfigTest.java | {
"start": 599,
"end": 2070
} | class ____ {
@TempDir
Path tempDir;
@BeforeEach
void setUp() throws Exception {
Path resources = tempDir.resolve("src/main/resources");
Files.createDirectories(resources);
Files.createFile(resources.resolve("application.properties"));
}
@Test
void removeConfiguration() throws Exception {
Path propertiesFile = tempDir.resolve("src/main/resources/application.properties");
Properties properties = new Properties();
try (InputStream inputStream = propertiesFile.toUri().toURL().openStream()) {
properties.load(inputStream);
}
properties.put("foo.bar", "1234");
try (FileOutputStream outputStream = new FileOutputStream(propertiesFile.toFile())) {
properties.store(outputStream, "");
}
CliDriver.Result result = CliDriver.execute(tempDir, "config", "remove", "foo.bar");
System.out.println(result.getStdout());
assertEquals(0, result.getExitCode());
assertTrue(config().getOptionalValue("foo.bar", String.class).isEmpty());
}
private SmallRyeConfig config() throws Exception {
PropertiesConfigSource propertiesConfigSource = new PropertiesConfigSource(
tempDir.resolve("src/main/resources/application.properties").toUri().toURL());
return new SmallRyeConfigBuilder()
.withSources(propertiesConfigSource)
.build();
}
}
| RemoveConfigTest |
java | eclipse-vertx__vert.x | vertx-core/src/main/java/io/vertx/core/json/jackson/BufferSerializer.java | {
"start": 744,
"end": 998
} | class ____ extends JsonSerializer<Buffer> {
@Override
public void serialize(Buffer value, JsonGenerator jgen, SerializerProvider provider) throws IOException {
jgen.writeString(BASE64_ENCODER.encodeToString(value.getBytes()));
}
}
| BufferSerializer |
java | spring-projects__spring-security | core/src/main/java/org/springframework/security/core/userdetails/UserDetailsService.java | {
"start": 695,
"end": 961
} | interface ____ loads user-specific data.
* <p>
* It is used throughout the framework as a user DAO and is the strategy used by the
* {@link org.springframework.security.authentication.dao.DaoAuthenticationProvider
* DaoAuthenticationProvider}.
*
* <p>
* The | which |
java | grpc__grpc-java | api/src/main/java/io/grpc/ServerCall.java | {
"start": 2719,
"end": 11514
} | class ____<ReqT> {
/**
* A request message has been received. For streaming calls, there may be zero or more request
* messages.
*
* @param message a received request message.
*/
public void onMessage(ReqT message) {}
/**
* The client completed all message sending. However, the call may still be cancelled.
*/
public void onHalfClose() {}
/**
* The call was cancelled and the server is encouraged to abort processing to save resources,
* since the client will not process any further messages. Cancellations can be caused by
* timeouts, explicit cancellation by the client, network errors, etc.
*
* <p>There will be no further callbacks for the call.
*/
public void onCancel() {}
/**
* The call is considered complete and {@link #onCancel} is guaranteed not to be called.
* However, the client is not guaranteed to have received all messages.
*
* <p>There will be no further callbacks for the call.
*/
public void onComplete() {}
/**
* This indicates that the call may now be capable of sending additional messages (via
* {@link #sendMessage}) without requiring excessive buffering internally. This event is
* just a suggestion and the application is free to ignore it, however doing so may
* result in excessive buffering within the call.
*
* <p>Because there is a processing delay to deliver this notification, it is possible for
* concurrent writes to cause {@code isReady() == false} within this callback. Handle "spurious"
* notifications by checking {@code isReady()}'s current value instead of assuming it is now
* {@code true}. If {@code isReady() == false} the normal expectations apply, so there would be
* <em>another</em> {@code onReady()} callback.
*/
public void onReady() {}
}
/**
* Requests up to the given number of messages from the call to be delivered to
* {@link Listener#onMessage(Object)}. Once {@code numMessages} have been delivered
* no further request messages will be delivered until more messages are requested by
* calling this method again.
*
* <p>Servers use this mechanism to provide back-pressure to the client for flow-control.
*
* <p>This method is safe to call from multiple threads without external synchronization.
*
* @param numMessages the requested number of messages to be delivered to the listener.
*/
public abstract void request(int numMessages);
/**
* Send response header metadata prior to sending a response message. This method may
* only be called once and cannot be called after calls to {@link #sendMessage} or {@link #close}.
*
* <p>Since {@link Metadata} is not thread-safe, the caller must not access (read or write) {@code
* headers} after this point.
*
* @param headers metadata to send prior to any response body.
* @throws IllegalStateException if {@code close} has been called, a message has been sent, or
* headers have already been sent
*/
public abstract void sendHeaders(Metadata headers);
/**
* Send a response message. Messages are the primary form of communication associated with
* RPCs. Multiple response messages may exist for streaming calls.
*
* @param message response message.
* @throws IllegalStateException if headers not sent or call is {@link #close}d
*/
public abstract void sendMessage(RespT message);
/**
* If {@code true}, indicates that the call is capable of sending additional messages
* without requiring excessive buffering internally. This event is
* just a suggestion and the application is free to ignore it, however doing so may
* result in excessive buffering within the call.
*
* <p>If {@code false}, {@link Listener#onReady()} will be called after {@code isReady()}
* transitions to {@code true}.
*
* <p>This abstract class's implementation always returns {@code true}. Implementations generally
* override the method.
*/
public boolean isReady() {
return true;
}
/**
* Close the call with the provided status. No further sending or receiving will occur. If {@link
* Status#isOk} is {@code false}, then the call is said to have failed.
*
* <p>If no errors or cancellations are known to have occurred, then a {@link Listener#onComplete}
* notification should be expected, independent of {@code status}. Otherwise {@link
* Listener#onCancel} has been or will be called.
*
* <p>Since {@link Metadata} is not thread-safe, the caller must not access (read or write) {@code
* trailers} after this point.
*
* <p>This method implies the caller completed processing the RPC, but it does not imply the RPC
* is complete. The call implementation will need additional time to complete the RPC and during
* this time the client is still able to cancel the request or a network error might cause the
* RPC to fail. If you wish to know when the call is actually completed/closed, you have to use
* {@link Listener#onComplete} or {@link Listener#onCancel} instead. This method is not
* necessarily invoked when Listener.onCancel() is called.
*
* @throws IllegalStateException if call is already {@code close}d
*/
public abstract void close(Status status, Metadata trailers);
/**
* Returns {@code true} when the call is cancelled and the server is encouraged to abort
* processing to save resources, since the client will not be processing any further methods.
* Cancellations can be caused by timeouts, explicit cancel by client, network errors, and
* similar.
*
* <p>This method may safely be called concurrently from multiple threads.
*/
public abstract boolean isCancelled();
/**
* Enables per-message compression, if an encoding type has been negotiated. If no message
* encoding has been negotiated, this is a no-op. By default per-message compression is enabled,
* but may not have any effect if compression is not enabled on the call.
*/
public void setMessageCompression(boolean enabled) {
// noop
}
/**
* Sets the compression algorithm for this call. This compression is utilized for sending. If
* the server does not support the compression algorithm, the call will fail. This method may
* only be called before {@link #sendHeaders}. The compressor to use will be looked up in the
* {@link CompressorRegistry}. Default gRPC servers support the "gzip" compressor.
*
* <p>It is safe to call this even if the client does not support the compression format chosen.
* The implementation will handle negotiation with the client and may fall back to no compression.
*
* @param compressor the name of the compressor to use.
* @throws IllegalArgumentException if the compressor name can not be found.
*/
public void setCompression(String compressor) {
// noop
}
/**
* A hint to the call that specifies how many bytes must be queued before
* {@link #isReady()} will return false. A call may ignore this property if
* unsupported. This may only be set before any messages are sent.
*
* @param numBytes The number of bytes that must be queued. Must be a
* positive integer.
*/
@ExperimentalApi("https://github.com/grpc/grpc-java/issues/11021")
public void setOnReadyThreshold(int numBytes) {
checkArgument(numBytes > 0, "numBytes must be positive: %s", numBytes);
}
/**
* Returns the level of security guarantee in communications
*
* <p>Determining the level of security offered by the transport for RPCs on server-side.
* This can be approximated by looking for the SSLSession, but that doesn't work for ALTS and
* maybe some future TLS approaches. May return a lower security level when it cannot be
* determined precisely.
*
* @return non-{@code null} SecurityLevel enum
*/
@ExperimentalApi("https://github.com/grpc/grpc-java/issues/4692")
public SecurityLevel getSecurityLevel() {
return SecurityLevel.NONE;
}
/**
* Returns properties of a single call.
*
* <p>Attributes originate from the transport and can be altered by {@link ServerTransportFilter}.
*
* @return non-{@code null} Attributes container
*/
@ExperimentalApi("https://github.com/grpc/grpc-java/issues/1779")
@Grpc.TransportAttr
public Attributes getAttributes() {
return Attributes.EMPTY;
}
/**
* Gets the authority this call is addressed to.
*
* @return the authority string. {@code null} if not available.
*/
@Nullable
public String getAuthority() {
return null;
}
/**
* The {@link MethodDescriptor} for the call.
*/
public abstract MethodDescriptor<ReqT, RespT> getMethodDescriptor();
}
| Listener |
java | google__dagger | javatests/dagger/android/support/functional/ComponentStructureFollowsControllerStructureApplication.java | {
"start": 2262,
"end": 2687
} | interface ____
extends AndroidInjector<ComponentStructureFollowsControllerStructureApplication> {
@Module(
subcomponents = {
ActivitySubcomponent.class,
InnerActivitySubcomponent.class,
ServiceSubcomponent.class,
IntentServiceSubcomponent.class,
BroadcastReceiverSubcomponent.class,
ContentProviderSubcomponent.class,
}
)
abstract | ApplicationComponent |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/PassThroughConfig.java | {
"start": 1041,
"end": 6992
} | class ____ implements NlpConfig {
public static final String NAME = "pass_through";
public static PassThroughConfig fromXContentStrict(XContentParser parser) {
return STRICT_PARSER.apply(parser, null);
}
public static PassThroughConfig fromXContentLenient(XContentParser parser) {
return LENIENT_PARSER.apply(parser, null);
}
private static final ConstructingObjectParser<PassThroughConfig, Void> STRICT_PARSER = createParser(false);
private static final ConstructingObjectParser<PassThroughConfig, Void> LENIENT_PARSER = createParser(true);
private static ConstructingObjectParser<PassThroughConfig, Void> createParser(boolean ignoreUnknownFields) {
ConstructingObjectParser<PassThroughConfig, Void> parser = new ConstructingObjectParser<>(
NAME,
ignoreUnknownFields,
a -> new PassThroughConfig((VocabularyConfig) a[0], (Tokenization) a[1], (String) a[2])
);
parser.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> {
if (ignoreUnknownFields == false) {
throw ExceptionsHelper.badRequestException(
"illegal setting [{}] on inference model creation",
VOCABULARY.getPreferredName()
);
}
return VocabularyConfig.fromXContentLenient(p);
}, VOCABULARY);
parser.declareNamedObject(
ConstructingObjectParser.optionalConstructorArg(),
(p, c, n) -> p.namedObject(Tokenization.class, n, ignoreUnknownFields),
TOKENIZATION
);
parser.declareString(ConstructingObjectParser.optionalConstructorArg(), RESULTS_FIELD);
return parser;
}
private final VocabularyConfig vocabularyConfig;
private final Tokenization tokenization;
private final String resultsField;
public PassThroughConfig(
@Nullable VocabularyConfig vocabularyConfig,
@Nullable Tokenization tokenization,
@Nullable String resultsField
) {
this.vocabularyConfig = Optional.ofNullable(vocabularyConfig)
.orElse(new VocabularyConfig(InferenceIndexConstants.nativeDefinitionStore()));
this.tokenization = tokenization == null ? Tokenization.createDefault() : tokenization;
this.resultsField = resultsField;
if (this.tokenization.span != -1) {
throw ExceptionsHelper.badRequestException(
"[{}] does not support windowing long text sequences; configured span [{}]",
NAME,
this.tokenization.span
);
}
}
public PassThroughConfig(StreamInput in) throws IOException {
vocabularyConfig = new VocabularyConfig(in);
tokenization = in.readNamedWriteable(Tokenization.class);
resultsField = in.readOptionalString();
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(VOCABULARY.getPreferredName(), vocabularyConfig, params);
NamedXContentObjectHelper.writeNamedObject(builder, params, TOKENIZATION.getPreferredName(), tokenization);
if (resultsField != null) {
builder.field(RESULTS_FIELD.getPreferredName(), resultsField);
}
builder.endObject();
return builder;
}
@Override
public String getWriteableName() {
return NAME;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
vocabularyConfig.writeTo(out);
out.writeNamedWriteable(tokenization);
out.writeOptionalString(resultsField);
}
@Override
public boolean isTargetTypeSupported(TargetType targetType) {
return false;
}
@Override
public InferenceConfig apply(InferenceConfigUpdate update) {
if (update instanceof PassThroughConfigUpdate configUpdate) {
return new PassThroughConfig(
vocabularyConfig,
(configUpdate.getTokenizationUpdate() == null) ? tokenization : configUpdate.getTokenizationUpdate().apply(tokenization),
update.getResultsField() == null ? resultsField : update.getResultsField()
);
} else if (update instanceof TokenizationConfigUpdate tokenizationUpdate) {
var updatedTokenization = getTokenization().updateWindowSettings(tokenizationUpdate.getSpanSettings());
return new PassThroughConfig(this.vocabularyConfig, updatedTokenization, this.resultsField);
} else {
throw incompatibleUpdateException(update.getName());
}
}
@Override
public MlConfigVersion getMinimalSupportedMlConfigVersion() {
return MlConfigVersion.V_8_0_0;
}
@Override
public TransportVersion getMinimalSupportedTransportVersion() {
return TransportVersion.minimumCompatible();
}
@Override
public boolean isAllocateOnly() {
return true;
}
@Override
public String getName() {
return NAME;
}
@Override
public boolean equals(Object o) {
if (o == this) return true;
if (o == null || getClass() != o.getClass()) return false;
PassThroughConfig that = (PassThroughConfig) o;
return Objects.equals(vocabularyConfig, that.vocabularyConfig)
&& Objects.equals(tokenization, that.tokenization)
&& Objects.equals(resultsField, that.resultsField);
}
@Override
public int hashCode() {
return Objects.hash(vocabularyConfig, tokenization, resultsField);
}
@Override
public VocabularyConfig getVocabularyConfig() {
return vocabularyConfig;
}
@Override
public Tokenization getTokenization() {
return tokenization;
}
@Override
public String getResultsField() {
return resultsField;
}
}
| PassThroughConfig |
java | bumptech__glide | library/src/main/java/com/bumptech/glide/load/engine/EngineJob.java | {
"start": 1006,
"end": 12360
} | class ____<R> implements DecodeJob.Callback<R>, Poolable {
private static final EngineResourceFactory DEFAULT_FACTORY = new EngineResourceFactory();
@SuppressWarnings("WeakerAccess")
@Synthetic
final ResourceCallbacksAndExecutors cbs = new ResourceCallbacksAndExecutors();
private final StateVerifier stateVerifier = StateVerifier.newInstance();
private final ResourceListener resourceListener;
private final Pools.Pool<EngineJob<?>> pool;
private final EngineResourceFactory engineResourceFactory;
private final EngineJobListener engineJobListener;
private final GlideExecutor diskCacheExecutor;
private final GlideExecutor sourceExecutor;
private final GlideExecutor sourceUnlimitedExecutor;
private final GlideExecutor animationExecutor;
private final AtomicInteger pendingCallbacks = new AtomicInteger();
private Key key;
private boolean isCacheable;
private boolean useUnlimitedSourceGeneratorPool;
private boolean useAnimationPool;
private boolean onlyRetrieveFromCache;
private Resource<?> resource;
@SuppressWarnings("WeakerAccess")
@Synthetic
DataSource dataSource;
private boolean hasResource;
@SuppressWarnings("WeakerAccess")
@Synthetic
GlideException exception;
private boolean hasLoadFailed;
@SuppressWarnings("WeakerAccess")
@Synthetic
EngineResource<?> engineResource;
private DecodeJob<R> decodeJob;
// Checked primarily on the main thread, but also on other threads in reschedule.
private volatile boolean isCancelled;
private boolean isLoadedFromAlternateCacheKey;
EngineJob(
GlideExecutor diskCacheExecutor,
GlideExecutor sourceExecutor,
GlideExecutor sourceUnlimitedExecutor,
GlideExecutor animationExecutor,
EngineJobListener engineJobListener,
ResourceListener resourceListener,
Pools.Pool<EngineJob<?>> pool) {
this(
diskCacheExecutor,
sourceExecutor,
sourceUnlimitedExecutor,
animationExecutor,
engineJobListener,
resourceListener,
pool,
DEFAULT_FACTORY);
}
@VisibleForTesting
EngineJob(
GlideExecutor diskCacheExecutor,
GlideExecutor sourceExecutor,
GlideExecutor sourceUnlimitedExecutor,
GlideExecutor animationExecutor,
EngineJobListener engineJobListener,
ResourceListener resourceListener,
Pools.Pool<EngineJob<?>> pool,
EngineResourceFactory engineResourceFactory) {
this.diskCacheExecutor = diskCacheExecutor;
this.sourceExecutor = sourceExecutor;
this.sourceUnlimitedExecutor = sourceUnlimitedExecutor;
this.animationExecutor = animationExecutor;
this.engineJobListener = engineJobListener;
this.resourceListener = resourceListener;
this.pool = pool;
this.engineResourceFactory = engineResourceFactory;
}
@VisibleForTesting
synchronized EngineJob<R> init(
Key key,
boolean isCacheable,
boolean useUnlimitedSourceGeneratorPool,
boolean useAnimationPool,
boolean onlyRetrieveFromCache) {
this.key = key;
this.isCacheable = isCacheable;
this.useUnlimitedSourceGeneratorPool = useUnlimitedSourceGeneratorPool;
this.useAnimationPool = useAnimationPool;
this.onlyRetrieveFromCache = onlyRetrieveFromCache;
return this;
}
public synchronized void start(DecodeJob<R> decodeJob) {
this.decodeJob = decodeJob;
GlideExecutor executor =
decodeJob.willDecodeFromCache() ? diskCacheExecutor : getActiveSourceExecutor();
executor.execute(decodeJob);
}
synchronized void addCallback(final ResourceCallback cb, Executor callbackExecutor) {
stateVerifier.throwIfRecycled();
cbs.add(cb, callbackExecutor);
if (hasResource) {
// Acquire early so that the resource isn't recycled while the Runnable below is still sitting
// in the executors queue.
incrementPendingCallbacks(1);
callbackExecutor.execute(new CallResourceReady(cb));
} else if (hasLoadFailed) {
incrementPendingCallbacks(1);
callbackExecutor.execute(new CallLoadFailed(cb));
} else {
Preconditions.checkArgument(!isCancelled, "Cannot add callbacks to a cancelled EngineJob");
}
}
@SuppressWarnings("WeakerAccess")
@Synthetic
@GuardedBy("this")
void callCallbackOnResourceReady(ResourceCallback cb) {
try {
// This is overly broad, some Glide code is actually called here, but it's much
// simpler to encapsulate here than to do so at the actual call point in the
// Request implementation.
cb.onResourceReady(engineResource, dataSource, isLoadedFromAlternateCacheKey);
} catch (Throwable t) {
throw new CallbackException(t);
}
}
@SuppressWarnings("WeakerAccess")
@Synthetic
@GuardedBy("this")
void callCallbackOnLoadFailed(ResourceCallback cb) {
// This is overly broad, some Glide code is actually called here, but it's much
// simpler to encapsulate here than to do so at the actual call point in the Request
// implementation.
try {
cb.onLoadFailed(exception);
} catch (Throwable t) {
throw new CallbackException(t);
}
}
synchronized void removeCallback(ResourceCallback cb) {
stateVerifier.throwIfRecycled();
cbs.remove(cb);
if (cbs.isEmpty()) {
cancel();
boolean isFinishedRunning = hasResource || hasLoadFailed;
if (isFinishedRunning && pendingCallbacks.get() == 0) {
release();
}
}
}
boolean onlyRetrieveFromCache() {
return onlyRetrieveFromCache;
}
private GlideExecutor getActiveSourceExecutor() {
return useUnlimitedSourceGeneratorPool
? sourceUnlimitedExecutor
: (useAnimationPool ? animationExecutor : sourceExecutor);
}
// Exposed for testing.
void cancel() {
if (isDone()) {
return;
}
isCancelled = true;
decodeJob.cancel();
engineJobListener.onEngineJobCancelled(this, key);
}
// Exposed for testing.
synchronized boolean isCancelled() {
return isCancelled;
}
private boolean isDone() {
return hasLoadFailed || hasResource || isCancelled;
}
// We have to post Runnables in a loop. Typically there will be very few callbacks. AccessorMethod
// seems to be a false positive
@SuppressWarnings({
"WeakerAccess",
"PMD.AvoidInstantiatingObjectsInLoops",
"PMD.AccessorMethodGeneration"
})
@Synthetic
void notifyCallbacksOfResult() {
ResourceCallbacksAndExecutors copy;
Key localKey;
EngineResource<?> localResource;
synchronized (this) {
stateVerifier.throwIfRecycled();
if (isCancelled) {
// TODO: Seems like we might as well put this in the memory cache instead of just recycling
// it since we've gotten this far...
resource.recycle();
release();
return;
} else if (cbs.isEmpty()) {
throw new IllegalStateException("Received a resource without any callbacks to notify");
} else if (hasResource) {
throw new IllegalStateException("Already have resource");
}
engineResource = engineResourceFactory.build(resource, isCacheable, key, resourceListener);
// Hold on to resource for duration of our callbacks below so we don't recycle it in the
// middle of notifying if it synchronously released by one of the callbacks. Acquire it under
// a lock here so that any newly added callback that executes before the next locked section
// below can't recycle the resource before we call the callbacks.
hasResource = true;
copy = cbs.copy();
incrementPendingCallbacks(copy.size() + 1);
localKey = key;
localResource = engineResource;
}
engineJobListener.onEngineJobComplete(this, localKey, localResource);
for (final ResourceCallbackAndExecutor entry : copy) {
entry.executor.execute(new CallResourceReady(entry.cb));
}
decrementPendingCallbacks();
}
@SuppressWarnings("WeakerAccess")
@Synthetic
synchronized void incrementPendingCallbacks(int count) {
Preconditions.checkArgument(isDone(), "Not yet complete!");
if (pendingCallbacks.getAndAdd(count) == 0 && engineResource != null) {
engineResource.acquire();
}
}
@SuppressWarnings("WeakerAccess")
@Synthetic
void decrementPendingCallbacks() {
EngineResource<?> toRelease = null;
synchronized (this) {
stateVerifier.throwIfRecycled();
Preconditions.checkArgument(isDone(), "Not yet complete!");
int decremented = pendingCallbacks.decrementAndGet();
Preconditions.checkArgument(decremented >= 0, "Can't decrement below 0");
if (decremented == 0) {
toRelease = engineResource;
release();
}
}
if (toRelease != null) {
toRelease.release();
}
}
private synchronized void release() {
if (key == null) {
throw new IllegalArgumentException();
}
cbs.clear();
key = null;
engineResource = null;
resource = null;
hasLoadFailed = false;
isCancelled = false;
hasResource = false;
isLoadedFromAlternateCacheKey = false;
decodeJob.release(/* isRemovedFromQueue= */ false);
decodeJob = null;
exception = null;
dataSource = null;
pool.release(this);
}
@Override
public void onResourceReady(
Resource<R> resource, DataSource dataSource, boolean isLoadedFromAlternateCacheKey) {
synchronized (this) {
this.resource = resource;
this.dataSource = dataSource;
this.isLoadedFromAlternateCacheKey = isLoadedFromAlternateCacheKey;
}
notifyCallbacksOfResult();
}
@Override
public void onLoadFailed(GlideException e) {
synchronized (this) {
this.exception = e;
}
notifyCallbacksOfException();
}
@Override
public void reschedule(DecodeJob<?> job) {
// Even if the job is cancelled here, it still needs to be scheduled so that it can clean itself
// up.
getActiveSourceExecutor().execute(job);
}
// We have to post Runnables in a loop. Typically there will be very few callbacks. Acessor method
// warning seems to be false positive.
@SuppressWarnings({
"WeakerAccess",
"PMD.AvoidInstantiatingObjectsInLoops",
"PMD.AccessorMethodGeneration"
})
@Synthetic
void notifyCallbacksOfException() {
ResourceCallbacksAndExecutors copy;
Key localKey;
synchronized (this) {
stateVerifier.throwIfRecycled();
if (isCancelled) {
release();
return;
} else if (cbs.isEmpty()) {
throw new IllegalStateException("Received an exception without any callbacks to notify");
} else if (hasLoadFailed) {
throw new IllegalStateException("Already failed once");
}
hasLoadFailed = true;
localKey = key;
copy = cbs.copy();
// One for each callback below, plus one for ourselves so that we finish if a callback runs on
// another thread before we finish scheduling all of them.
incrementPendingCallbacks(copy.size() + 1);
}
engineJobListener.onEngineJobComplete(this, localKey, /* resource= */ null);
for (ResourceCallbackAndExecutor entry : copy) {
entry.executor.execute(new CallLoadFailed(entry.cb));
}
decrementPendingCallbacks();
}
@NonNull
@Override
public StateVerifier getVerifier() {
return stateVerifier;
}
private | EngineJob |
java | apache__kafka | clients/src/test/java/org/apache/kafka/clients/ClientUtilsTest.java | {
"start": 1825,
"end": 9035
} | class ____ {
@Test
public void testParseAndValidateAddresses() {
checkWithoutLookup("127.0.0.1:8000");
checkWithoutLookup("localhost:8080");
checkWithoutLookup("[::1]:8000");
checkWithoutLookup("[2001:db8:85a3:8d3:1319:8a2e:370:7348]:1234", "localhost:10000");
List<InetSocketAddress> validatedAddresses = checkWithoutLookup("localhost:10000");
assertEquals(1, validatedAddresses.size());
InetSocketAddress onlyAddress = validatedAddresses.get(0);
assertEquals("localhost", onlyAddress.getHostName());
assertEquals(10000, onlyAddress.getPort());
}
@Test
public void testParseAndValidateAddressesWithReverseLookup() {
checkWithoutLookup("127.0.0.1:8000");
checkWithoutLookup("localhost:8080");
checkWithoutLookup("[::1]:8000");
checkWithoutLookup("[2001:db8:85a3:8d3:1319:8a2e:370:7348]:1234", "localhost:10000");
String hostname = "example.com";
Integer port = 10000;
String canonicalHostname1 = "canonical_hostname1";
String canonicalHostname2 = "canonical_hostname2";
try (final MockedStatic<InetAddress> inetAddress = mockStatic(InetAddress.class)) {
InetAddress inetAddress1 = mock(InetAddress.class);
when(inetAddress1.getCanonicalHostName()).thenReturn(canonicalHostname1);
InetAddress inetAddress2 = mock(InetAddress.class);
when(inetAddress2.getCanonicalHostName()).thenReturn(canonicalHostname2);
inetAddress.when(() -> InetAddress.getAllByName(hostname))
.thenReturn(new InetAddress[]{inetAddress1, inetAddress2});
try (MockedConstruction<InetSocketAddress> inetSocketAddress =
mockConstruction(
InetSocketAddress.class,
(mock, context) -> {
when(mock.isUnresolved()).thenReturn(false);
when(mock.getHostName()).thenReturn((String) context.arguments().get(0));
when(mock.getPort()).thenReturn((Integer) context.arguments().get(1));
})
) {
List<InetSocketAddress> validatedAddresses = checkWithLookup(Collections.singletonList(hostname + ":" + port));
assertEquals(2, inetSocketAddress.constructed().size());
assertEquals(2, validatedAddresses.size());
assertTrue(validatedAddresses.containsAll(List.of(
inetSocketAddress.constructed().get(0),
inetSocketAddress.constructed().get(1)))
);
validatedAddresses.forEach(address -> assertEquals(port, address.getPort()));
validatedAddresses.stream().map(InetSocketAddress::getHostName).forEach(
hostName -> assertTrue(List.of(canonicalHostname1, canonicalHostname2).contains(hostName))
);
}
}
}
@Test
public void testValidBrokerAddress() {
List<String> validBrokerAddress = List.of("localhost:9997", "localhost:9998", "localhost:9999");
assertDoesNotThrow(() -> ClientUtils.parseAndValidateAddresses(validBrokerAddress, ClientDnsLookup.USE_ALL_DNS_IPS));
}
static Stream<List<String>> provideInvalidBrokerAddressTestCases() {
return Stream.of(
List.of("localhost:9997\nlocalhost:9998\nlocalhost:9999"),
List.of("localhost:9997", "localhost:9998", " localhost:9999"),
// Intentionally provide a single string, as users may provide space-separated brokers, which will be parsed as a single string.
List.of("localhost:9997 localhost:9998 localhost:9999")
);
}
@ParameterizedTest
@MethodSource("provideInvalidBrokerAddressTestCases")
public void testInvalidBrokerAddress(List<String> addresses) {
assertThrows(ConfigException.class,
() -> ClientUtils.parseAndValidateAddresses(addresses, ClientDnsLookup.USE_ALL_DNS_IPS));
}
@Test
public void testInvalidConfig() {
assertThrows(IllegalArgumentException.class,
() -> ClientUtils.parseAndValidateAddresses(Collections.singletonList("localhost:10000"), "random.value"));
}
@Test
public void testNoPort() {
assertThrows(ConfigException.class, () -> checkWithoutLookup("127.0.0.1"));
}
@Test
public void testInvalidPort() {
assertThrows(ConfigException.class, () -> checkWithoutLookup("localhost:70000"));
}
@Test
public void testOnlyBadHostname() {
try (MockedConstruction<InetSocketAddress> inetSocketAddress =
mockConstruction(
InetSocketAddress.class,
(mock, context) -> when(mock.isUnresolved()).thenReturn(true)
)
) {
Exception exception = assertThrows(
ConfigException.class,
() -> checkWithoutLookup("some.invalid.hostname.foo.bar.local:9999")
);
assertEquals(
"No resolvable bootstrap urls given in " + CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG,
exception.getMessage()
);
}
}
@Test
public void testFilterPreferredAddresses() throws UnknownHostException {
InetAddress ipv4 = InetAddress.getByName("192.0.0.1");
InetAddress ipv6 = InetAddress.getByName("::1");
InetAddress[] ipv4First = new InetAddress[]{ipv4, ipv6, ipv4};
List<InetAddress> result = ClientUtils.filterPreferredAddresses(ipv4First);
assertTrue(result.contains(ipv4));
assertFalse(result.contains(ipv6));
assertEquals(2, result.size());
InetAddress[] ipv6First = new InetAddress[]{ipv6, ipv4, ipv4};
result = ClientUtils.filterPreferredAddresses(ipv6First);
assertTrue(result.contains(ipv6));
assertFalse(result.contains(ipv4));
assertEquals(1, result.size());
}
@Test
public void testResolveUnknownHostException() {
HostResolver throwingHostResolver = host -> {
throw new UnknownHostException();
};
assertThrows(
UnknownHostException.class,
() -> ClientUtils.resolve("some.invalid.hostname.foo.bar.local", throwingHostResolver)
);
}
@Test
public void testResolveDnsLookup() throws UnknownHostException {
InetAddress[] addresses = new InetAddress[] {
InetAddress.getByName("198.51.100.0"), InetAddress.getByName("198.51.100.5")
};
HostResolver hostResolver = new AddressChangeHostResolver(addresses, addresses);
assertEquals(asList(addresses), ClientUtils.resolve("kafka.apache.org", hostResolver));
}
private List<InetSocketAddress> checkWithoutLookup(String... url) {
return ClientUtils.parseAndValidateAddresses(asList(url), ClientDnsLookup.USE_ALL_DNS_IPS);
}
private List<InetSocketAddress> checkWithLookup(List<String> url) {
return ClientUtils.parseAndValidateAddresses(url, ClientDnsLookup.RESOLVE_CANONICAL_BOOTSTRAP_SERVERS_ONLY);
}
}
| ClientUtilsTest |
java | apache__kafka | streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamSessionWindowAggregate.java | {
"start": 2850,
"end": 4731
} | class ____<KIn, VIn, VAgg> implements KStreamAggProcessorSupplier<KIn, VIn, Windowed<KIn>, VAgg> {
private static final Logger LOG = LoggerFactory.getLogger(KStreamSessionWindowAggregate.class);
private final String storeName;
private final StoreFactory storeFactory;
private final SessionWindows windows;
private final Initializer<VAgg> initializer;
private final Aggregator<? super KIn, ? super VIn, VAgg> aggregator;
private final Merger<? super KIn, VAgg> sessionMerger;
private final EmitStrategy emitStrategy;
private boolean sendOldValues = false;
public KStreamSessionWindowAggregate(final SessionWindows windows,
final StoreFactory storeFactory,
final EmitStrategy emitStrategy,
final Initializer<VAgg> initializer,
final Aggregator<? super KIn, ? super VIn, VAgg> aggregator,
final Merger<? super KIn, VAgg> sessionMerger) {
this.windows = windows;
this.storeName = storeFactory.storeName();
this.storeFactory = storeFactory;
this.emitStrategy = emitStrategy;
this.initializer = initializer;
this.aggregator = aggregator;
this.sessionMerger = sessionMerger;
}
@Override
public Set<StoreBuilder<?>> stores() {
return Collections.singleton(new FactoryWrappingStoreBuilder<>(storeFactory));
}
@Override
public Processor<KIn, VIn, Windowed<KIn>, Change<VAgg>> get() {
return new KStreamSessionWindowAggregateProcessor();
}
public SessionWindows windows() {
return windows;
}
@Override
public void enableSendingOldValues() {
sendOldValues = true;
}
private | KStreamSessionWindowAggregate |
java | apache__camel | components/camel-mllp/src/test/java/org/apache/camel/component/mllp/MllpProducerConsumerLoopbackInOnlyTest.java | {
"start": 1447,
"end": 3502
} | class ____ extends CamelTestSupport {
@EndpointInject("direct://source")
ProducerTemplate source;
@EndpointInject("mock://received-and-processed")
MockEndpoint receivedAndProcessed;
@Override
protected CamelContext createCamelContext() throws Exception {
DefaultCamelContext context = (DefaultCamelContext) super.createCamelContext();
context.setUseMDCLogging(false);
context.getCamelContextExtension().setName(this.getClass().getSimpleName());
return context;
}
@Override
protected RouteBuilder[] createRouteBuilders() {
String mllpHost = "localhost";
int mllpPort = AvailablePortFinder.getNextAvailable();
return new RouteBuilder[] {
new RouteBuilder() {
@Override
public void configure() {
fromF("mllp://%s:%d?autoAck=false&exchangePattern=InOnly", mllpHost, mllpPort)
.convertBodyTo(String.class)
.to(receivedAndProcessed);
}
},
new RouteBuilder() {
@Override
public void configure() {
from(source.getDefaultEndpoint())
.toF("mllp://%s:%d?exchangePattern=InOnly", mllpHost, mllpPort)
.setBody(header(MllpConstants.MLLP_ACKNOWLEDGEMENT));
}
}
};
}
@Test
public void testLoopbackWithOneMessage() throws Exception {
String testMessage = Hl7TestMessageGenerator.generateMessage();
receivedAndProcessed.expectedBodiesReceived(testMessage);
String acknowledgement = source.requestBody((Object) testMessage, String.class);
assertThat("Should receive no acknowledgment for message 1", acknowledgement, CoreMatchers.nullValue());
MockEndpoint.assertIsSatisfied(context, 60, TimeUnit.SECONDS);
}
}
| MllpProducerConsumerLoopbackInOnlyTest |
java | apache__hadoop | hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/ClusterTopologyReader.java | {
"start": 1104,
"end": 2554
} | class ____ {
private LoggedNetworkTopology topology;
private void readTopology(JsonObjectMapperParser<LoggedNetworkTopology> parser)
throws IOException {
try {
topology = parser.getNext();
if (topology == null) {
throw new IOException(
"Input file does not contain valid topology data.");
}
} finally {
parser.close();
}
}
/**
* Constructor.
*
* @param path
* Path to the JSON-encoded topology file, possibly compressed.
* @param conf
* @throws IOException
*/
public ClusterTopologyReader(Path path, Configuration conf)
throws IOException {
JsonObjectMapperParser<LoggedNetworkTopology> parser = new JsonObjectMapperParser<LoggedNetworkTopology>(
path, LoggedNetworkTopology.class, conf);
readTopology(parser);
}
/**
* Constructor.
*
* @param input
* The input stream for the JSON-encoded topology data.
*/
public ClusterTopologyReader(InputStream input) throws IOException {
JsonObjectMapperParser<LoggedNetworkTopology> parser = new JsonObjectMapperParser<LoggedNetworkTopology>(
input, LoggedNetworkTopology.class);
readTopology(parser);
}
/**
* Get the {@link LoggedNetworkTopology} object.
*
* @return The {@link LoggedNetworkTopology} object parsed from the input.
*/
public LoggedNetworkTopology get() {
return topology;
}
}
| ClusterTopologyReader |
java | quarkusio__quarkus | integration-tests/opentelemetry-grpc/src/main/java/io/quarkus/it/opentelemetry/grpc/ExporterResource.java | {
"start": 486,
"end": 1052
} | class ____ {
@Inject
InMemorySpanExporter inMemorySpanExporter;
@GET
@Path("/reset")
public Response reset() {
inMemorySpanExporter.reset();
return Response.ok().build();
}
@GET
@Path("/export")
public List<SpanData> export() {
return inMemorySpanExporter.getFinishedSpanItems()
.stream()
.filter(sd -> !sd.getName().contains("export") && !sd.getName().contains("reset"))
.collect(Collectors.toList());
}
@ApplicationScoped
static | ExporterResource |
java | apache__camel | components/camel-telemetry/src/test/java/org/apache/camel/telemetry/decorators/AzureServiceBusSpanDecoratorTest.java | {
"start": 1202,
"end": 5308
} | class ____ {
@Test
public void testGetMessageId() {
String messageId = "abcd";
Exchange exchange = Mockito.mock(Exchange.class);
Message message = Mockito.mock(Message.class);
Mockito.when(exchange.getIn()).thenReturn(message);
Mockito.when(message.getHeader(AzureServiceBusSpanDecorator.MESSAGE_ID, String.class)).thenReturn(messageId);
AbstractMessagingSpanDecorator decorator = new AzureServiceBusSpanDecorator();
assertEquals(messageId, decorator.getMessageId(exchange));
}
@Test
public void testPre() {
String contentType = "application/json";
String correlationId = "1234";
Long deliveryCount = 27L;
Long enqueuedSequenceNumber = 1L;
OffsetDateTime enqueuedTime = OffsetDateTime.now();
OffsetDateTime expiresAt = OffsetDateTime.now();
String partitionKey = "MyPartitionKey";
String replyToSessionId = "MyReplyToSessionId";
String sessionId = "4321";
Duration ttl = Duration.ofDays(7);
Endpoint endpoint = Mockito.mock(Endpoint.class);
Exchange exchange = Mockito.mock(Exchange.class);
Message message = Mockito.mock(Message.class);
Mockito.when(endpoint.getEndpointUri()).thenReturn("azure-servicebus:topicOrQueueName");
Mockito.when(exchange.getIn()).thenReturn(message);
Mockito.when(message.getHeader(AzureServiceBusSpanDecorator.CONTENT_TYPE, String.class)).thenReturn(contentType);
Mockito.when(message.getHeader(AzureServiceBusSpanDecorator.CORRELATION_ID, String.class)).thenReturn(correlationId);
Mockito.when(message.getHeader(AzureServiceBusSpanDecorator.DELIVERY_COUNT, Long.class)).thenReturn(deliveryCount);
Mockito.when(message.getHeader(AzureServiceBusSpanDecorator.ENQUEUED_SEQUENCE_NUMBER, Long.class))
.thenReturn(enqueuedSequenceNumber);
Mockito.when(message.getHeader(AzureServiceBusSpanDecorator.ENQUEUED_TIME, OffsetDateTime.class))
.thenReturn(enqueuedTime);
Mockito.when(message.getHeader(AzureServiceBusSpanDecorator.EXPIRES_AT, OffsetDateTime.class)).thenReturn(expiresAt);
Mockito.when(message.getHeader(AzureServiceBusSpanDecorator.PARTITION_KEY, String.class)).thenReturn(partitionKey);
Mockito.when(message.getHeader(AzureServiceBusSpanDecorator.REPLY_TO_SESSION_ID, String.class))
.thenReturn(replyToSessionId);
Mockito.when(message.getHeader(AzureServiceBusSpanDecorator.SESSION_ID, String.class)).thenReturn(sessionId);
Mockito.when(message.getHeader(AzureServiceBusSpanDecorator.TIME_TO_LIVE, Duration.class)).thenReturn(ttl);
AbstractMessagingSpanDecorator decorator = new AzureServiceBusSpanDecorator();
MockSpanAdapter span = new MockSpanAdapter();
decorator.beforeTracingEvent(span, exchange, endpoint);
assertEquals(contentType, span.tags().get(AzureServiceBusSpanDecorator.SERVICEBUS_CONTENT_TYPE));
assertEquals(correlationId, span.tags().get(AzureServiceBusSpanDecorator.SERVICEBUS_CORRELATION_ID));
assertEquals(deliveryCount.toString(), span.tags().get(AzureServiceBusSpanDecorator.SERVICEBUS_DELIVERY_COUNT));
assertEquals(enqueuedSequenceNumber.toString(),
span.tags().get(AzureServiceBusSpanDecorator.SERVICEBUS_ENQUEUED_SEQUENCE_NUMBER));
assertEquals(enqueuedTime.toString(), span.tags().get(AzureServiceBusSpanDecorator.SERVICEBUS_ENQUEUED_TIME));
assertEquals(expiresAt.toString(), span.tags().get(AzureServiceBusSpanDecorator.SERVICEBUS_EXPIRES_AT));
assertEquals(partitionKey, span.tags().get(AzureServiceBusSpanDecorator.SERVICEBUS_PARTITION_KEY));
assertEquals(replyToSessionId, span.tags().get(AzureServiceBusSpanDecorator.SERVICEBUS_REPLY_TO_SESSION_ID));
assertEquals(sessionId, span.tags().get(AzureServiceBusSpanDecorator.SERVICEBUS_SESSION_ID));
assertEquals(ttl.toString(), span.tags().get(AzureServiceBusSpanDecorator.SERVICEBUS_TIME_TO_LIVE));
}
}
| AzureServiceBusSpanDecoratorTest |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/error/ShouldHaveDateField_create_Test.java | {
"start": 1108,
"end": 2271
} | class ____ {
@Test
void should_create_error_message_for_date() {
// GIVEN
Date date = parse("2015-12-31");
ErrorMessageFactory factory = shouldHaveDateField(date, "month", 10);
// WHEN
String message = factory.create(new TextDescription("Test"), STANDARD_REPRESENTATION);
// THEN
then(message).isEqualTo(format("[Test] %n" +
"Expecting actual:%n" +
" 2015-12-31T00:00:00.000 (java.util.Date)%n" +
"to be on \"month\" 10"));
}
@Test
void should_create_error_message_for_local_date() {
// GIVEN
LocalDate date = LocalDate.of(2015, 12, 31);
ErrorMessageFactory factory = shouldHaveDateField(date, "year", 2000);
// WHEN
String message = factory.create(new TextDescription("Test"), STANDARD_REPRESENTATION);
// THEN
then(message).isEqualTo(format("[Test] %n" +
"Expecting actual:%n" +
" 2015-12-31 (java.time.LocalDate)%n" +
"to be on \"year\" 2000"));
}
}
| ShouldHaveDateField_create_Test |
java | apache__kafka | trogdor/src/main/java/org/apache/kafka/trogdor/rest/DestroyTaskRequest.java | {
"start": 1035,
"end": 1287
} | class ____ extends Message {
private final String id;
@JsonCreator
public DestroyTaskRequest(@JsonProperty("id") String id) {
this.id = id;
}
@JsonProperty
public String id() {
return id;
}
}
| DestroyTaskRequest |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/QueueManagementChange.java | {
"start": 4088,
"end": 4589
} | class ____ extends QueueManagementChange {
public UpdateQueue(final CSQueue queue, QueueState targetQueueState,
final AutoCreatedLeafQueueConfig
queueTemplateUpdate) {
super(queue, QueueAction.UPDATE_QUEUE, targetQueueState,
queueTemplateUpdate);
}
public UpdateQueue(final CSQueue queue,
final AutoCreatedLeafQueueConfig
queueTemplateUpdate) {
super(queue, QueueAction.UPDATE_QUEUE, queueTemplateUpdate);
}
}
}
| UpdateQueue |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.