language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/component/genericinheritance/EmbeddableGenericsAndInterfaceTest.java | {
"start": 950,
"end": 2305
} | class ____ {
@BeforeAll
public void setUp(SessionFactoryScope scope) {
scope.inTransaction( session -> {
final UserEntity user = new UserEntity();
user.setName( "Debbie" );
session.persist( user );
final ExampleEmbedded<?> embedded = new ExampleEmbedded<>();
embedded.setUser( user );
final ExampleEntity entity = new ExampleEntity();
entity.setExampleEmbedded( embedded );
session.persist( entity );
} );
}
@AfterAll
public void tearDown(SessionFactoryScope scope) {
scope.inTransaction( session -> {
session.createMutationQuery( "delete from ExampleEntity" ).executeUpdate();
session.createMutationQuery( "delete from UserEntity" ).executeUpdate();
} );
}
@Test
public void testMetamodelCriteria(SessionFactoryScope scope) {
scope.inTransaction( session -> {
final CriteriaBuilder cb = session.getCriteriaBuilder();
final CriteriaQuery<ExampleEntity> cq = cb.createQuery( ExampleEntity.class );
final Root<ExampleEntity> root = cq.from( ExampleEntity.class );
cq.select( root ).where( cb.isNotNull(
root.get( ExampleEntity_.exampleEmbedded ).get( ExampleEmbedded_.user )
) );
final ExampleEntity result = session.createQuery( cq ).getSingleResult();
assertThat( result.getExampleEmbedded().getUser().getName() ).isEqualTo( "Debbie" );
} );
}
}
| EmbeddableGenericsAndInterfaceTest |
java | apache__camel | components/camel-caffeine/src/test/java/org/apache/camel/component/caffeine/cache/CaffeineSendDynamicAwareTest.java | {
"start": 1061,
"end": 2305
} | class ____ extends CaffeineCacheTestSupport {
@Test
public void testSendDynamic() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedBodiesReceived("VALUE_1");
template.sendBodyAndHeaders("direct:start", "Hello World",
Map.of("action1", "PUT", "action2", "GET", "myKey", "foobar"));
MockEndpoint.assertIsSatisfied(context);
// there are 1 caffeine endpoints
int count = (int) context.getEndpoints().stream()
.filter(e -> e.getEndpointUri().startsWith("caffeine-cache")).count();
Assertions.assertEquals(1, count);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
from("direct://start")
.setBody(constant("VALUE_1"))
.toD("caffeine-cache://mycache?action=${header.action1}&key=${header.myKey}")
.setBody(constant("VALUE_2"))
.toD("caffeine-cache://mycache?key=${header.myKey}&action=${header.action2}")
.to("mock:result");
}
};
}
}
| CaffeineSendDynamicAwareTest |
java | elastic__elasticsearch | modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregator.java | {
"start": 1103,
"end": 2367
} | class ____ extends ParentJoinAggregator {
static final ParseField TYPE_FIELD = new ParseField("type");
public ParentToChildrenAggregator(
String name,
AggregatorFactories factories,
AggregationContext context,
Aggregator parent,
Query childFilter,
Query parentFilter,
ValuesSource.Bytes.WithOrdinals valuesSource,
long maxOrd,
CardinalityUpperBound cardinality,
Map<String, Object> metadata
) throws IOException {
super(name, factories, context, parent, parentFilter, childFilter, valuesSource, maxOrd, cardinality, metadata);
}
@Override
public InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOException {
return buildAggregationsForSingleBucket(
owningBucketOrds,
(owningBucketOrd, subAggregationResults) -> new InternalChildren(
name,
bucketDocCount(owningBucketOrd),
subAggregationResults,
metadata()
)
);
}
@Override
public InternalAggregation buildEmptyAggregation() {
return new InternalChildren(name, 0, buildEmptySubAggregations(), metadata());
}
}
| ParentToChildrenAggregator |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java | {
"start": 2300,
"end": 18981
} | class ____ {
public static long maxLockHoldToReleaseLeaseMs = 100;
@Test
public void testRemoveLeases() throws Exception {
FSNamesystem fsn = mock(FSNamesystem.class);
LeaseManager lm = new LeaseManager(fsn);
ArrayList<Long> ids = Lists.newArrayList(INodeId.ROOT_INODE_ID + 1,
INodeId.ROOT_INODE_ID + 2, INodeId.ROOT_INODE_ID + 3,
INodeId.ROOT_INODE_ID + 4);
for (long id : ids) {
lm.addLease("foo", id);
}
assertEquals(4, lm.getINodeIdWithLeases().size());
for (long id : ids) {
lm.removeLease(id);
}
assertEquals(0, lm.getINodeIdWithLeases().size());
}
/** Check that LeaseManager.checkLease release some leases
*/
@Test
public void testCheckLease() throws InterruptedException {
LeaseManager lm = new LeaseManager(makeMockFsNameSystem());
final long numLease = 100;
final long expiryTime = 0;
final long waitTime = expiryTime + 1;
//Make sure the leases we are going to add exceed the hard limit
lm.setLeasePeriod(expiryTime, expiryTime);
for (long i = 0; i <= numLease - 1; i++) {
//Add some leases to the LeaseManager
lm.addLease("holder"+i, INodeId.ROOT_INODE_ID + i);
}
assertEquals(numLease, lm.countLease());
Thread.sleep(waitTime);
//Initiate a call to checkLease. This should exit within the test timeout
lm.checkLeases();
assertTrue(lm.countLease() < numLease);
}
/**
* Test whether the internal lease holder name is updated properly.
*/
@Test
public void testInternalLeaseHolder() throws Exception {
LeaseManager lm = new LeaseManager(makeMockFsNameSystem());
// Set the hard lease limit to 500ms.
lm.setLeasePeriod(100L, 500L);
String holder = lm.getInternalLeaseHolder();
Thread.sleep(1000);
assertNotEquals(holder, lm.getInternalLeaseHolder());
}
@Test
public void testCountPath() {
LeaseManager lm = new LeaseManager(makeMockFsNameSystem());
lm.addLease("holder1", 1);
assertThat(lm.countPath()).isEqualTo(1L);
lm.addLease("holder2", 2);
assertThat(lm.countPath()).isEqualTo(2L);
lm.addLease("holder2", 2); // Duplicate addition
assertThat(lm.countPath()).isEqualTo(2L);
assertThat(lm.countPath()).isEqualTo(2L);
// Remove a couple of non-existing leases. countPath should not change.
lm.removeLease("holder2", stubInodeFile(3));
lm.removeLease("InvalidLeaseHolder", stubInodeFile(1));
assertThat(lm.countPath()).isEqualTo(2L);
INodeFile file = stubInodeFile(1);
lm.reassignLease(lm.getLease(file), file, "holder2");
assertThat(lm.countPath()).isEqualTo(2L); // Count unchanged on reassign
lm.removeLease("holder2", stubInodeFile(2)); // Remove existing
assertThat(lm.countPath()).isEqualTo(1L);
}
/**
* Make sure the lease is restored even if only the inode has the record.
*/
@Test
public void testLeaseRestorationOnRestart() throws Exception {
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(new HdfsConfiguration())
.numDataNodes(1).build();
DistributedFileSystem dfs = cluster.getFileSystem();
// Create an empty file
String path = "/testLeaseRestorationOnRestart";
FSDataOutputStream out = dfs.create(new Path(path));
// Remove the lease from the lease manager, but leave it in the inode.
FSDirectory dir = cluster.getNamesystem().getFSDirectory();
INodeFile file = dir.getINode(path).asFile();
cluster.getNamesystem().leaseManager.removeLease(
file.getFileUnderConstructionFeature().getClientName(), file);
// Save a fsimage.
dfs.setSafeMode(SafeModeAction.ENTER);
cluster.getNameNodeRpc().saveNamespace(0,0);
dfs.setSafeMode(SafeModeAction.LEAVE);
// Restart the namenode.
cluster.restartNameNode(true);
// Check whether the lease manager has the lease
dir = cluster.getNamesystem().getFSDirectory();
file = dir.getINode(path).asFile();
assertTrue(cluster.getNamesystem().leaseManager.getLease(file) != null,
"Lease should exist.");
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
/**
* Test leased files counts from
* {@link LeaseManager#getINodeWithLeases()},
* {@link LeaseManager#getINodeIdWithLeases()} and
* {@link LeaseManager#getINodeWithLeases(INodeDirectory)}.
*/
@Test
@Timeout(value = 60)
public void testInodeWithLeases() throws Exception {
FSNamesystem fsNamesystem = makeMockFsNameSystem();
when(fsNamesystem.getMaxListOpenFilesResponses()).thenReturn(1024);
FSDirectory fsDirectory = fsNamesystem.getFSDirectory();
LeaseManager lm = new LeaseManager(fsNamesystem);
Set<Long> iNodeIds = new HashSet<>(Arrays.asList(
INodeId.ROOT_INODE_ID + 1,
INodeId.ROOT_INODE_ID + 2,
INodeId.ROOT_INODE_ID + 3,
INodeId.ROOT_INODE_ID + 4
));
final PermissionStatus perm = PermissionStatus.createImmutable(
"user", "group", FsPermission.createImmutable((short)0755));
INodeDirectory rootInodeDirectory = new INodeDirectory(
HdfsConstants.GRANDFATHER_INODE_ID, DFSUtil.string2Bytes(""),
perm, 0L);
when(fsDirectory.getRoot()).thenReturn(rootInodeDirectory);
verifyINodeLeaseCounts(fsNamesystem, lm, rootInodeDirectory, 0, 0, 0);
for (Long iNodeId : iNodeIds) {
INodeFile iNodeFile = stubInodeFile(iNodeId);
iNodeFile.toUnderConstruction("hbase", "gce-100");
iNodeFile.setParent(rootInodeDirectory);
when(fsDirectory.getInode(iNodeId)).thenReturn(iNodeFile);
lm.addLease("holder_" + iNodeId, iNodeId);
}
verifyINodeLeaseCounts(fsNamesystem, lm, rootInodeDirectory,
iNodeIds.size(), iNodeIds.size(), iNodeIds.size());
for (Long iNodeId : iNodeIds) {
lm.removeLease(iNodeId);
}
verifyINodeLeaseCounts(fsNamesystem, lm, rootInodeDirectory, 0, 0, 0);
}
/**
* Test leased files counts at various scale from
* {@link LeaseManager#getINodeWithLeases()},
* {@link LeaseManager#getINodeIdWithLeases()} and
* {@link LeaseManager#getINodeWithLeases(INodeDirectory)}.
*/
@Test
@Timeout(value = 240)
public void testInodeWithLeasesAtScale() throws Exception {
FSNamesystem fsNamesystem = makeMockFsNameSystem();
when(fsNamesystem.getMaxListOpenFilesResponses()).thenReturn(4096);
FSDirectory fsDirectory = fsNamesystem.getFSDirectory();
LeaseManager lm = new LeaseManager(fsNamesystem);
final PermissionStatus perm = PermissionStatus.createImmutable(
"user", "group", FsPermission.createImmutable((short)0755));
INodeDirectory rootInodeDirectory = new INodeDirectory(
HdfsConstants.GRANDFATHER_INODE_ID, DFSUtil.string2Bytes(""),
perm, 0L);
when(fsDirectory.getRoot()).thenReturn(rootInodeDirectory);
// Case 1: No open files
int scale = 0;
testInodeWithLeasesAtScaleImpl(fsNamesystem, lm, fsDirectory,
rootInodeDirectory, scale);
for (int workerCount = 1;
workerCount <= LeaseManager.INODE_FILTER_WORKER_COUNT_MAX / 2;
workerCount++) {
// Case 2: Open files count is half of worker task size
scale = workerCount * LeaseManager.INODE_FILTER_WORKER_TASK_MIN / 2;
testInodeWithLeasesAtScaleImpl(fsNamesystem, lm, fsDirectory,
rootInodeDirectory, scale);
// Case 3: Open files count is 1 less of worker task size
scale = workerCount * LeaseManager.INODE_FILTER_WORKER_TASK_MIN - 1;
testInodeWithLeasesAtScaleImpl(fsNamesystem, lm, fsDirectory,
rootInodeDirectory, scale);
// Case 4: Open files count is equal to worker task size
scale = workerCount * LeaseManager.INODE_FILTER_WORKER_TASK_MIN;
testInodeWithLeasesAtScaleImpl(fsNamesystem, lm, fsDirectory,
rootInodeDirectory, scale);
// Case 5: Open files count is 1 more than worker task size
scale = workerCount * LeaseManager.INODE_FILTER_WORKER_TASK_MIN + 1;
testInodeWithLeasesAtScaleImpl(fsNamesystem, lm, fsDirectory,
rootInodeDirectory, scale);
}
// Case 6: Open files count is way more than worker count
scale = 1279;
testInodeWithLeasesAtScaleImpl(fsNamesystem, lm, fsDirectory,
rootInodeDirectory, scale);
}
private void testInodeWithLeasesAtScaleImpl(FSNamesystem fsNamesystem,
final LeaseManager leaseManager, final FSDirectory fsDirectory,
INodeDirectory ancestorDirectory, int scale) throws IOException {
verifyINodeLeaseCounts(
fsNamesystem, leaseManager, ancestorDirectory, 0, 0, 0);
Set<Long> iNodeIds = new HashSet<>();
for (int i = 0; i < scale; i++) {
iNodeIds.add(INodeId.ROOT_INODE_ID + i);
}
for (Long iNodeId : iNodeIds) {
INodeFile iNodeFile = stubInodeFile(iNodeId);
iNodeFile.toUnderConstruction("hbase", "gce-100");
iNodeFile.setParent(ancestorDirectory);
when(fsDirectory.getInode(iNodeId)).thenReturn(iNodeFile);
leaseManager.addLease("holder_" + iNodeId, iNodeId);
}
verifyINodeLeaseCounts(fsNamesystem, leaseManager,
ancestorDirectory, iNodeIds.size(), iNodeIds.size(), iNodeIds.size());
leaseManager.removeAllLeases();
verifyINodeLeaseCounts(fsNamesystem, leaseManager,
ancestorDirectory, 0, 0, 0);
}
/**
* Verify leased INode details across lease get and release from
* {@link LeaseManager#getINodeIdWithLeases()} and
* {@link LeaseManager#getINodeWithLeases(INodeDirectory)}.
*/
@Test
@Timeout(value = 60)
public void testInodeWithLeasesForAncestorDir() throws Exception {
FSNamesystem fsNamesystem = makeMockFsNameSystem();
FSDirectory fsDirectory = fsNamesystem.getFSDirectory();
LeaseManager lm = new LeaseManager(fsNamesystem);
final PermissionStatus perm = PermissionStatus.createImmutable(
"user", "group", FsPermission.createImmutable((short)0755));
INodeDirectory rootInodeDirectory = new INodeDirectory(
HdfsConstants.GRANDFATHER_INODE_ID, DFSUtil.string2Bytes(""),
perm, 0L);
when(fsDirectory.getRoot()).thenReturn(rootInodeDirectory);
AtomicInteger inodeIds = new AtomicInteger(
(int) (HdfsConstants.GRANDFATHER_INODE_ID + 1234));
String[] pathTree = new String[] {
"/root.log",
"/ENG/a/a1.log",
"/ENG/a/b/b1.log",
"/ENG/a/b/c/c1.log",
"/ENG/a/b/c/c2.log",
"/OPS/m/m1.log",
"/OPS/m/n/n1.log",
"/OPS/m/n/n2.log"
};
Map<String, INode> pathINodeMap = createINodeTree(rootInodeDirectory,
pathTree, inodeIds);
assertEquals(0, lm.getINodeIdWithLeases().size());
for (Entry<String, INode> entry : pathINodeMap.entrySet()) {
long iNodeId = entry.getValue().getId();
when(fsDirectory.getInode(iNodeId)).thenReturn(entry.getValue());
if (entry.getKey().contains("log")) {
lm.addLease("holder_" + iNodeId, iNodeId);
}
}
assertEquals(pathTree.length, lm.getINodeIdWithLeases().size());
assertEquals(pathTree.length, lm.getINodeWithLeases().size());
assertEquals(pathTree.length, lm.getINodeWithLeases(rootInodeDirectory).size());
// reset
lm.removeAllLeases();
Set<String> filesLeased = new HashSet<>(
Arrays.asList("root.log", "a1.log", "c1.log", "n2.log"));
for (String fileName : filesLeased) {
lm.addLease("holder", pathINodeMap.get(fileName).getId());
}
assertEquals(filesLeased.size(), lm.getINodeIdWithLeases().size());
assertEquals(filesLeased.size(), lm.getINodeWithLeases().size());
Set<INodesInPath> iNodeWithLeases = lm.getINodeWithLeases();
for (INodesInPath iNodesInPath : iNodeWithLeases) {
String leasedFileName = DFSUtil.bytes2String(
iNodesInPath.getLastLocalName());
assertTrue(filesLeased.contains(leasedFileName));
}
assertEquals(filesLeased.size(), lm.getINodeWithLeases(rootInodeDirectory).size());
assertEquals(filesLeased.size() - 2,
lm.getINodeWithLeases(pathINodeMap.get("ENG").asDirectory()).size());
assertEquals(filesLeased.size() - 2,
lm.getINodeWithLeases(pathINodeMap.get("a").asDirectory()).size());
assertEquals(filesLeased.size() - 3,
lm.getINodeWithLeases(pathINodeMap.get("c").asDirectory()).size());
assertEquals(filesLeased.size() - 3,
lm.getINodeWithLeases(pathINodeMap.get("OPS").asDirectory()).size());
assertEquals(filesLeased.size() - 3,
lm.getINodeWithLeases(pathINodeMap.get("n").asDirectory()).size());
lm.removeLease(pathINodeMap.get("n2.log").getId());
assertEquals(filesLeased.size() - 1, lm.getINodeWithLeases(rootInodeDirectory).size());
assertEquals(filesLeased.size() - 4,
lm.getINodeWithLeases(pathINodeMap.get("n").asDirectory()).size());
lm.removeAllLeases();
filesLeased.clear();
assertEquals(filesLeased.size(), lm.getINodeWithLeases(rootInodeDirectory).size());
}
private void verifyINodeLeaseCounts(FSNamesystem fsNamesystem,
LeaseManager leaseManager, INodeDirectory ancestorDirectory,
int iNodeIdWithLeaseCount, int iNodeWithLeaseCount,
int iNodeUnderAncestorLeaseCount) throws IOException {
assertEquals(iNodeIdWithLeaseCount, leaseManager.getINodeIdWithLeases().size());
assertEquals(iNodeWithLeaseCount, leaseManager.getINodeWithLeases().size());
assertEquals(iNodeUnderAncestorLeaseCount,
leaseManager.getINodeWithLeases(ancestorDirectory).size());
assertEquals(iNodeIdWithLeaseCount, leaseManager.getUnderConstructionFiles(0).size());
assertEquals(0,
(fsNamesystem.getFilesBlockingDecom(0, OpenFilesIterator.FILTER_PATH_DEFAULT) == null ? 0
: fsNamesystem.getFilesBlockingDecom(0, OpenFilesIterator.FILTER_PATH_DEFAULT)
.size()));
}
private Map<String, INode> createINodeTree(INodeDirectory parentDir,
String[] pathTree, AtomicInteger inodeId)
throws QuotaExceededException {
HashMap<String, INode> pathINodeMap = new HashMap<>();
for (String path : pathTree) {
byte[][] components = INode.getPathComponents(path);
FsPermission perm = FsPermission.createImmutable((short) 0755);
PermissionStatus permStatus =
PermissionStatus.createImmutable("", "", perm);
INodeDirectory prev = parentDir;
INodeDirectory dir = null;
for (int i = 0; i < components.length - 1; i++) {
byte[] component = components[i];
if (component.length == 0) {
continue;
}
INode existingChild = prev.getChild(
component, Snapshot.CURRENT_STATE_ID);
if (existingChild == null) {
String dirName = DFSUtil.bytes2String(component);
dir = new INodeDirectory(inodeId.incrementAndGet(), component,
permStatus, 0);
prev.addChild(dir, false, Snapshot.CURRENT_STATE_ID);
pathINodeMap.put(dirName, dir);
prev = dir;
} else {
assertTrue(existingChild.isDirectory());
prev = existingChild.asDirectory();
}
}
PermissionStatus p = new PermissionStatus(
"user", "group", new FsPermission((short) 0777));
byte[] fileNameBytes = components[components.length - 1];
String fileName = DFSUtil.bytes2String(fileNameBytes);
INodeFile iNodeFile = new INodeFile(
inodeId.incrementAndGet(), fileNameBytes,
p, 0L, 0L, BlockInfo.EMPTY_ARRAY, (short) 1, 1L);
iNodeFile.setParent(prev);
pathINodeMap.put(fileName, iNodeFile);
}
return pathINodeMap;
}
private static FSNamesystem makeMockFsNameSystem() {
FSDirectory dir = mock(FSDirectory.class);
FSNamesystem fsn = mock(FSNamesystem.class);
when(fsn.isRunning()).thenReturn(true);
when(fsn.hasReadLock()).thenReturn(true);
when(fsn.hasWriteLock()).thenReturn(true);
when(fsn.hasReadLock(RwLockMode.FS)).thenReturn(true);
when(fsn.hasWriteLock(RwLockMode.FS)).thenReturn(true);
when(fsn.hasReadLock(RwLockMode.GLOBAL)).thenReturn(true);
when(fsn.hasWriteLock(RwLockMode.GLOBAL)).thenReturn(true);
when(fsn.getFSDirectory()).thenReturn(dir);
when(fsn.getMaxLockHoldToReleaseLeaseMs()).thenReturn(maxLockHoldToReleaseLeaseMs);
return fsn;
}
private static INodeFile stubInodeFile(long inodeId) {
PermissionStatus p = new PermissionStatus(
"dummy", "dummy", new FsPermission((short) 0777));
return new INodeFile(
inodeId, new String("foo-" + inodeId).getBytes(), p, 0L, 0L,
BlockInfo.EMPTY_ARRAY, (short) 1, 1L);
}
}
| TestLeaseManager |
java | grpc__grpc-java | alts/src/main/java/io/grpc/alts/internal/TsiPeer.java | {
"start": 1453,
"end": 1956
} | class ____<T> {
private final String name;
private final T value;
protected Property(@Nonnull String name, @Nonnull T value) {
this.name = name;
this.value = value;
}
public final T getValue() {
return value;
}
public final String getName() {
return name;
}
@Override
public String toString() {
return String.format("%s=%s", name, value);
}
}
/** A peer property corresponding to a boolean. */
public static final | Property |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/common/io/stream/DelayableWriteable.java | {
"start": 6651,
"end": 10997
} | class ____<T extends Writeable> extends DelayableWriteable<T> {
private final Writeable.Reader<T> reader;
private final TransportVersion serializedAtVersion;
private final NamedWriteableRegistry registry;
private final ReleasableBytesReference serialized;
private Serialized(
Writeable.Reader<T> reader,
TransportVersion serializedAtVersion,
NamedWriteableRegistry registry,
ReleasableBytesReference serialized
) {
this.reader = reader;
this.serializedAtVersion = serializedAtVersion;
this.registry = registry;
this.serialized = serialized;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
if (out.getTransportVersion() == serializedAtVersion) {
/*
* If the version *does* line up we can just copy the bytes
* which is good because this is how shard request caching
* works.
*/
if (out.getTransportVersion().supports(COMPRESS_DELAYABLE_WRITEABLE)) {
out.writeInt(serialized.length());
serialized.writeTo(out);
} else {
out.writeBytesReference(serialized);
}
} else {
/*
* If the version doesn't line up then we have to deserialize
* into the Writeable and re-serialize it against the new
* output stream so it can apply any backwards compatibility
* differences in the wire protocol. This ain't efficient but
* it should be quite rare.
*/
referencing(expand()).writeTo(out);
}
}
@Override
public T expand() {
try {
return deserialize(reader, serializedAtVersion, registry, serialized);
} catch (IOException e) {
throw new RuntimeException("unexpected error expanding serialized delayed writeable", e);
}
}
@Override
public Serialized<T> asSerialized(Reader<T> reader, NamedWriteableRegistry registry) {
return this; // We're already serialized
}
@Override
public boolean isSerialized() {
return true;
}
@Override
public long getSerializedSize() {
// We're already serialized
return serialized.length();
}
@Override
public void close() {
serialized.close();
}
}
/**
* Returns the serialized size in bytes of the provided {@link Writeable}.
*/
public static long getSerializedSize(Writeable ref) {
try (CountingStreamOutput out = new CountingStreamOutput()) {
out.setTransportVersion(TransportVersion.current());
ref.writeTo(out);
return out.size();
} catch (IOException exc) {
throw new UncheckedIOException(exc);
}
}
private static <T> T deserialize(
Reader<T> reader,
TransportVersion serializedAtVersion,
NamedWriteableRegistry registry,
BytesReference serialized
) throws IOException {
try (
StreamInput in = serializedAtVersion.supports(COMPRESS_DELAYABLE_WRITEABLE)
? CompressorFactory.COMPRESSOR.threadLocalStreamInput(serialized.streamInput())
: serialized.streamInput()
) {
return reader.read(wrapWithDeduplicatorStreamInput(in, serializedAtVersion, registry));
}
}
/** Wraps the provided {@link StreamInput} with another stream that extends {@link Deduplicator} */
public static StreamInput wrapWithDeduplicatorStreamInput(
StreamInput in,
TransportVersion serializedAtVersion,
@Nullable NamedWriteableRegistry registry
) {
StreamInput out = registry == null
? new DeduplicateStreamInput(in, new DeduplicatorCache())
: new DeduplicateNamedWriteableAwareStreamInput(in, registry, new DeduplicatorCache());
out.setTransportVersion(serializedAtVersion);
return out;
}
/** An object implementing this | Serialized |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/flogger/FloggerRequiredModifiersTest.java | {
"start": 11693,
"end": 12073
} | interface ____ extends Parent {
static final FluentLogger logger = FluentLogger.forEnclosingClass();
default void go() {
logger.atInfo().log();
}
}
""")
.addOutputLines(
"out/Child.java",
"""
import com.google.common.flogger.FluentLogger;
| Child |
java | apache__camel | components/camel-aws/camel-aws-bedrock/src/main/java/org/apache/camel/component/aws2/bedrock/agent/BedrockAgentOperations.java | {
"start": 866,
"end": 967
} | enum ____ {
startIngestionJob,
listIngestionJobs,
getIngestionJob
}
| BedrockAgentOperations |
java | playframework__playframework | documentation/manual/working/javaGuide/main/http/code/javaguide/http/JavaBodyParsers.java | {
"start": 2820,
"end": 2878
} | class ____ {
public String name;
}
// #composing- | User |
java | apache__camel | components/camel-jetty/src/test/java/org/apache/camel/component/jetty/JettyImageFileTest.java | {
"start": 1299,
"end": 2363
} | class ____ extends BaseJettyTest {
private void sendImageContent(boolean usingGZip) {
Endpoint endpoint = context.getEndpoint("http://localhost:{{port}}/myapp/myservice");
Exchange exchange = endpoint.createExchange();
if (usingGZip) {
exchange.getIn().setHeader(Exchange.CONTENT_ENCODING, "gzip");
}
template.send(endpoint, exchange);
assertNotNull(exchange.getMessage().getBody());
assertEquals("image/jpeg", MessageHelper.getContentType(exchange.getMessage()), "Get a wrong content-type");
}
@Test
public void testImageContentType() {
sendImageContent(false);
}
@Test
public void testImageContentWithGZip() {
sendImageContent(true);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
from("jetty:http://localhost:{{port}}/myapp/myservice").process(new MyImageService());
}
};
}
public static | JettyImageFileTest |
java | google__guava | android/guava-tests/test/com/google/common/primitives/LongArrayAsListTest.java | {
"start": 3683,
"end": 3988
} | class ____ extends TestLongListGenerator {
@Override
protected List<Long> create(Long[] elements) {
Long[] prefix = {86L, 99L};
Long[] all = concat(prefix, elements);
return asList(all).subList(2, elements.length + 2);
}
}
public static final | LongsAsListTailSubListGenerator |
java | apache__rocketmq | client/src/main/java/org/apache/rocketmq/client/impl/consumer/RebalancePullImpl.java | {
"start": 1365,
"end": 3924
} | class ____ extends RebalanceImpl {
private final DefaultMQPullConsumerImpl defaultMQPullConsumerImpl;
public RebalancePullImpl(DefaultMQPullConsumerImpl defaultMQPullConsumerImpl) {
this(null, null, null, null, defaultMQPullConsumerImpl);
}
public RebalancePullImpl(String consumerGroup, MessageModel messageModel,
AllocateMessageQueueStrategy allocateMessageQueueStrategy,
MQClientInstance mQClientFactory, DefaultMQPullConsumerImpl defaultMQPullConsumerImpl) {
super(consumerGroup, messageModel, allocateMessageQueueStrategy, mQClientFactory);
this.defaultMQPullConsumerImpl = defaultMQPullConsumerImpl;
}
@Override
public void messageQueueChanged(String topic, Set<MessageQueue> mqAll, Set<MessageQueue> mqDivided) {
MessageQueueListener messageQueueListener = this.defaultMQPullConsumerImpl.getDefaultMQPullConsumer().getMessageQueueListener();
if (messageQueueListener != null) {
try {
messageQueueListener.messageQueueChanged(topic, mqAll, mqDivided);
} catch (Throwable e) {
log.error("messageQueueChanged exception", e);
}
}
}
@Override
public boolean removeUnnecessaryMessageQueue(MessageQueue mq, ProcessQueue pq) {
this.defaultMQPullConsumerImpl.getOffsetStore().persist(mq);
this.defaultMQPullConsumerImpl.getOffsetStore().removeOffset(mq);
return true;
}
@Override
public ConsumeType consumeType() {
return ConsumeType.CONSUME_ACTIVELY;
}
@Override
public void removeDirtyOffset(final MessageQueue mq) {
this.defaultMQPullConsumerImpl.getOffsetStore().removeOffset(mq);
}
@Deprecated
@Override
public long computePullFromWhere(MessageQueue mq) {
return 0;
}
@Override
public long computePullFromWhereWithException(MessageQueue mq) throws MQClientException {
return 0;
}
@Override
public int getConsumeInitMode() {
throw new UnsupportedOperationException("no initMode for Pull");
}
@Override
public void dispatchPullRequest(final List<PullRequest> pullRequestList, final long delay) {
}
@Override
public void dispatchPopPullRequest(final List<PopRequest> pullRequestList, final long delay) {
}
@Override
public ProcessQueue createProcessQueue() {
return new ProcessQueue();
}
@Override
public PopProcessQueue createPopProcessQueue() {
return null;
}
}
| RebalancePullImpl |
java | apache__camel | dsl/camel-jbang/camel-jbang-it/src/test/java/org/apache/camel/dsl/jbang/it/ListServiceITCase.java | {
"start": 978,
"end": 1360
} | class ____ extends JBangTestSupport {
@Test
public void listServicesTest() throws IOException {
copyResourceInDataFolder(TestResources.SERVER_ROUTE);
executeBackground(String.format("run %s/server.yaml", mountPoint()));
checkLogContains("http://0.0.0.0:8080/hello");
checkCommandOutputs("get service", "platform-http");
}
}
| ListServiceITCase |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/testjar/ClassWordCount.java | {
"start": 1214,
"end": 1406
} | class ____ {
/**
* Counts the words in each line.
* For each line of input, break the line into words and emit them as
* (<b>word</b>, <b>1</b>).
*/
public static | ClassWordCount |
java | apache__kafka | clients/src/test/java/org/apache/kafka/common/compress/Lz4CompressionTest.java | {
"start": 5448,
"end": 5871
} | class ____ {
String name;
byte[] payload;
Payload(String name, byte[] payload) {
this.name = name;
this.payload = payload;
}
@Override
public String toString() {
return "Payload{" +
"size=" + payload.length +
", name='" + name + '\'' +
'}';
}
}
private static | Payload |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/annotations/CurrentTimestamp.java | {
"start": 2669,
"end": 3466
} | interface ____ {
/**
* Determines when the timestamp is generated. But default, it is updated
* when any SQL {@code insert} or {@code update} statement is executed.
* If it should be generated just once, on the initial SQL {@code insert},
* explicitly specify {@link EventType#INSERT event = INSERT}.
*/
EventType[] event() default {INSERT, UPDATE, FORCE_INCREMENT};
/**
* Specifies how the timestamp is generated. By default, it is generated
* by the database. Depending on the capabilities of the database and JDBC
* driver, this might require that the value be fetched using a subsequent
* {@code select} statement. Setting {@code source = VM} guarantees that
* this additional {@code select} never occurs.
*/
SourceType source() default SourceType.DB;
}
| CurrentTimestamp |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestFetchFailure.java | {
"start": 2653,
"end": 19614
} | class ____ {
@Test
public void testFetchFailure() throws Exception {
MRApp app = new MRApp(1, 1, false, this.getClass().getName(), true);
Configuration conf = new Configuration();
// map -> reduce -> fetch-failure -> map retry is incompatible with
// sequential, single-task-attempt approach in uber-AM, so disable:
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
Job job = app.submit(conf);
app.waitForState(job, JobState.RUNNING);
//all maps would be running
assertEquals(2, job.getTasks().size(), "Num tasks not correct");
Iterator<Task> it = job.getTasks().values().iterator();
Task mapTask = it.next();
Task reduceTask = it.next();
//wait for Task state move to RUNNING
app.waitForState(mapTask, TaskState.RUNNING);
TaskAttempt mapAttempt1 = mapTask.getAttempts().values().iterator().next();
app.waitForState(mapAttempt1, TaskAttemptState.RUNNING);
//send the done signal to the map attempt
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(mapAttempt1.getID(),
TaskAttemptEventType.TA_DONE));
// wait for map success
app.waitForState(mapTask, TaskState.SUCCEEDED);
final int checkIntervalMillis = 10;
final int waitForMillis = 800;
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
TaskAttemptCompletionEvent[] events = job
.getTaskAttemptCompletionEvents(0, 100);
return events.length >= 1;
}
}, checkIntervalMillis, waitForMillis);
TaskAttemptCompletionEvent[] events =
job.getTaskAttemptCompletionEvents(0, 100);
assertEquals(1, events.length, "Num completion events not correct");
assertEquals(TaskAttemptCompletionEventStatus.SUCCEEDED, events[0].getStatus(),
"Event status not correct");
// wait for reduce to start running
app.waitForState(reduceTask, TaskState.RUNNING);
TaskAttempt reduceAttempt =
reduceTask.getAttempts().values().iterator().next();
app.waitForState(reduceAttempt, TaskAttemptState.RUNNING);
//send 3 fetch failures from reduce to trigger map re execution
sendFetchFailure(app, reduceAttempt, mapAttempt1, "host");
sendFetchFailure(app, reduceAttempt, mapAttempt1, "host");
sendFetchFailure(app, reduceAttempt, mapAttempt1, "host");
//wait for map Task state move back to RUNNING
app.waitForState(mapTask, TaskState.RUNNING);
//map attempt must have become FAILED
assertEquals(TaskAttemptState.FAILED, mapAttempt1.getState(),
"Map TaskAttempt state not correct");
assertEquals(2, mapTask.getAttempts().size(),
"Num attempts in Map Task not correct");
Iterator<TaskAttempt> atIt = mapTask.getAttempts().values().iterator();
atIt.next();
TaskAttempt mapAttempt2 = atIt.next();
app.waitForState(mapAttempt2, TaskAttemptState.RUNNING);
//send the done signal to the second map attempt
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(mapAttempt2.getID(),
TaskAttemptEventType.TA_DONE));
// wait for map success
app.waitForState(mapTask, TaskState.SUCCEEDED);
//send done to reduce
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(reduceAttempt.getID(),
TaskAttemptEventType.TA_DONE));
app.waitForState(job, JobState.SUCCEEDED);
//previous completion event now becomes obsolete
assertEquals(TaskAttemptCompletionEventStatus.OBSOLETE, events[0].getStatus(),
"Event status not correct");
events = job.getTaskAttemptCompletionEvents(0, 100);
assertEquals(4, events.length, "Num completion events not correct");
assertEquals(mapAttempt1.getID(), events[0].getAttemptId(),
"Event map attempt id not correct");
assertEquals(mapAttempt1.getID(), events[1].getAttemptId(),
"Event map attempt id not correct");
assertEquals(mapAttempt2.getID(), events[2].getAttemptId(),
"Event map attempt id not correct");
assertEquals(reduceAttempt.getID(), events[3].getAttemptId(),
"Event reduce attempt id not correct");
assertEquals(TaskAttemptCompletionEventStatus.OBSOLETE, events[0].getStatus(),
"Event status not correct for map attempt1");
assertEquals(TaskAttemptCompletionEventStatus.FAILED, events[1].getStatus(),
"Event status not correct for map attempt1");
assertEquals(TaskAttemptCompletionEventStatus.SUCCEEDED, events[2].getStatus(),
"Event status not correct for map attempt2");
assertEquals(TaskAttemptCompletionEventStatus.SUCCEEDED, events[3].getStatus(),
"Event status not correct for reduce attempt1");
TaskCompletionEvent mapEvents[] =
job.getMapAttemptCompletionEvents(0, 2);
TaskCompletionEvent convertedEvents[] = TypeConverter.fromYarn(events);
assertEquals(2, mapEvents.length, "Incorrect number of map events");
assertArrayEquals(Arrays.copyOfRange(convertedEvents, 0, 2), mapEvents,
"Unexpected map events");
mapEvents = job.getMapAttemptCompletionEvents(2, 200);
assertEquals(1, mapEvents.length, "Incorrect number of map events");
assertEquals(convertedEvents[2], mapEvents[0], "Unexpected map event");
}
/**
 * This tests that if a map attempt was failed (say due to fetch failures),
 * then it gets re-run. When the next map attempt is running, if the AM dies,
 * then, on AM re-run, the AM does not incorrectly remember the first failed
 * attempt. Currently recovery does not recover running tasks. Effectively,
 * the AM re-runs the maps from scratch.
 */
@Test
public void testFetchFailureWithRecovery() throws Exception {
int runCount = 0;
// AM run #1: history-writing app so the restarted AM has something to recover from.
MRApp app = new MRAppWithHistory(1, 1, false, this.getClass().getName(), true, ++runCount);
Configuration conf = new Configuration();
// map -> reduce -> fetch-failure -> map retry is incompatible with
// sequential, single-task-attempt approach in uber-AM, so disable:
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
Job job = app.submit(conf);
app.waitForState(job, JobState.RUNNING);
//all maps would be running
assertEquals(2, job.getTasks().size(), "Num tasks not correct");
// Task iteration order here yields the map first, then the reduce.
Iterator<Task> it = job.getTasks().values().iterator();
Task mapTask = it.next();
Task reduceTask = it.next();
//wait for Task state move to RUNNING
app.waitForState(mapTask, TaskState.RUNNING);
TaskAttempt mapAttempt1 = mapTask.getAttempts().values().iterator().next();
app.waitForState(mapAttempt1, TaskAttemptState.RUNNING);
//send the done signal to the map attempt
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(mapAttempt1.getID(),
TaskAttemptEventType.TA_DONE));
// wait for map success
app.waitForState(mapTask, TaskState.SUCCEEDED);
// The successful map attempt should be the only completion event so far.
TaskAttemptCompletionEvent[] events =
job.getTaskAttemptCompletionEvents(0, 100);
assertEquals(1, events.length, "Num completion events not correct");
assertEquals(TaskAttemptCompletionEventStatus.SUCCEEDED, events[0].getStatus(),
"Event status not correct");
// wait for reduce to start running
app.waitForState(reduceTask, TaskState.RUNNING);
TaskAttempt reduceAttempt =
reduceTask.getAttempts().values().iterator().next();
app.waitForState(reduceAttempt, TaskAttemptState.RUNNING);
//send 3 fetch failures from reduce to trigger map re execution
sendFetchFailure(app, reduceAttempt, mapAttempt1, "host");
sendFetchFailure(app, reduceAttempt, mapAttempt1, "host");
sendFetchFailure(app, reduceAttempt, mapAttempt1, "host");
//wait for map Task state move back to RUNNING
app.waitForState(mapTask, TaskState.RUNNING);
// Crash the app again.
app.stop();
//rerun (AM run #2: recover from the history written by run #1)
app =
new MRAppWithHistory(1, 1, false, this.getClass().getName(), false,
++runCount);
conf = new Configuration();
conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, true);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
job = app.submit(conf);
app.waitForState(job, JobState.RUNNING);
//all maps would be running
assertEquals(2, job.getTasks().size(), "Num tasks not correct");
it = job.getTasks().values().iterator();
mapTask = it.next();
reduceTask = it.next();
// the map is not in a SUCCEEDED state after restart of AM
app.waitForState(mapTask, TaskState.RUNNING);
mapAttempt1 = mapTask.getAttempts().values().iterator().next();
app.waitForState(mapAttempt1, TaskAttemptState.RUNNING);
//send the done signal to the map attempt
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(mapAttempt1.getID(),
TaskAttemptEventType.TA_DONE));
// wait for map success
app.waitForState(mapTask, TaskState.SUCCEEDED);
reduceAttempt = reduceTask.getAttempts().values().iterator().next();
//send done to reduce
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(reduceAttempt.getID(),
TaskAttemptEventType.TA_DONE));
app.waitForState(job, JobState.SUCCEEDED);
// After recovery only the re-run map and the reduce contribute completion
// events; the failed first attempt from run #1 is not remembered.
events = job.getTaskAttemptCompletionEvents(0, 100);
assertEquals(2, events.length, "Num completion events not correct");
}
/**
 * Verifies fetch-failure accounting when multiple reduces report failures
 * against the same map attempt: while reducers are still in the SHUFFLE
 * phase, two reported failures are not enough to re-run the map; after the
 * reducers advance, a third report triggers the map re-execution, fails the
 * original attempt, and updates the completion-event stream accordingly.
 */
@Test
public void testFetchFailureMultipleReduces() throws Exception {
// 1 map, 3 reduces.
MRApp app = new MRApp(1, 3, false, this.getClass().getName(), true);
Configuration conf = new Configuration();
// map -> reduce -> fetch-failure -> map retry is incompatible with
// sequential, single-task-attempt approach in uber-AM, so disable:
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
Job job = app.submit(conf);
app.waitForState(job, JobState.RUNNING);
//all maps would be running
assertEquals(4, job.getTasks().size(), "Num tasks not correct");
// Iteration order: the map task first, then the three reduce tasks.
Iterator<Task> it = job.getTasks().values().iterator();
Task mapTask = it.next();
Task reduceTask = it.next();
Task reduceTask2 = it.next();
Task reduceTask3 = it.next();
//wait for Task state move to RUNNING
app.waitForState(mapTask, TaskState.RUNNING);
TaskAttempt mapAttempt1 = mapTask.getAttempts().values().iterator().next();
app.waitForState(mapAttempt1, TaskAttemptState.RUNNING);
//send the done signal to the map attempt
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(mapAttempt1.getID(),
TaskAttemptEventType.TA_DONE));
// wait for map success
app.waitForState(mapTask, TaskState.SUCCEEDED);
TaskAttemptCompletionEvent[] events =
job.getTaskAttemptCompletionEvents(0, 100);
assertEquals(1, events.length,
"Num completion events not correct");
assertEquals(TaskAttemptCompletionEventStatus.SUCCEEDED, events[0].getStatus(),
"Event status not correct");
// wait for reduce to start running
app.waitForState(reduceTask, TaskState.RUNNING);
app.waitForState(reduceTask2, TaskState.RUNNING);
app.waitForState(reduceTask3, TaskState.RUNNING);
// Report all three reduce attempts as being in the SHUFFLE phase.
TaskAttempt reduceAttempt =
reduceTask.getAttempts().values().iterator().next();
app.waitForState(reduceAttempt, TaskAttemptState.RUNNING);
updateStatus(app, reduceAttempt, Phase.SHUFFLE);
TaskAttempt reduceAttempt2 =
reduceTask2.getAttempts().values().iterator().next();
app.waitForState(reduceAttempt2, TaskAttemptState.RUNNING);
updateStatus(app, reduceAttempt2, Phase.SHUFFLE);
TaskAttempt reduceAttempt3 =
reduceTask3.getAttempts().values().iterator().next();
app.waitForState(reduceAttempt3, TaskAttemptState.RUNNING);
updateStatus(app, reduceAttempt3, Phase.SHUFFLE);
//send 2 fetch failures from reduce to prepare for map re execution
sendFetchFailure(app, reduceAttempt, mapAttempt1, "host1");
sendFetchFailure(app, reduceAttempt2, mapAttempt1, "host2");
//We should not re-launch the map task yet
assertEquals(TaskState.SUCCEEDED, mapTask.getState());
// Advance two reducers to the REDUCE phase before the third report.
updateStatus(app, reduceAttempt2, Phase.REDUCE);
updateStatus(app, reduceAttempt3, Phase.REDUCE);
//send 3rd fetch failures from reduce to trigger map re execution
sendFetchFailure(app, reduceAttempt3, mapAttempt1, "host3");
//wait for map Task state move back to RUNNING
app.waitForState(mapTask, TaskState.RUNNING);
//map attempt must have become FAILED
assertEquals(TaskAttemptState.FAILED, mapAttempt1.getState(),
"Map TaskAttempt state not correct");
// The diagnostic names the last reporter (reduceAttempt3 on host3).
assertThat(mapAttempt1.getDiagnostics().get(0))
.isEqualTo("Too many fetch failures. Failing the attempt. "
+ "Last failure reported by "
+ reduceAttempt3.getID().toString() + " from host host3");
assertEquals(2, mapTask.getAttempts().size(),
"Num attempts in Map Task not correct");
Iterator<TaskAttempt> atIt = mapTask.getAttempts().values().iterator();
atIt.next();
TaskAttempt mapAttempt2 = atIt.next();
app.waitForState(mapAttempt2, TaskAttemptState.RUNNING);
//send the done signal to the second map attempt
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(mapAttempt2.getID(),
TaskAttemptEventType.TA_DONE));
// wait for map success
app.waitForState(mapTask, TaskState.SUCCEEDED);
//send done to reduce
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(reduceAttempt.getID(),
TaskAttemptEventType.TA_DONE));
//send done to reduce
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(reduceAttempt2.getID(),
TaskAttemptEventType.TA_DONE));
//send done to reduce
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(reduceAttempt3.getID(),
TaskAttemptEventType.TA_DONE));
app.waitForState(job, JobState.SUCCEEDED);
//previous completion event now becomes obsolete
assertEquals(TaskAttemptCompletionEventStatus.OBSOLETE, events[0].getStatus(),
"Event status not correct");
// Full event stream: obsolete + failed first map attempt, successful
// second map attempt, then the three reduce attempts.
events = job.getTaskAttemptCompletionEvents(0, 100);
assertEquals(6, events.length, "Num completion events not correct");
assertEquals(mapAttempt1.getID(), events[0].getAttemptId(),
"Event map attempt id not correct");
assertEquals(mapAttempt1.getID(), events[1].getAttemptId(),
"Event map attempt id not correct");
assertEquals(mapAttempt2.getID(), events[2].getAttemptId(),
"Event map attempt id not correct");
assertEquals(reduceAttempt.getID(), events[3].getAttemptId(),
"Event reduce attempt id not correct");
assertEquals(TaskAttemptCompletionEventStatus.OBSOLETE, events[0].getStatus(),
"Event status not correct for map attempt1");
assertEquals(TaskAttemptCompletionEventStatus.FAILED, events[1].getStatus(),
"Event status not correct for map attempt1");
assertEquals(TaskAttemptCompletionEventStatus.SUCCEEDED, events[2].getStatus(),
"Event status not correct for map attempt2");
assertEquals(TaskAttemptCompletionEventStatus.SUCCEEDED, events[3].getStatus(),
"Event status not correct for reduce attempt1");
// The map-only view of the completion events must match the map entries
// of the full (converted) stream, across paged requests.
TaskCompletionEvent mapEvents[] =
job.getMapAttemptCompletionEvents(0, 2);
TaskCompletionEvent convertedEvents[] = TypeConverter.fromYarn(events);
assertEquals(2, mapEvents.length, "Incorrect number of map events");
assertArrayEquals(Arrays.copyOfRange(convertedEvents, 0, 2), mapEvents,
"Unexpected map events");
mapEvents = job.getMapAttemptCompletionEvents(2, 200);
assertEquals(1, mapEvents.length, "Incorrect number of map events");
assertEquals(convertedEvents[2], mapEvents[0], "Unexpected map event");
}
/**
 * Pushes a synthetic status report for {@code attempt} into the AM's event
 * handler, advertising the attempt as being in the given {@code phase}.
 *
 * @param app the test application whose event handler receives the update
 * @param attempt the task attempt the status report is attributed to
 * @param phase the phase (e.g. SHUFFLE or REDUCE) to report for the attempt
 */
private void updateStatus(MRApp app, TaskAttempt attempt, Phase phase) {
TaskAttemptStatusUpdateEvent.TaskAttemptStatus report =
new TaskAttemptStatusUpdateEvent.TaskAttemptStatus();
// Identity and current state of the attempt being reported on.
report.id = attempt.getID();
report.taskState = attempt.getState();
report.phase = phase;
// Fixed bookkeeping values; the tests only care about the phase above.
report.progress = 0.5f;
report.stateString = "OK";
report.counters = new Counters();
report.fetchFailedMaps = new ArrayList<TaskAttemptId>();
report.mapFinishTime = 0;
report.shuffleFinishTime = 0;
report.sortFinishTime = 0;
app.getContext().getEventHandler().handle(
new TaskAttemptStatusUpdateEvent(attempt.getID(),
new AtomicReference<>(report)));
}
/**
 * Reports a fetch failure from the given reduce attempt against the given
 * map attempt, as if the reducer had failed to fetch the map's output from
 * {@code hostname}.
 *
 * @param app the test application whose event handler receives the event
 * @param reduceAttempt the reduce attempt reporting the failure
 * @param mapAttempt the map attempt whose output could not be fetched
 * @param hostname the host the failing fetch was attempted against
 */
private void sendFetchFailure(MRApp app, TaskAttempt reduceAttempt,
TaskAttempt mapAttempt, String hostname) {
app.getContext().getEventHandler().handle(
new JobTaskAttemptFetchFailureEvent(
reduceAttempt.getID(),
// Arrays.asList is varargs; no need for an explicit array wrapper.
Arrays.asList(mapAttempt.getID()),
hostname));
}
static | TestFetchFailure |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/refcolnames/superclass/ReferencedJoinedSuperclassColumnTest.java | {
"start": 4127,
"end": 4358
} | class ____ extends Branch {
@OneToMany(mappedBy = "circleHead", cascade = CascadeType.ALL, fetch = FetchType.LAZY)
Set<RegionHead> regions = new HashSet<>();
// other fields and mappings
// getters and setters
}
}
| CircleHead |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java | {
"start": 59275,
"end": 59959
} | class ____ extends BaseTransition {
Object transitionToDo;
RMAppAttemptState targetedFinalState;
public FinalSavingTransition(Object transitionToDo,
RMAppAttemptState targetedFinalState) {
this.transitionToDo = transitionToDo;
this.targetedFinalState = targetedFinalState;
}
@Override
public void transition(RMAppAttemptImpl appAttempt, RMAppAttemptEvent event) {
// For cases Killed/Failed, targetedFinalState is the same as the state to
// be stored
appAttempt.rememberTargetTransitionsAndStoreState(event, transitionToDo,
targetedFinalState, targetedFinalState);
}
}
private static | FinalSavingTransition |
java | quarkusio__quarkus | extensions/panache/panache-hibernate-common/deployment/src/main/java/io/quarkus/panache/hibernate/common/deployment/HibernateModelClassCandidatesForFieldAccessBuildItem.java | {
"start": 142,
"end": 523
} | class ____ extends SimpleBuildItem {
private final Set<String> managedClassNames;
public HibernateModelClassCandidatesForFieldAccessBuildItem(Set<String> managedClassNames) {
this.managedClassNames = managedClassNames;
}
public Set<String> getManagedClassNames() {
return managedClassNames;
}
}
| HibernateModelClassCandidatesForFieldAccessBuildItem |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/core/SmartClassLoader.java | {
"start": 3999,
"end": 4504
} | class ____
* @throws SecurityException in case of an invalid definition attempt
* @throws UnsupportedOperationException in case of a custom definition attempt
* not being possible (thrown by the default implementation in this interface)
* @since 5.3.4
* @see ClassLoader#defineClass(String, byte[], int, int, ProtectionDomain)
*/
default Class<?> publicDefineClass(String name, byte[] b, @Nullable ProtectionDomain protectionDomain) {
throw new UnsupportedOperationException();
}
}
| definition |
java | spring-projects__spring-boot | module/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/endpoint/InvalidEndpointRequestException.java | {
"start": 831,
"end": 1433
} | class ____ extends RuntimeException {
private final @Nullable String reason;
public InvalidEndpointRequestException(String message, @Nullable String reason) {
super(message);
this.reason = reason;
}
public InvalidEndpointRequestException(String message, @Nullable String reason, @Nullable Throwable cause) {
super(message, cause);
this.reason = reason;
}
/**
* Return the reason explaining why the request is invalid, potentially {@code null}.
* @return the reason for the failure
*/
public @Nullable String getReason() {
return this.reason;
}
}
| InvalidEndpointRequestException |
java | hibernate__hibernate-orm | hibernate-testing/src/main/java/org/hibernate/testing/DialectChecks.java | {
"start": 679,
"end": 831
} | class ____ different implementation of the {@link DialectCheck} interface.
*
* @author Hardy Ferentschik
* @author Steve Ebersole
*/
abstract public | for |
java | apache__flink | flink-core/src/main/java/org/apache/flink/api/common/typeutils/base/EnumComparator.java | {
"start": 1060,
"end": 2768
} | class ____<T extends Enum<T>> extends BasicTypeComparator<T> {
private static final long serialVersionUID = 1L;
public EnumComparator(boolean ascending) {
super(ascending);
}
@Override
public int compareSerialized(DataInputView firstSource, DataInputView secondSource)
throws IOException {
int i1 = firstSource.readInt();
int i2 = secondSource.readInt();
int comp = (i1 < i2 ? -1 : (i1 == i2 ? 0 : 1));
return ascendingComparison ? comp : -comp;
}
@Override
public boolean supportsNormalizedKey() {
return true;
}
@Override
public int getNormalizeKeyLen() {
return 4;
}
@Override
public boolean isNormalizedKeyPrefixOnly(int keyBytes) {
return keyBytes < 4;
}
@Override
public void putNormalizedKey(T iValue, MemorySegment target, int offset, int numBytes) {
int value = iValue.ordinal() - Integer.MIN_VALUE;
// see IntValue for an explanation of the logic
if (numBytes == 4) {
// default case, full normalized key
target.putIntBigEndian(offset, value);
} else if (numBytes <= 0) {
} else if (numBytes < 4) {
for (int i = 0; numBytes > 0; numBytes--, i++) {
target.put(offset + i, (byte) (value >>> ((3 - i) << 3)));
}
} else {
target.putLongBigEndian(offset, value);
for (int i = 4; i < numBytes; i++) {
target.put(offset + i, (byte) 0);
}
}
}
@Override
public EnumComparator<T> duplicate() {
return new EnumComparator<T>(ascendingComparison);
}
}
| EnumComparator |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/FuturesGetCheckedIllegalExceptionTypeTest.java | {
"start": 883,
"end": 1543
} | class ____ {
private final CompilationTestHelper compilationHelper =
CompilationTestHelper.newInstance(FuturesGetCheckedIllegalExceptionType.class, getClass());
@Test
public void positiveCase() {
compilationHelper
.addSourceLines(
"FuturesGetCheckedIllegalExceptionTypePositiveCases.java",
"""
package com.google.errorprone.bugpatterns.testdata;
import static com.google.common.util.concurrent.Futures.getChecked;
import static java.util.concurrent.TimeUnit.SECONDS;
import java.util.concurrent.Future;
/** Positive cases for {@link FuturesGetCheckedIllegalExceptionType}. */
public | FuturesGetCheckedIllegalExceptionTypeTest |
java | apache__camel | dsl/camel-yaml-dsl/camel-yaml-dsl-deserializers/src/generated/java/org/apache/camel/dsl/yaml/deserializers/ModelDeserializers.java | {
"start": 777749,
"end": 779775
} | class ____ extends YamlDeserializerBase<PropertyDefinition> {
public PropertyDefinitionDeserializer() {
super(PropertyDefinition.class);
}
@Override
protected PropertyDefinition newInstance() {
return new PropertyDefinition();
}
@Override
protected boolean setProperty(PropertyDefinition target, String propertyKey,
String propertyName, Node node) {
propertyKey = org.apache.camel.util.StringHelper.dashToCamelCase(propertyKey);
switch(propertyKey) {
case "key": {
String val = asText(node);
target.setKey(val);
break;
}
case "value": {
String val = asText(node);
target.setValue(val);
break;
}
default: {
return false;
}
}
return true;
}
}
@YamlType(
nodes = "propertyExpression",
types = org.apache.camel.model.PropertyExpressionDefinition.class,
order = org.apache.camel.dsl.yaml.common.YamlDeserializerResolver.ORDER_LOWEST - 1,
displayName = "Property Expression",
description = "A key value pair where the value is an expression.",
deprecated = false,
properties = {
@YamlProperty(name = "__extends", type = "object:org.apache.camel.model.language.ExpressionDefinition", oneOf = "expression"),
@YamlProperty(name = "expression", type = "object:org.apache.camel.model.language.ExpressionDefinition", description = "Property values as an expression", displayName = "Expression", oneOf = "expression"),
@YamlProperty(name = "key", type = "string", required = true, description = "Property key", displayName = "Key")
}
)
public static | PropertyDefinitionDeserializer |
java | junit-team__junit5 | jupiter-tests/src/test/java/org/junit/jupiter/params/ParameterizedTestIntegrationTests.java | {
"start": 23189,
"end": 25255
} | class ____ {
@Test
void executesWithNullSourceForString() {
var results = execute("testWithNullSourceForString", String.class);
results.testEvents().failed().assertEventsMatchExactly(
event(test(), displayName("[1] argument = null"), finishedWithFailure(message("null"))));
}
@Test
void executesWithNullSourceForStringAndTestInfo() {
var results = execute("testWithNullSourceForStringAndTestInfo", String.class, TestInfo.class);
results.testEvents().failed().assertEventsMatchExactly(
event(test(), displayName("[1] argument = null"), finishedWithFailure(message("null"))));
}
@Test
void executesWithNullSourceForNumber() {
var results = execute("testWithNullSourceForNumber", Number.class);
results.testEvents().failed().assertEventsMatchExactly(
event(test(), displayName("[1] argument = null"), finishedWithFailure(message("null"))));
}
@Test
void failsWithNullSourceWithZeroFormalParameters() {
var methodName = "testWithNullSourceWithZeroFormalParameters";
execute(methodName).containerEvents().failed().assertEventsMatchExactly(//
event(container(methodName), //
finishedWithFailure(//
instanceOf(PreconditionViolationException.class), //
message(msg -> msg.matches(
"@NullSource cannot provide a null argument to method .+: no formal parameters declared.")))));
}
@Test
void failsWithNullSourceForPrimitive() {
var results = execute("testWithNullSourceForPrimitive", int.class);
results.testEvents().failed().assertEventsMatchExactly(event(test(), displayName("[1] argument = null"),
finishedWithFailure(instanceOf(ParameterResolutionException.class), message(
"Error converting parameter at index 0: Cannot convert null to primitive value of type int"))));
}
private EngineExecutionResults execute(String methodName, Class<?>... methodParameterTypes) {
return ParameterizedTestIntegrationTests.this.execute(NullSourceTestCase.class, methodName,
methodParameterTypes);
}
}
/**
* @since 5.4
*/
@Nested
| NullSourceIntegrationTests |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/TruthContainsExactlyElementsInUsageTest.java | {
"start": 4473,
"end": 4954
} | class ____ {
void test() {
assertThat(ImmutableList.of(1, 2, 3)).containsExactlyElementsIn(ImmutableSet.of(1, 2, 3));
}
}
""")
.doTest();
}
@Test
public void refactoringTruthContainsExactlyElementsInUsageWithArrayList() {
refactoringHelper
.addInputLines(
"ExampleClassTest.java",
"""
import static com.google.common.truth.Truth.assertThat;
import com.google.common.collect.ImmutableList;
import java.util.Arrays;
public | ExampleClassTest |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/client/impl/TestLeaseRenewer.java | {
"start": 1907,
"end": 10467
} | class ____ {
private final String FAKE_AUTHORITY="hdfs://nn1/";
private final UserGroupInformation FAKE_UGI_A =
UserGroupInformation.createUserForTesting(
"myuser", new String[]{"group1"});
private final UserGroupInformation FAKE_UGI_B =
UserGroupInformation.createUserForTesting(
"myuser", new String[]{"group1"});
private DFSClient MOCK_DFSCLIENT;
private LeaseRenewer renewer;
/** Cause renewals often so test runs quickly. */
private static final long FAST_GRACE_PERIOD = 100L;
@BeforeEach
public void setupMocksAndRenewer() throws IOException {
MOCK_DFSCLIENT = createMockClient();
renewer = LeaseRenewer.getInstance(
FAKE_AUTHORITY, FAKE_UGI_A, MOCK_DFSCLIENT);
renewer.setGraceSleepPeriod(FAST_GRACE_PERIOD);
}
private DFSClient createMockClient() {
final DfsClientConf mockConf = Mockito.mock(DfsClientConf.class);
Mockito.doReturn((int)FAST_GRACE_PERIOD).when(mockConf).getHdfsTimeout();
DFSClient mock = Mockito.mock(DFSClient.class);
Mockito.doReturn(true).when(mock).isClientRunning();
Mockito.doReturn(mockConf).when(mock).getConf();
Mockito.doReturn("myclient").when(mock).getClientName();
return mock;
}
@Test
public void testInstanceSharing() throws IOException {
// Two lease renewers with the same UGI should return
// the same instance
LeaseRenewer lr = LeaseRenewer.getInstance(
FAKE_AUTHORITY, FAKE_UGI_A, MOCK_DFSCLIENT);
LeaseRenewer lr2 = LeaseRenewer.getInstance(
FAKE_AUTHORITY, FAKE_UGI_A, MOCK_DFSCLIENT);
assertSame(lr, lr2);
// But a different UGI should return a different instance
LeaseRenewer lr3 = LeaseRenewer.getInstance(
FAKE_AUTHORITY, FAKE_UGI_B, MOCK_DFSCLIENT);
assertNotSame(lr, lr3);
// A different authority with same UGI should also be a different
// instance.
LeaseRenewer lr4 = LeaseRenewer.getInstance(
"someOtherAuthority", FAKE_UGI_B, MOCK_DFSCLIENT);
assertNotSame(lr, lr4);
assertNotSame(lr3, lr4);
}
@Test
public void testRenewal() throws Exception {
// Keep track of how many times the lease gets renewed
final AtomicInteger leaseRenewalCount = new AtomicInteger();
Mockito.doAnswer(new Answer<Boolean>() {
@Override
public Boolean answer(InvocationOnMock invocation) throws Throwable {
leaseRenewalCount.incrementAndGet();
return true;
}
}).when(MOCK_DFSCLIENT).renewLease();
// Set up a file so that we start renewing our lease.
DFSOutputStream mockStream = Mockito.mock(DFSOutputStream.class);
long fileId = 123L;
renewer.put(MOCK_DFSCLIENT);
// Wait for lease to get renewed
long failTime = Time.monotonicNow() + 5000;
while (Time.monotonicNow() < failTime &&
leaseRenewalCount.get() == 0) {
Thread.sleep(50);
}
if (leaseRenewalCount.get() == 0) {
fail("Did not renew lease at all!");
}
renewer.closeClient(MOCK_DFSCLIENT);
}
/**
* Regression test for HDFS-2810. In this bug, the LeaseRenewer has handles
* to several DFSClients with the same name, the first of which has no files
* open. Previously, this was causing the lease to not get renewed.
*/
@Test
public void testManyDfsClientsWhereSomeNotOpen() throws Exception {
// First DFSClient has no files open so doesn't renew leases.
final DFSClient mockClient1 = createMockClient();
Mockito.doReturn(false).when(mockClient1).renewLease();
assertSame(renewer, LeaseRenewer.getInstance(
FAKE_AUTHORITY, FAKE_UGI_A, mockClient1));
long fileId = 456L;
renewer.put(mockClient1);
// Second DFSClient does renew lease
final DFSClient mockClient2 = createMockClient();
Mockito.doReturn(true).when(mockClient2).renewLease();
assertSame(renewer, LeaseRenewer.getInstance(
FAKE_AUTHORITY, FAKE_UGI_A, mockClient2));
renewer.put(mockClient2);
// Wait for lease to get renewed
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
try {
Mockito.verify(mockClient1, Mockito.atLeastOnce()).renewLease();
Mockito.verify(mockClient2, Mockito.atLeastOnce()).renewLease();
return true;
} catch (AssertionError err) {
LeaseRenewer.LOG.warn("Not yet satisfied", err);
return false;
} catch (IOException e) {
// should not throw!
throw new RuntimeException(e);
}
}
}, 100, 10000);
renewer.closeClient(mockClient1);
renewer.closeClient(mockClient2);
renewer.closeClient(MOCK_DFSCLIENT);
// Make sure renewer is not running due to expiration.
Thread.sleep(FAST_GRACE_PERIOD * 2);
assertTrue(!renewer.isRunning());
}
@Test
public void testThreadName() throws Exception {
assertFalse(renewer.isRunning(), "Renewer not initially running");
// Pretend to open a file
renewer.put(MOCK_DFSCLIENT);
assertTrue(renewer.isRunning(), "Renewer should have started running");
// Check the thread name is reasonable
String threadName = renewer.getDaemonName();
assertEquals("LeaseRenewer:myuser@hdfs://nn1/", threadName);
// Pretend to close the file
renewer.closeClient(MOCK_DFSCLIENT);
renewer.setEmptyTime(Time.monotonicNow());
// Should stop the renewer running within a few seconds
long failTime = Time.monotonicNow() + 5000;
while (renewer.isRunning() && Time.monotonicNow() < failTime) {
Thread.sleep(50);
}
assertFalse(renewer.isRunning());
}
/**
* Test for HDFS-14575. In this fix, the LeaseRenewer clears all clients
* and expires immediately via setting empty time to 0 before it's removed
* from factory. Previously, LeaseRenewer#daemon thread might leak.
*/
@Test
public void testDaemonThreadLeak() throws Exception {
assertFalse(renewer.isRunning(), "Renewer not initially running");
// Pretend to create a file#1, daemon#1 starts
renewer.put(MOCK_DFSCLIENT);
assertTrue(renewer.isRunning(), "Renewer should have started running");
Pattern daemonThreadNamePattern = Pattern.compile("LeaseRenewer:\\S+");
assertEquals(1, countThreadMatching(daemonThreadNamePattern));
// Pretend to create file#2, daemon#2 starts due to expiration
LeaseRenewer lastRenewer = renewer;
renewer =
LeaseRenewer.getInstance(FAKE_AUTHORITY, FAKE_UGI_A, MOCK_DFSCLIENT);
assertEquals(lastRenewer, renewer);
// Pretend to close file#1
renewer.closeClient(MOCK_DFSCLIENT);
assertEquals(1, countThreadMatching(daemonThreadNamePattern));
// Pretend to be expired
renewer.setEmptyTime(0);
renewer =
LeaseRenewer.getInstance(FAKE_AUTHORITY, FAKE_UGI_A, MOCK_DFSCLIENT);
renewer.setGraceSleepPeriod(FAST_GRACE_PERIOD);
boolean success = renewer.put(MOCK_DFSCLIENT);
if (!success) {
LeaseRenewer.remove(renewer);
renewer =
LeaseRenewer.getInstance(FAKE_AUTHORITY, FAKE_UGI_A, MOCK_DFSCLIENT);
renewer.setGraceSleepPeriod(FAST_GRACE_PERIOD);
renewer.put(MOCK_DFSCLIENT);
}
int threadCount = countThreadMatching(daemonThreadNamePattern);
//Sometimes old LR#Daemon gets closed and lead to count 1 (rare scenario)
assertTrue(1 == threadCount || 2 == threadCount);
// After grace period, both daemon#1 and renewer#1 will be removed due to
// expiration, then daemon#2 will leak before HDFS-14575.
Thread.sleep(FAST_GRACE_PERIOD * 2);
// Pretend to close file#2, renewer#2 will be created
lastRenewer = renewer;
renewer =
LeaseRenewer.getInstance(FAKE_AUTHORITY, FAKE_UGI_A, MOCK_DFSCLIENT);
assertEquals(lastRenewer, renewer);
renewer.setGraceSleepPeriod(FAST_GRACE_PERIOD);
renewer.closeClient(MOCK_DFSCLIENT);
renewer.setEmptyTime(0);
// Make sure LeaseRenewer#daemon threads will terminate after grace period
Thread.sleep(FAST_GRACE_PERIOD * 2);
assertEquals(0, countThreadMatching(daemonThreadNamePattern),
"LeaseRenewer#daemon thread leaks");
}
private static int countThreadMatching(Pattern pattern) {
ThreadMXBean threadBean = ManagementFactory.getThreadMXBean();
ThreadInfo[] infos =
threadBean.getThreadInfo(threadBean.getAllThreadIds(), 1);
int count = 0;
for (ThreadInfo info : infos) {
if (info == null) {
continue;
}
if (pattern.matcher(info.getThreadName()).matches()) {
count++;
}
}
return count;
}
}
| TestLeaseRenewer |
java | google__guice | core/test/com/google/inject/ProvisionExceptionsTest.java | {
"start": 13102,
"end": 13679
} | class ____ implements Exploder {
@Inject
public Explosion(@Named("runtime") boolean runtime) throws IOException {
if (runtime) {
throw new IllegalStateException("boom!");
} else {
throw new IOException("boom!");
}
}
public static Explosion createRuntime() {
try {
return new Explosion(true);
} catch (IOException iox) {
throw new RuntimeException();
}
}
public static Explosion createChecked() throws IOException {
return new Explosion(false);
}
}
private static | Explosion |
java | micronaut-projects__micronaut-core | http/src/main/java/io/micronaut/http/body/DefaultMessageBodyHandlerRegistry.java | {
"start": 5173,
"end": 8700
} | class ____<T> extends FilteringQualifier<T> {
private final Argument<?> type;
private final List<MediaType> mediaTypes;
private final Class<? extends Annotation> annotationType;
private MediaTypeQualifier(Argument<?> type,
List<MediaType> mediaTypes,
Class<? extends Annotation> annotationType) {
this.type = type;
this.mediaTypes = mediaTypes;
this.annotationType = annotationType;
}
@Override
public <K extends BeanType<T>> Collection<K> filter(Class<T> beanType, Collection<K> candidates) {
List<K> all = new ArrayList<>(candidates.size());
candidatesLoop:
for (K candidate : candidates) {
String[] applicableTypes = candidate.getAnnotationMetadata().stringValues(annotationType);
if (applicableTypes.length == 0) {
all.add(candidate);
continue;
}
for (String mt : applicableTypes) {
MediaType mediaType = new MediaType(mt);
for (MediaType m : mediaTypes) {
if (m.matches(mediaType)) {
all.add(candidate);
continue candidatesLoop;
}
}
}
}
// Handlers with a media type defined should have a priority
all.sort(Comparator.comparingInt(this::findOrder).reversed());
return all;
}
@Override
public <BT extends QualifiedBeanType<T>> Collection<BT> filterQualified(Class<T> beanType, Collection<BT> candidates) {
return filter(beanType, candidates);
}
private int findOrder(BeanType<?> beanType) {
int order = 0;
String[] applicableTypes = beanType.getAnnotationMetadata().stringValues(annotationType);
int size = mediaTypes.size();
for (String mt : applicableTypes) {
int index = mediaTypes.indexOf(new MediaType(mt));
if (index == -1) {
continue;
}
int compareValue = size - index; // First value should have the priority
order = Integer.max(order, compareValue);
}
return order;
}
private static boolean isInvalidType(List<Argument<?>> consumedType, Argument<?> requiredType) {
Argument<?> argument = consumedType.get(0);
return !(argument.isTypeVariable() || argument.isAssignableFrom(requiredType.getType()));
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
MediaTypeQualifier<?> that = (MediaTypeQualifier<?>) o;
return type.equalsType(that.type) && mediaTypes.equals(that.mediaTypes);
}
@Override
public int hashCode() {
return Objects.hash(type.typeHashCode(), mediaTypes);
}
@Override
public String toString() {
return "MediaTypeQualifier[" +
"type=" + type + ", " +
"mediaTypes=" + mediaTypes + ", " +
"annotationType=" + annotationType + ']';
}
}
}
| MediaTypeQualifier |
java | apache__camel | core/camel-core-model/src/main/java/org/apache/camel/model/dataformat/SoapDataFormat.java | {
"start": 9235,
"end": 10365
} | class ____ implements DataFormatBuilder<SoapDataFormat> {
private String contextPath;
private String encoding;
private String elementNameStrategy;
private Object elementNameStrategyObject;
private String version;
private String namespacePrefix;
private String schema;
private String ignoreUnmarshalledHeaders;
/**
* Package name where your JAXB classes are located.
*/
public Builder contextPath(String contextPath) {
this.contextPath = contextPath;
return this;
}
/**
* To overrule and use a specific encoding
*/
public Builder encoding(String encoding) {
this.encoding = encoding;
return this;
}
/**
* Refers to an element strategy to lookup from the registry.
* <p/>
* An element name strategy is used for two purposes. The first is to find a xml element name for a given object
* and soap action when marshaling the object into a SOAP message. The second is to find an Exception | Builder |
java | google__error-prone | core/src/main/java/com/google/errorprone/refaster/annotation/RequiredAnnotation.java | {
"start": 1129,
"end": 1214
} | interface ____ {
Class<? extends Annotation>[] value() default {};
}
| RequiredAnnotation |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java | {
"start": 19640,
"end": 27210
} | class ____<
R extends InternalAggregation,
B extends InternalMultiBucketAggregation.InternalBucket,
TB extends InternalMultiBucketAggregation.InternalBucket> extends CollectionStrategy {
private final boolean excludeDeletedDocs;
private final ResultStrategy<R, B, TB> collectionStrategy;
DenseGlobalOrds(ResultStrategy<R, B, TB> collectionStrategy, boolean excludeDeletedDocs) {
this.excludeDeletedDocs = excludeDeletedDocs;
this.collectionStrategy = collectionStrategy;
}
@Override
String describe() {
return "dense";
}
@Override
long totalBuckets() {
return valueCount;
}
@Override
void globalOrdsReady(SortedSetDocValues globalOrds) {
grow(globalOrds.getValueCount());
}
@Override
void collectGlobalOrd(long owningBucketOrd, int doc, long globalOrd, LeafBucketCollector sub) throws IOException {
assert owningBucketOrd == 0;
collectExistingBucket(sub, doc, globalOrd);
}
@Override
long globalOrdToBucketOrd(long globalOrd) {
return globalOrd;
}
private void collect(BucketInfoConsumer consumer) throws IOException {
if (excludeDeletedDocs) {
forEachExcludeDeletedDocs(consumer);
} else {
forEachAllowDeletedDocs(consumer);
}
}
private void forEachAllowDeletedDocs(BucketInfoConsumer consumer) throws IOException {
for (long globalOrd = 0; globalOrd < valueCount; globalOrd++) {
if (false == acceptedGlobalOrdinals.test(globalOrd)) {
continue;
}
long docCount = bucketDocCount(globalOrd);
if (bucketCountThresholds.getMinDocCount() == 0 || docCount > 0) {
consumer.accept(globalOrd, globalOrd, docCount);
}
}
}
/**
* Excludes deleted docs in the results by cross-checking with liveDocs.
*/
private void forEachExcludeDeletedDocs(BucketInfoConsumer consumer) throws IOException {
try (LongHash accepted = new LongHash(20, bigArrays())) {
for (LeafReaderContext ctx : searcher().getTopReaderContext().leaves()) {
LeafReader reader = ctx.reader();
Bits liveDocs = reader.getLiveDocs();
SortedSetDocValues globalOrds = null;
for (int docId = 0; docId < reader.maxDoc(); ++docId) {
if (liveDocs == null || liveDocs.get(docId)) { // document is not deleted
globalOrds = globalOrds == null ? valuesSource.globalOrdinalsValues(ctx) : globalOrds;
if (globalOrds.advanceExact(docId)) {
for (int i = 0; i < globalOrds.docValueCount(); i++) {
long globalOrd = globalOrds.nextOrd();
if (accepted.find(globalOrd) >= 0) {
continue;
}
if (false == acceptedGlobalOrdinals.test(globalOrd)) {
continue;
}
long docCount = bucketDocCount(globalOrd);
if (bucketCountThresholds.getMinDocCount() == 0 || docCount > 0) {
consumer.accept(globalOrd, globalOrd, docCount);
accepted.add(globalOrd);
}
}
}
}
}
}
}
}
@Override
public void close() {}
@Override
InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOException {
assert owningBucketOrds.size() == 1 && owningBucketOrds.get(0) == 0;
try (
LongArray otherDocCount = bigArrays().newLongArray(1, true);
ObjectArray<B[]> topBucketsPreOrd = collectionStrategy.buildTopBucketsPerOrd(1)
) {
GlobalOrdLookupFunction lookupGlobalOrd = valuesSupplier.get()::lookupOrd;
final int size = (int) Math.min(valueCount, bucketCountThresholds.getShardSize());
try (ObjectArrayPriorityQueue<BucketAndOrd<TB>> ordered = collectionStrategy.buildPriorityQueue(size)) {
BucketUpdater<TB> updater = collectionStrategy.bucketUpdater(0, lookupGlobalOrd);
collect(new BucketInfoConsumer() {
BucketAndOrd<TB> spare = null;
@Override
public void accept(long globalOrd, long bucketOrd, long docCount) throws IOException {
otherDocCount.increment(0, docCount);
if (docCount >= bucketCountThresholds.getShardMinDocCount()) {
if (spare == null) {
checkRealMemoryCBForInternalBucket();
spare = new BucketAndOrd<>(collectionStrategy.buildEmptyTemporaryBucket());
}
spare.ord = bucketOrd;
updater.updateBucket(spare.bucket, globalOrd, docCount);
spare = ordered.insertWithOverflow(spare);
}
}
});
// Get the top buckets
int orderedSize = (int) ordered.size();
try (LongArray ordsArray = bigArrays().newLongArray(orderedSize)) {
B[] buckets = collectionStrategy.buildBuckets(orderedSize);
for (int i = orderedSize - 1; i >= 0; --i) {
checkRealMemoryCBForInternalBucket();
BucketAndOrd<TB> bucketAndOrd = ordered.pop();
B bucket = collectionStrategy.convertTempBucketToRealBucket(bucketAndOrd.bucket, lookupGlobalOrd);
ordsArray.set(i, bucketAndOrd.ord);
buckets[i] = bucket;
otherDocCount.increment(0, -bucket.getDocCount());
}
topBucketsPreOrd.set(0, buckets);
collectionStrategy.buildSubAggs(topBucketsPreOrd, ordsArray);
}
}
return GlobalOrdinalsStringTermsAggregator.this.buildAggregations(
Math.toIntExact(owningBucketOrds.size()),
ordIdx -> collectionStrategy.buildResult(
owningBucketOrds.get(ordIdx),
otherDocCount.get(ordIdx),
topBucketsPreOrd.get(ordIdx)
)
);
}
}
}
/**
* {@linkplain CollectionStrategy} that uses a {@link LongHash} to map the
* global ordinal into bucket ordinals. This uses more memory than
* {@link DenseGlobalOrds} when collecting every ordinal, but significantly
* less when collecting only a few.
*/
private | DenseGlobalOrds |
java | assertj__assertj-core | assertj-core/src/main/java/org/assertj/core/error/ShouldHaveMethods.java | {
"start": 836,
"end": 3764
} | class ____ extends BasicErrorMessageFactory {
/**
* Creates a new <code>{@link org.assertj.core.error.ShouldHaveMethods}</code>.
*
* @param actual the actual value in the failed assertion.
* @param declared {@code true} if the expected methods are declared ones, {@code false} otherwise.
* @param expected expected methods for this class
* @param missing missing methods for this class
* @return the created {@code ErrorMessageFactory}.
*/
public static ErrorMessageFactory shouldHaveMethods(Class<?> actual, boolean declared, SortedSet<String> expected,
SortedSet<String> missing) {
return new ShouldHaveMethods(actual, expected, missing, declared);
}
public static ErrorMessageFactory shouldHaveMethods(Class<?> actual, boolean declared, SortedSet<String> expected,
String modifier, Map<String, String> nonMatching) {
return new ShouldHaveMethods(actual, expected, modifier, nonMatching, declared);
}
public static ErrorMessageFactory shouldNotHaveMethods(Class<?> actual, String modifier, boolean declared,
SortedSet<String> actualMethodsHavingModifier) {
return new ShouldHaveMethods(actual, modifier, declared, actualMethodsHavingModifier);
}
public static ErrorMessageFactory shouldNotHaveMethods(Class<?> actual, boolean declared,
SortedSet<String> actualMethodsHavingModifier) {
return new ShouldHaveMethods(actual, null, declared, actualMethodsHavingModifier);
}
private ShouldHaveMethods(Class<?> actual, Set<String> expected, Set<String> missing, boolean declared) {
super("%n" +
"Expecting%n" +
" %s%n" +
"to have " + (declared ? "declared " : "") + "methods:%n" +
" %s%n" +
"but could not find:%n" +
" %s", actual, expected, missing);
}
private ShouldHaveMethods(Class<?> actual, Set<String> expected, String modifier, Map<String, String> nonMatching,
boolean declared) {
super("%n" +
"Expecting%n" +
" %s%n" +
"to have " + (declared ? "declared " : "") + modifier + " " + "methods:%n" +
" %s%n" +
"but the following are not " + modifier + ":%n" +
" %s", actual, expected, nonMatching);
}
private ShouldHaveMethods(Class<?> actual, String modifier, boolean declared,
Set<String> actualMethodsHavingModifier) {
super("%n" +
"Expecting%n" +
" %s%n" +
"not to have any " + (declared ? "declared " : "")
+ (modifier != null && !modifier.isEmpty() ? modifier + " " : "") + "methods but it has the following:%n" +
" %s", actual, actualMethodsHavingModifier);
}
}
| ShouldHaveMethods |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java | {
"start": 28269,
"end": 29323
} | class ____ extends BaseTransition {
@Override
public void transition(Component component, ComponentEvent event) {
boolean isCancel = event.getType().equals(CANCEL_UPGRADE);
UpgradeStatus status = !isCancel ? component.upgradeStatus :
component.cancelUpgradeStatus;
status.inProgress.set(true);
status.targetSpec = event.getTargetSpec();
status.targetVersion = event.getUpgradeVersion();
LOG.info("[COMPONENT {}]: need upgrade to {}",
component.getName(), status.targetVersion);
status.containersNeedUpgrade.set(
component.componentSpec.getNumberOfContainers());
component.setComponentState(org.apache.hadoop.yarn.service.api.
records.ComponentState.NEEDS_UPGRADE);
component.getAllComponentInstances().forEach(instance -> {
instance.setContainerState(ContainerState.NEEDS_UPGRADE);
});
if (event.getType().equals(CANCEL_UPGRADE)) {
component.upgradeStatus.reset();
}
}
}
private static | NeedsUpgradeTransition |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/ser/RequireSetterForGetterSerTest.java | {
"start": 1474,
"end": 3879
} | class ____ {
private int readonly;
private int readwrite;
public Data736() {
readonly = 1;
readwrite = 2;
}
public int getReadwrite() {
return readwrite;
}
public void setReadwrite(int readwrite) {
this.readwrite = readwrite;
}
public int getReadonly() {
return readonly;
}
}
/*
/**********************************************************************
/* Test methods
/**********************************************************************
*/
@Test
public void testGettersWithoutSetters() throws Exception
{
ObjectMapper m = new ObjectMapper();
GettersWithoutSetters bean = new GettersWithoutSetters(123);
assertFalse(m.isEnabled(MapperFeature.REQUIRE_SETTERS_FOR_GETTERS));
// by default, all 4 found:
assertEquals("{\"a\":3,\"b\":4,\"c\":5,\"d\":6}", m.writeValueAsString(bean));
// but 3 if we require mutator:
m = jsonMapperBuilder()
.enable(MapperFeature.REQUIRE_SETTERS_FOR_GETTERS)
.build();
assertEquals("{\"a\":3,\"c\":5,\"d\":6}", m.writeValueAsString(bean));
}
@Test
public void testGettersWithoutSettersOverride() throws Exception
{
GettersWithoutSetters2 bean = new GettersWithoutSetters2();
ObjectMapper m = jsonMapperBuilder()
.enable(MapperFeature.REQUIRE_SETTERS_FOR_GETTERS)
.build();
assertEquals("{\"a\":123}", m.writeValueAsString(bean));
}
// for [databind#736]
@Test
public void testNeedForSetters() throws Exception
{
ObjectMapper mapper = jsonMapperBuilder()
.changeDefaultVisibility(vc -> vc
.withVisibility(PropertyAccessor.ALL, Visibility.NONE)
.withVisibility(PropertyAccessor.FIELD, Visibility.NONE)
.withVisibility(PropertyAccessor.GETTER, Visibility.PUBLIC_ONLY)
.withVisibility(PropertyAccessor.SETTER, Visibility.PUBLIC_ONLY))
.enable(MapperFeature.REQUIRE_SETTERS_FOR_GETTERS)
.build();
Data736 dataB = new Data736();
String json = mapper.writeValueAsString(dataB);
assertEquals(a2q("{'readwrite':2}"), json);
}
}
| Data736 |
java | quarkusio__quarkus | test-framework/junit5-component/src/test/java/io/quarkus/test/component/config/ConfigMappingTest.java | {
"start": 1270,
"end": 1371
} | class ____ {
@Inject
FooConfig config;
}
@ConfigMapping(prefix = "foo")
| Foo |
java | quarkusio__quarkus | extensions/websockets-next/deployment/src/test/java/io/quarkus/websockets/next/test/client/TlsBasicConnectorMissingTlsConfigurationTest.java | {
"start": 1949,
"end": 2031
} | class ____ {
@OnOpen
void open() {
}
}
}
| ServerEndpoint |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunction.java | {
"start": 846,
"end": 1623
} | interface ____ {
double evaluate(double value, double scale);
Explanation explainFunction(String valueString, double value, double scale);
/**
* The final scale parameter is computed from the scale parameter given by
* the user and a value. This value is the value that the decay function
* should compute if document distance and user defined scale equal. The
* scale parameter for the function must be adjusted accordingly in this
* function
*
* @param scale
* the raw scale value given by the user
* @param decay
* the value which decay function should take once the distance
* reaches this scale
* */
double processScale(double scale, double decay);
}
| DecayFunction |
java | quarkusio__quarkus | extensions/amazon-lambda/deployment/src/test/java/io/quarkus/amazon/lambda/deployment/RequestHandlerJandexUtilTest.java | {
"start": 19291,
"end": 19570
} | class ____ extends MultiLevelMiddle<Double, Float> {
@Override
public Float handleRequest(Double input, Context context) {
return input.floatValue();
}
}
// Inverted type parameters test cases
public static abstract | MultiLevelConcrete |
java | quarkusio__quarkus | extensions/kubernetes/vanilla/deployment/src/main/java/io/quarkus/kubernetes/deployment/InitTaskConfig.java | {
"start": 718,
"end": 1061
} | interface ____ {
/**
* The init task image to use by the init container.
*/
@WithDefault("groundnuty/k8s-wait-for:no-root-v1.7")
String image();
/**
* Image pull policy.
*/
@WithDefault("always")
ImagePullPolicy imagePullPolicy();
}
}
| InitTaskContainerConfig |
java | apache__camel | components/camel-xmlsecurity/src/generated/java/org/apache/camel/component/xmlsecurity/XmlSignerEndpointConfigurer.java | {
"start": 738,
"end": 13208
} | class ____ extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
XmlSignerEndpoint target = (XmlSignerEndpoint) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "addkeyinforeference":
case "addKeyInfoReference": target.getConfiguration().setAddKeyInfoReference(property(camelContext, java.lang.Boolean.class, value)); return true;
case "baseuri":
case "baseUri": target.getConfiguration().setBaseUri(property(camelContext, java.lang.String.class, value)); return true;
case "canonicalizationmethod":
case "canonicalizationMethod": target.getConfiguration().setCanonicalizationMethod(property(camelContext, javax.xml.crypto.AlgorithmMethod.class, value)); return true;
case "clearheaders":
case "clearHeaders": target.getConfiguration().setClearHeaders(property(camelContext, java.lang.Boolean.class, value)); return true;
case "contentobjectid":
case "contentObjectId": target.getConfiguration().setContentObjectId(property(camelContext, java.lang.String.class, value)); return true;
case "contentreferencetype":
case "contentReferenceType": target.getConfiguration().setContentReferenceType(property(camelContext, java.lang.String.class, value)); return true;
case "contentreferenceuri":
case "contentReferenceUri": target.getConfiguration().setContentReferenceUri(property(camelContext, java.lang.String.class, value)); return true;
case "cryptocontextproperties":
case "cryptoContextProperties": target.getConfiguration().setCryptoContextProperties(property(camelContext, java.util.Map.class, value)); return true;
case "digestalgorithm":
case "digestAlgorithm": target.getConfiguration().setDigestAlgorithm(property(camelContext, java.lang.String.class, value)); return true;
case "disallowdoctypedecl":
case "disallowDoctypeDecl": target.getConfiguration().setDisallowDoctypeDecl(property(camelContext, java.lang.Boolean.class, value)); return true;
case "keyaccessor":
case "keyAccessor": target.getConfiguration().setKeyAccessor(property(camelContext, org.apache.camel.component.xmlsecurity.api.KeyAccessor.class, value)); return true;
case "lazystartproducer":
case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
case "omitxmldeclaration":
case "omitXmlDeclaration": target.getConfiguration().setOmitXmlDeclaration(property(camelContext, java.lang.Boolean.class, value)); return true;
case "outputxmlencoding":
case "outputXmlEncoding": target.getConfiguration().setOutputXmlEncoding(property(camelContext, java.lang.String.class, value)); return true;
case "parentlocalname":
case "parentLocalName": target.getConfiguration().setParentLocalName(property(camelContext, java.lang.String.class, value)); return true;
case "parentnamespace":
case "parentNamespace": target.getConfiguration().setParentNamespace(property(camelContext, java.lang.String.class, value)); return true;
case "parentxpath":
case "parentXpath": target.getConfiguration().setParentXpath(property(camelContext, javax.xml.crypto.dsig.spec.XPathFilterParameterSpec.class, value)); return true;
case "plaintext":
case "plainText": target.getConfiguration().setPlainText(property(camelContext, java.lang.Boolean.class, value)); return true;
case "plaintextencoding":
case "plainTextEncoding": target.getConfiguration().setPlainTextEncoding(property(camelContext, java.lang.String.class, value)); return true;
case "prefixforxmlsignaturenamespace":
case "prefixForXmlSignatureNamespace": target.getConfiguration().setPrefixForXmlSignatureNamespace(property(camelContext, java.lang.String.class, value)); return true;
case "properties": target.getConfiguration().setProperties(property(camelContext, org.apache.camel.component.xmlsecurity.api.XmlSignatureProperties.class, value)); return true;
case "schemaresourceuri":
case "schemaResourceUri": target.getConfiguration().setSchemaResourceUri(property(camelContext, java.lang.String.class, value)); return true;
case "signaturealgorithm":
case "signatureAlgorithm": target.getConfiguration().setSignatureAlgorithm(property(camelContext, java.lang.String.class, value)); return true;
case "signatureid":
case "signatureId": target.getConfiguration().setSignatureId(property(camelContext, java.lang.String.class, value)); return true;
case "transformmethods":
case "transformMethods": target.getConfiguration().setTransformMethods(property(camelContext, java.util.List.class, value)); return true;
case "uridereferencer":
case "uriDereferencer": target.getConfiguration().setUriDereferencer(property(camelContext, javax.xml.crypto.URIDereferencer.class, value)); return true;
case "xpathstoidattributes":
case "xpathsToIdAttributes": target.getConfiguration().setXpathsToIdAttributes(property(camelContext, java.util.List.class, value)); return true;
default: return false;
}
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "addkeyinforeference":
case "addKeyInfoReference": return java.lang.Boolean.class;
case "baseuri":
case "baseUri": return java.lang.String.class;
case "canonicalizationmethod":
case "canonicalizationMethod": return javax.xml.crypto.AlgorithmMethod.class;
case "clearheaders":
case "clearHeaders": return java.lang.Boolean.class;
case "contentobjectid":
case "contentObjectId": return java.lang.String.class;
case "contentreferencetype":
case "contentReferenceType": return java.lang.String.class;
case "contentreferenceuri":
case "contentReferenceUri": return java.lang.String.class;
case "cryptocontextproperties":
case "cryptoContextProperties": return java.util.Map.class;
case "digestalgorithm":
case "digestAlgorithm": return java.lang.String.class;
case "disallowdoctypedecl":
case "disallowDoctypeDecl": return java.lang.Boolean.class;
case "keyaccessor":
case "keyAccessor": return org.apache.camel.component.xmlsecurity.api.KeyAccessor.class;
case "lazystartproducer":
case "lazyStartProducer": return boolean.class;
case "omitxmldeclaration":
case "omitXmlDeclaration": return java.lang.Boolean.class;
case "outputxmlencoding":
case "outputXmlEncoding": return java.lang.String.class;
case "parentlocalname":
case "parentLocalName": return java.lang.String.class;
case "parentnamespace":
case "parentNamespace": return java.lang.String.class;
case "parentxpath":
case "parentXpath": return javax.xml.crypto.dsig.spec.XPathFilterParameterSpec.class;
case "plaintext":
case "plainText": return java.lang.Boolean.class;
case "plaintextencoding":
case "plainTextEncoding": return java.lang.String.class;
case "prefixforxmlsignaturenamespace":
case "prefixForXmlSignatureNamespace": return java.lang.String.class;
case "properties": return org.apache.camel.component.xmlsecurity.api.XmlSignatureProperties.class;
case "schemaresourceuri":
case "schemaResourceUri": return java.lang.String.class;
case "signaturealgorithm":
case "signatureAlgorithm": return java.lang.String.class;
case "signatureid":
case "signatureId": return java.lang.String.class;
case "transformmethods":
case "transformMethods": return java.util.List.class;
case "uridereferencer":
case "uriDereferencer": return javax.xml.crypto.URIDereferencer.class;
case "xpathstoidattributes":
case "xpathsToIdAttributes": return java.util.List.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
XmlSignerEndpoint target = (XmlSignerEndpoint) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "addkeyinforeference":
case "addKeyInfoReference": return target.getConfiguration().getAddKeyInfoReference();
case "baseuri":
case "baseUri": return target.getConfiguration().getBaseUri();
case "canonicalizationmethod":
case "canonicalizationMethod": return target.getConfiguration().getCanonicalizationMethod();
case "clearheaders":
case "clearHeaders": return target.getConfiguration().getClearHeaders();
case "contentobjectid":
case "contentObjectId": return target.getConfiguration().getContentObjectId();
case "contentreferencetype":
case "contentReferenceType": return target.getConfiguration().getContentReferenceType();
case "contentreferenceuri":
case "contentReferenceUri": return target.getConfiguration().getContentReferenceUri();
case "cryptocontextproperties":
case "cryptoContextProperties": return target.getConfiguration().getCryptoContextProperties();
case "digestalgorithm":
case "digestAlgorithm": return target.getConfiguration().getDigestAlgorithm();
case "disallowdoctypedecl":
case "disallowDoctypeDecl": return target.getConfiguration().getDisallowDoctypeDecl();
case "keyaccessor":
case "keyAccessor": return target.getConfiguration().getKeyAccessor();
case "lazystartproducer":
case "lazyStartProducer": return target.isLazyStartProducer();
case "omitxmldeclaration":
case "omitXmlDeclaration": return target.getConfiguration().getOmitXmlDeclaration();
case "outputxmlencoding":
case "outputXmlEncoding": return target.getConfiguration().getOutputXmlEncoding();
case "parentlocalname":
case "parentLocalName": return target.getConfiguration().getParentLocalName();
case "parentnamespace":
case "parentNamespace": return target.getConfiguration().getParentNamespace();
case "parentxpath":
case "parentXpath": return target.getConfiguration().getParentXpath();
case "plaintext":
case "plainText": return target.getConfiguration().getPlainText();
case "plaintextencoding":
case "plainTextEncoding": return target.getConfiguration().getPlainTextEncoding();
case "prefixforxmlsignaturenamespace":
case "prefixForXmlSignatureNamespace": return target.getConfiguration().getPrefixForXmlSignatureNamespace();
case "properties": return target.getConfiguration().getProperties();
case "schemaresourceuri":
case "schemaResourceUri": return target.getConfiguration().getSchemaResourceUri();
case "signaturealgorithm":
case "signatureAlgorithm": return target.getConfiguration().getSignatureAlgorithm();
case "signatureid":
case "signatureId": return target.getConfiguration().getSignatureId();
case "transformmethods":
case "transformMethods": return target.getConfiguration().getTransformMethods();
case "uridereferencer":
case "uriDereferencer": return target.getConfiguration().getUriDereferencer();
case "xpathstoidattributes":
case "xpathsToIdAttributes": return target.getConfiguration().getXpathsToIdAttributes();
default: return null;
}
}
@Override
public Object getCollectionValueType(Object target, String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "cryptocontextproperties":
case "cryptoContextProperties": return java.lang.Object.class;
case "transformmethods":
case "transformMethods": return javax.xml.crypto.AlgorithmMethod.class;
case "xpathstoidattributes":
case "xpathsToIdAttributes": return javax.xml.crypto.dsig.spec.XPathFilterParameterSpec.class;
default: return null;
}
}
}
| XmlSignerEndpointConfigurer |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/jpa/criteria/Item.java | {
"start": 477,
"end": 1113
} | class ____ {
@Id
@GeneratedValue
private Integer id;
private String name;
@ElementCollection
@CollectionTable(name = "item_roles")
@Convert(converter = ItemAttributeConverter.class)
public List<Attribute> roles = new ArrayList<>();
Item() {
}
Item(String name) {
this.name = name;
}
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public List<Attribute> getRoles() {
return roles;
}
public void setRoles(List<Attribute> roles) {
this.roles = roles;
}
}
| Item |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/stages/LoadManifestsStage.java | {
"start": 10636,
"end": 11403
} | class ____ {
/**
* File where the listing has been saved.
*/
private final File entrySequenceFile;
/**
* Capacity for queue between manifest loader and the writers.
*/
private final int queueCapacity;
/**
* Arguments.
* @param entrySequenceFile path to local file to create for storing entries
* @param queueCapacity capacity of the queue
*/
public Arguments(
final File entrySequenceFile,
final int queueCapacity) {
this.entrySequenceFile = entrySequenceFile;
this.queueCapacity = queueCapacity;
}
private Path getEntrySequenceData() {
return new Path(entrySequenceFile.toURI());
}
}
/**
* Result of the stage.
*/
public static final | Arguments |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/CompileTimeConstantCheckerTest.java | {
"start": 3911,
"end": 4529
} | class ____ {
public static final String S = "Hello";
public void m(String s, @CompileTimeConstant String p) {}
public void r(String x) {
this.m(x, S + " World!");
}
}
""")
.doTest();
}
@Test
public void matches_identCallFailsWithNonConstant() {
compilationHelper
.addSourceLines(
"test/CompileTimeConstantTestCase.java",
"""
package test;
import com.google.errorprone.annotations.CompileTimeConstant;
public | CompileTimeConstantTestCase |
java | apache__flink | flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/operators/rank/async/AsyncStateFastTop1Function.java | {
"start": 2927,
"end": 6093
} | class ____ extends AbstractAsyncStateTopNFunction
implements CheckpointedFunction {
private static final long serialVersionUID = 1L;
private final TypeSerializer<RowData> inputRowSer;
private final long cacheSize;
// a value state stores the latest record
private transient ValueState<RowData> dataState;
private transient AsyncStateFastTop1Helper helper;
public AsyncStateFastTop1Function(
StateTtlConfig ttlConfig,
InternalTypeInfo<RowData> inputRowType,
GeneratedRecordComparator generatedSortKeyComparator,
RowDataKeySelector sortKeySelector,
RankType rankType,
RankRange rankRange,
boolean generateUpdateBefore,
boolean outputRankNumber,
long cacheSize) {
super(
ttlConfig,
inputRowType,
generatedSortKeyComparator,
sortKeySelector,
rankType,
rankRange,
generateUpdateBefore,
outputRankNumber);
this.inputRowSer = inputRowType.createSerializer(new SerializerConfigImpl());
this.cacheSize = cacheSize;
}
@Override
public void open(OpenContext openContext) throws Exception {
super.open(openContext);
ValueStateDescriptor<RowData> valueStateDescriptor =
new ValueStateDescriptor<>("Top1-Rank-State", inputRowType);
if (ttlConfig.isEnabled()) {
valueStateDescriptor.enableTimeToLive(ttlConfig);
}
dataState = getRuntimeContext().getState(valueStateDescriptor);
helper = new AsyncStateFastTop1Helper();
helper.registerMetric();
}
@Override
public void processElement(
RowData input,
KeyedProcessFunction<RowData, RowData, RowData>.Context ctx,
Collector<RowData> out)
throws Exception {
helper.accRequestCount();
// load state under current key if necessary
RowData currentKey = (RowData) keyContext.getCurrentKey();
RowData prevRowFromCache = helper.getPrevRowFromCache(currentKey);
StateFuture<RowData> prevRowFuture;
if (prevRowFromCache == null) {
prevRowFuture = dataState.asyncValue();
} else {
helper.accHitCount();
prevRowFuture = StateFutureUtils.completedFuture(prevRowFromCache);
}
prevRowFuture.thenAccept(
prevRow -> {
// first row under current key.
if (prevRow == null) {
helper.processAsFirstRow(input, currentKey, out);
} else {
helper.processWithPrevRow(input, currentKey, prevRow, out);
}
});
}
@Override
public void snapshotState(FunctionSnapshotContext context) throws Exception {
helper.flushAllCacheToState();
}
@Override
public void initializeState(FunctionInitializationContext context) throws Exception {
// nothing to do
}
private | AsyncStateFastTop1Function |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/oracle/createTable/OracleCreateTableTest24.java | {
"start": 1067,
"end": 3178
} | class ____ extends OracleTest {
public void test_types() throws Exception {
String sql = //
"CREATE TABLE locations_demo"
+ " ( location_id NUMBER(4) CONSTRAINT loc_id_pk PRIMARY KEY"
+ " , street_address VARCHAR2(40)"
+ " , postal_code VARCHAR2(12)"
+ " , city VARCHAR2(30)"
+ " , state_province VARCHAR2(25)"
+ " , country_id CHAR(2)"
+ " ) ;";
OracleStatementParser parser = new OracleStatementParser(sql);
List<SQLStatement> statementList = parser.parseStatementList();
SQLStatement stmt = statementList.get(0);
print(statementList);
assertEquals(1, statementList.size());
assertEquals("CREATE TABLE locations_demo ("
+ "\n\tlocation_id NUMBER(4)"//
+ "\n\t\tCONSTRAINT loc_id_pk PRIMARY KEY,"
+ "\n\tstreet_address VARCHAR2(40),"
+ "\n\tpostal_code VARCHAR2(12),"
+ "\n\tcity VARCHAR2(30),"
+ "\n\tstate_province VARCHAR2(25),"
+ "\n\tcountry_id CHAR(2)"
+ "\n);",
SQLUtils.toSQLString(stmt, JdbcConstants.ORACLE));
OracleSchemaStatVisitor visitor = new OracleSchemaStatVisitor();
stmt.accept(visitor);
System.out.println("Tables : " + visitor.getTables());
System.out.println("fields : " + visitor.getColumns());
System.out.println("coditions : " + visitor.getConditions());
System.out.println("relationships : " + visitor.getRelationships());
System.out.println("orderBy : " + visitor.getOrderByColumns());
assertEquals(1, visitor.getTables().size());
assertEquals(6, visitor.getColumns().size());
assertTrue(visitor.getColumns().contains(new TableStat.Column("locations_demo", "location_id")));
}
}
| OracleCreateTableTest24 |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueStore.java | {
"start": 1372,
"end": 9634
} | class ____ {
//This map is the single source of truth, this will store ALL queues
//using the queue path as the key
private final Map<String, CSQueue> fullNameQueues = new HashMap<>();
//this map will contain all short names and the paths they can be derived from
//this set is required for remove operation to properly set the short name
//mapping when the ambiguity is resolved.
private final Map<String, Set<String>> shortNameToLongNames = new HashMap<>();
//This map will store the result to the get calls to prevent unnecessary
//checks, this will be updated on queue add / remove
private final Map<String, CSQueue> getMap = new HashMap<>();
//this lock will be used to make sure isAmbiguous can be called parallel
//it will be only blocked during add / remove operations.
private ReadWriteLock modificationLock = new ReentrantReadWriteLock();
/**
* This getter method will return an immutable map with all the queues with
* queue path as the key.
* @return Map containing all queues and having path as key
*/
Map<String, CSQueue> getFullNameQueues() {
return ImmutableMap.copyOf(fullNameQueues);
}
/**
* This getter method will return an immutable map with all queues
* which can be disambiguously referenced by short name, using short name
* as the key.
* @return Map containing queues and having short name as key
*/
@VisibleForTesting
Map<String, CSQueue> getShortNameQueues() {
//this is not the most efficient way to create a short named list
//but this method is only used in tests
try {
modificationLock.readLock().lock();
return ImmutableMap.copyOf(
fullNameQueues
//getting all queues from path->queue map
.entrySet()
.stream()
//filtering the list to contain only disambiguous short names
.filter(
//keeping queues where get(queueShortname) == queue
//these are the ambigous references
entry -> getMap.get(entry.getValue().getQueueShortName())
== entry.getValue())
//making a map from the stream
.collect(
Collectors.toMap(
//using the queue's short name as key
entry->entry.getValue().getQueueShortName(),
//using the queue as value
entry->entry.getValue()))
);
} finally {
modificationLock.readLock().unlock();
}
}
/**
* This method will update the getMap for the short name provided, depending
* on how many queues are present with the same shortname.
* @param shortName The short name of the queue to be updated
*/
private void updateGetMapForShortName(String shortName) {
//we protect the root, since root can be both a full path and a short name
//we simply deny adding root as a shortname to the getMap.
if (shortName.equals(CapacitySchedulerConfiguration.ROOT)) {
return;
}
//getting all queues with the same short name
Set<String> fullNames = this.shortNameToLongNames.get(shortName);
//if there is only one queue we add it to the getMap
if (fullNames != null && fullNames.size() == 1) {
getMap.put(shortName,
fullNameQueues.get(fullNames.iterator().next()));
} else {
//in all other cases using only shortName cannot disambigously identifiy
//a queue
getMap.remove(shortName);
}
}
/**
* Method for adding a queue to the store.
* @param queue Queue to be added
*/
public void add(CSQueue queue) {
String fullName = queue.getQueuePath();
String shortName = queue.getQueueShortName();
try {
modificationLock.writeLock().lock();
fullNameQueues.put(fullName, queue);
getMap.put(fullName, queue);
//we only update short queue name ambiguity for non root queues
if (!shortName.equals(CapacitySchedulerConfiguration.ROOT)) {
//getting or creating the ambiguity set for the current queue
Set<String> fullNamesSet =
this.shortNameToLongNames.getOrDefault(shortName, new HashSet<>());
//adding the full name to the queue
fullNamesSet.add(fullName);
this.shortNameToLongNames.put(shortName, fullNamesSet);
}
//updating the getMap references for the queue
updateGetMapForShortName(shortName);
} finally {
modificationLock.writeLock().unlock();
}
}
/**
* Method for removing a queue from the store.
* @param queue The queue to be removed
*/
public void remove(CSQueue queue) {
//if no queue is specified, we can consider it already removed,
//also consistent with hashmap behaviour
if (queue == null) {
return;
}
try {
modificationLock.writeLock().lock();
String fullName = queue.getQueuePath();
String shortName = queue.getQueueShortName();
fullNameQueues.remove(fullName);
getMap.remove(fullName);
//we only update short queue name ambiguity for non root queues
if (!shortName.equals(CapacitySchedulerConfiguration.ROOT)) {
Set<String> fullNamesSet = this.shortNameToLongNames.get(shortName);
fullNamesSet.remove(fullName);
//if there are no more queues with the current short name, we simply
//remove the set to free up some memory
if (fullNamesSet.size() == 0) {
this.shortNameToLongNames.remove(shortName);
}
}
//updating the getMap references for the queue
updateGetMapForShortName(shortName);
} finally {
modificationLock.writeLock().unlock();
}
}
/**
* Method for removing a queue from the store by name.
* @param name A deterministic name for the queue to be removed
*/
public void remove(String name) {
CSQueue queue = get(name);
if (queue != null) {
remove(queue);
}
}
/**
* Returns a queue by looking it up by its fully qualified name.
* @param fullName The full name/path of the queue
* @return The queue or null if none found
*/
CSQueue getByFullName(String fullName) {
if (fullName == null) {
return null;
}
try {
modificationLock.readLock().lock();
return fullNameQueues.getOrDefault(fullName, null);
} finally {
modificationLock.readLock().unlock();
}
}
/**
* Check for name ambiguity returns true, if there are at least two queues
* with the same short name. Queue named "root" is protected, and it will
* always return the root queue regardless of ambiguity.
* @param shortName The short name to be checked for ambiguity
* @return true if there are at least two queues found false otherwise
*/
boolean isAmbiguous(String shortName) {
if (shortName == null) {
return false;
}
boolean ret = true;
try {
modificationLock.readLock().lock();
Set<String> fullNamesSet = this.shortNameToLongNames.get(shortName);
if (fullNamesSet == null || fullNamesSet.size() <= 1) {
ret = false;
}
} finally {
modificationLock.readLock().unlock();
}
return ret;
}
/**
* Getter method for the queue it can find queues by both full and
* short names.
* @param name Full or short name of the queue
* @return the queue
*/
public CSQueue get(String name) {
if (name == null) {
return null;
}
try {
modificationLock.readLock().lock();
return getMap.getOrDefault(name, null);
} finally {
modificationLock.readLock().unlock();
}
}
/**
* Clears the store, removes all queue references.
*/
public void clear() {
try {
modificationLock.writeLock().lock();
fullNameQueues.clear();
shortNameToLongNames.clear();
getMap.clear();
} finally {
modificationLock.writeLock().unlock();
}
}
/**
* Returns all queues as a list.
* @return List containing all the queues
*/
public Collection<CSQueue> getQueues() {
try {
modificationLock.readLock().lock();
return ImmutableList.copyOf(fullNameQueues.values());
} finally {
modificationLock.readLock().unlock();
}
}
}
| CSQueueStore |
java | elastic__elasticsearch | x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/results/ReservedFieldNamesTests.java | {
"start": 535,
"end": 1099
} | class ____ extends ESTestCase {
public void testIsValidFieldName() {
assertTrue(ReservedFieldNames.isValidFieldName("host"));
assertTrue(ReservedFieldNames.isValidFieldName("host.actual"));
assertFalse(ReservedFieldNames.isValidFieldName("actual.host"));
assertFalse(ReservedFieldNames.isValidFieldName(AnomalyRecord.BUCKET_SPAN.getPreferredName()));
assertFalse(ReservedFieldNames.isValidFieldName(GetResult._INDEX));
assertFalse(ReservedFieldNames.isValidFieldName(GetResult._ID));
}
}
| ReservedFieldNamesTests |
java | netty__netty | varhandle-stub/src/main/java/java/lang/invoke/VarHandle.java | {
"start": 3063,
"end": 3762
} | class ____ {
@MethodHandle.PolymorphicSignature
public native Object get(Object... args);
@MethodHandle.PolymorphicSignature
public native Object getAcquire(Object... args);
@MethodHandle.PolymorphicSignature
public native void set(Object... args);
@MethodHandle.PolymorphicSignature
public native void setRelease(Object... args);
@MethodHandle.PolymorphicSignature
public native Object getAndAdd(Object... args);
@MethodHandle.PolymorphicSignature
public native boolean compareAndSet(Object... args);
public static void storeStoreFence() {
throw new UnsupportedOperationException("Not implemented in varhandle-stub");
}
}
| VarHandle |
java | apache__camel | components/camel-aws/camel-aws2-sts/src/test/java/org/apache/camel/component/aws2/sts/STS2ProducerTest.java | {
"start": 1476,
"end": 4505
} | class ____ extends CamelTestSupport {
@BindToRegistry("amazonStsClient")
AmazonSTSClientMock clientMock = new AmazonSTSClientMock();
@EndpointInject("mock:result")
private MockEndpoint mock;
@Test
public void stsAssumeRoleTest() throws Exception {
mock.expectedMessageCount(1);
Exchange exchange = template.request("direct:assumeRole", new Processor() {
@Override
public void process(Exchange exchange) {
exchange.getIn().setHeader(STS2Constants.OPERATION, STS2Operations.assumeRole);
exchange.getIn().setHeader(STS2Constants.ROLE_ARN, "arn");
exchange.getIn().setHeader(STS2Constants.ROLE_SESSION_NAME, "sessionarn");
}
});
MockEndpoint.assertIsSatisfied(context);
AssumeRoleResponse resultGet = (AssumeRoleResponse) exchange.getIn().getBody();
assertEquals("arn", resultGet.assumedRoleUser().arn());
}
@Test
public void stsGetSessionTokenTest() throws Exception {
mock.expectedMessageCount(1);
Exchange exchange = template.request("direct:getSessionToken", new Processor() {
@Override
public void process(Exchange exchange) {
exchange.getIn().setHeader(STS2Constants.OPERATION, STS2Operations.getSessionToken);
}
});
MockEndpoint.assertIsSatisfied(context);
GetSessionTokenResponse resultGet = (GetSessionTokenResponse) exchange.getIn().getBody();
assertEquals("xxx", resultGet.credentials().accessKeyId());
}
@Test
public void stsGetFederationTokenTest() throws Exception {
mock.expectedMessageCount(1);
Exchange exchange = template.request("direct:getFederationToken", new Processor() {
@Override
public void process(Exchange exchange) {
exchange.getIn().setHeader(STS2Constants.OPERATION, STS2Operations.getFederationToken);
exchange.getIn().setHeader(STS2Constants.FEDERATED_NAME, "federation-account");
}
});
MockEndpoint.assertIsSatisfied(context);
GetFederationTokenResponse resultGet = (GetFederationTokenResponse) exchange.getIn().getBody();
assertEquals("xxx", resultGet.credentials().accessKeyId());
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:assumeRole").to("aws2-sts://test?stsClient=#amazonStsClient&operation=assumeRole")
.to("mock:result");
from("direct:getSessionToken").to("aws2-sts://test?stsClient=#amazonStsClient&operation=getSessionToken")
.to("mock:result");
from("direct:getFederationToken").to("aws2-sts://test?stsClient=#amazonStsClient&operation=getFederationToken")
.to("mock:result");
}
};
}
}
| STS2ProducerTest |
java | spring-projects__spring-boot | core/spring-boot/src/test/java/org/springframework/boot/context/ConfigurationWarningsApplicationContextInitializerTests.java | {
"start": 2338,
"end": 4849
} | class ____ {
private static final String DEFAULT_SCAN_WARNING = "Your ApplicationContext is unlikely to "
+ "start due to a @ComponentScan of the default package.";
private static final String ORGSPRING_SCAN_WARNING = "Your ApplicationContext is unlikely to "
+ "start due to a @ComponentScan of 'org.springframework'.";
@Test
void logWarningInDefaultPackage(CapturedOutput output) {
load(InDefaultPackageConfiguration.class);
assertThat(output).contains(DEFAULT_SCAN_WARNING);
}
@Test
void logWarningInDefaultPackageAndMetaAnnotation(CapturedOutput output) {
load(InDefaultPackageWithMetaAnnotationConfiguration.class);
assertThat(output).contains(DEFAULT_SCAN_WARNING);
}
@Test
void noLogIfInRealPackage(CapturedOutput output) {
load(InRealPackageConfiguration.class);
assertThat(output).doesNotContain(DEFAULT_SCAN_WARNING);
}
@Test
void noLogWithoutComponentScanAnnotation(CapturedOutput output) {
load(InDefaultPackageWithoutScanConfiguration.class);
assertThat(output).doesNotContain(DEFAULT_SCAN_WARNING);
}
@Test
void noLogIfHasValue(CapturedOutput output) {
load(InDefaultPackageWithValueConfiguration.class);
assertThat(output).doesNotContain(DEFAULT_SCAN_WARNING);
}
@Test
void noLogIfHasBasePackages(CapturedOutput output) {
load(InDefaultPackageWithBasePackagesConfiguration.class);
assertThat(output).doesNotContain(DEFAULT_SCAN_WARNING);
}
@Test
void noLogIfHasBasePackageClasses(CapturedOutput output) {
load(InDefaultPackageWithBasePackageClassesConfiguration.class);
assertThat(output).doesNotContain(DEFAULT_SCAN_WARNING);
}
@Test
void logWarningInOrgSpringPackage(CapturedOutput output) {
load(InOrgSpringPackageConfiguration.class);
assertThat(output).contains(ORGSPRING_SCAN_WARNING);
}
@Test
void logWarningIfScanningProblemPackages(CapturedOutput output) {
load(InRealButScanningProblemPackages.class);
assertThat(output).contains("Your ApplicationContext is unlikely to start due to a "
+ "@ComponentScan of the default package, 'org.springframework'.");
}
private void load(Class<?> configClass) {
try (AnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext()) {
new TestConfigurationWarningsApplicationContextInitializer().initialize(context);
context.register(configClass);
context.refresh();
}
}
/**
* Testable version of {@link ConfigurationWarningsApplicationContextInitializer}.
*/
static | ConfigurationWarningsApplicationContextInitializerTests |
java | spring-projects__spring-boot | core/spring-boot-autoconfigure/src/test/java/org/springframework/boot/autoconfigure/condition/ConditionalOnMissingBeanWithFilteredClasspathTests.java | {
"start": 1718,
"end": 1970
} | class ____ {
@Bean
@ConditionalOnMissingBean(
type = "org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBeanWithFilteredClasspathTests.TestCacheManager")
String foo() {
return "foo";
}
}
static | OnBeanTypeConfiguration |
java | quarkusio__quarkus | integration-tests/hibernate-orm-panache/src/test/java/io/quarkus/it/panache/defaultpu/TransactionalRepositoryTest.java | {
"start": 213,
"end": 598
} | class ____ {
@Inject
BeerRepository beerRepository;
@Test
public void testTransactionalRepository() {
// Make sure there are no beers stored
beerRepository.deleteAll();
Beer b = new Beer();
b.name = "IPA";
beerRepository.persist(b);
Assertions.assertEquals(1, beerRepository.count());
}
}
| TransactionalRepositoryTest |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockPlacementStatusWithUpgradeDomain.java | {
"start": 1328,
"end": 5361
} | class ____ {
private Set<String> upgradeDomains;
private BlockPlacementStatusDefault bpsd =
mock(BlockPlacementStatusDefault.class);
@BeforeEach
public void setup() {
upgradeDomains = new HashSet<String>();
upgradeDomains.add("1");
upgradeDomains.add("2");
upgradeDomains.add("3");
when(bpsd.isPlacementPolicySatisfied()).thenReturn(true);
}
@Test
public void testIsPolicySatisfiedParentFalse() {
when(bpsd.isPlacementPolicySatisfied()).thenReturn(false);
when(bpsd.getAdditionalReplicasRequired()).thenReturn(1);
BlockPlacementStatusWithUpgradeDomain bps =
new BlockPlacementStatusWithUpgradeDomain(bpsd, upgradeDomains, 3, 3);
// Parent policy is not satisfied but upgrade domain policy is
assertFalse(bps.isPlacementPolicySatisfied());
assertEquals(1, bps.getAdditionalReplicasRequired());
}
@Test
public void testIsPolicySatisfiedAllEqual() {
BlockPlacementStatusWithUpgradeDomain bps =
new BlockPlacementStatusWithUpgradeDomain(bpsd, upgradeDomains, 3, 3);
// Number of domains, replicas and upgradeDomainFactor is equal and parent
// policy is satisfied
assertTrue(bps.isPlacementPolicySatisfied());
assertEquals(0, bps.getAdditionalReplicasRequired());
}
@Test
public void testIsPolicySatisfiedSmallDomains() {
// Number of domains is less than replicas but equal to factor
BlockPlacementStatusWithUpgradeDomain bps =
new BlockPlacementStatusWithUpgradeDomain(bpsd, upgradeDomains, 4, 3);
assertTrue(bps.isPlacementPolicySatisfied());
assertEquals(0, bps.getAdditionalReplicasRequired());
// Same as above but replicas is greater than factor
bps = new BlockPlacementStatusWithUpgradeDomain(bpsd, upgradeDomains, 4, 2);
assertTrue(bps.isPlacementPolicySatisfied());
assertEquals(0, bps.getAdditionalReplicasRequired());
// Number of domains is less than replicas and factor
bps = new BlockPlacementStatusWithUpgradeDomain(bpsd, upgradeDomains, 4, 4);
assertFalse(bps.isPlacementPolicySatisfied());
assertEquals(1, bps.getAdditionalReplicasRequired());
}
@Test
public void testIsPolicySatisfiedSmallReplicas() {
// Replication factor 1 file
upgradeDomains.clear();
upgradeDomains.add("1");
BlockPlacementStatusWithUpgradeDomain bps =
new BlockPlacementStatusWithUpgradeDomain(bpsd, upgradeDomains, 1, 3);
assertTrue(bps.isPlacementPolicySatisfied());
assertEquals(0, bps.getAdditionalReplicasRequired());
// Replication factor 2 file, but one domain
bps =
new BlockPlacementStatusWithUpgradeDomain(bpsd, upgradeDomains, 2, 3);
assertFalse(bps.isPlacementPolicySatisfied());
assertEquals(1, bps.getAdditionalReplicasRequired());
// Replication factor 2 file, but two domains
upgradeDomains.add("2");
bps =
new BlockPlacementStatusWithUpgradeDomain(bpsd, upgradeDomains, 2, 3);
assertTrue(bps.isPlacementPolicySatisfied());
assertEquals(0, bps.getAdditionalReplicasRequired());
}
@Test
public void testPolicyIsNotSatisfiedInsufficientDomains() {
// Insufficient Domains - 1 domain, replication factor 3
upgradeDomains.clear();
upgradeDomains.add("1");
BlockPlacementStatusWithUpgradeDomain bps =
new BlockPlacementStatusWithUpgradeDomain(bpsd, upgradeDomains, 3, 3);
assertFalse(bps.isPlacementPolicySatisfied());
assertEquals(2, bps.getAdditionalReplicasRequired());
// One domain, replication factor 2 file
bps =
new BlockPlacementStatusWithUpgradeDomain(bpsd, upgradeDomains, 2, 3);
assertFalse(bps.isPlacementPolicySatisfied());
assertEquals(1, bps.getAdditionalReplicasRequired());
// 2 domains, replication factor 3
upgradeDomains.add("2");
bps =
new BlockPlacementStatusWithUpgradeDomain(bpsd, upgradeDomains, 3, 3);
assertFalse(bps.isPlacementPolicySatisfied());
assertEquals(1, bps.getAdditionalReplicasRequired());
}
}
| TestBlockPlacementStatusWithUpgradeDomain |
java | google__dagger | dagger-compiler/main/java/dagger/internal/codegen/writing/DirectInstanceBindingRepresentation.java | {
"start": 1457,
"end": 5935
} | class ____ {
private final ContributionBinding binding;
private final BindingGraph graph;
private final ComponentImplementation componentImplementation;
private final ComponentMethodRequestRepresentation.Factory
componentMethodRequestRepresentationFactory;
private final ImmediateFutureRequestRepresentation.Factory
immediateFutureRequestRepresentationFactory;
private final PrivateMethodRequestRepresentation.Factory
privateMethodRequestRepresentationFactory;
private final UnscopedDirectInstanceRequestRepresentationFactory
unscopedDirectInstanceRequestRepresentationFactory;
private final Map<BindingRequest, RequestRepresentation> requestRepresentations = new HashMap<>();
@AssistedInject
DirectInstanceBindingRepresentation(
@Assisted ContributionBinding binding,
BindingGraph graph,
ComponentImplementation componentImplementation,
ComponentMethodRequestRepresentation.Factory componentMethodRequestRepresentationFactory,
ImmediateFutureRequestRepresentation.Factory immediateFutureRequestRepresentationFactory,
PrivateMethodRequestRepresentation.Factory privateMethodRequestRepresentationFactory,
UnscopedDirectInstanceRequestRepresentationFactory
unscopedDirectInstanceRequestRepresentationFactory) {
this.binding = binding;
this.graph = graph;
this.componentImplementation = componentImplementation;
this.componentMethodRequestRepresentationFactory = componentMethodRequestRepresentationFactory;
this.immediateFutureRequestRepresentationFactory = immediateFutureRequestRepresentationFactory;
this.privateMethodRequestRepresentationFactory = privateMethodRequestRepresentationFactory;
this.unscopedDirectInstanceRequestRepresentationFactory =
unscopedDirectInstanceRequestRepresentationFactory;
}
public RequestRepresentation getRequestRepresentation(BindingRequest request) {
return reentrantComputeIfAbsent(
requestRepresentations, request, this::getRequestRepresentationUncached);
}
private RequestRepresentation getRequestRepresentationUncached(BindingRequest request) {
switch (request.requestKind()) {
case INSTANCE:
return requiresMethodEncapsulation(binding)
? wrapInMethod(unscopedDirectInstanceRequestRepresentationFactory.create(binding))
: unscopedDirectInstanceRequestRepresentationFactory.create(binding);
case FUTURE:
return immediateFutureRequestRepresentationFactory.create(
getRequestRepresentation(bindingRequest(binding.key(), RequestKind.INSTANCE)),
binding.key().type().xprocessing());
default:
throw new AssertionError(
String.format("Invalid binding request kind: %s", request.requestKind()));
}
}
/**
* Returns a binding expression that uses a given one as the body of a method that users call. If
* a component provision method matches it, it will be the method implemented. If it does not
* match a component provision method and the binding is modifiable, then a new public modifiable
* binding method will be written. If the binding doesn't match a component method and is not
* modifiable, then a new private method will be written.
*/
RequestRepresentation wrapInMethod(RequestRepresentation bindingExpression) {
// If we've already wrapped the expression, then use the delegate.
if (bindingExpression instanceof MethodRequestRepresentation) {
return bindingExpression;
}
BindingRequest request = bindingRequest(binding.key(), RequestKind.INSTANCE);
Optional<ComponentMethodDescriptor> matchingComponentMethod =
graph.findFirstMatchingComponentMethod(request);
ShardImplementation shardImplementation = componentImplementation.shardImplementation(binding);
// Consider the case of a request from a component method like:
//
// DaggerMyComponent extends MyComponent {
// @Overrides
// Foo getFoo() {
// <FOO_BINDING_REQUEST>
// }
// }
//
// Normally, in this case we would return a ComponentMethodRequestRepresentation rather than a
// PrivateMethodRequestRepresentation so that #getFoo() can inline the implementation rather
// than
// create an unnecessary private method and return that. However, with sharding we don't want to
// inline the implementation because that would defeat some of the | DirectInstanceBindingRepresentation |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/bug/KeyBug_for_zhongl.java | {
"start": 187,
"end": 819
} | class ____ extends TestCase {
public void testCustomedKey() throws Exception {
Assert.assertEquals("{\"uid\":1}", JSON.toJSONString(new V2(1)));
}
public void testDeserialize() throws Exception {
Assert.assertEquals(123, JSON.parseObject("{\"uid\":123}", V2.class).id);
}
public void testCustomedKey_static() throws Exception {
Assert.assertEquals("{\"uid\":1}", JSON.toJSONString(new VO(1)));
}
public void testDeserialize_static() throws Exception {
Assert.assertEquals(123, JSON.parseObject("{\"uid\":123}", VO.class).id);
}
public static | KeyBug_for_zhongl |
java | google__dagger | javatests/dagger/hilt/android/testsubpackage/PackagePrivateConstructorTestClasses.java | {
"start": 2072,
"end": 2215
} | class ____ extends BroadcastReceiver {
public BaseBroadcastReceiver() {}
BaseBroadcastReceiver(int unused) {}
}
}
| BaseBroadcastReceiver |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/monitor/probe/TestDefaultProbe.java | {
"start": 1634,
"end": 6516
} | class ____ {
private DefaultProbe probe;
public void initTestDefaultProbe(Probe paramProbe) {
this.probe = (DefaultProbe) paramProbe;
}
public static Collection<Object[]> data() {
// test run 1: Default probe checks that container has an IP
Probe p1 = MonitorUtils.getProbe(null);
// test run 2: Default probe with DNS check for component instance hostname
ReadinessCheck rc2 = new ReadinessCheck()
.type(ReadinessCheck.TypeEnum.DEFAULT)
.properties(Collections.singletonMap(
MonitorKeys.DEFAULT_PROBE_DNS_CHECK_ENABLED, "true"));
Probe p2 = MonitorUtils.getProbe(rc2);
// test run 3: Default probe with DNS check using specific DNS server
Map<String, String> props = new HashMap<>();
props.put(MonitorKeys.DEFAULT_PROBE_DNS_CHECK_ENABLED, "true");
props.put(MonitorKeys.DEFAULT_PROBE_DNS_ADDRESS, "8.8.8.8");
ReadinessCheck rc3 = new ReadinessCheck()
.type(ReadinessCheck.TypeEnum.DEFAULT).properties(props);
Probe p3 = MonitorUtils.getProbe(rc3);
return Arrays.asList(new Object[][] {{p1}, {p2}, {p3}});
}
@MethodSource("data")
@ParameterizedTest
public void testDefaultProbe(Probe paramProbe) {
initTestDefaultProbe(paramProbe);
// component instance has a good hostname, so probe will eventually succeed
// whether or not DNS checking is enabled
ComponentInstance componentInstance =
createMockComponentInstance("example.com");
checkPingResults(probe, componentInstance, false);
// component instance has a bad hostname, so probe will fail when DNS
// checking is enabled
componentInstance = createMockComponentInstance("bad.dns.test");
checkPingResults(probe, componentInstance, probe.isDnsCheckEnabled());
}
private static void checkPingResults(Probe probe, ComponentInstance
componentInstance, boolean expectDNSCheckFailure) {
// on the first ping, null container status results in failure
ProbeStatus probeStatus = probe.ping(componentInstance);
assertFalse(probeStatus.isSuccess(),
"Expected failure for " + probeStatus.toString());
assertTrue(probeStatus.toString().contains(
componentInstance.getCompInstanceName() + ": IP is not available yet"),
"Expected IP failure for " + probeStatus.toString());
// on the second ping, container status is retrieved but there are no
// IPs, resulting in failure
probeStatus = probe.ping(componentInstance);
assertFalse(probeStatus.isSuccess(),
"Expected failure for " + probeStatus.toString());
assertTrue(probeStatus.toString().contains(componentInstance
.getCompInstanceName() + ": IP is not available yet"),
"Expected IP failure for " + probeStatus.toString());
// on the third ping, IPs are retrieved and success depends on whether or
// not a DNS lookup can be performed for the component instance hostname
probeStatus = probe.ping(componentInstance);
if (expectDNSCheckFailure) {
assertFalse(probeStatus.isSuccess(),
"Expected failure for " + probeStatus.toString());
assertTrue(probeStatus.toString().contains(componentInstance
.getCompInstanceName() + ": DNS checking is enabled, but lookup" +
" for " + componentInstance.getHostname() + " is not available " +
"yet"), "Expected DNS failure for " + probeStatus.toString());
} else {
assertTrue(probeStatus.isSuccess(),
"Expected success for " + probeStatus.toString());
}
}
private static ComponentInstance createMockComponentInstance(String
hostname) {
ComponentInstance componentInstance = mock(ComponentInstance.class);
when(componentInstance.getHostname()).thenReturn(hostname);
when(componentInstance.getCompInstanceName()).thenReturn("comp-0");
when(componentInstance.getContainerStatus())
.thenAnswer(new Answer<ContainerStatus>() {
private int count = 0;
@Override
public ContainerStatus answer(InvocationOnMock invocationOnMock) {
count++;
if (count == 1) {
// first call to getContainerStatus returns null
return null;
} else if (count == 2) {
// second call returns a ContainerStatus with no IPs
ContainerStatus containerStatus = mock(ContainerStatus.class);
when(containerStatus.getIPs()).thenReturn(null);
return containerStatus;
} else {
// third call returns a ContainerStatus with one IP
ContainerStatus containerStatus = mock(ContainerStatus.class);
when(containerStatus.getIPs())
.thenReturn(Collections.singletonList("1.2.3.4"));
return containerStatus;
}
}
});
return componentInstance;
}
}
| TestDefaultProbe |
java | elastic__elasticsearch | x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLServiceTests.java | {
"start": 54246,
"end": 58923
} | class ____ implements FutureCallback<HttpResponse> {
@Override
public void completed(HttpResponse result) {
assertThat(result.getStatusLine().getStatusCode(), lessThan(300));
}
@Override
public void failed(Exception ex) {
logger.error(ex);
fail(ex.toString());
}
@Override
public void cancelled() {
fail("The request was cancelled for some reason");
}
}
public void testLoadProfilesFromExtensions() {
final Map<SslProfileExtension, Map<String, SslProfile>> appliedProfiles = new HashMap<>();
final Map<String, Tuple<SslClientAuthenticationMode, SslVerificationMode>> allExtensionsPrefixes = new HashMap<>();
final Settings.Builder settings = Settings.builder().put(env.settings()).put("xpack.http.ssl.verification_mode", "certificate");
final List<SslProfileExtension> extensions = randomList(1, 3, () -> {
final Set<String> prefixes = randomSet(
1,
3,
() -> randomValueOtherThanMany(
allExtensionsPrefixes::containsKey,
() -> randomAlphaOfLengthBetween(3, 8) + "." + randomAlphaOfLengthBetween(5, 10) + ".ssl"
)
);
for (var prefix : prefixes) {
final SslClientAuthenticationMode clientAuthMode = randomFrom(SslClientAuthenticationMode.values());
final SslVerificationMode verificationMode = randomFrom(SslVerificationMode.values());
settings.put(prefix + ".client_authentication", clientAuthMode.name().toLowerCase(Locale.ROOT));
settings.put(prefix + ".verification_mode", verificationMode.name().toLowerCase(Locale.ROOT));
allExtensionsPrefixes.put(prefix, new Tuple<>(clientAuthMode, verificationMode));
}
return new SslProfileExtension() {
@Override
public Set<String> getSettingPrefixes() {
return prefixes;
}
@Override
public void applyProfile(String name, SslProfile profile) {
appliedProfiles.computeIfAbsent(this, ignore -> new HashMap<>()).put(name, profile);
}
};
});
env = newEnvironment(settings.build());
final SSLService.LoadedSslConfigurations loadedConfiguration = SSLService.getSSLConfigurations(env, extensions);
assertThat(loadedConfiguration.extensions().keySet(), equalTo(allExtensionsPrefixes.keySet()));
for (var ext : extensions) {
for (var ctx : ext.getSettingPrefixes()) {
assertThat(loadedConfiguration.extensions(), hasEntry(ctx, ext));
final SslConfiguration cfg = loadedConfiguration.configuration(ctx);
assertThat(cfg, notNullValue());
assertThat(cfg.clientAuth(), equalTo(allExtensionsPrefixes.get(ctx).v1()));
assertThat(cfg.verificationMode(), equalTo(allExtensionsPrefixes.get(ctx).v2()));
}
}
assertThat(appliedProfiles, aMapWithSize(0));
final SSLService service = new SSLService(env, loadedConfiguration);
assertThat(appliedProfiles.keySet(), equalTo(Set.copyOf(extensions)));
for (var ext : extensions) {
for (var ctx : ext.getSettingPrefixes()) {
var profile = appliedProfiles.get(ext).get(ctx);
assertThat(profile, notNullValue());
assertThat(profile.configuration(), equalTo(loadedConfiguration.configuration(ctx)));
assertThat(service.profile(ctx), sameInstance(profile));
}
}
}
private CloseableHttpAsyncClient getAsyncHttpClient(SSLIOSessionStrategy sslStrategy) throws Exception {
try {
return AccessController.doPrivileged(
(PrivilegedExceptionAction<CloseableHttpAsyncClient>) () -> HttpAsyncClientBuilder.create()
.setSSLStrategy(sslStrategy)
.build()
);
} catch (PrivilegedActionException e) {
throw (Exception) e.getCause();
}
}
private static void privilegedConnect(CheckedRunnable<Exception> runnable) throws Exception {
try {
AccessController.doPrivileged((PrivilegedExceptionAction<Void>) () -> {
runnable.run();
return null;
});
} catch (PrivilegedActionException e) {
throw (Exception) e.getCause();
}
}
private static final | AssertionCallback |
java | apache__camel | dsl/camel-yaml-dsl/camel-yaml-dsl-deserializers/src/generated/java/org/apache/camel/dsl/yaml/deserializers/ModelDeserializers.java | {
"start": 561873,
"end": 564288
} | class ____ extends YamlDeserializerBase<LangChain4jSentenceTokenizerDefinition> {
public LangChain4jSentenceTokenizerDefinitionDeserializer() {
super(LangChain4jSentenceTokenizerDefinition.class);
}
@Override
protected LangChain4jSentenceTokenizerDefinition newInstance() {
return new LangChain4jSentenceTokenizerDefinition();
}
@Override
protected boolean setProperty(LangChain4jSentenceTokenizerDefinition target,
String propertyKey, String propertyName, Node node) {
propertyKey = org.apache.camel.util.StringHelper.dashToCamelCase(propertyKey);
switch(propertyKey) {
case "id": {
String val = asText(node);
target.setId(val);
break;
}
case "maxOverlap": {
String val = asText(node);
target.setMaxOverlap(val);
break;
}
case "maxTokens": {
String val = asText(node);
target.setMaxTokens(val);
break;
}
case "modelName": {
String val = asText(node);
target.setModelName(val);
break;
}
case "tokenizerType": {
String val = asText(node);
target.setTokenizerType(val);
break;
}
default: {
return false;
}
}
return true;
}
}
@YamlType(
types = org.apache.camel.model.tokenizer.LangChain4jTokenizerDefinition.class,
order = org.apache.camel.dsl.yaml.common.YamlDeserializerResolver.ORDER_LOWEST - 1,
properties = {
@YamlProperty(name = "id", type = "string"),
@YamlProperty(name = "maxOverlap", type = "number", required = true),
@YamlProperty(name = "maxTokens", type = "number", required = true),
@YamlProperty(name = "modelName", type = "string"),
@YamlProperty(name = "tokenizerType", type = "enum:OPEN_AI,AZURE,QWEN")
}
)
public static | LangChain4jSentenceTokenizerDefinitionDeserializer |
java | elastic__elasticsearch | x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/client/AmazonBedrockRequestSenderTests.java | {
"start": 2435,
"end": 7855
} | class ____ extends ESTestCase {
private static final TimeValue TIMEOUT = new TimeValue(30, TimeUnit.SECONDS);
private ThreadPool threadPool;
private final AtomicReference<Thread> threadRef = new AtomicReference<>();
@Before
public void init() throws Exception {
threadPool = createThreadPool(inferenceUtilityExecutors());
threadRef.set(null);
}
@After
public void shutdown() throws IOException, InterruptedException {
if (threadRef.get() != null) {
threadRef.get().join(TIMEOUT.millis());
}
terminate(threadPool);
}
public void testCreateSender_UsesTheSameInstanceForRequestExecutor() throws Exception {
var requestSender = new AmazonBedrockMockExecuteRequestSender(new AmazonBedrockMockClientCache(), mock(ThrottlerManager.class));
requestSender.enqueue(AmazonBedrockExecutorTests.getTestInvokeResult(TEST_AMAZON_TITAN_EMBEDDINGS_RESULT));
var senderFactory = createSenderFactory(threadPool, Settings.EMPTY, requestSender);
var sender1 = createSender(senderFactory);
var sender2 = createSender(senderFactory);
assertThat(sender1, instanceOf(AmazonBedrockRequestSender.class));
assertThat(sender2, instanceOf(AmazonBedrockRequestSender.class));
assertThat(sender1, sameInstance(sender2));
}
public void testCreateSender_CanCallStartMultipleTimes() throws Exception {
var requestSender = new AmazonBedrockMockExecuteRequestSender(new AmazonBedrockMockClientCache(), mock(ThrottlerManager.class));
requestSender.enqueue(AmazonBedrockExecutorTests.getTestInvokeResult(TEST_AMAZON_TITAN_EMBEDDINGS_RESULT));
var senderFactory = createSenderFactory(threadPool, Settings.EMPTY, requestSender);
try (var sender = createSender(senderFactory)) {
sender.startSynchronously();
sender.startSynchronously();
sender.startSynchronously();
}
}
public void testCreateSender_SendsEmbeddingsRequestAndReceivesResponse() throws Exception {
var requestSender = new AmazonBedrockMockExecuteRequestSender(new AmazonBedrockMockClientCache(), mock(ThrottlerManager.class));
requestSender.enqueue(AmazonBedrockExecutorTests.getTestInvokeResult(TEST_AMAZON_TITAN_EMBEDDINGS_RESULT));
var senderFactory = createSenderFactory(threadPool, Settings.EMPTY, requestSender);
try (var sender = createSender(senderFactory)) {
sender.startSynchronously();
var model = AmazonBedrockEmbeddingsModelTests.createModel(
"test_id",
"test_region",
"test_model",
AmazonBedrockProvider.AMAZONTITAN,
"accesskey",
"secretkey"
);
PlainActionFuture<InferenceServiceResults> listener = new PlainActionFuture<>();
var serviceComponents = ServiceComponentsTests.createWithEmptySettings(threadPool);
var requestManager = new AmazonBedrockEmbeddingsRequestManager(
model,
serviceComponents.truncator(),
threadPool,
new TimeValue(30, TimeUnit.SECONDS)
);
sender.send(requestManager, new EmbeddingsInput(List.of("abc"), null), null, listener);
var result = listener.actionGet(TIMEOUT);
assertThat(result.asMap(), is(buildExpectationFloat(List.of(new float[] { 0.123F, 0.456F, 0.678F, 0.789F }))));
}
}
public void testCreateSender_SendsCompletionRequestAndReceivesResponse() throws Exception {
var requestSender = new AmazonBedrockMockExecuteRequestSender(new AmazonBedrockMockClientCache(), mock(ThrottlerManager.class));
requestSender.enqueue(AmazonBedrockExecutorTests.getTestConverseResult("test response text"));
var senderFactory = createSenderFactory(threadPool, Settings.EMPTY, requestSender);
try (var sender = createSender(senderFactory)) {
sender.startSynchronously();
var model = AmazonBedrockChatCompletionModelTests.createModel(
"test_id",
"test_region",
"test_model",
AmazonBedrockProvider.AMAZONTITAN,
"accesskey",
"secretkey"
);
PlainActionFuture<InferenceServiceResults> listener = new PlainActionFuture<>();
var requestManager = new AmazonBedrockChatCompletionRequestManager(model, threadPool, new TimeValue(30, TimeUnit.SECONDS));
sender.send(requestManager, new ChatCompletionInput(List.of("abc")), null, listener);
var result = listener.actionGet(TIMEOUT);
assertThat(result.asMap(), is(buildExpectationCompletion(List.of("test response text"))));
}
}
public static AmazonBedrockRequestSender.Factory createSenderFactory(
ThreadPool threadPool,
Settings settings,
AmazonBedrockMockExecuteRequestSender requestSender
) {
return new AmazonBedrockRequestSender.Factory(
ServiceComponentsTests.createWithSettings(threadPool, settings),
mockClusterServiceEmpty(),
requestSender
);
}
public static Sender createSender(AmazonBedrockRequestSender.Factory factory) {
return factory.createSender();
}
}
| AmazonBedrockRequestSenderTests |
java | spring-projects__spring-boot | module/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/endpoint/annotation/EndpointExtension.java | {
"start": 2369,
"end": 2443
} | class ____ to extend
*/
Class<?> endpoint() default Void.class;
}
| endpoint |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/protocolPB/TestFSSerialization.java | {
"start": 1277,
"end": 3528
} | class ____ {
@Test
@SuppressWarnings("deprecation")
public void testWritableFlagSerialization() throws Exception {
final Path p = new Path("hdfs://yaks:4344/dingos/f");
for (int i = 0; i < 0x8; ++i) {
final boolean acl = 0 != (i & 0x1);
final boolean crypt = 0 != (i & 0x2);
final boolean ec = 0 != (i & 0x4);
FileStatus stat = new FileStatus(1024L, false, 3, 1L << 31,
12345678L, 87654321L, FsPermission.getFileDefault(),
"hadoop", "unqbbc", null, p, acl, crypt, ec);
DataOutputBuffer dob = new DataOutputBuffer();
stat.write(dob);
DataInputBuffer dib = new DataInputBuffer();
dib.reset(dob.getData(), 0, dob.getLength());
FileStatus fstat = new FileStatus();
fstat.readFields(dib);
assertEquals(stat, fstat);
checkFields(stat, fstat);
}
}
@Test
public void testUtilitySerialization() throws Exception {
final Path p = new Path("hdfs://yaks:4344/dingos/f");
FileStatus stat = new FileStatus(1024L, false, 3, 1L << 31,
12345678L, 87654321L, FsPermission.createImmutable((short)0111),
"hadoop", "unqbbc", null, p);
FileStatusProto fsp = PBHelper.convert(stat);
FileStatus stat2 = PBHelper.convert(fsp);
assertEquals(stat, stat2);
checkFields(stat, stat2);
}
private static void checkFields(FileStatus expected, FileStatus actual) {
assertEquals(expected.getPath(), actual.getPath());
assertEquals(expected.isDirectory(), actual.isDirectory());
assertEquals(expected.getLen(), actual.getLen());
assertEquals(expected.getPermission(), actual.getPermission());
assertEquals(expected.getOwner(), actual.getOwner());
assertEquals(expected.getGroup(), actual.getGroup());
assertEquals(expected.getModificationTime(), actual.getModificationTime());
assertEquals(expected.getAccessTime(), actual.getAccessTime());
assertEquals(expected.getReplication(), actual.getReplication());
assertEquals(expected.getBlockSize(), actual.getBlockSize());
assertEquals(expected.hasAcl(), actual.hasAcl());
assertEquals(expected.isEncrypted(), actual.isEncrypted());
assertEquals(expected.isErasureCoded(), actual.isErasureCoded());
}
}
| TestFSSerialization |
java | spring-projects__spring-framework | spring-context/src/main/java/org/springframework/context/annotation/AnnotatedBeanDefinitionReader.java | {
"start": 6342,
"end": 6468
} | class ____ the bean
* @param qualifiers specific qualifier annotations to consider,
* in addition to qualifiers at the bean | of |
java | spring-projects__spring-boot | module/spring-boot-webflux/src/test/java/org/springframework/boot/webflux/actuate/web/exchanges/RecordableServerHttpRequestTests.java | {
"start": 1350,
"end": 3278
} | class ____ {
private ServerWebExchange exchange;
private ServerHttpRequest request;
@BeforeEach
void setUp() {
this.exchange = mock(ServerWebExchange.class);
this.request = mock(ServerHttpRequest.class);
given(this.exchange.getRequest()).willReturn(this.request);
given(this.request.getMethod()).willReturn(HttpMethod.GET);
}
@Test
void getMethod() {
RecordableServerHttpRequest sourceRequest = new RecordableServerHttpRequest(this.request);
assertThat(sourceRequest.getMethod()).isEqualTo("GET");
}
@Test
void getUri() {
URI uri = URI.create("http://localhost:8080/");
given(this.request.getURI()).willReturn(uri);
RecordableServerHttpRequest sourceRequest = new RecordableServerHttpRequest(this.request);
assertThat(sourceRequest.getUri()).isSameAs(uri);
}
@Test
void getHeaders() {
HttpHeaders httpHeaders = new HttpHeaders();
httpHeaders.add("name", "value");
given(this.request.getHeaders()).willReturn(httpHeaders);
RecordableServerHttpRequest sourceRequest = new RecordableServerHttpRequest(this.request);
assertThat(sourceRequest.getHeaders()).containsOnly(entry("name", Collections.singletonList("value")));
}
@Test
void getUnresolvedRemoteAddress() {
InetSocketAddress socketAddress = InetSocketAddress.createUnresolved("unresolved.example.com", 8080);
given(this.request.getRemoteAddress()).willReturn(socketAddress);
RecordableServerHttpRequest sourceRequest = new RecordableServerHttpRequest(this.request);
assertThat(sourceRequest.getRemoteAddress()).isNull();
}
@Test
void getRemoteAddress() {
InetSocketAddress socketAddress = new InetSocketAddress(0);
given(this.request.getRemoteAddress()).willReturn(socketAddress);
RecordableServerHttpRequest sourceRequest = new RecordableServerHttpRequest(this.request);
assertThat(sourceRequest.getRemoteAddress()).isEqualTo(socketAddress.getAddress().toString());
}
}
| RecordableServerHttpRequestTests |
java | elastic__elasticsearch | x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/FailureCollector.java | {
"start": 1106,
"end": 1287
} | class ____
* client (4xx) errors over server (5xx) errors, shard-unavailable errors, and cancellation errors,
* as they are more useful for diagnosing issues.
*/
public final | prefers |
java | reactor__reactor-core | reactor-core/src/test/java/reactor/test/subscriber/AssertSubscriber.java | {
"start": 12278,
"end": 30797
} | class ____: expected = \"" + message +
"\", actual = \"" + errors.get(0).getMessage() + "\"", null);
}
}
if (s > 1) {
assertionError("Multiple errors: " + s, null);
}
return this;
}
/**
* Assert an error signal has been received.
* @param expectation A method that can verify the exception contained in the error signal
* and throw an exception (like an {@link AssertionError}) if the exception is not valid.
* @return this
*/
public final AssertSubscriber<T> assertErrorWith(Consumer<? super Throwable> expectation) {
assertNotComplete();
int s = errors.size();
if (s == 0) {
throw new AssertionError("No error", null);
}
if (s == 1) {
expectation.accept(errors.get(0));
}
if (s > 1) {
throw new AssertionError("Multiple errors: " + s, null);
}
return this;
}
/**
* Assert that the upstream was a Fuseable source.
*
* @return this
*/
public final AssertSubscriber<T> assertFuseableSource() {
if (qs == null) {
throw new AssertionError("Upstream was not Fuseable");
}
return this;
}
/**
* Assert that the fusion mode was granted.
*
* @return this
*/
public final AssertSubscriber<T> assertFusionEnabled() {
if (establishedFusionMode != Fuseable.SYNC && establishedFusionMode != Fuseable.ASYNC) {
throw new AssertionError("Fusion was not enabled");
}
return this;
}
public final AssertSubscriber<T> assertFusionMode(int expectedMode) {
if (establishedFusionMode != expectedMode) {
throw new AssertionError("Wrong fusion mode: expected: " +
Fuseable.fusionModeName(expectedMode) +
", actual: " + Fuseable.fusionModeName(establishedFusionMode));
}
return this;
}
/**
* Assert that the fusion mode was granted.
*
* @return this
*/
public final AssertSubscriber<T> assertFusionRejected() {
if (establishedFusionMode != Fuseable.NONE) {
throw new AssertionError("Fusion was granted");
}
return this;
}
/**
* Assert no error signal has been received.
* @return this
*/
public final AssertSubscriber<T> assertNoError() {
int s = errors.size();
if (s == 1) {
Throwable e = errors.get(0);
String valueAndClass = e == null ? null : e + " (" + e.getClass().getSimpleName() + ")";
throw new AssertionError("Error present: " + valueAndClass, null);
}
if (s > 1) {
throw new AssertionError("Multiple errors: " + s, null);
}
return this;
}
/**
* Assert no values have been received.
*
* @return this
*/
public final AssertSubscriber<T> assertNoValues() {
if (valueCount != 0) {
throw new AssertionError("No values expected but received: [length = " + values.size() + "] " + values,
null);
}
return this;
}
/**
* Assert that the upstream was not a Fuseable source.
* @return this
*/
public final AssertSubscriber<T> assertNonFuseableSource() {
if (qs != null) {
throw new AssertionError("Upstream was Fuseable");
}
return this;
}
/**
* Assert no complete successfully signal has been received.
* @return this
*/
public final AssertSubscriber<T> assertNotComplete() {
int c = completionCount;
if (c == 1) {
throw new AssertionError("Completed", null);
}
if (c > 1) {
throw new AssertionError("Multiple completions: " + c, null);
}
return this;
}
/**
* Assert no subscription occurred.
*
* @return this
*/
public final AssertSubscriber<T> assertNotSubscribed() {
int s = subscriptionCount;
if (s == 1) {
throw new AssertionError("OnSubscribe called once", null);
}
if (s > 1) {
throw new AssertionError("OnSubscribe called multiple times: " + s, null);
}
return this;
}
/**
* Assert no complete successfully or error signal has been received.
* @return this
*/
public final AssertSubscriber<T> assertNotTerminated() {
if (cdl.getCount() == 0) {
throw new AssertionError("Terminated", null);
}
return this;
}
/**
* Assert subscription occurred (once).
* @return this
*/
public final AssertSubscriber<T> assertSubscribed() {
int s = subscriptionCount;
if (s == 0) {
throw new AssertionError("OnSubscribe not called", null);
}
if (s > 1) {
throw new AssertionError("OnSubscribe called multiple times: " + s, null);
}
return this;
}
/**
* Assert either complete successfully or error signal has been received.
* @return this
*/
public final AssertSubscriber<T> assertTerminated() {
if (cdl.getCount() != 0) {
throw new AssertionError("Not terminated", null);
}
return this;
}
/**
* Assert {@code n} values has been received.
*
* @param n the expected value count
*
* @return this
*/
public final AssertSubscriber<T> assertValueCount(long n) {
if (valueCount != n) {
throw new AssertionError("Different value count: expected = " + n + ", actual = " + valueCount,
null);
}
return this;
}
/**
* Assert the specified values have been received in the same order read by the
* passed {@link Iterable}. Values storage
* should be enabled to
* use this method.
* @param expectedSequence the values to assert
* @see #configureValuesStorage(boolean)
* @return this
*/
public final AssertSubscriber<T> assertValueSequence(Iterable<? extends T> expectedSequence) {
if (!valuesStorage) {
throw new IllegalStateException("Using assertNoValues() requires enabling values storage");
}
Iterator<T> actual = values.iterator();
Iterator<? extends T> expected = expectedSequence.iterator();
int i = 0;
for (; ; ) {
boolean n1 = actual.hasNext();
boolean n2 = expected.hasNext();
if (n1 && n2) {
T t1 = actual.next();
T t2 = expected.next();
if (!Objects.equals(t1, t2)) {
throw new AssertionError("The element with index " + i + " does not match: expected = " + valueAndClass(t2) + ", actual = "
+ valueAndClass(
t1), null);
}
i++;
} else if (n1 && !n2) {
throw new AssertionError("Actual contains more elements" + values, null);
} else if (!n1 && n2) {
throw new AssertionError("Actual contains fewer elements: " + values, null);
} else {
break;
}
}
return this;
}
/**
* Assert the specified values have been received in the declared order. Values
* storage should be enabled to use this method.
*
* @param expectedValues the values to assert
*
* @return this
*
* @see #configureValuesStorage(boolean)
*/
@SafeVarargs
public final AssertSubscriber<T> assertValues(T... expectedValues) {
return assertValueSequence(Arrays.asList(expectedValues));
}
/**
* Assert the specified values have been received in the declared order. Values
* storage should be enabled to use this method.
*
* @param expectations One or more methods that can verify the values and throw a
* exception (like an {@link AssertionError}) if the value is not valid.
*
* @return this
*
* @see #configureValuesStorage(boolean)
*/
@SafeVarargs
public final AssertSubscriber<T> assertValuesWith(Consumer<T>... expectations) {
if (!valuesStorage) {
throw new IllegalStateException(
"Using assertNoValues() requires enabling values storage");
}
final int expectedValueCount = expectations.length;
if (expectedValueCount != values.size()) {
throw new AssertionError("Different value count: expected = " + expectedValueCount + ", actual = " + valueCount, null);
}
for (int i = 0; i < expectedValueCount; i++) {
Consumer<T> consumer = expectations[i];
T actualValue = values.get(i);
consumer.accept(actualValue);
}
return this;
}
// ==============================================================================================================
// Await methods
// ==============================================================================================================
/**
* Blocking method that waits until a complete successfully or error signal is received.
* @return this
*/
public final AssertSubscriber<T> await() {
if (cdl.getCount() == 0) {
return this;
}
try {
cdl.await();
} catch (InterruptedException ex) {
throw new AssertionError("Wait interrupted", ex);
}
return this;
}
/**
* Blocking method that waits until a complete successfully or error signal is received
* or until a timeout occurs.
* @param timeout The timeout value
* @return this
*/
public final AssertSubscriber<T> await(Duration timeout) {
if (cdl.getCount() == 0) {
return this;
}
try {
if (!cdl.await(timeout.toMillis(), TimeUnit.MILLISECONDS)) {
throw new AssertionError("No complete or error signal before timeout");
}
return this;
}
catch (InterruptedException ex) {
throw new AssertionError("Wait interrupted", ex);
}
}
/**
* Blocking method that waits until {@code n} next values have been received.
*
* @param n the value count to assert
*
* @return this
*/
public final AssertSubscriber<T> awaitAndAssertNextValueCount(final long n) {
await(valuesTimeout, () -> {
if(valuesStorage){
return String.format("%d out of %d next values received within %d, " +
"values : %s",
valueCount - nextValueAssertedCount,
n,
valuesTimeout.toMillis(),
values.toString()
);
}
return String.format("%d out of %d next values received within %d",
valueCount - nextValueAssertedCount,
n,
valuesTimeout.toMillis());
}, () -> valueCount >= (nextValueAssertedCount + n));
nextValueAssertedCount += n;
return this;
}
/**
* Blocking method that waits until {@code n} next values have been received (n is the
* number of values provided) to assert them.
*
* @param values the values to assert
*
* @return this
*/
@SafeVarargs
@SuppressWarnings("unchecked")
public final AssertSubscriber<T> awaitAndAssertNextValues(T... values) {
final int expectedNum = values.length;
final List<Consumer<T>> expectations = new ArrayList<>();
for (int i = 0; i < expectedNum; i++) {
final T expectedValue = values[i];
expectations.add(actualValue -> {
if (!actualValue.equals(expectedValue)) {
throw new AssertionError(String.format(
"Expected Next signal: %s, but got: %s",
expectedValue,
actualValue));
}
});
}
awaitAndAssertNextValuesWith(expectations.toArray((Consumer<T>[]) new Consumer[0]));
return this;
}
/**
* Blocking method that waits until {@code n} next values have been received
* (n is the number of expectations provided) to assert them.
* @param expectations One or more methods that can verify the values and throw a
* exception (like an {@link AssertionError}) if the value is not valid.
* @return this
*/
@SafeVarargs
public final AssertSubscriber<T> awaitAndAssertNextValuesWith(Consumer<T>... expectations) {
valuesStorage = true;
final int expectedValueCount = expectations.length;
await(valuesTimeout, () -> {
if(valuesStorage){
return String.format("%d out of %d next values received within %d, " +
"values : %s",
valueCount - nextValueAssertedCount,
expectedValueCount,
valuesTimeout.toMillis(),
values.toString()
);
}
return String.format("%d out of %d next values received within %d ms",
valueCount - nextValueAssertedCount,
expectedValueCount,
valuesTimeout.toMillis());
}, () -> valueCount >= (nextValueAssertedCount + expectedValueCount));
List<T> nextValuesSnapshot;
List<T> empty = new ArrayList<>();
for(;;){
nextValuesSnapshot = values;
if(NEXT_VALUES.compareAndSet(this, values, empty)){
break;
}
}
if (nextValuesSnapshot.size() < expectedValueCount) {
throw new AssertionError(String.format("Expected %d number of signals but received %d",
expectedValueCount,
nextValuesSnapshot.size()));
}
for (int i = 0; i < expectedValueCount; i++) {
Consumer<T> consumer = expectations[i];
T actualValue = nextValuesSnapshot.get(i);
consumer.accept(actualValue);
}
nextValueAssertedCount += expectedValueCount;
return this;
}
// ==============================================================================================================
// Overrides
// ==============================================================================================================
@Override
public void cancel() {
Subscription a = s;
if (a != Operators.cancelledSubscription()) {
a = S.getAndSet(this, Operators.cancelledSubscription());
if (a != null && a != Operators.cancelledSubscription()) {
a.cancel();
}
}
}
final boolean isCancelled() {
return s == Operators.cancelledSubscription();
}
public final boolean isTerminated() {
return cdl.getCount() == 0;
}
@Override
public void onComplete() {
completionCount++;
cdl.countDown();
}
@Override
public void onError(Throwable t) {
errors.add(t);
cdl.countDown();
}
@Override
public void onNext(T t) {
if (establishedFusionMode == Fuseable.ASYNC) {
for (; ; ) {
t = qs.poll();
if (t == null) {
break;
}
valueCount++;
if (valuesStorage) {
List<T> nextValuesSnapshot;
for (; ; ) {
nextValuesSnapshot = values;
nextValuesSnapshot.add(t);
if (NEXT_VALUES.compareAndSet(this,
nextValuesSnapshot,
nextValuesSnapshot)) {
break;
}
}
}
}
}
else {
valueCount++;
if (valuesStorage) {
List<T> nextValuesSnapshot;
for (; ; ) {
nextValuesSnapshot = values;
nextValuesSnapshot.add(t);
if (NEXT_VALUES.compareAndSet(this,
nextValuesSnapshot,
nextValuesSnapshot)) {
break;
}
}
}
}
}
@Override
@SuppressWarnings("unchecked")
public void onSubscribe(Subscription s) {
subscriptionCount++;
int requestMode = requestedFusionMode;
if (requestMode >= 0) {
if (!setWithoutRequesting(s)) {
if (!isCancelled()) {
errors.add(new IllegalStateException("Subscription already set: " +
subscriptionCount));
}
} else {
if (s instanceof Fuseable.QueueSubscription) {
this.qs = (Fuseable.QueueSubscription<T>)s;
int m = qs.requestFusion(requestMode);
establishedFusionMode = m;
if (m == Fuseable.SYNC) {
for (;;) {
T v = qs.poll();
if (v == null) {
onComplete();
break;
}
onNext(v);
}
}
else {
requestDeferred();
}
}
else {
requestDeferred();
}
}
} else {
if (!set(s)) {
if (!isCancelled()) {
errors.add(new IllegalStateException("Subscription already set: " +
subscriptionCount));
}
}
}
}
@Override
public void request(long n) {
if (Operators.validate(n)) {
if (establishedFusionMode != Fuseable.SYNC) {
normalRequest(n);
}
}
}
@Override
public @NonNull Context currentContext() {
return context;
}
/**
* Setup what fusion mode should be requested from the incoming
* Subscription if it happens to be QueueSubscription
* @param requestMode the mode to request, see Fuseable constants
* @return this
*/
public final AssertSubscriber<T> requestedFusionMode(int requestMode) {
this.requestedFusionMode = requestMode;
return this;
}
public Subscription upstream() {
return s;
}
// ==============================================================================================================
// Non public methods
// ==============================================================================================================
protected final void normalRequest(long n) {
Subscription a = s;
if (a != null) {
a.request(n);
} else {
Operators.addCap(REQUESTED, this, n);
a = s;
if (a != null) {
long r = REQUESTED.getAndSet(this, 0L);
if (r != 0L) {
a.request(r);
}
}
}
}
/**
* Requests the deferred amount if not zero.
*/
protected final void requestDeferred() {
long r = REQUESTED.getAndSet(this, 0L);
if (r != 0L) {
s.request(r);
}
}
/**
* Atomically sets the single subscription and requests the missed amount from it.
*
* @param s
* @return false if this arbiter is cancelled or there was a subscription already set
*/
protected final boolean set(Subscription s) {
Objects.requireNonNull(s, "s");
Subscription a = this.s;
if (a == Operators.cancelledSubscription()) {
s.cancel();
return false;
}
if (a != null) {
s.cancel();
Operators.reportSubscriptionSet();
return false;
}
if (S.compareAndSet(this, null, s)) {
long r = REQUESTED.getAndSet(this, 0L);
if (r != 0L) {
s.request(r);
}
return true;
}
a = this.s;
if (a != Operators.cancelledSubscription()) {
s.cancel();
return false;
}
Operators.reportSubscriptionSet();
return false;
}
/**
* Sets the Subscription once but does not request anything.
* @param s the Subscription to set
* @return true if successful, false if the current subscription is not null
*/
protected final boolean setWithoutRequesting(Subscription s) {
Objects.requireNonNull(s, "s");
for (;;) {
Subscription a = this.s;
if (a == Operators.cancelledSubscription()) {
s.cancel();
return false;
}
if (a != null) {
s.cancel();
Operators.reportSubscriptionSet();
return false;
}
if (S.compareAndSet(this, null, s)) {
return true;
}
}
}
/**
* Prepares and throws an AssertionError exception based on the message, cause, the
* active state and the potential errors so far.
*
* @param message the message
* @param cause the optional Throwable cause
*
* @throws AssertionError as expected
*/
protected final void assertionError(String message, Throwable cause) {
StringBuilder b = new StringBuilder();
if (cdl.getCount() != 0) {
b.append("(active) ");
}
b.append(message);
List<Throwable> err = errors;
if (!err.isEmpty()) {
b.append(" (+ ")
.append(err.size())
.append(" errors)");
}
AssertionError e = new AssertionError(b.toString(), cause);
for (Throwable t : err) {
e.addSuppressed(t);
}
throw e;
}
protected final String valueAndClass(Object o) {
if (o == null) {
return null;
}
return o + " (" + o.getClass().getSimpleName() + ")";
}
public List<T> values(){
return values;
}
public final AssertSubscriber<T> assertNoEvents() {
return assertNoValues().assertNoError().assertNotComplete();
}
@SafeVarargs
public final AssertSubscriber<T> assertIncomplete(T... values) {
return assertValues(values).assertNotComplete().assertNoError();
}
}
| incompatible |
java | grpc__grpc-java | grpclb/src/main/java/io/grpc/grpclb/GrpclbLoadBalancer.java | {
"start": 1469,
"end": 1938
} | class ____ extends LoadBalancer {
private static final GrpclbConfig DEFAULT_CONFIG = GrpclbConfig.create(Mode.ROUND_ROBIN);
private final Helper helper;
private final Context context;
private final TimeProvider time;
private final Stopwatch stopwatch;
private final SubchannelPool subchannelPool;
private final BackoffPolicy.Provider backoffPolicyProvider;
private GrpclbConfig config = DEFAULT_CONFIG;
// All mutable states in this | GrpclbLoadBalancer |
java | quarkusio__quarkus | extensions/opentelemetry/deployment/src/test/java/io/quarkus/opentelemetry/deployment/OpenTelemetryContinuousTestingTest.java | {
"start": 782,
"end": 2733
} | class ____ {
@RegisterExtension
final static QuarkusDevModeTest TEST = new QuarkusDevModeTest()
.withApplicationRoot((jar) -> jar
.addClass(TracerRouter.class)
.addClasses(TestSpanExporter.class, TestSpanExporterProvider.class)
.addAsResource(new StringAsset(TestSpanExporterProvider.class.getCanonicalName()),
"META-INF/services/io.opentelemetry.sdk.autoconfigure.spi.traces.ConfigurableSpanExporterProvider")
.add(new StringAsset(ContinuousTestingTestUtils.appProperties(
"quarkus.otel.traces.exporter=test-span-exporter",
"quarkus.otel.metrics.exporter=none")),
"application.properties"))
.setTestArchiveProducer(() -> ShrinkWrap.create(JavaArchive.class)
.addClass(TracerRouterUT.class));
@Test
void testContinuousTesting() {
ContinuousTestingTestUtils utils = new ContinuousTestingTestUtils();
TestStatus ts = utils.waitForNextCompletion();
Assertions.assertEquals(0L, ts.getTestsFailed());
Assertions.assertEquals(1L, ts.getTestsPassed());
Assertions.assertEquals(0L, ts.getTestsSkipped());
TEST.modifySourceFile(TracerRouter.class, s -> s.replace("Hello", "Goodbye"));
ts = utils.waitForNextCompletion();
Assertions.assertEquals(1L, ts.getTestsFailed());
Assertions.assertEquals(0L, ts.getTestsPassed());
Assertions.assertEquals(0L, ts.getTestsSkipped());
TEST.modifyTestSourceFile(TracerRouterUT.class, s -> s.replace("Hello", "Goodbye"));
ts = utils.waitForNextCompletion();
Assertions.assertEquals(0L, ts.getTestsFailed());
Assertions.assertEquals(1L, ts.getTestsPassed());
Assertions.assertEquals(0L, ts.getTestsSkipped());
}
}
| OpenTelemetryContinuousTestingTest |
java | hibernate__hibernate-orm | tooling/hibernate-ant/src/test/java/org/hibernate/tool/enhance/EnhancementTaskTest.java | {
"start": 749,
"end": 2405
} | class ____ {
@TempDir
private File projectDir;
@BeforeEach
public void beforeEach() throws Exception {
copyJavFiles();
prepareDestFolder();
}
@Test
public void testEnhancementDefault() throws Exception {
// The default settings for the enhancement task are as follows:
// enableLazyInitialization = 'true'
// enableDirtyTracking = 'true'
// enableAssociationManagement = 'false'
// enableExtendedEnhancement = 'false'
// The files are read from folder 'dir' which needs to be a subfolder of 'base'
// The property 'base' is mandatory
// If 'dir' is not specified, a 'fileset' element can be used (see #testEnhancementFileSet)
String enhanceTag =
"<enhance base='${basedir}/dest' dir='${basedir}/dest'/>\n";
Project project = createProject(enhanceTag);
executeCompileTarget( project );
executeEnhanceTarget( project );
// Both Bar and Baz should be enhanced
assertTrue(isEnhanced( "Bar" ));
assertTrue(isEnhanced( "Baz" ));
// Both Bar and Baz contain the method '$$_hibernate_getInterceptor'
// because of the default setting of 'enableLazyInitialization'
assertTrue( methodIsPresentInClass("$$_hibernate_getInterceptor", "Bar"));
assertTrue( methodIsPresentInClass("$$_hibernate_getInterceptor", "Baz"));
// Both Bar and Baz contain the method '$$_hibernate_hasDirtyAttributes'
// because of the default setting of 'enableDirtyTracking'
assertTrue( methodIsPresentInClass("$$_hibernate_hasDirtyAttributes", "Bar"));
assertTrue( methodIsPresentInClass("$$_hibernate_hasDirtyAttributes", "Baz"));
// Foo is not an entity and extended enhancement is not enabled so the | EnhancementTaskTest |
java | elastic__elasticsearch | x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/action/ListConnectorSyncJobsActionTests.java | {
"start": 577,
"end": 972
} | class ____ extends ESTestCase {
public void testValidate_WhenPageParamsAreValid_ExpectNoValidationError() {
ListConnectorSyncJobsAction.Request request = ConnectorSyncJobTestUtils.getRandomListConnectorSyncJobsActionRequest();
ActionRequestValidationException exception = request.validate();
assertThat(exception, nullValue());
}
}
| ListConnectorSyncJobsActionTests |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/dialect/lock/spi/ConnectionLockTimeoutStrategy.java | {
"start": 504,
"end": 1884
} | interface ____ {
ConnectionLockTimeoutStrategy NONE = () -> Level.NONE;
/**
* What type, if any, of support this Dialect has for lock timeouts on the JDBC connection.
*
* @see #getLockTimeout
* @see #setLockTimeout
*/
Level getSupportedLevel();
/**
* Read the lock timeout associated with the JDBC connection, if supported and there is one.
*
* @see #getSupportedLevel
*
* @throws UnsupportedOperationException when {@linkplain #getSupportedLevel} is {@linkplain Level#NONE}
*/
default Timeout getLockTimeout(Connection connection, SessionFactoryImplementor factory) {
throw new UnsupportedOperationException( "Lock timeout on the JDBC connection is not supported" );
}
/**
* Set the lock timeout associated with the JDBC connection (if supported), in milliseconds.
*
* @see #getSupportedLevel()
*
* @throws UnsupportedOperationException when {@linkplain #getSupportedLevel} is {@linkplain Level#NONE}
*/
default void setLockTimeout(Timeout timeout, Connection connection, SessionFactoryImplementor factory) {
throw new UnsupportedOperationException( "Lock timeout on the JDBC connection is not supported" );
}
/**
* Indicates a Dialect's level of support for lock timeouts on the JDBC connection.
*
* @apiNote {@linkplain org.hibernate.Timeouts#SKIP_LOCKED skip-locked} is never supported.
*/
| ConnectionLockTimeoutStrategy |
java | apache__flink | flink-clients/src/main/java/org/apache/flink/client/FlinkPipelineTranslationUtil.java | {
"start": 1292,
"end": 4052
} | class ____ {
/** Transmogrifies the given {@link Pipeline} to a {@link JobGraph}. */
public static JobGraph getJobGraph(
ClassLoader userClassloader,
Pipeline pipeline,
Configuration optimizerConfiguration,
int defaultParallelism) {
FlinkPipelineTranslator pipelineTranslator =
getPipelineTranslator(userClassloader, pipeline);
JobGraph jobGraph =
pipelineTranslator.translateToJobGraph(
pipeline, optimizerConfiguration, defaultParallelism);
optimizerConfiguration
.getOptional(PipelineOptions.PARALLELISM_OVERRIDES)
.ifPresent(
map ->
jobGraph.getJobConfiguration()
.set(PipelineOptions.PARALLELISM_OVERRIDES, map));
return jobGraph;
}
/**
* Transmogrifies the given {@link Pipeline} under the userClassloader to a {@link JobGraph}.
*/
public static JobGraph getJobGraphUnderUserClassLoader(
final ClassLoader userClassloader,
final Pipeline pipeline,
final Configuration configuration,
final int defaultParallelism) {
final ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
try {
Thread.currentThread().setContextClassLoader(userClassloader);
return FlinkPipelineTranslationUtil.getJobGraph(
userClassloader, pipeline, configuration, defaultParallelism);
} finally {
Thread.currentThread().setContextClassLoader(contextClassLoader);
}
}
/** Extracts the execution plan (as JSON) from the given {@link Pipeline}. */
public static String translateToJSONExecutionPlan(
ClassLoader userClassloader, Pipeline pipeline) {
FlinkPipelineTranslator pipelineTranslator =
getPipelineTranslator(userClassloader, pipeline);
return pipelineTranslator.translateToJSONExecutionPlan(pipeline);
}
private static FlinkPipelineTranslator getPipelineTranslator(
ClassLoader userClassloader, Pipeline pipeline) {
StreamGraphTranslator streamGraphTranslator = new StreamGraphTranslator(userClassloader);
if (streamGraphTranslator.canTranslate(pipeline)) {
return streamGraphTranslator;
}
throw new RuntimeException(
"Translator "
+ streamGraphTranslator
+ " cannot translate "
+ "the given pipeline "
+ pipeline
+ ".");
}
}
| FlinkPipelineTranslationUtil |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/identifier/NaturalIdEqualsHashCodeEntityTest.java | {
"start": 904,
"end": 1849
} | class ____ {
@BeforeEach
public void init(EntityManagerFactoryScope scope) {
scope.inTransaction( entityManager -> {
Library library = new Library();
library.setId(1L);
library.setName("Amazon");
entityManager.persist(library);
});
}
@Test
public void testPersist(EntityManagerFactoryScope scope) {
//tag::entity-pojo-natural-id-equals-hashcode-persist-example[]
Book book1 = new Book();
book1.setTitle("High-Performance Java Persistence");
book1.setIsbn("978-9730228236");
Library library = scope.fromTransaction( entityManager -> {
Library _library = entityManager.find(Library.class, 1L);
_library.getBooks().add(book1);
return _library;
});
assertTrue(library.getBooks().contains(book1));
//end::entity-pojo-natural-id-equals-hashcode-persist-example[]
}
//tag::entity-pojo-natural-id-equals-hashcode-example[]
@Entity(name = "MyLibrary")
public static | NaturalIdEqualsHashCodeEntityTest |
java | hibernate__hibernate-orm | hibernate-envers/src/test/java/org/hibernate/orm/test/envers/integration/onetomany/detached/BasicDetachedSet.java | {
"start": 932,
"end": 3798
} | class ____ {
private Integer str1_id;
private Integer str2_id;
private Integer coll1_id;
@BeforeClassTemplate
public void initData(EntityManagerFactoryScope scope) {
// Revision 1
scope.inTransaction( em -> {
StrTestEntity str1 = new StrTestEntity( "str1" );
StrTestEntity str2 = new StrTestEntity( "str2" );
em.persist( str1 );
em.persist( str2 );
SetRefCollEntity coll1 = new SetRefCollEntity( 3, "coll1" );
coll1.setCollection( new HashSet<StrTestEntity>() );
coll1.getCollection().add( str1 );
em.persist( coll1 );
str1_id = str1.getId();
str2_id = str2.getId();
coll1_id = coll1.getId();
} );
// Revision 2
scope.inTransaction( em -> {
StrTestEntity str2 = em.find( StrTestEntity.class, str2_id );
SetRefCollEntity coll1 = em.find( SetRefCollEntity.class, coll1_id );
coll1.getCollection().add( str2 );
} );
// Revision 3
scope.inTransaction( em -> {
StrTestEntity str1 = em.find( StrTestEntity.class, str1_id );
SetRefCollEntity coll1 = em.find( SetRefCollEntity.class, coll1_id );
coll1.getCollection().remove( str1 );
} );
// Revision 4
scope.inTransaction( em -> {
SetRefCollEntity coll1 = em.find( SetRefCollEntity.class, coll1_id );
coll1.getCollection().clear();
} );
}
@Test
public void testRevisionsCounts(EntityManagerFactoryScope scope) {
scope.inEntityManager( em -> {
final var auditReader = AuditReaderFactory.get( em );
assertEquals( Arrays.asList( 1, 2, 3, 4 ), auditReader.getRevisions( SetRefCollEntity.class, coll1_id ) );
assertEquals( Arrays.asList( 1 ), auditReader.getRevisions( StrTestEntity.class, str1_id ) );
assertEquals( Arrays.asList( 1 ), auditReader.getRevisions( StrTestEntity.class, str2_id ) );
} );
}
@Test
public void testHistoryOfColl1(EntityManagerFactoryScope scope) {
scope.inEntityManager( em -> {
StrTestEntity str1 = em.find( StrTestEntity.class, str1_id );
StrTestEntity str2 = em.find( StrTestEntity.class, str2_id );
final var auditReader = AuditReaderFactory.get( em );
SetRefCollEntity rev1 = auditReader.find( SetRefCollEntity.class, coll1_id, 1 );
SetRefCollEntity rev2 = auditReader.find( SetRefCollEntity.class, coll1_id, 2 );
SetRefCollEntity rev3 = auditReader.find( SetRefCollEntity.class, coll1_id, 3 );
SetRefCollEntity rev4 = auditReader.find( SetRefCollEntity.class, coll1_id, 4 );
assertEquals( TestTools.makeSet( str1 ), rev1.getCollection() );
assertEquals( TestTools.makeSet( str1, str2 ), rev2.getCollection() );
assertEquals( TestTools.makeSet( str2 ), rev3.getCollection() );
assertEquals( TestTools.makeSet(), rev4.getCollection() );
assertEquals( "coll1", rev1.getData() );
assertEquals( "coll1", rev2.getData() );
assertEquals( "coll1", rev3.getData() );
assertEquals( "coll1", rev4.getData() );
} );
}
}
| BasicDetachedSet |
java | google__error-prone | check_api/src/test/java/com/google/errorprone/util/ASTHelpersTest.java | {
"start": 46546,
"end": 47401
} | class ____ {
void f() {
Object unused = boolean.class;
}
}
""");
TestScanner scanner =
new TestScanner() {
@Override
public Void visitMemberSelect(MemberSelectTree tree, VisitorState state) {
Symbol targetSymbol = ASTHelpers.getSymbol(tree);
// ASTHelpers#outermostClass shouldn't itself NPE
assertThat(ASTHelpers.outermostClass(targetSymbol)).isNull();
setAssertionsComplete();
return super.visitMemberSelect(tree, state);
}
};
tests.add(scanner);
assertCompiles(scanner);
}
/** Replaces all throws clauses with more specific exceptions. */
@BugPattern(
summary = "Replaces all throws clauses with more specific exceptions.",
severity = WARNING)
public static final | Foo |
java | apache__camel | components/camel-ai/camel-langchain4j-agent-api/src/main/java/org/apache/camel/component/langchain4j/agent/api/AgentConfiguration.java | {
"start": 1305,
"end": 1388
} | class ____ AI agents in the Apache Camel LangChain4j integration.
*
* <p>
* This | for |
java | elastic__elasticsearch | server/src/internalClusterTest/java/org/elasticsearch/snapshots/CustomMetadataContextIT.java | {
"start": 11024,
"end": 11320
} | class ____ extends ThisTestCustomMetadata {
public static final String TYPE = "test_metadata_scope_non_api";
NonApiMetadata(String data) {
super(data, TYPE, EnumSet.of(Metadata.XContentContext.GATEWAY, Metadata.XContentContext.SNAPSHOT));
}
}
}
| NonApiMetadata |
java | quarkusio__quarkus | independent-projects/tools/analytics-common/src/test/java/io/quarkus/analytics/config/ExtensionsFilterTest.java | {
"start": 182,
"end": 865
} | class ____ {
private static final MessageWriter log = MessageWriter.info();
@Test
void discardTest() {
assertFalse(ExtensionsFilter.onlyPublic("must.not.be.authorized", log));
assertFalse(ExtensionsFilter.onlyPublic(null, log));
assertFalse(ExtensionsFilter.onlyPublic("", log));
}
@Test
void acceptTest() {
assertTrue(ExtensionsFilter.onlyPublic("io.quarkus", log));
assertTrue(ExtensionsFilter.onlyPublic("io.quarkus.something", log));
assertTrue(ExtensionsFilter.onlyPublic("io.quarkiverse", log));
assertTrue(ExtensionsFilter.onlyPublic("io.quarkiverse.something", log));
}
}
| ExtensionsFilterTest |
java | mybatis__mybatis-3 | src/main/java/org/apache/ibatis/io/ClassLoaderWrapper.java | {
"start": 4397,
"end": 4731
} | class ____ to examine
*
* @return the resource or null
*/
URL getResourceAsURL(String resource, ClassLoader[] classLoader) {
URL url;
for (ClassLoader cl : classLoader) {
if (null != cl) {
// look for the resource as passed in...
url = cl.getResource(resource);
// ...but some | loaders |
java | quarkusio__quarkus | independent-projects/bootstrap/core/src/main/java/io/quarkus/bootstrap/app/CuratedApplication.java | {
"start": 9484,
"end": 11236
} | class ____ that can load the deployment artifacts and all their dependencies, but not
//any of the runtime artifacts, or user classes
//this will load any deployment artifacts from the parent CL if they are present
for (ResolvedDependency i : appModel.getDependencies()) {
if (configuredClassLoading.isRemovedArtifact(i.getKey())) {
processCpElement(i, builder::addBannedElement, true);
continue;
}
if (configuredClassLoading.isReloadableArtifact(i.getKey())) {
continue;
}
processCpElement(i, element -> addCpElement(builder, i, element), true);
}
for (Path i : quarkusBootstrap.getAdditionalDeploymentArchives()) {
builder.addNormalPriorityElement(ClassPathElement.fromPath(i, false));
}
Map<String, byte[]> banned = new HashMap<>();
for (Collection<String> i : configuredClassLoading.getRemovedResources().values()) {
for (String j : i) {
banned.put(j, new byte[0]);
}
}
builder.addBannedElement(new MemoryClassPathElement(banned, false));
augmentClassLoader = builder.build();
}
return augmentClassLoader;
}
/**
* In most cases {@link #getOrCreateAugmentClassLoader()} should be used but this can be useful if you want to be able to
* get this instance without creating it (and so potentially get null if it doesn't exist).
*/
public QuarkusClassLoader getAugmentClassLoader() {
return augmentClassLoader;
}
/**
* creates the base runtime | loader |
java | apache__flink | flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/StreamTaskMultipleInputSelectiveReadingTest.java | {
"start": 16444,
"end": 17005
} | class ____
extends AbstractStreamOperatorFactory<String> {
@Override
public <T extends StreamOperator<String>> T createStreamOperator(
StreamOperatorParameters<String> parameters) {
return (T) new TestInputStarvationMultipleInputOperator(parameters);
}
@Override
public Class<? extends StreamOperator> getStreamOperatorClass(ClassLoader classLoader) {
return TestInputStarvationMultipleInputOperator.class;
}
}
}
| TestInputStarvationMultipleInputOperatorFactory |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/manytomanyassociationclass/Group.java | {
"start": 240,
"end": 1206
} | class ____ {
private Long id;
private String name;
private Set<Membership> memberships = new HashSet<>();
public Group() {
}
public Group(String name) {
this.name = name;
}
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public Set<Membership> getMemberships() {
return memberships;
}
public void setMemberships(Set<Membership> memberships) {
this.memberships = memberships;
}
public boolean equals(Object obj) {
if ( this == obj ) {
return true;
}
if ( obj instanceof Group ) {
Group grp = ( Group ) obj;
if ( grp.getName() != null && name != null ) {
return grp.getName().equals( name );
}
else {
return super.equals( obj );
}
}
else {
return false;
}
}
public int hashCode() {
return ( name == null ? super.hashCode() : name.hashCode() );
}
}
| Group |
java | quarkusio__quarkus | independent-projects/tools/devtools-common/src/main/java/io/quarkus/devtools/commands/AddExtensions.java | {
"start": 605,
"end": 683
} | class ____ not thread-safe. They are created per single invocation.
*/
public | are |
java | apache__kafka | clients/clients-integration-tests/src/test/java/org/apache/kafka/server/quota/CustomQuotaCallbackTest.java | {
"start": 5521,
"end": 6865
} | class ____ implements ClientQuotaCallback {
public static final Map<String, AtomicInteger> COUNTERS = new ConcurrentHashMap<>();
private String nodeId;
@Override
public Map<String, String> quotaMetricTags(ClientQuotaType quotaType, KafkaPrincipal principal, String clientId) {
return Map.of();
}
@Override
public Double quotaLimit(ClientQuotaType quotaType, Map<String, String> metricTags) {
return Double.MAX_VALUE;
}
@Override
public void updateQuota(ClientQuotaType quotaType, ClientQuotaEntity quotaEntity, double newValue) {
}
@Override
public void removeQuota(ClientQuotaType quotaType, ClientQuotaEntity quotaEntity) {
}
@Override
public boolean quotaResetRequired(ClientQuotaType quotaType) {
return true;
}
@Override
public boolean updateClusterMetadata(Cluster cluster) {
COUNTERS.computeIfAbsent(nodeId, k -> new AtomicInteger()).incrementAndGet();
return true;
}
@Override
public void close() {
}
@Override
public void configure(Map<String, ?> configs) {
nodeId = (String) configs.get("node.id");
}
}
public static | CustomQuotaCallback |
java | elastic__elasticsearch | x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/voyageai/request/VoyageAIRerankRequestTests.java | {
"start": 960,
"end": 4933
} | class ____ extends ESTestCase {
private static final String API_KEY = "foo";
public void testCreateRequest_WithMinimalFields() throws IOException {
var input = "input";
var query = "query";
var modelId = "model";
var request = createRequest(query, input, modelId, null, null, null);
var httpRequest = request.createHttpRequest();
assertThat(httpRequest.httpRequestBase(), instanceOf(HttpPost.class));
var httpPost = (HttpPost) httpRequest.httpRequestBase();
assertThat(httpPost.getLastHeader(HttpHeaders.CONTENT_TYPE).getValue(), is(XContentType.JSON.mediaType()));
assertThat(httpPost.getLastHeader(HttpHeaders.AUTHORIZATION).getValue(), is("Bearer " + API_KEY));
var requestMap = entityAsMap(httpPost.getEntity().getContent());
assertThat(requestMap, aMapWithSize(3));
assertThat(requestMap.get("documents"), is(List.of(input)));
assertThat(requestMap.get("query"), is(query));
assertThat(requestMap.get("model"), is(modelId));
}
public void testCreateRequest_WithAllFieldsDefined() throws IOException {
var input = "input";
var query = "query";
var topK = 1;
var taskSettingsTopK = 2;
var modelId = "model";
var request = createRequest(query, input, modelId, topK, Boolean.FALSE, taskSettingsTopK);
var httpRequest = request.createHttpRequest();
assertThat(httpRequest.httpRequestBase(), instanceOf(HttpPost.class));
var httpPost = (HttpPost) httpRequest.httpRequestBase();
assertThat(httpPost.getLastHeader(HttpHeaders.CONTENT_TYPE).getValue(), is(XContentType.JSON.mediaType()));
assertThat(httpPost.getLastHeader(HttpHeaders.AUTHORIZATION).getValue(), is("Bearer " + API_KEY));
var requestMap = entityAsMap(httpPost.getEntity().getContent());
assertThat(requestMap, aMapWithSize(5));
assertThat(requestMap.get("documents"), is(List.of(input)));
assertThat(requestMap.get("query"), is(query));
assertThat(requestMap.get("top_k"), is(topK));
assertThat(requestMap.get("model"), is(modelId));
assertThat(requestMap.get("return_documents"), is(Boolean.FALSE));
}
public void testCreateRequest_WithModelSet() throws IOException {
var input = "input";
var query = "query";
var modelId = "model";
var request = createRequest(query, input, modelId, null, null, null);
var httpRequest = request.createHttpRequest();
assertThat(httpRequest.httpRequestBase(), instanceOf(HttpPost.class));
var httpPost = (HttpPost) httpRequest.httpRequestBase();
assertThat(httpPost.getLastHeader(HttpHeaders.CONTENT_TYPE).getValue(), is(XContentType.JSON.mediaType()));
assertThat(httpPost.getLastHeader(HttpHeaders.AUTHORIZATION).getValue(), is("Bearer " + API_KEY));
var requestMap = entityAsMap(httpPost.getEntity().getContent());
assertThat(requestMap, aMapWithSize(3));
assertThat(requestMap.get("documents"), is(List.of(input)));
assertThat(requestMap.get("query"), is(query));
assertThat(requestMap.get("model"), is(modelId));
}
public void testTruncate_DoesNotTruncate() {
var request = createRequest("query", "input", "null", null, null, null);
var truncatedRequest = request.truncate();
assertThat(truncatedRequest, sameInstance(request));
}
private static VoyageAIRerankRequest createRequest(
String query,
String input,
@Nullable String modelId,
@Nullable Integer topK,
@Nullable Boolean returnDocuments,
@Nullable Integer taskSettingsTopK
) {
var rerankModel = VoyageAIRerankModelTests.createModel(API_KEY, modelId, taskSettingsTopK);
return new VoyageAIRerankRequest(query, List.of(input), returnDocuments, topK, rerankModel);
}
}
| VoyageAIRerankRequestTests |
java | spring-projects__spring-framework | spring-aop/src/test/java/org/springframework/aop/aspectj/annotation/AbstractAspectJAdvisorFactoryTests.java | {
"start": 33445,
"end": 33851
} | class ____ {
int count;
// Just to check that this doesn't cause problems with introduction processing
@SuppressWarnings("unused")
private final ITestBean fieldThatShouldBeIgnoredBySpringAtAspectJProcessing = new TestBean();
@Around("execution(int *.getAge())")
int returnCountAsAge() {
return count++;
}
@Before("execution(void *.set*(int))")
void countSetter() {
++count;
}
}
| PerThisAspect |
java | hibernate__hibernate-orm | hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/SybaseLegacySqmToSqlAstConverter.java | {
"start": 1058,
"end": 2722
} | class ____<T extends Statement> extends BaseSqmToSqlAstConverter<T> {
private boolean needsDummyTableGroup;
public SybaseLegacySqmToSqlAstConverter(
SqmStatement<?> statement,
QueryOptions queryOptions,
DomainParameterXref domainParameterXref,
QueryParameterBindings domainParameterBindings,
LoadQueryInfluencers fetchInfluencers,
SqlAstCreationContext creationContext,
boolean deduplicateSelectionItems) {
super(
creationContext,
statement,
queryOptions,
fetchInfluencers,
domainParameterXref,
domainParameterBindings,
deduplicateSelectionItems
);
}
@Override
public QuerySpec visitQuerySpec(SqmQuerySpec<?> sqmQuerySpec) {
final boolean needsDummy = this.needsDummyTableGroup;
this.needsDummyTableGroup = false;
try {
final QuerySpec querySpec = super.visitQuerySpec( sqmQuerySpec );
if ( this.needsDummyTableGroup ) {
querySpec.getFromClause().addRoot(
new StandardTableGroup(
true,
null,
null,
null,
new NamedTableReference( "(select 1)", "dummy_(x)" ),
null,
getCreationContext().getSessionFactory()
)
);
}
return querySpec;
}
finally {
this.needsDummyTableGroup = needsDummy;
}
}
@Override
protected Expression resolveGroupOrOrderByExpression(SqmExpression<?> groupByClauseExpression) {
final Expression expression = super.resolveGroupOrOrderByExpression( groupByClauseExpression );
if ( expression instanceof Literal ) {
// Note that SqlAstTranslator.renderPartitionItem depends on this
this.needsDummyTableGroup = true;
}
return expression;
}
}
| SybaseLegacySqmToSqlAstConverter |
java | ReactiveX__RxJava | src/test/java/io/reactivex/rxjava3/testsupport/TestHelper.java | {
"start": 128578,
"end": 130401
} | class ____<T> extends BasicQueueSubscription<T> implements ConditionalSubscriber<T> {
private static final long serialVersionUID = 365317603608134078L;
final Subscriber<? super T> downstream;
Subscription upstream;
QueueSubscription<T> qs;
ForwardingConditionalSubscriber(Subscriber<? super T> downstream) {
this.downstream = downstream;
}
@SuppressWarnings("unchecked")
@Override
public void onSubscribe(@NonNull Subscription s) {
this.upstream = s;
if (s instanceof QueueSubscription) {
this.qs = (QueueSubscription<T>)s;
}
downstream.onSubscribe(this);
}
@Override
public void onNext(@NonNull T t) {
downstream.onNext(t);
}
@Override
public boolean tryOnNext(@NonNull T t) {
downstream.onNext(t);
return true;
}
@Override
public void onError(Throwable t) {
downstream.onError(t);
}
@Override
public void onComplete() {
downstream.onComplete();
}
@Override
public int requestFusion(int mode) {
return qs != null ? qs.requestFusion(mode) : 0;
}
@Override
public @Nullable T poll() throws Throwable {
return qs.poll();
}
@Override
public boolean isEmpty() {
return qs.isEmpty();
}
@Override
public void clear() {
qs.clear();
}
@Override
public void request(long n) {
upstream.request(n);
}
@Override
public void cancel() {
upstream.cancel();
}
}
}
| ForwardingConditionalSubscriber |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/fetch/subphase/FieldFetcher.java | {
"start": 1532,
"end": 9089
} | class ____ {
private record ResolvedField(String field, String matchingPattern, MappedFieldType ft, String format) {}
/**
* Build a FieldFetcher for a given search context and collection of fields and formats
*/
public static FieldFetcher create(SearchExecutionContext context, Collection<FieldAndFormat> fieldAndFormats) {
List<String> unmappedFetchPattern = new ArrayList<>();
List<ResolvedField> resolvedFields = new ArrayList<>();
for (FieldAndFormat fieldAndFormat : fieldAndFormats) {
String fieldPattern = fieldAndFormat.field;
String matchingPattern = Regex.isSimpleMatchPattern(fieldPattern) ? fieldPattern : null;
if (fieldAndFormat.includeUnmapped != null && fieldAndFormat.includeUnmapped) {
unmappedFetchPattern.add(fieldAndFormat.field);
}
for (String field : context.getMatchingFieldNames(fieldPattern)) {
MappedFieldType ft = context.getFieldType(field);
// we want to skip metadata fields if we have a wildcard pattern
if (context.isMetadataField(field) && matchingPattern != null) {
continue;
}
resolvedFields.add(new ResolvedField(field, matchingPattern, ft, fieldAndFormat.format));
}
}
// The fields need to be sorted so that the nested partition functions will work correctly.
resolvedFields.sort(Comparator.comparing(f -> f.field));
Map<String, FieldContext> fieldContexts = buildFieldContexts(context, "", resolvedFields, unmappedFetchPattern);
UnmappedFieldFetcher unmappedFieldFetcher = buildUnmappedFieldFetcher(context, fieldContexts.keySet(), "", unmappedFetchPattern);
return new FieldFetcher(fieldContexts, unmappedFieldFetcher);
}
private static UnmappedFieldFetcher buildUnmappedFieldFetcher(
SearchExecutionContext context,
Set<String> mappedFields,
String nestedScope,
List<String> unmappedFetchPatterns
) {
if (unmappedFetchPatterns.isEmpty()) {
return UnmappedFieldFetcher.EMPTY;
}
// We pass in all mapped field names, and all the names of nested mappers that appear
// immediately below the current scope. This means that the unmapped field fetcher won't
// retrieve any fields that live inside a nested child, instead leaving this to the
// NestedFieldFetchers defined for each child scope in buildFieldContexts()
return new UnmappedFieldFetcher(mappedFields, context.nestedLookup().getImmediateChildMappers(nestedScope), unmappedFetchPatterns);
}
private static ValueFetcher buildValueFetcher(SearchExecutionContext context, ResolvedField fieldAndFormat) {
try {
return fieldAndFormat.ft.valueFetcher(context, fieldAndFormat.format);
} catch (IllegalArgumentException e) {
StringBuilder error = new StringBuilder("error fetching [").append(fieldAndFormat.field).append(']');
if (fieldAndFormat.matchingPattern != null) {
error.append(" which matched [").append(fieldAndFormat.matchingPattern).append(']');
}
error.append(": ").append(e.getMessage());
throw new IllegalArgumentException(error.toString(), e);
}
}
// Builds field contexts for each resolved field. If there are child mappers below
// the nested scope, then the resolved fields are partitioned by where they fall in
// the nested hierarchy, and we build a nested FieldContext for each child by calling
// this method again for the subset of resolved fields that live within it.
private static Map<String, FieldContext> buildFieldContexts(
SearchExecutionContext context,
String nestedScope,
List<ResolvedField> fields,
List<String> unmappedFetchPatterns
) {
final boolean includeUnmapped = unmappedFetchPatterns.isEmpty() == false;
Map<String, List<ResolvedField>> fieldsByNestedMapper = NestedUtils.partitionByChildren(
nestedScope,
context.nestedLookup().getImmediateChildMappers(nestedScope),
fields,
f -> f.field
);
// Keep the outputs sorted for easier testing
Map<String, FieldContext> output = new LinkedHashMap<>();
for (String scope : fieldsByNestedMapper.keySet()) {
if (nestedScope.equals(scope)) {
// These are fields in the current scope, so add them directly to the output map
for (ResolvedField ff : fieldsByNestedMapper.get(nestedScope)) {
output.put(ff.field, new FieldContext(ff.field, buildValueFetcher(context, ff)));
}
} else {
// don't create nested fetchers if no children have been requested as part of the fields
// request, unless we are trying to also fetch unmapped fields``
if (includeUnmapped || fieldsByNestedMapper.get(scope).isEmpty() == false) {
// These fields are in a child scope, so build a nested mapper for them
Map<String, FieldContext> scopedFields = buildFieldContexts(
context,
scope,
fieldsByNestedMapper.get(scope),
unmappedFetchPatterns
);
UnmappedFieldFetcher unmappedFieldFetcher = buildUnmappedFieldFetcher(
context,
scopedFields.keySet(),
scope,
unmappedFetchPatterns
);
NestedValueFetcher nvf = new NestedValueFetcher(scope, new FieldFetcher(scopedFields, unmappedFieldFetcher));
output.put(scope, new FieldContext(scope, nvf));
}
}
}
return output;
}
private final Map<String, FieldContext> fieldContexts;
private final UnmappedFieldFetcher unmappedFieldFetcher;
private final StoredFieldsSpec storedFieldsSpec;
private FieldFetcher(Map<String, FieldContext> fieldContexts, UnmappedFieldFetcher unmappedFieldFetcher) {
this.fieldContexts = fieldContexts;
this.unmappedFieldFetcher = unmappedFieldFetcher;
this.storedFieldsSpec = StoredFieldsSpec.build(fieldContexts.values(), fc -> fc.valueFetcher.storedFieldsSpec());
}
public StoredFieldsSpec storedFieldsSpec() {
return storedFieldsSpec;
}
public Map<String, DocumentField> fetch(Source source, int doc) throws IOException {
Map<String, DocumentField> documentFields = new HashMap<>();
for (FieldContext context : fieldContexts.values()) {
String field = context.fieldName;
ValueFetcher valueFetcher = context.valueFetcher;
final DocumentField docField = valueFetcher.fetchDocumentField(field, source, doc);
if (docField != null) {
documentFields.put(field, docField);
}
}
unmappedFieldFetcher.collectUnmapped(documentFields, source);
return documentFields;
}
public void setNextReader(LeafReaderContext readerContext) {
for (FieldContext field : fieldContexts.values()) {
field.valueFetcher.setNextReader(readerContext);
}
}
private record FieldContext(String fieldName, ValueFetcher valueFetcher) {}
}
| FieldFetcher |
java | elastic__elasticsearch | modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SClass.java | {
"start": 777,
"end": 1562
} | class ____ extends ANode {
private final List<SFunction> functionNodes;
public SClass(int identifier, Location location, List<SFunction> functionNodes) {
super(identifier, location);
this.functionNodes = Collections.unmodifiableList(Objects.requireNonNull(functionNodes));
}
public List<SFunction> getFunctionNodes() {
return functionNodes;
}
@Override
public <Scope> void visit(UserTreeVisitor<Scope> userTreeVisitor, Scope scope) {
userTreeVisitor.visitClass(this, scope);
}
@Override
public <Scope> void visitChildren(UserTreeVisitor<Scope> userTreeVisitor, Scope scope) {
for (SFunction functionNode : functionNodes) {
functionNode.visit(userTreeVisitor, scope);
}
}
}
| SClass |
java | apache__kafka | streams/src/main/java/org/apache/kafka/streams/query/Position.java | {
"start": 1444,
"end": 1529
} | class ____ be used to summarize all of
* that positional information.
* <p>
* This | can |
java | apache__flink | flink-libraries/flink-state-processing-api/src/test/java/org/apache/flink/state/api/input/KeyedStateInputFormatTest.java | {
"start": 15261,
"end": 15858
} | class ____ extends KeyedProcessFunction<Integer, Integer, Void> {
ValueState<Integer> state;
@Override
public void open(OpenContext openContext) {
state = getRuntimeContext().getState(stateDescriptor);
}
@Override
public void processElement(Integer value, Context ctx, Collector<Void> out)
throws Exception {
state.update(value);
ctx.timerService().registerEventTimeTimer(value);
ctx.timerService().registerProcessingTimeTimer(value);
}
}
static | StatefulFunctionWithTime |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStatsMXBean.java | {
"start": 946,
"end": 1038
} | interface ____ to retrieve statistic information related to
* block management.
*/
public | used |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/annotation/web/configuration/WebSecurityConfigurationTests.java | {
"start": 25041,
"end": 25667
} | class ____ {
@Bean
SecurityFilterChain filterChain(HttpSecurity http, WebExpressionAuthorizationManager.Builder authz)
throws Exception {
// @formatter:off
http
.authorizeHttpRequests((requests) -> requests
.anyRequest().access(authz.expression("request.method == 'GET' ? @b.grant() : @b.deny()"))
);
return http.build();
// @formatter:on
}
@Bean
WebExpressionAuthorizationManager.Builder authz() {
return WebExpressionAuthorizationManager.withDefaults();
}
@Bean
public MyBean b() {
return new MyBean();
}
@RestController
| DefaultExpressionHandlerSetsBeanResolverConfig |
java | google__dagger | dagger-runtime/main/java/dagger/internal/SetFactory.java | {
"start": 1501,
"end": 2568
} | class ____<T extends @Nullable Object> implements Factory<Set<T>> {
private static final Factory<Set<Object>> EMPTY_FACTORY = InstanceFactory.create(emptySet());
@SuppressWarnings({"unchecked", "rawtypes"}) // safe covariant cast
public static <T extends @Nullable Object> Factory<Set<T>> empty() {
return (Factory) EMPTY_FACTORY;
}
/**
* Constructs a new {@link Builder} for a {@link SetFactory} with {@code individualProviderSize}
* individual {@code Provider<T>} and {@code collectionProviderSize} {@code
* Provider<Collection<T>>} instances.
*/
public static <T extends @Nullable Object> Builder<T> builder(
int individualProviderSize, int collectionProviderSize) {
return new Builder<T>(individualProviderSize, collectionProviderSize);
}
/**
* A builder to accumulate {@code Provider<T>} and {@code Provider<Collection<T>>} instances.
* These are only intended to be single-use and from within generated code. Do <em>NOT</em> add
* providers after calling {@link #build()}.
*/
public static final | SetFactory |
java | assertj__assertj-core | assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/api/Assertions_assertThat_with_FloatArray_Test.java | {
"start": 941,
"end": 1162
} | class ____ {
@Test
void should_create_Assert() {
AbstractFloatArrayAssert<?> assertions = Assertions.assertThat(emptyArray());
assertThat(assertions).isNotNull();
}
}
| Assertions_assertThat_with_FloatArray_Test |
java | hibernate__hibernate-orm | tooling/metamodel-generator/src/main/java/org/hibernate/processor/util/TypeUtils.java | {
"start": 27516,
"end": 28076
} | class ____ to extend from {@code false} otherwise.
*/
private static boolean extendsSuperMetaModel(Element superClassElement, boolean entityMetaComplete, Context context) {
// if we processed the superclass in the same run we definitely need to extend
final TypeElement typeElement = (TypeElement) superClassElement;
final String superClassName = typeElement.getQualifiedName().toString();
return context.containsMetaEntity( superClassName )
|| context.containsMetaEmbeddable( superClassName )
// to allow for the case that the metamodel | metamodel |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.