language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
|
{
"start": 3036,
"end": 22336
}
|
class ____ {
/**
* Private constructor for preventing FSDirEncryptionZoneOp object creation.
* Static-only class.
*/
private FSDirEncryptionZoneOp() {}
/**
 * Invoke KeyProvider APIs to generate an encrypted data encryption key for
 * an encryption zone. Must not be called with any namesystem locks held:
 * the key-provider round trip can be slow and would stall other operations.
 *
 * @param fsd the namespace tree.
 * @param ezKeyName key name of an encryption zone
 * @return New EDEK, or null if ezKeyName is null
 * @throws IOException on provider or security failures
 */
private static EncryptedKeyVersion generateEncryptedDataEncryptionKey(
    final FSDirectory fsd, final String ezKeyName) throws IOException {
  // must not be holding lock during this operation
  assert !fsd.getFSNamesystem().hasReadLock(RwLockMode.FS);
  assert !fsd.getFSNamesystem().hasWriteLock(RwLockMode.FS);
  if (ezKeyName == null) {
    return null;
  }
  // Time the provider call so it can be reported in NameNode metrics below.
  long generateEDEKStartTime = monotonicNow();
  // Generate EDEK with login user (hdfs) so that KMS does not need
  // an extra proxy configuration allowing hdfs to proxy its clients and
  // KMS does not need configuration to allow non-hdfs user GENERATE_EEK
  // operation.
  EncryptedKeyVersion edek = SecurityUtil.doAsLoginUser(
      new PrivilegedExceptionAction<EncryptedKeyVersion>() {
        @Override
        public EncryptedKeyVersion run() throws IOException {
          try {
            return fsd.getProvider().generateEncryptedKey(ezKeyName);
          } catch (GeneralSecurityException e) {
            // Callers only expect IOException; wrap, preserving the cause.
            throw new IOException(e);
          }
        }
      });
  long generateEDEKTime = monotonicNow() - generateEDEKStartTime;
  NameNode.getNameNodeMetrics().addGenerateEDEKTime(generateEDEKTime);
  // A non-null key name must yield a non-null EDEK.
  Preconditions.checkNotNull(edek);
  return edek;
}
/**
 * Verify that the key backing a new encryption zone exists in the configured
 * KeyProvider, and warm up the provider's EDEK cache for it.
 *
 * @param fsd the namespace tree (used only to obtain the key provider).
 * @param keyName name of the key that will back the encryption zone.
 * @param src path of the prospective zone root, used only in error messages.
 * @return the key's metadata as reported by the provider.
 * @throws IOException if no provider is configured, no key name was supplied,
 *         or the key does not exist in the provider.
 */
static KeyProvider.Metadata ensureKeyIsInitialized(final FSDirectory fsd,
    final String keyName, final String src) throws IOException {
  final KeyProviderCryptoExtension provider = fsd.getProvider();
  if (provider == null) {
    throw new IOException("Can't create an encryption zone for " + src
        + " since no key provider is available.");
  }
  if (keyName == null || keyName.isEmpty()) {
    throw new IOException("Must specify a key name when creating an "
        + "encryption zone");
  }
  // Test hook: allows fault-injection tests to interpose at this point.
  EncryptionFaultInjector.getInstance().ensureKeyIsInitialized();
  final KeyProvider.Metadata keyMetadata = provider.getMetadata(keyName);
  if (keyMetadata == null) {
    // The KeyProvider API only lets us signal "not found" as a generic
    // IOException; no more specific exception type is available. If the
    // API ever grows one (e.g. UnknownKeyException), rethrow that instead.
    throw new IOException("Key " + keyName + " doesn't exist.");
  }
  // If the provider supports an EDEK pool, pre-populate it for this key.
  provider.warmUpEncryptedKeys(keyName);
  return keyMetadata;
}
/**
 * Create an encryption zone on directory path using the specified key.
 *
 * @param fsd the namespace tree.
 * @param srcArg the path of a directory which will be the root of the
 *               encryption zone. The directory must be empty
 * @param pc permission checker to check fs permission
 * @param cipher the name of the cipher suite, which will be used
 *               when it is generated.
 * @param keyName name of a key which must be present in the configured
 *                KeyProvider
 * @param logRetryCache whether to record RPC ids in editlog for retry cache
 *                      rebuilding
 * @return FileStatus of the new zone root, for auditing
 * @throws IOException
 */
static FileStatus createEncryptionZone(final FSDirectory fsd,
    final String srcArg, final FSPermissionChecker pc, final String cipher,
    final String keyName, final boolean logRetryCache) throws IOException {
  final CipherSuite suite = CipherSuite.convert(cipher);
  List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
  // For now this is hard coded, as we only support one method.
  final CryptoProtocolVersion version =
      CryptoProtocolVersion.ENCRYPTION_ZONES;
  final INodesInPath iip;
  fsd.writeLock();
  try {
    iip = fsd.resolvePath(pc, srcArg, DirOp.WRITE);
    // The zone is materialized as an xattr on the directory inode.
    final XAttr ezXAttr = fsd.ezManager.createEncryptionZone(iip, suite,
        version, keyName);
    xAttrs.add(ezXAttr);
  } finally {
    fsd.writeUnlock();
  }
  // Edit-log write deliberately happens outside the directory write lock.
  fsd.getEditLog().logSetXAttrs(iip.getPath(), xAttrs, logRetryCache);
  return fsd.getAuditFileInfo(iip);
}
/**
 * Get the encryption zone for the specified path.
 *
 * @param fsd the namespace tree.
 * @param srcArg the path of a file or directory to get the EZ for
 * @param pc permission checker to check fs permission
 * @return the EZ (may be null if the path is not in a zone — see the null
 *         check in getFileEncryptionInfo) paired with the audit FileStatus.
 */
static Map.Entry<EncryptionZone, FileStatus> getEZForPath(
    final FSDirectory fsd, final String srcArg, final FSPermissionChecker pc)
    throws IOException {
  final INodesInPath iip;
  final EncryptionZone ret;
  fsd.readLock();
  try {
    iip = fsd.resolvePath(pc, srcArg, DirOp.READ);
    if (fsd.isPermissionEnabled()) {
      fsd.checkPathAccess(pc, iip, FsAction.READ);
    }
    ret = fsd.ezManager.getEZINodeForPath(iip);
  } finally {
    fsd.readUnlock();
  }
  // Audit info is gathered after releasing the lock.
  FileStatus auditStat = fsd.getAuditFileInfo(iip);
  return new AbstractMap.SimpleImmutableEntry<>(ret, auditStat);
}
/**
 * Resolve the encryption zone containing {@code iip}, under the directory
 * read lock. No permission check is performed here — callers that need one
 * use the String-path overload above.
 */
static EncryptionZone getEZForPath(final FSDirectory fsd,
    final INodesInPath iip) throws IOException {
  fsd.readLock();
  try {
    return fsd.ezManager.getEZINodeForPath(iip);
  } finally {
    fsd.readUnlock();
  }
}
/**
 * List encryption zones in batches, under the directory read lock.
 *
 * @param prevId cursor for batched listing — presumably the id of the last
 *               zone returned by the previous batch; confirm in EZManager.
 */
static BatchedListEntries<EncryptionZone> listEncryptionZones(
    final FSDirectory fsd, final long prevId) throws IOException {
  fsd.readLock();
  try {
    return fsd.ezManager.listEncryptionZones(prevId);
  } finally {
    fsd.readUnlock();
  }
}
/**
 * Start re-encryption of the zone rooted at {@code iip} with the given key
 * version. Thin delegate to the EZ manager; callers must pass a non-null
 * key version name.
 */
static List<XAttr> reencryptEncryptionZone(final FSDirectory fsd,
    final INodesInPath iip, final String keyVersionName) throws IOException {
  assert keyVersionName != null;
  return fsd.ezManager.reencryptEncryptionZone(iip, keyVersionName);
}
/**
 * Cancel any in-progress re-encryption of the zone rooted at {@code iip}.
 * Thin delegate to the EZ manager.
 */
static List<XAttr> cancelReencryptEncryptionZone(final FSDirectory fsd,
    final INodesInPath iip) throws IOException {
  return fsd.ezManager.cancelReencryptEncryptionZone(iip);
}
/**
 * List per-zone re-encryption statuses in batches, under the directory
 * read lock. {@code prevId} is the batch cursor, as in listEncryptionZones.
 */
static BatchedListEntries<ZoneReencryptionStatus> listReencryptionStatus(
    final FSDirectory fsd, final long prevId)
    throws IOException {
  fsd.readLock();
  try {
    return fsd.ezManager.listReencryptionStatus(prevId);
  } finally {
    fsd.readUnlock();
  }
}
/**
 * Update re-encryption progress (submitted). Caller should
 * logSync after calling this, outside of the FSN lock.
 * <p>
 * The reencryption status is updated during SetXAttrs.
 *
 * @return the rebuilt encryption-zone xattr, for the caller to log.
 */
static XAttr updateReencryptionSubmitted(final FSDirectory fsd,
    final INodesInPath iip, final String ezKeyVersionName)
    throws IOException {
  assert fsd.hasWriteLock();
  Preconditions.checkNotNull(ezKeyVersionName, "ezKeyVersionName is null.");
  final ZoneEncryptionInfoProto zoneProto = getZoneEncryptionInfoProto(iip);
  Preconditions.checkNotNull(zoneProto, "ZoneEncryptionInfoProto is null.");
  // Fresh reencryption record: submitted now, not canceled, zero counts.
  // The trailing nulls match the completion-time and last-file slots used
  // by updateReencryptionProgress/Finish below.
  final ReencryptionInfoProto newProto = PBHelperClient
      .convert(ezKeyVersionName, Time.now(), false, 0, 0, null, null);
  // Rebuild the zone proto keeping suite/version/key, attaching new info.
  final ZoneEncryptionInfoProto newZoneProto = PBHelperClient
      .convert(PBHelperClient.convert(zoneProto.getSuite()),
          PBHelperClient.convert(zoneProto.getCryptoProtocolVersion()),
          zoneProto.getKeyName(), newProto);
  final XAttr xattr = XAttrHelper
      .buildXAttr(CRYPTO_XATTR_ENCRYPTION_ZONE, newZoneProto.toByteArray());
  final List<XAttr> xattrs = Lists.newArrayListWithCapacity(1);
  xattrs.add(xattr);
  // Persist via the regular (unprotected) xattr path; REPLACE because the
  // zone xattr necessarily already exists.
  FSDirXAttrOp.unprotectedSetXAttrs(fsd, iip, xattrs,
      EnumSet.of(XAttrSetFlag.REPLACE));
  return xattr;
}
/**
 * Update re-encryption progress (start, checkpoint). Caller should
 * logSync after calling this, outside of the FSN lock.
 * <p>
 * The reencryption status is updated during SetXAttrs.
 * Original reencryption status is passed in to get existing information
 * such as ezkeyVersionName and submissionTime.
 *
 * @param lastFile checkpoint: last file processed so far.
 * @param numReencrypted files re-encrypted since the previous checkpoint
 *                       (added to the running total).
 * @param numFailures failures since the previous checkpoint (added too).
 */
static XAttr updateReencryptionProgress(final FSDirectory fsd,
    final INode zoneNode, final ZoneReencryptionStatus origStatus,
    final String lastFile, final long numReencrypted, final long numFailures)
    throws IOException {
  assert fsd.hasWriteLock();
  Preconditions.checkNotNull(zoneNode, "Zone node is null");
  INodesInPath iip = INodesInPath.fromINode(zoneNode);
  final ZoneEncryptionInfoProto zoneProto = getZoneEncryptionInfoProto(iip);
  Preconditions.checkNotNull(zoneProto, "ZoneEncryptionInfoProto is null.");
  Preconditions.checkNotNull(origStatus, "Null status for " + iip.getPath());
  // Carry over identity fields from the original status; accumulate the
  // counters; record the new checkpoint file.
  final ReencryptionInfoProto newProto = PBHelperClient
      .convert(origStatus.getEzKeyVersionName(),
          origStatus.getSubmissionTime(), false,
          origStatus.getFilesReencrypted() + numReencrypted,
          origStatus.getNumReencryptionFailures() + numFailures, null,
          lastFile);
  final ZoneEncryptionInfoProto newZoneProto = PBHelperClient
      .convert(PBHelperClient.convert(zoneProto.getSuite()),
          PBHelperClient.convert(zoneProto.getCryptoProtocolVersion()),
          zoneProto.getKeyName(), newProto);
  final XAttr xattr = XAttrHelper
      .buildXAttr(CRYPTO_XATTR_ENCRYPTION_ZONE, newZoneProto.toByteArray());
  final List<XAttr> xattrs = Lists.newArrayListWithCapacity(1);
  xattrs.add(xattr);
  FSDirXAttrOp.unprotectedSetXAttrs(fsd, iip, xattrs,
      EnumSet.of(XAttrSetFlag.REPLACE));
  return xattr;
}
/**
 * Log re-encrypt complete (cancel, or 100% re-encrypt) to edits.
 * Caller should logSync after calling this, outside of the FSN lock.
 * <p>
 * Original reencryption status is passed in to get existing information,
 * this should include whether it is finished due to cancellation.
 * The reencryption status is updated during SetXAttrs for completion time.
 */
static List<XAttr> updateReencryptionFinish(final FSDirectory fsd,
    final INodesInPath zoneIIP, final ZoneReencryptionStatus origStatus)
    throws IOException {
  assert origStatus != null;
  assert fsd.hasWriteLock();
  // Mark completion in the in-memory status tracker first ...
  fsd.ezManager.getReencryptionStatus()
      .markZoneCompleted(zoneIIP.getLastINode().getId());
  // ... then persist it into the zone's xattr.
  final XAttr xattr =
      generateNewXAttrForReencryptionFinish(zoneIIP, origStatus);
  final List<XAttr> xattrs = Lists.newArrayListWithCapacity(1);
  xattrs.add(xattr);
  FSDirXAttrOp.unprotectedSetXAttrs(fsd, zoneIIP, xattrs,
      EnumSet.of(XAttrSetFlag.REPLACE));
  return xattrs;
}
/**
 * Build the zone xattr that records re-encryption completion: counters and
 * cancellation flag come from {@code status}, completion time is "now",
 * and the last-file checkpoint is cleared.
 */
static XAttr generateNewXAttrForReencryptionFinish(final INodesInPath iip,
    final ZoneReencryptionStatus status) throws IOException {
  final ZoneEncryptionInfoProto zoneProto = getZoneEncryptionInfoProto(iip);
  final ReencryptionInfoProto newRiProto = PBHelperClient
      .convert(status.getEzKeyVersionName(), status.getSubmissionTime(),
          status.isCanceled(), status.getFilesReencrypted(),
          status.getNumReencryptionFailures(), Time.now(), null);
  final ZoneEncryptionInfoProto newZoneProto = PBHelperClient
      .convert(PBHelperClient.convert(zoneProto.getSuite()),
          PBHelperClient.convert(zoneProto.getCryptoProtocolVersion()),
          zoneProto.getKeyName(), newRiProto);
  final XAttr xattr = XAttrHelper
      .buildXAttr(CRYPTO_XATTR_ENCRYPTION_ZONE, newZoneProto.toByteArray());
  return xattr;
}
/**
 * Read and parse the encryption-zone xattr on the last inode of {@code iip}.
 *
 * @throws IOException if the xattr is absent or cannot be parsed.
 */
private static ZoneEncryptionInfoProto getZoneEncryptionInfoProto(
    final INodesInPath iip) throws IOException {
  final XAttr fileXAttr = FSDirXAttrOp.unprotectedGetXAttrByPrefixedName(
      iip.getLastINode(), iip.getPathSnapshotId(),
      CRYPTO_XATTR_ENCRYPTION_ZONE);
  if (fileXAttr == null) {
    throw new IOException(
        "Could not find reencryption XAttr for file " + iip.getPath());
  }
  try {
    return ZoneEncryptionInfoProto.parseFrom(fileXAttr.getValue());
  } catch (InvalidProtocolBufferException e) {
    // Preserve the parse failure as the cause.
    throw new IOException(
        "Could not parse file encryption info for " + "inode " + iip
            .getPath(), e);
  }
}
/**
 * Save the batch's edeks to file xattrs.
 * Only writes edit-log entries; the in-memory xattrs are assumed to have
 * been updated already by the re-encryption handler.
 */
static void saveFileXAttrsForBatch(FSDirectory fsd,
    List<FileEdekInfo> batch) {
  assert fsd.getFSNamesystem().hasWriteLock(RwLockMode.FS);
  if (batch != null && !batch.isEmpty()) {
    for (FileEdekInfo entry : batch) {
      final INode inode = fsd.getInode(entry.getInodeId());
      // no dir lock, so inode could be removed. no-op if so.
      if (inode == null) {
        NameNode.LOG.info("Cannot find inode {}, skip saving xattr for"
            + " re-encryption", entry.getInodeId());
        continue;
      }
      // NOTE(review): assumes every surviving inode in the batch has an
      // XAttrFeature (it was just re-encrypted); getXAttrFeature() would
      // NPE otherwise — confirm this invariant with the caller.
      fsd.getEditLog().logSetXAttrs(inode.getFullPathName(),
          inode.getXAttrFeature().getXAttrs(), false);
    }
  }
}
/**
 * Set the FileEncryptionInfo for an INode.
 *
 * @param fsd the namespace tree.
 * @param iip inodes in the path of the target file.
 * @param info file encryption information
 * @param flag action when setting xattr. Either CREATE or REPLACE.
 * @throws IOException
 */
static void setFileEncryptionInfo(final FSDirectory fsd,
    final INodesInPath iip, final FileEncryptionInfo info,
    final XAttrSetFlag flag) throws IOException {
  // Make the PB for the xattr
  final HdfsProtos.PerFileEncryptionInfoProto proto =
      PBHelperClient.convertPerFileEncInfo(info);
  final byte[] protoBytes = proto.toByteArray();
  final XAttr fileEncryptionAttr =
      XAttrHelper.buildXAttr(CRYPTO_XATTR_FILE_ENCRYPTION_INFO, protoBytes);
  final List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
  xAttrs.add(fileEncryptionAttr);
  // Proto serialization above is done lock-free; only the xattr write
  // takes the directory write lock.
  fsd.writeLock();
  try {
    FSDirXAttrOp.unprotectedSetXAttrs(fsd, iip, xAttrs, EnumSet.of(flag));
  } finally {
    fsd.writeUnlock();
  }
}
/**
 * This function combines the per-file encryption info (obtained
 * from the inode's XAttrs), and the encryption info from its zone, and
 * returns a consolidated FileEncryptionInfo instance. Null is returned
 * for non-encrypted or raw files.
 *
 * @param fsd the namespace tree.
 * @param iip inodes in the path containing the file, passed in to
 *            avoid obtaining the list of inodes again
 * @return consolidated file encryption info; null for non-encrypted files
 */
static FileEncryptionInfo getFileEncryptionInfo(final FSDirectory fsd,
    final INodesInPath iip) throws IOException {
  // Fast path: raw (/.reserved/raw) access, no zones at all, or not a file.
  if (iip.isRaw() ||
      !fsd.ezManager.hasCreatedEncryptionZone() ||
      !iip.getLastINode().isFile()) {
    return null;
  }
  fsd.readLock();
  try {
    EncryptionZone encryptionZone = getEZForPath(fsd, iip);
    if (encryptionZone == null) {
      // not an encrypted file
      return null;
    } else if(encryptionZone.getPath() == null
        || encryptionZone.getPath().isEmpty()) {
      // Deliberately only logged, not fatal: processing continues below.
      if (NameNode.LOG.isDebugEnabled()) {
        NameNode.LOG.debug("Encryption zone " +
            encryptionZone.getPath() + " does not have a valid path.");
      }
    }
    XAttr fileXAttr = FSDirXAttrOp.unprotectedGetXAttrByPrefixedName(
        iip.getLastINode(), iip.getPathSnapshotId(),
        CRYPTO_XATTR_FILE_ENCRYPTION_INFO);
    if (fileXAttr == null) {
      // In a zone but no per-file info: warn and treat as non-encrypted.
      NameNode.LOG.warn("Could not find encryption XAttr for file " +
          iip.getPath() + " in encryption zone " + encryptionZone.getPath());
      return null;
    }
    // Zone-level parameters complete the per-file proto below.
    final CryptoProtocolVersion version = encryptionZone.getVersion();
    final CipherSuite suite = encryptionZone.getSuite();
    final String keyName = encryptionZone.getKeyName();
    try {
      HdfsProtos.PerFileEncryptionInfoProto fileProto =
          HdfsProtos.PerFileEncryptionInfoProto.parseFrom(
              fileXAttr.getValue());
      return PBHelperClient.convert(fileProto, suite, version, keyName);
    } catch (InvalidProtocolBufferException e) {
      throw new IOException("Could not parse file encryption info for " +
          "inode " + iip.getPath(), e);
    }
  } finally {
    fsd.readUnlock();
  }
}
/**
 * If the file and encryption key are valid, return the encryption info,
 * else throw a retry exception. The startFile method generates the EDEK
 * outside of the lock so the zone must be reverified.
 *
 * @param dir the namespace tree.
 * @param iip inodes in the file path
 * @param ezInfo the encryption key
 * @return FileEncryptionInfo for the file; null when the path is not in a
 *         zone.
 * @throws RetryStartFileException if key is inconsistent with current zone
 */
static FileEncryptionInfo getFileEncryptionInfo(FSDirectory dir,
    INodesInPath iip, EncryptionKeyInfo ezInfo)
    throws RetryStartFileException, IOException {
  FileEncryptionInfo feInfo = null;
  final EncryptionZone zone = getEZForPath(dir, iip);
  if (zone != null) {
    // The path is now within an EZ, but we're missing encryption parameters
    if (ezInfo == null) {
      throw new RetryStartFileException();
    }
    // Path is within an EZ and we have provided encryption parameters.
    // Make sure that the generated EDEK matches the settings of the EZ.
    final String ezKeyName = zone.getKeyName();
    if (!ezKeyName.equals(ezInfo.edek.getEncryptionKeyName())) {
      // Zone key changed between EDEK generation and now; caller retries.
      throw new RetryStartFileException();
    }
    feInfo = new FileEncryptionInfo(ezInfo.suite, ezInfo.protocolVersion,
        ezInfo.edek.getEncryptedKeyVersion().getMaterial(),
        ezInfo.edek.getEncryptedKeyIv(),
        ezKeyName, ezInfo.edek.getEncryptionKeyVersionName());
  }
  return feInfo;
}
/**
 * Whether {@code iip} lies inside some encryption zone. Short-circuits to
 * false (without locking) when no zone has ever been created.
 */
static boolean isInAnEZ(final FSDirectory fsd, final INodesInPath iip)
    throws UnresolvedLinkException, SnapshotAccessControlException,
    IOException {
  if (!fsd.ezManager.hasCreatedEncryptionZone()) {
    return false;
  }
  fsd.readLock();
  try {
    return fsd.ezManager.isInAnEZ(iip);
  } finally {
    fsd.readUnlock();
  }
}
/**
 * Best-effort attempt to proactively warm up the edek cache. We collect all
 * the EZ key names under the read lock, then hand them to a separate thread
 * to warm up. Retries happen (inside EDEKCacheLoader) if any keys fail to
 * warm up.
 *
 * @param delay initial delay before warming, forwarded to EDEKCacheLoader.
 * @param interval retry interval, forwarded to EDEKCacheLoader.
 * @param maxRetries maximum retry count, forwarded to EDEKCacheLoader.
 */
static void warmUpEdekCache(final ExecutorService executor,
    final FSDirectory fsd, final int delay, final int interval, final int maxRetries) {
  fsd.readLock();
  try {
    String[] edeks = fsd.ezManager.getKeyNames();
    // Actual KMS traffic happens on the executor thread, not under the lock.
    executor.execute(
        new EDEKCacheLoader(edeks, fsd.getProvider(), delay, interval, maxRetries));
  } finally {
    fsd.readUnlock();
  }
}
/**
* EDEKCacheLoader is being run in a separate thread to loop through all the
* EDEKs and warm them up in the KMS cache.
*/
static
|
FSDirEncryptionZoneOp
|
java
|
spring-projects__spring-security
|
oauth2/oauth2-authorization-server/src/main/java/org/springframework/security/oauth2/server/authorization/http/converter/OAuth2ClientRegistrationHttpMessageConverter.java
|
{
"start": 9598,
"end": 10620
}
|
/**
 * Converts an {@link OAuth2ClientRegistration} into the {@code Map} of
 * claims used as the HTTP response body.
 */
class ____
		implements Converter<OAuth2ClientRegistration, Map<String, Object>> {

	@Override
	public Map<String, Object> convert(OAuth2ClientRegistration source) {
		// Start from the registration's claims, preserving insertion order.
		Map<String, Object> claims = new LinkedHashMap<>(source.getClaims());
		if (source.getClientIdIssuedAt() != null) {
			claims.put(OAuth2ClientMetadataClaimNames.CLIENT_ID_ISSUED_AT,
					source.getClientIdIssuedAt().getEpochSecond());
		}
		if (source.getClientSecret() != null) {
			// Expiry defaults to 0 when the secret has no expiry set.
			long secretExpiry = (source.getClientSecretExpiresAt() != null)
					? source.getClientSecretExpiresAt().getEpochSecond()
					: 0;
			claims.put(OAuth2ClientMetadataClaimNames.CLIENT_SECRET_EXPIRES_AT, secretExpiry);
		}
		if (!CollectionUtils.isEmpty(source.getScopes())) {
			// Scopes are rendered as a single space-delimited string.
			claims.put(OAuth2ClientMetadataClaimNames.SCOPE,
					StringUtils.collectionToDelimitedString(source.getScopes(), " "));
		}
		return claims;
	}

}
}
|
OAuth2ClientRegistrationMapConverter
|
java
|
spring-projects__spring-boot
|
module/spring-boot-persistence/src/main/java/org/springframework/boot/persistence/autoconfigure/EntityScan.java
|
{
"start": 1656,
"end": 2464
}
|
interface ____ {
/**
 * Alias for the {@link #basePackages()} attribute. Allows for more concise annotation
 * declarations e.g.: {@code @EntityScan("org.my.pkg")} instead of
 * {@code @EntityScan(basePackages="org.my.pkg")}.
 * @return the base packages to scan
 */
@AliasFor("basePackages")
String[] value() default {};

/**
 * Base packages to scan for entities. {@link #value()} is an alias for (and mutually
 * exclusive with) this attribute.
 * <p>
 * Use {@link #basePackageClasses()} for a type-safe alternative to String-based
 * package names.
 * @return the base packages to scan
 */
@AliasFor("value")
String[] basePackages() default {};
/**
* Type-safe alternative to {@link #basePackages()} for specifying the packages to
* scan for entities. The package of each
|
EntityScan
|
java
|
spring-projects__spring-framework
|
spring-context/src/test/java/org/springframework/context/annotation/PropertySourceAnnotationTests.java
|
{
"start": 2111,
"end": 2943
}
|
class ____ {
// Verifies that an explicitly-named @PropertySource ("p1") is registered,
// resolves properties, and is appended LAST in the environment's sources.
@Test
void withExplicitName() {
	AnnotationConfigApplicationContext ctx = new AnnotationConfigApplicationContext(ConfigWithExplicitName.class);
	assertThat(ctx.getEnvironment().getPropertySources().contains("p1")).as("property source p1 was not added").isTrue();
	assertThat(ctx.getBean(TestBean.class).getName()).isEqualTo("p1TestBean");
	// assert that the property source was added last to the set of sources
	MutablePropertySources sources = ctx.getEnvironment().getPropertySources();
	String name = sources.stream().toList().get(sources.size() - 1).getName();
	assertThat(name).isEqualTo("p1");
	ctx.close();
}
@Test
void withImplicitName() {
AnnotationConfigApplicationContext ctx = new AnnotationConfigApplicationContext(ConfigWithImplicitName.class);
String name = "
|
PropertySourceAnnotationTests
|
java
|
quarkusio__quarkus
|
extensions/arc/deployment/src/test/java/io/quarkus/arc/test/stereotype/DoubleScopedStereotypeHierarchyTestCase.java
|
{
"start": 1231,
"end": 1295
}
|
interface ____ {
}
@AnotherStereotype
|
AnotherStereotype
|
java
|
apache__dubbo
|
dubbo-serialization/dubbo-serialization-hessian2/src/main/java/org/apache/dubbo/common/serialize/hessian2/aot/HessianReflectionTypeDescriberRegistrar.java
|
{
"start": 1279,
"end": 3543
}
|
/**
 * Registers reflection type metadata for Hessian2 (de)serializer classes so
 * they remain reflectively accessible under AOT/native-image compilation.
 */
class ____ implements ReflectionTypeDescriberRegistrar {

    @Override
    public List<TypeDescriber> getTypeDescribers() {
        List<TypeDescriber> typeDescribers = new ArrayList<>();
        // (De)serializer implementations are listed in SPI-style resources.
        loadFile("META-INF/dubbo/hessian/deserializers", typeDescribers);
        loadFile("META-INF/dubbo/hessian/serializers", typeDescribers);
        // java.sql date/time types also need declared-member access.
        typeDescribers.add(buildTypeDescriberWithDeclared(Date.class));
        typeDescribers.add(buildTypeDescriberWithDeclared(Time.class));
        typeDescribers.add(buildTypeDescriberWithDeclared(Timestamp.class));
        return typeDescribers;
    }

    /**
     * Loads every classpath resource named {@code path} as a Properties file
     * and adds a describer for each class name found among the values.
     * Best-effort: any failure is ignored so AOT processing is never broken.
     */
    private void loadFile(String path, List<TypeDescriber> typeDescribers) {
        try {
            Enumeration<URL> resources = this.getClass().getClassLoader().getResources(path);
            while (resources.hasMoreElements()) {
                URL url = resources.nextElement();
                Properties props = new Properties();
                // try-with-resources: the stream returned by openStream()
                // was previously leaked (Properties.load does not close it).
                try (java.io.InputStream in = url.openStream()) {
                    props.load(in);
                }
                for (Object value : props.values()) {
                    String className = (String) value;
                    typeDescribers.add(buildTypeDescriberWithDeclared(className));
                }
            }
        } catch (Throwable t) {
            // ignore: missing/malformed resources must not fail registration
        }
    }

    /** Convenience overload: registers the class by fully-qualified name. */
    private TypeDescriber buildTypeDescriberWithDeclared(Class<?> cl) {
        return buildTypeDescriberWithDeclared(cl.getName());
    }

    /**
     * Builds a describer exposing declared methods, constructors and fields
     * of the named class for reflective access.
     */
    private TypeDescriber buildTypeDescriberWithDeclared(String cl) {
        Set<MemberCategory> memberCategories = new HashSet<>();
        memberCategories.add(MemberCategory.INVOKE_DECLARED_METHODS);
        memberCategories.add(MemberCategory.INVOKE_DECLARED_CONSTRUCTORS);
        memberCategories.add(MemberCategory.DECLARED_FIELDS);
        return new TypeDescriber(cl, null, new HashSet<>(), new HashSet<>(), new HashSet<>(), memberCategories);
    }
}
|
HessianReflectionTypeDescriberRegistrar
|
java
|
apache__camel
|
components/camel-infinispan/camel-infinispan-embedded/src/main/java/org/apache/camel/component/infinispan/embedded/InfinispanEmbeddedEventListeners.java
|
{
"start": 1533,
"end": 1895
}
|
/**
 * Event listener variant distinguished only by its {@code @Listener}
 * registration flags (presumably {@code clustered = true, sync = true} per
 * the class name — the annotation itself is above this chunk; confirm).
 */
class ____ extends InfinispanEmbeddedEventListener {
    public ClusteredSync(Set<Event.Type> eventTypes) {
        super(eventTypes);
    }
}
// ******************************************
//
// Local
//
// ******************************************
@Listener(clustered = false, sync = false)
public static
|
ClusteredSync
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/HeapPriorityQueueSetFactory.java
|
{
"start": 1375,
"end": 2498
}
|
/**
 * Factory for {@link HeapPriorityQueueSet} instances, pre-configured with a
 * fixed key-group layout and minimum capacity.
 */
class ____ implements PriorityQueueSetFactory {

    // Key groups served by every queue created by this factory.
    @Nonnull private final KeyGroupRange keyGroupRange;

    // Total number of key groups in the job.
    @Nonnegative private final int totalKeyGroups;

    // Initial capacity handed to each created queue.
    @Nonnegative private final int minimumCapacity;

    public HeapPriorityQueueSetFactory(
            @Nonnull KeyGroupRange keyGroupRange,
            @Nonnegative int totalKeyGroups,
            @Nonnegative int minimumCapacity) {
        this.keyGroupRange = keyGroupRange;
        this.totalKeyGroups = totalKeyGroups;
        this.minimumCapacity = minimumCapacity;
    }

    @Nonnull
    @Override
    public <T extends HeapPriorityQueueElement & PriorityComparable<? super T> & Keyed<?>>
            HeapPriorityQueueSet<T> create(
                    @Nonnull String stateName,
                    @Nonnull TypeSerializer<T> byteOrderedElementSerializer) {
        // stateName and serializer are unused by the heap implementation;
        // they are part of the factory interface contract.
        return new HeapPriorityQueueSet<>(
                PriorityComparator.forPriorityComparableObjects(),
                KeyExtractorFunction.forKeyedObjects(),
                minimumCapacity,
                keyGroupRange,
                totalKeyGroups);
    }
}
|
HeapPriorityQueueSetFactory
|
java
|
grpc__grpc-java
|
interop-testing/src/generated/main/grpc/io/grpc/testing/integration/UnimplementedServiceGrpc.java
|
{
"start": 13593,
"end": 14769
}
|
// Generated gRPC code: supplies the proto MethodDescriptor for a single
// named method of the service. Do not hand-edit.
class ____
    extends UnimplementedServiceBaseDescriptorSupplier
    implements io.grpc.protobuf.ProtoMethodDescriptorSupplier {
  private final java.lang.String methodName;

  UnimplementedServiceMethodDescriptorSupplier(java.lang.String methodName) {
    this.methodName = methodName;
  }

  @java.lang.Override
  public com.google.protobuf.Descriptors.MethodDescriptor getMethodDescriptor() {
    // Looked up lazily from the service descriptor on every call.
    return getServiceDescriptor().findMethodByName(methodName);
  }
}
// Cache for the lazily-built service descriptor; volatile enables the
// double-checked locking below.
private static volatile io.grpc.ServiceDescriptor serviceDescriptor;

/**
 * Lazily builds and caches the {@code ServiceDescriptor} using
 * double-checked locking (safe because the field above is volatile).
 */
public static io.grpc.ServiceDescriptor getServiceDescriptor() {
  io.grpc.ServiceDescriptor result = serviceDescriptor;
  if (result == null) {
    synchronized (UnimplementedServiceGrpc.class) {
      result = serviceDescriptor;
      if (result == null) {
        serviceDescriptor = result = io.grpc.ServiceDescriptor.newBuilder(SERVICE_NAME)
            .setSchemaDescriptor(new UnimplementedServiceFileDescriptorSupplier())
            .addMethod(getUnimplementedCallMethod())
            .build();
      }
    }
  }
  return result;
}
}
|
UnimplementedServiceMethodDescriptorSupplier
|
java
|
google__error-prone
|
check_api/src/test/java/com/google/errorprone/util/ASTHelpersTest.java
|
{
"start": 23330,
"end": 23679
}
|
class ____ {
public List<? super Number> myList;
}
""");
TestScanner scanner = getUpperBoundScanner("java.lang.Object");
tests.add(scanner);
assertCompiles(scanner);
}
@Test
public void getUpperBoundTypeVariable() {
writeFile(
"A.java",
"""
import java.util.List;
public
|
A
|
java
|
google__dagger
|
javatests/dagger/hilt/android/UsesLocalComponentUninstallModuleTest.java
|
{
"start": 2095,
"end": 2324
}
|
class ____ {
@Rule public HiltAndroidRule rule = new HiltAndroidRule(this);
@Inject @UsesComponentQualifier String injectedString;
@Module
@InstallIn(SingletonComponent.class)
public
|
UsesLocalComponentUninstallModuleTest
|
java
|
spring-projects__spring-framework
|
spring-aop/src/main/java/org/springframework/aop/framework/Advised.java
|
{
"start": 1535,
"end": 1816
}
|
class ____ of specified interfaces?
*/
boolean isProxyTargetClass();
/**
* Return the interfaces proxied by the AOP proxy.
* <p>Will not include the target class, which may also be proxied.
*/
Class<?>[] getProxiedInterfaces();
/**
* Determine whether the given
|
instead
|
java
|
quarkusio__quarkus
|
extensions/agroal/runtime/src/main/java/io/quarkus/agroal/runtime/AgroalRecorder.java
|
{
"start": 521,
"end": 2845
}
|
/**
 * Quarkus runtime recorder for Agroal datasources: produces the suppliers
 * and factory functions that the deployment module records at build time.
 * NOTE(review): the anonymous classes here are presumably required by
 * Quarkus bytecode recording (lambdas are typically not recordable) —
 * confirm before converting them.
 */
class ____ {

    // Runtime configuration, injected as RuntimeValue so it can be read
    // lazily at application start rather than at build time.
    private final RuntimeValue<DataSourcesRuntimeConfig> runtimeConfig;
    private final RuntimeValue<DataSourcesJdbcRuntimeConfig> jdbcRuntimeConfig;

    @Inject
    public AgroalRecorder(RuntimeValue<DataSourcesRuntimeConfig> runtimeConfig,
            RuntimeValue<DataSourcesJdbcRuntimeConfig> jdbcRuntimeConfig) {
        this.runtimeConfig = runtimeConfig;
        this.jdbcRuntimeConfig = jdbcRuntimeConfig;
    }

    /** Wraps an already-built support object in a constant supplier. */
    public Supplier<AgroalDataSourceSupport> dataSourceSupportSupplier(AgroalDataSourceSupport agroalDataSourceSupport) {
        return new Supplier<AgroalDataSourceSupport>() {
            @Override
            public AgroalDataSourceSupport get() {
                return agroalDataSourceSupport;
            }
        };
    }

    /**
     * Supplier deciding whether the named datasource bean is active:
     * inactive when explicitly deactivated or when no JDBC URL is set.
     */
    public Supplier<ActiveResult> agroalDataSourceCheckActiveSupplier(String dataSourceName) {
        return new Supplier<>() {
            @Override
            public ActiveResult get() {
                Optional<Boolean> active = runtimeConfig.getValue().dataSources().get(dataSourceName).active();
                if (active.isPresent() && !active.get()) {
                    return ActiveResult.inactive(DataSourceUtil.dataSourceInactiveReasonDeactivated(dataSourceName));
                }
                if (jdbcRuntimeConfig.getValue().dataSources().get(dataSourceName).jdbc().url().isEmpty()) {
                    return ActiveResult.inactive(DataSourceUtil.dataSourceInactiveReasonUrlMissing(dataSourceName,
                            "jdbc.url"));
                }
                return ActiveResult.active();
            }
        };
    }

    /**
     * Factory for the synthetic AgroalDataSource bean; delegates creation to
     * the injected DataSources holder, passing the OTel flag when present.
     */
    public Function<SyntheticCreationalContext<AgroalDataSource>, AgroalDataSource> agroalDataSourceSupplier(
            String dataSourceName,
            Optional<RuntimeValue<Boolean>> otelEnabled) {
        return new Function<>() {
            @SuppressWarnings("deprecation")
            @Override
            public AgroalDataSource apply(SyntheticCreationalContext<AgroalDataSource> context) {
                DataSources dataSources = context.getInjectedReference(DataSources.class);
                return dataSources.createDataSource(dataSourceName,
                        otelEnabled.isPresent() ? otelEnabled.get().getValue() : false);
            }
        };
    }
}
|
AgroalRecorder
|
java
|
junit-team__junit5
|
jupiter-tests/src/test/java/org/junit/jupiter/engine/extension/TimeoutExtensionTests.java
|
{
"start": 28054,
"end": 28235
}
|
class ____ {
@Timeout(value = 10, unit = MILLISECONDS)
@BeforeEach
void setUp() throws Exception {
Thread.sleep(1000);
}
@Nested
|
NestedClassWithOuterSetupMethodTestCase
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/fileContext/TestOSSFileContextMainOperations.java
|
{
"start": 1249,
"end": 2175
}
|
/**
 * Runs the generic FileContext main-operations test suite against Aliyun
 * OSS, disabling the cases the backend cannot support.
 */
class ____
    extends FileContextMainOperationsBaseTest {

  @BeforeEach
  public void setUp() throws IOException, Exception {
    Configuration conf = new Configuration();
    // The OSS-backed FileContext must exist before the base class sets up
    // its working directories.
    fc = AliyunOSSTestUtils.createTestFileContext(conf);
    super.setUp();
  }

  @Override
  protected boolean listCorruptedBlocksSupported() {
    return false;
  }

  // The four overrides below disable inherited tests for features OSS
  // lacks (append, checksum verification).
  @Test
  @Disabled
  public void testCreateFlagAppendExistingFile() throws IOException {
    // append not supported, so test removed
  }

  @Test
  @Disabled
  public void testCreateFlagCreateAppendExistingFile() throws IOException {
    // append not supported, so test removed
  }

  @Test
  @Disabled
  public void testSetVerifyChecksum() throws IOException {
    // checksums ignored, so test ignored
  }

  @Test
  @Disabled
  public void testBuilderCreateAppendExistingFile() throws IOException {
    // append not supported, so test removed
  }
}
|
TestOSSFileContextMainOperations
|
java
|
grpc__grpc-java
|
api/src/test/java/io/grpc/PersistentHashArrayMappedTrieTest.java
|
{
"start": 6461,
"end": 7791
}
|
// Local helper class: checks that a combined CompressedIndex node holds
// both leaves. Captures key1/key2/value1/value2 from the enclosing test
// method (not visible in this chunk).
class ____ {
  private void verify(Node<Key, Object> ret) {
    CompressedIndex<Key, Object> collisionLeaf = (CompressedIndex<Key, Object>) ret;
    // Expected bitmap has bits 7 and 19 set — presumably derived from the
    // two keys' hash bits at this trie level; confirm against the keys
    // constructed in the enclosing test.
    assertEquals((1 << 7) | (1 << 19), collisionLeaf.bitmap);
    assertEquals(2, collisionLeaf.values.length);
    // Each leaf must be reachable both directly and through the parent.
    assertSame(value1, collisionLeaf.values[0].get(key1, key1.hashCode(), 0));
    assertSame(value2, collisionLeaf.values[1].get(key2, key2.hashCode(), 0));
    assertSame(value1, ret.get(key1, key1.hashCode(), 0));
    assertSame(value2, ret.get(key2, key2.hashCode(), 0));
    assertEquals(2, ret.size());
  }
}
Verifier verifier = new Verifier();
verifier.verify(CompressedIndex.combine(leaf1, key1.hashCode(), leaf2, key2.hashCode(), 0));
verifier.verify(CompressedIndex.combine(leaf2, key2.hashCode(), leaf1, key1.hashCode(), 0));
assertEquals(1, leaf1.size());
assertEquals(1, leaf2.size());
}
@Test
public void compressedIndex_combine_sameIndexBit() {
final Key key1 = new Key(17 << 5 | 1); // 5 bit regions: (17, 1)
final Key key2 = new Key(31 << 5 | 1); // 5 bit regions: (31, 1)
final Object value1 = new Object();
final Object value2 = new Object();
Leaf<Key, Object> leaf1 = new Leaf<>(key1, value1);
Leaf<Key, Object> leaf2 = new Leaf<>(key2, value2);
|
Verifier
|
java
|
apache__flink
|
flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/serde/ConfigurationJsonSerializerFilter.java
|
{
"start": 1212,
"end": 1553
}
|
/**
 * Equality-based value filter. Per the class name this is presumably used
 * as a Jackson serializer inclusion filter, where {@code equals(value)}
 * returning {@code true} means the value is SUPPRESSED from output —
 * confirm at the usage site. Literal behavior: null is suppressed, a
 * {@link Configuration} is suppressed only when its map is empty, and any
 * other object is suppressed. Do NOT "fix" the null/default-true branches;
 * they are the filter semantics, not an equals() contract.
 */
class ____ {
  @Override
  public boolean equals(Object obj) {
    if (obj == null) {
      return true;
    }
    if (obj instanceof Configuration) {
      Configuration other = (Configuration) obj;
      // Keep (serialize) only non-empty configurations.
      return other.toMap().isEmpty();
    }
    return true;
  }
}
|
ConfigurationJsonSerializerFilter
|
java
|
elastic__elasticsearch
|
libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/ExponentialHistogramXContent.java
|
{
"start": 1435,
"end": 7502
}
|
class ____ {
public static final String SCALE_FIELD = "scale";
public static final String SUM_FIELD = "sum";
public static final String MIN_FIELD = "min";
public static final String MAX_FIELD = "max";
public static final String ZERO_FIELD = "zero";
public static final String ZERO_COUNT_FIELD = "count";
public static final String ZERO_THRESHOLD_FIELD = "threshold";
public static final String POSITIVE_FIELD = "positive";
public static final String NEGATIVE_FIELD = "negative";
public static final String BUCKET_INDICES_FIELD = "indices";
public static final String BUCKET_COUNTS_FIELD = "counts";
/**
* Serializes an {@link ExponentialHistogram} to the provided {@link XContentBuilder}.
* @param builder the XContentBuilder to write to
* @param histogram the ExponentialHistogram to serialize
* @throws IOException if the XContentBuilder throws an IOException
*/
public static void serialize(XContentBuilder builder, @Nullable ExponentialHistogram histogram) throws IOException {
if (histogram == null) {
builder.nullValue();
return;
}
builder.startObject();
builder.field(SCALE_FIELD, histogram.scale());
if (histogram.sum() != 0.0 || histogram.valueCount() > 0) {
builder.field(SUM_FIELD, histogram.sum());
}
if (Double.isNaN(histogram.min()) == false) {
builder.field(MIN_FIELD, histogram.min());
}
if (Double.isNaN(histogram.max()) == false) {
builder.field(MAX_FIELD, histogram.max());
}
double zeroThreshold = histogram.zeroBucket().zeroThreshold();
long zeroCount = histogram.zeroBucket().count();
if (zeroCount != 0 || zeroThreshold != 0) {
builder.startObject(ZERO_FIELD);
if (zeroCount != 0) {
builder.field(ZERO_COUNT_FIELD, zeroCount);
}
if (zeroThreshold != 0) {
builder.field(ZERO_THRESHOLD_FIELD, zeroThreshold);
}
builder.endObject();
}
writeBuckets(builder, POSITIVE_FIELD, histogram.positiveBuckets());
writeBuckets(builder, NEGATIVE_FIELD, histogram.negativeBuckets());
builder.endObject();
}
private static void writeBuckets(XContentBuilder b, String fieldName, ExponentialHistogram.Buckets buckets) throws IOException {
if (buckets.iterator().hasNext() == false) {
return;
}
b.startObject(fieldName);
BucketIterator it = buckets.iterator();
b.startArray(BUCKET_INDICES_FIELD);
while (it.hasNext()) {
b.value(it.peekIndex());
it.advance();
}
b.endArray();
it = buckets.iterator();
b.startArray(BUCKET_COUNTS_FIELD);
while (it.hasNext()) {
b.value(it.peekCount());
it.advance();
}
b.endArray();
b.endObject();
}
/**
* Parses an {@link ExponentialHistogram} from the provided {@link XContentParser}.
* This method is neither optimized, nor does it do any validation of the parsed content.
* No estimation for missing sum/min/max is done.
* Therefore only intended for testing!
*
* @param xContent the serialized histogram to read
* @return the deserialized histogram
* @throws IOException if the XContentParser throws an IOException
*/
public static ExponentialHistogram parseForTesting(XContentParser xContent) throws IOException {
if (xContent.currentToken() == null) {
xContent.nextToken();
}
if (xContent.currentToken() == XContentParser.Token.VALUE_NULL) {
return null;
}
return parseForTesting(xContent.map());
}
/**
* Parses an {@link ExponentialHistogram} from a {@link Map}.
* This method is neither optimized, nor does it do any validation of the parsed content.
* No estimation for missing sum/min/max is done.
* Therefore only intended for testing!
*
* @param xContent the serialized histogram as a map
* @return the deserialized histogram
*/
public static ExponentialHistogram parseForTesting(@Nullable Map<String, Object> xContent) {
if (xContent == null) {
return null;
}
int scale = ((Number) xContent.get(SCALE_FIELD)).intValue();
ExponentialHistogramBuilder builder = ExponentialHistogram.builder(scale, ExponentialHistogramCircuitBreaker.noop());
Map<String, Number> zero = Types.forciblyCast(xContent.getOrDefault(ZERO_FIELD, Collections.emptyMap()));
double zeroThreshold = zero.getOrDefault(ZERO_THRESHOLD_FIELD, 0).doubleValue();
long zeroCount = zero.getOrDefault(ZERO_COUNT_FIELD, 0).longValue();
builder.zeroBucket(ZeroBucket.create(zeroThreshold, zeroCount));
builder.sum(((Number) xContent.getOrDefault(SUM_FIELD, 0)).doubleValue());
builder.min(((Number) xContent.getOrDefault(MIN_FIELD, Double.NaN)).doubleValue());
builder.max(((Number) xContent.getOrDefault(MAX_FIELD, Double.NaN)).doubleValue());
parseBuckets(Types.forciblyCast(xContent.getOrDefault(NEGATIVE_FIELD, Collections.emptyMap())), builder::setNegativeBucket);
parseBuckets(Types.forciblyCast(xContent.getOrDefault(POSITIVE_FIELD, Collections.emptyMap())), builder::setPositiveBucket);
return builder.build();
}
private static void parseBuckets(Map<String, List<Number>> serializedBuckets, BiConsumer<Long, Long> bucketSetter) {
List<Number> indices = serializedBuckets.getOrDefault(BUCKET_INDICES_FIELD, Collections.emptyList());
List<Number> counts = serializedBuckets.getOrDefault(BUCKET_COUNTS_FIELD, Collections.emptyList());
assert indices.size() == counts.size();
for (int i = 0; i < indices.size(); i++) {
bucketSetter.accept(indices.get(i).longValue(), counts.get(i).longValue());
}
}
}
|
ExponentialHistogramXContent
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/deser/merge/MergeWithNullTest.java
|
{
"start": 484,
"end": 707
}
|
class ____ {
@JsonMerge
public AB loc = new AB(1, 2);
protected ConfigDefault() { }
public ConfigDefault(int a, int b) {
loc = new AB(a, b);
}
}
static
|
ConfigDefault
|
java
|
spring-projects__spring-security
|
config/src/test/java/org/springframework/security/config/annotation/web/configurers/PortMapperConfigurerTests.java
|
{
"start": 1844,
"end": 2863
}
|
class ____ {
public final SpringTestContext spring = new SpringTestContext(this);
@Autowired
private MockMvc mockMvc;
@Test
public void requestWhenPortMapperTwiceInvokedThenDoesNotOverride() throws Exception {
this.spring.register(InvokeTwiceDoesNotOverride.class).autowire();
this.mockMvc.perform(get("http://localhost:543")).andExpect(redirectedUrl("https://localhost:123"));
}
@Test
public void requestWhenPortMapperHttpMapsToInLambdaThenRedirectsToHttpsPort() throws Exception {
this.spring.register(HttpMapsToInLambdaConfig.class).autowire();
this.mockMvc.perform(get("http://localhost:543")).andExpect(redirectedUrl("https://localhost:123"));
}
@Test
public void requestWhenCustomPortMapperInLambdaThenRedirectsToHttpsPort() throws Exception {
this.spring.register(CustomPortMapperInLambdaConfig.class).autowire();
this.mockMvc.perform(get("http://localhost:543")).andExpect(redirectedUrl("https://localhost:123"));
}
@Configuration
@EnableWebSecurity
static
|
PortMapperConfigurerTests
|
java
|
quarkusio__quarkus
|
independent-projects/resteasy-reactive/common/runtime/src/main/java/org/jboss/resteasy/reactive/ResponseHeader.java
|
{
"start": 952,
"end": 1128
}
|
interface ____ {
/**
* The {@link ResponseHeader} instances.
*
* @return the instances
*/
ResponseHeader[] value();
}
}
|
List
|
java
|
apache__kafka
|
storage/src/test/java/org/apache/kafka/storage/internals/log/LogValidatorTest.java
|
{
"start": 3448,
"end": 3685
}
|
class ____ {
private final Time time = Time.SYSTEM;
private final TopicPartition topicPartition = new TopicPartition("topic", 0);
private final MetricsRecorder metricsRecorder = new MetricsRecorder();
static
|
LogValidatorTest
|
java
|
spring-projects__spring-framework
|
spring-core/src/main/java/org/springframework/cglib/beans/BeanCopier.java
|
{
"start": 2622,
"end": 6336
}
|
class ____ extends AbstractClassGenerator {
private static final Source SOURCE = new Source(BeanCopier.class.getName());
private Class source;
private Class target;
private boolean useConverter;
public Generator() {
super(SOURCE);
}
public void setSource(Class source) {
this.source = source;
// SPRING PATCH BEGIN
setContextClass(source);
setNamePrefix(source.getName());
// SPRING PATCH END
}
public void setTarget(Class target) {
this.target = target;
// SPRING PATCH BEGIN
setContextClass(target);
setNamePrefix(target.getName());
// SPRING PATCH END
}
public void setUseConverter(boolean useConverter) {
this.useConverter = useConverter;
}
@Override
protected ClassLoader getDefaultClassLoader() {
return source.getClassLoader();
}
@Override
protected ProtectionDomain getProtectionDomain() {
return ReflectUtils.getProtectionDomain(source);
}
public BeanCopier create() {
Object key = KEY_FACTORY.newInstance(source.getName(), target.getName(), useConverter);
return (BeanCopier)super.create(key);
}
@Override
public void generateClass(ClassVisitor v) {
Type sourceType = Type.getType(source);
Type targetType = Type.getType(target);
ClassEmitter ce = new ClassEmitter(v);
// Byte code level cannot be higher than 1.8 due to STATICHOOK methods
// which set static final fields outside the initializer method <clinit>.
ce.begin_class(Constants.V1_8,
Constants.ACC_PUBLIC,
getClassName(),
BEAN_COPIER,
null,
Constants.SOURCE_FILE);
EmitUtils.null_constructor(ce);
CodeEmitter e = ce.begin_method(Constants.ACC_PUBLIC, COPY, null);
PropertyDescriptor[] getters = ReflectUtils.getBeanGetters(source);
PropertyDescriptor[] setters = ReflectUtils.getBeanSetters(target);
Map names = new HashMap();
for (PropertyDescriptor getter : getters) {
names.put(getter.getName(), getter);
}
Local targetLocal = e.make_local();
Local sourceLocal = e.make_local();
if (useConverter) {
e.load_arg(1);
e.checkcast(targetType);
e.store_local(targetLocal);
e.load_arg(0);
e.checkcast(sourceType);
e.store_local(sourceLocal);
} else {
e.load_arg(1);
e.checkcast(targetType);
e.load_arg(0);
e.checkcast(sourceType);
}
for (PropertyDescriptor setter : setters) {
PropertyDescriptor getter = (PropertyDescriptor)names.get(setter.getName());
if (getter != null) {
MethodInfo read = ReflectUtils.getMethodInfo(getter.getReadMethod());
MethodInfo write = ReflectUtils.getMethodInfo(setter.getWriteMethod());
if (useConverter) {
Type setterType = write.getSignature().getArgumentTypes()[0];
e.load_local(targetLocal);
e.load_arg(2);
e.load_local(sourceLocal);
e.invoke(read);
e.box(read.getSignature().getReturnType());
EmitUtils.load_class(e, setterType);
e.push(write.getSignature().getName());
e.invoke_interface(CONVERTER, CONVERT);
e.unbox_or_zero(setterType);
e.invoke(write);
} else if (compatible(getter, setter)) {
e.dup2();
e.invoke(read);
e.invoke(write);
}
}
}
e.return_value();
e.end_method();
ce.end_class();
}
private static boolean compatible(PropertyDescriptor getter, PropertyDescriptor setter) {
// TODO: allow automatic widening conversions?
return setter.getPropertyType().isAssignableFrom(getter.getPropertyType());
}
@Override
protected Object firstInstance(Class type) {
return ReflectUtils.newInstance(type);
}
@Override
protected Object nextInstance(Object instance) {
return instance;
}
}
}
|
Generator
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/bvt/sql/odps/OdpsGrantTest.java
|
{
"start": 897,
"end": 3631
}
|
class ____ extends TestCase {
public void test_0() throws Exception {
String sql = "grant update, Select on table adl_register_baseline_sdt to user DXP_71074213@aliyun.com";
OdpsStatementParser parser = new OdpsStatementParser(sql);
SQLStatement stmt = parser.parseStatementList().get(0);
parser.match(Token.EOF);
String output = SQLUtils.toOdpsString(stmt);
// System.out.println(output);
assertEquals("GRANT UPDATE, SELECT ON TABLE adl_register_baseline_sdt TO USER DXP_71074213@aliyun.com",
output);
}
public void test_1() throws Exception {
String sql = "grant role_project_admin to aliyun$DXP_xxxxx@aliyun.com";
OdpsStatementParser parser = new OdpsStatementParser(sql);
SQLStatement stmt = parser.parseStatementList().get(0);
parser.match(Token.EOF);
String output = SQLUtils.toOdpsString(stmt);
// System.out.println(output);
assertEquals("GRANT ROLE_PROJECT_ADMIN TO aliyun$DXP_xxxxx@aliyun.com", output);
}
public void test_2() throws Exception {
String sql = "grant super Write to user aliyun$DXP_xxxxx@aliyun.com";
OdpsStatementParser parser = new OdpsStatementParser(sql);
SQLStatement stmt = parser.parseStatementList().get(0);
parser.match(Token.EOF);
String output = SQLUtils.toOdpsString(stmt);
// System.out.println(output);
assertEquals("GRANT SUPER WRITE TO USER aliyun$DXP_xxxxx@aliyun.com", output);
}
public void test_3() throws Exception {
String sql = "grant label 2 on table adl_register_baseline_sdt(c1, c2) to user aliyun$DXP_xxxxx@aliyun.com with exp 5";
OdpsStatementParser parser = new OdpsStatementParser(sql);
SQLStatement stmt = parser.parseStatementList().get(0);
parser.match(Token.EOF);
String output = SQLUtils.toOdpsString(stmt);
// System.out.println(output);
assertEquals("GRANT LABEL 2 ON TABLE adl_register_baseline_sdt(c1, c2) TO USER aliyun$DXP_xxxxx@aliyun.com WITH EXP 5",
output);
}
public void test_4() throws Exception {
String sql = "grant CreateInstance, CreateResource, CreateFunction, CreateTable, List ON PROJECT test_project TO ROLE worker";
OdpsStatementParser parser = new OdpsStatementParser(sql);
SQLStatement stmt = parser.parseStatementList().get(0);
parser.match(Token.EOF);
String output = SQLUtils.toOdpsString(stmt);
// System.out.println(output);
assertEquals("GRANT CREATEINSTANCE, CREATERESOURCE, CREATEFUNCTION, CREATETABLE, LIST ON PROJECT test_project TO ROLE worker",
output);
}
}
|
OdpsGrantTest
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/LoggedTaskAttempt.java
|
{
"start": 1882,
"end": 3576
}
|
class ____ implements DeepCompare {
TaskAttemptID attemptID;
Pre21JobHistoryConstants.Values result;
long startTime = -1L;
long finishTime = -1L;
NodeName hostName;
long hdfsBytesRead = -1L;
long hdfsBytesWritten = -1L;
long fileBytesRead = -1L;
long fileBytesWritten = -1L;
long mapInputRecords = -1L;
long mapInputBytes = -1L;
long mapOutputBytes = -1L;
long mapOutputRecords = -1L;
long combineInputRecords = -1L;
long reduceInputGroups = -1L;
long reduceInputRecords = -1L;
long reduceShuffleBytes = -1L;
long reduceOutputRecords = -1L;
long spilledRecords = -1L;
long shuffleFinished = -1L;
long sortFinished = -1L;
LoggedLocation location;
// Initialize to default object for backward compatibility
ResourceUsageMetrics metrics = new ResourceUsageMetrics();
List<Integer> clockSplits = new ArrayList<Integer>();
List<Integer> cpuUsages = new ArrayList<Integer>();
List<Integer> vMemKbytes = new ArrayList<Integer>();
List<Integer> physMemKbytes = new ArrayList<Integer>();
LoggedTaskAttempt() {
super();
}
// carries the kinds of splits vectors a LoggedTaskAttempt holds.
//
// Each enumeral has the following methods:
// get(LoggedTaskAttempt attempt)
// returns a List<Integer> with the corresponding value field
// set(LoggedTaskAttempt attempt, List<Integer> newValue)
// sets the value
// There is also a pair of methods get(List<List<Integer>>) and
// set(List<List<Integer>>, List<Integer>) which correspondingly
// delivers or sets the appropriate element of the
// List<List<Integer>> .
// This makes it easier to add another kind in the future.
public
|
LoggedTaskAttempt
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/bvt/sql/oracle/select/OracleSelectTest1.java
|
{
"start": 937,
"end": 2066
}
|
class ____ extends OracleTest {
public void test_0() throws Exception {
String sql = "SELECT employees_seq.currval FROM DUAL; ";
OracleStatementParser parser = new OracleStatementParser(sql);
List<SQLStatement> statementList = parser.parseStatementList();
SQLStatement statemen = statementList.get(0);
print(statementList);
assertEquals(1, statementList.size());
OracleSchemaStatVisitor visitor = new OracleSchemaStatVisitor();
statemen.accept(visitor);
// System.out.println("Tables : " + visitor.getTables());
// System.out.println("fields : " + visitor.getColumns());
// System.out.println("coditions : " + visitor.getConditions());
// System.out.println("relationships : " + visitor.getRelationships());
// assertTrue(visitor.getTables().containsKey(new TableStat.Name("orders")));
assertEquals(0, visitor.getTables().size());
assertEquals(0, visitor.getColumns().size());
// assertTrue(visitor.getColumns().contains(new TableStat.Column("bonuses", "employee_id")));
}
}
|
OracleSelectTest1
|
java
|
apache__kafka
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java
|
{
"start": 42577,
"end": 46168
}
|
class ____ extends TimedRequestState {
/**
* Member info (ID and epoch) to be included in the request if present.
*/
final MemberInfo memberInfo;
RetriableRequestState(LogContext logContext, String owner, long retryBackoffMs,
long retryBackoffMaxMs, MemberInfo memberInfo, Timer timer) {
super(logContext, owner, retryBackoffMs, retryBackoffMaxMs, timer);
this.memberInfo = memberInfo;
}
// Visible for testing
RetriableRequestState(LogContext logContext, String owner, long retryBackoffMs, int retryBackoffExpBase,
long retryBackoffMaxMs, double jitter, MemberInfo memberInfo, Timer timer) {
super(logContext, owner, retryBackoffMs, retryBackoffExpBase, retryBackoffMaxMs, jitter, timer);
this.memberInfo = memberInfo;
}
/**
* @return String containing the request name and arguments, to be used for logging
* purposes.
*/
abstract String requestDescription();
/**
* @return Future that will complete with the request response or failure.
*/
abstract CompletableFuture<?> future();
/**
* Complete the request future with a TimeoutException if the request has been sent out
* at least once and the timeout has been reached.
*/
void maybeExpire() {
if (numAttempts > 0 && isExpired()) {
removeRequest();
future().completeExceptionally(new TimeoutException(requestDescription() +
" could not complete before timeout expired."));
}
}
/**
* Build request with the given builder, including response handling logic.
*/
NetworkClientDelegate.UnsentRequest buildRequestWithResponseHandling(final AbstractRequest.Builder<?> builder) {
NetworkClientDelegate.UnsentRequest request = new NetworkClientDelegate.UnsentRequest(
builder,
coordinatorRequestManager.coordinator()
);
request.whenComplete(
(response, throwable) -> {
long completionTimeMs = request.handler().completionTimeMs();
handleClientResponse(response, throwable, completionTimeMs);
});
return request;
}
private void handleClientResponse(final ClientResponse response,
final Throwable error,
final long requestCompletionTimeMs) {
try {
if (error == null) {
onResponse(response);
} else {
log.debug("{} completed with error", requestDescription(), error);
onFailedAttempt(requestCompletionTimeMs);
coordinatorRequestManager.handleCoordinatorDisconnect(error, requestCompletionTimeMs);
future().completeExceptionally(error);
}
} catch (Throwable t) {
log.error("Unexpected error handling response for {}", requestDescription(), t);
future().completeExceptionally(t);
}
}
@Override
public String toStringBase() {
return super.toStringBase() + ", " + memberInfo;
}
abstract void onResponse(final ClientResponse response);
abstract void removeRequest();
}
|
RetriableRequestState
|
java
|
ReactiveX__RxJava
|
src/test/java/io/reactivex/rxjava3/internal/operators/flowable/FlowableMergeDelayErrorTest.java
|
{
"start": 15201,
"end": 16222
}
|
class ____ implements Publisher<String> {
String[] valuesToReturn;
TestErrorFlowable(String... values) {
valuesToReturn = values;
}
@Override
public void subscribe(Subscriber<? super String> subscriber) {
subscriber.onSubscribe(new BooleanSubscription());
boolean errorThrown = false;
for (String s : valuesToReturn) {
if (s == null) {
System.out.println("throwing exception");
subscriber.onError(new NullPointerException());
errorThrown = true;
// purposefully not returning here so it will continue calling onNext
// so that we also test that we handle bad sequences like this
} else {
subscriber.onNext(s);
}
}
if (!errorThrown) {
subscriber.onComplete();
}
}
}
private static
|
TestErrorFlowable
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/metrics/groups/InternalSplitEnumeratorMetricGroup.java
|
{
"start": 1262,
"end": 1716
}
|
class ____ extends ProxyMetricGroup<MetricGroup>
implements SplitEnumeratorMetricGroup {
public InternalSplitEnumeratorMetricGroup(MetricGroup parent) {
super(parent.addGroup("enumerator"));
}
@Override
public <G extends Gauge<Long>> G setUnassignedSplitsGauge(G unassignedSplitsGauge) {
return parentMetricGroup.gauge(MetricNames.UNASSIGNED_SPLITS, unassignedSplitsGauge);
}
}
|
InternalSplitEnumeratorMetricGroup
|
java
|
quarkusio__quarkus
|
extensions/quartz/deployment/src/test/java/io/quarkus/quartz/test/ApplicationNotRunningPredicateTest.java
|
{
"start": 1397,
"end": 1779
}
|
class ____ {
volatile boolean started;
void started(@Observes StartupEvent event) {
started = true;
}
@Scheduled(every = "0.2s", skipExecutionIf = Scheduled.ApplicationNotRunning.class)
void scheduleAfterStarted() {
if (!started) {
throw new IllegalStateException();
}
}
}
}
|
Jobs
|
java
|
apache__logging-log4j2
|
log4j-api/src/main/java/org/apache/logging/log4j/spi/ExtendedLogger.java
|
{
"start": 14791,
"end": 15371
}
|
class ____
* method when location information needs to be logged.
* @param level The logging Level to check.
* @param marker A Marker or null.
* @param message The message format.
* @param p0 the message parameters
* @param p1 the message parameters
* @param p2 the message parameters
* @since 2.6
*/
void logIfEnabled(String fqcn, Level level, Marker marker, String message, Object p0, Object p1, Object p2);
/**
* Logs a message if the specified level is active.
*
* @param fqcn The fully qualified
|
and
|
java
|
elastic__elasticsearch
|
x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/ArbitraryConditionalFunction.java
|
{
"start": 1082,
"end": 2174
}
|
class ____ extends ConditionalFunction {
private final ConditionalOperation operation;
ArbitraryConditionalFunction(Source source, List<Expression> fields, ConditionalOperation operation) {
super(source, fields);
this.operation = operation;
}
@Override
protected Pipe makePipe() {
return new ConditionalPipe(source(), this, Expressions.pipe(children()), operation);
}
@Override
public ScriptTemplate asScript() {
List<ScriptTemplate> templates = new ArrayList<>();
for (Expression ex : children()) {
templates.add(asScript(ex));
}
StringJoiner template = new StringJoiner(",", "{sql}." + operation.scriptMethodName() + "([", "])");
ParamsBuilder params = paramsBuilder();
for (ScriptTemplate scriptTemplate : templates) {
template.add(scriptTemplate.template());
params.script(scriptTemplate.params());
}
return new ScriptTemplate(formatTemplate(template.toString()), params.build(), dataType());
}
}
|
ArbitraryConditionalFunction
|
java
|
apache__camel
|
components/camel-platform-http/src/main/java/org/apache/camel/component/platform/http/PlatformHttpListener.java
|
{
"start": 861,
"end": 1154
}
|
interface ____ {
/**
* Callback when a new HTTP endpoint is added.
*/
void registerHttpEndpoint(HttpEndpointModel model);
/**
* Callback when an existing HTTP endpoint is removed.
*/
void unregisterHttpEndpoint(HttpEndpointModel model);
}
|
PlatformHttpListener
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/inference/chunking/EmbeddingRequestChunkerTests.java
|
{
"start": 57757,
"end": 58122
}
|
class ____ implements ActionListener<List<ChunkedInference>> {
List<ChunkedInference> results;
@Override
public void onResponse(List<ChunkedInference> chunks) {
this.results = chunks;
}
@Override
public void onFailure(Exception e) {
fail(e.getMessage());
}
}
}
|
ChunkedResultsListener
|
java
|
spring-cloud__spring-cloud-gateway
|
spring-cloud-gateway-server-webmvc/src/main/java/org/springframework/cloud/gateway/server/mvc/filter/RetryFilterFunctions.java
|
{
"start": 1574,
"end": 3547
}
|
class ____ {
private static final Log log = LogFactory.getLog(RetryFilterFunctions.class);
private static final boolean USE_SPRING_RETRY = ClassUtils
.isPresent("org.springframework.retry.annotation.Retryable", ClassUtils.getDefaultClassLoader());
private static boolean useFrameworkRetry = false;
private RetryFilterFunctions() {
}
@Shortcut
@SuppressWarnings("deprecation")
public static HandlerFilterFunction<ServerResponse, ServerResponse> retry(int retries) {
return useSpringRetry() ? GatewayRetryFilterFunctions.retry(retries)
: FrameworkRetryFilterFunctions.frameworkRetry(retries);
}
@SuppressWarnings("deprecation")
public static HandlerFilterFunction<ServerResponse, ServerResponse> retry(Consumer<RetryConfig> configConsumer) {
return useSpringRetry() ? GatewayRetryFilterFunctions.retry(configConsumer)
: FrameworkRetryFilterFunctions.frameworkRetry(configConsumer);
}
@Shortcut({ "retries", "series", "methods" })
@Configurable
@SuppressWarnings("deprecation")
public static HandlerFilterFunction<ServerResponse, ServerResponse> retry(RetryConfig config) {
return useSpringRetry() ? GatewayRetryFilterFunctions.retry(config)
: FrameworkRetryFilterFunctions.frameworkRetry(config);
}
static void setUseFrameworkRetry(boolean useFrameworkRetry) {
RetryFilterFunctions.useFrameworkRetry = useFrameworkRetry;
}
/**
* If spring retry is on the classpath and we do not force the use of Framework retry
* then we will use Spring Retry.
*/
private static boolean useSpringRetry() {
boolean useSpringRetry = USE_SPRING_RETRY && !useFrameworkRetry;
if (log.isDebugEnabled()) {
log.debug(LogMessage.format(
"Retry filter selection: Spring Retry on classpath=%s, useFrameworkRetry=%s, selected filter=%s",
USE_SPRING_RETRY, useFrameworkRetry,
useSpringRetry ? "GatewayRetryFilterFunctions" : "FrameworkRetryFilterFunctions"));
}
return useSpringRetry;
}
public static
|
RetryFilterFunctions
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemBlockCompaction.java
|
{
"start": 1406,
"end": 5143
}
|
class ____ extends AbstractWasbTestBase {
private static final String TEST_FILE = "/user/active/test.dat";
private static final Path TEST_PATH = new Path(TEST_FILE);
private static final String TEST_FILE_NORMAL = "/user/normal/test.dat";
private static final Path TEST_PATH_NORMAL = new Path(TEST_FILE_NORMAL);
private AzureBlobStorageTestAccount testAccount = null;
@BeforeEach
public void setUp() throws Exception {
super.setUp();
testAccount = createTestAccount();
fs = testAccount.getFileSystem();
Configuration conf = fs.getConf();
conf.setBoolean(NativeAzureFileSystem.APPEND_SUPPORT_ENABLE_PROPERTY_NAME, true);
conf.set(AzureNativeFileSystemStore.KEY_BLOCK_BLOB_WITH_COMPACTION_DIRECTORIES, "/user/active");
URI uri = fs.getUri();
fs.initialize(uri, conf);
}
/*
* Helper method that creates test data of size provided by the
* "size" parameter.
*/
private static byte[] getTestData(int size) {
byte[] testData = new byte[size];
System.arraycopy(RandomStringUtils.randomAlphabetic(size).getBytes(), 0, testData, 0, size);
return testData;
}
@Override
protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
return AzureBlobStorageTestAccount.create();
}
private BlockBlobAppendStream getBlockBlobAppendStream(FSDataOutputStream appendStream) {
SyncableDataOutputStream dataOutputStream = null;
if (appendStream.getWrappedStream() instanceof NativeAzureFileSystem.NativeAzureFsOutputStream) {
NativeAzureFileSystem.NativeAzureFsOutputStream fsOutputStream =
(NativeAzureFileSystem.NativeAzureFsOutputStream) appendStream.getWrappedStream();
dataOutputStream = (SyncableDataOutputStream) fsOutputStream.getOutStream();
}
if (appendStream.getWrappedStream() instanceof SyncableDataOutputStream) {
dataOutputStream = (SyncableDataOutputStream) appendStream.getWrappedStream();
}
assertNotNull(
dataOutputStream, "Did not recognize " + dataOutputStream);
return (BlockBlobAppendStream) dataOutputStream.getOutStream();
}
private void verifyBlockList(BlockBlobAppendStream blockBlobStream,
int[] testData) throws Throwable {
List<BlockEntry> blockList = blockBlobStream.getBlockList();
assertEquals(testData.length, blockList.size(), "Block list length");
int i = 0;
for (BlockEntry block: blockList) {
assertTrue(block.getSize() == testData[i++]);
}
}
private void appendBlockList(FSDataOutputStream fsStream,
ByteArrayOutputStream memStream,
int[] testData) throws Throwable {
for (int d: testData) {
byte[] data = getTestData(d);
memStream.write(data);
fsStream.write(data);
}
fsStream.hflush();
}
@Test
public void testCompactionDisabled() throws Throwable {
try (FSDataOutputStream appendStream = fs.create(TEST_PATH_NORMAL)) {
// testing new file
SyncableDataOutputStream dataOutputStream = null;
OutputStream wrappedStream = appendStream.getWrappedStream();
if (wrappedStream instanceof NativeAzureFileSystem.NativeAzureFsOutputStream) {
NativeAzureFileSystem.NativeAzureFsOutputStream fsOutputStream =
(NativeAzureFileSystem.NativeAzureFsOutputStream) wrappedStream;
dataOutputStream = (SyncableDataOutputStream) fsOutputStream.getOutStream();
} else if (wrappedStream instanceof SyncableDataOutputStream) {
dataOutputStream = (SyncableDataOutputStream) wrappedStream;
} else {
fail("Unable to determine type of " + wrappedStream
+ "
|
TestNativeAzureFileSystemBlockCompaction
|
java
|
assertj__assertj-core
|
assertj-core/src/test/java/org/assertj/core/util/Lists_newArrayList_Test.java
|
{
"start": 868,
"end": 1532
}
|
class ____ {
@Test
void should_return_empty_mutable_List() {
ArrayList<String> list = Lists.newArrayList();
assertThat(list).isEmpty();
list.add("abc");
assertThat(list).containsExactly("abc");
}
@Test
void should_return_new_List() {
ArrayList<String> list1 = Lists.newArrayList();
ArrayList<String> list2 = Lists.newArrayList();
assertThat(list2).isNotSameAs(list1);
// be sure they have nothing in common
list1.add("abc");
assertThat(list2).isEmpty();
}
@Test
void should_return_empty_List() {
ArrayList<String> list = Lists.newArrayList();
assertThat(list).isEmpty();
}
}
|
Lists_newArrayList_Test
|
java
|
google__dagger
|
javatests/dagger/internal/codegen/SubcomponentCreatorValidationTest.java
|
{
"start": 12422,
"end": 12500
}
|
class ____ {",
" @Subcomponent.Builder",
" static
|
ChildComponent
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/ThriftEndpointBuilderFactory.java
|
{
"start": 30237,
"end": 31536
}
|
interface ____
extends
AdvancedThriftEndpointConsumerBuilder,
AdvancedThriftEndpointProducerBuilder {
default ThriftEndpointBuilder basic() {
return (ThriftEndpointBuilder) this;
}
/**
* Sets whether synchronous processing should be strictly used.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param synchronous the value to set
* @return the dsl builder
*/
default AdvancedThriftEndpointBuilder synchronous(boolean synchronous) {
doSetProperty("synchronous", synchronous);
return this;
}
/**
* Sets whether synchronous processing should be strictly used.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param synchronous the value to set
* @return the dsl builder
*/
default AdvancedThriftEndpointBuilder synchronous(String synchronous) {
doSetProperty("synchronous", synchronous);
return this;
}
}
public
|
AdvancedThriftEndpointBuilder
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/serializer/AbstractTest.java
|
{
"start": 1041,
"end": 1726
}
|
class ____ implements ObjectDeserializer {
@SuppressWarnings("unchecked")
public <T> T deserialze(DefaultJSONParser parser, Type type, Object fieldName) {
JSONObject json = parser.parseObject();
int num = json.getInteger("num");
if (num == 1) {
return (T) JSON.toJavaObject(json, B.class);
} else if (num == 2) {
return (T) JSON.toJavaObject(json, C.class);
} else {
return (T) JSON.toJavaObject(json, A.class);
}
}
public int getFastMatchToken() {
return JSONToken.LBRACE;
}
}
public static
|
ADeserializer
|
java
|
spring-projects__spring-framework
|
spring-webmvc/src/test/java/org/springframework/web/servlet/resource/LiteWebJarsResourceResolverTests.java
|
{
"start": 1317,
"end": 4894
}
|
class ____ {
private List<Resource> locations = List.of(new ClassPathResource("/META-INF/resources/webjars"));
// for this to work, an actual WebJar must be on the test classpath
private LiteWebJarsResourceResolver resolver = new LiteWebJarsResourceResolver();
private ResourceResolverChain chain = mock();
private HttpServletRequest request = new MockHttpServletRequest();
@Test
void resolveUrlExisting() {
String file = "/foo/2.3/foo.txt";
given(this.chain.resolveUrlPath(file, this.locations)).willReturn(file);
String actual = this.resolver.resolveUrlPath(file, this.locations, this.chain);
assertThat(actual).isEqualTo(file);
verify(this.chain, times(1)).resolveUrlPath(file, this.locations);
}
@Test
void resolveUrlExistingNotInJarFile() {
String file = "foo/foo.txt";
given(this.chain.resolveUrlPath(file, this.locations)).willReturn(null);
String actual = this.resolver.resolveUrlPath(file, this.locations, this.chain);
assertThat(actual).isNull();
verify(this.chain, times(1)).resolveUrlPath(file, this.locations);
verify(this.chain, never()).resolveUrlPath("foo/2.3/foo.txt", this.locations);
}
@Test
void resolveUrlWebJarResource() {
String file = "underscorejs/underscore.js";
String expected = "underscorejs/1.8.3/underscore.js";
given(this.chain.resolveUrlPath(file, this.locations)).willReturn(null);
given(this.chain.resolveUrlPath(expected, this.locations)).willReturn(expected);
String actual = this.resolver.resolveUrlPath(file, this.locations, this.chain);
assertThat(actual).isEqualTo(expected);
verify(this.chain, times(1)).resolveUrlPath(file, this.locations);
verify(this.chain, times(1)).resolveUrlPath(expected, this.locations);
}
@Test
void resolveUrlWebJarResourceNotFound() {
String file = "something/something.js";
given(this.chain.resolveUrlPath(file, this.locations)).willReturn(null);
String actual = this.resolver.resolveUrlPath(file, this.locations, this.chain);
assertThat(actual).isNull();
verify(this.chain, times(1)).resolveUrlPath(file, this.locations);
verify(this.chain, never()).resolveUrlPath(null, this.locations);
}
@Test
void resolveResourceExisting() {
Resource expected = mock();
String file = "foo/2.3/foo.txt";
given(this.chain.resolveResource(this.request, file, this.locations)).willReturn(expected);
Resource actual = this.resolver.resolveResource(this.request, file, this.locations, this.chain);
assertThat(actual).isEqualTo(expected);
verify(this.chain, times(1)).resolveResource(this.request, file, this.locations);
}
@Test
void resolveResourceNotFound() {
String file = "something/something.js";
given(this.chain.resolveUrlPath(file, this.locations)).willReturn(null);
Resource actual = this.resolver.resolveResource(this.request, file, this.locations, this.chain);
assertThat(actual).isNull();
verify(this.chain, times(1)).resolveResource(this.request, file, this.locations);
verify(this.chain, never()).resolveResource(this.request, null, this.locations);
}
@Test
void resolveResourceWebJar() {
Resource expected = mock();
String file = "underscorejs/underscore.js";
String expectedPath = "underscorejs/1.8.3/underscore.js";
given(this.chain.resolveResource(this.request, expectedPath, this.locations)).willReturn(expected);
Resource actual = this.resolver.resolveResource(this.request, file, this.locations, this.chain);
assertThat(actual).isEqualTo(expected);
verify(this.chain, times(1)).resolveResource(this.request, file, this.locations);
}
}
|
LiteWebJarsResourceResolverTests
|
java
|
quarkusio__quarkus
|
integration-tests/packaging/src/test/java/io/quarkus/maven/CustomManifestEntriesThinJarTest.java
|
{
"start": 473,
"end": 1671
}
|
class ____ {
@RegisterExtension
static final QuarkusProdModeTest config = new QuarkusProdModeTest()
.withEmptyApplication()
.setApplicationName("Custom-Manifest-Thin")
.setApplicationVersion("0.1-SNAPSHOT")
.withConfigurationResource("projects/custom-manifest-section/custom-entries-thin.properties");
@ProdBuildResults
private ProdModeTestResults prodModeTestResults;
@Test
public void testManifestEntries() throws Exception {
assertThat(prodModeTestResults.getResults()).hasSize(1);
Path jarPath = prodModeTestResults.getResults().get(0).getPath();
try (InputStream fileInputStream = new FileInputStream(jarPath.toFile())) {
try (JarInputStream stream = new JarInputStream(fileInputStream)) {
Manifest manifest = stream.getManifest();
assertThat(manifest).isNotNull();
String customAttribute = manifest.getMainAttributes().getValue("Built-By");
assertThat(customAttribute).isNotNull();
assertThat(customAttribute).isEqualTo("Quarkus Plugin");
}
}
}
}
|
CustomManifestEntriesThinJarTest
|
java
|
grpc__grpc-java
|
api/src/main/java/io/grpc/LoadBalancerRegistry.java
|
{
"start": 1411,
"end": 6059
}
|
class ____ {
private static final Logger logger = Logger.getLogger(LoadBalancerRegistry.class.getName());
private static LoadBalancerRegistry instance;
private static final Iterable<Class<?>> HARDCODED_CLASSES = getHardCodedClasses();
private final LinkedHashSet<LoadBalancerProvider> allProviders =
new LinkedHashSet<>();
private final LinkedHashMap<String, LoadBalancerProvider> effectiveProviders =
new LinkedHashMap<>();
/**
* Register a provider.
*
* <p>If the provider's {@link LoadBalancerProvider#isAvailable isAvailable()} returns
* {@code false}, this method will throw {@link IllegalArgumentException}.
*
* <p>If more than one provider with the same {@link LoadBalancerProvider#getPolicyName policy
* name} are registered, the one with the highest {@link LoadBalancerProvider#getPriority
* priority} will be effective. If there are more than one name-sake providers rank the highest
* priority, the one registered first will be effective.
*/
public synchronized void register(LoadBalancerProvider provider) {
addProvider(provider);
refreshProviderMap();
}
private synchronized void addProvider(LoadBalancerProvider provider) {
checkArgument(provider.isAvailable(), "isAvailable() returned false");
allProviders.add(provider);
}
/**
* Deregisters a provider. No-op if the provider is not in the registry. If there are more
* than one providers with the same policy name as the deregistered one in the registry, one
* of them will become the effective provider for that policy, per the rule documented in {@link
* #register}.
*
* @param provider the provider that was added to the register via {@link #register}.
*/
public synchronized void deregister(LoadBalancerProvider provider) {
allProviders.remove(provider);
refreshProviderMap();
}
private synchronized void refreshProviderMap() {
effectiveProviders.clear();
for (LoadBalancerProvider provider : allProviders) {
String policy = provider.getPolicyName();
LoadBalancerProvider existing = effectiveProviders.get(policy);
if (existing == null || existing.getPriority() < provider.getPriority()) {
effectiveProviders.put(policy, provider);
}
}
}
/**
* Returns the default registry that loads providers via the Java service loader mechanism.
*/
public static synchronized LoadBalancerRegistry getDefaultRegistry() {
if (instance == null) {
List<LoadBalancerProvider> providerList = ServiceProviders.loadAll(
LoadBalancerProvider.class,
HARDCODED_CLASSES,
LoadBalancerProvider.class.getClassLoader(),
new LoadBalancerPriorityAccessor());
instance = new LoadBalancerRegistry();
for (LoadBalancerProvider provider : providerList) {
logger.fine("Service loader found " + provider);
instance.addProvider(provider);
}
instance.refreshProviderMap();
}
return instance;
}
/**
* Returns the effective provider for the given load-balancing policy, or {@code null} if no
* suitable provider can be found. Each provider declares its policy name via {@link
* LoadBalancerProvider#getPolicyName}.
*/
@Nullable
public synchronized LoadBalancerProvider getProvider(String policy) {
return effectiveProviders.get(checkNotNull(policy, "policy"));
}
/**
* Returns effective providers in a new map.
*/
@VisibleForTesting
synchronized Map<String, LoadBalancerProvider> providers() {
return new LinkedHashMap<>(effectiveProviders);
}
@VisibleForTesting
static List<Class<?>> getHardCodedClasses() {
// Class.forName(String) is used to remove the need for ProGuard configuration. Note that
// ProGuard does not detect usages of Class.forName(String, boolean, ClassLoader):
// https://sourceforge.net/p/proguard/bugs/418/
ArrayList<Class<?>> list = new ArrayList<>();
try {
list.add(Class.forName("io.grpc.internal.PickFirstLoadBalancerProvider"));
} catch (ClassNotFoundException e) {
logger.log(Level.WARNING, "Unable to find pick-first LoadBalancer", e);
}
try {
list.add(Class.forName("io.grpc.util.SecretRoundRobinLoadBalancerProvider$Provider"));
} catch (ClassNotFoundException e) {
// Since hard-coded list is only used in Android environment, and we don't expect round-robin
// to be actually used there, we log it as a lower level.
logger.log(Level.FINE, "Unable to find round-robin LoadBalancer", e);
}
return Collections.unmodifiableList(list);
}
private static final
|
LoadBalancerRegistry
|
java
|
spring-projects__spring-framework
|
spring-test/src/main/java/org/springframework/test/context/TestContextAnnotationUtils.java
|
{
"start": 10056,
"end": 10298
}
|
class ____.</li>
* <li>Recursively search through all interfaces implemented by the given class.</li>
* <li>Recursively search through the superclass hierarchy of the given class.</li>
* <li>Recursively search through the enclosing
|
declares
|
java
|
quarkusio__quarkus
|
extensions/scheduler/common/src/main/java/io/quarkus/scheduler/common/runtime/InstrumentedInvoker.java
|
{
"start": 307,
"end": 1047
}
|
class ____ extends DelegateInvoker {
private final JobInstrumenter instrumenter;
public InstrumentedInvoker(ScheduledInvoker delegate, JobInstrumenter instrumenter) {
super(delegate);
this.instrumenter = instrumenter;
}
@Override
public CompletionStage<Void> invoke(ScheduledExecution execution) throws Exception {
return instrumenter.instrument(new JobInstrumentationContext() {
@Override
public CompletionStage<Void> executeJob() {
return invokeDelegate(execution);
}
@Override
public String getSpanName() {
return execution.getTrigger().getId();
}
});
}
}
|
InstrumentedInvoker
|
java
|
apache__camel
|
dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/SmtpsComponentBuilderFactory.java
|
{
"start": 35103,
"end": 42866
}
|
class ____
extends AbstractComponentBuilder<MailComponent>
implements SmtpsComponentBuilder {
@Override
protected MailComponent buildConcreteComponent() {
return new MailComponent();
}
private org.apache.camel.component.mail.MailConfiguration getOrCreateConfiguration(MailComponent component) {
if (component.getConfiguration() == null) {
component.setConfiguration(new org.apache.camel.component.mail.MailConfiguration());
}
return component.getConfiguration();
}
@Override
protected boolean setPropertyOnComponent(
Component component,
String name,
Object value) {
switch (name) {
case "bridgeErrorHandler": ((MailComponent) component).setBridgeErrorHandler((boolean) value); return true;
case "closeFolder": getOrCreateConfiguration((MailComponent) component).setCloseFolder((boolean) value); return true;
case "copyTo": getOrCreateConfiguration((MailComponent) component).setCopyTo((java.lang.String) value); return true;
case "decodeFilename": getOrCreateConfiguration((MailComponent) component).setDecodeFilename((boolean) value); return true;
case "delete": getOrCreateConfiguration((MailComponent) component).setDelete((boolean) value); return true;
case "disconnect": getOrCreateConfiguration((MailComponent) component).setDisconnect((boolean) value); return true;
case "handleFailedMessage": getOrCreateConfiguration((MailComponent) component).setHandleFailedMessage((boolean) value); return true;
case "mimeDecodeHeaders": getOrCreateConfiguration((MailComponent) component).setMimeDecodeHeaders((boolean) value); return true;
case "moveTo": getOrCreateConfiguration((MailComponent) component).setMoveTo((java.lang.String) value); return true;
case "peek": getOrCreateConfiguration((MailComponent) component).setPeek((boolean) value); return true;
case "skipFailedMessage": getOrCreateConfiguration((MailComponent) component).setSkipFailedMessage((boolean) value); return true;
case "unseen": getOrCreateConfiguration((MailComponent) component).setUnseen((boolean) value); return true;
case "failOnDuplicateFileAttachment": getOrCreateConfiguration((MailComponent) component).setFailOnDuplicateFileAttachment((boolean) value); return true;
case "fetchSize": getOrCreateConfiguration((MailComponent) component).setFetchSize((int) value); return true;
case "folderName": getOrCreateConfiguration((MailComponent) component).setFolderName((java.lang.String) value); return true;
case "generateMissingAttachmentNames": getOrCreateConfiguration((MailComponent) component).setGenerateMissingAttachmentNames((java.lang.String) value); return true;
case "handleDuplicateAttachmentNames": getOrCreateConfiguration((MailComponent) component).setHandleDuplicateAttachmentNames((java.lang.String) value); return true;
case "mapMailMessage": getOrCreateConfiguration((MailComponent) component).setMapMailMessage((boolean) value); return true;
case "bcc": getOrCreateConfiguration((MailComponent) component).setBcc((java.lang.String) value); return true;
case "cc": getOrCreateConfiguration((MailComponent) component).setCc((java.lang.String) value); return true;
case "from": getOrCreateConfiguration((MailComponent) component).setFrom((java.lang.String) value); return true;
case "lazyStartProducer": ((MailComponent) component).setLazyStartProducer((boolean) value); return true;
case "replyTo": getOrCreateConfiguration((MailComponent) component).setReplyTo((java.lang.String) value); return true;
case "subject": getOrCreateConfiguration((MailComponent) component).setSubject((java.lang.String) value); return true;
case "to": getOrCreateConfiguration((MailComponent) component).setTo((java.lang.String) value); return true;
case "javaMailSender": getOrCreateConfiguration((MailComponent) component).setJavaMailSender((org.apache.camel.component.mail.JavaMailSender) value); return true;
case "additionalJavaMailProperties": getOrCreateConfiguration((MailComponent) component).setAdditionalJavaMailProperties((java.util.Properties) value); return true;
case "alternativeBodyHeader": getOrCreateConfiguration((MailComponent) component).setAlternativeBodyHeader((java.lang.String) value); return true;
case "attachmentsContentTransferEncodingResolver": getOrCreateConfiguration((MailComponent) component).setAttachmentsContentTransferEncodingResolver((org.apache.camel.component.mail.AttachmentsContentTransferEncodingResolver) value); return true;
case "authenticator": getOrCreateConfiguration((MailComponent) component).setAuthenticator((org.apache.camel.component.mail.MailAuthenticator) value); return true;
case "autowiredEnabled": ((MailComponent) component).setAutowiredEnabled((boolean) value); return true;
case "configuration": ((MailComponent) component).setConfiguration((org.apache.camel.component.mail.MailConfiguration) value); return true;
case "connectionTimeout": getOrCreateConfiguration((MailComponent) component).setConnectionTimeout((int) value); return true;
case "contentType": getOrCreateConfiguration((MailComponent) component).setContentType((java.lang.String) value); return true;
case "contentTypeResolver": ((MailComponent) component).setContentTypeResolver((org.apache.camel.component.mail.ContentTypeResolver) value); return true;
case "debugMode": getOrCreateConfiguration((MailComponent) component).setDebugMode((boolean) value); return true;
case "ignoreUnsupportedCharset": getOrCreateConfiguration((MailComponent) component).setIgnoreUnsupportedCharset((boolean) value); return true;
case "ignoreUriScheme": getOrCreateConfiguration((MailComponent) component).setIgnoreUriScheme((boolean) value); return true;
case "javaMailProperties": getOrCreateConfiguration((MailComponent) component).setJavaMailProperties((java.util.Properties) value); return true;
case "session": getOrCreateConfiguration((MailComponent) component).setSession((jakarta.mail.Session) value); return true;
case "useInlineAttachments": getOrCreateConfiguration((MailComponent) component).setUseInlineAttachments((boolean) value); return true;
case "headerFilterStrategy": ((MailComponent) component).setHeaderFilterStrategy((org.apache.camel.spi.HeaderFilterStrategy) value); return true;
case "healthCheckConsumerEnabled": ((MailComponent) component).setHealthCheckConsumerEnabled((boolean) value); return true;
case "healthCheckProducerEnabled": ((MailComponent) component).setHealthCheckProducerEnabled((boolean) value); return true;
case "password": getOrCreateConfiguration((MailComponent) component).setPassword((java.lang.String) value); return true;
case "sslContextParameters": getOrCreateConfiguration((MailComponent) component).setSslContextParameters((org.apache.camel.support.jsse.SSLContextParameters) value); return true;
case "useGlobalSslContextParameters": ((MailComponent) component).setUseGlobalSslContextParameters((boolean) value); return true;
case "username": getOrCreateConfiguration((MailComponent) component).setUsername((java.lang.String) value); return true;
default: return false;
}
}
}
}
|
SmtpsComponentBuilderImpl
|
java
|
apache__flink
|
flink-formats/flink-orc/src/main/java/org/apache/flink/orc/vector/OrcBytesColumnVector.java
|
{
"start": 1015,
"end": 1603
}
|
class ____ extends AbstractOrcColumnVector
implements org.apache.flink.table.data.columnar.vector.BytesColumnVector {
private BytesColumnVector vector;
public OrcBytesColumnVector(BytesColumnVector vector) {
super(vector);
this.vector = vector;
}
@Override
public Bytes getBytes(int i) {
int rowId = vector.isRepeating ? 0 : i;
byte[][] data = vector.vector;
int[] start = vector.start;
int[] length = vector.length;
return new Bytes(data[rowId], start[rowId], length[rowId]);
}
}
|
OrcBytesColumnVector
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/ControlBusEndpointBuilderFactory.java
|
{
"start": 1424,
"end": 1556
}
|
interface ____ {
/**
* Builder for endpoint for the Control Bus component.
*/
public
|
ControlBusEndpointBuilderFactory
|
java
|
apache__avro
|
lang/java/thrift/src/test/java/org/apache/avro/thrift/test/Foo.java
|
{
"start": 35356,
"end": 35582
}
|
class ____ implements org.apache.thrift.scheme.SchemeFactory {
public ping_resultStandardScheme getScheme() {
return new ping_resultStandardScheme();
}
}
private static
|
ping_resultStandardSchemeFactory
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/annotations/duplicatedgenerator/DuplicateTest.java
|
{
"start": 666,
"end": 1667
}
|
class ____ {
@Test
public void testDuplicateEntityName() {
Configuration cfg = new Configuration();
cfg.setProperty( Environment.HBM2DDL_AUTO, "create-drop" );
ServiceRegistry serviceRegistry = null;
SessionFactory sf = null;
try {
cfg.addAnnotatedClass( Flight.class );
cfg.addAnnotatedClass( org.hibernate.orm.test.annotations.Flight.class );
cfg.addAnnotatedClass( Company.class );
cfg.addResource( "org/hibernate/orm/test/annotations/orm.xml" );
cfg.addResource( "org/hibernate/orm/test/annotations/duplicatedgenerator/orm.xml" );
serviceRegistry = ServiceRegistryBuilder.buildServiceRegistry( cfg.getProperties() );
sf = cfg.buildSessionFactory( serviceRegistry );
fail( "Should not be able to map the same entity name twice" );
}
catch (DuplicateMappingException ae) {
//success
}
finally {
if (sf != null){
sf.close();
}
if ( serviceRegistry != null ) {
ServiceRegistryBuilder.destroy( serviceRegistry );
}
}
}
}
|
DuplicateTest
|
java
|
elastic__elasticsearch
|
qa/logging-config/src/test/java/org/elasticsearch/common/logging/ESJsonLayoutTests.java
|
{
"start": 766,
"end": 3045
}
|
class ____ extends ESTestCase {
@BeforeClass
public static void initNodeName() {
assert "false".equals(System.getProperty("tests.security.manager")) : "-Dtests.security.manager=false has to be set";
JsonLogsTestSetup.init();
}
public void testEmptyType() {
expectThrows(IllegalArgumentException.class, () -> ESJsonLayout.newBuilder().build());
}
@SuppressForbidden(reason = "Need to test that a system property can be looked up in logs")
public void testLayout() {
System.setProperty("es.logs.cluster_name", "cluster123");
ESJsonLayout server = ESJsonLayout.newBuilder().setType("server").build();
String conversionPattern = server.getPatternLayout().getConversionPattern();
assertThat(conversionPattern, Matchers.equalTo(Strings.format("""
{\
"type": "server", \
"timestamp": "%%d{yyyy-MM-dd'T'HH:mm:ss,SSSZZ}", \
"level": "%%p", \
"component": "%%c{1.}", \
"cluster.name": "${sys:es.logs.cluster_name}", \
"node.name": "%%node_name", \
"message": "%%notEmpty{%%enc{%%marker}{JSON} }%%enc{%%.-10000m}{JSON}"%%notEmpty{, \
%%node_and_cluster_id }%%notEmpty{, %%CustomMapFields }%%exceptionAsJson \
}%n""")));
assertThat(server.toSerializable(new Log4jLogEvent()), Matchers.containsString("\"cluster.name\": \"cluster123\""));
}
public void testLayoutWithAdditionalFieldOverride() {
ESJsonLayout server = ESJsonLayout.newBuilder().setType("server").setOverrideFields("message").build();
String conversionPattern = server.getPatternLayout().getConversionPattern();
// message field is removed as is expected to be provided by a field from a message
assertThat(conversionPattern, Matchers.equalTo(Strings.format("""
{\
"type": "server", \
"timestamp": "%%d{yyyy-MM-dd'T'HH:mm:ss,SSSZZ}", \
"level": "%%p", \
"component": "%%c{1.}", \
"cluster.name": "${sys:es.logs.cluster_name}", \
"node.name": "%%node_name"%%notEmpty{, %%node_and_cluster_id }%%notEmpty{, %%CustomMapFields }%%exceptionAsJson \
}%n""")));
}
}
|
ESJsonLayoutTests
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/search/vectors/ESAcceptDocsTests.java
|
{
"start": 892,
"end": 4556
}
|
class ____ extends ESTestCase {
public void testAcceptAllDocs() throws IOException {
ESAcceptDocs acceptDocs = ESAcceptDocs.ESAcceptDocsAll.INSTANCE;
assertEquals(0L, acceptDocs.approximateCost());
assertEquals(0L, acceptDocs.cost());
assertNull(acceptDocs.iterator());
assertNull(acceptDocs.bits());
assertNull(acceptDocs.getBitSet());
}
public void testFromScorerSupplier() throws IOException {
int[] docIds = new int[] { 1, 3, 5, 7, 9 };
BitSet bitSet = new FixedBitSet(10);
for (int docId : docIds) {
bitSet.set(docId);
}
{
DocIdSetIterator iterator = new BitSetIterator(bitSet, bitSet.cardinality());
ESAcceptDocs acceptDocs = new ESAcceptDocs.ScorerSupplierAcceptDocs(new TestScorerSupplier(iterator), null, 10);
assertEquals(iterator.cost(), acceptDocs.approximateCost());
assertEquals(iterator.cost(), acceptDocs.cost());
// iterate the docs ensuring they match
DocIdSetIterator acceptDocsIterator = acceptDocs.iterator();
for (int docId : docIds) {
assertEquals(docId, acceptDocsIterator.nextDoc());
}
}
{
DocIdSetIterator iterator = new BitSetIterator(bitSet, bitSet.cardinality());
ESAcceptDocs acceptDocs = new ESAcceptDocs.ScorerSupplierAcceptDocs(new TestScorerSupplier(iterator), null, 10);
Bits acceptDocsBits = acceptDocs.bits();
for (int i = 0; i < 10; i++) {
assertEquals(bitSet.get(i), acceptDocsBits.get(i));
}
}
{
DocIdSetIterator iterator = new BitSetIterator(bitSet, bitSet.cardinality());
FixedBitSet liveDocs = new FixedBitSet(10);
liveDocs.set(0, 10);
// lets delete docs 1, 3, 9
liveDocs.clear(1);
liveDocs.clear(3);
liveDocs.clear(9);
ESAcceptDocs acceptDocs = new ESAcceptDocs.ScorerSupplierAcceptDocs(new TestScorerSupplier(iterator), liveDocs, 10);
// verify approximate cost doesn't count deleted docs
assertEquals(5L, acceptDocs.approximateCost());
// actual cost should count only live docs
assertEquals(2L, acceptDocs.cost());
// iterate the docs ensuring they match
DocIdSetIterator acceptDocsIterator = acceptDocs.iterator();
assertEquals(5, acceptDocsIterator.nextDoc());
assertEquals(7, acceptDocsIterator.nextDoc());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, acceptDocsIterator.nextDoc());
}
}
public void testFromBits() throws IOException {
FixedBitSet acceptedDocs = new FixedBitSet(10);
acceptedDocs.set(1);
acceptedDocs.set(3);
acceptedDocs.set(5);
ESAcceptDocs acceptDocs = new ESAcceptDocs.BitsAcceptDocs(acceptedDocs, 10);
assertEquals(3L, acceptDocs.approximateCost());
assertEquals(3L, acceptDocs.cost());
// iterate the docs ensuring they match
DocIdSetIterator acceptDocsIterator = acceptDocs.iterator();
assertEquals(1, acceptDocsIterator.nextDoc());
assertEquals(3, acceptDocsIterator.nextDoc());
assertEquals(5, acceptDocsIterator.nextDoc());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, acceptDocsIterator.nextDoc());
// verify bits
Bits acceptDocsBits = acceptDocs.bits();
for (int i = 0; i < 10; i++) {
assertEquals(acceptedDocs.get(i), acceptDocsBits.get(i));
}
}
private static
|
ESAcceptDocsTests
|
java
|
google__dagger
|
javatests/dagger/internal/codegen/DuplicateBindingsValidationTest.java
|
{
"start": 39924,
"end": 40466
}
|
class ____ {",
" @Provides @Nullable static Object nullableParentChildConflict() {",
" return \"parent\";",
" }",
" }",
"}");
Source child =
CompilerTests.javaSource(
"test.Child",
"package test;",
"",
"import dagger.Module;",
"import dagger.Provides;",
"import dagger.Subcomponent;",
"",
"@Subcomponent(modules = Child.ChildModule.class)",
"
|
ParentModule
|
java
|
elastic__elasticsearch
|
modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamSnapshotIT.java
|
{
"start": 12420,
"end": 13769
}
|
class ____ extends Plugin implements SystemIndexPlugin {
static final String SYSTEM_DATA_STREAM_NAME = ".test-data-stream";
@Override
public Collection<SystemDataStreamDescriptor> getSystemDataStreamDescriptors() {
return List.of(
new SystemDataStreamDescriptor(
SYSTEM_DATA_STREAM_NAME,
"a system data stream for testing",
SystemDataStreamDescriptor.Type.EXTERNAL,
ComposableIndexTemplate.builder()
.indexPatterns(List.of(".system-data-stream"))
.dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate())
.build(),
Map.of(),
Collections.singletonList("test"),
"test",
new ExecutorNames(ThreadPool.Names.SYSTEM_CRITICAL_READ, ThreadPool.Names.SYSTEM_READ, ThreadPool.Names.SYSTEM_WRITE)
)
);
}
@Override
public String getFeatureName() {
return SystemDataStreamTestPlugin.class.getSimpleName();
}
@Override
public String getFeatureDescription() {
return "A plugin for testing snapshots of system data streams";
}
}
}
|
SystemDataStreamTestPlugin
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlReductionLateMaterializationTestCase.java
|
{
"start": 1863,
"end": 9849
}
|
class ____ extends AbstractEsqlIntegTestCase {
private final int shardCount;
private final int maxConcurrentNodes;
private final int taskConcurrency;
EsqlReductionLateMaterializationTestCase(@Name("TestCase") TestCase testCase) {
this.shardCount = testCase.shardCount;
this.maxConcurrentNodes = testCase.maxConcurrentNodes;
this.taskConcurrency = testCase.taskConcurrency;
}
public record TestCase(int shardCount, int maxConcurrentNodes, int taskConcurrency) {}
@ParametersFactory
public static Iterable<Object[]> parameters() {
var result = new ArrayList<Object[]>();
for (int shardCount : new int[] { 1, 5 }) {
for (int maxConcurrentNodes : new int[] { 1, 5 }) {
for (int taskConcurrency : new int[] { 1, 5 }) {
result.add(new Object[] { new TestCase(shardCount, maxConcurrentNodes, taskConcurrency) });
}
}
}
return result;
}
public void setupIndex() throws Exception {
assumeTrue("requires query pragmas", canUseQueryPragmas());
XContentBuilder mapping = JsonXContent.contentBuilder().startObject();
mapping.startObject("properties");
{
mapping.startObject("sorted").field("type", "long").endObject();
mapping.startObject("filtered").field("type", "long").endObject();
mapping.startObject("read").field("type", "long").endObject();
mapping.startObject("more").field("type", "long").endObject();
mapping.startObject("some_more").field("type", "long").endObject();
}
mapping.endObject();
client().admin().indices().prepareCreate("test").setSettings(indexSettings(10, 0)).setMapping(mapping.endObject()).get();
var builders = IntStream.range(0, 1024)
.mapToObj(
i -> prepareIndex("test").setId(Integer.toString(i))
.setSource("read", i, "sorted", i * 2, "filtered", i * 3, "more", i * 4, "some_more", i * 5)
)
.toList();
indexRandom(true, builders);
}
@SuppressWarnings("unchecked")
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return CollectionUtils.appendToCopyNoNullElements(super.nodePlugins(), MockSearchService.TestPlugin.class, SpatialPlugin.class);
}
@Override
protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
return Settings.builder()
.put(super.nodeSettings(nodeOrdinal, otherSettings))
.put(PlannerSettings.REDUCTION_LATE_MATERIALIZATION.getKey(), true)
.build();
}
public void testNoPushdowns() throws Exception {
testLateMaterializationAfterReduceTopN(
"from test | sort sorted + 1 desc | limit 3 | stats sum(read)",
Set.of("sorted"),
Set.of("read")
);
}
public void testPushdownTopN() throws Exception {
testLateMaterializationAfterReduceTopN(
"from test | sort sorted desc | limit 3 | stats sum(read)",
Set.of("sorted"),
Set.of("read")
);
}
public void testPushdownTopNMultipleSortedFields() throws Exception {
testLateMaterializationAfterReduceTopN(
"from test | sort sorted desc, more asc | limit 3 | stats sum(read)",
Set.of("sorted", "more"),
Set.of("read")
);
}
public void testPushdownTopNMultipleRetrievedFields() throws Exception {
testLateMaterializationAfterReduceTopN(
"from test | sort sorted desc, more asc | limit 3 | stats x = sum(read), y = max(some_more)",
Set.of("sorted", "more"),
Set.of("read", "some_more")
);
}
public void testPushdownTopFilterOnNonProjected() throws Exception {
testLateMaterializationAfterReduceTopN(
"from test | where filtered > 0 | sort sorted desc | limit 3 | stats sum(read)",
Set.of("sorted"),
Set.of("read")
);
}
public void testPushdownTopFilterOnProjected() throws Exception {
testLateMaterializationAfterReduceTopN(
"from test | sort sorted desc | limit 3 | where filtered > 0 | stats sum(read)",
Set.of("sorted"),
Set.of("read", "filtered")
);
}
private void testLateMaterializationAfterReduceTopN(
String query,
Set<String> expectedDataLoadedFields,
Set<String> expectedNodeReduceFields
) throws Exception {
setupIndex();
try (var result = sendQuery(query)) {
assertThat(result.isRunning(), equalTo(false));
assertThat(result.isPartial(), equalTo(false));
assertSingleKeyFieldExtracted(result, "data", expectedDataLoadedFields);
assertSingleKeyFieldExtracted(result, "node_reduce", expectedNodeReduceFields);
var page = singleValue(result.pages());
assertThat(page.getPositionCount(), equalTo(1));
LongVectorBlock block = page.getBlock(0);
assertThat(block.getPositionCount(), equalTo(1));
assertThat(block.getLong(0), equalTo(1021L + 1022 + 1023));
}
}
private static void assertSingleKeyFieldExtracted(EsqlQueryResponse response, String driverName, Set<String> expectedLoadedFields) {
long totalValuesLoader = 0;
for (var driverProfile : response.profile().drivers().stream().filter(d -> d.description().equals(driverName)).toList()) {
OperatorStatus operatorStatus = singleValue(
Strings.format(
"Only a single ValuesSourceReaderOperator should be present in driver '%s'; "
+ "more than that means we didn't move the operator in the planner correctly",
driverName
),
driverProfile.operators().stream().filter(o -> o.operator().startsWith("ValuesSourceReaderOperator")).toList()
);
var status = (ValuesSourceReaderOperatorStatus) operatorStatus.status();
totalValuesLoader += status.valuesLoaded();
if (status.valuesLoaded() == 0) {
// This can happen if the indexRandom created dummy documents which led to empty segments.
continue;
}
assertThat(status.readersBuilt().size(), equalTo(expectedLoadedFields.size()));
for (String field : status.readersBuilt().keySet()) {
assertTrue(
"Field " + field + " was not expected to be loaded in driver " + driverName,
expectedLoadedFields.stream().anyMatch(field::contains)
);
}
}
assertThat("Values should have been loaded", totalValuesLoader, greaterThan(0L));
}
private EsqlQueryResponse sendQuery(String query) {
// Ensures there is no TopN pushdown to lucene, and that the pause happens after the TopN operator has been applied.
return client().execute(
EsqlQueryAction.INSTANCE,
syncEsqlQueryRequest(query).pragmas(
new QueryPragmas(
Settings.builder()
// Configured to ensure that there is only one worker handling all the shards, so that we can assert the correct
// expected behavior.
.put(QueryPragmas.MAX_CONCURRENT_NODES_PER_CLUSTER.getKey(), maxConcurrentNodes)
.put(QueryPragmas.MAX_CONCURRENT_SHARDS_PER_NODE.getKey(), shardCount)
.put(QueryPragmas.TASK_CONCURRENCY.getKey(), taskConcurrency)
.put(QueryPragmas.NODE_LEVEL_REDUCTION.getKey(), true)
.build()
)
).profile(true)
).actionGet(1, TimeUnit.MINUTES);
}
}
|
EsqlReductionLateMaterializationTestCase
|
java
|
elastic__elasticsearch
|
modules/lang-painless/src/main/java/org/elasticsearch/painless/symbol/IRDecorations.java
|
{
"start": 7058,
"end": 7275
}
|
class ____ extends IRDType {
public IRDIndexType(Class<?> value) {
super(value);
}
}
/** describes the index variable name variable in a foreach loop */
public static
|
IRDIndexType
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java
|
{
"start": 1463,
"end": 6783
}
|
class ____ extends StripedReconstructor
implements Runnable {
private StripedWriter stripedWriter;
StripedBlockReconstructor(ErasureCodingWorker worker,
StripedReconstructionInfo stripedReconInfo) {
super(worker, stripedReconInfo);
stripedWriter = new StripedWriter(this, getDatanode(),
getConf(), stripedReconInfo);
}
boolean hasValidTargets() {
return stripedWriter.hasValidTargets();
}
@Override
public void run() {
try {
initDecoderIfNecessary();
initDecodingValidatorIfNecessary();
getStripedReader().init();
stripedWriter.init();
reconstruct();
stripedWriter.endTargetBlocks();
// Currently we don't check the acks for packets, this is similar as
// block replication.
} catch (Throwable e) {
LOG.warn("Failed to reconstruct striped block: {}", getBlockGroup(), e);
getDatanode().getMetrics().incrECFailedReconstructionTasks();
} finally {
float xmitWeight = getErasureCodingWorker().getXmitWeight();
// if the xmits is smaller than 1, the xmitsSubmitted should be set to 1
// because if it set to zero, we cannot to measure the xmits submitted
int xmitsSubmitted = Math.max((int) (getXmits() * xmitWeight), 1);
getDatanode().decrementXmitsInProgress(xmitsSubmitted);
final DataNodeMetrics metrics = getDatanode().getMetrics();
metrics.incrECReconstructionTasks();
metrics.incrECReconstructionBytesRead(getBytesRead());
metrics.incrECReconstructionRemoteBytesRead(getRemoteBytesRead());
metrics.incrECReconstructionBytesWritten(getBytesWritten());
getStripedReader().close();
stripedWriter.close();
cleanup();
}
}
@Override
void reconstruct() throws IOException {
while (getPositionInBlock() < getMaxTargetLength()) {
DataNodeFaultInjector.get().stripedBlockReconstruction();
long remaining = getMaxTargetLength() - getPositionInBlock();
final int toReconstructLen =
(int) Math.min(getStripedReader().getBufferSize(), remaining);
long start = Time.monotonicNow();
long bytesToRead = (long) toReconstructLen * getStripedReader().getMinRequiredSources();
if (getDatanode().getEcReconstuctReadThrottler() != null) {
getDatanode().getEcReconstuctReadThrottler().throttle(bytesToRead);
}
// step1: read from minimum source DNs required for reconstruction.
// The returned success list is the source DNs we do real read from
getStripedReader().readMinimumSources(toReconstructLen);
long readEnd = Time.monotonicNow();
// step2: decode to reconstruct targets
reconstructTargets(toReconstructLen);
long decodeEnd = Time.monotonicNow();
// step3: transfer data
long bytesToWrite = (long) toReconstructLen * stripedWriter.getTargets();
if (getDatanode().getEcReconstuctWriteThrottler() != null) {
getDatanode().getEcReconstuctWriteThrottler().throttle(bytesToWrite);
}
if (stripedWriter.transferData2Targets() == 0) {
String error = "Transfer failed for all targets.";
throw new IOException(error);
}
long writeEnd = Time.monotonicNow();
// Only the succeed reconstructions are recorded.
final DataNodeMetrics metrics = getDatanode().getMetrics();
metrics.incrECReconstructionReadTime(readEnd - start);
metrics.incrECReconstructionDecodingTime(decodeEnd - readEnd);
metrics.incrECReconstructionWriteTime(writeEnd - decodeEnd);
updatePositionInBlock(toReconstructLen);
clearBuffers();
}
}
private void reconstructTargets(int toReconstructLen) throws IOException {
ByteBuffer[] inputs = getStripedReader().getInputBuffers(toReconstructLen);
int[] erasedIndices = stripedWriter.getRealTargetIndices();
ByteBuffer[] outputs = stripedWriter.getRealTargetBuffers(toReconstructLen);
if (isValidationEnabled()) {
markBuffers(inputs);
decode(inputs, erasedIndices, outputs);
resetBuffers(inputs);
DataNodeFaultInjector.get().badDecoding(outputs);
long start = Time.monotonicNow();
try {
getValidator().validate(inputs, erasedIndices, outputs);
long validateEnd = Time.monotonicNow();
getDatanode().getMetrics().incrECReconstructionValidateTime(
validateEnd - start);
} catch (InvalidDecodingException e) {
long validateFailedEnd = Time.monotonicNow();
getDatanode().getMetrics().incrECReconstructionValidateTime(
validateFailedEnd - start);
getDatanode().getMetrics().incrECInvalidReconstructionTasks();
throw e;
}
} else {
decode(inputs, erasedIndices, outputs);
}
stripedWriter.updateRealTargetBuffers(toReconstructLen);
}
private void decode(ByteBuffer[] inputs, int[] erasedIndices,
ByteBuffer[] outputs) throws IOException {
long start = System.nanoTime();
getDecoder().decode(inputs, erasedIndices, outputs);
long end = System.nanoTime();
this.getDatanode().getMetrics().incrECDecodingTime(end - start);
}
/**
* Clear all associated buffers.
*/
private void clearBuffers() {
getStripedReader().clearBuffers();
stripedWriter.clearBuffers();
}
}
|
StripedBlockReconstructor
|
java
|
spring-projects__spring-boot
|
module/spring-boot-webmvc/src/main/java/org/springframework/boot/webmvc/actuate/web/mappings/DispatcherServletsMappingDescriptionProvider.java
|
{
"start": 6249,
"end": 7379
}
|
class ____
implements HandlerMappingDescriptionProvider<RequestMappingInfoHandlerMapping> {
@Override
public Class<RequestMappingInfoHandlerMapping> getMappingClass() {
return RequestMappingInfoHandlerMapping.class;
}
@Override
public List<DispatcherServletMappingDescription> describe(RequestMappingInfoHandlerMapping handlerMapping) {
Map<RequestMappingInfo, HandlerMethod> handlerMethods = handlerMapping.getHandlerMethods();
return handlerMethods.entrySet().stream().map(this::describe).toList();
}
private DispatcherServletMappingDescription describe(Entry<RequestMappingInfo, HandlerMethod> mapping) {
DispatcherServletMappingDetails mappingDetails = new DispatcherServletMappingDetails();
mappingDetails.setHandlerMethod(new HandlerMethodDescription(mapping.getValue()));
mappingDetails.setRequestMappingConditions(new RequestMappingConditionsDescription(mapping.getKey()));
return new DispatcherServletMappingDescription(mapping.getKey().toString(), mapping.getValue().toString(),
mappingDetails);
}
}
private static final
|
RequestMappingInfoHandlerMappingDescriptionProvider
|
java
|
apache__avro
|
lang/java/ipc/src/test/java/org/apache/avro/io/Perf.java
|
{
"start": 51506,
"end": 51834
}
|
class ____ extends ReflectTest<ReflectNestedObjectArrayTest.Foo> {
ReflectNestedObjectArrayTest() throws IOException {
super("ReflectNestedObjectArray", new Foo(new Random()), 50);
}
@Override
protected Foo createDatum(Random r) {
return new Foo(r);
}
static public
|
ReflectNestedObjectArrayTest
|
java
|
apache__camel
|
components/camel-aws/camel-aws2-ddb/src/main/java/org/apache/camel/component/aws2/ddb/UpdateTableCommand.java
|
{
"start": 1106,
"end": 2162
}
|
class ____ extends AbstractDdbCommand {
public UpdateTableCommand(DynamoDbClient ddbClient, Ddb2Configuration configuration, Exchange exchange) {
super(ddbClient, configuration, exchange);
}
@Override
public void execute() {
ddbClient.updateTable(UpdateTableRequest.builder().tableName(determineTableName())
.provisionedThroughput(ProvisionedThroughput.builder().readCapacityUnits(determineReadCapacity())
.writeCapacityUnits(determineWriteCapacity()).build())
.build());
}
private Long determineReadCapacity() {
Long readCapacity = exchange.getIn().getHeader(Ddb2Constants.READ_CAPACITY, Long.class);
return readCapacity != null ? readCapacity : configuration.getReadCapacity();
}
private Long determineWriteCapacity() {
Long writeCapacity = exchange.getIn().getHeader(Ddb2Constants.WRITE_CAPACITY, Long.class);
return writeCapacity != null ? writeCapacity : configuration.getWriteCapacity();
}
}
|
UpdateTableCommand
|
java
|
quarkusio__quarkus
|
extensions/arc/deployment/src/test/java/io/quarkus/arc/test/properties/CombinedBuildProfileAndBuildPropertiesTest.java
|
{
"start": 5179,
"end": 5496
}
|
class ____ {
@Produces
FooBean testFooBean;
public AnotherProducer() {
testFooBean = new FooBean() {
@Override
public String foo() {
return "foo from missing prop";
}
};
}
}
}
|
AnotherProducer
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStoreOpDurations.java
|
{
"start": 1741,
"end": 3443
}
|
class ____ implements MetricsSource {
@Metric("Duration for a load state call")
MutableRate loadStateCall;
@Metric("Duration for a store application state call")
MutableRate storeApplicationStateCall;
@Metric("Duration for a update application state call")
MutableRate updateApplicationStateCall;
@Metric("Duration to handle a remove application state call")
MutableRate removeApplicationStateCall;
protected static final MetricsInfo RECORD_INFO =
info("ZKRMStateStoreOpDurations", "Durations of ZKRMStateStore calls");
private final MetricsRegistry registry;
private static final ZKRMStateStoreOpDurations INSTANCE
= new ZKRMStateStoreOpDurations();
public static ZKRMStateStoreOpDurations getInstance() {
return INSTANCE;
}
private ZKRMStateStoreOpDurations() {
registry = new MetricsRegistry(RECORD_INFO);
registry.tag(RECORD_INFO, "ZKRMStateStoreOpDurations");
MetricsSystem ms = DefaultMetricsSystem.instance();
if (ms != null) {
ms.register(RECORD_INFO.name(), RECORD_INFO.description(), this);
}
}
@Override
public synchronized void getMetrics(MetricsCollector collector, boolean all) {
registry.snapshot(collector.addRecord(registry.info()), all);
}
public void addLoadStateCallDuration(long value) {
loadStateCall.add(value);
}
public void addStoreApplicationStateCallDuration(long value) {
storeApplicationStateCall.add(value);
}
public void addUpdateApplicationStateCallDuration(long value) {
updateApplicationStateCall.add(value);
}
public void addRemoveApplicationStateCallDuration(long value) {
removeApplicationStateCall.add(value);
}
}
|
ZKRMStateStoreOpDurations
|
java
|
mapstruct__mapstruct
|
processor/src/test/java/org/mapstruct/ap/test/imports/innerclasses/TargetWithInnerClass.java
|
{
"start": 755,
"end": 1012
}
|
class ____ {
private int value;
public int getValue() {
return value;
}
public void setValue(int value) {
this.value = value;
}
}
}
}
|
TargetInnerInnerClass
|
java
|
apache__camel
|
components/camel-sjms2/src/main/java/org/apache/camel/component/sjms2/Sjms2SendDynamicAware.java
|
{
"start": 990,
"end": 1052
}
|
class ____ extends SjmsSendDynamicAware {
}
|
Sjms2SendDynamicAware
|
java
|
apache__camel
|
core/camel-base/src/main/java/org/apache/camel/impl/event/CamelContextRoutesStoppedEvent.java
|
{
"start": 951,
"end": 1377
}
|
class ____ extends AbstractContextEvent implements CamelEvent.CamelContextRoutesStoppedEvent {
private static final @Serial long serialVersionUID = -1120225323715688981L;
public CamelContextRoutesStoppedEvent(CamelContext source) {
super(source);
}
@Override
public String toString() {
return "Stopped routes on CamelContext: " + getContext().getName();
}
}
|
CamelContextRoutesStoppedEvent
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/bvt/sql/mysql/grant/MySqlGrantTest_5.java
|
{
"start": 969,
"end": 2355
}
|
class ____ extends MysqlTest {
public void test_0() throws Exception {
String sql = "GRANT ALL ON mydb.mytbl TO 'someuser'@'somehost';";
MySqlStatementParser parser = new MySqlStatementParser(sql);
List<SQLStatement> statementList = parser.parseStatementList();
SQLStatement stmt = statementList.get(0);
// print(statementList);
assertEquals(1, statementList.size());
MySqlSchemaStatVisitor visitor = new MySqlSchemaStatVisitor();
stmt.accept(visitor);
String output = SQLUtils.toMySqlString(stmt);
assertEquals("GRANT ALL ON mydb.mytbl TO 'someuser'@'somehost';", //
output);
// System.out.println("Tables : " + visitor.getTables());
// System.out.println("fields : " + visitor.getColumns());
// System.out.println("coditions : " + visitor.getConditions());
// System.out.println("orderBy : " + visitor.getOrderByColumns());
assertEquals(1, visitor.getTables().size());
assertEquals(0, visitor.getColumns().size());
assertEquals(0, visitor.getConditions().size());
// assertTrue(visitor.getTables().containsKey(new TableStat.Name("City")));
// assertTrue(visitor.getTables().containsKey(new TableStat.Name("t2")));
// assertTrue(visitor.getColumns().contains(new Column("t2", "id")));
}
}
|
MySqlGrantTest_5
|
java
|
quarkusio__quarkus
|
extensions/resteasy-reactive/rest-client/runtime/src/main/java/io/quarkus/rest/client/reactive/runtime/MicroProfileRestClientRequestFilter.java
|
{
"start": 1017,
"end": 6811
}
|
class ____ implements ResteasyReactiveClientRequestFilter {
private static final MultivaluedMap<String, String> EMPTY_MAP = new MultivaluedHashMap<>();
private final ClientHeadersFactory clientHeadersFactory;
public MicroProfileRestClientRequestFilter(ClientHeadersFactory clientHeadersFactory) {
this.clientHeadersFactory = clientHeadersFactory;
}
@Override
public void filter(ResteasyReactiveClientRequestContext requestContext) {
HeaderFiller headerFiller = (HeaderFiller) requestContext.getProperty(HeaderFiller.class.getName());
// mutable collection of headers
MultivaluedMap<String, String> headers = new MultivaluedHashMap<>();
// gather original headers
for (Map.Entry<String, List<Object>> headerEntry : requestContext.getHeaders().entrySet()) {
headers.put(headerEntry.getKey(), castToListOfStrings(headerEntry.getValue()));
}
// add headers from MP annotations
if (headerFiller != null) {
// add headers to a mutable headers collection
if (headerFiller instanceof ExtendedHeaderFiller) {
((ExtendedHeaderFiller) headerFiller).addHeaders(headers, requestContext);
} else {
headerFiller.addHeaders(headers);
}
}
MultivaluedMap<String, String> incomingHeaders = determineIncomingHeaders();
// Propagation with the default factory will then overwrite any values if required.
for (Map.Entry<String, List<String>> headerEntry : headers.entrySet()) {
requestContext.getHeaders().put(headerEntry.getKey(), castToListOfObjects(headerEntry.getValue()));
}
ClientHeadersFactory clientHeadersFactory = clientHeadersFactory(requestContext);
if (clientHeadersFactory != null) {
if (clientHeadersFactory instanceof ReactiveClientHeadersFactory reactiveClientHeadersFactory) {
// reactive
requestContext.suspend();
reactiveClientHeadersFactory.getHeaders(incomingHeaders, headers).subscribe().with(
new Consumer<>() {
@Override
public void accept(MultivaluedMap<String, String> newHeaders) {
for (var headerEntry : newHeaders.entrySet()) {
requestContext.getHeaders()
.put(headerEntry.getKey(), castToListOfObjects(headerEntry.getValue()));
}
requestContext.resume();
}
}, new Consumer<>() {
@Override
public void accept(Throwable t) {
requestContext.resume(t);
}
});
} else {
// blocking
incomingHeaders = clientHeadersFactory.update(incomingHeaders, headers);
for (var headerEntry : incomingHeaders.entrySet()) {
requestContext.getHeaders().put(headerEntry.getKey(), castToListOfObjects(headerEntry.getValue()));
}
}
}
}
private MultivaluedMap<String, String> determineIncomingHeaders() {
ArcContainer container = Arc.container();
if (container == null) {
return MicroProfileRestClientRequestFilter.EMPTY_MAP;
}
ManagedContext requestContext = container.requestContext();
if (!requestContext.isActive()) {
return MicroProfileRestClientRequestFilter.EMPTY_MAP;
}
InstanceHandle<HttpHeaders> jakartaRestServerHeaders = container.instance(HttpHeaders.class);
if (!jakartaRestServerHeaders.isAvailable()) {
return MicroProfileRestClientRequestFilter.EMPTY_MAP;
}
// TODO: we could in the future consider using the Vert.x request headers here as well...
try {
return jakartaRestServerHeaders.get().getRequestHeaders();
} catch (ContextNotActiveException | IllegalStateException ignored) {
// guard against the race condition that exists between checking if the context is active
// and actually pulling the headers out of that request context
// this could happen if the REST call is being offloaded to another thread pool in a fire and forget manner
return MicroProfileRestClientRequestFilter.EMPTY_MAP;
}
}
private ClientHeadersFactory clientHeadersFactory(ResteasyReactiveClientRequestContext requestContext) {
if (requestContext.getConfiguration() instanceof ConfigurationImpl configuration) {
ClientHeadersFactory localHeadersFactory = configuration.getFromContext(ClientHeadersFactory.class);
if (localHeadersFactory != null) {
return localHeadersFactory;
}
}
return clientHeadersFactory;
}
private static List<String> castToListOfStrings(Collection<Object> values) {
List<String> result = new ArrayList<>();
for (Object value : values) {
if (value instanceof String) {
result.add((String) value);
} else if (value instanceof Collection) {
result.addAll(castToListOfStrings((Collection<Object>) value));
} else {
result.add(String.valueOf(value));
}
}
return result;
}
@SuppressWarnings("unchecked")
private static List<Object> castToListOfObjects(List<String> values) {
return (List<Object>) (List<?>) values;
}
}
|
MicroProfileRestClientRequestFilter
|
java
|
elastic__elasticsearch
|
x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ScoreExtractorTests.java
|
{
"start": 411,
"end": 884
}
|
class ____ extends ESTestCase {
public void testGet() {
int times = between(1, 1000);
for (int i = 0; i < times; i++) {
float score = randomFloat();
SearchHit hit = SearchHit.unpooled(1);
hit.score(score);
assertEquals(score, ScoreExtractor.INSTANCE.extract(hit));
}
}
public void testToString() {
assertEquals("SCORE", ScoreExtractor.INSTANCE.toString());
}
}
|
ScoreExtractorTests
|
java
|
apache__kafka
|
clients/src/test/java/org/apache/kafka/clients/consumer/KafkaConsumerTest.java
|
{
"start": 9490,
"end": 30586
}
|
class ____ {
private final String topic = "test";
private final Uuid topicId = Uuid.randomUuid();
private final TopicPartition tp0 = new TopicPartition(topic, 0);
private final TopicPartition tp1 = new TopicPartition(topic, 1);
private final String topic2 = "test2";
private final Uuid topicId2 = Uuid.randomUuid();
private final TopicPartition t2p0 = new TopicPartition(topic2, 0);
private final String topic3 = "test3";
private final Uuid topicId3 = Uuid.randomUuid();
private final TopicPartition t3p0 = new TopicPartition(topic3, 0);
private final int sessionTimeoutMs = 10000;
private final int defaultApiTimeoutMs = 60000;
private final int heartbeatIntervalMs = 1000;
// Set auto commit interval lower than heartbeat so we don't need to deal with
// a concurrent heartbeat request
private final int autoCommitIntervalMs = 500;
private final String groupId = "mock-group";
private final String memberId = "memberId";
private final String leaderId = "leaderId";
private final Optional<String> groupInstanceId = Optional.of("mock-instance");
private final Map<String, Uuid> topicIds = Stream.of(
new AbstractMap.SimpleEntry<>(topic, topicId),
new AbstractMap.SimpleEntry<>(topic2, topicId2),
new AbstractMap.SimpleEntry<>(topic3, topicId3))
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
private final Map<Uuid, String> topicNames = Stream.of(
new AbstractMap.SimpleEntry<>(topicId, topic),
new AbstractMap.SimpleEntry<>(topicId2, topic2),
new AbstractMap.SimpleEntry<>(topicId3, topic3))
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
private final String partitionRevoked = "Hit partition revoke ";
private final String partitionAssigned = "Hit partition assign ";
private final String partitionLost = "Hit partition lost ";
private final Collection<TopicPartition> singleTopicPartition = Set.of(new TopicPartition(topic, 0));
private final Time time = new MockTime();
private final SubscriptionState subscription = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.EARLIEST);
private final ConsumerPartitionAssignor assignor = new RoundRobinAssignor();
private KafkaConsumer<?, ?> consumer;
@AfterEach
public void cleanup() {
if (consumer != null) {
consumer.close(CloseOptions.timeout(Duration.ZERO));
}
}
@ParameterizedTest
@EnumSource(GroupProtocol.class)
public void testSubscribingCustomMetricsDoesntAffectConsumerMetrics(GroupProtocol groupProtocol) {
Properties props = new Properties();
props.setProperty(ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol.name());
props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
consumer = newConsumer(props, new StringDeserializer(), new StringDeserializer());
Map<MetricName, KafkaMetric> customMetrics = customMetrics();
customMetrics.forEach((name, metric) -> consumer.registerMetricForSubscription(metric));
Map<MetricName, ? extends Metric> consumerMetrics = consumer.metrics();
customMetrics.forEach((name, metric) -> assertFalse(consumerMetrics.containsKey(name)));
}
@ParameterizedTest
@EnumSource(GroupProtocol.class)
public void testSubscribingCustomMetricsWithSameNameDoesntAffectConsumerMetrics(GroupProtocol groupProtocol) {
Properties props = new Properties();
props.setProperty(ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol.name());
props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
Class<?> consumerClass = groupProtocol == GroupProtocol.CLASSIC ? ClassicKafkaConsumer.class : AsyncKafkaConsumer.class;
try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister()) {
appender.setClassLogger(consumerClass, Level.DEBUG);
consumer = newConsumer(props, new StringDeserializer(), new StringDeserializer());
KafkaMetric existingMetricToAdd = (KafkaMetric) consumer.metrics().entrySet().iterator().next().getValue();
consumer.registerMetricForSubscription(existingMetricToAdd);
final String expectedMessage = String.format("Skipping registration for metric %s. Existing consumer metrics cannot be overwritten.", existingMetricToAdd.metricName());
assertTrue(appender.getMessages().stream().anyMatch(m -> m.contains(expectedMessage)));
}
}
@ParameterizedTest
@EnumSource(GroupProtocol.class)
public void testAssignedPartitionsMetrics(GroupProtocol groupProtocol) throws InterruptedException {
consumer = newConsumer(groupProtocol, time, mock(KafkaClient.class), subscription,
mock(ConsumerMetadata.class), assignor, false, groupInstanceId);
Metrics metrics = consumer.metricsRegistry();
// This metric is added in the background thread for the AsyncConsumer, so waiting on it to avoid flakiness.
TestUtils.waitForCondition(() -> getMetric(metrics, "assigned-partitions") != null,
"Consumer should register the assigned-partitions metric");
assertNotNull(getMetric(metrics, "assigned-partitions"));
assertEquals(0.0d, getMetric(metrics, "assigned-partitions").metricValue());
subscription.assignFromUser(Set.of(tp0));
assertEquals(1.0d, getMetric(metrics, "assigned-partitions").metricValue());
subscription.assignFromUser(Set.of(tp0, tp1));
assertEquals(2.0d, getMetric(metrics, "assigned-partitions").metricValue());
subscription.unsubscribe();
subscription.subscribe(Set.of(topic), Optional.empty());
subscription.assignFromSubscribed(Set.of(tp0));
assertEquals(1.0d, getMetric(metrics, "assigned-partitions").metricValue());
}
private KafkaMetric getMetric(Metrics metrics, String name) {
return metrics.metrics().get(metrics.metricName(name, CONSUMER_METRIC_GROUP_PREFIX + COORDINATOR_METRICS_SUFFIX));
}
@ParameterizedTest
@EnumSource(GroupProtocol.class)
public void testUnsubscribingCustomMetricsWithSameNameDoesntAffectConsumerMetrics(GroupProtocol groupProtocol) {
Properties props = new Properties();
props.setProperty(ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol.name());
props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
Class<?> consumerClass = groupProtocol == GroupProtocol.CLASSIC ? ClassicKafkaConsumer.class : AsyncKafkaConsumer.class;
try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister()) {
appender.setClassLogger(consumerClass, Level.DEBUG);
consumer = newConsumer(props, new StringDeserializer(), new StringDeserializer());
KafkaMetric existingMetricToRemove = (KafkaMetric) consumer.metrics().entrySet().iterator().next().getValue();
consumer.unregisterMetricFromSubscription(existingMetricToRemove);
final String expectedMessage = String.format("Skipping unregistration for metric %s. Existing consumer metrics cannot be removed.", existingMetricToRemove.metricName());
assertTrue(appender.getMessages().stream().anyMatch(m -> m.contains(expectedMessage)));
}
}
@ParameterizedTest
@EnumSource(GroupProtocol.class)
public void testShouldOnlyCallMetricReporterMetricChangeOnceWithExistingConsumerMetric(GroupProtocol groupProtocol) {
try (MockedStatic<CommonClientConfigs> mockedCommonClientConfigs = mockStatic(CommonClientConfigs.class, new CallsRealMethods())) {
ClientTelemetryReporter clientTelemetryReporter = mock(ClientTelemetryReporter.class);
clientTelemetryReporter.configure(any());
mockedCommonClientConfigs.when(() -> CommonClientConfigs.telemetryReporter(anyString(), any())).thenReturn(Optional.of(clientTelemetryReporter));
Properties props = new Properties();
props.setProperty(ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol.name());
props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
consumer = newConsumer(props, new StringDeserializer(), new StringDeserializer());
KafkaMetric existingMetric = (KafkaMetric) consumer.metrics().entrySet().iterator().next().getValue();
consumer.registerMetricForSubscription(existingMetric);
// This test would fail without the check as the existing metric is registered in the consumer on startup
Mockito.verify(clientTelemetryReporter, atMostOnce()).metricChange(existingMetric);
}
}
@ParameterizedTest
@EnumSource(GroupProtocol.class)
public void testShouldNotCallMetricReporterMetricRemovalWithExistingConsumerMetric(GroupProtocol groupProtocol) {
try (MockedStatic<CommonClientConfigs> mockedCommonClientConfigs = mockStatic(CommonClientConfigs.class, new CallsRealMethods())) {
ClientTelemetryReporter clientTelemetryReporter = mock(ClientTelemetryReporter.class);
clientTelemetryReporter.configure(any());
mockedCommonClientConfigs.when(() -> CommonClientConfigs.telemetryReporter(anyString(), any())).thenReturn(Optional.of(clientTelemetryReporter));
Properties props = new Properties();
props.setProperty(ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol.name());
props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
consumer = newConsumer(props, new StringDeserializer(), new StringDeserializer());
KafkaMetric existingMetric = (KafkaMetric) consumer.metrics().entrySet().iterator().next().getValue();
consumer.unregisterMetricFromSubscription(existingMetric);
Mockito.verify(clientTelemetryReporter, never()).metricRemoval(existingMetric);
}
}
@ParameterizedTest
@EnumSource(GroupProtocol.class)
public void testUnSubscribingNonExisingMetricsDoesntCauseError(GroupProtocol groupProtocol) {
Properties props = new Properties();
props.setProperty(ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol.name());
props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
consumer = newConsumer(props, new StringDeserializer(), new StringDeserializer());
Map<MetricName, KafkaMetric> customMetrics = customMetrics();
//Metrics never registered but removed should not cause an error
customMetrics.forEach((name, metric) -> assertDoesNotThrow(() -> consumer.unregisterMetricFromSubscription(metric)));
}
@ParameterizedTest
@EnumSource(GroupProtocol.class)
public void testMetricsReporterAutoGeneratedClientId(GroupProtocol groupProtocol) {
Properties props = new Properties();
props.setProperty(ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol.name());
props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
props.setProperty(ConsumerConfig.METRIC_REPORTER_CLASSES_CONFIG, MockMetricsReporter.class.getName());
consumer = newConsumer(props, new StringDeserializer(), new StringDeserializer());
assertEquals(2, consumer.metricsRegistry().reporters().size());
MockMetricsReporter mockMetricsReporter = (MockMetricsReporter) consumer.metricsRegistry().reporters().stream()
.filter(reporter -> reporter instanceof MockMetricsReporter).findFirst().orElseThrow();
assertEquals(consumer.clientId(), mockMetricsReporter.clientId);
consumer.close(CloseOptions.timeout(Duration.ZERO));
}
@ParameterizedTest
@EnumSource(GroupProtocol.class)
public void testDisableJmxAndClientTelemetryReporter(GroupProtocol groupProtocol) {
Properties props = new Properties();
props.setProperty(ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol.name());
props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
props.setProperty(ConsumerConfig.METRIC_REPORTER_CLASSES_CONFIG, "");
props.setProperty(ConsumerConfig.ENABLE_METRICS_PUSH_CONFIG, "false");
consumer = newConsumer(props, new StringDeserializer(), new StringDeserializer());
assertTrue(consumer.metricsRegistry().reporters().isEmpty());
}
@ParameterizedTest
@EnumSource(GroupProtocol.class)
public void testExplicitlyOnlyEnableJmxReporter(GroupProtocol groupProtocol) {
Properties props = new Properties();
props.setProperty(ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol.name());
props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
props.setProperty(ConsumerConfig.METRIC_REPORTER_CLASSES_CONFIG, "org.apache.kafka.common.metrics.JmxReporter");
props.setProperty(ConsumerConfig.ENABLE_METRICS_PUSH_CONFIG, "false");
consumer = newConsumer(props, new StringDeserializer(), new StringDeserializer());
assertEquals(1, consumer.metricsRegistry().reporters().size());
assertInstanceOf(JmxReporter.class, consumer.metricsRegistry().reporters().get(0));
}
@ParameterizedTest
@EnumSource(GroupProtocol.class)
public void testExplicitlyOnlyEnableClientTelemetryReporter(GroupProtocol groupProtocol) {
Properties props = new Properties();
props.setProperty(ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol.name());
props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
props.setProperty(ConsumerConfig.METRIC_REPORTER_CLASSES_CONFIG, "");
consumer = newConsumer(props, new StringDeserializer(), new StringDeserializer());
assertEquals(1, consumer.metricsRegistry().reporters().size());
assertInstanceOf(ClientTelemetryReporter.class, consumer.metricsRegistry().reporters().get(0));
}
// TODO: this test requires rebalance logic which is not yet implemented in the CONSUMER group protocol.
// Once it is implemented, this should use both group protocols.
@ParameterizedTest
@EnumSource(value = GroupProtocol.class, names = "CLASSIC")
@SuppressWarnings("unchecked")
public void testPollReturnsRecords(GroupProtocol groupProtocol) {
consumer = setUpConsumerWithRecordsToPoll(groupProtocol, tp0, 5);
ConsumerRecords<String, String> records = (ConsumerRecords<String, String>) consumer.poll(Duration.ZERO);
assertEquals(5, records.count());
assertEquals(Set.of(tp0), records.partitions());
assertEquals(5, records.records(tp0).size());
assertEquals(1, records.nextOffsets().size());
assertEquals(new OffsetAndMetadata(5), records.nextOffsets().get(tp0));
}
// TODO: this test requires rebalance logic which is not yet implemented in the CONSUMER group protocol.
// Once it is implemented, this should use both group protocols.
@ParameterizedTest
@EnumSource(value = GroupProtocol.class, names = "CLASSIC")
@SuppressWarnings("unchecked")
public void testSecondPollWithDeserializationErrorThrowsRecordDeserializationException(GroupProtocol groupProtocol) {
int invalidRecordNumber = 4;
int invalidRecordOffset = 3;
StringDeserializer deserializer = mockErrorDeserializer(invalidRecordNumber);
consumer = setUpConsumerWithRecordsToPoll(groupProtocol, tp0, 5, deserializer);
ConsumerRecords<String, String> records = (ConsumerRecords<String, String>) consumer.poll(Duration.ZERO);
assertEquals(invalidRecordNumber - 1, records.count());
assertEquals(Set.of(tp0), records.partitions());
assertEquals(invalidRecordNumber - 1, records.records(tp0).size());
long lastOffset = records.records(tp0).get(records.records(tp0).size() - 1).offset();
assertEquals(invalidRecordNumber - 2, lastOffset);
assertEquals(1, records.nextOffsets().size());
assertEquals(new OffsetAndMetadata(lastOffset + 1), records.nextOffsets().get(tp0));
RecordDeserializationException rde = assertThrows(RecordDeserializationException.class, () -> consumer.poll(Duration.ZERO));
assertEquals(invalidRecordOffset, rde.offset());
assertEquals(tp0, rde.topicPartition());
assertEquals(consumer.position(tp0), rde.offset());
}
/*
Create a mock deserializer which throws a SerializationException on the Nth record's value deserialization
*/
private StringDeserializer mockErrorDeserializer(int recordNumber) {
int recordIndex = recordNumber - 1;
return new StringDeserializer() {
int i = 0;
@Override
public String deserialize(String topic, byte[] data) {
if (i == recordIndex) {
throw new SerializationException();
} else {
i++;
return super.deserialize(topic, data);
}
}
@Override
public String deserialize(String topic, Headers headers, ByteBuffer data) {
if (i == recordIndex) {
throw new SerializationException();
} else {
i++;
return super.deserialize(topic, headers, data);
}
}
};
}
private KafkaConsumer<?, ?> setUpConsumerWithRecordsToPoll(GroupProtocol groupProtocol,
TopicPartition tp,
int recordCount) {
return setUpConsumerWithRecordsToPoll(groupProtocol, tp, recordCount, new StringDeserializer());
}
private KafkaConsumer<?, ?> setUpConsumerWithRecordsToPoll(GroupProtocol groupProtocol,
TopicPartition tp,
int recordCount,
Deserializer<String> deserializer) {
Cluster cluster = TestUtils.singletonCluster(tp.topic(), 1);
Node node = cluster.nodes().get(0);
ConsumerMetadata metadata = createMetadata(subscription);
MockClient client = new MockClient(time, metadata);
initMetadata(client, Map.of(topic, 1));
consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor,
true, groupId, groupInstanceId, Optional.of(deserializer), false);
consumer.subscribe(Set.of(topic), getConsumerRebalanceListener(consumer));
prepareRebalance(client, node, assignor, List.of(tp), null);
consumer.updateAssignmentMetadataIfNeeded(time.timer(Long.MAX_VALUE));
client.prepareResponseFrom(fetchResponse(tp, 0, recordCount), node);
return consumer;
}
@ParameterizedTest
@EnumSource(GroupProtocol.class)
public void testConstructorClose(GroupProtocol groupProtocol) {
Properties props = new Properties();
props.setProperty(ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol.name());
props.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, "testConstructorClose");
props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "invalid-23-8409-adsfsdj");
props.setProperty(ConsumerConfig.METRIC_REPORTER_CLASSES_CONFIG, MockMetricsReporter.class.getName());
final int oldInitCount = MockMetricsReporter.INIT_COUNT.get();
final int oldCloseCount = MockMetricsReporter.CLOSE_COUNT.get();
try {
newConsumer(props, new ByteArrayDeserializer(), new ByteArrayDeserializer());
fail("should have caught an exception and returned");
} catch (KafkaException e) {
assertEquals(oldInitCount + 1, MockMetricsReporter.INIT_COUNT.get());
assertEquals(oldCloseCount + 1, MockMetricsReporter.CLOSE_COUNT.get());
assertEquals("Failed to construct kafka consumer", e.getMessage());
}
}
@ParameterizedTest
@EnumSource(GroupProtocol.class)
public void testConstructorInvalidMetricReporters(GroupProtocol groupProtocol) {
Properties props = new Properties();
props.setProperty(ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol.name());
props.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, "testConstructorInvalidMetricReporters");
props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
props.setProperty(ConsumerConfig.METRIC_REPORTER_CLASSES_CONFIG, "an.invalid.class");
KafkaException e = assertThrows(
KafkaException.class,
() -> newConsumer(props, new ByteArrayDeserializer(), new ByteArrayDeserializer()));
assertEquals("Failed to construct kafka consumer", e.getMessage());
assertEquals("Class an.invalid.
|
KafkaConsumerTest
|
java
|
quarkusio__quarkus
|
extensions/panache/hibernate-reactive-panache/deployment/src/test/java/io/quarkus/hibernate/reactive/panache/test/MyOtherTestResource.java
|
{
"start": 350,
"end": 683
}
|
class ____ {
@GET
@Path("{id}")
@Produces(MediaType.APPLICATION_JSON)
public Uni<MyOtherEntity> get(@PathParam("id") long id) {
return MyOtherEntity.<MyOtherEntity> findById(id)
.onItem().ifNull().failWith(() -> new WebApplicationException(Response.Status.NOT_FOUND));
}
}
|
MyOtherTestResource
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/mapping/naturalid/immutableentity/ImmutableEntityNaturalIdTest.java
|
{
"start": 1971,
"end": 9548
}
|
class ____ {
@BeforeEach
public void createTestData(SessionFactoryScope scope) {
final SessionFactoryImplementor sessionFactory = scope.getSessionFactory();
final StatisticsImplementor stats = sessionFactory.getStatistics();
sessionFactory.getCache().evictAllRegions();
stats.clear();
scope.inTransaction(
(session) -> {
Building b1 = new Building();
b1.setName( "Computer Science" );
b1.setAddress( "1210 W. Dayton St." );
b1.setCity( "Madison" );
b1.setState( "WI" );
session.persist( b1 );
}
);
assertEquals( 0, stats.getNaturalIdCacheHitCount(), "Cache hits should be empty" );
assertEquals( 0, stats.getNaturalIdCacheMissCount(), "Cache misses should be empty" );
assertEquals( 1, stats.getNaturalIdCachePutCount(), "Cache put should be one after insert" );
}
@AfterEach
public void dropTestData(SessionFactoryScope scope) {
scope.getSessionFactory().getSchemaManager().truncate();
}
@Test
public void testNaturalIdMapping(SessionFactoryScope scope) {
final EntityMappingType buildingMapping = scope.getSessionFactory()
.getRuntimeMetamodels()
.getEntityMappingType( Building.class );
final NaturalIdMapping naturalIdMapping = buildingMapping.getNaturalIdMapping();
assertThat( naturalIdMapping, notNullValue() );
assertThat( naturalIdMapping.getNaturalIdAttributes().size(), is( 3 ) );
// nullability is not specified, so they should be nullable by annotations-specific default
for ( SingularAttributeMapping attribute : naturalIdMapping.getNaturalIdAttributes() ) {
assertThat( attribute.getAttributeMetadata().isNullable(), is( true ) );
}
final EntityPersister entityPersister = buildingMapping.getEntityPersister();
assertThat(
"Class should have a natural key",
entityPersister.hasNaturalIdentifier(),
is( true )
);
assertThat(
"Wrong number of attributes",
entityPersister.getNaturalIdentifierProperties().length,
is( 3 )
);
// nullability is not specified, so they should be nullable by annotations-specific default
assertTrue( entityPersister.getPropertyNullability()[ entityPersister.getPropertyIndex( "address" )] );
assertTrue( entityPersister.getPropertyNullability()[ entityPersister.getPropertyIndex( "city" )] );
assertTrue( entityPersister.getPropertyNullability()[ entityPersister.getPropertyIndex( "state" )] );
}
@Test
public void testImmutableNaturalIdLifecycle(SessionFactoryScope scope) {
final SessionFactoryImplementor sessionFactory = scope.getSessionFactory();
final StatisticsImplementor stats = sessionFactory.getStatistics();
// Clear caches and reset cache stats
sessionFactory.getCache().evictNaturalIdData();
stats.clear();
// load #1 - should result in:
// - cache miss
// - query
// - cache put
scope.inTransaction(
(session) -> {
final NaturalIdLoadAccess<Building> naturalIdLoader = session.byNaturalId( Building.class );
final Building building = naturalIdLoader
.using( "address", "1210 W. Dayton St." )
.using( "city", "Madison" )
.using( "state", "WI" )
.load();
assertThat( building, notNullValue() );
assertEquals( 0, stats.getNaturalIdCacheHitCount(), "Cache hits should be empty" );
assertEquals( 1, stats.getNaturalIdCacheMissCount(), "Cache misses should be one" );
assertEquals( 1, stats.getNaturalIdCachePutCount(), "Cache put should be one after load" );
assertThat( stats.getPrepareStatementCount(), is( 1L ) );
}
);
// load #2 - should result in
// - cache hit
scope.inTransaction(
(session) -> {
final NaturalIdLoadAccess<Building> naturalIdLoader = session.byNaturalId( Building.class );
final Building building = naturalIdLoader
.using( "address", "1210 W. Dayton St." )
.using( "city", "Madison" )
.using( "state", "WI" )
.load();
assertThat( building, notNullValue() );
assertEquals( 1, stats.getNaturalIdCacheHitCount(), "Cache hits should be one after second query" );
assertEquals( 1, stats.getNaturalIdCacheMissCount(), "Cache misses should be one after second query" );
assertEquals( 1, stats.getNaturalIdCachePutCount(), "Cache put should be one after second query" );
// Try Deleting
session.remove( building );
// third query
naturalIdLoader.load();
assertEquals( 1, stats.getNaturalIdCacheHitCount(), "Cache hits should be one after second query" );
assertEquals( 1, stats.getNaturalIdCacheMissCount(), "Cache misses should be two after second query" );
assertEquals( 1, stats.getNaturalIdCachePutCount(), "Cache put should be one after second query" );
}
);
//Try three, should be db lookup and miss
scope.inTransaction(
(session) -> {
final Building building = session.byNaturalId( Building.class )
.using( "address", "1210 W. Dayton St." )
.using( "city", "Madison" )
.using( "state", "WI" )
.load();
// second query
assertNull( building );
assertEquals( 1, stats.getNaturalIdCacheHitCount(), "Cache hits should be one after third query" );
assertEquals( 2, stats.getNaturalIdCacheMissCount(), "Cache misses should be one after third query" );
assertEquals( 1, stats.getNaturalIdCachePutCount(), "Cache put should be one after third query" );
// here, we should know that that natural-id does not exist as part of the Session...
session.byNaturalId( Building.class )
.using( "address", "1210 W. Dayton St." )
.using( "city", "Madison" )
.using( "state", "WI" )
.load();
assertEquals( 1, stats.getNaturalIdCacheHitCount(), "Cache hits should still be one" );
assertEquals( 3, stats.getNaturalIdCacheMissCount(), "Cache misses should now be four" );
assertEquals( 1, stats.getNaturalIdCachePutCount(), "Cache put should still be one" );
}
);
}
@Test
@JiraKey( value = "HHH-7371" )
public void testImmutableNaturalIdLifecycle2(SessionFactoryScope scope) {
scope.inTransaction(
(s) -> {
final NaturalIdLoadAccess<Building> naturalIdLoader = s.byNaturalId( Building.class );
naturalIdLoader
.using( "address", "1210 W. Dayton St." )
.using( "city", "Madison" )
.using( "state", "WI" );
Building building = naturalIdLoader.getReference();
assertNotNull( building );
s.remove( building );
building = naturalIdLoader.load();
//org.hibernate.ObjectNotFoundException: No row with the given identifier exists: [org.hibernate.test.naturalid.immutableentity.Building#1]
// at org.hibernate.internal.SessionFactoryImpl$1$1.handleEntityNotFound(SessionFactoryImpl.java:247)
// at org.hibernate.event.internal.DefaultLoadEventListener.returnNarrowedProxy(DefaultLoadEventListener.java:282)
// at org.hibernate.event.internal.DefaultLoadEventListener.proxyOrLoad(DefaultLoadEventListener.java:248)
// at org.hibernate.event.internal.DefaultLoadEventListener.onLoad(DefaultLoadEventListener.java:148)
// at org.hibernate.internal.SessionImpl.fireLoad(SessionImpl.java:1079)
// at org.hibernate.internal.SessionImpl.access$13(SessionImpl.java:1075)
// at org.hibernate.internal.SessionImpl$IdentifierLoadAccessImpl.load(SessionImpl.java:2425)
// at org.hibernate.internal.SessionImpl$NaturalIdLoadAccessImpl.load(SessionImpl.java:2586)
// at org.hibernate.test.naturalid.immutableentity.ImmutableEntityNaturalIdTest.testImmutableNaturalIdLifecycle2(ImmutableEntityNaturalIdTest.java:188)
assertNull( building );
}
);
}
}
|
ImmutableEntityNaturalIdTest
|
java
|
quarkusio__quarkus
|
independent-projects/resteasy-reactive/server/runtime/src/main/java/org/jboss/resteasy/reactive/server/model/ServerResourceMethod.java
|
{
"start": 3650,
"end": 3700
}
|
interface ____.
*
* @return declaring
|
method
|
java
|
apache__commons-lang
|
src/main/java/org/apache/commons/lang3/AppendableJoiner.java
|
{
"start": 3225,
"end": 11831
}
|
class ____<T> implements Supplier<AppendableJoiner<T>> {
/** The sequence of characters to be used at the beginning. */
private CharSequence prefix;
/** The sequence of characters to be used at the end. */
private CharSequence suffix;
/** The delimiter that separates each element. */
private CharSequence delimiter;
/** The consumer used to render each element of type {@code T} onto an {@link Appendable}. */
private FailableBiConsumer<Appendable, T, IOException> appender;
/**
* Constructs a new instance.
*/
Builder() {
// empty
}
/**
* Gets a new instance of {@link AppendableJoiner}.
*/
@Override
public AppendableJoiner<T> get() {
return new AppendableJoiner<>(prefix, suffix, delimiter, appender);
}
/**
* Sets the delimiter that separates each element.
*
* @param delimiter The delimiter that separates each element.
* @return {@code this} instance.
*/
public Builder<T> setDelimiter(final CharSequence delimiter) {
this.delimiter = delimiter;
return this;
}
/**
* Sets the consumer used to render each element of type {@code T} onto an {@link Appendable}.
*
* @param appender The consumer used to render each element of type {@code T} onto an {@link Appendable}.
* @return {@code this} instance.
*/
public Builder<T> setElementAppender(final FailableBiConsumer<Appendable, T, IOException> appender) {
this.appender = appender;
return this;
}
/**
* Sets the sequence of characters to be used at the beginning.
*
* @param prefix The sequence of characters to be used at the beginning.
* @return {@code this} instance.
*/
public Builder<T> setPrefix(final CharSequence prefix) {
this.prefix = prefix;
return this;
}
/**
* Sets the sequence of characters to be used at the end.
*
* @param suffix The sequence of characters to be used at the end.
* @return {@code this} instance.
*/
public Builder<T> setSuffix(final CharSequence suffix) {
this.suffix = suffix;
return this;
}
}
/**
* Creates a new builder.
*
* @param <T> The type of elements.
* @return a new builder.
*/
public static <T> Builder<T> builder() {
return new Builder<>();
}
/** Could be public in the future, in some form. */
@SafeVarargs
static <A extends Appendable, T> A joinA(final A appendable, final CharSequence prefix, final CharSequence suffix, final CharSequence delimiter,
final FailableBiConsumer<Appendable, T, IOException> appender, final T... elements) throws IOException {
return joinArray(appendable, prefix, suffix, delimiter, appender, elements);
}
private static <A extends Appendable, T> A joinArray(final A appendable, final CharSequence prefix, final CharSequence suffix, final CharSequence delimiter,
final FailableBiConsumer<Appendable, T, IOException> appender, final T[] elements) throws IOException {
appendable.append(prefix);
if (elements != null) {
if (elements.length > 0) {
appender.accept(appendable, elements[0]);
}
for (int i = 1; i < elements.length; i++) {
appendable.append(delimiter);
appender.accept(appendable, elements[i]);
}
}
appendable.append(suffix);
return appendable;
}
/** Could be public in the future, in some form. */
static <T> StringBuilder joinI(final StringBuilder stringBuilder, final CharSequence prefix, final CharSequence suffix, final CharSequence delimiter,
final FailableBiConsumer<Appendable, T, IOException> appender, final Iterable<T> elements) {
try {
return joinIterable(stringBuilder, prefix, suffix, delimiter, appender, elements);
} catch (final IOException e) {
// Cannot happen with a StringBuilder.
throw new UncheckedException(e);
}
}
private static <A extends Appendable, T> A joinIterable(final A appendable, final CharSequence prefix, final CharSequence suffix,
final CharSequence delimiter, final FailableBiConsumer<Appendable, T, IOException> appender, final Iterable<T> elements) throws IOException {
appendable.append(prefix);
if (elements != null) {
final Iterator<T> iterator = elements.iterator();
if (iterator.hasNext()) {
appender.accept(appendable, iterator.next());
}
while (iterator.hasNext()) {
appendable.append(delimiter);
appender.accept(appendable, iterator.next());
}
}
appendable.append(suffix);
return appendable;
}
/** Could be public in the future, in some form. */
@SafeVarargs
static <T> StringBuilder joinSB(final StringBuilder stringBuilder, final CharSequence prefix, final CharSequence suffix, final CharSequence delimiter,
final FailableBiConsumer<Appendable, T, IOException> appender, final T... elements) {
try {
return joinArray(stringBuilder, prefix, suffix, delimiter, appender, elements);
} catch (final IOException e) {
// Cannot happen with a StringBuilder.
throw new UncheckedException(e);
}
}
private static CharSequence nonNull(final CharSequence value) {
return value != null ? value : StringUtils.EMPTY;
}
/** The sequence of characters to be used at the beginning. */
private final CharSequence prefix;
/** The sequence of characters to be used at the end. */
private final CharSequence suffix;
/** The delimiter that separates each element. */
private final CharSequence delimiter;
private final FailableBiConsumer<Appendable, T, IOException> appender;
/**
* Constructs a new instance.
*/
private AppendableJoiner(final CharSequence prefix, final CharSequence suffix, final CharSequence delimiter,
final FailableBiConsumer<Appendable, T, IOException> appender) {
this.prefix = nonNull(prefix);
this.suffix = nonNull(suffix);
this.delimiter = nonNull(delimiter);
this.appender = appender != null ? appender : (a, e) -> a.append(String.valueOf(e));
}
/**
* Joins stringified objects from the given Iterable into a StringBuilder.
*
* @param stringBuilder The target.
* @param elements The source.
* @return The given StringBuilder.
*/
public StringBuilder join(final StringBuilder stringBuilder, final Iterable<T> elements) {
return joinI(stringBuilder, prefix, suffix, delimiter, appender, elements);
}
/**
* Joins stringified objects from the given array into a StringBuilder.
*
* @param stringBuilder The target.
* @param elements The source.
* @return the given target StringBuilder.
*/
public StringBuilder join(final StringBuilder stringBuilder, @SuppressWarnings("unchecked") final T... elements) {
return joinSB(stringBuilder, prefix, suffix, delimiter, appender, elements);
}
/**
* Joins stringified objects from the given Iterable into an Appendable.
*
* @param <A> the Appendable type.
* @param appendable The target.
* @param elements The source.
* @return The given StringBuilder.
* @throws IOException If an I/O error occurs
*/
public <A extends Appendable> A joinA(final A appendable, final Iterable<T> elements) throws IOException {
return joinIterable(appendable, prefix, suffix, delimiter, appender, elements);
}
/**
* Joins stringified objects from the given array into an Appendable.
*
* @param <A> the Appendable type.
* @param appendable The target.
* @param elements The source.
* @return The given StringBuilder.
* @throws IOException If an I/O error occurs
*/
public <A extends Appendable> A joinA(final A appendable, @SuppressWarnings("unchecked") final T... elements) throws IOException {
return joinA(appendable, prefix, suffix, delimiter, appender, elements);
}
}
|
Builder
|
java
|
mockito__mockito
|
mockito-core/src/main/java/org/mockito/internal/util/reflection/FieldInitializationReport.java
|
{
"start": 1525,
"end": 1735
}
|
class ____ the actual instance in the field.
*
* @return Class of the instance
*/
public Class<?> fieldClass() {
return fieldInstance != null ? fieldInstance.getClass() : null;
}
}
|
of
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/bytecode/enhance/internal/bytebuddy/EnhancerCacheProvider.java
|
{
"start": 315,
"end": 365
}
|
class ____ is currently being enhanced.
*/
final
|
that
|
java
|
spring-projects__spring-security
|
web/src/main/java/org/springframework/security/web/authentication/preauth/x509/SubjectDnX509PrincipalExtractor.java
|
{
"start": 1974,
"end": 4232
}
|
class ____ implements X509PrincipalExtractor, MessageSourceAware {
protected final Log logger = LogFactory.getLog(getClass());
protected MessageSourceAccessor messages = SpringSecurityMessageSource.getAccessor();
private Pattern subjectDnPattern;
@SuppressWarnings("NullAway") // Dataflow analysis limitation
public SubjectDnX509PrincipalExtractor() {
setSubjectDnRegex("CN=(.*?)(?:,|$)");
}
@Override
public Object extractPrincipal(X509Certificate clientCert) {
// String subjectDN = clientCert.getSubjectX500Principal().getName();
String subjectDN = clientCert.getSubjectDN().getName();
this.logger.debug(LogMessage.format("Subject DN is '%s'", subjectDN));
Matcher matcher = this.subjectDnPattern.matcher(subjectDN);
if (!matcher.find()) {
throw new BadCredentialsException(this.messages.getMessage("SubjectDnX509PrincipalExtractor.noMatching",
new Object[] { subjectDN }, "No matching pattern was found in subject DN: {0}"));
}
Assert.isTrue(matcher.groupCount() == 1, "Regular expression must contain a single group ");
String username = matcher.group(1);
this.logger.debug(LogMessage.format("Extracted Principal name is '%s'", username));
return username;
}
/**
* Sets the regular expression which will by used to extract the user name from the
* certificate's Subject DN.
* <p>
* It should contain a single group; for example the default expression
* "CN=(.*?)(?:,|$)" matches the common name field. So "CN=Jimi Hendrix, OU=..." will
* give a user name of "Jimi Hendrix".
* <p>
* The matches are case insensitive. So "emailAddress=(.?)," will match
* "EMAILADDRESS=jimi@hendrix.org, CN=..." giving a user name "jimi@hendrix.org"
* @param subjectDnRegex the regular expression to find in the subject
*/
public void setSubjectDnRegex(String subjectDnRegex) {
Assert.hasText(subjectDnRegex, "Regular expression may not be null or empty");
this.subjectDnPattern = Pattern.compile(subjectDnRegex, Pattern.CASE_INSENSITIVE);
}
/**
* @since 5.5
*/
@Override
public void setMessageSource(MessageSource messageSource) {
Assert.notNull(messageSource, "messageSource cannot be null");
this.messages = new MessageSourceAccessor(messageSource);
}
}
|
SubjectDnX509PrincipalExtractor
|
java
|
spring-projects__spring-framework
|
spring-context/src/test/java/org/springframework/context/annotation/CommonAnnotationBeanPostProcessorTests.java
|
{
"start": 21920,
"end": 23657
}
|
class ____ extends AnnotatedInitDestroyBean {
public boolean init2Called = false;
public boolean init3Called = false;
public boolean destroy2Called = false;
public boolean destroy3Called = false;
@Resource
private TestBean testBean;
private TestBean testBean2;
@PostConstruct
protected void init2() {
if (this.testBean == null || this.testBean2 == null) {
throw new IllegalStateException("Resources not injected");
}
if (!this.initCalled) {
throw new IllegalStateException("Superclass init method not called yet");
}
if (this.init2Called) {
throw new IllegalStateException("Already called");
}
this.init2Called = true;
}
@PostConstruct
private void init() {
if (this.init3Called) {
throw new IllegalStateException("Already called");
}
this.init3Called = true;
}
@PreDestroy
protected void destroy2() {
if (this.destroyCalled) {
throw new IllegalStateException("Superclass destroy called too soon");
}
if (this.destroy2Called) {
throw new IllegalStateException("Already called");
}
this.destroy2Called = true;
}
@PreDestroy
private void destroy() {
if (this.destroyCalled) {
throw new IllegalStateException("Superclass destroy called too soon");
}
if (this.destroy3Called) {
throw new IllegalStateException("Already called");
}
this.destroy3Called = true;
}
@Resource
public void setTestBean2(TestBean testBean2) {
if (this.testBean2 != null) {
throw new IllegalStateException("Already called");
}
this.testBean2 = testBean2;
}
public TestBean getTestBean() {
return testBean;
}
public TestBean getTestBean2() {
return testBean2;
}
}
static
|
ResourceInjectionBean
|
java
|
quarkusio__quarkus
|
integration-tests/openapi/src/main/java/io/quarkus/it/openapi/spring/InputStreamResource.java
|
{
"start": 763,
"end": 2855
}
|
class ____ {
@GetMapping("/justInputStream/{fileName}")
public InputStream justInputStream(@PathVariable("fileName") String filename) {
return toInputStream(filename);
}
@PostMapping("/justInputStream")
public InputStream justInputStream(InputStream file) {
return file;
}
@GetMapping("/responseEntityInputStream/{fileName}")
public ResponseEntity<InputStream> restResponseInputStream(@PathVariable("fileName") String filename) {
return ResponseEntity.ok(toInputStream(filename));
}
@PostMapping("/responseEntityInputStream")
public ResponseEntity<InputStream> restResponseInputStream(InputStream file) {
return ResponseEntity.ok(file);
}
@GetMapping("/optionalInputStream/{fileName}")
public Optional<InputStream> optionalInputStream(@PathVariable("fileName") String filename) {
return Optional.of(toInputStream(filename));
}
@PostMapping("/optionalInputStream")
public Optional<InputStream> optionalInputStream(Optional<InputStream> file) {
return file;
}
@GetMapping("/uniInputStream/{fileName}")
public Uni<InputStream> uniInputStream(@PathVariable("fileName") String filename) {
return Uni.createFrom().item(toInputStream(filename));
}
@GetMapping("/completionStageInputStream/{fileName}")
public CompletionStage<InputStream> completionStageInputStream(@PathVariable("fileName") String filename) {
return CompletableFuture.completedStage(toInputStream(filename));
}
@GetMapping("/completedFutureInputStream/{fileName}")
public CompletableFuture<InputStream> completedFutureInputStream(@PathVariable("fileName") String filename) {
return CompletableFuture.completedFuture(toInputStream(filename));
}
private InputStream toInputStream(String filename) {
try {
String f = URLDecoder.decode(filename, "UTF-8");
return Files.newInputStream(Paths.get(f));
} catch (IOException ex) {
throw new RuntimeException(ex);
}
}
}
|
InputStreamResource
|
java
|
redisson__redisson
|
redisson-spring-data/redisson-spring-data-24/src/main/java/org/redisson/spring/data/connection/RedissonConnection.java
|
{
"start": 2466,
"end": 86470
}
|
class ____ extends AbstractRedisConnection {
private boolean closed;
protected final Redisson redisson;
private boolean filterOkResponses = false;
CommandAsyncExecutor executorService;
private RedissonSubscription subscription;
public RedissonConnection(RedissonClient redisson) {
super();
this.redisson = (Redisson) redisson;
executorService = this.redisson.getCommandExecutor();
}
public RedissonConnection(RedissonClient redisson, boolean filterOkResponses) {
super();
this.redisson = (Redisson) redisson;
this.filterOkResponses = filterOkResponses;
executorService = this.redisson.getCommandExecutor();
}
public boolean isFilterOkResponses() {
return filterOkResponses;
}
public void setFilterOkResponses(boolean filterOkResponses) {
this.filterOkResponses = filterOkResponses;
}
@Override
public void close() throws DataAccessException {
super.close();
if (isQueueing()) {
CommandBatchService es = (CommandBatchService) executorService;
if (!es.isExecuted()) {
discard();
}
}
closed = true;
}
@Override
public boolean isClosed() {
return closed;
}
@Override
public Object getNativeConnection() {
return redisson;
}
@Override
public boolean isQueueing() {
if (executorService instanceof CommandBatchService) {
CommandBatchService es = (CommandBatchService) executorService;
return es.getOptions().getExecutionMode() == ExecutionMode.REDIS_WRITE_ATOMIC;
}
return false;
}
@Override
public boolean isPipelined() {
if (executorService instanceof CommandBatchService) {
CommandBatchService es = (CommandBatchService) executorService;
return es.getOptions().getExecutionMode() == ExecutionMode.IN_MEMORY || es.getOptions().getExecutionMode() == ExecutionMode.IN_MEMORY_ATOMIC;
}
return false;
}
public boolean isPipelinedAtomic() {
if (executorService instanceof CommandBatchService) {
CommandBatchService es = (CommandBatchService) executorService;
return es.getOptions().getExecutionMode() == ExecutionMode.IN_MEMORY_ATOMIC;
}
return false;
}
@Override
public void openPipeline() {
BatchOptions options = BatchOptions.defaults()
.executionMode(ExecutionMode.IN_MEMORY);
this.executorService = executorService.createCommandBatchService(options);
}
@Override
public List<Object> closePipeline() throws RedisPipelineException {
if (isPipelined()) {
CommandBatchService es = (CommandBatchService) executorService;
try {
BatchResult<?> result = es.execute();
filterResults(result);
if (isPipelinedAtomic()) {
return Arrays.<Object>asList((List<Object>) result.getResponses());
}
return (List<Object>) result.getResponses();
} catch (Exception ex) {
throw new RedisPipelineException(ex);
} finally {
resetConnection();
}
}
return Collections.emptyList();
}
@Override
public Object execute(String command, byte[]... args) {
for (Method method : this.getClass().getDeclaredMethods()) {
if (method.getName().equalsIgnoreCase(command)
&& Modifier.isPublic(method.getModifiers())
&& (method.getParameterTypes().length == args.length)) {
try {
Object t = execute(method, args);
if (t instanceof String) {
return ((String) t).getBytes();
}
return t;
} catch (IllegalArgumentException e) {
if (isPipelined()) {
throw new RedisPipelineException(e);
}
throw new InvalidDataAccessApiUsageException(e.getMessage(), e);
}
}
}
throw new UnsupportedOperationException();
}
private Object execute(Method method, byte[]... args) {
if (method.getParameterTypes().length > 0 && method.getParameterTypes()[0] == byte[][].class) {
return ReflectionUtils.invokeMethod(method, this, args);
}
if (args == null) {
return ReflectionUtils.invokeMethod(method, this);
}
return ReflectionUtils.invokeMethod(method, this, Arrays.asList(args).toArray());
}
<V> V syncFuture(RFuture<V> future) {
try {
return executorService.get(future);
} catch (Exception ex) {
throw transform(ex);
}
}
protected RuntimeException transform(Exception ex) {
DataAccessException exception = RedissonConnectionFactory.EXCEPTION_TRANSLATION.translate(ex);
if (exception != null) {
return exception;
}
return new RedisSystemException(ex.getMessage(), ex);
}
    @Override
    public Boolean exists(byte[] key) {
        // EXISTS <key>
        return read(key, StringCodec.INSTANCE, RedisCommands.EXISTS, key);
    }
    @Override
    public Long del(byte[]... keys) {
        // DEL over all keys; routed by the first key's slot
        return write(keys[0], LongCodec.INSTANCE, RedisCommands.DEL, Arrays.asList(keys).toArray());
    }
    @Override
    public Long unlink(byte[]... keys) {
        // UNLINK: non-blocking variant of DEL, same routing as del()
        return write(keys[0], LongCodec.INSTANCE, RedisCommands.UNLINK, Arrays.asList(keys).toArray());
    }
    // TYPE reply string is converted to Spring's DataType enum
    private static final RedisStrictCommand<DataType> TYPE = new RedisStrictCommand<DataType>("TYPE", new DataTypeConvertor());
    @Override
    public DataType type(byte[] key) {
        return read(key, StringCodec.INSTANCE, TYPE, key);
    }
    private static final RedisStrictCommand<Set<byte[]>> KEYS = new RedisStrictCommand<Set<byte[]>>("KEYS", new ObjectSetReplayDecoder<byte[]>());
    /**
     * KEYS <pattern>. Inside a transaction the command is sent to a single
     * node; otherwise it is broadcast to all nodes and the per-node result
     * sets are merged.
     */
    @Override
    public Set<byte[]> keys(byte[] pattern) {
        if (isQueueing()) {
            return read(null, ByteArrayCodec.INSTANCE, KEYS, pattern);
        }
        List<CompletableFuture<Set<byte[]>>> futures = executorService.readAllAsync(ByteArrayCodec.INSTANCE, KEYS, pattern);
        CompletableFuture<Void> ff = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
        CompletableFuture<Set<byte[]>> future = ff.thenApply(r -> {
            // allOf has completed, so getNow() always sees the real per-node result
            return futures.stream().flatMap(f -> f.getNow(new HashSet<>()).stream()).collect(Collectors.toSet());
        }).toCompletableFuture();
        return sync(new CompletableFutureWrapper<>(future));
    }
@Override
public Cursor<byte[]> scan(ScanOptions options) {
return new ScanCursor<byte[]>(0, options) {
private RedisClient client;
private Iterator<MasterSlaveEntry> entries = executorService.getConnectionManager().getEntrySet().iterator();
private MasterSlaveEntry entry = entries.next();
@Override
protected ScanIteration<byte[]> doScan(long cursorId, ScanOptions options) {
if (isQueueing() || isPipelined()) {
throw new UnsupportedOperationException("'SSCAN' cannot be called in pipeline / transaction mode.");
}
if (entry == null) {
return null;
}
List<Object> args = new ArrayList<Object>();
if (cursorId == 101010101010101010L) {
cursorId = 0;
}
args.add(Long.toUnsignedString(cursorId));
if (options.getPattern() != null) {
args.add("MATCH");
args.add(options.getPattern());
}
if (options.getCount() != null) {
args.add("COUNT");
args.add(options.getCount());
}
RFuture<ListScanResult<byte[]>> f = executorService.readAsync(client, entry, ByteArrayCodec.INSTANCE, RedisCommands.SCAN, args.toArray());
ListScanResult<byte[]> res = syncFuture(f);
String pos = res.getPos();
client = res.getRedisClient();
if ("0".equals(pos)) {
if (entries.hasNext()) {
pos = "101010101010101010";
entry = entries.next();
client = null;
} else {
entry = null;
}
}
return new ScanIteration<byte[]>(Long.parseUnsignedLong(pos), res.getValues());
}
}.open();
}
    /**
     * RANDOMKEY. Outside a transaction the request is sent to a randomly
     * chosen node.
     */
    @Override
    public byte[] randomKey() {
        if (isQueueing()) {
            return read(null, ByteArrayCodec.INSTANCE, RedisCommands.RANDOM_KEY);
        }
        RFuture<byte[]> f = executorService.readRandomAsync(ByteArrayCodec.INSTANCE, RedisCommands.RANDOM_KEY);
        return sync(f);
    }
    @Override
    public void rename(byte[] oldName, byte[] newName) {
        // RENAME <old> <new>; routed by the old key
        write(oldName, StringCodec.INSTANCE, RedisCommands.RENAME, oldName, newName);
    }
    @Override
    public Boolean renameNX(byte[] oldName, byte[] newName) {
        // RENAMENX: rename only when the target key does not exist
        return write(oldName, StringCodec.INSTANCE, RedisCommands.RENAMENX, oldName, newName);
    }
    private static final RedisStrictCommand<Boolean> EXPIRE = new RedisStrictCommand<Boolean>("EXPIRE", new BooleanReplayConvertor());
    @Override
    public Boolean expire(byte[] key, long seconds) {
        return write(key, StringCodec.INSTANCE, EXPIRE, key, seconds);
    }
    @Override
    public Boolean pExpire(byte[] key, long millis) {
        // PEXPIRE: millisecond-resolution expiration
        return write(key, StringCodec.INSTANCE, RedisCommands.PEXPIRE, key, millis);
    }
    private static final RedisStrictCommand<Boolean> EXPIREAT = new RedisStrictCommand<Boolean>("EXPIREAT", new BooleanReplayConvertor());
    @Override
    public Boolean expireAt(byte[] key, long unixTime) {
        return write(key, StringCodec.INSTANCE, EXPIREAT, key, unixTime);
    }
    @Override
    public Boolean pExpireAt(byte[] key, long unixTimeInMillis) {
        return write(key, StringCodec.INSTANCE, RedisCommands.PEXPIREAT, key, unixTimeInMillis);
    }
    @Override
    public Boolean persist(byte[] key) {
        // PERSIST: remove any expiration from the key
        return write(key, StringCodec.INSTANCE, RedisCommands.PERSIST, key);
    }
    @Override
    public Boolean move(byte[] key, int dbIndex) {
        return write(key, StringCodec.INSTANCE, RedisCommands.MOVE, key, dbIndex);
    }
    private static final RedisStrictCommand<Long> TTL = new RedisStrictCommand<Long>("TTL");
    @Override
    public Long ttl(byte[] key) {
        return read(key, StringCodec.INSTANCE, TTL, key);
    }
    /**
     * Converts an async invocation into this connection's calling convention:
     * in pipeline mode nothing is awaited (results are gathered later), in
     * transaction mode only the "command sent" stage is awaited, and in
     * normal mode the future is awaited for its value.
     */
    protected <T> T sync(RFuture<T> f) {
        if (isPipelined()) {
            return null;
        }
        if (isQueueing()) {
            ((BatchPromise)f.toCompletableFuture()).getSentPromise().join();
            return null;
        }
        return syncFuture(f);
    }
    @Override
    public Long ttl(byte[] key, TimeUnit timeUnit) {
        // TTL reply (seconds) converted into the requested unit
        return read(key, StringCodec.INSTANCE, new RedisStrictCommand<Long>("TTL", new SecondsConvertor(timeUnit, TimeUnit.SECONDS)), key);
    }
    @Override
    public Long pTtl(byte[] key) {
        return read(key, StringCodec.INSTANCE, RedisCommands.PTTL, key);
    }
    @Override
    public Long pTtl(byte[] key, TimeUnit timeUnit) {
        // PTTL reply (milliseconds) converted into the requested unit
        return read(key, StringCodec.INSTANCE, new RedisStrictCommand<Long>("PTTL", new SecondsConvertor(timeUnit, TimeUnit.MILLISECONDS)), key);
    }
@Override
public List<byte[]> sort(byte[] key, SortParameters sortParams) {
List<Object> params = new ArrayList<Object>();
params.add(key);
if (sortParams != null) {
if (sortParams.getByPattern() != null) {
params.add("BY");
params.add(sortParams.getByPattern());
}
if (sortParams.getLimit() != null) {
params.add("LIMIT");
if (sortParams.getLimit().getStart() != -1) {
params.add(sortParams.getLimit().getStart());
}
if (sortParams.getLimit().getCount() != -1) {
params.add(sortParams.getLimit().getCount());
}
}
if (sortParams.getGetPattern() != null) {
for (byte[] pattern : sortParams.getGetPattern()) {
params.add("GET");
params.add(pattern);
}
}
if (sortParams.getOrder() != null) {
params.add(sortParams.getOrder());
}
Boolean isAlpha = sortParams.isAlphabetic();
if (isAlpha != null && isAlpha) {
params.add("ALPHA");
}
}
return read(key, ByteArrayCodec.INSTANCE, RedisCommands.SORT_LIST, params.toArray());
}
private static final RedisCommand<Long> SORT_TO = new RedisCommand<Long>("SORT");
@Override
public Long sort(byte[] key, SortParameters sortParams, byte[] storeKey) {
List<Object> params = new ArrayList<Object>();
params.add(key);
if (sortParams != null) {
if (sortParams.getByPattern() != null) {
params.add("BY");
params.add(sortParams.getByPattern());
}
if (sortParams.getLimit() != null) {
params.add("LIMIT");
if (sortParams.getLimit().getStart() != -1) {
params.add(sortParams.getLimit().getStart());
}
if (sortParams.getLimit().getCount() != -1) {
params.add(sortParams.getLimit().getCount());
}
}
if (sortParams.getGetPattern() != null) {
for (byte[] pattern : sortParams.getGetPattern()) {
params.add("GET");
params.add(pattern);
}
}
if (sortParams.getOrder() != null) {
params.add(sortParams.getOrder());
}
Boolean isAlpha = sortParams.isAlphabetic();
if (isAlpha != null && isAlpha) {
params.add("ALPHA");
}
}
params.add("STORE");
params.add(storeKey);
return read(key, ByteArrayCodec.INSTANCE, SORT_TO, params.toArray());
}
    @Override
    public byte[] dump(byte[] key) {
        // DUMP: serialized (RDB format) value of the key
        return read(key, ByteArrayCodec.INSTANCE, RedisCommands.DUMP, key);
    }
    @Override
    public void restore(byte[] key, long ttlInMillis, byte[] serializedValue) {
        // RESTORE <key> <ttl-ms> <serialized-value>
        write(key, StringCodec.INSTANCE, RedisCommands.RESTORE, key, ttlInMillis, serializedValue);
    }
    @Override
    public byte[] get(byte[] key) {
        return read(key, ByteArrayCodec.INSTANCE, RedisCommands.GET, key);
    }
    @Override
    public byte[] getSet(byte[] key, byte[] value) {
        // GETSET: atomically set and return the previous value
        return write(key, ByteArrayCodec.INSTANCE, RedisCommands.GETSET, key, value);
    }
    private static final RedisCommand<List<Object>> MGET = new RedisCommand<List<Object>>("MGET", new ObjectListReplayDecoder<Object>());
    @Override
    public List<byte[]> mGet(byte[]... keys) {
        // MGET; routed by the first key's slot
        return read(keys[0], ByteArrayCodec.INSTANCE, MGET, Arrays.asList(keys).toArray());
    }
    // null-safe: a nil reply (condition not met) maps to false rather than null
    private static final RedisCommand<Boolean> SET = new RedisCommand<>("SET", new BooleanNullSafeReplayConvertor());
    @Override
    public Boolean set(byte[] key, byte[] value) {
        return write(key, StringCodec.INSTANCE, SET, key, value);
    }
    /**
     * SET with optional expiration and existence condition (NX/XX).
     * Millisecond precision (PX) is used for all non-persistent expirations.
     * NOTE(review): keep-TTL expirations are not special-cased here —
     * confirm against the Spring Data Redis version in use.
     */
    @Override
    public Boolean set(byte[] key, byte[] value, Expiration expiration, SetOption option) {
        if (expiration == null) {
            return set(key, value);
        } else if (expiration.isPersistent()) {
            if (option == null || option == SetOption.UPSERT) {
                return set(key, value);
            }
            if (option == SetOption.SET_IF_ABSENT) {
                return write(key, StringCodec.INSTANCE, SET, key, value, "NX");
            }
            if (option == SetOption.SET_IF_PRESENT) {
                return write(key, StringCodec.INSTANCE, SET, key, value, "XX");
            }
        } else {
            if (option == null || option == SetOption.UPSERT) {
                return write(key, StringCodec.INSTANCE, SET, key, value, "PX", expiration.getExpirationTimeInMilliseconds());
            }
            if (option == SetOption.SET_IF_ABSENT) {
                return write(key, StringCodec.INSTANCE, SET, key, value, "PX", expiration.getExpirationTimeInMilliseconds(), "NX");
            }
            if (option == SetOption.SET_IF_PRESENT) {
                return write(key, StringCodec.INSTANCE, SET, key, value, "PX", expiration.getExpirationTimeInMilliseconds(), "XX");
            }
        }
        // unreachable for known SetOption values
        throw new IllegalArgumentException();
    }
    @Override
    public Boolean setNX(byte[] key, byte[] value) {
        return write(key, StringCodec.INSTANCE, RedisCommands.SETNX, key, value);
    }
    private static final RedisCommand<Boolean> SETEX = new RedisCommand<Boolean>("SETEX", new BooleanReplayConvertor());
    @Override
    public Boolean setEx(byte[] key, long seconds, byte[] value) {
        return write(key, StringCodec.INSTANCE, SETEX, key, seconds, value);
    }
    private static final RedisCommand<Boolean> PSETEX = new RedisCommand<Boolean>("PSETEX", new BooleanReplayConvertor());
    @Override
    public Boolean pSetEx(byte[] key, long milliseconds, byte[] value) {
        return write(key, StringCodec.INSTANCE, PSETEX, key, milliseconds, value);
    }
    private static final RedisCommand<Boolean> MSET = new RedisCommand<Boolean>("MSET", new BooleanReplayConvertor());
    @Override
    public Boolean mSet(Map<byte[], byte[]> tuple) {
        // MSET k1 v1 k2 v2 ...; routed by the first key in iteration order
        List<byte[]> params = convert(tuple);
        return write(tuple.keySet().iterator().next(), StringCodec.INSTANCE, MSET, params.toArray());
    }
protected List<byte[]> convert(Map<byte[], byte[]> tuple) {
List<byte[]> params = new ArrayList<byte[]>(tuple.size()*2);
for (Entry<byte[], byte[]> entry : tuple.entrySet()) {
params.add(entry.getKey());
params.add(entry.getValue());
}
return params;
}
    @Override
    public Boolean mSetNX(Map<byte[], byte[]> tuple) {
        // MSETNX: only sets when none of the keys exist
        List<byte[]> params = convert(tuple);
        return write(tuple.keySet().iterator().next(), StringCodec.INSTANCE, RedisCommands.MSETNX, params.toArray());
    }
    @Override
    public Long incr(byte[] key) {
        return write(key, StringCodec.INSTANCE, RedisCommands.INCR, key);
    }
    @Override
    public Long incrBy(byte[] key, long value) {
        return write(key, StringCodec.INSTANCE, RedisCommands.INCRBY, key, value);
    }
    @Override
    public Double incrBy(byte[] key, double value) {
        // INCRBYFLOAT; valueOf(...).toPlainString() renders "0.1", not scientific notation
        return write(key, StringCodec.INSTANCE, RedisCommands.INCRBYFLOAT, key, BigDecimal.valueOf(value).toPlainString());
    }
    @Override
    public Long decr(byte[] key) {
        return write(key, StringCodec.INSTANCE, RedisCommands.DECR, key);
    }
    private static final RedisStrictCommand<Long> DECRBY = new RedisStrictCommand<Long>("DECRBY");
    @Override
    public Long decrBy(byte[] key, long value) {
        return write(key, StringCodec.INSTANCE, DECRBY, key, value);
    }
    private static final RedisStrictCommand<Long> APPEND = new RedisStrictCommand<Long>("APPEND");
    @Override
    public Long append(byte[] key, byte[] value) {
        // returns the string length after the append
        return write(key, StringCodec.INSTANCE, APPEND, key, value);
    }
    private static final RedisCommand<Object> GETRANGE = new RedisCommand<Object>("GETRANGE");
    @Override
    public byte[] getRange(byte[] key, long begin, long end) {
        // inclusive [begin, end] substring
        return read(key, ByteArrayCodec.INSTANCE, GETRANGE, key, begin, end);
    }
    private static final RedisCommand<Void> SETRANGE = new RedisCommand<Void>("SETRANGE", new VoidReplayConvertor());
    @Override
    public void setRange(byte[] key, byte[] value, long offset) {
        // note the Redis argument order: key, offset, value
        write(key, ByteArrayCodec.INSTANCE, SETRANGE, key, offset, value);
    }
    @Override
    public Boolean getBit(byte[] key, long offset) {
        return read(key, StringCodec.INSTANCE, RedisCommands.GETBIT, key, offset);
    }
    @Override
    public Boolean setBit(byte[] key, long offset, boolean value) {
        // SETBIT expects 0/1; returns the previous bit value
        return write(key, StringCodec.INSTANCE, RedisCommands.SETBIT, key, offset, value ? 1 : 0);
    }
    @Override
    public Long bitCount(byte[] key) {
        return read(key, StringCodec.INSTANCE, RedisCommands.BITCOUNT, key);
    }
    @Override
    public Long bitCount(byte[] key, long begin, long end) {
        // byte-range variant of BITCOUNT
        return read(key, StringCodec.INSTANCE, RedisCommands.BITCOUNT, key, begin, end);
    }
    private static final RedisStrictCommand<Long> BITOP = new RedisStrictCommand<Long>("BITOP");
    /**
     * BITOP <op> <destination> <key...>. Redis limits NOT to exactly one
     * source key, hence the explicit guard.
     */
    @Override
    public Long bitOp(BitOperation op, byte[] destination, byte[]... keys) {
        if (op == BitOperation.NOT && keys.length > 1) {
            throw new UnsupportedOperationException("NOT operation doesn't support more than single source key");
        }
        List<Object> params = new ArrayList<Object>(keys.length + 2);
        params.add(op);
        params.add(destination);
        params.addAll(Arrays.asList(keys));
        return write(keys[0], StringCodec.INSTANCE, BITOP, params.toArray());
    }
    @Override
    public Long strLen(byte[] key) {
        return read(key, StringCodec.INSTANCE, RedisCommands.STRLEN, key);
    }
    private static final RedisStrictCommand<Long> RPUSH = new RedisStrictCommand<Long>("RPUSH");
    @Override
    public Long rPush(byte[] key, byte[]... values) {
        List<Object> args = new ArrayList<Object>(values.length + 1);
        args.add(key);
        args.addAll(Arrays.asList(values));
        return write(key, StringCodec.INSTANCE, RPUSH, args.toArray());
    }
    private static final RedisStrictCommand<Long> LPUSH = new RedisStrictCommand<Long>("LPUSH");
    @Override
    public Long lPush(byte[] key, byte[]... values) {
        List<Object> args = new ArrayList<Object>(values.length + 1);
        args.add(key);
        args.addAll(Arrays.asList(values));
        return write(key, StringCodec.INSTANCE, LPUSH, args.toArray());
    }
    private static final RedisStrictCommand<Long> RPUSHX = new RedisStrictCommand<Long>("RPUSHX");
    @Override
    public Long rPushX(byte[] key, byte[] value) {
        // push only when the list already exists
        return write(key, StringCodec.INSTANCE, RPUSHX, key, value);
    }
    private static final RedisStrictCommand<Long> LPUSHX = new RedisStrictCommand<Long>("LPUSHX");
    @Override
    public Long lPushX(byte[] key, byte[] value) {
        return write(key, StringCodec.INSTANCE, LPUSHX, key, value);
    }
    private static final RedisStrictCommand<Long> LLEN = new RedisStrictCommand<Long>("LLEN");
    @Override
    public Long lLen(byte[] key) {
        return read(key, StringCodec.INSTANCE, LLEN, key);
    }
    @Override
    public List<byte[]> lRange(byte[] key, long start, long end) {
        // LRANGE command constant is declared elsewhere in this class
        return read(key, ByteArrayCodec.INSTANCE, LRANGE, key, start, end);
    }
    @Override
    public void lTrim(byte[] key, long start, long end) {
        write(key, StringCodec.INSTANCE, RedisCommands.LTRIM, key, start, end);
    }
    @Override
    public byte[] lIndex(byte[] key, long index) {
        return read(key, ByteArrayCodec.INSTANCE, RedisCommands.LINDEX, key, index);
    }
    private static final RedisStrictCommand<Long> LINSERT = new RedisStrictCommand<Long>("LINSERT");
    @Override
    public Long lInsert(byte[] key, Position where, byte[] pivot, byte[] value) {
        // LINSERT <key> BEFORE|AFTER <pivot> <value>
        return write(key, StringCodec.INSTANCE, LINSERT, key, where, pivot, value);
    }
    // Commands whose plain status replies are dropped from pipeline/transaction
    // results when OK-response filtering is enabled (see indexCommand below).
    private final List<String> commandsToRemove = Arrays.asList("SET",
            "RESTORE", "LTRIM", "SETEX", "SETRANGE", "FLUSHDB", "LSET", "MSET", "HMSET", "RENAME");
    // positions (within the queued batch) of replies to filter out
    private final List<Integer> indexToRemove = new ArrayList<Integer>();
    // running position of the last queued command; -1 = nothing queued yet
    private int index = -1;
    /**
     * Routes a writing command to the master owning {@code key}'s slot and
     * applies the pipeline/transaction sync convention.
     */
    <T> T write(byte[] key, Codec codec, RedisCommand<?> command, Object... params) {
        RFuture<T> f = executorService.writeAsync(key, codec, command, params);
        indexCommand(command);
        return sync(f);
    }
    /**
     * Tracks the position of a queued command in pipeline/transaction mode,
     * recording it for removal when its status reply should be filtered.
     * NOTE(review): filterOkResponses is a field declared elsewhere in this
     * class.
     */
    protected void indexCommand(RedisCommand<?> command) {
        if (isQueueing() || isPipelined()) {
            index++;
            if (filterOkResponses && commandsToRemove.contains(command.getName())) {
                indexToRemove.add(index);
            }
        }
    }
    /**
     * Routes a reading command (may be served by a replica, depending on the
     * configured read mode) and applies the same sync convention as write().
     */
    <T> T read(byte[] key, Codec codec, RedisCommand<?> command, Object... params) {
        RFuture<T> f = executorService.readAsync(key, codec, command, params);
        indexCommand(command);
        return sync(f);
    }
    @Override
    public void lSet(byte[] key, long index, byte[] value) {
        write(key, StringCodec.INSTANCE, RedisCommands.LSET, key, index, value);
    }
    private static final RedisStrictCommand<Long> LREM = new RedisStrictCommand<Long>("LREM");
    @Override
    public Long lRem(byte[] key, long count, byte[] value) {
        // count > 0: head->tail, count < 0: tail->head, 0: all occurrences
        return write(key, StringCodec.INSTANCE, LREM, key, count, value);
    }
    @Override
    public byte[] lPop(byte[] key) {
        return write(key, ByteArrayCodec.INSTANCE, RedisCommands.LPOP, key);
    }
    @Override
    public byte[] rPop(byte[] key) {
        return write(key, ByteArrayCodec.INSTANCE, RedisCommands.RPOP, key);
    }
    /**
     * BLPOP: blocking left pop; timeout goes last in the Redis argument list.
     */
    @Override
    public List<byte[]> bLPop(int timeout, byte[]... keys) {
        List<Object> params = new ArrayList<Object>(keys.length + 1);
        params.addAll(Arrays.asList(keys));
        params.add(timeout);
        return write(keys[0], ByteArrayCodec.INSTANCE, RedisCommands.BLPOP, params.toArray());
    }
    @Override
    public List<byte[]> bRPop(int timeout, byte[]... keys) {
        List<Object> params = new ArrayList<Object>(keys.length + 1);
        params.addAll(Arrays.asList(keys));
        params.add(timeout);
        return write(keys[0], ByteArrayCodec.INSTANCE, RedisCommands.BRPOP, params.toArray());
    }
    @Override
    public byte[] rPopLPush(byte[] srcKey, byte[] dstKey) {
        return write(srcKey, ByteArrayCodec.INSTANCE, RedisCommands.RPOPLPUSH, srcKey, dstKey);
    }
    @Override
    public byte[] bRPopLPush(int timeout, byte[] srcKey, byte[] dstKey) {
        return write(srcKey, ByteArrayCodec.INSTANCE, RedisCommands.BRPOPLPUSH, srcKey, dstKey, timeout);
    }
    private static final RedisCommand<List<Long>> LPOS = new RedisCommand<>("LPOS", new ObjectListReplayDecoder<>());
    /**
     * LPOS <key> <element> [RANK rank] [COUNT count]. Without COUNT the
     * server replies with a single integer (or nil); with COUNT it replies
     * with an array — hence the reply-shape normalization below.
     */
    @Override
    public List<Long> lPos(byte[] key, byte[] element, Integer rank, Integer count) {
        List<Object> args = new ArrayList<>();
        args.add(key);
        args.add(element);
        if (rank != null) {
            args.add("RANK");
            args.add(rank);
        }
        if (count != null) {
            args.add("COUNT");
            args.add(count);
        }
        Object read = read(key, ByteArrayCodec.INSTANCE, LPOS, args.toArray());
        if (read == null) {
            // element not found
            return Collections.emptyList();
        } else if (read instanceof Long) {
            // single-integer reply (no COUNT)
            return Collections.singletonList((Long) read);
        } else {
            return (List<Long>) read;
        }
    }
    private static final RedisCommand<Long> SADD = new RedisCommand<Long>("SADD");
    @Override
    public Long sAdd(byte[] key, byte[]... values) {
        List<Object> args = new ArrayList<Object>(values.length + 1);
        args.add(key);
        args.addAll(Arrays.asList(values));
        return write(key, StringCodec.INSTANCE, SADD, args.toArray());
    }
    private static final RedisStrictCommand<Long> SREM = new RedisStrictCommand<Long>("SREM");
    @Override
    public Long sRem(byte[] key, byte[]... values) {
        List<Object> args = new ArrayList<Object>(values.length + 1);
        args.add(key);
        args.addAll(Arrays.asList(values));
        return write(key, StringCodec.INSTANCE, SREM, args.toArray());
    }
    @Override
    public byte[] sPop(byte[] key) {
        // SPOP without COUNT: single random member (or null)
        return write(key, ByteArrayCodec.INSTANCE, RedisCommands.SPOP_SINGLE, key);
    }
    private static final RedisCommand<List<Object>> SPOP = new RedisCommand<List<Object>>("SPOP", new ObjectListReplayDecoder<Object>());
    @Override
    public List<byte[]> sPop(byte[] key, long count) {
        // SPOP with COUNT: array reply
        return write(key, ByteArrayCodec.INSTANCE, SPOP, key, count);
    }
    @Override
    public Boolean sMove(byte[] srcKey, byte[] destKey, byte[] value) {
        return write(srcKey, StringCodec.INSTANCE, RedisCommands.SMOVE, srcKey, destKey, value);
    }
    private static final RedisStrictCommand<Long> SCARD = new RedisStrictCommand<Long>("SCARD");
    @Override
    public Long sCard(byte[] key) {
        return read(key, StringCodec.INSTANCE, SCARD, key);
    }
    @Override
    public Boolean sIsMember(byte[] key, byte[] value) {
        return read(key, StringCodec.INSTANCE, RedisCommands.SISMEMBER, key, value);
    }
    @Override
    public Set<byte[]> sInter(byte[]... keys) {
        // routed by the first key's slot
        return write(keys[0], ByteArrayCodec.INSTANCE, RedisCommands.SINTER, Arrays.asList(keys).toArray());
    }
    @Override
    public Long sInterStore(byte[] destKey, byte[]... keys) {
        List<Object> args = new ArrayList<Object>(keys.length + 1);
        args.add(destKey);
        args.addAll(Arrays.asList(keys));
        return write(keys[0], StringCodec.INSTANCE, RedisCommands.SINTERSTORE, args.toArray());
    }
    @Override
    public Set<byte[]> sUnion(byte[]... keys) {
        return write(keys[0], ByteArrayCodec.INSTANCE, RedisCommands.SUNION, Arrays.asList(keys).toArray());
    }
    @Override
    public Long sUnionStore(byte[] destKey, byte[]... keys) {
        List<Object> args = new ArrayList<Object>(keys.length + 1);
        args.add(destKey);
        args.addAll(Arrays.asList(keys));
        return write(keys[0], StringCodec.INSTANCE, RedisCommands.SUNIONSTORE, args.toArray());
    }
    @Override
    public Set<byte[]> sDiff(byte[]... keys) {
        return write(keys[0], ByteArrayCodec.INSTANCE, RedisCommands.SDIFF, Arrays.asList(keys).toArray());
    }
    @Override
    public Long sDiffStore(byte[] destKey, byte[]... keys) {
        List<Object> args = new ArrayList<Object>(keys.length + 1);
        args.add(destKey);
        args.addAll(Arrays.asList(keys));
        return write(keys[0], StringCodec.INSTANCE, RedisCommands.SDIFFSTORE, args.toArray());
    }
    @Override
    public Set<byte[]> sMembers(byte[] key) {
        return read(key, ByteArrayCodec.INSTANCE, RedisCommands.SMEMBERS, key);
    }
    @Override
    public byte[] sRandMember(byte[] key) {
        return read(key, ByteArrayCodec.INSTANCE, RedisCommands.SRANDMEMBER_SINGLE, key);
    }
    private static final RedisCommand<List<Object>> SRANDMEMBER = new RedisCommand<>("SRANDMEMBER", new ObjectListReplayDecoder<>());
    @Override
    public List<byte[]> sRandMember(byte[] key, long count) {
        // SRANDMEMBER with COUNT: array reply, may contain duplicates if count < 0
        return read(key, ByteArrayCodec.INSTANCE, SRANDMEMBER, key, count);
    }
    /**
     * SSCAN cursor over the set at {@code key}. Unlike the keyspace scan(),
     * all pages target the single node owning the key; the RedisClient from
     * each reply is reused so subsequent pages hit the same physical
     * connection target.
     */
    @Override
    public Cursor<byte[]> sScan(byte[] key, ScanOptions options) {
        return new KeyBoundCursor<byte[]>(key, 0, options) {
            private RedisClient client;
            @Override
            protected ScanIteration<byte[]> doScan(byte[] key, long cursorId, ScanOptions options) {
                if (isQueueing() || isPipelined()) {
                    throw new UnsupportedOperationException("'SSCAN' cannot be called in pipeline / transaction mode.");
                }
                List<Object> args = new ArrayList<Object>();
                args.add(key);
                args.add(Long.toUnsignedString(cursorId));
                if (options.getPattern() != null) {
                    args.add("MATCH");
                    args.add(options.getPattern());
                }
                if (options.getCount() != null) {
                    args.add("COUNT");
                    args.add(options.getCount());
                }
                RFuture<ListScanResult<byte[]>> f = executorService.readAsync(client, key, ByteArrayCodec.INSTANCE, RedisCommands.SSCAN, args.toArray());
                ListScanResult<byte[]> res = syncFuture(f);
                // remember the serving client for the next page
                client = res.getRedisClient();
                return new ScanIteration<byte[]>(Long.parseUnsignedLong(res.getPos()), res.getValues());
            }
        }.open();
    }
    @Override
    public Boolean zAdd(byte[] key, double score, byte[] value) {
        // scores rendered via BigDecimal.valueOf to avoid scientific notation
        return write(key, StringCodec.INSTANCE, RedisCommands.ZADD_BOOL, key, BigDecimal.valueOf(score).toPlainString(), value);
    }
    /**
     * ZADD with multiple score/member pairs; returns the number of newly
     * added members.
     */
    @Override
    public Long zAdd(byte[] key, Set<Tuple> tuples) {
        List<Object> params = new ArrayList<Object>(tuples.size()*2+1);
        params.add(key);
        for (Tuple entry : tuples) {
            params.add(BigDecimal.valueOf(entry.getScore()).toPlainString());
            params.add(entry.getValue());
        }
        return write(key, StringCodec.INSTANCE, RedisCommands.ZADD, params.toArray());
    }
    @Override
    public Long zRem(byte[] key, byte[]... values) {
        List<Object> params = new ArrayList<Object>(values.length+1);
        params.add(key);
        params.addAll(Arrays.asList(values));
        return write(key, StringCodec.INSTANCE, RedisCommands.ZREM_LONG, params.toArray());
    }
@Override
public Double zIncrBy(byte[] key, double increment, byte[] value) {
return write(key, DoubleCodec.INSTANCE, RedisCommands.ZINCRBY,
key, new BigDecimal(increment).toPlainString(), value);
}
    @Override
    public Long zRank(byte[] key, byte[] value) {
        return read(key, StringCodec.INSTANCE, RedisCommands.ZRANK, key, value);
    }
    @Override
    public Long zRevRank(byte[] key, byte[] value) {
        return read(key, StringCodec.INSTANCE, RedisCommands.ZREVRANK, key, value);
    }
    private static final RedisCommand<Set<Object>> ZRANGE = new RedisCommand<Set<Object>>("ZRANGE", new ObjectSetReplayDecoder<Object>());
    @Override
    public Set<byte[]> zRange(byte[] key, long start, long end) {
        return read(key, ByteArrayCodec.INSTANCE, ZRANGE, key, start, end);
    }
    // RESP2 and RESP3 encode WITHSCORES replies differently, hence two decoders
    private static final RedisCommand<Set<Tuple>> ZRANGE_ENTRY = new RedisCommand<Set<Tuple>>("ZRANGE", new ScoredSortedSetReplayDecoder());
    private static final RedisCommand<Set<Tuple>> ZRANGE_ENTRY_V2 = new RedisCommand<Set<Tuple>>("ZRANGE",
            new ListMultiDecoder2(new ObjectSetReplayDecoder(), new ScoredSortedSetReplayDecoderV2()));
    @Override
    public Set<Tuple> zRangeWithScores(byte[] key, long start, long end) {
        if (executorService.getServiceManager().isResp3()) {
            return read(key, ByteArrayCodec.INSTANCE, ZRANGE_ENTRY_V2, key, start, end, "WITHSCORES");
        }
        return read(key, ByteArrayCodec.INSTANCE, ZRANGE_ENTRY, key, start, end, "WITHSCORES");
    }
private String value(Range.Boundary boundary, String defaultValue) {
if (boundary == null) {
return defaultValue;
}
Object score = boundary.getValue();
if (score == null) {
return defaultValue;
}
StringBuilder element = new StringBuilder();
if (!boundary.isIncluding()) {
element.append("(");
} else {
if (!(score instanceof Double)) {
element.append("[");
}
}
if (score instanceof Double) {
if (Double.isInfinite((Double) score)) {
element.append((Double)score > 0 ? "+inf" : "-inf");
} else {
element.append(BigDecimal.valueOf((Double)score).toPlainString());
}
} else {
element.append(score);
}
return element.toString();
}
    @Override
    public Set<byte[]> zRangeByScore(byte[] key, double min, double max) {
        // delegate to the Range-based overload with inclusive bounds
        return zRangeByScore(key, new Range().gte(min).lte(max));
    }
    @Override
    public Set<Tuple> zRangeByScoreWithScores(byte[] key, Range range) {
        return zRangeByScoreWithScores(key, range, null);
    }
    @Override
    public Set<Tuple> zRangeByScoreWithScores(byte[] key, double min, double max) {
        return zRangeByScoreWithScores(key, new Range().gte(min).lte(max));
    }
    @Override
    public Set<byte[]> zRangeByScore(byte[] key, double min, double max, long offset, long count) {
        // Limit uses int; the long offset/count are narrowed here
        return zRangeByScore(key, new Range().gte(min).lte(max),
                new Limit().offset(Long.valueOf(offset).intValue()).count(Long.valueOf(count).intValue()));
    }
    @Override
    public Set<Tuple> zRangeByScoreWithScores(byte[] key, double min, double max, long offset, long count) {
        return zRangeByScoreWithScores(key, new Range().gte(min).lte(max),
                new Limit().offset(Long.valueOf(offset).intValue()).count(Long.valueOf(count).intValue()));
    }
    // RESP2 and RESP3 encode WITHSCORES replies differently, hence two decoders
    private static final RedisCommand<Set<Tuple>> ZRANGEBYSCORE = new RedisCommand<Set<Tuple>>("ZRANGEBYSCORE", new ScoredSortedSetReplayDecoder());
    private static final RedisCommand<Set<Tuple>> ZRANGEBYSCORE_V2 = new RedisCommand<Set<Tuple>>("ZRANGEBYSCORE",
            new ListMultiDecoder2(new ObjectSetReplayDecoder(), new ScoredSortedSetReplayDecoderV2()));
    /**
     * ZRANGEBYSCORE <key> <min> <max> WITHSCORES [LIMIT offset count];
     * bounds are rendered by value() ("-inf"/"+inf" defaults).
     */
    @Override
    public Set<Tuple> zRangeByScoreWithScores(byte[] key, Range range, Limit limit) {
        String min = value(range.getMin(), "-inf");
        String max = value(range.getMax(), "+inf");
        List<Object> args = new ArrayList<Object>();
        args.add(key);
        args.add(min);
        args.add(max);
        args.add("WITHSCORES");
        if (limit != null) {
            args.add("LIMIT");
            args.add(limit.getOffset());
            args.add(limit.getCount());
        }
        if (executorService.getServiceManager().isResp3()) {
            return read(key, ByteArrayCodec.INSTANCE, ZRANGEBYSCORE_V2, args.toArray());
        }
        return read(key, ByteArrayCodec.INSTANCE, ZRANGEBYSCORE, args.toArray());
    }
    private static final RedisCommand<Set<Object>> ZREVRANGE = new RedisCommand<Set<Object>>("ZREVRANGE", new ObjectSetReplayDecoder<Object>());
    @Override
    public Set<byte[]> zRevRange(byte[] key, long start, long end) {
        return read(key, ByteArrayCodec.INSTANCE, ZREVRANGE, key, start, end);
    }
    private static final RedisCommand<Set<Tuple>> ZREVRANGE_ENTRY = new RedisCommand<Set<Tuple>>("ZREVRANGE", new ScoredSortedSetReplayDecoder());
    // NOTE(review): declared with a raw type, unlike its siblings — consider parameterizing
    private static final RedisCommand<Set<Tuple>> ZREVRANGE_ENTRY_V2 = new RedisCommand("ZREVRANGE",
            new ListMultiDecoder2(new ObjectSetReplayDecoder(), new ScoredSortedSetReplayDecoderV2()));
    @Override
    public Set<Tuple> zRevRangeWithScores(byte[] key, long start, long end) {
        if (executorService.getServiceManager().isResp3()) {
            return read(key, ByteArrayCodec.INSTANCE, ZREVRANGE_ENTRY_V2, key, start, end, "WITHSCORES");
        }
        return read(key, ByteArrayCodec.INSTANCE, ZREVRANGE_ENTRY, key, start, end, "WITHSCORES");
    }
    @Override
    public Set<byte[]> zRevRangeByScore(byte[] key, double min, double max) {
        return zRevRangeByScore(key, new Range().gte(min).lte(max));
    }
    private static final RedisCommand<Set<byte[]>> ZREVRANGEBYSCORE = new RedisCommand<Set<byte[]>>("ZREVRANGEBYSCORE", new ObjectSetReplayDecoder<byte[]>());
    // RESP2 and RESP3 encode WITHSCORES replies differently, hence two decoders
    private static final RedisCommand<Set<Tuple>> ZREVRANGEBYSCOREWITHSCORES = new RedisCommand<Set<Tuple>>("ZREVRANGEBYSCORE", new ScoredSortedSetReplayDecoder());
    private static final RedisCommand<Set<Tuple>> ZREVRANGEBYSCOREWITHSCORES_V2 = new RedisCommand<Set<Tuple>>("ZREVRANGEBYSCORE",
            new ListMultiDecoder2(new ObjectSetReplayDecoder(), new ScoredSortedSetReplayDecoderV2()));
    @Override
    public Set<byte[]> zRevRangeByScore(byte[] key, Range range) {
        return zRevRangeByScore(key, range, null);
    }
    @Override
    public Set<Tuple> zRevRangeByScoreWithScores(byte[] key, double min, double max) {
        return zRevRangeByScoreWithScores(key, new Range().gte(min).lte(max));
    }
    @Override
    public Set<byte[]> zRevRangeByScore(byte[] key, double min, double max, long offset, long count) {
        // Limit uses int; the long offset/count are narrowed here
        return zRevRangeByScore(key, new Range().gte(min).lte(max),
                new Limit().offset(Long.valueOf(offset).intValue()).count(Long.valueOf(count).intValue()));
    }
    /**
     * ZREVRANGEBYSCORE <key> <max> <min> [LIMIT offset count] — note the
     * reversed max/min argument order required by the command.
     */
    @Override
    public Set<byte[]> zRevRangeByScore(byte[] key, Range range, Limit limit) {
        String min = value(range.getMin(), "-inf");
        String max = value(range.getMax(), "+inf");
        List<Object> args = new ArrayList<Object>();
        args.add(key);
        args.add(max);
        args.add(min);
        if (limit != null) {
            args.add("LIMIT");
            args.add(limit.getOffset());
            args.add(limit.getCount());
        }
        return read(key, ByteArrayCodec.INSTANCE, ZREVRANGEBYSCORE, args.toArray());
    }
    @Override
    public Set<Tuple> zRevRangeByScoreWithScores(byte[] key, double min, double max, long offset, long count) {
        return zRevRangeByScoreWithScores(key, new Range().gte(min).lte(max),
                new Limit().offset(Long.valueOf(offset).intValue()).count(Long.valueOf(count).intValue()));
    }
    @Override
    public Set<Tuple> zRevRangeByScoreWithScores(byte[] key, Range range) {
        return zRevRangeByScoreWithScores(key, range, null);
    }
    /**
     * ZREVRANGEBYSCORE ... WITHSCORES [LIMIT offset count]; picks the RESP3
     * decoder variant when the server speaks RESP3.
     */
    @Override
    public Set<Tuple> zRevRangeByScoreWithScores(byte[] key, Range range, Limit limit) {
        String min = value(range.getMin(), "-inf");
        String max = value(range.getMax(), "+inf");
        List<Object> args = new ArrayList<Object>();
        args.add(key);
        args.add(max);
        args.add(min);
        args.add("WITHSCORES");
        if (limit != null) {
            args.add("LIMIT");
            args.add(limit.getOffset());
            args.add(limit.getCount());
        }
        if (executorService.getServiceManager().isResp3()) {
            return read(key, ByteArrayCodec.INSTANCE, ZREVRANGEBYSCOREWITHSCORES_V2, args.toArray());
        }
        return read(key, ByteArrayCodec.INSTANCE, ZREVRANGEBYSCOREWITHSCORES, args.toArray());
    }
    @Override
    public Long zCount(byte[] key, double min, double max) {
        return zCount(key, new Range().gte(min).lte(max));
    }
    private static final RedisStrictCommand<Long> ZCOUNT = new RedisStrictCommand<Long>("ZCOUNT");
    @Override
    public Long zCount(byte[] key, Range range) {
        // bounds rendered by value(); unbounded sides become -inf/+inf
        String min = value(range.getMin(), "-inf");
        String max = value(range.getMax(), "+inf");
        return read(key, StringCodec.INSTANCE, ZCOUNT, key, min, max);
    }
    @Override
    public Long zCard(byte[] key) {
        return read(key, StringCodec.INSTANCE, RedisCommands.ZCARD, key);
    }
    @Override
    public Double zScore(byte[] key, byte[] value) {
        return read(key, StringCodec.INSTANCE, RedisCommands.ZSCORE, key, value);
    }
    private static final RedisStrictCommand<Long> ZREMRANGEBYRANK = new RedisStrictCommand<Long>("ZREMRANGEBYRANK");
    private static final RedisStrictCommand<Long> ZREMRANGEBYSCORE = new RedisStrictCommand<Long>("ZREMRANGEBYSCORE");
    @Override
    public Long zRemRange(byte[] key, long start, long end) {
        // removes by rank (index) range
        return write(key, StringCodec.INSTANCE, ZREMRANGEBYRANK, key, start, end);
    }
    @Override
    public Long zRemRangeByScore(byte[] key, double min, double max) {
        return zRemRangeByScore(key, new Range().gte(min).lte(max));
    }
    @Override
    public Long zRemRangeByScore(byte[] key, Range range) {
        String min = value(range.getMin(), "-inf");
        String max = value(range.getMax(), "+inf");
        return write(key, StringCodec.INSTANCE, ZREMRANGEBYSCORE, key, min, max);
    }
    @Override
    public Long zUnionStore(byte[] destKey, byte[]... sets) {
        return zUnionStore(destKey, null, (Weights)null, sets);
    }
    private static final RedisStrictCommand<Long> ZUNIONSTORE = new RedisStrictCommand<Long>("ZUNIONSTORE");
    /**
     * ZUNIONSTORE <dest> <numkeys> <key...> [WEIGHTS w...] [AGGREGATE agg];
     * returns the cardinality of the destination set.
     */
    @Override
    public Long zUnionStore(byte[] destKey, Aggregate aggregate, Weights weights, byte[]... sets) {
        List<Object> args = new ArrayList<Object>(sets.length*2 + 5);
        args.add(destKey);
        args.add(sets.length);
        args.addAll(Arrays.asList(sets));
        if (weights != null) {
            args.add("WEIGHTS");
            for (double weight : weights.toArray()) {
                args.add(BigDecimal.valueOf(weight).toPlainString());
            }
        }
        if (aggregate != null) {
            args.add("AGGREGATE");
            args.add(aggregate.name());
        }
        return write(destKey, LongCodec.INSTANCE, ZUNIONSTORE, args.toArray());
    }
private static final RedisStrictCommand<Long> ZINTERSTORE = new RedisStrictCommand<Long>("ZINTERSTORE");
@Override
public Long zInterStore(byte[] destKey, byte[]... sets) {
return zInterStore(destKey, null, (Weights)null, sets);
}
@Override
public Long zInterStore(byte[] destKey, Aggregate aggregate, Weights weights, byte[]... sets) {
List<Object> args = new ArrayList<Object>(sets.length*2 + 5);
args.add(destKey);
args.add(sets.length);
args.addAll(Arrays.asList(sets));
if (weights != null) {
args.add("WEIGHTS");
for (double weight : weights.toArray()) {
args.add(BigDecimal.valueOf(weight).toPlainString());
}
}
if (aggregate != null) {
args.add("AGGREGATE");
args.add(aggregate.name());
}
return write(destKey, StringCodec.INSTANCE, ZINTERSTORE, args.toArray());
}
private static final RedisCommand<ListScanResult<Object>> ZSCAN = new RedisCommand<>("ZSCAN", new ListMultiDecoder2(new ListScanResultReplayDecoder(), new ScoredSortedListReplayDecoder()));
// Cursor over ZSCAN. After the first round-trip the answering RedisClient is remembered
// and reused, so the server-side cursor id stays valid on that node.
@Override
public Cursor<Tuple> zScan(byte[] key, ScanOptions options) {
return new KeyBoundCursor<Tuple>(key, 0, options) {
private RedisClient client;
@Override
protected ScanIteration<Tuple> doScan(byte[] key, long cursorId, ScanOptions options) {
// SCAN-family commands need an immediate reply and cannot be queued/batched.
if (isQueueing() || isPipelined()) {
throw new UnsupportedOperationException("'ZSCAN' cannot be called in pipeline / transaction mode.");
}
List<Object> args = new ArrayList<Object>();
args.add(key);
// cursor ids are unsigned 64-bit values on the wire
args.add(Long.toUnsignedString(cursorId));
if (options.getPattern() != null) {
args.add("MATCH");
args.add(options.getPattern());
}
if (options.getCount() != null) {
args.add("COUNT");
args.add(options.getCount());
}
RFuture<ListScanResult<Tuple>> f = executorService.readAsync(client, key, ByteArrayCodec.INSTANCE, ZSCAN, args.toArray());
ListScanResult<Tuple> res = syncFuture(f);
client = res.getRedisClient();
return new ScanIteration<Tuple>(Long.parseUnsignedLong(res.getPos()), res.getValues());
}
}.open();
}
// ZRANGEBYSCORE with caller-provided min/max strings (may carry '(' or inf markers).
@Override
public Set<byte[]> zRangeByScore(byte[] key, String min, String max) {
return read(key, ByteArrayCodec.INSTANCE, RedisCommands.ZRANGEBYSCORE, key, min, max);
}
// ZRANGEBYSCORE over a Range; unbounded ends map to -inf/+inf.
@Override
public Set<byte[]> zRangeByScore(byte[] key, Range range) {
String min = value(range.getMin(), "-inf");
String max = value(range.getMax(), "+inf");
return read(key, ByteArrayCodec.INSTANCE, RedisCommands.ZRANGEBYSCORE, key, min, max);
}
// ZRANGEBYSCORE ... LIMIT offset count.
@Override
public Set<byte[]> zRangeByScore(byte[] key, String min, String max, long offset, long count) {
return read(key, ByteArrayCodec.INSTANCE, RedisCommands.ZRANGEBYSCORE, key, min, max, "LIMIT", offset, count);
}
// ZRANGEBYSCORE over a Range with an optional LIMIT clause (skipped when limit is null).
@Override
public Set<byte[]> zRangeByScore(byte[] key, Range range, Limit limit) {
String min = value(range.getMin(), "-inf");
String max = value(range.getMax(), "+inf");
List<Object> args = new ArrayList<Object>();
args.add(key);
args.add(min);
args.add(max);
if (limit != null) {
args.add("LIMIT");
args.add(limit.getOffset());
args.add(limit.getCount());
}
return read(key, ByteArrayCodec.INSTANCE, RedisCommands.ZRANGEBYSCORE, args.toArray());
}
// ZRANGEBYLEX over the full lexicographic range.
@Override
public Set<byte[]> zRangeByLex(byte[] key) {
return zRangeByLex(key, Range.unbounded());
}
private static final RedisCommand<Set<Object>> ZRANGEBYLEX = new RedisCommand<Set<Object>>("ZRANGEBYLEX", new ObjectSetReplayDecoder<Object>());
// ZRANGEBYLEX; absent bounds map to the lex markers '-' (lowest) and '+' (highest).
@Override
public Set<byte[]> zRangeByLex(byte[] key, Range range) {
List<Object> params = new ArrayList<Object>();
params.add(key);
if (range.getMin() != null) {
String min = value(range.getMin(), "-");
params.add(min);
} else {
params.add("-");
}
if (range.getMax() != null) {
String max = value(range.getMax(), "+");
params.add(max);
} else {
params.add("+");
}
return read(key, ByteArrayCodec.INSTANCE, ZRANGEBYLEX, params.toArray());
}
// ZRANGEBYLEX with an optional LIMIT clause (skipped when limit is null).
@Override
public Set<byte[]> zRangeByLex(byte[] key, Range range, Limit limit) {
String min = value(range.getMin(), "-");
String max = value(range.getMax(), "+");
List<Object> args = new ArrayList<Object>();
args.add(key);
args.add(min);
args.add(max);
if (limit != null) {
args.add("LIMIT");
args.add(limit.getOffset());
args.add(limit.getCount());
}
return read(key, ByteArrayCodec.INSTANCE, ZRANGEBYLEX, args.toArray());
}
// HSET: writes a single field.
@Override
public Boolean hSet(byte[] key, byte[] field, byte[] value) {
return write(key, StringCodec.INSTANCE, RedisCommands.HSET, key, field, value);
}
// HSETNX: writes the field only if it does not exist yet.
@Override
public Boolean hSetNX(byte[] key, byte[] field, byte[] value) {
return write(key, StringCodec.INSTANCE, RedisCommands.HSETNX, key, field, value);
}
// HGET: raw field value, or null when absent.
@Override
public byte[] hGet(byte[] key, byte[] field) {
return read(key, ByteArrayCodec.INSTANCE, RedisCommands.HGET, key, field);
}
private static final RedisCommand<List<Object>> HMGET = new RedisCommand<List<Object>>("HMGET", new ObjectListReplayDecoder<Object>());
// HMGET: values in the order the fields were requested.
@Override
public List<byte[]> hMGet(byte[] key, byte[]... fields) {
List<Object> args = new ArrayList<Object>(fields.length + 1);
args.add(key);
args.addAll(Arrays.asList(fields));
return read(key, ByteArrayCodec.INSTANCE, HMGET, args.toArray());
}
// HMSET: bulk field/value write from the given map.
@Override
public void hMSet(byte[] key, Map<byte[], byte[]> hashes) {
List<Object> params = new ArrayList<Object>(hashes.size()*2 + 1);
params.add(key);
for (Map.Entry<byte[], byte[]> entry : hashes.entrySet()) {
params.add(entry.getKey());
params.add(entry.getValue());
}
write(key, StringCodec.INSTANCE, RedisCommands.HMSET, params.toArray());
}
private static final RedisCommand<Long> HINCRBY = new RedisCommand<Long>("HINCRBY");
// HINCRBY: integer increment of a hash field.
@Override
public Long hIncrBy(byte[] key, byte[] field, long delta) {
return write(key, StringCodec.INSTANCE, HINCRBY, key, field, delta);
}
private static final RedisCommand<Double> HINCRBYFLOAT = new RedisCommand<Double>("HINCRBYFLOAT", new DoubleReplayConvertor());
// HINCRBYFLOAT: delta rendered via BigDecimal.toPlainString() to avoid scientific notation.
@Override
public Double hIncrBy(byte[] key, byte[] field, double delta) {
return write(key, StringCodec.INSTANCE, HINCRBYFLOAT, key, field, BigDecimal.valueOf(delta).toPlainString());
}
// HEXISTS: field presence check.
@Override
public Boolean hExists(byte[] key, byte[] field) {
return read(key, StringCodec.INSTANCE, RedisCommands.HEXISTS, key, field);
}
// HDEL: removes the given fields.
@Override
public Long hDel(byte[] key, byte[]... fields) {
List<Object> args = new ArrayList<Object>(fields.length + 1);
args.add(key);
args.addAll(Arrays.asList(fields));
return write(key, StringCodec.INSTANCE, RedisCommands.HDEL, args.toArray());
}
private static final RedisStrictCommand<Long> HLEN = new RedisStrictCommand<Long>("HLEN");
// HLEN: number of fields in the hash.
@Override
public Long hLen(byte[] key) {
return read(key, StringCodec.INSTANCE, HLEN, key);
}
// HKEYS: all field names.
@Override
public Set<byte[]> hKeys(byte[] key) {
return read(key, ByteArrayCodec.INSTANCE, RedisCommands.HKEYS, key);
}
// HVALS: all field values.
@Override
public List<byte[]> hVals(byte[] key) {
return read(key, ByteArrayCodec.INSTANCE, RedisCommands.HVALS, key);
}
// HGETALL: the whole hash as a map.
@Override
public Map<byte[], byte[]> hGetAll(byte[] key) {
return read(key, ByteArrayCodec.INSTANCE, RedisCommands.HGETALL, key);
}
// Cursor over HSCAN; mirrors zScan: the answering RedisClient is remembered so the
// server-side cursor id stays valid on that node across iterations.
@Override
public Cursor<Entry<byte[], byte[]>> hScan(byte[] key, ScanOptions options) {
return new KeyBoundCursor<Entry<byte[], byte[]>>(key, 0, options) {
private RedisClient client;
@Override
protected ScanIteration<Entry<byte[], byte[]>> doScan(byte[] key, long cursorId, ScanOptions options) {
// SCAN-family commands need an immediate reply and cannot be queued/batched.
if (isQueueing() || isPipelined()) {
throw new UnsupportedOperationException("'HSCAN' cannot be called in pipeline / transaction mode.");
}
List<Object> args = new ArrayList<Object>();
args.add(key);
// cursor ids are unsigned 64-bit values on the wire
args.add(Long.toUnsignedString(cursorId));
if (options.getPattern() != null) {
args.add("MATCH");
args.add(options.getPattern());
}
if (options.getCount() != null) {
args.add("COUNT");
args.add(options.getCount());
}
RFuture<MapScanResult<byte[], byte[]>> f = executorService.readAsync(client, key, ByteArrayCodec.INSTANCE, RedisCommands.HSCAN, args.toArray());
MapScanResult<byte[], byte[]> res = syncFuture(f);
client = res.getRedisClient();
return new ScanIteration<Entry<byte[], byte[]>>(Long.parseUnsignedLong(res.getPos()), res.getValues());
}
}.open();
}
// Starts a transaction by swapping the executor for a batch service.
// No-op if already queueing; inside a pipeline the batch becomes IN_MEMORY_ATOMIC,
// otherwise a REDIS_WRITE_ATOMIC (MULTI/EXEC) batch is created.
@Override
public void multi() {
if (isQueueing()) {
return;
}
if (isPipelined()) {
BatchOptions options = BatchOptions.defaults()
.executionMode(ExecutionMode.IN_MEMORY_ATOMIC);
this.executorService = executorService.createCommandBatchService(options);
return;
}
BatchOptions options = BatchOptions.defaults()
.executionMode(ExecutionMode.REDIS_WRITE_ATOMIC);
this.executorService = executorService.createCommandBatchService(options);
}
// Executes the queued transaction and returns the per-command responses.
// Returns null when inside an atomic pipeline (results surface when the pipeline closes);
// throws when multi() was never called. Always restores the plain executor afterwards.
@Override
public List<Object> exec() {
if (isPipelinedAtomic()) {
return null;
}
if (isQueueing()) {
try {
BatchResult<?> result = ((CommandBatchService)executorService).execute();
filterResults(result);
return (List<Object>) result.getResponses();
} catch (Exception ex) {
throw transform(ex);
} finally {
resetConnection();
}
} else {
throw new InvalidDataAccessApiUsageException("Not in transaction mode. Please invoke multi method");
}
}
// Post-processes a batch result in place: drops the responses recorded in
// indexToRemove (adjusting each index for earlier removals) and converts any
// String responses to byte[] for the binary connection contract.
protected void filterResults(BatchResult<?> result) {
if (result.getResponses().isEmpty()) {
return;
}
int t = 0;
for (Integer index : indexToRemove) {
// each prior removal shifts the remaining indexes left by one
index -= t;
result.getResponses().remove((int)index);
t++;
}
for (ListIterator<Object> iterator = (ListIterator<Object>) result.getResponses().listIterator(); iterator.hasNext();) {
Object object = iterator.next();
if (object instanceof String) {
iterator.set(((String) object).getBytes());
}
}
}
// Restores the shared (non-batch) executor and clears per-transaction bookkeeping.
protected void resetConnection() {
executorService = this.redisson.getCommandExecutor();
index = -1;
indexToRemove.clear();
}
// DISCARD: aborts the queued transaction and restores the plain executor;
// throws when no transaction is active.
@Override
public void discard() {
if (isQueueing()) {
syncFuture(executorService.writeAsync(null, RedisCommands.DISCARD));
resetConnection();
} else {
throw new InvalidDataAccessApiUsageException("Not in transaction mode. Please invoke multi method");
}
}
// WATCH: optimistic-lock keys for the next transaction; not allowed once queueing.
@Override
public void watch(byte[]... keys) {
if (isQueueing()) {
throw new UnsupportedOperationException();
}
syncFuture(executorService.writeAsync(null, RedisCommands.WATCH, keys));
}
// UNWATCH: clears all watched keys.
@Override
public void unwatch() {
syncFuture(executorService.writeAsync(null, RedisCommands.UNWATCH));
}
// True while this connection holds a live subscription.
@Override
public boolean isSubscribed() {
return subscription != null && subscription.isAlive();
}
// Current subscription, or null when none was opened.
@Override
public Subscription getSubscription() {
return subscription;
}
// PUBLISH: returns the number of clients that received the message.
@Override
public Long publish(byte[] channel, byte[] message) {
return write(channel, StringCodec.INSTANCE, RedisCommands.PUBLISH, channel, message);
}
// SUBSCRIBE to the given channels; only one subscription per connection.
@Override
public void subscribe(MessageListener listener, byte[]... channels) {
checkSubscription();
subscription = new RedissonSubscription(executorService, listener);
subscription.subscribe(channels);
}
// Rejects a second subscription and subscriptions inside queue/pipeline modes.
private void checkSubscription() {
if (subscription != null) {
throw new RedisSubscribedConnectionException("Connection already subscribed");
}
if (isQueueing()) {
throw new UnsupportedOperationException("Not supported in queueing mode");
}
if (isPipelined()) {
throw new UnsupportedOperationException("Not supported in pipelined mode");
}
}
// PSUBSCRIBE to the given patterns; same single-subscription rule as subscribe().
@Override
public void pSubscribe(MessageListener listener, byte[]... patterns) {
checkSubscription();
subscription = new RedissonSubscription(executorService, listener);
subscription.pSubscribe(patterns);
}
// SELECT is not supported through this connection.
@Override
public void select(int dbIndex) {
throw new UnsupportedOperationException();
}
private static final RedisCommand<Object> ECHO = new RedisCommand<Object>("ECHO");
// ECHO: returns the message unchanged.
@Override
public byte[] echo(byte[] message) {
return read(null, ByteArrayCodec.INSTANCE, ECHO, message);
}
// PING.
@Override
public String ping() {
return read(null, StringCodec.INSTANCE, RedisCommands.PING);
}
// Not supported; use bgReWriteAof instead.
@Override
public void bgWriteAof() {
throw new UnsupportedOperationException();
}
// BGREWRITEAOF.
@Override
public void bgReWriteAof() {
write(null, StringCodec.INSTANCE, RedisCommands.BGREWRITEAOF);
}
// BGSAVE.
@Override
public void bgSave() {
write(null, StringCodec.INSTANCE, RedisCommands.BGSAVE);
}
// LASTSAVE: unix timestamp of the last successful save.
@Override
public Long lastSave() {
return write(null, StringCodec.INSTANCE, RedisCommands.LASTSAVE);
}
private static final RedisStrictCommand<Void> SAVE = new RedisStrictCommand<Void>("SAVE", new VoidReplayConvertor());
// SAVE: synchronous snapshot.
@Override
public void save() {
write(null, StringCodec.INSTANCE, SAVE);
}
// DBSIZE. In queueing mode a single command is issued; otherwise DBSIZE is sent to
// every node and the per-node counts are summed.
@Override
public Long dbSize() {
if (isQueueing()) {
return read(null, StringCodec.INSTANCE, RedisCommands.DBSIZE);
}
List<CompletableFuture<Long>> futures = executorService.readAllAsync(RedisCommands.DBSIZE);
CompletableFuture<Void> f = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
CompletableFuture<Long> s = f.thenApply(r -> futures.stream().mapToLong(v -> v.getNow(0L)).sum());
CompletableFutureWrapper<Long> ff = new CompletableFutureWrapper<>(s);
return sync(ff);
}
// FLUSHDB. Queued/pipelined: single command; otherwise broadcast to all nodes.
@Override
public void flushDb() {
if (isQueueing() || isPipelined()) {
write(null, StringCodec.INSTANCE, RedisCommands.FLUSHDB);
return;
}
RFuture<Void> f = executorService.writeAllVoidAsync(RedisCommands.FLUSHDB);
sync(f);
}
// FLUSHALL broadcast to all nodes.
@Override
public void flushAll() {
RFuture<Void> f = executorService.writeAllVoidAsync(RedisCommands.FLUSHALL);
sync(f);
}
private static final RedisStrictCommand<Properties> INFO_DEFAULT = new RedisStrictCommand<Properties>("INFO", "DEFAULT", new ObjectDecoder(new PropertiesDecoder()));
private static final RedisStrictCommand<Properties> INFO = new RedisStrictCommand<Properties>("INFO", new ObjectDecoder(new PropertiesDecoder()));
// INFO DEFAULT parsed into Properties.
@Override
public Properties info() {
return read(null, StringCodec.INSTANCE, INFO_DEFAULT);
}
// INFO <section> parsed into Properties.
@Override
public Properties info(String section) {
return read(null, StringCodec.INSTANCE, INFO, section);
}
// Server shutdown is not supported through this connection.
@Override
public void shutdown() {
throw new UnsupportedOperationException();
}
@Override
public void shutdown(ShutdownOption option) {
throw new UnsupportedOperationException();
}
private static final RedisStrictCommand<Properties> CONFIG_GET = new RedisStrictCommand<Properties>("CONFIG", "GET", new PropertiesListDecoder());
// CONFIG GET <pattern> parsed into Properties.
@Override
public Properties getConfig(String pattern) {
return read(null, StringCodec.INSTANCE, CONFIG_GET, pattern);
}
// CONFIG SET.
@Override
public void setConfig(String param, String value) {
write(null, StringCodec.INSTANCE, RedisCommands.CONFIG_SET, param, value);
}
// CONFIG RESETSTAT.
@Override
public void resetConfigStats() {
write(null, StringCodec.INSTANCE, RedisCommands.CONFIG_RESETSTAT);
}
// Not supported through this connection.
@Override
public void killClient(String host, int port) {
throw new UnsupportedOperationException();
}
// Client name is fixed by the Redisson Config object.
@Override
public void setClientName(byte[] name) {
throw new UnsupportedOperationException("Should be defined through Redisson Config object");
}
@Override
public String getClientName() {
throw new UnsupportedOperationException();
}
// CLIENT LIST.
@Override
public List<RedisClientInfo> getClientList() {
return read(null, StringCodec.INSTANCE, RedisCommands.CLIENT_LIST);
}
// Replication topology is managed by Redisson, not per-connection.
@Override
public void slaveOf(String host, int port) {
throw new UnsupportedOperationException();
}
@Override
public void slaveOfNoOne() {
throw new UnsupportedOperationException();
}
// MIGRATE with an effectively unbounded timeout.
@Override
public void migrate(byte[] key, RedisNode target, int dbIndex, MigrateOption option) {
migrate(key, target, dbIndex, option, Long.MAX_VALUE);
}
// MIGRATE host port key destination-db timeout.
// NOTE(review): the MigrateOption argument is accepted but never appended to the
// command (no COPY/REPLACE flag is sent) — confirm whether that is intentional.
@Override
public void migrate(byte[] key, RedisNode target, int dbIndex, MigrateOption option, long timeout) {
write(key, StringCodec.INSTANCE, RedisCommands.MIGRATE, target.getHost(), target.getPort(), key, dbIndex, timeout);
}
// SCRIPT FLUSH broadcast to all nodes; not allowed inside queue/pipeline modes.
@Override
public void scriptFlush() {
if (isQueueing() || isPipelined()) {
throw new UnsupportedOperationException();
}
RFuture<Void> f = executorService.writeAllVoidAsync(RedisCommands.SCRIPT_FLUSH);
sync(f);
}
@Override
public void scriptKill() {
throw new UnsupportedOperationException();
}
// SCRIPT LOAD on every node; the SHA returned by the first node is reported
// (all nodes compute the same SHA for the same script body).
@Override
public String scriptLoad(byte[] script) {
if (isQueueing()) {
throw new UnsupportedOperationException();
}
if (isPipelined()) {
throw new UnsupportedOperationException();
}
List<CompletableFuture<String>> futures = executorService.executeAllAsync(RedisCommands.SCRIPT_LOAD, (Object)script);
CompletableFuture<Void> f = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
CompletableFuture<String> s = f.thenApply(r -> futures.get(0).getNow(null));
return sync(new CompletableFutureWrapper<>(s));
}
// SCRIPT EXISTS across all nodes; per-SHA flags are OR-combined so a script counts
// as present if any node has it cached.
@Override
public List<Boolean> scriptExists(final String... scriptShas) {
if (isQueueing() || isPipelined()) {
throw new UnsupportedOperationException();
}
List<CompletableFuture<List<Boolean>>> futures = executorService.writeAllAsync(RedisCommands.SCRIPT_EXISTS, (Object[]) scriptShas);
CompletableFuture<Void> f = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
CompletableFuture<List<Boolean>> s = f.thenApply(r -> {
List<Boolean> result = futures.get(0).getNow(new ArrayList<>());
for (CompletableFuture<List<Boolean>> future : futures.subList(1, futures.size())) {
List<Boolean> l = future.getNow(new ArrayList<>());
for (int i = 0; i < l.size(); i++) {
result.set(i, result.get(i) | l.get(i));
}
}
return result;
});
return sync(new CompletableFutureWrapper<>(s));
}
// EVAL script numkeys key... arg...; routed by the first key (if any).
// Not allowed inside queue/pipeline modes.
@Override
public <T> T eval(byte[] script, ReturnType returnType, int numKeys, byte[]... keysAndArgs) {
if (isQueueing()) {
throw new UnsupportedOperationException();
}
if (isPipelined()) {
throw new UnsupportedOperationException();
}
RedisCommand<?> c = toCommand(returnType, "EVAL");
List<Object> params = new ArrayList<Object>();
params.add(script);
params.add(numKeys);
params.addAll(Arrays.asList(keysAndArgs));
byte[] key = getKey(numKeys, keysAndArgs);
return write(key, ByteArrayCodec.INSTANCE, c, params.toArray());
}
// Maps a Spring Data ReturnType onto the matching Redisson RScript command,
// wrapping MULTI and VALUE replies with a BinaryConvertor so they come back as byte[].
protected RedisCommand<?> toCommand(ReturnType returnType, String name) {
RedisCommand<?> c = null;
if (returnType == ReturnType.BOOLEAN) {
c = org.redisson.api.RScript.ReturnType.BOOLEAN.getCommand();
} else if (returnType == ReturnType.INTEGER) {
c = org.redisson.api.RScript.ReturnType.INTEGER.getCommand();
} else if (returnType == ReturnType.MULTI) {
c = org.redisson.api.RScript.ReturnType.MULTI.getCommand();
return new RedisCommand(c, name, new BinaryConvertor());
} else if (returnType == ReturnType.STATUS) {
c = org.redisson.api.RScript.ReturnType.STATUS.getCommand();
} else if (returnType == ReturnType.VALUE) {
c = org.redisson.api.RScript.ReturnType.VALUE.getCommand();
return new RedisCommand(c, name, new BinaryConvertor());
}
return new RedisCommand(c, name);
}
// EVALSHA with a String digest; same routing and mode restrictions as eval().
@Override
public <T> T evalSha(String scriptSha, ReturnType returnType, int numKeys, byte[]... keysAndArgs) {
if (isQueueing()) {
throw new UnsupportedOperationException();
}
if (isPipelined()) {
throw new UnsupportedOperationException();
}
RedisCommand<?> c = toCommand(returnType, "EVALSHA");
List<Object> params = new ArrayList<Object>();
params.add(scriptSha);
params.add(numKeys);
params.addAll(Arrays.asList(keysAndArgs));
byte[] key = getKey(numKeys, keysAndArgs);
return write(key, ByteArrayCodec.INSTANCE, c, params.toArray());
}
// EVALSHA with a byte[] digest; unlike the String overload, no queue/pipeline guard here.
@Override
public <T> T evalSha(byte[] scriptSha, ReturnType returnType, int numKeys, byte[]... keysAndArgs) {
RedisCommand<?> c = toCommand(returnType, "EVALSHA");
List<Object> params = new ArrayList<Object>();
params.add(scriptSha);
params.add(numKeys);
params.addAll(Arrays.asList(keysAndArgs));
byte[] key = getKey(numKeys, keysAndArgs);
return write(key, ByteArrayCodec.INSTANCE, c, params.toArray());
}
// First declared key for slot routing, or null when the script takes no keys.
private static byte[] getKey(int numKeys, byte[][] keysAndArgs) {
if (numKeys > 0 && keysAndArgs.length > 0) {
return keysAndArgs[0];
}
return null;
}
// GEOADD key longitude latitude member (Point x = longitude, y = latitude).
@Override
public Long geoAdd(byte[] key, Point point, byte[] member) {
return write(key, StringCodec.INSTANCE, RedisCommands.GEOADD, key, point.getX(), point.getY(), member);
}
// GEOADD from a GeoLocation wrapper.
@Override
public Long geoAdd(byte[] key, GeoLocation<byte[]> location) {
return write(key, StringCodec.INSTANCE, RedisCommands.GEOADD, key, location.getPoint().getX(), location.getPoint().getY(), location.getName());
}
// GEOADD for several members at once, flattened as lon lat member triples.
@Override
public Long geoAdd(byte[] key, Map<byte[], Point> memberCoordinateMap) {
List<Object> params = new ArrayList<Object>(memberCoordinateMap.size()*3 + 1);
params.add(key);
for (Entry<byte[], Point> entry : memberCoordinateMap.entrySet()) {
params.add(entry.getValue().getX());
params.add(entry.getValue().getY());
params.add(entry.getKey());
}
return write(key, StringCodec.INSTANCE, RedisCommands.GEOADD, params.toArray());
}
// GEOADD from an iterable of GeoLocations.
@Override
public Long geoAdd(byte[] key, Iterable<GeoLocation<byte[]>> locations) {
List<Object> params = new ArrayList<Object>();
params.add(key);
for (GeoLocation<byte[]> location : locations) {
params.add(location.getPoint().getX());
params.add(location.getPoint().getY());
params.add(location.getName());
}
return write(key, StringCodec.INSTANCE, RedisCommands.GEOADD, params.toArray());
}
// GEODIST defaulting to meters.
@Override
public Distance geoDist(byte[] key, byte[] member1, byte[] member2) {
return geoDist(key, member1, member2, DistanceUnit.METERS);
}
// GEODIST in the requested metric; reply converted into a Distance of that metric.
@Override
public Distance geoDist(byte[] key, byte[] member1, byte[] member2, Metric metric) {
return read(key, DoubleCodec.INSTANCE, new RedisCommand<Distance>("GEODIST", new DistanceConvertor(metric)), key, member1, member2, getAbbreviation(metric));
}
private static final RedisCommand<List<Object>> GEOHASH = new RedisCommand<List<Object>>("GEOHASH", new ObjectListReplayDecoder<Object>());

/**
 * GEOHASH: returns the Geohash string for each requested member, in request order.
 *
 * @param key     sorted-set key holding the geo index
 * @param members members to resolve
 * @return one Geohash string per member
 */
@Override
public List<String> geoHash(byte[] key, byte[]... members) {
    List<Object> params = new ArrayList<Object>(members.length + 1);
    params.add(key);
    // bulk-add instead of the element-by-element loop, matching geoPos/hDel/pfMerge style
    params.addAll(Arrays.asList(members));
    return read(key, StringCodec.INSTANCE, GEOHASH, params.toArray());
}
// Decoder turning a GEOPOS reply into Point objects.
private final MultiDecoder<Map<Object, Object>> geoDecoder = new ListMultiDecoder2(new ObjectListReplayDecoder2(), new PointDecoder());
// GEOPOS: coordinates for each requested member.
@Override
public List<Point> geoPos(byte[] key, byte[]... members) {
List<Object> params = new ArrayList<Object>(members.length + 1);
params.add(key);
params.addAll(Arrays.asList(members));
RedisCommand<Map<Object, Object>> command = new RedisCommand<Map<Object, Object>>("GEOPOS", geoDecoder);
return read(key, StringCodec.INSTANCE, command, params.toArray());
}
// Renders a coordinate via BigDecimal.toPlainString() to avoid scientific notation.
private String convert(double longitude) {
return BigDecimal.valueOf(longitude).toPlainString();
}
// Decoder for GEORADIUS replies that include coordinates (WITHCOORD).
private final MultiDecoder<GeoResults<GeoLocation<byte[]>>> postitionDecoder = new ListMultiDecoder2(new GeoResultsDecoder(), new CodecDecoder(), new PointDecoder(), new ObjectListReplayDecoder());
// GEORADIUS_RO (read-only variant) around a circle's center, members only.
@Override
public GeoResults<GeoLocation<byte[]>> geoRadius(byte[] key, Circle within) {
RedisCommand<GeoResults<GeoLocation<byte[]>>> command = new RedisCommand<GeoResults<GeoLocation<byte[]>>>("GEORADIUS_RO", new GeoResultsDecoder());
return read(key, ByteArrayCodec.INSTANCE, command, key,
convert(within.getCenter().getX()), convert(within.getCenter().getY()),
within.getRadius().getValue(), getAbbreviation(within.getRadius().getMetric()));
}
// GEORADIUS_RO with extra args. WITHCOORD selects the coordinate decoder; otherwise
// WITHDIST is always requested so each hit carries its distance.
@Override
public GeoResults<GeoLocation<byte[]>> geoRadius(byte[] key, Circle within, GeoRadiusCommandArgs args) {
List<Object> params = new ArrayList<Object>();
params.add(key);
params.add(convert(within.getCenter().getX()));
params.add(convert(within.getCenter().getY()));
params.add(within.getRadius().getValue());
params.add(getAbbreviation(within.getRadius().getMetric()));
RedisCommand<GeoResults<GeoLocation<byte[]>>> command;
if (args.getFlags().contains(GeoRadiusCommandArgs.Flag.WITHCOORD)) {
command = new RedisCommand<GeoResults<GeoLocation<byte[]>>>("GEORADIUS_RO", postitionDecoder);
params.add("WITHCOORD");
} else {
MultiDecoder<GeoResults<GeoLocation<byte[]>>> distanceDecoder = new ListMultiDecoder2(new GeoResultsDecoder(within.getRadius().getMetric()), new GeoDistanceDecoder());
command = new RedisCommand<GeoResults<GeoLocation<byte[]>>>("GEORADIUS_RO", distanceDecoder);
params.add("WITHDIST");
}
if (args.getLimit() != null) {
params.add("COUNT");
params.add(args.getLimit());
}
if (args.getSortDirection() != null) {
params.add(args.getSortDirection().name());
}
return read(key, ByteArrayCodec.INSTANCE, command, params.toArray());
}
// Maps the neutral metric to meters; otherwise uses the metric's own abbreviation.
private String getAbbreviation(Metric metric) {
if (ObjectUtils.nullSafeEquals(Metrics.NEUTRAL, metric)) {
return DistanceUnit.METERS.getAbbreviation();
}
return metric.getAbbreviation();
}
// GEORADIUSBYMEMBER_RO with a radius in meters.
@Override
public GeoResults<GeoLocation<byte[]>> geoRadiusByMember(byte[] key, byte[] member, double radius) {
return geoRadiusByMember(key, member, new Distance(radius, DistanceUnit.METERS));
}
private static final RedisCommand<GeoResults<GeoLocation<byte[]>>> GEORADIUSBYMEMBER = new RedisCommand<GeoResults<GeoLocation<byte[]>>>("GEORADIUSBYMEMBER_RO", new GeoResultsDecoder());
// GEORADIUSBYMEMBER_RO (read-only variant), members only.
@Override
public GeoResults<GeoLocation<byte[]>> geoRadiusByMember(byte[] key, byte[] member, Distance radius) {
return read(key, ByteArrayCodec.INSTANCE, GEORADIUSBYMEMBER, key, member, radius.getValue(), getAbbreviation(radius.getMetric()));
}
// GEORADIUSBYMEMBER_RO with extra args; same WITHCOORD/WITHDIST decoder split as geoRadius.
@Override
public GeoResults<GeoLocation<byte[]>> geoRadiusByMember(byte[] key, byte[] member, Distance radius,
GeoRadiusCommandArgs args) {
List<Object> params = new ArrayList<Object>();
params.add(key);
params.add(member);
params.add(radius.getValue());
params.add(getAbbreviation(radius.getMetric()));
RedisCommand<GeoResults<GeoLocation<byte[]>>> command;
if (args.getFlags().contains(GeoRadiusCommandArgs.Flag.WITHCOORD)) {
command = new RedisCommand<GeoResults<GeoLocation<byte[]>>>("GEORADIUSBYMEMBER_RO", postitionDecoder);
params.add("WITHCOORD");
} else {
MultiDecoder<GeoResults<GeoLocation<byte[]>>> distanceDecoder = new ListMultiDecoder2(new GeoResultsDecoder(radius.getMetric()), new GeoDistanceDecoder());
command = new RedisCommand<GeoResults<GeoLocation<byte[]>>>("GEORADIUSBYMEMBER_RO", distanceDecoder);
params.add("WITHDIST");
}
if (args.getLimit() != null) {
params.add("COUNT");
params.add(args.getLimit());
}
if (args.getSortDirection() != null) {
params.add(args.getSortDirection().name());
}
return read(key, ByteArrayCodec.INSTANCE, command, params.toArray());
}
// Geo members live in a sorted set, so removal delegates to ZREM.
@Override
public Long geoRemove(byte[] key, byte[]... members) {
return zRem(key, members);
}
private static final RedisCommand<Long> PFADD = new RedisCommand<Long>("PFADD");

/**
 * PFADD: adds the given values to the HyperLogLog stored at {@code key}.
 *
 * @param key    HyperLogLog key
 * @param values elements to observe
 * @return the server reply for PFADD
 */
@Override
public Long pfAdd(byte[] key, byte[]... values) {
    List<Object> params = new ArrayList<Object>(values.length + 1);
    params.add(key);
    // bulk-add instead of the element-by-element loop, matching hDel/pfMerge style
    params.addAll(Arrays.asList(values));
    return write(key, StringCodec.INSTANCE, PFADD, params.toArray());
}
/**
 * PFCOUNT: approximate cardinality of the union of the given HyperLogLogs,
 * routed by the first key.
 *
 * @param keys one or more HyperLogLog keys; must be non-empty and null-free
 * @return the approximate cardinality
 */
@Override
public Long pfCount(byte[]... keys) {
    Assert.notEmpty(keys, "PFCOUNT requires at least one non 'null' key.");
    // fixed typo in the validation message: "PFOUNT" -> "PFCOUNT"
    Assert.noNullElements(keys, "Keys for PFCOUNT must not contain 'null'.");
    return write(keys[0], StringCodec.INSTANCE, RedisCommands.PFCOUNT, Arrays.asList(keys).toArray());
}
// PFMERGE: merges the source HyperLogLogs into the destination key.
@Override
public void pfMerge(byte[] destinationKey, byte[]... sourceKeys) {
List<Object> args = new ArrayList<Object>(sourceKeys.length + 1);
args.add(destinationKey);
args.addAll(Arrays.asList(sourceKeys));
write(destinationKey, StringCodec.INSTANCE, RedisCommands.PFMERGE, args.toArray());
}
private static final RedisCommand<Long> HSTRLEN = new RedisCommand<Long>("HSTRLEN");
// HSTRLEN: length of the value stored in the given hash field.
@Override
public Long hStrLen(byte[] key, byte[] field) {
return read(key, StringCodec.INSTANCE, HSTRLEN, key, field);
}
// Stream commands are delegated to a dedicated adapter sharing this connection's executor.
@Override
public RedisStreamCommands streamCommands() {
return new RedissonStreamCommands(this, executorService);
}
private static final RedisStrictCommand<List<Object>> BITFIELD = new RedisStrictCommand<>("BITFIELD", new ObjectListReplayDecoder<>());
// BITFIELD: translates the Spring Data sub-command chain into Redis syntax.
// Type is encoded as i<bits>/u<bits> (signed/unsigned); offset is prefixed with '#'
// for type-based (non-zero-based) offsets. The whole sequence is dispatched as a
// write when any SET/INCRBY is present, otherwise as a read.
@Override
public List<Long> bitField(byte[] key, BitFieldSubCommands subCommands) {
List<Object> params = new ArrayList<>();
params.add(key);
boolean writeOp = false;
for (BitFieldSubCommands.BitFieldSubCommand subCommand : subCommands) {
String size = "u";
if (subCommand.getType().isSigned()) {
size = "i";
}
size += subCommand.getType().getBits();
String offset = "#";
if (subCommand.getOffset().isZeroBased()) {
offset = "";
}
offset += subCommand.getOffset().getValue();
if (subCommand instanceof BitFieldSubCommands.BitFieldGet) {
params.add("GET");
params.add(size);
params.add(offset);
} else if (subCommand instanceof BitFieldSubCommands.BitFieldSet) {
writeOp = true;
params.add("SET");
params.add(size);
params.add(offset);
params.add(((BitFieldSubCommands.BitFieldSet) subCommand).getValue());
} else if (subCommand instanceof BitFieldSubCommands.BitFieldIncrBy) {
writeOp = true;
params.add("INCRBY");
params.add(size);
params.add(offset);
params.add(((BitFieldSubCommands.BitFieldIncrBy) subCommand).getValue());
BitFieldSubCommands.BitFieldIncrBy.Overflow overflow = ((BitFieldSubCommands.BitFieldIncrBy) subCommand).getOverflow();
if (overflow != null) {
params.add("OVERFLOW");
params.add(overflow);
}
}
}
if (writeOp) {
return write(key, StringCodec.INSTANCE, BITFIELD, params.toArray());
}
return read(key, StringCodec.INSTANCE, BITFIELD, params.toArray());
}
// EXISTS over several keys, routed by the first key.
@Override
public Long exists(byte[]... keys) {
return read(keys[0], StringCodec.INSTANCE, RedisCommands.EXISTS_LONG, Arrays.asList(keys).toArray());
}
// TOUCH: refreshes last-access time; returns the number of keys touched.
@Override
public Long touch(byte[]... keys) {
return read(keys[0], StringCodec.INSTANCE, RedisCommands.TOUCH_LONG, Arrays.asList(keys).toArray());
}
// OBJECT ENCODING reply mapped onto Spring Data's ValueEncoding.
private static final RedisStrictCommand<ValueEncoding> OBJECT_ENCODING = new RedisStrictCommand<ValueEncoding>("OBJECT", "ENCODING", new Convertor<ValueEncoding>() {
@Override
public ValueEncoding convert(Object obj) {
return ValueEncoding.of((String) obj);
}
});
@Override
public ValueEncoding encodingOf(byte[] key) {
Assert.notNull(key, "Key must not be null!");
return read(key, StringCodec.INSTANCE, OBJECT_ENCODING, key);
}
// OBJECT IDLETIME reply (seconds) wrapped in a Duration.
private static final RedisStrictCommand<Duration> OBJECT_IDLETIME = new RedisStrictCommand<>("OBJECT", "IDLETIME", new Convertor<Duration>() {
@Override
public Duration convert(Object obj) {
return Duration.ofSeconds((Long)obj);
}
});
@Override
public Duration idletime(byte[] key) {
Assert.notNull(key, "Key must not be null!");
return read(key, StringCodec.INSTANCE, OBJECT_IDLETIME, key);
}
private static final RedisStrictCommand<Long> OBJECT_REFCOUNT = new RedisStrictCommand<Long>("OBJECT", "REFCOUNT");
// OBJECT REFCOUNT.
@Override
public Long refcount(byte[] key) {
Assert.notNull(key, "Key must not be null!");
return read(key, StringCodec.INSTANCE, OBJECT_REFCOUNT, key);
}
private static final RedisStrictCommand<Long> BITPOS = new RedisStrictCommand<>("BITPOS");
// BITPOS key bit [start [end]]: position of the first bit equal to `bit`.
// The end bound is only sent when the start bound is present, since the command
// does not accept an end without a start.
@Override
public Long bitPos(byte[] key, boolean bit, org.springframework.data.domain.Range<Long> range) {
Assert.notNull(key, "Key must not be null!");
Assert.notNull(range, "Range must not be null! Use Range.unbounded() instead.");
List<Object> params = new ArrayList<>();
params.add(key);
if (bit) {
params.add(1);
} else {
params.add(0);
}
if (range.getLowerBound().isBounded()) {
params.add(range.getLowerBound().getValue().get());
if (range.getUpperBound().isBounded()) {
params.add(range.getUpperBound().getValue().get());
}
}
return read(key, StringCodec.INSTANCE, BITPOS, params.toArray());
}
// RESTORE with an optional REPLACE flag; without it, delegates to the plain overload.
@Override
public void restore(byte[] key, long ttlInMillis, byte[] serializedValue, boolean replace) {
if (replace) {
write(key, StringCodec.INSTANCE, RedisCommands.RESTORE, key, ttlInMillis, serializedValue, "REPLACE");
return;
}
restore(key, ttlInMillis, serializedValue);
}
private static final RedisCommand<Set<byte[]>> ZREVRANGEBYLEX = new RedisCommand<>("ZREVRANGEBYLEX", new ObjectSetReplayDecoder<byte[]>());
// ZREVRANGEBYLEX key max min [LIMIT offset count]; note max precedes min for the
// reversed command. Unbounded ends map to the lex markers '-'/'+'.
@Override
public Set<byte[]> zRevRangeByLex(byte[] key, Range range, Limit limit) {
String min = value(range.getMin(), "-");
String max = value(range.getMax(), "+");
List<Object> args = new ArrayList<Object>();
args.add(key);
args.add(max);
args.add(min);
if (!limit.isUnlimited()) {
args.add("LIMIT");
args.add(limit.getOffset());
args.add(limit.getCount());
}
return read(key, ByteArrayCodec.INSTANCE, ZREVRANGEBYLEX, args.toArray());
}
private static final RedisStrictCommand<Long> ZLEXCOUNT = new RedisStrictCommand<>("ZLEXCOUNT");
// ZLEXCOUNT: number of members within the lexicographic range.
@Override
public Long zLexCount(byte[] key, Range range) {
String min = value(range.getMin(), "-");
String max = value(range.getMax(), "+");
return read(key, StringCodec.INSTANCE, ZLEXCOUNT, key, min, max);
}
}
|
RedissonConnection
|
java
|
assertj__assertj-core
|
assertj-tests/assertj-integration-tests/assertj-core-osgi/src/test/java/org/assertj/tests/core/osgi/soft/CustomSoftAssertionTest.java
|
{
"start": 2654,
"end": 3158
}
|
class ____<KEY, VALUE>
extends AbstractMapAssert<TestProxyableMapAssert<KEY, VALUE>, Map<KEY, VALUE>, KEY, VALUE> {
public TestProxyableMapAssert(Map<KEY, VALUE> actual) {
super(actual, TestProxyableMapAssert.class);
}
@Override
protected <ELEMENT> AbstractListAssert<?, List<? extends ELEMENT>, ELEMENT, ObjectAssert<ELEMENT>> newListAssertInstance(List<? extends ELEMENT> newActual) {
return new ListAssert<>(newActual);
}
}
public static
|
TestProxyableMapAssert
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/bvt/sql/mysql/MySqlSetTest_1.java
|
{
"start": 956,
"end": 2016
}
|
class ____ extends TestCase {
public void test_0() throws Exception {
String sql = "SET @@session.autocommit = ON;";
MySqlStatementParser parser = new MySqlStatementParser(sql);
List<SQLStatement> stmtList = parser.parseStatementList();
SQLStatement stmt = stmtList.get(0);
// print(stmtList);
assertEquals(1, stmtList.size());
MySqlSchemaStatVisitor visitor = new MySqlSchemaStatVisitor();
stmt.accept(visitor);
// System.out.println("Tables : " + visitor.getTables());
// System.out.println("fields : " + visitor.getColumns());
// System.out.println("coditions : " + visitor.getConditions());
// System.out.println("orderBy : " + visitor.getOrderByColumns());
assertEquals(0, visitor.getTables().size());
assertEquals(0, visitor.getColumns().size());
assertEquals(0, visitor.getConditions().size());
String text = SQLUtils.toMySqlString(stmt);
assertEquals("SET @@session.autocommit = ON;", text);
}
}
|
MySqlSetTest_1
|
java
|
spring-projects__spring-boot
|
module/spring-boot-integration/src/main/java/org/springframework/boot/integration/autoconfigure/IntegrationGraphEndpointAutoConfiguration.java
|
{
"start": 1982,
"end": 2364
}
|
class ____ {
@Bean
@ConditionalOnMissingBean
IntegrationGraphEndpoint integrationGraphEndpoint(IntegrationGraphServer integrationGraphServer) {
return new IntegrationGraphEndpoint(integrationGraphServer);
}
@Bean
@ConditionalOnMissingBean
IntegrationGraphServer integrationGraphServer() {
return new IntegrationGraphServer();
}
}
|
IntegrationGraphEndpointAutoConfiguration
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/ProfileServlet.java
|
{
"start": 6225,
"end": 15116
}
|
enum ____ {
SUMMARY,
TRACES,
FLAT,
COLLAPSED,
// No SVG in 2.x asyncprofiler.
SVG,
TREE,
JFR,
// In 2.x asyncprofiler, this is how you get flamegraphs.
HTML
}
private final Lock profilerLock = new ReentrantLock();
private transient volatile Process process;
private final String asyncProfilerHome;
private Integer pid;
public ProfileServlet() {
this.asyncProfilerHome = getAsyncProfilerHome();
this.pid = ProcessUtils.getPid();
LOG.info("Servlet process PID: {} asyncProfilerHome: {}", pid, asyncProfilerHome);
}
static void setIsTestRun(boolean isTestRun) {
ProfileServlet.isTestRun = isTestRun;
}
@Override
protected void doGet(final HttpServletRequest req, final HttpServletResponse resp)
throws IOException {
if (!HttpServer2.isInstrumentationAccessAllowed(getServletContext(), req, resp)) {
resp.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
setResponseHeader(resp);
resp.getWriter().write("Unauthorized: Instrumentation access is not allowed!");
return;
}
// make sure async profiler home is set
if (asyncProfilerHome == null || asyncProfilerHome.trim().isEmpty()) {
resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
setResponseHeader(resp);
resp.getWriter().write("ASYNC_PROFILER_HOME env is not set.\n\n"
+ "Please ensure the prerequisites for the Profiler Servlet have been installed and the\n"
+ "environment is properly configured.");
return;
}
// if pid is explicitly specified, use it else default to current process
pid = getInteger(req, "pid", pid);
// if pid is not specified in query param and if current process pid cannot be determined
if (pid == null) {
resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
setResponseHeader(resp);
resp.getWriter().write(
"'pid' query parameter unspecified or unable to determine PID of current process.");
return;
}
final int duration = getInteger(req, "duration", DEFAULT_DURATION_SECONDS);
final Output output = getOutput(req);
final Event event = getEvent(req);
final Long interval = getLong(req, "interval");
final Integer jstackDepth = getInteger(req, "jstackdepth", null);
final Long bufsize = getLong(req, "bufsize");
final boolean thread = req.getParameterMap().containsKey("thread");
final boolean simple = req.getParameterMap().containsKey("simple");
final Integer width = getInteger(req, "width", null);
final Integer height = getInteger(req, "height", null);
final Double minwidth = getMinWidth(req);
final boolean reverse = req.getParameterMap().containsKey("reverse");
if (process == null || !process.isAlive()) {
try {
int lockTimeoutSecs = 3;
if (profilerLock.tryLock(lockTimeoutSecs, TimeUnit.SECONDS)) {
try {
File outputFile = new File(OUTPUT_DIR,
"async-prof-pid-" + pid + "-" + event.name().toLowerCase() + "-" + ID_GEN
.incrementAndGet() + "." + output.name().toLowerCase());
List<String> cmd = new ArrayList<>();
cmd.add(asyncProfilerHome + PROFILER_SCRIPT);
cmd.add("-e");
cmd.add(event.getInternalName());
cmd.add("-d");
cmd.add("" + duration);
cmd.add("-o");
cmd.add(output.name().toLowerCase());
cmd.add("-f");
cmd.add(outputFile.getAbsolutePath());
if (interval != null) {
cmd.add("-i");
cmd.add(interval.toString());
}
if (jstackDepth != null) {
cmd.add("-j");
cmd.add(jstackDepth.toString());
}
if (bufsize != null) {
cmd.add("-b");
cmd.add(bufsize.toString());
}
if (thread) {
cmd.add("-t");
}
if (simple) {
cmd.add("-s");
}
if (width != null) {
cmd.add("--width");
cmd.add(width.toString());
}
if (height != null) {
cmd.add("--height");
cmd.add(height.toString());
}
if (minwidth != null) {
cmd.add("--minwidth");
cmd.add(minwidth.toString());
}
if (reverse) {
cmd.add("--reverse");
}
cmd.add(pid.toString());
if (!isTestRun) {
process = ProcessUtils.runCmdAsync(cmd);
}
// set response and set refresh header to output location
setResponseHeader(resp);
resp.setStatus(HttpServletResponse.SC_ACCEPTED);
String relativeUrl = "/prof-output-hadoop/" + outputFile.getName();
resp.getWriter().write("Started [" + event.getInternalName()
+ "] profiling. This page will automatically redirect to " + relativeUrl + " after "
+ duration + " seconds. "
+ "If empty diagram and Linux 4.6+, see 'Basic Usage' section on the Async "
+ "Profiler Home Page, https://github.com/jvm-profiling-tools/async-profiler."
+ "\n\nCommand:\n" + Joiner.on(" ").join(cmd));
// to avoid auto-refresh by ProfileOutputServlet, refreshDelay can be specified
// via url param
int refreshDelay = getInteger(req, "refreshDelay", 0);
// instead of sending redirect, set auto-refresh so that browsers will refresh
// with redirected url
resp.setHeader("Refresh", (duration + refreshDelay) + ";" + relativeUrl);
resp.getWriter().flush();
} finally {
profilerLock.unlock();
}
} else {
setResponseHeader(resp);
resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
resp.getWriter()
.write("Unable to acquire lock. Another instance of profiler might be running.");
LOG.warn("Unable to acquire lock in {} seconds. Another instance of profiler might be"
+ " running.", lockTimeoutSecs);
}
} catch (InterruptedException e) {
LOG.warn("Interrupted while acquiring profile lock.", e);
resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
}
} else {
setResponseHeader(resp);
resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
resp.getWriter().write("Another instance of profiler is already running.");
}
}
private Integer getInteger(final HttpServletRequest req, final String param,
final Integer defaultValue) {
final String value = req.getParameter(param);
if (value != null) {
try {
return Integer.valueOf(value);
} catch (NumberFormatException e) {
return defaultValue;
}
}
return defaultValue;
}
private Long getLong(final HttpServletRequest req, final String param) {
final String value = req.getParameter(param);
if (value != null) {
try {
return Long.valueOf(value);
} catch (NumberFormatException e) {
return null;
}
}
return null;
}
private Double getMinWidth(final HttpServletRequest req) {
final String value = req.getParameter("minwidth");
if (value != null) {
try {
return Double.valueOf(value);
} catch (NumberFormatException e) {
return null;
}
}
return null;
}
private Event getEvent(final HttpServletRequest req) {
final String eventArg = req.getParameter("event");
if (eventArg != null) {
Event event = Event.fromInternalName(eventArg);
return event == null ? Event.CPU : event;
}
return Event.CPU;
}
private Output getOutput(final HttpServletRequest req) {
final String outputArg = req.getParameter("output");
if (req.getParameter("output") != null) {
try {
return Output.valueOf(outputArg.trim().toUpperCase());
} catch (IllegalArgumentException e) {
return Output.HTML;
}
}
return Output.HTML;
}
static void setResponseHeader(final HttpServletResponse response) {
response.setHeader(ACCESS_CONTROL_ALLOW_METHODS, ALLOWED_METHODS);
response.setHeader(ACCESS_CONTROL_ALLOW_ORIGIN, "*");
response.setContentType(CONTENT_TYPE_TEXT);
}
static String getAsyncProfilerHome() {
String asyncProfilerHome = System.getenv(ASYNC_PROFILER_HOME_ENV);
// if ENV is not set, see if -Dasync.profiler.home=/path/to/async/profiler/home is set
if (asyncProfilerHome == null || asyncProfilerHome.trim().isEmpty()) {
asyncProfilerHome = System.getProperty(ASYNC_PROFILER_HOME_SYSTEM_PROPERTY);
}
return asyncProfilerHome;
}
}
|
Output
|
java
|
apache__flink
|
flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/data/util/DataFormatConverters.java
|
{
"start": 23089,
"end": 23537
}
|
class ____ extends IdentityConverter<Boolean> {
private static final long serialVersionUID = 3618373319753553272L;
public static final BooleanConverter INSTANCE = new BooleanConverter();
private BooleanConverter() {}
@Override
Boolean toExternalImpl(RowData row, int column) {
return row.getBoolean(column);
}
}
/** Converter for byte. */
public static final
|
BooleanConverter
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/loader/ast/spi/Loadable.java
|
{
"start": 759,
"end": 3072
}
|
interface ____ extends ModelPart, RootTableGroupProducer {
/**
* The name for this loadable, for use as the root when generating
* {@linkplain org.hibernate.spi.NavigablePath relative paths}
*/
String getRootPathName();
/**
* @deprecated Use {@link #isAffectedByInfluencers(LoadQueryInfluencers, boolean)} instead
*/
@Deprecated(forRemoval = true)
default boolean isAffectedByInfluencers(LoadQueryInfluencers influencers) {
return isAffectedByInfluencers( influencers, false );
}
default boolean isAffectedByInfluencers(LoadQueryInfluencers influencers, boolean onlyApplyForLoadByKeyFilters) {
return isAffectedByEntityGraph( influencers )
|| isAffectedByEnabledFetchProfiles( influencers )
|| isAffectedByEnabledFilters( influencers, onlyApplyForLoadByKeyFilters )
|| isAffectedByBatchSize( influencers );
}
default boolean isNotAffectedByInfluencers(LoadQueryInfluencers influencers) {
return !isAffectedByEntityGraph( influencers )
&& !isAffectedByEnabledFetchProfiles( influencers )
&& !isAffectedByEnabledFilters( influencers )
&& !isAffectedByBatchSize( influencers )
&& influencers.getEnabledCascadingFetchProfile() == null;
}
private boolean isAffectedByBatchSize(LoadQueryInfluencers influencers) {
return influencers.getBatchSize() > 0
&& influencers.getBatchSize() != getBatchSize();
}
int getBatchSize();
/**
* Whether any of the "influencers" affect this loadable.
* @deprecated Use {@link #isAffectedByEnabledFilters(LoadQueryInfluencers, boolean)} instead
*/
@Deprecated(forRemoval = true)
default boolean isAffectedByEnabledFilters(LoadQueryInfluencers influencers) {
return isAffectedByEnabledFilters( influencers, false );
}
/**
* Whether any of the "influencers" affect this loadable.
*/
boolean isAffectedByEnabledFilters(LoadQueryInfluencers influencers, boolean onlyApplyForLoadByKeyFilters);
/**
* Whether the {@linkplain LoadQueryInfluencers#getEffectiveEntityGraph() effective entity-graph}
* applies to this loadable
*/
boolean isAffectedByEntityGraph(LoadQueryInfluencers influencers);
/**
* Whether any of the {@linkplain LoadQueryInfluencers#getEnabledFetchProfileNames()}
* apply to this loadable
*/
boolean isAffectedByEnabledFetchProfiles(LoadQueryInfluencers influencers);
}
|
Loadable
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/annotations/basic/ListOfStringTest.java
|
{
"start": 1150,
"end": 1330
}
|
class ____ {
@Id long id;
List<String> stringList; // this should be OK
Unbroken(List<String> stringList) {
this.stringList = stringList;
}
Unbroken() {
}
}
}
|
Unbroken
|
java
|
apache__camel
|
components/camel-sql/src/test/java/org/apache/camel/component/sql/SqlConsumerOutputTypeStreamListTest.java
|
{
"start": 1417,
"end": 4720
}
|
class ____ extends CamelTestSupport {
private EmbeddedDatabase db;
@Override
public void doPreSetup() throws Exception {
db = new EmbeddedDatabaseBuilder()
.setName(getClass().getSimpleName())
.setType(EmbeddedDatabaseType.H2)
.addScript("sql/createAndPopulateDatabase.sql").build();
}
@Override
public void doPostTearDown() throws Exception {
if (db != null) {
db.shutdown();
}
}
@Test
public void testReturnAnIterator() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedMinimumMessageCount(1);
context.getRouteController().startRoute("route1");
mock.assertIsSatisfied();
assertThat(resultBodyAt(mock, 0), instanceOf(Iterator.class));
}
@Test
public void testSplit() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedMinimumMessageCount(3);
context.getRouteController().startRoute("route2");
mock.assertIsSatisfied();
assertThat(resultBodyAt(mock, 0), instanceOf(Map.class));
assertThat(resultBodyAt(mock, 1), instanceOf(Map.class));
assertThat(resultBodyAt(mock, 2), instanceOf(Map.class));
}
@Test
public void testSplitWithModel() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedMinimumMessageCount(3);
context.getRouteController().startRoute("route3");
mock.assertIsSatisfied();
assertThat(resultBodyAt(mock, 0), instanceOf(ProjectModel.class));
assertThat(resultBodyAt(mock, 1), instanceOf(ProjectModel.class));
assertThat(resultBodyAt(mock, 2), instanceOf(ProjectModel.class));
}
private Object resultBodyAt(MockEndpoint result, int index) {
return result.assertExchangeReceived(index).getIn().getBody();
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
getContext().getComponent("sql", SqlComponent.class).setDataSource(db);
from("sql:select * from projects order by id?outputType=StreamList&initialDelay=0&delay=50").routeId("route1")
.noAutoStartup()
.to("log:stream")
.to("mock:result");
from("sql:select * from projects order by id?outputType=StreamList&initialDelay=0&delay=50").routeId("route2")
.noAutoStartup()
.to("log:stream")
.split(body()).streaming()
.to("log:row")
.to("mock:result")
.end();
from("sql:select * from projects order by id?outputType=StreamList&outputClass=org.apache.camel.component.sql.ProjectModel&initialDelay=0&delay=50")
.routeId("route3").noAutoStartup()
.to("log:stream")
.split(body()).streaming()
.to("log:row")
.to("mock:result")
.end();
}
};
}
}
|
SqlConsumerOutputTypeStreamListTest
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/basicType/LongTest2_obj.java
|
{
"start": 275,
"end": 4053
}
|
class ____ extends TestCase {
public void test_0() throws Exception {
String json = "{\"v1\":-1883391953414482124,\"v2\":-3019416596934963650,\"v3\":6497525620823745793,\"v4\":2136224289077142499,\"v5\":-2090575024006307745}";
String json2 = "{\"v1\":\"-1883391953414482124\",\"v2\":\"-3019416596934963650\",\"v3\":\"6497525620823745793\",\"v4\":\"2136224289077142499\",\"v5\":\"-2090575024006307745\"}";
Model m1 = JSON.parseObject(json, Model.class);
Model m2 = JSON.parseObject(json2, Model.class);
assertNotNull(m1);
assertNotNull(m2);
assertEquals(-1883391953414482124L, m1.v1.longValue());
assertEquals(-3019416596934963650L, m1.v2.longValue());
assertEquals(6497525620823745793L, m1.v3.longValue());
assertEquals(2136224289077142499L, m1.v4.longValue());
assertEquals(-2090575024006307745L, m1.v5.longValue());
assertEquals(-1883391953414482124L, m2.v1.longValue());
assertEquals(-3019416596934963650L, m2.v2.longValue());
assertEquals(6497525620823745793L, m2.v3.longValue());
assertEquals(2136224289077142499L, m2.v4.longValue());
assertEquals(-2090575024006307745L, m2.v5.longValue());
}
public void test_1() throws Exception {
String json = "{\"v1\":-1883391953414482124,\"v2\":-3019416596934963650,\"v3\":6497525620823745793,\"v4\":2136224289077142499,\"v5\":-2090575024006307745}";
String json2 = "{\"v1\":\"-1883391953414482124\",\"v2\":\"-3019416596934963650\",\"v3\":\"6497525620823745793\",\"v4\":\"2136224289077142499\",\"v5\":\"-2090575024006307745\"}";
Model m1 = new JSONReader(new StringReader(json)).readObject(Model.class);
Model m2 = new JSONReader(new StringReader(json2)).readObject(Model.class);
assertNotNull(m1);
assertNotNull(m2);
assertEquals(-1883391953414482124L, m1.v1.longValue());
assertEquals(-3019416596934963650L, m1.v2.longValue());
assertEquals(6497525620823745793L, m1.v3.longValue());
assertEquals(2136224289077142499L, m1.v4.longValue());
assertEquals(-2090575024006307745L, m1.v5.longValue());
assertEquals(-1883391953414482124L, m2.v1.longValue());
assertEquals(-3019416596934963650L, m2.v2.longValue());
assertEquals(6497525620823745793L, m2.v3.longValue());
assertEquals(2136224289077142499L, m2.v4.longValue());
assertEquals(-2090575024006307745L, m2.v5.longValue());
}
public void test_2() throws Exception {
String json = "[-1883391953414482124,-3019416596934963650,6497525620823745793,2136224289077142499,-2090575024006307745]";
String json2 = "[\"-1883391953414482124\",\"-3019416596934963650\",\"6497525620823745793\",\"2136224289077142499\",\"-2090575024006307745\"]";
Model m1 = new JSONReader(new StringReader(json), Feature.SupportArrayToBean).readObject(Model.class);
Model m2 = new JSONReader(new StringReader(json2), Feature.SupportArrayToBean).readObject(Model.class);
assertNotNull(m1);
assertNotNull(m2);
assertEquals(-1883391953414482124L, m1.v1.longValue());
assertEquals(-3019416596934963650L, m1.v2.longValue());
assertEquals(6497525620823745793L, m1.v3.longValue());
assertEquals(2136224289077142499L, m1.v4.longValue());
assertEquals(-2090575024006307745L, m1.v5.longValue());
assertEquals(-1883391953414482124L, m2.v1.longValue());
assertEquals(-3019416596934963650L, m2.v2.longValue());
assertEquals(6497525620823745793L, m2.v3.longValue());
assertEquals(2136224289077142499L, m2.v4.longValue());
assertEquals(-2090575024006307745L, m2.v5.longValue());
}
public static
|
LongTest2_obj
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/rest/messages/AggregatedTaskDetailsInfo.java
|
{
"start": 7551,
"end": 9338
}
|
class ____ {
private final List<Long> values = new ArrayList<>();
private final String name;
private long sum = 0;
private Percentile percentile = null;
MetricsStatistics(String name) {
this.name = name;
}
void addValue(long value) {
values.add(value);
sum += value;
}
private String getName() {
return name;
}
private Map<String, Long> toMap() {
Map<String, Long> result = new HashMap<>();
result.put("min", getMin());
result.put("max", getMax());
result.put("avg", getAvg());
result.put("sum", getSum());
result.put("median", getPercentile(50));
result.put("p25", getPercentile(25));
result.put("p75", getPercentile(75));
result.put("p95", getPercentile(95));
return result;
}
long getMin() {
return values.stream()
.reduce(BinaryOperator.minBy(Comparator.naturalOrder()))
.orElse(0L);
}
long getMax() {
return values.stream()
.reduce(BinaryOperator.maxBy(Comparator.naturalOrder()))
.orElse(0L);
}
long getSum() {
return sum;
}
long getAvg() {
return values.isEmpty() ? 0 : sum / values.size();
}
long getPercentile(int percent) {
if (percentile == null) {
percentile = new Percentile();
percentile.setData(values.stream().mapToDouble(Long::doubleValue).toArray());
}
return (long) percentile.evaluate(percent);
}
}
}
|
MetricsStatistics
|
java
|
google__dagger
|
javatests/dagger/internal/codegen/ComponentCreatorTest.java
|
{
"start": 19837,
"end": 20087
}
|
interface ____ {",
" Builder primitive(long l);",
" TestComponent build();",
" }")
.addLinesIf(
FACTORY,
" @Component.Factory",
"
|
Builder
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/async/AsyncSearchIndexServiceTests.java
|
{
"start": 1922,
"end": 7861
}
|
class ____ implements AsyncResponse<TestAsyncResponse> {
public final String test;
public final long expirationTimeMillis;
public String failure;
public TestAsyncResponse(String test, long expirationTimeMillis) {
this.test = test;
this.expirationTimeMillis = expirationTimeMillis;
}
public TestAsyncResponse(String test, long expirationTimeMillis, String failure) {
this.test = test;
this.expirationTimeMillis = expirationTimeMillis;
this.failure = failure;
}
public TestAsyncResponse(StreamInput input) throws IOException {
test = input.readOptionalString();
this.expirationTimeMillis = input.readLong();
failure = input.readOptionalString();
}
@Override
public long getExpirationTime() {
return expirationTimeMillis;
}
@Override
public TestAsyncResponse withExpirationTime(long expirationTime) {
return new TestAsyncResponse(test, expirationTime, failure);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeOptionalString(test);
out.writeLong(expirationTimeMillis);
out.writeOptionalString(failure);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
TestAsyncResponse that = (TestAsyncResponse) o;
return expirationTimeMillis == that.expirationTimeMillis
&& Objects.equals(test, that.test)
&& Objects.equals(failure, that.failure);
}
@Override
public int hashCode() {
return Objects.hash(test, expirationTimeMillis, failure);
}
@Override
public String toString() {
return "TestAsyncResponse{"
+ "test='"
+ test
+ '\''
+ "failure='"
+ failure
+ '\''
+ ", expirationTimeMillis="
+ expirationTimeMillis
+ '}';
}
@Override
public TestAsyncResponse convertToFailure(Exception exc) {
return new TestAsyncResponse(test, expirationTimeMillis, exc.getMessage());
}
@Override
public void incRef() {}
@Override
public boolean tryIncRef() {
return true;
}
@Override
public boolean decRef() {
return false;
}
@Override
public boolean hasReferences() {
return true;
}
}
@Before
public void setup() {
ClusterService clusterService = getInstanceFromNode(ClusterService.class);
BigArrays bigArrays = getInstanceFromNode(BigArrays.class);
TransportService transportService = getInstanceFromNode(TransportService.class);
indexService = new AsyncTaskIndexService<>(
"test",
clusterService,
transportService.getThreadPool().getThreadContext(),
client(),
ASYNC_SEARCH_ORIGIN,
TestAsyncResponse::new,
writableRegistry(),
bigArrays
);
}
public void testEncodeSearchResponse() throws IOException {
final int iterations = iterations(1, 20);
for (int i = 0; i < iterations; i++) {
long expirationTime = randomLong();
String testMessage = randomAlphaOfLength(10);
TestAsyncResponse initialResponse = new TestAsyncResponse(testMessage, expirationTime);
AsyncExecutionId executionId = new AsyncExecutionId(
Long.toString(randomNonNegativeLong()),
new TaskId(randomAlphaOfLength(10), randomNonNegativeLong())
);
PlainActionFuture<DocWriteResponse> createFuture = new PlainActionFuture<>();
indexService.createResponse(executionId.getDocId(), Map.of(), initialResponse, createFuture);
assertThat(createFuture.actionGet().getResult(), equalTo(DocWriteResponse.Result.CREATED));
if (randomBoolean()) {
PlainActionFuture<TestAsyncResponse> getFuture = new PlainActionFuture<>();
indexService.getResponse(executionId, randomBoolean(), getFuture);
assertThat(getFuture.actionGet(), equalTo(initialResponse));
}
int updates = randomIntBetween(1, 5);
for (int u = 0; u < updates; u++) {
if (randomBoolean()) {
testMessage = randomAlphaOfLength(10);
TestAsyncResponse updateResponse = new TestAsyncResponse(testMessage, randomLong());
PlainActionFuture<UpdateResponse> updateFuture = new PlainActionFuture<>();
indexService.updateResponse(executionId.getDocId(), Map.of(), updateResponse, updateFuture);
updateFuture.actionGet();
} else {
expirationTime = randomLong();
PlainActionFuture<UpdateResponse> updateFuture = new PlainActionFuture<>();
indexService.updateExpirationTime(executionId.getDocId(), expirationTime, updateFuture);
updateFuture.actionGet();
}
if (randomBoolean()) {
PlainActionFuture<TestAsyncResponse> getFuture = new PlainActionFuture<>();
indexService.getResponse(executionId, randomBoolean(), getFuture);
assertThat(getFuture.actionGet().test, equalTo(testMessage));
assertThat(getFuture.actionGet().expirationTimeMillis, equalTo(expirationTime));
}
}
}
}
static
|
TestAsyncResponse
|
java
|
apache__kafka
|
storage/src/main/java/org/apache/kafka/server/log/remote/metadata/storage/RemotePartitionMetadataStore.java
|
{
"start": 1835,
"end": 1986
}
|
class ____ a store to maintain the {@link RemotePartitionDeleteMetadata} and {@link RemoteLogMetadataCache} for each topic partition.
*/
public
|
represents
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/runtime/state/filesystem/FsCheckpointStreamFactoryTest.java
|
{
"start": 1438,
"end": 8062
}
|
class ____ {
@TempDir private Path exclusiveStateDir;
@TempDir private Path sharedStateDir;
// ------------------------------------------------------------------------
// tests
// ------------------------------------------------------------------------
@Test
@SuppressWarnings("ConstantConditions")
void testWriteFlushesIfAboveThreshold() throws IOException {
int fileSizeThreshold = 100;
final FsCheckpointStreamFactory factory =
createFactory(
FileSystem.getLocalFileSystem(), fileSizeThreshold, fileSizeThreshold);
final FsCheckpointStreamFactory.FsCheckpointStateOutputStream stream =
factory.createCheckpointStateOutputStream(CheckpointedStateScope.EXCLUSIVE);
stream.write(new byte[fileSizeThreshold]);
File[] files = new File(exclusiveStateDir.toUri()).listFiles();
assertThat(files).hasSize(1);
File file = files[0];
assertThat(file).hasSize(fileSizeThreshold);
stream.write(new byte[fileSizeThreshold - 1]); // should buffer without flushing
stream.write(127); // should buffer without flushing
assertThat(file).hasSize(fileSizeThreshold);
}
@Test
void testExclusiveStateHasRelativePathHandles() throws IOException {
final FsCheckpointStreamFactory factory = createFactory(FileSystem.getLocalFileSystem(), 0);
final FsCheckpointStreamFactory.FsCheckpointStateOutputStream stream =
factory.createCheckpointStateOutputStream(CheckpointedStateScope.EXCLUSIVE);
stream.write(1657);
final StreamStateHandle handle = stream.closeAndGetHandle();
assertThat(handle).isInstanceOf(RelativeFileStateHandle.class);
assertPathsEqual(
exclusiveStateDir, ((RelativeFileStateHandle) handle).getFilePath().getParent());
}
@Test
void testSharedStateHasAbsolutePathHandles() throws IOException {
final FsCheckpointStreamFactory factory = createFactory(FileSystem.getLocalFileSystem(), 0);
final FsCheckpointStreamFactory.FsCheckpointStateOutputStream stream =
factory.createCheckpointStateOutputStream(CheckpointedStateScope.SHARED);
stream.write(0);
final StreamStateHandle handle = stream.closeAndGetHandle();
assertThat(handle).isInstanceOf(FileStateHandle.class);
assertThat(handle).isNotInstanceOf(RelativeFileStateHandle.class);
assertPathsEqual(sharedStateDir, ((FileStateHandle) handle).getFilePath().getParent());
}
@Test
void testEntropyMakesExclusiveStateAbsolutePaths() throws IOException {
final FsStorageEntropyTest.TestEntropyAwareFs fs =
new FsStorageEntropyTest.TestEntropyAwareFs();
final FsCheckpointStreamFactory factory = createFactory(fs, 0);
final FsCheckpointStreamFactory.FsCheckpointStateOutputStream stream =
factory.createCheckpointStateOutputStream(CheckpointedStateScope.EXCLUSIVE);
stream.write(0);
final StreamStateHandle handle = stream.closeAndGetHandle();
assertThat(handle).isInstanceOf(FileStateHandle.class);
assertThat(handle).isNotInstanceOf(RelativeFileStateHandle.class);
assertPathsEqual(
exclusiveStateDir.resolve(fs.generateEntropy()),
((FileStateHandle) handle).getFilePath().getParent());
}
@Test
void testFSWithDisabledEntropyHasRelativePaths() throws IOException {
final FsCheckpointStreamFactory factory = createFactory(new DisabledEntropyFS(), 0);
final FsCheckpointStreamFactory.FsCheckpointStateOutputStream stream =
factory.createCheckpointStateOutputStream(CheckpointedStateScope.EXCLUSIVE);
stream.write(0);
final StreamStateHandle handle = stream.closeAndGetHandle();
assertThat(handle).isInstanceOf(RelativeFileStateHandle.class);
assertPathsEqual(
exclusiveStateDir, ((RelativeFileStateHandle) handle).getFilePath().getParent());
}
@Test
void testFlushUnderThreshold() throws IOException {
flushAndVerify(10, 10, true);
}
@Test
void testFlushAboveThreshold() throws IOException {
flushAndVerify(10, 11, false);
}
private void flushAndVerify(int minFileSize, int bytesToFlush, boolean expectEmpty)
throws IOException {
FsCheckpointStreamFactory.FsCheckpointStateOutputStream stream =
createFactory(new FsStorageEntropyTest.TestEntropyAwareFs(), minFileSize)
.createCheckpointStateOutputStream(CheckpointedStateScope.EXCLUSIVE);
stream.write(new byte[bytesToFlush], 0, bytesToFlush);
stream.flush();
assertThat(new File(exclusiveStateDir.toUri()).listFiles()).hasSize(expectEmpty ? 0 : 1);
}
// ------------------------------------------------------------------------
// test utils
// ------------------------------------------------------------------------
private static void assertPathsEqual(Path expected, org.apache.flink.core.fs.Path actual) {
final org.apache.flink.core.fs.Path reNormalizedExpected =
new org.apache.flink.core.fs.Path(
new org.apache.flink.core.fs.Path(expected.toUri()).toString());
assertThat(actual).isEqualTo(reNormalizedExpected);
}
private FsCheckpointStreamFactory createFactory(FileSystem fs, int fileSizeThreshold) {
return createFactory(fs, fileSizeThreshold, 4096);
}
private FsCheckpointStreamFactory createFactory(
FsStorageEntropyTest.TestEntropyAwareFs fs, int fileSizeThreshold) {
final Path exclusiveStateDirWithEntropy =
exclusiveStateDir.resolve(Objects.requireNonNull(fs.getEntropyInjectionKey()));
return new FsCheckpointStreamFactory(
fs,
new org.apache.flink.core.fs.Path(exclusiveStateDirWithEntropy.toUri()),
new org.apache.flink.core.fs.Path(sharedStateDir.toUri()),
fileSizeThreshold,
4096);
}
private FsCheckpointStreamFactory createFactory(
FileSystem fs, int fileSizeThreshold, int bufferSize) {
return new FsCheckpointStreamFactory(
fs,
new org.apache.flink.core.fs.Path(exclusiveStateDir.toUri()),
new org.apache.flink.core.fs.Path(sharedStateDir.toUri()),
fileSizeThreshold,
bufferSize);
}
private static final
|
FsCheckpointStreamFactoryTest
|
java
|
apache__camel
|
components/camel-thrift/src/generated/java/org/apache/camel/dataformat/thrift/ThriftDataFormatConfigurer.java
|
{
"start": 728,
"end": 3156
}
|
class ____ extends org.apache.camel.support.component.PropertyConfigurerSupport implements GeneratedPropertyConfigurer, ExtendedPropertyConfigurerGetter {

    // Option-name -> option-type lookup. The map is case-insensitive, so both
    // the canonical and lower-cased spellings resolve to the same entry.
    private static final Map<String, Object> ALL_OPTIONS;
    static {
        Map<String, Object> options = new CaseInsensitiveMap();
        options.put("ContentTypeFormat", java.lang.String.class);
        options.put("ContentTypeHeader", boolean.class);
        options.put("InstanceClass", java.lang.String.class);
        ALL_OPTIONS = options;
    }

    /**
     * Applies a single named option to the given {@code ThriftDataFormat},
     * converting the raw value to the option's declared type.
     *
     * @return true when the option was recognized and set, false otherwise
     */
    @Override
    public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
        ThriftDataFormat target = (ThriftDataFormat) obj;
        // Normalize only when case-insensitive matching was requested; otherwise
        // the caller must supply the exact camelCase (or all-lowercase) spelling.
        String key = ignoreCase ? name.toLowerCase() : name;
        if ("contenttypeformat".equals(key) || "contentTypeFormat".equals(key)) {
            target.setContentTypeFormat(property(camelContext, java.lang.String.class, value));
            return true;
        }
        if ("contenttypeheader".equals(key) || "contentTypeHeader".equals(key)) {
            target.setContentTypeHeader(property(camelContext, boolean.class, value));
            return true;
        }
        if ("instanceclass".equals(key) || "instanceClass".equals(key)) {
            target.setInstanceClass(property(camelContext, java.lang.String.class, value));
            return true;
        }
        return false;
    }

    /** Returns the full option-name -> type map; {@code target} is unused. */
    @Override
    public Map<String, Object> getAllOptions(Object target) {
        return ALL_OPTIONS;
    }

    /** Resolves an option name to its declared type, or null if unknown. */
    @Override
    public Class<?> getOptionType(String name, boolean ignoreCase) {
        String key = ignoreCase ? name.toLowerCase() : name;
        if ("contenttypeformat".equals(key) || "contentTypeFormat".equals(key)) {
            return java.lang.String.class;
        }
        if ("contenttypeheader".equals(key) || "contentTypeHeader".equals(key)) {
            return boolean.class;
        }
        if ("instanceclass".equals(key) || "instanceClass".equals(key)) {
            return java.lang.String.class;
        }
        return null;
    }

    /** Reads the current value of a named option, or null if unknown. */
    @Override
    public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
        ThriftDataFormat target = (ThriftDataFormat) obj;
        String key = ignoreCase ? name.toLowerCase() : name;
        if ("contenttypeformat".equals(key) || "contentTypeFormat".equals(key)) {
            return target.getContentTypeFormat();
        }
        if ("contenttypeheader".equals(key) || "contentTypeHeader".equals(key)) {
            return target.isContentTypeHeader();
        }
        if ("instanceclass".equals(key) || "instanceClass".equals(key)) {
            return target.getInstanceClass();
        }
        return null;
    }
}
|
ThriftDataFormatConfigurer
|
java
|
mapstruct__mapstruct
|
processor/src/test/java/org/mapstruct/ap/test/inheritance/complex/StandaloneSourceCompositeTargetCompositeMapper.java
|
{
"start": 307,
"end": 691
}
|
interface ____ {

    // Singleton instance resolved through MapStruct's generated implementation.
    StandaloneSourceCompositeTargetCompositeMapper INSTANCE =
        Mappers.getMapper( StandaloneSourceCompositeTargetCompositeMapper.class );

    // Maps the composite source object onto the composite target type.
    TargetComposite sourceToTarget(SourceComposite source);

    // Maps a base source element to its reference representation.
    Reference asReference(SourceBase source);

    // Widens a list of Integer into an Iterable of Number.
    Iterable<Number> intListToNumberIterable(List<Integer> source);
}
|
StandaloneSourceCompositeTargetCompositeMapper
|
java
|
alibaba__nacos
|
console/src/test/java/com/alibaba/nacos/console/proxy/naming/ServiceProxyTest.java
|
{
"start": 1968,
"end": 6024
}
|
class ____ {

    private static final String NAMESPACE_ID = "namespaceId";

    private static final String SERVICE_NAME = "serviceName";

    private static final String GROUP_NAME = "groupName";

    @Mock
    private ServiceHandler serviceHandler;

    // Unit under test: a thin proxy that delegates every call to the handler.
    private ServiceProxy serviceProxy;

    private ServiceForm serviceForm;

    private ServiceMetadata serviceMetadata;

    @BeforeEach
    public void setUp() {
        serviceProxy = new ServiceProxy(serviceHandler);
        serviceForm = new ServiceForm();
        serviceMetadata = new ServiceMetadata();
    }

    @Test
    public void createService() throws Exception {
        assertDoesNotThrow(() -> serviceProxy.createService(serviceForm, serviceMetadata));
        verify(serviceHandler).createService(serviceForm, serviceMetadata);
    }

    @Test
    public void updateService() throws Exception {
        doNothing().when(serviceHandler).updateService(serviceForm, serviceMetadata);
        serviceProxy.updateService(serviceForm, serviceMetadata);
        // Assert the delegation actually happened (previously this test made no assertion).
        verify(serviceHandler).updateService(serviceForm, serviceMetadata);
    }

    @Test
    public void deleteService() throws Exception {
        doNothing().when(serviceHandler).deleteService(NAMESPACE_ID, SERVICE_NAME, GROUP_NAME);
        serviceProxy.deleteService(NAMESPACE_ID, SERVICE_NAME, GROUP_NAME);
        // Assert the delegation actually happened (previously this test made no assertion).
        verify(serviceHandler).deleteService(NAMESPACE_ID, SERVICE_NAME, GROUP_NAME);
    }

    @Test
    public void getSelectorTypeList() throws NacosException {
        List<String> expectedSelectorTypes = Arrays.asList("type1", "type2");
        when(serviceHandler.getSelectorTypeList()).thenReturn(expectedSelectorTypes);
        List<String> actualSelectorTypes = serviceProxy.getSelectorTypeList();
        assertEquals(expectedSelectorTypes, actualSelectorTypes,
                "The selector type list should match the expected list.");
    }

    @Test
    public void getServiceList() throws NacosException {
        Object expectedServiceList = new Object();
        when(serviceHandler.getServiceList(anyBoolean(), anyString(), anyInt(), anyInt(), anyString(), anyString(),
                anyBoolean())).thenReturn(expectedServiceList);
        Object actualServiceList = serviceProxy.getServiceList(true, "namespaceId", 1, 10, "serviceName", "groupName",
                true);
        assertEquals(expectedServiceList, actualServiceList);
    }

    @Test
    public void getSubscribers() throws Exception {
        Page<SubscriberInfo> expectedPage = new Page<>();
        when(serviceHandler.getSubscribers(anyInt(), anyInt(), anyString(), anyString(), anyString(),
                anyBoolean())).thenReturn(expectedPage);
        Page<SubscriberInfo> result = serviceProxy.getSubscribers(1, 10, "namespaceId", "serviceName", "groupName",
                true);
        assertNotNull(result);
        assertEquals(expectedPage, result);
        verify(serviceHandler, times(1)).getSubscribers(1, 10, "namespaceId", "serviceName", "groupName", true);
    }

    @Test
    public void getServiceDetail() throws NacosException {
        ServiceDetailInfo expectedInfo = new ServiceDetailInfo();
        when(serviceHandler.getServiceDetail(NAMESPACE_ID, SERVICE_NAME, GROUP_NAME)).thenReturn(expectedInfo);
        ServiceDetailInfo actualInfo = serviceProxy.getServiceDetail(NAMESPACE_ID, SERVICE_NAME, GROUP_NAME);
        assertEquals(expectedInfo, actualInfo);
        verify(serviceHandler, times(1)).getServiceDetail(NAMESPACE_ID, SERVICE_NAME, GROUP_NAME);
    }

    @Test
    public void updateClusterMetadata() throws Exception {
        String namespaceId = "testNamespace";
        String groupName = "testGroup";
        String serviceName = "testService";
        String clusterName = "testCluster";
        ClusterMetadata clusterMetadata = new ClusterMetadata();
        doNothing().when(serviceHandler)
                .updateClusterMetadata(namespaceId, groupName, serviceName, clusterName, clusterMetadata);
        serviceProxy.updateClusterMetadata(namespaceId, groupName, serviceName, clusterName, clusterMetadata);
        // Assert the delegation actually happened (previously this test made no assertion).
        verify(serviceHandler).updateClusterMetadata(namespaceId, groupName, serviceName, clusterName, clusterMetadata);
    }
}
|
ServiceProxyTest
|
java
|
apache__dubbo
|
dubbo-config/dubbo-config-spring/src/test/java/org/apache/dubbo/config/spring/isolation/spring/annotation/consumer/tri/DemoServiceV1.java
|
{
"start": 1134,
"end": 1483
}
|
class ____ implements DemoService {

    // Remote reference to the version 1.0.0 / Group1 provider over the Triple
    // ("tri") protocol; injected by Dubbo via the annotation.
    @DubboReference(version = "1.0.0", group = "Group1", scope = "remote", protocol = "tri")
    private DemoService demoService;

    // Delegates to the injected remote reference.
    @Override
    public String sayName(String name) {
        return demoService.sayName(name);
    }

    // Intentionally unimplemented in this test fixture; always returns null.
    @Override
    public Box getBox() {
        return null;
    }
}
|
DemoServiceV1
|
java
|
apache__spark
|
sql/catalyst/src/main/java/org/apache/spark/sql/connector/expressions/GeneralScalarExpression.java
|
{
"start": 12111,
"end": 12929
}
|
class ____ extends ExpressionWithToString {

    // Both fields are set once in the constructor and never reassigned, so mark
    // them final to make the immutability explicit.

    /** The scalar function name. */
    private final String name;

    /** The operand expressions; element order is significant for equality. */
    private final Expression[] children;

    public GeneralScalarExpression(String name, Expression[] children) {
        this.name = name;
        this.children = children;
    }

    public String name() { return name; }

    @Override
    public Expression[] children() { return children; }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        GeneralScalarExpression that = (GeneralScalarExpression) o;
        if (!name.equals(that.name)) return false;
        return Arrays.equals(children, that.children);
    }

    @Override
    public int hashCode() {
        // Keep the original 31-multiplier formula so hash values stay stable.
        int result = name.hashCode();
        result = 31 * result + Arrays.hashCode(children);
        return result;
    }
}
|
GeneralScalarExpression
|
java
|
spring-projects__spring-framework
|
spring-r2dbc/src/main/java/org/springframework/r2dbc/connection/DelegatingConnectionFactory.java
|
{
"start": 1294,
"end": 2316
}
|
class ____ implements ConnectionFactory, Wrapped<ConnectionFactory> {

    private final ConnectionFactory targetConnectionFactory;

    /**
     * Creates a delegating wrapper around the given factory.
     * @param targetConnectionFactory the factory all calls are forwarded to
     */
    public DelegatingConnectionFactory(ConnectionFactory targetConnectionFactory) {
        Assert.notNull(targetConnectionFactory, "ConnectionFactory must not be null");
        this.targetConnectionFactory = targetConnectionFactory;
    }

    /**
     * Returns the underlying factory that receives all delegated calls.
     */
    public ConnectionFactory getTargetConnectionFactory() {
        return this.targetConnectionFactory;
    }

    @Override
    public Mono<? extends Connection> create() {
        // Adapt the target's Publisher result into a Mono for this API.
        return Mono.from(getTargetConnectionFactory().create());
    }

    @Override
    public ConnectionFactoryMetadata getMetadata() {
        return getTargetConnectionFactory().getMetadata();
    }

    @Override
    public ConnectionFactory unwrap() {
        return getTargetConnectionFactory();
    }
}
|
DelegatingConnectionFactory
|
java
|
quarkusio__quarkus
|
extensions/hibernate-search-orm-elasticsearch/deployment/src/test/java/io/quarkus/hibernate/search/orm/elasticsearch/test/configuration/IndexedEntityInNamedBackend.java
|
{
"start": 427,
"end": 699
}
|
class ____ {

    // Auto-generated primary key.
    @Id
    @GeneratedValue
    public Long id;

    // Full-text indexed field (Hibernate Search).
    @FullTextField
    public String name;

    // No-arg constructor required by JPA; protected to discourage direct use.
    protected IndexedEntityInNamedBackend() {
    }

    public IndexedEntityInNamedBackend(String name) {
        this.name = name;
    }
}
|
IndexedEntityInNamedBackend
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.