language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
processing__processing4
|
java/src/processing/mode/java/CompletionGenerator.java
|
{
"start": 24134,
"end": 24436
}
|
class ____ the default list of imports as well as
* the Sketch classpath
*/
static private Class<?> findClassIfExists(PreprocSketch ps, String className){
if (className == null){
return null;
}
if (className.indexOf('.') >= 0) {
// Figure out what is package and what is
|
in
|
java
|
apache__commons-lang
|
src/main/java/org/apache/commons/lang3/function/FailableToLongBiFunction.java
|
{
"start": 914,
"end": 1215
}
|
interface ____ {@link ToLongBiFunction} that declares a {@link Throwable}.
*
* @param <T> the type of the first argument to the function
* @param <U> the type of the second argument to the function
* @param <E> The kind of thrown exception or error.
* @since 3.11
*/
@FunctionalInterface
public
|
like
|
java
|
bumptech__glide
|
annotation/compiler/test/src/test/java/com/bumptech/glide/annotation/compiler/AppGlideModuleWithMultipleExcludesTest.java
|
{
"start": 1060,
"end": 3545
}
|
class ____ implements CompilationProvider {
@Rule
public final RegenerateResourcesRule regenerateResourcesRule = new RegenerateResourcesRule(this);
private Compilation compilation;
@Before
public void setUp() {
compilation =
javac()
.withProcessors(new GlideAnnotationProcessor())
.compile(
forResource("AppModuleWithMultipleExcludes.java"),
forResource("EmptyLibraryModule1.java"),
forResource("EmptyLibraryModule2.java"));
assertThat(compilation).succeededWithoutWarnings();
}
@Override
public Compilation getCompilation() {
return compilation;
}
@Test
@ReferencedResource
public void compilation_generatesExpectedGlideOptionsClass() throws IOException {
assertThat(compilation)
.generatedSourceFile(subpackage("GlideOptions"))
.hasSourceEquivalentTo(appResource("GlideOptions.java"));
}
@Test
@ReferencedResource
public void compilation_generatesExpectedGlideRequestClass() throws IOException {
assertThat(compilation)
.generatedSourceFile(subpackage("GlideRequest"))
.hasSourceEquivalentTo(appResource("GlideRequest.java"));
}
@Test
@ReferencedResource
public void compilation_generatesExpectedGlideRequestsClass() throws IOException {
assertThat(compilation)
.generatedSourceFile(subpackage("GlideRequests"))
.hasSourceEquivalentTo(appResource("GlideRequests.java"));
}
@Test
@ReferencedResource
public void compilationGeneratesExpectedGlideAppClass() throws IOException {
assertThat(compilation)
.generatedSourceFile(subpackage("GlideApp"))
.hasSourceEquivalentTo(appResource("GlideApp.java"));
}
@Test
public void compilation_generatesExpectedGeneratedAppGlideModuleImpl() throws IOException {
assertThat(compilation)
.generatedSourceFile(glide("GeneratedAppGlideModuleImpl"))
.hasSourceEquivalentTo(forResource("GeneratedAppGlideModuleImpl.java"));
}
@Test
@ReferencedResource
public void compilation_generatesExpectedGeneratedRequestManagerFactory() throws IOException {
assertThat(compilation)
.generatedSourceFile(glide("GeneratedRequestManagerFactory"))
.hasSourceEquivalentTo(appResource("GeneratedRequestManagerFactory.java"));
}
private JavaFileObject forResource(String name) {
return Util.forResource(getClass().getSimpleName(), name);
}
}
|
AppGlideModuleWithMultipleExcludesTest
|
java
|
spring-projects__spring-data-jpa
|
spring-data-jpa/src/main/java/org/springframework/data/jpa/repository/QueryRewriter.java
|
{
"start": 1843,
"end": 2818
}
|
interface ____ {
/**
* Rewrite the assembled query with the given {@link Sort}.
* <p>
* WARNING: No checks are performed before the transformed query is passed to the EntityManager.
*
* @param query the assembled query.
* @param sort current {@link Sort} settings provided by the method, or {@link Sort#unsorted()}} if there are none.
* @return the query to be used with the {@code EntityManager}.
*/
String rewrite(String query, Sort sort);
/**
* Rewrite the assembled query with the given {@link Pageable}.
*
* @param query the assembled query.
* @param pageRequest current {@link Pageable} settings provided by the method, or {@link Pageable#unpaged()} if not
* paged.
* @return the query to be used with the {@code EntityManager}.
*/
default String rewrite(String query, Pageable pageRequest) {
return rewrite(query, pageRequest.getSort());
}
/**
* A {@link QueryRewriter} that doesn't change the query.
*/
|
QueryRewriter
|
java
|
apache__hadoop
|
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/CryptoUtils.java
|
{
"start": 1959,
"end": 8942
}
|
class ____ {
private static final Logger LOG = LoggerFactory.getLogger(CryptoUtils.class);
public static boolean isEncryptedSpillEnabled(Configuration conf) {
return conf.getBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA,
MRJobConfig.DEFAULT_MR_ENCRYPTED_INTERMEDIATE_DATA);
}
/**
* This method creates and initializes an IV (Initialization Vector)
*
* @param conf configuration
* @return byte[] initialization vector
* @throws IOException exception in case of error
*/
public static byte[] createIV(Configuration conf) throws IOException {
CryptoCodec cryptoCodec = CryptoCodec.getInstance(conf);
if (isEncryptedSpillEnabled(conf)) {
byte[] iv = new byte[cryptoCodec.getCipherSuite().getAlgorithmBlockSize()];
cryptoCodec.generateSecureRandom(iv);
cryptoCodec.close();
return iv;
} else {
return null;
}
}
public static int cryptoPadding(Configuration conf) throws IOException {
// Sizeof(IV) + long(start-offset)
if (!isEncryptedSpillEnabled(conf)) {
return 0;
}
final CryptoCodec cryptoCodec = CryptoCodec.getInstance(conf);
try {
return cryptoCodec.getCipherSuite().getAlgorithmBlockSize() + 8;
} finally {
cryptoCodec.close();
}
}
private static byte[] getEncryptionKey() throws IOException {
return TokenCache.getEncryptedSpillKey(UserGroupInformation.getCurrentUser()
.getCredentials());
}
private static int getBufferSize(Configuration conf) {
return conf.getInt(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA_BUFFER_KB,
MRJobConfig.DEFAULT_MR_ENCRYPTED_INTERMEDIATE_DATA_BUFFER_KB) * 1024;
}
/**
* Wraps a given FSDataOutputStream with a CryptoOutputStream. The size of the
* data buffer required for the stream is specified by the
* "mapreduce.job.encrypted-intermediate-data.buffer.kb" Job configuration
* variable.
*
* @param conf configuration
* @param out given output stream
* @return FSDataOutputStream encrypted output stream if encryption is
* enabled; otherwise the given output stream itself
* @throws IOException exception in case of error
*/
public static FSDataOutputStream wrapIfNecessary(Configuration conf,
FSDataOutputStream out) throws IOException {
return wrapIfNecessary(conf, out, true);
}
/**
* Wraps a given FSDataOutputStream with a CryptoOutputStream. The size of the
* data buffer required for the stream is specified by the
* "mapreduce.job.encrypted-intermediate-data.buffer.kb" Job configuration
* variable.
*
* @param conf configuration
* @param out given output stream
* @param closeOutputStream flag to indicate whether closing the wrapped
* stream will close the given output stream
* @return FSDataOutputStream encrypted output stream if encryption is
* enabled; otherwise the given output stream itself
* @throws IOException exception in case of error
*/
public static FSDataOutputStream wrapIfNecessary(Configuration conf,
FSDataOutputStream out, boolean closeOutputStream) throws IOException {
if (isEncryptedSpillEnabled(conf)) {
out.write(ByteBuffer.allocate(8).putLong(out.getPos()).array());
byte[] iv = createIV(conf);
out.write(iv);
if (LOG.isDebugEnabled()) {
LOG.debug("IV written to Stream ["
+ Base64.encodeBase64URLSafeString(iv) + "]");
}
return new CryptoFSDataOutputStream(out, CryptoCodec.getInstance(conf),
getBufferSize(conf), getEncryptionKey(), iv, closeOutputStream);
} else {
return out;
}
}
/**
* Wraps a given InputStream with a CryptoInputStream. The size of the data
* buffer required for the stream is specified by the
* "mapreduce.job.encrypted-intermediate-data.buffer.kb" Job configuration
* variable.
*
* If the value of 'length' is > -1, The InputStream is additionally
* wrapped in a LimitInputStream. CryptoStreams are late buffering in nature.
* This means they will always try to read ahead if they can. The
* LimitInputStream will ensure that the CryptoStream does not read past the
* provided length from the given Input Stream.
*
* @param conf configuration
* @param in given input stream
* @param length maximum number of bytes to read from the input stream
* @return InputStream encrypted input stream if encryption is
* enabled; otherwise the given input stream itself
* @throws IOException exception in case of error
*/
public static InputStream wrapIfNecessary(Configuration conf, InputStream in,
long length) throws IOException {
if (isEncryptedSpillEnabled(conf)) {
int bufferSize = getBufferSize(conf);
if (length > -1) {
in = new LimitInputStream(in, length);
}
byte[] offsetArray = new byte[8];
IOUtils.readFully(in, offsetArray, 0, 8);
long offset = ByteBuffer.wrap(offsetArray).getLong();
CryptoCodec cryptoCodec = CryptoCodec.getInstance(conf);
byte[] iv =
new byte[cryptoCodec.getCipherSuite().getAlgorithmBlockSize()];
IOUtils.readFully(in, iv, 0,
cryptoCodec.getCipherSuite().getAlgorithmBlockSize());
if (LOG.isDebugEnabled()) {
LOG.debug("IV read from ["
+ Base64.encodeBase64URLSafeString(iv) + "]");
}
return new CryptoInputStream(in, cryptoCodec, bufferSize,
getEncryptionKey(), iv, offset + cryptoPadding(conf));
} else {
return in;
}
}
/**
* Wraps a given FSDataInputStream with a CryptoInputStream. The size of the
* data buffer required for the stream is specified by the
* "mapreduce.job.encrypted-intermediate-data.buffer.kb" Job configuration
* variable.
*
* @param conf configuration
* @param in given input stream
* @return FSDataInputStream encrypted input stream if encryption is
* enabled; otherwise the given input stream itself
* @throws IOException exception in case of error
*/
public static FSDataInputStream wrapIfNecessary(Configuration conf,
FSDataInputStream in) throws IOException {
if (isEncryptedSpillEnabled(conf)) {
CryptoCodec cryptoCodec = CryptoCodec.getInstance(conf);
int bufferSize = getBufferSize(conf);
// Not going to be used... but still has to be read...
// Since the O/P stream always writes it..
IOUtils.readFully(in, new byte[8], 0, 8);
byte[] iv =
new byte[cryptoCodec.getCipherSuite().getAlgorithmBlockSize()];
IOUtils.readFully(in, iv, 0,
cryptoCodec.getCipherSuite().getAlgorithmBlockSize());
if (LOG.isDebugEnabled()) {
LOG.debug("IV read from Stream ["
+ Base64.encodeBase64URLSafeString(iv) + "]");
}
return new CryptoFSDataInputStream(in, cryptoCodec, bufferSize,
getEncryptionKey(), iv);
} else {
return in;
}
}
}
|
CryptoUtils
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/discovery/SettingsBasedFPGADiscoveryStrategy.java
|
{
"start": 1324,
"end": 1942
}
|
class ____
implements FPGADiscoveryStrategy {
private final String type;
private final String availableDevices;
public SettingsBasedFPGADiscoveryStrategy(
String fpgaType, String devices) {
this.type = fpgaType;
this.availableDevices = devices;
}
@Override
public List<FpgaDevice> discover() throws ResourceHandlerException {
List<FpgaDevice> list =
DeviceSpecParser.getDevicesFromString(type, availableDevices);
if (list.isEmpty()) {
throw new ResourceHandlerException("No FPGA devices were specified");
}
return list;
}
}
|
SettingsBasedFPGADiscoveryStrategy
|
java
|
mybatis__mybatis-3
|
src/test/java/org/apache/ibatis/submitted/cursor_cache_oom/User.java
|
{
"start": 712,
"end": 1142
}
|
class ____ {
private Integer id;
private String name;
private Friend friend;
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public Friend getFriend() {
return friend;
}
public void setFriend(Friend friend) {
this.friend = friend;
}
}
|
User
|
java
|
micronaut-projects__micronaut-core
|
test-suite/src/test/java/io/micronaut/docs/server/intro/Application.java
|
{
"start": 742,
"end": 874
}
|
class ____ {
public static void main(String[] args) {
Micronaut.run(Application.class);
}
}
// end::class[]
|
Application
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/common/xcontent/SuggestingErrorOnUnknown.java
|
{
"start": 790,
"end": 2681
}
|
class ____ implements ErrorOnUnknown {
@Override
public String errorMessage(String parserName, String unknownField, Iterable<String> candidates) {
return String.format(Locale.ROOT, "[%s] unknown field [%s]%s", parserName, unknownField, suggest(unknownField, candidates));
}
@Override
public int priority() {
return 0;
}
/**
* Builds suggestions for an unknown field, returning an empty string if there
* aren't any suggestions or " did you mean " and then the list of suggestions.
*/
public static String suggest(String unknownField, Iterable<String> candidates) {
// TODO it'd be nice to combine this with BaseRestHandler's implementation.
LevenshteinDistance ld = new LevenshteinDistance();
final List<Tuple<Float, String>> scored = new ArrayList<>();
for (String candidate : candidates) {
float distance = ld.getDistance(unknownField, candidate);
if (distance > 0.5f) {
scored.add(new Tuple<>(distance, candidate));
}
}
if (scored.isEmpty()) {
return "";
}
CollectionUtil.timSort(scored, (a, b) -> {
// sort by distance in reverse order, then parameter name for equal distances
int compare = a.v1().compareTo(b.v1());
if (compare != 0) {
return -compare;
}
return a.v2().compareTo(b.v2());
});
List<String> keys = scored.stream().map(Tuple::v2).toList();
StringBuilder builder = new StringBuilder(" did you mean ");
if (keys.size() == 1) {
builder.append("[").append(keys.get(0)).append("]");
} else {
builder.append("any of ").append(keys.toString());
}
builder.append("?");
return builder.toString();
}
}
|
SuggestingErrorOnUnknown
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialDisjointGeoPointDocValuesAndSourceGridEvaluator.java
|
{
"start": 4216,
"end": 5296
}
|
class ____ implements EvalOperator.ExpressionEvaluator.Factory {
private final Source source;
private final EvalOperator.ExpressionEvaluator.Factory encodedPoints;
private final EvalOperator.ExpressionEvaluator.Factory gridIds;
private final DataType gridType;
public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory encodedPoints,
EvalOperator.ExpressionEvaluator.Factory gridIds, DataType gridType) {
this.source = source;
this.encodedPoints = encodedPoints;
this.gridIds = gridIds;
this.gridType = gridType;
}
@Override
public SpatialDisjointGeoPointDocValuesAndSourceGridEvaluator get(DriverContext context) {
return new SpatialDisjointGeoPointDocValuesAndSourceGridEvaluator(source, encodedPoints.get(context), gridIds.get(context), gridType, context);
}
@Override
public String toString() {
return "SpatialDisjointGeoPointDocValuesAndSourceGridEvaluator[" + "encodedPoints=" + encodedPoints + ", gridIds=" + gridIds + ", gridType=" + gridType + "]";
}
}
}
|
Factory
|
java
|
spring-projects__spring-framework
|
spring-aop/src/main/java/org/springframework/aop/support/DefaultBeanFactoryPointcutAdvisor.java
|
{
"start": 1357,
"end": 1971
}
|
class ____ extends AbstractBeanFactoryPointcutAdvisor {
private Pointcut pointcut = Pointcut.TRUE;
/**
* Specify the pointcut targeting the advice.
* <p>Default is {@code Pointcut.TRUE}.
* @see #setAdviceBeanName
*/
public void setPointcut(@Nullable Pointcut pointcut) {
this.pointcut = (pointcut != null ? pointcut : Pointcut.TRUE);
}
@Override
public Pointcut getPointcut() {
return this.pointcut;
}
@Override
public String toString() {
return getClass().getName() + ": pointcut [" + getPointcut() + "]; advice bean '" + getAdviceBeanName() + "'";
}
}
|
DefaultBeanFactoryPointcutAdvisor
|
java
|
apache__flink
|
flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/hadoop/mapreduce/HadoopOutputFormatBase.java
|
{
"start": 2201,
"end": 11883
}
|
class ____<K, V, T> extends HadoopOutputFormatCommonBase<T>
implements FinalizeOnMaster {
private static final long serialVersionUID = 1L;
// Mutexes to avoid concurrent operations on Hadoop OutputFormats.
// Hadoop parallelizes tasks across JVMs which is why they might rely on this JVM isolation.
// In contrast, Flink parallelizes using Threads, so multiple Hadoop OutputFormat instances
// might be used in the same JVM.
protected static final Object OPEN_MUTEX = new Object();
protected static final Object CONFIGURE_MUTEX = new Object();
protected static final Object CLOSE_MUTEX = new Object();
protected org.apache.hadoop.conf.Configuration configuration;
protected org.apache.hadoop.mapreduce.OutputFormat<K, V> mapreduceOutputFormat;
protected transient RecordWriter<K, V> recordWriter;
protected transient OutputCommitter outputCommitter;
protected transient TaskAttemptContext context;
protected transient int taskNumber;
public HadoopOutputFormatBase(
org.apache.hadoop.mapreduce.OutputFormat<K, V> mapreduceOutputFormat, Job job) {
super(job.getCredentials());
this.mapreduceOutputFormat = mapreduceOutputFormat;
this.configuration = job.getConfiguration();
HadoopUtils.mergeHadoopConf(configuration);
}
public org.apache.hadoop.conf.Configuration getConfiguration() {
return this.configuration;
}
// --------------------------------------------------------------------------------------------
// OutputFormat
// --------------------------------------------------------------------------------------------
@Override
public void configure(Configuration parameters) {
// enforce sequential configure() calls
synchronized (CONFIGURE_MUTEX) {
if (this.mapreduceOutputFormat instanceof Configurable) {
((Configurable) this.mapreduceOutputFormat).setConf(this.configuration);
}
}
}
/**
* create the temporary output file for hadoop RecordWriter.
*
* @throws java.io.IOException
*/
@Override
public void open(InitializationContext context) throws IOException {
int taskNumber = context.getTaskNumber();
// enforce sequential open() calls
synchronized (OPEN_MUTEX) {
if (Integer.toString(taskNumber + 1).length() > 6) {
throw new IOException("Task id too large.");
}
this.taskNumber = taskNumber + 1;
// for hadoop 2.2
this.configuration.set("mapreduce.output.basename", "tmp");
TaskAttemptID taskAttemptID =
TaskAttemptID.forName(
"attempt__0000_r_"
+ String.format(
"%"
+ (6
- Integer.toString(
taskNumber + 1)
.length())
+ "s",
" ")
.replace(" ", "0")
+ Integer.toString(taskNumber + 1)
+ "_0");
this.configuration.set("mapred.task.id", taskAttemptID.toString());
this.configuration.setInt("mapred.task.partition", taskNumber + 1);
// for hadoop 2.2
this.configuration.set("mapreduce.task.attempt.id", taskAttemptID.toString());
this.configuration.setInt("mapreduce.task.partition", taskNumber + 1);
try {
this.context = new TaskAttemptContextImpl(this.configuration, taskAttemptID);
this.outputCommitter = this.mapreduceOutputFormat.getOutputCommitter(this.context);
this.outputCommitter.setupJob(new JobContextImpl(this.configuration, new JobID()));
} catch (Exception e) {
throw new RuntimeException(e);
}
this.context.getCredentials().addAll(this.credentials);
Credentials currentUserCreds =
getCredentialsFromUGI(UserGroupInformation.getCurrentUser());
if (currentUserCreds != null) {
this.context.getCredentials().addAll(currentUserCreds);
}
// compatible for hadoop 2.2.0, the temporary output directory is different from hadoop
// 1.2.1
if (outputCommitter instanceof FileOutputCommitter) {
this.configuration.set(
"mapreduce.task.output.dir",
((FileOutputCommitter) this.outputCommitter).getWorkPath().toString());
}
try {
this.recordWriter = this.mapreduceOutputFormat.getRecordWriter(this.context);
} catch (InterruptedException e) {
throw new IOException("Could not create RecordWriter.", e);
}
}
}
/**
* commit the task by moving the output file out from the temporary directory.
*
* @throws java.io.IOException
*/
@Override
public void close() throws IOException {
// enforce sequential close() calls
synchronized (CLOSE_MUTEX) {
try {
this.recordWriter.close(this.context);
} catch (InterruptedException e) {
throw new IOException("Could not close RecordReader.", e);
}
if (this.outputCommitter.needsTaskCommit(this.context)) {
this.outputCommitter.commitTask(this.context);
}
Path outputPath = new Path(this.configuration.get("mapred.output.dir"));
// rename tmp-file to final name
FileSystem fs = FileSystem.get(outputPath.toUri(), this.configuration);
String taskNumberStr = Integer.toString(this.taskNumber);
String tmpFileTemplate = "tmp-r-00000";
String tmpFile =
tmpFileTemplate.substring(0, 11 - taskNumberStr.length()) + taskNumberStr;
if (fs.exists(new Path(outputPath.toString() + "/" + tmpFile))) {
fs.rename(
new Path(outputPath.toString() + "/" + tmpFile),
new Path(outputPath.toString() + "/" + taskNumberStr));
}
}
}
@Override
public void finalizeGlobal(FinalizationContext context) throws IOException {
JobContext jobContext;
TaskAttemptContext taskContext;
try {
TaskAttemptID taskAttemptID =
TaskAttemptID.forName(
"attempt__0000_r_"
+ String.format(
"%" + (6 - Integer.toString(1).length()) + "s",
" ")
.replace(" ", "0")
+ Integer.toString(1)
+ "_0");
jobContext = new JobContextImpl(this.configuration, new JobID());
taskContext = new TaskAttemptContextImpl(this.configuration, taskAttemptID);
this.outputCommitter = this.mapreduceOutputFormat.getOutputCommitter(taskContext);
} catch (Exception e) {
throw new RuntimeException(e);
}
jobContext.getCredentials().addAll(this.credentials);
Credentials currentUserCreds = getCredentialsFromUGI(UserGroupInformation.getCurrentUser());
if (currentUserCreds != null) {
jobContext.getCredentials().addAll(currentUserCreds);
}
// finalize HDFS output format
if (this.outputCommitter != null) {
this.outputCommitter.commitJob(jobContext);
}
}
// --------------------------------------------------------------------------------------------
// Custom serialization methods
// --------------------------------------------------------------------------------------------
private void writeObject(ObjectOutputStream out) throws IOException {
super.write(out);
out.writeUTF(this.mapreduceOutputFormat.getClass().getName());
this.configuration.write(out);
}
@SuppressWarnings("unchecked")
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
super.read(in);
String hadoopOutputFormatClassName = in.readUTF();
org.apache.hadoop.conf.Configuration configuration =
new org.apache.hadoop.conf.Configuration();
configuration.readFields(in);
if (this.configuration == null) {
this.configuration = configuration;
}
try {
this.mapreduceOutputFormat =
(org.apache.hadoop.mapreduce.OutputFormat<K, V>)
Class.forName(
hadoopOutputFormatClassName,
true,
Thread.currentThread().getContextClassLoader())
.newInstance();
} catch (Exception e) {
throw new RuntimeException("Unable to instantiate the hadoop output format", e);
}
}
}
|
HadoopOutputFormatBase
|
java
|
apache__avro
|
lang/java/avro/src/test/java/org/apache/avro/io/parsing/SymbolTest.java
|
{
"start": 956,
"end": 2585
}
|
class ____ {
private static final String SCHEMA = "{\"type\":\"record\",\"name\":\"SampleNode\","
+ "\"namespace\":\"org.spf4j.ssdump2.avro\",\n" + " \"fields\":[\n"
+ " {\"name\":\"count\",\"type\":\"int\",\"default\":0},\n" + " {\"name\":\"subNodes\",\"type\":\n"
+ " {\"type\":\"array\",\"items\":{\n" + " \"type\":\"record\",\"name\":\"SamplePair\",\n"
+ " \"fields\":[\n" + " {\"name\":\"method\",\"type\":\n"
+ " {\"type\":\"record\",\"name\":\"Method\",\n" + " \"fields\":[\n"
+ " {\"name\":\"declaringClass\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},\n"
+ " {\"name\":\"methodName\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}}\n"
+ " ]}},\n" + " {\"name\":\"node\",\"type\":\"SampleNode\"}]}}}]}";
@Test
void someMethod() throws IOException {
Schema schema = new Schema.Parser().parse(SCHEMA);
Symbol root = new ResolvingGrammarGenerator().generate(schema, schema);
validateNonNull(root, new HashSet<>());
}
private static void validateNonNull(final Symbol symb, Set<Symbol> seen) {
if (seen.contains(symb)) {
return;
} else {
seen.add(symb);
}
if (symb.production != null) {
for (Symbol s : symb.production) {
if (s == null) {
fail("invalid parsing tree should not contain nulls");
} else if (s.kind != Symbol.Kind.ROOT) {
validateNonNull(s, seen);
}
}
}
}
}
|
SymbolTest
|
java
|
spring-projects__spring-security
|
oauth2/oauth2-resource-server/src/main/java/org/springframework/security/oauth2/server/resource/authentication/DPoPAuthenticationToken.java
|
{
"start": 1183,
"end": 3204
}
|
class ____ extends AbstractAuthenticationToken {
@Serial
private static final long serialVersionUID = 5481690438914686216L;
private final String accessToken;
private final String dPoPProof;
private final String method;
private final String resourceUri;
/**
* Constructs a {@code DPoPAuthenticationToken} using the provided parameters.
* @param accessToken the DPoP-bound access token
* @param dPoPProof the DPoP Proof {@link Jwt}
* @param method the value of the HTTP method of the request
* @param resourceUri the value of the HTTP resource URI of the request, without query
* and fragment parts
*/
public DPoPAuthenticationToken(String accessToken, String dPoPProof, String method, String resourceUri) {
super(Collections.emptyList());
Assert.hasText(accessToken, "accessToken cannot be empty");
Assert.hasText(dPoPProof, "dPoPProof cannot be empty");
Assert.hasText(method, "method cannot be empty");
Assert.hasText(resourceUri, "resourceUri cannot be empty");
this.accessToken = accessToken;
this.dPoPProof = dPoPProof;
this.method = method;
this.resourceUri = resourceUri;
}
@Override
public Object getPrincipal() {
return getAccessToken();
}
@Override
public Object getCredentials() {
return getAccessToken();
}
/**
* Returns the DPoP-bound access token.
* @return the DPoP-bound access token
*/
public String getAccessToken() {
return this.accessToken;
}
/**
* Returns the DPoP Proof {@link Jwt}.
* @return the DPoP Proof {@link Jwt}
*/
public String getDPoPProof() {
return this.dPoPProof;
}
/**
* Returns the value of the HTTP method of the request.
* @return the value of the HTTP method of the request
*/
public String getMethod() {
return this.method;
}
/**
* Returns the value of the HTTP resource URI of the request, without query and
* fragment parts.
* @return the value of the HTTP resource URI of the request
*/
public String getResourceUri() {
return this.resourceUri;
}
}
|
DPoPAuthenticationToken
|
java
|
apache__camel
|
components/camel-platform-http-vertx/src/test/java/org/apache/camel/component/platform/http/vertx/model/Pet.java
|
{
"start": 2361,
"end": 2579
}
|
enum ____ {
AVAILABLE,
PENDING,
SOLD;
@JsonCreator
public static Status fromString(String status) {
return Status.valueOf(status.toUpperCase());
}
}
}
|
Status
|
java
|
mybatis__mybatis-3
|
src/test/java/org/apache/ibatis/submitted/lazyload_proxyfactory_comparison/DefaultLazyTest.java
|
{
"start": 721,
"end": 850
}
|
class ____ extends AbstractLazyTest {
@Override
protected String getConfiguration() {
return "default";
}
}
|
DefaultLazyTest
|
java
|
spring-projects__spring-boot
|
module/spring-boot-liquibase/src/main/java/org/springframework/boot/liquibase/autoconfigure/LiquibaseSchemaManagementProvider.java
|
{
"start": 1174,
"end": 1787
}
|
class ____ implements SchemaManagementProvider {
private final Iterable<SpringLiquibase> liquibaseInstances;
LiquibaseSchemaManagementProvider(ObjectProvider<SpringLiquibase> liquibases) {
this.liquibaseInstances = liquibases;
}
@Override
public SchemaManagement getSchemaManagement(DataSource dataSource) {
return StreamSupport.stream(this.liquibaseInstances.spliterator(), false)
.map(SpringLiquibase::getDataSource)
.filter(dataSource::equals)
.findFirst()
.map((managedDataSource) -> SchemaManagement.MANAGED)
.orElse(SchemaManagement.UNMANAGED);
}
}
|
LiquibaseSchemaManagementProvider
|
java
|
spring-projects__spring-framework
|
spring-jdbc/src/main/java/org/springframework/jdbc/core/JdbcTemplate.java
|
{
"start": 17720,
"end": 18678
}
|
class ____ implements StatementCallback<T>, SqlProvider {
@Override
public T doInStatement(Statement stmt) throws SQLException {
ResultSet rs = null;
try {
rs = stmt.executeQuery(sql);
return rse.extractData(rs);
}
finally {
JdbcUtils.closeResultSet(rs);
}
}
@Override
public String getSql() {
return sql;
}
}
return execute(new QueryStatementCallback(), true);
}
@Override
public void query(String sql, RowCallbackHandler rch) throws DataAccessException {
query(sql, new RowCallbackHandlerResultSetExtractor(rch, this.maxRows));
}
@Override
public <T extends @Nullable Object> List<T> query(String sql, RowMapper<T> rowMapper) throws DataAccessException {
return result(query(sql, new RowMapperResultSetExtractor<>(rowMapper, 0, this.maxRows)));
}
@Override
public <T> Stream<T> queryForStream(String sql, RowMapper<T> rowMapper) throws DataAccessException {
|
QueryStatementCallback
|
java
|
alibaba__fastjson
|
src/main/java/com/alibaba/fastjson/asm/TypeCollector.java
|
{
"start": 170,
"end": 3118
}
|
class ____ {
private static String JSONType = ASMUtils.desc(com.alibaba.fastjson.annotation.JSONType.class);
private static final Map<String, String> primitives = new HashMap<String, String>() {
{
put("int","I");
put("boolean","Z");
put("byte", "B");
put("char","C");
put("short","S");
put("float","F");
put("long","J");
put("double","D");
}
};
private final String methodName;
private final Class<?>[] parameterTypes;
protected MethodCollector collector;
protected boolean jsonType;
public TypeCollector(String methodName, Class<?>[] parameterTypes) {
this.methodName = methodName;
this.parameterTypes = parameterTypes;
this.collector = null;
}
protected MethodCollector visitMethod(int access, String name, String desc) {
if (collector != null) {
return null;
}
if (!name.equals(methodName)) {
return null;
}
Type[] argTypes = Type.getArgumentTypes(desc);
int longOrDoubleQuantity = 0;
for (Type t : argTypes) {
String className = t.getClassName();
if (className.equals("long") || className.equals("double")) {
longOrDoubleQuantity++;
}
}
if (argTypes.length != this.parameterTypes.length) {
return null;
}
for (int i = 0; i < argTypes.length; i++) {
if (!correctTypeName(argTypes[i], this.parameterTypes[i].getName())) {
return null;
}
}
return collector = new MethodCollector(
Modifier.isStatic(access) ? 0 : 1,
argTypes.length + longOrDoubleQuantity);
}
public void visitAnnotation(String desc) {
if (JSONType.equals(desc)) {
jsonType = true;
}
}
private boolean correctTypeName(Type type, String paramTypeName) {
String s = type.getClassName();
// array notation needs cleanup.
StringBuilder braces = new StringBuilder();
while (s.endsWith("[]")) {
braces.append('[');
s = s.substring(0, s.length() - 2);
}
if (braces.length() != 0) {
if (primitives.containsKey(s)) {
s = braces.append(primitives.get(s)).toString();
} else {
s = braces.append('L').append(s).append(';').toString();
}
}
return s.equals(paramTypeName);
}
public String[] getParameterNamesForMethod() {
if (collector == null || !collector.debugInfoPresent) {
return new String[0];
}
return collector.getResult().split(",");
}
public boolean matched() {
return collector != null;
}
public boolean hasJsonType() {
return jsonType;
}
}
|
TypeCollector
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/checkreturnvalue/BuilderReturnThisTest.java
|
{
"start": 2819,
"end": 3271
}
|
class ____ {
@CheckReturnValue
TestBuilder setBar(String bar) {
return new TestBuilder();
}
@CheckReturnValue
TestBuilder setTernary(String baz) {
return true ? new TestBuilder() : this;
}
}
}
""")
.doTest(BugCheckerRefactoringTestHelper.TestMode.TEXT_MATCH);
}
}
|
TestBuilder
|
java
|
mybatis__mybatis-3
|
src/main/java/org/apache/ibatis/executor/loader/cglib/CglibProxyFactory.java
|
{
"start": 1840,
"end": 4314
}
|
class ____ implements ProxyFactory {
private static final String FINALIZE_METHOD = "finalize";
private static final String WRITE_REPLACE_METHOD = "writeReplace";
public CglibProxyFactory() {
try {
Resources.classForName("net.sf.cglib.proxy.Enhancer");
} catch (Throwable e) {
throw new IllegalStateException(
"Cannot enable lazy loading because CGLIB is not available. Add CGLIB to your classpath.", e);
}
}
@Override
public Object createProxy(Object target, ResultLoaderMap lazyLoader, Configuration configuration,
ObjectFactory objectFactory, List<Class<?>> constructorArgTypes, List<Object> constructorArgs) {
return EnhancedResultObjectProxyImpl.createProxy(target, lazyLoader, configuration, objectFactory,
constructorArgTypes, constructorArgs);
}
public Object createDeserializationProxy(Object target, Map<String, ResultLoaderMap.LoadPair> unloadedProperties,
ObjectFactory objectFactory, List<Class<?>> constructorArgTypes, List<Object> constructorArgs) {
return EnhancedDeserializationProxyImpl.createProxy(target, unloadedProperties, objectFactory, constructorArgTypes,
constructorArgs);
}
static Object createStaticProxy(Class<?> type, Callback callback, List<Class<?>> constructorArgTypes,
List<Object> constructorArgs) {
LogHolder.log.warn("CglibProxyFactory is deprecated. Use another proxy factory implementation.");
Enhancer enhancer = new Enhancer();
enhancer.setCallback(callback);
enhancer.setSuperclass(type);
try {
type.getDeclaredMethod(WRITE_REPLACE_METHOD);
// ObjectOutputStream will call writeReplace of objects returned by writeReplace
if (LogHolder.log.isDebugEnabled()) {
LogHolder.log.debug(WRITE_REPLACE_METHOD + " method was found on bean " + type + ", make sure it returns this");
}
} catch (NoSuchMethodException e) {
enhancer.setInterfaces(new Class[] { WriteReplaceInterface.class });
} catch (SecurityException e) {
// nothing to do here
}
Object enhanced;
if (constructorArgTypes.isEmpty()) {
enhanced = enhancer.create();
} else {
Class<?>[] typesArray = constructorArgTypes.toArray(new Class[constructorArgTypes.size()]);
Object[] valuesArray = constructorArgs.toArray(new Object[constructorArgs.size()]);
enhanced = enhancer.create(typesArray, valuesArray);
}
return enhanced;
}
private static
|
CglibProxyFactory
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/ProducerConsumer.java
|
{
"start": 4977,
"end": 6539
}
|
class ____ implements Runnable {
private WorkRequestProcessor<T, R> processor;
/**
* Constructor.
* @param processor is used to process an item from input queue.
*/
public Worker(WorkRequestProcessor<T, R> processor) {
this.processor = processor;
}
/**
* The worker continuously gets an item from input queue, process it and
* then put the processed result into output queue. It waits to get an item
* from input queue if there's none.
*/
public void run() {
while (true) {
WorkRequest<T> work;
try {
work = inputQueue.take();
} catch (InterruptedException e) {
// It is assumed that if an interrupt occurs while taking a work
// out from input queue, the interrupt is likely triggered by
// ProducerConsumer.shutdown(). Therefore, exit the thread.
LOG.debug("Interrupted while waiting for requests from inputQueue.");
return;
}
boolean isDone = false;
while (!isDone) {
try {
// if the interrupt happens while the work is being processed,
// go back to process the same work again.
WorkReport<R> result = processor.processItem(work);
outputQueue.put(result);
isDone = true;
} catch (InterruptedException ie) {
LOG.debug("Worker thread was interrupted while processing an item,"
+ " or putting into outputQueue. Retrying...");
}
}
}
}
}
}
|
Worker
|
java
|
apache__camel
|
core/camel-management-api/src/main/java/org/apache/camel/api/management/ManagedAttribute.java
|
{
"start": 1263,
"end": 1368
}
|
interface ____ {
String description() default "";
boolean mask() default false;
}
|
ManagedAttribute
|
java
|
reactor__reactor-core
|
reactor-core/src/main/java/reactor/core/publisher/MonoTimeout.java
|
{
"start": 1411,
"end": 2785
}
|
class ____<T, U, V> extends InternalMonoOperator<T, T> {
final Publisher<U> firstTimeout;
final @Nullable Publisher<? extends T> other;
final @Nullable String timeoutDescription; //only useful when no `other`
@SuppressWarnings("rawtypes")
final static Function NEVER = e -> Flux.never();
MonoTimeout(Mono<? extends T> source,
Publisher<U> firstTimeout,
String timeoutDescription) {
super(source);
this.firstTimeout = Mono.fromDirect(Objects.requireNonNull(firstTimeout, "firstTimeout"));
this.other = null;
this.timeoutDescription = timeoutDescription;
}
MonoTimeout(Mono<? extends T> source,
Publisher<U> firstTimeout,
Publisher<? extends T> other) {
super(source);
this.firstTimeout = Mono.fromDirect(Objects.requireNonNull(firstTimeout, "firstTimeout"));
this.other = Mono.fromDirect(Objects.requireNonNull(other, "other"));
this.timeoutDescription = null;
}
@Override
@SuppressWarnings("unchecked")
public CoreSubscriber<? super T> subscribeOrReturn(CoreSubscriber<? super T> actual) {
return new FluxTimeout.TimeoutMainSubscriber<T, T>(
actual,
firstTimeout,
NEVER,
other,
addNameToTimeoutDescription(source, timeoutDescription)
);
}
@Override
public @Nullable Object scanUnsafe(Attr key) {
if (key == Attr.RUN_STYLE) return Attr.RunStyle.SYNC;
return super.scanUnsafe(key);
}
}
|
MonoTimeout
|
java
|
spring-projects__spring-boot
|
module/spring-boot-security/src/main/java/org/springframework/boot/security/autoconfigure/web/servlet/PathRequest.java
|
{
"start": 2373,
"end": 3387
}
|
class ____ extends ApplicationContextRequestMatcher<ApplicationContext> {
private volatile @Nullable RequestMatcher delegate;
private H2ConsoleRequestMatcher() {
super(ApplicationContext.class);
}
@Override
protected boolean ignoreApplicationContext(WebApplicationContext applicationContext) {
return WebServerApplicationContext.hasServerNamespace(applicationContext, "management");
}
@Override
protected void initialized(Supplier<ApplicationContext> context) {
String path = context.get().getBean(H2ConsoleProperties.class).getPath();
Assert.hasText(path, "'path' in H2ConsoleProperties must not be empty");
this.delegate = PathPatternRequestMatcher.withDefaults().matcher(path + "/**");
}
@Override
protected boolean matches(HttpServletRequest request, Supplier<ApplicationContext> context) {
RequestMatcher delegate = this.delegate;
Assert.state(delegate != null, "'delegate' must not be null");
return delegate.matches(request);
}
}
}
|
H2ConsoleRequestMatcher
|
java
|
spring-projects__spring-framework
|
spring-websocket/src/test/java/org/springframework/web/socket/config/annotation/WebSocketHandlerRegistrationTests.java
|
{
"start": 1836,
"end": 8281
}
|
class ____ {
private TestWebSocketHandlerRegistration registration = new TestWebSocketHandlerRegistration();
private TaskScheduler taskScheduler = mock();
@Test
void minimal() {
WebSocketHandler handler = new TextWebSocketHandler();
this.registration.addHandler(handler, "/foo", "/bar");
List<Mapping> mappings = this.registration.getMappings();
assertThat(mappings).hasSize(2);
Mapping m1 = mappings.get(0);
assertThat(m1.webSocketHandler).isEqualTo(handler);
assertThat(m1.path).isEqualTo("/foo");
assertThat(m1.interceptors).isNotNull();
assertThat(m1.interceptors).hasSize(1);
assertThat(m1.interceptors[0].getClass()).isEqualTo(OriginHandshakeInterceptor.class);
Mapping m2 = mappings.get(1);
assertThat(m2.webSocketHandler).isEqualTo(handler);
assertThat(m2.path).isEqualTo("/bar");
assertThat(m2.interceptors).isNotNull();
assertThat(m2.interceptors).hasSize(1);
assertThat(m2.interceptors[0].getClass()).isEqualTo(OriginHandshakeInterceptor.class);
}
@Test
void interceptors() {
WebSocketHandler handler = new TextWebSocketHandler();
HttpSessionHandshakeInterceptor interceptor = new HttpSessionHandshakeInterceptor();
this.registration.addHandler(handler, "/foo").addInterceptors(interceptor);
List<Mapping> mappings = this.registration.getMappings();
assertThat(mappings).hasSize(1);
Mapping mapping = mappings.get(0);
assertThat(mapping.webSocketHandler).isEqualTo(handler);
assertThat(mapping.path).isEqualTo("/foo");
assertThat(mapping.interceptors).isNotNull();
assertThat(mapping.interceptors).hasSize(2);
assertThat(mapping.interceptors[0]).isEqualTo(interceptor);
assertThat(mapping.interceptors[1].getClass()).isEqualTo(OriginHandshakeInterceptor.class);
}
@Test
void emptyAllowedOrigin() {
WebSocketHandler handler = new TextWebSocketHandler();
HttpSessionHandshakeInterceptor interceptor = new HttpSessionHandshakeInterceptor();
this.registration.addHandler(handler, "/foo").addInterceptors(interceptor).setAllowedOrigins();
List<Mapping> mappings = this.registration.getMappings();
assertThat(mappings).hasSize(1);
Mapping mapping = mappings.get(0);
assertThat(mapping.webSocketHandler).isEqualTo(handler);
assertThat(mapping.path).isEqualTo("/foo");
assertThat(mapping.interceptors).isNotNull();
assertThat(mapping.interceptors).hasSize(2);
assertThat(mapping.interceptors[0]).isEqualTo(interceptor);
assertThat(mapping.interceptors[1].getClass()).isEqualTo(OriginHandshakeInterceptor.class);
}
@Test
void interceptorsWithAllowedOrigins() {
WebSocketHandler handler = new TextWebSocketHandler();
HttpSessionHandshakeInterceptor interceptor = new HttpSessionHandshakeInterceptor();
this.registration.addHandler(handler, "/foo")
.addInterceptors(interceptor)
.setAllowedOrigins("https://mydomain1.example")
.setAllowedOriginPatterns("https://*.abc.com");
List<Mapping> mappings = this.registration.getMappings();
assertThat(mappings).hasSize(1);
Mapping mapping = mappings.get(0);
assertThat(mapping.webSocketHandler).isEqualTo(handler);
assertThat(mapping.path).isEqualTo("/foo");
assertThat(mapping.interceptors).isNotNull();
assertThat(mapping.interceptors).hasSize(2);
assertThat(mapping.interceptors[0]).isEqualTo(interceptor);
OriginHandshakeInterceptor originInterceptor = (OriginHandshakeInterceptor) mapping.interceptors[1];
assertThat(originInterceptor.getAllowedOrigins()).containsExactly("https://mydomain1.example");
assertThat(originInterceptor.getAllowedOriginPatterns()).containsExactly("https://*.abc.com");
}
@Test
void interceptorsPassedToSockJsRegistration() {
WebSocketHandler handler = new TextWebSocketHandler();
HttpSessionHandshakeInterceptor interceptor = new HttpSessionHandshakeInterceptor();
this.registration.addHandler(handler, "/foo")
.addInterceptors(interceptor)
.setAllowedOrigins("https://mydomain1.example")
.setAllowedOriginPatterns("https://*.abc.com")
.withSockJS();
this.registration.getSockJsServiceRegistration().setTaskScheduler(this.taskScheduler);
List<Mapping> mappings = this.registration.getMappings();
assertThat(mappings).hasSize(1);
Mapping mapping = mappings.get(0);
assertThat(mapping.webSocketHandler).isEqualTo(handler);
assertThat(mapping.path).isEqualTo("/foo/**");
assertThat(mapping.sockJsService).isNotNull();
assertThat(mapping.sockJsService.getAllowedOrigins()).contains("https://mydomain1.example");
List<HandshakeInterceptor> interceptors = mapping.sockJsService.getHandshakeInterceptors();
assertThat(interceptors.get(0)).isEqualTo(interceptor);
OriginHandshakeInterceptor originInterceptor = (OriginHandshakeInterceptor) interceptors.get(1);
assertThat(originInterceptor.getAllowedOrigins()).containsExactly("https://mydomain1.example");
assertThat(originInterceptor.getAllowedOriginPatterns()).containsExactly("https://*.abc.com");
}
@Test
void handshakeHandler() {
WebSocketHandler handler = new TextWebSocketHandler();
HandshakeHandler handshakeHandler = new DefaultHandshakeHandler();
this.registration.addHandler(handler, "/foo").setHandshakeHandler(handshakeHandler);
List<Mapping> mappings = this.registration.getMappings();
assertThat(mappings).hasSize(1);
Mapping mapping = mappings.get(0);
assertThat(mapping.webSocketHandler).isEqualTo(handler);
assertThat(mapping.path).isEqualTo("/foo");
assertThat(mapping.handshakeHandler).isSameAs(handshakeHandler);
}
@Test
void handshakeHandlerPassedToSockJsRegistration() {
WebSocketHandler handler = new TextWebSocketHandler();
HandshakeHandler handshakeHandler = new DefaultHandshakeHandler();
this.registration.addHandler(handler, "/foo").setHandshakeHandler(handshakeHandler).withSockJS();
this.registration.getSockJsServiceRegistration().setTaskScheduler(this.taskScheduler);
List<Mapping> mappings = this.registration.getMappings();
assertThat(mappings).hasSize(1);
Mapping mapping = mappings.get(0);
assertThat(mapping.webSocketHandler).isEqualTo(handler);
assertThat(mapping.path).isEqualTo("/foo/**");
assertThat(mapping.sockJsService).isNotNull();
WebSocketTransportHandler transportHandler =
(WebSocketTransportHandler) mapping.sockJsService.getTransportHandlers().get(TransportType.WEBSOCKET);
assertThat(transportHandler.getHandshakeHandler()).isSameAs(handshakeHandler);
}
private static
|
WebSocketHandlerRegistrationTests
|
java
|
elastic__elasticsearch
|
x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/support/EmailServer.java
|
{
"start": 1355,
"end": 5081
}
|
class ____ {
public static final String USERNAME = "_user";
public static final String PASSWORD = "_passwd";
private final List<Listener> listeners = new CopyOnWriteArrayList<>();
private final SMTPServer server;
public EmailServer(String host, @Nullable SSLContext sslContext, final Logger logger) {
final SimpleMessageListenerAdapter listener = new SimpleMessageListenerAdapter(new SimpleMessageListener() {
@Override
public boolean accept(String from, String recipient) {
return true;
}
@Override
public void deliver(String from, String recipient, InputStream data) throws IOException {
try {
Session session = Session.getInstance(new Properties());
MimeMessage msg = new MimeMessage(session, data);
for (Listener listener1 : listeners) {
try {
listener1.on(msg);
} catch (Exception e) {
logger.error("Unexpected failure", e);
fail(e.getMessage());
}
}
} catch (MessagingException me) {
throw new RuntimeException("could not create mime message", me);
}
}
});
final EasyAuthenticationHandlerFactory authentication = new EasyAuthenticationHandlerFactory((user, passwd) -> {
assertThat(user, is(USERNAME));
assertThat(passwd, is(PASSWORD));
});
server = new SMTPServer(listener, authentication) {
@Override
public SSLSocket createSSLSocket(Socket socket) throws IOException {
if (sslContext == null) {
return super.createSSLSocket(socket);
} else {
SSLSocketFactory factory = sslContext.getSocketFactory();
InetSocketAddress remoteAddress = (InetSocketAddress) socket.getRemoteSocketAddress();
SSLSocket sslSocket = (SSLSocket) factory.createSocket(socket, remoteAddress.getHostString(), socket.getPort(), true);
sslSocket.setUseClientMode(false);
sslSocket.setEnabledCipherSuites(sslSocket.getSupportedCipherSuites());
return sslSocket;
}
}
};
server.setHostName(host);
server.setPort(0);
if (sslContext != null) {
server.setEnableTLS(true);
server.setRequireTLS(true);
server.setHideTLS(false);
}
}
/**
* @return the port that the underlying server is listening on
*/
public int port() {
return server.getPort();
}
public void start() {
// Must have privileged access because underlying server will accept socket connections
AccessController.doPrivileged((PrivilegedAction<Void>) () -> {
server.start();
return null;
});
}
public void stop() {
server.stop();
listeners.clear();
}
public void addListener(Listener listener) {
listeners.add(listener);
}
public void clearListeners() {
this.listeners.clear();
}
public static EmailServer localhost(final Logger logger) {
return localhost(logger, null);
}
public static EmailServer localhost(final Logger logger, @Nullable SSLContext sslContext) {
EmailServer server = new EmailServer("localhost", sslContext, logger);
server.start();
return server;
}
@FunctionalInterface
public
|
EmailServer
|
java
|
elastic__elasticsearch
|
x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/AggregationResultUtils.java
|
{
"start": 16901,
"end": 17812
}
|
class ____ implements AggValueExtractor {
@Override
public Object value(Aggregation agg, Map<String, String> fieldTypeMap, String lookupFieldPrefix) {
Percentiles aggregation = (Percentiles) agg;
Map<String, Double> percentiles = new LinkedHashMap<>();
for (Percentile p : aggregation) {
// in case of sparse data percentiles might not have data, in this case it returns NaN,
// we need to guard the output and set null in this case
if (Numbers.isValidDouble(p.value()) == false) {
percentiles.put(OutputFieldNameConverter.fromDouble(p.percent()), null);
} else {
percentiles.put(OutputFieldNameConverter.fromDouble(p.percent()), p.value());
}
}
return percentiles;
}
}
static
|
PercentilesAggExtractor
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/jpa/criteria/SelectOnlyArrayPropertyTest.java
|
{
"start": 832,
"end": 2554
}
|
class ____ {
@Test
@JiraKey("HHH-16606")
public void criteriaSelectOnlyIntArray(EntityManagerFactoryScope scope) {
scope.inTransaction( entityManager -> {
final byte[] result = "Hello, World!".getBytes();
EntityWithIdAndIntegerArray myEntity = new EntityWithIdAndIntegerArray( 1, result );
entityManager.persist( myEntity );
CriteriaBuilder cb = entityManager.getCriteriaBuilder();
CriteriaQuery<byte[]> cq = cb.createQuery( byte[].class );
Root<EntityWithIdAndIntegerArray> root = cq.from( EntityWithIdAndIntegerArray.class );
cq.select( root.get( "bytes" ) )
.where( cb.equal( root.get( "id" ), 1 ) );
TypedQuery<byte[]> q = entityManager.createQuery( cq );
byte[] bytes = q.getSingleResult();
assertArrayEquals( result, bytes );
} );
}
@Test
public void criteriaSelectWrappedIntArray(EntityManagerFactoryScope scope) {
scope.inTransaction( entityManager -> {
final byte[] result = "Hi there!".getBytes();
EntityWithIdAndIntegerArray myEntity = new EntityWithIdAndIntegerArray( 2, result );
entityManager.persist( myEntity );
CriteriaBuilder cb = entityManager.getCriteriaBuilder();
CriteriaQuery<Object[]> cq = cb.createQuery( Object[].class );
Root<EntityWithIdAndIntegerArray> root = cq.from( EntityWithIdAndIntegerArray.class );
cq.select( root.get( "bytes" ) )
.where( cb.equal( root.get( "id" ), 2 ) );
TypedQuery<Object[]> q = entityManager.createQuery( cq );
final Object[] objects = q.getSingleResult();
assertEquals( 1, objects.length );
byte[] bytes = (byte[]) objects[0];
assertArrayEquals( result, bytes );
} );
}
@Entity(name = "EntityWithIdAndIntegerArray")
public static
|
SelectOnlyArrayPropertyTest
|
java
|
playframework__playframework
|
web/play-java-forms/src/main/java/play/data/format/Formats.java
|
{
"start": 3759,
"end": 5862
}
|
class ____
extends Formatters.AnnotationFormatter<DateTime, Date> {
private final MessagesApi messagesApi;
/**
* Creates an annotation date formatter.
*
* @param messagesApi messages to look up the pattern
*/
public AnnotationDateFormatter(MessagesApi messagesApi) {
this.messagesApi = messagesApi;
}
/**
* Binds the field - constructs a concrete value from submitted data.
*
* @param annotation the annotation that triggered this formatter
* @param text the field text
* @param locale the current <code>Locale</code>
* @return a new value
*/
public Date parse(DateTime annotation, String text, Locale locale)
throws java.text.ParseException {
if (text == null || text.trim().isEmpty()) {
return null;
}
Lang lang = new Lang(locale);
SimpleDateFormat sdf =
new SimpleDateFormat(
Optional.ofNullable(this.messagesApi)
.map(messages -> messages.get(lang, annotation.pattern()))
.orElse(annotation.pattern()),
locale);
sdf.setLenient(false);
return sdf.parse(text);
}
/**
* Unbinds this field - converts a concrete value to plain string
*
* @param annotation the annotation that triggered this formatter
* @param value the value to unbind
* @param locale the current <code>Locale</code>
* @return printable version of the value
*/
public String print(DateTime annotation, Date value, Locale locale) {
if (value == null) {
return "";
}
Lang lang = new Lang(locale);
return new SimpleDateFormat(
Optional.ofNullable(this.messagesApi)
.map(messages -> messages.get(lang, annotation.pattern()))
.orElse(annotation.pattern()),
locale)
.format(value);
}
}
// -- STRING
/** Defines the format for a <code>String</code> field that cannot be empty. */
@Target({FIELD})
@Retention(RUNTIME)
public static @
|
AnnotationDateFormatter
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/mapping/collections/custom/declaredtype/HeadList.java
|
{
"start": 435,
"end": 574
}
|
class ____<X> extends ArrayList<X> implements IHeadList<X> {
@Override
public X head() {
return isEmpty() ? null : get( 0 );
}
}
|
HeadList
|
java
|
spring-projects__spring-boot
|
module/spring-boot-micrometer-tracing/src/test/java/org/springframework/boot/micrometer/tracing/autoconfigure/MicrometerTracingAutoConfigurationTests.java
|
{
"start": 11788,
"end": 12077
}
|
class ____ {
@Bean
SpanTagAnnotationHandler spanTagAnnotationHandler() {
return new SpanTagAnnotationHandler((valueResolverClass) -> mock(ValueResolver.class),
(valueExpressionResolverClass) -> mock(ValueExpressionResolver.class));
}
}
}
|
SpanTagAnnotationHandlerConfiguration
|
java
|
quarkusio__quarkus
|
extensions/quartz/deployment/src/test/java/io/quarkus/quartz/test/composite/CompositeSchedulerNotUsedTest.java
|
{
"start": 336,
"end": 943
}
|
class ____ {
@RegisterExtension
static final QuarkusUnitTest test = new QuarkusUnitTest()
.withApplicationRoot(root -> root
.addClasses(Jobs.class))
.assertException(t -> {
assertThat(t).cause().isInstanceOf(IllegalStateException.class)
.hasMessageContaining(
"The required scheduler implementation is not available because the composite scheduler is not used: SIMPLE");
});
@Test
public void test() {
fail();
}
static
|
CompositeSchedulerNotUsedTest
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/mapping/Value.java
|
{
"start": 1149,
"end": 5896
}
|
interface ____ extends Serializable {
/**
* The number of columns and formulas in the mapping.
*/
int getColumnSpan();
/**
* The mapping to columns and formulas.
*/
List<Selectable> getSelectables();
/**
* If the mapping involves only columns, return them.
*
* @throws org.hibernate.AssertionFailure if the mapping involves formulas
*/
List<Column> getColumns();
/**
* Same as {@link #getSelectables()} except it returns the PK for the
* non-owning side of a one-to-one association.
*/
default List<Selectable> getVirtualSelectables() {
return getSelectables();
}
/**
* Same as {@link #getColumns()} except it returns the PK for the
* non-owning side of a one-to-one association.
*
* @throws org.hibernate.AssertionFailure if the mapping involves formulas
*/
default List<Column> getConstraintColumns() {
return getColumns();
}
Type getType() throws MappingException;
@Incubating
default JdbcMapping getSelectableType(MappingContext mappingContext, int index) throws MappingException {
return getType( mappingContext, getType(), index );
}
private JdbcMapping getType(MappingContext factory, Type elementType, int index) {
if ( elementType instanceof CompositeType compositeType ) {
final Type[] subtypes = compositeType.getSubtypes();
for ( int i = 0; i < subtypes.length; i++ ) {
final Type subtype = subtypes[i];
final int columnSpan =
subtype instanceof EntityType entityType
? getIdType( entityType ).getColumnSpan( factory )
: subtype.getColumnSpan( factory );
if ( columnSpan < index ) {
index -= columnSpan;
}
else if ( columnSpan != 0 ) {
return getType( factory, subtype, index );
}
}
// Should never happen
throw new IllegalStateException( "Type index is past the types column span!" );
}
else if ( elementType instanceof EntityType entityType ) {
return getType( factory, getIdType( entityType ), index );
}
else if ( elementType instanceof MetaType metaType ) {
return (JdbcMapping) metaType.getBaseType();
}
else {
return (JdbcMapping) elementType;
}
}
private Type getIdType(EntityType entityType) {
final PersistentClass entityBinding =
getBuildingContext().getMetadataCollector()
.getEntityBinding( entityType.getAssociatedEntityName() );
return entityType.isReferenceToPrimaryKey()
? entityBinding.getIdentifier().getType()
: entityBinding.getProperty( entityType.getRHSUniqueKeyPropertyName() ).getType();
}
FetchMode getFetchMode();
Table getTable();
boolean hasFormula();
boolean isAlternateUniqueKey();
boolean isPartitionKey();
boolean isNullable();
void createForeignKey();
// called when this is the foreign key of a
// @OneToOne with a FK, or a @OneToMany with
// a join table
void createUniqueKey(MetadataBuildingContext context);
boolean isSimpleValue();
boolean isValid(MappingContext mappingContext) throws MappingException;
void setTypeUsingReflection(String className, String propertyName) throws MappingException;
Object accept(ValueVisitor visitor);
boolean isSame(Value other);
boolean[] getColumnInsertability();
boolean hasAnyInsertableColumns();
boolean[] getColumnUpdateability();
boolean hasAnyUpdatableColumns();
@Incubating
default MetadataBuildingContext getBuildingContext() {
throw new UnsupportedOperationException( "Value#getBuildingContext is not implemented by: " + getClass().getName() );
}
ServiceRegistry getServiceRegistry();
Value copy();
boolean isColumnInsertable(int index);
boolean isColumnUpdateable(int index);
@Incubating
default String getExtraCreateTableInfo() {
return "";
}
/**
* Checks if this value contains any duplicate column. A column
* is considered duplicate when its {@link Column#getName() name} is
* already contained in the {@code distinctColumn} set.
* <p>
* If a duplicate column is found, a {@link MappingException} is thrown.
*
* @param distinctColumns set containing the names of the columns to check
* @param owner the owner of this value, used just for error reporting
*/
@Internal
default void checkColumnDuplication(Set<String> distinctColumns, String owner) {
for ( int i = 0; i < getSelectables().size(); i++ ) {
final Selectable selectable = getSelectables().get( i );
if ( isColumnInsertable( i ) || isColumnUpdateable( i ) ) {
final Column col = (Column) selectable;
if ( !distinctColumns.add( col.getName() ) ) {
throw new MappingException(
"Column '" + col.getName()
+ "' is duplicated in mapping for " + owner
+ " (use '@Column(insertable=false, updatable=false)' when mapping multiple properties to the same column)"
);
}
}
}
}
}
|
Value
|
java
|
apache__kafka
|
streams/src/main/java/org/apache/kafka/streams/kstream/internals/KTableTransformValues.java
|
{
"start": 6533,
"end": 9435
}
|
class ____ implements KTableValueGetter<K, VOut> {
private final KTableValueGetter<K, V> parentGetter;
private InternalProcessorContext<?, ?> internalProcessorContext;
private final ValueTransformerWithKey<? super K, ? super V, ? extends VOut> valueTransformer;
KTableTransformValuesGetter(final KTableValueGetter<K, V> parentGetter,
final ValueTransformerWithKey<? super K, ? super V, ? extends VOut> valueTransformer) {
this.parentGetter = Objects.requireNonNull(parentGetter, "parentGetter");
this.valueTransformer = Objects.requireNonNull(valueTransformer, "valueTransformer");
}
@Override
public void init(final ProcessorContext<?, ?> context) {
internalProcessorContext = (InternalProcessorContext<?, ?>) context;
parentGetter.init(context);
valueTransformer.init(new ForwardingDisabledProcessorContext(internalProcessorContext));
}
@Override
public ValueAndTimestamp<VOut> get(final K key) {
return transformValue(key, parentGetter.get(key));
}
@Override
public ValueAndTimestamp<VOut> get(final K key, final long asOfTimestamp) {
return transformValue(key, parentGetter.get(key, asOfTimestamp));
}
@Override
public boolean isVersioned() {
return parentGetter.isVersioned();
}
@Override
public void close() {
parentGetter.close();
valueTransformer.close();
}
private ValueAndTimestamp<VOut> transformValue(final K key, final ValueAndTimestamp<V> valueAndTimestamp) {
final ProcessorRecordContext currentContext = internalProcessorContext.recordContext();
internalProcessorContext.setRecordContext(new ProcessorRecordContext(
valueAndTimestamp == null ? UNKNOWN : valueAndTimestamp.timestamp(),
-1L, // we don't know the original offset
// technically, we know the partition, but in the new `api.Processor` class,
// we move to `RecordMetadata` than would be `null` for this case and thus
// we won't have the partition information, so it's better to not provide it
// here either, to not introduce a regression later on
-1,
null, // we don't know the upstream input topic
new RecordHeaders()
));
final ValueAndTimestamp<VOut> result = ValueAndTimestamp.make(
valueTransformer.transform(key, getValueOrNull(valueAndTimestamp)),
valueAndTimestamp == null ? UNKNOWN : valueAndTimestamp.timestamp());
internalProcessorContext.setRecordContext(currentContext);
return result;
}
}
}
|
KTableTransformValuesGetter
|
java
|
spring-projects__spring-framework
|
spring-context/src/main/java/org/springframework/context/event/EventListenerMethodProcessor.java
|
{
"start": 5078,
"end": 8175
}
|
class ____ bean with name '" + beanName + "'", ex);
}
}
if (type != null) {
if (ScopedObject.class.isAssignableFrom(type)) {
try {
Class<?> targetClass = AutoProxyUtils.determineTargetClass(
beanFactory, ScopedProxyUtils.getTargetBeanName(beanName));
if (targetClass != null) {
type = targetClass;
}
}
catch (Throwable ex) {
// An invalid scoped proxy arrangement - let's ignore it.
if (logger.isDebugEnabled()) {
logger.debug("Could not resolve target bean for scoped proxy '" + beanName + "'", ex);
}
}
}
try {
processBean(beanName, type);
}
catch (Throwable ex) {
throw new BeanInitializationException("Failed to process @EventListener " +
"annotation on bean with name '" + beanName + "': " + ex.getMessage(), ex);
}
}
}
}
}
private void processBean(final String beanName, final Class<?> targetType) {
if (!this.nonAnnotatedClasses.contains(targetType) &&
AnnotationUtils.isCandidateClass(targetType, EventListener.class) &&
!isSpringContainerClass(targetType)) {
Map<Method, EventListener> annotatedMethods = null;
try {
annotatedMethods = MethodIntrospector.selectMethods(targetType,
(MethodIntrospector.MetadataLookup<EventListener>) method ->
AnnotatedElementUtils.findMergedAnnotation(method, EventListener.class));
}
catch (Throwable ex) {
// An unresolvable type in a method signature, probably from a lazy bean - let's ignore it.
if (logger.isDebugEnabled()) {
logger.debug("Could not resolve methods for bean with name '" + beanName + "'", ex);
}
}
if (CollectionUtils.isEmpty(annotatedMethods)) {
this.nonAnnotatedClasses.add(targetType);
if (logger.isTraceEnabled()) {
logger.trace("No @EventListener annotations found on bean class: " + targetType.getName());
}
}
else {
// Non-empty set of methods
ConfigurableApplicationContext context = this.applicationContext;
Assert.state(context != null, "No ApplicationContext set");
List<EventListenerFactory> factories = this.eventListenerFactories;
Assert.state(factories != null, "EventListenerFactory List not initialized");
for (Method method : annotatedMethods.keySet()) {
for (EventListenerFactory factory : factories) {
if (factory.supportsMethod(method)) {
Method methodToUse = AopUtils.selectInvocableMethod(method, context.getType(beanName));
ApplicationListener<?> applicationListener =
factory.createApplicationListener(beanName, targetType, methodToUse);
if (applicationListener instanceof ApplicationListenerMethodAdapter alma) {
alma.init(context, this.evaluator);
}
context.addApplicationListener(applicationListener);
break;
}
}
}
if (logger.isDebugEnabled()) {
logger.debug(annotatedMethods.size() + " @EventListener methods processed on bean '" +
beanName + "': " + annotatedMethods);
}
}
}
}
/**
* Determine whether the given
|
for
|
java
|
micronaut-projects__micronaut-core
|
test-suite/src/test/java/io/micronaut/docs/factories/VehicleMockSpec.java
|
{
"start": 413,
"end": 770
}
|
class ____ {
@Requires(beans = VehicleMockSpec.class)
@Bean @Replaces(Engine.class)
Engine mockEngine = () -> "Mock Started"; // <1>
@Inject Vehicle vehicle; // <2>
@Test
void testStartEngine() {
final String result = vehicle.start();
assertEquals("Mock Started", result); // <3>
}
}
// end::class[]
|
VehicleMockSpec
|
java
|
apache__rocketmq
|
tools/src/main/java/org/apache/rocketmq/tools/admin/common/AdminToolHandler.java
|
{
"start": 858,
"end": 939
}
|
interface ____ {
AdminToolResult doExecute() throws Exception;
}
|
AdminToolHandler
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/streaming/api/functions/source/legacy/FromIteratorFunction.java
|
{
"start": 1241,
"end": 1803
}
|
class ____<T> implements SourceFunction<T> {
private static final long serialVersionUID = 1L;
private final Iterator<T> iterator;
private volatile boolean isRunning = true;
public FromIteratorFunction(Iterator<T> iterator) {
this.iterator = iterator;
}
@Override
public void run(SourceContext<T> ctx) throws Exception {
while (isRunning && iterator.hasNext()) {
ctx.collect(iterator.next());
}
}
@Override
public void cancel() {
isRunning = false;
}
}
|
FromIteratorFunction
|
java
|
junit-team__junit5
|
jupiter-tests/src/test/java/org/junit/jupiter/engine/extension/ExtensionRegistrationViaParametersAndFieldsTests.java
|
{
"start": 20804,
"end": 21019
}
|
class ____ {
@Test
void test(@ExtendWith(DummyExtension.class) @ExtendWith(LongParameterResolver.class) Long number) {
assertThat(number).isEqualTo(42L);
}
}
static
|
MultipleRegistrationsViaParameterTestCase
|
java
|
apache__flink
|
flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/CheckpointExceptionHandlerConfigurationTest.java
|
{
"start": 1203,
"end": 2065
}
|
class ____ {
@Test
void testCheckpointConfigDefault() {
StreamExecutionEnvironment streamExecutionEnvironment =
StreamExecutionEnvironment.getExecutionEnvironment();
CheckpointConfig checkpointConfig = streamExecutionEnvironment.getCheckpointConfig();
assertThat(checkpointConfig.getTolerableCheckpointFailureNumber()).isZero();
}
@Test
void testSetCheckpointConfig() {
StreamExecutionEnvironment streamExecutionEnvironment =
StreamExecutionEnvironment.getExecutionEnvironment();
CheckpointConfig checkpointConfig = streamExecutionEnvironment.getCheckpointConfig();
checkpointConfig.setTolerableCheckpointFailureNumber(5);
assertThat(checkpointConfig.getTolerableCheckpointFailureNumber()).isEqualTo(5);
}
}
|
CheckpointExceptionHandlerConfigurationTest
|
java
|
spring-projects__spring-framework
|
spring-core/src/test/java/org/springframework/core/ResolvableTypeTests.java
|
{
"start": 80214,
"end": 80461
}
|
class ____ extends HashMap<String, RecursiveMapWithInterface>
implements Map<String, RecursiveMapWithInterface> {
}
PaymentCreator<? extends Payment, PaymentCreatorParameter<? extends Payment>> paymentCreator;
static
|
RecursiveMapWithInterface
|
java
|
elastic__elasticsearch
|
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/realm/TransportClearRealmCacheActionTests.java
|
{
"start": 1714,
"end": 8104
}
|
class ____ extends ESTestCase {
private static ThreadPool threadPool;
private AuthenticationService authenticationService;
private TransportClearRealmCacheAction action;
private TestCachingRealm nativeRealm;
private TestCachingRealm fileRealm;
@Before
public void setup() {
threadPool = new TestThreadPool("TransportClearRealmCacheActionTests");
authenticationService = mock(AuthenticationService.class);
nativeRealm = mockRealm("native");
fileRealm = mockRealm("file");
final Realms realms = mockRealms(List.of(nativeRealm, fileRealm));
TransportService transportService = mock(TransportService.class);
when(transportService.getThreadPool()).thenReturn(threadPool);
action = new TransportClearRealmCacheAction(
threadPool,
mockClusterService(),
transportService,
mock(ActionFilters.class),
realms,
authenticationService
);
}
@After
public void cleanup() {
ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS);
threadPool = null;
}
public void testSingleUserCacheCleanupForAllRealms() {
final String user = "test";
// When no realm is specified we should clear all realms.
// This is equivalent to using a wildcard (*) instead of specifying realm name in query.
final String[] realmsToClear = randomFrom(Strings.EMPTY_ARRAY, null);
final String[] usersToClear = new String[] { user };
ClearRealmCacheRequest.Node clearCacheRequest = mockClearCacheRequest(realmsToClear, usersToClear);
ClearRealmCacheResponse.Node response = action.nodeOperation(clearCacheRequest, mock(Task.class));
assertThat(response.getNode(), notNullValue());
// We expect that caches of all realms are cleared for the given user,
// including last successful cache in the authentication service.
verify(fileRealm).expire(user);
verify(nativeRealm).expire(user);
verify(authenticationService).expire(user);
// We don't expect that expireAll methods are called.
verify(fileRealm, never()).expireAll();
verify(nativeRealm, never()).expireAll();
verify(authenticationService, never()).expireAll();
}
public void testSingleUserCacheCleanupForSingleRealm() {
final String user = "test";
// We want to clear user only from native realm cache.
final String[] realmsToClear = new String[] { nativeRealm.name() };
final String[] usersToClear = new String[] { user };
ClearRealmCacheRequest.Node clearCacheRequest = mockClearCacheRequest(realmsToClear, usersToClear);
ClearRealmCacheResponse.Node response = action.nodeOperation(clearCacheRequest, mock(Task.class));
assertThat(response, notNullValue());
// We expect that only native cache is cleared,
// including last successful cache in the authentication service.
verify(nativeRealm).expire(user);
verify(fileRealm, never()).expire(user);
verify(authenticationService).expire(user);
// We don't expect that expireAll methods are called.
verify(fileRealm, never()).expireAll();
verify(nativeRealm, never()).expireAll();
verify(authenticationService, never()).expireAll();
}
public void testAllUsersCacheCleanupForSingleRealm() {
// We want to clear all users from native realm cache.
final String[] realmsToClear = new String[] { nativeRealm.name() };
final String[] usersToClear = randomFrom(Strings.EMPTY_ARRAY, null);
ClearRealmCacheRequest.Node clearCacheRequest = mockClearCacheRequest(realmsToClear, usersToClear);
ClearRealmCacheResponse.Node response = action.nodeOperation(clearCacheRequest, mock(Task.class));
assertThat(response, notNullValue());
// We expect that whole native cache is cleared,
// including last successful cache in the authentication service.
verify(nativeRealm).expireAll();
verify(fileRealm, never()).expireAll();
verify(authenticationService).expireAll();
}
public void testAllUsersCacheCleanupForAllRealms() {
// We want to clear all users from all realms.
final String[] realmsToClear = randomFrom(Strings.EMPTY_ARRAY, null);
final String[] usersToClear = randomFrom(Strings.EMPTY_ARRAY, null);
ClearRealmCacheRequest.Node clearCacheRequest = mockClearCacheRequest(realmsToClear, usersToClear);
ClearRealmCacheResponse.Node response = action.nodeOperation(clearCacheRequest, mock(Task.class));
assertThat(response, notNullValue());
verify(fileRealm).expireAll();
verify(nativeRealm).expireAll();
verify(authenticationService).expireAll();
}
private TestCachingRealm mockRealm(String name) {
TestCachingRealm realm = mock(TestCachingRealm.class);
when(realm.name()).thenReturn(name);
return realm;
}
private Realms mockRealms(List<Realm> activeRealms) {
Realms realms = mock(Realms.class);
when(realms.realm(any())).then(in -> {
final String name = in.getArgument(0, String.class);
return activeRealms.stream()
.filter(r -> r.name().equals(name))
.findFirst()
.orElseThrow(() -> new IllegalStateException("Realm '" + name + "' not found!"));
});
when(realms.iterator()).thenReturn(activeRealms.iterator());
return realms;
}
private ClearRealmCacheRequest.Node mockClearCacheRequest(String[] realms, String[] users) {
ClearRealmCacheRequest.Node clearCacheRequest = mock(ClearRealmCacheRequest.Node.class);
when(clearCacheRequest.getRealms()).thenReturn(realms);
when(clearCacheRequest.getUsernames()).thenReturn(users);
return clearCacheRequest;
}
private ClusterService mockClusterService() {
ClusterService clusterService = mock(ClusterService.class);
DiscoveryNode localNode = DiscoveryNodeUtils.create("localnode", buildNewFakeTransportAddress(), Map.of(), Set.of());
when(clusterService.localNode()).thenReturn(localNode);
return clusterService;
}
private abstract static
|
TransportClearRealmCacheActionTests
|
java
|
apache__logging-log4j2
|
log4j-core/src/main/java/org/apache/logging/log4j/core/appender/rolling/DefaultRolloverStrategy.java
|
{
"start": 4568,
"end": 4822
}
|
class ____ extends AbstractRolloverStrategy {
private static final int MIN_WINDOW_SIZE = 1;
private static final int DEFAULT_WINDOW_SIZE = 7;
/**
* Builds DefaultRolloverStrategy instances.
*/
public static
|
DefaultRolloverStrategy
|
java
|
google__guava
|
android/guava/src/com/google/common/util/concurrent/ClosingFuture.java
|
{
"start": 95176,
"end": 98191
}
|
class ____ extends IdentityHashMap<AutoCloseable, Executor>
implements AutoCloseable {
private final DeferredCloser closer = new DeferredCloser(this);
private volatile boolean closed;
private volatile @Nullable CountDownLatch whenClosed;
<V extends @Nullable Object, U extends @Nullable Object>
ListenableFuture<U> applyClosingFunction(
ClosingFunction<? super V, U> transformation, @ParametricNullness V input)
throws Exception {
// TODO(dpb): Consider ways to defer closing without creating a separate CloseableList.
CloseableList newCloseables = new CloseableList();
try {
return immediateFuture(transformation.apply(newCloseables.closer, input));
} finally {
add(newCloseables, directExecutor());
}
}
<V extends @Nullable Object, U extends @Nullable Object>
FluentFuture<U> applyAsyncClosingFunction(
AsyncClosingFunction<V, U> transformation, @ParametricNullness V input)
throws Exception {
// TODO(dpb): Consider ways to defer closing without creating a separate CloseableList.
CloseableList newCloseables = new CloseableList();
try {
ClosingFuture<U> closingFuture = transformation.apply(newCloseables.closer, input);
closingFuture.becomeSubsumedInto(newCloseables);
return closingFuture.future;
} finally {
add(newCloseables, directExecutor());
}
}
@Override
public void close() {
if (closed) {
return;
}
synchronized (this) {
if (closed) {
return;
}
closed = true;
}
for (Map.Entry<AutoCloseable, Executor> entry : entrySet()) {
closeQuietly(entry.getKey(), entry.getValue());
}
clear();
if (whenClosed != null) {
whenClosed.countDown();
}
}
void add(@Nullable AutoCloseable closeable, Executor executor) {
checkNotNull(executor);
if (closeable == null) {
return;
}
synchronized (this) {
if (!closed) {
put(closeable, executor);
return;
}
}
closeQuietly(closeable, executor);
}
/**
* Returns a latch that reaches zero when this objects' deferred closeables have been closed.
*/
CountDownLatch whenClosedCountDown() {
if (closed) {
return new CountDownLatch(0);
}
synchronized (this) {
if (closed) {
return new CountDownLatch(0);
}
checkState(whenClosed == null);
return whenClosed = new CountDownLatch(1);
}
}
}
/**
* Returns an object that can be used to wait until this objects' deferred closeables have all had
* {@link Runnable}s that close them submitted to each one's closing {@link Executor}.
*/
@VisibleForTesting
CountDownLatch whenClosedCountDown() {
return closeables.whenClosedCountDown();
}
/** The state of a {@link CloseableList}. */
|
CloseableList
|
java
|
spring-projects__spring-framework
|
spring-webmvc/src/main/java/org/springframework/web/servlet/config/DefaultServletHandlerBeanDefinitionParser.java
|
{
"start": 1791,
"end": 3685
}
|
class ____ implements BeanDefinitionParser {
@Override
public @Nullable BeanDefinition parse(Element element, ParserContext parserContext) {
Object source = parserContext.extractSource(element);
String defaultServletName = element.getAttribute("default-servlet-name");
RootBeanDefinition defaultServletHandlerDef = new RootBeanDefinition(DefaultServletHttpRequestHandler.class);
defaultServletHandlerDef.setSource(source);
defaultServletHandlerDef.setRole(BeanDefinition.ROLE_INFRASTRUCTURE);
if (StringUtils.hasText(defaultServletName)) {
defaultServletHandlerDef.getPropertyValues().add("defaultServletName", defaultServletName);
}
String defaultServletHandlerName = parserContext.getReaderContext().generateBeanName(defaultServletHandlerDef);
parserContext.getRegistry().registerBeanDefinition(defaultServletHandlerName, defaultServletHandlerDef);
parserContext.registerComponent(new BeanComponentDefinition(defaultServletHandlerDef, defaultServletHandlerName));
Map<String, String> urlMap = new ManagedMap<>();
urlMap.put("/**", defaultServletHandlerName);
RootBeanDefinition handlerMappingDef = new RootBeanDefinition(SimpleUrlHandlerMapping.class);
handlerMappingDef.setSource(source);
handlerMappingDef.setRole(BeanDefinition.ROLE_INFRASTRUCTURE);
handlerMappingDef.getPropertyValues().add("urlMap", urlMap);
String handlerMappingBeanName = parserContext.getReaderContext().generateBeanName(handlerMappingDef);
parserContext.getRegistry().registerBeanDefinition(handlerMappingBeanName, handlerMappingDef);
parserContext.registerComponent(new BeanComponentDefinition(handlerMappingDef, handlerMappingBeanName));
// Ensure BeanNameUrlHandlerMapping (SPR-8289) and default HandlerAdapters are not "turned off"
MvcNamespaceUtils.registerDefaultComponents(parserContext, source);
return null;
}
}
|
DefaultServletHandlerBeanDefinitionParser
|
java
|
mybatis__mybatis-3
|
src/test/java/org/apache/ibatis/submitted/nestedresulthandler_gh1551/ProductInfo.java
|
{
"start": 722,
"end": 1207
}
|
class ____ {
private Long id;
private String productId;
private String otherInfo;
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public String getProductId() {
return productId;
}
public void setProductId(String productId) {
this.productId = productId;
}
public String getOtherInfo() {
return otherInfo;
}
public void setOtherInfo(String otherInfo) {
this.otherInfo = otherInfo;
}
}
|
ProductInfo
|
java
|
spring-projects__spring-security
|
saml2/saml2-service-provider/src/opensaml5Main/java/org/springframework/security/saml2/provider/service/web/OpenSaml5AuthenticationTokenConverter.java
|
{
"start": 1745,
"end": 4825
}
|
class ____ implements AuthenticationConverter {
private final BaseOpenSamlAuthenticationTokenConverter delegate;
/**
* Constructs a {@link OpenSaml5AuthenticationTokenConverter} given a repository for
* {@link RelyingPartyRegistration}s
* @param registrations the repository for {@link RelyingPartyRegistration}s
* {@link RelyingPartyRegistration}s
*/
public OpenSaml5AuthenticationTokenConverter(RelyingPartyRegistrationRepository registrations) {
Assert.notNull(registrations, "relyingPartyRegistrationRepository cannot be null");
this.delegate = new BaseOpenSamlAuthenticationTokenConverter(registrations, new OpenSaml5Template());
}
/**
* Resolve an authentication request from the given {@link HttpServletRequest}.
*
* <p>
* First uses the configured {@link RequestMatcher} to deduce whether an
* authentication request is being made and optionally for which
* {@code registrationId}.
*
* <p>
* If there is an associated {@code <saml2:AuthnRequest>}, then the
* {@code registrationId} is looked up and used.
*
* <p>
* If a {@code registrationId} is found in the request, then it is looked up and used.
* In that case, if none is found a {@link Saml2AuthenticationException} is thrown.
*
* <p>
* Finally, if no {@code registrationId} is found in the request, then the code
* attempts to resolve the {@link RelyingPartyRegistration} from the SAML Response's
* Issuer.
* @param request the HTTP request
* @return the {@link Saml2AuthenticationToken} authentication request
* @throws Saml2AuthenticationException if the {@link RequestMatcher} specifies a
* non-existent {@code registrationId}
*/
@Override
public Saml2AuthenticationToken convert(HttpServletRequest request) {
return this.delegate.convert(request);
}
/**
* Use the given {@link Saml2AuthenticationRequestRepository} to load authentication
* request.
* @param authenticationRequestRepository the
* {@link Saml2AuthenticationRequestRepository} to use
*/
public void setAuthenticationRequestRepository(
Saml2AuthenticationRequestRepository<AbstractSaml2AuthenticationRequest> authenticationRequestRepository) {
Assert.notNull(authenticationRequestRepository, "authenticationRequestRepository cannot be null");
this.delegate.setAuthenticationRequestRepository(authenticationRequestRepository);
}
/**
* Use the given {@link RequestMatcher} to match the request.
* @param requestMatcher the {@link RequestMatcher} to use
*/
public void setRequestMatcher(RequestMatcher requestMatcher) {
Assert.notNull(requestMatcher, "requestMatcher cannot be null");
this.delegate.setRequestMatcher(requestMatcher);
}
/**
* Use the given {@code shouldConvertGetRequests} to convert {@code GET} requests.
* Default is {@code true}.
* @param shouldConvertGetRequests the {@code shouldConvertGetRequests} to use
* @since 7.0
*/
public void setShouldConvertGetRequests(boolean shouldConvertGetRequests) {
this.delegate.setShouldConvertGetRequests(shouldConvertGetRequests);
}
}
|
OpenSaml5AuthenticationTokenConverter
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/ITestProvidedImplementation.java
|
{
"start": 5728,
"end": 35884
}
|
class ____ {
@RegisterExtension
private TestName name = new TestName();
public static final Logger LOG =
LoggerFactory.getLogger(ITestProvidedImplementation.class);
private final Random r = new Random();
private final File fBASE = new File(MiniDFSCluster.getBaseDirectory());
private final Path pBASE = new Path(fBASE.toURI().toString());
private final Path providedPath = new Path(pBASE, "providedDir");
private final Path nnDirPath = new Path(pBASE, "nnDir");
private final String singleUser = "usr1";
private final String singleGroup = "grp1";
private final int numFiles = 10;
private final String filePrefix = "file";
private final String fileSuffix = ".dat";
private final int baseFileLen = 1024;
private long providedDataSize = 0;
private final String bpid = "BP-1234-10.1.1.1-1224";
private static final String clusterID = "CID-PROVIDED";
private Configuration conf;
private MiniDFSCluster cluster;
@BeforeEach
public void setSeed() throws Exception {
if (fBASE.exists() && !FileUtil.fullyDelete(fBASE)) {
throw new IOException("Could not fully delete " + fBASE);
}
long seed = r.nextLong();
r.setSeed(seed);
System.out.println(name.getMethodName() + " seed: " + seed);
conf = new HdfsConfiguration();
conf.set(SingleUGIResolver.USER, singleUser);
conf.set(SingleUGIResolver.GROUP, singleGroup);
conf.set(DFSConfigKeys.DFS_PROVIDER_STORAGEUUID,
DFSConfigKeys.DFS_PROVIDER_STORAGEUUID_DEFAULT);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_PROVIDED_ENABLED, true);
conf.setClass(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_CLASS,
TextFileRegionAliasMap.class, BlockAliasMap.class);
conf.set(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_TEXT_WRITE_DIR,
nnDirPath.toString());
conf.set(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_TEXT_READ_FILE,
new Path(nnDirPath, fileNameFromBlockPoolID(bpid)).toString());
conf.set(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_TEXT_DELIMITER, "\t");
conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR_PROVIDED,
new File(providedPath.toUri()).toString());
File imageDir = new File(providedPath.toUri());
if (!imageDir.exists()) {
LOG.info("Creating directory: " + imageDir);
imageDir.mkdirs();
}
File nnDir = new File(nnDirPath.toUri());
if (!nnDir.exists()) {
nnDir.mkdirs();
}
// create 10 random files under pBASE
for (int i=0; i < numFiles; i++) {
File newFile = new File(
new Path(providedPath, filePrefix + i + fileSuffix).toUri());
if(!newFile.exists()) {
try {
LOG.info("Creating " + newFile.toString());
newFile.createNewFile();
Writer writer = new OutputStreamWriter(
new FileOutputStream(newFile.getAbsolutePath()), StandardCharsets.UTF_8);
for(int j=0; j < baseFileLen*i; j++) {
writer.write("0");
}
writer.flush();
writer.close();
providedDataSize += newFile.length();
} catch (IOException e) {
e.printStackTrace();
}
}
}
}
@AfterEach
public void shutdown() throws Exception {
try {
if (cluster != null) {
cluster.shutdown(true, true);
}
} finally {
cluster = null;
}
}
void createImage(TreeWalk t, Path out,
Class<? extends BlockResolver> blockIdsClass) throws Exception {
createImage(t, out, blockIdsClass, "", TextFileRegionAliasMap.class);
}
void createImage(TreeWalk t, Path out,
Class<? extends BlockResolver> blockIdsClass, String clusterID,
Class<? extends BlockAliasMap> aliasMapClass) throws Exception {
ImageWriter.Options opts = ImageWriter.defaults();
opts.setConf(conf);
opts.output(out.toString())
.blocks(aliasMapClass)
.blockIds(blockIdsClass)
.clusterID(clusterID)
.blockPoolID(bpid);
try (ImageWriter w = new ImageWriter(opts)) {
for (TreePath e : t) {
w.accept(e);
}
}
}
void startCluster(Path nspath, int numDatanodes,
StorageType[] storageTypes,
StorageType[][] storageTypesPerDatanode,
boolean doFormat) throws IOException {
startCluster(nspath, numDatanodes, storageTypes, storageTypesPerDatanode,
doFormat, null);
}
void startCluster(Path nspath, int numDatanodes,
StorageType[] storageTypes,
StorageType[][] storageTypesPerDatanode,
boolean doFormat, String[] racks) throws IOException {
startCluster(nspath, numDatanodes,
storageTypes, storageTypesPerDatanode,
doFormat, racks, null,
new MiniDFSCluster.Builder(conf));
}
void startCluster(Path nspath, int numDatanodes,
StorageType[] storageTypes,
StorageType[][] storageTypesPerDatanode,
boolean doFormat, String[] racks,
MiniDFSNNTopology topo,
MiniDFSCluster.Builder builder) throws IOException {
conf.set(DFS_NAMENODE_NAME_DIR_KEY, nspath.toString());
builder.format(doFormat)
.manageNameDfsDirs(doFormat)
.numDataNodes(numDatanodes)
.racks(racks);
if (storageTypesPerDatanode != null) {
builder.storageTypes(storageTypesPerDatanode);
} else if (storageTypes != null) {
builder.storagesPerDatanode(storageTypes.length)
.storageTypes(storageTypes);
}
if (topo != null) {
builder.nnTopology(topo);
// If HA or Federation is enabled and formatting is set to false,
// copy the FSImage to all Namenode directories.
if ((topo.isHA() || topo.isFederated()) && !doFormat) {
builder.manageNameDfsDirs(true);
builder.enableManagedDfsDirsRedundancy(false);
builder.manageNameDfsSharedDirs(true);
List<File> nnDirs =
getProvidedNamenodeDirs(conf.get(HDFS_MINIDFS_BASEDIR), topo);
for (File nnDir : nnDirs) {
MiniDFSCluster.copyNameDirs(
Collections.singletonList(nspath.toUri()),
Collections.singletonList(fileAsURI(nnDir)),
conf);
}
}
}
cluster = builder.build();
cluster.waitActive();
}
private static List<File> getProvidedNamenodeDirs(String baseDir,
MiniDFSNNTopology topo) {
List<File> nnDirs = new ArrayList<>();
int nsCounter = 0;
for (MiniDFSNNTopology.NSConf nsConf : topo.getNameservices()) {
int nnCounter = nsCounter;
for (MiniDFSNNTopology.NNConf nnConf : nsConf.getNNs()) {
if (providedNameservice.equals(nsConf.getId())) {
// only add the first one
File[] nnFiles =
MiniDFSCluster.getNameNodeDirectory(
baseDir, nsCounter, nnCounter);
if (nnFiles == null || nnFiles.length == 0) {
throw new RuntimeException("Failed to get a location for the"
+ "Namenode directory for namespace: " + nsConf.getId()
+ " and namenodeId: " + nnConf.getNnId());
}
nnDirs.add(nnFiles[0]);
}
nnCounter++;
}
nsCounter = nnCounter;
}
return nnDirs;
}
@Test
@Timeout(value = 20)
public void testLoadImage() throws Exception {
final long seed = r.nextLong();
LOG.info("providedPath: " + providedPath);
createImage(new RandomTreeWalk(seed), nnDirPath, FixedBlockResolver.class);
startCluster(nnDirPath, 0,
new StorageType[] {StorageType.PROVIDED, StorageType.DISK}, null,
false);
FileSystem fs = cluster.getFileSystem();
for (TreePath e : new RandomTreeWalk(seed)) {
FileStatus rs = e.getFileStatus();
Path hp = new Path(rs.getPath().toUri().getPath());
assertTrue(fs.exists(hp));
FileStatus hs = fs.getFileStatus(hp);
assertEquals(rs.getPath().toUri().getPath(),
hs.getPath().toUri().getPath());
assertEquals(rs.getPermission(), hs.getPermission());
assertEquals(rs.getLen(), hs.getLen());
assertEquals(singleUser, hs.getOwner());
assertEquals(singleGroup, hs.getGroup());
assertEquals(rs.getAccessTime(), hs.getAccessTime());
assertEquals(rs.getModificationTime(), hs.getModificationTime());
}
}
@Test
@Timeout(value = 30)
public void testProvidedReporting() throws Exception {
conf.setClass(ImageWriter.Options.UGI_CLASS,
SingleUGIResolver.class, UGIResolver.class);
createImage(new FSTreeWalk(providedPath, conf), nnDirPath,
FixedBlockResolver.class);
int numDatanodes = 10;
startCluster(nnDirPath, numDatanodes,
new StorageType[] {StorageType.PROVIDED, StorageType.DISK}, null,
false);
long diskCapacity = 1000;
// set the DISK capacity for testing
for (DataNode dn: cluster.getDataNodes()) {
for (FsVolumeSpi ref : dn.getFSDataset().getFsVolumeReferences()) {
if (ref.getStorageType() == StorageType.DISK) {
((FsVolumeImpl) ref).setCapacityForTesting(diskCapacity);
}
}
}
// trigger heartbeats to update the capacities
cluster.triggerHeartbeats();
Thread.sleep(10000);
// verify namenode stats
FSNamesystem namesystem = cluster.getNameNode().getNamesystem();
DatanodeStatistics dnStats = namesystem.getBlockManager()
.getDatanodeManager().getDatanodeStatistics();
// total capacity reported includes only the local volumes and
// not the provided capacity
assertEquals(diskCapacity * numDatanodes, namesystem.getTotal());
// total storage used should be equal to the totalProvidedStorage
// no capacity should be remaining!
assertEquals(providedDataSize, dnStats.getProvidedCapacity());
assertEquals(providedDataSize, namesystem.getProvidedCapacityTotal());
assertEquals(providedDataSize, dnStats.getStorageTypeStats()
.get(StorageType.PROVIDED).getCapacityTotal());
assertEquals(providedDataSize, dnStats.getStorageTypeStats()
.get(StorageType.PROVIDED).getCapacityUsed());
// verify datanode stats
for (DataNode dn: cluster.getDataNodes()) {
for (StorageReport report : dn.getFSDataset()
.getStorageReports(namesystem.getBlockPoolId())) {
if (report.getStorage().getStorageType() == StorageType.PROVIDED) {
assertEquals(providedDataSize, report.getCapacity());
assertEquals(providedDataSize, report.getDfsUsed());
assertEquals(providedDataSize, report.getBlockPoolUsed());
assertEquals(0, report.getNonDfsUsed());
assertEquals(0, report.getRemaining());
}
}
}
DFSClient client = new DFSClient(new InetSocketAddress("localhost",
cluster.getNameNodePort()), cluster.getConfiguration(0));
BlockManager bm = namesystem.getBlockManager();
for (int fileId = 0; fileId < numFiles; fileId++) {
String filename = "/" + filePrefix + fileId + fileSuffix;
LocatedBlocks locatedBlocks = client.getLocatedBlocks(
filename, 0, baseFileLen);
for (LocatedBlock locatedBlock : locatedBlocks.getLocatedBlocks()) {
BlockInfo blockInfo =
bm.getStoredBlock(locatedBlock.getBlock().getLocalBlock());
Iterator<DatanodeStorageInfo> storagesItr = blockInfo.getStorageInfos();
DatanodeStorageInfo info = storagesItr.next();
assertEquals(StorageType.PROVIDED, info.getStorageType());
DatanodeDescriptor dnDesc = info.getDatanodeDescriptor();
// check the locations that are returned by FSCK have the right name
assertEquals(ProvidedStorageMap.ProvidedDescriptor.NETWORK_LOCATION
+ PATH_SEPARATOR_STR + ProvidedStorageMap.ProvidedDescriptor.NAME,
NodeBase.getPath(dnDesc));
// no DatanodeStorageInfos should remain
assertFalse(storagesItr.hasNext());
}
}
}
@Test
@Timeout(value = 500)
public void testDefaultReplication() throws Exception {
int targetReplication = 2;
conf.setInt(FixedBlockMultiReplicaResolver.REPLICATION, targetReplication);
createImage(new FSTreeWalk(providedPath, conf), nnDirPath,
FixedBlockMultiReplicaResolver.class);
// make the last Datanode with only DISK
startCluster(nnDirPath, 3, null,
new StorageType[][] {
{StorageType.PROVIDED, StorageType.DISK},
{StorageType.PROVIDED, StorageType.DISK},
{StorageType.DISK}},
false);
// wait for the replication to finish
Thread.sleep(50000);
FileSystem fs = cluster.getFileSystem();
int count = 0;
for (TreePath e : new FSTreeWalk(providedPath, conf)) {
FileStatus rs = e.getFileStatus();
Path hp = removePrefix(providedPath, rs.getPath());
LOG.info("path: " + hp.toUri().getPath());
e.accept(count++);
assertTrue(fs.exists(hp));
FileStatus hs = fs.getFileStatus(hp);
if (rs.isFile()) {
BlockLocation[] bl = fs.getFileBlockLocations(
hs.getPath(), 0, hs.getLen());
int i = 0;
for(; i < bl.length; i++) {
int currentRep = bl[i].getHosts().length;
assertEquals(targetReplication, currentRep);
}
}
}
}
static Path removePrefix(Path base, Path walk) {
Path wpath = new Path(walk.toUri().getPath());
Path bpath = new Path(base.toUri().getPath());
Path ret = new Path("/");
while (!(bpath.equals(wpath) || "".equals(wpath.getName()))) {
ret = "".equals(ret.getName())
? new Path("/", wpath.getName())
: new Path(new Path("/", wpath.getName()),
new Path(ret.toString().substring(1)));
wpath = wpath.getParent();
}
if (!bpath.equals(wpath)) {
throw new IllegalArgumentException(base + " not a prefix of " + walk);
}
return ret;
}
private void verifyFileSystemContents(int nnIndex) throws Exception {
FileSystem fs = cluster.getFileSystem(nnIndex);
int count = 0;
// read NN metadata, verify contents match
for (TreePath e : new FSTreeWalk(providedPath, conf)) {
FileStatus rs = e.getFileStatus();
Path hp = removePrefix(providedPath, rs.getPath());
LOG.info("path: " + hp.toUri().getPath());
e.accept(count++);
assertTrue(fs.exists(hp));
FileStatus hs = fs.getFileStatus(hp);
assertEquals(hp.toUri().getPath(), hs.getPath().toUri().getPath());
assertEquals(rs.getPermission(), hs.getPermission());
assertEquals(rs.getOwner(), hs.getOwner());
assertEquals(rs.getGroup(), hs.getGroup());
if (rs.isFile()) {
assertEquals(rs.getLen(), hs.getLen());
try (ReadableByteChannel i = Channels.newChannel(
new FileInputStream(new File(rs.getPath().toUri())))) {
try (ReadableByteChannel j = Channels.newChannel(
fs.open(hs.getPath()))) {
ByteBuffer ib = ByteBuffer.allocate(4096);
ByteBuffer jb = ByteBuffer.allocate(4096);
while (true) {
int il = i.read(ib);
int jl = j.read(jb);
if (il < 0 || jl < 0) {
assertEquals(il, jl);
break;
}
ib.flip();
jb.flip();
int cmp = Math.min(ib.remaining(), jb.remaining());
for (int k = 0; k < cmp; ++k) {
assertEquals(ib.get(), jb.get());
}
ib.compact();
jb.compact();
}
}
}
}
}
}
private BlockLocation[] createFile(Path path, short replication,
long fileLen, long blockLen) throws IOException {
FileSystem fs = cluster.getFileSystem();
// create a file that is not provided
DFSTestUtil.createFile(fs, path, false, (int) blockLen,
fileLen, blockLen, replication, 0, true);
return fs.getFileBlockLocations(path, 0, fileLen);
}
@Test
@Timeout(value = 30)
public void testClusterWithEmptyImage() throws IOException {
// start a cluster with 2 datanodes without any provided storage
startCluster(nnDirPath, 2, null,
new StorageType[][] {
{StorageType.DISK},
{StorageType.DISK}},
true);
assertTrue(cluster.isClusterUp());
assertTrue(cluster.isDataNodeUp());
BlockLocation[] locations = createFile(new Path("/testFile1.dat"),
(short) 2, 1024*1024, 1024*1024);
assertEquals(1, locations.length);
assertEquals(2, locations[0].getHosts().length);
}
private DatanodeInfo[] getAndCheckBlockLocations(DFSClient client,
String filename, long fileLen, long expectedBlocks, int expectedLocations)
throws IOException {
LocatedBlocks locatedBlocks = client.getLocatedBlocks(filename, 0, fileLen);
// given the start and length in the above call,
// only one LocatedBlock in LocatedBlocks
assertEquals(expectedBlocks, locatedBlocks.getLocatedBlocks().size());
DatanodeInfo[] locations =
locatedBlocks.getLocatedBlocks().get(0).getLocations();
assertEquals(expectedLocations, locations.length);
checkUniqueness(locations);
return locations;
}
/**
* verify that the given locations are all unique.
* @param locations
*/
private void checkUniqueness(DatanodeInfo[] locations) {
Set<String> set = new HashSet<>();
for (DatanodeInfo info: locations) {
assertFalse(set.contains(info.getDatanodeUuid()),
"All locations should be unique");
set.add(info.getDatanodeUuid());
}
}
/**
* Tests setting replication of provided files.
* @throws Exception
*/
@Test
@Timeout(value = 50)
public void testSetReplicationForProvidedFiles() throws Exception {
createImage(new FSTreeWalk(providedPath, conf), nnDirPath,
FixedBlockResolver.class);
// 10 Datanodes with both DISK and PROVIDED storage
startCluster(nnDirPath, 10,
new StorageType[]{
StorageType.PROVIDED, StorageType.DISK},
null,
false);
setAndUnsetReplication("/" + filePrefix + (numFiles - 1) + fileSuffix);
}
private void setAndUnsetReplication(String filename) throws Exception {
Path file = new Path(filename);
FileSystem fs = cluster.getFileSystem();
// set the replication to 4, and test that the file has
// the required replication.
short newReplication = 4;
LOG.info("Setting replication of file {} to {}", filename, newReplication);
fs.setReplication(file, newReplication);
DFSTestUtil.waitForReplication((DistributedFileSystem) fs,
file, newReplication, 10000);
DFSClient client = new DFSClient(new InetSocketAddress("localhost",
cluster.getNameNodePort()), cluster.getConfiguration(0));
getAndCheckBlockLocations(client, filename, baseFileLen, 1, newReplication);
// set the replication back to 1
newReplication = 1;
LOG.info("Setting replication of file {} back to {}",
filename, newReplication);
fs.setReplication(file, newReplication);
// defaultReplication number of replicas should be returned
int defaultReplication = conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,
DFSConfigKeys.DFS_REPLICATION_DEFAULT);
DFSTestUtil.waitForReplication((DistributedFileSystem) fs,
file, (short) defaultReplication, 10000);
getAndCheckBlockLocations(client, filename, baseFileLen, 1,
defaultReplication);
}
@Test
@Timeout(value = 30)
public void testProvidedDatanodeFailures() throws Exception {
createImage(new FSTreeWalk(providedPath, conf), nnDirPath,
FixedBlockResolver.class);
startCluster(nnDirPath, 3, null,
new StorageType[][] {
{StorageType.PROVIDED, StorageType.DISK},
{StorageType.PROVIDED, StorageType.DISK},
{StorageType.DISK}},
false);
DataNode providedDatanode1 = cluster.getDataNodes().get(0);
DataNode providedDatanode2 = cluster.getDataNodes().get(1);
DFSClient client = new DFSClient(new InetSocketAddress("localhost",
cluster.getNameNodePort()), cluster.getConfiguration(0));
DatanodeStorageInfo providedDNInfo = getProvidedDatanodeStorageInfo();
if (numFiles >= 1) {
String filename = "/" + filePrefix + (numFiles - 1) + fileSuffix;
// 2 locations returned as there are 2 PROVIDED datanodes
DatanodeInfo[] dnInfos =
getAndCheckBlockLocations(client, filename, baseFileLen, 1, 2);
// the location should be one of the provided DNs available
assertTrue(
dnInfos[0].getDatanodeUuid().equals(
providedDatanode1.getDatanodeUuid())
|| dnInfos[0].getDatanodeUuid().equals(
providedDatanode2.getDatanodeUuid()));
// stop the 1st provided datanode
MiniDFSCluster.DataNodeProperties providedDNProperties1 =
cluster.stopDataNode(0);
// make NameNode detect that datanode is down
BlockManagerTestUtil.noticeDeadDatanode(
cluster.getNameNode(),
providedDatanode1.getDatanodeId().getXferAddr());
// should find the block on the 2nd provided datanode
dnInfos = getAndCheckBlockLocations(client, filename, baseFileLen, 1, 1);
assertEquals(providedDatanode2.getDatanodeUuid(),
dnInfos[0].getDatanodeUuid());
// stop the 2nd provided datanode
MiniDFSCluster.DataNodeProperties providedDNProperties2 =
cluster.stopDataNode(0);
// make NameNode detect that datanode is down
BlockManagerTestUtil.noticeDeadDatanode(
cluster.getNameNode(),
providedDatanode2.getDatanodeId().getXferAddr());
getAndCheckBlockLocations(client, filename, baseFileLen, 1, 0);
// BR count for the provided ProvidedDatanodeStorageInfo should reset to
// 0, when all DNs with PROVIDED storage fail.
assertEquals(0, providedDNInfo.getBlockReportCount());
// restart the provided datanode
cluster.restartDataNode(providedDNProperties1, true);
cluster.waitActive();
assertEquals(1, providedDNInfo.getBlockReportCount());
// should find the block on the 1st provided datanode now
dnInfos = getAndCheckBlockLocations(client, filename, baseFileLen, 1, 1);
// not comparing UUIDs as the datanode can now have a different one.
assertEquals(providedDatanode1.getDatanodeId().getXferAddr(),
dnInfos[0].getXferAddr());
}
}
@Test
@Timeout(value = 300)
public void testTransientDeadDatanodes() throws Exception {
createImage(new FSTreeWalk(providedPath, conf), nnDirPath,
FixedBlockResolver.class);
// 3 Datanodes, 2 PROVIDED and other DISK
startCluster(nnDirPath, 3, null,
new StorageType[][] {
{StorageType.PROVIDED, StorageType.DISK},
{StorageType.PROVIDED, StorageType.DISK},
{StorageType.DISK}},
false);
DataNode providedDatanode = cluster.getDataNodes().get(0);
DatanodeStorageInfo providedDNInfo = getProvidedDatanodeStorageInfo();
int initialBRCount = providedDNInfo.getBlockReportCount();
for (int i= 0; i < numFiles; i++) {
// expect to have 2 locations as we have 2 provided Datanodes.
verifyFileLocation(i, 2);
// NameNode thinks the datanode is down
BlockManagerTestUtil.noticeDeadDatanode(
cluster.getNameNode(),
providedDatanode.getDatanodeId().getXferAddr());
cluster.waitActive();
cluster.triggerHeartbeats();
Thread.sleep(1000);
// the report count should just continue to increase.
assertEquals(initialBRCount + i + 1,
providedDNInfo.getBlockReportCount());
verifyFileLocation(i, 2);
}
}
private DatanodeStorageInfo getProvidedDatanodeStorageInfo() {
ProvidedStorageMap providedStorageMap =
cluster.getNamesystem().getBlockManager().getProvidedStorageMap();
return providedStorageMap.getProvidedStorageInfo();
}
@Test
@Timeout(value = 30)
public void testNamenodeRestart() throws Exception {
createImage(new FSTreeWalk(providedPath, conf), nnDirPath,
FixedBlockResolver.class);
// 3 Datanodes, 2 PROVIDED and other DISK
startCluster(nnDirPath, 3, null,
new StorageType[][] {
{StorageType.PROVIDED, StorageType.DISK},
{StorageType.PROVIDED, StorageType.DISK},
{StorageType.DISK}},
false);
verifyFileLocation(numFiles - 1, 2);
cluster.restartNameNodes();
cluster.waitActive();
verifyFileLocation(numFiles - 1, 2);
}
/**
* verify that the specified file has a valid provided location.
* @param fileIndex the index of the file to verify.
* @throws Exception
*/
private void verifyFileLocation(int fileIndex, int replication)
throws Exception {
DFSClient client = new DFSClient(
new InetSocketAddress("localhost", cluster.getNameNodePort()),
cluster.getConfiguration(0));
if (fileIndex < numFiles && fileIndex >= 0) {
String filename = filePrefix + fileIndex + fileSuffix;
File file = new File(new Path(providedPath, filename).toUri());
long fileLen = file.length();
long blockSize = conf.getLong(FixedBlockResolver.BLOCKSIZE,
FixedBlockResolver.BLOCKSIZE_DEFAULT);
long numLocatedBlocks =
fileLen == 0 ? 1 : (long) Math.ceil(fileLen * 1.0 / blockSize);
getAndCheckBlockLocations(client, "/" + filename, fileLen,
numLocatedBlocks, replication);
}
}
@Test
@Timeout(value = 30)
public void testSetClusterID() throws Exception {
String clusterID = "PROVIDED-CLUSTER";
createImage(new FSTreeWalk(providedPath, conf), nnDirPath,
FixedBlockResolver.class, clusterID, TextFileRegionAliasMap.class);
// 2 Datanodes, 1 PROVIDED and other DISK
startCluster(nnDirPath, 2, null,
new StorageType[][] {
{StorageType.PROVIDED, StorageType.DISK},
{StorageType.DISK}},
false);
NameNode nn = cluster.getNameNode();
assertEquals(clusterID, nn.getNamesystem().getClusterId());
}
@Test
@Timeout(value = 30)
public void testNumberOfProvidedLocations() throws Exception {
// set default replication to 4
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 4);
createImage(new FSTreeWalk(providedPath, conf), nnDirPath,
FixedBlockResolver.class);
// start with 4 PROVIDED location
startCluster(nnDirPath, 4,
new StorageType[]{
StorageType.PROVIDED, StorageType.DISK},
null,
false);
int expectedLocations = 4;
for (int i = 0; i < numFiles; i++) {
verifyFileLocation(i, expectedLocations);
}
// stop 2 datanodes, one after the other and verify number of locations.
for (int i = 1; i <= 2; i++) {
DataNode dn = cluster.getDataNodes().get(0);
cluster.stopDataNode(0);
// make NameNode detect that datanode is down
BlockManagerTestUtil.noticeDeadDatanode(cluster.getNameNode(),
dn.getDatanodeId().getXferAddr());
expectedLocations = 4 - i;
for (int j = 0; j < numFiles; j++) {
verifyFileLocation(j, expectedLocations);
}
}
}
@Test
@Timeout(value = 30)
public void testNumberOfProvidedLocationsManyBlocks() throws Exception {
// increase number of blocks per file to at least 10 blocks per file
conf.setLong(FixedBlockResolver.BLOCKSIZE, baseFileLen/10);
// set default replication to 4
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 4);
createImage(new FSTreeWalk(providedPath, conf), nnDirPath,
FixedBlockResolver.class);
// start with 4 PROVIDED location
startCluster(nnDirPath, 4,
new StorageType[]{
StorageType.PROVIDED, StorageType.DISK},
null,
false);
int expectedLocations = 4;
for (int i = 0; i < numFiles; i++) {
verifyFileLocation(i, expectedLocations);
}
}
private File createInMemoryAliasMapImage() throws Exception {
conf.setClass(ImageWriter.Options.UGI_CLASS, FsUGIResolver.class,
UGIResolver.class);
conf.setClass(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_CLASS,
InMemoryLevelDBAliasMapClient.class, BlockAliasMap.class);
conf.set(DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS, "localhost:32445");
File tempDirectory =
new File(new Path(nnDirPath, "in-memory-alias-map").toUri());
File levelDBDir = new File(tempDirectory, bpid);
levelDBDir.mkdirs();
conf.set(DFS_PROVIDED_ALIASMAP_INMEMORY_LEVELDB_DIR,
tempDirectory.getAbsolutePath());
conf.setInt(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_LOAD_RETRIES, 10);
conf.set(DFS_PROVIDED_ALIASMAP_LEVELDB_PATH,
tempDirectory.getAbsolutePath());
createImage(new FSTreeWalk(providedPath, conf),
nnDirPath,
FixedBlockResolver.class, clusterID,
LevelDBFileRegionAliasMap.class);
return tempDirectory;
}
@Test
public void testInMemoryAliasMap() throws Exception {
File aliasMapImage = createInMemoryAliasMapImage();
// start cluster with two datanodes,
// each with 1 PROVIDED volume and other DISK volume
conf.setBoolean(DFS_PROVIDED_ALIASMAP_INMEMORY_ENABLED, true);
conf.setInt(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_LOAD_RETRIES, 10);
startCluster(nnDirPath, 2,
new StorageType[] {StorageType.PROVIDED, StorageType.DISK},
null, false);
verifyFileSystemContents(0);
FileUtils.deleteDirectory(aliasMapImage);
}
/**
* Find a free port that hasn't been assigned yet.
*
* @param usedPorts set of ports that have already been assigned.
* @param maxTrials maximum number of random ports to try before failure.
* @return an unassigned port.
*/
private int getUnAssignedPort(Set<Integer> usedPorts, int maxTrials) {
int count = 0;
while (count < maxTrials) {
int port = NetUtils.getFreeSocketPort();
if (usedPorts.contains(port)) {
count++;
} else {
return port;
}
}
return -1;
}
private static String providedNameservice;
/**
* Extends the {@link MiniDFSCluster.Builder} to create instances of
* {@link MiniDFSClusterBuilderAliasMap}.
*/
private static
|
ITestProvidedImplementation
|
java
|
google__guice
|
core/test/com/google/inject/example/ClientServiceWithFactories.java
|
{
"start": 966,
"end": 1260
}
|
class ____ {
private ServiceFactory() {}
private static Service instance = new ServiceImpl();
public static Service getInstance() {
return instance;
}
public static void setInstance(Service service) {
instance = service;
}
}
public static
|
ServiceFactory
|
java
|
apache__dubbo
|
dubbo-common/src/main/java/org/apache/dubbo/common/convert/multiple/StringToSortedSetConverter.java
|
{
"start": 1065,
"end": 1384
}
|
class ____ extends StringToIterableConverter<SortedSet> {
public StringToSortedSetConverter(FrameworkModel frameworkModel) {
super(frameworkModel);
}
@Override
protected SortedSet createMultiValue(int size, Class<?> multiValueType) {
return new TreeSet();
}
}
|
StringToSortedSetConverter
|
java
|
google__dagger
|
hilt-android/main/java/dagger/hilt/android/migration/OptionalInjectCheck.java
|
{
"start": 3128,
"end": 3308
}
|
class ____ both @AndroidEntryPoint and @OptionalInject.",
obj.getClass());
return ((InjectedByHilt) obj).wasInjectedByHilt();
}
private OptionalInjectCheck() {}
}
|
with
|
java
|
redisson__redisson
|
redisson/src/main/java/org/redisson/api/RObservableRx.java
|
{
"start": 816,
"end": 1291
}
|
interface ____ {
/**
* Adds object event listener
*
* @see org.redisson.api.ExpiredObjectListener
* @see org.redisson.api.DeletedObjectListener
*
* @param listener - object event listener
* @return listener id
*/
Single<Integer> addListener(ObjectListener listener);
/**
* Removes object event listener
*
* @param listenerId - listener id
*/
Completable removeListener(int listenerId);
}
|
RObservableRx
|
java
|
hibernate__hibernate-orm
|
hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/function/array/GaussDBArraySetFunction.java
|
{
"start": 1227,
"end": 2878
}
|
class ____ extends AbstractSqmSelfRenderingFunctionDescriptor {
public GaussDBArraySetFunction() {
super(
"array_set",
StandardArgumentsValidators.composite(
new ArrayAndElementArgumentValidator( 0, 2 ),
new ArgumentTypesValidator( null, ANY, INTEGER, ANY )
),
ArrayViaArgumentReturnTypeResolver.DEFAULT_INSTANCE,
StandardFunctionArgumentTypeResolvers.composite(
StandardFunctionArgumentTypeResolvers.IMPLIED_RESULT_TYPE,
StandardFunctionArgumentTypeResolvers.invariant( ANY, INTEGER, ANY ),
new ArrayAndElementArgumentTypeResolver( 0, 2 )
)
);
}
@Override
public void render(
SqlAppender sqlAppender,
List<? extends SqlAstNode> sqlAstArguments,
ReturnableType<?> returnType,
SqlAstTranslator<?> walker) {
final Expression arrayExpression = (Expression) sqlAstArguments.get( 0 );
final Expression indexExpression = (Expression) sqlAstArguments.get( 1 );
final Expression elementExpression = (Expression) sqlAstArguments.get( 2 );
sqlAppender.append( "( SELECT array_agg( CASE WHEN idx_gen = ");
indexExpression.accept( walker );
sqlAppender.append( " THEN ");
elementExpression.accept( walker );
sqlAppender.append( " ELSE CASE WHEN idx_gen <= array_length(ewa1_0.the_array, 1) ");
sqlAppender.append( " THEN ewa1_0.the_array[idx_gen] ELSE NULL END END ORDER BY idx_gen ) ");
sqlAppender.append( " FROM generate_series(1, GREATEST(COALESCE(array_length( ");
arrayExpression.accept( walker );
sqlAppender.append( " , 1), 0), ");
indexExpression.accept( walker );
sqlAppender.append( " )) AS idx_gen ) AS result_array ");
}
}
|
GaussDBArraySetFunction
|
java
|
apache__logging-log4j2
|
log4j-jpa/src/test/java/org/apache/logging/log4j/core/appender/db/jpa/AbstractJpaAppenderTest.java
|
{
"start": 1880,
"end": 11462
}
|
class ____ {
private final String databaseType;
private Connection connection;
public AbstractJpaAppenderTest(final String databaseType) {
this.databaseType = databaseType;
}
protected abstract Connection setUpConnection() throws SQLException;
public void setUp(final String configFileName) throws SQLException {
this.connection = this.setUpConnection();
System.setProperty(
ConfigurationFactory.CONFIGURATION_FILE_PROPERTY,
"org/apache/logging/log4j/core/appender/db/jpa/" + configFileName);
final LoggerContext context = LoggerContext.getContext(false);
if (context.getConfiguration() instanceof DefaultConfiguration) {
context.reconfigure();
}
StatusLogger.getLogger().reset();
}
public void tearDown() throws SQLException {
final LoggerContext context = LoggerContext.getContext(false);
try {
final String appenderName = "databaseAppender";
final Appender appender = context.getConfiguration().getAppender(appenderName);
assertNotNull(appender, "The appender '" + appenderName + "' should not be null.");
assertInstanceOf(JpaAppender.class, appender, "The appender should be a JpaAppender.");
((JpaAppender) appender).getManager().close();
} finally {
System.clearProperty(ConfigurationFactory.CONFIGURATION_FILE_PROPERTY);
context.reconfigure();
StatusLogger.getLogger().reset();
if (this.connection != null) {
try (final Statement statement = this.connection.createStatement()) {
statement.execute("SHUTDOWN");
}
this.connection.close();
}
}
}
@Test
public void testBaseJpaEntityAppender() throws SQLException {
try {
this.setUp("log4j2-" + this.databaseType + "-jpa-base.xml");
final RuntimeException exception = new RuntimeException("Hello, world!");
final ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
final PrintWriter writer = new PrintWriter(outputStream);
exception.printStackTrace(writer);
writer.close();
final String stackTrace = outputStream.toString().replace("\r\n", "\n");
final long millis = System.currentTimeMillis();
final Logger logger1 = LogManager.getLogger(this.getClass().getName() + ".testBaseJpaEntityAppender");
final Logger logger2 = LogManager.getLogger(this.getClass().getName() + ".testBaseJpaEntityAppenderAgain");
logger1.info("Test my message 01.");
logger1.error("This is another message 02.", exception);
logger2.warn("A final warning has been issued.");
final Statement statement = this.connection.createStatement();
final ResultSet resultSet = statement.executeQuery("SELECT * FROM jpaBaseLogEntry ORDER BY id");
assertTrue(resultSet.next(), "There should be at least one row.");
long date = resultSet.getTimestamp("eventDate").getTime();
assertTrue(date >= millis, "The date should be later than pre-logging (1).");
assertTrue(date <= System.currentTimeMillis(), "The date should be earlier than now (1).");
assertEquals("INFO", resultSet.getString("level"), "The level column is not correct (1).");
assertEquals(logger1.getName(), resultSet.getString("logger"), "The logger column is not correct (1).");
assertEquals(
"Test my message 01.", resultSet.getString("message"), "The message column is not correct (1).");
assertNull(resultSet.getString("exception"), "The exception column is not correct (1).");
assertTrue(resultSet.next(), "There should be at least two rows.");
date = resultSet.getTimestamp("eventDate").getTime();
assertTrue(date >= millis, "The date should be later than pre-logging (2).");
assertTrue(date <= System.currentTimeMillis(), "The date should be earlier than now (2).");
assertEquals("ERROR", resultSet.getString("level"), "The level column is not correct (2).");
assertEquals(logger1.getName(), resultSet.getString("logger"), "The logger column is not correct (2).");
assertEquals(
"This is another message 02.",
resultSet.getString("message"),
"The message column is not correct (2).");
assertEquals(stackTrace, resultSet.getString("exception"), "The exception column is not correct (2).");
assertTrue(resultSet.next(), "There should be three rows.");
date = resultSet.getTimestamp("eventDate").getTime();
assertTrue(date >= millis, "The date should be later than pre-logging (3).");
assertTrue(date <= System.currentTimeMillis(), "The date should be earlier than now (3).");
assertEquals("WARN", resultSet.getString("level"), "The level column is not correct (3).");
assertEquals(logger2.getName(), resultSet.getString("logger"), "The logger column is not correct (3).");
assertEquals(
"A final warning has been issued.",
resultSet.getString("message"),
"The message column is not correct (3).");
assertNull(resultSet.getString("exception"), "The exception column is not correct (3).");
assertFalse(resultSet.next(), "There should not be four rows.");
} finally {
this.tearDown();
}
}
@Test
public void testBasicJpaEntityAppender() throws SQLException {
try {
this.setUp("log4j2-" + this.databaseType + "-jpa-basic.xml");
final Error exception = new Error("Goodbye, cruel world!");
final ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
final PrintWriter writer = new PrintWriter(outputStream);
exception.printStackTrace(writer);
writer.close();
final String stackTrace = outputStream.toString().replace("\r\n", "\n");
final long millis = System.currentTimeMillis();
final Logger logger1 = LogManager.getLogger(this.getClass().getName() + ".testBasicJpaEntityAppender");
final Logger logger2 = LogManager.getLogger(this.getClass().getName() + ".testBasicJpaEntityAppenderAgain");
logger1.debug("Test my debug 01.");
logger1.warn("This is another warning 02.", exception);
logger2.fatal("A fatal warning has been issued.");
final Statement statement = this.connection.createStatement();
final ResultSet resultSet = statement.executeQuery("SELECT * FROM jpaBasicLogEntry ORDER BY id");
assertTrue(resultSet.next(), "There should be at least one row.");
long date = resultSet.getLong("timemillis");
assertTrue(date >= millis, "The date should be later than pre-logging (1).");
assertTrue(date <= System.currentTimeMillis(), "The date should be earlier than now (1).");
assertEquals("DEBUG", resultSet.getString("level"), "The level column is not correct (1).");
assertEquals(logger1.getName(), resultSet.getString("loggerName"), "The logger column is not correct (1).");
assertEquals("Test my debug 01.", resultSet.getString("message"), "The message column is not correct (1).");
assertNull(resultSet.getString("thrown"), "The exception column is not correct (1).");
assertTrue(resultSet.next(), "There should be at least two rows.");
date = resultSet.getLong("timemillis");
assertTrue(date >= millis, "The date should be later than pre-logging (2).");
assertTrue(date <= System.currentTimeMillis(), "The date should be earlier than now (2).");
assertEquals("WARN", resultSet.getString("level"), "The level column is not correct (2).");
assertEquals(logger1.getName(), resultSet.getString("loggerName"), "The logger column is not correct (2).");
assertEquals(
"This is another warning 02.",
resultSet.getString("message"),
"The message column is not correct (2).");
assertEquals(stackTrace, resultSet.getString("thrown"), "The exception column is not correct (2).");
assertTrue(resultSet.next(), "There should be three rows.");
date = resultSet.getLong("timemillis");
assertTrue(date >= millis, "The date should be later than pre-logging (3).");
assertTrue(date <= System.currentTimeMillis(), "The date should be earlier than now (3).");
assertEquals("FATAL", resultSet.getString("level"), "The level column is not correct (3).");
assertEquals(logger2.getName(), resultSet.getString("loggerName"), "The logger column is not correct (3).");
assertEquals(
"A fatal warning has been issued.",
resultSet.getString("message"),
"The message column is not correct (3).");
assertNull(resultSet.getString("thrown"), "The exception column is not correct (3).");
assertFalse(resultSet.next(), "There should not be four rows.");
} finally {
this.tearDown();
}
}
}
|
AbstractJpaAppenderTest
|
java
|
redisson__redisson
|
redisson/src/test/java/org/redisson/codec/SnappyCodecV2Test.java
|
{
"start": 1590,
"end": 3621
}
|
class ____ implements Serializable
{
private static final long serialVersionUID = 155311581144181716L;
private long field0;
private long field1;
private int field2;
private int field3;
private int field4;
public int getField3()
{
return field3;
}
public void setField3(int allowBisunessAcct)
{
this.field3 = allowBisunessAcct;
}
public int getField2()
{
return field2;
}
public void setField2(int fundSrcSubType)
{
this.field2 = fundSrcSubType;
}
public long getField0()
{
return field0;
}
public void setField0(long partnerId)
{
this.field0 = partnerId;
}
public long getField1()
{
return field1;
}
public void setField1(long productId)
{
this.field1 = productId;
}
public int getField4()
{
return field4;
}
public void setField4(int txnPymntSchldType)
{
this.field4 = txnPymntSchldType;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + (int) (field0 ^ (field0 >>> 32));
result = prime * result + (int) (field1 ^ (field1 >>> 32));
result = prime * result + field2;
result = prime * result + field3;
result = prime * result + field4;
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
TestObject other = (TestObject) obj;
if (field0 != other.field0)
return false;
if (field1 != other.field1)
return false;
if (field2 != other.field2)
return false;
if (field3 != other.field3)
return false;
if (field4 != other.field4)
return false;
return true;
}
}
|
TestObject
|
java
|
quarkusio__quarkus
|
extensions/smallrye-graphql-client/deployment/src/test/java/io/quarkus/smallrye/graphql/client/deployment/DynamicGraphQLClientWebSocketAuthenticationTest.java
|
{
"start": 1526,
"end": 7803
}
|
class ____ {
static String url = "http://" + System.getProperty("quarkus.http.host", "localhost") + ":" +
System.getProperty("quarkus.http.test-port", "8081") + "/graphql";
@RegisterExtension
static QuarkusUnitTest test = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(SecuredApi.class, Foo.class)
.addAsResource("application-secured.properties", "application.properties")
.addAsResource("users.properties")
.addAsResource("roles.properties")
.addAsManifestResource(EmptyAsset.INSTANCE, "beans.xml"));
@Test
public void testAuthenticatedUserForSubscription() throws Exception {
DynamicGraphQLClientBuilder clientBuilder = DynamicGraphQLClientBuilder.newBuilder()
.url(url)
.header("Authorization", "Basic ZGF2aWQ6cXdlcnR5MTIz");
try (DynamicGraphQLClient client = clientBuilder.build()) {
Multi<Response> subscription = client
.subscription("subscription fooSub { fooSub { message } }");
assertNotNull(subscription);
AtomicBoolean hasData = new AtomicBoolean(false);
AtomicBoolean hasCompleted = new AtomicBoolean(false);
subscription.subscribe().with(item -> {
assertFalse(hasData.get());
assertTrue(item.hasData());
assertEquals(JsonValue.ValueType.OBJECT, item.getData().get("fooSub").getValueType());
assertEquals("foo", item.getData().getJsonObject("fooSub").getString("message"));
hasData.set(true);
}, Assertions::fail, () -> {
hasCompleted.set(true);
});
await().untilTrue(hasCompleted);
assertTrue(hasData.get());
}
}
@Test
public void testAuthenticatedUserForQueryWebSocket() throws Exception {
DynamicGraphQLClientBuilder clientBuilder = DynamicGraphQLClientBuilder.newBuilder()
.url(url)
.header("Authorization", "Basic ZGF2aWQ6cXdlcnR5MTIz")
.executeSingleOperationsOverWebsocket(true);
try (DynamicGraphQLClient client = clientBuilder.build()) {
Response response = client.executeSync("{ foo { message} }");
assertTrue(response.hasData());
assertEquals("foo", response.getData().getJsonObject("foo").getString("message"));
}
}
@Test
public void testAuthorizedAndUnauthorizedForQueryWebSocket() throws Exception {
DynamicGraphQLClientBuilder clientBuilder = DynamicGraphQLClientBuilder.newBuilder()
.url(url)
.header("Authorization", "Basic ZGF2aWQ6cXdlcnR5MTIz")
.executeSingleOperationsOverWebsocket(true);
try (DynamicGraphQLClient client = clientBuilder.build()) {
Response response = client.executeSync("{ foo { message} }");
assertTrue(response.hasData());
assertEquals("foo", response.getData().getJsonObject("foo").getString("message"));
// Run a second query with a different result to validate that the result of the first query isn't being cached at all.
response = client.executeSync("{ bar { message} }");
assertEquals(JsonValue.ValueType.NULL, response.getData().get("bar").getValueType());
}
}
@Test
public void testUnauthorizedUserForSubscription() throws Exception {
DynamicGraphQLClientBuilder clientBuilder = DynamicGraphQLClientBuilder.newBuilder()
.url(url)
.header("Authorization", "Basic ZGF2aWQ6cXdlcnR5MTIz");
try (DynamicGraphQLClient client = clientBuilder.build()) {
Multi<Response> subscription = client
.subscription("subscription barSub { barSub { message } }");
assertNotNull(subscription);
AtomicBoolean returned = new AtomicBoolean(false);
subscription.subscribe().with(item -> {
assertEquals(JsonValue.ValueType.NULL, item.getData().get("barSub").getValueType());
returned.set(true);
}, throwable -> Assertions.fail(throwable));
await().untilTrue(returned);
}
}
@Test
public void testAuthenticatedUserButDefinedWithClientInitForQueryWebSocket() throws Exception {
DynamicGraphQLClientBuilder clientBuilder = DynamicGraphQLClientBuilder.newBuilder()
.url(url)
// Because quarkus.smallrye-graphql.authorization-client-init-payload-name is undefined, this will be ignored.
.initPayload(Map.of("Authorization", "Basic ZGF2aWQ6cXdlcnR5MTIz"))
.executeSingleOperationsOverWebsocket(true);
try (DynamicGraphQLClient client = clientBuilder.build()) {
Response response = client.executeSync("{ foo { message} }");
assertEquals(JsonValue.ValueType.NULL, response.getData().get("foo").getValueType());
}
}
@Test
public void testUnauthorizedUserForQueryWebSocket() throws Exception {
DynamicGraphQLClientBuilder clientBuilder = DynamicGraphQLClientBuilder.newBuilder()
.url(url)
.header("Authorization", "Basic ZGF2aWQ6cXdlcnR5MTIz")
.executeSingleOperationsOverWebsocket(true);
try (DynamicGraphQLClient client = clientBuilder.build()) {
Response response = client.executeSync("{ bar { message } }");
assertEquals(JsonValue.ValueType.NULL, response.getData().get("bar").getValueType());
}
}
@Test
public void testUnauthenticatedForQueryWebSocket() throws Exception {
DynamicGraphQLClientBuilder clientBuilder = DynamicGraphQLClientBuilder.newBuilder()
.url(url)
.executeSingleOperationsOverWebsocket(true);
try (DynamicGraphQLClient client = clientBuilder.build()) {
Response response = client.executeSync("{ foo { message} }");
assertEquals(JsonValue.ValueType.NULL, response.getData().get("foo").getValueType());
}
}
public static
|
DynamicGraphQLClientWebSocketAuthenticationTest
|
java
|
apache__hadoop
|
hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/Grep.java
|
{
"start": 1740,
"end": 3828
}
|
class ____ extends Configured implements Tool {
private Grep() {} // singleton
public int run(String[] args) throws Exception {
if (args.length < 3) {
System.out.println("Grep <inDir> <outDir> <regex> [<group>]");
ToolRunner.printGenericCommandUsage(System.out);
return 2;
}
Path tempDir =
new Path("grep-temp-"+
Integer.toString(new Random().nextInt(Integer.MAX_VALUE)));
Configuration conf = getConf();
conf.set(RegexMapper.PATTERN, args[2]);
if (args.length == 4)
conf.set(RegexMapper.GROUP, args[3]);
Job grepJob = Job.getInstance(conf);
try {
grepJob.setJobName("grep-search");
grepJob.setJarByClass(Grep.class);
FileInputFormat.setInputPaths(grepJob, args[0]);
grepJob.setMapperClass(RegexMapper.class);
grepJob.setCombinerClass(LongSumReducer.class);
grepJob.setReducerClass(LongSumReducer.class);
FileOutputFormat.setOutputPath(grepJob, tempDir);
grepJob.setOutputFormatClass(SequenceFileOutputFormat.class);
grepJob.setOutputKeyClass(Text.class);
grepJob.setOutputValueClass(LongWritable.class);
grepJob.waitForCompletion(true);
Job sortJob = Job.getInstance(conf);
sortJob.setJobName("grep-sort");
sortJob.setJarByClass(Grep.class);
FileInputFormat.setInputPaths(sortJob, tempDir);
sortJob.setInputFormatClass(SequenceFileInputFormat.class);
sortJob.setMapperClass(InverseMapper.class);
sortJob.setNumReduceTasks(1); // write a single file
FileOutputFormat.setOutputPath(sortJob, new Path(args[1]));
sortJob.setSortComparatorClass( // sort by decreasing freq
LongWritable.DecreasingComparator.class);
sortJob.waitForCompletion(true);
}
finally {
FileSystem.get(conf).delete(tempDir, true);
}
return 0;
}
public static void main(String[] args) throws Exception {
int res = ToolRunner.run(new Configuration(), new Grep(), args);
System.exit(res);
}
}
|
Grep
|
java
|
mapstruct__mapstruct
|
processor/src/test/java/org/mapstruct/ap/test/bugs/_2018/Issue2018Mapper.java
|
{
"start": 338,
"end": 534
}
|
interface ____ {
Issue2018Mapper INSTANCE = Mappers.getMapper( Issue2018Mapper.class );
@Mapping(target = "some_value", source = "someValue")
Target map(Source source);
}
|
Issue2018Mapper
|
java
|
spring-projects__spring-framework
|
spring-beans/src/test/java/org/springframework/beans/ExtendedBeanInfoTests.java
|
{
"start": 6078,
"end": 6643
}
|
class ____ implements Spr9453<Class<?>> {
@Override
public Class<?> getProp() {
return null;
}
}
{ // always passes
BeanInfo info = Introspector.getBeanInfo(Bean.class);
assertThat(info.getPropertyDescriptors()).hasSize(2);
}
{ // failed prior to fix for SPR-9453
BeanInfo info = new ExtendedBeanInfo(Introspector.getBeanInfo(Bean.class));
assertThat(info.getPropertyDescriptors()).hasSize(2);
}
}
@Test
void standardReadMethodInSuperclassAndNonStandardWriteMethodInSubclass() throws Exception {
@SuppressWarnings("unused")
|
Bean
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/action/admin/indices/sampling/TransportDeleteSampleConfigurationAction.java
|
{
"start": 1978,
"end": 4286
}
|
class ____ extends AcknowledgedTransportMasterNodeAction<
DeleteSampleConfigurationAction.Request> {
private static final Logger logger = LogManager.getLogger(TransportDeleteSampleConfigurationAction.class);
private final ProjectResolver projectResolver;
private final IndexNameExpressionResolver indexNameExpressionResolver;
private final SamplingService samplingService;
@Inject
public TransportDeleteSampleConfigurationAction(
TransportService transportService,
ClusterService clusterService,
ThreadPool threadPool,
ActionFilters actionFilters,
ProjectResolver projectResolver,
IndexNameExpressionResolver indexNameExpressionResolver,
SamplingService samplingService
) {
super(
DeleteSampleConfigurationAction.NAME,
transportService,
clusterService,
threadPool,
actionFilters,
DeleteSampleConfigurationAction.Request::new,
EsExecutors.DIRECT_EXECUTOR_SERVICE
);
this.projectResolver = projectResolver;
this.indexNameExpressionResolver = indexNameExpressionResolver;
this.samplingService = samplingService;
}
@Override
protected void masterOperation(
Task task,
DeleteSampleConfigurationAction.Request request,
ClusterState state,
ActionListener<AcknowledgedResponse> listener
) throws Exception {
try {
SamplingService.throwIndexNotFoundExceptionIfNotDataStreamOrIndex(indexNameExpressionResolver, projectResolver, state, request);
} catch (IndexNotFoundException e) {
listener.onFailure(e);
return;
}
ProjectId projectId = projectResolver.getProjectId();
samplingService.deleteSampleConfiguration(
projectId,
request.indices()[0],
request.masterNodeTimeout(),
request.ackTimeout(),
listener
);
}
@Override
protected ClusterBlockException checkBlock(DeleteSampleConfigurationAction.Request request, ClusterState state) {
return state.blocks().globalBlockedException(projectResolver.getProjectId(), ClusterBlockLevel.METADATA_WRITE);
}
}
|
TransportDeleteSampleConfigurationAction
|
java
|
apache__camel
|
components/camel-fhir/camel-fhir-component/src/generated/java/org/apache/camel/component/fhir/FhirHistoryEndpointConfigurationConfigurer.java
|
{
"start": 730,
"end": 14237
}
|
class ____ extends org.apache.camel.support.component.PropertyConfigurerSupport implements GeneratedPropertyConfigurer, ExtendedPropertyConfigurerGetter {
private static final Map<String, Object> ALL_OPTIONS;
static {
Map<String, Object> map = new CaseInsensitiveMap();
map.put("AccessToken", java.lang.String.class);
map.put("ApiName", org.apache.camel.component.fhir.internal.FhirApiName.class);
map.put("Client", ca.uhn.fhir.rest.client.api.IGenericClient.class);
map.put("ClientFactory", ca.uhn.fhir.rest.client.api.IRestfulClientFactory.class);
map.put("Compress", boolean.class);
map.put("ConnectionTimeout", java.lang.Integer.class);
map.put("Count", java.lang.Integer.class);
map.put("Cutoff", java.util.Date.class);
map.put("DeferModelScanning", boolean.class);
map.put("Encoding", java.lang.String.class);
map.put("ExtraParameters", java.util.Map.class);
map.put("FhirContext", ca.uhn.fhir.context.FhirContext.class);
map.put("FhirVersion", java.lang.String.class);
map.put("ForceConformanceCheck", boolean.class);
map.put("ICutoff", org.hl7.fhir.instance.model.api.IPrimitiveType.class);
map.put("Id", org.hl7.fhir.instance.model.api.IIdType.class);
map.put("Log", boolean.class);
map.put("MethodName", java.lang.String.class);
map.put("Password", java.lang.String.class);
map.put("PrettyPrint", boolean.class);
map.put("ProxyHost", java.lang.String.class);
map.put("ProxyPassword", java.lang.String.class);
map.put("ProxyPort", java.lang.Integer.class);
map.put("ProxyUser", java.lang.String.class);
map.put("ResourceType", java.lang.Class.class);
map.put("ReturnType", java.lang.Class.class);
map.put("ServerUrl", java.lang.String.class);
map.put("SessionCookie", java.lang.String.class);
map.put("SocketTimeout", java.lang.Integer.class);
map.put("Summary", java.lang.String.class);
map.put("Username", java.lang.String.class);
map.put("ValidationMode", java.lang.String.class);
ALL_OPTIONS = map;
}
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
org.apache.camel.component.fhir.FhirHistoryEndpointConfiguration target = (org.apache.camel.component.fhir.FhirHistoryEndpointConfiguration) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "accesstoken":
case "accessToken": target.setAccessToken(property(camelContext, java.lang.String.class, value)); return true;
case "apiname":
case "apiName": target.setApiName(property(camelContext, org.apache.camel.component.fhir.internal.FhirApiName.class, value)); return true;
case "client": target.setClient(property(camelContext, ca.uhn.fhir.rest.client.api.IGenericClient.class, value)); return true;
case "clientfactory":
case "clientFactory": target.setClientFactory(property(camelContext, ca.uhn.fhir.rest.client.api.IRestfulClientFactory.class, value)); return true;
case "compress": target.setCompress(property(camelContext, boolean.class, value)); return true;
case "connectiontimeout":
case "connectionTimeout": target.setConnectionTimeout(property(camelContext, java.lang.Integer.class, value)); return true;
case "count": target.setCount(property(camelContext, java.lang.Integer.class, value)); return true;
case "cutoff": target.setCutoff(property(camelContext, java.util.Date.class, value)); return true;
case "defermodelscanning":
case "deferModelScanning": target.setDeferModelScanning(property(camelContext, boolean.class, value)); return true;
case "encoding": target.setEncoding(property(camelContext, java.lang.String.class, value)); return true;
case "extraparameters":
case "extraParameters": target.setExtraParameters(property(camelContext, java.util.Map.class, value)); return true;
case "fhircontext":
case "fhirContext": target.setFhirContext(property(camelContext, ca.uhn.fhir.context.FhirContext.class, value)); return true;
case "fhirversion":
case "fhirVersion": target.setFhirVersion(property(camelContext, java.lang.String.class, value)); return true;
case "forceconformancecheck":
case "forceConformanceCheck": target.setForceConformanceCheck(property(camelContext, boolean.class, value)); return true;
case "icutoff":
case "iCutoff": target.setICutoff(property(camelContext, org.hl7.fhir.instance.model.api.IPrimitiveType.class, value)); return true;
case "id": target.setId(property(camelContext, org.hl7.fhir.instance.model.api.IIdType.class, value)); return true;
case "log": target.setLog(property(camelContext, boolean.class, value)); return true;
case "methodname":
case "methodName": target.setMethodName(property(camelContext, java.lang.String.class, value)); return true;
case "password": target.setPassword(property(camelContext, java.lang.String.class, value)); return true;
case "prettyprint":
case "prettyPrint": target.setPrettyPrint(property(camelContext, boolean.class, value)); return true;
case "proxyhost":
case "proxyHost": target.setProxyHost(property(camelContext, java.lang.String.class, value)); return true;
case "proxypassword":
case "proxyPassword": target.setProxyPassword(property(camelContext, java.lang.String.class, value)); return true;
case "proxyport":
case "proxyPort": target.setProxyPort(property(camelContext, java.lang.Integer.class, value)); return true;
case "proxyuser":
case "proxyUser": target.setProxyUser(property(camelContext, java.lang.String.class, value)); return true;
case "resourcetype":
case "resourceType": target.setResourceType(property(camelContext, java.lang.Class.class, value)); return true;
case "returntype":
case "returnType": target.setReturnType(property(camelContext, java.lang.Class.class, value)); return true;
case "serverurl":
case "serverUrl": target.setServerUrl(property(camelContext, java.lang.String.class, value)); return true;
case "sessioncookie":
case "sessionCookie": target.setSessionCookie(property(camelContext, java.lang.String.class, value)); return true;
case "sockettimeout":
case "socketTimeout": target.setSocketTimeout(property(camelContext, java.lang.Integer.class, value)); return true;
case "summary": target.setSummary(property(camelContext, java.lang.String.class, value)); return true;
case "username": target.setUsername(property(camelContext, java.lang.String.class, value)); return true;
case "validationmode":
case "validationMode": target.setValidationMode(property(camelContext, java.lang.String.class, value)); return true;
default: return false;
}
}
@Override
public Map<String, Object> getAllOptions(Object target) {
return ALL_OPTIONS;
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "accesstoken":
case "accessToken": return java.lang.String.class;
case "apiname":
case "apiName": return org.apache.camel.component.fhir.internal.FhirApiName.class;
case "client": return ca.uhn.fhir.rest.client.api.IGenericClient.class;
case "clientfactory":
case "clientFactory": return ca.uhn.fhir.rest.client.api.IRestfulClientFactory.class;
case "compress": return boolean.class;
case "connectiontimeout":
case "connectionTimeout": return java.lang.Integer.class;
case "count": return java.lang.Integer.class;
case "cutoff": return java.util.Date.class;
case "defermodelscanning":
case "deferModelScanning": return boolean.class;
case "encoding": return java.lang.String.class;
case "extraparameters":
case "extraParameters": return java.util.Map.class;
case "fhircontext":
case "fhirContext": return ca.uhn.fhir.context.FhirContext.class;
case "fhirversion":
case "fhirVersion": return java.lang.String.class;
case "forceconformancecheck":
case "forceConformanceCheck": return boolean.class;
case "icutoff":
case "iCutoff": return org.hl7.fhir.instance.model.api.IPrimitiveType.class;
case "id": return org.hl7.fhir.instance.model.api.IIdType.class;
case "log": return boolean.class;
case "methodname":
case "methodName": return java.lang.String.class;
case "password": return java.lang.String.class;
case "prettyprint":
case "prettyPrint": return boolean.class;
case "proxyhost":
case "proxyHost": return java.lang.String.class;
case "proxypassword":
case "proxyPassword": return java.lang.String.class;
case "proxyport":
case "proxyPort": return java.lang.Integer.class;
case "proxyuser":
case "proxyUser": return java.lang.String.class;
case "resourcetype":
case "resourceType": return java.lang.Class.class;
case "returntype":
case "returnType": return java.lang.Class.class;
case "serverurl":
case "serverUrl": return java.lang.String.class;
case "sessioncookie":
case "sessionCookie": return java.lang.String.class;
case "sockettimeout":
case "socketTimeout": return java.lang.Integer.class;
case "summary": return java.lang.String.class;
case "username": return java.lang.String.class;
case "validationmode":
case "validationMode": return java.lang.String.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
org.apache.camel.component.fhir.FhirHistoryEndpointConfiguration target = (org.apache.camel.component.fhir.FhirHistoryEndpointConfiguration) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "accesstoken":
case "accessToken": return target.getAccessToken();
case "apiname":
case "apiName": return target.getApiName();
case "client": return target.getClient();
case "clientfactory":
case "clientFactory": return target.getClientFactory();
case "compress": return target.isCompress();
case "connectiontimeout":
case "connectionTimeout": return target.getConnectionTimeout();
case "count": return target.getCount();
case "cutoff": return target.getCutoff();
case "defermodelscanning":
case "deferModelScanning": return target.isDeferModelScanning();
case "encoding": return target.getEncoding();
case "extraparameters":
case "extraParameters": return target.getExtraParameters();
case "fhircontext":
case "fhirContext": return target.getFhirContext();
case "fhirversion":
case "fhirVersion": return target.getFhirVersion();
case "forceconformancecheck":
case "forceConformanceCheck": return target.isForceConformanceCheck();
case "icutoff":
case "iCutoff": return target.getICutoff();
case "id": return target.getId();
case "log": return target.isLog();
case "methodname":
case "methodName": return target.getMethodName();
case "password": return target.getPassword();
case "prettyprint":
case "prettyPrint": return target.isPrettyPrint();
case "proxyhost":
case "proxyHost": return target.getProxyHost();
case "proxypassword":
case "proxyPassword": return target.getProxyPassword();
case "proxyport":
case "proxyPort": return target.getProxyPort();
case "proxyuser":
case "proxyUser": return target.getProxyUser();
case "resourcetype":
case "resourceType": return target.getResourceType();
case "returntype":
case "returnType": return target.getReturnType();
case "serverurl":
case "serverUrl": return target.getServerUrl();
case "sessioncookie":
case "sessionCookie": return target.getSessionCookie();
case "sockettimeout":
case "socketTimeout": return target.getSocketTimeout();
case "summary": return target.getSummary();
case "username": return target.getUsername();
case "validationmode":
case "validationMode": return target.getValidationMode();
default: return null;
}
}
@Override
public Object getCollectionValueType(Object target, String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "extraparameters":
case "extraParameters": return java.lang.Object.class;
case "icutoff":
case "iCutoff": return java.util.Date.class;
case "resourcetype":
case "resourceType": return org.hl7.fhir.instance.model.api.IBaseResource.class;
case "returntype":
case "returnType": return org.hl7.fhir.instance.model.api.IBaseBundle.class;
default: return null;
}
}
}
|
FhirHistoryEndpointConfigurationConfigurer
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/FoldablesConvertFunction.java
|
{
"start": 1382,
"end": 2898
}
|
class ____ extends AbstractConvertFunction implements PostOptimizationVerificationAware {
protected FoldablesConvertFunction(Source source, Expression field) {
super(source, field);
}
@Override
public final void writeTo(StreamOutput out) {
throw new UnsupportedOperationException("not serialized");
}
@Override
public final String getWriteableName() {
throw new UnsupportedOperationException("not serialized");
}
@Override
protected final TypeResolution resolveType() {
if (childrenResolved() == false) {
return new TypeResolution("Unresolved children");
}
return isType(
field(),
dt -> isString(dt) || dt == dataType(),
sourceText(),
null,
false,
dataType().typeName().toLowerCase(Locale.ROOT) + " or string"
);
}
@Override
protected final Map<DataType, BuildFactory> factories() {
// This is used by ResolveUnionTypes, which is expected to be applied to ES fields only
// FoldablesConvertFunction takes only constants as inputs, so this is empty
return Map.of();
}
@Override
public final Object fold(FoldContext ctx) {
return foldToTemporalAmount(ctx, field(), sourceText(), dataType());
}
@Override
public final void postOptimizationVerification(Failures failures) {
failures.add(isFoldable(field(), sourceText(), null));
}
}
|
FoldablesConvertFunction
|
java
|
apache__flink
|
flink-python/src/main/java/org/apache/flink/table/runtime/arrow/vectors/ArrowBigIntColumnVector.java
|
{
"start": 1125,
"end": 1656
}
|
class ____ implements LongColumnVector {
/** Container which is used to store the sequence of bigint values of a column to read. */
private final BigIntVector bigIntVector;
public ArrowBigIntColumnVector(BigIntVector bigIntVector) {
this.bigIntVector = Preconditions.checkNotNull(bigIntVector);
}
@Override
public long getLong(int i) {
return bigIntVector.get(i);
}
@Override
public boolean isNullAt(int i) {
return bigIntVector.isNull(i);
}
}
|
ArrowBigIntColumnVector
|
java
|
mapstruct__mapstruct
|
integrationtest/src/test/resources/cdiTest/src/main/java/org/mapstruct/itest/cdi/SourceTargetMapperDecorator.java
|
{
"start": 305,
"end": 627
}
|
class ____ implements DecoratedSourceTargetMapper {
@Delegate
@Inject
private DecoratedSourceTargetMapper delegate;
@Override
public Target sourceToTarget(Source source) {
Target t = delegate.sourceToTarget( source );
t.setFoo( 43L );
return t;
}
}
|
SourceTargetMapperDecorator
|
java
|
apache__camel
|
components/camel-braintree/src/generated/java/org/apache/camel/component/braintree/UsBankAccountGatewayEndpointConfiguration.java
|
{
"start": 914,
"end": 1732
}
|
class ____ extends BraintreeConfiguration {
@UriParam
@ApiParam(optional = false, apiMethods = {@ApiMethod(methodName = "find"), @ApiMethod(methodName = "sale")})
private String token;
@UriParam
@ApiParam(optional = false, apiMethods = {@ApiMethod(methodName = "sale")})
private com.braintreegateway.TransactionRequest transactionRequest;
public String getToken() {
return token;
}
public void setToken(String token) {
this.token = token;
}
public com.braintreegateway.TransactionRequest getTransactionRequest() {
return transactionRequest;
}
public void setTransactionRequest(com.braintreegateway.TransactionRequest transactionRequest) {
this.transactionRequest = transactionRequest;
}
}
|
UsBankAccountGatewayEndpointConfiguration
|
java
|
google__auto
|
value/src/it/functional/src/test/java/com/google/auto/value/AutoValueJava8Test.java
|
{
"start": 21720,
"end": 22029
}
|
class ____ {
public abstract String notOptional();
public abstract Optional<String> optional();
public static Builder builder() {
return new AutoValue_AutoValueJava8Test_OptionalPropertyWithNullableBuilder.Builder();
}
@AutoValue.Builder
public
|
OptionalPropertyWithNullableBuilder
|
java
|
spring-projects__spring-framework
|
spring-context/src/test/java/org/springframework/context/annotation/BeanMethodPolymorphismTests.java
|
{
"start": 8911,
"end": 8986
}
|
class ____ extends BaseTestBean {
}
@Configuration
static
|
ExtendedTestBean
|
java
|
apache__camel
|
core/camel-core-processor/src/main/java/org/apache/camel/processor/aggregate/AggregationStrategyBeanAdapter.java
|
{
"start": 2563,
"end": 3093
}
|
class ____ of the pojo
*/
public AggregationStrategyBeanAdapter(Class<?> type) {
this(type, null);
}
/**
* Creates this adapter.
*
* @param pojo the pojo to use.
* @param methodName the name of the method to call
*/
public AggregationStrategyBeanAdapter(Object pojo, String methodName) {
this.pojo = pojo;
this.type = pojo.getClass();
this.methodName = methodName;
}
/**
* Creates this adapter.
*
* @param type the
|
type
|
java
|
spring-projects__spring-framework
|
spring-test/src/test/java/org/springframework/test/context/bean/override/easymock/EasyMockResetTestExecutionListener.java
|
{
"start": 1315,
"end": 2358
}
|
class ____ extends AbstractTestExecutionListener {
@Override
public int getOrder() {
return Ordered.LOWEST_PRECEDENCE - 100;
}
@Override
public void beforeTestMethod(TestContext testContext) throws Exception {
resetMocks(testContext.getApplicationContext());
}
@Override
public void afterTestMethod(TestContext testContext) throws Exception {
resetMocks(testContext.getApplicationContext());
}
private void resetMocks(ApplicationContext applicationContext) {
if (applicationContext instanceof ConfigurableApplicationContext configurableContext) {
resetMocks(configurableContext);
}
}
private void resetMocks(ConfigurableApplicationContext applicationContext) {
ConfigurableListableBeanFactory beanFactory = applicationContext.getBeanFactory();
try {
beanFactory.getBean(EasyMockBeans.class).resetAll();
}
catch (NoSuchBeanDefinitionException ex) {
// Continue
}
if (applicationContext.getParent() != null) {
resetMocks(applicationContext.getParent());
}
}
}
|
EasyMockResetTestExecutionListener
|
java
|
apache__dubbo
|
dubbo-common/src/test/java/org/apache/dubbo/common/timer/HashedWheelTimerTest.java
|
{
"start": 1526,
"end": 1708
}
|
class ____ implements TimerTask {
@Override
public void run(Timeout timeout) throws InterruptedException {
this.wait();
}
}
private
|
BlockTask
|
java
|
apache__camel
|
components/camel-kubernetes/src/main/java/org/apache/camel/component/kubernetes/cloud/KubernetesServiceDiscovery.java
|
{
"start": 1096,
"end": 1456
}
|
class ____ extends DefaultServiceDiscovery {
private final KubernetesConfiguration configuration;
protected KubernetesServiceDiscovery(KubernetesConfiguration configuration) {
this.configuration = configuration;
}
protected KubernetesConfiguration getConfiguration() {
return this.configuration;
}
}
|
KubernetesServiceDiscovery
|
java
|
assertj__assertj-core
|
assertj-core/src/test/java/org/assertj/core/internal/iterables/Iterables_assertContainsExactly_Test.java
|
{
"start": 2185,
"end": 8302
}
|
class ____ extends IterablesBaseTest {
@Test
void should_pass_if_actual_contains_exactly_given_values() {
iterables.assertContainsExactly(INFO, actual, array("Luke", "Yoda", "Leia"));
}
@Test
void should_pass_if_non_restartable_actual_contains_exactly_given_values() {
iterables.assertContainsExactly(INFO, createSinglyIterable(actual), array("Luke", "Yoda", "Leia"));
}
@Test
void should_pass_if_actual_contains_given_values_exactly_with_null_elements() {
// GIVEN
actual.add(null);
// WHEN/THEN
iterables.assertContainsExactly(INFO, actual, array("Luke", "Yoda", "Leia", null));
}
@Test
void should_pass_if_actual_and_given_values_are_empty() {
// GIVEN
actual.clear();
// WHEN/THEN
iterables.assertContainsExactly(INFO, actual, array());
}
@Test
void should_fail_if_array_of_values_to_look_for_is_empty_and_actual_is_not() {
// GIVEN
Object[] values = emptyArray();
// WHEN/THEN
expectAssertionError(() -> iterables.assertContainsExactly(INFO, actual, values));
}
@Test
void should_throw_error_if_array_of_values_to_look_for_is_null() {
assertThatNullPointerException().isThrownBy(() -> iterables.assertContainsExactly(INFO, emptyList(), null))
.withMessage(valuesToLookForIsNull());
}
@Test
void should_fail_if_actual_is_null() {
// GIVEN
actual = null;
// WHEN
var assertionError = expectAssertionError(() -> iterables.assertContainsExactly(INFO, actual, array("Yoda")));
// THEN
then(assertionError).hasMessage(actualIsNull());
}
@Test
void should_fail_if_actual_does_not_contain_given_values_exactly() {
// GIVEN
Object[] expected = { "Luke", "Yoda", "Han" };
// WHEN
expectAssertionError(() -> iterables.assertContainsExactly(INFO, actual, expected));
// THEN
List<String> notFound = list("Han");
List<String> notExpected = list("Leia");
verify(failures).failure(INFO, shouldContainExactly(actual, asList(expected), notFound, notExpected), actual,
asList(expected));
}
@Test
void should_fail_if_actual_contains_all_given_values_in_different_order() {
// GIVEN
Object[] expected = { "Luke", "Leia", "Yoda" };
// WHEN
expectAssertionError(() -> iterables.assertContainsExactly(INFO, actual, expected));
// THEN
List<IndexedDiff> indexDiffs = list(new IndexedDiff("Yoda", "Leia", 1),
new IndexedDiff("Leia", "Yoda", 2));
verify(failures).failure(INFO, shouldContainExactlyWithIndexes(actual, list(expected), indexDiffs), actual, list(expected));
}
@Test
void should_fail_if_actual_contains_all_given_values_but_size_differ() {
// GIVEN
actual = list("Luke", "Leia", "Luke");
Object[] expected = { "Luke", "Leia" };
// WHEN
expectAssertionError(() -> iterables.assertContainsExactly(INFO, actual, expected));
// THEN
verify(failures).failure(INFO, shouldContainExactly(actual, asList(expected), emptyList(), list("Luke")), actual,
asList(expected));
}
// ------------------------------------------------------------------------------------------------------------------
// tests using a custom comparison strategy
// ------------------------------------------------------------------------------------------------------------------
@Test
void should_pass_if_actual_contains_given_values_exactly_according_to_custom_comparison_strategy() {
iterablesWithCaseInsensitiveComparisonStrategy.assertContainsExactly(INFO, actual, array("LUKE", "YODA", "Leia"));
}
@Test
void should_fail_if_actual_does_not_contain_given_values_exactly_according_to_custom_comparison_strategy() {
// GIVEN
Object[] expected = { "Luke", "Yoda", "Han" };
// WHEN
expectAssertionError(() -> iterablesWithCaseInsensitiveComparisonStrategy.assertContainsExactly(INFO, actual, expected));
// THEN
verify(failures).failure(INFO, shouldContainExactly(actual, asList(expected), list("Han"), list("Leia"), comparisonStrategy),
actual, asList(expected));
}
@Test
void should_fail_if_actual_contains_all_given_values_in_different_order_according_to_custom_comparison_strategy() {
// GIVEN
Object[] expected = { "Luke", "Leia", "Yoda" };
// WHEN
expectAssertionError(() -> iterablesWithCaseInsensitiveComparisonStrategy.assertContainsExactly(INFO, actual, expected));
// THEN
List<IndexedDiff> indexDiffs = list(new IndexedDiff("Yoda", "Leia", 1),
new IndexedDiff("Leia", "Yoda", 2));
verify(failures).failure(INFO, shouldContainExactlyWithIndexes(actual, list(expected), indexDiffs, comparisonStrategy),
actual, list(expected));
}
@Test
void should_fail_if_actual_contains_all_given_values_but_size_differ_according_to_custom_comparison_strategy() {
// GIVEN
actual = list("Luke", "Leia", "Luke");
Object[] expected = { "LUKE", "Leia" };
// WHEN
expectAssertionError(() -> iterablesWithCaseInsensitiveComparisonStrategy.assertContainsExactly(INFO, actual, expected));
// THEN
verify(failures).failure(INFO, shouldContainExactly(actual, asList(expected), emptyList(), list("Luke"), comparisonStrategy),
actual, asList(expected));
}
@Test
void should_fail_if_order_does_not_match_and_total_printed_indexes_should_be_equal_to_max_elements_for_printing() {
// GIVEN
List<Integer> actual = IntStream.rangeClosed(0, MAX_INDICES_FOR_PRINTING).boxed().collect(toList());
Object[] expected = IntStream.rangeClosed(0, MAX_INDICES_FOR_PRINTING).boxed().sorted(reverseOrder()).toArray();
// WHEN
var error = expectAssertionError(() -> iterables.assertContainsExactly(INFO, actual, expected));
// THEN
int maxIndex = MAX_INDICES_FOR_PRINTING - 1;
then(error).hasMessageContaining("index " + maxIndex)
.hasMessageNotContaining("index " + maxIndex + 1);
}
}
|
Iterables_assertContainsExactly_Test
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/RandomModIntegerTest.java
|
{
"start": 1200,
"end": 1653
}
|
class ____ {
public static void main(String[] args) {
Random r = new Random();
// BUG: Diagnostic contains:
System.err.println(r.nextInt() % 100);
}
}
""")
.doTest();
}
@Test
public void negative() {
compilationHelper
.addSourceLines(
"Test.java",
"""
import java.util.Random;
|
Test
|
java
|
micronaut-projects__micronaut-core
|
http/src/test/java/io/micronaut/http/cookie/ServerCookieEncoderTest.java
|
{
"start": 137,
"end": 325
}
|
class ____ {
@Test
void serverCookieEncoderResolvedViaSpi() {
assertInstanceOf(DefaultServerCookieEncoder.class, ServerCookieEncoder.INSTANCE);
}
}
|
ServerCookieEncoderTest
|
java
|
spring-projects__spring-framework
|
spring-context/src/test/java/org/springframework/context/annotation/ConfigurationClassAndBFPPTests.java
|
{
"start": 3135,
"end": 3257
}
|
class ____ {
@Bean
public static TestBean testBean() {
return new TestBean("foo");
}
}
}
|
ConfigWithStaticBeanMethod
|
java
|
spring-projects__spring-framework
|
spring-aop/src/test/java/org/springframework/aop/aspectj/annotation/AbstractAspectJAdvisorFactoryTests.java
|
{
"start": 33216,
"end": 33239
}
|
interface ____ {}
|
Measured
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java
|
{
"start": 10102,
"end": 11158
}
|
class ____ {
public final IndexFieldCache indexCache;
public final IndexReader.CacheKey readerKey;
public final ShardId shardId;
public final List<IndexFieldDataCache.Listener> listeners = new ArrayList<>();
Key(IndexFieldCache indexCache, IndexReader.CacheKey readerKey, @Nullable ShardId shardId) {
this.indexCache = indexCache;
this.readerKey = readerKey;
this.shardId = shardId;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Key key = (Key) o;
if (indexCache.equals(key.indexCache) == false) return false;
if (readerKey.equals(key.readerKey) == false) return false;
return true;
}
@Override
public int hashCode() {
int result = indexCache.hashCode();
result = 31 * result + readerKey.hashCode();
return result;
}
}
}
|
Key
|
java
|
spring-projects__spring-security
|
config/src/test/java/org/springframework/security/config/annotation/web/configurers/X509ConfigurerTests.java
|
{
"start": 3635,
"end": 8629
}
|
class ____ {
public final SpringTestContext spring = new SpringTestContext(this);
@Autowired
MockMvc mvc;
@Test
public void configureWhenRegisteringObjectPostProcessorThenInvokedOnX509AuthenticationFilter() {
this.spring.register(ObjectPostProcessorConfig.class).autowire();
ObjectPostProcessor<Object> objectPostProcessor = this.spring.getContext().getBean(ObjectPostProcessor.class);
verify(objectPostProcessor).postProcess(any(X509AuthenticationFilter.class));
}
@Test
public void x509WhenInvokedTwiceThenUsesOriginalSubjectPrincipalRegex() throws Exception {
this.spring.register(DuplicateDoesNotOverrideConfig.class).autowire();
X509Certificate certificate = loadCert("rodatexampledotcom.cer");
// @formatter:off
this.mvc.perform(get("/").with(x509(certificate)))
.andExpect(authenticated().withUsername("rod"));
// @formatter:on
}
@Test
public void x509WhenConfiguredInLambdaThenUsesDefaults() throws Exception {
this.spring.register(DefaultsInLambdaConfig.class).autowire();
X509Certificate certificate = loadCert("rod.cer");
// @formatter:off
this.mvc.perform(get("/").with(x509(certificate)))
.andExpect(authenticated().withUsername("rod"));
// @formatter:on
}
@Test
public void x509WhenCustomSecurityContextHolderStrategyThenUses() throws Exception {
this.spring.register(DefaultsInLambdaConfig.class, SecurityContextChangedListenerConfig.class).autowire();
X509Certificate certificate = loadCert("rod.cer");
// @formatter:off
this.mvc.perform(get("/").with(x509(certificate)))
.andExpect(authenticated().withUsername("rod"));
// @formatter:on
SecurityContextHolderStrategy strategy = this.spring.getContext().getBean(SecurityContextHolderStrategy.class);
verify(strategy, atLeastOnce()).getContext();
SecurityContextChangedListener listener = this.spring.getContext()
.getBean(SecurityContextChangedListener.class);
verify(listener).securityContextChanged(setAuthentication(PreAuthenticatedAuthenticationToken.class));
}
@Test
public void x509WhenSubjectPrincipalRegexInLambdaThenUsesRegexToExtractPrincipal() throws Exception {
this.spring.register(SubjectPrincipalRegexInLambdaConfig.class).autowire();
X509Certificate certificate = loadCert("rodatexampledotcom.cer");
// @formatter:off
this.mvc.perform(get("/").with(x509(certificate)))
.andExpect(authenticated().withUsername("rod"));
// @formatter:on
}
	// If no UserDetailsService is configured on the DSL, the x509 filter must
	// fall back to the UserDetailsService bean from the application context.
	@Test
	public void x509WhenUserDetailsServiceNotConfiguredThenUsesBean() throws Exception {
		this.spring.register(UserDetailsServiceBeanConfig.class).autowire();
		X509Certificate certificate = loadCert("rod.cer");
		// @formatter:off
		this.mvc.perform(get("/").with(x509(certificate)))
				.andExpect(authenticated().withUsername("rod"));
		// @formatter:on
	}
	// An explicitly configured UserDetailsService must take precedence over
	// any UserDetailsService bean present in the context.
	@Test
	public void x509WhenUserDetailsServiceAndBeanConfiguredThenDoesNotUseBean() throws Exception {
		this.spring.register(UserDetailsServiceAndBeanConfig.class).autowire();
		X509Certificate certificate = loadCert("rod.cer");
		// @formatter:off
		this.mvc.perform(get("/").with(x509(certificate)))
				.andExpect(authenticated().withUsername("rod"));
		// @formatter:on
	}
	// gh-13008
	// With STATELESS session management, a successful x509 authentication
	// must not create an HTTP session as a side effect.
	@Test
	public void x509WhenStatelessSessionManagementThenDoesNotCreateSession() throws Exception {
		this.spring.register(StatelessSessionManagementConfig.class).autowire();
		X509Certificate certificate = loadCert("rodatexampledotcom.cer");
		// @formatter:off
		this.mvc.perform(get("/").with(x509(certificate)))
				.andExpect((result) -> assertThat(result.getRequest().getSession(false)).isNull())
				.andExpect(authenticated().withUsername("rod"));
		// @formatter:on
	}
	// Configuring the SubjectX500PrincipalExtractor should extract "rod"
	// from the certificate's X.500 subject without creating a session.
	@Test
	public void x509WhenSubjectX500PrincipalExtractor() throws Exception {
		this.spring.register(SubjectX500PrincipalExtractorConfig.class).autowire();
		X509Certificate certificate = loadCert("rod.cer");
		// @formatter:off
		this.mvc.perform(get("/").with(x509(certificate)))
				.andExpect((result) -> assertThat(result.getRequest().getSession(false)).isNull())
				.andExpect(authenticated().withUsername("rod"));
		// @formatter:on
	}
	// A SubjectX500PrincipalExtractor provided as a bean (configured for the
	// email attribute) must be picked up and used for principal extraction.
	@Test
	public void x509WhenSubjectX500PrincipalExtractorBean() throws Exception {
		this.spring.register(SubjectX500PrincipalExtractorEmailConfig.class).autowire();
		X509Certificate certificate = X509TestUtils.buildTestCertificate();
		// @formatter:off
		this.mvc.perform(get("/").with(x509(certificate)))
				.andExpect((result) -> assertThat(result.getRequest().getSession(false)).isNull())
				.andExpect(authenticated().withUsername("luke@monkeymachine"));
		// @formatter:on
	}
	// Loads an X.509 certificate from the test classpath and casts it to the
	// requested certificate type.
	// NOTE(review): the cast to T is unchecked — callers must only request a
	// type compatible with what the X.509 CertificateFactory produces.
	private <T extends Certificate> T loadCert(String location) {
		try (InputStream is = new ClassPathResource(location).getInputStream()) {
			CertificateFactory certFactory = CertificateFactory.getInstance("X.509");
			return (T) certFactory.generateCertificate(is);
		}
		catch (Exception ex) {
			// Wrap checked certificate/IO failures; test fixtures must exist.
			throw new IllegalArgumentException(ex);
		}
	}
@Configuration
@EnableWebSecurity
static
|
X509ConfigurerTests
|
java
|
netty__netty
|
codec-http2/src/test/java/io/netty/handler/codec/http2/DefaultHttp2LocalFlowControllerTest.java
|
{
"start": 2229,
"end": 19611
}
|
class ____ {
    private static final int STREAM_ID = 1;

    // Flow controller under test; all collaborators below are Mockito mocks.
    private DefaultHttp2LocalFlowController controller;

    @Mock
    private Http2FrameWriter frameWriter;
    @Mock
    private ChannelHandlerContext ctx;
    @Mock
    private EventExecutor executor;
    @Mock
    private ChannelPromise promise;

    private DefaultHttp2Connection connection;

    @BeforeEach
    public void setup() throws Http2Exception {
        MockitoAnnotations.initMocks(this);
        setupChannelHandlerContext(false);
        when(executor.inEventLoop()).thenReturn(true);
        initController(false);
    }

    /**
     * Re-stubs the {@code ChannelHandlerContext} mock. When {@code allowFlush}
     * is {@code false}, any call to {@code flush()} fails the test, letting
     * tests assert that no flush occurred.
     */
    private void setupChannelHandlerContext(boolean allowFlush) {
        reset(ctx);
        when(ctx.newPromise()).thenReturn(promise);
        if (allowFlush) {
            when(ctx.flush()).then(new Answer<ChannelHandlerContext>() {
                @Override
                public ChannelHandlerContext answer(InvocationOnMock invocationOnMock) {
                    return ctx;
                }
            });
        } else {
            when(ctx.flush()).then(new Answer<ChannelHandlerContext>() {
                @Override
                public ChannelHandlerContext answer(InvocationOnMock invocationOnMock) {
                    fail("forbidden");
                    return null;
                }
            });
        }
        when(ctx.executor()).thenReturn(executor);
    }

    @Test
    public void dataFrameShouldBeAccepted() throws Http2Exception {
        receiveFlowControlledFrame(STREAM_ID, 10, 0, false);
        verifyWindowUpdateNotSent();
    }

    @Test
    public void windowUpdateShouldSendOnceBytesReturned() throws Http2Exception {
        int dataSize = (int) (DEFAULT_WINDOW_SIZE * DEFAULT_WINDOW_UPDATE_RATIO) + 1;
        receiveFlowControlledFrame(STREAM_ID, dataSize, 0, false);

        // Return only a few bytes and verify that the WINDOW_UPDATE hasn't been sent.
        assertFalse(consumeBytes(STREAM_ID, 10));
        verifyWindowUpdateNotSent(STREAM_ID);
        verifyWindowUpdateNotSent(CONNECTION_STREAM_ID);

        // Return the rest and verify the WINDOW_UPDATE is sent.
        assertTrue(consumeBytes(STREAM_ID, dataSize - 10));
        verifyWindowUpdateSent(STREAM_ID, dataSize);
        verifyWindowUpdateSent(CONNECTION_STREAM_ID, dataSize);
        verifyNoMoreInteractions(frameWriter);
    }

    @Test
    public void connectionWindowShouldAutoRefillWhenDataReceived() throws Http2Exception {
        // Reconfigure controller to auto-refill the connection window.
        initController(true);

        int dataSize = (int) (DEFAULT_WINDOW_SIZE * DEFAULT_WINDOW_UPDATE_RATIO) + 1;
        receiveFlowControlledFrame(STREAM_ID, dataSize, 0, false);
        // Verify that we immediately refill the connection window.
        verifyWindowUpdateSent(CONNECTION_STREAM_ID, dataSize);

        // Return only a few bytes and verify that the WINDOW_UPDATE hasn't been sent for the stream.
        assertFalse(consumeBytes(STREAM_ID, 10));
        verifyWindowUpdateNotSent(STREAM_ID);

        // Return the rest and verify the WINDOW_UPDATE is sent for the stream.
        assertTrue(consumeBytes(STREAM_ID, dataSize - 10));
        verifyWindowUpdateSent(STREAM_ID, dataSize);
        verifyNoMoreInteractions(frameWriter);
    }

    @Test
    public void connectionFlowControlExceededShouldThrow() throws Http2Exception {
        // Window exceeded because of the padding.
        assertThrows(Http2Exception.class, new Executable() {
            @Override
            public void execute() throws Throwable {
                receiveFlowControlledFrame(STREAM_ID, DEFAULT_WINDOW_SIZE, 1, true);
            }
        });
    }

    @Test
    public void windowUpdateShouldNotBeSentAfterEndOfStream() throws Http2Exception {
        int dataSize = (int) (DEFAULT_WINDOW_SIZE * DEFAULT_WINDOW_UPDATE_RATIO) + 1;

        // Set end-of-stream on the frame, so no window update will be sent for the stream.
        receiveFlowControlledFrame(STREAM_ID, dataSize, 0, true);
        verifyWindowUpdateNotSent(CONNECTION_STREAM_ID);
        verifyWindowUpdateNotSent(STREAM_ID);

        assertTrue(consumeBytes(STREAM_ID, dataSize));
        verifyWindowUpdateSent(CONNECTION_STREAM_ID, dataSize);
        verifyWindowUpdateNotSent(STREAM_ID);
    }

    @Test
    public void windowUpdateShouldNotBeSentAfterStreamIsClosedForUnconsumedBytes() throws Http2Exception {
        int dataSize = (int) (DEFAULT_WINDOW_SIZE * DEFAULT_WINDOW_UPDATE_RATIO) + 1;

        // Don't set end-of-stream on the frame as we want to verify that we not return the unconsumed bytes in this
        // case once the stream was closed,
        receiveFlowControlledFrame(STREAM_ID, dataSize, 0, false);
        verifyWindowUpdateNotSent(CONNECTION_STREAM_ID);
        verifyWindowUpdateNotSent(STREAM_ID);

        // Close the stream
        Http2Stream stream = connection.stream(STREAM_ID);
        stream.close();
        assertEquals(State.CLOSED, stream.state());
        assertNull(connection.stream(STREAM_ID));

        // The window update for the connection should made it through but not the update for the already closed
        // stream
        verifyWindowUpdateSent(CONNECTION_STREAM_ID, dataSize);
        verifyWindowUpdateNotSent(STREAM_ID);
    }

    @Test
    public void windowUpdateShouldBeWrittenWhenStreamIsClosedAndFlushed() throws Http2Exception {
        int dataSize = (int) (DEFAULT_WINDOW_SIZE * DEFAULT_WINDOW_UPDATE_RATIO) + 1;

        setupChannelHandlerContext(true);

        receiveFlowControlledFrame(STREAM_ID, dataSize, 0, false);
        verifyWindowUpdateNotSent(CONNECTION_STREAM_ID);
        verifyWindowUpdateNotSent(STREAM_ID);

        connection.stream(STREAM_ID).close();

        verifyWindowUpdateSent(CONNECTION_STREAM_ID, dataSize);

        // Verify we saw one flush.
        verify(ctx).flush();
    }

    @Test
    public void halfWindowRemainingShouldUpdateAllWindows() throws Http2Exception {
        int dataSize = (int) (DEFAULT_WINDOW_SIZE * DEFAULT_WINDOW_UPDATE_RATIO) + 1;
        int initialWindowSize = DEFAULT_WINDOW_SIZE;
        int windowDelta = getWindowDelta(initialWindowSize, initialWindowSize, dataSize);

        // Don't set end-of-stream so we'll get a window update for the stream as well.
        receiveFlowControlledFrame(STREAM_ID, dataSize, 0, false);
        assertTrue(consumeBytes(STREAM_ID, dataSize));
        verifyWindowUpdateSent(CONNECTION_STREAM_ID, windowDelta);
        verifyWindowUpdateSent(STREAM_ID, windowDelta);
    }

    @Test
    public void initialWindowUpdateShouldAllowMoreFrames() throws Http2Exception {
        // Send a frame that takes up the entire window.
        int initialWindowSize = DEFAULT_WINDOW_SIZE;
        receiveFlowControlledFrame(STREAM_ID, initialWindowSize, 0, false);
        assertEquals(0, window(STREAM_ID));
        assertEquals(0, window(CONNECTION_STREAM_ID));
        consumeBytes(STREAM_ID, initialWindowSize);
        assertEquals(initialWindowSize, window(STREAM_ID));
        assertEquals(DEFAULT_WINDOW_SIZE, window(CONNECTION_STREAM_ID));

        // Update the initial window size to allow another frame.
        int newInitialWindowSize = 2 * initialWindowSize;
        controller.initialWindowSize(newInitialWindowSize);
        assertEquals(newInitialWindowSize, window(STREAM_ID));
        assertEquals(DEFAULT_WINDOW_SIZE, window(CONNECTION_STREAM_ID));

        // Clear any previous calls to the writer.
        reset(frameWriter);

        // Send the next frame and verify that the expected window updates were sent.
        receiveFlowControlledFrame(STREAM_ID, initialWindowSize, 0, false);
        assertTrue(consumeBytes(STREAM_ID, initialWindowSize));
        int delta = newInitialWindowSize - initialWindowSize;
        verifyWindowUpdateSent(STREAM_ID, delta);
        verifyWindowUpdateSent(CONNECTION_STREAM_ID, delta);
    }

    @Test
    public void connectionWindowShouldAdjustWithMultipleStreams() throws Http2Exception {
        int newStreamId = 3;
        connection.local().createStream(newStreamId, false);
        try {
            assertEquals(DEFAULT_WINDOW_SIZE, window(STREAM_ID));
            assertEquals(DEFAULT_WINDOW_SIZE, window(CONNECTION_STREAM_ID));

            // Test that both stream and connection window are updated (or not updated) together
            int data1 = (int) (DEFAULT_WINDOW_SIZE * DEFAULT_WINDOW_UPDATE_RATIO) + 1;
            receiveFlowControlledFrame(STREAM_ID, data1, 0, false);
            verifyWindowUpdateNotSent(STREAM_ID);
            verifyWindowUpdateNotSent(CONNECTION_STREAM_ID);
            assertEquals(DEFAULT_WINDOW_SIZE - data1, window(STREAM_ID));
            assertEquals(DEFAULT_WINDOW_SIZE - data1, window(CONNECTION_STREAM_ID));
            assertTrue(consumeBytes(STREAM_ID, data1));
            verifyWindowUpdateSent(STREAM_ID, data1);
            verifyWindowUpdateSent(CONNECTION_STREAM_ID, data1);

            reset(frameWriter);

            // Create a scenario where data is depleted from multiple streams, but not enough data
            // to generate a window update on those streams. The amount will be enough to generate
            // a window update for the connection stream.
            --data1;
            int data2 = data1 >> 1;
            receiveFlowControlledFrame(STREAM_ID, data1, 0, false);
            receiveFlowControlledFrame(newStreamId, data1, 0, false);
            verifyWindowUpdateNotSent(STREAM_ID);
            verifyWindowUpdateNotSent(newStreamId);
            verifyWindowUpdateNotSent(CONNECTION_STREAM_ID);
            assertEquals(DEFAULT_WINDOW_SIZE - data1, window(STREAM_ID));
            assertEquals(DEFAULT_WINDOW_SIZE - data1, window(newStreamId));
            assertEquals(DEFAULT_WINDOW_SIZE - (data1 << 1), window(CONNECTION_STREAM_ID));
            assertFalse(consumeBytes(STREAM_ID, data1));
            assertTrue(consumeBytes(newStreamId, data2));
            verifyWindowUpdateNotSent(STREAM_ID);
            verifyWindowUpdateNotSent(newStreamId);
            verifyWindowUpdateSent(CONNECTION_STREAM_ID, data1 + data2);
            assertEquals(DEFAULT_WINDOW_SIZE - data1, window(STREAM_ID));
            assertEquals(DEFAULT_WINDOW_SIZE - data1, window(newStreamId));
            assertEquals(DEFAULT_WINDOW_SIZE - (data1 - data2), window(CONNECTION_STREAM_ID));
        } finally {
            connection.stream(newStreamId).close();
        }
    }

    @Test
    public void closeShouldConsumeBytes() throws Http2Exception {
        receiveFlowControlledFrame(STREAM_ID, 10, 0, false);
        assertEquals(10, controller.unconsumedBytes(connection.connectionStream()));
        stream(STREAM_ID).close();
        assertEquals(0, controller.unconsumedBytes(connection.connectionStream()));
    }

    @Test
    public void closeShouldNotConsumeConnectionWindowWhenAutoRefilled() throws Http2Exception {
        // Reconfigure controller to auto-refill the connection window.
        initController(true);

        receiveFlowControlledFrame(STREAM_ID, 10, 0, false);
        assertEquals(0, controller.unconsumedBytes(connection.connectionStream()));
        stream(STREAM_ID).close();
        assertEquals(0, controller.unconsumedBytes(connection.connectionStream()));
    }

    @Test
    public void dataReceivedForClosedStreamShouldImmediatelyConsumeBytes() throws Http2Exception {
        Http2Stream stream = stream(STREAM_ID);
        stream.close();
        receiveFlowControlledFrame(stream, 10, 0, false);
        assertEquals(0, controller.unconsumedBytes(connection.connectionStream()));
    }

    @Test
    public void dataReceivedForNullStreamShouldImmediatelyConsumeBytes() throws Http2Exception {
        receiveFlowControlledFrame(null, 10, 0, false);
        assertEquals(0, controller.unconsumedBytes(connection.connectionStream()));
    }

    @Test
    public void consumeBytesForNullStreamShouldIgnore() throws Http2Exception {
        controller.consumeBytes(null, 10);
        assertEquals(0, controller.unconsumedBytes(connection.connectionStream()));
    }

    @Test
    public void globalRatioShouldImpactStreams() throws Http2Exception {
        float ratio = 0.6f;
        controller.windowUpdateRatio(ratio);
        testRatio(ratio, DEFAULT_WINDOW_SIZE << 1, 3, false);
    }

    // Renamed from the misspelled "streamlRatioShouldImpactStreams".
    @Test
    public void streamRatioShouldImpactStreams() throws Http2Exception {
        float ratio = 0.6f;
        testRatio(ratio, DEFAULT_WINDOW_SIZE << 1, 3, true);
    }

    @Test
    public void consumeBytesForZeroNumBytesShouldIgnore() throws Http2Exception {
        assertFalse(controller.consumeBytes(connection.stream(STREAM_ID), 0));
    }

    @Test
    public void consumeBytesForNegativeNumBytesShouldFail() throws Http2Exception {
        assertThrows(IllegalArgumentException.class, new Executable() {
            @Override
            public void execute() throws Throwable {
                controller.consumeBytes(connection.stream(STREAM_ID), -1);
            }
        });
    }

    /**
     * Exercises the window-update ratio either globally or per-stream
     * ({@code setStreamRatio}) on a stream with an enlarged window and checks
     * which WINDOW_UPDATE frames are emitted.
     */
    private void testRatio(float ratio, int newDefaultWindowSize, int newStreamId, boolean setStreamRatio)
            throws Http2Exception {
        int delta = newDefaultWindowSize - DEFAULT_WINDOW_SIZE;
        controller.incrementWindowSize(stream(0), delta);
        Http2Stream stream = connection.local().createStream(newStreamId, false);
        if (setStreamRatio) {
            controller.windowUpdateRatio(stream, ratio);
        }
        controller.incrementWindowSize(stream, delta);
        reset(frameWriter);
        try {
            int data1 = (int) (newDefaultWindowSize * ratio) + 1;
            int data2 = (int) (DEFAULT_WINDOW_SIZE * DEFAULT_WINDOW_UPDATE_RATIO) >> 1;
            receiveFlowControlledFrame(STREAM_ID, data2, 0, false);
            receiveFlowControlledFrame(newStreamId, data1, 0, false);
            verifyWindowUpdateNotSent(STREAM_ID);
            verifyWindowUpdateNotSent(newStreamId);
            verifyWindowUpdateNotSent(CONNECTION_STREAM_ID);
            assertEquals(DEFAULT_WINDOW_SIZE - data2, window(STREAM_ID));
            assertEquals(newDefaultWindowSize - data1, window(newStreamId));
            assertEquals(newDefaultWindowSize - data2 - data1, window(CONNECTION_STREAM_ID));
            assertFalse(consumeBytes(STREAM_ID, data2));
            assertTrue(consumeBytes(newStreamId, data1));
            verifyWindowUpdateNotSent(STREAM_ID);
            verifyWindowUpdateSent(newStreamId, data1);
            verifyWindowUpdateSent(CONNECTION_STREAM_ID, data1 + data2);
            assertEquals(DEFAULT_WINDOW_SIZE - data2, window(STREAM_ID));
            assertEquals(newDefaultWindowSize, window(newStreamId));
            assertEquals(newDefaultWindowSize, window(CONNECTION_STREAM_ID));
        } finally {
            connection.stream(newStreamId).close();
        }
    }

    // Delta a receiver must advertise to restore the window to initialSize.
    private static int getWindowDelta(int initialSize, int windowSize, int dataSize) {
        int newWindowSize = windowSize - dataSize;
        return initialSize - newWindowSize;
    }

    private void receiveFlowControlledFrame(int streamId, int dataSize, int padding,
                                            boolean endOfStream) throws Http2Exception {
        receiveFlowControlledFrame(stream(streamId), dataSize, padding, endOfStream);
    }

    // Feeds a dummy DATA frame into the controller, always releasing the buffer.
    private void receiveFlowControlledFrame(Http2Stream stream, int dataSize, int padding,
                                            boolean endOfStream) throws Http2Exception {
        final ByteBuf buf = dummyData(dataSize);
        try {
            controller.receiveFlowControlledFrame(stream, buf, padding, endOfStream);
        } finally {
            buf.release();
        }
    }

    private static ByteBuf dummyData(int size) {
        final ByteBuf buffer = Unpooled.buffer(size);
        buffer.writerIndex(size);
        return buffer;
    }

    private boolean consumeBytes(int streamId, int numBytes) throws Http2Exception {
        return controller.consumeBytes(stream(streamId), numBytes);
    }

    private void verifyWindowUpdateSent(int streamId, int windowSizeIncrement) {
        verify(frameWriter).writeWindowUpdate(eq(ctx), eq(streamId), eq(windowSizeIncrement), eq(promise));
    }

    private void verifyWindowUpdateNotSent(int streamId) {
        verify(frameWriter, never()).writeWindowUpdate(eq(ctx), eq(streamId), anyInt(), eq(promise));
    }

    private void verifyWindowUpdateNotSent() {
        verify(frameWriter, never()).writeWindowUpdate(any(ChannelHandlerContext.class), anyInt(), anyInt(),
                any(ChannelPromise.class));
    }

    private int window(int streamId) {
        return controller.windowSize(stream(streamId));
    }

    private Http2Stream stream(int streamId) {
        return connection.stream(streamId);
    }

    // Builds a fresh connection + controller; optionally auto-refill the
    // connection window on receipt instead of waiting for consumption.
    private void initController(boolean autoRefillConnectionWindow) throws Http2Exception {
        connection = new DefaultHttp2Connection(false);
        controller = new DefaultHttp2LocalFlowController(connection,
                DEFAULT_WINDOW_UPDATE_RATIO, autoRefillConnectionWindow).frameWriter(frameWriter);
        connection.local().flowController(controller);
        connection.local().createStream(STREAM_ID, false);
        controller.channelHandlerContext(ctx);
    }
}
|
DefaultHttp2LocalFlowControllerTest
|
java
|
micronaut-projects__micronaut-core
|
http-server/src/main/java/io/micronaut/http/server/util/locale/HttpLocaleResolutionConfiguration.java
|
{
"start": 905,
"end": 1374
}
|
interface ____ extends LocaleResolutionConfiguration {
    /**
     * @return The key in the session that stores the locale, if session-based
     * resolution is enabled; an empty Optional otherwise
     */
    @NonNull
    Optional<String> getSessionAttribute();
    /**
     * @return The name of the cookie that contains the locale, if cookie-based
     * resolution is enabled; an empty Optional otherwise
     */
    @NonNull
    Optional<String> getCookieName();
    /**
     * @return True if the {@code Accept-Language} header should be searched for the locale.
     */
    boolean isHeader();
}
|
HttpLocaleResolutionConfiguration
|
java
|
quarkusio__quarkus
|
independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/beanmanager/BeanManagerTest.java
|
{
"start": 13050,
"end": 13129
}
|
// Bean type deriving from Legacy; adds no members of its own.
class ____ extends Legacy {
}
@RequestScoped
static
|
AlternativeLegacy
|
java
|
apache__flink
|
flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/io/CompressedBlockChannelReader.java
|
{
"start": 2059,
"end": 7833
}
|
class ____
        implements BlockChannelReader<MemorySegment>, RequestDoneCallback<Buffer>, BufferRecycler {

    // Segments that have been filled with decompressed data, ready for callers.
    private final LinkedBlockingQueue<MemorySegment> blockQueue;
    // True when the preferred block size is much larger than the segment size,
    // in which case decompressed data is staged through an intermediate byte[].
    private final boolean copyCompress;
    private final BlockDecompressor decompressor;
    private final BufferFileReader reader;
    // First asynchronous read failure, if any; surfaced on the next API call.
    private final AtomicReference<IOException> cause;
    // Buffers returned by completed asynchronous reads, awaiting decompression.
    private final LinkedBlockingQueue<Buffer> retBuffers = new LinkedBlockingQueue<>();

    // Staging area used only in copy-compress mode.
    private byte[] buf;
    private ByteBuffer bufWrapper;
    private int offset;
    private int len;

    public CompressedBlockChannelReader(
            IOManager ioManager,
            ID channel,
            LinkedBlockingQueue<MemorySegment> blockQueue,
            BlockCompressionFactory codecFactory,
            int preferBlockSize,
            int segmentSize)
            throws IOException {
        this.reader = ioManager.createBufferFileReader(channel, this);
        this.blockQueue = blockQueue;
        copyCompress = preferBlockSize > segmentSize * 2;
        int blockSize = copyCompress ? preferBlockSize : segmentSize;
        this.decompressor = codecFactory.getDecompressor();
        cause = new AtomicReference<>();

        if (copyCompress) {
            this.buf = new byte[blockSize];
            this.bufWrapper = ByteBuffer.wrap(buf);
        }

        // Prime two read requests so decompression can overlap with I/O.
        BlockCompressor compressor = codecFactory.getCompressor();
        for (int i = 0; i < 2; i++) {
            MemorySegment segment =
                    MemorySegmentFactory.wrap(new byte[compressor.getMaxCompressedSize(blockSize)]);
            reader.readInto(new NetworkBuffer(segment, this));
        }
    }

    @Override
    public void readBlock(MemorySegment segment) throws IOException {
        if (cause.get() != null) {
            throw cause.get();
        }

        if (copyCompress) {
            // Copy from the staging buffer, refilling it block-by-block until
            // the target segment is completely populated.
            int readOffset = 0;
            int readLen = segment.size();

            while (readLen > 0) {
                int copy = Math.min(readLen, len - offset);
                if (copy == 0) {
                    readBuffer();
                } else {
                    segment.put(readOffset, buf, offset, copy);
                    offset += copy;
                    readOffset += copy;
                    readLen -= copy;
                }
            }
        } else {
            // Decompress directly into the segment; each block fills it fully.
            int len =
                    segment.processAsByteBuffer(
                            FunctionUtils.uncheckedFunction(this::decompressBuffer));
            Preconditions.checkState(len == segment.size());
        }

        boolean add = blockQueue.add(segment);
        Preconditions.checkState(add); // LinkedBlockingQueue never add fail.
    }

    // Refills the staging buffer with the next decompressed block.
    private void readBuffer() throws IOException {
        len = decompressBuffer(bufWrapper);
        offset = 0;
    }

    /**
     * Waits for the next compressed buffer from the asynchronous reader,
     * decompresses it into {@code toRead} and recycles the source buffer.
     *
     * @return the number of decompressed bytes written to {@code toRead}
     * @throws IOException if the reader failed or the wait was interrupted
     */
    private int decompressBuffer(ByteBuffer toRead) throws IOException {
        try {
            Buffer buffer;
            while ((buffer = retBuffers.poll(1000, TimeUnit.MILLISECONDS)) == null) {
                if (cause.get() != null) {
                    throw cause.get();
                }
            }

            final MemorySegment srcSegment = buffer.getMemorySegment();
            final int srcSize = buffer.getSize();
            int readLen =
                    srcSegment.processAsByteBuffer(
                            (srcBuffer) -> decompressor.decompress(srcBuffer, 0, srcSize, toRead, 0));
            buffer.recycleBuffer();
            return readLen;
        } catch (InterruptedException e) {
            // Restore the interrupt flag so callers can still observe the interruption.
            Thread.currentThread().interrupt();
            throw new IOException(e);
        }
    }

    @Override
    public void seekToPosition(long position) throws IOException {
        throw new RuntimeException("Not support yet!");
    }

    @Override
    public MemorySegment getNextReturnedBlock() throws IOException {
        try {
            while (true) {
                final MemorySegment next = blockQueue.poll(1000, TimeUnit.MILLISECONDS);
                if (next != null) {
                    return next;
                } else {
                    if (reader.isClosed()) {
                        // Fixed message: this side is the reader, not the writer.
                        throw new IOException("The reader has been closed.");
                    }
                }
            }
        } catch (InterruptedException e) {
            // Restore the interrupt flag and preserve the cause.
            Thread.currentThread().interrupt();
            throw new IOException(
                    "Reader was interrupted while waiting for the next returning segment.", e);
        }
    }

    @Override
    public LinkedBlockingQueue<MemorySegment> getReturnQueue() {
        return blockQueue;
    }

    @Override
    public ID getChannelID() {
        return reader.getChannelID();
    }

    @Override
    public long getSize() throws IOException {
        return reader.getSize();
    }

    @Override
    public boolean isClosed() {
        return reader.isClosed();
    }

    @Override
    public void close() throws IOException {
        reader.close();
    }

    @Override
    public void deleteChannel() {
        reader.deleteChannel();
    }

    @Override
    public void closeAndDelete() throws IOException {
        reader.closeAndDelete();
    }

    @Override
    public FileChannel getNioFileChannel() {
        return reader.getNioFileChannel();
    }

    @Override
    public void requestSuccessful(Buffer request) {
        retBuffers.add(request);
    }

    @Override
    public void requestFailed(Buffer buffer, IOException e) {
        // Record the first failure; later API calls will rethrow it.
        cause.compareAndSet(null, e);
        throw new RuntimeException(e);
    }

    @Override
    public void recycle(MemorySegment segment) {
        // Reissue a read request with the recycled segment to keep I/O flowing.
        try {
            reader.readInto(new NetworkBuffer(segment, this));
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }
}
|
CompressedBlockChannelReader
|
java
|
spring-projects__spring-framework
|
spring-context/src/main/java/org/springframework/scripting/groovy/GroovyScriptFactory.java
|
{
"start": 2321,
"end": 3540
}
|
class ____ implements ScriptFactory, BeanFactoryAware, BeanClassLoaderAware {
private final String scriptSourceLocator;
private @Nullable GroovyObjectCustomizer groovyObjectCustomizer;
private @Nullable CompilerConfiguration compilerConfiguration;
private @Nullable GroovyClassLoader groovyClassLoader;
private @Nullable Class<?> scriptClass;
private @Nullable Class<?> scriptResultClass;
private @Nullable CachedResultHolder cachedResult;
private final Object scriptClassMonitor = new Object();
private boolean wasModifiedForTypeCheck = false;
	/**
	 * Create a new GroovyScriptFactory for the given script source.
	 * <p>We don't need to specify script interfaces here, since
	 * a Groovy script defines its Java interfaces itself.
	 * @param scriptSourceLocator a locator that points to the source of the script.
	 * Interpreted by the post-processor that actually creates the script.
	 * @throws IllegalArgumentException if {@code scriptSourceLocator} is empty
	 */
	public GroovyScriptFactory(String scriptSourceLocator) {
		Assert.hasText(scriptSourceLocator, "'scriptSourceLocator' must not be empty");
		this.scriptSourceLocator = scriptSourceLocator;
	}
/**
* Create a new GroovyScriptFactory for the given script source,
* specifying a strategy
|
GroovyScriptFactory
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/cdi/type/SimpleTests.java
|
{
"start": 1206,
"end": 3009
}
|
class ____ {
	// Verifies the ExtendedBeanManager flow: metadata is built before CDI is
	// available, the UserType resolution stays lazy, and the SessionFactory
	// works once the real BeanManager is injected.
	@Test
	void testProperUsage() {
		final ExtendedBeanManagerImpl extendedBeanManager = new ExtendedBeanManagerImpl();
		final StandardServiceRegistryBuilder ssrbBuilder = ServiceRegistryUtil.serviceRegistryBuilder()
				.applySetting( AvailableSettings.HBM2DDL_AUTO, Action.CREATE_DROP )
				.applySetting( AvailableSettings.JAKARTA_CDI_BEAN_MANAGER, extendedBeanManager );
		try ( final StandardServiceRegistry ssr = ssrbBuilder.build() ) {
			final Metadata metadata = new MetadataSources( ssr )
					.addAnnotatedClass( MappedEntity.class )
					.buildMetadata();
			// The "url" attribute must resolve through a UserType, even though
			// CDI has not been started yet.
			final PersistentClass entityBinding = metadata.getEntityBinding( MappedEntity.class.getName() );
			final Property property = entityBinding.getProperty( "url" );
			assertThat( property ).isNotNull();
			assertThat( property.getValue() ).isInstanceOf( BasicValue.class );
			final BasicValue.Resolution<?> resolution = ( (BasicValue) property.getValue() ).getResolution();
			assertThat( resolution ).isNotNull();
			assertThat( resolution ).isInstanceOf( UserTypeResolution.class );
//			assertThat( ( (UserTypeResolution) resolution ).isResolved() ).isFalse();
			// Boot a CDI container and hand its BeanManager to Hibernate late.
			final SeContainerInitializer cdiInitializer = SeContainerInitializer.newInstance()
					.disableDiscovery()
					.addBeanClasses( UrlType.class, OtherBean.class );
			try ( final SeContainer cdiContainer = cdiInitializer.initialize() ) {
				final BeanManager beanManager = cdiContainer.getBeanManager();
				extendedBeanManager.injectBeanManager( beanManager );
			}
			// Querying must now succeed with the CDI-backed UserType.
			try ( final SessionFactory sf = metadata.buildSessionFactory() ) {
				sf.inSession( (session) -> {
					session.createSelectionQuery( "from MappedEntity" ).list();
				} );
			}
		}
	}
@Entity( name = "MappedEntity" )
@Table( name = "mapped_entity" )
public static
|
SimpleTests
|
java
|
apache__flink
|
flink-streaming-java/src/test/java/org/apache/flink/streaming/graph/WithMasterCheckpointHookConfigTest.java
|
{
"start": 6617,
"end": 7061
}
|
// Test source that also contributes a master checkpoint hook carrying its id.
class ____ extends TestSource
        implements WithMasterCheckpointHook<String> {

    // Identifier propagated to the hook created for this source.
    private final String id;

    TestSourceWithHook(String id) {
        this.id = id;
    }

    @Override
    public TestHook createMasterTriggerRestoreHook() {
        return new TestHook(id);
    }
}
// -----------------------------------------------------------------------
private static
|
TestSourceWithHook
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRename.java
|
{
"start": 2708,
"end": 13243
}
|
  // Deterministic group mapping for tests: every user belongs to exactly one
  // synthetic group named "<user>_group". Cache operations are no-ops.
  class ____ implements
      GroupMappingServiceProvider {

    @Override
    public List<String> getGroups(String user) {
      return Arrays.asList(user+"_group");
    }

    @Override
    public void cacheGroupsRefresh() {
    }

    @Override
    public void cacheGroupsAdd(List<String> groups) {
    }

    @Override
    public Set<String> getGroupsSet(String user) {
      return ImmutableSet.of(user+"_group");
    }
  }
private RouterContext router;
private FileSystem routerFS;
private MiniRouterDFSCluster cluster;
  // Class-wide setup: boots the shared mini federation cluster once.
  @BeforeAll
  public static void before() throws Exception {
    globalSetUp();
  }
  // Class-wide teardown for the shared cluster.
  @AfterAll
  public static void after() {
    tearDown();
  }
  // Per-test setup: refresh the router handle, its filesystem, and the cluster.
  @BeforeEach
  public void testSetup() throws Exception {
    setup();
    router = getRouterContext();
    routerFS = getRouterFileSystem();
    cluster = getCluster();
  }
  // Creates a directory (containing a file), runs the supplied rename call,
  // and asserts either success (source gone, file present under the new path)
  // or failure (source intact, destination absent), depending on
  // exceptionExpected. Cleans up both paths afterwards.
  private void testRenameDir(RouterContext testRouter, String path,
      String renamedPath, boolean exceptionExpected, Callable<Object> call)
      throws IOException {
    createDir(testRouter.getFileSystem(), path);
    // rename
    boolean exceptionThrown = false;
    try {
      call.call();
      assertFalse(verifyFileExists(testRouter.getFileSystem(), path));
      assertTrue(
          verifyFileExists(testRouter.getFileSystem(), renamedPath + "/file"));
    } catch (Exception ex) {
      exceptionThrown = true;
      assertTrue(verifyFileExists(testRouter.getFileSystem(), path + "/file"));
      assertFalse(verifyFileExists(testRouter.getFileSystem(), renamedPath));
    } finally {
      FileContext fileContext = testRouter.getFileContext();
      fileContext.delete(new Path(path), true);
      fileContext.delete(new Path(renamedPath), true);
    }
    if (exceptionExpected) {
      // Error was expected.
      assertTrue(exceptionThrown);
    } else {
      // No error was expected.
      assertFalse(exceptionThrown);
    }
  }
  // Cross-namespace rename should succeed via both rename() and rename2().
  @Test
  public void testSuccessfulRbfRename() throws Exception {
    List<String> nss = cluster.getNameservices();
    String ns0 = nss.get(0);
    String ns1 = nss.get(1);

    // Test successfully rename a dir to a destination that is in a different
    // namespace.
    String dir =
        cluster.getFederatedTestDirectoryForNS(ns0) + "/" + getMethodName();
    String renamedDir =
        cluster.getFederatedTestDirectoryForNS(ns1) + "/" + getMethodName();
    testRenameDir(router, dir, renamedDir, false, () -> {
      DFSClient client = router.getClient();
      ClientProtocol clientProtocol = client.getNamenode();
      clientProtocol.rename(dir, renamedDir);
      return null;
    });
    testRenameDir(router, dir, renamedDir, false, () -> {
      DFSClient client = router.getClient();
      ClientProtocol clientProtocol = client.getNamenode();
      clientProtocol.rename2(dir, renamedDir);
      return null;
    });
  }
  // Renaming a file onto an existing directory across namespaces must be
  // rejected ("should be a directory") by both rename() and rename2().
  @Test
  public void testRbfRenameFile() throws Exception {
    List<String> nss = cluster.getNameservices();
    String ns0 = nss.get(0);
    String ns1 = nss.get(1);

    // Test router federation rename a file.
    String file =
        cluster.getFederatedTestDirectoryForNS(ns0) + "/" + getMethodName();
    String renamedFile =
        cluster.getFederatedTestDirectoryForNS(ns1) + "/" + getMethodName();
    createFile(routerFS, file, 32);
    getRouterFileSystem().mkdirs(new Path(renamedFile));
    LambdaTestUtils.intercept(RemoteException.class, "should be a directory",
        "Expect RemoteException.", () -> {
          DFSClient client = router.getClient();
          ClientProtocol clientProtocol = client.getNamenode();
          clientProtocol.rename(file, renamedFile);
          return null;
        });
    LambdaTestUtils.intercept(RemoteException.class, "should be a directory",
        "Expect RemoteException.", () -> {
          DFSClient client = router.getClient();
          ClientProtocol clientProtocol = client.getNamenode();
          clientProtocol.rename2(file, renamedFile);
          return null;
        });
    getRouterFileSystem().delete(new Path(file), true);
    getRouterFileSystem().delete(new Path(renamedFile), true);
  }
  // Cross-namespace rename onto an existing destination must fail with
  // "already exists" for both rename() and rename2().
  @Test
  public void testRbfRenameWhenDstAlreadyExists() throws Exception {
    List<String> nss = cluster.getNameservices();
    String ns0 = nss.get(0);
    String ns1 = nss.get(1);

    // Test router federation rename a path to a destination that is in a
    // different namespace and already exists.
    String dir =
        cluster.getFederatedTestDirectoryForNS(ns0) + "/" + getMethodName();
    String renamedDir =
        cluster.getFederatedTestDirectoryForNS(ns1) + "/" + getMethodName();
    createDir(routerFS, dir);
    getRouterFileSystem().mkdirs(new Path(renamedDir));
    LambdaTestUtils.intercept(RemoteException.class, "already exists",
        "Expect RemoteException.", () -> {
          DFSClient client = router.getClient();
          ClientProtocol clientProtocol = client.getNamenode();
          clientProtocol.rename(dir, renamedDir);
          return null;
        });
    LambdaTestUtils.intercept(RemoteException.class, "already exists",
        "Expect RemoteException.", () -> {
          DFSClient client = router.getClient();
          ClientProtocol clientProtocol = client.getNamenode();
          clientProtocol.rename2(dir, renamedDir);
          return null;
        });
    getRouterFileSystem().delete(new Path(dir), true);
    getRouterFileSystem().delete(new Path(renamedDir), true);
  }
  // Renaming a non-existent source must fail with "File does not exist" for
  // both rename() and rename2().
  @Test
  public void testRbfRenameWhenSrcNotExists() throws Exception {
    List<String> nss = cluster.getNameservices();
    String ns0 = nss.get(0);
    String ns1 = nss.get(1);

    // Test router federation rename un-existed path.
    String dir =
        cluster.getFederatedTestDirectoryForNS(ns0) + "/" + getMethodName();
    String renamedDir =
        cluster.getFederatedTestDirectoryForNS(ns1) + "/" + getMethodName();
    LambdaTestUtils.intercept(RemoteException.class, "File does not exist",
        "Expect RemoteException.", () -> {
          DFSClient client = router.getClient();
          ClientProtocol clientProtocol = client.getNamenode();
          clientProtocol.rename(dir, renamedDir);
          return null;
        });
    LambdaTestUtils.intercept(RemoteException.class, "File does not exist",
        "Expect RemoteException.", () -> {
          DFSClient client = router.getClient();
          ClientProtocol clientProtocol = client.getNamenode();
          clientProtocol.rename2(dir, renamedDir);
          return null;
        });
  }
  // Mount points themselves must not be renameable: both rename() and
  // rename2() should fail with "is a mount point".
  @Test
  public void testRbfRenameOfMountPoint() throws Exception {
    List<String> nss = cluster.getNameservices();
    String ns0 = nss.get(0);
    String ns1 = nss.get(1);

    // Test router federation rename a mount point.
    String dir = cluster.getFederatedPathForNS(ns0);
    String renamedDir = cluster.getFederatedPathForNS(ns1);
    LambdaTestUtils.intercept(RemoteException.class, "is a mount point",
        "Expect RemoteException.", () -> {
          DFSClient client = router.getClient();
          ClientProtocol clientProtocol = client.getNamenode();
          clientProtocol.rename(dir, renamedDir);
          return null;
        });
    LambdaTestUtils.intercept(RemoteException.class, "is a mount point",
        "Expect RemoteException.", () -> {
          DFSClient client = router.getClient();
          ClientProtocol clientProtocol = client.getNamenode();
          clientProtocol.rename2(dir, renamedDir);
          return null;
        });
  }
@Test
public void testRbfRenameWithMultiDestination() throws Exception {
List<String> nss = cluster.getNameservices();
String ns1 = nss.get(1);
FileSystem rfs = getRouterFileSystem();
// Test router federation rename a path with multi-destination.
String dir = "/same/" + getMethodName();
String renamedDir = cluster.getFederatedTestDirectoryForNS(ns1) + "/"
+ getMethodName();
createDir(rfs, dir);
getRouterFileSystem().mkdirs(new Path(renamedDir));
LambdaTestUtils.intercept(RemoteException.class,
"The remote location should be exactly one", "Expect RemoteException.",
() -> {
DFSClient client = router.getClient();
ClientProtocol clientProtocol = client.getNamenode();
clientProtocol.rename(dir, renamedDir);
return null;
});
LambdaTestUtils.intercept(RemoteException.class,
"The remote location should be exactly one", "Expect RemoteException.",
() -> {
DFSClient client = router.getClient();
ClientProtocol clientProtocol = client.getNamenode();
clientProtocol.rename2(dir, renamedDir);
return null;
});
getRouterFileSystem().delete(new Path(dir), true);
getRouterFileSystem().delete(new Path(renamedDir), true);
}
@Test
@Timeout(value = 20)
public void testCounter() throws Exception {
final RouterRpcServer rpcServer = router.getRouter().getRpcServer();
List<String> nss = cluster.getNameservices();
String ns0 = nss.get(0);
String ns1 = nss.get(1);
RouterFederationRename rbfRename =
Mockito.spy(new RouterFederationRename(rpcServer, router.getConf()));
String path = "/src";
createDir(cluster.getCluster().getFileSystem(0), path);
// Watch the scheduler job count.
int expectedSchedulerCount = rpcServer.getSchedulerJobCount() + 1;
AtomicInteger maxSchedulerCount = new AtomicInteger();
AtomicBoolean watch = new AtomicBoolean(true);
Thread watcher = new SubjectInheritingThread(() -> {
while (watch.get()) {
int schedulerCount = rpcServer.getSchedulerJobCount();
if (schedulerCount > maxSchedulerCount.get()) {
maxSchedulerCount.set(schedulerCount);
}
try {
Thread.sleep(1);
} catch (InterruptedException e) {
}
}
});
watcher.start();
// Trigger rename.
rbfRename.routerFedRename("/src", "/dst",
Arrays.asList(new RemoteLocation(ns0, path, null)),
Arrays.asList(new RemoteLocation(ns1, path, null)));
// Verify count.
verify(rbfRename).countIncrement();
verify(rbfRename).countDecrement();
watch.set(false);
watcher.interrupt();
watcher.join();
assertEquals(expectedSchedulerCount, maxSchedulerCount.get());
// Clean up.
assertFalse(cluster.getCluster().getFileSystem(0).exists(new Path(path)));
assertTrue(
cluster.getCluster().getFileSystem(1).delete(new Path(path), true));
}
}
|
MockGroupsMapping
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/DynoConstants.java
|
{
"start": 1250,
"end": 6019
}
|
class ____ {
private DynoConstants() {}
// Directory to use for remote storage (a location on the remote FS which
// can be accessed by all components). This will be the name of the directory
// within the submitter's home directory.
public static final String DYNAMOMETER_STORAGE_DIR = ".dynamometer";
/* The following used for Client -> AM communication */
// Resource for the zip file of all of the configuration for the
// DataNodes/NameNode
public static final DynoResource CONF_ZIP =
new DynoResource("CONF_ZIP", ARCHIVE, "conf");
// Resource for the Hadoop binary archive (distribution tar)
public static final DynoResource HADOOP_BINARY =
new DynoResource("HADOOP_BINARY", ARCHIVE, "hadoopBinary");
// Resource for the script used to start the DataNodes/NameNode
public static final DynoResource START_SCRIPT =
new DynoResource("START_SCRIPT", FILE, "start-component.sh");
// Resource for the file system image file used by the NameNode
public static final DynoResource FS_IMAGE =
new DynoResource("FS_IMAGE", FILE, null);
// Resource for the md5 file accompanying the file system image for the
// NameNode
public static final DynoResource FS_IMAGE_MD5 =
new DynoResource("FS_IMAGE_MD5", FILE, null);
// Resource for the VERSION file accompanying the file system image
public static final DynoResource VERSION =
new DynoResource("VERSION", FILE, "VERSION");
// Resource for the archive containing all dependencies
public static final DynoResource DYNO_DEPENDENCIES =
new DynoResource("DYNO_DEPS", ARCHIVE, "dependencies");
// Environment variable which will contain the location of the directory
// which holds all of the block files for the DataNodes
public static final String BLOCK_LIST_PATH_ENV = "BLOCK_ZIP_PATH";
// The format of the name of a single block file
public static final Pattern BLOCK_LIST_FILE_PATTERN =
Pattern.compile("dn[0-9]+-a-[0-9]+-r-[0-9]+");
// The file name to use when localizing the block file on a DataNode; will be
// suffixed with an integer
public static final String BLOCK_LIST_RESOURCE_PATH_PREFIX = "blocks/block";
public static final PathFilter BLOCK_LIST_FILE_FILTER = (path) ->
DynoConstants.BLOCK_LIST_FILE_PATTERN.matcher(path.getName()).find();
// Environment variable which will contain the full path of the directory
// which should be used for remote (shared) storage
public static final String REMOTE_STORAGE_PATH_ENV = "REMOTE_STORAGE_PATH";
// Environment variable which will contain the RPC address of the NameNode
// which the DataNodes should contact, if the NameNode is not launched
// internally by this application
public static final String REMOTE_NN_RPC_ADDR_ENV = "REMOTE_NN_RPC_ADDR";
// Environment variable which will contain the view ACLs for the launched
// containers.
public static final String JOB_ACL_VIEW_ENV = "JOB_ACL_VIEW";
/* The following used for AM -> DN, NN communication */
// The name of the file which will store information about the NameNode
// (within the remote storage directory)
public static final String NN_INFO_FILE_NAME = "nn_info.prop";
// Environment variable which will contain additional arguments for the
// NameNode
public static final String NN_ADDITIONAL_ARGS_ENV = "NN_ADDITIONAL_ARGS";
// Environment variable which will contain additional arguments for the
// DataNode
public static final String DN_ADDITIONAL_ARGS_ENV = "DN_ADDITIONAL_ARGS";
// Environment variable which will contain the directory to use for the
// NameNode's name directory;
// if not specified a directory within the YARN container working directory
// will be used.
public static final String NN_NAME_DIR_ENV = "NN_NAME_DIR";
// Environment variable which will contain the directory to use for the
// NameNode's edits directory;
// if not specified a directory within the YARN container working directory
// will be used.
public static final String NN_EDITS_DIR_ENV = "NN_EDITS_DIR";
public static final String NN_FILE_METRIC_PERIOD_ENV =
"NN_FILE_METRIC_PERIOD";
/*
* These are used as the names of properties and as the environment variables
*/
// The port to use on the NameNode host when contacting for client RPCs
public static final String NN_RPC_PORT = "NN_RPC_PORT";
// The hostname of the machine running the NameNode
public static final String NN_HOSTNAME = "NN_HOSTNAME";
// The port to use on the NameNode host when contacting for service RPCs
public static final String NN_SERVICERPC_PORT = "NN_SERVICERPC_PORT";
// The port to use on the NameNode host when contacting for HTTP access
public static final String NN_HTTP_PORT = "NN_HTTP_PORT";
}
|
DynoConstants
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/annotations/embeddables/collection/EmbeddableWithOneToMany_HHH_11302_Test.java
|
{
"start": 4328,
"end": 5305
}
|
class ____ implements Serializable {
@Column(name = "name")
String name;
@OneToMany(cascade = CascadeType.ALL)
@JoinTable(
name = "CONTACT_TYPE",
joinColumns = @JoinColumn(name = "id"),
inverseJoinColumns = @JoinColumn(name = "id")
)
private List<ContactType> contactType = new ArrayList<>();
public List<ContactType> getContactType() {
return contactType;
}
public void setContactType(final List<ContactType> contactType) {
this.contactType = contactType;
}
public void setName(String name) {
this.name = name;
}
public String getName() {
return name;
}
@Override
public boolean equals(Object o) {
if ( this == o ) {
return true;
}
if ( o == null || getClass() != o.getClass() ) {
return false;
}
ContactInformation that = (ContactInformation) o;
return Objects.equals( name, that.name );
}
@Override
public int hashCode() {
return Objects.hash( name );
}
}
}
|
ContactInformation
|
java
|
netty__netty
|
codec-http3/src/main/java/io/netty/handler/codec/http3/QpackHuffmanEncoder.java
|
{
"start": 826,
"end": 3977
}
|
class ____ {
private final int[] codes;
private final byte[] lengths;
private final EncodedLengthProcessor encodedLengthProcessor = new EncodedLengthProcessor();
private final EncodeProcessor encodeProcessor = new EncodeProcessor();
QpackHuffmanEncoder() {
this(QpackUtil.HUFFMAN_CODES, QpackUtil.HUFFMAN_CODE_LENGTHS);
}
/**
* Creates a new Huffman encoder with the specified Huffman coding.
*
* @param codes the Huffman codes indexed by symbol
* @param lengths the length of each Huffman code
*/
private QpackHuffmanEncoder(int[] codes, byte[] lengths) {
this.codes = codes;
this.lengths = lengths;
}
/**
* Compresses the input string literal using the Huffman coding.
*
* @param out the output stream for the compressed data
* @param data the string literal to be Huffman encoded
*/
public void encode(ByteBuf out, CharSequence data) {
ObjectUtil.checkNotNull(out, "out");
if (data instanceof AsciiString) {
AsciiString string = (AsciiString) data;
try {
encodeProcessor.out = out;
string.forEachByte(encodeProcessor);
} catch (Exception e) {
throw new IllegalStateException(e);
} finally {
encodeProcessor.end();
}
} else {
encodeSlowPath(out, data);
}
}
private void encodeSlowPath(ByteBuf out, CharSequence data) {
long current = 0;
int n = 0;
for (int i = 0; i < data.length(); i++) {
int b = data.charAt(i) & 0xFF;
int code = codes[b];
int nbits = lengths[b];
current <<= nbits;
current |= code;
n += nbits;
while (n >= 8) {
n -= 8;
out.writeByte((int) (current >> n));
}
}
if (n > 0) {
current <<= 8 - n;
current |= 0xFF >>> n; // this should be EOS symbol
out.writeByte((int) current);
}
}
/**
* Returns the number of bytes required to Huffman encode the input string literal.
*
* @param data the string literal to be Huffman encoded
* @return the number of bytes required to Huffman encode {@code data}
*/
int getEncodedLength(CharSequence data) {
if (data instanceof AsciiString) {
AsciiString string = (AsciiString) data;
try {
encodedLengthProcessor.reset();
string.forEachByte(encodedLengthProcessor);
return encodedLengthProcessor.length();
} catch (Exception e) {
throw new IllegalStateException(e);
}
} else {
return getEncodedLengthSlowPath(data);
}
}
private int getEncodedLengthSlowPath(CharSequence data) {
long len = 0;
for (int i = 0; i < data.length(); i++) {
len += lengths[data.charAt(i) & 0xFF];
}
return (int) ((len + 7) >> 3);
}
private final
|
QpackHuffmanEncoder
|
java
|
apache__flink
|
flink-table/flink-table-code-splitter/src/test/resources/return/code/TestNotRewrite.java
|
{
"start": 7,
"end": 369
}
|
class ____ {
public void fun1(int a) {
if (a > 0) {
a += 5;
return;
}
a -= 5;
return;
}
public int fun2(int a) {
a += 1;
return a;
}
public String fun3() {
return "aVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryLongString";
}
}
|
TestNotRewrite
|
java
|
quarkusio__quarkus
|
extensions/kubernetes-config/runtime/src/main/java/io/quarkus/kubernetes/config/runtime/KubernetesConfigSourceConfig.java
|
{
"start": 382,
"end": 1952
}
|
interface ____ {
/**
* If set to true, the application will attempt to look up the configuration from the API server
*/
@WithDefault("false")
boolean enabled();
/**
* If set to true, the application will not start if any of the configured config sources cannot be located
*/
@WithDefault("true")
boolean failOnMissingConfig();
/**
* ConfigMaps to look for in the namespace that the Kubernetes Client has been configured for.
* ConfigMaps defined later in this list have a higher priority that ConfigMaps defined earlier
* in this list.
* Furthermore, any Secrets defined in {@code secrets}, will have higher priorities than all ConfigMaps.
*/
Optional<List<String>> configMaps();
/**
* Secrets to look for in the namespace that the Kubernetes Client has been configured for.
* If you use this, you probably want to enable {@code quarkus.kubernetes-config.secrets.enabled}.
* Secrets defined later in this list have a higher priority that ConfigMaps defined earlier
* in this list.
* Furthermore, these Secrets have a higher priorities than all ConfigMaps defined in {@code configMaps}.
*/
Optional<List<String>> secrets();
/**
* Namespace to look for config maps and secrets. If this is not specified, then the namespace configured in the kubectl
* config context is used. If the value is specified and the namespace doesn't exist, the application will fail to start.
*/
Optional<String> namespace();
}
|
KubernetesConfigSourceConfig
|
java
|
apache__flink
|
flink-table/flink-table-api-java/src/main/java/org/apache/flink/table/operations/utils/CorrelatedFunctionTableFactory.java
|
{
"start": 2721,
"end": 7350
}
|
class ____
extends ResolvedExpressionDefaultVisitor<CorrelatedFunctionQueryOperation> {
private final List<String> leftTableFieldNames;
private static final String ATOMIC_FIELD_NAME = "f0";
public FunctionTableCallVisitor(List<String> leftTableFieldNames) {
this.leftTableFieldNames = leftTableFieldNames;
}
@Override
public CorrelatedFunctionQueryOperation visit(CallExpression call) {
FunctionDefinition definition = call.getFunctionDefinition();
if (definition.equals(AS)) {
return unwrapFromAlias(call);
}
return createFunctionCall(call, Collections.emptyList(), call.getResolvedChildren());
}
private CorrelatedFunctionQueryOperation unwrapFromAlias(CallExpression call) {
List<Expression> children = call.getChildren();
List<String> aliases =
children.subList(1, children.size()).stream()
.map(
alias ->
ExpressionUtils.extractValue(alias, String.class)
.orElseThrow(
() ->
new ValidationException(
"Unexpected alias: "
+ alias)))
.collect(toList());
if (!isFunctionOfKind(children.get(0), FunctionKind.TABLE)) {
throw fail();
}
CallExpression tableCall = (CallExpression) children.get(0);
return createFunctionCall(tableCall, aliases, tableCall.getResolvedChildren());
}
private CorrelatedFunctionQueryOperation createFunctionCall(
CallExpression callExpression,
List<String> aliases,
List<ResolvedExpression> parameters) {
final ResolvedSchema resolvedSchema =
adjustNames(
extractSchema(callExpression.getOutputDataType()),
aliases,
callExpression.getFunctionName());
return new CorrelatedFunctionQueryOperation(
ContextResolvedFunction.fromCallExpression(callExpression),
parameters,
resolvedSchema);
}
private ResolvedSchema extractSchema(DataType resultDataType) {
if (LogicalTypeChecks.isCompositeType(resultDataType.getLogicalType())) {
return DataTypeUtils.expandCompositeTypeToSchema(resultDataType);
}
int i = 0;
String fieldName = ATOMIC_FIELD_NAME;
while (leftTableFieldNames.contains(fieldName)) {
fieldName = ATOMIC_FIELD_NAME + "_" + i++;
}
return ResolvedSchema.physical(
Collections.singletonList(fieldName),
Collections.singletonList(resultDataType));
}
private ResolvedSchema adjustNames(
ResolvedSchema resolvedSchema, List<String> aliases, String functionName) {
int aliasesSize = aliases.size();
if (aliasesSize == 0) {
return resolvedSchema;
}
int callArity = resolvedSchema.getColumnCount();
if (callArity != aliasesSize) {
throw new ValidationException(
String.format(
"List of column aliases must have same degree as table; "
+ "the returned table of function '%s' has "
+ "%d columns, whereas alias list has %d columns",
functionName, callArity, aliasesSize));
}
return ResolvedSchema.physical(aliases, resolvedSchema.getColumnDataTypes());
}
@Override
protected CorrelatedFunctionQueryOperation defaultMethod(ResolvedExpression expression) {
throw fail();
}
private ValidationException fail() {
return new ValidationException(
"A lateral join only accepts an expression which defines a table function "
+ "call that might be followed by some alias.");
}
}
}
|
FunctionTableCallVisitor
|
java
|
apache__flink
|
flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/operators/window/tvf/unslicing/UnslicingWindowTimerServiceImpl.java
|
{
"start": 1343,
"end": 2112
}
|
class ____ extends WindowTimerServiceBase<TimeWindow> {
public UnslicingWindowTimerServiceImpl(
InternalTimerService<TimeWindow> internalTimerService, ZoneId shiftTimeZone) {
super(internalTimerService, shiftTimeZone);
}
@Override
public void registerProcessingTimeWindowTimer(TimeWindow window) {
internalTimerService.registerProcessingTimeTimer(
window, TimeWindowUtil.toEpochMillsForTimer(window.maxTimestamp(), shiftTimeZone));
}
@Override
public void registerEventTimeWindowTimer(TimeWindow window) {
internalTimerService.registerEventTimeTimer(
window, TimeWindowUtil.toEpochMillsForTimer(window.maxTimestamp(), shiftTimeZone));
}
}
|
UnslicingWindowTimerServiceImpl
|
java
|
elastic__elasticsearch
|
libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/ExponentialHistogramGenerator.java
|
{
"start": 1705,
"end": 8331
}
|
class ____ implements Accountable, Releasable {
private static final long SHALLOW_SIZE = RamUsageEstimator.shallowSizeOfInstance(ExponentialHistogramGenerator.class);
// Merging individual values into a histogram would be way too slow with our sparse, array-backed histogram representation.
// Therefore, for a bucket capacity of c, we first buffer c raw values to be inserted.
// We then turn those into an "exact" histogram, which in turn we merge with our actual result accumulator.
// This yields an amortized runtime of O(log(c)).
private final double[] rawValueBuffer;
private int valueCount;
private final ExponentialHistogramMerger resultMerger;
private final FixedCapacityExponentialHistogram valueBuffer;
private final ExponentialHistogramCircuitBreaker circuitBreaker;
private boolean closed = false;
/**
* Creates a new instance with the specified maximum number of buckets.
*
* @param maxBucketCount the maximum number of buckets for the generated histogram
* @param circuitBreaker the circuit breaker to use to limit memory allocations
*/
public static ExponentialHistogramGenerator create(int maxBucketCount, ExponentialHistogramCircuitBreaker circuitBreaker) {
long size = estimateBaseSize(maxBucketCount);
circuitBreaker.adjustBreaker(size);
try {
return new ExponentialHistogramGenerator(maxBucketCount, circuitBreaker);
} catch (RuntimeException e) {
circuitBreaker.adjustBreaker(-size);
throw e;
}
}
private ExponentialHistogramGenerator(int maxBucketCount, ExponentialHistogramCircuitBreaker circuitBreaker) {
this.circuitBreaker = circuitBreaker;
rawValueBuffer = new double[maxBucketCount];
valueCount = 0;
FixedCapacityExponentialHistogram buffer = null;
ExponentialHistogramMerger merger = null;
try {
buffer = FixedCapacityExponentialHistogram.create(maxBucketCount, circuitBreaker);
merger = ExponentialHistogramMerger.create(maxBucketCount, circuitBreaker);
} catch (RuntimeException e) {
Releasables.close(buffer, merger);
throw e;
}
this.valueBuffer = buffer;
this.resultMerger = merger;
}
/**
* Adds the given value to the histogram.
*
* @param value the value to add
*/
public void add(double value) {
assert closed == false : "ExponentialHistogramGenerator has already been closed";
if (valueCount == rawValueBuffer.length) {
mergeValuesToHistogram();
}
rawValueBuffer[valueCount] = value;
valueCount++;
}
/**
* Returns the histogram representing the distribution of all accumulated values.
*
* @return the histogram representing the distribution of all accumulated values
*/
public ReleasableExponentialHistogram getAndClear() {
mergeValuesToHistogram();
return resultMerger.getAndClear();
}
private void mergeValuesToHistogram() {
if (valueCount == 0) {
return;
}
Arrays.sort(rawValueBuffer, 0, valueCount);
int negativeValuesCount = 0;
while (negativeValuesCount < valueCount && rawValueBuffer[negativeValuesCount] < 0) {
negativeValuesCount++;
}
valueBuffer.reset();
Aggregates aggregates = rawValuesAggregates();
valueBuffer.setSum(aggregates.sum());
valueBuffer.setMin(aggregates.min());
valueBuffer.setMax(aggregates.max());
int scale = valueBuffer.scale();
// Buckets must be provided with their indices in ascending order.
// For the negative range, higher bucket indices correspond to bucket boundaries closer to -INF
// and smaller bucket indices correspond to bucket boundaries closer to zero.
// therefore we have to iterate the negative values in the sorted rawValueBuffer reverse order,
// from the value closest to -INF to the value closest to zero.
// not that i here is the index of the value in the rawValueBuffer array
// and is unrelated to the histogram bucket index for the value.
for (int i = negativeValuesCount - 1; i >= 0; i--) {
long count = 1;
long index = computeIndex(rawValueBuffer[i], scale);
while ((i - 1) >= 0 && computeIndex(rawValueBuffer[i - 1], scale) == index) {
i--;
count++;
}
valueBuffer.tryAddBucket(index, count, false);
}
int zeroCount = 0;
while ((negativeValuesCount + zeroCount) < valueCount && rawValueBuffer[negativeValuesCount + zeroCount] == 0) {
zeroCount++;
}
valueBuffer.setZeroBucket(ZeroBucket.minimalWithCount(zeroCount));
for (int i = negativeValuesCount + zeroCount; i < valueCount; i++) {
long count = 1;
long index = computeIndex(rawValueBuffer[i], scale);
while ((i + 1) < valueCount && computeIndex(rawValueBuffer[i + 1], scale) == index) {
i++;
count++;
}
valueBuffer.tryAddBucket(index, count, true);
}
resultMerger.add(valueBuffer);
valueCount = 0;
}
private Aggregates rawValuesAggregates() {
if (valueCount == 0) {
return new Aggregates(0, Double.NaN, Double.NaN);
}
double sum = 0;
double min = Double.MAX_VALUE;
double max = -Double.MAX_VALUE;
for (int i = 0; i < valueCount; i++) {
sum += rawValueBuffer[i];
min = Math.min(min, rawValueBuffer[i]);
max = Math.max(max, rawValueBuffer[i]);
}
return new Aggregates(sum, min, max);
}
private static long estimateBaseSize(int numBuckets) {
return SHALLOW_SIZE + RamEstimationUtil.estimateDoubleArray(numBuckets);
};
@Override
public long ramBytesUsed() {
return estimateBaseSize(rawValueBuffer.length) + resultMerger.ramBytesUsed() + valueBuffer.ramBytesUsed();
}
@Override
public void close() {
if (closed) {
assert false : "ExponentialHistogramGenerator closed multiple times";
} else {
closed = true;
resultMerger.close();
valueBuffer.close();
circuitBreaker.adjustBreaker(-estimateBaseSize(rawValueBuffer.length));
}
}
private record Aggregates(double sum, double min, double max) {}
}
|
ExponentialHistogramGenerator
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/put/PutWatchAction.java
|
{
"start": 527,
"end": 800
}
|
class ____ extends ActionType<PutWatchResponse> {
public static final PutWatchAction INSTANCE = new PutWatchAction();
public static final String NAME = "cluster:admin/xpack/watcher/watch/put";
private PutWatchAction() {
super(NAME);
}
}
|
PutWatchAction
|
java
|
elastic__elasticsearch
|
x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/BaseTransportInferenceAction.java
|
{
"start": 2638,
"end": 14798
}
|
class ____<Request extends BaseInferenceActionRequest> extends HandledTransportAction<
Request,
InferenceAction.Response> {
private static final Logger log = LogManager.getLogger(BaseTransportInferenceAction.class);
private static final String STREAMING_INFERENCE_TASK_TYPE = "streaming_inference";
private static final String STREAMING_TASK_ACTION = "xpack/inference/streaming_inference[n]";
private final XPackLicenseState licenseState;
private final InferenceEndpointRegistry endpointRegistry;
private final InferenceServiceRegistry serviceRegistry;
private final InferenceStats inferenceStats;
private final StreamingTaskManager streamingTaskManager;
private final NodeClient nodeClient;
private final ThreadPool threadPool;
private final TransportService transportService;
private final Random random;
public BaseTransportInferenceAction(
String inferenceActionName,
TransportService transportService,
ActionFilters actionFilters,
XPackLicenseState licenseState,
InferenceEndpointRegistry endpointRegistry,
InferenceServiceRegistry serviceRegistry,
InferenceStats inferenceStats,
StreamingTaskManager streamingTaskManager,
Writeable.Reader<Request> requestReader,
NodeClient nodeClient,
ThreadPool threadPool
) {
super(inferenceActionName, transportService, actionFilters, requestReader, EsExecutors.DIRECT_EXECUTOR_SERVICE);
this.licenseState = licenseState;
this.endpointRegistry = endpointRegistry;
this.serviceRegistry = serviceRegistry;
this.inferenceStats = inferenceStats;
this.streamingTaskManager = streamingTaskManager;
this.nodeClient = nodeClient;
this.threadPool = threadPool;
this.transportService = transportService;
this.random = Randomness.get();
}
protected abstract boolean isInvalidTaskTypeForInferenceEndpoint(Request request, Model model);
protected abstract ElasticsearchStatusException createInvalidTaskTypeException(Request request, Model model);
protected abstract void doInference(
Model model,
Request request,
InferenceService service,
ActionListener<InferenceServiceResults> listener
);
@Override
protected void doExecute(Task task, Request request, ActionListener<InferenceAction.Response> listener) {
var timer = InferenceTimer.start();
var getModelListener = ActionListener.wrap((Model model) -> {
var serviceName = model.getConfigurations().getService();
if (InferenceLicenceCheck.isServiceLicenced(serviceName, licenseState) == false) {
listener.onFailure(InferenceLicenceCheck.complianceException(serviceName));
return;
}
try {
validateRequest(request, model);
} catch (Exception e) {
recordRequestDurationMetrics(model, timer, e);
listener.onFailure(e);
return;
}
// TODO: this is a temporary solution for passing around the product use case.
// We want to pass InferenceContext through the various infer methods in InferenceService in the long term
var context = request.getContext();
if (Objects.nonNull(context)) {
var headerNotPresentInThreadContext = Objects.isNull(
threadPool.getThreadContext().getHeader(InferencePlugin.X_ELASTIC_PRODUCT_USE_CASE_HTTP_HEADER)
);
if (headerNotPresentInThreadContext) {
threadPool.getThreadContext()
.putHeader(InferencePlugin.X_ELASTIC_PRODUCT_USE_CASE_HTTP_HEADER, context.productUseCase());
}
}
var service = serviceRegistry.getService(serviceName).get();
var localNodeId = nodeClient.getLocalNodeId();
inferOnServiceWithMetrics(model, request, service, timer, localNodeId, listener);
}, e -> {
try {
inferenceStats.inferenceDuration().record(timer.elapsedMillis(), responseAttributes(e));
} catch (Exception metricsException) {
log.atDebug().withThrowable(metricsException).log("Failed to record metrics when the model is missing, dropping metrics");
}
listener.onFailure(e);
});
endpointRegistry.getEndpoint(request.getInferenceEntityId(), getModelListener);
}
private void validateRequest(Request request, Model model) {
var serviceName = model.getConfigurations().getService();
var requestTaskType = request.getTaskType();
var service = serviceRegistry.getService(serviceName);
validationHelper(service::isEmpty, () -> unknownServiceException(serviceName, request.getInferenceEntityId()));
validationHelper(
() -> request.getTaskType().isAnyOrSame(model.getTaskType()) == false,
() -> requestModelTaskTypeMismatchException(requestTaskType, model.getTaskType())
);
validationHelper(() -> isInvalidTaskTypeForInferenceEndpoint(request, model), () -> createInvalidTaskTypeException(request, model));
}
private static void validationHelper(Supplier<Boolean> validationFailure, Supplier<ElasticsearchStatusException> exceptionCreator) {
if (validationFailure.get()) {
throw exceptionCreator.get();
}
}
private void recordRequestDurationMetrics(Model model, InferenceTimer timer, @Nullable Throwable t) {
Map<String, Object> metricAttributes = new HashMap<>();
metricAttributes.putAll(InferenceStats.serviceAttributes(model));
metricAttributes.putAll(responseAttributes(unwrapCause(t)));
inferenceStats.inferenceDuration().record(timer.elapsedMillis(), metricAttributes);
}
private void inferOnServiceWithMetrics(
Model model,
Request request,
InferenceService service,
InferenceTimer timer,
String localNodeId,
ActionListener<InferenceAction.Response> listener
) {
recordRequestCountMetrics(model, request, localNodeId);
inferOnService(model, request, service, ActionListener.wrap(inferenceResults -> {
if (request.isStreaming()) {
var taskProcessor = streamingTaskManager.<InferenceServiceResults.Result>create(
STREAMING_INFERENCE_TASK_TYPE,
STREAMING_TASK_ACTION
);
inferenceResults.publisher().subscribe(taskProcessor);
var instrumentedStream = publisherWithMetrics(timer, model, request, localNodeId, taskProcessor);
var streamErrorHandler = streamErrorHandler(instrumentedStream);
listener.onResponse(new InferenceAction.Response(inferenceResults, streamErrorHandler));
} else {
recordRequestDurationMetrics(model, timer, request, localNodeId, null);
listener.onResponse(new InferenceAction.Response(inferenceResults));
}
}, e -> {
recordRequestDurationMetrics(model, timer, request, localNodeId, e);
listener.onFailure(e);
}));
}
private <T> Flow.Publisher<T> publisherWithMetrics(
InferenceTimer timer,
Model model,
Request request,
String localNodeId,
Flow.Processor<T, T> upstream
) {
return downstream -> {
upstream.subscribe(new Flow.Subscriber<>() {
@Override
public void onSubscribe(Flow.Subscription subscription) {
downstream.onSubscribe(new Flow.Subscription() {
@Override
public void request(long n) {
subscription.request(n);
}
@Override
public void cancel() {
recordRequestDurationMetrics(model, timer, request, localNodeId, null);
subscription.cancel();
}
});
}
@Override
public void onNext(T item) {
downstream.onNext(item);
}
@Override
public void onError(Throwable throwable) {
recordRequestDurationMetrics(model, timer, request, localNodeId, throwable);
downstream.onError(throwable);
}
@Override
public void onComplete() {
recordRequestDurationMetrics(model, timer, request, localNodeId, null);
downstream.onComplete();
}
});
};
}
protected <T> Flow.Publisher<T> streamErrorHandler(Flow.Publisher<T> upstream) {
return upstream;
}
private void recordRequestCountMetrics(Model model, Request request, String localNodeId) {
Map<String, Object> requestCountAttributes = new HashMap<>();
requestCountAttributes.putAll(InferenceStats.serviceAttributes(model));
inferenceStats.requestCount().incrementBy(1, requestCountAttributes);
}
private void recordRequestDurationMetrics(
Model model,
InferenceTimer timer,
Request request,
String localNodeId,
@Nullable Throwable t
) {
Map<String, Object> metricAttributes = new HashMap<>();
metricAttributes.putAll(serviceAndResponseAttributes(model, unwrapCause(t)));
inferenceStats.inferenceDuration().record(timer.elapsedMillis(), metricAttributes);
}
/**
 * Dispatches the request to the service, rejecting streaming requests that the
 * service cannot stream for this model's task type.
 *
 * @param model    resolved model
 * @param request  the inference request
 * @param service  service that will perform the inference
 * @param listener notified with results, or with the rejection failure
 */
private void inferOnService(Model model, Request request, InferenceService service, ActionListener<InferenceServiceResults> listener) {
    // Fail only when streaming was requested AND the service cannot stream this task type.
    boolean streamingUnsupported = request.isStreaming() && service.canStream(model.getTaskType()) == false;
    if (streamingUnsupported) {
        listener.onFailure(unsupportedStreamingTaskException(request, service));
    } else {
        doInference(model, request, service, listener);
    }
}
/**
 * Builds the 405 (METHOD_NOT_ALLOWED) error for a streaming request the service
 * cannot satisfy. When the service supports no streaming tasks at all, a shorter
 * message is produced; otherwise the supported task types are listed.
 *
 * @param request the rejected streaming request
 * @param service the service that cannot stream this request's task type
 * @return the status exception to deliver to the listener
 */
private ElasticsearchStatusException unsupportedStreamingTaskException(Request request, InferenceService service) {
    var supportedTasks = service.supportedStreamingTasks();

    // No streaming support at all: short form, early return.
    if (supportedTasks.isEmpty()) {
        return new ElasticsearchStatusException(
            format("Streaming is not allowed for service [%s].", service.name()),
            RestStatus.METHOD_NOT_ALLOWED
        );
    }

    // Streaming exists for some tasks: enumerate them in the message.
    var validTasks = supportedTasks.stream().map(TaskType::toString).collect(Collectors.joining(","));
    return new ElasticsearchStatusException(
        format(
            "Streaming is not allowed for service [%s] and task [%s]. Supported tasks: [%s]",
            service.name(),
            request.getTaskType(),
            validTasks
        ),
        RestStatus.METHOD_NOT_ALLOWED
    );
}
/**
 * Builds the 400 (BAD_REQUEST) error returned when the persisted model names a
 * service that is not registered.
 *
 * @param service     unknown service name from the model configuration
 * @param inferenceId id of the model referencing the unknown service
 * @return the status exception to deliver to the listener
 */
private static ElasticsearchStatusException unknownServiceException(String service, String inferenceId) {
    final var message = "Unknown service [{}] for model [{}]";
    return new ElasticsearchStatusException(message, RestStatus.BAD_REQUEST, service, inferenceId);
}
/**
 * Builds the 400 (BAD_REQUEST) error returned when the request's task type does
 * not match the task type the model was configured with.
 *
 * @param requested task type named in the request
 * @param expected  task type of the stored model
 * @return the status exception to deliver to the listener
 */
private static ElasticsearchStatusException requestModelTaskTypeMismatchException(TaskType requested, TaskType expected) {
    final var message = "Incompatible task_type, the requested type [{}] does not match the model type [{}]";
    return new ElasticsearchStatusException(message, RestStatus.BAD_REQUEST, requested, expected);
}
/**
 * Outcome of deciding where an inference request should execute.
 *
 * @param currentNodeShouldHandleRequest true when this node handles the request itself
 * @param targetNode node to forward the request to; {@code null} when handling locally
 */
private record NodeRoutingDecision(boolean currentNodeShouldHandleRequest, DiscoveryNode targetNode) {
    /** Decision to handle the request on the current node; no target node. */
    static NodeRoutingDecision handleLocally() {
        return new NodeRoutingDecision(true, null);
    }

    /** Decision to forward the request to the given node. */
    static NodeRoutingDecision routeTo(DiscoveryNode node) {
        return new NodeRoutingDecision(false, node);
    }
}
}
|
BaseTransportInferenceAction
|
java
|
assertj__assertj-core
|
assertj-core/src/test/java/org/assertj/core/api/objectarray/ObjectArrayAssert_containsOnlyOnceElementsOf_Test.java
|
{
"start": 1005,
"end": 1471
}
|
// Delegation test: verifies ObjectArrayAssert#containsOnlyOnceElementsOf forwards to
// the internal ObjectArrays#assertContainsOnlyOnce with the iterable converted to an
// Object[]. (Class name is masked in this dataset row; per the file path it is
// ObjectArrayAssert_containsOnlyOnceElementsOf_Test.)
class ____ extends ObjectArrayAssertBaseTest {
    // Fixture iterable passed to the assertion under test.
    private final List<String> values = newArrayList("Yoda", "Luke");

    @Override
    protected ObjectArrayAssert<Object> invoke_api_method() {
        // Invoke the API method under test with the fixture values.
        return assertions.containsOnlyOnceElementsOf(values);
    }

    @Override
    protected void verify_internal_effects() {
        // The iterable must reach the internal arrays helper as an Object[].
        verify(arrays).assertContainsOnlyOnce(getInfo(assertions), getActual(assertions), values.toArray());
    }
}
|
ObjectArrayAssert_containsOnlyOnceElementsOf_Test
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShutdownHookManager.java
|
{
"start": 9389,
"end": 11556
}
|
/**
 * Test shutdown hook: records whether/when it was invoked, optionally sleeps to
 * simulate slow shutdown work, and captures any assertion failure so the test
 * thread can rethrow it later via {@link #maybeThrowAssertion()}.
 * NOTE(review): relies on LOG, INVOCATION_COUNT and sleep(...) from the enclosing
 * test class — not visible in this chunk. (Masked name is Hook, per the constructor.)
 */
class ____ implements Runnable {
    // Identity of the hook, used in log messages and toString().
    private final String name;
    // Milliseconds run() sleeps; <= 0 means no sleep.
    private final long sleepTime;
    // When true, completing normally is itself a failure (an interruption/timeout was expected).
    private final boolean expectFailure;
    // Deferred failure raised on the test thread by maybeThrowAssertion().
    private AssertionError assertion;
    private boolean invoked;
    private int invokedOrder;
    private boolean completed;
    private boolean interrupted;
    private long startTime;

    Hook(final String name,
        final long sleepTime,
        final boolean expectFailure) {
        this.name = name;
        this.sleepTime = sleepTime;
        this.expectFailure = expectFailure;
    }

    @Override
    public void run() {
        try {
            invoked = true;
            // Shared counter records this hook's position in the shutdown sequence.
            invokedOrder = INVOCATION_COUNT.incrementAndGet();
            startTime = System.currentTimeMillis();
            LOG.info("Starting shutdown of {} with sleep time of {}",
                name, sleepTime);
            if (sleepTime > 0) {
                sleep(sleepTime);
            }
            LOG.info("Completed shutdown of {}", name);
            completed = true;
            if (expectFailure) {
                // Finishing normally means the expected failure never happened.
                assertion = new AssertionError("Expected a failure of " + name);
            }
        } catch (InterruptedException ex) {
            LOG.info("Shutdown {} interrupted exception", name, ex);
            interrupted = true;
            if (!expectFailure) {
                // Interruption is only acceptable when a failure was expected.
                assertion = new AssertionError("Timeout of " + name, ex);
            }
        }
        maybeThrowAssertion();
    }

    /**
     * Raise any exception generated during the shutdown process.
     * @throws AssertionError any assertion from the shutdown.
     */
    void maybeThrowAssertion() throws AssertionError {
        if (assertion != null) {
            throw assertion;
        }
    }

    @Override
    public String toString() {
        // Diagnostic dump of all recorded state, for test failure messages.
        final StringBuilder sb = new StringBuilder("Hook{");
        sb.append("name='").append(name).append('\'');
        sb.append(", sleepTime=").append(sleepTime);
        sb.append(", expectFailure=").append(expectFailure);
        sb.append(", invoked=").append(invoked);
        sb.append(", invokedOrder=").append(invokedOrder);
        sb.append(", completed=").append(completed);
        sb.append(", interrupted=").append(interrupted);
        sb.append('}');
        return sb.toString();
    }
}
}
|
Hook
|
java
|
apache__camel
|
tooling/maven/camel-package-maven-plugin/src/main/java/org/apache/camel/maven/packaging/DynamicClassLoader.java
|
{
"start": 1083,
"end": 3005
}
|
/**
 * URLClassLoader that can define classes from raw bytecode and synthesize empty
 * placeholder classes on demand (via ASM) when a requested class is absent from
 * the classpath. (Masked name is DynamicClassLoader, per the constructor below.)
 */
class ____ extends URLClassLoader {
    public DynamicClassLoader(URL[] urls, ClassLoader parent) {
        super(urls, parent);
    }

    /**
     * Builds a loader over the given URLs, parented on the thread context class
     * loader when one is set, otherwise on this class's own loader.
     */
    public static DynamicClassLoader createDynamicClassLoaderFromUrls(List<URL> classpathElements) {
        final URL[] urls = new URL[classpathElements.size()];
        int i = 0;
        for (Iterator<URL> it = classpathElements.iterator(); it.hasNext(); i++) {
            urls[i] = it.next();
        }
        final ClassLoader tccl = Thread.currentThread().getContextClassLoader();
        return new DynamicClassLoader(urls, tccl != null ? tccl : DynamicClassLoader.class.getClassLoader());
    }

    /**
     * Builds a loader from filesystem path strings, converting each to a file: URL.
     * A malformed path aborts construction with a RuntimeException (cause preserved).
     */
    public static DynamicClassLoader createDynamicClassLoader(List<String> classpathElements) {
        final URL[] urls = new URL[classpathElements.size()];
        int i = 0;
        for (Iterator<?> it = classpathElements.iterator(); it.hasNext(); i++) {
            try {
                urls[i] = new File((String) it.next()).toURI().toURL();
            } catch (MalformedURLException e) {
                throw new RuntimeException(e.getMessage(), e);
            }
        }
        final ClassLoader tccl = Thread.currentThread().getContextClassLoader();
        return new DynamicClassLoader(urls, tccl != null ? tccl : DynamicClassLoader.class.getClassLoader());
    }

    /** Exposes the protected defineClass for callers holding raw class bytes. */
    public Class<?> defineClass(String name, byte[] data) {
        return super.defineClass(name, data, 0, data.length);
    }

    /**
     * Loads clazzName if present; otherwise generates and defines a minimal empty
     * public class (Java 17 bytecode, Object superclass) with that name so that
     * compile-time references to it can resolve.
     */
    public Class<?> generateDummyClass(String clazzName) {
        try {
            return loadClass(clazzName);
        } catch (ClassNotFoundException e) {
            ClassWriter cw = new ClassWriter(ClassWriter.COMPUTE_FRAMES);
            cw.visit(Opcodes.V17, Opcodes.ACC_PUBLIC, clazzName.replace('.', '/'), null, "java/lang/Object", null);
            cw.visitEnd();
            return defineClass(clazzName, cw.toByteArray());
        }
    }
}
|
DynamicClassLoader
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.