language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
spring-projects__spring-boot
|
module/spring-boot-http-codec/src/main/java/org/springframework/boot/http/codec/autoconfigure/CodecsAutoConfiguration.java
|
{
"start": 5455,
"end": 5739
}
|
class ____ {
@Bean
DefaultCodecCustomizer defaultCodecCustomizer(HttpCodecsProperties httpCodecProperties) {
return new DefaultCodecCustomizer(httpCodecProperties.isLogRequestDetails(),
httpCodecProperties.getMaxInMemorySize());
}
static final
|
DefaultCodecsConfiguration
|
java
|
apache__flink
|
flink-table/flink-table-common/src/main/java/org/apache/flink/table/utils/TableConnectorUtils.java
|
{
"start": 961,
"end": 1439
}
|
class ____ {
private TableConnectorUtils() {
// do not instantiate
}
/** Returns the table connector name used for logging and web UI. */
public static String generateRuntimeName(Class<?> clazz, String[] fields) {
String className = clazz.getSimpleName();
if (null == fields) {
return className + "(*)";
} else {
return className + "(" + String.join(", ", fields) + ")";
}
}
}
|
TableConnectorUtils
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StEnvelopeTests.java
|
{
"start": 1402,
"end": 3572
}
|
class ____ extends AbstractScalarFunctionTestCase {
public StEnvelopeTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
this.testCase = testCaseSupplier.get();
}
@ParametersFactory
public static Iterable<Object[]> parameters() {
String expectedGeo = "StEnvelopeFromWKBGeoEvaluator[wkb=Attribute[channel=0]]";
String expectedCartesian = "StEnvelopeFromWKBEvaluator[wkb=Attribute[channel=0]]";
final List<TestCaseSupplier> suppliers = new ArrayList<>();
TestCaseSupplier.forUnaryGeoPoint(suppliers, expectedGeo, GEO_SHAPE, StEnvelopeTests::valueOfGeo, List.of());
TestCaseSupplier.forUnaryCartesianPoint(
suppliers,
expectedCartesian,
CARTESIAN_SHAPE,
StEnvelopeTests::valueOfCartesian,
List.of()
);
TestCaseSupplier.forUnaryGeoShape(suppliers, expectedGeo, GEO_SHAPE, StEnvelopeTests::valueOfGeo, List.of());
TestCaseSupplier.forUnaryCartesianShape(
suppliers,
expectedCartesian,
CARTESIAN_SHAPE,
StEnvelopeTests::valueOfCartesian,
List.of()
);
return parameterSuppliersFromTypedDataWithDefaultChecks(false, suppliers);
}
private static BytesRef valueOfGeo(BytesRef wkb) {
return valueOf(wkb, true);
}
private static BytesRef valueOfCartesian(BytesRef wkb) {
return valueOf(wkb, false);
}
private static BytesRef valueOf(BytesRef wkb, boolean geo) {
var geometry = UNSPECIFIED.wkbToGeometry(wkb);
if (geometry instanceof Point) {
return wkb;
}
var envelope = geo
? SpatialEnvelopeVisitor.visitGeo(geometry, WrapLongitude.WRAP)
: SpatialEnvelopeVisitor.visitCartesian(geometry);
if (envelope.isPresent()) {
return UNSPECIFIED.asWkb(envelope.get());
}
throw new IllegalArgumentException("Geometry is empty");
}
@Override
protected Expression build(Source source, List<Expression> args) {
return new StEnvelope(source, args.get(0));
}
}
|
StEnvelopeTests
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/ExecutionPartitionLifecycleTest.java
|
{
"start": 15838,
"end": 16873
}
|
class ____ implements ShuffleDescriptor {
private static final long serialVersionUID = 1819950291216655728L;
private final ExecutionAttemptID producerExecutionId;
private final IntermediateResultPartitionID producedPartitionId;
private final ResourceID producerLocation;
TestingShuffleDescriptor(
IntermediateResultPartitionID producedPartitionId,
ExecutionAttemptID producerExecutionId,
ResourceID producerLocation) {
this.producedPartitionId = producedPartitionId;
this.producerExecutionId = producerExecutionId;
this.producerLocation = producerLocation;
}
@Override
public ResultPartitionID getResultPartitionID() {
return new ResultPartitionID(producedPartitionId, producerExecutionId);
}
@Override
public Optional<ResourceID> storesLocalResourcesOn() {
return Optional.of(producerLocation);
}
}
}
|
TestingShuffleDescriptor
|
java
|
quarkusio__quarkus
|
extensions/panache/hibernate-reactive-rest-data-panache/runtime/src/main/java/io/quarkus/hibernate/reactive/rest/data/panache/PanacheEntityResource.java
|
{
"start": 759,
"end": 879
}
|
interface ____<Entity extends PanacheEntityBase, ID> extends ReactiveRestDataResource<Entity, ID> {
}
|
PanacheEntityResource
|
java
|
apache__flink
|
flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/PekkoUtils.java
|
{
"start": 27712,
"end": 28150
}
|
class ____ {
private final StringWriter stringWriter = new StringWriter();
private final PrintWriter printWriter = new PrintWriter(stringWriter);
public ConfigBuilder add(String configLine) {
printWriter.println(configLine);
return this;
}
public Config build() {
return ConfigFactory.parseString(stringWriter.toString()).resolve();
}
}
}
|
ConfigBuilder
|
java
|
micronaut-projects__micronaut-core
|
benchmarks/src/jmh/java/io/micronaut/context/scope/ThreadLocalScopeBenchmark.java
|
{
"start": 836,
"end": 2008
}
|
class ____ {
ApplicationContext ctx;
Holder holder;
@Setup
public void setup() {
ctx = ApplicationContext.run(Map.of("spec.name", "ThreadLocalScopeBenchmark"));
holder = ctx.getBean(Holder.class);
}
@TearDown
public void tearDown() {
ctx.close();
}
@Benchmark
public int bench() {
return holder.myThreadLocal.foo();
}
public static void main(String[] args) throws RunnerException {
if (false) {
ThreadLocalScopeBenchmark b = new ThreadLocalScopeBenchmark();
b.setup();
for (int i = 0; i < 100; i++) {
b.bench();
}
return;
}
Options opt = new OptionsBuilder()
.include(ThreadLocalScopeBenchmark.class.getName() + ".*")
.warmupIterations(10)
.measurementIterations(10)
.mode(Mode.AverageTime)
.timeUnit(TimeUnit.NANOSECONDS)
.forks(1)
.build();
new Runner(opt).run();
}
@Singleton
@Requires(property = "spec.name", value = "ThreadLocalScopeBenchmark")
static
|
ThreadLocalScopeBenchmark
|
java
|
grpc__grpc-java
|
xds/src/main/java/io/grpc/xds/XdsDependencyManager.java
|
{
"start": 20434,
"end": 20658
}
|
interface ____ {
/**
* An updated XdsConfig or RPC-safe Status. The status code will be either UNAVAILABLE or
* INTERNAL.
*/
void onUpdate(StatusOr<XdsConfig> config);
}
private final
|
XdsConfigWatcher
|
java
|
apache__maven
|
its/core-it-suite/src/test/resources/mng-6084-jsr250-support/src/main/java/org/apache/maven/plugins/Jsr250Mojo.java
|
{
"start": 1986,
"end": 2365
}
|
class ____ extends AbstractMojo {
private Jsr250Component component;
@Inject
public Jsr250Mojo(Jsr250Component component) {
this.component = component;
}
public void execute() throws MojoExecutionException {
//
// Say hello to the world, my little constructor-injected component!
//
component.hello();
}
}
|
Jsr250Mojo
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/VarifierTest.java
|
{
"start": 4269,
"end": 4667
}
|
class ____ {
public void t() {
var duration = Duration.newBuilder().setSeconds(4).build();
}
}
""")
.doTest();
}
@Test
public void builderChainGeneric() {
refactoringHelper
.addInputLines(
"Test.java",
"""
import com.google.common.collect.ImmutableList;
|
Test
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java
|
{
"start": 1493,
"end": 1640
}
|
interface ____ {
Logger LOG = LoggerFactory.getLogger(InterDatanodeProtocol.class.getName());
/**
* Until version 9, this
|
InterDatanodeProtocol
|
java
|
elastic__elasticsearch
|
libs/entitlement/qa/entitlement-test-plugin/src/main/java/org/elasticsearch/entitlement/qa/test/JvmActions.java
|
{
"start": 1212,
"end": 4138
}
|
class ____ {
@EntitlementTest(expectedAccess = PLUGINS)
static void setSystemProperty() {
System.setProperty("es.entitlements.checkSetSystemProperty", "true");
try {
System.clearProperty("es.entitlements.checkSetSystemProperty");
} catch (RuntimeException e) {
// ignore for this test case
}
}
@EntitlementTest(expectedAccess = PLUGINS)
static void clearSystemProperty() {
EntitledPlugin.selfTest(); // TODO: find a better home
System.clearProperty("es.entitlements.checkClearSystemProperty");
}
@EntitlementTest(expectedAccess = ALWAYS_DENIED)
static void setSystemProperties() {
System.setProperties(System.getProperties()); // no side effect in case if allowed (but shouldn't)
}
@EntitlementTest(expectedAccess = ALWAYS_DENIED)
static void setDefaultLocale() {
Locale.setDefault(Locale.getDefault());
}
@EntitlementTest(expectedAccess = ALWAYS_DENIED)
static void setDefaultLocaleForCategory() {
Locale.setDefault(Locale.Category.DISPLAY, Locale.getDefault(Locale.Category.DISPLAY));
}
@EntitlementTest(expectedAccess = ALWAYS_DENIED)
static void setDefaultTimeZone() {
TimeZone.setDefault(TimeZone.getDefault());
}
@EntitlementTest(expectedAccess = PLUGINS)
static void createClassLoader() throws IOException {
try (var classLoader = new URLClassLoader("test", new URL[0], RestEntitlementsCheckAction.class.getClassLoader())) {
// intentionally empty, just let the loader close
}
}
@EntitlementTest(expectedAccess = PLUGINS)
static void createClassLoaderNewInstance1() throws IOException {
try (var classLoader = URLClassLoader.newInstance(new URL[0])) {
// intentionally empty, just let the loader close
}
}
@EntitlementTest(expectedAccess = PLUGINS)
static void createClassLoaderNewInstance2() throws IOException {
try (var classLoader = URLClassLoader.newInstance(new URL[0], RestEntitlementsCheckAction.class.getClassLoader())) {
// intentionally empty, just let the loader close
}
}
@EntitlementTest(expectedAccess = ALWAYS_DENIED)
static void createLogManager() {
new java.util.logging.LogManager() {
};
}
@EntitlementTest(expectedAccess = ALWAYS_DENIED)
static void thread$$setDefaultUncaughtExceptionHandler() {
Thread.setDefaultUncaughtExceptionHandler(Thread.getDefaultUncaughtExceptionHandler());
}
@EntitlementTest(expectedAccess = ALWAYS_ALLOWED)
static void useJavaXmlParser() {
// java.xml is part of the jdk, but not a system module. this checks it's actually usable
// as it needs to read classes from the jdk which is not generally allowed
SAXParserFactory.newInstance();
}
private JvmActions() {}
}
|
JvmActions
|
java
|
elastic__elasticsearch
|
x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/AutoscalingIntegTestCase.java
|
{
"start": 458,
"end": 657
}
|
class ____ extends ESIntegTestCase {
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return List.of(LocalStateAutoscaling.class);
}
}
|
AutoscalingIntegTestCase
|
java
|
quarkusio__quarkus
|
devtools/cli/src/main/java/io/quarkus/cli/image/Podman.java
|
{
"start": 769,
"end": 1926
}
|
class ____ extends BaseImageSubCommand {
private static final String PODMAN = "podman";
private static final String PODMAN_CONFIG_PREFIX = "quarkus.podman.";
private static final String DOCKERFILE_JVM_PATH = "dockerfile-jvm-path";
private static final String DOCKERFILE_NATIVE_PATH = "dockerfile-native-path";
@CommandLine.Option(order = 7, names = { "--dockerfile" }, description = "The path to the Dockerfile.")
public Optional<String> dockerFile;
@Override
public void populateContext(BuildToolContext context) {
var properties = context.getPropertiesOptions().properties;
properties.put(QUARKUS_CONTAINER_IMAGE_BUILDER, PODMAN);
dockerFile.ifPresent(d -> properties.put(
PODMAN_CONFIG_PREFIX + (context.getBuildOptions().buildNative ? DOCKERFILE_NATIVE_PATH : DOCKERFILE_JVM_PATH),
d));
context.getForcedExtensions().add(QUARKUS_CONTAINER_IMAGE_EXTENSION_KEY_PREFIX + PODMAN);
}
@Override
public String toString() {
return "Podman {imageOptions='" + imageOptions + "', dockerFile:'" + dockerFile.orElse("<none>") + "'}";
}
}
|
Podman
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/index/mapper/blockloader/docvalues/fn/AbstractBooleansFromDocValuesBlockLoaderTests.java
|
{
"start": 1154,
"end": 3764
}
|
class ____ extends ESTestCase {
@ParametersFactory(argumentFormatting = "blockAtATime=%s, multiValues=%s, missingValues=%s")
public static List<Object[]> parameters() throws IOException {
List<Object[]> parameters = new ArrayList<>();
for (boolean blockAtATime : new boolean[] { true, false }) {
for (boolean multiValues : new boolean[] { true, false }) {
for (boolean missingValues : new boolean[] { true, false }) {
parameters.add(new Object[] { blockAtATime, multiValues, missingValues });
}
}
}
return parameters;
}
protected final boolean blockAtATime;
protected final boolean multiValues;
protected final boolean missingValues;
public AbstractBooleansFromDocValuesBlockLoaderTests(boolean blockAtATime, boolean multiValues, boolean missingValues) {
this.blockAtATime = blockAtATime;
this.multiValues = multiValues;
this.missingValues = missingValues;
}
protected abstract void innerTest(LeafReaderContext ctx, int mvCount) throws IOException;
public void test() throws IOException {
int mvCount = 0;
try (Directory dir = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), dir)) {
int docCount = 10_000;
for (int i = 0; i < docCount; i++) {
List<IndexableField> doc = new ArrayList<>(2);
doc.add(field(i));
if (multiValues && i % 100 == 0) {
doc.add(field((i % 100) + 1));
mvCount++;
}
iw.addDocument(doc);
}
if (missingValues) {
iw.addDocument(List.of());
}
iw.forceMerge(1);
try (DirectoryReader dr = iw.getReader()) {
LeafReaderContext ctx = getOnlyLeafReader(dr).getContext();
innerTest(ctx, mvCount);
}
}
}
protected final TestBlock read(BlockLoader loader, BlockLoader.AllReader reader, LeafReaderContext ctx, BlockLoader.Docs docs)
throws IOException {
BlockLoader.AllReader toUse = blockAtATime
? reader
: new ForceDocAtATime(() -> loader.builder(TestBlock.factory(), docs.count()), reader);
return (TestBlock) toUse.read(TestBlock.factory(), docs, 0, false);
}
private static SortedNumericDocValuesField field(int v) {
return new SortedNumericDocValuesField("field", v % 4 == 0 ? 1 : 0);
}
}
|
AbstractBooleansFromDocValuesBlockLoaderTests
|
java
|
spring-projects__spring-framework
|
spring-core-test/src/main/java/org/springframework/core/test/tools/Compiled.java
|
{
"start": 4211,
"end": 5486
}
|
class ____ to load
* @return an instance of the class
* @throws IllegalStateException if no instance can be found or instantiated
*/
public <T> T getInstance(Class<T> type, String className) {
Class<?> loaded = loadClass(className);
return type.cast(newInstance(loaded));
}
/**
* Return all compiled classes.
* @return a list of all compiled classes
*/
public List<Class<?>> getAllCompiledClasses() {
List<Class<?>> compiledClasses = this.compiledClasses;
if (compiledClasses == null) {
compiledClasses = new ArrayList<>();
this.sourceFiles.stream().map(this::loadClass).forEach(compiledClasses::add);
this.compiledClasses = Collections.unmodifiableList(compiledClasses);
}
return compiledClasses;
}
private Object newInstance(Class<?> loaded) {
try {
Constructor<?> constructor = loaded.getDeclaredConstructor();
return constructor.newInstance();
}
catch (Exception ex) {
throw new IllegalStateException(ex);
}
}
private Class<?> loadClass(SourceFile sourceFile) {
return loadClass(sourceFile.getClassName());
}
private Class<?> loadClass(String className) {
try {
return this.classLoader.loadClass(className);
}
catch (ClassNotFoundException ex) {
throw new IllegalStateException(ex);
}
}
}
|
name
|
java
|
quarkusio__quarkus
|
extensions/grpc/runtime/src/main/java/io/quarkus/grpc/runtime/supports/blocking/BlockingServerInterceptor.java
|
{
"start": 1144,
"end": 7940
}
|
class ____ implements ServerInterceptor, Function<String, Boolean>, Prioritized {
private static final Logger log = Logger.getLogger(BlockingServerInterceptor.class);
// Reserved keywords, based on the jls, see:
// https://github.com/grpc/grpc-java/blob/master/compiler/src/java_plugin/cpp/java_generator.cpp#L90
private static final Set<String> GRPC_JAVA_RESERVED_KEYWORDS = Set.of(
"abstract",
"assert",
"boolean",
"break",
"byte",
"case",
"catch",
"char",
"class",
"const",
"continue",
"default",
"do",
"double",
"else",
"enum",
"extends",
"final",
"finally",
"float",
"for",
"goto",
"if",
"implements",
"import",
"instanceof",
"int",
"interface",
"long",
"native",
"new",
"package",
"private",
"protected",
"public",
"return",
"short",
"static",
"strictfp",
"super",
"switch",
"synchronized",
"this",
"throw",
"throws",
"transient",
"try",
"void",
"volatile",
"while",
"true",
"false");
private final Vertx vertx;
private final Set<String> blockingMethods;
private final Set<String> virtualMethods;
private final Map<String, Boolean> blockingCache = new ConcurrentHashMap<>();
private final Map<String, Boolean> virtualCache = new ConcurrentHashMap<>();
private final boolean devMode;
private final Executor virtualThreadExecutor;
public BlockingServerInterceptor(Vertx vertx, List<String> blockingMethods, List<String> virtualMethods,
Executor virtualThreadExecutor, boolean devMode) {
this.vertx = vertx;
this.blockingMethods = new HashSet<>();
this.virtualMethods = new HashSet<>();
this.devMode = devMode;
if (blockingMethods != null) {
for (String method : blockingMethods) {
this.blockingMethods.add(method.toLowerCase());
}
}
if (virtualMethods != null) {
for (String method : virtualMethods) {
this.virtualMethods.add(method.toLowerCase());
}
}
this.virtualThreadExecutor = virtualThreadExecutor;
}
@Override
public Boolean apply(String name) {
String methodName = name.substring(name.lastIndexOf("/") + 1);
return blockingMethods.contains(toLowerCaseBeanSpec(methodName));
}
public Boolean applyVirtual(String name) {
String methodName = name.substring(name.lastIndexOf("/") + 1);
return virtualMethods.contains(toLowerCaseBeanSpec(methodName));
}
private String toLowerCaseBeanSpec(String name) {
// Methods cannot always be lowercased for comparison.
// - gRPC allows using method names which normally would not work in java because of reserved keywords.
// - Underscores are removed.
String lowerBeanSpec = name.toLowerCase().replace("_", "");
return GRPC_JAVA_RESERVED_KEYWORDS.contains(lowerBeanSpec) ? lowerBeanSpec + "_" : lowerBeanSpec;
}
@Override
public <ReqT, RespT> ServerCall.Listener<ReqT> interceptCall(ServerCall<ReqT, RespT> call,
Metadata headers,
ServerCallHandler<ReqT, RespT> next) {
// We need to check if the method is annotated with @Blocking.
// Unfortunately, we can't have the Java method object, we can only have the gRPC full method name.
// We extract the method name, and check if the name is in the list.
// This makes the following assumptions:
// 1. the code generator does not change the method name (which makes sense)
// 2. the method name is unique, which is a constraint of gRPC
// For performance purpose, we execute the lookup only once
String fullMethodName = call.getMethodDescriptor().getFullMethodName();
boolean isBlocking = blockingCache.computeIfAbsent(fullMethodName, this);
boolean isVirtual = virtualCache.computeIfAbsent(fullMethodName, this::applyVirtual);
if (isVirtual) {
final ManagedContext requestContext = getRequestContext();
// context should always be active here
// it is initialized by io.quarkus.grpc.runtime.supports.context.GrpcRequestContextGrpcInterceptor
// that should always be called before this interceptor
ContextState state = requestContext.getState();
VirtualReplayListener<ReqT> replay = new VirtualReplayListener<>(state);
virtualThreadExecutor.execute(() -> {
ServerCall.Listener<ReqT> listener;
try {
requestContext.activate(state);
listener = next.startCall(call, headers);
} finally {
requestContext.deactivate();
}
replay.setDelegate(listener);
});
return replay;
} else if (isBlocking) {
final ManagedContext requestContext = getRequestContext();
// context should always be active here
// it is initialized by io.quarkus.grpc.runtime.supports.context.GrpcRequestContextGrpcInterceptor
// that should always be called before this interceptor
ContextState state = requestContext.getState();
ReplayListener<ReqT> replay = new ReplayListener<>(state);
vertx.executeBlocking(() -> {
ServerCall.Listener<ReqT> listener;
try {
requestContext.activate(state);
listener = next.startCall(call, headers);
} finally {
requestContext.deactivate();
}
return listener;
}, false)
.onComplete(event -> replay.setDelegate(event.result()));
return replay;
} else {
return next.startCall(call, headers);
}
}
@Override
public int getPriority() {
return Interceptors.BLOCKING_HANDLER;
}
/**
* Stores the incoming events until the listener is injected.
* When injected, replay the events.
* <p>
* Note that event must be executed in order, explaining why incomingEvents
* are executed sequentially
*/
private
|
BlockingServerInterceptor
|
java
|
google__gson
|
gson/src/main/java/com/google/gson/internal/JavaVersion.java
|
{
"start": 716,
"end": 2956
}
|
class ____ {
// Oracle defines naming conventions at
// http://www.oracle.com/technetwork/java/javase/versioning-naming-139433.html
// However, many alternate implementations differ. For example, Debian used 9-debian as the
// version string
private static final int majorJavaVersion = determineMajorJavaVersion();
private static int determineMajorJavaVersion() {
String javaVersion = System.getProperty("java.version");
return parseMajorJavaVersion(javaVersion);
}
// Visible for testing only
static int parseMajorJavaVersion(String javaVersion) {
int version = parseDotted(javaVersion);
if (version == -1) {
version = extractBeginningInt(javaVersion);
}
if (version == -1) {
return 6; // Choose minimum supported JDK version as default
}
return version;
}
// Parses both legacy 1.8 style and newer 9.0.4 style
private static int parseDotted(String javaVersion) {
try {
String[] parts = javaVersion.split("[._]", 3);
int firstVer = Integer.parseInt(parts[0]);
if (firstVer == 1 && parts.length > 1) {
return Integer.parseInt(parts[1]);
} else {
return firstVer;
}
} catch (NumberFormatException e) {
return -1;
}
}
private static int extractBeginningInt(String javaVersion) {
try {
StringBuilder num = new StringBuilder();
for (int i = 0; i < javaVersion.length(); ++i) {
char c = javaVersion.charAt(i);
if (Character.isDigit(c)) {
num.append(c);
} else {
break;
}
}
return Integer.parseInt(num.toString());
} catch (NumberFormatException e) {
return -1;
}
}
/**
* Gets the major Java version
*
* @return the major Java version, i.e. '8' for Java 1.8, '9' for Java 9 etc.
*/
public static int getMajorJavaVersion() {
return majorJavaVersion;
}
/**
* Gets a boolean value depending if the application is running on Java 9 or later
*
* @return {@code true} if the application is running on Java 9 or later; and {@code false}
* otherwise.
*/
public static boolean isJava9OrLater() {
return majorJavaVersion >= 9;
}
private JavaVersion() {}
}
|
JavaVersion
|
java
|
spring-projects__spring-boot
|
module/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/endpoint/invoke/OperationParameters.java
|
{
"start": 844,
"end": 1782
}
|
interface ____ extends Iterable<OperationParameter> {
/**
* Return {@code true} if there is at least one parameter.
* @return if there are parameters
*/
default boolean hasParameters() {
return getParameterCount() > 0;
}
/**
* Return the total number of parameters.
* @return the total number of parameters
*/
int getParameterCount();
/**
* Return if any of the contained parameters are
* {@link OperationParameter#isMandatory() mandatory}.
* @return if any parameters are mandatory
*/
default boolean hasMandatoryParameter() {
return stream().anyMatch(OperationParameter::isMandatory);
}
/**
* Return the parameter at the specified index.
* @param index the parameter index
* @return the parameter
*/
OperationParameter get(int index);
/**
* Return a stream of the contained parameters.
* @return a stream of the parameters
*/
Stream<OperationParameter> stream();
}
|
OperationParameters
|
java
|
apache__hadoop
|
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMapOutputType.java
|
{
"start": 2061,
"end": 2561
}
|
class ____
implements Mapper<WritableComparable, Writable, Text, Text> {
public void configure(JobConf job) {
}
public void map(WritableComparable key, Writable val,
OutputCollector<Text, Text> out,
Reporter reporter) throws IOException {
out.collect(new Text("Hello"), new Text("World"));
}
public void close() {
}
}
/** A do-nothing reducer class. We won't get this far, really.
*
*/
static
|
TextGen
|
java
|
apache__camel
|
core/camel-api/src/main/java/org/apache/camel/spi/PackageScanClassResolver.java
|
{
"start": 1458,
"end": 1552
}
|
class ____ to use
*/
Set<ClassLoader> getClassLoaders();
/**
* Adds the
|
loaders
|
java
|
assertj__assertj-core
|
assertj-core/src/test/java/org/assertj/core/api/Char2DArrayAssertBaseTest.java
|
{
"start": 749,
"end": 840
}
|
class ____ {@link Char2DArrayAssert} tests.
*
* @author Maciej Wajcht
*/
public abstract
|
for
|
java
|
apache__spark
|
examples/src/main/java/org/apache/spark/examples/ml/JavaTargetEncoderExample.java
|
{
"start": 1371,
"end": 3378
}
|
class ____ {
public static void main(String[] args) {
SparkSession spark = SparkSession
.builder()
.appName("JavaTargetEncoderExample")
.getOrCreate();
// Note: categorical features are usually first encoded with StringIndexer
// $example on$
List<Row> data = Arrays.asList(
RowFactory.create(0.0, 1.0, 0, 10.0),
RowFactory.create(1.0, 0.0, 1, 20.0),
RowFactory.create(2.0, 1.0, 0, 30.0),
RowFactory.create(0.0, 2.0, 1, 40.0),
RowFactory.create(0.0, 1.0, 0, 50.0),
RowFactory.create(2.0, 0.0, 1, 60.0)
);
StructType schema = new StructType(new StructField[]{
new StructField("categoryIndex1", DataTypes.DoubleType, false, Metadata.empty()),
new StructField("categoryIndex2", DataTypes.DoubleType, false, Metadata.empty()),
new StructField("binaryLabel", DataTypes.DoubleType, false, Metadata.empty()),
new StructField("continuousLabel", DataTypes.DoubleType, false, Metadata.empty())
});
Dataset<Row> df = spark.createDataFrame(data, schema);
// binary target
TargetEncoder bin_encoder = new TargetEncoder()
.setInputCols(new String[] {"categoryIndex1", "categoryIndex2"})
.setOutputCols(new String[] {"categoryIndex1Target", "categoryIndex2Target"})
.setLabelCol("binaryLabel")
.setTargetType("binary");
TargetEncoderModel bin_model = bin_encoder.fit(df);
Dataset<Row> bin_encoded = bin_model.transform(df);
bin_encoded.show();
// continuous target
TargetEncoder cont_encoder = new TargetEncoder()
.setInputCols(new String[] {"categoryIndex1", "categoryIndex2"})
.setOutputCols(new String[] {"categoryIndex1Target", "categoryIndex2Target"})
.setLabelCol("continuousLabel")
.setTargetType("continuous");
TargetEncoderModel cont_model = cont_encoder.fit(df);
Dataset<Row> cont_encoded = cont_model.transform(df);
cont_encoded.show();
// $example off$
spark.stop();
}
}
|
JavaTargetEncoderExample
|
java
|
quarkusio__quarkus
|
integration-tests/redis-client/src/main/java/io/quarkus/redis/it/RedisResourceWithNamedClient.java
|
{
"start": 568,
"end": 1663
}
|
class ____ {
private final ValueCommands<String, String> blocking;
private final ReactiveValueCommands<String, String> reactive;
public RedisResourceWithNamedClient(
@RedisClientName("named-client") RedisDataSource ds,
@RedisClientName("named-reactive-client") ReactiveRedisDataSource reactiveDs) {
blocking = ds.value(String.class);
reactive = reactiveDs.value(String.class);
}
// synchronous
@GET
@Path("/sync/{key}")
public String getSync(@PathParam("key") String key) {
return blocking.get(key);
}
@POST
@Path("/sync/{key}")
public void setSync(@PathParam("key") String key, String value) {
blocking.set(key, value);
}
// reactive
@GET
@Path("/reactive/{key}")
public Uni<String> getReactive(@PathParam("key") String key) {
return reactive.get(key);
}
@POST
@Path("/reactive/{key}")
public Uni<Void> setReactive(@PathParam("key") String key, String value) {
return this.reactive.set(key, value);
}
}
|
RedisResourceWithNamedClient
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/bytecode/enhancement/lazy/HHH_10708/UnexpectedDeleteTest2.java
|
{
"start": 1100,
"end": 1985
}
|
class ____ {
private Bar myBar;
@BeforeEach
public void prepare(SessionFactoryScope scope) {
scope.inTransaction( s -> {
Bar bar = new Bar();
Foo foo1 = new Foo();
Foo foo2 = new Foo();
s.persist( bar );
s.persist( foo1 );
s.persist( foo2 );
bar.foos.add( foo1 );
bar.foos.add( foo2 );
myBar = bar;
} );
}
@Test
public void test(SessionFactoryScope scope) {
scope.inTransaction( s -> {
assertThrows(IllegalArgumentException.class,
() -> s.refresh( myBar ),
"Given entity is not associated with the persistence context"
);
// The issue is that currently, for some unknown reason, foos are deleted on flush
} );
scope.inTransaction( s -> {
Bar bar = s.get( Bar.class, myBar.id );
assertFalse( bar.foos.isEmpty() );
} );
}
// --- //
@Entity(name = "Bar")
@Table( name = "BAR" )
static
|
UnexpectedDeleteTest2
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/builder/RouteTemplateOptionalValueTest.java
|
{
"start": 925,
"end": 2268
}
|
class ____ extends ContextTestSupport {
@Test
public void testOptionalProvided() throws Exception {
TemplatedRouteBuilder.builder(context, "myTemplate")
.parameter("foo", "start")
.parameter("myRetain", "1")
.routeId("myRoute")
.add();
getMockEndpoint("mock:result?retainFirst=1").expectedMessageCount(1);
template.sendBody("direct:start", "Hello World");
assertMockEndpointsSatisfied();
}
@Test
public void testOptional() throws Exception {
TemplatedRouteBuilder.builder(context, "myTemplate")
.parameter("foo", "start2")
.routeId("myRoute")
.add();
getMockEndpoint("mock:result").expectedMessageCount(1);
template.sendBody("direct:start2", "Bye World");
assertMockEndpointsSatisfied();
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
routeTemplate("myTemplate").templateParameter("foo").templateOptionalParameter("myRetain")
.from("direct:{{foo}}")
.to("mock:result?retainFirst={{?myRetain}}");
}
};
}
}
|
RouteTemplateOptionalValueTest
|
java
|
elastic__elasticsearch
|
modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilderTests.java
|
{
"start": 733,
"end": 2812
}
|
class ____ extends ESTestCase {
public void testInvalidInterval() {
AutoDateHistogramAggregationBuilder builder = new AutoDateHistogramAggregationBuilder("name");
IllegalArgumentException wrongIntervalName = expectThrows(
IllegalArgumentException.class,
() -> builder.setMinimumIntervalExpression("foobar")
);
assertTrue(wrongIntervalName.getMessage().startsWith("minimum_interval must be one of"));
}
public void testBuildRoundingsWithNullParameter() {
int expectedLength = AutoDateHistogramAggregationBuilder.ALLOWED_INTERVALS.size();
AutoDateHistogramAggregationBuilder.RoundingInfo[] roundings = AutoDateHistogramAggregationBuilder.buildRoundings(null, null);
assertThat(roundings.length, equalTo(expectedLength));
}
public void testBuildRoundingsWithMinIntervalOfAYear() {
int[] expectedYearIntervals = { 1, 5, 10, 20, 50, 100 };
AutoDateHistogramAggregationBuilder.RoundingInfo[] roundings = AutoDateHistogramAggregationBuilder.buildRoundings(null, "year");
assertThat(roundings.length, equalTo(1));
AutoDateHistogramAggregationBuilder.RoundingInfo year = roundings[0];
assertEquals(year.unitAbbreviation, "y");
assertEquals(year.dateTimeUnit, "year");
assertEquals(year.roughEstimateDurationMillis, 31536000000L);
assertArrayEquals(year.innerIntervals, expectedYearIntervals);
}
public void testRoundingsMatchAllowedIntervals() {
AutoDateHistogramAggregationBuilder.RoundingInfo[] roundings = AutoDateHistogramAggregationBuilder.buildRoundings(null, "second");
Set<String> actualDateTimeUnits = Arrays.stream(roundings)
.map(AutoDateHistogramAggregationBuilder.RoundingInfo::getDateTimeUnit)
.collect(Collectors.toSet());
Set<String> expectedDateTimeUnits = new HashSet<>(AutoDateHistogramAggregationBuilder.ALLOWED_INTERVALS.values());
assertEquals(actualDateTimeUnits, expectedDateTimeUnits);
}
}
|
AutoDateHistogramAggregationBuilderTests
|
java
|
quarkusio__quarkus
|
extensions/arc/deployment/src/test/java/io/quarkus/arc/test/interceptor/IgnoredPrivateInterceptedMethodTest.java
|
{
"start": 1036,
"end": 2079
}
|
class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(SimpleBean.class, SimpleInterceptor.class, Simple.class)
.addAsResource(new StringAsset("quarkus.arc.fail-on-intercepted-private-method=false"),
"application.properties"))
.setLogRecordPredicate(record -> record.getLevel().intValue() >= Level.WARNING.intValue())
.assertLogRecords(records -> assertThat(records)
.anySatisfy(record -> assertThat(record)
.extracting(LogRecord::getMessage, InstanceOfAssertFactories.STRING)
.contains("@Simple will have no effect on method " + SimpleBean.class.getName() + ".foo")));
@Inject
SimpleBean simpleBean;
@Test
public void testBeanInvocation() {
assertEquals("foo", simpleBean.foo());
}
@Singleton
static
|
IgnoredPrivateInterceptedMethodTest
|
java
|
quarkusio__quarkus
|
extensions/hibernate-envers/deployment/src/test/java/io/quarkus/hibernate/orm/envers/config/EnversTestAuditStrategyResource.java
|
{
"start": 392,
"end": 933
}
|
class ____ extends AbstractEnversResource {
@GET
public String getConfiguredAuditStrategy() {
final AuditStrategy auditStrategy = getAuditStrategy();
final Class<?> expectedClass = ValidityAuditStrategy.class;
final Class<?> actualClass = auditStrategy.getClass();
if (expectedClass.equals(actualClass)) {
return "OK";
}
return "Expected that audit strategy " + actualClass.getName() + " is not as expected: " + expectedClass.getName();
}
}
|
EnversTestAuditStrategyResource
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/WeaviateVectorDbEndpointBuilderFactory.java
|
{
"start": 5044,
"end": 7580
}
|
interface ____
extends
EndpointProducerBuilder {
default WeaviateVectorDbEndpointBuilder basic() {
return (WeaviateVectorDbEndpointBuilder) this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default AdvancedWeaviateVectorDbEndpointBuilder lazyStartProducer(boolean lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default AdvancedWeaviateVectorDbEndpointBuilder lazyStartProducer(String lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
}
public
|
AdvancedWeaviateVectorDbEndpointBuilder
|
java
|
apache__flink
|
flink-table/flink-table-common/src/main/java/org/apache/flink/table/expressions/Expression.java
|
{
"start": 989,
"end": 1519
}
|
interface ____ all kinds of expressions.
*
* <p>Expressions represent a logical tree for producing a computation result. Every expression
* consists of zero, one, or more subexpressions. Expressions might be literal values, function
* calls, or field references.
*
* <p>Expressions are part of the API. They might be transformed multiple times within the API stack
* until they are fully {@link ResolvedExpression}s. Value types and output types are expressed as
* instances of {@link DataType}.
*/
@PublicEvolving
public
|
for
|
java
|
apache__camel
|
dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/LumberjackComponentBuilderFactory.java
|
{
"start": 1388,
"end": 1880
}
|
interface ____ {
/**
* Lumberjack (camel-lumberjack)
* Receive logs messages using the Lumberjack protocol.
*
* Category: monitoring
* Since: 2.18
* Maven coordinates: org.apache.camel:camel-lumberjack
*
* @return the dsl builder
*/
static LumberjackComponentBuilder lumberjack() {
return new LumberjackComponentBuilderImpl();
}
/**
* Builder for the Lumberjack component.
*/
|
LumberjackComponentBuilderFactory
|
java
|
apache__flink
|
flink-filesystems/flink-oss-fs-hadoop/src/test/java/org/apache/flink/fs/osshadoop/OSSTestUtils.java
|
{
"start": 1636,
"end": 4597
}
|
class ____ {
private static final int BUFFER_SIZE = 10;
public static void objectContentEquals(
FileSystem fs, Path objectPath, List<byte[]> expectContents) throws IOException {
String actualContent;
try (FSDataInputStream in = fs.open(objectPath);
ByteArrayOutputStream out = new ByteArrayOutputStream()) {
byte[] buffer = new byte[4096];
int bytes = in.read(buffer);
while (bytes != -1) {
out.write(buffer, 0, bytes);
bytes = in.read(buffer);
}
actualContent = out.toString(StandardCharsets.UTF_8.name());
}
ByteArrayOutputStream out = new ByteArrayOutputStream();
for (byte[] bytes : expectContents) {
out.write(bytes);
}
assertThat(actualContent).isEqualTo(out.toString());
}
public static void objectContentEquals(FileSystem fs, Path objectPath, byte[]... expectContents)
throws IOException {
objectContentEquals(fs, objectPath, Arrays.asList(expectContents));
}
public static byte[] bytesOf(String str, long requiredSize) {
StringBuilder sb = new StringBuilder();
while (sb.length() < requiredSize) {
sb.append(str);
}
return sb.toString().getBytes(StandardCharsets.UTF_8);
}
public static void uploadPart(
OSSRecoverableMultipartUpload uploader,
final File temporaryFolder,
final byte[] content)
throws IOException {
RefCountedBufferingFileStream partFile = writeData(temporaryFolder, content);
partFile.close();
uploader.uploadPart(partFile);
}
public static RefCountedBufferingFileStream writeData(File temporaryFolder, byte[] content)
throws IOException {
final File newFile = new File(temporaryFolder, ".tmp_" + UUID.randomUUID());
final OutputStream out =
Files.newOutputStream(newFile.toPath(), StandardOpenOption.CREATE_NEW);
final RefCountedBufferingFileStream testStream =
new RefCountedBufferingFileStream(
RefCountedFileWithStream.newFile(newFile, out), BUFFER_SIZE);
testStream.write(content, 0, content.length);
return testStream;
}
public static List<byte[]> generateRandomBuffer(long size, int partSize) {
List<byte[]> buffers = new ArrayList<>();
final SplittableRandom random = new SplittableRandom();
long totalSize = 0L;
while (totalSize < size) {
int bufferSize = random.nextInt(0, 2 * partSize);
byte[] buffer = new byte[bufferSize];
for (int i = 0; i < bufferSize; ++i) {
buffer[i] = (byte) (random.nextInt() & 0xFF);
}
buffers.add(buffer);
totalSize += bufferSize;
}
return buffers;
}
}
|
OSSTestUtils
|
java
|
google__dagger
|
javatests/dagger/hilt/android/MultiTestRoot1Test.java
|
{
"start": 3178,
"end": 3417
}
|
interface ____ {
@Provides
@MultiTestRootExternalModules.External
static String provideString() {
return REPLACE_EXTERNAL_STR_VALUE;
}
}
@Module
@InstallIn(ActivityComponent.class)
public
|
ReplaceExternalAppModule
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/TaskStateSnapshot.java
|
{
"start": 2809,
"end": 10062
}
|
class ____ implements CompositeStateHandle {
private static final long serialVersionUID = 1L;
public static final TaskStateSnapshot FINISHED_ON_RESTORE =
new TaskStateSnapshot(new HashMap<>(), true, true);
/** Mapping from an operator id to the state of one subtask of this operator. */
private final Map<OperatorID, OperatorSubtaskState> subtaskStatesByOperatorID;
private final boolean isTaskDeployedAsFinished;
private final boolean isTaskFinished;
public TaskStateSnapshot() {
this(10, false);
}
public TaskStateSnapshot(int size, boolean isTaskFinished) {
this(CollectionUtil.newHashMapWithExpectedSize(size), false, isTaskFinished);
}
public TaskStateSnapshot(Map<OperatorID, OperatorSubtaskState> subtaskStatesByOperatorID) {
this(subtaskStatesByOperatorID, false, false);
}
private TaskStateSnapshot(
Map<OperatorID, OperatorSubtaskState> subtaskStatesByOperatorID,
boolean isTaskDeployedAsFinished,
boolean isTaskFinished) {
this.subtaskStatesByOperatorID = Preconditions.checkNotNull(subtaskStatesByOperatorID);
this.isTaskDeployedAsFinished = isTaskDeployedAsFinished;
this.isTaskFinished = isTaskFinished;
}
/** Returns whether all the operators of the task are already finished on restoring. */
public boolean isTaskDeployedAsFinished() {
return isTaskDeployedAsFinished;
}
/** Returns whether all the operators of the task have called finished methods. */
public boolean isTaskFinished() {
return isTaskFinished;
}
/** Returns the subtask state for the given operator id (or null if not contained). */
@Nullable
public OperatorSubtaskState getSubtaskStateByOperatorID(OperatorID operatorID) {
return subtaskStatesByOperatorID.get(operatorID);
}
/**
* Maps the given operator id to the given subtask state. Returns the subtask state of a
* previous mapping, if such a mapping existed or null otherwise.
*/
public OperatorSubtaskState putSubtaskStateByOperatorID(
@Nonnull OperatorID operatorID, @Nonnull OperatorSubtaskState state) {
return subtaskStatesByOperatorID.put(operatorID, Preconditions.checkNotNull(state));
}
/** Returns the set of all mappings from operator id to the corresponding subtask state. */
public Set<Map.Entry<OperatorID, OperatorSubtaskState>> getSubtaskStateMappings() {
return subtaskStatesByOperatorID.entrySet();
}
/**
* Returns true if at least one {@link OperatorSubtaskState} in subtaskStatesByOperatorID has
* state.
*/
public boolean hasState() {
for (OperatorSubtaskState operatorSubtaskState : subtaskStatesByOperatorID.values()) {
if (operatorSubtaskState != null && operatorSubtaskState.hasState()) {
return true;
}
}
return isTaskDeployedAsFinished;
}
/**
* Returns the input channel mapping for rescaling with in-flight data or {@link
* InflightDataRescalingDescriptor#NO_RESCALE}.
*/
public InflightDataRescalingDescriptor getInputRescalingDescriptor() {
return getMapping(OperatorSubtaskState::getInputRescalingDescriptor);
}
/**
* Returns the output channel mapping for rescaling with in-flight data or {@link
* InflightDataRescalingDescriptor#NO_RESCALE}.
*/
public InflightDataRescalingDescriptor getOutputRescalingDescriptor() {
return getMapping(OperatorSubtaskState::getOutputRescalingDescriptor);
}
@Override
public void discardState() throws Exception {
StateUtil.bestEffortDiscardAllStateObjects(subtaskStatesByOperatorID.values());
}
@Override
public long getStateSize() {
return streamOperatorSubtaskStates().mapToLong(StateObject::getStateSize).sum();
}
@Override
public void collectSizeStats(StateObjectSizeStatsCollector collector) {
streamOperatorSubtaskStates().forEach(oss -> oss.collectSizeStats(collector));
}
private Stream<OperatorSubtaskState> streamOperatorSubtaskStates() {
return subtaskStatesByOperatorID.values().stream().filter(Objects::nonNull);
}
@Override
public long getCheckpointedSize() {
long size = 0L;
for (OperatorSubtaskState subtaskState : subtaskStatesByOperatorID.values()) {
if (subtaskState != null) {
size += subtaskState.getCheckpointedSize();
}
}
return size;
}
@Override
public void registerSharedStates(SharedStateRegistry stateRegistry, long checkpointID) {
for (OperatorSubtaskState operatorSubtaskState : subtaskStatesByOperatorID.values()) {
if (operatorSubtaskState != null) {
operatorSubtaskState.registerSharedStates(stateRegistry, checkpointID);
}
}
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
TaskStateSnapshot that = (TaskStateSnapshot) o;
return subtaskStatesByOperatorID.equals(that.subtaskStatesByOperatorID)
&& isTaskDeployedAsFinished == that.isTaskDeployedAsFinished
&& isTaskFinished == that.isTaskFinished;
}
@Override
public int hashCode() {
return Objects.hash(subtaskStatesByOperatorID, isTaskDeployedAsFinished, isTaskFinished);
}
@Override
public String toString() {
return "TaskOperatorSubtaskStates{"
+ "subtaskStatesByOperatorID="
+ subtaskStatesByOperatorID
+ ", isTaskDeployedAsFinished="
+ isTaskDeployedAsFinished
+ ", isTaskFinished="
+ isTaskFinished
+ '}';
}
/** Returns the only valid mapping as ensured by {@link StateAssignmentOperation}. */
private InflightDataRescalingDescriptor getMapping(
Function<OperatorSubtaskState, InflightDataRescalingDescriptor> mappingExtractor) {
return Iterators.getOnlyElement(
subtaskStatesByOperatorID.values().stream()
.map(mappingExtractor)
.filter(mapping -> !mapping.equals(NO_RESCALE))
.iterator(),
NO_RESCALE);
}
@Nullable
public static SerializedValue<TaskStateSnapshot> serializeTaskStateSnapshot(
TaskStateSnapshot subtaskState) {
try {
return subtaskState == null ? null : new SerializedValue<>(subtaskState);
} catch (IOException e) {
throw new FlinkRuntimeException(e);
}
}
@Nullable
public static TaskStateSnapshot deserializeTaskStateSnapshot(
SerializedValue<TaskStateSnapshot> subtaskState, ClassLoader classLoader) {
try {
return subtaskState == null ? null : subtaskState.deserializeValue(classLoader);
} catch (IOException | ClassNotFoundException e) {
throw new FlinkRuntimeException(e);
}
}
}
|
TaskStateSnapshot
|
java
|
grpc__grpc-java
|
xds/src/main/java/io/grpc/xds/XdsCredentialsProvider.java
|
{
"start": 1258,
"end": 1487
}
|
class ____.
* Implementations that need arguments in their constructor can be manually registered by
* {@link XdsCredentialsRegistry#register}.
*
* <p>Implementations <em>should not</em> throw. If they do, it may interrupt
|
name
|
java
|
spring-projects__spring-framework
|
spring-context/src/test/java/org/springframework/aop/framework/ProxyFactoryBeanTests.java
|
{
"start": 28489,
"end": 28911
}
|
class ____ implements IntroductionInterceptor {
@Override
public boolean implementsInterface(Class<?> intf) {
return intf.equals(AddedGlobalInterface.class);
}
@Override
public Object invoke(MethodInvocation mi) throws Throwable {
if (mi.getMethod().getDeclaringClass().equals(AddedGlobalInterface.class)) {
return -1;
}
return mi.proceed();
}
}
public static
|
GlobalAspectInterfaceInterceptor
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatEnvironment.java
|
{
"start": 1451,
"end": 5353
}
|
class ____ {
private static final Logger LOG =
LoggerFactory.getLogger(HdfsCompatEnvironment.class);
private static final String DATE_FORMAT = "yyyy_MM_dd_HH_mm_ss";
private static final Random RANDOM = new Random();
private final Path uri;
private final Configuration conf;
private FileSystem fs;
private LocalFileSystem localFs;
private Path rootDir;
private Path baseDir;
private String defaultLocalDir;
private String[] defaultStoragePolicyNames;
public HdfsCompatEnvironment(Path uri, Configuration conf) {
this.conf = conf;
this.uri = uri;
}
public void init() throws IOException {
Date now = new Date();
String uuid = UUID.randomUUID().toString();
String uniqueDir = "hadoop-compatibility-benchmark/" +
new SimpleDateFormat(DATE_FORMAT).format(now) + "/" + uuid;
this.fs = uri.getFileSystem(conf);
this.localFs = FileSystem.getLocal(conf);
this.rootDir = fs.makeQualified(new Path("/"));
this.baseDir = fs.makeQualified(new Path(uri, uniqueDir));
String tmpdir = getEnvTmpDir();
if ((tmpdir == null) || tmpdir.isEmpty()) {
LOG.warn("Cannot get valid io.tmpdir, will use /tmp");
tmpdir = "/tmp";
}
this.defaultLocalDir = new File(tmpdir, uniqueDir).getAbsolutePath();
this.defaultStoragePolicyNames = getDefaultStoragePolicyNames();
}
public FileSystem getFileSystem() {
return fs;
}
public LocalFileSystem getLocalFileSystem() {
return localFs;
}
public Path getRoot() {
return rootDir;
}
public Path getBase() {
return baseDir;
}
public String getLocalTmpDir() {
final String scheme = this.uri.toUri().getScheme();
final String key = "fs." + scheme + ".compatibility.local.tmpdir";
final String localDir = conf.get(key, null);
return (localDir != null) ? localDir : defaultLocalDir;
}
public String getPrivilegedUser() {
final String scheme = this.uri.toUri().getScheme();
final String key = "fs." + scheme + ".compatibility.privileged.user";
final String privileged = conf.get(key, null);
return (privileged != null) ? privileged :
conf.get(DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY,
DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT);
}
public String[] getStoragePolicyNames() {
final String scheme = this.uri.toUri().getScheme();
final String key = "fs." + scheme + ".compatibility.storage.policies";
final String storagePolicies = conf.get(key, null);
return (storagePolicies != null) ? storagePolicies.split(",") :
defaultStoragePolicyNames.clone();
}
public String getDelegationTokenRenewer() {
final String scheme = this.uri.toUri().getScheme();
final String key = "fs." + scheme + ".compatibility.delegation.token.renewer";
return conf.get(key, "");
}
private String getEnvTmpDir() {
final String systemDefault = System.getProperty("java.io.tmpdir");
if ((systemDefault == null) || systemDefault.isEmpty()) {
return null;
}
String[] tmpDirs = systemDefault.split(",|" + File.pathSeparator);
List<String> validDirs = Arrays.stream(tmpDirs).filter(
s -> (s != null && !s.isEmpty())
).collect(Collectors.toList());
if (validDirs.isEmpty()) {
return null;
}
final String tmpDir = validDirs.get(
RANDOM.nextInt(validDirs.size()));
return new File(tmpDir).getAbsolutePath();
}
private String[] getDefaultStoragePolicyNames() {
Collection<? extends BlockStoragePolicySpi> policies = null;
try {
policies = fs.getAllStoragePolicies();
} catch (Exception e) {
LOG.warn("Cannot get storage policy", e);
}
if ((policies == null) || policies.isEmpty()) {
return new String[]{"Hot"};
} else {
return policies.stream().map(BlockStoragePolicySpi::getName).toArray(String[]::new);
}
}
}
|
HdfsCompatEnvironment
|
java
|
dropwizard__dropwizard
|
dropwizard-health/src/main/java/io/dropwizard/health/DefaultHealthFactory.java
|
{
"start": 1474,
"end": 8765
}
|
class ____ implements HealthFactory {
private static final Logger LOGGER = LoggerFactory.getLogger(DefaultHealthFactory.class);
private static final String DEFAULT_BASE_NAME = "health-check";
private static final String DEFAULT_PATH = "/health-check";
@JsonProperty
private boolean enabled = true;
@Valid
@NotNull
@JsonProperty
private List<HealthCheckConfiguration> healthChecks = Collections.emptyList();
@JsonProperty
private boolean initialOverallState = true;
@JsonProperty
private boolean delayedShutdownHandlerEnabled = false;
@NotNull
@JsonProperty
private Duration shutdownWaitPeriod = Duration.seconds(15);
@NotNull
@Size(min = 1)
@JsonProperty
private List<String> healthCheckUrlPaths = singletonList(DEFAULT_PATH);
@Valid
@JsonProperty("responseProvider")
private HealthResponseProviderFactory healthResponseProviderFactory =
new JsonHealthResponseProviderFactory();
@Valid
@JsonProperty("responder")
private HealthResponderFactory healthResponderFactory = new ServletHealthResponderFactory();
public boolean isEnabled() {
return enabled;
}
public void setEnabled(boolean enabled) {
this.enabled = enabled;
}
@JsonProperty("healthChecks")
public List<HealthCheckConfiguration> getHealthCheckConfigurations() {
return healthChecks;
}
@JsonProperty("healthChecks")
public void setHealthCheckConfigurations(final List<HealthCheckConfiguration> healthChecks) {
this.healthChecks = healthChecks;
}
public boolean isInitialOverallState() {
return initialOverallState;
}
public void setInitialOverallState(boolean initialOverallState) {
this.initialOverallState = initialOverallState;
}
public boolean isDelayedShutdownHandlerEnabled() {
return delayedShutdownHandlerEnabled;
}
public void setDelayedShutdownHandlerEnabled(final boolean delayedShutdownHandlerEnabled) {
this.delayedShutdownHandlerEnabled = delayedShutdownHandlerEnabled;
}
public Duration getShutdownWaitPeriod() {
return shutdownWaitPeriod;
}
public void setShutdownWaitPeriod(final Duration shutdownWaitPeriod) {
this.shutdownWaitPeriod = shutdownWaitPeriod;
}
public List<String> getHealthCheckUrlPaths() {
return healthCheckUrlPaths;
}
public void setHealthCheckUrlPaths(final List<String> healthCheckUrlPaths) {
this.healthCheckUrlPaths = healthCheckUrlPaths;
}
public HealthResponseProviderFactory getHealthResponseProviderFactory() {
return healthResponseProviderFactory;
}
public void setHealthResponseProviderFactory(HealthResponseProviderFactory healthResponseProviderFactory) {
this.healthResponseProviderFactory = healthResponseProviderFactory;
}
public HealthResponderFactory getHealthResponderFactory() {
return healthResponderFactory;
}
public void setHealthResponderFactory(HealthResponderFactory healthResponderFactory) {
this.healthResponderFactory = healthResponderFactory;
}
@Override
public void configure(final LifecycleEnvironment lifecycle, final ServletEnvironment servlets,
final JerseyEnvironment jersey, final HealthEnvironment health, final ObjectMapper mapper,
final String name) {
if (!isEnabled()) {
LOGGER.info("Health check configuration is disabled.");
return;
}
final MetricRegistry metrics = lifecycle.getMetricRegistry();
final HealthCheckRegistry envHealthChecks = health.healthChecks();
final String fullName = DEFAULT_BASE_NAME + "-" + name;
final List<HealthCheckConfiguration> healthCheckConfigs = getHealthCheckConfigurations();
// setup schedules for configured health checks
final ScheduledExecutorService scheduledHealthCheckExecutor = createScheduledExecutorForHealthChecks(
healthCheckConfigs.size(), metrics, lifecycle, fullName);
final HealthCheckScheduler scheduler = new HealthCheckScheduler(scheduledHealthCheckExecutor);
// configure health manager to receive registered health state listeners from HealthEnvironment (via reference)
final HealthCheckManager healthCheckManager = new HealthCheckManager(healthCheckConfigs, scheduler, metrics,
shutdownWaitPeriod, initialOverallState, health.healthStateListeners());
healthCheckManager.initializeAppHealth();
// setup response provider and responder to respond to health check requests
final HealthResponseProvider responseProvider = healthResponseProviderFactory.build(healthCheckManager,
healthCheckManager, mapper);
healthResponderFactory.configure(fullName, healthCheckUrlPaths, responseProvider, health, jersey, servlets,
mapper);
// register listener for HealthCheckRegistry and setup validator to ensure correct config
envHealthChecks.addListener(healthCheckManager);
lifecycle.manage(new HealthCheckConfigValidator(healthCheckConfigs, envHealthChecks));
// register shutdown handler with Jetty
final Duration shutdownDelay = getShutdownWaitPeriod();
if (isDelayedShutdownHandlerEnabled() && shutdownDelay.toMilliseconds() > 0) {
final DelayedShutdownHandler shutdownHandler = new DelayedShutdownHandler(healthCheckManager);
shutdownHandler.register();
LOGGER.debug("Set up delayed shutdown with delay: {}", shutdownDelay);
}
// Set the health state aggregator on the HealthEnvironment
health.setHealthStateAggregator(healthCheckManager);
LOGGER.debug("Configured ongoing health check monitoring for healthChecks: {}", getHealthCheckConfigurations());
}
private ScheduledExecutorService createScheduledExecutorForHealthChecks(
final int numberOfScheduledHealthChecks,
final MetricRegistry metrics,
final LifecycleEnvironment lifecycle,
final String fullName) {
final AtomicLong threadNum = new AtomicLong(0L);
final ThreadFactory defaultThreadFactory = defaultThreadFactory();
final ThreadFactory threadFactory = (Runnable runnable) -> {
Thread thread = defaultThreadFactory.newThread(runnable);
thread.setName(String.format("%s-%d", fullName, threadNum.incrementAndGet()));
thread.setDaemon(true);
thread.setUncaughtExceptionHandler((t, e) -> LOGGER.error("Thread={} died due to uncaught exception", t, e));
return thread;
};
final InstrumentedThreadFactory instrumentedThreadFactory =
new InstrumentedThreadFactory(threadFactory, metrics);
final ScheduledExecutorService scheduledExecutorService =
lifecycle.scheduledExecutorService(fullName + "-scheduled-executor", instrumentedThreadFactory)
.threads(numberOfScheduledHealthChecks)
.build();
return new InstrumentedScheduledExecutorService(scheduledExecutorService, metrics);
}
}
|
DefaultHealthFactory
|
java
|
apache__camel
|
test-infra/camel-test-infra-arangodb/src/test/java/org/apache/camel/test/infra/arangodb/services/ArangoDBServiceFactory.java
|
{
"start": 2792,
"end": 2933
}
|
class ____ extends ArangoDBLocalContainerInfraService
implements ArangoDBService, ContainerTestService {
}
|
ArangoDBLocalContainerService
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/mapping/generated/GeneratedAnnotationTests.java
|
{
"start": 3480,
"end": 4080
}
|
class ____ {
@Id
public Integer id;
public String name;
@Generated
@ColumnDefault( "current_timestamp" )
public Instant createdAt;
@Generated( event = { EventType.INSERT, EventType.UPDATE } )
@ColumnDefault( "current_timestamp" )
public Instant lastUpdatedAt;
public AuditedEntity() {
}
public AuditedEntity(Integer id, String name) {
this.id = id;
this.name = name;
}
}
private static void waitALittle() {
try {
Thread.sleep( 10 );
}
catch (InterruptedException e) {
throw new HibernateError( "Unexpected wakeup from test sleep" );
}
}
}
|
AuditedEntity
|
java
|
apache__camel
|
components/camel-spring-parent/camel-spring-main/src/test/java/org/apache/camel/spring/MainDummyTest.java
|
{
"start": 1109,
"end": 1700
}
|
class ____ {
@Test
public void testMain() {
assertDoesNotThrow(() -> {
Main main = new Main();
main.start();
// should also be a Camel
CamelContext camel = main.getApplicationContext().getBean(CamelContext.class);
assertNotNull(camel, "Camel should be in Spring");
DummyBean dummy = (DummyBean) main.getApplicationContext().getBean("dummy");
assertNotNull(dummy);
assertEquals("John Doe", dummy.getName());
main.stop();
}
);
}
}
|
MainDummyTest
|
java
|
assertj__assertj-core
|
assertj-core/src/test/java/org/assertj/core/internal/lists/Lists_assertContains_Test.java
|
{
"start": 1836,
"end": 4853
}
|
class ____ extends ListsBaseTest {
private static List<String> actual = newArrayList("Yoda", "Luke", "Leia");
@Test
void should_fail_if_actual_is_null() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> lists.assertContains(someInfo(), null, "Yoda", someIndex()))
.withMessage(actualIsNull());
}
@Test
void should_fail_if_actual_is_empty() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> lists.assertContains(someInfo(), emptyList(), "Yoda",
someIndex()))
.withMessage(actualIsEmpty());
}
@Test
void should_throw_error_if_Index_is_null() {
assertThatNullPointerException().isThrownBy(() -> lists.assertContains(someInfo(), actual, "Yoda", null))
.withMessage("Index should not be null");
}
@Test
void should_throw_error_if_Index_is_out_of_bounds() {
assertThatExceptionOfType(IndexOutOfBoundsException.class).isThrownBy(() -> lists.assertContains(someInfo(), actual,
"Yoda",
atIndex(6)))
.withMessageContaining("Index should be between <0> and <2> (inclusive) but was:%n <6>".formatted());
}
@Test
void should_fail_if_actual_does_not_contain_value_at_index() {
AssertionInfo info = someInfo();
Index index = atIndex(1);
Throwable error = catchThrowable(() -> lists.assertContains(info, actual, "Han", index));
assertThat(error).isInstanceOf(AssertionError.class);
verify(failures).failure(info, shouldContainAtIndex(actual, "Han", index, "Luke"));
}
@Test
void should_pass_if_actual_contains_value_at_index() {
lists.assertContains(someInfo(), actual, "Luke", atIndex(1));
}
@Test
void should_pass_if_actual_contains_value_at_index_according_to_custom_comparison_strategy() {
listsWithCaseInsensitiveComparisonStrategy.assertContains(someInfo(), actual, "Luke", atIndex(1));
listsWithCaseInsensitiveComparisonStrategy.assertContains(someInfo(), actual, "luke", atIndex(1));
listsWithCaseInsensitiveComparisonStrategy.assertContains(someInfo(), actual, "LUKE", atIndex(1));
}
@Test
void should_fail_if_actual_does_not_contain_value_at_index_according_to_custom_comparison_strategy() {
AssertionInfo info = someInfo();
Index index = atIndex(1);
Throwable error = catchThrowable(() -> listsWithCaseInsensitiveComparisonStrategy.assertContains(info, actual, "Han", index));
assertThat(error).isInstanceOf(AssertionError.class);
verify(failures).failure(info, shouldContainAtIndex(actual, "Han", index, "Luke", comparisonStrategy));
}
}
|
Lists_assertContains_Test
|
java
|
spring-projects__spring-boot
|
module/spring-boot-amqp/src/main/java/org/springframework/boot/amqp/autoconfigure/RabbitProperties.java
|
{
"start": 26647,
"end": 28458
}
|
class ____ {
/**
* Whether publishing retries are enabled.
*/
private boolean enabled;
/**
* Maximum number of retry attempts to deliver a message.
*/
private long maxRetries = 3;
/**
* Duration between the first and second attempt to deliver a message.
*/
private Duration initialInterval = Duration.ofMillis(1000);
/**
* Multiplier to apply to the previous retry interval.
*/
private double multiplier = 1.0;
/**
* Maximum duration between attempts.
*/
private Duration maxInterval = Duration.ofMillis(10000);
public boolean isEnabled() {
return this.enabled;
}
public void setEnabled(boolean enabled) {
this.enabled = enabled;
}
public long getMaxRetries() {
return this.maxRetries;
}
public void setMaxRetries(long maxRetries) {
this.maxRetries = maxRetries;
}
public Duration getInitialInterval() {
return this.initialInterval;
}
public void setInitialInterval(Duration initialInterval) {
this.initialInterval = initialInterval;
}
public double getMultiplier() {
return this.multiplier;
}
public void setMultiplier(double multiplier) {
this.multiplier = multiplier;
}
public Duration getMaxInterval() {
return this.maxInterval;
}
public void setMaxInterval(Duration maxInterval) {
this.maxInterval = maxInterval;
}
RetryPolicySettings initializeRetryPolicySettings() {
PropertyMapper map = PropertyMapper.get();
RetryPolicySettings settings = new RetryPolicySettings();
map.from(this::getMaxRetries).to(settings::setMaxRetries);
map.from(this::getInitialInterval).to(settings::setDelay);
map.from(this::getMultiplier).to(settings::setMultiplier);
map.from(this::getMaxInterval).to(settings::setMaxDelay);
return settings;
}
}
public static
|
Retry
|
java
|
google__guava
|
android/guava-testlib/src/com/google/common/collect/testing/TestStringListGenerator.java
|
{
"start": 932,
"end": 1768
}
|
class ____ implements TestListGenerator<String> {
  @Override
  public SampleElements<String> samples() {
    return new Strings();
  }

  @Override
  public List<String> create(Object... elements) {
    // Narrow the untyped varargs into a String[] before delegating.
    String[] strings = new String[elements.length];
    for (int i = 0; i < elements.length; i++) {
      strings[i] = (String) elements[i];
    }
    return create(strings);
  }

  /**
   * Creates a new collection containing the given elements; implement this method instead of {@link
   * #create(Object...)}.
   */
  protected abstract List<String> create(String[] elements);

  @Override
  public String[] createArray(int length) {
    return new String[length];
  }

  /** Returns the original element list, unchanged. */
  @Override
  public List<String> order(List<String> insertionOrder) {
    return insertionOrder;
  }
}
|
TestStringListGenerator
|
java
|
apache__avro
|
lang/java/compiler/src/main/java/org/apache/avro/compiler/specific/SpecificCompiler.java
|
{
"start": 3222,
"end": 3778
}
|
class ____ passed a reference to the instance (`this), and hence, they are
* permitted at most `JVM_METHOD_ARG_LIMIT-1` "parameter units" for their
* arguments.
*
* @see <a href=
* "https://docs.oracle.com/javase/specs/jvms/se7/html/jvms-4.html#jvms-4.10">
* JVM Spec: Section 4.10</a>
*/
private static final int JVM_METHOD_ARG_LIMIT = 255;
/*
* Note: This is protected instead of private only so it's visible for testing.
*/
protected static final int MAX_FIELD_PARAMETER_UNIT_COUNT = JVM_METHOD_ARG_LIMIT - 1;
public
|
are
|
java
|
spring-projects__spring-framework
|
spring-jdbc/src/main/java/org/springframework/jdbc/core/simple/SimpleJdbcInsertOperations.java
|
{
"start": 1137,
"end": 7468
}
|
/**
 * Interface specifying the API for a simple JDBC insert, configured via a fluent
 * builder-style chain and executed with either a Map or an SqlParameterSource.
 */
interface ____ {
/**
 * Specify the table name to be used for the insert.
 * @param tableName the name of the stored table
 * @return this {@code SimpleJdbcInsert} (for method chaining)
 */
SimpleJdbcInsertOperations withTableName(String tableName);
/**
 * Specify the schema name, if any, to be used for the insert.
 * @param schemaName the name of the schema
 * @return this {@code SimpleJdbcInsert} (for method chaining)
 */
SimpleJdbcInsertOperations withSchemaName(String schemaName);
/**
 * Specify the catalog name, if any, to be used for the insert.
 * @param catalogName the name of the catalog
 * @return this {@code SimpleJdbcInsert} (for method chaining)
 */
SimpleJdbcInsertOperations withCatalogName(String catalogName);
/**
 * Specify the column names that the insert statement should be limited to use.
 * @param columnNames one or more column names
 * @return this {@code SimpleJdbcInsert} (for method chaining)
 */
SimpleJdbcInsertOperations usingColumns(String... columnNames);
/**
 * Specify the names of any columns that have auto-generated keys.
 * @param columnNames one or more column names
 * @return this {@code SimpleJdbcInsert} (for method chaining)
 */
SimpleJdbcInsertOperations usingGeneratedKeyColumns(String... columnNames);
/**
 * Specify that SQL identifiers should be quoted.
 * <p>If this method is invoked, the identifier quote string for the underlying
 * database will be used to quote SQL identifiers in generated SQL statements.
 * In this context, SQL identifiers refer to schema, table, and column names.
 * <p>When identifiers are quoted, explicit column names must be supplied via
 * {@link #usingColumns(String...)}. Furthermore, all identifiers for the
 * schema name, table name, and column names must match the corresponding
 * identifiers in the database's metadata regarding casing (mixed case,
 * uppercase, or lowercase).
 * @return this {@code SimpleJdbcInsert} (for method chaining)
 * @since 6.1
 * @see #withSchemaName(String)
 * @see #withTableName(String)
 * @see #usingColumns(String...)
 * @see java.sql.DatabaseMetaData#getIdentifierQuoteString()
 * @see java.sql.DatabaseMetaData#storesMixedCaseIdentifiers()
 * @see java.sql.DatabaseMetaData#storesMixedCaseQuotedIdentifiers()
 * @see java.sql.DatabaseMetaData#storesUpperCaseIdentifiers()
 * @see java.sql.DatabaseMetaData#storesUpperCaseQuotedIdentifiers()
 * @see java.sql.DatabaseMetaData#storesLowerCaseIdentifiers()
 * @see java.sql.DatabaseMetaData#storesLowerCaseQuotedIdentifiers()
 */
SimpleJdbcInsertOperations usingQuotedIdentifiers();
/**
 * Turn off any processing of column meta-data information obtained via JDBC.
 * @return this {@code SimpleJdbcInsert} (for method chaining)
 */
SimpleJdbcInsertOperations withoutTableColumnMetaDataAccess();
/**
 * Include synonyms for the column meta-data lookups via JDBC.
 * <p>Note: This is only necessary to include for Oracle since other databases
 * supporting synonyms seem to include the synonyms automatically.
 * @return this {@code SimpleJdbcInsert} (for method chaining)
 */
SimpleJdbcInsertOperations includeSynonymsForTableColumnMetaData();
/**
 * Execute the insert using the values passed in.
 * @param args a Map containing column names and corresponding values
 * @return the number of rows affected as returned by the JDBC driver
 */
int execute(Map<String, ?> args);
/**
 * Execute the insert using the values passed in.
 * @param parameterSource the SqlParameterSource containing values to use for the insert
 * @return the number of rows affected as returned by the JDBC driver
 */
int execute(SqlParameterSource parameterSource);
/**
 * Execute the insert using the values passed in and return the generated key.
 * <p>This requires that the name of the columns with auto generated keys have been specified.
 * This method will always return a KeyHolder but the caller must verify that it actually
 * contains the generated keys.
 * @param args a Map containing column names and corresponding values
 * @return the generated key value
 */
Number executeAndReturnKey(Map<String, ?> args);
/**
 * Execute the insert using the values passed in and return the generated key.
 * <p>This requires that the name of the columns with auto generated keys have been specified.
 * This method will always return a KeyHolder but the caller must verify that it actually
 * contains the generated keys.
 * @param parameterSource the SqlParameterSource containing values to use for the insert
 * @return the generated key value
 */
Number executeAndReturnKey(SqlParameterSource parameterSource);
/**
 * Execute the insert using the values passed in and return the generated keys.
 * <p>This requires that the name of the columns with auto generated keys have been specified.
 * This method will always return a KeyHolder but the caller must verify that it actually
 * contains the generated keys.
 * @param args a Map containing column names and corresponding values
 * @return the KeyHolder containing all generated keys
 */
KeyHolder executeAndReturnKeyHolder(Map<String, ?> args);
/**
 * Execute the insert using the values passed in and return the generated keys.
 * <p>This requires that the name of the columns with auto generated keys have been specified.
 * This method will always return a KeyHolder but the caller must verify that it actually
 * contains the generated keys.
 * @param parameterSource the SqlParameterSource containing values to use for the insert
 * @return the KeyHolder containing all generated keys
 */
KeyHolder executeAndReturnKeyHolder(SqlParameterSource parameterSource);
/**
 * Execute a batch insert using the batch of values passed in.
 * @param batch an array of Maps containing a batch of column names and corresponding values
 * @return the array of number of rows affected as returned by the JDBC driver
 */
@SuppressWarnings("unchecked")
int[] executeBatch(Map<String, ?>... batch);
/**
 * Execute a batch insert using the batch of values passed in.
 * @param batch an array of SqlParameterSource containing values for the batch
 * @return the array of number of rows affected as returned by the JDBC driver
 */
int[] executeBatch(SqlParameterSource... batch);
}
|
SimpleJdbcInsertOperations
|
java
|
ReactiveX__RxJava
|
src/main/java/io/reactivex/rxjava3/internal/operators/flowable/FlowableReduceWithSingle.java
|
{
"start": 1276,
"end": 2145
}
|
/**
 * Reduces the upstream {@code Publisher} into a single value, starting from a
 * freshly generated seed, and exposes the result as a {@code Single}.
 *
 * @param <T> the upstream value type
 * @param <R> the accumulated result type
 */
class ____<T, R> extends Single<R> {

    final Publisher<T> source;

    final Supplier<R> seedSupplier;

    final BiFunction<R, ? super T, R> reducer;

    public FlowableReduceWithSingle(Publisher<T> source, Supplier<R> seedSupplier, BiFunction<R, ? super T, R> reducer) {
        this.source = source;
        this.seedSupplier = seedSupplier;
        this.reducer = reducer;
    }

    @Override
    protected void subscribeActual(SingleObserver<? super R> observer) {
        // Materialize the seed first; a misbehaving supplier terminates the observer immediately.
        final R initialValue;

        try {
            initialValue = Objects.requireNonNull(seedSupplier.get(), "The seedSupplier returned a null value");
        } catch (Throwable ex) {
            Exceptions.throwIfFatal(ex);
            EmptyDisposable.error(ex, observer);
            return;
        }

        source.subscribe(new ReduceSeedObserver<>(observer, reducer, initialValue));
    }
}
|
FlowableReduceWithSingle
|
java
|
apache__flink
|
flink-tests/src/test/java/org/apache/flink/test/manual/OverwriteObjects.java
|
{
"start": 10567,
"end": 11523
}
|
/**
 * Infinite-style test iterator producing {@code numElements} (key, 1) pairs with
 * keys drawn from a fixed-seed Random in {@code [0, keyRange)}.
 *
 * <p>Note: the SAME {@link Tuple2} instance is returned from every call to
 * {@link #next()} — object reuse is the point of this manual test.
 */
class ____
        implements Iterator<Tuple2<IntValue, IntValue>>, Serializable {

    private int numElements;
    private final int keyRange;

    // Deliberately reused across calls to next(); callers must copy if they retain it.
    private Tuple2<IntValue, IntValue> ret = new Tuple2<>(new IntValue(), new IntValue());

    public TupleIntValueIntValueIterator(int numElements, int keyRange) {
        this.numElements = numElements;
        this.keyRange = keyRange;
    }

    // Fixed seed keeps the generated data deterministic across runs.
    private final Random rnd = new Random(123);

    @Override
    public boolean hasNext() {
        return numElements > 0;
    }

    @Override
    public Tuple2<IntValue, IntValue> next() {
        if (numElements <= 0) {
            // Honor the Iterator contract instead of silently handing back stale data.
            throw new java.util.NoSuchElementException();
        }
        numElements--;
        ret.f0.setValue(rnd.nextInt(keyRange));
        ret.f1.setValue(1);
        return ret;
    }

    @Override
    public void remove() {
        throw new UnsupportedOperationException();
    }
}
private static
|
TupleIntValueIntValueIterator
|
java
|
processing__processing4
|
app/src/processing/app/Base.java
|
{
"start": 30834,
"end": 63487
}
|
class ____ no longer available.");
Messages.err("Incompatible Tool found during tool.init()", ncdfe);
} catch (Error | Exception e) {
System.err.println("An error occurred inside \"" + tool.getMenuTitle() + "\"");
e.printStackTrace();
}
}
}
/** Instantiate one built-in Tool, initialize it, and register it with this Base. */
protected void initInternalTool(Class<?> toolClass) {
  try {
    Tool tool = (Tool) toolClass.getDeclaredConstructor().newInstance();
    tool.init(this);
    internalTools.add(tool);
  } catch (Exception e) {
    e.printStackTrace();
  }
}
/** Remove the Tools menu entries from every open editor window. */
public void clearToolMenus() {
  for (Editor editor : editors) {
    editor.clearToolMenu();
  }
}
/**
 * Rebuild the Tools menu from the internal, core, and contributed tool lists,
 * finishing with the "Manage Tools" entry.
 */
public void populateToolsMenu(JMenu toolsMenu) {
  // If this is the first run, need to build out the lists
  if (internalTools == null) {
    rebuildToolList();
  }
  toolsMenu.removeAll();
  for (Tool tool : internalTools) {
    toolsMenu.add(createToolItem(tool));
  }
  toolsMenu.addSeparator();
  // Core and contributed sections are only added when non-empty,
  // so the menu never shows back-to-back separators.
  if (!coreTools.isEmpty()) {
    for (Tool tool : coreTools) {
      toolsMenu.add(createToolItem(tool));
    }
    toolsMenu.addSeparator();
  }
  if (!contribTools.isEmpty()) {
    for (Tool tool : contribTools) {
      toolsMenu.add(createToolItem(tool));
    }
    toolsMenu.addSeparator();
  }
  JMenuItem manageTools =
    new JMenuItem(Language.text("menu.tools.manage_tools"));
  manageTools.addActionListener(e -> ContributionManager.openTools());
  toolsMenu.add(manageTools);
}
/**
 * Build the menu item for a single Tool. The item disables itself if the
 * Tool throws on run (out-of-date binary or internal error).
 */
JMenuItem createToolItem(final Tool tool) {
  String title = tool.getMenuTitle();
  final JMenuItem item = new JMenuItem(title);
  item.addActionListener(e -> {
    try {
      tool.run();
    } catch (NoSuchMethodError | NoClassDefFoundError ne) {
      // Linkage errors mean the Tool was compiled against an older PDE API.
      Messages.showWarning("Tool out of date",
        tool.getMenuTitle() + " is not compatible with this version of Processing.\n" +
        "Try updating the Mode or contact its author for a new version.", ne);
      Messages.err("Incompatible tool found during tool.run()", ne);
      item.setEnabled(false);
    } catch (Exception ex) {
      activeEditor.statusError("An error occurred inside \"" + tool.getMenuTitle() + "\"");
      ex.printStackTrace();
      item.setEnabled(false);
    }
  });
  return item;
}
// . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
/** Reload the example sets provided by installed contributions. */
void rebuildContribExamples() {
  contribExamples = ExamplesContribution.loadAll(getSketchbookExamplesFolder());
}
/** Returns the cached list of contributed example sets (see rebuildContribExamples). */
public List<ExamplesContribution> getContribExamples() {
  return contribExamples;
}
// . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
/** Identifier of the default (Java) Mode. */
String getDefaultModeIdentifier() {
  // Used to initialize coreModes, so cannot use coreModes[0].getIdentifier()
  return "processing.mode.java.JavaMode";
}
/** Returns the Mode used when no other Mode is specified. */
public Mode getDefaultMode() {
  return coreMode;
}
/**
 * Switch the active editor window to another Mode, reopening or spawning a
 * window as needed.
 * @return true if mode is changed within this window (false if new window)
 */
public boolean changeMode(Mode mode) {
  Mode oldMode = activeEditor.getMode();
  if (oldMode != mode) {
    Sketch sketch = activeEditor.getSketch();
    nextMode = mode;
    if (sketch.isModified()) {
      // If sketch is modified, simpler to just open a new window.
      // https://github.com/processing/processing4/issues/189
      handleNew();
      return false;
    } else if (sketch.isUntitled()) {
      // The current sketch is empty, just close and start fresh.
      // (Otherwise the editor would lose its 'untitled' status.)
      // Safe to do here because of the 'modified' check above.
      handleClose(activeEditor, true);
      handleNew();
    } else {
      // If the current sketch contains file extensions that the new Mode
      // can handle, then write a sketch.properties file with that Mode
      // specified, and reopen. Currently, only used for Java <-> Android.
      if (mode.canEdit(sketch)) {
        sketch.updateModeProperties(nextMode, getDefaultMode());
        handleClose(activeEditor, true);
        Editor editor = handleOpen(sketch.getMainPath());
        if (editor == null) {
          // the Mode change failed (probably code that's out of date)
          // re-open the sketch using the mode we were in before
          sketch.updateModeProperties(oldMode, getDefaultMode());
          handleOpen(sketch.getMainPath());
          return false;
        }
      } else {
        handleNew(); // create a new window with the new Mode
        return false;
      }
    }
  }
  // Against all (or at least most) odds, we were able to reassign the Mode
  return true;
}
/**
 * Look up an installed Mode by its identifier.
 * @return the matching Mode, or null if it is not installed
 */
protected Mode findMode(String id) {
  return getModeList().stream()
    .filter(mode -> mode.getIdentifier().equals(id))
    .findFirst()
    .orElse(null);
}
/**
 * Called when a Mode is uninstalled, in case it's the current Mode.
 * Falls back to the default Mode so later window/sketch creation still works.
 */
public void modeRemoved(Mode mode) {
  if (nextMode == mode) {
    nextMode = getDefaultMode();
  }
}
// . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
/**
 * Create a new untitled document in a new sketch window.
 * The sketch folder lives under the temp/untitled area until first Save.
 */
public void handleNew() {
  try {
    // In 0126, untitled sketches will begin in the temp folder,
    // and then moved to a new location because Save will default to Save As.
    File newbieDir = SketchName.nextFolder(untitledFolder);
    // User was told to go outside or other problem happened inside naming.
    if (newbieDir == null) return;
    // Make the directory for the new sketch
    if (!newbieDir.mkdirs()) {
      throw new IOException("Could not create directory " + newbieDir);
    }
    // Retrieve the sketch name from the folder name (not a great
    // assumption for the future, but overkill to do otherwise for now.)
    String newbieName = newbieDir.getName();
    // Add any template files from the Mode itself
    File newbieFile = nextMode.addTemplateFiles(newbieDir, newbieName);
    // Create sketch properties file if it's not the default mode.
    if (!nextMode.equals(getDefaultMode())) {
      Sketch.updateModeProperties(newbieDir, nextMode, getDefaultMode());
    }
    String path = newbieFile.getAbsolutePath();
    handleOpenUntitled(path);
  } catch (IOException e) {
    Messages.showTrace("That's new to me",
      "A strange and unexplainable error occurred\n" +
      "while trying to create a new sketch.", e, false);
  }
}
/**
 * Prompt for a sketch to open, and open it in a new window.
 * Uses the native file dialog or a Swing chooser depending on preferences;
 * eligible extensions are .pdez bundles plus each installed Mode's default.
 */
public void handleOpenPrompt() {
  final StringList extensions = new StringList();
  // Add support for pdez files
  extensions.append(SKETCH_BUNDLE_EXT);
  // Add the extensions for each installed Mode
  for (Mode mode : getModeList()) {
    extensions.append(mode.getDefaultExtension());
    // not adding aux extensions b/c we're looking for the main
  }
  final String prompt = Language.text("open");
  if (Preferences.getBoolean("chooser.files.native")) { //$NON-NLS-1$
    // use the front-most window frame for placing file dialog
    FileDialog openDialog =
      new FileDialog(activeEditor, prompt, FileDialog.LOAD);
    // Only show .pde files as eligible bachelors
    openDialog.setFilenameFilter((dir, name) -> {
      // confirmed to be working properly [fry 110128]
      for (String ext : extensions) {
        if (name.toLowerCase().endsWith("." + ext)) { //$NON-NLS-1$
          return true;
        }
      }
      return false;
    });
    openDialog.setVisible(true);
    String directory = openDialog.getDirectory();
    String filename = openDialog.getFile();
    if (filename != null) {
      File inputFile = new File(directory, filename);
      handleOpen(inputFile.getAbsolutePath());
    }
  } else {
    // Swing chooser is created lazily and reused across invocations.
    if (openChooser == null) {
      openChooser = new JFileChooser();
      openChooser.setDialogTitle(prompt);
    }
    openChooser.setFileFilter(new javax.swing.filechooser.FileFilter() {
      public boolean accept(File file) {
        // JFileChooser requires you to explicitly say yes to directories
        // as well (unlike the AWT chooser). Useful, but... different.
        // https://github.com/processing/processing/issues/1189
        if (file.isDirectory()) {
          return true;
        }
        for (String ext : extensions) {
          if (file.getName().toLowerCase().endsWith("." + ext)) { //$NON-NLS-1$
            return true;
          }
        }
        return false;
      }
      public String getDescription() {
        return "Processing Sketch";
      }
    });
    if (openChooser.showOpenDialog(activeEditor) == JFileChooser.APPROVE_OPTION) {
      handleOpen(openChooser.getSelectedFile().getAbsolutePath());
    }
  }
}
/**
 * Unpack a sketch bundle (.pdez) into a temp folder and open it untitled.
 * Expects exactly one folder inside the archive; returns null on any failure.
 */
private Editor openSketchBundle(String path) {
  File zipFile = new File(path);
  try {
    untitledFolder.mkdirs();
    // createTempFile is used only to reserve a unique name; the file is
    // replaced by a directory of the same name immediately afterwards.
    File destFolder = File.createTempFile("zip", "tmp", untitledFolder);
    if (!destFolder.delete() || !destFolder.mkdirs()) {
      // Hard to imagine why this would happen, but...
      System.err.println("Could not create temporary folder " + destFolder);
      return null;
    }
    Util.unzip(zipFile, destFolder);
    File[] fileList = destFolder.listFiles(File::isDirectory);
    if (fileList != null) {
      if (fileList.length == 1) {
        File sketchFile = Sketch.findMain(fileList[0], getModeList());
        if (sketchFile != null) {
          return handleOpenUntitled(sketchFile.getAbsolutePath());
        }
      } else {
        System.err.println("Expecting one folder inside " +
          SKETCH_BUNDLE_EXT + " file, found " + fileList.length + ".");
      }
    } else {
      System.err.println("Could not read " + destFolder);
    }
  } catch (IOException e) {
    e.printStackTrace();
  }
  return null; // no luck
}
/**
 * Confirm with the user and then install a contribution bundle (.pdex),
 * running the installation off the EDT with editor status feedback.
 */
private void openContribBundle(String path) {
  EventQueue.invokeLater(() -> {
    Editor editor = getActiveEditor();
    if (editor == null) {
      // Shouldn't really happen, but if it's still null, it's a no-go
      Messages.showWarning("Failure is the only option",
        "Please open an Editor window before installing an extension.");
    } else {
      File contribFile = new File(path);
      String baseName = contribFile.getName();
      baseName = baseName.substring(0, baseName.length() - CONTRIB_BUNDLE_EXT.length());
      int result =
        Messages.showYesNoQuestion(editor, "How to Handle " + CONTRIB_BUNDLE_EXT,
          "Install " + baseName + "?",
          "Libraries, Modes, and Tools should<br>" +
          "only be installed from trusted sources.");
      if (result == JOptionPane.YES_OPTION) {
        editor.statusNotice("Installing " + baseName + "...");
        editor.startIndeterminate();
        // Worker thread does the install; UI updates hop back to the EDT.
        new Thread(() -> {
          try {
            // do the work of the actual install
            LocalContribution contrib =
              AvailableContribution.install(this, new File(path));
            EventQueue.invokeLater(() -> {
              editor.stopIndeterminate();
              if (contrib != null) {
                editor.statusEmpty();
              } else {
                editor.statusError("Could not install " + path);
              }
            });
          } catch (IOException e) {
            EventQueue.invokeLater(() ->
              Messages.showWarning("Exception During Installation",
                "Could not install contrib from " + path, e));
          }
        }).start();
      }
    }
  });
}
/**
 * Return true if it's an obvious sketch folder: only .pde files,
 * and maybe a data folder. Dot files (.DS_Store, ._blah) are ignored.
 */
private boolean smellsLikeSketchFolder(File folder) {
  File[] files = folder.listFiles();
  if (files == null) {  // unreadable, assume badness
    return false;
  }
  for (File file : files) {
    String name = file.getName();
    // An entry is acceptable when it is a dot file, a .pde source, or a
    // directory named "data". The previous condition rejected the data
    // folder, contradicting the contract documented above.
    boolean allowed = name.startsWith(".")
        || name.toLowerCase().endsWith(".pde")
        || (file.isDirectory() && name.equals("data"));
    if (!allowed) {
      return false;
    }
  }
  return true;
}
/**
 * Ask whether a stray .pde should stay where it is or be moved into a
 * properly named sketch folder.
 * @return the (possibly relocated) .pde file, or null if the user canceled
 * @throws IOException if the target folder exists or cannot be created
 */
private File moveLikeSketchFolder(File pdeFile, String baseName) throws IOException {
  Object[] options = {
    "Keep", "Move", "Cancel"
  };
  String prompt =
    "Would you like to keep “" + pdeFile.getParentFile().getName() + "” as the sketch folder,\n" +
    "or move “" + pdeFile.getName() + "” to its own folder?\n" +
    "(Usually, “" + pdeFile.getName() + "” would be stored inside a\n" +
    "sketch folder named “" + baseName + "”.)";
  int result = JOptionPane.showOptionDialog(null,
    prompt,
    "Keep it? Move it?",
    JOptionPane.YES_NO_CANCEL_OPTION,
    JOptionPane.QUESTION_MESSAGE,
    null,
    options,
    options[0]);
  if (result == JOptionPane.YES_OPTION) { // keep
    return pdeFile;
  } else if (result == JOptionPane.NO_OPTION) { // move
    // create properly named folder
    File properFolder = new File(pdeFile.getParent(), baseName);
    if (properFolder.exists()) {
      throw new IOException("A folder named \"" + baseName + "\" " +
        "already exists. Cannot open sketch.");
    }
    if (!properFolder.mkdirs()) {
      throw new IOException("Could not create the sketch folder.");
    }
    // copy the sketch inside
    File properPdeFile = new File(properFolder, pdeFile.getName());
    Util.copyFile(pdeFile, properPdeFile);
    // remove the original file, so user doesn't get confused
    if (!pdeFile.delete()) {
      Messages.err("Could not delete " + pdeFile);
    }
    // update with the new path
    return properPdeFile;
  }
  // Catch all other cases, including Cancel or ESC
  return null;
}
/**
 * Handler for pde:// protocol URIs. Local absolute paths are opened
 * directly; remote .pdez/.pdex locations are downloaded to a temp file
 * first and then opened.
 * @param schemeUri the full URI, including pde://
 * @return the opened Editor, or null if nothing was opened
 */
public Editor handleScheme(String schemeUri) {
  var result = Schema.handleSchema(schemeUri, this);
  if (result != null) {
    return result;
  }
  // Strip the leading "pde://" before interpreting the rest.
  String location = schemeUri.substring(6);
  if (location.length() > 0) {
    // if it leads with a slash, then it's a file url
    if (location.charAt(0) == '/') {
      File file = new File(location);
      if (file.exists()) {
        handleOpen(location); // it's a full path to a file
      } else {
        System.err.println(file + " does not exist.");
      }
    } else {
      // turn it into an https url
      final String url = "https://" + location;
      if (location.toLowerCase().endsWith(".pdez") ||
          location.toLowerCase().endsWith(".pdex")) {
        String extension = location.substring(location.length() - 5);
        try {
          File tempFile = File.createTempFile("scheme", extension);
          if (PApplet.saveStream(tempFile, Util.createInput(url))) {
            return handleOpen(tempFile.getAbsolutePath());
          } else {
            System.err.println("Could not open " + tempFile);
          }
        } catch (IOException e) {
          e.printStackTrace();
        }
      }
    }
  }
  return null;
}
/**
 * Open a sketch from the path specified. Do not use for untitled sketches.
 * Note that the user may have selected/double-clicked any .pde in a sketch.
 * Handles pde:// URIs and .pdez/.pdex bundles, reuses an already-open
 * window when possible, consults sketch.properties for the Mode and main
 * tab, and repairs sketch folders whose name does not match the main file.
 * @return the Editor showing the sketch, or null if it could not be opened
 */
public Editor handleOpen(String path) {
  if (path.startsWith("pde://")) {
    return handleScheme(path);
  }
  if (path.endsWith(SKETCH_BUNDLE_EXT)) {
    return openSketchBundle(path);
  } else if (path.endsWith(CONTRIB_BUNDLE_EXT)) {
    openContribBundle(path);
    return null; // never returning an Editor for a contrib
  }
  File pdeFile = new File(path);
  if (!pdeFile.exists()) {
    System.err.println(path + " does not exist");
    return null;
  }
  // Cycle through open windows to make sure that it's not already open.
  for (Editor editor : editors) {
    // User may have double-clicked any PDE in the sketch folder,
    // so we have to check each open tab (not just the main one).
    // https://github.com/processing/processing/issues/2506
    for (SketchCode tab : editor.getSketch().getCode()) {
      if (tab.getFile().equals(pdeFile)) {
        editor.toFront();
        // move back to the top of the recent list
        Recent.append(editor);
        return editor;
      }
    }
  }
  File parentFolder = pdeFile.getParentFile();
  try {
    // read the sketch.properties file or get an empty Settings object
    Settings props = Sketch.loadProperties(parentFolder);
    if (!props.isEmpty()) {
      // First check for the Mode, because it may not even be available
      String modeIdentifier = props.get("mode.id");
      if (modeIdentifier != null) {
        if (modeIdentifier.equals("galsasson.mode.tweak.TweakMode")) {
          // Tweak Mode has been built into Processing since 2015,
          // but there are some old sketch.properties files out there.
          // https://github.com/processing/processing4/issues/415
          nextMode = getDefaultMode();
          // Clean up sketch.properties and re-save or remove if necessary
          props.remove("mode");
          props.remove("mode.id");
          props.reckon();
        } else {
          // sketch.properties specifies a Mode, see if it's available
          Mode mode = findMode(modeIdentifier);
          if (mode != null) {
            nextMode = mode;
          } else {
            ContributionManager.openModes();
            Messages.showWarning("Missing Mode",
              "You must first install " + props.get("mode") + " Mode to use this sketch.");
            return null;
          }
        }
      }
      String main = props.get("main");
      if (main != null) {
        // this may be exactly what was passed, but override anyway
        String mainPath = new File(parentFolder, main).getAbsolutePath();
        if (!path.equals(mainPath)) {
          // for now, post a warning if the main was different
          System.out.println(path + " selected, but main is " + mainPath);
        }
        return handleOpenInternal(mainPath, false);
      }
    } else {
      // Switch back to defaultMode, because a sketch.properties
      // file is required whenever not using the default Mode.
      // (Unless being called from, say, the Examples frame, which
      // uses a version of this function that takes a Mode object.)
      nextMode = getDefaultMode();
    }
    // Do some checks to make sure the file can be opened, and identify the
    // Mode that it's using. (In 4.0b8, this became the fall-through case.)
    if (!Sketch.isSanitaryName(pdeFile.getName())) {
      Messages.showWarning("You're tricky, but not tricky enough",
        pdeFile.getName() + " is not a valid name for sketch code.\n" +
        "Better to stick to ASCII, no spaces, and make sure\n" +
        "it doesn't start with a number.", null);
      return null;
    }
    // Check if the name of the file matches the parent folder name.
    String baseName = pdeFile.getName();
    int dot = baseName.lastIndexOf('.');
    if (dot == -1) {
      // Shouldn't really be possible, right?
      System.err.println(pdeFile + " does not have an extension.");
      return null;
    }
    baseName = baseName.substring(0, dot);
    if (!baseName.equals(parentFolder.getName())) {
      // Parent folder name does not match, and because sketch.properties
      // did not exist or did not specify it above, need to determine main.
      // Check whether another .pde file has a matching name, and if so,
      // switch to using that instead. Handles when a user selects a .pde
      // file in the open dialog box that isn't the main tab.
      // (Also important to use nextMode here, because the Mode
      // may be set by sketch.properties when it's loaded above.)
      String filename =
        parentFolder.getName() + "." + nextMode.getDefaultExtension();
      File mainFile = new File(parentFolder, filename);
      if (mainFile.exists()) {
        // User was opening the wrong file in a legit sketch folder.
        pdeFile = mainFile;
      } else if (smellsLikeSketchFolder(parentFolder)) {
        // Looks like a sketch folder, set this as the main.
        props.set("main", pdeFile.getName());
        // Save for later use, then fall through.
        props.save();
      } else {
        // If it's not an obvious sketch folder (just .pde files,
        // maybe a data folder) prompt the user whether to
        // 1) move sketch into its own folder, or
        // 2) call this the main, and write sketch.properties.
        File newFile = moveLikeSketchFolder(pdeFile, baseName);
        if (newFile == pdeFile) {
          // User wanted to keep this sketch folder, so update the
          // property for the main tab and write sketch.properties.
          props.set("main", newFile.getName());
          props.save();
        } else if (newFile == null) {
          // User canceled, so exit handleOpen()
          return null;
        } else {
          // User asked to move the sketch file
          pdeFile = newFile;
        }
      }
    }
    // TODO Remove this selector? Seems too messy/precious. [fry 220205]
    // Opting to remove in beta 7, because sketches that use another
    // Mode should have a working sketch.properties. [fry 220302]
    /*
    // If the current Mode cannot open this file, try to find another.
    if (!nextMode.canEdit(pdeFile)) {
      final Mode mode = promptForMode(pdeFile);
      if (mode == null) {
        return null;
      }
      nextMode = mode;
    }
    */
    return handleOpenInternal(pdeFile.getAbsolutePath(), false);
  } catch (IOException e) {
    Messages.showWarning("sketch.properties",
      "Error while reading sketch.properties from\n" + parentFolder, e);
    return null;
  }
}
/**
 * Open a (vetted) sketch location using a particular Mode. Used by the
 * Examples window, because Modes like Python and Android do not have
 * "sketch.properties" files in each example folder.
 * @return the Editor showing the example, or null on failure
 */
public Editor handleOpenExample(String path, Mode mode) {
  nextMode = mode;
  return handleOpenInternal(path, true);
}
/**
 * Open the sketch associated with this .pde file in a new window
 * as an "Untitled" sketch.
 * @param path Path to the pde file for the sketch in question
 * @return the Editor object, so that properties (like 'untitled')
 *         can be set by the caller
 */
protected Editor handleOpenUntitled(String path) {
  return handleOpenInternal(path, true);
}
/**
 * Internal function to actually open the sketch. At this point, the
 * sketch file/folder must have been vetted, and nextMode set properly.
 * Falls back to the default Mode (or a fresh window) when the chosen
 * Mode cannot create an editor, so the user is never left windowless.
 */
protected Editor handleOpenInternal(String path, boolean untitled) {
  try {
    try {
      EditorState state = EditorState.nextEditor(editors);
      Editor editor = nextMode.createEditor(this, path, state);
      // opened successfully, let's go to work
      editor.setUpdatesAvailable(updatesAvailable);
      editor.getSketch().setUntitled(untitled);
      editors.add(editor);
      Recent.append(editor);
      // now that we're ready, show the window
      // (don't do earlier, cuz we might move it based on a window being closed)
      editor.setVisible(true);
      return editor;
    } catch (EditorException ee) {
      if (ee.getMessage() != null) { // null if the user canceled
        Messages.showWarning("Error opening sketch", ee.getMessage(), ee);
      }
    } catch (NoSuchMethodError me) {
      // Linkage error: the Mode was compiled against an older PDE API.
      Messages.showWarning("Mode out of date",
        nextMode.getTitle() + " is not compatible with this version of Processing.\n" +
        "Try updating the Mode or contact its author for a new version.", me);
    } catch (Throwable t) {
      if (nextMode.equals(getDefaultMode())) {
        Messages.showTrace("Serious Problem",
          "An unexpected, unknown, and unrecoverable error occurred\n" +
          "while opening a new editor window. Please report this.", t, true);
      } else {
        Messages.showTrace("Mode Problems",
          "A nasty error occurred while trying to use “" + nextMode.getTitle() + "”.\n" +
          "It may not be compatible with this version of Processing.\n" +
          "Try updating the Mode or contact its author for a new version.", t, false);
      }
    }
    if (editors.isEmpty()) {
      Mode defaultMode = getDefaultMode();
      if (nextMode == defaultMode) {
        // unreachable? hopefully?
        Messages.showError("Editor Problems", """
          An error occurred while trying to change modes.
          We'll have to quit for now because it's an
          unfortunate bit of indigestion with the default Mode.
          """, null);
      } else {
        // Don't leave the user hanging or the PDE locked up
        // https://github.com/processing/processing/issues/4467
        if (untitled) {
          nextMode = defaultMode;
          handleNew();
          return null; // ignored by any caller
        } else {
          // This null response will be kicked back to changeMode(),
          // signaling it to re-open the sketch in the default Mode.
          return null;
        }
      }
    }
  } catch (Throwable t) {
    Messages.showTrace("Terrible News",
      "A serious error occurred while " +
      "trying to create a new editor window.", t,
      nextMode == getDefaultMode()); // quit if default
    nextMode = getDefaultMode();
  }
  return null;
}
// . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
/**
 * Close a sketch as specified by its editor window.
 * @param editor Editor object of the sketch to be closed.
 * @param preventQuit For platforms that must have a window open,
 *                    prevent a quit because a new window will be opened
 *                    (i.e. when upgrading or changing the Mode)
 * @return true if succeeded in closing, false if canceled.
 */
public boolean handleClose(Editor editor, boolean preventQuit) {
  if (!editor.checkModified()) {
    return false;
  }
  // Close the running window, avoid window boogers with multiple sketches
  editor.internalCloseRunner();
  if (editors.size() == 1) {
    // Last window: behavior depends on platform and the macOS menu bar.
    if (Platform.isMacOS()) {
      // If the central menu bar isn't supported on this macOS JVM,
      // we have to do the old behavior. Yuck!
      if (defaultFileMenu == null) {
        Object[] options = { Language.text("prompt.ok"), Language.text("prompt.cancel") };
        int result = JOptionPane.showOptionDialog(editor,
          Toolkit.formatMessage("Are you sure you want to Quit?",
                                "Closing the last open sketch will quit Processing."),
          "Quit",
          JOptionPane.YES_NO_OPTION,
          JOptionPane.QUESTION_MESSAGE,
          null,
          options,
          options[0]);
        if (result == JOptionPane.NO_OPTION ||
            result == JOptionPane.CLOSED_OPTION) {
          return false;
        }
      }
    }
    if (defaultFileMenu == null) {
      if (preventQuit) {
        // need to close this editor, ever so temporarily
        editor.setVisible(false);
        editor.dispose();
        activeEditor = null;
        editors.remove(editor);
      } else {
        // Since this wasn't an actual Quit event, call System.exit()
        System.exit(0);
      }
    } else { // on OS X, update the default file menu
      editor.setVisible(false);
      editor.dispose();
      defaultFileMenu.insert(Recent.getMenu(), 2);
      activeEditor = null;
      editors.remove(editor);
    }
  } else {
    // More than one editor window open,
    // proceed with closing the current window.
    editor.setVisible(false);
    editor.dispose();
    editors.remove(editor);
  }
  return true;
}
/**
* Handler for File → Quit. Note that this is *only* for the
* File menu. On macOS, it will not call System.exit() because the
* application will handle that. If calling this from elsewhere,
* you'll need a System.exit() call on macOS.
* @return false if canceled, true otherwise.
*/
public boolean handleQuit() {
// If quit is canceled, this will be replaced anyway
// by a later handleQuit() that is not canceled.
// storeSketches();
if (handleQuitEach()) {
// make sure running sketches close before quitting
for (Editor editor : editors) {
editor.internalCloseRunner();
}
// Save out the current prefs state
Preferences.save();
// Finished with this guy
Console.shutdown();
if (!Platform.isMacOS()) {
// If this was fired from the menu or an AppleEvent (the Finder),
// then Mac OS X will send the terminate signal itself.
System.exit(0);
}
return true;
}
return false;
}
/**
* Attempt to close each open sketch in preparation for quitting.
* @return false if canceled along the way
*/
protected boolean handleQuitEach() {
// int index = 0;
for (Editor editor : editors) {
// if (editor.checkModified()) {
// // Update to the new/final sketch path for this fella
// storeSketchPath(editor, index);
// index++;
//
// } else {
// return false;
// }
if (!editor.checkModified()) {
return false;
}
}
return true;
}
public void handleRestart() {
File app = Platform.getProcessingApp();
System.out.println(app);
if (app.exists()) {
if (handleQuitEach()) { // only if everything saved
SingleInstance.clearRunning();
// Launch on quit
Runtime.getRuntime().addShutdownHook(new Thread(() -> {
try {
//Runtime.getRuntime().exec(app.getAbsolutePath());
System.out.println("launching");
Process p;
if (Platform.isMacOS()) {
p = Runtime.getRuntime().exec(new String[] {
// -n allows more than one instance to be opened at a time
"open", "-n", "-a", app.getAbsolutePath()
});
} else if (Platform.isLinux()) {
p = Runtime.getRuntime().exec(new String[] {
app.getAbsolutePath()
});
} else {
p = Runtime.getRuntime().exec(new String[] {
"cmd", "/c", app.getAbsolutePath()
});
}
System.out.println("launched with result " + p.waitFor());
System.out.flush();
} catch (Exception e) {
e.printStackTrace();
}
}));
handleQuit();
// handleQuit() does not call System.exit() on macOS
if (Platform.isMacOS()) {
System.exit(0);
}
}
} else {
Messages.showWarning("Cannot Restart",
"Cannot automatically restart because the Processing\n" +
"application has been renamed. Please quit and then restart manually.");
}
}
// . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
// /**
// * Asynchronous version of menu rebuild to be used on save and rename
// * to prevent the
|
is
|
java
|
google__guice
|
extensions/dagger-adapter/test/com/google/inject/daggeradapter/ModuleIncludesTest.java
|
{
"start": 1639,
"end": 1709
}
|
class ____ {}
@Module(includes = Deduplicated.class)
static
|
Includes1
|
java
|
apache__flink
|
flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/plan/stream/sql/ProcessTableFunctionTest.java
|
{
"start": 29395,
"end": 29748
}
|
class ____ extends ProcessTableFunction<String> {
@SuppressWarnings("unused")
public void eval(
@ArgumentHint({SET_SEMANTIC_TABLE, PASS_COLUMNS_THROUGH}) Row r1,
@ArgumentHint({SET_SEMANTIC_TABLE, PASS_COLUMNS_THROUGH}) Row r2) {}
}
/** Testing function. */
public static
|
InvalidPassThroughTables
|
java
|
apache__dubbo
|
dubbo-remoting/dubbo-remoting-http12/src/main/java/org/apache/dubbo/remoting/http12/message/codec/CodecUtils.java
|
{
"start": 1808,
"end": 5326
}
|
class ____ {
private final FrameworkModel frameworkModel;
private final List<HttpMessageDecoderFactory> decoderFactories;
private final List<HttpMessageEncoderFactory> encoderFactories;
private final ConcurrentHashMap<String, Optional<HttpMessageEncoderFactory>> encoderCache =
new ConcurrentHashMap<>();
private final ConcurrentHashMap<String, Optional<HttpMessageDecoderFactory>> decoderCache =
new ConcurrentHashMap<>();
private Set<String> disallowedContentTypes = Collections.emptySet();
public CodecUtils(FrameworkModel frameworkModel) {
this.frameworkModel = frameworkModel;
decoderFactories = frameworkModel.getActivateExtensions(HttpMessageDecoderFactory.class);
encoderFactories = frameworkModel.getActivateExtensions(HttpMessageEncoderFactory.class);
Configuration configuration = ConfigurationUtils.getGlobalConfiguration(frameworkModel.defaultApplication());
String contentTypes = configuration.getString(Constants.H2_SETTINGS_DISALLOWED_CONTENT_TYPES, null);
if (contentTypes != null) {
disallowedContentTypes = new HashSet<>(StringUtils.tokenizeToList(contentTypes));
}
}
public HttpMessageDecoder determineHttpMessageDecoder(URL url, String mediaType) {
return determineHttpMessageDecoderFactory(mediaType)
.orElseThrow(() -> new UnsupportedMediaTypeException(mediaType))
.createCodec(url, frameworkModel, mediaType);
}
public HttpMessageDecoder determineHttpMessageDecoder(String mediaType) {
return determineHttpMessageDecoder(null, mediaType);
}
public HttpMessageEncoder determineHttpMessageEncoder(URL url, String mediaType) {
return determineHttpMessageEncoderFactory(mediaType)
.orElseThrow(() -> new UnsupportedMediaTypeException(mediaType))
.createCodec(url, frameworkModel, mediaType);
}
public HttpMessageEncoder determineHttpMessageEncoder(String mediaType) {
return determineHttpMessageEncoder(null, mediaType);
}
public Optional<HttpMessageDecoderFactory> determineHttpMessageDecoderFactory(String mediaType) {
Assert.notNull(mediaType, "mediaType must not be null");
return ConcurrentHashMapUtils.computeIfAbsent(decoderCache, mediaType, k -> {
for (HttpMessageDecoderFactory factory : decoderFactories) {
if (factory.supports(k)
&& !disallowedContentTypes.contains(factory.mediaType().getName())) {
return Optional.of(factory);
}
}
return Optional.empty();
});
}
public Optional<HttpMessageEncoderFactory> determineHttpMessageEncoderFactory(String mediaType) {
Assert.notNull(mediaType, "mediaType must not be null");
return ConcurrentHashMapUtils.computeIfAbsent(encoderCache, mediaType, k -> {
for (HttpMessageEncoderFactory factory : encoderFactories) {
if (factory.supports(k)
&& !disallowedContentTypes.contains(factory.mediaType().getName())) {
return Optional.of(factory);
}
}
return Optional.empty();
});
}
public List<HttpMessageDecoderFactory> getDecoderFactories() {
return decoderFactories;
}
public List<HttpMessageEncoderFactory> getEncoderFactories() {
return encoderFactories;
}
}
|
CodecUtils
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/hybrid/tiered/netty/TieredStorageNettyService.java
|
{
"start": 1511,
"end": 2690
}
|
interface ____ {
/**
* {@link TierProducerAgent} will provide a callback named {@link NettyServiceProducer} to
* register to {@link TieredStorageNettyService}.
*
* @param partitionId partition id indicates the unique id of {@link TieredResultPartition}.
* @param serviceProducer serviceProducer is a callback from {@link TierProducerAgent} and used
* to register a {@link NettyConnectionWriter} and disconnect the netty connection.
*/
void registerProducer(
TieredStoragePartitionId partitionId, NettyServiceProducer serviceProducer);
/**
* {@link TierConsumerAgent} will register to {@link TieredStorageNettyService} and get a future
* of {@link NettyConnectionReader}.
*
* @param partitionId partition id indicates the unique id of {@link TieredResultPartition}.
* @param subpartitionId subpartition id indicates the unique id of subpartition.
* @return the future of netty connection reader.
*/
CompletableFuture<NettyConnectionReader> registerConsumer(
TieredStoragePartitionId partitionId, TieredStorageSubpartitionId subpartitionId);
}
|
TieredStorageNettyService
|
java
|
elastic__elasticsearch
|
x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/jinaai/JinaAIServiceFields.java
|
{
"start": 321,
"end": 406
}
|
class ____ {
static final int EMBEDDING_MAX_BATCH_SIZE = 2048;
}
|
JinaAIServiceFields
|
java
|
apache__camel
|
components/camel-bindy/src/main/java/org/apache/camel/dataformat/bindy/util/AnnotationModelLoader.java
|
{
"start": 1473,
"end": 2821
}
|
class ____ {
private PackageScanClassResolver resolver;
private PackageScanFilter filter;
private Set<Class<? extends Annotation>> annotations;
public AnnotationModelLoader(PackageScanClassResolver resolver) {
this.resolver = resolver;
annotations = new LinkedHashSet<>();
annotations.add(CsvRecord.class);
annotations.add(Link.class);
annotations.add(Message.class);
annotations.add(Section.class);
annotations.add(FixedLengthRecord.class);
}
public AnnotationModelLoader(PackageScanClassResolver resolver, PackageScanFilter filter) {
this(resolver);
this.filter = filter;
}
public Set<Class<?>> loadModels(String... packageNames) {
Set<Class<?>> results = resolver.findAnnotated(annotations, packageNames);
//TODO; this logic could be moved into the PackageScanClassResolver by creating:
// findAnnotated(annotations, packageNames, filter)
Set<Class<?>> resultsToRemove = new HashSet<>();
if (filter != null) {
for (Class<?> clazz : results) {
if (!filter.matches(clazz)) {
resultsToRemove.add(clazz);
}
}
}
results.removeAll(resultsToRemove);
return results;
}
}
|
AnnotationModelLoader
|
java
|
spring-projects__spring-security
|
config/src/test/java/org/springframework/security/config/annotation/authentication/configuration/AuthenticationConfigurationTests.java
|
{
"start": 19097,
"end": 19264
}
|
class ____
extends DefaultOrderGlobalAuthenticationConfigurerAdapter {
}
@Order(Ordered.HIGHEST_PRECEDENCE)
static
|
LowestOrderGlobalAuthenticationConfigurerAdapter
|
java
|
spring-projects__spring-framework
|
spring-webmvc/src/main/java/org/springframework/web/servlet/support/ServletUriComponentsBuilder.java
|
{
"start": 1402,
"end": 1783
}
|
class ____ not extract
* {@code "Forwarded"} and {@code "X-Forwarded-*"} headers that specify the
* client-originated address. Please, use
* {@link org.springframework.web.filter.ForwardedHeaderFilter
* ForwardedHeaderFilter}, or similar from the underlying server, to extract
* and use such headers, or to discard them.
*
* @author Rossen Stoyanchev
* @since 3.1
*/
public
|
do
|
java
|
assertj__assertj-core
|
assertj-core/src/test/java/org/assertj/core/internal/booleanarrays/BooleanArrays_assertContainsSubsequence_Test.java
|
{
"start": 1726,
"end": 3305
}
|
class ____ extends BooleanArraysBaseTest {
@Override
@BeforeEach
public void setUp() {
super.setUp();
actual = arrayOf(true, false, false, true);
}
@Test
void should_fail_if_actual_is_null() {
// WHEN
var assertionError = expectAssertionError(() -> arrays.assertContainsSubsequence(INFO, null, arrayOf(true)));
// THEN
then(assertionError).hasMessage(actualIsNull());
}
@Test
void should_throw_error_if_subsequence_is_null() {
// WHEN/THEN
assertThatNullPointerException().isThrownBy(() -> arrays.assertContainsSubsequence(INFO, actual, null))
.withMessage(valuesToLookForIsNull());
}
@Test
void should_pass_if_actual_and_given_values_are_empty() {
// GIVEN
actual = emptyArray();
// WHEN/THEN
arrays.assertContainsSubsequence(INFO, actual, emptyArray());
}
@Test
void should_pass_if_actual_contains_given_subsequence() {
// GIVEN
boolean[] subsequence = arrayOf(true, false);
// WHEN/THEN
arrays.assertContainsSubsequence(INFO, actual, subsequence);
}
@Test
void should_fail_if_actual_contains_first_elements_of_subsequence_but_not_whole_subsequence() {
// GIVEN
actual = arrayOf(false, false);
boolean[] subsequence = { false, true };
// WHEN
expectAssertionError(() -> arrays.assertContainsSubsequence(INFO, actual, subsequence));
// THEN
verify(failures).failure(INFO, shouldContainSubsequence(actual, subsequence, 1, StandardComparisonStrategy.instance()));
}
}
|
BooleanArrays_assertContainsSubsequence_Test
|
java
|
google__guice
|
core/src/com/google/inject/internal/aop/AbstractGlueGenerator.java
|
{
"start": 1551,
"end": 2643
}
|
class ____ a trampoline that accepts an index, context object, and argument array:
*
* <pre>
* public static Object GUICE$TRAMPOLINE(int index, Object context, Object[] args) {
* switch (index) {
* case 0: {
* return ...;
* }
* case 1: {
* return ...;
* }
* }
* return null;
* }
* </pre>
*
* Each indexed statement in the trampoline invokes a constructor or method, returning the result.
* The expected context object depends on the statement; it could be the invocation target, some
* additional constructor context, or it may be unused. Arguments are unpacked from the array onto
* the call stack, unboxing or casting them as necessary. Primitive results are autoboxed before
* being returned.
*
* <p>Where possible the trampoline is converted into a lookup {@link Function} mapping an integer
* to an invoker function, each invoker represented as a {@link BiFunction} that accepts a context
* object plus argument array and returns the result. These functional interfaces are used to avoid
* introducing a dependency from the glue
|
has
|
java
|
spring-projects__spring-framework
|
framework-docs/src/main/java/org/springframework/docs/web/webmvc/mvcconfig/mvcconfigviewresolvers/FreeMarkerConfiguration.java
|
{
"start": 1185,
"end": 1647
}
|
class ____ implements WebMvcConfigurer {
@Override
public void configureViewResolvers(ViewResolverRegistry registry) {
registry.enableContentNegotiation(new JacksonJsonView());
registry.freeMarker().cache(false);
}
@Bean
public FreeMarkerConfigurer freeMarkerConfigurer() {
FreeMarkerConfigurer configurer = new FreeMarkerConfigurer();
configurer.setTemplateLoaderPath("/freemarker");
return configurer;
}
}
// end::snippet[]
|
FreeMarkerConfiguration
|
java
|
processing__processing4
|
java/src/processing/mode/java/preproc/PreprocessIssueMessageSimplifier.java
|
{
"start": 10784,
"end": 10951
}
|
class ____ message simplification strategies that check for an equal number of
* occurrences for two characters like "(" and ")".
* </p>
*/
protected static
|
for
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/scheduler/SchedulerBase.java
|
{
"start": 7882,
"end": 53140
}
|
class ____ implements SchedulerNG, CheckpointScheduling {
private final Logger log;
private final JobGraph jobGraph;
protected final JobInfo jobInfo;
private final ExecutionGraph executionGraph;
private final SchedulingTopology schedulingTopology;
protected final StateLocationRetriever stateLocationRetriever;
protected final InputsLocationsRetriever inputsLocationsRetriever;
private final CompletedCheckpointStore completedCheckpointStore;
private final CheckpointsCleaner checkpointsCleaner;
private final CheckpointIDCounter checkpointIdCounter;
protected final JobManagerJobMetricGroup jobManagerJobMetricGroup;
protected final ExecutionVertexVersioner executionVertexVersioner;
private final KvStateHandler kvStateHandler;
private final ExecutionGraphHandler executionGraphHandler;
protected final OperatorCoordinatorHandler operatorCoordinatorHandler;
private final ComponentMainThreadExecutor mainThreadExecutor;
private final BoundedFIFOQueue<RootExceptionHistoryEntry> exceptionHistory;
private RootExceptionHistoryEntry latestRootExceptionEntry;
private final ExecutionGraphFactory executionGraphFactory;
private final MetricOptions.JobStatusMetricsSettings jobStatusMetricsSettings;
private final DeploymentStateTimeMetrics deploymentStateTimeMetrics;
private final List<ExecutionStatusMetricsRegistrar> executionStateMetricsRegistrars;
private final VertexEndOfDataListener vertexEndOfDataListener;
public SchedulerBase(
final Logger log,
final JobGraph jobGraph,
final Executor ioExecutor,
final Configuration jobMasterConfiguration,
final CheckpointsCleaner checkpointsCleaner,
final CheckpointRecoveryFactory checkpointRecoveryFactory,
final JobManagerJobMetricGroup jobManagerJobMetricGroup,
final ExecutionVertexVersioner executionVertexVersioner,
long initializationTimestamp,
final ComponentMainThreadExecutor mainThreadExecutor,
final JobStatusListener jobStatusListener,
final ExecutionGraphFactory executionGraphFactory,
final VertexParallelismStore vertexParallelismStore,
ExecutionPlanSchedulingContext executionPlanSchedulingContext)
throws Exception {
this.log = checkNotNull(log);
this.jobGraph = checkNotNull(jobGraph);
this.jobInfo = new JobInfoImpl(jobGraph.getJobID(), jobGraph.getName());
this.executionGraphFactory = executionGraphFactory;
this.jobManagerJobMetricGroup = checkNotNull(jobManagerJobMetricGroup);
this.executionVertexVersioner = checkNotNull(executionVertexVersioner);
this.mainThreadExecutor = mainThreadExecutor;
this.checkpointsCleaner = checkpointsCleaner;
this.completedCheckpointStore =
SchedulerUtils.createCompletedCheckpointStoreIfCheckpointingIsEnabled(
jobGraph,
jobMasterConfiguration,
checkNotNull(checkpointRecoveryFactory),
ioExecutor,
log);
this.checkpointIdCounter =
SchedulerUtils.createCheckpointIDCounterIfCheckpointingIsEnabled(
jobGraph, checkNotNull(checkpointRecoveryFactory));
this.jobStatusMetricsSettings =
MetricOptions.JobStatusMetricsSettings.fromConfiguration(jobMasterConfiguration);
this.deploymentStateTimeMetrics =
new DeploymentStateTimeMetrics(jobGraph.getJobType(), jobStatusMetricsSettings);
this.executionStateMetricsRegistrars = new ArrayList<>(2);
this.executionStateMetricsRegistrars.add(
new DeploymentStateTimeMetrics(jobGraph.getJobType(), jobStatusMetricsSettings));
if (jobGraph.getJobType() == JobType.STREAMING) {
this.executionStateMetricsRegistrars.add(
new AllSubTasksRunningOrFinishedStateTimeMetrics(
jobGraph.getJobType(), jobStatusMetricsSettings));
}
final CheckpointStatsTracker checkpointStatsTracker =
SchedulerUtils.createCheckpointStatsTrackerIfCheckpointingIsEnabled(
jobGraph,
() ->
new DefaultCheckpointStatsTracker(
jobMasterConfiguration.get(
WebOptions.CHECKPOINTS_HISTORY_SIZE),
jobManagerJobMetricGroup,
jobMasterConfiguration.get(CHECKPOINT_SPAN_DETAIL_LEVEL),
null));
this.executionGraph =
createAndRestoreExecutionGraph(
completedCheckpointStore,
checkpointsCleaner,
checkpointIdCounter,
checkpointStatsTracker,
initializationTimestamp,
mainThreadExecutor,
jobStatusListener,
vertexParallelismStore,
executionPlanSchedulingContext);
this.schedulingTopology = executionGraph.getSchedulingTopology();
stateLocationRetriever =
executionVertexId ->
getExecutionVertex(executionVertexId).getPreferredLocationBasedOnState();
inputsLocationsRetriever =
new ExecutionGraphToInputsLocationsRetrieverAdapter(executionGraph);
this.kvStateHandler = new KvStateHandler(executionGraph);
this.executionGraphHandler =
new ExecutionGraphHandler(executionGraph, log, ioExecutor, this.mainThreadExecutor);
this.operatorCoordinatorHandler =
new DefaultOperatorCoordinatorHandler(executionGraph, this::handleGlobalFailure);
operatorCoordinatorHandler.initializeOperatorCoordinators(this.mainThreadExecutor);
this.exceptionHistory =
new BoundedFIFOQueue<>(
jobMasterConfiguration.get(WebOptions.MAX_EXCEPTION_HISTORY_SIZE));
this.vertexEndOfDataListener = new VertexEndOfDataListener(executionGraph);
}
private void shutDownCheckpointServices(JobStatus jobStatus) {
Exception exception = null;
try {
completedCheckpointStore.shutdown(jobStatus, checkpointsCleaner);
} catch (Exception e) {
exception = e;
}
try {
checkpointIdCounter.shutdown(jobStatus).get();
} catch (Exception e) {
exception = ExceptionUtils.firstOrSuppressed(e, exception);
}
if (exception != null) {
log.error("Error while shutting down checkpoint services.", exception);
}
}
private static int normalizeParallelism(int parallelism) {
if (parallelism == ExecutionConfig.PARALLELISM_DEFAULT) {
return 1;
}
return parallelism;
}
/**
* Get a default value to use for a given vertex's max parallelism if none was specified.
*
* @param vertex the vertex to compute a default max parallelism for
* @return the computed max parallelism
*/
public static int getDefaultMaxParallelism(JobVertex vertex) {
return getDefaultMaxParallelism(vertex.getParallelism());
}
public static int getDefaultMaxParallelism(int parallelism) {
return KeyGroupRangeAssignment.computeDefaultMaxParallelism(
normalizeParallelism(parallelism));
}
public static VertexParallelismStore computeVertexParallelismStore(
Iterable<JobVertex> vertices, Function<JobVertex, Integer> defaultMaxParallelismFunc) {
return computeVertexParallelismStore(
vertices, defaultMaxParallelismFunc, SchedulerBase::normalizeParallelism);
}
/**
* Compute the {@link VertexParallelismStore} for all given vertices, which will set defaults
* and ensure that the returned store contains valid parallelisms, with a custom function for
* default max parallelism calculation and a custom function for normalizing vertex parallelism.
*
* @param vertices the vertices to compute parallelism for
* @param defaultMaxParallelismFunc a function for computing a default max parallelism if none
* is specified on a given vertex
* @param normalizeParallelismFunc a function for normalizing vertex parallelism
* @return the computed parallelism store
*/
public static VertexParallelismStore computeVertexParallelismStore(
Iterable<JobVertex> vertices,
Function<JobVertex, Integer> defaultMaxParallelismFunc,
Function<Integer, Integer> normalizeParallelismFunc) {
DefaultVertexParallelismStore store = new DefaultVertexParallelismStore();
for (JobVertex vertex : vertices) {
int parallelism = normalizeParallelismFunc.apply(vertex.getParallelism());
int maxParallelism = vertex.getMaxParallelism();
final boolean autoConfigured;
// if no max parallelism was configured by the user, we calculate and set a default
if (maxParallelism == JobVertex.MAX_PARALLELISM_DEFAULT) {
maxParallelism = defaultMaxParallelismFunc.apply(vertex);
autoConfigured = true;
} else {
autoConfigured = false;
}
VertexParallelismInformation parallelismInfo =
new DefaultVertexParallelismInfo(
parallelism,
maxParallelism,
// Allow rescaling if the max parallelism was not set explicitly by the
// user
(newMax) ->
autoConfigured
? Optional.empty()
: Optional.of(
"Cannot override a configured max parallelism."));
store.setParallelismInfo(vertex.getID(), parallelismInfo);
}
return store;
}
/**
* Compute the {@link VertexParallelismStore} for all given vertices, which will set defaults
* and ensure that the returned store contains valid parallelisms.
*
* @param vertices the vertices to compute parallelism for
* @return the computed parallelism store
*/
public static VertexParallelismStore computeVertexParallelismStore(
Iterable<JobVertex> vertices) {
return computeVertexParallelismStore(vertices, SchedulerBase::getDefaultMaxParallelism);
}
/**
* Compute the {@link VertexParallelismStore} for all vertices of a given job graph, which will
* set defaults and ensure that the returned store contains valid parallelisms.
*
* @param jobGraph the job graph to retrieve vertices from
* @return the computed parallelism store
*/
public static VertexParallelismStore computeVertexParallelismStore(JobGraph jobGraph) {
return computeVertexParallelismStore(jobGraph.getVertices());
}
private ExecutionGraph createAndRestoreExecutionGraph(
CompletedCheckpointStore completedCheckpointStore,
CheckpointsCleaner checkpointsCleaner,
CheckpointIDCounter checkpointIdCounter,
CheckpointStatsTracker checkpointStatsTracker,
long initializationTimestamp,
ComponentMainThreadExecutor mainThreadExecutor,
JobStatusListener jobStatusListener,
VertexParallelismStore vertexParallelismStore,
ExecutionPlanSchedulingContext executionPlanSchedulingContext)
throws Exception {
final ExecutionStateUpdateListener combinedExecutionStateUpdateListener;
if (executionStateMetricsRegistrars.size() == 1) {
combinedExecutionStateUpdateListener = executionStateMetricsRegistrars.get(0);
} else {
combinedExecutionStateUpdateListener =
ExecutionStateUpdateListener.combine(
executionStateMetricsRegistrars.toArray(
new ExecutionStateUpdateListener[0]));
}
final ExecutionGraph newExecutionGraph =
executionGraphFactory.createAndRestoreExecutionGraph(
jobGraph,
completedCheckpointStore,
checkpointsCleaner,
checkpointIdCounter,
checkpointStatsTracker,
TaskDeploymentDescriptorFactory.PartitionLocationConstraint.fromJobType(
jobGraph.getJobType()),
initializationTimestamp,
new DefaultVertexAttemptNumberStore(),
vertexParallelismStore,
combinedExecutionStateUpdateListener,
getMarkPartitionFinishedStrategy(),
executionPlanSchedulingContext,
log);
newExecutionGraph.setInternalTaskFailuresListener(
new UpdateSchedulerNgOnInternalFailuresListener(this));
newExecutionGraph.registerJobStatusListener(jobStatusListener);
newExecutionGraph.start(mainThreadExecutor);
return newExecutionGraph;
}
protected void resetForNewExecutions(final Collection<ExecutionVertexID> vertices) {
vertices.stream().forEach(this::resetForNewExecution);
}
protected void resetForNewExecution(final ExecutionVertexID executionVertexId) {
getExecutionVertex(executionVertexId).resetForNewExecution();
}
protected void restoreState(
final Set<ExecutionVertexID> vertices, final boolean isGlobalRecovery)
throws Exception {
vertexEndOfDataListener.restoreVertices(vertices);
final CheckpointCoordinator checkpointCoordinator =
executionGraph.getCheckpointCoordinator();
if (checkpointCoordinator == null) {
// batch failover case - we only need to notify the OperatorCoordinators,
// not do any actual state restore
if (isGlobalRecovery) {
notifyCoordinatorsOfEmptyGlobalRestore();
} else {
notifyCoordinatorsOfSubtaskRestore(
getInvolvedExecutionJobVerticesAndSubtasks(vertices),
OperatorCoordinator.NO_CHECKPOINT);
}
return;
}
// if there is checkpointed state, reload it into the executions
// abort pending checkpoints to
// i) enable new checkpoint triggering without waiting for last checkpoint expired.
// ii) ensure the EXACTLY_ONCE semantics if needed.
checkpointCoordinator.abortPendingCheckpoints(
new CheckpointException(CheckpointFailureReason.JOB_FAILOVER_REGION));
if (isGlobalRecovery) {
final Set<ExecutionJobVertex> jobVerticesToRestore =
getInvolvedExecutionJobVertices(vertices);
checkpointCoordinator.restoreLatestCheckpointedStateToAll(jobVerticesToRestore, true);
} else {
final Map<ExecutionJobVertex, IntArrayList> subtasksToRestore =
getInvolvedExecutionJobVerticesAndSubtasks(vertices);
final OptionalLong restoredCheckpointId =
checkpointCoordinator.restoreLatestCheckpointedStateToSubtasks(
subtasksToRestore.keySet());
// Ideally, the Checkpoint Coordinator would call OperatorCoordinator.resetSubtask, but
// the Checkpoint Coordinator is not aware of subtasks in a local failover. It always
// assigns state to all subtasks, and for the subtask execution attempts that are still
// running (or not waiting to be deployed) the state assignment has simply no effect.
// Because of that, we need to do the "subtask restored" notification here.
// Once the Checkpoint Coordinator is properly aware of partial (region) recovery,
// this code should move into the Checkpoint Coordinator.
final long checkpointId =
restoredCheckpointId.orElse(OperatorCoordinator.NO_CHECKPOINT);
notifyCoordinatorsOfSubtaskRestore(subtasksToRestore, checkpointId);
}
}
private void notifyCoordinatorsOfSubtaskRestore(
final Map<ExecutionJobVertex, IntArrayList> restoredSubtasks, final long checkpointId) {
for (final Map.Entry<ExecutionJobVertex, IntArrayList> vertexSubtasks :
restoredSubtasks.entrySet()) {
final ExecutionJobVertex jobVertex = vertexSubtasks.getKey();
final IntArrayList subtasks = vertexSubtasks.getValue();
final Collection<OperatorCoordinatorHolder> coordinators =
jobVertex.getOperatorCoordinators();
if (coordinators.isEmpty()) {
continue;
}
while (!subtasks.isEmpty()) {
final int subtask =
subtasks.removeLast(); // this is how IntArrayList implements iterations
for (final OperatorCoordinatorHolder opCoordinator : coordinators) {
opCoordinator.subtaskReset(subtask, checkpointId);
}
}
}
}
private void notifyCoordinatorsOfEmptyGlobalRestore() throws Exception {
for (final ExecutionJobVertex ejv : getExecutionGraph().getAllVertices().values()) {
if (!ejv.isInitialized()) {
continue;
}
for (final OperatorCoordinatorHolder coordinator : ejv.getOperatorCoordinators()) {
coordinator.resetToCheckpoint(OperatorCoordinator.NO_CHECKPOINT, null);
}
}
}
private Set<ExecutionJobVertex> getInvolvedExecutionJobVertices(
final Set<ExecutionVertexID> executionVertices) {
final Set<ExecutionJobVertex> tasks = new HashSet<>();
for (ExecutionVertexID executionVertexID : executionVertices) {
final ExecutionVertex executionVertex = getExecutionVertex(executionVertexID);
tasks.add(executionVertex.getJobVertex());
}
return tasks;
}
private Map<ExecutionJobVertex, IntArrayList> getInvolvedExecutionJobVerticesAndSubtasks(
final Set<ExecutionVertexID> executionVertices) {
final HashMap<ExecutionJobVertex, IntArrayList> result = new HashMap<>();
for (ExecutionVertexID executionVertexID : executionVertices) {
final ExecutionVertex executionVertex = getExecutionVertex(executionVertexID);
final IntArrayList subtasks =
result.computeIfAbsent(
executionVertex.getJobVertex(), (key) -> new IntArrayList(32));
subtasks.add(executionVertex.getParallelSubtaskIndex());
}
return result;
}
protected void setGlobalFailureCause(@Nullable final Throwable cause, long timestamp) {
if (cause != null) {
executionGraph.initFailureCause(cause, timestamp);
}
}
protected ComponentMainThreadExecutor getMainThreadExecutor() {
return mainThreadExecutor;
}
protected void failJob(
Throwable cause, long timestamp, CompletableFuture<Map<String, String>> failureLabels) {
incrementVersionsOfAllVertices();
cancelAllPendingSlotRequestsInternal();
executionGraph.failJob(cause, timestamp);
getJobTerminationFuture().thenRun(() -> archiveGlobalFailure(cause, failureLabels));
}
protected final SchedulingTopology getSchedulingTopology() {
return schedulingTopology;
}
protected final ResultPartitionAvailabilityChecker getResultPartitionAvailabilityChecker() {
return executionGraph.getResultPartitionAvailabilityChecker();
}
protected final void transitionToRunning() {
executionGraph.transitionToRunning();
}
public ExecutionVertex getExecutionVertex(final ExecutionVertexID executionVertexId) {
return executionGraph
.getAllVertices()
.get(executionVertexId.getJobVertexId())
.getTaskVertices()[executionVertexId.getSubtaskIndex()];
}
public ExecutionJobVertex getExecutionJobVertex(final JobVertexID jobVertexId) {
return executionGraph.getAllVertices().get(jobVertexId);
}
protected JobGraph getJobGraph() {
return jobGraph;
}
protected abstract long getNumberOfRestarts();
protected abstract long getNumberOfRescales();
protected MarkPartitionFinishedStrategy getMarkPartitionFinishedStrategy() {
// blocking partition always need mark finished.
return ResultPartitionType::isBlockingOrBlockingPersistentResultPartition;
}
/**
 * Bumps the modification version of every execution vertex in the scheduling topology,
 * invalidating asynchronous operations that were started under older versions.
 *
 * @return the newly recorded version per execution vertex
 */
private Map<ExecutionVertexID, ExecutionVertexVersion> incrementVersionsOfAllVertices() {
    return executionVertexVersioner.recordVertexModifications(
            IterableUtils.toStream(schedulingTopology.getVertices())
                    .map(SchedulingExecutionVertex::getId)
                    .collect(Collectors.toSet()));
}
/** Cancels all slot requests that have not been fulfilled yet. */
protected abstract void cancelAllPendingSlotRequestsInternal();

/** Transitions the execution graph from {@code current} to {@code newState}. */
protected void transitionExecutionGraphState(
        final JobStatus current, final JobStatus newState) {
    executionGraph.transitionState(current, newState);
}
@VisibleForTesting
CheckpointCoordinator getCheckpointCoordinator() {
    // May be null, e.g. when checkpointing is not configured (see null checks in
    // stopCheckpointScheduler/startCheckpointScheduler).
    return executionGraph.getCheckpointCoordinator();
}

/**
 * ExecutionGraph is exposed to make it easier to rework tests to be based on the new scheduler.
 * ExecutionGraph is expected to be used only for state check. Yet at the moment, before all the
 * actions are factored out from ExecutionGraph and its sub-components, some actions may still
 * be performed directly on it.
 */
@VisibleForTesting
public ExecutionGraph getExecutionGraph() {
    return executionGraph;
}
// ------------------------------------------------------------------------
// SchedulerNG
// ------------------------------------------------------------------------
/**
 * Starts scheduling: registers job-level metrics, starts all operator coordinators, and then
 * hands off to the subclass-specific scheduling logic. Must run in the main thread.
 */
@Override
public final void startScheduling() {
    mainThreadExecutor.assertRunningInMainThread();
    registerJobMetrics(
            jobManagerJobMetricGroup,
            executionGraph,
            this::getNumberOfRestarts,
            this::getNumberOfRescales,
            executionStateMetricsRegistrars,
            executionGraph::registerJobStatusListener,
            executionGraph.getStatusTimestamp(JobStatus.INITIALIZING),
            jobStatusMetricsSettings);
    operatorCoordinatorHandler.startAllOperatorCoordinators();
    startSchedulingInternal();
}
/**
 * Registers all job-level metrics on the given metric group: up/down time, restart and rescale
 * counters, job status metrics, plus any additional registrars supplied by the caller.
 *
 * @param metrics group to register the metrics on
 * @param jobStatusProvider provider of the current job status for up/down time gauges
 * @param numberOfRestarts gauge reporting the restart count
 * @param numberOfRescales gauge reporting the rescale count
 * @param metricsRegistrars additional registrars to invoke on the group
 * @param jobStatusListenerRegistrar callback to register the job status metrics as a listener
 * @param initializationTimestamp timestamp the job entered INITIALIZING, in milliseconds
 * @param jobStatusMetricsSettings settings controlling which job status metrics are exposed
 */
public static void registerJobMetrics(
        MetricGroup metrics,
        JobStatusProvider jobStatusProvider,
        Gauge<Long> numberOfRestarts,
        Gauge<Long> numberOfRescales,
        Collection<? extends MetricsRegistrar> metricsRegistrars,
        Consumer<JobStatusListener> jobStatusListenerRegistrar,
        long initializationTimestamp,
        MetricOptions.JobStatusMetricsSettings jobStatusMetricsSettings) {
    metrics.gauge(DownTimeGauge.METRIC_NAME, new DownTimeGauge(jobStatusProvider));
    metrics.gauge(UpTimeGauge.METRIC_NAME, new UpTimeGauge(jobStatusProvider));
    metrics.gauge(MetricNames.NUM_RESTARTS, numberOfRestarts::getValue);
    metrics.gauge(MetricNames.NUM_RESCALES, numberOfRescales::getValue);
    final JobStatusMetrics jobStatusMetrics =
            new JobStatusMetrics(initializationTimestamp, jobStatusMetricsSettings);
    jobStatusMetrics.registerMetrics(metrics);
    // Job status metrics track status transitions, so they also act as a status listener.
    jobStatusListenerRegistrar.accept(jobStatusMetrics);
    for (MetricsRegistrar metricsRegistrar : metricsRegistrars) {
        metricsRegistrar.registerMetrics(metrics);
    }
}
/** Subclass-specific scheduling logic, invoked once from {@link #startScheduling()}. */
protected abstract void startSchedulingInternal();

/**
 * Stops the scheduler: suspends the execution graph, disposes operator coordinators, and shuts
 * down the checkpoint services once the execution graph has terminated.
 *
 * @return future completing when the checkpoint services have been shut down
 */
@Override
public CompletableFuture<Void> closeAsync() {
    mainThreadExecutor.assertRunningInMainThread();
    final FlinkException cause = new FlinkException("Scheduler is being stopped.");
    // Shut down checkpoint services only after the execution graph terminated; afterwards
    // close the checkpoints cleaner. Both steps run on the main thread executor.
    final CompletableFuture<Void> checkpointServicesShutdownFuture =
            FutureUtils.composeAfterwardsAsync(
                    executionGraph
                            .getTerminationFuture()
                            .thenAcceptAsync(
                                    this::shutDownCheckpointServices, getMainThreadExecutor()),
                    checkpointsCleaner::closeAsync,
                    getMainThreadExecutor());
    FutureUtils.assertNoException(checkpointServicesShutdownFuture);
    incrementVersionsOfAllVertices();
    cancelAllPendingSlotRequestsInternal();
    executionGraph.suspend(cause);
    operatorCoordinatorHandler.disposeAllOperatorCoordinators();
    return checkpointServicesShutdownFuture;
}
/** Cancels the job. Must run in the main thread. */
@Override
public void cancel() {
    mainThreadExecutor.assertRunningInMainThread();
    // Invalidate in-flight async operations and release pending slots before cancelling.
    incrementVersionsOfAllVertices();
    cancelAllPendingSlotRequestsInternal();
    executionGraph.cancel();
}
/** Returns a future that completes with the job status once the execution graph terminates. */
@Override
public CompletableFuture<JobStatus> getJobTerminationFuture() {
    return executionGraph.getTerminationFuture();
}
/**
 * Archives a global (not task-attributable) failure into the exception history, using the
 * FAILED status timestamp and attaching the current execution attempt of every vertex.
 */
protected final void archiveGlobalFailure(
        Throwable failure, CompletableFuture<Map<String, String>> failureLabels) {
    archiveGlobalFailure(
            failure,
            executionGraph.getStatusTimestamp(JobStatus.FAILED),
            failureLabels,
            StreamSupport.stream(executionGraph.getAllExecutionVertices().spliterator(), false)
                    .map(ExecutionVertex::getCurrentExecutionAttempt)
                    .collect(Collectors.toSet()));
}
/**
 * Creates a new root exception history entry for a global failure and appends it to the
 * history. The new entry becomes the root to which later concurrent failures are attached.
 */
private void archiveGlobalFailure(
        Throwable failure,
        long timestamp,
        CompletableFuture<Map<String, String>> failureLabels,
        Iterable<Execution> executions) {
    latestRootExceptionEntry =
            RootExceptionHistoryEntry.fromGlobalFailure(
                    failure, timestamp, failureLabels, executions);
    exceptionHistory.add(latestRootExceptionEntry);
    log.debug("Archive global failure.", failure);
}
/**
 * Archives a failure handling result into the exception history. Non-root results are folded
 * into the latest root entry as concurrent exceptions; root results with a known failing
 * execution create a new local-failure entry; otherwise the result is archived as a global
 * failure.
 */
protected final void archiveFromFailureHandlingResult(
        FailureHandlingResultSnapshot failureHandlingResult) {
    if (!failureHandlingResult.isRootCause()) {
        // Handle all subsequent exceptions as the concurrent exceptions when it's not a new
        // attempt.
        checkState(
                latestRootExceptionEntry != null,
                "A root exception entry should exist if failureHandlingResult wasn't "
                        + "generated as part of a new error handling cycle.");
        List<Execution> concurrentlyExecutions = new ArrayList<>();
        failureHandlingResult.getRootCauseExecution().ifPresent(concurrentlyExecutions::add);
        concurrentlyExecutions.addAll(failureHandlingResult.getConcurrentlyFailedExecution());
        latestRootExceptionEntry.addConcurrentExceptions(concurrentlyExecutions);
    } else if (failureHandlingResult.getRootCauseExecution().isPresent()) {
        // A new root failure attributable to a concrete execution attempt.
        final Execution rootCauseExecution =
                failureHandlingResult.getRootCauseExecution().get();
        latestRootExceptionEntry =
                RootExceptionHistoryEntry.fromFailureHandlingResultSnapshot(
                        failureHandlingResult);
        exceptionHistory.add(latestRootExceptionEntry);
        log.debug(
                "Archive local failure causing attempt {} to fail: {}",
                rootCauseExecution.getAttemptId(),
                latestRootExceptionEntry.getExceptionAsString());
    } else {
        // Root failure without an attributable execution: treat as a global failure.
        archiveGlobalFailure(
                failureHandlingResult.getRootCause(),
                failureHandlingResult.getTimestamp(),
                failureHandlingResult.getFailureLabels(),
                failureHandlingResult.getConcurrentlyFailedExecution());
    }
}
/**
 * Applies a task execution state transition to the execution graph.
 *
 * @return {@code true} if the attempt is known and the update was applied, {@code false}
 *     otherwise
 */
@Override
public boolean updateTaskExecutionState(final TaskExecutionStateTransition taskExecutionState) {
    final ExecutionAttemptID attemptId = taskExecutionState.getID();
    final Execution execution = executionGraph.getRegisteredExecutions().get(attemptId);
    if (execution != null && executionGraph.updateState(taskExecutionState)) {
        onTaskExecutionStateUpdate(execution, taskExecutionState);
        return true;
    }
    return false;
}
/**
 * Notifies subclasses about an effective task state update. An update is effective only if
 * the execution actually reached the reported state; of the terminal states, only FINISHED
 * and FAILED are acted upon for now (can be refined in FLINK-14233 after the actions are
 * factored out from ExecutionGraph).
 */
private void onTaskExecutionStateUpdate(
        final Execution execution, final TaskExecutionStateTransition taskExecutionState) {
    final ExecutionState reportedState = taskExecutionState.getExecutionState();
    // Ignore updates that did not actually take effect on the execution.
    if (execution.getState() != reportedState) {
        return;
    }
    if (reportedState == ExecutionState.FINISHED) {
        onTaskFinished(execution, taskExecutionState.getIOMetrics());
    } else if (reportedState == ExecutionState.FAILED) {
        onTaskFailed(execution);
    }
}
/** Called when an execution has effectively reached the FINISHED state. */
protected abstract void onTaskFinished(final Execution execution, final IOMetrics ioMetrics);

/** Called when an execution has effectively reached the FAILED state. */
protected abstract void onTaskFailed(final Execution execution);

/** Serves the next input split for the given attempt; delegates to the graph handler. */
@Override
public SerializedInputSplit requestNextInputSplit(
        JobVertexID vertexID, ExecutionAttemptID executionAttempt) throws IOException {
    mainThreadExecutor.assertRunningInMainThread();
    return executionGraphHandler.requestNextInputSplit(vertexID, executionAttempt);
}
/** Returns the producer state of the given result partition; delegates to the graph handler. */
@Override
public ExecutionState requestPartitionState(
        final IntermediateDataSetID intermediateResultId,
        final ResultPartitionID resultPartitionId)
        throws PartitionProducerDisposedException {
    mainThreadExecutor.assertRunningInMainThread();
    return executionGraphHandler.requestPartitionState(intermediateResultId, resultPartitionId);
}
@VisibleForTesting
public Iterable<RootExceptionHistoryEntry> getExceptionHistory() {
    // Materializes the bounded history buffer into a list.
    return exceptionHistory.toArrayList();
}

/** Returns an archived snapshot of the execution graph plus the exception history. */
@Override
public ExecutionGraphInfo requestJob() {
    mainThreadExecutor.assertRunningInMainThread();
    return new ExecutionGraphInfo(
            ArchivedExecutionGraph.createFrom(executionGraph), getExceptionHistory());
}
/** Returns a snapshot of the checkpoint statistics. */
@Override
public CheckpointStatsSnapshot requestCheckpointStats() {
    mainThreadExecutor.assertRunningInMainThread();
    return executionGraph.getCheckpointStatsSnapshot();
}

/**
 * Returns the current job status. Note: unlike the other request methods, no main-thread
 * assertion is performed here.
 */
@Override
public JobStatus requestJobStatus() {
    return executionGraph.getState();
}
/** Looks up the location of a registered queryable state instance. */
@Override
public KvStateLocation requestKvStateLocation(final JobID jobId, final String registrationName)
        throws UnknownKvStateLocation, FlinkJobNotFoundException {
    mainThreadExecutor.assertRunningInMainThread();
    return kvStateHandler.requestKvStateLocation(jobId, registrationName);
}
/** Records the registration of a queryable state instance; delegates to the KvState handler. */
@Override
public void notifyKvStateRegistered(
        final JobID jobId,
        final JobVertexID jobVertexId,
        final KeyGroupRange keyGroupRange,
        final String registrationName,
        final KvStateID kvStateId,
        final InetSocketAddress kvStateServerAddress)
        throws FlinkJobNotFoundException {
    mainThreadExecutor.assertRunningInMainThread();
    kvStateHandler.notifyKvStateRegistered(
            jobId,
            jobVertexId,
            keyGroupRange,
            registrationName,
            kvStateId,
            kvStateServerAddress);
}
/** Records the unregistration of a queryable state instance; delegates to the KvState handler. */
@Override
public void notifyKvStateUnregistered(
        final JobID jobId,
        final JobVertexID jobVertexId,
        final KeyGroupRange keyGroupRange,
        final String registrationName)
        throws FlinkJobNotFoundException {
    mainThreadExecutor.assertRunningInMainThread();
    kvStateHandler.notifyKvStateUnregistered(
            jobId, jobVertexId, keyGroupRange, registrationName);
}
/** Applies an accumulator snapshot reported by a task to the execution graph. */
@Override
public void updateAccumulators(final AccumulatorSnapshot accumulatorSnapshot) {
    mainThreadExecutor.assertRunningInMainThread();
    executionGraph.updateAccumulators(accumulatorSnapshot);
}
/**
 * Triggers a savepoint, optionally cancelling the job once the savepoint completed.
 *
 * @param targetDirectory directory to store the savepoint in
 * @param cancelJob whether to cancel the job after a successful savepoint
 * @param formatType binary format of the savepoint
 * @return future completing with the external pointer of the savepoint
 */
@Override
public CompletableFuture<String> triggerSavepoint(
        final String targetDirectory,
        final boolean cancelJob,
        final SavepointFormatType formatType) {
    mainThreadExecutor.assertRunningInMainThread();
    if (isAnyOutputBlocking(executionGraph)) {
        // TODO: Introduce a more general solution to mark times when
        // checkpoints are disabled, as well as the detailed reason.
        // https://issues.apache.org/jira/browse/FLINK-34519
        return FutureUtils.completedExceptionally(
                new CheckpointException(CheckpointFailureReason.BLOCKING_OUTPUT_EXIST));
    }
    final CheckpointCoordinator checkpointCoordinator =
            executionGraph.getCheckpointCoordinator();
    StopWithSavepointTerminationManager.checkSavepointActionPreconditions(
            checkpointCoordinator, targetDirectory, getJobId(), log);
    log.info(
            "Triggering {}savepoint for job {}.",
            cancelJob ? "cancel-with-" : "",
            jobGraph.getJobID());
    if (cancelJob) {
        // Prevent further periodic checkpoints while the cancel-with-savepoint is in flight.
        stopCheckpointScheduler();
    }
    return checkpointCoordinator
            .triggerSavepoint(targetDirectory, formatType)
            .thenApply(CompletedCheckpoint::getExternalPointer)
            .handleAsync(
                    (path, throwable) -> {
                        if (throwable != null) {
                            if (cancelJob) {
                                // Savepoint failed: resume periodic checkpointing.
                                startCheckpointScheduler();
                            }
                            throw new CompletionException(throwable);
                        } else if (cancelJob) {
                            log.info(
                                    "Savepoint stored in {}. Now cancelling {}.",
                                    path,
                                    jobGraph.getJobID());
                            cancel();
                        }
                        return path;
                    },
                    mainThreadExecutor);
}
/**
 * Triggers a manual checkpoint of the given type.
 *
 * @param checkpointType type of checkpoint to trigger
 * @return future completing with the completed checkpoint
 * @throws IllegalStateException if the job has no checkpoint coordinator (not a streaming job)
 */
@Override
public CompletableFuture<CompletedCheckpoint> triggerCheckpoint(CheckpointType checkpointType) {
    mainThreadExecutor.assertRunningInMainThread();
    final CheckpointCoordinator checkpointCoordinator =
            executionGraph.getCheckpointCoordinator();
    final JobID jobID = jobGraph.getJobID();
    if (checkpointCoordinator == null) {
        throw new IllegalStateException(String.format("Job %s is not a streaming job.", jobID));
    }
    log.info("Triggering a manual checkpoint for job {}.", jobID);
    return checkpointCoordinator
            .triggerCheckpoint(checkpointType)
            .handleAsync(
                    (path, throwable) -> {
                        if (throwable != null) {
                            throw new CompletionException(throwable);
                        }
                        return path;
                    },
                    mainThreadExecutor);
}
/** Stops periodic checkpoint scheduling; a no-op (logged) if the coordinator is gone. */
@Override
public void stopCheckpointScheduler() {
    final CheckpointCoordinator checkpointCoordinator = getCheckpointCoordinator();
    if (checkpointCoordinator == null) {
        log.info(
                "Periodic checkpoint scheduling could not be stopped due to the CheckpointCoordinator being shutdown.");
    } else {
        checkpointCoordinator.stopCheckpointScheduler();
    }
}
/**
 * (Re-)starts periodic checkpoint scheduling, if periodic checkpointing is configured.
 * A no-op (logged) if the coordinator is gone.
 */
@Override
public void startCheckpointScheduler() {
    mainThreadExecutor.assertRunningInMainThread();
    final CheckpointCoordinator checkpointCoordinator = getCheckpointCoordinator();
    if (checkpointCoordinator == null) {
        log.info(
                "Periodic checkpoint scheduling could not be started due to the CheckpointCoordinator being shutdown.");
    } else if (checkpointCoordinator.isPeriodicCheckpointingConfigured()) {
        try {
            checkpointCoordinator.startCheckpointScheduler();
        } catch (IllegalStateException ignored) {
            // Concurrent shut down of the coordinator
        }
    }
}
/** Forwards a checkpoint acknowledgement from a task to the execution graph handler. */
@Override
public void acknowledgeCheckpoint(
        final JobID jobID,
        final ExecutionAttemptID executionAttemptID,
        final long checkpointId,
        final CheckpointMetrics checkpointMetrics,
        final TaskStateSnapshot checkpointState) {
    executionGraphHandler.acknowledgeCheckpoint(
            jobID, executionAttemptID, checkpointId, checkpointMetrics, checkpointState);
}

/** Forwards a checkpoint decline message to the execution graph handler. */
@Override
public void declineCheckpoint(final DeclineCheckpoint decline) {
    executionGraphHandler.declineCheckpoint(decline);
}
/** Forwards reported checkpoint metrics. Note: the {@code jobID} parameter is not used here. */
@Override
public void reportCheckpointMetrics(
        JobID jobID, ExecutionAttemptID attemptId, long id, CheckpointMetrics metrics) {
    executionGraphHandler.reportCheckpointMetrics(attemptId, id, metrics);
}

/** Forwards subtask initialization metrics. Note: the {@code jobId} parameter is not used here. */
@Override
public void reportInitializationMetrics(
        JobID jobId,
        ExecutionAttemptID executionAttemptId,
        SubTaskInitializationMetrics initializationMetrics) {
    executionGraphHandler.reportInitializationMetrics(
            executionAttemptId, initializationMetrics);
}
/**
 * Stops the job with a savepoint: takes a synchronous savepoint and lets the
 * stop-with-savepoint termination manager coordinate the subsequent shutdown.
 *
 * @param targetDirectory savepoint target directory, or {@code null} for the default
 * @param terminate whether sources should be terminated (as opposed to suspended)
 * @param formatType binary format of the savepoint
 * @return future completing with the external pointer of the savepoint
 */
@Override
public CompletableFuture<String> stopWithSavepoint(
        @Nullable final String targetDirectory,
        final boolean terminate,
        final SavepointFormatType formatType) {
    mainThreadExecutor.assertRunningInMainThread();
    if (isAnyOutputBlocking(executionGraph)) {
        return FutureUtils.completedExceptionally(
                new CheckpointException(CheckpointFailureReason.BLOCKING_OUTPUT_EXIST));
    }
    final CheckpointCoordinator checkpointCoordinator =
            executionGraph.getCheckpointCoordinator();
    StopWithSavepointTerminationManager.checkSavepointActionPreconditions(
            checkpointCoordinator, targetDirectory, executionGraph.getJobID(), log);
    log.info("Triggering stop-with-savepoint for job {}.", jobGraph.getJobID());
    // we stop the checkpoint coordinator so that we are guaranteed
    // to have only the data of the synchronous savepoint committed.
    // in case of failure, and if the job restarts, the coordinator
    // will be restarted by the CheckpointCoordinatorDeActivator.
    stopCheckpointScheduler();
    final CompletableFuture<Collection<ExecutionState>> executionTerminationsFuture =
            getCombinedExecutionTerminationFuture();
    final CompletableFuture<CompletedCheckpoint> savepointFuture =
            checkpointCoordinator.triggerSynchronousSavepoint(
                    terminate, targetDirectory, formatType);
    final StopWithSavepointTerminationManager stopWithSavepointTerminationManager =
            new StopWithSavepointTerminationManager(
                    new StopWithSavepointTerminationHandlerImpl(
                            jobGraph.getJobID(), this, log));
    return stopWithSavepointTerminationManager.stopWithSavepoint(
            savepointFuture, executionTerminationsFuture, mainThreadExecutor);
}
/**
* Returns a {@code CompletableFuture} collecting the termination states of all {@link Execution
* Executions} of the underlying {@link ExecutionGraph}.
*
* @return a {@code CompletableFuture} that completes after all underlying {@code Executions}
* have been terminated.
*/
private CompletableFuture<Collection<ExecutionState>> getCombinedExecutionTerminationFuture() {
    // One terminal-state future per current execution attempt, combined into a single future.
    return FutureUtils.combineAll(
            StreamSupport.stream(executionGraph.getAllExecutionVertices().spliterator(), false)
                    .map(ExecutionVertex::getCurrentExecutionAttempt)
                    .map(Execution::getTerminalStateFuture)
                    .collect(Collectors.toList()));
}
// ------------------------------------------------------------------------
// Operator Coordinators
//
// Note: It may be worthwhile to move the OperatorCoordinators out
// of the scheduler (have them owned by the JobMaster directly).
// Then we could avoid routing these events through the scheduler and
// doing this lazy initialization dance. However, this would require
// that the Scheduler does not eagerly construct the CheckpointCoordinator
// in the ExecutionGraph and does not eagerly restore the savepoint while
// doing that. Because during savepoint restore, the OperatorCoordinators
// (or at least their holders) already need to exist, to accept the restored
// state. But some components they depend on (Scheduler and MainThreadExecutor)
// are not fully usable and accessible at that point.
// ------------------------------------------------------------------------
/** Routes an operator event from a task to the matching operator coordinator. */
@Override
public void deliverOperatorEventToCoordinator(
        final ExecutionAttemptID taskExecutionId,
        final OperatorID operatorId,
        final OperatorEvent evt)
        throws FlinkException {
    operatorCoordinatorHandler.deliverOperatorEventToCoordinator(
            taskExecutionId, operatorId, evt);
}

/** Routes a coordination request to the matching operator coordinator. */
@Override
public CompletableFuture<CoordinationResponse> deliverCoordinationRequestToCoordinator(
        OperatorID operator, CoordinationRequest request) throws FlinkException {
    return operatorCoordinatorHandler.deliverCoordinationRequestToCoordinator(
            operator, request);
}
/**
 * Records that a task reached end-of-data. Only acts for streaming jobs with
 * checkpoints-after-tasks-finish enabled: once all subtasks of a vertex are done, its
 * operators are marked as no longer processing backlog; once all tasks of the job are done,
 * a final configured checkpoint is triggered.
 */
@Override
public void notifyEndOfData(ExecutionAttemptID executionAttemptID) {
    if (jobGraph.getJobType() == JobType.STREAMING
            && jobGraph.isCheckpointingEnabled()
            && jobGraph.getCheckpointingSettings()
                    .getCheckpointCoordinatorConfiguration()
                    .isEnableCheckpointsAfterTasksFinish()) {
        vertexEndOfDataListener.recordTaskEndOfData(executionAttemptID);
        if (vertexEndOfDataListener.areAllTasksOfJobVertexEndOfData(
                executionAttemptID.getJobVertexId())) {
            List<OperatorIDPair> operatorIDPairs =
                    executionGraph
                            .getJobVertex(executionAttemptID.getJobVertexId())
                            .getOperatorIDs();
            CheckpointCoordinator checkpointCoordinator =
                    executionGraph.getCheckpointCoordinator();
            if (checkpointCoordinator != null) {
                // All subtasks of this vertex finished: clear the backlog flag per operator.
                for (OperatorIDPair operatorIDPair : operatorIDPairs) {
                    checkpointCoordinator.setIsProcessingBacklog(
                            operatorIDPair.getGeneratedOperatorID(), false);
                }
            }
        }
        if (vertexEndOfDataListener.areAllTasksEndOfData()) {
            // Every task reported end-of-data: trigger a final checkpoint.
            triggerCheckpoint(CheckpointType.CONFIGURED);
        }
    }
}
// ------------------------------------------------------------------------
// access utils for testing
// ------------------------------------------------------------------------
/** Returns the ID of the scheduled job. */
@VisibleForTesting
protected JobID getJobId() {
    return jobGraph.getJobID();
}

/** Returns the listener tracking which tasks have reported end-of-data. */
@VisibleForTesting
VertexEndOfDataListener getVertexEndOfDataListener() {
    return vertexEndOfDataListener;
}
}
|
SchedulerBase
|
java
|
spring-projects__spring-framework
|
spring-tx/src/main/java/org/springframework/transaction/jta/JtaTransactionManager.java
|
{
"start": 6103,
"end": 13320
}
|
interface ____.
* @see #setUserTransactionName
* @see #setAutodetectTransactionManager
*/
public static final String DEFAULT_USER_TRANSACTION_NAME = "java:comp/UserTransaction";
/**
* Fallback JNDI locations for the JTA TransactionManager. Applied if
* the JTA UserTransaction does not implement the JTA TransactionManager
* interface, provided that the "autodetectTransactionManager" flag is "true".
* @see #setTransactionManagerName
* @see #setAutodetectTransactionManager
*/
public static final String[] FALLBACK_TRANSACTION_MANAGER_NAMES =
new String[] {"java:comp/TransactionManager", "java:appserver/TransactionManager",
"java:pm/TransactionManager", "java:/TransactionManager"};
/**
* Standard Jakarta EE JNDI location for the JTA TransactionSynchronizationRegistry.
* Autodetected when available.
*/
public static final String DEFAULT_TRANSACTION_SYNCHRONIZATION_REGISTRY_NAME =
"java:comp/TransactionSynchronizationRegistry";
private transient JndiTemplate jndiTemplate = new JndiTemplate();
private transient @Nullable UserTransaction userTransaction;
private @Nullable String userTransactionName;
private boolean autodetectUserTransaction = true;
private boolean cacheUserTransaction = true;
private boolean userTransactionObtainedFromJndi = false;
private transient @Nullable TransactionManager transactionManager;
private @Nullable String transactionManagerName;
private boolean autodetectTransactionManager = true;
private transient @Nullable TransactionSynchronizationRegistry transactionSynchronizationRegistry;
private @Nullable String transactionSynchronizationRegistryName;
private boolean autodetectTransactionSynchronizationRegistry = true;
private boolean allowCustomIsolationLevels = false;
/**
* Create a new JtaTransactionManager instance, to be configured as bean.
* Invoke {@code afterPropertiesSet} to activate the configuration.
* @see #setUserTransactionName
* @see #setUserTransaction
* @see #setTransactionManagerName
* @see #setTransactionManager
* @see #afterPropertiesSet()
*/
public JtaTransactionManager() {
setNestedTransactionAllowed(true);
}
/**
* Create a new JtaTransactionManager instance.
* @param userTransaction the JTA UserTransaction to use as direct reference
*/
public JtaTransactionManager(UserTransaction userTransaction) {
this();
Assert.notNull(userTransaction, "UserTransaction must not be null");
this.userTransaction = userTransaction;
}
/**
* Create a new JtaTransactionManager instance.
* @param userTransaction the JTA UserTransaction to use as direct reference
* @param transactionManager the JTA TransactionManager to use as direct reference
*/
public JtaTransactionManager(UserTransaction userTransaction, TransactionManager transactionManager) {
this();
Assert.notNull(userTransaction, "UserTransaction must not be null");
Assert.notNull(transactionManager, "TransactionManager must not be null");
this.userTransaction = userTransaction;
this.transactionManager = transactionManager;
}
/**
* Create a new JtaTransactionManager instance.
* @param transactionManager the JTA TransactionManager to use as direct reference
*/
public JtaTransactionManager(TransactionManager transactionManager) {
this();
Assert.notNull(transactionManager, "TransactionManager must not be null");
this.transactionManager = transactionManager;
this.userTransaction = buildUserTransaction(transactionManager);
}
/**
* Set the JndiTemplate to use for JNDI lookups.
* A default one is used if not set.
*/
public void setJndiTemplate(JndiTemplate jndiTemplate) {
Assert.notNull(jndiTemplate, "JndiTemplate must not be null");
this.jndiTemplate = jndiTemplate;
}
/**
* Return the JndiTemplate used for JNDI lookups.
*/
public JndiTemplate getJndiTemplate() {
return this.jndiTemplate;
}
/**
* Set the JNDI environment to use for JNDI lookups.
* Creates a JndiTemplate with the given environment settings.
* @see #setJndiTemplate
*/
public void setJndiEnvironment(@Nullable Properties jndiEnvironment) {
this.jndiTemplate = new JndiTemplate(jndiEnvironment);
}
/**
* Return the JNDI environment to use for JNDI lookups.
*/
public @Nullable Properties getJndiEnvironment() {
return this.jndiTemplate.getEnvironment();
}
/**
* Set the JTA UserTransaction to use as direct reference.
* <p>Typically just used for local JTA setups; in a Jakarta EE environment,
* the UserTransaction will always be fetched from JNDI.
* @see #setUserTransactionName
* @see #setAutodetectUserTransaction
*/
public void setUserTransaction(@Nullable UserTransaction userTransaction) {
this.userTransaction = userTransaction;
}
/**
* Return the JTA UserTransaction that this transaction manager uses.
*/
public @Nullable UserTransaction getUserTransaction() {
return this.userTransaction;
}
/**
* Set the JNDI name of the JTA UserTransaction.
* <p>Note that the UserTransaction will be autodetected at the Jakarta EE
* default location "java:comp/UserTransaction" if not specified explicitly.
* @see #DEFAULT_USER_TRANSACTION_NAME
* @see #setUserTransaction
* @see #setAutodetectUserTransaction
*/
public void setUserTransactionName(String userTransactionName) {
this.userTransactionName = userTransactionName;
}
/**
* Set whether to autodetect the JTA UserTransaction at its default
* JNDI location "java:comp/UserTransaction", as specified by Jakarta EE.
* Will proceed without UserTransaction if none found.
* <p>Default is "true", autodetecting the UserTransaction unless
* it has been specified explicitly. Turn this flag off to allow for
* JtaTransactionManager operating against the TransactionManager only,
* despite a default UserTransaction being available.
* @see #DEFAULT_USER_TRANSACTION_NAME
*/
public void setAutodetectUserTransaction(boolean autodetectUserTransaction) {
this.autodetectUserTransaction = autodetectUserTransaction;
}
/**
* Set whether to cache the JTA UserTransaction object fetched from JNDI.
* <p>Default is "true": UserTransaction lookup will only happen at startup,
* reusing the same UserTransaction handle for all transactions of all threads.
* This is the most efficient choice for all application servers that provide
* a shared UserTransaction object (the typical case).
* <p>Turn this flag off to enforce a fresh lookup of the UserTransaction
* for every transaction. This is only necessary for application servers
* that return a new UserTransaction for every transaction, keeping state
* tied to the UserTransaction object itself rather than the current thread.
* @see #setUserTransactionName
*/
public void setCacheUserTransaction(boolean cacheUserTransaction) {
this.cacheUserTransaction = cacheUserTransaction;
}
/**
* Set the JTA TransactionManager to use as direct reference.
* <p>A TransactionManager is necessary for suspending and resuming transactions,
* as this not supported by the UserTransaction interface.
* <p>Note that the TransactionManager will be autodetected if the JTA
* UserTransaction object implements the JTA TransactionManager
|
there
|
java
|
apache__kafka
|
clients/src/main/java/org/apache/kafka/common/requests/BeginQuorumEpochResponse.java
|
{
"start": 1459,
"end": 2811
}
|
class ____ extends AbstractResponse {
private final BeginQuorumEpochResponseData data;
public BeginQuorumEpochResponse(BeginQuorumEpochResponseData data) {
super(ApiKeys.BEGIN_QUORUM_EPOCH);
this.data = data;
}
@Override
public Map<Errors, Integer> errorCounts() {
Map<Errors, Integer> errors = new EnumMap<>(Errors.class);
errors.put(Errors.forCode(data.errorCode()), 1);
for (BeginQuorumEpochResponseData.TopicData topicResponse : data.topics()) {
for (BeginQuorumEpochResponseData.PartitionData partitionResponse : topicResponse.partitions()) {
errors.compute(Errors.forCode(partitionResponse.errorCode()),
(error, count) -> count == null ? 1 : count + 1);
}
}
return errors;
}
@Override
public BeginQuorumEpochResponseData data() {
return data;
}
@Override
public int throttleTimeMs() {
return DEFAULT_THROTTLE_TIME;
}
@Override
public void maybeSetThrottleTimeMs(int throttleTimeMs) {
// Not supported by the response schema
}
public static BeginQuorumEpochResponse parse(Readable readable, short version) {
return new BeginQuorumEpochResponse(new BeginQuorumEpochResponseData(readable, version));
}
}
|
BeginQuorumEpochResponse
|
java
|
apache__camel
|
components/camel-mina/src/test/java/org/apache/camel/component/mina/MinaDisconnectTest.java
|
{
"start": 1055,
"end": 1877
}
|
class ____ extends BaseMinaTest {
@Test
public void testCloseSessionWhenComplete() {
Object out = template.requestBody(
String.format("mina:tcp://localhost:%1$s?sync=true&textline=true&disconnect=true", getPort()), "Chad");
assertEquals("Bye Chad", out);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
fromF("mina:tcp://localhost:%1$s?sync=true&textline=true&disconnect=true", getPort())
.process(exchange -> {
String body = exchange.getIn().getBody(String.class);
exchange.getMessage().setBody("Bye " + body);
});
}
};
}
}
|
MinaDisconnectTest
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/failover/FailoverStrategy.java
|
{
"start": 1079,
"end": 1731
}
|
interface ____ {
/**
* Returns a set of IDs corresponding to the set of vertices that should be restarted.
*
* @param executionVertexId ID of the failed task
* @param cause cause of the failure
* @return set of IDs of vertices to restart
*/
Set<ExecutionVertexID> getTasksNeedingRestart(
ExecutionVertexID executionVertexId, Throwable cause);
// ------------------------------------------------------------------------
// factory
// ------------------------------------------------------------------------
/** The factory to instantiate {@link FailoverStrategy}. */
|
FailoverStrategy
|
java
|
apache__flink
|
tools/ci/flink-ci-tools/src/test/java/org/apache/flink/tools/ci/utils/dependency/DependencyParserTreeTest.java
|
{
"start": 1179,
"end": 6368
}
|
class ____ {
private static Stream<String> getTestDependencyTree() {
return Stream.of(
"[INFO] --- maven-dependency-plugin:3.2.0:tree (default-cli) @ m1 ---",
"[INFO] internal:m1:jar:1.1",
"[INFO] +- external:dependency1:jar:2.1:compile",
"[INFO] | +- external:dependency2:jar:2.2:compile (optional)",
"[INFO] | | \\- external:dependency3:jar:2.3:provided",
"[INFO] | +- external:dependency4:jar:classifier:2.4:compile",
"[INFO]",
"[INFO] --- maven-dependency-plugin:3.2.0:tree (default-cli) @ m2 ---",
"[INFO] internal:m2:jar:1.2",
"[INFO] +- internal:m1:jar:1.1:compile",
"[INFO] | +- external:dependency4:jar:2.4:compile");
}
@Test
void testTreeParsing() {
final Map<String, DependencyTree> dependenciesByModule =
DependencyParser.parseDependencyTreeOutput(getTestDependencyTree());
assertThat(dependenciesByModule).containsOnlyKeys("m1", "m2");
assertThat(dependenciesByModule.get("m1").flatten())
.containsExactlyInAnyOrder(
Dependency.create("external", "dependency1", "2.1", null, "compile", false),
Dependency.create("external", "dependency2", "2.2", null, "compile", true),
Dependency.create(
"external", "dependency3", "2.3", null, "provided", false),
Dependency.create(
"external", "dependency4", "2.4", "classifier", "compile", false));
assertThat(dependenciesByModule.get("m2").flatten())
.containsExactlyInAnyOrder(
Dependency.create("internal", "m1", "1.1", null, "compile", false),
Dependency.create(
"external", "dependency4", "2.4", null, "compile", false));
}
@Test
void testTreeLineParsingGroupId() {
assertThat(
DependencyParser.parseTreeDependency(
"[INFO] +- external:dependency1:jar:1.0:compile"))
.hasValueSatisfying(
dependency -> assertThat(dependency.getGroupId()).isEqualTo("external"));
}
@Test
void testTreeLineParsingArtifactId() {
assertThat(
DependencyParser.parseTreeDependency(
"[INFO] +- external:dependency1:jar:1.0:compile"))
.hasValueSatisfying(
dependency ->
assertThat(dependency.getArtifactId()).isEqualTo("dependency1"));
}
@Test
void testTreeLineParsingVersion() {
assertThat(
DependencyParser.parseTreeDependency(
"[INFO] +- external:dependency1:jar:1.0:compile"))
.hasValueSatisfying(
dependency -> assertThat(dependency.getVersion()).isEqualTo("1.0"));
}
@Test
void testTreeLineParsingScope() {
assertThat(
DependencyParser.parseTreeDependency(
"[INFO] +- external:dependency1:jar:1.0:provided"))
.hasValueSatisfying(
dependency -> assertThat(dependency.getScope()).hasValue("provided"));
}
@Test
void testTreeLineParsingWithNonJarType() {
assertThat(
DependencyParser.parseTreeDependency(
"[INFO] +- external:dependency1:pom:1.0:compile"))
.hasValue(
Dependency.create(
"external", "dependency1", "1.0", null, "compile", false));
}
@Test
void testTreeLineParsingWithClassifier() {
assertThat(
DependencyParser.parseTreeDependency(
"[INFO] +- external:dependency1:jar:some_classifier:1.0:compile"))
.hasValue(
Dependency.create(
"external",
"dependency1",
"1.0",
"some_classifier",
"compile",
false));
}
@Test
void testTreeLineParsingWithoutOptional() {
assertThat(
DependencyParser.parseTreeDependency(
"[INFO] +- external:dependency1:jar:1.0:compile"))
.hasValueSatisfying(
dependency -> assertThat(dependency.isOptional()).hasValue(false));
}
@Test
void testTreeLineParsingWithOptional() {
assertThat(
DependencyParser.parseTreeDependency(
"[INFO] +- external:dependency1:jar:1.0:compile (optional)"))
.hasValueSatisfying(
dependency -> assertThat(dependency.isOptional()).hasValue(true));
}
}
|
DependencyParserTreeTest
|
java
|
mapstruct__mapstruct
|
processor/src/test/java/org/mapstruct/ap/test/gem/ConstantTest.java
|
{
"start": 444,
"end": 2387
}
|
class ____ {
@Test
public void constantsShouldBeEqual() {
assertThat( MappingConstants.ANY_REMAINING ).isEqualTo( MappingConstantsGem.ANY_REMAINING );
assertThat( MappingConstants.ANY_UNMAPPED ).isEqualTo( MappingConstantsGem.ANY_UNMAPPED );
assertThat( MappingConstants.NULL ).isEqualTo( MappingConstantsGem.NULL );
assertThat( MappingConstants.THROW_EXCEPTION ).isEqualTo( MappingConstantsGem.THROW_EXCEPTION );
assertThat( MappingConstants.SUFFIX_TRANSFORMATION ).isEqualTo( MappingConstantsGem.SUFFIX_TRANSFORMATION );
assertThat( MappingConstants.STRIP_SUFFIX_TRANSFORMATION )
.isEqualTo( MappingConstantsGem.STRIP_SUFFIX_TRANSFORMATION );
assertThat( MappingConstants.PREFIX_TRANSFORMATION ).isEqualTo( MappingConstantsGem.PREFIX_TRANSFORMATION );
assertThat( MappingConstants.STRIP_PREFIX_TRANSFORMATION )
.isEqualTo( MappingConstantsGem.STRIP_PREFIX_TRANSFORMATION );
assertThat( MappingConstants.CASE_TRANSFORMATION ).isEqualTo( MappingConstantsGem.CASE_TRANSFORMATION );
}
@Test
public void componentModelConstantsShouldBeEqual() {
assertThat( MappingConstants.ComponentModel.DEFAULT )
.isEqualTo( MappingConstantsGem.ComponentModelGem.DEFAULT );
assertThat( MappingConstants.ComponentModel.CDI ).isEqualTo( MappingConstantsGem.ComponentModelGem.CDI );
assertThat( MappingConstants.ComponentModel.SPRING ).isEqualTo( MappingConstantsGem.ComponentModelGem.SPRING );
assertThat( MappingConstants.ComponentModel.JSR330 ).isEqualTo( MappingConstantsGem.ComponentModelGem.JSR330 );
assertThat( MappingConstants.ComponentModel.JAKARTA )
.isEqualTo( MappingConstantsGem.ComponentModelGem.JAKARTA );
assertThat( MappingConstants.ComponentModel.JAKARTA_CDI )
.isEqualTo( MappingConstantsGem.ComponentModelGem.JAKARTA_CDI );
}
}
|
ConstantTest
|
java
|
apache__camel
|
dsl/camel-kamelet-main/src/main/java/org/apache/camel/main/download/StubComponentAutowireStrategy.java
|
{
"start": 1197,
"end": 3696
}
|
class ____ extends LifecycleStrategySupport implements AutowiredLifecycleStrategy, Ordered {
private final CamelContext camelContext;
private final String pattern;
public StubComponentAutowireStrategy(CamelContext camelContext, String pattern) {
this.camelContext = camelContext;
this.pattern = pattern;
}
@Override
public int getOrder() {
// we should be last
return Ordered.LOWEST;
}
@Override
public void onComponentAdd(String name, Component component) {
autowireComponent(name, component);
}
@Override
public void onDataFormatCreated(String name, DataFormat dataFormat) {
autowireDataFormat(name, dataFormat);
}
@Override
public void onLanguageCreated(String name, Language language) {
autowireLanguage(name, language);
}
private void autowireComponent(String name, Component component) {
// autowiring can be turned off on context level and per component
boolean enabled = camelContext.isAutowiredEnabled() && component.isAutowiredEnabled();
if (enabled) {
autowire(name, "component", component);
}
}
private void autowireDataFormat(String name, DataFormat dataFormat) {
// autowiring can be turned off on context level
boolean enabled = camelContext.isAutowiredEnabled();
if (enabled) {
autowire(name, "dataformat", dataFormat);
}
}
private void autowireLanguage(String name, Language language) {
// autowiring can be turned off on context level
boolean enabled = camelContext.isAutowiredEnabled();
if (enabled) {
autowire(name, "language", language);
}
}
private void autowire(String name, String kind, Object target) {
boolean stubbed = false;
for (String n : pattern.split(",")) {
if ("component".equals(kind) && n.startsWith("component:")) {
n = n.substring(10);
} else if ("dataformat".equals(kind) && n.startsWith("dataformat:")) {
n = n.substring(11);
} else if ("language".equals(kind) && n.startsWith("language:")) {
n = n.substring(9);
}
stubbed |= PatternHelper.matchPattern(name, n);
}
if (stubbed) {
// do not autowire
} else {
doAutoWire(name, kind, target, camelContext);
}
}
}
|
StubComponentAutowireStrategy
|
java
|
apache__camel
|
components/camel-olingo4/camel-olingo4-component/src/test/java/org/apache/camel/component/olingo4/AbstractOlingo4TestSupport.java
|
{
"start": 1802,
"end": 1947
}
|
class ____ Olingo 4.0 Integration tests generated by Camel API component maven plugin.
*/
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
public
|
for
|
java
|
apache__camel
|
components/camel-stax/src/test/java/org/apache/camel/language/xtokenizer/SplitGroupWrappedMultiXmlTokenTest.java
|
{
"start": 1250,
"end": 3630
}
|
class ____ extends CamelTestSupport {
@TempDir
Path testDirectory;
@Test
public void testTokenXMLPairGroup() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:split");
mock.expectedMessageCount(3);
mock.message(0).body()
.isEqualTo(
"<?xml version=\"1.0\"?>\n<orders xmlns=\"http:acme.com\">\n <order id=\"1\">Camel in Action</order><order id=\"2\">ActiveMQ in Action</order></orders>");
mock.message(1).body()
.isEqualTo(
"<?xml version=\"1.0\"?>\n<orders xmlns=\"http:acme.com\">\n <order id=\"3\">Spring in Action</order><order id=\"4\">Scala in Action</order></orders>");
mock.message(2).body().isEqualTo(
"<?xml version=\"1.0\"?>\n<orders xmlns=\"http:acme.com\">\n <order id=\"5\">Groovy in Action</order></orders>");
String body = createBody();
template.sendBodyAndHeader(TestSupport.fileUri(testDirectory), body, Exchange.FILE_NAME, "orders.xml");
MockEndpoint.assertIsSatisfied(context);
}
protected String createBody() {
StringBuilder sb = new StringBuilder("<?xml version=\"1.0\"?>\n");
sb.append("<orders xmlns=\"http:acme.com\">\n");
sb.append(" <order id=\"1\">Camel in Action</order>\n");
sb.append(" <order id=\"2\">ActiveMQ in Action</order>\n");
sb.append(" <order id=\"3\">Spring in Action</order>\n");
sb.append(" <order id=\"4\">Scala in Action</order>\n");
sb.append(" <order id=\"5\">Groovy in Action</order>\n");
sb.append("</orders>");
return sb.toString();
}
@Override
protected RouteBuilder createRouteBuilder() throws Exception {
return new RouteBuilder() {
Namespaces ns = new Namespaces("", "http:acme.com");
@Override
public void configure() throws Exception {
// START SNIPPET: e1
from(TestSupport.fileUri(testDirectory, "?initialDelay=0&delay=10"))
// split the order child tags, and inherit namespaces from
// the orders root tag
.split().xtokenize("//order", 'w', ns, 2).to("log:split").to("mock:split");
// END SNIPPET: e1
}
};
}
}
|
SplitGroupWrappedMultiXmlTokenTest
|
java
|
apache__camel
|
components/camel-jfr/src/main/java/org/apache/camel/startup/jfr/FlightRecorderStartupStepRecorder.java
|
{
"start": 1497,
"end": 5306
}
|
class ____ extends DefaultStartupStepRecorder {
private static final Logger LOG = LoggerFactory.getLogger(FlightRecorderStartupStepRecorder.class);
private Recording rec;
private FlightRecorderListener frl;
public FlightRecorderStartupStepRecorder() {
// should default be enabled if discovered from classpath
setEnabled(true);
}
@Override
public void doStart() throws Exception {
super.doStart();
if (isRecording()) {
FlightRecorder.register(FlightRecorderStartupStep.class);
Configuration config = Configuration.getConfiguration(getRecordingProfile());
rec = new Recording(config);
rec.setName("Camel Recording");
if (!"false".equals(getRecordingDir())) {
// recording to disk can be turned off by setting to false
Path dir = getRecordingDir() != null ? Paths.get(getRecordingDir()) : Paths.get(".");
Path file = Files.createTempFile(dir, "camel-recording", ".jfr");
// when stopping then the recording is automatic dumped by flight recorder
rec.setDestination(file);
}
if (getStartupRecorderDuration() == 0) {
if (rec.getDestination() != null) {
rec.setDumpOnExit(true);
LOG.info("Java flight recorder with profile: {} will be saved to file on JVM exit: {}",
getRecordingProfile(), rec.getDestination());
}
} else if (getStartupRecorderDuration() > 0) {
rec.setDuration(Duration.ofSeconds(getStartupRecorderDuration()));
LOG.info("Starting Java flight recorder with profile: {} and duration: {} seconds", getRecordingProfile(),
getStartupRecorderDuration());
// add listener to trigger auto-save when duration is hit
frl = new FlightRecorderListener() {
@Override
public void recordingStateChanged(Recording recording) {
if (recording == rec && recording.getState().equals(RecordingState.STOPPED)) {
LOG.info("Java flight recorder stopped after {} seconds and saved to file: {}",
getStartupRecorderDuration(), rec.getDestination());
}
}
};
FlightRecorder.addListener(frl);
} else {
LOG.info("Starting Java flight recorder with profile: {}", getRecordingProfile());
}
rec.start();
}
}
@Override
public void doStop() throws Exception {
super.doStop();
if (rec != null) {
// if < 0 then manual stop the recording
if (getStartupRecorderDuration() < 0) {
LOG.debug("Stopping Java flight recorder");
// do GC before stopping to force flushing data into the recording
System.gc();
rec.stop();
LOG.info("Java flight recorder stopped and saved to file: {}", rec.getDestination());
}
FlightRecorder.unregister(FlightRecorderStartupStep.class);
if (frl != null) {
FlightRecorder.removeListener(frl);
}
rec = null;
frl = null;
}
}
@Override
public StartupStep createStartupStep(String type, String name, String description, int id, int parentId, int level) {
return new FlightRecorderStartupStep(name, id, parentId, level, type, description);
}
@Override
public String toString() {
return "java-flight-recorder";
}
}
|
FlightRecorderStartupStepRecorder
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialCentroidCartesianPointSourceValuesAggregator.java
|
{
"start": 1382,
"end": 1896
}
|
class ____ extends CentroidPointAggregator {
public static void combine(CentroidState current, BytesRef wkb) {
Point point = SpatialAggregationUtils.decodePoint(wkb);
current.add(point.getX(), point.getY());
}
public static void combine(GroupingCentroidState current, int groupId, BytesRef wkb) {
Point point = SpatialAggregationUtils.decodePoint(wkb);
current.add(point.getX(), 0d, point.getY(), 0d, 1, groupId);
}
}
|
SpatialCentroidCartesianPointSourceValuesAggregator
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/PatternMatchingInstanceofTest.java
|
{
"start": 12692,
"end": 13029
}
|
class ____ {
void test(Object o) {
if (o instanceof Test) {
test((Test) o);
Test test = (Test) o;
test(test);
}
}
}
""")
.addOutputLines(
"Test.java",
"""
|
Test
|
java
|
assertj__assertj-core
|
assertj-core/src/test/java/org/assertj/core/internal/bigintegers/BigIntegers_assertIsBetween_Test.java
|
{
"start": 1625,
"end": 3546
}
|
class ____ extends BigIntegersBaseTest {
@Test
void should_fail_if_actual_is_null() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> numbers.assertIsBetween(someInfo(), null, ZERO, ONE))
.withMessage(actualIsNull());
}
@Test
void should_fail_if_start_is_null() {
assertThatNullPointerException().isThrownBy(() -> numbers.assertIsBetween(someInfo(), ONE, null, ONE));
}
@Test
void should_fail_if_end_is_null() {
assertThatNullPointerException().isThrownBy(() -> numbers.assertIsBetween(someInfo(), ONE, ZERO, null));
}
@Test
void should_pass_if_actual_is_in_range() {
numbers.assertIsBetween(someInfo(), ONE, ZERO, TEN);
numbers.assertIsBetween(someInfo(), ONE, ONE, TEN);
numbers.assertIsBetween(someInfo(), ONE, new BigInteger("1"), TEN);
numbers.assertIsBetween(someInfo(), ONE, ZERO, new BigInteger("1"));
}
@Test
void should_pass_if_actual_is_equal_to_range_start() {
numbers.assertIsBetween(someInfo(), ONE, ONE, TEN);
}
@Test
void should_pass_if_actual_is_equal_to_range_end() {
numbers.assertIsBetween(someInfo(), ONE, ZERO, ONE);
}
@Test
void should_fail_if_actual_is_not_in_range_start() {
AssertionInfo info = someInfo();
Throwable error = catchThrowable(() -> numbers.assertIsBetween(info, ONE, new BigInteger("2"), TEN));
assertThat(error).isInstanceOf(AssertionError.class);
verify(failures).failure(info, shouldBeBetween(ONE, new BigInteger("2"), TEN, true, true));
}
@Test
void should_fail_if_actual_is_not_in_range_end() {
AssertionInfo info = someInfo();
Throwable error = catchThrowable(() -> numbers.assertIsBetween(info, ONE, ZERO, ZERO));
assertThat(error).isInstanceOf(AssertionError.class);
verify(failures).failure(info, shouldBeBetween(ONE, ZERO, ZERO, true, true));
}
}
|
BigIntegers_assertIsBetween_Test
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/nullness/ParameterMissingNullableTest.java
|
{
"start": 9957,
"end": 10417
}
|
class ____ {
void foo(Integer i) {
if (i == null) {
throwIt(new RuntimeException());
}
}
void throwIt(RuntimeException x) {
throw x;
}
}
""")
.doTest();
}
@Test
public void negativeLambdaParameter() {
aggressiveHelper
.addSourceLines(
"Foo.java",
"""
|
Foo
|
java
|
redisson__redisson
|
redisson/src/main/java/org/redisson/cache/AbstractCacheMap.java
|
{
"start": 9106,
"end": 10031
}
|
class ____<M> implements Iterator<M> {
private final Iterator<Map.Entry<K, CachedValue<K, V>>> keyIterator = map.entrySet().iterator();
Map.Entry<K, CachedValue<K, V>> mapEntry;
@Override
public boolean hasNext() {
if (mapEntry != null) {
return true;
}
mapEntry = null;
while (keyIterator.hasNext()) {
Map.Entry<K, CachedValue<K, V>> entry = keyIterator.next();
if (isValueExpired(entry.getValue())) {
continue;
}
mapEntry = entry;
break;
}
return mapEntry != null;
}
public CachedValue<K, V> cursorValue() {
if (mapEntry == null) {
throw new IllegalStateException();
}
return mapEntry.getValue();
}
}
final
|
MapIterator
|
java
|
quarkusio__quarkus
|
extensions/resteasy-classic/resteasy-multipart/deployment/src/test/java/io/quarkus/resteasy/multipart/FeedbackResource.java
|
{
"start": 290,
"end": 573
}
|
class ____ {
@POST
@Path("/multipart-encoding")
@Produces(MediaType.TEXT_PLAIN)
@Consumes(MediaType.MULTIPART_FORM_DATA + ";charset=UTF-8")
public String postForm(@MultipartForm final FeedbackBody feedback) {
return feedback.content;
}
}
|
FeedbackResource
|
java
|
spring-projects__spring-boot
|
module/spring-boot-batch/src/main/java/org/springframework/boot/batch/autoconfigure/BatchConversionServiceCustomizer.java
|
{
"start": 777,
"end": 1068
}
|
interface ____ can be implemented by beans wishing to customize the
* {@link ConfigurableConversionService} to fine-tune its auto-configuration. The
* conversion service is used by the Spring Batch infrastructure.
*
* @author Claudio Nave
* @since 4.0.0
*/
@FunctionalInterface
public
|
that
|
java
|
netty__netty
|
common/src/test/java/io/netty/util/concurrent/PromiseCombinerTest.java
|
{
"start": 8259,
"end": 8376
}
|
interface ____ {
void accept(GenericFutureListener<Future<Void>> listener);
}
}
|
GenericFutureListenerConsumer
|
java
|
spring-cloud__spring-cloud-gateway
|
spring-cloud-gateway-server-webflux/src/main/java/org/springframework/cloud/gateway/config/SimpleUrlHandlerMappingGlobalCorsAutoConfiguration.java
|
{
"start": 1485,
"end": 1815
}
|
class ____ {
@Autowired
private GlobalCorsProperties globalCorsProperties;
@Autowired
private SimpleUrlHandlerMapping simpleUrlHandlerMapping;
@PostConstruct
void config() {
simpleUrlHandlerMapping.setCorsConfigurations(globalCorsProperties.getCorsConfigurations());
}
}
|
SimpleUrlHandlerMappingGlobalCorsAutoConfiguration
|
java
|
netty__netty
|
codec-dns/src/main/java/io/netty/handler/codec/dns/DnsMessageUtil.java
|
{
"start": 9852,
"end": 9982
}
|
interface ____ {
DnsQuery newQuery(int id, DnsOpCode dnsOpCode);
}
private DnsMessageUtil() {
}
}
|
DnsQueryFactory
|
java
|
apache__flink
|
flink-formats/flink-avro/src/main/java/org/apache/flink/formats/avro/AvroWriterFactory.java
|
{
"start": 1328,
"end": 1960
}
|
class ____<T> implements BulkWriter.Factory<T> {
private static final long serialVersionUID = 1L;
/** The builder to construct the Avro {@link DataFileWriter}. */
private final AvroBuilder<T> avroBuilder;
/** Creates a new AvroWriterFactory using the given builder to assemble the ParquetWriter. */
public AvroWriterFactory(AvroBuilder<T> avroBuilder) {
this.avroBuilder = avroBuilder;
}
@Override
public BulkWriter<T> create(FSDataOutputStream out) throws IOException {
return new AvroBulkWriter<>(avroBuilder.createWriter(new CloseShieldOutputStream(out)));
}
}
|
AvroWriterFactory
|
java
|
micronaut-projects__micronaut-core
|
benchmarks/src/jmh/java/io/micronaut/http/server/stack/ControllersBenchmark.java
|
{
"start": 17891,
"end": 18483
}
|
class ____ {
private int id;
private int randomNumber;
public SomeBean2() {
}
public SomeBean2(int id, int randomNumber) {
this.id = id;
this.randomNumber = randomNumber;
}
public int getId() {
return id;
}
public void setId(int id) {
this.id = id;
}
public int getRandomNumber() {
return randomNumber;
}
public void setRandomNumber(int randomNumber) {
this.randomNumber = randomNumber;
}
}
}
|
SomeBean2
|
java
|
quarkusio__quarkus
|
extensions/hibernate-orm/runtime/src/main/java/io/quarkus/hibernate/orm/runtime/metrics/HibernateMetricsRecorder.java
|
{
"start": 692,
"end": 13797
}
|
class ____ {
private static final String SESSION_FACTORY_TAG_NAME = "entityManagerFactory";
/* RUNTIME_INIT for metrics */
public Consumer<MetricsFactory> consumeMetricsFactory() {
return new Consumer<MetricsFactory>() {
@Override
public void accept(MetricsFactory metricsFactory) {
JPAConfig jpaConfig = Arc.container().instance(JPAConfig.class).get();
for (Tuple2<String, EntityManagerFactory> emf : jpaConfig.getEntityManagerFactories()) {
SessionFactory sessionFactory = emf.getItem2().unwrap(SessionFactory.class);
if (sessionFactory != null) {
registerMetrics(metricsFactory, emf.getItem1(), sessionFactory.getStatistics());
}
}
}
};
}
/**
* Register MP Metrics
*
* @param metricsFactory Quarkus MetricsFactory for generic metrics registration
* @param puName Name of persistence unit
* @param statistics Statistics MXBean for persistence unit
*/
void registerMetrics(MetricsFactory metricsFactory, String puName, Statistics statistics) {
// Session statistics
createStatisticsCounter(metricsFactory, "hibernate.sessions.open",
"Global number of sessions opened",
puName, statistics, Statistics::getSessionOpenCount);
createStatisticsCounter(metricsFactory, "hibernate.sessions.closed",
"Global number of sessions closed",
puName, statistics, Statistics::getSessionCloseCount);
// Transaction statistics
createStatisticsCounter(metricsFactory, "hibernate.transactions",
"The number of transactions (see result for success or failure)",
puName, statistics, Statistics::getSuccessfulTransactionCount,
"result", "success");
createStatisticsCounter(metricsFactory, "hibernate.transactions",
"The number of transactions (see result for success or failure)",
puName, statistics, s -> s.getTransactionCount() - s.getSuccessfulTransactionCount(),
"result", "failure");
createStatisticsCounter(metricsFactory, "hibernate.optimistic.failures",
"The number of Hibernate StaleObjectStateExceptions or JPA OptimisticLockExceptions that occurred.",
puName, statistics, Statistics::getOptimisticFailureCount);
createStatisticsCounter(metricsFactory, "hibernate.flushes",
"Global number of flush operations executed (either manual or automatic).",
puName, statistics, Statistics::getFlushCount);
createStatisticsCounter(metricsFactory, "hibernate.connections.obtained",
"Get the global number of connections asked by the sessions " +
"(the actual number of connections used may be much smaller depending " +
"whether you use a connection pool or not)",
puName, statistics, Statistics::getConnectCount);
// Statements
createStatisticsCounter(metricsFactory, "hibernate.statements",
"The number of prepared statements (see status for prepared or closed)",
puName, statistics, Statistics::getPrepareStatementCount,
"status", "prepared");
createStatisticsCounter(metricsFactory, "hibernate.statements",
"The number of prepared statements (see status for prepared or closed)",
puName, statistics, Statistics::getCloseStatementCount,
"status", "closed");
// Second Level Caching
Arrays.stream(statistics.getSecondLevelCacheRegionNames())
.filter(regionName -> this.hasDomainDataRegionStatistics(statistics, regionName))
.forEach(regionName -> {
CacheRegionStatistics regionStatistics = statistics.getDomainDataRegionStatistics(regionName);
createStatisticsCounter(metricsFactory, "hibernate.second.level.cache.requests",
"The number of requests made to second level cache (see result for hit or miss)",
puName, regionStatistics, CacheRegionStatistics::getHitCount,
"result", "hit", "region", regionName);
createStatisticsCounter(metricsFactory, "hibernate.second.level.cache.requests",
"The number of requests made to second level cache (see result for hit or miss)",
puName, regionStatistics, CacheRegionStatistics::getMissCount,
"result", "miss", "region", regionName);
createStatisticsCounter(metricsFactory, "hibernate.second.level.cache.puts",
"The number of entities/collections put in the second level cache",
puName, regionStatistics, CacheRegionStatistics::getPutCount,
"region", regionName);
});
// Entity Information
createStatisticsCounter(metricsFactory, "hibernate.entities.loads",
"Global number of entity loads",
puName, statistics, Statistics::getEntityLoadCount);
createStatisticsCounter(metricsFactory, "hibernate.entities.updates",
"Global number of entity updates",
puName, statistics, Statistics::getEntityUpdateCount);
createStatisticsCounter(metricsFactory, "hibernate.entities.inserts",
"Global number of entity inserts",
puName, statistics, Statistics::getEntityInsertCount);
createStatisticsCounter(metricsFactory, "hibernate.entities.deletes",
"Global number of entity deletes",
puName, statistics, Statistics::getEntityDeleteCount);
createStatisticsCounter(metricsFactory, "hibernate.entities.fetches",
"Global number of entity fetches",
puName, statistics, Statistics::getEntityFetchCount);
// Collections
createStatisticsCounter(metricsFactory, "hibernate.collections.loads",
"Global number of collections loaded",
puName, statistics, Statistics::getCollectionLoadCount);
createStatisticsCounter(metricsFactory, "hibernate.collections.updates",
"Global number of collections updated",
puName, statistics, Statistics::getCollectionUpdateCount);
createStatisticsCounter(metricsFactory, "hibernate.collections.deletes",
"Global number of collections removed",
puName, statistics, Statistics::getCollectionRemoveCount);
createStatisticsCounter(metricsFactory, "hibernate.collections.recreates",
"Global number of collections recreated",
puName, statistics, Statistics::getCollectionRecreateCount);
createStatisticsCounter(metricsFactory, "hibernate.collections.fetches",
"Global number of collections fetched",
puName, statistics, Statistics::getCollectionFetchCount);
// Natural Id cache
createStatisticsCounter(metricsFactory, "hibernate.natural.id.requests",
"The number of natural id cache requests (see result for hit or miss)",
puName, statistics, Statistics::getNaturalIdCacheHitCount,
"result", "hit");
createStatisticsCounter(metricsFactory, "hibernate.natural.id.cache.puts",
"The number of cacheable natural id requests put in cache",
puName, statistics, Statistics::getNaturalIdCachePutCount);
createStatisticsCounter(metricsFactory, "hibernate.natural.id.requests",
"The number of natural id cache requests (see result for hit or miss)",
puName, statistics, Statistics::getNaturalIdCacheMissCount,
"result", "miss");
// Natural Id statistics
createStatisticsCounter(metricsFactory, "hibernate.natural.id.executions",
"The number of natural id query executions",
puName, statistics, Statistics::getNaturalIdQueryExecutionCount);
createTimeGauge(metricsFactory, "hibernate.query.natural.id.executions.max",
"The maximum query time for natural id queries executed against the database",
puName, statistics, Statistics::getNaturalIdQueryExecutionMaxTime);
// Query statistics
createStatisticsCounter(metricsFactory, "hibernate.query.executions",
"The number of query executions",
puName, statistics, Statistics::getQueryExecutionCount);
createTimeGauge(metricsFactory, "hibernate.query.executions.max",
"The maximum query time for queries executed against the database",
puName, statistics, Statistics::getQueryExecutionMaxTime);
// Query Cache
createStatisticsCounter(metricsFactory, "hibernate.cache.query.requests",
"The number of query cache requests (see result for hit or miss)",
puName, statistics, Statistics::getQueryCacheHitCount,
"result", "hit");
createStatisticsCounter(metricsFactory, "hibernate.cache.query.requests",
"The number of query cache requests (see result for hit or miss)",
puName, statistics, Statistics::getQueryCacheMissCount,
"result", "miss");
createStatisticsCounter(metricsFactory, "hibernate.cache.query.puts",
"The number of cacheable queries put in cache",
puName, statistics, Statistics::getQueryCachePutCount);
createStatisticsCounter(metricsFactory, "hibernate.cache.query.plan",
"The number of query plan cache requests (see result for hit or miss)",
puName, statistics, Statistics::getQueryPlanCacheHitCount,
"result", "hit");
createStatisticsCounter(metricsFactory, "hibernate.cache.query.plan",
"The number of query plan cache requests (see result for hit or miss)",
puName, statistics, Statistics::getQueryPlanCacheMissCount,
"result", "miss");
// Timestamp cache
createStatisticsCounter(metricsFactory, "hibernate.cache.update.timestamps.requests",
"The number of update timestamps cache requests (see result for hit or miss)",
puName, statistics, Statistics::getUpdateTimestampsCacheHitCount,
"result", "hit");
createStatisticsCounter(metricsFactory, "hibernate.cache.update.timestamps.requests",
"The number of update timestamps cache requests (see result for hit or miss)",
puName, statistics, Statistics::getUpdateTimestampsCacheMissCount,
"result", "miss");
createStatisticsCounter(metricsFactory, "hibernate.cache.update.timestamps.puts",
"The number of update timestamps put in cache",
puName, statistics, Statistics::getUpdateTimestampsCachePutCount);
}
<T> void createStatisticsCounter(MetricsFactory metricsFactory, String metricName, String description,
String puName, T statistics, Function<T, Long> f, String... tags) {
createBuilder(metricsFactory, metricName, description, puName, tags)
.buildCounter(statistics, f);
}
void createTimeGauge(MetricsFactory metricsFactory, String metricName, String description,
String puName, Statistics statistics, Function<Statistics, Long> f, String... tags) {
createBuilder(metricsFactory, metricName, description, puName, tags)
.unit(TimeUnit.MILLISECONDS.toString())
.buildGauge(statistics, f);
}
MetricsFactory.MetricBuilder createBuilder(MetricsFactory metricsFactory, String metricName, String description,
String puName, String... tags) {
MetricsFactory.MetricBuilder builder = metricsFactory.builder(metricName)
.description(description)
.tag(SESSION_FACTORY_TAG_NAME, puName);
// Add (optional) additional tags
if (tags.length > 0 && tags.length % 2 == 0) {
for (int i = 0; i < tags.length; i = i + 2) {
builder.tag(tags[i], tags[i + 1]);
}
}
return builder;
}
private boolean hasDomainDataRegionStatistics(Statistics statistics, String regionName) {
// In 5.1/5.2, getSecondLevelCacheStatistics returns null if the region can't be resolved.
// In 5.3, getDomainDataRegionStatistics (a new method) will throw an IllegalArgumentException
// if the region can't be resolved.
try {
return statistics.getDomainDataRegionStatistics(regionName) != null;
} catch (IllegalArgumentException e) {
return false;
}
}
}
|
HibernateMetricsRecorder
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/time/JodaConstructorsTest.java
|
{
"start": 1221,
"end": 1895
}
|
class ____ {
private static final Duration ONE_MILLI = Duration.millis(1);
private static final Duration ONE_SEC = Duration.standardSeconds(1);
private static final Duration ONE_MIN = Duration.standardMinutes(1);
private static final Duration ONE_HOUR = Duration.standardHours(1);
private static final Duration ONE_DAY = Duration.standardDays(1);
}
""")
.doTest();
}
@Test
public void durationConstructorObject() {
helper
.addSourceLines(
"TestClass.java",
"""
import org.joda.time.Duration;
public
|
TestClass
|
java
|
eclipse-vertx__vert.x
|
vertx-core/src/main/java/io/vertx/core/internal/pool/SimpleConnectionPool.java
|
{
"start": 24313,
"end": 26314
}
|
class ____<C> implements Iterable<PoolWaiter<C>> {
private final PoolWaiter<C> head;
private int size;
public Waiters() {
head = new PoolWaiter<>(null, null, 0, null);
head.next = head.prev = head;
}
PoolWaiter<C> poll() {
if (head.next == head) {
return null;
}
PoolWaiter<C> node = head.next;
remove(node);
return node;
}
void addLast(PoolWaiter<C> node) {
if (node.queued) {
throw new IllegalStateException();
}
node.queued = true;
node.prev = head.prev;
node.next = head;
head.prev.next = node;
head.prev = node;
size++;
}
void addFirst(PoolWaiter<C> node) {
if (node.queued) {
throw new IllegalStateException();
}
node.queued = true;
node.prev = head;
node.next = head.prev;
head.next.prev = node;
head.next = node;
size++;
}
boolean remove(PoolWaiter<C> node) {
if (!node.queued) {
return false;
}
node.next.prev = node.prev;
node.prev.next = node.next;
node.next = node.prev = null;
node.queued = false;
size--;
return true;
}
List<PoolWaiter<C>> clear() {
List<PoolWaiter<C>> lst = new ArrayList<>(size);
this.forEach(lst::add);
size = 0;
head.next = head.prev = head;
return lst;
}
int size() {
return size;
}
@Override
public Iterator<PoolWaiter<C>> iterator() {
return new Iterator<PoolWaiter<C>>() {
PoolWaiter<C> current = head;
@Override
public boolean hasNext() {
return current.next != head;
}
@Override
public PoolWaiter<C> next() {
if (current.next == head) {
throw new NoSuchElementException();
}
try {
return current.next;
} finally {
current = current.next;
}
}
};
}
}
|
Waiters
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/common/logging/ESLogMessage.java
|
{
"start": 1059,
"end": 3815
}
|
class ____ extends MapMessage<ESLogMessage, Object> {
private final List<Object> arguments = new ArrayList<>();
private String messagePattern;
@SuppressWarnings("this-escape")
public ESLogMessage(String messagePattern, Object... args) {
super(new LinkedHashMap<>());
Collections.addAll(this.arguments, args);
this.messagePattern = messagePattern;
Object message = new Object() {
@Override
public String toString() {
return ParameterizedMessage.format(messagePattern, arguments.toArray());
}
};
with("message", message);
}
public ESLogMessage() {
super(new LinkedHashMap<>());
}
public ESLogMessage argAndField(String key, Object value) {
this.arguments.add(value);
super.with(key, value);
return this;
}
public ESLogMessage field(String key, Object value) {
super.with(key, value);
return this;
}
public ESLogMessage withFields(Map<String, Object> prepareMap) {
prepareMap.forEach(this::field);
return this;
}
/**
* This method is used in order to support ESJsonLayout which replaces %CustomMapFields from a pattern with JSON fields
* It is a modified version of {@link MapMessage#asJson(StringBuilder)} where the curly brackets are not added
* @param sb a string builder where JSON fields will be attached
*/
protected void addJsonNoBrackets(StringBuilder sb) {
for (int i = 0; i < getIndexedReadOnlyStringMap().size(); i++) {
if (i > 0) {
sb.append(", ");
}
sb.append(Chars.DQUOTE);
int start = sb.length();
sb.append(getIndexedReadOnlyStringMap().getKeyAt(i));
StringBuilders.escapeJson(sb, start);
sb.append(Chars.DQUOTE).append(':').append(Chars.DQUOTE);
start = sb.length();
Object value = getIndexedReadOnlyStringMap().getValueAt(i);
sb.append(value);
StringBuilders.escapeJson(sb, start);
sb.append(Chars.DQUOTE);
}
}
public static String inQuotes(String s) {
if (s == null) return inQuotes("");
return "\"" + s + "\"";
}
public static String inQuotes(Object s) {
if (s == null) return inQuotes("");
return inQuotes(s.toString());
}
public static String asJsonArray(Stream<String> stream) {
return "[" + stream.map(ESLogMessage::inQuotes).collect(Collectors.joining(", ")) + "]";
}
public Object[] getArguments() {
return arguments.toArray();
}
public String getMessagePattern() {
return messagePattern;
}
}
|
ESLogMessage
|
java
|
elastic__elasticsearch
|
server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotsServiceIT.java
|
{
"start": 1486,
"end": 11774
}
|
class ____ extends AbstractSnapshotIntegTestCase {
public void testDeletingSnapshotsIsLoggedAfterClusterStateIsProcessed() throws Exception {
createRepository("test-repo", "fs");
createIndexWithRandomDocs("test-index", randomIntBetween(1, 42));
createSnapshot("test-repo", "test-snapshot", List.of("test-index"));
try (var mockLog = MockLog.capture(SnapshotsService.class)) {
mockLog.addExpectation(
new MockLog.UnseenEventExpectation(
"[does-not-exist]",
SnapshotsService.class.getName(),
Level.INFO,
"deleting snapshots [does-not-exist] from repository [default/test-repo]"
)
);
mockLog.addExpectation(
new MockLog.SeenEventExpectation(
"[deleting test-snapshot]",
SnapshotsService.class.getName(),
Level.INFO,
"deleting snapshots [test-snapshot] from repository [default/test-repo]"
)
);
mockLog.addExpectation(
new MockLog.SeenEventExpectation(
"[test-snapshot deleted]",
SnapshotsService.class.getName(),
Level.INFO,
"snapshots [test-snapshot/*] deleted in repository [default/test-repo]"
)
);
final SnapshotMissingException e = expectThrows(
SnapshotMissingException.class,
startDeleteSnapshot("test-repo", "does-not-exist")
);
assertThat(e.getMessage(), containsString("[test-repo:does-not-exist] is missing"));
assertThat(startDeleteSnapshot("test-repo", "test-snapshot").actionGet().isAcknowledged(), is(true));
awaitNoMoreRunningOperations(); // ensure background file deletion is completed
mockLog.assertAllExpectationsMatched();
} finally {
deleteRepository("test-repo");
}
}
public void testSnapshotDeletionFailureShouldBeLogged() throws Exception {
createRepository("test-repo", "mock");
createIndexWithRandomDocs("test-index", randomIntBetween(1, 42));
createSnapshot("test-repo", "test-snapshot", List.of("test-index"));
try (var mockLog = MockLog.capture(SnapshotsService.class)) {
mockLog.addExpectation(
new MockLog.SeenEventExpectation(
"[test-snapshot]",
SnapshotsService.class.getName(),
Level.WARN,
"failed to complete snapshot deletion for [test-snapshot] from repository [default/test-repo]"
)
);
if (randomBoolean()) {
// Failure when listing root blobs
final MockRepository mockRepository = getRepositoryOnMaster("test-repo");
mockRepository.setRandomControlIOExceptionRate(1.0);
final Exception e = expectThrows(Exception.class, startDeleteSnapshot("test-repo", "test-snapshot"));
assertThat(e.getCause().getMessage(), containsString("Random IOException"));
} else {
// Failure when finalizing on index-N file
final ActionFuture<AcknowledgedResponse> deleteFuture;
blockMasterFromFinalizingSnapshotOnIndexFile("test-repo");
deleteFuture = startDeleteSnapshot("test-repo", "test-snapshot");
waitForBlock(internalCluster().getMasterName(), "test-repo");
unblockNode("test-repo", internalCluster().getMasterName());
final Exception e = expectThrows(Exception.class, deleteFuture);
assertThat(e.getCause().getMessage(), containsString("exception after block"));
}
mockLog.assertAllExpectationsMatched();
} finally {
deleteRepository("test-repo");
}
}
public void testDeleteSnapshotWhenNotWaitingForCompletion() throws Exception {
createIndexWithRandomDocs("test-index", randomIntBetween(1, 5));
createRepository("test-repo", "mock");
createSnapshot("test-repo", "test-snapshot", List.of("test-index"));
MockRepository repository = getRepositoryOnMaster("test-repo");
PlainActionFuture<AcknowledgedResponse> listener = new PlainActionFuture<>();
SubscribableListener<Void> snapshotDeletionListener = createSnapshotDeletionListener("test-repo");
repository.blockOnDataFiles();
try {
clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snapshot")
.setWaitForCompletion(false)
.execute(listener);
// The request will complete as soon as the deletion is scheduled
safeGet(listener);
// The deletion won't complete until the block is removed
assertFalse(snapshotDeletionListener.isDone());
} finally {
repository.unblock();
}
safeAwait(snapshotDeletionListener);
}
public void testDeleteSnapshotWhenWaitingForCompletion() throws Exception {
createIndexWithRandomDocs("test-index", randomIntBetween(1, 5));
createRepository("test-repo", "mock");
createSnapshot("test-repo", "test-snapshot", List.of("test-index"));
MockRepository repository = getRepositoryOnMaster("test-repo");
PlainActionFuture<AcknowledgedResponse> requestCompleteListener = new PlainActionFuture<>();
SubscribableListener<Void> snapshotDeletionListener = createSnapshotDeletionListener("test-repo");
repository.blockOnDataFiles();
try {
clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snapshot")
.setWaitForCompletion(true)
.execute(requestCompleteListener);
// Neither the request nor the deletion will complete until we remove the block
assertFalse(requestCompleteListener.isDone());
assertFalse(snapshotDeletionListener.isDone());
} finally {
repository.unblock();
}
safeGet(requestCompleteListener);
safeAwait(snapshotDeletionListener);
}
/**
* Create a listener that completes once it has observed a snapshot delete begin and end for a specific repository
*
* @param repositoryName The repository to monitor for deletions
* @return the listener
*/
private SubscribableListener<Void> createSnapshotDeletionListener(String repositoryName) {
AtomicBoolean deleteHasStarted = new AtomicBoolean(false);
return ClusterServiceUtils.addMasterTemporaryStateListener(state -> {
SnapshotDeletionsInProgress deletionsInProgress = (SnapshotDeletionsInProgress) state.getCustoms()
.get(SnapshotDeletionsInProgress.TYPE);
if (deletionsInProgress == null) {
return false;
}
if (deleteHasStarted.get() == false) {
deleteHasStarted.set(deletionsInProgress.hasExecutingDeletion(ProjectId.DEFAULT, repositoryName));
return false;
} else {
return deletionsInProgress.hasExecutingDeletion(ProjectId.DEFAULT, repositoryName) == false;
}
});
}
public void testRerouteWhenShardSnapshotsCompleted() throws Exception {
final var repoName = randomIdentifier();
createRepository(repoName, "mock");
internalCluster().ensureAtLeastNumDataNodes(1);
final var originalNode = internalCluster().startDataOnlyNode();
final var indexName = randomIdentifier();
createIndexWithContent(
indexName,
indexSettings(1, 0).put(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_PREFIX + "._name", originalNode).build()
);
final var snapshotFuture = startFullSnapshotBlockedOnDataNode(randomIdentifier(), repoName, originalNode);
// Use allocation filtering to push the shard to a new node, but it will not do so yet because of the ongoing snapshot.
updateIndexSettings(
Settings.builder()
.putNull(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_PREFIX + "._name")
.put(IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_PREFIX + "._name", originalNode)
);
final var shardMovedListener = ClusterServiceUtils.addMasterTemporaryStateListener(state -> {
final var primaryShard = state.routingTable().index(indexName).shard(0).primaryShard();
return primaryShard.started() && originalNode.equals(state.nodes().get(primaryShard.currentNodeId()).getName()) == false;
});
assertFalse(shardMovedListener.isDone());
unblockAllDataNodes(repoName);
assertEquals(SnapshotState.SUCCESS, snapshotFuture.get(10, TimeUnit.SECONDS).getSnapshotInfo().state());
// Now that the snapshot completed the shard should move to its new location.
safeAwait(shardMovedListener);
ensureGreen(indexName);
}
@TestLogging(reason = "testing task description, logged at DEBUG", value = "org.elasticsearch.cluster.service.MasterService:DEBUG")
public void testCreateSnapshotTaskDescription() {
createIndexWithRandomDocs(randomIdentifier(), randomIntBetween(1, 5));
final var repositoryName = randomIdentifier();
createRepository(repositoryName, "mock");
final var snapshotName = randomIdentifier();
MockLog.assertThatLogger(
() -> createFullSnapshot(repositoryName, snapshotName),
MasterService.class,
new MockLog.SeenEventExpectation(
"executing cluster state update debug message",
MasterService.class.getCanonicalName(),
Level.DEBUG,
"executing cluster state update for [create_snapshot ["
+ snapshotName
+ "][CreateSnapshotTask{repository="
+ repositoryName
+ ", snapshot=*"
+ snapshotName
+ "*}]]"
)
);
}
}
|
SnapshotsServiceIT
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/jpa/criteria/CriteriaToOneIdJoinTest.java
|
{
"start": 2340,
"end": 2619
}
|
class ____ {
@Id
@ManyToOne
private ChildEntity child;
public ParentEntity() {
}
public ParentEntity(ChildEntity child) {
this.child = child;
}
public ChildEntity getChild() {
return child;
}
}
@Entity( name = "ChildEntity" )
public static
|
ParentEntity
|
java
|
quarkusio__quarkus
|
extensions/devui/runtime/src/main/java/io/quarkus/devui/runtime/build/BuildMetricsDevUIController.java
|
{
"start": 8614,
"end": 9414
}
|
class ____ {
public final Set<Node> nodes;
public final Set<Link> links;
public DependencyGraph(Set<Node> nodes, Set<Link> links) {
this.nodes = nodes;
this.links = links;
}
public JsonObject toJson() {
JsonObject dependencyGraph = new JsonObject();
JsonArray nodes = new JsonArray();
JsonArray links = new JsonArray();
for (Node node : this.nodes) {
nodes.add(node.toJson());
}
for (Link link : this.links) {
links.add(link.toJson());
}
dependencyGraph.put("nodes", nodes);
dependencyGraph.put("links", links);
return dependencyGraph;
}
public static
|
DependencyGraph
|
java
|
google__dagger
|
javatests/dagger/internal/codegen/ComponentProcessorTest.java
|
{
"start": 24017,
"end": 24820
}
|
interface ____ {",
" SomeInjectableType someInjectableType();",
" Provider<SimpleComponent> selfProvider();",
"}");
CompilerTests.daggerCompiler(injectableTypeFile, componentFile)
.withProcessingOptions(compilerMode.processorOptions())
.compile(
subject -> {
subject.hasErrorCount(0);
subject.generatedSource(goldenFileRule.goldenSource("test/DaggerSimpleComponent"));
});
}
@Test
public void membersInjectionInsideProvision() throws Exception {
Source injectableTypeFile =
CompilerTests.javaSource(
"test.SomeInjectableType",
"package test;",
"",
"import javax.inject.Inject;",
"",
"final
|
SimpleComponent
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/GoogleSheetsStreamEndpointBuilderFactory.java
|
{
"start": 39382,
"end": 42051
}
|
class ____ {
/**
* The internal instance of the builder used to access to all the
* methods representing the name of headers.
*/
private static final GoogleSheetsStreamHeaderNameBuilder INSTANCE = new GoogleSheetsStreamHeaderNameBuilder();
/**
* Specifies the spreadsheet identifier that is used to identify the
* target to obtain.
*
* The option is a: {@code String} type.
*
* Group: consumer
*
* @return the name of the header {@code GoogleSheetsSpreadsheetId}.
*/
public String googleSheetsSpreadsheetId() {
return "CamelGoogleSheetsSpreadsheetId";
}
/**
* The URL of the spreadsheet.
*
* The option is a: {@code String} type.
*
* Group: consumer
*
* @return the name of the header {@code GoogleSheetsSpreadsheetUrl}.
*/
public String googleSheetsSpreadsheetUrl() {
return "CamelGoogleSheetsSpreadsheetUrl";
}
/**
* The major dimension of the values.
*
* The option is a: {@code String} type.
*
* Group: consumer
*
* @return the name of the header {@code GoogleSheetsMajorDimension}.
*/
public String googleSheetsMajorDimension() {
return "CamelGoogleSheetsMajorDimension";
}
/**
* The range the values cover, in A1 notation.
*
* The option is a: {@code String} type.
*
* Group: consumer
*
* @return the name of the header {@code GoogleSheetsRange}.
*/
public String googleSheetsRange() {
return "CamelGoogleSheetsRange";
}
/**
* The index of the range.
*
* The option is a: {@code int} type.
*
* Group: consumer
*
* @return the name of the header {@code GoogleSheetsRangeIndex}.
*/
public String googleSheetsRangeIndex() {
return "CamelGoogleSheetsRangeIndex";
}
/**
* The index of the value.
*
* The option is a: {@code int} type.
*
* Group: consumer
*
* @return the name of the header {@code GoogleSheetsValueIndex}.
*/
public String googleSheetsValueIndex() {
return "CamelGoogleSheetsValueIndex";
}
}
static GoogleSheetsStreamEndpointBuilder endpointBuilder(String componentName, String path) {
|
GoogleSheetsStreamHeaderNameBuilder
|
java
|
bumptech__glide
|
library/src/main/java/com/bumptech/glide/load/data/mediastore/FileService.java
|
{
"start": 72,
"end": 291
}
|
class ____ {
public boolean exists(File file) {
return file.exists();
}
public long length(File file) {
return file.length();
}
public File get(String path) {
return new File(path);
}
}
|
FileService
|
java
|
spring-projects__spring-framework
|
spring-test/src/test/java/org/springframework/test/context/bean/override/BeanOverrideContextCustomizerTests.java
|
{
"start": 1156,
"end": 2601
}
|
class ____ {
@Test
void customizerIsEqualWithIdenticalMetadata() {
BeanOverrideContextCustomizer customizer = createCustomizer(new DummyBeanOverrideHandler("key"));
BeanOverrideContextCustomizer customizer2 = createCustomizer(new DummyBeanOverrideHandler("key"));
assertThat(customizer).isEqualTo(customizer2);
assertThat(customizer).hasSameHashCodeAs(customizer2);
}
@Test
void customizerIsEqualWithIdenticalMetadataInDifferentOrder() {
BeanOverrideContextCustomizer customizer = createCustomizer(
new DummyBeanOverrideHandler("key1"), new DummyBeanOverrideHandler("key2"));
BeanOverrideContextCustomizer customizer2 = createCustomizer(
new DummyBeanOverrideHandler("key2"), new DummyBeanOverrideHandler("key1"));
assertThat(customizer).isEqualTo(customizer2);
assertThat(customizer).hasSameHashCodeAs(customizer2);
}
@Test
void customizerIsNotEqualWithDifferentMetadata() {
BeanOverrideContextCustomizer customizer = createCustomizer(new DummyBeanOverrideHandler("key"));
BeanOverrideContextCustomizer customizer2 = createCustomizer(
new DummyBeanOverrideHandler("key"), new DummyBeanOverrideHandler("another"));
assertThat(customizer).isNotEqualTo(customizer2);
}
private BeanOverrideContextCustomizer createCustomizer(BeanOverrideHandler... handlers) {
return new BeanOverrideContextCustomizer(new LinkedHashSet<>(Arrays.asList(handlers)));
}
private static
|
BeanOverrideContextCustomizerTests
|
java
|
elastic__elasticsearch
|
x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/expression/function/scalar/string/StringUtils.java
|
{
"start": 524,
"end": 3669
}
|
class ____ {
private StringUtils() {}
/**
* Extracts a substring from string between left and right strings.
* Port of "between" function from the original EQL python implementation.
*
* @param string string to search.
* @param left left bounding substring to search for.
* @param right right bounding substring to search for.
* @param greedy match the longest substring if true.
* @param caseInsensitive match case when searching for {@code left} and {@code right} strings.
* @return the substring in between {@code left} and {@code right} strings.
*/
static String between(String string, String left, String right, boolean greedy, boolean caseInsensitive) {
if (hasLength(string) == false || hasLength(left) == false || hasLength(right) == false) {
return string;
}
String matchString = string;
if (caseInsensitive) {
matchString = matchString.toLowerCase(Locale.ROOT);
left = left.toLowerCase(Locale.ROOT);
right = right.toLowerCase(Locale.ROOT);
}
int idx = matchString.indexOf(left);
if (idx == -1) {
return EMPTY;
}
int start = idx + left.length();
if (greedy) {
idx = matchString.lastIndexOf(right);
} else {
idx = matchString.indexOf(right, start);
}
if (idx == -1) {
return EMPTY;
}
return string.substring(start, idx);
}
/**
* Checks if {@code string} contains {@code substring} string.
*
* @param string string to search through.
* @param substring string to search for.
* @param caseInsensitive toggle for case sensitivity.
* @return {@code true} if {@code string} string contains {@code substring} string.
*/
static boolean stringContains(String string, String substring, boolean caseInsensitive) {
if (hasLength(string) == false || hasLength(substring) == false) {
return false;
}
if (caseInsensitive) {
string = string.toLowerCase(Locale.ROOT);
substring = substring.toLowerCase(Locale.ROOT);
}
return string.contains(substring);
}
/**
* Returns a substring using the Python slice semantics, meaning
* start and end can be negative
*/
static String substringSlice(String string, int start, int end) {
if (hasLength(string) == false) {
return string;
}
int length = string.length();
// handle first negative values
if (start < 0) {
start += length;
}
if (start < 0) {
start = 0;
}
if (end < 0) {
end += length;
}
if (end < 0) {
end = 0;
} else if (end > length) {
end = length;
}
if (start >= end) {
return org.elasticsearch.xpack.ql.util.StringUtils.EMPTY;
}
return Strings.substring(string, start, end);
}
}
|
StringUtils
|
java
|
apache__flink
|
flink-core/src/main/java/org/apache/flink/core/fs/RefCountedFSOutputStream.java
|
{
"start": 1084,
"end": 1570
}
|
class ____ extends FSDataOutputStream implements RefCounted {
/**
* Gets the underlying {@link File} that allows to read the contents of the file.
*
* @return A handle to the File object.
*/
public abstract File getInputFile();
/**
* Checks if the file is closed for writes.
*
* @return {@link true} if the file is closed, {@link false} otherwise.
*/
public abstract boolean isClosed() throws IOException;
}
|
RefCountedFSOutputStream
|
java
|
quarkusio__quarkus
|
independent-projects/arc/processor/src/main/java/io/quarkus/arc/processor/InterceptorGenerator.java
|
{
"start": 1616,
"end": 20158
}
|
class ____ extends BeanGenerator {
protected static final String FIELD_NAME_BINDINGS = "bindings";
public InterceptorGenerator(AnnotationLiteralProcessor annotationLiterals, Predicate<DotName> applicationClassPredicate,
PrivateMembersCollector privateMembers, boolean generateSources, ReflectionRegistration reflectionRegistration,
Set<String> existingClasses, Map<BeanInfo, String> beanToGeneratedName,
Predicate<DotName> injectionPointAnnotationsPredicate) {
super(annotationLiterals, applicationClassPredicate, privateMembers, generateSources, reflectionRegistration,
existingClasses, beanToGeneratedName, injectionPointAnnotationsPredicate, Collections.emptyList());
}
/**
* Precompute the generated name for the given interceptor so that the {@link ComponentsProviderGenerator}
* can be executed before all interceptors metadata are generated.
*
* @param interceptor
*/
void precomputeGeneratedName(InterceptorInfo interceptor) {
String baseName;
String targetPackage;
if (interceptor.isSynthetic()) {
DotName creatorClassName = DotName.createSimple(interceptor.getCreatorClass());
baseName = InterceptFunction.class.getSimpleName() + "_" + interceptor.getIdentifier();
targetPackage = DotNames.packagePrefix(creatorClassName);
} else {
ClassInfo interceptorClass = interceptor.getTarget().get().asClass();
baseName = interceptorClass.name().withoutPackagePrefix();
targetPackage = DotNames.packagePrefix(interceptor.getProviderType().name());
}
beanToGeneratedBaseName.put(interceptor, baseName);
String generatedName = generatedNameFromTarget(targetPackage, baseName, BEAN_SUFFIX);
beanToGeneratedName.put(interceptor, generatedName);
}
/**
*
* @param interceptor bean
* @return a collection of resources
*/
Collection<Resource> generate(InterceptorInfo interceptor) {
DotName targetPackageClassName = interceptor.isSynthetic()
? DotName.createSimple(interceptor.getCreatorClass())
: interceptor.getProviderType().name();
DotName isApplicationClassName = interceptor.isSynthetic()
? targetPackageClassName
: interceptor.getBeanClass();
String baseName = beanToGeneratedBaseName.get(interceptor);
String targetPackage = DotNames.packagePrefix(targetPackageClassName);
String generatedName = beanToGeneratedName.get(interceptor);
if (existingClasses.contains(generatedName)) {
return Collections.emptyList();
}
boolean isApplicationClass = applicationClassPredicate.test(isApplicationClassName)
|| interceptor.isForceApplicationClass();
ResourceClassOutput classOutput = new ResourceClassOutput(isApplicationClass,
name -> name.equals(generatedName) ? SpecialType.INTERCEPTOR_BEAN : null, generateSources);
Gizmo gizmo = gizmo(classOutput);
generateInterceptor(gizmo, interceptor, generatedName, baseName, targetPackage, isApplicationClass);
return classOutput.getResources();
}
private void generateInterceptor(Gizmo gizmo, InterceptorInfo interceptor, String generatedName, String baseName,
String targetPackage, boolean isApplicationClass) {
gizmo.class_(generatedName, cc -> {
cc.implements_(InjectableInterceptor.class);
cc.implements_(Supplier.class);
FieldDesc beanTypesField = cc.field(FIELD_NAME_BEAN_TYPES, fc -> {
fc.private_();
fc.final_();
fc.setType(Set.class);
});
FieldDesc bindingsField = cc.field(FIELD_NAME_BINDINGS, fc -> {
fc.private_();
fc.final_();
fc.setType(Set.class);
});
Map<InjectionPointInfo, FieldDesc> injectionPointToProviderField = new HashMap<>();
generateProviderFields(interceptor, cc, injectionPointToProviderField, Map.of(), Map.of());
generateConstructor(cc, interceptor, beanTypesField, bindingsField, injectionPointToProviderField,
isApplicationClass, interceptor.isSynthetic() ? bc -> {
SyntheticComponentsUtil.addParamsFieldAndInit(cc, bc, interceptor.getParams(),
annotationLiterals, interceptor.getDeployment().getBeanArchiveIndex());
} : ignored -> {
});
generateCreate(cc, interceptor, new ProviderType(interceptor.getProviderType()), baseName,
injectionPointToProviderField, Map.of(), Map.of(), targetPackage, isApplicationClass);
generateSupplierGet(cc);
generateInjectableReferenceProviderGet(interceptor, cc, baseName);
generateGetIdentifier(cc, interceptor);
generateGetTypes(beanTypesField, cc);
// always `@Dependent` -- no need to `generateGetScope()`
// always default qualifiers -- no need to `generateGetQualifiers()`
// never an alternative -- no need to `generateIsAlternative()`
generateGetPriority(cc, interceptor);
// never any stereotypes -- no need to `generateGetStereotypes()`
generateGetBeanClass(cc, interceptor);
// never named -- no need to `generateGetName()`
// never default bean -- no need to `generateIsDefaultBean()`
// `InjectableInterceptor.getKind()` always returns `Kind.INTERCEPTOR` -- no need to `generateGetKind()`
// never suppressed -- no need to `generateIsSuppressed()`
generateGetInjectionPoints(cc, interceptor);
generateEquals(cc, interceptor);
generateHashCode(cc, interceptor);
generateToString(cc);
generateGetInterceptorBindings(cc, bindingsField);
generateIntercepts(cc, interceptor);
generateIntercept(cc, interceptor, isApplicationClass);
});
}
private void generateConstructor(ClassCreator cc, InterceptorInfo interceptor, FieldDesc beanTypesField,
FieldDesc bindingsField, Map<InjectionPointInfo, FieldDesc> injectionPointToProviderField,
boolean isApplicationClass, Consumer<BlockCreator> additionalCode) {
super.generateConstructor(cc, interceptor, beanTypesField, null, null, null, injectionPointToProviderField,
Map.of(), Map.of(), bc -> {
LocalVar bindings = bc.localVar("bindings", bc.new_(HashSet.class));
for (AnnotationInstance binding : interceptor.getBindings()) {
ClassInfo bindingClass = interceptor.getDeployment().getInterceptorBinding(binding.name());
bc.withSet(bindings).add(annotationLiterals.create(bc, bindingClass, binding));
}
bc.set(cc.this_().field(bindingsField), bindings);
// Initialize a list of BiFunction for each interception type if multiple interceptor methods are declared in a hierarchy
ClassDesc interceptorClass = classDescOf(interceptor.getProviderType());
generateInterceptorMethodsField(cc, bc, InterceptionType.AROUND_INVOKE,
interceptor.getAroundInvokes(), interceptorClass, isApplicationClass);
generateInterceptorMethodsField(cc, bc, InterceptionType.AROUND_CONSTRUCT,
interceptor.getAroundConstructs(), interceptorClass, isApplicationClass);
generateInterceptorMethodsField(cc, bc, InterceptionType.POST_CONSTRUCT,
interceptor.getPostConstructs(), interceptorClass, isApplicationClass);
generateInterceptorMethodsField(cc, bc, InterceptionType.PRE_DESTROY,
interceptor.getPreDestroys(), interceptorClass, isApplicationClass);
additionalCode.accept(bc);
});
}
private void generateInterceptorMethodsField(ClassCreator cc, BlockCreator bc, InterceptionType interceptionType,
List<MethodInfo> methods, ClassDesc interceptorClass, boolean isApplicationClass) {
if (methods.size() < 2) {
// if there's just one interceptor method, we'll generate a more streamlined code, see `generateIntercept()`
return;
}
FieldDesc fieldDesc = cc.field(interceptorMethodsField(interceptionType), fc -> {
fc.private_();
fc.final_();
fc.setType(List.class);
});
LocalVar list = bc.localVar(fieldDesc.name(), bc.new_(ArrayList.class));
for (MethodInfo method : methods) {
Expr bifunc = bc.lambda(BiFunction.class, lc -> {
ParamVar interceptor = lc.parameter("interceptor", 0);
ParamVar invocationContext = lc.parameter("invocationContext", 1);
lc.body(lbc -> {
Expr result = invokeInterceptorMethod(lbc, interceptorClass, method, interceptionType,
isApplicationClass, invocationContext, interceptor);
lbc.return_(interceptionType == InterceptionType.AROUND_INVOKE ? result : Const.ofNull(Object.class));
});
});
bc.withList(list).add(bifunc);
}
bc.set(cc.this_().field(fieldDesc), list);
}
/**
* @see InjectableBean#getBeanClass()
*/
protected void generateGetBeanClass(ClassCreator cc, InterceptorInfo interceptor) {
cc.method("getBeanClass", mc -> {
mc.returning(Class.class);
mc.body(bc -> {
bc.return_(interceptor.isSynthetic()
? Const.of(interceptor.getCreatorClass())
: Const.of(classDescOf(interceptor.getBeanClass())));
});
});
}
/**
* @see InjectableInterceptor#getInterceptorBindings()
*/
protected void generateGetInterceptorBindings(ClassCreator cc, FieldDesc bindingsField) {
cc.method("getInterceptorBindings", mc -> {
mc.returning(Set.class);
mc.body(bc -> {
bc.return_(cc.this_().field(bindingsField));
});
});
}
/**
* @see InjectableInterceptor#intercepts(jakarta.enterprise.inject.spi.InterceptionType)
*/
protected void generateIntercepts(ClassCreator cc, InterceptorInfo interceptor) {
cc.method("intercepts", mc -> {
mc.returning(boolean.class);
ParamVar interceptionType = mc.parameter("interceptionType", InterceptionType.class);
mc.body(bc -> {
if (interceptor.isSynthetic()) {
FieldVar enumValue = Expr.staticField(FieldDesc.of(InterceptionType.class,
interceptor.getInterceptionType().name()));
bc.return_(bc.eq(enumValue, interceptionType));
} else {
generateIntercepts(interceptor, InterceptionType.AROUND_INVOKE, bc, interceptionType);
generateIntercepts(interceptor, InterceptionType.POST_CONSTRUCT, bc, interceptionType);
generateIntercepts(interceptor, InterceptionType.PRE_DESTROY, bc, interceptionType);
generateIntercepts(interceptor, InterceptionType.AROUND_CONSTRUCT, bc, interceptionType);
bc.returnFalse();
}
});
});
}
private void generateIntercepts(InterceptorInfo interceptor, InterceptionType interceptionType, BlockCreator bc,
ParamVar interceptionTypeParam) {
if (interceptor.intercepts(interceptionType)) {
FieldVar enumValue = Expr.staticField(FieldDesc.of(InterceptionType.class, interceptionType.name()));
bc.if_(bc.eq(enumValue, interceptionTypeParam), BlockCreator::returnTrue);
}
}
/**
* @see InjectableInterceptor#intercept(InterceptionType, Object, jakarta.interceptor.InvocationContext)
*/
protected void generateIntercept(ClassCreator cc, InterceptorInfo interceptor, boolean isApplicationClass) {
cc.method("intercept", mc -> {
mc.returning(Object.class);
ParamVar interceptionType = mc.parameter("interceptionType", InterceptionType.class);
ParamVar interceptorInstance = mc.parameter("interceptorInstance", Object.class);
ParamVar invocationContext = mc.parameter("invocationContext", InvocationContext.class);
mc.body(b0 -> {
if (interceptor.isSynthetic()) {
b0.if_(b0.eq(Const.of(interceptor.getInterceptionType()), interceptionType), b1 -> {
Expr interceptFunction = b1.cast(interceptorInstance, InterceptFunction.class);
b1.return_(b1.invokeInterface(MethodDescs.INTERCEPT_FUNCTION_INTERCEPT, interceptFunction,
invocationContext));
});
} else {
ClassDesc interceptorClass = classDescOf(interceptor.getProviderType());
generateIntercept(cc, b0, interceptor.getAroundInvokes(), InterceptionType.AROUND_INVOKE,
interceptorClass, isApplicationClass, interceptionType, interceptorInstance, invocationContext);
generateIntercept(cc, b0, interceptor.getPostConstructs(), InterceptionType.POST_CONSTRUCT,
interceptorClass, isApplicationClass, interceptionType, interceptorInstance, invocationContext);
generateIntercept(cc, b0, interceptor.getPreDestroys(), InterceptionType.PRE_DESTROY,
interceptorClass, isApplicationClass, interceptionType, interceptorInstance, invocationContext);
generateIntercept(cc, b0, interceptor.getAroundConstructs(), InterceptionType.AROUND_CONSTRUCT,
interceptorClass, isApplicationClass, interceptionType, interceptorInstance, invocationContext);
}
b0.return_(Const.ofNull(Object.class));
});
});
}
private void generateIntercept(ClassCreator cc, BlockCreator b0, List<MethodInfo> interceptorMethods,
InterceptionType interceptionType, ClassDesc interceptorClass, boolean isApplicationClass,
ParamVar interceptionTypeParam, ParamVar interceptorInstanceParam, ParamVar invocationContextParam) {
if (interceptorMethods.isEmpty()) {
return;
}
b0.if_(b0.eq(Const.of(interceptionType), interceptionTypeParam), b1 -> {
Expr result;
if (interceptorMethods.size() == 1) {
MethodInfo interceptorMethod = interceptorMethods.get(0);
result = invokeInterceptorMethod(b1, interceptorClass, interceptorMethod,
interceptionType, isApplicationClass, invocationContextParam, interceptorInstanceParam);
} else {
// Multiple interceptor methods found in the hierarchy
Expr list = cc.this_().field(FieldDesc.of(cc.type(), interceptorMethodsField(interceptionType), List.class));
Expr params;
if (interceptionType == InterceptionType.AROUND_INVOKE) {
params = b1.invokeInterface(MethodDescs.INVOCATION_CONTEXT_GET_PARAMETERS, invocationContextParam);
} else {
params = Const.ofNull(Object[].class);
}
result = b1.invokeStatic(MethodDescs.INVOCATION_CONTEXTS_PERFORM_SUPERCLASS,
invocationContextParam, list, interceptorInstanceParam, params);
}
b1.return_(InterceptionType.AROUND_INVOKE == interceptionType ? result : Const.ofNull(Object.class));
});
}
private String interceptorMethodsField(InterceptionType interceptionType) {
return switch (interceptionType) {
case AROUND_INVOKE -> "aroundInvokes";
case AROUND_CONSTRUCT -> "aroundConstructs";
case POST_CONSTRUCT -> "postConstructs";
case PRE_DESTROY -> "preDestroys";
default -> throw new IllegalArgumentException("Unsupported interception type: " + interceptionType);
};
}
/**
 * Emits bytecode that invokes a single interceptor method on the given interceptor
 * instance and returns the {@code Expr} holding the invocation result.
 * <p>
 * Non-private methods are called directly via a virtual call; private methods are
 * registered for runtime reflection and routed through the reflective helper in
 * {@code MethodDescs.REFLECTIONS_INVOKE_METHOD}.
 *
 * @param interceptorClass bytecode-level descriptor of the interceptor class
 * @param interceptorMethod the interceptor/lifecycle method to invoke
 * @param isApplicationClass whether the declaring class belongs to the application
 *        (passed through to {@code privateMembers} when reporting private usage)
 * @param invocationContext the context argument handed to the interceptor method
 * @param interceptorInstance the receiver of the call
 */
private Expr invokeInterceptorMethod(BlockCreator bc, ClassDesc interceptorClass, MethodInfo interceptorMethod,
InterceptionType interceptionType, boolean isApplicationClass, ParamVar invocationContext,
ParamVar interceptorInstance) {
// @AroundInvoke methods always yield Object; lifecycle callbacks are typically
// void, but a declared return type is erased to Object for the call site.
Class<?> resultType;
if (InterceptionType.AROUND_INVOKE.equals(interceptionType)) {
resultType = Object.class;
} else {
// @PostConstruct, @PreDestroy, @AroundConstruct
resultType = interceptorMethod.returnType().kind().equals(Type.Kind.VOID) ? void.class : Object.class;
}
// Check if interceptor method uses InvocationContext or ArcInvocationContext;
// the declared type of the first parameter decides the call-site signature.
Class<?> invocationContextClass;
if (interceptorMethod.parameterType(0).name().equals(DotNames.INVOCATION_CONTEXT)) {
invocationContextClass = InvocationContext.class;
} else {
invocationContextClass = ArcInvocationContext.class;
}
if (Modifier.isPrivate(interceptorMethod.flags())) {
// Private method: cannot be invoked directly from the generated class.
// Record the usage (presumably surfaced as a build-time diagnostic —
// confirm against privateMembers' consumer), make the method reflectively
// accessible at runtime, and dispatch through the Reflections helper.
privateMembers.add(isApplicationClass, String.format("Interceptor method %s#%s()",
interceptorMethod.declaringClass().name(), interceptorMethod.name()));
reflectionRegistration.registerMethod(interceptorMethod);
Expr paramTypes = bc.newArray(Class.class, Const.of(invocationContextClass));
Expr args = bc.newArray(Object.class, invocationContext);
return bc.invokeStatic(MethodDescs.REFLECTIONS_INVOKE_METHOD,
Const.of(classDescOf(interceptorMethod.declaringClass())),
Const.of(interceptorMethod.name()), paramTypes, interceptorInstance, args);
} else {
// Accessible method: plain virtual call on the interceptor instance.
return bc.invokeVirtual(ClassMethodDesc.of(interceptorClass, interceptorMethod.name(),
resultType, invocationContextClass), interceptorInstance, invocationContext);
}
}
}
|
InterceptorGenerator
|
java
|
apache__camel
|
components/camel-dhis2/camel-dhis2-component/src/test/java/org/apache/camel/component/dhis2/Environment.java
|
{
"start": 1612,
"end": 5428
}
|
/**
 * Shared integration-test environment: boots a PostgreSQL (PostGIS) container and a
 * DHIS2 server container via Testcontainers, then seeds the server with test fixtures
 * (organisation units, an org-unit level, a personal access token).
 * <p>
 * Everything is initialised once in the static initialiser; the containers are left
 * running for the lifetime of the test JVM (Testcontainers' reaper cleans them up).
 * Initialisation order matters: DB container, then DHIS2 container, then client,
 * then fixture creation.
 */
class ____ {
// Client authenticated against the containerised DHIS2 instance (admin/district).
public static final Dhis2Client DHIS2_CLIENT;
// Personal access token for the admin user, allowing GET/POST/PUT/PATCH/DELETE.
public static final String PERSONAL_ACCESS_TOKEN;
// Id of the "Acme" organisation unit that tests operate on.
public static final String ORG_UNIT_ID_UNDER_TEST;
// Dedicated network so the DHIS2 container can reach the database by alias "db".
private static final Network NETWORK = Network.newNetwork();
private static final PostgreSQLContainer<?> POSTGRESQL_CONTAINER;
private static final GenericContainer<?> DHIS2_CONTAINER;
// Utility holder — not meant to be instantiated.
private Environment() {
}
static {
// PostGIS image substituted for the stock postgres image expected by the
// PostgreSQLContainer module; DHIS2 requires the PostGIS extension.
POSTGRESQL_CONTAINER = new PostgreSQLContainer<>(
DockerImageName.parse("postgis/postgis:12-3.2-alpine").asCompatibleSubstituteFor("postgres"))
.withDatabaseName("dhis2")
.withNetworkAliases("db")
.withUsername("dhis")
.withPassword("dhis").withNetwork(NETWORK);
POSTGRESQL_CONTAINER.start();
// DHIS2 server wired to the database above; waits for HTTP 200 on the mapped
// port (up to 6 minutes) before the initialiser proceeds.
DHIS2_CONTAINER = new GenericContainer<>(
"dhis2/core:2.40.2.1")
.dependsOn(POSTGRESQL_CONTAINER)
.withClasspathResourceMapping("dhis.conf", "/opt/dhis2/dhis.conf", BindMode.READ_WRITE)
.withNetwork(NETWORK).withExposedPorts(8080)
.waitingFor(
new HttpWaitStrategy().forStatusCode(200).withStartupTimeout(Duration.ofSeconds(360)))
.withEnv("WAIT_FOR_DB_CONTAINER", "db" + ":" + 5432 + " -t 0");
DHIS2_CONTAINER.start();
// Client targets the host-mapped port of the container's 8080.
DHIS2_CLIENT = Dhis2ClientBuilder.newClient(
"http://" + Environment.getDhis2Container().getHost() + ":" + Environment.getDhis2Container()
.getFirstMappedPort() + "/api",
"admin", "district").build();
// Seed fixtures. "EvilCorp" exists only as a decoy org unit; tests use "Acme".
createOrgUnit("EvilCorp");
ORG_UNIT_ID_UNDER_TEST = createOrgUnit("Acme");
createOrgUnitLevel();
addOrgUnitToUser(ORG_UNIT_ID_UNDER_TEST);
PERSONAL_ACCESS_TOKEN = createPersonalAccessToken();
}
// Creates an effectively non-expiring API token restricted to the listed HTTP
// methods and returns its key from the response payload.
private static String createPersonalAccessToken() {
return DHIS2_CLIENT
.post("apiToken")
.withResource(
new ApiToken()
.withAttributes(
List.of(
Map.of(
"type",
"MethodAllowedList",
"allowedMethods",
List.of("GET", "POST", "PUT", "PATCH", "DELETE"))))
.withExpire(Long.MAX_VALUE))
.transfer()
.returnAs(WebMessage.class)
.getResponse()
.get().get("key");
}
// Creates an organisation unit opened "now" and returns its DHIS2 uid.
private static String createOrgUnit(String name) {
OrganisationUnit organisationUnit = new OrganisationUnit().withName(name).withShortName(name)
.withOpeningDate(new Date());
return DHIS2_CLIENT.post("organisationUnits").withResource(organisationUnit)
.transfer()
.returnAs(WebMessage.class).getResponse().get().get("uid");
}
// Registers a single "Level 1" org-unit level so the seeded units are addressable.
private static void createOrgUnitLevel() {
OrganisationUnitLevel organisationUnitLevel = new OrganisationUnitLevel().withName("Level 1")
.with("level", 1);
DHIS2_CLIENT.post("filledOrganisationUnitLevels").withResource(organisationUnitLevel).transfer();
}
// Assigns the org unit to user M5zQapPyTZI — presumably the built-in admin user
// of the dhis2/core image; TODO confirm the id is stable across image versions.
private static void addOrgUnitToUser(String orgUnitId) {
DHIS2_CLIENT.post("users/M5zQapPyTZI/organisationUnits/{organisationUnitId}", orgUnitId).transfer();
}
/** Returns the running DHIS2 server container. */
public static GenericContainer<?> getDhis2Container() {
return DHIS2_CONTAINER;
}
/** Returns the client bound to the containerised DHIS2 instance. */
public static Dhis2Client getDhis2Client() {
return DHIS2_CLIENT;
}
}
|
Environment
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.