language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
apache__camel
|
dsl/camel-kamelet-main/src/main/java/org/apache/camel/main/download/DependencyDownloaderPeriodTaskResolver.java
|
{
"start": 1163,
"end": 4687
}
|
class ____ extends DefaultPeriodTaskResolver {
private final DependencyDownloader downloader;
private final CamelContext camelContext;
private final String camelVersion;
private final boolean export;
public DependencyDownloaderPeriodTaskResolver(FactoryFinder finder, CamelContext camelContext, String camelVersion,
boolean export) {
super(finder);
this.camelContext = camelContext;
this.camelVersion = camelVersion;
this.downloader = camelContext.hasService(DependencyDownloader.class);
this.export = export;
}
@Override
public Optional<Object> newInstance(String key) {
maybeDownload(key);
if (export && skip(key)) {
return Optional.empty();
}
Optional<Object> answer = super.newInstance(key);
if (answer.isEmpty()) {
// need to use regular factory finder as bootstrap has already marked as a miss
final FactoryFinder finder
= camelContext.getCamelContextExtension().getFactoryFinder(PeriodTaskResolver.RESOURCE_PATH);
Object obj = ResolverHelper.resolveService(camelContext, finder, key, Object.class).orElse(null);
return Optional.ofNullable(obj);
}
return answer;
}
@Override
public <T> Optional<T> newInstance(String key, Class<T> type) {
maybeDownload(key);
if (export && skip(key)) {
return Optional.empty();
}
Optional<T> answer = super.newInstance(key, type);
if (answer.isEmpty()) {
// need to use regular factory finder as bootstrap has already marked as a miss
final FactoryFinder finder
= camelContext.getCamelContextExtension().getFactoryFinder(PeriodTaskResolver.RESOURCE_PATH);
T obj = ResolverHelper.resolveService(camelContext, finder, key, type).orElse(null);
return Optional.ofNullable(obj);
}
return answer;
}
private void maybeDownload(String key) {
if ("aws-secret-refresh".equals(key)) {
downloadLoader("camel-aws-secrets-manager");
} else if ("gcp-secret-refresh".equals(key)) {
downloadLoader("camel-google-secret-manager");
} else if ("azure-secret-refresh".equals(key)) {
downloadLoader("camel-azure-key-vault");
} else if ("kubernetes-secret-refresh".equals(key)) {
downloadLoader("camel-kubernetes");
} else if ("kubernetes-configmaps-refresh".equals(key)) {
downloadLoader("camel-kubernetes");
}
}
private void downloadLoader(String artifactId) {
String resolvedCamelVersion = camelContext.getVersion();
if (ObjectHelper.isEmpty(resolvedCamelVersion)) {
resolvedCamelVersion = camelVersion;
}
if (!downloader.alreadyOnClasspath("org.apache.camel", artifactId, resolvedCamelVersion)) {
downloader.downloadDependency("org.apache.camel", artifactId, resolvedCamelVersion);
}
}
private boolean skip(String key) {
// skip all vault refresh during export as they will attempt to connect to remote system
return "aws-secret-refresh".equals(key) || "gcp-secret-refresh".equals(key) || "azure-secret-refresh".equals(key)
|| "kubernetes-secret-refresh".equals(key) || "kubernetes-configmaps-refresh".equals(key);
}
}
|
DependencyDownloaderPeriodTaskResolver
|
java
|
quarkusio__quarkus
|
independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/buildextension/beans/SyntheticBeanWithStereotypeTest.java
|
{
"start": 2597,
"end": 2732
}
|
interface ____ {
}
@Target({ TYPE, METHOD, FIELD, PARAMETER })
@Retention(RUNTIME)
@InterceptorBinding
@
|
ToBeStereotype
|
java
|
mapstruct__mapstruct
|
processor/src/test/java/org/mapstruct/ap/test/bugs/_1997/CarDetail.java
|
{
"start": 590,
"end": 847
}
|
class ____ {
private String model;
public Builder model(String model) {
this.model = model;
return this;
}
public CarDetail build() {
return new CarDetail( this );
}
}
}
|
Builder
|
java
|
processing__processing4
|
java/src/processing/mode/java/JavaProblem.java
|
{
"start": 924,
"end": 1059
}
|
class ____ IProblem that stores the tabIndex and line number
* according to its tab, including the original IProblem object
*/
public
|
for
|
java
|
spring-projects__spring-security
|
web/src/main/java/org/springframework/security/web/header/writers/CrossOriginResourcePolicyHeaderWriter.java
|
{
"start": 1186,
"end": 2013
}
|
class ____ implements HeaderWriter {
private static final String RESOURCE_POLICY = "Cross-Origin-Resource-Policy";
private @Nullable CrossOriginResourcePolicy policy;
/**
* Sets the {@link CrossOriginResourcePolicy} value to be used in the
* {@code Cross-Origin-Resource-Policy} header
* @param resourcePolicy the {@link CrossOriginResourcePolicy} to use
*/
public void setPolicy(CrossOriginResourcePolicy resourcePolicy) {
Assert.notNull(resourcePolicy, "resourcePolicy cannot be null");
this.policy = resourcePolicy;
}
@Override
public void writeHeaders(HttpServletRequest request, HttpServletResponse response) {
if (this.policy != null && !response.containsHeader(RESOURCE_POLICY)) {
response.addHeader(RESOURCE_POLICY, this.policy.getPolicy());
}
}
public
|
CrossOriginResourcePolicyHeaderWriter
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/processor/aggregator/AggregationStrategyBeanAdapterAllowNullOldExchangeTest.java
|
{
"start": 2079,
"end": 2417
}
|
class ____ {
public String append(String existing, String next) {
if (existing == null) {
return "OldWasNull" + next;
}
if (next != null) {
return existing + next;
} else {
return existing;
}
}
}
}
|
MyBodyAppender
|
java
|
apache__camel
|
test-infra/camel-test-infra-rocketmq/src/test/java/org/apache/camel/test/infra/rocketmq/services/RocketMQService.java
|
{
"start": 1047,
"end": 1141
}
|
interface ____ extends TestService, RocketMQInfraService, ContainerTestService {
}
|
RocketMQService
|
java
|
apache__flink
|
flink-tests/src/test/java/org/apache/flink/test/checkpointing/utils/SnapshotMigrationTestBase.java
|
{
"start": 3331,
"end": 3642
}
|
enum ____ {
/** Create binary snapshot(s), i.e. run the checkpointing functions. */
CREATE_SNAPSHOT,
/** Verify snapshot(s), i.e, restore snapshot and check execution result. */
VERIFY_SNAPSHOT
}
/** Types of snapshot supported by this base test. */
public
|
ExecutionMode
|
java
|
quarkusio__quarkus
|
extensions/resteasy-reactive/rest/deployment/src/test/java/io/quarkus/resteasy/reactive/server/test/security/AnnotationBasedAuthMechanismSelectionTest.java
|
{
"start": 17626,
"end": 17906
}
|
class
____ "custom-inherited";
}
@Path("default-impl-custom-class-level-interface")
@GET
@Override
public String defaultImplementedClassLevelInterfaceMethod() {
// here we repeated Path annotation, therefore this
|
return
|
java
|
ReactiveX__RxJava
|
src/main/java/io/reactivex/rxjava3/internal/operators/completable/CompletableSubscribeOn.java
|
{
"start": 1478,
"end": 2717
}
|
class ____
extends AtomicReference<Disposable>
implements CompletableObserver, Disposable, Runnable {
private static final long serialVersionUID = 7000911171163930287L;
final CompletableObserver downstream;
final SequentialDisposable task;
final CompletableSource source;
SubscribeOnObserver(CompletableObserver actual, CompletableSource source) {
this.downstream = actual;
this.source = source;
this.task = new SequentialDisposable();
}
@Override
public void run() {
source.subscribe(this);
}
@Override
public void onSubscribe(Disposable d) {
DisposableHelper.setOnce(this, d);
}
@Override
public void onError(Throwable e) {
downstream.onError(e);
}
@Override
public void onComplete() {
downstream.onComplete();
}
@Override
public void dispose() {
DisposableHelper.dispose(this);
task.dispose();
}
@Override
public boolean isDisposed() {
return DisposableHelper.isDisposed(get());
}
}
}
|
SubscribeOnObserver
|
java
|
elastic__elasticsearch
|
x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/Email.java
|
{
"start": 9466,
"end": 13443
}
|
class ____ {
private String id;
private Address from;
private AddressList replyTo;
private Priority priority;
private ZonedDateTime sentDate;
private AddressList to;
private AddressList cc;
private AddressList bcc;
private String subject;
private String textBody;
private String htmlBody;
private Map<String, Attachment> attachments = new HashMap<>();
private Builder() {}
public Builder copyFrom(Email email) {
id = email.id;
from = email.from;
replyTo = email.replyTo;
priority = email.priority;
sentDate = email.sentDate;
to = email.to;
cc = email.cc;
bcc = email.bcc;
subject = email.subject;
textBody = email.textBody;
htmlBody = email.htmlBody;
attachments.putAll(email.attachments);
return this;
}
public Builder id(String id) {
this.id = id;
return this;
}
public Builder from(String address) throws AddressException {
return from(new Address(address));
}
public Builder from(Address from) {
this.from = from;
return this;
}
public Builder replyTo(AddressList replyTo) {
this.replyTo = replyTo;
return this;
}
public Builder replyTo(String addresses) throws AddressException {
return replyTo(Email.AddressList.parse(addresses));
}
public Builder priority(Priority priority) {
this.priority = priority;
return this;
}
public Builder sentDate(ZonedDateTime sentDate) {
this.sentDate = sentDate;
return this;
}
public Builder to(String addresses) throws AddressException {
return to(AddressList.parse(addresses));
}
public Builder to(AddressList to) {
this.to = to;
return this;
}
public AddressList to() {
return to;
}
public Builder cc(String addresses) throws AddressException {
return cc(AddressList.parse(addresses));
}
public Builder cc(AddressList cc) {
this.cc = cc;
return this;
}
public Builder bcc(String addresses) throws AddressException {
return bcc(AddressList.parse(addresses));
}
public Builder bcc(AddressList bcc) {
this.bcc = bcc;
return this;
}
public Builder subject(String subject) {
this.subject = subject;
return this;
}
public Builder textBody(String text) {
this.textBody = text;
return this;
}
public Builder htmlBody(String html) {
this.htmlBody = html;
return this;
}
public Builder attach(Attachment attachment) {
if (attachments == null) {
throw new IllegalStateException("Email has already been built!");
}
attachments.put(attachment.id(), attachment);
return this;
}
/**
* Build the email. Note that adding items to attachments or inlines
* after this is called is incorrect.
*/
public Email build() {
assert id != null : "email id should not be null";
Email email = new Email(
id,
from,
replyTo,
priority,
sentDate,
to,
cc,
bcc,
subject,
textBody,
htmlBody,
unmodifiableMap(attachments)
);
attachments = null;
return email;
}
}
public
|
Builder
|
java
|
lettuce-io__lettuce-core
|
src/main/java/io/lettuce/core/GeoSearch.java
|
{
"start": 284,
"end": 2097
}
|
class ____ {
// TODO: Should be V
/**
* Create a {@link GeoRef} from a Geo set {@code member}.
*
* @param member the Geo set member to use as search reference starting point.
* @return the {@link GeoRef}.
*/
public static <K> GeoRef<K> fromMember(K member) {
LettuceAssert.notNull(member, "Reference member must not be null");
return new FromMember<>(member);
}
/**
* Create a {@link GeoRef} from WGS84 coordinates {@code longitude} and {@code latitude}.
*
* @param longitude the longitude coordinate according to WGS84.
* @param latitude the latitude coordinate according to WGS84.
* @return the {@link GeoRef}.
*/
public static <K> GeoRef<K> fromCoordinates(double longitude, double latitude) {
return (GeoRef<K>) new FromCoordinates(longitude, latitude);
}
/**
* Create a {@link GeoPredicate} by specifying a radius {@code distance} and {@link GeoArgs.Unit}.
*
* @param distance the radius.
* @param unit size unit.
* @return the {@link GeoPredicate} for the specified radius.
*/
public static GeoPredicate byRadius(double distance, GeoArgs.Unit unit) {
return new Radius(distance, unit);
}
/**
* Create a {@link GeoPredicate} by specifying a box of the size {@code width}, {@code height} and {@link GeoArgs.Unit}.
*
* @param width box width.
* @param height box height.
* @param unit size unit.
* @return the {@link GeoPredicate} for the specified box.
*/
public static GeoPredicate byBox(double width, double height, GeoArgs.Unit unit) {
return new Box(width, height, unit);
}
/**
* Geo reference specifying a search starting point.
*
* @param <K>
*/
public
|
GeoSearch
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/ser/SerializationAnnotationsTest.java
|
{
"start": 1601,
"end": 1688
}
|
class ____.
*/
@JsonSerialize(using=BogusSerializer.class)
final static
|
itself
|
java
|
apache__logging-log4j2
|
log4j-core/src/main/java/org/apache/logging/log4j/core/pattern/AbstractStyleNameConverter.java
|
{
"start": 10152,
"end": 11039
}
|
class ____ extends AbstractStyleNameConverter {
/** White */
protected static final String NAME = "white";
/**
* Constructs the converter. This constructor must be public.
*
* @param formatters The PatternFormatters to generate the text to manipulate.
* @param styling The styling that should encapsulate the pattern.
*/
public White(final List<PatternFormatter> formatters, final String styling) {
super(NAME, formatters, styling);
}
/**
* Gets an instance of the class (called via reflection).
*
* @param config The current Configuration.
* @param options The pattern options, may be null. If the first element is "short", only the first line of the
* throwable will be formatted.
* @return new instance of
|
White
|
java
|
netty__netty
|
example/src/main/java/io/netty/example/objectecho/ObjectEchoServerHandler.java
|
{
"start": 889,
"end": 1390
}
|
class ____ extends ChannelInboundHandlerAdapter {
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) {
// Echo back the received object to the client.
ctx.write(msg);
}
@Override
public void channelReadComplete(ChannelHandlerContext ctx) {
ctx.flush();
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
cause.printStackTrace();
ctx.close();
}
}
|
ObjectEchoServerHandler
|
java
|
spring-projects__spring-framework
|
spring-jms/src/main/java/org/springframework/jms/listener/endpoint/DefaultJmsActivationSpecFactory.java
|
{
"start": 5248,
"end": 7443
}
|
class ____ - " +
"specify the 'activationSpecClass' property or override the 'determineActivationSpecClass' method");
}
/**
* This implementation supports Spring's extended "maxConcurrency"
* and "prefetchSize" settings through detecting corresponding
* ActivationSpec properties: "maxSessions"/"maxNumberOfWorks" and
* "maxMessagesPerSessions"/"maxMessages", respectively
* (following ActiveMQ's and JORAM's naming conventions).
*/
@Override
protected void populateActivationSpecProperties(BeanWrapper bw, JmsActivationSpecConfig config) {
super.populateActivationSpecProperties(bw, config);
if (config.getMaxConcurrency() > 0) {
if (bw.isWritableProperty("maxSessions")) {
// ActiveMQ
bw.setPropertyValue("maxSessions", Integer.toString(config.getMaxConcurrency()));
}
else if (bw.isWritableProperty("maxNumberOfWorks")) {
// JORAM
bw.setPropertyValue("maxNumberOfWorks", Integer.toString(config.getMaxConcurrency()));
}
else if (bw.isWritableProperty("maxConcurrency")){
// WebSphere
bw.setPropertyValue("maxConcurrency", Integer.toString(config.getMaxConcurrency()));
}
}
if (config.getPrefetchSize() > 0) {
if (bw.isWritableProperty("maxMessagesPerSessions")) {
// ActiveMQ
bw.setPropertyValue("maxMessagesPerSessions", Integer.toString(config.getPrefetchSize()));
}
else if (bw.isWritableProperty("maxMessages")) {
// JORAM
bw.setPropertyValue("maxMessages", Integer.toString(config.getPrefetchSize()));
}
else if (bw.isWritableProperty("maxBatchSize")){
// WebSphere
bw.setPropertyValue("maxBatchSize", Integer.toString(config.getPrefetchSize()));
}
}
}
/**
* This implementation maps {@code SESSION_TRANSACTED} onto an
* ActivationSpec property named "useRAManagedTransaction", if available
* (following ActiveMQ's naming conventions).
*/
@Override
protected void applyAcknowledgeMode(BeanWrapper bw, int ackMode) {
if (ackMode == Session.SESSION_TRANSACTED && bw.isWritableProperty("useRAManagedTransaction")) {
// ActiveMQ
bw.setPropertyValue("useRAManagedTransaction", "true");
}
else {
super.applyAcknowledgeMode(bw, ackMode);
}
}
}
|
defined
|
java
|
apache__camel
|
components/camel-aws/camel-aws2-step-functions/src/test/java/org/apache/camel/component/aws2/stepfunctions/StepFunctions2ProducerHealthCheckStaticCredsTest.java
|
{
"start": 1462,
"end": 4023
}
|
class ____ extends CamelTestSupport {
CamelContext context;
@Override
protected CamelContext createCamelContext() throws Exception {
context = super.createCamelContext();
context.getPropertiesComponent().setLocation("ref:prop");
// install health check manually (yes a bit cumbersome)
HealthCheckRegistry registry = new DefaultHealthCheckRegistry();
registry.setCamelContext(context);
Object hc = registry.resolveById("context");
registry.register(hc);
hc = registry.resolveById("routes");
registry.register(hc);
hc = registry.resolveById("consumers");
registry.register(hc);
HealthCheckRepository hcr = (HealthCheckRepository) registry.resolveById("producers");
hcr.setEnabled(true);
registry.register(hcr);
context.getCamelContextExtension().addContextPlugin(HealthCheckRegistry.class, registry);
return context;
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:listStateMachines")
.to("aws2-step-functions://test?operation=listStateMachines®ion=l&secretKey=l&accessKey=k");
}
};
}
@Test
public void testConnectivity() {
Collection<HealthCheck.Result> res = HealthCheckHelper.invokeLiveness(context);
boolean up = res.stream().allMatch(r -> r.getState().equals(HealthCheck.State.UP));
Assertions.assertTrue(up, "liveness check");
// health-check readiness should be down
await().atMost(20, TimeUnit.SECONDS).untilAsserted(() -> {
Collection<HealthCheck.Result> res2 = HealthCheckHelper.invokeReadiness(context);
boolean down = res2.stream().allMatch(r -> r.getState().equals(HealthCheck.State.DOWN));
boolean containsAws2SfnHealthCheck = res2.stream()
.anyMatch(result -> result.getCheck().getId().startsWith("producer:aws2-step-functions"));
boolean hasRegionMessage = res2.stream()
.anyMatch(r -> r.getMessage().stream().anyMatch(msg -> msg.contains("region")));
Assertions.assertTrue(down, "liveness check");
Assertions.assertTrue(containsAws2SfnHealthCheck, "aws2-step-functions check");
Assertions.assertTrue(hasRegionMessage, "aws2-step-functions check error message");
});
}
}
|
StepFunctions2ProducerHealthCheckStaticCredsTest
|
java
|
quarkusio__quarkus
|
extensions/hibernate-envers/deployment/src/test/java/io/quarkus/hibernate/orm/envers/config/EnversTestTrackEntitiesChangedInRevisionResource.java
|
{
"start": 299,
"end": 719
}
|
class ____ extends AbstractEnversResource {
@GET
public String getTrackEntitiesChangedInRevision() {
boolean trackEntityChangesInRevision = getConfiguration().isTrackEntitiesChanged();
if (!trackEntityChangesInRevision) {
return "Expected track_entities_changed_in_revision to be true but was false";
}
return "OK";
}
}
|
EnversTestTrackEntitiesChangedInRevisionResource
|
java
|
google__dagger
|
javatests/dagger/android/support/functional/UsesGeneratedModulesApplication.java
|
{
"start": 5456,
"end": 5648
}
|
class ____ {
@Provides
@IntoSet
static Class<?> addDummyValueToComponentHierarchy() {
return DummyBroadcastReceiverSubcomponent.class;
}
}
}
|
AddToHierarchy
|
java
|
google__auto
|
value/src/it/functional/src/test/java/com/google/auto/value/AutoBuilderTest.java
|
{
"start": 20260,
"end": 21317
}
|
interface ____<T extends Number> {
NumberHolderBuilder<T> setNumber(T number);
NumberHolder<T> build();
}
static <T extends Number> NumberHolderBuilder<T> numberHolderBuilder() {
return new AutoBuilder_AutoBuilderTest_NumberHolderBuilder<>();
}
static <T extends Number> NumberHolderBuilder<T> numberHolderBuilder(
NumberHolder<T> numberHolder) {
return new AutoBuilder_AutoBuilderTest_NumberHolderBuilder<>(numberHolder);
}
@Test
public void builderFromInstance() {
NumberHolder<Integer> instance1 =
AutoBuilderTest.<Integer>numberHolderBuilder().setNumber(23).build();
assertThat(instance1.getNumber()).isEqualTo(23);
NumberHolder<Integer> instance2 = numberHolderBuilder(instance1).build();
assertThat(instance2.getNumber()).isEqualTo(23);
NumberHolder<Integer> instance3 = numberHolderBuilder(instance2).setNumber(17).build();
assertThat(instance3.getNumber()).isEqualTo(17);
}
@AutoBuilder(callMethod = "of", ofClass = Simple.class)
@MyAnnotation("thing")
|
NumberHolderBuilder
|
java
|
quarkusio__quarkus
|
extensions/resteasy-reactive/rest/deployment/src/test/java/io/quarkus/resteasy/reactive/server/test/customproviders/NoTargetTest.java
|
{
"start": 989,
"end": 1466
}
|
class ____ {
@RegisterExtension
static QuarkusUnitTest test = new QuarkusUnitTest()
.setArchiveProducer(new Supplier<>() {
@Override
public JavaArchive get() {
return ShrinkWrap.create(JavaArchive.class)
.addClasses(HelloResource.class, ThrowingPreMatchFilter.class, DummyExceptionMapper.class);
}
});
@Path("hello")
public static
|
NoTargetTest
|
java
|
square__okhttp
|
samples/guide/src/main/java/okhttp3/recipes/PostFile.java
|
{
"start": 822,
"end": 1576
}
|
class ____ {
public static final MediaType MEDIA_TYPE_MARKDOWN
= MediaType.get("text/x-markdown; charset=utf-8");
private final OkHttpClient client = new OkHttpClient();
public void run() throws Exception {
File file = new File("README.md");
Request request = new Request.Builder()
.url("https://api.github.com/markdown/raw")
.post(RequestBody.create(file, MEDIA_TYPE_MARKDOWN))
.build();
try (Response response = client.newCall(request).execute()) {
if (!response.isSuccessful()) throw new IOException("Unexpected code " + response);
System.out.println(response.body().string());
}
}
public static void main(String... args) throws Exception {
new PostFile().run();
}
}
|
PostFile
|
java
|
junit-team__junit5
|
junit-platform-console/src/main/java/org/junit/platform/console/command/CommandResult.java
|
{
"start": 589,
"end": 1150
}
|
class ____<T> {
public static <T> CommandResult<T> success() {
return create(ExitCode.SUCCESS, null);
}
public static <T> CommandResult<T> create(int exitCode, @Nullable T value) {
return new CommandResult<>(exitCode, value);
}
private final int exitCode;
private final @Nullable T value;
private CommandResult(int exitCode, @Nullable T value) {
this.exitCode = exitCode;
this.value = value;
}
public int getExitCode() {
return this.exitCode;
}
public Optional<T> getValue() {
return Optional.ofNullable(this.value);
}
}
|
CommandResult
|
java
|
apache__camel
|
dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/JteComponentBuilderFactory.java
|
{
"start": 5950,
"end": 7816
}
|
class ____ directly without
* first compiling it.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param preCompile the value to set
* @return the dsl builder
*/
default JteComponentBuilder preCompile(boolean preCompile) {
doSetProperty("preCompile", preCompile);
return this;
}
/**
* Work directory where JTE will store compiled templates.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: jte-classes
* Group: producer
*
* @param workDir the value to set
* @return the dsl builder
*/
default JteComponentBuilder workDir(java.lang.String workDir) {
doSetProperty("workDir", workDir);
return this;
}
/**
* Whether autowiring is enabled. This is used for automatic autowiring
* options (the option must be marked as autowired) by looking up in the
* registry to find if there is a single instance of matching type,
* which then gets configured on the component. This can be used for
* automatic configuring JDBC data sources, JMS connection factories,
* AWS Clients, etc.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: advanced
*
* @param autowiredEnabled the value to set
* @return the dsl builder
*/
default JteComponentBuilder autowiredEnabled(boolean autowiredEnabled) {
doSetProperty("autowiredEnabled", autowiredEnabled);
return this;
}
}
|
file
|
java
|
junit-team__junit5
|
junit-platform-commons/src/main/java/org/junit/platform/commons/util/ReflectionUtils.java
|
{
"start": 27793,
"end": 28460
}
|
class ____ which the method should be referenced; never {@code null}
* @param method the method; never {@code null}
* @return fully qualified method name; never {@code null}
* @since 1.4
* @see #getFullyQualifiedMethodName(Class, String, Class...)
*/
public static String getFullyQualifiedMethodName(Class<?> clazz, Method method) {
Preconditions.notNull(method, "Method must not be null");
return getFullyQualifiedMethodName(clazz, method.getName(), method.getParameterTypes());
}
/**
* Build the <em>fully qualified method name</em> for the method described by the
* supplied class, method name, and parameter types.
*
* <p>Note that the
|
from
|
java
|
apache__camel
|
components/camel-kafka/src/test/java/org/apache/camel/component/kafka/integration/health/KafkaConsumerBadPortSupervisingHealthCheckIT.java
|
{
"start": 2511,
"end": 6798
}
|
class ____ extends KafkaHealthCheckTestSupport {
public static final String TOPIC = "test-health";
private static final Logger LOG = LoggerFactory.getLogger(KafkaConsumerBadPortSupervisingHealthCheckIT.class);
@ContextFixture
@Override
public void configureContext(CamelContext context) {
context.getPropertiesComponent().setLocation("ref:prop");
context.setRouteController(new DefaultSupervisingRouteController());
SupervisingRouteController src = context.getRouteController().supervising();
src.setBackOffDelay(3);
src.setBackOffMaxAttempts(3);
src.setInitialDelay(3);
KafkaComponent kafka = new KafkaComponent(context);
kafka.init();
kafka.getConfiguration().setBrokers(service.getBootstrapServers() + 123);
// turn of pre validation so we startup and let supervising route controller handle this
// and we can see failure in health checks
kafka.getConfiguration().setPreValidateHostAndPort(false);
context.addComponent("kafka", kafka);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
String uri = "kafka:" + TOPIC
+ "?groupId=KafkaConsumerBadPortSupervisingHealthCheckIT&autoOffsetReset=earliest&keyDeserializer=org.apache.kafka.common.serialization.StringDeserializer&"
+ "valueDeserializer=org.apache.kafka.common.serialization.StringDeserializer"
+ "&autoCommitIntervalMs=1000&autoCommitEnable=true&interceptorClasses=org.apache.camel.component.kafka.MockConsumerInterceptor";
from(uri)
.process(exchange -> LOG.trace("Captured on the processor: {}", exchange.getMessage().getBody()))
.routeId("test-health-it").to(KafkaTestUtil.MOCK_RESULT);
}
};
}
@Order(1)
@Test
@DisplayName("Tests that liveness reports UP when it's actually up")
public void testReportUpWhenIsUp() {
// health-check liveness should be UP
CamelContext context = contextExtension.getContext();
Collection<HealthCheck.Result> res = HealthCheckHelper.invokeLiveness(context);
boolean up = res.stream().allMatch(r -> r.getState().equals(HealthCheck.State.UP));
Assertions.assertTrue(up, "liveness check");
}
@Order(2)
@Test
@DisplayName("Tests that readiness reports down when it's actually down")
public void testReportCorrectlyWhenDown() {
CamelContext context = contextExtension.getContext();
// health-check readiness should be down
await().atMost(20, TimeUnit.SECONDS).untilAsserted(() -> readinessCheck(context));
}
private static void readinessCheck(CamelContext context) {
Collection<HealthCheck.Result> res2 = HealthCheckHelper.invokeReadiness(context);
boolean up2 = res2.stream().allMatch(r -> {
return r.getState().equals(HealthCheck.State.DOWN) &&
r.getMessage().stream().allMatch(msg -> msg.contains("port"));
});
Assertions.assertTrue(up2, "readiness check");
}
@Order(3)
@Test
@DisplayName("I/O test to ensure everything is working as expected")
public void kafkaConsumerHealthCheck() throws InterruptedException {
String propagatedHeaderKey = "PropagatedCustomHeader";
byte[] propagatedHeaderValue = "propagated header value".getBytes();
MockEndpoint to = contextExtension.getMockEndpoint(KafkaTestUtil.MOCK_RESULT);
to.expectedMessageCount(0);
to.expectedMinimumMessageCount(0);
to.expectedNoHeaderReceived();
for (int k = 0; k < 5; k++) {
String msg = "message-" + k;
ProducerRecord<String, String> data = new ProducerRecord<>(TOPIC, "1", msg);
data.headers().add(new RecordHeader("CamelSkippedHeader", "skipped header value".getBytes()));
data.headers().add(new RecordHeader(propagatedHeaderKey, propagatedHeaderValue));
producer.send(data);
}
to.assertIsSatisfied(3000);
}
}
|
KafkaConsumerBadPortSupervisingHealthCheckIT
|
java
|
apache__camel
|
dsl/camel-kamelet-main/src/main/java/org/apache/camel/main/download/DependencyDownloaderClassLoader.java
|
{
"start": 1089,
"end": 1737
}
|
class ____ extends URLClassLoader {
private static final URL[] EMPTY_URL_ARRAY = new URL[0];
public DependencyDownloaderClassLoader(ClassLoader parent) {
super(EMPTY_URL_ARRAY, parent);
}
public void addFile(File file) {
try {
super.addURL(file.toURI().toURL());
} catch (MalformedURLException e) {
throw new DownloadException("Error adding JAR to classloader: " + file, e);
}
}
public List<String> getDownloaded() {
return Arrays.stream(getURLs()).map(u -> FileUtil.stripPath(u.getFile())).collect(Collectors.toList());
}
}
|
DependencyDownloaderClassLoader
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/ClassCanBeStaticTest.java
|
{
"start": 6464,
"end": 6521
}
|
class ____ {
int x;
private
|
Test
|
java
|
apache__kafka
|
clients/src/main/java/org/apache/kafka/clients/admin/internals/AdminBootstrapAddresses.java
|
{
"start": 1243,
"end": 4355
}
|
class ____ {
private final boolean usingBootstrapControllers;
private final List<InetSocketAddress> addresses;
AdminBootstrapAddresses(
boolean usingBootstrapControllers,
List<InetSocketAddress> addresses
) {
this.usingBootstrapControllers = usingBootstrapControllers;
this.addresses = addresses;
}
public boolean usingBootstrapControllers() {
return usingBootstrapControllers;
}
public List<InetSocketAddress> addresses() {
return addresses;
}
public static AdminBootstrapAddresses fromConfig(AbstractConfig config) {
List<String> bootstrapServers = config.getList(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG);
if (bootstrapServers == null) {
bootstrapServers = Collections.emptyList();
}
List<String> controllerServers = config.getList(AdminClientConfig.BOOTSTRAP_CONTROLLERS_CONFIG);
if (controllerServers == null) {
controllerServers = Collections.emptyList();
}
String clientDnsLookupConfig = config.getString(CommonClientConfigs.CLIENT_DNS_LOOKUP_CONFIG);
if (bootstrapServers.isEmpty()) {
if (controllerServers.isEmpty()) {
throw new ConfigException("You must set either " +
CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG + " or " +
AdminClientConfig.BOOTSTRAP_CONTROLLERS_CONFIG);
} else {
return new AdminBootstrapAddresses(true,
ClientUtils.parseAndValidateAddresses(controllerServers, clientDnsLookupConfig));
}
} else {
if (controllerServers.isEmpty()) {
return new AdminBootstrapAddresses(false,
ClientUtils.parseAndValidateAddresses(bootstrapServers, clientDnsLookupConfig));
} else {
throw new ConfigException("You cannot set both " +
CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG + " and " +
AdminClientConfig.BOOTSTRAP_CONTROLLERS_CONFIG);
}
}
}
@Override
public int hashCode() {
return Objects.hash(usingBootstrapControllers, addresses);
}
@Override
public boolean equals(Object o) {
if (o == null || (!o.getClass().equals(AdminBootstrapAddresses.class))) return false;
AdminBootstrapAddresses other = (AdminBootstrapAddresses) o;
return usingBootstrapControllers == other.usingBootstrapControllers &&
addresses.equals(other.addresses);
}
@Override
public String toString() {
StringBuilder bld = new StringBuilder();
bld.append("AdminBootstrapAddresses");
bld.append("(usingBootstrapControllers=").append(usingBootstrapControllers);
bld.append(", addresses=[");
String prefix = "";
for (InetSocketAddress address : addresses) {
bld.append(prefix).append(address);
prefix = ", ";
}
bld.append("])");
return bld.toString();
}
}
|
AdminBootstrapAddresses
|
java
|
netty__netty
|
codec-native-quic/src/test/java/io/netty/handler/codec/quic/QuicReadableTest.java
|
{
"start": 1223,
"end": 5746
}
|
class ____ extends AbstractQuicTest {
@ParameterizedTest
@MethodSource("newSslTaskExecutors")
public void testCorrectlyHandleReadableStreams(Executor executor) throws Throwable {
int numOfStreams = 256;
int readStreams = numOfStreams / 2;
// We do write longs.
int expectedDataRead = readStreams * Long.BYTES;
final CountDownLatch latch = new CountDownLatch(numOfStreams);
final AtomicInteger bytesRead = new AtomicInteger();
final AtomicReference<Throwable> serverErrorRef = new AtomicReference<>();
final AtomicReference<Throwable> clientErrorRef = new AtomicReference<>();
QuicChannelValidationHandler serverHandler = new QuicChannelValidationHandler();
Channel server = QuicTestUtils.newServer(
QuicTestUtils.newQuicServerBuilder(executor).initialMaxStreamsBidirectional(5000),
InsecureQuicTokenHandler.INSTANCE,
serverHandler, new ChannelInboundHandlerAdapter() {
private int counter;
@Override
public void channelRegistered(ChannelHandlerContext ctx) {
// Ensure we dont read from the streams so all of these will be reported as readable
ctx.channel().config().setAutoRead(false);
}
@Override
public void channelActive(ChannelHandlerContext ctx) {
counter++;
latch.countDown();
if (counter > readStreams) {
// Now set it to readable again for some channels
ctx.channel().config().setAutoRead(true);
}
}
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) {
ByteBuf buffer = (ByteBuf) msg;
bytesRead.addAndGet(buffer.readableBytes());
buffer.release();
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
serverErrorRef.set(cause);
}
@Override
public boolean isSharable() {
return true;
}
});
Channel channel = QuicTestUtils.newClient(executor);
QuicChannelValidationHandler clientHandler = new QuicChannelValidationHandler();
ByteBuf data = Unpooled.directBuffer().writeLong(8);
try {
QuicChannel quicChannel = QuicTestUtils.newQuicChannelBootstrap(channel)
.handler(clientHandler)
.streamHandler(new ChannelInboundHandlerAdapter())
.remoteAddress(server.localAddress())
.connect()
.get();
List<Channel> streams = new ArrayList<>();
for (int i = 0; i < numOfStreams; i++) {
QuicStreamChannel stream = quicChannel.createStream(
QuicStreamType.BIDIRECTIONAL, new ChannelInboundHandlerAdapter() {
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
clientErrorRef.set(cause);
}
}).get();
streams.add(stream.writeAndFlush(data.retainedSlice()).sync().channel());
}
latch.await();
while (bytesRead.get() < expectedDataRead) {
Thread.sleep(50);
}
for (Channel stream: streams) {
stream.close().sync();
}
quicChannel.close().sync();
throwIfNotNull(serverErrorRef);
throwIfNotNull(clientErrorRef);
serverHandler.assertState();
clientHandler.assertState();
} finally {
data.release();
server.close().sync();
// Close the parent Datagram channel as well.
channel.close().sync();
shutdown(executor);
}
}
private static void throwIfNotNull(AtomicReference<Throwable> errorRef) throws Throwable {
Throwable cause = errorRef.get();
if (cause != null) {
throw cause;
}
}
}
|
QuicReadableTest
|
java
|
google__dagger
|
javatests/dagger/functional/producers/subcomponent/ModuleSubcomponentsInterop.java
|
{
"start": 1492,
"end": 1541
}
|
class ____ {}
@Subcomponent
|
ProductionTestModule
|
java
|
lettuce-io__lettuce-core
|
src/main/java/io/lettuce/core/search/SearchReply.java
|
{
"start": 4440,
"end": 7412
}
|
class ____<K, V> {
private final K id;
private Double score;
private V payload;
private V sortKey;
private final Map<K, V> fields = new HashMap<>();
/**
* Creates a new SearchResult with the specified document ID.
*
* @param id the document ID
*/
public SearchResult(K id) {
this.id = id;
}
public SearchResult() {
this.id = null;
}
/**
* Gets the document ID.
*
* @return the document ID
*/
public K getId() {
return id;
}
/**
* Gets the document score.
* <p>
* This is only available if WITHSCORES was used in the search.
*
* @return the document score, or null if not available
*/
public Double getScore() {
return score;
}
/**
* Sets the document score.
*
* @param score the document score
*/
void setScore(Double score) {
this.score = score;
}
/**
* Gets the document payload.
* <p>
* This is only available if WITHPAYLOADS was used in the search.
*
* @return the document payload, or null if not available
*/
public V getPayload() {
return payload;
}
/**
* Sets the document payload.
*
* @param payload the document payload
*/
void setPayload(V payload) {
this.payload = payload;
}
/**
* Gets the sort key.
* <p>
* This is only available if WITHSORTKEYS was used in the search.
*
* @return the sort key, or null if not available
*/
public V getSortKey() {
return sortKey;
}
/**
* Sets the sort key.
*
* @param sortKey the sort key
*/
void setSortKey(V sortKey) {
this.sortKey = sortKey;
}
/**
* Gets the document fields.
* <p>
* This contains the field names and values of the document. If NOCONTENT was used in the search, this will be null or
* empty.
*
* @return the document fields, or null if not available
*/
public Map<K, V> getFields() {
return fields;
}
/**
* Adds all the provided fields
*
* @param fields the document fields
*/
public void addFields(Map<K, V> fields) {
this.fields.putAll(fields);
}
/**
* Adds a single document field
*
* @param key the field name
* @param value the field value
*/
public void addFields(K key, V value) {
this.fields.put(key, value);
}
}
}
|
SearchResult
|
java
|
quarkusio__quarkus
|
extensions/hibernate-orm/runtime/src/main/java/io/quarkus/hibernate/orm/runtime/PersistenceUnitsHolder.java
|
{
"start": 4752,
"end": 5256
}
|
class ____ {
private final Map<PersistenceUnitKey, QuarkusPersistenceUnitDescriptor> units;
private final Map<PersistenceUnitKey, RecordedState> recordedStates;
public PersistenceUnits(final Map<PersistenceUnitKey, QuarkusPersistenceUnitDescriptor> units,
final Map<PersistenceUnitKey, RecordedState> recordedStates) {
this.units = Collections.unmodifiableMap(units);
this.recordedStates = recordedStates;
}
}
}
|
PersistenceUnits
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/common/Numbers.java
|
{
"start": 708,
"end": 7242
}
|
class ____ {
private static final BigInteger MAX_LONG_VALUE = BigInteger.valueOf(Long.MAX_VALUE);
private static final BigInteger MIN_LONG_VALUE = BigInteger.valueOf(Long.MIN_VALUE);
private Numbers() {}
public static short bytesToShort(byte[] bytes, int offset) {
return ByteUtils.readShortBE(bytes, offset);
}
public static int bytesToInt(byte[] bytes, int offset) {
return ByteUtils.readIntBE(bytes, offset);
}
public static long bytesToLong(byte[] bytes, int offset) {
return ByteUtils.readLongBE(bytes, offset);
}
public static long bytesToLong(BytesRef bytes) {
return bytesToLong(bytes.bytes, bytes.offset);
}
/**
* Converts an int to a byte array.
*
* @param val The int to convert to a byte array
* @return The byte array converted
*/
public static byte[] intToBytes(int val) {
byte[] arr = new byte[4];
ByteUtils.writeIntBE(val, arr, 0);
return arr;
}
/**
* Converts a short to a byte array.
*
* @param val The short to convert to a byte array
* @return The byte array converted
*/
public static byte[] shortToBytes(int val) {
byte[] arr = new byte[2];
ByteUtils.writeShortBE((short) val, arr, 0);
return arr;
}
/**
* Converts a long to a byte array.
*
* @param val The long to convert to a byte array
* @return The byte array converted
*/
public static byte[] longToBytes(long val) {
byte[] arr = new byte[8];
ByteUtils.writeLongBE(val, arr, 0);
return arr;
}
/**
* Converts a double to a byte array.
*
* @param val The double to convert to a byte array
* @return The byte array converted
*/
public static byte[] doubleToBytes(double val) {
return longToBytes(Double.doubleToRawLongBits(val));
}
/** Returns true if value is neither NaN nor infinite. */
public static boolean isValidDouble(double value) {
if (Double.isNaN(value) || Double.isInfinite(value)) {
return false;
}
return true;
}
/** Return the long that {@code n} stores, or throws an exception if the
* stored value cannot be converted to a long that stores the exact same
* value. */
public static long toLongExact(Number n) {
if (n instanceof Byte || n instanceof Short || n instanceof Integer || n instanceof Long) {
return n.longValue();
} else if (n instanceof Float || n instanceof Double) {
double d = n.doubleValue();
if (d != Math.round(d)) {
throw new IllegalArgumentException(n + " is not an integer value");
}
return n.longValue();
} else if (n instanceof BigDecimal) {
return ((BigDecimal) n).toBigIntegerExact().longValueExact();
} else if (n instanceof BigInteger) {
return ((BigInteger) n).longValueExact();
} else {
throw new IllegalArgumentException(
"Cannot check whether [" + n + "] of class [" + n.getClass().getName() + "] is actually a long"
);
}
}
// weak bounds on the BigDecimal representation to allow for coercion
private static BigDecimal BIGDECIMAL_GREATER_THAN_LONG_MAX_VALUE = BigDecimal.valueOf(Long.MAX_VALUE).add(BigDecimal.ONE);
private static BigDecimal BIGDECIMAL_LESS_THAN_LONG_MIN_VALUE = BigDecimal.valueOf(Long.MIN_VALUE).subtract(BigDecimal.ONE);
/** Return the long that {@code stringValue} stores or throws an exception if the
* stored value cannot be converted to a long that stores the exact same
* value and {@code coerce} is false. */
public static long toLong(String stringValue, boolean coerce) {
try {
return Long.parseLong(stringValue);
} catch (NumberFormatException e) {
// we will try again with BigDecimal
}
final BigInteger bigIntegerValue;
try {
BigDecimal bigDecimalValue = new BigDecimal(stringValue);
if (bigDecimalValue.compareTo(BIGDECIMAL_GREATER_THAN_LONG_MAX_VALUE) >= 0
|| bigDecimalValue.compareTo(BIGDECIMAL_LESS_THAN_LONG_MIN_VALUE) <= 0) {
throw new IllegalArgumentException("Value [" + stringValue + "] is out of range for a long");
}
bigIntegerValue = coerce ? bigDecimalValue.toBigInteger() : bigDecimalValue.toBigIntegerExact();
} catch (ArithmeticException e) {
throw new IllegalArgumentException("Value [" + stringValue + "] has a decimal part");
} catch (NumberFormatException e) {
throw new IllegalArgumentException("For input string: \"" + stringValue + "\"");
}
if (bigIntegerValue.compareTo(MAX_LONG_VALUE) > 0 || bigIntegerValue.compareTo(MIN_LONG_VALUE) < 0) {
throw new IllegalArgumentException("Value [" + stringValue + "] is out of range for a long");
}
return bigIntegerValue.longValue();
}
/** Return the int that {@code n} stores, or throws an exception if the
* stored value cannot be converted to an int that stores the exact same
* value. */
public static int toIntExact(Number n) {
return Math.toIntExact(toLongExact(n));
}
/** Return the short that {@code n} stores, or throws an exception if the
* stored value cannot be converted to a short that stores the exact same
* value. */
public static short toShortExact(Number n) {
long l = toLongExact(n);
if (l != (short) l) {
throw new ArithmeticException("short overflow: " + l);
}
return (short) l;
}
/** Return the byte that {@code n} stores, or throws an exception if the
* stored value cannot be converted to a byte that stores the exact same
* value. */
public static byte toByteExact(Number n) {
long l = toLongExact(n);
if (l != (byte) l) {
throw new ArithmeticException("byte overflow: " + l);
}
return (byte) l;
}
/**
* Checks if the given string can be parsed as a positive integer value.
*/
public static boolean isPositiveNumeric(String string) {
for (int i = 0; i < string.length(); ++i) {
final char c = string.charAt(i);
if (c < '0' || c > '9') {
return false;
}
}
return true;
}
}
|
Numbers
|
java
|
google__guice
|
core/test/com/google/inject/errors/NullInjectedIntoNonNullableTest.java
|
{
"start": 1112,
"end": 1181
}
|
class ____ {
@Inject
Foo(@Bar String string) {}
}
static
|
Foo
|
java
|
assertj__assertj-core
|
assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/internal/Digests_digestDiff_Test.java
|
{
"start": 1235,
"end": 3415
}
|
class ____ {
private static final byte[] EXPECTED_MD5_DIGEST = { 58, -63, -81, -94, -88, -101, 126, 79, 24, 102, 80, 40, 119, -65, 29, -59 };
private static final String EXPECTED_MD5_DIGEST_STR = "3AC1AFA2A89B7E4F1866502877BF1DC5";
private InputStream stream;
private MessageDigest digest;
private byte[] expected = new byte[] { 0, 1 };
@BeforeEach
public void init() {
stream = mock(InputStream.class);
digest = mock(MessageDigest.class);
}
@Test
void should_fail_if_stream_is_null() {
assertThatNullPointerException().isThrownBy(() -> digestDiff(null, null, null))
.withMessage("The stream should not be null");
}
@Test
void should_fail_if_digest_is_null() {
assertThatNullPointerException().isThrownBy(() -> digestDiff(stream, null, null))
.withMessage("The digest should not be null");
}
@Test
void should_fail_if_expected_is_null() {
assertThatNullPointerException().isThrownBy(() -> digestDiff(stream, digest, null))
.withMessage("The expected should not be null");
}
// todo should_error_if_IO
@Test
void should_pass_if_stream_is_readable() throws IOException {
// GIVEN
given(digest.digest()).willReturn(expected);
// THEN
digestDiff(stream, digest, expected);
}
@Test
void should_pass_if_digest_is_MD5() throws IOException, NoSuchAlgorithmException {
// GIVEN
InputStream inputStream = getClass().getResourceAsStream("/red.png");
// WHEN
DigestDiff diff = digestDiff(inputStream, MessageDigest.getInstance("MD5"), EXPECTED_MD5_DIGEST);
// THEN
assertThat(diff.digestsDiffer()).isFalse();
}
@Test
void should_pass_if_digest_is_MD5_and_updated() throws IOException, NoSuchAlgorithmException {
// GIVEN
InputStream inputStream = getClass().getResourceAsStream("/red.png");
MessageDigest digest = MessageDigest.getInstance("MD5");
digest.update(expected);
// WHEN
DigestDiff diff = digestDiff(inputStream, digest, EXPECTED_MD5_DIGEST);
// THEN
assertThat(diff.digestsDiffer()).isFalse();
}
}
|
Digests_digestDiff_Test
|
java
|
apache__camel
|
components/camel-flowable/src/generated/java/org/apache/camel/component/flowable/FlowableEndpointConfigurer.java
|
{
"start": 735,
"end": 3112
}
|
class ____ extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
FlowableEndpoint target = (FlowableEndpoint) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "bridgeerrorhandler":
case "bridgeErrorHandler": target.setBridgeErrorHandler(property(camelContext, boolean.class, value)); return true;
case "exceptionhandler":
case "exceptionHandler": target.setExceptionHandler(property(camelContext, org.apache.camel.spi.ExceptionHandler.class, value)); return true;
case "exchangepattern":
case "exchangePattern": target.setExchangePattern(property(camelContext, org.apache.camel.ExchangePattern.class, value)); return true;
case "lazystartproducer":
case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
default: return false;
}
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "bridgeerrorhandler":
case "bridgeErrorHandler": return boolean.class;
case "exceptionhandler":
case "exceptionHandler": return org.apache.camel.spi.ExceptionHandler.class;
case "exchangepattern":
case "exchangePattern": return org.apache.camel.ExchangePattern.class;
case "lazystartproducer":
case "lazyStartProducer": return boolean.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
FlowableEndpoint target = (FlowableEndpoint) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "bridgeerrorhandler":
case "bridgeErrorHandler": return target.isBridgeErrorHandler();
case "exceptionhandler":
case "exceptionHandler": return target.getExceptionHandler();
case "exchangepattern":
case "exchangePattern": return target.getExchangePattern();
case "lazystartproducer":
case "lazyStartProducer": return target.isLazyStartProducer();
default: return null;
}
}
}
|
FlowableEndpointConfigurer
|
java
|
spring-projects__spring-security
|
config/src/test/java/org/springframework/security/config/test/SpringTestContext.java
|
{
"start": 6620,
"end": 7020
}
|
class ____ implements MockMvcConfigurer {
private List<Filter> filters = new ArrayList<>();
void addFilter(Filter filter) {
this.filters.add(filter);
}
@Override
public RequestPostProcessor beforeMockMvcCreated(ConfigurableMockMvcBuilder<?> builder,
WebApplicationContext context) {
builder.addFilters(this.filters.toArray(new Filter[0]));
return null;
}
}
}
|
DeferAddFilter
|
java
|
apache__spark
|
mllib/src/test/java/org/apache/spark/ml/feature/JavaWord2VecSuite.java
|
{
"start": 1184,
"end": 2237
}
|
class ____ extends SharedSparkSession {
@Test
public void testJavaWord2Vec() {
StructType schema = new StructType(new StructField[]{
new StructField("text", new ArrayType(DataTypes.StringType, true), false, Metadata.empty())
});
Dataset<Row> documentDF = spark.createDataFrame(
Arrays.asList(
RowFactory.create(Arrays.asList("Hi I heard about Spark".split(" "))),
RowFactory.create(Arrays.asList("I wish Java could use case classes".split(" "))),
RowFactory.create(Arrays.asList("Logistic regression models are neat".split(" ")))),
schema);
Word2Vec word2Vec = new Word2Vec()
.setInputCol("text")
.setOutputCol("result")
.setVectorSize(3)
.setMinCount(0);
Word2VecModel model = word2Vec.fit(documentDF);
Dataset<Row> result = model.transform(documentDF);
for (Row r : result.select("result").collectAsList()) {
double[] polyFeatures = ((Vector) r.get(0)).toArray();
Assertions.assertEquals(3, polyFeatures.length);
}
}
}
|
JavaWord2VecSuite
|
java
|
quarkusio__quarkus
|
extensions/oidc/runtime/src/main/java/io/quarkus/oidc/runtime/DefaultTokenIntrospectionUserInfoCache.java
|
{
"start": 3946,
"end": 4523
}
|
class ____ {
volatile TokenIntrospection introspection;
volatile UserInfo userInfo;
public CacheEntry(TokenIntrospection introspection) {
this.introspection = introspection;
}
public CacheEntry(UserInfo userInfo) {
this.userInfo = userInfo;
}
}
public void clearCache() {
cache.clearCache();
}
public int getCacheSize() {
return cache.getCacheSize();
}
void shutdown(@Observes ShutdownEvent event, Vertx vertx) {
cache.stopTimer(vertx);
}
}
|
CacheEntry
|
java
|
spring-projects__spring-boot
|
module/spring-boot-webflux/src/main/java/org/springframework/boot/webflux/actuate/endpoint/web/AbstractWebFluxEndpointHandlerMapping.java
|
{
"start": 20578,
"end": 21007
}
|
class ____ implements RuntimeHintsRegistrar {
private final ReflectiveRuntimeHintsRegistrar reflectiveRegistrar = new ReflectiveRuntimeHintsRegistrar();
@Override
public void registerHints(RuntimeHints hints, @Nullable ClassLoader classLoader) {
this.reflectiveRegistrar.registerRuntimeHints(hints, WriteOperationHandler.class,
ReadOperationHandler.class);
}
}
}
|
AbstractWebFluxEndpointHandlerMappingRuntimeHints
|
java
|
spring-projects__spring-security
|
oauth2/oauth2-client/src/test/java/org/springframework/security/oauth2/client/ClientCredentialsReactiveOAuth2AuthorizedClientProviderTests.java
|
{
"start": 2054,
"end": 9201
}
|
class ____ {
private ClientCredentialsReactiveOAuth2AuthorizedClientProvider authorizedClientProvider;
private ReactiveOAuth2AccessTokenResponseClient<OAuth2ClientCredentialsGrantRequest> accessTokenResponseClient;
private ClientRegistration clientRegistration;
private Authentication principal;
@BeforeEach
public void setup() {
this.authorizedClientProvider = new ClientCredentialsReactiveOAuth2AuthorizedClientProvider();
this.accessTokenResponseClient = mock(ReactiveOAuth2AccessTokenResponseClient.class);
this.authorizedClientProvider.setAccessTokenResponseClient(this.accessTokenResponseClient);
this.clientRegistration = TestClientRegistrations.clientCredentials().build();
this.principal = new TestingAuthenticationToken("principal", "password");
}
@Test
public void setAccessTokenResponseClientWhenClientIsNullThenThrowIllegalArgumentException() {
// @formatter:off
assertThatIllegalArgumentException()
.isThrownBy(() -> this.authorizedClientProvider.setAccessTokenResponseClient(null))
.withMessage("accessTokenResponseClient cannot be null");
// @formatter:on
}
@Test
public void setClockSkewWhenNullThenThrowIllegalArgumentException() {
// @formatter:off
assertThatIllegalArgumentException()
.isThrownBy(() -> this.authorizedClientProvider.setClockSkew(null))
.withMessage("clockSkew cannot be null");
// @formatter:on
}
@Test
public void setClockSkewWhenNegativeSecondsThenThrowIllegalArgumentException() {
// @formatter:off
assertThatIllegalArgumentException()
.isThrownBy(() -> this.authorizedClientProvider.setClockSkew(Duration.ofSeconds(-1)))
.withMessage("clockSkew must be >= 0");
// @formatter:on
}
@Test
public void setClockWhenNullThenThrowIllegalArgumentException() {
// @formatter:off
assertThatIllegalArgumentException()
.isThrownBy(() -> this.authorizedClientProvider.setClock(null))
.withMessage("clock cannot be null");
// @formatter:on
}
@Test
public void authorizeWhenContextIsNullThenThrowIllegalArgumentException() {
// @formatter:off
assertThatIllegalArgumentException()
.isThrownBy(() -> this.authorizedClientProvider.authorize(null).block())
.withMessage("context cannot be null");
// @formatter:on
}
@Test
public void authorizeWhenNotClientCredentialsThenUnableToAuthorize() {
ClientRegistration clientRegistration = TestClientRegistrations.clientRegistration().build();
// @formatter:off
OAuth2AuthorizationContext authorizationContext = OAuth2AuthorizationContext
.withClientRegistration(clientRegistration)
.principal(this.principal)
.build();
// @formatter:on
assertThat(this.authorizedClientProvider.authorize(authorizationContext).block()).isNull();
}
@Test
public void authorizeWhenClientCredentialsAndNotAuthorizedThenAuthorize() {
OAuth2AccessTokenResponse accessTokenResponse = TestOAuth2AccessTokenResponses.accessTokenResponse().build();
given(this.accessTokenResponseClient.getTokenResponse(any())).willReturn(Mono.just(accessTokenResponse));
// @formatter:off
OAuth2AuthorizationContext authorizationContext = OAuth2AuthorizationContext
.withClientRegistration(this.clientRegistration)
.principal(this.principal)
.build();
// @formatter:on
OAuth2AuthorizedClient authorizedClient = this.authorizedClientProvider.authorize(authorizationContext).block();
assertThat(authorizedClient.getClientRegistration()).isSameAs(this.clientRegistration);
assertThat(authorizedClient.getPrincipalName()).isEqualTo(this.principal.getName());
assertThat(authorizedClient.getAccessToken()).isEqualTo(accessTokenResponse.getAccessToken());
}
@Test
public void authorizeWhenClientCredentialsAndTokenExpiredThenReauthorize() {
Instant issuedAt = Instant.now().minus(Duration.ofDays(1));
Instant expiresAt = issuedAt.plus(Duration.ofMinutes(60));
OAuth2AccessToken accessToken = new OAuth2AccessToken(OAuth2AccessToken.TokenType.BEARER, "access-token-1234",
issuedAt, expiresAt);
OAuth2AuthorizedClient authorizedClient = new OAuth2AuthorizedClient(this.clientRegistration,
this.principal.getName(), accessToken);
OAuth2AccessTokenResponse accessTokenResponse = TestOAuth2AccessTokenResponses.accessTokenResponse().build();
given(this.accessTokenResponseClient.getTokenResponse(any())).willReturn(Mono.just(accessTokenResponse));
// @formatter:off
OAuth2AuthorizationContext authorizationContext = OAuth2AuthorizationContext
.withAuthorizedClient(authorizedClient)
.principal(this.principal)
.build();
// @formatter:on
authorizedClient = this.authorizedClientProvider.authorize(authorizationContext).block();
assertThat(authorizedClient.getClientRegistration()).isSameAs(this.clientRegistration);
assertThat(authorizedClient.getPrincipalName()).isEqualTo(this.principal.getName());
assertThat(authorizedClient.getAccessToken()).isEqualTo(accessTokenResponse.getAccessToken());
}
@Test
public void authorizeWhenClientCredentialsAndTokenNotExpiredThenNotReauthorize() {
OAuth2AuthorizedClient authorizedClient = new OAuth2AuthorizedClient(this.clientRegistration,
this.principal.getName(), TestOAuth2AccessTokens.noScopes());
// @formatter:off
OAuth2AuthorizationContext authorizationContext = OAuth2AuthorizationContext
.withAuthorizedClient(authorizedClient)
.principal(this.principal)
.build();
// @formatter:on
assertThat(this.authorizedClientProvider.authorize(authorizationContext).block()).isNull();
}
// gh-7511
@Test
public void authorizeWhenClientCredentialsAndTokenNotExpiredButClockSkewForcesExpiryThenReauthorize() {
Instant now = Instant.now();
Instant issuedAt = now.minus(Duration.ofMinutes(60));
Instant expiresAt = now.minus(Duration.ofMinutes(1));
OAuth2AccessToken expiresInOneMinAccessToken = new OAuth2AccessToken(OAuth2AccessToken.TokenType.BEARER,
"access-token-1234", issuedAt, expiresAt);
OAuth2AuthorizedClient authorizedClient = new OAuth2AuthorizedClient(this.clientRegistration,
this.principal.getName(), expiresInOneMinAccessToken);
// Shorten the lifespan of the access token by 90 seconds, which will ultimately
// force it to expire on the client
this.authorizedClientProvider.setClockSkew(Duration.ofSeconds(90));
OAuth2AccessTokenResponse accessTokenResponse = TestOAuth2AccessTokenResponses.accessTokenResponse().build();
given(this.accessTokenResponseClient.getTokenResponse(any())).willReturn(Mono.just(accessTokenResponse));
// @formatter:off
OAuth2AuthorizationContext authorizationContext = OAuth2AuthorizationContext
.withAuthorizedClient(authorizedClient)
.principal(this.principal)
.build();
// @formatter:on
OAuth2AuthorizedClient reauthorizedClient = this.authorizedClientProvider.authorize(authorizationContext)
.block();
assertThat(reauthorizedClient.getClientRegistration()).isSameAs(this.clientRegistration);
assertThat(reauthorizedClient.getPrincipalName()).isEqualTo(this.principal.getName());
assertThat(reauthorizedClient.getAccessToken()).isEqualTo(accessTokenResponse.getAccessToken());
}
}
|
ClientCredentialsReactiveOAuth2AuthorizedClientProviderTests
|
java
|
quarkusio__quarkus
|
integration-tests/picocli/src/main/java/io/quarkus/it/picocli/HelloCommand.java
|
{
"start": 89,
"end": 358
}
|
class ____ implements Runnable {
@CommandLine.Option(names = { "-n", "--name" }, defaultValue = "World", description = "Who we will greet?")
String name;
@Override
public void run() {
System.out.println("Hello " + name + "!");
}
}
|
HelloCommand
|
java
|
apache__flink
|
flink-test-utils-parent/flink-test-utils-junit/src/main/java/org/apache/flink/core/testutils/EachCallbackWrapper.java
|
{
"start": 1124,
"end": 1723
}
|
class ____<C extends CustomExtension>
implements BeforeEachCallback, AfterEachCallback {
private final C customExtension;
public EachCallbackWrapper(C customExtension) {
this.customExtension = customExtension;
}
public C getCustomExtension() {
return customExtension;
}
@Override
public void afterEach(ExtensionContext context) throws Exception {
customExtension.after(context);
}
@Override
public void beforeEach(ExtensionContext context) throws Exception {
customExtension.before(context);
}
}
|
EachCallbackWrapper
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebApp.java
|
{
"start": 1434,
"end": 2516
}
|
class ____ extends WebApp implements YarnWebParams {
private Router router;
public RouterWebApp(Router router) {
this.router = router;
}
@Override
public void setup() {
if (router != null) {
bind(Router.class).toInstance(router);
}
route("/", RouterController.class);
route("/cluster", RouterController.class, "about");
route("/about", RouterController.class, "about");
route(pajoin("/apps", APP_SC, APP_STATE), RouterController.class, "apps");
route(pajoin("/nodes", NODE_SC), RouterController.class, "nodes");
route("/federation", RouterController.class, "federation");
route(pajoin("/nodelabels", NODE_SC), RouterController.class, "nodeLabels");
}
public ResourceConfig resourceConfig() {
ResourceConfig config = new ResourceConfig();
config.packages("org.apache.hadoop.yarn.server.router.webapp");
config.register(new JerseyBinder());
config.register(RouterWebServices.class);
config.register(new JettisonFeature()).register(JAXBContextResolver.class);
return config;
}
private
|
RouterWebApp
|
java
|
spring-projects__spring-framework
|
spring-orm/src/test/java/org/springframework/orm/jpa/persistenceunit/PersistenceManagedTypesTests.java
|
{
"start": 965,
"end": 2012
}
|
class ____ {
@Test
void createWithManagedClassNames() {
PersistenceManagedTypes managedTypes = PersistenceManagedTypes.of(
"com.example.One", "com.example.Two");
assertThat(managedTypes.getManagedClassNames()).containsExactly(
"com.example.One", "com.example.Two");
assertThat(managedTypes.getManagedPackages()).isEmpty();
assertThat(managedTypes.getPersistenceUnitRootUrl()).isNull();
}
@Test
void createWithNullManagedClasses() {
assertThatIllegalArgumentException().isThrownBy(() -> PersistenceManagedTypes.of((String[]) null));
}
@Test
void createWithManagedClassNamesAndPackages() {
PersistenceManagedTypes managedTypes = PersistenceManagedTypes.of(
List.of("com.example.One", "com.example.Two"), List.of("com.example"));
assertThat(managedTypes.getManagedClassNames()).containsExactly(
"com.example.One", "com.example.Two");
assertThat(managedTypes.getManagedPackages()).containsExactly("com.example");
assertThat(managedTypes.getPersistenceUnitRootUrl()).isNull();
}
}
|
PersistenceManagedTypesTests
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/RevertModelSnapshotActionRequestTests.java
|
{
"start": 561,
"end": 1588
}
|
class ____ extends AbstractXContentSerializingTestCase<Request> {
@Override
protected Request createTestInstance() {
RevertModelSnapshotAction.Request request = new RevertModelSnapshotAction.Request(
randomAlphaOfLengthBetween(1, 20),
randomAlphaOfLengthBetween(1, 20)
);
if (randomBoolean()) {
request.setDeleteInterveningResults(randomBoolean());
}
if (randomBoolean()) {
request.setForce(randomBoolean());
}
return request;
}
@Override
protected Request mutateInstance(Request instance) {
return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929
}
@Override
protected Writeable.Reader<Request> instanceReader() {
return Request::new;
}
@Override
protected Request doParseInstance(XContentParser parser) {
return RevertModelSnapshotAction.Request.parseRequest(null, null, parser);
}
}
|
RevertModelSnapshotActionRequestTests
|
java
|
quarkusio__quarkus
|
extensions/arc/deployment/src/main/java/io/quarkus/arc/deployment/ArcTestSteps.java
|
{
"start": 901,
"end": 2181
}
|
class ____ {
@BuildStep
public void additionalBeans(BuildProducer<AdditionalBeanBuildItem> additionalBeans) {
// We need to register the bean implementation for TestApplicationClassPredicate
// TestApplicationClassPredicate is used programmatically in the ArC recorder when StartupEvent is fired
additionalBeans.produce(AdditionalBeanBuildItem.unremovableOf(PreloadedTestApplicationClassPredicate.class));
// In tests, register the ActivateSessionContextInterceptor and ActivateSessionContext interceptor binding
additionalBeans.produce(new AdditionalBeanBuildItem(ActivateSessionContextInterceptor.class));
additionalBeans.produce(new AdditionalBeanBuildItem("io.quarkus.test.ActivateSessionContext"));
}
@BuildStep
AnnotationsTransformerBuildItem addInterceptorBinding() {
return new AnnotationsTransformerBuildItem(
AnnotationTransformation.forClasses().whenClass(ActivateSessionContextInterceptor.class).transform(tc -> tc.add(
AnnotationInstance.builder(DotName.createSimple("io.quarkus.test.ActivateSessionContext")).build())));
}
// For some reason the annotation literal generated for io.quarkus.test.ActivateSessionContext lives in app
|
ArcTestSteps
|
java
|
quarkusio__quarkus
|
extensions/spring-data-jpa/deployment/src/test/java/io/quarkus/spring/data/deployment/LoginEvent.java
|
{
"start": 240,
"end": 909
}
|
// JPA entity recording a single login occurrence for a user.
class ____ {
    // Surrogate primary key, generated by the persistence provider.
    @Id
    @GeneratedValue
    private Long id;
    // The user this login event belongs to (many events per user).
    @ManyToOne
    private User user;
    // When the login happened; exercises ZonedDateTime column mapping.
    private ZonedDateTime zonedDateTime;
    // Whether this event has already been processed downstream.
    private boolean processed;
    public User getUser() {
        return user;
    }
    public void setUser(User user) {
        this.user = user;
    }
    public ZonedDateTime getZonedDateTime() {
        return zonedDateTime;
    }
    public void setZonedDateTime(ZonedDateTime zonedDateTime) {
        this.zonedDateTime = zonedDateTime;
    }
    public boolean isProcessed() {
        return processed;
    }
    public void setProcessed(boolean processed) {
        this.processed = processed;
    }
}
|
LoginEvent
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/component/file/strategy/FileChangedReadLockLoggingLevelTest.java
|
{
"start": 908,
"end": 1425
}
|
// Variant of FileChangedReadLockTest that sets readLockLoggingLevel=DEBUG on
// the consumer endpoint; the inherited test methods drive the scenario.
class ____ extends FileChangedReadLockTest {
    @Override
    protected RouteBuilder createRouteBuilder() {
        return new RouteBuilder() {
            @Override
            public void configure() {
                // changed read lock polls every 100ms; only the logging level
                // differs from the base test's route.
                from(fileUri(
                        "in?initialDelay=0&delay=10&readLock=changed&readLockCheckInterval=100&readLockLoggingLevel=DEBUG"))
                        .to(fileUri("out"),
                                "mock:result");
            }
        };
    }
}
|
FileChangedReadLockLoggingLevelTest
|
java
|
quarkusio__quarkus
|
independent-projects/arc/processor/src/main/java/io/quarkus/arc/processor/AnnotationLiteralProcessor.java
|
{
"start": 4362,
"end": 6226
}
|
class ____ available: " + annotationInstance);
}
AnnotationLiteralClassInfo literal = cache.getValue(new CacheKey(annotationClass));
ClassDesc generatedClass = ClassDesc.of(literal.generatedClassName);
if (literal.annotationMembers().isEmpty()) {
return bc.getStaticField(FieldDesc.of(generatedClass, "INSTANCE", generatedClass));
}
Expr[] ctorArgs = new Expr[literal.annotationMembers().size()];
int argIndex = 0;
for (MethodInfo annotationMember : literal.annotationMembers()) {
AnnotationValue value = annotationInstance.value(annotationMember.name());
if (value == null) {
value = annotationMember.defaultValue();
}
if (value == null) {
throw new IllegalStateException(String.format(
"Value is not set for %s.%s(). Most probably an older version of Jandex was used to index an application dependency. Make sure that Jandex 2.1+ is used.",
annotationMember.declaringClass().name(), annotationMember.name()));
}
Expr valueExpr = loadValue(bc, literal, annotationMember, value);
ctorArgs[argIndex] = valueExpr;
argIndex++;
}
ConstructorDesc ctor = ConstructorDesc.of(generatedClass, literal.annotationMembers()
.stream()
.map(it -> classDescOf(it.returnType()))
.toArray(ClassDesc[]::new));
return bc.new_(ctor, ctorArgs);
}
/**
* Generates a bytecode sequence to load given annotation member value.
*
* @param bc will receive the bytecode sequence for loading the annotation member value
* as a sequence of {@link BlockCreator} method calls
* @param literal data about the annotation literal
|
not
|
java
|
apache__camel
|
dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/GraphqlComponentBuilderFactory.java
|
{
"start": 1385,
"end": 1848
}
|
interface ____ {
/**
* GraphQL (camel-graphql)
* Send GraphQL queries and mutations to external systems.
*
* Category: api
* Since: 3.0
* Maven coordinates: org.apache.camel:camel-graphql
*
* @return the dsl builder
*/
static GraphqlComponentBuilder graphql() {
return new GraphqlComponentBuilderImpl();
}
/**
* Builder for the GraphQL component.
*/
|
GraphqlComponentBuilderFactory
|
java
|
grpc__grpc-java
|
stub/src/main/java/io/grpc/stub/ServerCalls.java
|
{
"start": 2964,
"end": 3191
}
|
/**
 * Adaptor to a method taking a single request; responses are emitted through the
 * supplied {@link StreamObserver}. (The surrounding comments label the next
 * adaptor as server-streaming, so this one presumably covers the unary case —
 * confirm against ServerCalls usage.)
 */
interface ____<ReqT, RespT> extends UnaryRequestMethod<ReqT, RespT> {
    @Override void invoke(ReqT request, StreamObserver<RespT> responseObserver);
}
/**
* Adaptor to a server streaming method.
*/
public
|
UnaryMethod
|
java
|
playframework__playframework
|
core/play/src/main/java/play/http/HttpRequestHandler.java
|
{
"start": 301,
"end": 1363
}
|
/**
 * Resolves incoming requests to the handler that should process them.
 */
interface ____ {
    /**
     * Get a handler for the given request.
     *
     * <p>In addition to retrieving a handler for the request, the request itself may be modified -
     * typically it will be tagged with routing information. It is also acceptable to simply return
     * the request as is. Play will switch to using the returned request from this point in in its
     * request handling.
     *
     * <p>The reason why the API allows returning a modified request, rather than just wrapping the
     * Handler in a new Handler that modifies the request, is so that Play can pass this request to
     * other handlers, such as error handlers, or filters, and they will get the tagged/modified
     * request.
     *
     * @param request The request to handle
     * @return The possibly modified/tagged request, and a handler to handle it
     */
    HandlerForRequest handlerForRequest(RequestHeader request);
    /**
     * Adapts this Java request handler to the Scala API.
     *
     * @return a Scala HttpRequestHandler wrapping this instance
     */
    default play.api.http.HttpRequestHandler asScala() {
        return new JavaHttpRequestHandlerAdapter(this);
    }
}
|
HttpRequestHandler
|
java
|
spring-projects__spring-framework
|
spring-context/src/main/java/org/springframework/context/aot/ContextAotProcessor.java
|
{
"start": 4597,
"end": 4836
}
|
class ____ generator
*/
protected ClassNameGenerator createClassNameGenerator() {
return new ClassNameGenerator(ClassName.get(getApplicationClass()));
}
/**
* Return the native image arguments to use.
* <p>By default, the main
|
name
|
java
|
apache__flink
|
flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/functions/aggfunctions/BatchApproxCountDistinctAggFunctionTest.java
|
{
"start": 12224,
"end": 13493
}
|
// Approx-count-distinct test over TIMESTAMP_LTZ values; nulls do not
// contribute to the count (expected results below are the distinct
// non-null cardinalities of each input set).
class ____
        extends ApproxCountDistinctAggFunctionTestBase<TimestampData> {
    @Override
    protected List<List<TimestampData>> getInputValueSets() {
        return Arrays.asList(
                // 4 distinct non-null values -> 4
                Arrays.asList(
                        TimestampData.fromEpochMillis(0),
                        TimestampData.fromEpochMillis(1000),
                        TimestampData.fromEpochMillis(100),
                        null,
                        TimestampData.fromEpochMillis(10)),
                // all nulls -> 0
                Arrays.asList(null, null, null, null, null),
                // one distinct non-null value (duplicated) -> 1
                Arrays.asList(
                        null,
                        TimestampData.fromEpochMillis(1),
                        TimestampData.fromEpochMillis(1)));
    }
    @Override
    protected List<Long> getExpectedResults() {
        return Arrays.asList(4L, 0L, 1L);
    }
    @Override
    protected AggregateFunction<Long, HllBuffer> getAggregator() {
        // precision 6 matches the LocalZonedTimestampType used by the inputs
        return new TimestampLtzApproxCountDistinctAggFunction(new LocalZonedTimestampType(6));
    }
}
/** Test for {@link TimestampLtzApproxCountDistinctAggFunction}. */
@Nested
final
|
TimestampLtzApproxCountDistinctAggFunctionTest
|
java
|
elastic__elasticsearch
|
modules/apm/src/test/java/org/elasticsearch/telemetry/apm/RecordingOtelMeter.java
|
{
"start": 7931,
"end": 8633
}
|
// Counter-flavoured recorder: reuses the up/down recorder's bookkeeping but
// asserts that every increment is non-negative, matching the monotonic
// counter contract implied by DoubleCounter.
class ____ extends DoubleUpDownRecorder implements DoubleCounter, OtelInstrument {
        DoubleRecorder(String name) {
            super(name, InstrumentType.DOUBLE_COUNTER);
        }
        @Override
        public void add(double value) {
            assert value >= 0;
            super.add(value);
        }
        @Override
        public void add(double value, Attributes attributes) {
            assert value >= 0;
            super.add(value, attributes);
        }
        @Override
        public void add(double value, Attributes attributes, Context context) {
            assert value >= 0;
            super.add(value, attributes, context);
        }
    }
private
|
DoubleRecorder
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/processor/DeadLetterChannelNotHandleNewExceptionTest.java
|
{
"start": 1301,
"end": 2365
}
|
// Error-handler bean that itself fails: the dead letter channel delegates to
// it, and the RuntimeException it throws is the "new exception" the test
// expects to propagate when deadLetterHandleNewException(false) is set.
class ____ {
        @Handler
        public void onException(Exchange exchange, Exception exception) {
            throw new RuntimeException("error in errorhandler");
        }
    }
    // The route throws IllegalArgumentException, the DLC's handler bean then
    // throws its own RuntimeException; since the DLC is configured not to
    // handle new exceptions, that RuntimeException reaches the caller.
    @Test
    public void testDeadLetterChannelNotHandleNewException() {
        try {
            template.sendBody("direct:start", "Hello World");
            fail("Should have thrown exception");
        } catch (CamelExecutionException e) {
            RuntimeException cause = assertIsInstanceOf(RuntimeException.class, e.getCause());
            assertEquals("error in errorhandler", cause.getMessage());
        }
    }
    // Route setup: dead letter channel delegates to BadErrorHandler and is
    // explicitly told NOT to handle exceptions thrown from the handler itself.
    @Override
    protected RouteBuilder createRouteBuilder() {
        return new RouteBuilder() {
            @Override
            public void configure() {
                errorHandler(deadLetterChannel("bean:" + BadErrorHandler.class.getName()).deadLetterHandleNewException(false));
                from("direct:start").log("Incoming ${body}").throwException(new IllegalArgumentException("Forced"));
            }
        };
    }
}
|
BadErrorHandler
|
java
|
apache__maven
|
compat/maven-compat/src/main/java/org/apache/maven/project/artifact/DefaultMetadataSource.java
|
{
"start": 1617,
"end": 2140
}
|
// Thin subclass that forwards all injected collaborators straight to the
// MavenMetadataSource constructor; adds no behavior of its own.
class ____ extends MavenMetadataSource {
    @Inject
    public DefaultMetadataSource(
            RepositoryMetadataManager repositoryMetadataManager,
            ArtifactFactory artifactFactory,
            ProjectBuilder projectBuilder,
            MavenMetadataCache cache,
            LegacySupport legacySupport,
            MavenRepositorySystem mavenRepositorySystem) {
        super(repositoryMetadataManager, artifactFactory, projectBuilder, cache, legacySupport, mavenRepositorySystem);
    }
}
|
DefaultMetadataSource
|
java
|
alibaba__druid
|
druid-demo-petclinic/src/main/java/org/springframework/samples/petclinic/owner/Pet.java
|
{
"start": 1395,
"end": 2167
}
|
// JPA entity for a pet; id/name come from NamedEntity.
class ____ extends NamedEntity {
    @Column(name = "birth_date")
    @DateTimeFormat(pattern = "yyyy-MM-dd")
    private LocalDate birthDate;
    // Pet type (cat, dog, ...), joined via type_id.
    @ManyToOne
    @JoinColumn(name = "type_id")
    private PetType type;
    // Vet visits, eagerly loaded and ordered chronologically; cascaded so
    // visits are persisted/removed together with the pet.
    @OneToMany(cascade = CascadeType.ALL, fetch = FetchType.EAGER)
    @JoinColumn(name = "pet_id")
    @OrderBy("visit_date ASC")
    private Set<Visit> visits = new LinkedHashSet<>();
    public void setBirthDate(LocalDate birthDate) {
        this.birthDate = birthDate;
    }
    public LocalDate getBirthDate() {
        return this.birthDate;
    }
    public PetType getType() {
        return this.type;
    }
    public void setType(PetType type) {
        this.type = type;
    }
    // Returns the live backing set; mutations affect this entity directly.
    public Collection<Visit> getVisits() {
        return this.visits;
    }
    public void addVisit(Visit visit) {
        getVisits().add(visit);
    }
}
|
Pet
|
java
|
mapstruct__mapstruct
|
processor/src/test/java/org/mapstruct/ap/test/references/SomeOtherType.java
|
{
"start": 235,
"end": 1197
}
|
class ____ extends BaseType {

    // Single payload carried by this type.
    private String value;

    public SomeOtherType(String value) {
        this.value = value;
    }

    public String getValue() {
        return value;
    }

    public void setValue(String value) {
        this.value = value;
    }

    /** Hash derived solely from {@code value}: 31 + its hash, 0 when null. */
    @Override
    public int hashCode() {
        return 31 + ( value == null ? 0 : value.hashCode() );
    }

    /** Equal iff same concrete class and equal {@code value} (both null counts as equal). */
    @Override
    public boolean equals(Object obj) {
        if ( this == obj ) {
            return true;
        }
        if ( obj == null || getClass() != obj.getClass() ) {
            return false;
        }
        SomeOtherType that = (SomeOtherType) obj;
        return value == null ? that.value == null : value.equals( that.value );
    }
}
|
SomeOtherType
|
java
|
alibaba__druid
|
core/src/main/java/com/alibaba/druid/sql/dialect/hologres/parser/HologresLexer.java
|
{
"start": 215,
"end": 406
}
|
// Lexer for the Hologres dialect: reuses the PostgreSQL lexing rules and only
// switches the reported DbType to hologres.
class ____
        extends PGLexer {
    public HologresLexer(String input, SQLParserFeature... features) {
        super(input, features);
        dbType = DbType.hologres;
    }
}
|
HologresLexer
|
java
|
alibaba__nacos
|
prometheus/src/main/java/com/alibaba/nacos/prometheus/conf/PrometheusSecurityConfiguration.java
|
{
"start": 1911,
"end": 2438
}
|
class ____ {
@Bean
@Conditional(ConditionOnNoAuthPluginType.class)
public SecurityFilterChain prometheusSecurityFilterChain(HttpSecurity http) throws Exception {
http.authorizeHttpRequests(
(authorizeHttpRequests) -> authorizeHttpRequests.requestMatchers(PROMETHEUS_CONTROLLER_PATH,
PROMETHEUS_CONTROLLER_NAMESPACE_PATH, PROMETHEUS_CONTROLLER_SERVICE_PATH).permitAll());
return http.getOrBuild();
}
private static
|
PrometheusSecurityConfiguration
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Container.java
|
{
"start": 2894,
"end": 9701
}
|
class ____ implements Comparable<Container> {
  // Framework factory: builds a Container record via Records and populates
  // every field; execution type defaults to GUARANTEED.
  @Private
  @Unstable
  public static Container newInstance(ContainerId containerId, NodeId nodeId,
      String nodeHttpAddress, Resource resource, Priority priority,
      Token containerToken) {
    return newInstance(containerId, nodeId, nodeHttpAddress, resource, priority,
        containerToken, ExecutionType.GUARANTEED);
  }
  @Private
  @Unstable
  public static Container newInstance(ContainerId containerId, NodeId nodeId,
      String nodeHttpAddress, Resource resource, Priority priority,
      Token containerToken, ExecutionType executionType) {
    Container container = Records.newRecord(Container.class);
    container.setId(containerId);
    container.setNodeId(nodeId);
    container.setNodeHttpAddress(nodeHttpAddress);
    container.setResource(resource);
    container.setPriority(priority);
    container.setContainerToken(containerToken);
    container.setExecutionType(executionType);
    return container;
  }
  /**
   * Get the globally unique identifier for the container.
   * @return globally unique identifier for the container
   */
  @Public
  @Stable
  public abstract ContainerId getId();
  // Mutators annotated @Private below are framework-internal, not public API.
  @Private
  @Unstable
  public abstract void setId(ContainerId id);
  /**
   * Get the identifier of the node on which the container is allocated.
   * @return identifier of the node on which the container is allocated
   */
  @Public
  @Stable
  public abstract NodeId getNodeId();
  @Private
  @Unstable
  public abstract void setNodeId(NodeId nodeId);
  /**
   * Get the http uri of the node on which the container is allocated.
   * @return http uri of the node on which the container is allocated
   */
  @Public
  @Stable
  public abstract String getNodeHttpAddress();
  @Private
  @Unstable
  public abstract void setNodeHttpAddress(String nodeHttpAddress);
  /**
   * Get the exposed ports of the node on which the container is allocated.
   * @return exposed ports of the node on which the container is allocated
   */
  @Public
  @Stable
  public abstract Map<String, List<Map<String, String>>> getExposedPorts();
  @Private
  @Unstable
  public abstract void setExposedPorts(
      Map<String, List<Map<String, String>>> ports);
  /**
   * Get the <code>Resource</code> allocated to the container.
   * @return <code>Resource</code> allocated to the container
   */
  @Public
  @Stable
  public abstract Resource getResource();
  @Private
  @Unstable
  public abstract void setResource(Resource resource);
  /**
   * Get the <code>Priority</code> at which the <code>Container</code> was
   * allocated.
   * @return <code>Priority</code> at which the <code>Container</code> was
   *         allocated
   */
  @Public
  @Stable
  public abstract Priority getPriority();
  @Private
  @Unstable
  public abstract void setPriority(Priority priority);
  /**
   * Get the <code>ContainerToken</code> for the container.
   * <p><code>ContainerToken</code> is the security token used by the framework
   * to verify authenticity of any <code>Container</code>.</p>
   *
   * <p>The <code>ResourceManager</code>, on container allocation provides a
   * secure token which is verified by the <code>NodeManager</code> on
   * container launch.</p>
   *
   * <p>Applications do not need to care about <code>ContainerToken</code>, they
   * are transparently handled by the framework - the allocated
   * <code>Container</code> includes the <code>ContainerToken</code>.</p>
   *
   * @see ApplicationMasterProtocol#allocate(org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest)
   * @see ContainerManagementProtocol#startContainers(org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest)
   *
   * @return <code>ContainerToken</code> for the container
   */
  @Public
  @Stable
  public abstract Token getContainerToken();
  @Private
  @Unstable
  public abstract void setContainerToken(Token containerToken);
  /**
   * Get the <code>ExecutionType</code> for the container.
   * @return <code>ExecutionType</code> for the container.
   */
  @Private
  @Unstable
  public abstract ExecutionType getExecutionType();
  /**
   * Set the <code>ExecutionType</code> for the container.
   * @param executionType ExecutionType
   */
  @Private
  @Unstable
  public abstract void setExecutionType(ExecutionType executionType);
  /**
   * Get the optional <em>ID</em> corresponding to the original {@code
   * ResourceRequest{@link #getAllocationRequestId()}}s which is satisfied by
   * this allocated {@code Container}.
   * <p>
   * The scheduler may return multiple {@code AllocateResponse}s corresponding
   * to the same ID as and when scheduler allocates {@code Container}s.
   * <b>Applications</b> can continue to completely ignore the returned ID in
   * the response and use the allocation for any of their outstanding requests.
   * <p>
   *
   * @return the <em>ID</em> corresponding to the original allocation request
   * which is satisfied by this allocation.
   */
  @Public
  @Evolving
  public long getAllocationRequestId() {
    // Default: records that do not track allocation request ids must not be
    // queried for one.
    throw new UnsupportedOperationException();
  }
  /**
   * Set the optional <em>ID</em> corresponding to the original {@code
   * ResourceRequest{@link #setAllocationRequestId(long)}
   * setAllocationRequestId()}}s which is satisfied by this allocated {@code
   * Container}.
   * <p>
   * The scheduler may return multiple {@code AllocateResponse}s corresponding
   * to the same ID as and when scheduler allocates {@code Container}s.
   * <b>Applications</b> can continue to completely ignore the returned ID in
   * the response and use the allocation for any of their outstanding requests.
   * If the ID is not set, scheduler will continue to work as previously and all
   * allocated {@code Container}(s) will have the default ID, -1.
   * <p>
   *
   * @param allocationRequestID the <em>ID</em> corresponding to the original
   *                            allocation request which is satisfied by this
   *                            allocation.
   */
  @Private
  @Unstable
  public void setAllocationRequestId(long allocationRequestID) {
    throw new UnsupportedOperationException();
  }
  /**
   * Get the version of this container. The version will be incremented when
   * a container is updated.
   *
   * @return version of this container.
   */
  @Private
  @Unstable
  public int getVersion() {
    // Default: containers that do not track updates report version 0.
    return 0;
  }
  /**
   * Set the version of this container.
   * @param version of this container.
   */
  @Private
  @Unstable
  public void setVersion(int version) {
    throw new UnsupportedOperationException();
  }
  @Private
  @Unstable
  public Set<String> getAllocationTags() {
    // Default: no allocation tags.
    return Collections.emptySet();
  }
  @Private
  @Unstable
  public void setAllocationTags(Set<String> allocationTags) {
    // No-op by default; concrete records may override.
  }
}
|
Container
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/ITestAzureFileSystemInstrumentation.java
|
{
"start": 2927,
"end": 21548
}
|
class ____ extends AbstractWasbTestBase {
protected static final Logger LOG =
LoggerFactory.getLogger(ITestAzureFileSystemInstrumentation.class);
@Override
protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
return AzureBlobStorageTestAccount.create();
}
@Test
public void testMetricTags() throws Exception {
String accountName =
getTestAccount().getRealAccount().getBlobEndpoint()
.getAuthority();
String containerName =
getTestAccount().getRealContainer().getName();
MetricsRecordBuilder myMetrics = getMyMetrics();
verify(myMetrics).add(argThat(
new TagMatcher("accountName", accountName)
));
verify(myMetrics).add(argThat(
new TagMatcher("containerName", containerName)
));
verify(myMetrics).add(argThat(
new TagMatcher("Context", "azureFileSystem")
));
verify(myMetrics).add(argThat(
new TagExistsMatcher("wasbFileSystemId")
));
}
@Test
public void testMetricsOnMkdirList() throws Exception {
long base = getBaseWebResponses();
// Create a directory
assertTrue(fs.mkdirs(new Path("a")));
// At the time of writing
// getAncestor uses 2 calls for each folder level /user/<name>/a
// plus 1 call made by checkContainer
// mkdir checks the hierarchy with 2 calls per level
// mkdirs calls storeEmptyDir to create the empty folder, which makes 5 calls
// For a total of 7 + 6 + 5 = 18 web responses
base = assertWebResponsesInRange(base, 1, 18);
assertEquals(1,
AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_DIRECTORIES_CREATED));
// List the root contents
assertEquals(1, getFileSystem().listStatus(new Path("/")).length);
base = assertWebResponsesEquals(base, 1);
assertNoErrors();
}
private BandwidthGaugeUpdater getBandwidthGaugeUpdater() {
NativeAzureFileSystem azureFs = (NativeAzureFileSystem) getFileSystem();
AzureNativeFileSystemStore azureStore = azureFs.getStore();
return azureStore.getBandwidthGaugeUpdater();
}
private static byte[] nonZeroByteArray(int size) {
byte[] data = new byte[size];
Arrays.fill(data, (byte)5);
return data;
}
@Test
public void testMetricsOnFileCreateRead() throws Exception {
long base = getBaseWebResponses();
assertEquals(0, AzureMetricsTestUtil.getCurrentBytesWritten(getInstrumentation()));
Path filePath = new Path("/metricsTest_webResponses");
final int FILE_SIZE = 1000;
// Suppress auto-update of bandwidth metrics so we get
// to update them exactly when we want to.
getBandwidthGaugeUpdater().suppressAutoUpdate();
// Create a file
Date start = new Date();
OutputStream outputStream = getFileSystem().create(filePath);
outputStream.write(nonZeroByteArray(FILE_SIZE));
outputStream.close();
long uploadDurationMs = new Date().getTime() - start.getTime();
// The exact number of requests/responses that happen to create a file
// can vary - at the time of writing this code it takes 10
// requests/responses for the 1000 byte file (33 for 100 MB),
// plus the initial container-check request but that
// can very easily change in the future. Just assert that we do roughly
// more than 2 but less than 15.
logOpResponseCount("Creating a 1K file", base);
base = assertWebResponsesInRange(base, 2, 15);
getBandwidthGaugeUpdater().triggerUpdate(true);
long bytesWritten = AzureMetricsTestUtil.getCurrentBytesWritten(getInstrumentation());
assertTrue(bytesWritten > (FILE_SIZE / 2) && bytesWritten < (FILE_SIZE * 2),
"The bytes written in the last second " + bytesWritten
+ " is pretty far from the expected range of around " + FILE_SIZE
+ " bytes plus a little overhead.");
long totalBytesWritten = AzureMetricsTestUtil.getCurrentTotalBytesWritten(getInstrumentation());
assertTrue(totalBytesWritten >= FILE_SIZE && totalBytesWritten < (FILE_SIZE * 2),
"The total bytes written " + totalBytesWritten
+ " is pretty far from the expected range of around " + FILE_SIZE
+ " bytes plus a little overhead.");
long uploadRate = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(), WASB_UPLOAD_RATE);
LOG.info("Upload rate: " + uploadRate + " bytes/second.");
long expectedRate = (FILE_SIZE * 1000L) / uploadDurationMs;
assertTrue(uploadRate >= expectedRate, "The upload rate " + uploadRate
+ " is below the expected range of around " + expectedRate
+ " bytes/second that the unit test observed. This should never be"
+ " the case since the test underestimates the rate by looking at "
+ " end-to-end time instead of just block upload time.");
long uploadLatency = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),
WASB_UPLOAD_LATENCY);
LOG.info("Upload latency: {}", uploadLatency);
long expectedLatency = uploadDurationMs; // We're uploading less than a block.
assertTrue(uploadLatency > 0,
"The upload latency " + uploadLatency
+ " should be greater than zero now that I've just uploaded a file.");
assertTrue(uploadLatency <= expectedLatency,
"The upload latency " + uploadLatency
+ " is more than the expected range of around " + expectedLatency
+ " milliseconds that the unit test observed. This should never be"
+ " the case since the test overestimates the latency by looking at "
+ " end-to-end time instead of just block upload time.");
// Read the file
start = new Date();
InputStream inputStream = getFileSystem().open(filePath);
int count = 0;
while (inputStream.read() >= 0) {
count++;
}
inputStream.close();
long downloadDurationMs = new Date().getTime() - start.getTime();
assertEquals(FILE_SIZE, count);
// Again, exact number varies. At the time of writing this code
// it takes 4 request/responses, so just assert a rough range between
// 1 and 10.
logOpResponseCount("Reading a 1K file", base);
base = assertWebResponsesInRange(base, 1, 10);
getBandwidthGaugeUpdater().triggerUpdate(false);
long totalBytesRead = AzureMetricsTestUtil.getCurrentTotalBytesRead(getInstrumentation());
assertEquals(FILE_SIZE, totalBytesRead);
long bytesRead = AzureMetricsTestUtil.getCurrentBytesRead(getInstrumentation());
assertTrue(bytesRead > (FILE_SIZE / 2) && bytesRead < (FILE_SIZE * 2),
"The bytes read in the last second " + bytesRead
+ " is pretty far from the expected range of around " + FILE_SIZE
+ " bytes plus a little overhead.");
long downloadRate = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(), WASB_DOWNLOAD_RATE);
LOG.info("Download rate: " + downloadRate + " bytes/second.");
expectedRate = (FILE_SIZE * 1000L) / downloadDurationMs;
assertTrue(downloadRate >= expectedRate,
"The download rate " + downloadRate
+ " is below the expected range of around " + expectedRate
+ " bytes/second that the unit test observed. This should never be"
+ " the case since the test underestimates the rate by looking at "
+ " end-to-end time instead of just block download time.");
long downloadLatency = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),
WASB_DOWNLOAD_LATENCY);
LOG.info("Download latency: " + downloadLatency);
expectedLatency = downloadDurationMs; // We're downloading less than a block.
assertTrue(downloadLatency > 0,
"The download latency " + downloadLatency
+ " should be greater than zero now that I've just downloaded a file.");
assertTrue(downloadLatency <= expectedLatency,
"The download latency " + downloadLatency
+ " is more than the expected range of around " + expectedLatency
+ " milliseconds that the unit test observed. This should never be"
+ " the case since the test overestimates the latency by looking at "
+ " end-to-end time instead of just block download time.");
assertNoErrors();
}
@Test
public void testMetricsOnBigFileCreateRead() throws Exception {
long base = getBaseWebResponses();
assertEquals(0, AzureMetricsTestUtil.getCurrentBytesWritten(getInstrumentation()));
Path filePath = new Path("/metricsTest_webResponses");
final int FILE_SIZE = 100 * 1024 * 1024;
// Suppress auto-update of bandwidth metrics so we get
// to update them exactly when we want to.
getBandwidthGaugeUpdater().suppressAutoUpdate();
// Create a file
OutputStream outputStream = getFileSystem().create(filePath);
outputStream.write(new byte[FILE_SIZE]);
outputStream.close();
// The exact number of requests/responses that happen to create a file
// can vary - at the time of writing this code it takes 34
// requests/responses for the 100 MB file,
// plus the initial container check request, but that
// can very easily change in the future. Just assert that we do roughly
// more than 20 but less than 50.
logOpResponseCount("Creating a 100 MB file", base);
base = assertWebResponsesInRange(base, 20, 50);
getBandwidthGaugeUpdater().triggerUpdate(true);
long totalBytesWritten = AzureMetricsTestUtil.getCurrentTotalBytesWritten(getInstrumentation());
assertTrue(totalBytesWritten >= FILE_SIZE && totalBytesWritten < (FILE_SIZE * 2),
"The total bytes written " + totalBytesWritten
+ " is pretty far from the expected range of around " + FILE_SIZE
+ " bytes plus a little overhead.");
long uploadRate = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(), WASB_UPLOAD_RATE);
LOG.info("Upload rate: " + uploadRate + " bytes/second.");
long uploadLatency = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),
WASB_UPLOAD_LATENCY);
LOG.info("Upload latency: " + uploadLatency);
assertTrue(uploadLatency > 0,
"The upload latency " + uploadLatency
+ " should be greater than zero now that I've just uploaded a file.");
// Read the file
InputStream inputStream = getFileSystem().open(filePath);
int count = 0;
while (inputStream.read() >= 0) {
count++;
}
inputStream.close();
assertEquals(FILE_SIZE, count);
// Again, exact number varies. At the time of writing this code
// it takes 27 request/responses, so just assert a rough range between
// 20 and 40.
logOpResponseCount("Reading a 100 MB file", base);
base = assertWebResponsesInRange(base, 20, 40);
getBandwidthGaugeUpdater().triggerUpdate(false);
long totalBytesRead = AzureMetricsTestUtil.getCurrentTotalBytesRead(getInstrumentation());
assertEquals(FILE_SIZE, totalBytesRead);
long downloadRate = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(), WASB_DOWNLOAD_RATE);
LOG.info("Download rate: " + downloadRate + " bytes/second.");
long downloadLatency = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),
WASB_DOWNLOAD_LATENCY);
LOG.info("Download latency: " + downloadLatency);
assertTrue(downloadLatency > 0,
"The download latency " + downloadLatency
+ " should be greater than zero now that I've just downloaded a file.");
}
@Test
public void testMetricsOnFileRename() throws Exception {
long base = getBaseWebResponses();
Path originalPath = new Path("/metricsTest_RenameStart");
Path destinationPath = new Path("/metricsTest_RenameFinal");
// Create an empty file
assertEquals(0, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_FILES_CREATED));
assertTrue(getFileSystem().createNewFile(originalPath));
logOpResponseCount("Creating an empty file", base);
base = assertWebResponsesInRange(base, 2, 20);
assertEquals(1, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_FILES_CREATED));
// Rename the file
assertTrue(
((FileSystem) getFileSystem()).rename(originalPath, destinationPath));
// Varies: at the time of writing this code it takes 7 requests/responses.
logOpResponseCount("Renaming a file", base);
base = assertWebResponsesInRange(base, 2, 15);
assertNoErrors();
}
@Test
public void testMetricsOnFileExistsDelete() throws Exception {
long base = getBaseWebResponses();
Path filePath = new Path("/metricsTest_delete");
// Check existence
assertFalse(getFileSystem().exists(filePath));
// At the time of writing this code it takes 2 requests/responses to
// check existence, which seems excessive, plus initial request for
// container check, plus 2 ancestor checks only in the secure case.
logOpResponseCount("Checking file existence for non-existent file", base);
base = assertWebResponsesInRange(base, 1, 5);
// Create an empty file
assertTrue(getFileSystem().createNewFile(filePath));
base = getCurrentWebResponses();
// Check existence again
assertTrue(getFileSystem().exists(filePath));
logOpResponseCount("Checking file existence for existent file", base);
base = assertWebResponsesInRange(base, 1, 4);
// Delete the file
assertEquals(0, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_FILES_DELETED));
assertTrue(getFileSystem().delete(filePath, false));
// At the time of writing this code it takes 4 requests/responses to
// delete, which seems excessive. Check for range 1-4 for now.
logOpResponseCount("Deleting a file", base);
base = assertWebResponsesInRange(base, 1, 4);
assertEquals(1, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_FILES_DELETED));
assertNoErrors();
}
@Test
public void testMetricsOnDirRename() throws Exception {
long base = getBaseWebResponses();
Path originalDirName = new Path("/metricsTestDirectory_RenameStart");
Path innerFileName = new Path(originalDirName, "innerFile");
Path destDirName = new Path("/metricsTestDirectory_RenameFinal");
// Create an empty directory
assertTrue(getFileSystem().mkdirs(originalDirName));
base = getCurrentWebResponses();
// Create an inner file
assertTrue(getFileSystem().createNewFile(innerFileName));
base = getCurrentWebResponses();
// Rename the directory
assertTrue(getFileSystem().rename(originalDirName, destDirName));
// At the time of writing this code it takes 11 requests/responses
// to rename the directory with one file. Check for range 1-20 for now.
logOpResponseCount("Renaming a directory", base);
base = assertWebResponsesInRange(base, 1, 20);
assertNoErrors();
}
/**
* Recursive discovery of path depth
* @param path path to measure.
* @return depth, where "/" == 0.
*/
int depth(Path path) {
if (path.isRoot()) {
return 0;
} else {
return 1 + depth(path.getParent());
}
}
@Test
public void testClientErrorMetrics() throws Exception {
String fileName = "metricsTestFile_ClientError";
Path filePath = new Path("/"+fileName);
final int FILE_SIZE = 100;
OutputStream outputStream = null;
String leaseID = null;
try {
// Create a file
outputStream = getFileSystem().create(filePath);
leaseID = getTestAccount().acquireShortLease(fileName);
try {
outputStream.write(new byte[FILE_SIZE]);
outputStream.close();
assertTrue(false, "Should've thrown");
} catch (AzureException ex) {
assertTrue(
ex.getMessage().contains("lease"), "Unexpected exception: " + ex);
}
assertEquals(1, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_CLIENT_ERRORS));
assertEquals(0, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_SERVER_ERRORS));
} finally {
if(leaseID != null){
getTestAccount().releaseLease(leaseID, fileName);
}
IOUtils.closeStream(outputStream);
}
}
private void logOpResponseCount(String opName, long base) {
LOG.info("{} took {} web responses to complete.",
opName, getCurrentWebResponses() - base);
}
/**
* Gets (and asserts) the value of the wasb_web_responses counter just
* after the creation of the file system object.
*/
private long getBaseWebResponses() {
// The number of requests should start at 0
return assertWebResponsesEquals(0, 0);
}
/**
* Gets the current value of the wasb_web_responses counter.
*/
private long getCurrentWebResponses() {
return AzureMetricsTestUtil.getCurrentWebResponses(getInstrumentation());
}
/**
* Checks that the wasb_web_responses counter is at the given value.
* @param base The base value (before the operation of interest).
* @param expected The expected value for the operation of interest.
* @return The new base value now.
*/
private long assertWebResponsesEquals(long base, long expected) {
assertCounter(WASB_WEB_RESPONSES, base + expected, getMyMetrics());
return base + expected;
}
private void assertNoErrors() {
assertEquals(0, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_CLIENT_ERRORS));
assertEquals(0, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_SERVER_ERRORS));
}
/**
* Checks that the wasb_web_responses counter is in the given range.
* @param base The base value (before the operation of interest).
* @param inclusiveLowerLimit The lower limit for what it should increase by.
* @param inclusiveUpperLimit The upper limit for what it should increase by.
* @return The new base value now.
*/
private long assertWebResponsesInRange(long base,
long inclusiveLowerLimit,
long inclusiveUpperLimit) {
long currentResponses = getCurrentWebResponses();
long justOperation = currentResponses - base;
assertTrue(justOperation >= inclusiveLowerLimit && justOperation <= inclusiveUpperLimit,
String.format("Web responses expected in range [%d, %d], but was %d.",
inclusiveLowerLimit, inclusiveUpperLimit, justOperation));
return currentResponses;
}
/**
* Gets the metrics for the file system object.
* @return The metrics record.
*/
private MetricsRecordBuilder getMyMetrics() {
return getMetrics(getInstrumentation());
}
private AzureFileSystemInstrumentation getInstrumentation() {
return getFileSystem().getInstrumentation();
}
/**
* A matcher
|
ITestAzureFileSystemInstrumentation
|
java
|
mapstruct__mapstruct
|
processor/src/test/java/org/mapstruct/ap/test/builtin/jodatime/mapper/LocalDateToXmlGregorianCalendar.java
|
{
"start": 496,
"end": 798
}
|
interface ____ {
LocalDateToXmlGregorianCalendar INSTANCE = Mappers.getMapper( LocalDateToXmlGregorianCalendar.class );
@Mapping( target = "xMLGregorianCalendar", source = "localDate")
XmlGregorianCalendarBean toXmlGregorianCalendarBean( LocalDateBean in );
}
|
LocalDateToXmlGregorianCalendar
|
java
|
mapstruct__mapstruct
|
processor/src/test/java/org/mapstruct/ap/test/bugs/_855/OrderedTarget.java
|
{
"start": 279,
"end": 833
}
|
class ____ {
private List<String> order = new LinkedList<String>();
public void setField0(String field0) {
order.add( "field0" );
}
public void setField1(String field1) {
order.add( "field1" );
}
public void setField2(String field2) {
order.add( "field2" );
}
public void setField3(String field3) {
order.add( "field3" );
}
public void setField4(String field4) {
order.add( "field4" );
}
public List<String> getOrder() {
return order;
}
}
|
OrderedTarget
|
java
|
apache__flink
|
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/table/stream/compact/CompactBucketWriter.java
|
{
"start": 1254,
"end": 2187
}
|
class ____<T> implements CompactWriter<T> {
private final BucketWriter<T, String> bucketWriter;
private final InProgressFileWriter<T, String> writer;
private CompactBucketWriter(
BucketWriter<T, String> bucketWriter, InProgressFileWriter<T, String> writer) {
this.bucketWriter = bucketWriter;
this.writer = writer;
}
@Override
public void write(T record) throws IOException {
// The currentTime is useless
this.writer.write(record, 0);
}
@Override
public void commit() throws IOException {
bucketWriter.recoverPendingFile(writer.closeForCommit()).commit();
}
public static <T> CompactWriter.Factory<T> factory(
SupplierWithException<BucketWriter<T, String>, IOException> factory) {
return new Factory<>(factory);
}
/** Factory to create {@link CompactBucketWriter}. */
private static
|
CompactBucketWriter
|
java
|
google__error-prone
|
check_api/src/test/java/com/google/errorprone/fixes/AppliedFixTest.java
|
{
"start": 1379,
"end": 2979
}
|
interface ____ breaking changes across JDK versions.
final EndPosTable endPositions =
(EndPosTable)
Proxy.newProxyInstance(
AppliedFixTest.class.getClassLoader(),
new Class<?>[] {EndPosTable.class},
(proxy, method, args) -> {
throw new UnsupportedOperationException();
});
// TODO(b/67738557): consolidate helpers for creating fake trees
JCTree node(int startPos, int endPos) {
return new JCTree() {
@Override
public Tag getTag() {
throw new UnsupportedOperationException();
}
@Override
public void accept(Visitor v) {
throw new UnsupportedOperationException();
}
@Override
public <R, D> R accept(TreeVisitor<R, D> v, D d) {
throw new UnsupportedOperationException();
}
@Override
public Kind getKind() {
throw new UnsupportedOperationException();
}
@Override
public int getStartPosition() {
return startPos;
}
@Override
public int getEndPosition(EndPosTable endPosTable) {
return endPos;
}
};
}
@Test
public void shouldApplySingleFixOnALine() {
JCTree node = node(11, 14);
AppliedFix fix = AppliedFix.apply("import org.me.B;", endPositions, SuggestedFix.delete(node));
assertThat(fix.snippet()).isEqualTo("import org.B;");
}
@Test
public void shouldReportOnlyTheChangedLineInNewSnippet() {
JCTree node = node(25, 26);
AppliedFix fix =
AppliedFix.apply(
"""
public
|
contains
|
java
|
mapstruct__mapstruct
|
processor/src/test/java/org/mapstruct/ap/test/fields/SourceTargetMapper.java
|
{
"start": 384,
"end": 680
}
|
interface ____ {
SourceTargetMapper INSTANCE = Mappers.getMapper( SourceTargetMapper.class );
@Mapping(target = "fieldWithMethods", source = "fieldOnlyWithGetter")
Target toTarget(Source source);
@InheritInverseConfiguration
Source toSource(Target target);
}
|
SourceTargetMapper
|
java
|
google__guice
|
extensions/dagger-adapter/test/com/google/inject/daggeradapter/ModuleSubcomponentsTest.java
|
{
"start": 922,
"end": 1031
}
|
class ____ extends TestCase {
@Module(subcomponents = TestSubcomponent.class)
static
|
ModuleSubcomponentsTest
|
java
|
apache__camel
|
components/camel-hazelcast/src/test/java/org/apache/camel/component/hazelcast/HazelcastMapProducerTest.java
|
{
"start": 1978,
"end": 13640
}
|
class ____ extends HazelcastCamelTestSupport implements Serializable {
private static final long serialVersionUID = 1L;
@Mock
private IMap<Object, Object> map;
@Override
protected void trainHazelcastInstance(HazelcastInstance hazelcastInstance) {
when(hazelcastInstance.getMap("foo")).thenReturn(map);
}
@Override
protected void verifyHazelcastInstance(HazelcastInstance hazelcastInstance) {
verify(hazelcastInstance, atLeastOnce()).getMap("foo");
}
@AfterEach
public void verifyMapMock() {
verifyNoMoreInteractions(map);
}
@Test
public void testWithInvalidOperation() {
assertThrows(CamelExecutionException.class,
() -> template.sendBody("direct:putInvalid", "my-foo"));
}
@Test
public void testPut() throws InterruptedException {
template.sendBodyAndHeader("direct:put", "my-foo", HazelcastConstants.OBJECT_ID, "4711");
verify(map).put("4711", "my-foo");
}
@Test
public void testPutWithOperationNumber() throws InterruptedException {
template.sendBodyAndHeader("direct:putWithOperationNumber", "my-foo", HazelcastConstants.OBJECT_ID, "4711");
verify(map).put("4711", "my-foo");
}
@Test
public void testPutWithOperationName() throws InterruptedException {
template.sendBodyAndHeader("direct:putWithOperationName", "my-foo", HazelcastConstants.OBJECT_ID, "4711");
verify(map).put("4711", "my-foo");
}
@Test
public void testPutWithTTL() throws InterruptedException {
Map<String, Object> headers = new HashMap<>();
headers.put(HazelcastConstants.OBJECT_ID, "4711");
headers.put(HazelcastConstants.TTL_VALUE, Long.valueOf(1));
headers.put(HazelcastConstants.TTL_UNIT, TimeUnit.MINUTES);
template.sendBodyAndHeaders("direct:put", "test", headers);
verify(map).put("4711", "test", 1, TimeUnit.MINUTES);
}
@Test
public void testUpdate() {
template.sendBodyAndHeader("direct:update", "my-fooo", HazelcastConstants.OBJECT_ID, "4711");
verify(map).lock("4711");
verify(map).replace("4711", "my-fooo");
verify(map).unlock("4711");
}
@Test
public void testGet() {
when(map.get("4711")).thenReturn("my-foo");
template.sendBodyAndHeader("direct:get", null, HazelcastConstants.OBJECT_ID, "4711");
String body = consumer.receiveBody("seda:out", 5000, String.class);
verify(map).get("4711");
assertEquals("my-foo", body);
}
@Test
public void testGetAllEmptySet() {
Set<Object> l = new HashSet<>();
Map t = new HashMap();
t.put("key1", "value1");
t.put("key2", "value2");
t.put("key3", "value3");
when(map.getAll(anySet())).thenReturn(t);
template.sendBodyAndHeader("direct:getAll", null, HazelcastConstants.OBJECT_ID, l);
String body = consumer.receiveBody("seda:out", 5000, String.class);
verify(map).getAll(l);
assertTrue(body.contains("key1=value1"));
assertTrue(body.contains("key2=value2"));
assertTrue(body.contains("key3=value3"));
}
@Test
public void testGetAllOnlyOneKey() {
Set<Object> l = new HashSet<>();
l.add("key1");
Map t = new HashMap();
t.put("key1", "value1");
when(map.getAll(l)).thenReturn(t);
template.sendBodyAndHeader("direct:getAll", null, HazelcastConstants.OBJECT_ID, l);
String body = consumer.receiveBody("seda:out", 5000, String.class);
verify(map).getAll(l);
assertEquals("{key1=value1}", body);
}
@Test
public void testDelete() {
template.sendBodyAndHeader("direct:delete", null, HazelcastConstants.OBJECT_ID, 4711);
verify(map).remove(4711);
}
@Test
public void testQuery() {
String sql = "bar > 1000";
when(map.values(any(SqlPredicate.class)))
.thenReturn(Arrays.<Object> asList(new Dummy("beta", 2000), new Dummy("gamma", 3000)));
template.sendBodyAndHeader("direct:queue", null, HazelcastConstants.QUERY, sql);
verify(map).values(any(SqlPredicate.class));
Collection<?> b1 = consumer.receiveBody("seda:out", 5000, Collection.class);
assertNotNull(b1);
assertEquals(2, b1.size());
}
@Test
public void testEmptyQuery() {
when(map.values()).thenReturn(
Arrays.<Object> asList(new Dummy("beta", 2000), new Dummy("gamma", 3000), new Dummy("delta", 4000)));
template.sendBody("direct:queue", null);
verify(map).values();
Collection<?> b1 = consumer.receiveBody("seda:out", 5000, Collection.class);
assertNotNull(b1);
assertEquals(3, b1.size());
}
@Test
public void testUpdateOldValue() throws InterruptedException {
Map<String, Object> headers = new HashMap<>();
headers.put(HazelcastConstants.OBJECT_ID, "4711");
headers.put(HazelcastConstants.OBJECT_VALUE, "my-foo");
template.sendBodyAndHeaders("direct:update", "replaced", headers);
verify(map).lock("4711");
verify(map).replace("4711", "my-foo", "replaced");
verify(map).unlock("4711");
}
@Test
public void testPutIfAbsent() throws InterruptedException {
Map<String, Object> headers = new HashMap<>();
headers.put(HazelcastConstants.OBJECT_ID, "4711");
template.sendBodyAndHeaders("direct:putIfAbsent", "replaced", headers);
verify(map).putIfAbsent("4711", "replaced");
}
@Test
public void testPutIfAbsentWithTtl() throws InterruptedException {
Map<String, Object> headers = new HashMap<>();
headers.put(HazelcastConstants.OBJECT_ID, "4711");
headers.put(HazelcastConstants.TTL_VALUE, Long.valueOf(1));
headers.put(HazelcastConstants.TTL_UNIT, TimeUnit.MINUTES);
template.sendBodyAndHeaders("direct:putIfAbsent", "replaced", headers);
verify(map).putIfAbsent("4711", "replaced", Long.valueOf(1), TimeUnit.MINUTES);
}
@Test
public void testEvict() throws InterruptedException {
Map<String, Object> headers = new HashMap<>();
headers.put(HazelcastConstants.OBJECT_ID, "4711");
template.sendBodyAndHeaders("direct:evict", "", headers);
verify(map).evict("4711");
}
@Test
public void testEvictAll() throws InterruptedException {
Map<String, Object> headers = new HashMap<>();
template.sendBodyAndHeaders("direct:evictAll", "", headers);
verify(map).evictAll();
}
@Test
public void testClear() throws InterruptedException {
template.sendBody("direct:clear", "test");
verify(map).clear();
}
@Test
public void testContainsKey() {
when(map.containsKey("testOk")).thenReturn(true);
when(map.containsKey("testKo")).thenReturn(false);
template.sendBodyAndHeader("direct:containsKey", null, HazelcastConstants.OBJECT_ID, "testOk");
Boolean body = consumer.receiveBody("seda:out", 5000, Boolean.class);
verify(map).containsKey("testOk");
assertEquals(true, body);
template.sendBodyAndHeader("direct:containsKey", null, HazelcastConstants.OBJECT_ID, "testKo");
body = consumer.receiveBody("seda:out", 5000, Boolean.class);
verify(map).containsKey("testKo");
assertEquals(false, body);
}
@Test
public void testContainsValue() {
when(map.containsValue("testOk")).thenReturn(true);
when(map.containsValue("testKo")).thenReturn(false);
template.sendBody("direct:containsValue", "testOk");
Boolean body = consumer.receiveBody("seda:out", 5000, Boolean.class);
verify(map).containsValue("testOk");
assertEquals(true, body);
template.sendBody("direct:containsValue", "testKo");
body = consumer.receiveBody("seda:out", 5000, Boolean.class);
verify(map).containsValue("testKo");
assertEquals(false, body);
}
@Override
protected RouteBuilder createRouteBuilder() throws Exception {
return new RouteBuilder() {
@Override
public void configure() throws Exception {
from("direct:putInvalid").setHeader(HazelcastConstants.OPERATION, constant("bogus"))
.to(String.format("hazelcast-%sfoo", HazelcastConstants.MAP_PREFIX));
from("direct:put").setHeader(HazelcastConstants.OPERATION, constant(HazelcastOperation.PUT))
.to(String.format("hazelcast-%sfoo", HazelcastConstants.MAP_PREFIX));
from("direct:putIfAbsent").setHeader(HazelcastConstants.OPERATION, constant(HazelcastOperation.PUT_IF_ABSENT))
.to(String.format("hazelcast-%sfoo", HazelcastConstants.MAP_PREFIX));
from("direct:update").setHeader(HazelcastConstants.OPERATION, constant(HazelcastOperation.UPDATE))
.to(String.format("hazelcast-%sfoo", HazelcastConstants.MAP_PREFIX));
from("direct:get").setHeader(HazelcastConstants.OPERATION, constant(HazelcastOperation.GET))
.to(String.format("hazelcast-%sfoo", HazelcastConstants.MAP_PREFIX))
.to("seda:out");
from("direct:getAll").setHeader(HazelcastConstants.OPERATION, constant(HazelcastOperation.GET_ALL))
.to(String.format("hazelcast-%sfoo", HazelcastConstants.MAP_PREFIX))
.to("seda:out");
from("direct:delete").setHeader(HazelcastConstants.OPERATION, constant(HazelcastOperation.DELETE))
.to(String.format("hazelcast-%sfoo", HazelcastConstants.MAP_PREFIX));
from("direct:queue").setHeader(HazelcastConstants.OPERATION, constant(HazelcastOperation.QUERY))
.to(String.format("hazelcast-%sfoo", HazelcastConstants.MAP_PREFIX))
.to("seda:out");
from("direct:clear").setHeader(HazelcastConstants.OPERATION, constant(HazelcastOperation.CLEAR))
.to(String.format("hazelcast-%sfoo", HazelcastConstants.MAP_PREFIX));
from("direct:evict").setHeader(HazelcastConstants.OPERATION, constant(HazelcastOperation.EVICT))
.to(String.format("hazelcast-%sfoo", HazelcastConstants.MAP_PREFIX))
.to("seda:out");
from("direct:evictAll").setHeader(HazelcastConstants.OPERATION, constant(HazelcastOperation.EVICT_ALL))
.to(String.format("hazelcast-%sfoo", HazelcastConstants.MAP_PREFIX))
.to("seda:out");
from("direct:containsKey").setHeader(HazelcastConstants.OPERATION, constant(HazelcastOperation.CONTAINS_KEY))
.to(String.format("hazelcast-%sfoo", HazelcastConstants.MAP_PREFIX))
.to("seda:out");
from("direct:containsValue")
.setHeader(HazelcastConstants.OPERATION, constant(HazelcastOperation.CONTAINS_VALUE))
.to(String.format("hazelcast-%sfoo", HazelcastConstants.MAP_PREFIX))
.to("seda:out");
from("direct:putWithOperationNumber").toF("hazelcast-%sfoo?operation=%s", HazelcastConstants.MAP_PREFIX,
HazelcastOperation.PUT);
from("direct:putWithOperationName").toF("hazelcast-%sfoo?operation=PUT", HazelcastConstants.MAP_PREFIX);
}
};
}
}
|
HazelcastMapProducerTest
|
java
|
apache__hadoop
|
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
|
{
"start": 9550,
"end": 11512
}
|
class ____ implements ChannelFutureListener {
@Metric("Shuffle output in bytes")
MutableCounterLong shuffleOutputBytes;
@Metric("# of failed shuffle outputs")
MutableCounterInt shuffleOutputsFailed;
@Metric("# of succeeeded shuffle outputs")
MutableCounterInt shuffleOutputsOK;
@Metric("# of current shuffle connections")
MutableGaugeInt shuffleConnections;
@Override
public void operationComplete(ChannelFuture future) throws Exception {
if (future.isSuccess()) {
shuffleOutputsOK.incr();
} else {
shuffleOutputsFailed.incr();
}
shuffleConnections.decr();
}
}
@SuppressWarnings("checkstyle:VisibilityModifier")
protected final MetricsSystem ms;
@SuppressWarnings("checkstyle:VisibilityModifier")
final ShuffleMetrics metrics;
ShuffleHandler(MetricsSystem ms) {
super(MAPREDUCE_SHUFFLE_SERVICEID);
this.ms = ms;
metrics = ms.register(new ShuffleMetrics());
}
public ShuffleHandler() {
this(DefaultMetricsSystem.instance());
}
/**
* Serialize the shuffle port into a ByteBuffer for use later on.
* @param port the port to be sent to the ApplciationMaster
* @return the serialized form of the port.
* @throws IOException on failure
*/
public static ByteBuffer serializeMetaData(int port) throws IOException {
//TODO these bytes should be versioned
DataOutputBuffer portDob = new DataOutputBuffer();
portDob.writeInt(port);
return ByteBuffer.wrap(portDob.getData(), 0, portDob.getLength());
}
/**
* A helper function to deserialize the metadata returned by ShuffleHandler.
* @param meta the metadata returned by the ShuffleHandler
* @return the port the Shuffle Handler is listening on to serve shuffle data.
* @throws IOException on failure
*/
public static int deserializeMetaData(ByteBuffer meta) throws IOException {
//TODO this should be returning a
|
ShuffleMetrics
|
java
|
google__dagger
|
dagger-producers/main/java/dagger/producers/internal/AnnotationUsages.java
|
{
"start": 1093,
"end": 1158
}
|
class ____ {}
private AnnotationUsages() {}
}
|
ProductionScopeUsage
|
java
|
google__guava
|
android/guava-tests/test/com/google/common/math/MathBenchmarking.java
|
{
"start": 1096,
"end": 4328
}
|
class ____ {
static final int ARRAY_SIZE = 0x10000;
static final int ARRAY_MASK = 0x0ffff;
static final Random RANDOM_SOURCE = new Random(314159265358979L);
static final int MAX_EXPONENT = 100;
/*
* Duplicated from LongMath.
* binomial(biggestBinomials[k], k) fits in a long, but not
* binomial(biggestBinomials[k] + 1, k).
*/
static final int[] biggestBinomials = {
Integer.MAX_VALUE,
Integer.MAX_VALUE,
Integer.MAX_VALUE,
3810779,
121977,
16175,
4337,
1733,
887,
534,
361,
265,
206,
169,
143,
125,
111,
101,
94,
88,
83,
79,
76,
74,
72,
70,
69,
68,
67,
67,
66,
66,
66,
66
};
/**
* Generates values in a distribution equivalent to randomNonNegativeBigInteger but omitting zero.
*/
static BigInteger randomPositiveBigInteger(int numBits) {
BigInteger result;
do {
result = randomNonNegativeBigInteger(numBits);
} while (result.signum() == 0);
return result;
}
/**
* Generates a number in [0, 2^numBits) with an exponential distribution. The floor of the log2 of
* the result is chosen uniformly at random in [0, numBits), and then the result is chosen in that
* range uniformly at random. Zero is treated as having log2 == 0.
*/
static BigInteger randomNonNegativeBigInteger(int numBits) {
int digits = RANDOM_SOURCE.nextInt(numBits);
if (digits == 0) {
return new BigInteger(1, RANDOM_SOURCE);
} else {
return new BigInteger(digits, RANDOM_SOURCE).setBit(digits);
}
}
/**
* Equivalent to calling randomPositiveBigInteger(numBits) and then flipping the sign with 50%
* probability.
*/
static BigInteger randomNonZeroBigInteger(int numBits) {
BigInteger result = randomPositiveBigInteger(numBits);
return RANDOM_SOURCE.nextBoolean() ? result : result.negate();
}
/**
* Chooses a number in (-2^numBits, 2^numBits) at random, with density concentrated in numbers of
* lower magnitude.
*/
static BigInteger randomBigInteger(int numBits) {
while (true) {
if (RANDOM_SOURCE.nextBoolean()) {
return randomNonNegativeBigInteger(numBits);
}
BigInteger neg = randomNonNegativeBigInteger(numBits).negate();
if (neg.signum() != 0) {
return neg;
}
}
}
/**
* Generates a number in [0, 2^numBits) with an exponential distribution. The floor of the log2 of
* the absolute value of the result is chosen uniformly at random in [0, numBits), and then the
* result is chosen from those possibilities uniformly at random.
*
* <p>Zero is treated as having log2 == 0.
*/
static double randomDouble(int maxExponent) {
double result = RANDOM_SOURCE.nextDouble();
result = Math.scalb(result, RANDOM_SOURCE.nextInt(maxExponent + 1));
return RANDOM_SOURCE.nextBoolean() ? result : -result;
}
/** Returns a random integer between zero and {@code MAX_EXPONENT}. */
static int randomExponent() {
return RANDOM_SOURCE.nextInt(MAX_EXPONENT + 1);
}
static double randomPositiveDouble() {
return Math.exp(randomDouble(6));
}
private MathBenchmarking() {}
}
|
MathBenchmarking
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStreamStatistics.java
|
{
"start": 1127,
"end": 3368
}
|
interface ____ extends IOStatisticsSource {
/**
* Seek backwards, incrementing the seek and backward seek counters.
*
* @param negativeOffset how far was the seek?
* This is expected to be negative.
*/
void seekBackwards(long negativeOffset);
/**
* Record a forward seek, adding a seek operation, a forward
* seek operation, and any bytes skipped.
*
* @param skipped number of bytes skipped by reading from the stream.
* If the seek was implemented by a close + reopen, set this to zero.
*/
void seekForwards(long skipped);
/**
* Record a forward or backward seek, adding a seek operation, a forward or
* a backward seek operation, and number of bytes skipped.
*
* @param seekTo seek to the position.
* @param currentPos current position.
*/
void seek(long seekTo, long currentPos);
/**
* Increment the bytes read counter by the number of bytes;
* no-op if the argument is negative.
*
* @param bytes number of bytes read.
*/
void bytesRead(long bytes);
/**
* Record the total bytes read from buffer.
*
* @param bytes number of bytes that are read from buffer.
*/
void bytesReadFromBuffer(long bytes);
/**
* Records the total number of seeks done in the buffer.
*/
void seekInBuffer();
/**
* A {@code read(byte[] buf, int off, int len)} operation has started.
*/
void readOperationStarted();
/**
* Records a successful remote read operation.
*/
void remoteReadOperation();
/**
* Records the bytes read from readAhead buffer.
* @param bytes the bytes to be incremented.
*/
void readAheadBytesRead(long bytes);
/**
* Records bytes read remotely after nothing from readAheadBuffer was read.
* @param bytes the bytes to be incremented.
*/
void remoteBytesRead(long bytes);
/**
* Get the IOStatisticsStore instance from AbfsInputStreamStatistics.
* @return instance of IOStatisticsStore which extends IOStatistics.
*/
IOStatistics getIOStatistics();
/**
* Makes the string of all the AbfsInputStream statistics.
* @return the string with all the statistics.
*/
@Override
String toString();
}
|
AbfsInputStreamStatistics
|
java
|
alibaba__nacos
|
naming/src/main/java/com/alibaba/nacos/naming/push/v2/executor/SpiImplPushExecutorHolder.java
|
{
"start": 919,
"end": 1978
}
|
class ____ {
private static final SpiImplPushExecutorHolder INSTANCE = new SpiImplPushExecutorHolder();
private final Set<SpiPushExecutor> pushExecutors;
private SpiImplPushExecutorHolder() {
pushExecutors = new HashSet<>(NacosServiceLoader.load(SpiPushExecutor.class));
}
public static SpiImplPushExecutorHolder getInstance() {
return INSTANCE;
}
/**
* Try to find an {@link PushExecutor} implement by SPI which interest to execute this push.
*
* @param clientId client id
* @param subscriber subscriber infor
* @return {@link PushExecutor} which interest to execute this push, otherwise {@code Optional.empty()}
*/
public Optional<SpiPushExecutor> findPushExecutorSpiImpl(String clientId, Subscriber subscriber) {
for (SpiPushExecutor each : pushExecutors) {
if (each.isInterest(clientId, subscriber)) {
return Optional.of(each);
}
}
return Optional.empty();
}
}
|
SpiImplPushExecutorHolder
|
java
|
apache__camel
|
core/camel-support/src/main/java/org/apache/camel/support/CustomizersSupport.java
|
{
"start": 2415,
"end": 2461
}
|
class ____ policies
*/
private static
|
for
|
java
|
bumptech__glide
|
integration/okhttp/src/main/java/com/bumptech/glide/integration/okhttp/OkHttpStreamFetcher.java
|
{
"start": 842,
"end": 3454
}
|
class ____ implements DataFetcher<InputStream> {
private static final String TAG = "OkHttpFetcher";
private final OkHttpClient client;
private final GlideUrl url;
@SuppressWarnings("WeakerAccess")
@Synthetic
InputStream stream;
@SuppressWarnings("WeakerAccess")
@Synthetic
ResponseBody responseBody;
// Public API.
@SuppressWarnings("WeakerAccess")
public OkHttpStreamFetcher(OkHttpClient client, GlideUrl url) {
this.client = client;
this.url = url;
}
@Override
public void loadData(
@NonNull Priority priority, @NonNull final DataCallback<? super InputStream> callback) {
Request.Builder requestBuilder = new Request.Builder().url(url.toStringUrl());
for (Map.Entry<String, String> headerEntry : url.getHeaders().entrySet()) {
String key = headerEntry.getKey();
requestBuilder.addHeader(key, headerEntry.getValue());
}
Request request = requestBuilder.build();
client
.newCall(request)
.enqueue(
new com.squareup.okhttp.Callback() {
@Override
public void onFailure(Request request, IOException e) {
if (Log.isLoggable(TAG, Log.DEBUG)) {
Log.d(TAG, "OkHttp failed to obtain result", e);
}
callback.onLoadFailed(e);
}
@Override
public void onResponse(Response response) throws IOException {
responseBody = response.body();
if (response.isSuccessful()) {
long contentLength = responseBody.contentLength();
stream =
ContentLengthInputStream.obtain(responseBody.byteStream(), contentLength);
callback.onDataReady(stream);
} else {
callback.onLoadFailed(new HttpException(response.message(), response.code()));
}
}
});
}
@Override
public void cleanup() {
try {
if (stream != null) {
stream.close();
}
} catch (IOException e) {
// Ignored
}
if (responseBody != null) {
try {
responseBody.close();
} catch (IOException e) {
// Ignored.
}
}
}
@Override
public void cancel() {
// TODO: call cancel on the client when this method is called on a background thread. See #257
}
@NonNull
@Override
public Class<InputStream> getDataClass() {
return InputStream.class;
}
@NonNull
@Override
public DataSource getDataSource() {
return DataSource.REMOTE;
}
}
|
OkHttpStreamFetcher
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/writeAsArray/WriteAsArray_jsonType.java
|
{
"start": 907,
"end": 1209
}
|
class ____ {
private VO vo;
public VO getVo() {
return vo;
}
public void setVo(VO vo) {
this.vo = vo;
}
}
@JSONType(serialzeFeatures=SerializerFeature.BeanToArray, parseFeatures=Feature.SupportArrayToBean)
public static
|
Parent
|
java
|
micronaut-projects__micronaut-core
|
inject-groovy/src/test/groovy/io/micronaut/inject/annotation/TopLevel.java
|
{
"start": 975,
"end": 1021
}
|
interface ____ {
Nested nested();
}
|
TopLevel
|
java
|
spring-projects__spring-boot
|
core/spring-boot/src/test/java/org/springframework/boot/context/properties/bind/BindableRuntimeHintsRegistrarTests.java
|
{
"start": 17794,
"end": 17928
}
|
class ____ {
private final Nested nested = new Nested();
public Nested getNested() {
return this.nested;
}
static
|
SampleType
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/search/crossproject/CrossProjectModeDecider.java
|
{
"start": 632,
"end": 1863
}
|
class ____ determine whether Cross-Project Search (CPS) applies to an inbound request.
* <p>
* CPS applicability is controlled at three levels:
* <ul>
* <li><b>Cluster level:</b> The {@code serverless.cross_project.enabled} setting determines
* whether CPS processing is available at all. In the future, all Serverless projects
* will support CPS, so this distinction will depend on whether the cluster is a
* Serverless cluster or not.</li>
* <li><b>API level:</b> The {@link org.elasticsearch.action.IndicesRequest.Replaceable#allowsCrossProject()}
* method determines whether a particular request type supports CPS processing.</li>
* <li><b>Request level:</b> An {@link org.elasticsearch.action.support.IndicesOptions} flag
* determines whether CPS should apply to the current
* request being processed. This fine-grained control is required because APIs that
* support CPS may also be used in contexts where CPS should not apply—for example,
* internal searches against the security system index to retrieve user roles, or CPS
* actions that execute in a flow where a parent action has already performed CPS
* processing.</li>
* </ul>
*/
public
|
to
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSourceOperatorTests.java
|
{
"start": 1878,
"end": 7340
}
|
class ____ extends SourceOperatorTestCase {
private final List<Directory> directories = new ArrayList<>();
private final List<IndexReader> readers = new ArrayList<>();
@Before
public void setUpDirectory() throws Exception {}
@After
public void closeIndex() throws IOException {
IOUtils.close(readers);
IOUtils.close(directories);
readers.clear();
directories.clear();
}
@Override
protected TimeSeriesSourceOperator.Factory simple(SimpleOptions options) {
if (readers.isEmpty()) {
var dir = newDirectory();
directories.add(dir);
readers.add(loadIndex(dir, between(1, 100)));
}
try {
return simple(List.of(new LuceneSliceQueue.QueryAndTags(new MatchAllDocsQuery(), List.of())));
} catch (Exception e) {
throw new AssertionError(e);
}
}
TimeSeriesSourceOperator.Factory simple(List<LuceneSliceQueue.QueryAndTags> queryAndTags) throws Exception {
List<LuceneSourceOperatorTests.MockShardContext> contexts = new ArrayList<>();
for (int i = 0; i < readers.size(); i++) {
contexts.add(new LuceneSourceOperatorTests.MockShardContext(readers.get(i), i));
}
return new TimeSeriesSourceOperator.Factory(
new IndexedByShardIdFromList<>(contexts),
c -> queryAndTags,
randomIntBetween(1, 4),
between(10, 100),
between(1, 100)
);
}
static IndexReader loadIndex(Directory directory, int numDocs) {
IndexWriterConfig config = newIndexWriterConfig();
try {
try (var writer = new IndexWriter(directory, config)) {
for (int d = 0; d < numDocs; d++) {
List<IndexableField> doc = new ArrayList<>();
doc.add(new SortedNumericDocValuesField("f", d));
writer.addDocument(doc);
if (random().nextInt(100) < 20) {
writer.commit();
}
}
writer.commit();
}
return DirectoryReader.open(directory);
} catch (IOException e) {
throw new AssertionError(e);
}
}
@Override
protected Matcher<String> expectedToStringOfSimple() {
return matchesRegex("TimeSeriesSourceOperator\\[shards = \\[test], maxPageSize = \\d+, remainingDocs = \\d+]");
}
@Override
protected Matcher<String> expectedDescriptionOfSimple() {
return matchesRegex("TimeSeriesSourceOperator\\[maxPageSize = \\d+, limit = \\d+]");
}
public void testSliceIndex() throws Exception {
int numShards = between(1, 10);
for (int i = 0; i < numShards; i++) {
var dir = newDirectory();
directories.add(dir);
readers.add(loadIndex(dir, between(10, 100)));
}
var factory = simple(
List.of(
new LuceneSliceQueue.QueryAndTags(SortedNumericDocValuesField.newSlowExactQuery("f", 3), List.of("t1")),
new LuceneSliceQueue.QueryAndTags(SortedNumericDocValuesField.newSlowExactQuery("f", 5), List.of("t2")),
new LuceneSliceQueue.QueryAndTags(SortedNumericDocValuesField.newSlowExactQuery("f", 7), List.of("t3"))
)
);
DriverContext driverContext = driverContext();
SourceOperator sourceOperator = factory.get(driverContext);
AtomicInteger lastSliceIndex = new AtomicInteger(-1);
SinkOperator sinkOperator = new PageConsumerOperator(p -> {
try {
// _doc, _slice_index, future_max_timestamp, tag
assertThat(p.getBlockCount(), equalTo(4));
assertThat(p.getBlock(0), instanceOf(DocBlock.class));
IntBlock sliceIndexBlock = p.getBlock(1);
IntVector sliceIndexVector = sliceIndexBlock.asVector();
assertNotNull(sliceIndexVector);
assertTrue(sliceIndexVector.isConstant());
int sliceIndex = sliceIndexVector.getInt(0);
assertThat(sliceIndex, greaterThanOrEqualTo(0));
// slice index is monotonic
assertThat(sliceIndex, either(equalTo(lastSliceIndex.get())).or(equalTo(lastSliceIndex.get() + 1)));
lastSliceIndex.set(sliceIndex);
LongBlock futureTimestampBlock = p.getBlock(2);
var longVector = futureTimestampBlock.asVector();
assertNotNull(longVector);
assertTrue(longVector.isConstant());
assertThat(longVector.getLong(0), equalTo(Long.MAX_VALUE));
BytesRefBlock tagBlock = p.getBlock(3);
var tagVector = tagBlock.asVector();
assertNotNull(tagVector);
} finally {
p.releaseBlocks();
}
});
Driver driver = new Driver(
"driver",
"test",
"cluster",
"node",
0,
0,
driverContext,
() -> "test",
sourceOperator,
List.of(),
sinkOperator,
TimeValue.timeValueNanos(1),
() -> {}
);
OperatorTestCase.runDriver(driver);
// assertThat(lastSliceIndex.get(), equalTo(numShards * 3 - 1));
}
}
|
TimeSeriesSourceOperatorTests
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/MultiTypeEsField.java
|
{
"start": 1434,
"end": 4899
}
|
class ____ extends EsField {
private final Map<String, Expression> indexToConversionExpressions;
public MultiTypeEsField(
String name,
DataType dataType,
boolean aggregatable,
Map<String, Expression> indexToConversionExpressions,
TimeSeriesFieldType timeSeriesFieldType
) {
super(name, dataType, Map.of(), aggregatable, timeSeriesFieldType);
this.indexToConversionExpressions = indexToConversionExpressions;
}
protected MultiTypeEsField(StreamInput in) throws IOException {
this(
((PlanStreamInput) in).readCachedString(),
DataType.readFrom(in),
in.readBoolean(),
in.readImmutableMap(i -> i.readNamedWriteable(Expression.class)),
readTimeSeriesFieldType(in)
);
}
@Override
public void writeContent(StreamOutput out) throws IOException {
((PlanStreamOutput) out).writeCachedString(getName());
getDataType().writeTo(out);
out.writeBoolean(isAggregatable());
out.writeMap(getIndexToConversionExpressions(), (o, v) -> out.writeNamedWriteable(v));
writeTimeSeriesFieldType(out);
}
public String getWriteableName() {
return "MultiTypeEsField";
}
public Map<String, Expression> getIndexToConversionExpressions() {
return indexToConversionExpressions;
}
public Expression getConversionExpressionForIndex(String indexName) {
return indexToConversionExpressions.get(indexName);
}
public static MultiTypeEsField resolveFrom(
InvalidMappedField invalidMappedField,
Map<String, Expression> typesToConversionExpressions
) {
Map<String, Set<String>> typesToIndices = invalidMappedField.getTypesToIndices();
DataType resolvedDataType = DataType.UNSUPPORTED;
Map<String, Expression> indexToConversionExpressions = new HashMap<>();
for (String typeName : typesToIndices.keySet()) {
Set<String> indices = typesToIndices.get(typeName);
Expression convertExpr = typesToConversionExpressions.get(typeName);
if (resolvedDataType == DataType.UNSUPPORTED) {
resolvedDataType = convertExpr.dataType();
} else if (resolvedDataType != convertExpr.dataType()) {
throw new IllegalArgumentException("Resolved data type mismatch: " + resolvedDataType + " != " + convertExpr.dataType());
}
for (String indexName : indices) {
indexToConversionExpressions.put(indexName, convertExpr);
}
}
return new MultiTypeEsField(
invalidMappedField.getName(),
resolvedDataType,
false,
indexToConversionExpressions,
invalidMappedField.getTimeSeriesFieldType()
);
}
@Override
public boolean equals(Object obj) {
if (super.equals(obj) == false) {
return false;
}
if (obj instanceof MultiTypeEsField other) {
return super.equals(other) && indexToConversionExpressions.equals(other.indexToConversionExpressions);
}
return false;
}
@Override
public int hashCode() {
return Objects.hash(super.hashCode(), indexToConversionExpressions);
}
@Override
public String toString() {
return super.toString() + " (" + indexToConversionExpressions + ")";
}
}
|
MultiTypeEsField
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/bvt/sql/mysql/HelpTest.java
|
{
"start": 900,
"end": 1300
}
|
class ____ extends TestCase {
public void test_help_0() throws Exception {
String sql = "HELP 'contents'";
MySqlStatementParser parser = new MySqlStatementParser(sql);
SQLStatement stmt = parser.parseStatementList().get(0);
parser.match(Token.EOF);
String output = SQLUtils.toMySqlString(stmt);
assertEquals("HELP 'contents'", output);
}
}
|
HelpTest
|
java
|
apache__avro
|
lang/java/avro/src/main/java/org/apache/avro/io/Encoder.java
|
{
"start": 1024,
"end": 1836
}
|
class ____ two types of methods. One type of methods support the writing
* of leaf values (for example, {@link #writeLong} and {@link #writeString}).
* These methods have analogs in {@link Decoder}.
* <p/>
* The other type of methods support the writing of maps and arrays. These
* methods are {@link #writeArrayStart}, {@link #startItem}, and
* {@link #writeArrayEnd} (and similar methods for maps). Some implementations
* of {@link Encoder} handle the buffering required to break large maps and
* arrays into blocks, which is necessary for applications that want to do
* streaming. (See {@link #writeArrayStart} for details on these methods.)
* <p/>
* {@link EncoderFactory} contains Encoder construction and configuration
* facilities.
*
* @see EncoderFactory
* @see Decoder
*/
public abstract
|
has
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/views/ViewSerialization2Test.java
|
{
"start": 548,
"end": 1936
}
|
class ____
{
String nameNull = null;
String nameComplex = "complexValue";
String nameComplexHidden = "nameComplexHiddenValue";
SimpleTestData testData = new SimpleTestData( );
SimpleTestData[] testDataArray = new SimpleTestData[] { new SimpleTestData( ), null };
@JsonView( Views.View.class )
public String getNameNull()
{
return nameNull;
}
public void setNameNull( String nameNull )
{
this.nameNull = nameNull;
}
@JsonView( Views.View.class )
public String getNameComplex()
{
return nameComplex;
}
public void setNameComplex( String nameComplex )
{
this.nameComplex = nameComplex;
}
public String getNameComplexHidden()
{
return nameComplexHidden;
}
public void setNameComplexHidden( String nameComplexHidden )
{
this.nameComplexHidden = nameComplexHidden;
}
@JsonView( Views.View.class )
public SimpleTestData getTestData()
{
return testData;
}
public void setTestData( SimpleTestData testData )
{
this.testData = testData;
}
@JsonView( Views.View.class )
public SimpleTestData[] getTestDataArray()
{
return testDataArray;
}
public void setTestDataArray( SimpleTestData[] testDataArray )
{
this.testDataArray = testDataArray;
}
}
static
|
ComplexTestData
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineFilterList.java
|
{
"start": 1475,
"end": 1717
}
|
class ____ extends TimelineFilter {
/**
* Specifies how filters in the filter list will be evaluated. AND means all
* the filters should match and OR means atleast one should match.
*/
@Private
@Unstable
public
|
TimelineFilterList
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/scheduler/adaptive/Failing.java
|
{
"start": 1499,
"end": 3174
}
|
class ____ extends StateWithExecutionGraph {
private final Context context;
Failing(
Context context,
ExecutionGraph executionGraph,
ExecutionGraphHandler executionGraphHandler,
OperatorCoordinatorHandler operatorCoordinatorHandler,
Logger logger,
Throwable failureCause,
ClassLoader userCodeClassLoader,
List<ExceptionHistoryEntry> failureCollection) {
super(
context,
executionGraph,
executionGraphHandler,
operatorCoordinatorHandler,
logger,
userCodeClassLoader,
failureCollection);
this.context = context;
getExecutionGraph().failJob(failureCause, System.currentTimeMillis());
}
@Override
public JobStatus getJobStatus() {
return JobStatus.FAILING;
}
@Override
public void cancel() {
context.goToCanceling(
getExecutionGraph(),
getExecutionGraphHandler(),
getOperatorCoordinatorHandler(),
getFailures());
}
@Override
void onFailure(Throwable failure, CompletableFuture<Map<String, String>> failureLabels) {
// We've already failed the execution graph, so there is noting else we can do.
}
@Override
void onGloballyTerminalState(JobStatus globallyTerminalState) {
Preconditions.checkState(globallyTerminalState == JobStatus.FAILED);
context.goToFinished(ArchivedExecutionGraph.createFrom(getExecutionGraph()));
}
/** Context of the {@link Failing} state. */
|
Failing
|
java
|
spring-projects__spring-framework
|
spring-test/src/test/java/org/springframework/test/context/testng/FailingBeforeAndAfterMethodsTestNGTests.java
|
{
"start": 4076,
"end": 4298
}
|
class ____ implements TestExecutionListener {
@Override
public void beforeTestClass(TestContext testContext) {
fail("always failing beforeTestClass()");
}
}
static
|
AlwaysFailingBeforeTestClassTestExecutionListener
|
java
|
netty__netty
|
codec-stomp/src/main/java/io/netty/handler/codec/stomp/StompCommand.java
|
{
"start": 707,
"end": 928
}
|
enum ____ {
STOMP,
CONNECT,
CONNECTED,
SEND,
SUBSCRIBE,
UNSUBSCRIBE,
ACK,
NACK,
BEGIN,
ABORT,
COMMIT,
DISCONNECT,
MESSAGE,
RECEIPT,
ERROR,
UNKNOWN
}
|
StompCommand
|
java
|
apache__logging-log4j2
|
log4j-core-test/src/test/java/org/apache/logging/log4j/core/HostNameTest.java
|
{
"start": 1564,
"end": 3062
}
|
class ____ {
private final ListAppender host;
private final RollingFileAppender hostFile;
public HostNameTest(
@Named("HostTest") final ListAppender list, @Named("HostFile") final RollingFileAppender rolling) {
host = list.clear();
hostFile = rolling;
}
@Test
void testHostname(final LoggerContext context) {
final org.apache.logging.log4j.Logger testLogger = context.getLogger("org.apache.logging.log4j.hosttest");
testLogger.debug("Hello, {}", "World");
final List<String> msgs = host.getMessages();
assertThat(msgs, hasSize(1));
String expected = NetUtils.getLocalHostname() + Strings.LINE_SEPARATOR;
assertThat(msgs.get(0), endsWith(expected));
assertNotNull(hostFile.getFileName(), "No Host FileAppender file name");
expected = "target/" + NetUtils.getLocalHostname() + ".log";
String name = hostFile.getFileName();
assertEquals(
name,
expected,
"Incorrect HostFile FileAppender file name - expected " + expected + " actual - " + name);
name = hostFile.getFilePattern();
assertNotNull(name, "No file pattern");
expected = "target/" + NetUtils.getLocalHostname() + "-%d{MM-dd-yyyy}-%i.log";
assertEquals(
name,
expected,
"Incorrect HostFile FileAppender file pattern - expected " + expected + " actual - " + name);
}
}
|
HostNameTest
|
java
|
spring-projects__spring-boot
|
module/spring-boot-webmvc/src/main/java/org/springframework/boot/webmvc/autoconfigure/DispatcherServletAutoConfiguration.java
|
{
"start": 6031,
"end": 7446
}
|
class ____ extends SpringBootCondition {
@Override
public ConditionOutcome getMatchOutcome(ConditionContext context, AnnotatedTypeMetadata metadata) {
ConditionMessage.Builder message = ConditionMessage.forCondition("Default DispatcherServlet");
ConfigurableListableBeanFactory beanFactory = context.getBeanFactory();
Assert.state(beanFactory != null, "'beanFactory' must not be null");
List<String> dispatchServletBeans = Arrays
.asList(beanFactory.getBeanNamesForType(DispatcherServlet.class, false, false));
if (dispatchServletBeans.contains(DEFAULT_DISPATCHER_SERVLET_BEAN_NAME)) {
return ConditionOutcome
.noMatch(message.found("dispatcher servlet bean").items(DEFAULT_DISPATCHER_SERVLET_BEAN_NAME));
}
if (beanFactory.containsBean(DEFAULT_DISPATCHER_SERVLET_BEAN_NAME)) {
return ConditionOutcome
.noMatch(message.found("non dispatcher servlet bean").items(DEFAULT_DISPATCHER_SERVLET_BEAN_NAME));
}
if (dispatchServletBeans.isEmpty()) {
return ConditionOutcome.match(message.didNotFind("dispatcher servlet beans").atAll());
}
return ConditionOutcome.match(message.found("dispatcher servlet bean", "dispatcher servlet beans")
.items(Style.QUOTE, dispatchServletBeans)
.append("and none is named " + DEFAULT_DISPATCHER_SERVLET_BEAN_NAME));
}
}
@Order(Ordered.LOWEST_PRECEDENCE - 10)
private static final
|
DefaultDispatcherServletCondition
|
java
|
hibernate__hibernate-orm
|
local-build-plugins/src/main/java/org/hibernate/orm/post/DialectReportTask.java
|
{
"start": 7270,
"end": 8746
}
|
class ____ {
private final Class<?> dialectImplClass;
private final DialectClassDelegate dialectClassDelegate;
private final Object dialectRef;
public static DialectDelegate from(Class<?> dialectImplClass, DialectClassDelegate dialectClassDelegate) {
return new DialectDelegate( dialectImplClass, dialectClassDelegate );
}
public DialectDelegate(Class<?> dialectImplClass, DialectClassDelegate dialectClassDelegate) {
this.dialectImplClass = dialectImplClass;
this.dialectClassDelegate = dialectClassDelegate;
try {
this.dialectRef = dialectImplClass.getConstructor().newInstance();
}
catch (Exception e) {
throw new RuntimeException( "Unable to create DialectDelegate for " + dialectImplClass.getName(), e );
}
}
public String getSimpleName() {
return dialectImplClass.getSimpleName();
}
public DialectClassDelegate getDialectClassDelegate() {
return dialectClassDelegate;
}
public Class<?> getDialectImplClass() {
return dialectImplClass;
}
public Object getDialectReference() {
return dialectRef;
}
public String getMinimumVersion() {
try {
final Object versionRef = dialectClassDelegate.getVersionMethod().invoke( dialectRef );
return versionRef.toString();
}
catch (Exception e) {
throw new RuntimeException( "Unable to access " + DialectClassDelegate.MIN_VERSION_METHOD_NAME + " for " + dialectClassDelegate.loadedDialectClass.getName(), e );
}
}
}
}
|
DialectDelegate
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/exceptionhandling/ConstraintViolationExceptionHandlingTest.java
|
{
"start": 3447,
"end": 3602
}
|
class ____ {
@Id
@GeneratedValue(strategy = GenerationType.IDENTITY)
private long id;
@Column(unique = true)
private String uniqueString;
}
}
|
AInfo
|
java
|
assertj__assertj-core
|
assertj-core/src/test/java/org/assertj/core/api/assumptions/BDDAssumptionsTest.java
|
{
"start": 7133,
"end": 7511
}
|
class ____ {
private final Integer actual = 1;
@Test
void should_run_test_when_assumption_passes() {
thenCode(() -> given(actual).isOne()).doesNotThrowAnyException();
}
@Test
void should_ignore_test_when_assumption_fails() {
expectAssumptionNotMetException(() -> given(actual).isZero());
}
}
@Nested
|
BDDAssumptions_given_Integer_Test
|
java
|
google__guice
|
core/src/com/google/inject/multibindings/ProvidesIntoOptional.java
|
{
"start": 1750,
"end": 2002
}
|
enum ____ {
/** Corresponds to {@link OptionalBinder#setBinding}. */
ACTUAL,
/** Corresponds to {@link OptionalBinder#setDefault}. */
DEFAULT
}
/** Specifies if the binding is for the actual or default value. */
Type value();
}
|
Type
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
|
{
"start": 2687,
"end": 29956
}
|
class ____ {
{
SnapshotTestHelper.disableLogs();
GenericTestUtils.setLogLevel(INode.LOG, Level.TRACE);
}
static final long seed = 0;
static final short NUM_DATANODES = 1;
static final int BLOCKSIZE = 1024;
static final long txid = 1;
private final Path dir = new Path("/TestSnapshot");
private static final String testDir =
GenericTestUtils.getTestDir().getAbsolutePath();
Configuration conf;
MiniDFSCluster cluster;
FSNamesystem fsn;
DistributedFileSystem hdfs;
public void createCluster() throws IOException {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES).build();
cluster.waitActive();
fsn = cluster.getNamesystem();
hdfs = cluster.getFileSystem();
}
@BeforeEach
public void setUp() throws Exception {
conf = new Configuration();
createCluster();
}
@AfterEach
public void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
cluster = null;
}
}
/**
* Create a temp fsimage file for testing.
* @param dir The directory where the fsimage file resides
* @param imageTxId The transaction id of the fsimage
* @return The file of the image file
*/
private File getImageFile(String dir, long imageTxId) {
return new File(dir, String.format("%s_%019d", NameNodeFile.IMAGE,
imageTxId));
}
/**
* Create a temp file for dumping the fsdir
* @param dir directory for the temp file
* @param suffix suffix of of the temp file
* @return the temp file
*/
private File getDumpTreeFile(String dir, String suffix) {
return new File(dir, String.format("dumpTree_%s", suffix));
}
/**
* Dump the fsdir tree to a temp file
* @param fileSuffix suffix of the temp file for dumping
* @return the temp file
*/
private File dumpTree2File(String fileSuffix) throws IOException {
File file = getDumpTreeFile(testDir, fileSuffix);
SnapshotTestHelper.dumpTree2File(fsn.getFSDirectory(), file);
return file;
}
/** Append a file without closing the output stream */
private HdfsDataOutputStream appendFileWithoutClosing(Path file, int length)
throws IOException {
byte[] toAppend = new byte[length];
Random random = new Random();
random.nextBytes(toAppend);
HdfsDataOutputStream out = (HdfsDataOutputStream) hdfs.append(file);
out.write(toAppend);
return out;
}
/** Save the fsimage to a temp file */
private File saveFSImageToTempFile() throws IOException {
SaveNamespaceContext context = new SaveNamespaceContext(fsn, txid,
new Canceler());
FSImageFormatProtobuf.Saver saver = new FSImageFormatProtobuf.Saver(context,
conf);
FSImageCompression compression = FSImageCompression.createCompression(conf);
File imageFile = getImageFile(testDir, txid);
fsn.readLock(RwLockMode.GLOBAL);
try {
saver.save(imageFile, compression);
} finally {
fsn.readUnlock(RwLockMode.GLOBAL, "saveFSImage");
}
return imageFile;
}
/** Load the fsimage from a temp file */
private void loadFSImageFromTempFile(File imageFile) throws IOException {
FSImageFormat.LoaderDelegator loader = FSImageFormat.newLoader(conf, fsn);
fsn.writeLock(RwLockMode.GLOBAL);
fsn.getFSDirectory().writeLock();
try {
loader.load(imageFile, false);
fsn.getFSDirectory().updateCountForQuota();
} finally {
fsn.getFSDirectory().writeUnlock();
fsn.writeUnlock(RwLockMode.GLOBAL, "loadFSImageFromTempFile");
}
}
/**
* Test when there is snapshot taken on root
*/
@Test
public void testSnapshotOnRoot() throws Exception {
final Path root = new Path("/");
hdfs.allowSnapshot(root);
hdfs.createSnapshot(root, "s1");
cluster.shutdown();
cluster = new MiniDFSCluster.Builder(conf).format(false)
.numDataNodes(NUM_DATANODES).build();
cluster.waitActive();
fsn = cluster.getNamesystem();
hdfs = cluster.getFileSystem();
// save namespace and restart cluster
hdfs.setSafeMode(SafeModeAction.ENTER);
hdfs.saveNamespace();
hdfs.setSafeMode(SafeModeAction.LEAVE);
cluster.shutdown();
cluster = new MiniDFSCluster.Builder(conf).format(false)
.numDataNodes(NUM_DATANODES).build();
cluster.waitActive();
fsn = cluster.getNamesystem();
hdfs = cluster.getFileSystem();
final INodeDirectory rootNode = fsn.dir.getRoot();
assertTrue(rootNode.getChildrenList(Snapshot.CURRENT_STATE_ID).isEmpty(),
"The children list of root should be empty");
// one snapshot on root: s1
DiffList<DirectoryDiff> diffList = rootNode.getDiffs().asList();
assertEquals(1, diffList.size());
Snapshot s1 = rootNode.getSnapshot(DFSUtil.string2Bytes("s1"));
assertEquals(s1.getId(), diffList.get(0).getSnapshotId());
// check SnapshotManager's snapshottable directory list
assertEquals(1, fsn.getSnapshotManager().getNumSnapshottableDirs());
SnapshottableDirectoryStatus[] sdirs = fsn.getSnapshotManager()
.getSnapshottableDirListing(null);
assertEquals(root, sdirs[0].getFullPath());
// save namespace and restart cluster
hdfs.setSafeMode(SafeModeAction.ENTER);
hdfs.saveNamespace();
hdfs.setSafeMode(SafeModeAction.LEAVE);
cluster.shutdown();
cluster = new MiniDFSCluster.Builder(conf).format(false)
.numDataNodes(NUM_DATANODES).build();
cluster.waitActive();
fsn = cluster.getNamesystem();
hdfs = cluster.getFileSystem();
}
/**
* Testing steps:
* <pre>
* 1. Creating/modifying directories/files while snapshots are being taken.
* 2. Dump the FSDirectory tree of the namesystem.
* 3. Save the namesystem to a temp file (FSImage saving).
* 4. Restart the cluster and format the namesystem.
* 5. Load the namesystem from the temp file (FSImage loading).
* 6. Dump the FSDirectory again and compare the two dumped string.
* </pre>
*/
@Test
public void testSaveLoadImage() throws Exception {
int s = 0;
// make changes to the namesystem
hdfs.mkdirs(dir);
SnapshotTestHelper.createSnapshot(hdfs, dir, "s" + ++s);
Path sub1 = new Path(dir, "sub1");
hdfs.mkdirs(sub1);
hdfs.setPermission(sub1, new FsPermission((short)0777));
Path sub11 = new Path(sub1, "sub11");
hdfs.mkdirs(sub11);
checkImage(s);
hdfs.createSnapshot(dir, "s" + ++s);
Path sub1file1 = new Path(sub1, "sub1file1");
Path sub1file2 = new Path(sub1, "sub1file2");
DFSTestUtil.createFile(hdfs, sub1file1, BLOCKSIZE, (short) 1, seed);
DFSTestUtil.createFile(hdfs, sub1file2, BLOCKSIZE, (short) 1, seed);
checkImage(s);
hdfs.createSnapshot(dir, "s" + ++s);
Path sub2 = new Path(dir, "sub2");
Path sub2file1 = new Path(sub2, "sub2file1");
Path sub2file2 = new Path(sub2, "sub2file2");
DFSTestUtil.createFile(hdfs, sub2file1, BLOCKSIZE, (short) 1, seed);
DFSTestUtil.createFile(hdfs, sub2file2, BLOCKSIZE, (short) 1, seed);
checkImage(s);
hdfs.createSnapshot(dir, "s" + ++s);
hdfs.setReplication(sub1file1, (short) 1);
hdfs.delete(sub1file2, true);
hdfs.setOwner(sub2, "dr.who", "unknown");
hdfs.delete(sub2file1, true);
checkImage(s);
hdfs.createSnapshot(dir, "s" + ++s);
Path sub1_sub2file2 = new Path(sub1, "sub2file2");
hdfs.rename(sub2file2, sub1_sub2file2);
hdfs.rename(sub1file1, sub2file1);
checkImage(s);
hdfs.rename(sub2file1, sub2file2);
checkImage(s);
}
void checkImage(int s) throws IOException {
final String name = "s" + s;
// dump the fsdir tree
File fsnBefore = dumpTree2File(name + "_before");
// save the namesystem to a temp file
File imageFile = saveFSImageToTempFile();
long numSdirBefore = fsn.getNumSnapshottableDirs();
long numSnapshotBefore = fsn.getNumSnapshots();
SnapshottableDirectoryStatus[] dirBefore = hdfs.getSnapshottableDirListing();
// shutdown the cluster
cluster.shutdown();
// dump the fsdir tree
File fsnBetween = dumpTree2File(name + "_between");
SnapshotTestHelper.compareDumpedTreeInFile(fsnBefore, fsnBetween, true);
// restart the cluster, and format the cluster
cluster = new MiniDFSCluster.Builder(conf).format(true)
.numDataNodes(NUM_DATANODES).build();
cluster.waitActive();
fsn = cluster.getNamesystem();
hdfs = cluster.getFileSystem();
// load the namesystem from the temp file
loadFSImageFromTempFile(imageFile);
// dump the fsdir tree again
File fsnAfter = dumpTree2File(name + "_after");
// compare two dumped tree
SnapshotTestHelper.compareDumpedTreeInFile(fsnBefore, fsnAfter, true);
long numSdirAfter = fsn.getNumSnapshottableDirs();
long numSnapshotAfter = fsn.getNumSnapshots();
SnapshottableDirectoryStatus[] dirAfter = hdfs.getSnapshottableDirListing();
assertEquals(numSdirBefore, numSdirAfter);
assertEquals(numSnapshotBefore, numSnapshotAfter);
assertEquals(dirBefore.length, dirAfter.length);
List<String> pathListBefore = new ArrayList<String>();
for (SnapshottableDirectoryStatus sBefore : dirBefore) {
pathListBefore.add(sBefore.getFullPath().toString());
}
for (SnapshottableDirectoryStatus sAfter : dirAfter) {
assertTrue(pathListBefore.contains(sAfter.getFullPath().toString()));
}
}
/**
* Test the fsimage saving/loading while file appending.
*/
@Test
@Timeout(value = 60)
public void testSaveLoadImageWithAppending() throws Exception {
Path sub1 = new Path(dir, "sub1");
Path sub1file1 = new Path(sub1, "sub1file1");
Path sub1file2 = new Path(sub1, "sub1file2");
DFSTestUtil.createFile(hdfs, sub1file1, BLOCKSIZE, (short) 1, seed);
DFSTestUtil.createFile(hdfs, sub1file2, BLOCKSIZE, (short) 1, seed);
// 1. create snapshot s0
hdfs.allowSnapshot(dir);
hdfs.createSnapshot(dir, "s0");
// 2. create snapshot s1 before appending sub1file1 finishes
HdfsDataOutputStream out = appendFileWithoutClosing(sub1file1, BLOCKSIZE);
out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
// also append sub1file2
DFSTestUtil.appendFile(hdfs, sub1file2, BLOCKSIZE);
hdfs.createSnapshot(dir, "s1");
out.close();
// 3. create snapshot s2 before appending finishes
out = appendFileWithoutClosing(sub1file1, BLOCKSIZE);
out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
hdfs.createSnapshot(dir, "s2");
out.close();
// 4. save fsimage before appending finishes
out = appendFileWithoutClosing(sub1file1, BLOCKSIZE);
out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
// dump fsdir
File fsnBefore = dumpTree2File("before");
// save the namesystem to a temp file
File imageFile = saveFSImageToTempFile();
// 5. load fsimage and compare
// first restart the cluster, and format the cluster
out.close();
cluster.shutdown();
cluster = new MiniDFSCluster.Builder(conf).format(true)
.numDataNodes(NUM_DATANODES).build();
cluster.waitActive();
fsn = cluster.getNamesystem();
hdfs = cluster.getFileSystem();
// then load the fsimage
loadFSImageFromTempFile(imageFile);
// dump the fsdir tree again
File fsnAfter = dumpTree2File("after");
// compare two dumped tree
SnapshotTestHelper.compareDumpedTreeInFile(fsnBefore, fsnAfter, true);
}
/**
* Test the fsimage loading while there is file under construction.
*/
@Test
@Timeout(value = 60)
public void testLoadImageWithAppending() throws Exception {
Path sub1 = new Path(dir, "sub1");
Path sub1file1 = new Path(sub1, "sub1file1");
Path sub1file2 = new Path(sub1, "sub1file2");
DFSTestUtil.createFile(hdfs, sub1file1, BLOCKSIZE, (short) 1, seed);
DFSTestUtil.createFile(hdfs, sub1file2, BLOCKSIZE, (short) 1, seed);
hdfs.allowSnapshot(dir);
hdfs.createSnapshot(dir, "s0");
HdfsDataOutputStream out = appendFileWithoutClosing(sub1file1, BLOCKSIZE);
out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
// save namespace and restart cluster
hdfs.setSafeMode(SafeModeAction.ENTER);
hdfs.saveNamespace();
hdfs.setSafeMode(SafeModeAction.LEAVE);
cluster.shutdown();
cluster = new MiniDFSCluster.Builder(conf).format(false)
.numDataNodes(NUM_DATANODES).build();
cluster.waitActive();
fsn = cluster.getNamesystem();
hdfs = cluster.getFileSystem();
}
/**
* Test fsimage loading when 1) there is an empty file loaded from fsimage,
* and 2) there is later an append operation to be applied from edit log.
*/
@Test
@Timeout(value = 60)
public void testLoadImageWithEmptyFile() throws Exception {
// create an empty file
Path file = new Path(dir, "file");
FSDataOutputStream out = hdfs.create(file);
out.close();
// save namespace
hdfs.setSafeMode(SafeModeAction.ENTER);
hdfs.saveNamespace();
hdfs.setSafeMode(SafeModeAction.LEAVE);
// append to the empty file
out = hdfs.append(file);
out.write(1);
out.close();
// restart cluster
cluster.shutdown();
cluster = new MiniDFSCluster.Builder(conf).format(false)
.numDataNodes(NUM_DATANODES).build();
cluster.waitActive();
hdfs = cluster.getFileSystem();
FileStatus status = hdfs.getFileStatus(file);
assertEquals(1, status.getLen());
}
/**
* Testing a special case with snapshots. When the following steps happen:
* <pre>
* 1. Take snapshot s1 on dir.
* 2. Create new dir and files under subsubDir, which is descendant of dir.
* 3. Take snapshot s2 on dir.
* 4. Delete subsubDir.
* 5. Delete snapshot s2.
* </pre>
* When we merge the diff from s2 to s1 (since we deleted s2), we need to make
* sure all the files/dirs created after s1 should be destroyed. Otherwise
* we may save these files/dirs to the fsimage, and cause FileNotFound
* Exception while loading fsimage.
*/
@Test
@Timeout(value = 300)
public void testSaveLoadImageAfterSnapshotDeletion()
throws Exception {
// create initial dir and subdir
Path dir = new Path("/dir");
Path subDir = new Path(dir, "subdir");
Path subsubDir = new Path(subDir, "subsubdir");
hdfs.mkdirs(subsubDir);
// take snapshots on subdir and dir
SnapshotTestHelper.createSnapshot(hdfs, dir, "s1");
// create new dir under initial dir
Path newDir = new Path(subsubDir, "newdir");
Path newFile = new Path(newDir, "newfile");
hdfs.mkdirs(newDir);
DFSTestUtil.createFile(hdfs, newFile, BLOCKSIZE, (short) 1, seed);
// create another snapshot
SnapshotTestHelper.createSnapshot(hdfs, dir, "s2");
// delete subsubdir
hdfs.delete(subsubDir, true);
// delete snapshot s2
hdfs.deleteSnapshot(dir, "s2");
// restart cluster
cluster.shutdown();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES)
.format(false).build();
cluster.waitActive();
fsn = cluster.getNamesystem();
hdfs = cluster.getFileSystem();
// save namespace to fsimage
hdfs.setSafeMode(SafeModeAction.ENTER);
hdfs.saveNamespace();
hdfs.setSafeMode(SafeModeAction.LEAVE);
cluster.shutdown();
cluster = new MiniDFSCluster.Builder(conf).format(false)
.numDataNodes(NUM_DATANODES).build();
cluster.waitActive();
fsn = cluster.getNamesystem();
hdfs = cluster.getFileSystem();
}
/**
* Test parallel compressed fsimage can be loaded serially.
*/
@Test
public void testLoadParallelCompressedImageSerial() throws Exception {
int s = 0;
cluster.shutdown();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES).build();
cluster.waitActive();
fsn = cluster.getNamesystem();
hdfs = cluster.getFileSystem();
hdfs.mkdirs(dir);
SnapshotTestHelper.createSnapshot(hdfs, dir, "s");
Path sub1 = new Path(dir, "sub1");
Path sub1file1 = new Path(sub1, "sub1file1");
Path sub1file2 = new Path(sub1, "sub1file2");
DFSTestUtil.createFile(hdfs, sub1file1, BLOCKSIZE, (short) 1, seed);
DFSTestUtil.createFile(hdfs, sub1file2, BLOCKSIZE, (short) 1, seed);
conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, false);
conf.setBoolean(DFSConfigKeys.DFS_IMAGE_PARALLEL_LOAD_KEY, false);
checkImage(s);
}
void rename(Path src, Path dst) throws Exception {
printTree("Before rename " + src + " -> " + dst);
hdfs.rename(src, dst);
printTree("After rename " + src + " -> " + dst);
}
void createFile(Path directory, String filename) throws Exception {
final Path f = new Path(directory, filename);
DFSTestUtil.createFile(hdfs, f, 0, NUM_DATANODES, seed);
}
void appendFile(Path directory, String filename) throws Exception {
final Path f = new Path(directory, filename);
DFSTestUtil.appendFile(hdfs, f, "more data");
printTree("appended " + f);
}
void deleteSnapshot(Path directory, String snapshotName) throws Exception {
hdfs.deleteSnapshot(directory, snapshotName);
printTree("deleted snapshot " + snapshotName);
}
/**
 * Moves a snapshotted directory twice (sub1/foo -> sub2/foo -> into sub3),
 * deletes the final copy, then removes the intermediate snapshots one at a
 * time with a cluster restart after each, verifying the fsimage stays
 * loadable and the namespace tree survives each restart unchanged
 * (restartCluster() does the before/after comparison).
 */
@Test
@Timeout(value = 60)
public void testDoubleRename() throws Exception {
final Path parent = new Path("/parent");
hdfs.mkdirs(parent);
final Path sub1 = new Path(parent, "sub1");
final Path sub1foo = new Path(sub1, "foo");
hdfs.mkdirs(sub1);
hdfs.mkdirs(sub1foo);
// file0 exists before s0; file1/file2 are created after it
createFile(sub1foo, "file0");
printTree("before s0");
hdfs.allowSnapshot(parent);
hdfs.createSnapshot(parent, "s0");
createFile(sub1foo, "file1");
createFile(sub1foo, "file2");
final Path sub2 = new Path(parent, "sub2");
hdfs.mkdirs(sub2);
final Path sub2foo = new Path(sub2, "foo");
// mv /parent/sub1/foo to /parent/sub2/foo
rename(sub1foo, sub2foo);
// two snapshots taken back-to-back capture the same post-rename state
hdfs.createSnapshot(parent, "s1");
hdfs.createSnapshot(parent, "s2");
printTree("created snapshots: s1, s2");
appendFile(sub2foo, "file1");
createFile(sub2foo, "file3");
final Path sub3 = new Path(parent, "sub3");
hdfs.mkdirs(sub3);
// mv /parent/sub2/foo to /parent/sub3/foo
rename(sub2foo, sub3);
// delete the live copy; snapshot references are all that keep it alive
hdfs.delete(sub3, true);
printTree("deleted " + sub3);
// drop the intermediate snapshots one by one, restarting in between so a
// corrupt fsimage after either deletion would fail the reload
deleteSnapshot(parent, "s1");
restartCluster();
deleteSnapshot(parent, "s2");
restartCluster();
}
/**
 * Saves the namespace, restarts the cluster from the saved fsimage, and
 * asserts the dumped namespace tree is identical before and after.
 */
void restartCluster() throws Exception {
  final File dumpBefore = dumpTree2File("before.txt");
  // saveNamespace() requires the namenode to be in safe mode
  hdfs.setSafeMode(SafeModeAction.ENTER);
  hdfs.saveNamespace();
  hdfs.setSafeMode(SafeModeAction.LEAVE);
  cluster.shutdown();
  // format(false) makes the new cluster load the fsimage just saved
  cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(NUM_DATANODES).format(false).build();
  cluster.waitActive();
  fsn = cluster.getNamesystem();
  hdfs = cluster.getFileSystem();
  final File dumpAfter = dumpTree2File("after.txt");
  SnapshotTestHelper.compareDumpedTreeInFile(dumpBefore, dumpAfter, true);
}
// Auto-flushing sink for the namespace-tree dumps written by printTree().
private final PrintWriter output = new PrintWriter(System.out, true);
// Monotonic counter used to label each printTree() dump.
private int printTreeCount = 0;
/**
 * Dumps the full namespace tree to {@link #output} under a numbered label
 * and verifies that {@code NamespacePrintVisitor} renders the identical
 * text.
 *
 * @param label human-readable tag printed above the dump
 * @return the dumped tree as a string
 */
String printTree(String label) throws Exception {
  output.println();
  output.println();
  output.println("***** " + printTreeCount++ + ": " + label);
  final String recursiveDump =
      fsn.getFSDirectory().getINode("/").dumpTreeRecursively().toString();
  output.println(recursiveDump);
  // the visitor-based printer must agree with dumpTreeRecursively()
  final String visitorDump = NamespacePrintVisitor.print2Sting(fsn);
  assertEquals(recursiveDump, visitorDump);
  return recursiveDump;
}
/**
 * Renames a directory twice across snapshot boundaries
 * (/dir1/dira/dirb -> /dir1/dirx/dirb -> under /dir2/dira), deletes the
 * final copy and an intermediate snapshot, then verifies the saved fsimage
 * can be reloaded by a restarted cluster.
 */
@Test
@Timeout(value = 60)
public void testFSImageWithDoubleRename() throws Exception {
  final Path dir1 = new Path("/dir1");
  final Path dir2 = new Path("/dir2");
  hdfs.mkdirs(dir1);
  hdfs.mkdirs(dir2);
  final Path dira = new Path(dir1, "dira");
  final Path dirx = new Path(dir1, "dirx");
  final Path dirb = new Path(dira, "dirb");
  hdfs.mkdirs(dira);
  hdfs.mkdirs(dirb);
  hdfs.mkdirs(dirx);
  hdfs.allowSnapshot(dir1);
  hdfs.createSnapshot(dir1, "s0");
  DFSTestUtil.createFile(
      hdfs, new Path(dirb, "file1"), BLOCKSIZE, (short) 1, seed);
  final Path dirbUnderDirx = new Path(dirx, "dirb");
  // mv /dir1/dira/dirb to /dir1/dirx/dirb
  hdfs.rename(dirb, dirbUnderDirx);
  hdfs.createSnapshot(dir1, "s1");
  DFSTestUtil.appendFile(hdfs, new Path("/dir1/dirx/dirb/file1"),
      "more data");
  final Path diraUnderDir2 = new Path(dir2, "dira");
  hdfs.mkdirs(diraUnderDir2);
  //mv dirx/dirb to /dir2/dira/dirb
  hdfs.rename(dirbUnderDirx, diraUnderDir2);
  hdfs.delete(diraUnderDir2, true);
  hdfs.deleteSnapshot(dir1, "s1");
  // save namespace and restart cluster
  hdfs.setSafeMode(SafeModeAction.ENTER);
  hdfs.saveNamespace();
  hdfs.setSafeMode(SafeModeAction.LEAVE);
  cluster.shutdown();
  cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(NUM_DATANODES).format(false).build();
  cluster.waitActive();
  fsn = cluster.getNamesystem();
  hdfs = cluster.getFileSystem();
}
/**
 * Creates a directory between snapshots, renames it to a sibling parent,
 * nests a subdirectory and file inside, deletes the whole subtree plus two
 * intermediate snapshots, and verifies the file is still reachable through
 * the last snapshot and that the saved fsimage reloads cleanly.
 */
@Test
@Timeout(value = 60)
public void testFSImageWithRename1() throws Exception {
  final Path dir1 = new Path("/dir1");
  final Path dir2 = new Path("/dir2");
  hdfs.mkdirs(dir1);
  hdfs.mkdirs(dir2);
  final Path dira = new Path(dir1, "dira");
  final Path dirx = new Path(dir1, "dirx");
  final Path dirb = new Path(dirx, "dirb");
  hdfs.mkdirs(dira);
  hdfs.mkdirs(dirx);
  hdfs.allowSnapshot(dir1);
  hdfs.createSnapshot(dir1, "s0");
  hdfs.mkdirs(dirb);
  hdfs.createSnapshot(dir1, "s1");
  final Path dirbUnderDira = new Path(dira, "dirb");
  // mv /dir1/dirx/dirb to /dir1/dira/dirb
  hdfs.rename(dirb, dirbUnderDira);
  hdfs.createSnapshot(dir1, "s2");
  final Path diry = new Path("/dir1/dira/dirb/diry");
  hdfs.mkdirs(diry);
  hdfs.createSnapshot(dir1, "s3");
  final Path file1 = new Path("/dir1/dira/dirb/diry/file1");
  DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, (short) 1, seed);
  hdfs.createSnapshot(dir1, "s4");
  hdfs.delete(new Path("/dir1/dira/dirb"), true);
  hdfs.deleteSnapshot(dir1, "s1");
  hdfs.deleteSnapshot(dir1, "s3");
  // file1 should exist in the last snapshot
  assertTrue(hdfs.exists(
      new Path("/dir1/.snapshot/s4/dira/dirb/diry/file1")));
  // save namespace and restart cluster
  hdfs.setSafeMode(SafeModeAction.ENTER);
  hdfs.saveNamespace();
  hdfs.setSafeMode(SafeModeAction.LEAVE);
  cluster.shutdown();
  cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(NUM_DATANODES).format(false).build();
  cluster.waitActive();
  fsn = cluster.getNamesystem();
  hdfs = cluster.getFileSystem();
}
/**
 * Creates a directory between snapshots, renames it to a sibling parent,
 * writes a file into the renamed directory, deletes two snapshots, and
 * verifies the file still exists in the live namespace and that the saved
 * fsimage reloads cleanly after a cluster restart.
 */
@Test
@Timeout(value = 60)
public void testFSImageWithRename2() throws Exception {
  final Path dir1 = new Path("/dir1");
  final Path dir2 = new Path("/dir2");
  hdfs.mkdirs(dir1);
  hdfs.mkdirs(dir2);
  Path dira = new Path(dir1, "dira");
  Path dirx = new Path(dir1, "dirx");
  Path dirb = new Path(dirx, "dirb");
  hdfs.mkdirs(dira);
  hdfs.mkdirs(dirx);
  hdfs.allowSnapshot(dir1);
  hdfs.createSnapshot(dir1, "s0");
  hdfs.mkdirs(dirb);
  hdfs.createSnapshot(dir1, "s1");
  Path renamePath = new Path(dira, "dirb");
  // mv /dir1/dirx/dirb to /dir1/dira/dirb
  hdfs.rename(dirb, renamePath);
  hdfs.createSnapshot(dir1, "s2");
  Path file1 = new Path("/dir1/dira/dirb/file1");
  // fix: reuse file1 instead of rebuilding the identical path literal inline
  DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, (short) 1, seed);
  hdfs.createSnapshot(dir1, "s3");
  hdfs.deleteSnapshot(dir1, "s1");
  hdfs.deleteSnapshot(dir1, "s3");
  // the file was created in the live tree, so it must still exist
  assertTrue(hdfs.exists(file1));
  // save namespace and restart cluster
  hdfs.setSafeMode(SafeModeAction.ENTER);
  hdfs.saveNamespace();
  hdfs.setSafeMode(SafeModeAction.LEAVE);
  cluster.shutdown();
  cluster = new MiniDFSCluster.Builder(conf).format(false)
      .numDataNodes(NUM_DATANODES).build();
  cluster.waitActive();
  fsn = cluster.getNamesystem();
  hdfs = cluster.getFileSystem();
}
/**
 * Same scenario as testFSImageWithRename1: create a directory between
 * snapshots, rename it under a sibling parent, nest a subdirectory and
 * file, delete the subtree and two snapshots, then verify the file is still
 * visible through snapshot s4 and the saved fsimage reloads cleanly.
 */
@Test
@Timeout(value = 60)
public void testFSImageWithRename3() throws Exception {
  final Path dir1 = new Path("/dir1");
  final Path dir2 = new Path("/dir2");
  hdfs.mkdirs(dir1);
  hdfs.mkdirs(dir2);
  final Path dira = new Path(dir1, "dira");
  final Path dirx = new Path(dir1, "dirx");
  final Path dirb = new Path(dirx, "dirb");
  hdfs.mkdirs(dira);
  hdfs.mkdirs(dirx);
  hdfs.allowSnapshot(dir1);
  hdfs.createSnapshot(dir1, "s0");
  hdfs.mkdirs(dirb);
  hdfs.createSnapshot(dir1, "s1");
  final Path movedDirb = new Path(dira, "dirb");
  // mv /dir1/dirx/dirb to /dir1/dira/dirb
  hdfs.rename(dirb, movedDirb);
  hdfs.createSnapshot(dir1, "s2");
  final Path diry = new Path("/dir1/dira/dirb/diry");
  hdfs.mkdirs(diry);
  hdfs.createSnapshot(dir1, "s3");
  final Path file1 = new Path("/dir1/dira/dirb/diry/file1");
  DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, (short) 1, seed);
  hdfs.createSnapshot(dir1, "s4");
  hdfs.delete(new Path("/dir1/dira/dirb"), true);
  hdfs.deleteSnapshot(dir1, "s1");
  hdfs.deleteSnapshot(dir1, "s3");
  // file1 should exist in the last snapshot
  assertTrue(hdfs.exists(new Path(
      "/dir1/.snapshot/s4/dira/dirb/diry/file1")));
  // save namespace and restart cluster
  hdfs.setSafeMode(SafeModeAction.ENTER);
  hdfs.saveNamespace();
  hdfs.setSafeMode(SafeModeAction.LEAVE);
  cluster.shutdown();
  cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(NUM_DATANODES).format(false).build();
  cluster.waitActive();
  fsn = cluster.getNamesystem();
  hdfs = cluster.getFileSystem();
}
/**
 * Like testFSImageWithRename3, but deletes only the nested file (not the
 * whole renamed subtree) before dropping two snapshots, then verifies the
 * file is still reachable through snapshot s4 and that the saved fsimage
 * reloads cleanly after a cluster restart.
 */
@Test
@Timeout(value = 60)
public void testFSImageWithRename4() throws Exception {
  final Path dir1 = new Path("/dir1");
  final Path dir2 = new Path("/dir2");
  hdfs.mkdirs(dir1);
  hdfs.mkdirs(dir2);
  Path dira = new Path(dir1, "dira");
  Path dirx = new Path(dir1, "dirx");
  Path dirb = new Path(dirx, "dirb");
  hdfs.mkdirs(dira);
  hdfs.mkdirs(dirx);
  hdfs.allowSnapshot(dir1);
  hdfs.createSnapshot(dir1, "s0");
  hdfs.mkdirs(dirb);
  hdfs.createSnapshot(dir1, "s1");
  Path renamePath = new Path(dira, "dirb");
  // mv /dir1/dirx/dirb to /dir1/dira/dirb
  hdfs.rename(dirb, renamePath);
  hdfs.createSnapshot(dir1, "s2");
  Path diry = new Path("/dir1/dira/dirb/diry");
  hdfs.mkdirs(diry);
  hdfs.createSnapshot(dir1, "s3");
  Path file1 = new Path("/dir1/dira/dirb/diry/file1");
  DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, (short) 1, seed);
  hdfs.createSnapshot(dir1, "s4");
  // fix: reuse file1 instead of rebuilding the identical path literal;
  // non-recursive delete of the file only -- the subtree stays live
  hdfs.delete(file1, false);
  hdfs.deleteSnapshot(dir1, "s1");
  hdfs.deleteSnapshot(dir1, "s3");
  // file1 should exist in the last snapshot
  assertTrue(hdfs.exists(
      new Path("/dir1/.snapshot/s4/dira/dirb/diry/file1")));
  // save namespace and restart cluster
  hdfs.setSafeMode(SafeModeAction.ENTER);
  hdfs.saveNamespace();
  hdfs.setSafeMode(SafeModeAction.LEAVE);
  cluster.shutdown();
  cluster = new MiniDFSCluster.Builder(conf).format(false)
      .numDataNodes(NUM_DATANODES).build();
  cluster.waitActive();
  fsn = cluster.getNamesystem();
  hdfs = cluster.getFileSystem();
}
}
|
TestFSImageWithSnapshot
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/parser/AsmParserTest1.java
|
{
"start": 191,
"end": 446
}
|
class ____ extends TestCase {
public void test_asm() throws Exception {
A a = JSON.parseObject("{\"f1\":123}", A.class);
Assert.assertEquals(123, a.getF1());
Assert.assertNotNull(a.getF2());
}
public static
|
AsmParserTest1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.