language stringclasses 1
value | repo stringclasses 60
values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | ReactiveX__RxJava | src/test/java/io/reactivex/rxjava3/internal/operators/flowable/FlowableTakeUntilTest.java | {
"start": 6091,
"end": 14651
} | class ____ implements Publisher<String> {
Subscriber<? super String> subscriber;
Subscription upstream;
TestObservable(Subscription s) {
this.upstream = s;
}
/* used to simulate subscription */
public void sendOnCompleted() {
subscriber.onComplete();
}
/* used to simulate subscription */
public void sendOnNext(String value) {
subscriber.onNext(value);
}
/* used to simulate subscription */
public void sendOnError(Throwable e) {
subscriber.onError(e);
}
@Override
public void subscribe(Subscriber<? super String> subscriber) {
this.subscriber = subscriber;
subscriber.onSubscribe(upstream);
}
}
@Test
public void untilFires() {
PublishProcessor<Integer> source = PublishProcessor.create();
PublishProcessor<Integer> until = PublishProcessor.create();
TestSubscriberEx<Integer> ts = new TestSubscriberEx<>();
source.takeUntil(until).subscribe(ts);
assertTrue(source.hasSubscribers());
assertTrue(until.hasSubscribers());
source.onNext(1);
ts.assertValue(1);
until.onNext(1);
ts.assertValue(1);
ts.assertNoErrors();
ts.assertTerminated();
assertFalse("Source still has observers", source.hasSubscribers());
assertFalse("Until still has observers", until.hasSubscribers());
assertFalse("TestSubscriber is unsubscribed", ts.isCancelled());
}
@Test
public void mainCompletes() {
PublishProcessor<Integer> source = PublishProcessor.create();
PublishProcessor<Integer> until = PublishProcessor.create();
TestSubscriberEx<Integer> ts = new TestSubscriberEx<>();
source.takeUntil(until).subscribe(ts);
assertTrue(source.hasSubscribers());
assertTrue(until.hasSubscribers());
source.onNext(1);
source.onComplete();
ts.assertValue(1);
ts.assertNoErrors();
ts.assertTerminated();
assertFalse("Source still has observers", source.hasSubscribers());
assertFalse("Until still has observers", until.hasSubscribers());
assertFalse("TestSubscriber is unsubscribed", ts.isCancelled());
}
@Test
public void downstreamUnsubscribes() {
PublishProcessor<Integer> source = PublishProcessor.create();
PublishProcessor<Integer> until = PublishProcessor.create();
TestSubscriberEx<Integer> ts = new TestSubscriberEx<>();
source.takeUntil(until).take(1).subscribe(ts);
assertTrue(source.hasSubscribers());
assertTrue(until.hasSubscribers());
source.onNext(1);
ts.assertValue(1);
ts.assertNoErrors();
ts.assertTerminated();
assertFalse("Source still has observers", source.hasSubscribers());
assertFalse("Until still has observers", until.hasSubscribers());
assertFalse("TestSubscriber is unsubscribed", ts.isCancelled());
}
@Test
public void backpressure() {
PublishProcessor<Integer> until = PublishProcessor.create();
TestSubscriber<Integer> ts = new TestSubscriber<>(0L);
Flowable.range(1, 10).takeUntil(until).subscribe(ts);
assertTrue(until.hasSubscribers());
ts.request(1);
ts.assertValue(1);
ts.assertNoErrors();
ts.assertNotComplete();
until.onNext(5);
ts.assertComplete();
ts.assertNoErrors();
assertFalse("Until still has observers", until.hasSubscribers());
assertFalse("TestSubscriber is unsubscribed", ts.isCancelled());
}
@Test
public void dispose() {
TestHelper.checkDisposed(PublishProcessor.create().takeUntil(Flowable.never()));
}
@Test
public void doubleOnSubscribe() {
TestHelper.checkDoubleOnSubscribeFlowable(new Function<Flowable<Integer>, Flowable<Integer>>() {
@Override
public Flowable<Integer> apply(Flowable<Integer> c) throws Exception {
return c.takeUntil(Flowable.never());
}
});
}
@Test
public void untilPublisherMainSuccess() {
PublishProcessor<Integer> main = PublishProcessor.create();
PublishProcessor<Integer> other = PublishProcessor.create();
TestSubscriber<Integer> ts = main.takeUntil(other).test();
assertTrue("Main no subscribers?", main.hasSubscribers());
assertTrue("Other no subscribers?", other.hasSubscribers());
main.onNext(1);
main.onNext(2);
main.onComplete();
assertFalse("Main has subscribers?", main.hasSubscribers());
assertFalse("Other has subscribers?", other.hasSubscribers());
ts.assertResult(1, 2);
}
@Test
public void untilPublisherMainComplete() {
PublishProcessor<Integer> main = PublishProcessor.create();
PublishProcessor<Integer> other = PublishProcessor.create();
TestSubscriber<Integer> ts = main.takeUntil(other).test();
assertTrue("Main no subscribers?", main.hasSubscribers());
assertTrue("Other no subscribers?", other.hasSubscribers());
main.onComplete();
assertFalse("Main has subscribers?", main.hasSubscribers());
assertFalse("Other has subscribers?", other.hasSubscribers());
ts.assertResult();
}
@Test
public void untilPublisherMainError() {
PublishProcessor<Integer> main = PublishProcessor.create();
PublishProcessor<Integer> other = PublishProcessor.create();
TestSubscriber<Integer> ts = main.takeUntil(other).test();
assertTrue("Main no subscribers?", main.hasSubscribers());
assertTrue("Other no subscribers?", other.hasSubscribers());
main.onError(new TestException());
assertFalse("Main has subscribers?", main.hasSubscribers());
assertFalse("Other has subscribers?", other.hasSubscribers());
ts.assertFailure(TestException.class);
}
@Test
public void untilPublisherOtherOnNext() {
PublishProcessor<Integer> main = PublishProcessor.create();
PublishProcessor<Integer> other = PublishProcessor.create();
TestSubscriber<Integer> ts = main.takeUntil(other).test();
assertTrue("Main no subscribers?", main.hasSubscribers());
assertTrue("Other no subscribers?", other.hasSubscribers());
other.onNext(1);
assertFalse("Main has subscribers?", main.hasSubscribers());
assertFalse("Other has subscribers?", other.hasSubscribers());
ts.assertResult();
}
@Test
public void untilPublisherOtherOnComplete() {
PublishProcessor<Integer> main = PublishProcessor.create();
PublishProcessor<Integer> other = PublishProcessor.create();
TestSubscriber<Integer> ts = main.takeUntil(other).test();
assertTrue("Main no subscribers?", main.hasSubscribers());
assertTrue("Other no subscribers?", other.hasSubscribers());
other.onComplete();
assertFalse("Main has subscribers?", main.hasSubscribers());
assertFalse("Other has subscribers?", other.hasSubscribers());
ts.assertResult();
}
@Test
public void untilPublisherOtherError() {
PublishProcessor<Integer> main = PublishProcessor.create();
PublishProcessor<Integer> other = PublishProcessor.create();
TestSubscriber<Integer> ts = main.takeUntil(other).test();
assertTrue("Main no subscribers?", main.hasSubscribers());
assertTrue("Other no subscribers?", other.hasSubscribers());
other.onError(new TestException());
assertFalse("Main has subscribers?", main.hasSubscribers());
assertFalse("Other has subscribers?", other.hasSubscribers());
ts.assertFailure(TestException.class);
}
@Test
public void untilPublisherDispose() {
PublishProcessor<Integer> main = PublishProcessor.create();
PublishProcessor<Integer> other = PublishProcessor.create();
TestSubscriber<Integer> ts = main.takeUntil(other).test();
assertTrue("Main no subscribers?", main.hasSubscribers());
assertTrue("Other no subscribers?", other.hasSubscribers());
ts.cancel();
assertFalse("Main has subscribers?", main.hasSubscribers());
assertFalse("Other has subscribers?", other.hasSubscribers());
ts.assertEmpty();
}
}
| TestObservable |
java | spring-projects__spring-security | access/src/main/java/org/springframework/security/web/access/DefaultWebInvocationPrivilegeEvaluator.java | {
"start": 1598,
"end": 4816
} | class ____ implements WebInvocationPrivilegeEvaluator, ServletContextAware {
protected static final Log logger = LogFactory.getLog(DefaultWebInvocationPrivilegeEvaluator.class);
private final AbstractSecurityInterceptor securityInterceptor;
private @Nullable ServletContext servletContext;
public DefaultWebInvocationPrivilegeEvaluator(AbstractSecurityInterceptor securityInterceptor) {
Assert.notNull(securityInterceptor, "SecurityInterceptor cannot be null");
Assert.isTrue(FilterInvocation.class.equals(securityInterceptor.getSecureObjectClass()),
"AbstractSecurityInterceptor does not support FilterInvocations");
Assert.notNull(securityInterceptor.getAccessDecisionManager(),
"AbstractSecurityInterceptor must provide a non-null AccessDecisionManager");
this.securityInterceptor = securityInterceptor;
}
/**
* Determines whether the user represented by the supplied <tt>Authentication</tt>
* object is allowed to invoke the supplied URI.
* @param uri the URI excluding the context path (a default context path setting will
* be used)
*/
@Override
public boolean isAllowed(String uri, @Nullable Authentication authentication) {
return isAllowed(null, uri, null, authentication);
}
/**
* Determines whether the user represented by the supplied <tt>Authentication</tt>
* object is allowed to invoke the supplied URI, with the given .
* <p>
* Note the default implementation of <tt>FilterInvocationSecurityMetadataSource</tt>
* disregards the <code>contextPath</code> when evaluating which secure object
* metadata applies to a given request URI, so generally the <code>contextPath</code>
* is unimportant unless you are using a custom
* <code>FilterInvocationSecurityMetadataSource</code>.
* @param uri the URI excluding the context path
* @param contextPath the context path (may be null, in which case a default value
* will be used).
* @param method the HTTP method (or null, for any method)
* @param authentication the <tt>Authentication</tt> instance whose authorities should
* be used in evaluation whether access should be granted.
* @return true if access is allowed, false if denied
*/
@Override
public boolean isAllowed(@Nullable String contextPath, String uri, @Nullable String method,
@Nullable Authentication authentication) {
Assert.notNull(uri, "uri parameter is required");
FilterInvocation filterInvocation = new FilterInvocation(contextPath, uri, method, this.servletContext);
Collection<ConfigAttribute> attributes = this.securityInterceptor.obtainSecurityMetadataSource()
.getAttributes(filterInvocation);
if (attributes == null) {
return (!this.securityInterceptor.isRejectPublicInvocations());
}
if (authentication == null) {
return false;
}
try {
this.securityInterceptor.getAccessDecisionManager().decide(authentication, filterInvocation, attributes);
return true;
}
catch (AccessDeniedException ex) {
logger.debug(LogMessage.format("%s denied for %s", filterInvocation, authentication), ex);
return false;
}
}
@Override
public void setServletContext(ServletContext servletContext) {
this.servletContext = servletContext;
}
}
| DefaultWebInvocationPrivilegeEvaluator |
java | apache__flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/calcite/RexModelCall.java | {
"start": 1325,
"end": 2459
} | class ____ extends RexCall {
private final ModelProvider modelProvider;
private final ContextResolvedModel contextResolvedModel;
public RexModelCall(
RelDataType outputType,
ContextResolvedModel contextResolvedModel,
ModelProvider modelProvider) {
super(outputType, new SqlSpecialOperator("Model", SqlKind.OTHER), List.of());
this.contextResolvedModel = contextResolvedModel;
this.modelProvider = modelProvider;
}
public ContextResolvedModel getContextResolvedModel() {
return contextResolvedModel;
}
public ModelProvider getModelProvider() {
return modelProvider;
}
@Override
protected String computeDigest(boolean withType) {
final StringBuilder sb = new StringBuilder(op.getName());
sb.append("(");
sb.append("MODEL ")
.append(contextResolvedModel.getIdentifier().asSummaryString())
.append(")");
if (withType) {
sb.append(":");
sb.append(type.getFullTypeString());
}
return sb.toString();
}
}
| RexModelCall |
java | apache__spark | sql/hive-thriftserver/src/main/java/org/apache/hive/service/auth/TSetIpAddressProcessor.java | {
"start": 1357,
"end": 1682
} | class ____ responsible for setting the ipAddress for operations executed via HiveServer2.
*
* - IP address is only set for operations that calls listeners with hookContext
* - IP address is only set if the underlying transport mechanism is socket
*
* @see org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext
*/
public | is |
java | mockito__mockito | mockito-core/src/test/java/org/mockito/internal/stubbing/answers/InvocationInfoTest.java | {
"start": 7873,
"end": 8024
} | class ____ {
void iAmNotAbstract() {}
}
return TheNotAbstract.class.getDeclaredMethod("iAmNotAbstract");
}
}
| TheNotAbstract |
java | google__dagger | javatests/dagger/hilt/android/testing/BindValueIntoMapTest.java | {
"start": 1701,
"end": 2356
} | interface ____ {
Map<String, String> getStringStringMap();
}
@Rule public final HiltAndroidRule rule = new HiltAndroidRule(this);
@Inject Provider<Map<String, String>> mapProvider;
@Test
public void testInjectedAndModified() throws Exception {
rule.inject();
Map<String, String> oldMap = mapProvider.get();
assertThat(oldMap).containsExactly(KEY1, VALUE1, KEY2, VALUE2);
boundValue1 = VALUE3;
Map<String, String> newMap = mapProvider.get();
assertThat(oldMap).containsExactly(KEY1, VALUE1, KEY2, VALUE2);
assertThat(newMap).containsExactly(KEY1, VALUE3, KEY2, VALUE2);
}
@MapKey
@ | BindValuesIntoMapEntryPoint |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/jsontype/jdk/TypeRefinementForMapTest.java | {
"start": 2152,
"end": 2485
} | class ____ {
private String part0;
private String part1;
public CompoundKey(String part0, String part1) {
this.part0 = part0;
this.part1 = part1;
}
public String getPart0() { return part0; }
public String getPart1() { return part1; }
}
static | CompoundKey |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/mixins/MixinsWithBundlesTest.java | {
"start": 577,
"end": 629
} | interface ____ {
}
public abstract | ExposeStuff |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/jpa/compliance/tck2_2/GeneratedValueTests.java | {
"start": 15840,
"end": 16567
} | class ____ {
/**
* This entity has an explicit {@link jakarta.persistence.TableGenerator} defined,
* but does not define {@link jakarta.persistence.TableGenerator#table()}. In
* this case, the generator-name ("my_id_table")
*/
@Id
@GeneratedValue( strategy = GenerationType.TABLE, generator = "my_id_table" )
@jakarta.persistence.TableGenerator( name = "my_id_table", allocationSize = 25 )
public Integer id;
public String name;
}
@Entity
@jakarta.persistence.TableGenerator(
name = "my_id_table",
table = "my_real_id_table",
pkColumnName = "PK_COL",
valueColumnName = "VAL_COL",
pkColumnValue = "DT1_ID",
allocationSize = 25
)
public static | ExplicitTableGeneratorImplicitNameEntity |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/bytecode/enhancement/cache/ManyToOneTestReusedColumn.java | {
"start": 5431,
"end": 6001
} | class ____ extends Container {
@ManyToOne
@JoinColumn(name = "food_id")
@Fetch(FetchMode.SELECT)
private Cheese cheese;
public Cheese getCheese() {
return cheese;
}
public void setCheese(Cheese cheese) {
this.cheese = cheese;
}
}
@Entity(name = "Food")
@BatchSize(size = 500)
@Inheritance(strategy = InheritanceType.SINGLE_TABLE)
@Cacheable
@Cache(usage = CacheConcurrencyStrategy.READ_WRITE)
@DiscriminatorColumn(discriminatorType = DiscriminatorType.STRING, name = "type")
@DiscriminatorValue(value = "FOOD")
public static | CheeseContainer |
java | elastic__elasticsearch | x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/ShowFunctions.java | {
"start": 1216,
"end": 2747
} | class ____ extends Command {
private final LikePattern pattern;
public ShowFunctions(Source source, LikePattern pattern) {
super(source);
this.pattern = pattern;
}
@Override
protected NodeInfo<ShowFunctions> info() {
return NodeInfo.create(this, ShowFunctions::new, pattern);
}
public LikePattern pattern() {
return pattern;
}
@Override
public List<Attribute> output() {
return asList(
new FieldAttribute(source(), "name", new KeywordEsField("name")),
new FieldAttribute(source(), "type", new KeywordEsField("type"))
);
}
@Override
public void execute(SqlSession session, ActionListener<Page> listener) {
FunctionRegistry registry = session.functionRegistry();
Collection<FunctionDefinition> functions = registry.listFunctions(pattern != null ? pattern.asJavaRegex() : null);
listener.onResponse(
of(session, functions.stream().map(f -> asList(f.name(), SqlFunctionTypeRegistry.INSTANCE.type(f.clazz()))).collect(toList()))
);
}
@Override
public int hashCode() {
return Objects.hash(pattern);
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
ShowFunctions other = (ShowFunctions) obj;
return Objects.equals(pattern, other.pattern);
}
}
| ShowFunctions |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java | {
"start": 9208,
"end": 9915
} | class ____ implements Runnable {
private TestRpcService proxy;
private volatile boolean done;
SlowRPC(TestRpcService proxy) {
this.proxy = proxy;
done = false;
}
boolean isDone() {
return done;
}
@Override
public void run() {
try {
// this would hang until two fast pings happened
ping(true);
done = true;
} catch (ServiceException e) {
assertTrue(false, "SlowRPC ping exception " + e);
}
}
void ping(boolean shouldSlow) throws ServiceException {
// this would hang until two fast pings happened
proxy.slowPing(null, newSlowPingRequest(shouldSlow));
}
}
/**
* A basic | SlowRPC |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/instance/frombean/InstanceFromBeanTest.java | {
"start": 1045,
"end": 1264
} | class ____ {
private String id;
@PostConstruct
void init() {
this.id = UUID.randomUUID().toString();
}
String getId() {
return id;
}
}
}
| Alpha |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetReservationHomeSubClusterRequest.java | {
"start": 1192,
"end": 1312
} | class ____ obtain the home sub-cluster for the specified
* {@link ReservationId}.
*/
@Private
@Unstable
public abstract | to |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/parser/error/ParseErrorTest_16.java | {
"start": 189,
"end": 499
} | class ____ extends TestCase {
public void test_for_error() throws Exception {
Exception error = null;
try {
JSON.parseObject("{\"value\":fale");
} catch (JSONException ex) {
error = ex;
}
Assert.assertNotNull(error);
}
}
| ParseErrorTest_16 |
java | junit-team__junit5 | junit-platform-engine/src/main/java/org/junit/platform/engine/support/discovery/EngineDiscoveryRequestResolver.java | {
"start": 7606,
"end": 8238
} | class ____<T extends TestDescriptor> {
private final List<Function<InitializationContext<T>, SelectorResolver>> resolverCreators = new ArrayList<>();
private final List<Function<InitializationContext<T>, TestDescriptor.Visitor>> visitorCreators = new ArrayList<>();
private Builder() {
}
/**
* Add a predefined resolver that resolves {@link ClasspathRootSelector
* ClasspathRootSelectors}, {@link ModuleSelector ModuleSelectors}, and
* {@link PackageSelector PackageSelectors} into {@link ClassSelector
* ClassSelectors} by scanning for classes that satisfy the supplied
* predicate in the respective | Builder |
java | netty__netty | transport-native-io_uring/src/test/java/io/netty/channel/uring/IoUringChannelConfigTest.java | {
"start": 1163,
"end": 2767
} | class ____ {
@BeforeAll
public static void loadJNI() {
assumeTrue(IoUring.isAvailable());
}
@Test
public void testIntegerOption() throws Exception {
IoUringSocketChannel channel = new IoUringSocketChannel();
IntegerUnixChannelOption opt = new IntegerUnixChannelOption("INT_OPT", 1, 2);
Integer zero = 0;
assertEquals(zero, channel.config().getOption(opt));
channel.config().setOption(opt, 1);
assertNotEquals(zero, channel.config().getOption(opt));
channel.fd().close();
}
@Test
public void testRawOption() throws Exception {
IoUringSocketChannel channel = new IoUringSocketChannel();
// Value for SOL_SOCKET and SO_REUSEADDR
// See https://github.com/torvalds/linux/blob/v5.17/include/uapi/asm-generic/socket.h
RawUnixChannelOption opt = new RawUnixChannelOption("RAW_OPT", 1, 2, 4);
CleanableDirectBuffer disabledCleanable = Buffer.allocateDirectBufferWithNativeOrder(4);
ByteBuffer disabled = disabledCleanable.buffer();
disabled.putInt(0).flip();
assertEquals(disabled, channel.config().getOption(opt));
CleanableDirectBuffer enabledCleanable = Buffer.allocateDirectBufferWithNativeOrder(4);
ByteBuffer enabled = enabledCleanable.buffer();
enabled.putInt(1).flip();
channel.config().setOption(opt, enabled);
assertNotEquals(disabled, channel.config().getOption(opt));
channel.fd().close();
disabledCleanable.clean();
enabledCleanable.clean();
}
}
| IoUringChannelConfigTest |
java | apache__logging-log4j2 | log4j-api-test/src/test/java/org/apache/logging/log4j/util/DeserializerHelper.java | {
"start": 1136,
"end": 2025
} | class ____ {
public static void main(final String... args) throws Exception {
final File file = new File(args[0]);
final Collection<String> allowedExtraClasses =
args.length > 1 ? Arrays.asList(args).subList(1, args.length) : Collections.emptyList();
ObjectInputStream in = null;
try {
in = new FilteredObjectInputStream(new FileInputStream(file), allowedExtraClasses);
final Object result = in.readObject();
System.out.println(result);
} catch (final Throwable t) {
System.err.println("Could not deserialize.");
throw t; // cause non-zero exit code
} finally {
try {
in.close();
} catch (final Throwable t) {
System.err.println("Error while closing: " + t);
}
}
}
}
| DeserializerHelper |
java | apache__avro | lang/java/avro/src/test/java/org/apache/avro/reflect/TestReflect.java | {
"start": 39904,
"end": 40234
} | class ____ {
}
@Test
void multipleAliasAnnotationsOnClass() {
check(MultipleAliasRecord.class,
"{\"type\":\"record\",\"name\":\"MultipleAliasRecord\",\"namespace\":\"org.apache.avro.reflect.TestReflect\",\"fields\":[],\"aliases\":[\"space1.alias1\",\"space2.alias2\"]}");
}
private static | MultipleAliasRecord |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/CheckReturnValueTest.java | {
"start": 24971,
"end": 25330
} | class ____ Object, not IFace, but /shrug
" new IFace() {};",
" }",
"}")
.doTest();
}
@Test
public void constructor_throwingContexts() {
compilationHelper
.addSourceLines(
"Foo.java",
"""
@com.google.errorprone.annotations.CheckReturnValue
public | is |
java | apache__dubbo | dubbo-config/dubbo-config-spring/src/test/java/org/apache/dubbo/config/spring/isolation/spring/annotation/consumer/dubbo/DemoServiceV1.java | {
"start": 1138,
"end": 1489
} | class ____ implements DemoService {
@DubboReference(version = "1.0.0", group = "Group1", scope = "remote", protocol = "dubbo")
private DemoService demoService;
@Override
public String sayName(String name) {
return demoService.sayName(name);
}
@Override
public Box getBox() {
return null;
}
}
| DemoServiceV1 |
java | spring-projects__spring-boot | module/spring-boot-sql/src/test/java/org/springframework/boot/sql/autoconfigure/init/OnDatabaseInitializationConditionTests.java | {
"start": 1271,
"end": 5050
} | class ____ {
@Test
void getMatchOutcomeWithPropertyNoSetMatches() {
OnDatabaseInitializationCondition condition = new OnTestDatabaseInitializationCondition("test.init-mode");
ConditionOutcome outcome = condition.getMatchOutcome(
mockConditionContext(TestPropertyValues.of("test.another", "noise")),
mock(AnnotatedTypeMetadata.class));
assertThat(outcome.isMatch()).isTrue();
}
@Test
void getMatchOutcomeWithPropertySetToAlwaysMatches() {
OnDatabaseInitializationCondition condition = new OnTestDatabaseInitializationCondition("test.init-mode");
ConditionOutcome outcome = condition.getMatchOutcome(
mockConditionContext(TestPropertyValues.of("test.init-mode=always")),
mock(AnnotatedTypeMetadata.class));
assertThat(outcome.isMatch()).isTrue();
}
@Test
void getMatchOutcomeWithPropertySetToEmbeddedMatches() {
OnDatabaseInitializationCondition condition = new OnTestDatabaseInitializationCondition("test.init-mode");
ConditionOutcome outcome = condition.getMatchOutcome(
mockConditionContext(TestPropertyValues.of("test.init-mode=embedded")),
mock(AnnotatedTypeMetadata.class));
assertThat(outcome.isMatch()).isTrue();
}
@Test
void getMatchOutcomeWithPropertySetToNeverDoesNotMatch() {
OnDatabaseInitializationCondition condition = new OnTestDatabaseInitializationCondition("test.init-mode");
ConditionOutcome outcome = condition.getMatchOutcome(
mockConditionContext(TestPropertyValues.of("test.init-mode=never")), mock(AnnotatedTypeMetadata.class));
assertThat(outcome.isMatch()).isFalse();
}
@Test
void getMatchOutcomeWithPropertySetToEmptyStringIsIgnored() {
OnDatabaseInitializationCondition condition = new OnTestDatabaseInitializationCondition("test.init-mode");
ConditionOutcome outcome = condition.getMatchOutcome(
mockConditionContext(TestPropertyValues.of("test.init-mode")), mock(AnnotatedTypeMetadata.class));
assertThat(outcome.isMatch()).isTrue();
}
@Test
void getMatchOutcomeWithMultiplePropertiesUsesFirstSet() {
OnDatabaseInitializationCondition condition = new OnTestDatabaseInitializationCondition("test.init-mode",
"test.schema-mode", "test.init-schema-mode");
ConditionOutcome outcome = condition.getMatchOutcome(
mockConditionContext(TestPropertyValues.of("test.init-schema-mode=embedded")),
mock(AnnotatedTypeMetadata.class));
assertThat(outcome.isMatch()).isTrue();
assertThat(outcome.getMessage()).isEqualTo("TestDatabase Initialization test.init-schema-mode is EMBEDDED");
}
@Test
void getMatchOutcomeHasDedicatedDescription() {
OnDatabaseInitializationCondition condition = new OnTestDatabaseInitializationCondition("test.init-mode");
ConditionOutcome outcome = condition.getMatchOutcome(
mockConditionContext(TestPropertyValues.of("test.init-mode=embedded")),
mock(AnnotatedTypeMetadata.class));
assertThat(outcome.getMessage()).isEqualTo("TestDatabase Initialization test.init-mode is EMBEDDED");
}
@Test
void getMatchOutcomeHasWhenPropertyIsNotSetHasDefaultDescription() {
OnDatabaseInitializationCondition condition = new OnTestDatabaseInitializationCondition("test.init-mode");
ConditionOutcome outcome = condition.getMatchOutcome(mockConditionContext(TestPropertyValues.empty()),
mock(AnnotatedTypeMetadata.class));
assertThat(outcome.getMessage()).isEqualTo("TestDatabase Initialization default value is EMBEDDED");
}
private ConditionContext mockConditionContext(TestPropertyValues propertyValues) {
MockEnvironment environment = new MockEnvironment();
propertyValues.applyTo(environment);
ConditionContext conditionContext = mock(ConditionContext.class);
given(conditionContext.getEnvironment()).willReturn(environment);
return conditionContext;
}
static | OnDatabaseInitializationConditionTests |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/MountTableRefresherThread.java | {
"start": 1447,
"end": 1514
} | class ____ updating mount table cache on all the router.
*/
public | for |
java | apache__flink | flink-core/src/main/java/org/apache/flink/api/common/eventtime/WatermarkGeneratorSupplier.java | {
"start": 1972,
"end": 2675
} | class ____ be used to register new metrics with Flink and to create a
* nested hierarchy based on the group names. See {@link MetricGroup} for more information
* for the metrics system.
*
* @see MetricGroup
*/
MetricGroup getMetricGroup();
/**
* Returns a {@link RelativeClock} that hides periods when input was not active and {@link
* WatermarkGenerator} could not have been executed due to execution being blocked by the
* runtime. For example a backpressure or watermark alignment blocking the progress.
*
* @see RelativeClock
*/
RelativeClock getInputActivityClock();
}
}
| can |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/transport/RemoteClusterSettings.java | {
"start": 1909,
"end": 6053
} | class ____ {
public static final TimeValue DEFAULT_INITIAL_CONNECTION_TIMEOUT = TimeValue.timeValueSeconds(30);
/**
* The initial connect timeout for remote cluster connections
*/
public static final Setting<TimeValue> REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING = Setting.positiveTimeSetting(
"cluster.remote.initial_connect_timeout",
DEFAULT_INITIAL_CONNECTION_TIMEOUT,
Setting.Property.NodeScope
);
/**
* The name of a node attribute to select nodes that should be connected to in the remote cluster.
* For instance a node can be configured with {@code node.attr.gateway: true} in order to be eligible as a gateway node between
* clusters. In that case {@code cluster.remote.node.attr: gateway} can be used to filter out other nodes in the remote cluster.
* The value of the setting is expected to be a boolean, {@code true} for nodes that can become gateways, {@code false} otherwise.
*/
public static final Setting<String> REMOTE_NODE_ATTRIBUTE = Setting.simpleString(
"cluster.remote.node.attr",
Setting.Property.NodeScope
);
public static final boolean DEFAULT_SKIP_UNAVAILABLE = true;
public static final Setting.AffixSetting<Boolean> REMOTE_CLUSTER_SKIP_UNAVAILABLE = Setting.affixKeySetting(
"cluster.remote.",
"skip_unavailable",
(ns, key) -> boolSetting(
key,
DEFAULT_SKIP_UNAVAILABLE,
new UnsupportedInCPSValidator<>(ns, key),
Setting.Property.Dynamic,
Setting.Property.NodeScope
)
);
public static final Setting.AffixSetting<TimeValue> REMOTE_CLUSTER_PING_SCHEDULE = Setting.affixKeySetting(
"cluster.remote.",
"transport.ping_schedule",
(ns, key) -> timeSetting(
key,
TransportSettings.PING_SCHEDULE,
new RemoteConnectionEnabled<>(ns, key),
Setting.Property.Dynamic,
Setting.Property.NodeScope
)
);
public static final Setting.AffixSetting<Compression.Enabled> REMOTE_CLUSTER_COMPRESS = Setting.affixKeySetting(
"cluster.remote.",
"transport.compress",
(ns, key) -> enumSetting(
Compression.Enabled.class,
key,
TransportSettings.TRANSPORT_COMPRESS,
new RemoteConnectionEnabled<>(ns, key),
Setting.Property.Dynamic,
Setting.Property.NodeScope
)
);
public static final Setting.AffixSetting<Compression.Scheme> REMOTE_CLUSTER_COMPRESSION_SCHEME = Setting.affixKeySetting(
"cluster.remote.",
"transport.compression_scheme",
(ns, key) -> enumSetting(
Compression.Scheme.class,
key,
TransportSettings.TRANSPORT_COMPRESSION_SCHEME,
new RemoteConnectionEnabled<>(ns, key),
Setting.Property.Dynamic,
Setting.Property.NodeScope
)
);
public static final Setting.AffixSetting<SecureString> REMOTE_CLUSTER_CREDENTIALS = Setting.affixKeySetting(
"cluster.remote.",
"credentials",
key -> SecureSetting.secureString(key, null)
);
public static final Setting.AffixSetting<RemoteConnectionStrategy.ConnectionStrategy> REMOTE_CONNECTION_MODE = Setting.affixKeySetting(
"cluster.remote.",
"mode",
key -> new Setting<>(
key,
RemoteConnectionStrategy.ConnectionStrategy.SNIFF.name(),
value -> RemoteConnectionStrategy.ConnectionStrategy.valueOf(value.toUpperCase(Locale.ROOT)),
Setting.Property.NodeScope,
Setting.Property.Dynamic
)
);
public static final int DEFAULT_MAX_PENDING_CONNECTION_LISTENERS = 1000;
// this setting is intentionally not registered, it is only used in tests
public static final Setting<Integer> REMOTE_MAX_PENDING_CONNECTION_LISTENERS = Setting.intSetting(
"cluster.remote.max_pending_connection_listeners",
DEFAULT_MAX_PENDING_CONNECTION_LISTENERS,
Setting.Property.NodeScope
);
public static | RemoteClusterSettings |
java | apache__camel | components/camel-platform-http/src/generated/java/org/apache/camel/component/platform/http/cookie/CookieConfigurationConfigurer.java | {
"start": 749,
"end": 4628
} | class ____ extends org.apache.camel.support.component.PropertyConfigurerSupport implements GeneratedPropertyConfigurer, ExtendedPropertyConfigurerGetter {
private static final Map<String, Object> ALL_OPTIONS;
static {
Map<String, Object> map = new CaseInsensitiveMap();
map.put("CookieDomain", java.lang.String.class);
map.put("CookieHttpOnly", boolean.class);
map.put("CookieMaxAge", java.lang.Long.class);
map.put("CookiePath", java.lang.String.class);
map.put("CookieSameSite", org.apache.camel.component.platform.http.cookie.CookieConfiguration.CookieSameSite.class);
map.put("CookieSecure", boolean.class);
ALL_OPTIONS = map;
}
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
org.apache.camel.component.platform.http.cookie.CookieConfiguration target = (org.apache.camel.component.platform.http.cookie.CookieConfiguration) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "cookiedomain":
case "cookieDomain": target.setCookieDomain(property(camelContext, java.lang.String.class, value)); return true;
case "cookiehttponly":
case "cookieHttpOnly": target.setCookieHttpOnly(property(camelContext, boolean.class, value)); return true;
case "cookiemaxage":
case "cookieMaxAge": target.setCookieMaxAge(property(camelContext, java.lang.Long.class, value)); return true;
case "cookiepath":
case "cookiePath": target.setCookiePath(property(camelContext, java.lang.String.class, value)); return true;
case "cookiesamesite":
case "cookieSameSite": target.setCookieSameSite(property(camelContext, org.apache.camel.component.platform.http.cookie.CookieConfiguration.CookieSameSite.class, value)); return true;
case "cookiesecure":
case "cookieSecure": target.setCookieSecure(property(camelContext, boolean.class, value)); return true;
default: return false;
}
}
@Override
public Map<String, Object> getAllOptions(Object target) {
return ALL_OPTIONS;
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "cookiedomain":
case "cookieDomain": return java.lang.String.class;
case "cookiehttponly":
case "cookieHttpOnly": return boolean.class;
case "cookiemaxage":
case "cookieMaxAge": return java.lang.Long.class;
case "cookiepath":
case "cookiePath": return java.lang.String.class;
case "cookiesamesite":
case "cookieSameSite": return org.apache.camel.component.platform.http.cookie.CookieConfiguration.CookieSameSite.class;
case "cookiesecure":
case "cookieSecure": return boolean.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
org.apache.camel.component.platform.http.cookie.CookieConfiguration target = (org.apache.camel.component.platform.http.cookie.CookieConfiguration) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "cookiedomain":
case "cookieDomain": return target.getCookieDomain();
case "cookiehttponly":
case "cookieHttpOnly": return target.isCookieHttpOnly();
case "cookiemaxage":
case "cookieMaxAge": return target.getCookieMaxAge();
case "cookiepath":
case "cookiePath": return target.getCookiePath();
case "cookiesamesite":
case "cookieSameSite": return target.getCookieSameSite();
case "cookiesecure":
case "cookieSecure": return target.isCookieSecure();
default: return null;
}
}
}
| CookieConfigurationConfigurer |
java | apache__hadoop | hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/MapAttempt20LineHistoryEventEmitter.java | {
"start": 1215,
"end": 1677
} | class ____ extends
TaskAttempt20LineEventEmitter {
static List<SingleEventEmitter> nonFinals =
new LinkedList<SingleEventEmitter>();
static List<SingleEventEmitter> finals = new LinkedList<SingleEventEmitter>();
static {
nonFinals.addAll(taskEventNonFinalSEEs);
finals.add(new MapAttemptFinishedEventEmitter());
}
protected MapAttempt20LineHistoryEventEmitter() {
super();
}
static private | MapAttempt20LineHistoryEventEmitter |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/shuffle/ShuffleUtils.java | {
"start": 1202,
"end": 2691
} | class ____ {@code shuffleDescriptor}
* @param shuffleDescriptor concrete shuffle descriptor to check
* @param functionOfUnknownDescriptor supplier to call in case {@code shuffleDescriptor} is
* unknown
* @param functionOfKnownDescriptor function to call in case {@code shuffleDescriptor} is known
* @param <T> return type of called functions
* @param <SD> concrete type of {@code shuffleDescriptor} to check
* @return result of either function call
*/
@SuppressWarnings("unchecked")
public static <T, SD extends ShuffleDescriptor> T applyWithShuffleTypeCheck(
Class<SD> shuffleDescriptorClass,
ShuffleDescriptor shuffleDescriptor,
Function<UnknownShuffleDescriptor, T> functionOfUnknownDescriptor,
Function<SD, T> functionOfKnownDescriptor) {
if (shuffleDescriptor.isUnknown()) {
return functionOfUnknownDescriptor.apply((UnknownShuffleDescriptor) shuffleDescriptor);
} else if (shuffleDescriptorClass.equals(shuffleDescriptor.getClass())) {
return functionOfKnownDescriptor.apply((SD) shuffleDescriptor);
} else {
throw new IllegalArgumentException(
String.format(
"Unsupported ShuffleDescriptor type <%s>, only <%s> is supported",
shuffleDescriptor.getClass().getName(),
shuffleDescriptorClass.getName()));
}
}
}
| of |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/UnnecessaryDefaultInEnumSwitchTest.java | {
"start": 24616,
"end": 25317
} | enum ____ {
ONE,
TWO,
THREE,
UNRECOGNIZED
}
boolean m(Case c) {
switch (c) {
case ONE:
case TWO:
return true;
case UNRECOGNIZED:
case THREE:
// This is a comment
System.out.println("Test");
}
return false;
}
}
""")
.doTest();
}
@Test
public void messageMovedAssertion() {
compilationHelper
.addSourceLines(
"in/Test.java",
"""
| Case |
java | quarkusio__quarkus | extensions/infinispan-cache/runtime/src/main/java/io/quarkus/cache/infinispan/runtime/CompositeKeyMarshallerBean.java | {
"start": 432,
"end": 1107
} | class ____ {
@Produces
public Schema compositeKeySchema() {
return new Schema.Builder("io.quarkus.cache.infinispan.internal.cache.proto")
.packageName(CompositeCacheKeyMarshaller.PACKAGE)
.addImport("org/infinispan/protostream/message-wrapping.proto")
.addMessage(CompositeCacheKeyMarshaller.NAME)
.addRepeatedField(Type.create("org.infinispan.protostream.WrappedMessage"), CompositeCacheKeyMarshaller.KEYS, 1)
.build();
}
@Produces
public MessageMarshaller compositeKeyMarshaller() {
return new CompositeCacheKeyMarshaller();
}
}
| CompositeKeyMarshallerBean |
java | reactor__reactor-core | reactor-core/src/main/java/reactor/core/publisher/SinkManyBestEffort.java | {
"start": 7248,
"end": 9572
} | class ____<T> extends AtomicBoolean implements InnerProducer<T> {
final CoreSubscriber<? super T> actual;
@SuppressWarnings("deprecation")
final DirectInnerContainer<T> parent;
volatile long requested;
@SuppressWarnings("rawtypes")
static final AtomicLongFieldUpdater<DirectInner> REQUESTED = AtomicLongFieldUpdater.newUpdater(
DirectInner.class, "requested");
DirectInner(CoreSubscriber<? super T> actual, DirectInnerContainer<T> parent) {
this.actual = actual;
this.parent = parent;
}
@Override
public void request(long n) {
if (Operators.validate(n)) {
Operators.addCap(REQUESTED, this, n);
}
}
@Override
public void cancel() {
if (compareAndSet(false, true)) {
parent.remove(this);
}
}
boolean isCancelled() {
return get();
}
@Override
public @Nullable Object scanUnsafe(Attr key) {
if (key == Attr.PARENT) return parent;
if (key == Attr.CANCELLED) return isCancelled();
return InnerProducer.super.scanUnsafe(key);
}
@Override
public CoreSubscriber<? super T> actual() {
return actual;
}
/**
* Try to emit if the downstream is not cancelled and has some demand.
* @param value the value to emit
* @return true if enough demand and not cancelled, false otherwise
*/
boolean tryEmitNext(T value) {
if (requested != 0L) {
if (isCancelled()) {
return false;
}
actual.onNext(value);
if (requested != Long.MAX_VALUE) {
REQUESTED.decrementAndGet(this);
}
return true;
}
return false;
}
/**
* Emit a value to the downstream, unless it doesn't have sufficient demand.
* In that case, the downstream is terminated with an {@link Exceptions#failWithOverflow()}.
*
* @param value the value to emit
*/
void directEmitNext(T value) {
if (requested != 0L) {
actual.onNext(value);
if (requested != Long.MAX_VALUE) {
REQUESTED.decrementAndGet(this);
}
return;
}
parent.remove(this);
actual.onError(Exceptions.failWithOverflow("Can't deliver value due to lack of requests"));
}
void emitError(Throwable e) {
if (isCancelled()) {
return;
}
actual.onError(e);
}
void emitComplete() {
if (isCancelled()) {
return;
}
actual.onComplete();
}
}
}
| DirectInner |
java | apache__flink | flink-formats/flink-parquet/src/main/java/org/apache/flink/formats/parquet/vector/reader/NestedColumnReader.java | {
"start": 3738,
"end": 11767
} | class ____ implements ColumnReader<WritableColumnVector> {
private final Map<ColumnDescriptor, NestedPrimitiveColumnReader> columnReaders;
private final boolean isUtcTimestamp;
private final PageReadStore pages;
private final ParquetField field;
public NestedColumnReader(boolean isUtcTimestamp, PageReadStore pages, ParquetField field) {
this.isUtcTimestamp = isUtcTimestamp;
this.pages = pages;
this.field = field;
this.columnReaders = new HashMap<>();
}
@Override
public void readToVector(int readNumber, WritableColumnVector vector) throws IOException {
readData(field, readNumber, vector, false);
}
private Tuple2<LevelDelegation, WritableColumnVector> readData(
ParquetField field, int readNumber, ColumnVector vector, boolean inside)
throws IOException {
if (field.getType() instanceof RowType) {
return readRow((ParquetGroupField) field, readNumber, vector, inside);
} else if (field.getType() instanceof MapType || field.getType() instanceof MultisetType) {
return readMap((ParquetGroupField) field, readNumber, vector, inside);
} else if (field.getType() instanceof ArrayType) {
return readArray((ParquetGroupField) field, readNumber, vector, inside);
} else {
return readPrimitive((ParquetPrimitiveField) field, readNumber, vector);
}
}
private Tuple2<LevelDelegation, WritableColumnVector> readRow(
ParquetGroupField field, int readNumber, ColumnVector vector, boolean inside)
throws IOException {
HeapRowVector heapRowVector = (HeapRowVector) vector;
LevelDelegation levelDelegation = null;
List<ParquetField> children = field.getChildren();
WritableColumnVector[] childrenVectors = heapRowVector.getFields();
WritableColumnVector[] finalChildrenVectors =
new WritableColumnVector[childrenVectors.length];
for (int i = 0; i < children.size(); i++) {
Tuple2<LevelDelegation, WritableColumnVector> tuple =
readData(children.get(i), readNumber, childrenVectors[i], true);
levelDelegation = tuple.f0;
finalChildrenVectors[i] = tuple.f1;
}
if (levelDelegation == null) {
throw new FlinkRuntimeException(
String.format("Row field does not have any children: %s.", field));
}
RowPosition rowPosition =
NestedPositionUtil.calculateRowOffsets(
field,
levelDelegation.getDefinitionLevel(),
levelDelegation.getRepetitionLevel());
// If row was inside the structure, then we need to renew the vector to reset the
// capacity.
if (inside) {
heapRowVector =
new HeapRowVector(rowPosition.getPositionsCount(), finalChildrenVectors);
} else {
heapRowVector.setFields(finalChildrenVectors);
}
if (rowPosition.getIsNull() != null) {
setFieldNullFalg(rowPosition.getIsNull(), heapRowVector);
}
return Tuple2.of(levelDelegation, heapRowVector);
}
private Tuple2<LevelDelegation, WritableColumnVector> readMap(
ParquetGroupField field, int readNumber, ColumnVector vector, boolean inside)
throws IOException {
HeapMapVector mapVector = (HeapMapVector) vector;
mapVector.reset();
List<ParquetField> children = field.getChildren();
Preconditions.checkArgument(
children.size() == 2,
"Maps must have two type parameters, found %s",
children.size());
Tuple2<LevelDelegation, WritableColumnVector> keyTuple =
readData(children.get(0), readNumber, mapVector.getKeyColumnVector(), true);
Tuple2<LevelDelegation, WritableColumnVector> valueTuple =
readData(children.get(1), readNumber, mapVector.getValueColumnVector(), true);
LevelDelegation levelDelegation = keyTuple.f0;
CollectionPosition collectionPosition =
NestedPositionUtil.calculateCollectionOffsets(
field,
levelDelegation.getDefinitionLevel(),
levelDelegation.getRepetitionLevel());
// If map was inside the structure, then we need to renew the vector to reset the
// capacity.
if (inside) {
mapVector =
new HeapMapVector(
collectionPosition.getValueCount(), keyTuple.f1, valueTuple.f1);
} else {
mapVector.setKeys(keyTuple.f1);
mapVector.setValues(valueTuple.f1);
}
if (collectionPosition.getIsNull() != null) {
setFieldNullFalg(collectionPosition.getIsNull(), mapVector);
}
mapVector.setLengths(collectionPosition.getLength());
mapVector.setOffsets(collectionPosition.getOffsets());
return Tuple2.of(levelDelegation, mapVector);
}
private Tuple2<LevelDelegation, WritableColumnVector> readArray(
ParquetGroupField field, int readNumber, ColumnVector vector, boolean inside)
throws IOException {
HeapArrayVector arrayVector = (HeapArrayVector) vector;
arrayVector.reset();
List<ParquetField> children = field.getChildren();
Preconditions.checkArgument(
children.size() == 1,
"Arrays must have a single type parameter, found %s",
children.size());
Tuple2<LevelDelegation, WritableColumnVector> tuple =
readData(children.get(0), readNumber, arrayVector.getChild(), true);
LevelDelegation levelDelegation = tuple.f0;
CollectionPosition collectionPosition =
NestedPositionUtil.calculateCollectionOffsets(
field,
levelDelegation.getDefinitionLevel(),
levelDelegation.getRepetitionLevel());
// If array was inside the structure, then we need to renew the vector to reset the
// capacity.
if (inside) {
arrayVector = new HeapArrayVector(collectionPosition.getValueCount(), tuple.f1);
} else {
arrayVector.setChild(tuple.f1);
}
if (collectionPosition.getIsNull() != null) {
setFieldNullFalg(collectionPosition.getIsNull(), arrayVector);
}
arrayVector.setLengths(collectionPosition.getLength());
arrayVector.setOffsets(collectionPosition.getOffsets());
return Tuple2.of(levelDelegation, arrayVector);
}
private Tuple2<LevelDelegation, WritableColumnVector> readPrimitive(
ParquetPrimitiveField field, int readNumber, ColumnVector vector) throws IOException {
ColumnDescriptor descriptor = field.getDescriptor();
NestedPrimitiveColumnReader reader = columnReaders.get(descriptor);
if (reader == null) {
reader =
new NestedPrimitiveColumnReader(
descriptor,
pages.getPageReader(descriptor),
isUtcTimestamp,
descriptor.getPrimitiveType(),
field.getType());
columnReaders.put(descriptor, reader);
}
WritableColumnVector writableColumnVector =
reader.readAndNewVector(readNumber, (WritableColumnVector) vector);
return Tuple2.of(reader.getLevelDelegation(), writableColumnVector);
}
private static void setFieldNullFalg(boolean[] nullFlags, AbstractHeapVector vector) {
for (int index = 0; index < vector.getLen() && index < nullFlags.length; index++) {
if (nullFlags[index]) {
vector.setNullAt(index);
}
}
}
}
| NestedColumnReader |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/state/ttl/TtlStateSnapshotTransformer.java | {
"start": 1383,
"end": 2432
} | class ____<T> implements CollectionStateSnapshotTransformer<T> {
private final TtlTimeProvider ttlTimeProvider;
final long ttl;
private final DataInputDeserializer div;
TtlStateSnapshotTransformer(@Nonnull TtlTimeProvider ttlTimeProvider, long ttl) {
this.ttlTimeProvider = ttlTimeProvider;
this.ttl = ttl;
this.div = new DataInputDeserializer();
}
<V> TtlValue<V> filterTtlValue(TtlValue<V> value) {
return expired(value) ? null : value;
}
private boolean expired(TtlValue<?> ttlValue) {
return expired(ttlValue.getLastAccessTimestamp());
}
boolean expired(long ts) {
return TtlUtils.expired(ts, ttl, ttlTimeProvider);
}
long deserializeTs(byte[] value) throws IOException {
div.setBuffer(value, 0, Long.BYTES);
return LongSerializer.INSTANCE.deserialize(div);
}
@Override
public TransformStrategy getFilterStrategy() {
return TransformStrategy.STOP_ON_FIRST_INCLUDED;
}
static | TtlStateSnapshotTransformer |
java | spring-projects__spring-boot | loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/jar/NestedJarFile.java | {
"start": 18643,
"end": 20758
} | class ____ extends InputStream {
private final int uncompressedSize;
private final CloseableDataBlock content;
private long pos;
private long remaining;
private volatile boolean closed;
JarEntryInputStream(ZipContent.Entry entry) throws IOException {
this.uncompressedSize = entry.getUncompressedSize();
this.content = entry.openContent();
this.remaining = this.uncompressedSize;
}
@Override
public int read() throws IOException {
byte[] b = new byte[1];
return (read(b, 0, 1) == 1) ? b[0] & 0xFF : -1;
}
@Override
public int read(byte[] b, int off, int len) throws IOException {
int result;
synchronized (NestedJarFile.this) {
ensureOpen();
ByteBuffer dst = ByteBuffer.wrap(b, off, len);
int count = this.content.read(dst, this.pos);
if (count > 0) {
this.pos += count;
this.remaining -= count;
}
result = count;
}
return result;
}
@Override
public long skip(long n) throws IOException {
long result;
synchronized (NestedJarFile.this) {
result = (n > 0) ? maxForwardSkip(n) : maxBackwardSkip(n);
this.pos += result;
this.remaining -= result;
}
return result;
}
private long maxForwardSkip(long n) {
boolean willCauseOverflow = (this.pos + n) < 0;
return (willCauseOverflow || n > this.remaining) ? this.remaining : n;
}
private long maxBackwardSkip(long n) {
return Math.max(-this.pos, n);
}
@Override
public int available() {
return (this.remaining < Integer.MAX_VALUE) ? (int) this.remaining : Integer.MAX_VALUE;
}
private void ensureOpen() throws ZipException {
if (NestedJarFile.this.closed || this.closed) {
throw new ZipException("ZipFile closed");
}
}
@Override
public void close() throws IOException {
if (this.closed) {
return;
}
this.closed = true;
this.content.close();
NestedJarFile.this.resources.removeInputStream(this);
}
int getUncompressedSize() {
return this.uncompressedSize;
}
}
/**
* {@link ZipInflaterInputStream} to read and inflate jar entry content.
*/
private | JarEntryInputStream |
java | junit-team__junit5 | junit-platform-console/src/main/java/org/junit/platform/console/options/TestDiscoveryOptionsMixin.java | {
"start": 2159,
"end": 4647
} | class ____ {
@Option(names = { "--scan-classpath",
"--scan-class-path" }, converter = ClasspathEntriesConverter.class, paramLabel = "PATH", arity = "0..1", description = "Scan all directories on the classpath or explicit classpath roots. " //
+ "Without arguments, only directories on the system classpath as well as additional classpath " //
+ "entries supplied via -" + CP_OPTION + " (directories and JAR files) are scanned. " //
+ "Explicit classpath roots that are not on the classpath will be silently ignored. " //
+ "This option can be repeated.")
private @Nullable List<Path> selectedClasspathEntries;
@Option(names = "--scan-modules", description = "Scan all resolved modules for test discovery.")
private boolean scanModulepath;
@Option(names = { "-u",
"--select-uri" }, paramLabel = "URI", arity = "1..*", converter = SelectorConverter.Uri.class, description = "Select a URI for test discovery. This option can be repeated.")
private List<UriSelector> selectedUris = new ArrayList<>();
@Option(names = { "-f",
"--select-file" }, paramLabel = "FILE", arity = "1..*", converter = SelectorConverter.File.class, //
description = "Select a file for test discovery. "
+ "The line and column numbers can be provided as URI query parameters (e.g. foo.txt?line=12&column=34). "
+ "This option can be repeated.")
private List<FileSelector> selectedFiles = new ArrayList<>();
@Option(names = { "-d",
"--select-directory" }, paramLabel = "DIR", arity = "1..*", converter = SelectorConverter.Directory.class, description = "Select a directory for test discovery. This option can be repeated.")
private List<DirectorySelector> selectedDirectories = new ArrayList<>();
@Option(names = { "-o",
"--select-module" }, paramLabel = "NAME", arity = "1..*", converter = SelectorConverter.Module.class, description = "Select single module for test discovery. This option can be repeated.")
private List<ModuleSelector> selectedModules = new ArrayList<>();
@Option(names = { "-p",
"--select-package" }, paramLabel = "PKG", arity = "1..*", converter = SelectorConverter.Package.class, description = "Select a package for test discovery. This option can be repeated.")
private List<PackageSelector> selectedPackages = new ArrayList<>();
@Option(names = { "-c",
"--select-class" }, paramLabel = "CLASS", arity = "1..*", converter = SelectorConverter.Class.class, description = "Select a | SelectorOptions |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/bytecode/enhance/internal/bytebuddy/UnloadedTypeDescription.java | {
"start": 309,
"end": 781
} | class ____ implements UnloadedClass {
private final TypeDescription typeDescription;
UnloadedTypeDescription(TypeDescription typeDescription) {
this.typeDescription = typeDescription;
}
@Override
public boolean hasAnnotation(Class<? extends Annotation> annotationType) {
return typeDescription.getDeclaredAnnotations().isAnnotationPresent( annotationType );
}
@Override
public String getName() {
return typeDescription.getName();
}
}
| UnloadedTypeDescription |
java | mybatis__mybatis-3 | src/test/java/org/apache/ibatis/submitted/nested/Parameter.java | {
"start": 754,
"end": 984
} | class ____ {
private final List<Name> names;
public Parameter() {
names = new ArrayList<>();
}
public List<Name> getNames() {
return names;
}
public void addName(Name name) {
names.add(name);
}
}
| Parameter |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/scheduler/adapter/DefaultResultPartition.java | {
"start": 1562,
"end": 3934
} | class ____ implements SchedulingResultPartition {
private final IntermediateResultPartitionID resultPartitionId;
private final IntermediateDataSetID intermediateDataSetId;
private final ResultPartitionType partitionType;
private final Supplier<ResultPartitionState> resultPartitionStateSupplier;
private DefaultExecutionVertex producer;
private final Supplier<List<ConsumerVertexGroup>> consumerVertexGroupsSupplier;
private final Supplier<List<ConsumedPartitionGroup>> consumerPartitionGroupSupplier;
DefaultResultPartition(
IntermediateResultPartitionID partitionId,
IntermediateDataSetID intermediateDataSetId,
ResultPartitionType partitionType,
Supplier<ResultPartitionState> resultPartitionStateSupplier,
Supplier<List<ConsumerVertexGroup>> consumerVertexGroupsSupplier,
Supplier<List<ConsumedPartitionGroup>> consumerPartitionGroupSupplier) {
this.resultPartitionId = checkNotNull(partitionId);
this.intermediateDataSetId = checkNotNull(intermediateDataSetId);
this.partitionType = checkNotNull(partitionType);
this.resultPartitionStateSupplier = checkNotNull(resultPartitionStateSupplier);
this.consumerVertexGroupsSupplier = checkNotNull(consumerVertexGroupsSupplier);
this.consumerPartitionGroupSupplier = checkNotNull(consumerPartitionGroupSupplier);
}
@Override
public IntermediateResultPartitionID getId() {
return resultPartitionId;
}
@Override
public IntermediateDataSetID getResultId() {
return intermediateDataSetId;
}
@Override
public ResultPartitionType getResultType() {
return partitionType;
}
@Override
public ResultPartitionState getState() {
return resultPartitionStateSupplier.get();
}
@Override
public DefaultExecutionVertex getProducer() {
return producer;
}
@Override
public List<ConsumerVertexGroup> getConsumerVertexGroups() {
return checkNotNull(consumerVertexGroupsSupplier.get());
}
@Override
public List<ConsumedPartitionGroup> getConsumedPartitionGroups() {
return consumerPartitionGroupSupplier.get();
}
void setProducer(DefaultExecutionVertex vertex) {
producer = checkNotNull(vertex);
}
}
| DefaultResultPartition |
java | spring-projects__spring-boot | module/spring-boot-webmvc-test/src/test/java/org/springframework/boot/webmvc/test/autoconfigure/mockmvc/WebMvcTestAllControllersIntegrationTests.java | {
"start": 1412,
"end": 2514
} | class ____ {
@Autowired
private MockMvcTester mvc;
@Autowired(required = false)
private ErrorAttributes errorAttributes;
@Test
void shouldFindController1() {
assertThat(this.mvc.get().uri("/one")).satisfies(hasBody("one"));
}
@Test
void shouldFindController2() {
assertThat(this.mvc.get().uri("/two")).satisfies(hasBody("hellotwo"));
}
@Test
void shouldFindControllerAdvice() {
assertThat(this.mvc.get().uri("/error")).satisfies(hasBody("recovered"));
}
@Test
void shouldRunValidationSuccess() {
assertThat(this.mvc.get().uri("/three/OK")).satisfies(hasBody("Hello OK"));
}
@Test
void shouldRunValidationFailure() {
assertThat(this.mvc.get().uri("/three/invalid")).failure()
.isInstanceOf(ServletException.class)
.hasCauseInstanceOf(ConstraintViolationException.class);
}
@Test
void shouldNotFilterErrorAttributes() {
assertThat(this.errorAttributes).isNotNull();
}
private Consumer<MvcTestResult> hasBody(String expected) {
return (result) -> assertThat(result).hasStatusOk().hasBodyTextEqualTo(expected);
}
}
| WebMvcTestAllControllersIntegrationTests |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/embeddable/StructEmbeddableArrayTest.java | {
"start": 3538,
"end": 20865
} | class ____ implements AdditionalMappingContributor {
@Override
public void contribute(
AdditionalMappingContributions contributions,
InFlightMetadataCollector metadata,
ResourceStreamLocator resourceStreamLocator,
MetadataBuildingContext buildingContext) {
final Namespace namespace = new Namespace(
PhysicalNamingStrategyStandardImpl.INSTANCE,
null,
new Namespace.Name( null, null )
);
//---------------------------------------------------------
// PostgreSQL
//---------------------------------------------------------
contributions.contributeAuxiliaryDatabaseObject(
new NamedAuxiliaryDatabaseObject(
"PostgreSQL structFunction",
namespace,
"create function structFunction() returns structType array as $$ declare result structType; begin result.theBinary = bytea '\\x01'; result.theString = 'ABC'; result.theDouble = 0; result.theInt = 0; result.theLocalDateTime = timestamp '2022-12-01 01:00:00'; result.theUuid = '53886a8a-7082-4879-b430-25cb94415be8'::uuid; return array[result]; end $$ language plpgsql",
"drop function structFunction",
Set.of( PostgreSQLDialect.class.getName() )
)
);
contributions.contributeAuxiliaryDatabaseObject(
new NamedAuxiliaryDatabaseObject(
"PostgreSQL structProcedure",
namespace,
"create procedure structProcedure(INOUT result structType array) AS $$ declare res structType; begin res.theBinary = bytea '\\x01'; res.theString = 'ABC'; res.theDouble = 0; res.theInt = 0; res.theLocalDateTime = timestamp '2022-12-01 01:00:00'; res.theUuid = '53886a8a-7082-4879-b430-25cb94415be8'::uuid; result = array[res]; end $$ language plpgsql",
"drop procedure structProcedure",
Set.of( PostgreSQLDialect.class.getName() )
)
);
//---------------------------------------------------------
// PostgresPlus
//---------------------------------------------------------
contributions.contributeAuxiliaryDatabaseObject(
new NamedAuxiliaryDatabaseObject(
"PostgrePlus structFunction",
namespace,
"create function structFunction() returns structType array as $$ declare result structType; begin result.theBinary = bytea '\\x01'; result.theString = 'ABC'; result.theDouble = 0; result.theInt = 0; result.theLocalDateTime = timestamp '2022-12-01 01:00:00'; result.theUuid = '53886a8a-7082-4879-b430-25cb94415be8'::uuid; return array[result]; end $$ language plpgsql",
"drop function structFunction",
Set.of( PostgresPlusDialect.class.getName() )
)
);
contributions.contributeAuxiliaryDatabaseObject(
new NamedAuxiliaryDatabaseObject(
"PostgrePlus structProcedure",
namespace,
"create procedure structProcedure(result INOUT structType array) AS $$ declare res structType; begin res.theBinary = bytea '\\x01'; res.theString = 'ABC'; res.theDouble = 0; res.theInt = 0; res.theLocalDateTime = timestamp '2022-12-01 01:00:00'; res.theUuid = '53886a8a-7082-4879-b430-25cb94415be8'::uuid; result = array[res]; end $$ language plpgsql",
"drop procedure structProcedure",
Set.of( PostgresPlusDialect.class.getName() )
)
);
//---------------------------------------------------------
// Oracle
//---------------------------------------------------------
contributions.contributeAuxiliaryDatabaseObject(
new NamedAuxiliaryDatabaseObject(
"Oracle structFunction",
namespace,
"create function structFunction return structTypeArray is result structTypeArray; begin " +
"result := structTypeArray(structType(" +
"theBinary => hextoraw('01')," +
"theString => 'ABC'," +
"theDouble => 0," +
"theInt => 0," +
"theLocalDateTime => timestamp '2022-12-01 01:00:00'," +
"theUuid => hextoraw('53886a8a70824879b43025cb94415be8')," +
"converted_gender => null," +
"gender => null," +
"mutableValue => null," +
"ordinal_gender => null," +
"theBoolean => null," +
"theClob => null," +
"theDate => null," +
"theDuration => null," +
"theInstant => null," +
"theInteger => null," +
"theLocalDate => null," +
"theLocalTime => null," +
"theNumericBoolean => null," +
"theOffsetDateTime => null," +
"theStringBoolean => null," +
"theTime => null," +
"theTimestamp => null," +
"theUrl => null," +
"theZonedDateTime => null" +
")); return result; end;",
"drop function structFunction",
Set.of( OracleDialect.class.getName() )
)
);
contributions.contributeAuxiliaryDatabaseObject(
new NamedAuxiliaryDatabaseObject(
"Oracle structProcedure",
namespace,
"create procedure structProcedure(result OUT structTypeArray) AS begin " +
"result := structTypeArray(structType(" +
"theBinary => hextoraw('01')," +
"theString => 'ABC'," +
"theDouble => 0," +
"theInt => 0," +
"theLocalDateTime => timestamp '2022-12-01 01:00:00'," +
"theUuid => hextoraw('53886a8a70824879b43025cb94415be8')," +
"converted_gender => null," +
"gender => null," +
"mutableValue => null," +
"ordinal_gender => null," +
"theBoolean => null," +
"theClob => null," +
"theDate => null," +
"theDuration => null," +
"theInstant => null," +
"theInteger => null," +
"theLocalDate => null," +
"theLocalTime => null," +
"theNumericBoolean => null," +
"theOffsetDateTime => null," +
"theStringBoolean => null," +
"theTime => null," +
"theTimestamp => null," +
"theUrl => null," +
"theZonedDateTime => null" +
")); end;",
"drop procedure structProcedure",
Set.of( OracleDialect.class.getName() )
)
);
}
@BeforeEach
public void setUp(SessionFactoryScope scope) {
scope.inTransaction(
session -> {
session.persist( new StructHolder( 1L, EmbeddableAggregate.createAggregate1() ) );
session.persist( new StructHolder( 2L, EmbeddableAggregate.createAggregate2() ) );
}
);
}
@AfterEach
protected void cleanupTest(SessionFactoryScope scope) {
scope.getSessionFactory().getSchemaManager().truncate();
}
@Test
public void testUpdate(SessionFactoryScope scope) {
scope.inTransaction(
entityManager -> {
StructHolder structHolder = entityManager.find( StructHolder.class, 1L );
structHolder.setAggregate( EmbeddableAggregate.createAggregate2() );
entityManager.flush();
entityManager.clear();
assertStructEquals( EmbeddableAggregate.createAggregate2(), entityManager.find( StructHolder.class, 1L ).getAggregate() );
}
);
}
@Test
public void testFetch(SessionFactoryScope scope) {
scope.inSession(
entityManager -> {
List<StructHolder> structHolders = entityManager.createQuery( "from StructHolder b where b.id = 1", StructHolder.class ).getResultList();
assertEquals( 1, structHolders.size() );
assertEquals( 1L, structHolders.get( 0 ).getId() );
assertStructEquals( EmbeddableAggregate.createAggregate1(), structHolders.get( 0 ).getAggregate() );
}
);
}
@Test
public void testFetchNull(SessionFactoryScope scope) {
scope.inSession(
entityManager -> {
List<StructHolder> structHolders = entityManager.createQuery( "from StructHolder b where b.id = 2", StructHolder.class ).getResultList();
assertEquals( 1, structHolders.size() );
assertEquals( 2L, structHolders.get( 0 ).getId() );
assertStructEquals( EmbeddableAggregate.createAggregate2(), structHolders.get( 0 ).getAggregate() );
}
);
}
@Test
public void testDomainResult(SessionFactoryScope scope) {
scope.inSession(
entityManager -> {
List<EmbeddableAggregate[]> structs = entityManager.createQuery( "select b.aggregate from StructHolder b where b.id = 1", EmbeddableAggregate[].class ).getResultList();
assertEquals( 1, structs.size() );
assertStructEquals( new EmbeddableAggregate[]{ EmbeddableAggregate.createAggregate1() }, structs.get( 0 ) );
}
);
}
@Test
@SkipForDialect(dialectClass = OracleDialect.class, reason = "We have to use TABLE storage in this test because Oracle doesn't support LOBs in struct arrays, but TABLE is not indexed")
public void testSelectionItems(SessionFactoryScope scope) {
scope.inSession(
entityManager -> {
List<Tuple> tuples = entityManager.createQuery(
"select " +
"b.aggregate[1].theInt," +
"b.aggregate[1].theDouble," +
"b.aggregate[1].theBoolean," +
"b.aggregate[1].theNumericBoolean," +
"b.aggregate[1].theStringBoolean," +
"b.aggregate[1].theString," +
"b.aggregate[1].theInteger," +
"b.aggregate[1].theUrl," +
"b.aggregate[1].theClob," +
"b.aggregate[1].theBinary," +
"b.aggregate[1].theDate," +
"b.aggregate[1].theTime," +
"b.aggregate[1].theTimestamp," +
"b.aggregate[1].theInstant," +
"b.aggregate[1].theUuid," +
"b.aggregate[1].gender," +
"b.aggregate[1].convertedGender," +
"b.aggregate[1].ordinalGender," +
"b.aggregate[1].theDuration," +
"b.aggregate[1].theLocalDateTime," +
"b.aggregate[1].theLocalDate," +
"b.aggregate[1].theLocalTime," +
"b.aggregate[1].theZonedDateTime," +
"b.aggregate[1].theOffsetDateTime," +
"b.aggregate[1].mutableValue " +
"from StructHolder b where b.id = 1",
Tuple.class
).getResultList();
assertEquals( 1, tuples.size() );
final Tuple tuple = tuples.get( 0 );
final EmbeddableAggregate struct = new EmbeddableAggregate();
struct.setTheInt( tuple.get( 0, int.class ) );
struct.setTheDouble( tuple.get( 1, Double.class ) );
struct.setTheBoolean( tuple.get( 2, Boolean.class ) );
struct.setTheNumericBoolean( tuple.get( 3, Boolean.class ) );
struct.setTheStringBoolean( tuple.get( 4, Boolean.class ) );
struct.setTheString( tuple.get( 5, String.class ) );
struct.setTheInteger( tuple.get( 6, Integer.class ) );
struct.setTheUrl( tuple.get( 7, URL.class ) );
struct.setTheClob( tuple.get( 8, String.class ) );
struct.setTheBinary( tuple.get( 9, byte[].class ) );
struct.setTheDate( tuple.get( 10, Date.class ) );
struct.setTheTime( tuple.get( 11, Time.class ) );
struct.setTheTimestamp( tuple.get( 12, Timestamp.class ) );
struct.setTheInstant( tuple.get( 13, Instant.class ) );
struct.setTheUuid( tuple.get( 14, UUID.class ) );
struct.setGender( tuple.get( 15, EntityOfBasics.Gender.class ) );
struct.setConvertedGender( tuple.get( 16, EntityOfBasics.Gender.class ) );
struct.setOrdinalGender( tuple.get( 17, EntityOfBasics.Gender.class ) );
struct.setTheDuration( tuple.get( 18, Duration.class ) );
struct.setTheLocalDateTime( tuple.get( 19, LocalDateTime.class ) );
struct.setTheLocalDate( tuple.get( 20, LocalDate.class ) );
struct.setTheLocalTime( tuple.get( 21, LocalTime.class ) );
struct.setTheZonedDateTime( tuple.get( 22, ZonedDateTime.class ) );
struct.setTheOffsetDateTime( tuple.get( 23, OffsetDateTime.class ) );
struct.setMutableValue( tuple.get( 24, MutableValue.class ) );
EmbeddableAggregate.assertEquals( EmbeddableAggregate.createAggregate1(), struct );
}
);
}
@Test
public void testDeleteWhere(SessionFactoryScope scope) {
scope.inTransaction(
entityManager -> {
entityManager.createMutationQuery( "delete StructHolder b where b.aggregate is not null" ).executeUpdate();
assertNull( entityManager.find( StructHolder.class, 1L ) );
}
);
}
@Test
public void testUpdateAggregate(SessionFactoryScope scope) {
scope.inTransaction(
entityManager -> {
entityManager.createMutationQuery( "update StructHolder b set b.aggregate = null" ).executeUpdate();
assertNull( entityManager.find( StructHolder.class, 1L ).aggregate );
}
);
}
@Test
@FailureExpected(jiraKey = "HHH-18051")
public void testUpdateAggregateMember(SessionFactoryScope scope) {
scope.inTransaction(
entityManager -> {
entityManager.createMutationQuery( "update StructHolder b set b.aggregate[1].theString = null" ).executeUpdate();
EmbeddableAggregate struct = EmbeddableAggregate.createAggregate1();
struct.setTheString( null );
assertStructEquals( struct, entityManager.find( StructHolder.class, 1L ).getAggregate() );
}
);
}
@Test
@FailureExpected(jiraKey = "HHH-18051")
@SkipForDialect(dialectClass = OracleDialect.class, reason = "We have to use TABLE storage in this test because Oracle doesn't support LOBs in struct arrays, but TABLE is not indexed")
public void testUpdateMultipleAggregateMembers(SessionFactoryScope scope) {
scope.inTransaction(
entityManager -> {
entityManager.createMutationQuery( "update StructHolder b set b.aggregate[1].theString = null, b.aggregate[1].theUuid = null" ).executeUpdate();
EmbeddableAggregate struct = EmbeddableAggregate.createAggregate1();
struct.setTheString( null );
struct.setTheUuid( null );
assertStructEquals( struct, entityManager.find( StructHolder.class, 1L ).getAggregate() );
}
);
}
@Test
@FailureExpected(jiraKey = "HHH-18051")
@SkipForDialect(dialectClass = OracleDialect.class, reason = "We have to use TABLE storage in this test because Oracle doesn't support LOBs in struct arrays, but TABLE is not indexed")
public void testUpdateAllAggregateMembers(SessionFactoryScope scope) {
scope.inTransaction(
entityManager -> {
EmbeddableAggregate struct = EmbeddableAggregate.createAggregate1();
entityManager.createMutationQuery(
"update StructHolder b set " +
"b.aggregate[1].theInt = :theInt," +
"b.aggregate[1].theDouble = :theDouble," +
"b.aggregate[1].theBoolean = :theBoolean," +
"b.aggregate[1].theNumericBoolean = :theNumericBoolean," +
"b.aggregate[1].theStringBoolean = :theStringBoolean," +
"b.aggregate[1].theString = :theString," +
"b.aggregate[1].theInteger = :theInteger," +
"b.aggregate[1].theUrl = :theUrl," +
"b.aggregate[1].theClob = :theClob," +
"b.aggregate[1].theBinary = :theBinary," +
"b.aggregate[1].theDate = :theDate," +
"b.aggregate[1].theTime = :theTime," +
"b.aggregate[1].theTimestamp = :theTimestamp," +
"b.aggregate[1].theInstant = :theInstant," +
"b.aggregate[1].theUuid = :theUuid," +
"b.aggregate[1].gender = :gender," +
"b.aggregate[1].convertedGender = :convertedGender," +
"b.aggregate[1].ordinalGender = :ordinalGender," +
"b.aggregate[1].theDuration = :theDuration," +
"b.aggregate[1].theLocalDateTime = :theLocalDateTime," +
"b.aggregate[1].theLocalDate = :theLocalDate," +
"b.aggregate[1].theLocalTime = :theLocalTime," +
"b.aggregate[1].theZonedDateTime = :theZonedDateTime," +
"b.aggregate[1].theOffsetDateTime = :theOffsetDateTime," +
"b.aggregate[1].mutableValue = :mutableValue " +
"where b.id = 2"
)
.setParameter( "theInt", struct.getTheInt() )
.setParameter( "theDouble", struct.getTheDouble() )
.setParameter( "theBoolean", struct.isTheBoolean() )
.setParameter( "theNumericBoolean", struct.isTheNumericBoolean() )
.setParameter( "theStringBoolean", struct.isTheStringBoolean() )
.setParameter( "theString", struct.getTheString() )
.setParameter( "theInteger", struct.getTheInteger() )
.setParameter( "theUrl", struct.getTheUrl() )
.setParameter( "theClob", struct.getTheClob() )
.setParameter( "theBinary", struct.getTheBinary() )
.setParameter( "theDate", struct.getTheDate() )
.setParameter( "theTime", struct.getTheTime() )
.setParameter( "theTimestamp", struct.getTheTimestamp() )
.setParameter( "theInstant", struct.getTheInstant() )
.setParameter( "theUuid", struct.getTheUuid() )
.setParameter( "gender", struct.getGender() )
.setParameter( "convertedGender", struct.getConvertedGender() )
.setParameter( "ordinalGender", struct.getOrdinalGender() )
.setParameter( "theDuration", struct.getTheDuration() )
.setParameter( "theLocalDateTime", struct.getTheLocalDateTime() )
.setParameter( "theLocalDate", struct.getTheLocalDate() )
.setParameter( "theLocalTime", struct.getTheLocalTime() )
.setParameter( "theZonedDateTime", struct.getTheZonedDateTime() )
.setParameter( "theOffsetDateTime", struct.getTheOffsetDateTime() )
.setParameter( "mutableValue", struct.getMutableValue() )
.executeUpdate();
assertStructEquals( EmbeddableAggregate.createAggregate1(), entityManager.find( StructHolder.class, 2L ).getAggregate() );
}
);
}
@Test
public void testNativeQuery(SessionFactoryScope scope) {
scope.inTransaction(
entityManager -> {
//noinspection unchecked
List<Object> resultList = entityManager.createNativeQuery(
"select b.aggregate from StructHolder b where b.id = 1",
// Using Object. | StructEmbeddableArrayTest |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/cluster/JobManagerProfilingHandler.java | {
"start": 1889,
"end": 3632
} | class ____
extends AbstractRestHandler<
RestfulGateway, ProfilingRequestBody, ProfilingInfo, EmptyMessageParameters> {
private final long maxDurationInSeconds;
private final ProfilingService profilingService;
public JobManagerProfilingHandler(
GatewayRetriever<? extends RestfulGateway> leaderRetriever,
Duration timeout,
Map<String, String> responseHeaders,
MessageHeaders<ProfilingRequestBody, ProfilingInfo, EmptyMessageParameters>
messageHeaders,
final Configuration configuration) {
super(leaderRetriever, timeout, responseHeaders, messageHeaders);
this.maxDurationInSeconds =
configuration.get(RestOptions.MAX_PROFILING_DURATION).getSeconds();
this.profilingService = ProfilingService.getInstance(configuration);
}
@Override
protected CompletableFuture<ProfilingInfo> handleRequest(
@Nonnull HandlerRequest<ProfilingRequestBody> request, @Nonnull RestfulGateway gateway)
throws RestHandlerException {
ProfilingRequestBody profilingRequest = request.getRequestBody();
int duration = profilingRequest.getDuration();
if (duration <= 0 || duration > maxDurationInSeconds) {
return FutureUtils.completedExceptionally(
new IllegalArgumentException(
String.format(
"`duration` must be set between (0s, %ds].",
maxDurationInSeconds)));
}
return profilingService.requestProfiling(
"JobManager", duration, profilingRequest.getMode());
}
}
| JobManagerProfilingHandler |
java | spring-projects__spring-framework | spring-jdbc/src/test/java/org/springframework/jdbc/datasource/init/AbstractDatabasePopulatorTests.java | {
"start": 1267,
"end": 7030
} | class ____ extends AbstractDatabaseInitializationTests {
private static final String COUNT_DAVE_SQL = "select COUNT(NAME) from T_TEST where NAME='Dave'";
private static final String COUNT_KEITH_SQL = "select COUNT(NAME) from T_TEST where NAME='Keith'";
protected final ResourceDatabasePopulator databasePopulator = new ResourceDatabasePopulator();
@Test
void scriptWithSingleLineCommentsAndFailedDrop() {
databasePopulator.addScript(resource("db-schema-failed-drop-comments.sql"));
databasePopulator.addScript(resource("db-test-data.sql"));
databasePopulator.setIgnoreFailedDrops(true);
DatabasePopulatorUtils.execute(databasePopulator, db);
assertTestDatabaseCreated();
}
@Test
void scriptWithStandardEscapedLiteral() {
databasePopulator.addScript(defaultSchema());
databasePopulator.addScript(resource("db-test-data-escaped-literal.sql"));
DatabasePopulatorUtils.execute(databasePopulator, db);
assertTestDatabaseCreated("'Keith'");
}
@Test
void scriptWithMySqlEscapedLiteral() {
databasePopulator.addScript(defaultSchema());
databasePopulator.addScript(resource("db-test-data-mysql-escaped-literal.sql"));
DatabasePopulatorUtils.execute(databasePopulator, db);
assertTestDatabaseCreated("\\$Keith\\$");
}
@Test
void scriptWithMultipleStatements() {
databasePopulator.addScript(defaultSchema());
databasePopulator.addScript(resource("db-test-data-multiple.sql"));
DatabasePopulatorUtils.execute(databasePopulator, db);
assertThat(jdbcTemplate.queryForObject(COUNT_KEITH_SQL, Integer.class)).isEqualTo(1);
assertThat(jdbcTemplate.queryForObject(COUNT_DAVE_SQL, Integer.class)).isEqualTo(1);
}
@Test
void scriptWithMultipleStatementsAndLongSeparator() {
databasePopulator.addScript(defaultSchema());
databasePopulator.addScript(resource("db-test-data-endings.sql"));
databasePopulator.setSeparator("@@");
DatabasePopulatorUtils.execute(databasePopulator, db);
assertThat(jdbcTemplate.queryForObject(COUNT_KEITH_SQL, Integer.class)).isEqualTo(1);
assertThat(jdbcTemplate.queryForObject(COUNT_DAVE_SQL, Integer.class)).isEqualTo(1);
}
@Test
void scriptWithMultipleStatementsAndWhitespaceSeparator() {
databasePopulator.addScript(defaultSchema());
databasePopulator.addScript(resource("db-test-data-whitespace.sql"));
databasePopulator.setSeparator("/\n");
DatabasePopulatorUtils.execute(databasePopulator, db);
assertThat(jdbcTemplate.queryForObject(COUNT_KEITH_SQL, Integer.class)).isEqualTo(1);
assertThat(jdbcTemplate.queryForObject(COUNT_DAVE_SQL, Integer.class)).isEqualTo(1);
}
@Test
void scriptWithMultipleStatementsAndNewlineSeparator() {
databasePopulator.addScript(defaultSchema());
databasePopulator.addScript(resource("db-test-data-newline.sql"));
DatabasePopulatorUtils.execute(databasePopulator, db);
assertThat(jdbcTemplate.queryForObject(COUNT_KEITH_SQL, Integer.class)).isEqualTo(1);
assertThat(jdbcTemplate.queryForObject(COUNT_DAVE_SQL, Integer.class)).isEqualTo(1);
}
@Test
void scriptWithMultipleStatementsAndMultipleNewlineSeparator() {
databasePopulator.addScript(defaultSchema());
databasePopulator.addScript(resource("db-test-data-multi-newline.sql"));
databasePopulator.setSeparator("\n\n");
DatabasePopulatorUtils.execute(databasePopulator, db);
assertThat(jdbcTemplate.queryForObject(COUNT_KEITH_SQL, Integer.class)).isEqualTo(1);
assertThat(jdbcTemplate.queryForObject(COUNT_DAVE_SQL, Integer.class)).isEqualTo(1);
}
@Test
void scriptWithEolBetweenTokens() {
databasePopulator.addScript(usersSchema());
databasePopulator.addScript(resource("users-data.sql"));
DatabasePopulatorUtils.execute(databasePopulator, db);
assertUsersDatabaseCreated("Brannen");
}
@Test
void scriptWithCommentsWithinStatements() {
databasePopulator.addScript(usersSchema());
databasePopulator.addScript(resource("users-data-with-comments.sql"));
DatabasePopulatorUtils.execute(databasePopulator, db);
assertUsersDatabaseCreated("Brannen", "Hoeller");
}
@Test
void scriptWithoutStatementSeparator() {
databasePopulator.setSeparator(ScriptUtils.EOF_STATEMENT_SEPARATOR);
databasePopulator.addScript(resource("drop-users-schema.sql"));
databasePopulator.addScript(resource("users-schema-without-separator.sql"));
databasePopulator.addScript(resource("users-data-without-separator.sql"));
DatabasePopulatorUtils.execute(databasePopulator, db);
assertUsersDatabaseCreated("Brannen");
}
@Test
void constructorWithMultipleScriptResources() {
final ResourceDatabasePopulator populator = new ResourceDatabasePopulator(usersSchema(),
resource("users-data-with-comments.sql"));
DatabasePopulatorUtils.execute(populator, db);
assertUsersDatabaseCreated("Brannen", "Hoeller");
}
@Test
void scriptWithSelectStatements() {
databasePopulator.addScript(defaultSchema());
databasePopulator.addScript(resource("db-test-data-select.sql"));
DatabasePopulatorUtils.execute(databasePopulator, db);
assertThat(jdbcTemplate.queryForObject(COUNT_KEITH_SQL, Integer.class)).isEqualTo(1);
assertThat(jdbcTemplate.queryForObject(COUNT_DAVE_SQL, Integer.class)).isEqualTo(1);
}
/**
* See SPR-9457
*/
@Test
void usesBoundConnectionIfAvailable() throws SQLException {
TransactionSynchronizationManager.initSynchronization();
Connection connection = DataSourceUtils.getConnection(db);
DatabasePopulator populator = mock();
DatabasePopulatorUtils.execute(populator, db);
verify(populator).populate(connection);
}
private void assertTestDatabaseCreated() {
assertTestDatabaseCreated("Keith");
}
private void assertTestDatabaseCreated(String name) {
assertThat(jdbcTemplate.queryForObject("select NAME from T_TEST", String.class)).isEqualTo(name);
}
}
| AbstractDatabasePopulatorTests |
java | elastic__elasticsearch | libs/entitlement/qa/entitlement-test-plugin/src/main/java/org/elasticsearch/entitlement/qa/test/VersionSpecificNetworkChecks.java | {
"start": 1008,
"end": 2928
} | class ____ {
@EntitlementTest(expectedAccess = SERVER_ONLY, fromJavaVersion = 18)
static void createInetAddressResolverProvider() {
new InetAddressResolverProvider() {
@Override
public InetAddressResolver get(Configuration configuration) {
return null;
}
@Override
public String name() {
return "TEST";
}
};
}
@EntitlementTest(expectedAccess = PLUGINS)
static void httpClientSend() throws InterruptedException {
try (HttpClient httpClient = HttpClient.newBuilder().build()) {
// Shutdown the client, so the send action will shortcut before actually executing any network operation
// (but after it run our check in the prologue)
httpClient.shutdown();
try {
httpClient.send(HttpRequest.newBuilder(URI.create("http://localhost")).build(), HttpResponse.BodyHandlers.discarding());
} catch (IOException e) {
// Expected, since we shut down the client
}
}
}
@EntitlementTest(expectedAccess = PLUGINS)
static void httpClientSendAsync() {
try (HttpClient httpClient = HttpClient.newBuilder().build()) {
// Shutdown the client, so the send action will return before actually executing any network operation
// (but after it run our check in the prologue)
httpClient.shutdown();
var future = httpClient.sendAsync(
HttpRequest.newBuilder(URI.create("http://localhost")).build(),
HttpResponse.BodyHandlers.discarding()
);
assert future.isCompletedExceptionally();
future.exceptionally(ex -> {
assert ex instanceof IOException;
return null;
});
}
}
}
| VersionSpecificNetworkChecks |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/metamodel/model/domain/internal/AttributeContainer.java | {
"start": 694,
"end": 2764
} | interface ____<J> {
void addAttribute(PersistentAttribute<J,?> attribute);
/**
* Callback used when we have a singular id attribute of some form - either a simple id
* or an aggregated composite id ({@link jakarta.persistence.EmbeddedId})
*/
default void applyIdAttribute(SingularPersistentAttribute<J, ?> idAttribute) {
throw new UnsupportedMappingException(
"AttributeContainer [" + getClass().getName() + "] does not support identifiers"
);
}
default void applyNonAggregatedIdAttributes(
Set<SingularPersistentAttribute<? super J, ?>> idAttributes,
EmbeddableDomainType<?> idClassType) {
throw new UnsupportedMappingException(
"AttributeContainer [" + getClass().getName() + "] does not support identifiers"
);
}
/**
* todo (6.0) : we still need to implement this properly and the contract may change
* - specifically I am not certain we will be able to re-use `SingularPersistentAttribute`
* because of its dependence on declaring-type, etc that we may not be able to do
*/
default void applyIdClassAttributes(Set<SingularPersistentAttribute<? super J, ?>> idClassAttributes) {
throw new UnsupportedMappingException(
"AttributeContainer [" + getClass().getName() + "] does not support identifiers"
);
}
default void applyVersionAttribute(SingularPersistentAttribute<J, ?> versionAttribute) {
throw new UnsupportedMappingException(
"AttributeContainer [" + getClass().getName() + "] does not support versions"
);
}
default void applyNaturalIdAttribute(PersistentAttribute<J, ?> versionAttribute) {
throw new UnsupportedMappingException(
"AttributeContainer [" + getClass().getName() + "] does not support natural ids"
);
}
default void addConcreteGenericAttribute(PersistentAttribute<J, ?> idAttribute) {
throw new UnsupportedMappingException(
"AttributeContainer [" + getClass().getName() + "] does not generic embeddables"
);
}
/**
* Called when configuration of the type is complete
*/
void finishUp();
}
}
| InFlightAccess |
java | elastic__elasticsearch | x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/mvdedupe/BatchEncoder.java | {
"start": 21074,
"end": 21750
} | class ____ extends DirectEncoder {
private final BytesRef scratch = new BytesRef();
DirectBytesRefs(BytesRefBlock block) {
super(block);
}
@Override
protected int readValueAtBlockIndex(int valueIndex, BytesRefBuilder dst) {
var v = ((BytesRefBlock) block).getBytesRef(valueIndex, scratch);
int start = dst.length();
dst.grow(start + Integer.BYTES + v.length);
intHandle.set(dst.bytes(), start, v.length);
dst.setLength(start + Integer.BYTES);
dst.append(v);
return Integer.BYTES + v.length;
}
}
private static | DirectBytesRefs |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/codec/vectors/ES815BitFlatVectorsFormat.java | {
"start": 4959,
"end": 5486
} | class ____ extends RandomVectorScorer.AbstractRandomVectorScorer {
private final byte[] query;
private final ByteVectorValues byteValues;
HammingVectorScorer(ByteVectorValues byteValues, byte[] query) {
super(byteValues);
this.query = query;
this.byteValues = byteValues;
}
@Override
public float score(int i) throws IOException {
return hammingScore(byteValues.vectorValue(i), query);
}
}
static | HammingVectorScorer |
java | bumptech__glide | library/src/main/java/com/bumptech/glide/load/model/LazyHeaderFactory.java | {
"start": 329,
"end": 789
} | interface ____ {
/**
* Returns an http header, or {@code null} if no header could be built.
*
* <p>Returning {@code null} or an empty String from this method will result in this particular
* key/value being excluded from the headers provided in the request. If there are multiple
* factories or values for a particular key, any non-null values will still be included for that
* key.
*/
@Nullable
String buildHeader();
}
| LazyHeaderFactory |
java | quarkusio__quarkus | core/deployment/src/main/java/io/quarkus/deployment/recording/RecorderContext.java | {
"start": 2721,
"end": 2802
} | class ____ proxy
*/
<T> RuntimeValue<T> newInstance(String name);
}
| instance |
java | google__guice | extensions/servlet/test/com/google/inject/servlet/ServletPipelineRequestDispatcherTest.java | {
"start": 1937,
"end": 12180
} | class ____ extends TestCase {
private static final Key<HttpServlet> HTTP_SERLVET_KEY = Key.get(HttpServlet.class);
private static final String A_KEY = "thinglyDEgintly" + new Date() + UUID.randomUUID();
private static final String A_VALUE =
ServletPipelineRequestDispatcherTest.class.toString() + new Date() + UUID.randomUUID();
public final void testIncludeManagedServlet() throws IOException, ServletException {
String pattern = "blah.html";
final ServletDefinition servletDefinition =
new ServletDefinition(
Key.get(HttpServlet.class),
UriPatternType.get(UriPatternType.SERVLET, pattern),
new HashMap<String, String>(),
null);
final Injector injector = mock(Injector.class);
final Binding<HttpServlet> binding = mock(Binding.class);
final HttpServletRequest requestMock = mock(HttpServletRequest.class);
when(requestMock.getAttribute(A_KEY)).thenReturn(A_VALUE);
final boolean[] run = new boolean[1];
final HttpServlet mockServlet =
new HttpServlet() {
@Override
protected void service(
HttpServletRequest request, HttpServletResponse httpServletResponse)
throws ServletException, IOException {
run[0] = true;
final Object o = request.getAttribute(A_KEY);
assertEquals("Wrong attrib returned - " + o, A_VALUE, o);
}
};
when(binding.acceptScopingVisitor((BindingScopingVisitor) any())).thenReturn(true);
when(injector.getBinding(Key.get(HttpServlet.class))).thenReturn(binding);
when(injector.getInstance(HTTP_SERLVET_KEY)).thenReturn(mockServlet);
final Key<ServletDefinition> servetDefsKey = Key.get(TypeLiteral.get(ServletDefinition.class));
Binding<ServletDefinition> mockBinding = mock(Binding.class);
when(injector.findBindingsByType(eq(servetDefsKey.getTypeLiteral())))
.thenReturn(ImmutableList.<Binding<ServletDefinition>>of(mockBinding));
Provider<ServletDefinition> bindingProvider = Providers.of(servletDefinition);
when(mockBinding.getProvider()).thenReturn(bindingProvider);
// Have to init the Servlet before we can dispatch to it.
servletDefinition.init(null, injector, Sets.<HttpServlet>newIdentityHashSet());
final RequestDispatcher dispatcher =
new ManagedServletPipeline(injector).getRequestDispatcher(pattern);
assertNotNull(dispatcher);
dispatcher.include(requestMock, mock(HttpServletResponse.class));
assertTrue("Include did not dispatch to our servlet!", run[0]);
verify(requestMock).setAttribute(REQUEST_DISPATCHER_REQUEST, true);
verify(requestMock).removeAttribute(REQUEST_DISPATCHER_REQUEST);
}
public final void testForwardToManagedServlet() throws IOException, ServletException {
String pattern = "blah.html";
final ServletDefinition servletDefinition =
new ServletDefinition(
Key.get(HttpServlet.class),
UriPatternType.get(UriPatternType.SERVLET, pattern),
new HashMap<String, String>(),
null);
final Injector injector = mock(Injector.class);
final Binding<HttpServlet> binding = mock(Binding.class);
final HttpServletRequest requestMock = mock(HttpServletRequest.class);
final HttpServletResponse mockResponse = mock(HttpServletResponse.class);
when(requestMock.getAttribute(A_KEY)).thenReturn(A_VALUE);
when(mockResponse.isCommitted()).thenReturn(false);
final List<String> paths = new ArrayList<>();
final HttpServlet mockServlet =
new HttpServlet() {
@Override
protected void service(
HttpServletRequest request, HttpServletResponse httpServletResponse)
throws ServletException, IOException {
paths.add(request.getRequestURI());
final Object o = request.getAttribute(A_KEY);
assertEquals("Wrong attrib returned - " + o, A_VALUE, o);
}
};
when(binding.acceptScopingVisitor((BindingScopingVisitor) any())).thenReturn(true);
when(injector.getBinding(Key.get(HttpServlet.class))).thenReturn(binding);
when(injector.getInstance(HTTP_SERLVET_KEY)).thenReturn(mockServlet);
final Key<ServletDefinition> servetDefsKey = Key.get(TypeLiteral.get(ServletDefinition.class));
Binding<ServletDefinition> mockBinding = mock(Binding.class);
when(injector.findBindingsByType(eq(servetDefsKey.getTypeLiteral())))
.thenReturn(ImmutableList.<Binding<ServletDefinition>>of(mockBinding));
Provider<ServletDefinition> bindingProvider = Providers.of(servletDefinition);
when(mockBinding.getProvider()).thenReturn(bindingProvider);
// Have to init the Servlet before we can dispatch to it.
servletDefinition.init(null, injector, Sets.<HttpServlet>newIdentityHashSet());
final RequestDispatcher dispatcher =
new ManagedServletPipeline(injector).getRequestDispatcher(pattern);
assertNotNull(dispatcher);
dispatcher.forward(requestMock, mockResponse);
assertTrue("Include did not dispatch to our servlet!", paths.contains(pattern));
verify(requestMock).setAttribute(REQUEST_DISPATCHER_REQUEST, true);
verify(requestMock).removeAttribute(REQUEST_DISPATCHER_REQUEST);
verify(mockResponse).resetBuffer();
}
public final void testForwardToManagedServletFailureOnCommittedBuffer()
throws IOException, ServletException {
IllegalStateException expected = null;
try {
forwardToManagedServletFailureOnCommittedBuffer();
} catch (IllegalStateException ise) {
expected = ise;
} finally {
assertNotNull("Expected IllegalStateException was not thrown", expected);
}
}
public final void forwardToManagedServletFailureOnCommittedBuffer()
throws IOException, ServletException {
String pattern = "blah.html";
final ServletDefinition servletDefinition =
new ServletDefinition(
Key.get(HttpServlet.class),
UriPatternType.get(UriPatternType.SERVLET, pattern),
new HashMap<String, String>(),
null);
final Injector injector = mock(Injector.class);
final Binding<HttpServlet> binding = mock(Binding.class);
final HttpServletRequest mockRequest = mock(HttpServletRequest.class);
final HttpServletResponse mockResponse = mock(HttpServletResponse.class);
when(mockResponse.isCommitted()).thenReturn(true);
final HttpServlet mockServlet =
new HttpServlet() {
@Override
protected void service(
HttpServletRequest request, HttpServletResponse httpServletResponse)
throws ServletException, IOException {
final Object o = request.getAttribute(A_KEY);
assertEquals("Wrong attrib returned - " + o, A_VALUE, o);
}
};
when(binding.acceptScopingVisitor((BindingScopingVisitor) any())).thenReturn(true);
when(injector.getBinding(Key.get(HttpServlet.class))).thenReturn(binding);
when(injector.getInstance(Key.get(HttpServlet.class))).thenReturn(mockServlet);
final Key<ServletDefinition> servetDefsKey = Key.get(TypeLiteral.get(ServletDefinition.class));
Binding<ServletDefinition> mockBinding = mock(Binding.class);
when(injector.findBindingsByType(eq(servetDefsKey.getTypeLiteral())))
.thenReturn(ImmutableList.<Binding<ServletDefinition>>of(mockBinding));
Provider<ServletDefinition> bindingProvider = Providers.of(servletDefinition);
when(mockBinding.getProvider()).thenReturn(bindingProvider);
// Have to init the Servlet before we can dispatch to it.
servletDefinition.init(null, injector, Sets.<HttpServlet>newIdentityHashSet());
final RequestDispatcher dispatcher =
new ManagedServletPipeline(injector).getRequestDispatcher(pattern);
assertNotNull(dispatcher);
dispatcher.forward(mockRequest, mockResponse);
}
public final void testWrappedRequestUriAndUrlConsistency() {
final HttpServletRequest mockRequest = mock(HttpServletRequest.class);
when(mockRequest.getScheme()).thenReturn("http");
when(mockRequest.getServerName()).thenReturn("the.server");
when(mockRequest.getServerPort()).thenReturn(12345);
HttpServletRequest wrappedRequest = ManagedServletPipeline.wrapRequest(mockRequest, "/new-uri");
assertEquals("/new-uri", wrappedRequest.getRequestURI());
assertEquals("http://the.server:12345/new-uri", wrappedRequest.getRequestURL().toString());
}
public final void testWrappedRequestUrlNegativePort() {
final HttpServletRequest mockRequest = mock(HttpServletRequest.class);
when(mockRequest.getScheme()).thenReturn("http");
when(mockRequest.getServerName()).thenReturn("the.server");
when(mockRequest.getServerPort()).thenReturn(-1);
HttpServletRequest wrappedRequest = ManagedServletPipeline.wrapRequest(mockRequest, "/new-uri");
assertEquals("/new-uri", wrappedRequest.getRequestURI());
assertEquals("http://the.server/new-uri", wrappedRequest.getRequestURL().toString());
}
public final void testWrappedRequestUrlDefaultPort() {
final HttpServletRequest mockRequest = mock(HttpServletRequest.class);
when(mockRequest.getScheme()).thenReturn("http");
when(mockRequest.getServerName()).thenReturn("the.server");
when(mockRequest.getServerPort()).thenReturn(80);
HttpServletRequest wrappedRequest = ManagedServletPipeline.wrapRequest(mockRequest, "/new-uri");
assertEquals("/new-uri", wrappedRequest.getRequestURI());
assertEquals("http://the.server/new-uri", wrappedRequest.getRequestURL().toString());
}
public final void testWrappedRequestUrlDefaultHttpsPort() {
final HttpServletRequest mockRequest = mock(HttpServletRequest.class);
when(mockRequest.getScheme()).thenReturn("https");
when(mockRequest.getServerName()).thenReturn("the.server");
when(mockRequest.getServerPort()).thenReturn(443);
HttpServletRequest wrappedRequest = ManagedServletPipeline.wrapRequest(mockRequest, "/new-uri");
assertEquals("/new-uri", wrappedRequest.getRequestURI());
assertEquals("https://the.server/new-uri", wrappedRequest.getRequestURL().toString());
}
}
| ServletPipelineRequestDispatcherTest |
java | spring-projects__spring-boot | core/spring-boot/src/main/java/org/springframework/boot/context/properties/bind/CollectionBinder.java | {
"start": 1178,
"end": 3079
} | class ____ extends IndexedElementsBinder<Collection<Object>> {
CollectionBinder(Context context) {
super(context);
}
@Override
protected @Nullable Object bindAggregate(ConfigurationPropertyName name, Bindable<?> target,
AggregateElementBinder elementBinder) {
ResolvableType aggregateType = ResolvableType.forClassWithGenerics(List.class,
target.getType().asCollection().getGenerics());
ResolvableType elementType = target.getType().asCollection().getGeneric();
IndexedCollectionSupplier result = new IndexedCollectionSupplier(
() -> CollectionFactory.createCollection(List.class, elementType.resolve(), 0));
bindIndexed(name, target, elementBinder, aggregateType, elementType, result);
if (result.wasSupplied()) {
return result.get();
}
return null;
}
@Override
protected Collection<Object> merge(Supplier<Collection<Object>> existing, Collection<Object> additional) {
Collection<Object> existingCollection = getExistingIfPossible(existing);
if (existingCollection == null) {
return additional;
}
try {
existingCollection.clear();
existingCollection.addAll(additional);
return copyIfPossible(existingCollection);
}
catch (UnsupportedOperationException ex) {
return createNewCollection(additional);
}
}
private @Nullable Collection<Object> getExistingIfPossible(Supplier<Collection<Object>> existing) {
try {
return existing.get();
}
catch (Exception ex) {
return null;
}
}
private Collection<Object> copyIfPossible(Collection<Object> collection) {
try {
return createNewCollection(collection);
}
catch (Exception ex) {
return collection;
}
}
private Collection<Object> createNewCollection(Collection<Object> collection) {
Collection<Object> result = CollectionFactory.createCollection(collection.getClass(), collection.size());
result.addAll(collection);
return result;
}
}
| CollectionBinder |
java | quarkusio__quarkus | integration-tests/test-extension/extension/runtime/src/main/java/io/quarkus/extest/runtime/graal/Target_XmlConfig.java | {
"start": 323,
"end": 981
} | class ____ {
@Substitute
private String address;
@Substitute
private int port;
@Substitute
private ArrayList<XData> dataList;
@Substitute
public String getAddress() {
return address;
}
@Substitute
public int getPort() {
return port;
}
@Substitute
public ArrayList<XData> getDataList() {
return dataList;
}
@Substitute
@Override
public String toString() {
return "Target_XmlConfig{" +
"address='" + address + '\'' +
", port=" + port +
", dataList=" + dataList +
'}';
}
}
| Target_XmlConfig |
java | google__error-prone | core/src/test/java/com/google/errorprone/fixes/SuggestedFixesTest.java | {
"start": 18626,
"end": 18772
} | interface ____ {}")
.expectUnchanged()
.addInputLines(
"in/AddAnnotation.java",
"""
| SomeAnnotation |
java | quarkusio__quarkus | extensions/oidc/runtime/src/main/java/io/quarkus/oidc/OidcTenantConfig.java | {
"start": 7984,
"end": 11749
} | class ____ implements io.quarkus.oidc.runtime.OidcTenantConfig.IntrospectionCredentials {
/**
* Name
*/
public Optional<String> name = Optional.empty();
/**
* Secret
*/
public Optional<String> secret = Optional.empty();
/**
* Include OpenId Connect Client ID configured with `quarkus.oidc.client-id`.
*/
public boolean includeClientId = true;
public Optional<String> getName() {
return name;
}
public void setName(String name) {
this.name = Optional.of(name);
}
public Optional<String> getSecret() {
return secret;
}
public void setSecret(String secret) {
this.secret = Optional.of(secret);
}
public boolean isIncludeClientId() {
return includeClientId;
}
public void setIncludeClientId(boolean includeClientId) {
this.includeClientId = includeClientId;
}
private void addConfigMappingValues(io.quarkus.oidc.runtime.OidcTenantConfig.IntrospectionCredentials mapping) {
name = mapping.name();
secret = mapping.secret();
includeClientId = mapping.includeClientId();
}
@Override
public Optional<String> name() {
return name;
}
@Override
public Optional<String> secret() {
return secret;
}
@Override
public boolean includeClientId() {
return includeClientId;
}
}
/**
* Configuration to find and parse a custom claim containing the roles information.
*
* @deprecated use the {@link #roles()} method instead
*/
@Deprecated(since = "3.18", forRemoval = true)
public Roles roles = new Roles();
/**
* Configuration how to validate the token claims.
*
* @deprecated use the {@link #token()} method instead
*/
@Deprecated(since = "3.18", forRemoval = true)
public Token token = new Token();
/**
* RP Initiated, BackChannel and FrontChannel Logout configuration
*
* @deprecated use the {@link #logout()} method
*/
@Deprecated(since = "3.18", forRemoval = true)
public Logout logout = new Logout();
/**
* Configuration of the certificate chain which can be used to verify tokens.
* If the certificate chain truststore is configured, the tokens can be verified using the certificate
* chain inlined in the Base64-encoded format as an `x5c` header in the token itself.
* <p/>
* The certificate chain inlined in the token is verified.
* Signature of every certificate in the chain but the root certificate is verified by the next certificate in the chain.
* Thumbprint of the root certificate in the chain must match a thumbprint of one of the certificates in the truststore.
* <p/>
* Additionally, a direct trust in the leaf chain certificate which will be used to verify the token signature must
* be established.
* By default, the leaf certificate's thumbprint must match a thumbprint of one of the certificates in the truststore.
* If the truststore does not have the leaf certificate imported, then the leaf certificate must be identified by its Common
* Name.
*
* @deprecated use {@link #certificateChain()} method instead
*/
@Deprecated(since = "3.18", forRemoval = true)
public CertificateChain certificateChain = new CertificateChain();
/**
* @deprecated use the {@link OidcTenantConfigBuilder.CertificateChainBuilder} builder
*/
@Deprecated(since = "3.18", forRemoval = true)
public static | IntrospectionCredentials |
java | mybatis__mybatis-3 | src/test/java/org/apache/ibatis/builder/xml/dynamic/DynamicSqlSourceTest.java | {
"start": 18164,
"end": 19231
} | class ____ {
public String id;
Bean(String property) {
this.id = property;
}
public String getId() {
return id;
}
public void setId(String property) {
this.id = property;
}
}
@MethodSource
@ParameterizedTest
void testShrinkWhitespacesInSql(SqlNode input, boolean shrinkWhitespaces, String expected) {
Configuration config = new Configuration();
config.setShrinkWhitespacesInSql(shrinkWhitespaces);
String actual = new DynamicSqlSource(config, input).getBoundSql(null).getSql();
assertEquals(expected, actual);
}
static Stream<Arguments> testShrinkWhitespacesInSql() {
return Stream.of(
Arguments.arguments(
new StaticTextSqlNode("\t\n\n SELECT * \n FROM user\n \t WHERE user_id = 1\n\t "), false,
"SELECT * \n FROM user\n \t WHERE user_id = 1"),
Arguments.arguments(new StaticTextSqlNode("\t\n\n SELECT * \n FROM user\n \t WHERE user_id = 1\n\t"), true,
"SELECT * FROM user WHERE user_id = 1"));
}
}
| Bean |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/DatabindContext.java | {
"start": 1208,
"end": 5467
} | class ____
{
/**
* Let's limit length of error messages, for cases where underlying data
* may be very large -- no point in spamming logs with megabytes of meaningless
* data.
*/
private final static int MAX_ERROR_STR_LEN = 500;
/*
/**********************************************************************
/* Generic config access
/**********************************************************************
*/
/**
* Accessor to currently active configuration (both per-request configs
* and per-mapper config).
*/
public abstract MapperConfig<?> getConfig();
/**
* Convenience method for accessing serialization view in use (if any); equivalent to:
*<pre>
* getConfig().getAnnotationIntrospector();
*</pre>
*/
public abstract AnnotationIntrospector getAnnotationIntrospector();
/*
/**********************************************************************
/* Access to specific config settings
/**********************************************************************
*/
/**
* Convenience method for checking whether specified Mapper
* feature is enabled or not.
* Shortcut for:
*<pre>
* getConfig().isEnabled(feature);
*</pre>
*/
public abstract boolean isEnabled(MapperFeature feature);
public final boolean isAnnotationProcessingEnabled() {
return isEnabled(MapperFeature.USE_ANNOTATIONS);
}
/**
* Method for checking whether specified datatype
* feature is enabled or not.
*
* @since 2.14
*/
public abstract boolean isEnabled(DatatypeFeature feature);
/**
* @since 2.15
*/
public abstract DatatypeFeatures getDatatypeFeatures();
/**
* Convenience method for accessing serialization view in use (if any); equivalent to:
*<pre>
* getConfig().canOverrideAccessModifiers();
*</pre>
*/
public abstract boolean canOverrideAccessModifiers();
/**
* Accessor for locating currently active view, if any;
* returns null if no view has been set.
*/
public abstract Class<?> getActiveView();
public abstract Locale getLocale();
public abstract TimeZone getTimeZone();
public abstract JsonFormat.Value getDefaultPropertyFormat(Class<?> baseType);
/*
/**********************************************************************
/* Generic attributes
/**********************************************************************
*/
/**
* Method for accessing attributes available in this context.
* Per-call attributes have highest precedence; attributes set
* via {@link ObjectReader} or {@link ObjectWriter} have lower
* precedence.
*
* @param key Key of the attribute to get
* @return Value of the attribute, if any; null otherwise
*/
public abstract Object getAttribute(Object key);
/**
* Method for setting per-call value of given attribute.
* This will override any previously defined value for the
* attribute within this context.
*
* @param key Key of the attribute to set
* @param value Value to set attribute to
*
* @return This context object, to allow chaining
*/
public abstract DatabindContext setAttribute(Object key, Object value);
/*
/**********************************************************************
/* Type instantiation/resolution
/**********************************************************************
*/
/**
* Convenience method for constructing {@link JavaType} for given JDK
* type (usually {@link java.lang.Class})
*/
public JavaType constructType(Type type) {
if (type == null) {
return null;
}
return getTypeFactory().constructType(type);
}
/**
* Convenience method for constructing subtypes, retaining generic
* type parameter (if any).
*<p>
* Note: since 2.11 handling has varied a bit across serialization, deserialization.
*/
public abstract JavaType constructSpecializedType(JavaType baseType, Class<?> subclass);
/**
* Lookup method called when code needs to resolve | DatabindContext |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/web/AnnotationConfigWebContextLoaderTests.java | {
"start": 894,
"end": 1665
} | class ____ {
private static final String[] EMPTY_STRING_ARRAY = new String[0];
private static final Class<?>[] EMPTY_CLASS_ARRAY = new Class<?>[0];
@Test
@SuppressWarnings("deprecation")
void configMustNotContainLocations() {
AnnotationConfigWebContextLoader loader = new AnnotationConfigWebContextLoader();
WebMergedContextConfiguration mergedConfig = new WebMergedContextConfiguration(getClass(),
new String[] { "config.xml" }, EMPTY_CLASS_ARRAY, null, EMPTY_STRING_ARRAY, EMPTY_STRING_ARRAY,
EMPTY_STRING_ARRAY, "resource/path", loader, null, null);
assertThatIllegalStateException()
.isThrownBy(() -> loader.loadContext(mergedConfig))
.withMessageContaining("does not support resource locations");
}
}
| AnnotationConfigWebContextLoaderTests |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/annotation/ProfileValueUtilsTests.java | {
"start": 7804,
"end": 7961
} | class ____ {
public void nonAnnotatedMethod() {
}
}
@SuppressWarnings("unused")
@IfProfileValue(name = NAME, value = VALUE)
private static | NonAnnotated |
java | apache__dubbo | dubbo-plugin/dubbo-rest-openapi/src/main/java/org/apache/dubbo/rpc/protocol/tri/rest/openapi/model/Discriminator.java | {
"start": 997,
"end": 2375
} | class ____ extends Node<Discriminator> {
private String propertyName;
private Map<String, String> mapping;
public String getPropertyName() {
return propertyName;
}
public Discriminator setPropertyName(String propertyName) {
this.propertyName = propertyName;
return this;
}
public Map<String, String> getMapping() {
return mapping;
}
public Discriminator setMapping(Map<String, String> mapping) {
this.mapping = mapping;
return this;
}
public Discriminator addMapping(String key, String value) {
if (mapping == null) {
mapping = new LinkedHashMap<>();
}
mapping.put(key, value);
return this;
}
public Discriminator removeMapping(String key) {
if (mapping != null) {
mapping.remove(key);
}
return this;
}
@Override
public Discriminator clone() {
Discriminator clone = super.clone();
if (mapping != null) {
clone.setMapping(new LinkedHashMap<>(mapping));
}
return clone;
}
@Override
public Map<String, Object> writeTo(Map<String, Object> node, Context context) {
write(node, "propertyName", propertyName);
write(node, "mapping", mapping);
writeExtensions(node);
return node;
}
}
| Discriminator |
java | quarkusio__quarkus | independent-projects/bootstrap/core/src/main/java/io/quarkus/bootstrap/classloading/ClassPathElement.java | {
"start": 2058,
"end": 2339
} | class ____ resource if it exists
*/
ClassPathResource getResource(String name);
/**
* Returns a set of all known resources.
*
* @return A set representing all known resources
*/
Set<String> getProvidedResources();
/**
* Whether this | path |
java | quarkusio__quarkus | extensions/cache/deployment/src/test/java/io/quarkus/cache/test/runtime/DuplicatedContextHandlingTest.java | {
"start": 695,
"end": 7240
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest TEST = new QuarkusUnitTest().withApplicationRoot(jar -> jar
.addClass(CachedService.class));
@Inject
CachedService cachedService;
@Inject
Vertx vertx;
@Test
@ActivateRequestContext
void testDuplicatedContextHandlingWhenCalledFromNoContext() {
cachedService.direct(false).await().indefinitely();
cachedService.direct(true).await().indefinitely();
}
@Test
@ActivateRequestContext
void testDuplicatedContextHandlingWhenCalledOnContext() throws InterruptedException {
ContextInternal context = (ContextInternal) vertx.getOrCreateContext();
if (context.isDuplicate()) {
context = context.duplicate();
}
CountDownLatch latch = new CountDownLatch(1);
Context tmp = context;
context.runOnContext(x -> {
cachedService.direct(false)
.invoke(() -> {
if (!tmp.equals(Vertx.currentContext())) {
throw new AssertionError("Expected to go back on the caller context");
}
})
.subscribe().with(y -> latch.countDown());
});
Assertions.assertTrue(latch.await(1, TimeUnit.SECONDS));
CountDownLatch latch2 = new CountDownLatch(1);
context.runOnContext(x -> {
cachedService.direct(true)
.invoke(() -> {
if (!tmp.equals(Vertx.currentContext())) {
throw new AssertionError("Expected to go back on the caller context");
}
})
.subscribe().with(y -> latch2.countDown());
});
Assertions.assertTrue(latch2.await(1, TimeUnit.SECONDS));
CountDownLatch latch3 = new CountDownLatch(1);
context.runOnContext(x -> {
cachedService.direct(false)
.invoke(() -> {
if (!tmp.equals(Vertx.currentContext())) {
throw new AssertionError("Expected to go back on the caller context");
}
})
.subscribe().with(y -> latch3.countDown());
});
Assertions.assertTrue(latch3.await(1, TimeUnit.SECONDS));
}
@Test
@ActivateRequestContext
void testDuplicatedContextHandlingWhenCalledOnDifferentContexts() throws InterruptedException {
ContextInternal context = (ContextInternal) vertx.getOrCreateContext();
context = context.duplicate();
var context2 = context.duplicate();
CountDownLatch latch = new CountDownLatch(1);
Context tmp = context;
context.runOnContext(x -> {
cachedService.direct(false)
.invoke(() -> {
if (!tmp.equals(Vertx.currentContext())) {
throw new AssertionError("Expected to go back on the caller context");
}
})
.subscribe().with(y -> latch.countDown());
});
Assertions.assertTrue(latch.await(1, TimeUnit.SECONDS));
CountDownLatch latch2 = new CountDownLatch(1);
context2.runOnContext(x -> {
cachedService.direct(false)
.invoke(() -> {
if (!context2.equals(Vertx.currentContext())) {
throw new AssertionError("Expected to go back on the caller context");
}
})
.subscribe().with(y -> latch2.countDown());
});
Assertions.assertTrue(latch2.await(1, TimeUnit.SECONDS));
}
@Test
@ActivateRequestContext
void testDuplicatedContextHandlingWhenCalledContextAndAnsweredFromAnotherContext() throws InterruptedException {
ContextInternal context = (ContextInternal) vertx.getOrCreateContext();
context = context.duplicate();
var context2 = context.duplicate();
CountDownLatch latch = new CountDownLatch(1);
Context tmp = context;
context.runOnContext(x -> {
cachedService.directOnAnotherContext(false)
.invoke(() -> {
if (!tmp.equals(Vertx.currentContext())) {
throw new AssertionError("Expected to go back on the caller context");
}
})
.subscribe().with(y -> latch.countDown());
});
Assertions.assertTrue(latch.await(1, TimeUnit.SECONDS));
CountDownLatch latch2 = new CountDownLatch(1);
context2.runOnContext(x -> {
cachedService.directOnAnotherContext(false)
.invoke(() -> {
if (!context2.equals(Vertx.currentContext())) {
throw new AssertionError("Expected to go back on the caller context");
}
})
.subscribe().with(y -> latch2.countDown());
});
Assertions.assertTrue(latch2.await(1, TimeUnit.SECONDS));
}
@RepeatedTest(10)
void testWithAsyncTaskRestoringContext() throws InterruptedException {
var rootContext = vertx.getOrCreateContext();
var duplicatedContext1 = ((ContextInternal) rootContext).duplicate();
CountDownLatch latch = new CountDownLatch(1);
duplicatedContext1.runOnContext(x -> {
cachedService.async()
.subscribeAsCompletionStage()
.whenComplete((s, t) -> {
Assertions.assertEquals(duplicatedContext1, Vertx.currentContext());
latch.countDown();
});
});
var duplicatedContext2 = ((ContextInternal) rootContext).duplicate();
CountDownLatch latch2 = new CountDownLatch(1);
duplicatedContext2.runOnContext(x -> {
cachedService.async()
.subscribeAsCompletionStage()
.whenComplete((s, t) -> {
Assertions.assertEquals(duplicatedContext2, Vertx.currentContext());
latch2.countDown();
});
});
Assertions.assertTrue(latch.await(2, TimeUnit.SECONDS));
Assertions.assertTrue(latch2.await(2, TimeUnit.SECONDS));
}
@ApplicationScoped
public static | DuplicatedContextHandlingTest |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileContextDeleteOnExit.java | {
"start": 1376,
"end": 3219
} | class ____ {
private static int blockSize = 1024;
private static int numBlocks = 2;
private final FileContextTestHelper helper = new FileContextTestHelper();
private FileContext fc;
@BeforeEach
public void setup() throws IOException {
fc = FileContext.getLocalFSFileContext();
}
@AfterEach
public void tearDown() throws IOException {
fc.delete(helper.getTestRootPath(fc), true);
}
private void checkDeleteOnExitData(int size, FileContext fc, Path... paths) {
assertEquals(size, FileContext.DELETE_ON_EXIT.size());
Set<Path> set = FileContext.DELETE_ON_EXIT.get(fc);
assertEquals(paths.length, (set == null ? 0 : set.size()));
for (Path path : paths) {
assertTrue(set.contains(path));
}
}
@Test
public void testDeleteOnExit() throws Exception {
// Create deleteOnExit entries
Path file1 = helper.getTestRootPath(fc, "file1");
createFile(fc, file1, numBlocks, blockSize);
fc.deleteOnExit(file1);
checkDeleteOnExitData(1, fc, file1);
// Ensure shutdown hook is added
assertTrue(ShutdownHookManager.get().hasShutdownHook(FileContext.FINALIZER));
Path file2 = helper.getTestRootPath(fc, "dir1/file2");
createFile(fc, file2, numBlocks, blockSize);
fc.deleteOnExit(file2);
checkDeleteOnExitData(1, fc, file1, file2);
Path dir = helper.getTestRootPath(fc, "dir3/dir4/dir5/dir6");
createFile(fc, dir, numBlocks, blockSize);
fc.deleteOnExit(dir);
checkDeleteOnExitData(1, fc, file1, file2, dir);
// trigger deleteOnExit and ensure the registered
// paths are cleaned up
FileContext.FINALIZER.run();
checkDeleteOnExitData(0, fc, new Path[0]);
assertFalse(exists(fc, file1));
assertFalse(exists(fc, file2));
assertFalse(exists(fc, dir));
}
}
| TestFileContextDeleteOnExit |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/query/IntervalsSourceProvider.java | {
"start": 32493,
"end": 38629
} | class ____ extends IntervalsSourceProvider {
public static final String NAME = "fuzzy";
private final String term;
private final int prefixLength;
private final boolean transpositions;
private final Fuzziness fuzziness;
private final String analyzer;
private final String useField;
public Fuzzy(String term, int prefixLength, boolean transpositions, Fuzziness fuzziness, String analyzer, String useField) {
this.term = term;
this.prefixLength = prefixLength;
this.transpositions = transpositions;
this.fuzziness = fuzziness;
this.analyzer = analyzer;
this.useField = useField;
}
public Fuzzy(StreamInput in) throws IOException {
this.term = in.readString();
this.prefixLength = in.readVInt();
this.transpositions = in.readBoolean();
this.fuzziness = new Fuzziness(in);
this.analyzer = in.readOptionalString();
this.useField = in.readOptionalString();
}
@Override
public IntervalsSource getSource(SearchExecutionContext context, TextFamilyFieldType fieldType) {
NamedAnalyzer analyzer = null;
if (this.analyzer != null) {
analyzer = context.getIndexAnalyzers().get(this.analyzer);
}
if (useField != null) {
fieldType = useField(context.getFieldType(useField));
}
if (analyzer == null) {
analyzer = fieldType.getTextSearchInfo().searchAnalyzer();
}
// Fuzzy queries only work with unicode content so it's legal to call utf8ToString here.
String normalizedTerm = analyzer.normalize(fieldType.name(), term).utf8ToString();
IntervalsSource source = fieldType.fuzzyIntervals(
normalizedTerm,
fuzziness.asDistance(term),
prefixLength,
transpositions,
context
);
if (useField != null) {
source = Intervals.fixField(useField, source);
}
return source;
}
@Override
public void extractFields(Set<String> fields) {
if (useField != null) {
fields.add(useField);
}
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Fuzzy fuzzy = (Fuzzy) o;
return prefixLength == fuzzy.prefixLength
&& transpositions == fuzzy.transpositions
&& Objects.equals(term, fuzzy.term)
&& Objects.equals(fuzziness, fuzzy.fuzziness)
&& Objects.equals(analyzer, fuzzy.analyzer)
&& Objects.equals(useField, fuzzy.useField);
}
@Override
public int hashCode() {
return Objects.hash(term, prefixLength, transpositions, fuzziness, analyzer, useField);
}
@Override
public String getWriteableName() {
return NAME;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(term);
out.writeVInt(prefixLength);
out.writeBoolean(transpositions);
fuzziness.writeTo(out);
out.writeOptionalString(analyzer);
out.writeOptionalString(useField);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(NAME);
builder.field("term", term);
builder.field("prefix_length", prefixLength);
builder.field("transpositions", transpositions);
fuzziness.toXContent(builder, params);
if (analyzer != null) {
builder.field("analyzer", analyzer);
}
if (useField != null) {
builder.field("use_field", useField);
}
builder.endObject();
return builder;
}
private static final ConstructingObjectParser<Fuzzy, Void> PARSER = new ConstructingObjectParser<>(NAME, args -> {
String term = (String) args[0];
int prefixLength = (args[1] == null) ? FuzzyQueryBuilder.DEFAULT_PREFIX_LENGTH : (int) args[1];
boolean transpositions = (args[2] == null) ? FuzzyQueryBuilder.DEFAULT_TRANSPOSITIONS : (boolean) args[2];
Fuzziness fuzziness = (args[3] == null) ? FuzzyQueryBuilder.DEFAULT_FUZZINESS : (Fuzziness) args[3];
String analyzer = (String) args[4];
String useField = (String) args[5];
return new Fuzzy(term, prefixLength, transpositions, fuzziness, analyzer, useField);
});
static {
PARSER.declareString(constructorArg(), new ParseField("term"));
PARSER.declareInt(optionalConstructorArg(), new ParseField("prefix_length"));
PARSER.declareBoolean(optionalConstructorArg(), new ParseField("transpositions"));
PARSER.declareField(optionalConstructorArg(), (p, c) -> Fuzziness.parse(p), Fuzziness.FIELD, ObjectParser.ValueType.VALUE);
PARSER.declareString(optionalConstructorArg(), new ParseField("analyzer"));
PARSER.declareString(optionalConstructorArg(), new ParseField("use_field"));
}
public static Fuzzy fromXContent(XContentParser parser) throws IOException {
return PARSER.parse(parser, null);
}
String getTerm() {
return term;
}
int getPrefixLength() {
return prefixLength;
}
boolean isTranspositions() {
return transpositions;
}
Fuzziness getFuzziness() {
return fuzziness;
}
String getAnalyzer() {
return analyzer;
}
String getUseField() {
return useField;
}
}
public static | Fuzzy |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/deser/jdk/JDKKeyDeserializer.java | {
"start": 1209,
"end": 10077
} | class ____ extends KeyDeserializer
{
public final static int TYPE_BOOLEAN = 1;
public final static int TYPE_BYTE = 2;
public final static int TYPE_SHORT = 3;
public final static int TYPE_CHAR = 4;
public final static int TYPE_INT = 5;
public final static int TYPE_LONG = 6;
public final static int TYPE_FLOAT = 7;
public final static int TYPE_DOUBLE = 8;
public final static int TYPE_LOCALE = 9;
public final static int TYPE_DATE = 10;
public final static int TYPE_CALENDAR = 11;
public final static int TYPE_UUID = 12;
public final static int TYPE_URI = 13;
public final static int TYPE_URL = 14;
public final static int TYPE_CLASS = 15;
public final static int TYPE_CURRENCY = 16;
public final static int TYPE_BYTE_ARRAY = 17; // since 2.9
protected final int _kind;
protected final Class<?> _keyClass;
/**
* Some types that are deserialized using a helper deserializer.
*/
protected final JDKFromStringDeserializer _deser;
protected JDKKeyDeserializer(int kind, Class<?> cls) {
this(kind, cls, null);
}
protected JDKKeyDeserializer(int kind, Class<?> cls, JDKFromStringDeserializer deser) {
_kind = kind;
_keyClass = cls;
_deser = deser;
}
public static JDKKeyDeserializer forType(Class<?> raw)
{
int kind;
// first common types:
if (raw == String.class || raw == Object.class
|| raw == CharSequence.class
// see [databind#2115]:
|| raw == Serializable.class) {
return StringKD.forType(raw);
}
if (raw == UUID.class) {
kind = TYPE_UUID;
} else if (raw == Integer.class) {
kind = TYPE_INT;
} else if (raw == Long.class) {
kind = TYPE_LONG;
} else if (raw == Date.class) {
kind = TYPE_DATE;
} else if (raw == Calendar.class) {
kind = TYPE_CALENDAR;
// then less common ones...
} else if (raw == Boolean.class) {
kind = TYPE_BOOLEAN;
} else if (raw == Byte.class) {
kind = TYPE_BYTE;
} else if (raw == Character.class) {
kind = TYPE_CHAR;
} else if (raw == Short.class) {
kind = TYPE_SHORT;
} else if (raw == Float.class) {
kind = TYPE_FLOAT;
} else if (raw == Double.class) {
kind = TYPE_DOUBLE;
} else if (raw == URI.class) {
kind = TYPE_URI;
} else if (raw == URL.class) {
kind = TYPE_URL;
} else if (raw == Class.class) {
kind = TYPE_CLASS;
} else if (raw == Locale.class) {
JDKFromStringDeserializer deser = JDKFromStringDeserializer.findDeserializer(Locale.class);
return new JDKKeyDeserializer(TYPE_LOCALE, raw, deser);
} else if (raw == Currency.class) {
JDKFromStringDeserializer deser = JDKFromStringDeserializer.findDeserializer(Currency.class);
return new JDKKeyDeserializer(TYPE_CURRENCY, raw, deser);
} else if (raw == byte[].class) {
kind = TYPE_BYTE_ARRAY;
} else {
return null;
}
return new JDKKeyDeserializer(kind, raw);
}
@Override
public Object deserializeKey(String key, DeserializationContext ctxt)
throws JacksonException
{
if (key == null) { // is this even legal call?
return null;
}
try {
Object result = _parse(key, ctxt);
if (result != null) {
return result;
}
} catch (JacksonException e) {
throw e;
} catch (Exception re) {
return ctxt.handleWeirdKey(_keyClass, key, "not a valid representation, problem: (%s) %s",
re.getClass().getName(),
ClassUtil.exceptionMessage(re));
}
if (ClassUtil.isEnumType(_keyClass)
&& ctxt.getConfig().isEnabled(EnumFeature.READ_UNKNOWN_ENUM_VALUES_AS_NULL)) {
return null;
}
return ctxt.handleWeirdKey(_keyClass, key, "not a valid representation");
}
public Class<?> getKeyClass() { return _keyClass; }
// NOTE: throws plain `Exception` for convenience, handled by caller
protected Object _parse(String key, DeserializationContext ctxt)
throws Exception
{
switch (_kind) {
case TYPE_BOOLEAN:
if ("true".equals(key)) {
return Boolean.TRUE;
}
if ("false".equals(key)) {
return Boolean.FALSE;
}
return ctxt.handleWeirdKey(_keyClass, key, "value not 'true' or 'false'");
case TYPE_BYTE:
{
int value = _parseInt(key);
// allow range up to 255, inclusive (to support "unsigned" byte)
if (value < Byte.MIN_VALUE || value > 255) {
return ctxt.handleWeirdKey(_keyClass, key, "overflow, value cannot be represented as 8-bit value");
}
return Byte.valueOf((byte) value);
}
case TYPE_SHORT:
{
int value = _parseInt(key);
if (value < Short.MIN_VALUE || value > Short.MAX_VALUE) {
return ctxt.handleWeirdKey(_keyClass, key, "overflow, value cannot be represented as 16-bit value");
// fall-through and truncate if need be
}
return Short.valueOf((short) value);
}
case TYPE_CHAR:
if (key.length() == 1) {
return Character.valueOf(key.charAt(0));
}
return ctxt.handleWeirdKey(_keyClass, key, "can only convert 1-character Strings");
case TYPE_INT:
return _parseInt(key);
case TYPE_LONG:
return _parseLong(key);
case TYPE_FLOAT:
// Bounds/range checks would be tricky here, so let's not bother even trying...
return Float.valueOf((float) _parseDouble(key));
case TYPE_DOUBLE:
return _parseDouble(key);
case TYPE_LOCALE:
case TYPE_CURRENCY:
try {
return _deser._deserialize(key, ctxt);
} catch (IllegalArgumentException | IOException e) {
return _weirdKey(ctxt, key, e);
}
case TYPE_DATE:
return ctxt.parseDate(key);
case TYPE_CALENDAR:
return ctxt.constructCalendar(ctxt.parseDate(key));
case TYPE_UUID:
try {
return UUID.fromString(key);
} catch (Exception e) {
return _weirdKey(ctxt, key, e);
}
case TYPE_URI:
try {
return URI.create(key);
} catch (Exception e) {
return _weirdKey(ctxt, key, e);
}
case TYPE_URL:
try {
return new URL(key);
} catch (MalformedURLException e) {
return _weirdKey(ctxt, key, e);
}
case TYPE_CLASS:
try {
return ctxt.findClass(key);
} catch (Exception e) {
return ctxt.handleWeirdKey(_keyClass, key, "unable to parse key as Class");
}
case TYPE_BYTE_ARRAY:
try {
return ctxt.getConfig().getBase64Variant().decode(key);
} catch (IllegalArgumentException e) {
return _weirdKey(ctxt, key, e);
}
default:
throw new IllegalStateException("Internal error: unknown key type "+_keyClass);
}
}
/*
/**********************************************************************
/* Helper methods for sub-classes
/**********************************************************************
*/
protected int _parseInt(String key) throws IllegalArgumentException {
return NumberInput.parseInt(key);
}
protected long _parseLong(String key) throws IllegalArgumentException {
return NumberInput.parseLong(key);
}
protected double _parseDouble(String key) throws IllegalArgumentException {
return NumberInput.parseDouble(key, false);
}
protected Object _weirdKey(DeserializationContext ctxt, String key, Exception e) throws JacksonException {
return ctxt.handleWeirdKey(_keyClass, key, "problem: %s",
ClassUtil.exceptionMessage(e));
}
/*
/**********************************************************************
/* First: the standard "String as String" deserializer
/**********************************************************************
*/
@JacksonStdImpl
final static | JDKKeyDeserializer |
java | apache__flink | flink-table/flink-table-common/src/main/java/org/apache/flink/table/annotation/DataTypeHint.java | {
"start": 6217,
"end": 9607
} | class ____ reflectively extracted.
*
* @see DataType#bridgedTo(Class)
*/
Class<?> bridgedTo() default void.class;
/**
* Adds a hint that defines a custom serializer that should be used for serializing and
* deserializing opaque RAW types. It is used if {@link #value()} is explicitly defined as an
* unparameterized {@code RAW} string or if (possibly nested) fields in a structured type need
* to be handled as an opaque type.
*
* <p>By default, Flink's default RAW serializer is used.
*
* @see DataTypes#RAW(Class, TypeSerializer)
*/
Class<? extends TypeSerializer<?>> rawSerializer() default UnknownSerializer.class;
// --------------------------------------------------------------------------------------------
// Group of data types specification
// --------------------------------------------------------------------------------------------
/**
* This hint parameter influences the extraction of a {@link TypeInference} in functions. It
* adds a hint for accepting pre-defined groups of similar types, i.e., more than just one
* explicit data type.
*
* <p>Note: This hint parameter is only interpreted when used in function hints or next to
* arguments of implementation methods. It has highest precedence above all other hint
* parameter.
*
* <p>Some examples:
*
* <pre>{@code
* // expects an integer for the first input argument and allows any data type for the second
* @FunctionHint(
* input = [@DataTypeHint("INT"), @DataTypeHint(inputGroup = ANY)],
* output = @DataTypeHint("BOOLEAN")
* )
*
* // expects an integer for the first input argument and allows any data type for the second
* eval(int i, @DataTypeHint(inputGroup = ANY) Object o)
* }</pre>
*/
InputGroup inputGroup() default InputGroup.UNKNOWN;
// --------------------------------------------------------------------------------------------
// Parameterization of the reflection-based extraction
// --------------------------------------------------------------------------------------------
/**
* Version that describes the expected behavior of the reflection-based data type extraction.
*
* <p>It is meant for future backward compatibility. Whenever the extraction logic is changed,
* old function and structured type classes should still return the same data type as before
* when versioned accordingly.
*
* <p>By default, the version is always the most recent one.
*/
ExtractionVersion version() default ExtractionVersion.UNKNOWN;
/**
* Defines that a RAW data type may be used for all classes that cannot be mapped to any
* SQL-like data type or cause an error.
*
* <p>By default, this parameter is set to {@code false} which means that an exception is thrown
* for unmapped types. This is helpful to identify and fix faulty implementations. It is
* generally recommended to use SQL-like types instead of enabling RAW opaque types.
*
* <p>If RAW types cannot be avoided, they should be enabled only in designated areas (i.e.,
* within package prefixes using {@link #allowRawPattern()}) in order to not swallow all errors.
* However, this parameter globally enables RAW types for the annotated | is |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/conditional/qualifier/ConditionalMethodWithClassQualifiersMapper.java | {
"start": 1457,
"end": 1943
} | interface ____ {
@Condition
@Named("american")
static boolean isAmericanCitizen(EmployeeDto employerDto) {
return "US".equals( employerDto.getCountry() );
}
@Condition
@Named("british")
static boolean isBritishCitizen(EmployeeDto employeeDto) {
return "UK".equals( employeeDto.getCountry() );
}
}
@Qualifier
@Target(ElementType.TYPE)
@Retention(RetentionPolicy.CLASS)
@ | StaticUtil |
java | alibaba__fastjson | src/test/java/com/alibaba/json/test/benchmark/RyuDoubleBenchmark.java | {
"start": 94,
"end": 1165
} | class ____ {
private final static int COUNT = 1000 * 1000 * 10;
public static void main(String[] args) throws Exception {
double v = 0.5390050566444644; //new java.util.Random().nextDouble();
System.out.println(v);
for (int i = 0; i < 10; ++i) {
f0(v); // 2505, 1865
}
// System.out.println();
//
// for (int i = 0; i < 10; ++i) {
// f1(v); // 752, 571
// }
}
public static void f0(double v) throws Exception {
long start = System.currentTimeMillis();
for (int i = 0; i < COUNT; ++i) {
Double.toString(v);
}
long millis = System.currentTimeMillis() - start;
System.out.println("jdk : " + millis);
}
public static void f1(double v) throws Exception {
long start = System.currentTimeMillis();
for (int i = 0; i < COUNT; ++i) {
RyuDouble.toString(v);
}
long millis = System.currentTimeMillis() - start;
System.out.println("ryu : " + millis);
}
}
| RyuDoubleBenchmark |
java | google__guice | core/src/com/google/inject/internal/InternalFactory.java | {
"start": 8502,
"end": 9844
} | class ____ {
static MethodHandle getHandleAndMaybeUpdateCache(
InternalFactory<?> factory, boolean linked, MethodHandleResult result) {
if (result.cachability == MethodHandleResult.Cachability.NEVER) {
return result.methodHandle;
}
// Update the cache under the factory lock to ensure that we pick a consistent winner.
// NOTE: we could perform the cache updates outside of the lock and use a CAS style pattern
// but that seems like overkill since the `updateCache` methods are very fast and simple.
HandleCache cache;
synchronized (factory) {
cache = factory.handleCache.updateCache(linked, result);
factory.handleCache = cache;
}
return cache.getHandle(linked);
}
static final HandleCache EMPTY = new EmptyCache();
/** Returns the cached handle or `null` matching the {@code linked} setting. */
@Nullable
abstract MethodHandle getHandle(boolean linked);
/**
* Updates the cache using the given result for the {@code linked} setting.
*
* <p>Returns a new cache object that should be used to update the cache.
*/
abstract HandleCache updateCache(boolean linked, MethodHandleResult result);
/** An always empty cache, suitable for starting the cache process. */
private static final | HandleCache |
java | netty__netty | codec-native-quic/src/test/java/io/netty/handler/codec/quic/QuicheQuicCodecTest.java | {
"start": 1326,
"end": 4171
} | class ____<B extends QuicCodecBuilder<B>> extends AbstractQuicTest {
protected abstract B newCodecBuilder();
@Test
public void testDefaultVersionIsV1() {
B builder = newCodecBuilder();
assertEquals(0x0000_0001, builder.version);
}
@Test
public void testFlushStrategyUsedWithBytes() {
testFlushStrategy(true);
}
@Test
public void testFlushStrategyUsedWithPackets() {
testFlushStrategy(false);
}
private void testFlushStrategy(boolean useBytes) {
final int bytes = 8;
final AtomicInteger numBytesTracker = new AtomicInteger();
final AtomicInteger numPacketsTracker = new AtomicInteger();
final AtomicInteger flushCount = new AtomicInteger();
B builder = newCodecBuilder();
builder.flushStrategy((numPackets, numBytes) -> {
numPacketsTracker.set(numPackets);
numBytesTracker.set(numBytes);
if (useBytes) {
return numBytes > 8;
}
if (numPackets == 2) {
return true;
}
return false;
});
EmbeddedChannel channel = new EmbeddedChannel(new ChannelOutboundHandlerAdapter() {
@Override
public void flush(ChannelHandlerContext ctx) throws Exception {
flushCount.incrementAndGet();
super.flush(ctx);
}
}, builder.build());
assertEquals(0, numPacketsTracker.get());
assertEquals(0, numBytesTracker.get());
assertEquals(0, flushCount.get());
channel.write(new DatagramPacket(Unpooled.buffer().writeZero(bytes), new InetSocketAddress(0)));
assertEquals(1, numPacketsTracker.get());
assertEquals(8, numBytesTracker.get());
assertEquals(0, flushCount.get());
channel.write(new DatagramPacket(Unpooled.buffer().writeZero(bytes), new InetSocketAddress(0)));
assertEquals(2, numPacketsTracker.get());
assertEquals(16, numBytesTracker.get());
assertEquals(1, flushCount.get());
// As a flush did happen we should see two packets in the outbound queue.
for (int i = 0; i < 2; i++) {
DatagramPacket packet = channel.readOutbound();
assertNotNull(packet);
packet.release();
}
ChannelFuture future = channel.write(new DatagramPacket(Unpooled.buffer().writeZero(bytes),
new InetSocketAddress(0)));
assertEquals(1, numPacketsTracker.get());
assertEquals(8, numBytesTracker.get());
assertEquals(1, flushCount.get());
// We never flushed the last datagram packet so it should be failed.
assertFalse(channel.finish());
assertTrue(future.isDone());
assertFalse(future.isSuccess());
}
}
| QuicheQuicCodecTest |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/jpa/mapping/WrapperArrayHandlingTests.java | {
"start": 731,
"end": 1837
} | class ____ {
@Test
@ServiceRegistry(
settings = @Setting( name = AvailableSettings.JPA_COMPLIANCE, value = "true" )
)
void testComplianceEnabled(ServiceRegistryScope scope) {
try ( SessionFactory sessionFactory = buildSessionFactory( scope ) ) {
// we expect this one to pass
}
}
private SessionFactory buildSessionFactory(ServiceRegistryScope scope) {
final MetadataSources metadataSources = new MetadataSources( scope.getRegistry() );
final Metadata metadata = metadataSources.addAnnotatedClasses( TheEntity.class ).buildMetadata();
return metadata.buildSessionFactory();
}
@Test
@ServiceRegistry(
settings = @Setting( name = AvailableSettings.JPA_COMPLIANCE, value = "false" )
)
void testComplianceDisabled(ServiceRegistryScope scope) {
try ( SessionFactory sessionFactory = buildSessionFactory( scope ) ) {
// however, this one should fall because DISALLOW is the default
fail( "Expecting an exception due to DISALLOW" );
}
catch (Exception expected) {
}
}
@Entity( name = "TheEntity" )
@Table( name = "TheEntity" )
public static | WrapperArrayHandlingTests |
java | apache__flink | flink-formats/flink-avro/src/main/java/org/apache/flink/formats/avro/AvroDeserializationSchema.java | {
"start": 4805,
"end": 5148
} | class ____ which deserialize. Should be one of: {@link
* org.apache.avro.specific.SpecificRecord}, {@link org.apache.avro.generic.GenericRecord}.
* @param reader reader's Avro schema. Should be provided if recordClazz is {@link
* GenericRecord}
* @param encoding encoding approach to use. Identifies the Avro decoder | to |
java | spring-projects__spring-boot | core/spring-boot-test/src/main/java/org/springframework/boot/test/context/TestConfiguration.java | {
"start": 2492,
"end": 3366
} | class ____ well as for external calls to this configuration's
* {@code @Bean} methods, e.g. from another configuration class. If this is not needed
* since each of this particular configuration's {@code @Bean} methods is
* self-contained and designed as a plain factory method for container use, switch
* this flag to {@code false} in order to avoid CGLIB subclass processing.
* <p>
* Turning off bean method interception effectively processes {@code @Bean} methods
* individually like when declared on non-{@code @Configuration} classes, a.k.a.
* "@Bean Lite Mode" (see {@link Bean @Bean's javadoc}). It is therefore behaviorally
* equivalent to removing the {@code @Configuration} stereotype.
* @return whether to proxy {@code @Bean} methods
* @since 2.2.1
*/
@AliasFor(annotation = Configuration.class)
boolean proxyBeanMethods() default true;
}
| as |
java | elastic__elasticsearch | x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/InterceptedInferenceQueryBuilder.java | {
"start": 3056,
"end": 22788
} | class ____<T extends AbstractQueryBuilder<T>> extends AbstractQueryBuilder<
InterceptedInferenceQueryBuilder<T>> {
public static final NodeFeature NEW_SEMANTIC_QUERY_INTERCEPTORS = new NodeFeature("search.new_semantic_query_interceptors");
static final TransportVersion INFERENCE_RESULTS_MAP_WITH_CLUSTER_ALIAS = TransportVersion.fromName(
"inference_results_map_with_cluster_alias"
);
protected final T originalQuery;
protected final Map<FullyQualifiedInferenceId, InferenceResults> inferenceResultsMap;
protected final SetOnce<Map<FullyQualifiedInferenceId, InferenceResults>> inferenceResultsMapSupplier;
protected final boolean ccsRequest;
protected InterceptedInferenceQueryBuilder(T originalQuery) {
this(originalQuery, null);
}
protected InterceptedInferenceQueryBuilder(T originalQuery, Map<FullyQualifiedInferenceId, InferenceResults> inferenceResultsMap) {
Objects.requireNonNull(originalQuery, "original query must not be null");
this.originalQuery = originalQuery;
this.inferenceResultsMap = inferenceResultsMap != null ? Map.copyOf(inferenceResultsMap) : null;
this.inferenceResultsMapSupplier = null;
this.ccsRequest = false;
}
@SuppressWarnings("unchecked")
protected InterceptedInferenceQueryBuilder(StreamInput in) throws IOException {
super(in);
this.originalQuery = (T) in.readNamedWriteable(QueryBuilder.class);
if (in.getTransportVersion().supports(INFERENCE_RESULTS_MAP_WITH_CLUSTER_ALIAS)) {
this.inferenceResultsMap = in.readOptional(
i1 -> i1.readImmutableMap(FullyQualifiedInferenceId::new, i2 -> i2.readNamedWriteable(InferenceResults.class))
);
} else {
this.inferenceResultsMap = convertFromBwcInferenceResultsMap(
in.readOptional(i1 -> i1.readImmutableMap(i2 -> i2.readNamedWriteable(InferenceResults.class)))
);
}
if (in.getTransportVersion().supports(SEMANTIC_SEARCH_CCS_SUPPORT)) {
this.ccsRequest = in.readBoolean();
} else {
this.ccsRequest = false;
}
this.inferenceResultsMapSupplier = null;
}
protected InterceptedInferenceQueryBuilder(
InterceptedInferenceQueryBuilder<T> other,
Map<FullyQualifiedInferenceId, InferenceResults> inferenceResultsMap,
SetOnce<Map<FullyQualifiedInferenceId, InferenceResults>> inferenceResultsMapSupplier,
boolean ccsRequest
) {
this.originalQuery = other.originalQuery;
this.inferenceResultsMap = inferenceResultsMap;
this.inferenceResultsMapSupplier = inferenceResultsMapSupplier;
this.ccsRequest = ccsRequest;
}
/**
* <p>
* Get the fields queried by the original query.
* </p>
* <p>
* Multi-field queries should return a field map, where the map value is the boost applied to that field or field pattern.
* Single-field queries should return a single-entry field map, where the map value is 1.0.
* </p>
* <p>
* Implementations should <i>always</i> return a non-null map. If no fields are specified in the original query, an empty map should be
* returned.
* </p>
*
* @return A map of the fields queried by the original query
*/
protected abstract Map<String, Float> getFields();
/**
* Get the original query's query text. If not available, {@code null} should be returned.
*
* @return The original query's query text
*/
protected abstract String getQuery();
/**
* Rewrite to a backwards-compatible form of the query builder, depending on the value of
* {@link QueryRewriteContext#getMinTransportVersion()}. If no rewrites are required, the implementation should return {@code this}.
*
* @param queryRewriteContext The query rewrite context
* @return The query builder rewritten to a backwards-compatible form
*/
protected abstract QueryBuilder doRewriteBwC(QueryRewriteContext queryRewriteContext) throws IOException;
/**
* Generate a copy of {@code this}.
*
* @param inferenceResultsMap The inference results map
* @param inferenceResultsMapSupplier The inference results map supplier
* @param ccsRequest Flag indicating if this is a CCS request
* @return A copy of {@code this} with the provided inference results map
*/
protected abstract QueryBuilder copy(
Map<FullyQualifiedInferenceId, InferenceResults> inferenceResultsMap,
SetOnce<Map<FullyQualifiedInferenceId, InferenceResults>> inferenceResultsMapSupplier,
boolean ccsRequest
);
/**
* Rewrite to a {@link QueryBuilder} appropriate for a specific index's mappings. The implementation can use
* {@code indexMetadataContext} to get the index's mappings.
*
* @param inferenceFields A field map of the inference fields queried in this index. Every entry will be a concrete field.
* @param nonInferenceFields A field map of the non-inference fields queried in this index. Entries may be concrete fields or
* field patterns.
* @param indexMetadataContext The index metadata context.
* @return A query rewritten for the index's mappings.
*/
protected abstract QueryBuilder queryFields(
Map<String, Float> inferenceFields,
Map<String, Float> nonInferenceFields,
QueryRewriteContext indexMetadataContext
);
/**
* If the implementation should resolve wildcards in field patterns to inference fields.
*/
protected abstract boolean resolveWildcards();
/**
* If the implementation should fall back to the {@code index.query.default_field} index setting when
* {@link InterceptedInferenceQueryBuilder#getFields()} returns an empty map.
*/
protected abstract boolean useDefaultFields();
/**
* Get the query-time inference ID override. If not applicable or available, {@code null} should be returned.
*/
protected FullyQualifiedInferenceId getInferenceIdOverride() {
return null;
}
/**
* Perform any custom coordinator node validation. This is executed prior to generating inference results.
*
* @param resolvedIndices The resolved indices
*/
protected void coordinatorNodeValidate(ResolvedIndices resolvedIndices) {}
/**
* A hook for subclasses to do additional rewriting and inference result fetching while we are on the coordinator node.
* An example usage is {@link InterceptedInferenceKnnVectorQueryBuilder} which needs to rewrite the knn queries filters.
*/
protected InterceptedInferenceQueryBuilder<T> customDoRewriteGetInferenceResults(QueryRewriteContext queryRewriteContext)
throws IOException {
return this;
}
@Override
protected void doWriteTo(StreamOutput out) throws IOException {
if (inferenceResultsMapSupplier != null) {
throw new IllegalStateException(
"inferenceResultsMapSupplier must be null, can't serialize suppliers, missing a rewriteAndFetch?"
);
}
out.writeNamedWriteable(originalQuery);
if (out.getTransportVersion().supports(INFERENCE_RESULTS_MAP_WITH_CLUSTER_ALIAS)) {
out.writeOptional(
(o, v) -> o.writeMap(v, StreamOutput::writeWriteable, StreamOutput::writeNamedWriteable),
inferenceResultsMap
);
} else {
out.writeOptional((o1, v) -> o1.writeMap(v, (o2, id) -> {
if (id.clusterAlias().equals(LOCAL_CLUSTER_GROUP_KEY) == false) {
throw new IllegalArgumentException("Cannot serialize remote cluster inference results in a mixed-version cluster");
}
o2.writeString(id.inferenceId());
}, StreamOutput::writeNamedWriteable), inferenceResultsMap);
}
if (out.getTransportVersion().supports(SEMANTIC_SEARCH_CCS_SUPPORT)) {
out.writeBoolean(ccsRequest);
} else if (ccsRequest) {
throw new IllegalArgumentException(
"One or more nodes does not support "
+ originalQuery.getName()
+ " query cross-cluster search when querying a ["
+ SemanticTextFieldMapper.CONTENT_TYPE
+ "] field. Please update all nodes to at least Elasticsearch "
+ SEMANTIC_SEARCH_CCS_SUPPORT.toReleaseVersion()
+ "."
);
}
}
@Override
protected void doXContent(XContentBuilder builder, Params params) throws IOException {
builder.field(getName(), originalQuery);
}
@Override
protected Query doToQuery(SearchExecutionContext context) {
throw new UnsupportedOperationException("Query should be rewritten to a different type");
}
@Override
protected boolean doEquals(InterceptedInferenceQueryBuilder<T> other) {
return Objects.equals(originalQuery, other.originalQuery)
&& Objects.equals(inferenceResultsMap, other.inferenceResultsMap)
&& Objects.equals(inferenceResultsMapSupplier, other.inferenceResultsMapSupplier)
&& Objects.equals(ccsRequest, other.ccsRequest);
}
@Override
protected int doHashCode() {
return Objects.hash(originalQuery, inferenceResultsMap, inferenceResultsMapSupplier, ccsRequest);
}
@Override
public TransportVersion getMinimalSupportedVersion() {
return originalQuery.getMinimalSupportedVersion();
}
@Override
protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws IOException {
QueryRewriteContext indexMetadataContext = queryRewriteContext.convertToIndexMetadataContext();
if (indexMetadataContext != null) {
// We are performing an index metadata rewrite on the data node
return doRewriteBuildQuery(indexMetadataContext);
}
ResolvedIndices resolvedIndices = queryRewriteContext.getResolvedIndices();
if (resolvedIndices != null) {
// We are preforming a coordinator node rewrite
return doRewriteGetInferenceResults(queryRewriteContext);
}
return this;
}
private QueryBuilder doRewriteBuildQuery(QueryRewriteContext indexMetadataContext) {
Map<String, Float> queryFields = getFields();
if (useDefaultFields() && queryFields.isEmpty()) {
queryFields = getDefaultFields(indexMetadataContext.getIndexSettings().getSettings());
}
Map<String, Float> inferenceFieldsToQuery = getInferenceFieldsMap(indexMetadataContext, queryFields, resolveWildcards());
Map<String, Float> nonInferenceFieldsToQuery = new HashMap<>(queryFields);
nonInferenceFieldsToQuery.keySet().removeAll(inferenceFieldsToQuery.keySet());
return queryFields(inferenceFieldsToQuery, nonInferenceFieldsToQuery, indexMetadataContext);
}
private QueryBuilder doRewriteGetInferenceResults(QueryRewriteContext queryRewriteContext) throws IOException {
QueryBuilder rewrittenBwC = doRewriteBwC(queryRewriteContext);
if (rewrittenBwC != this) {
return rewrittenBwC;
}
// NOTE: This logic misses when ccs_minimize_roundtrips=false and only a remote cluster is querying a semantic text field.
// In this case, the remote data node will receive the original query, which will in turn result in an error about querying an
// unsupported field type.
ResolvedIndices resolvedIndices = queryRewriteContext.getResolvedIndices();
Set<FullyQualifiedInferenceId> inferenceIds = getInferenceIdsForFields(
resolvedIndices.getConcreteLocalIndicesMetadata().values(),
queryRewriteContext.getLocalClusterAlias(),
getFields(),
resolveWildcards(),
useDefaultFields()
);
// If we are handling a CCS request, always retain the intercepted query logic so that we can get inference results generated on
// the local cluster from the inference results map when rewriting on remote cluster data nodes. This can be necessary when:
// - A query specifies an inference ID override
// - Only non-inference fields are queried on the remote cluster
if (inferenceIds.isEmpty() && this.ccsRequest == false) {
// Not querying a semantic text field
return originalQuery;
}
// Validate early to prevent partial failures
coordinatorNodeValidate(resolvedIndices);
boolean ccsRequest = this.ccsRequest || resolvedIndices.getRemoteClusterIndices().isEmpty() == false;
if (ccsRequest && queryRewriteContext.isCcsMinimizeRoundTrips() == false) {
throw new IllegalArgumentException(
originalQuery.getName()
+ " query does not support cross-cluster search when querying a ["
+ SemanticTextFieldMapper.CONTENT_TYPE
+ "] field when [ccs_minimize_roundtrips] is false"
);
}
InterceptedInferenceQueryBuilder<T> rewritten = customDoRewriteGetInferenceResults(queryRewriteContext);
return rewritten.doRewriteWaitForInferenceResults(queryRewriteContext, inferenceIds, ccsRequest);
}
private QueryBuilder doRewriteWaitForInferenceResults(
QueryRewriteContext queryRewriteContext,
Set<FullyQualifiedInferenceId> inferenceIds,
boolean ccsRequest
) {
if (inferenceResultsMapSupplier != null) {
// Additional inference results have already been requested, and we are waiting for them to continue the rewrite process
return getNewInferenceResultsFromSupplier(inferenceResultsMapSupplier, this, m -> copy(m, null, ccsRequest));
}
FullyQualifiedInferenceId inferenceIdOverride = getInferenceIdOverride();
if (inferenceIdOverride != null) {
inferenceIds = Set.of(inferenceIdOverride);
}
SetOnce<Map<FullyQualifiedInferenceId, InferenceResults>> newInferenceResultsMapSupplier = getInferenceResults(
queryRewriteContext,
inferenceIds,
inferenceResultsMap,
getQuery()
);
QueryBuilder rewritten = this;
if (newInferenceResultsMapSupplier == null) {
// No additional inference results are required
if (inferenceResultsMap != null) {
// The inference results map is fully populated, so we can perform error checking
inferenceResultsErrorCheck(inferenceResultsMap);
} else {
// No inference results have been collected yet, indicating we don't need any to rewrite this query.
// This can happen when pre-computed inference results are provided by the user.
// Set an empty inference results map so that rewriting can continue.
rewritten = copy(Map.of(), null, ccsRequest);
}
} else {
rewritten = copy(inferenceResultsMap, newInferenceResultsMapSupplier, ccsRequest);
}
return rewritten;
}
private static Set<FullyQualifiedInferenceId> getInferenceIdsForFields(
Collection<IndexMetadata> indexMetadataCollection,
String clusterAlias,
Map<String, Float> fields,
boolean resolveWildcards,
boolean useDefaultFields
) {
Set<FullyQualifiedInferenceId> fullyQualifiedInferenceIds = new HashSet<>();
for (IndexMetadata indexMetadata : indexMetadataCollection) {
final Map<String, Float> indexQueryFields = (useDefaultFields && fields.isEmpty())
? getDefaultFields(indexMetadata.getSettings())
: fields;
Map<String, InferenceFieldMetadata> indexInferenceFields = indexMetadata.getInferenceFields();
for (String indexQueryField : indexQueryFields.keySet()) {
if (indexInferenceFields.containsKey(indexQueryField)) {
// No wildcards in field name
InferenceFieldMetadata inferenceFieldMetadata = indexInferenceFields.get(indexQueryField);
fullyQualifiedInferenceIds.add(
new FullyQualifiedInferenceId(clusterAlias, inferenceFieldMetadata.getSearchInferenceId())
);
continue;
}
if (resolveWildcards) {
if (Regex.isMatchAllPattern(indexQueryField)) {
indexInferenceFields.values()
.forEach(
ifm -> fullyQualifiedInferenceIds.add(
new FullyQualifiedInferenceId(clusterAlias, ifm.getSearchInferenceId())
)
);
} else if (Regex.isSimpleMatchPattern(indexQueryField)) {
indexInferenceFields.values()
.stream()
.filter(ifm -> Regex.simpleMatch(indexQueryField, ifm.getName()))
.forEach(
ifm -> fullyQualifiedInferenceIds.add(
new FullyQualifiedInferenceId(clusterAlias, ifm.getSearchInferenceId())
)
);
}
}
}
}
return fullyQualifiedInferenceIds;
}
private static Map<String, Float> getInferenceFieldsMap(
QueryRewriteContext indexMetadataContext,
Map<String, Float> queryFields,
boolean resolveWildcards
) {
Map<String, InferenceFieldMetadata> indexInferenceFields = indexMetadataContext.getMappingLookup().inferenceFields();
Map<InferenceFieldMetadata, Float> matchingInferenceFields = getMatchingInferenceFields(
indexInferenceFields,
queryFields,
resolveWildcards
);
return matchingInferenceFields.entrySet().stream().collect(Collectors.toMap(e -> e.getKey().getName(), Map.Entry::getValue));
}
private static Map<String, Float> getDefaultFields(Settings settings) {
List<String> defaultFieldsList = settings.getAsList(DEFAULT_FIELD_SETTING.getKey(), DEFAULT_FIELD_SETTING.getDefault(settings));
return QueryParserHelper.parseFieldsAndWeights(defaultFieldsList);
}
private static void inferenceResultsErrorCheck(Map<FullyQualifiedInferenceId, InferenceResults> inferenceResultsMap) {
for (var entry : inferenceResultsMap.entrySet()) {
String inferenceId = entry.getKey().inferenceId();
InferenceResults inferenceResults = entry.getValue();
if (inferenceResults instanceof ErrorInferenceResults errorInferenceResults) {
// Use InferenceException here so that the status code is set by the cause
throw new InferenceException(
"Inference ID [" + inferenceId + "] query inference error",
errorInferenceResults.getException()
);
} else if (inferenceResults instanceof WarningInferenceResults warningInferenceResults) {
throw new IllegalStateException(
"Inference ID [" + inferenceId + "] query inference warning: " + warningInferenceResults.getWarning()
);
}
}
}
}
| InterceptedInferenceQueryBuilder |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/cluster/routing/RoutingChangesObserver.java | {
"start": 604,
"end": 2729
} | interface ____ {
/**
* Called when unassigned shard is initialized. Does not include initializing relocation target shards.
*/
default void shardInitialized(ShardRouting unassignedShard, ShardRouting initializedShard) {}
/**
* Called when an initializing shard is started.
*/
default void shardStarted(ShardRouting initializingShard, ShardRouting startedShard) {}
/**
* Called when relocation of a started shard is initiated.
*/
default void relocationStarted(ShardRouting startedShard, ShardRouting targetRelocatingShard, String reason) {}
/**
* Called when an unassigned shard's unassigned information was updated
*/
default void unassignedInfoUpdated(ShardRouting unassignedShard, UnassignedInfo newUnassignedInfo) {}
/**
* Called when a relocating shard's failure information was updated
*/
default void relocationFailureInfoUpdated(ShardRouting relocatedShard, RelocationFailureInfo relocationFailureInfo) {}
/**
* Called when a shard is failed or cancelled.
*/
default void shardFailed(ShardRouting failedShard, UnassignedInfo unassignedInfo) {}
/**
* Called on relocation source when relocation completes after relocation target is started.
*/
default void relocationCompleted(ShardRouting removedRelocationSource) {}
/**
* Called on replica relocation target when replica relocation source fails. Promotes the replica relocation target to ordinary
* initializing shard.
*/
default void relocationSourceRemoved(ShardRouting removedReplicaRelocationSource) {}
/**
* Called when started replica is promoted to primary.
*/
default void replicaPromoted(ShardRouting replicaShard) {}
/**
* Called when an initializing replica is reinitialized. This happens when a primary relocation completes, which
* reinitializes all currently initializing replicas as their recovery source node changes
*/
default void initializedReplicaReinitialized(ShardRouting oldReplica, ShardRouting reinitializedReplica) {}
| RoutingChangesObserver |
java | eclipse-vertx__vert.x | vertx-core/src/main/java/io/vertx/core/net/impl/quic/QuicStreamImpl.java | {
"start": 1435,
"end": 5304
} | class ____ extends SocketBase<QuicStreamImpl> implements QuicStreamInternal {
private final QuicConnection connection;
private final ContextInternal context;
private final QuicStreamChannel channel;
private final NetworkMetrics<?> streamMetrics;
private final boolean bidirectional;
private final boolean localCreated;
private Handler<Integer> resetHandler;
private Handler<IdleStateEvent> idleHandler;
public QuicStreamImpl(QuicConnection connection, ContextInternal context, QuicStreamChannel channel, NetworkMetrics<?> streamMetrics, ChannelHandlerContext chctx) {
super(context, chctx);
this.connection = connection;
this.context = context;
this.channel = channel;
this.streamMetrics = streamMetrics;
this.bidirectional = channel.type() == QuicStreamType.BIDIRECTIONAL;
this.localCreated = channel.isLocalCreated();
}
@Override
public long id() {
return channel.streamId();
}
@Override
public QuicStreamInternal idleHandler(Handler<IdleStateEvent> handler) {
this.idleHandler = handler;
return this;
}
@Override
public QuicStream resetHandler(@Nullable Handler<Integer> handler) {
this.resetHandler = handler;
return this;
}
@Override
public Future<Void> reset(int error) {
PromiseInternal<Void> promise = context.promise();
ChannelFuture shutdownPromise = channel.shutdownOutput(error);
shutdownPromise.addListener(promise);
return promise.future();
}
@Override
public Future<Void> abort(int error) {
PromiseInternal<Void> promise = context.promise();
ChannelFuture shutdownPromise = channel.shutdownInput(error);
shutdownPromise.addListener(promise);
return promise.future();
}
@Override
public NetworkMetrics<?> metrics() {
return streamMetrics;
}
@Override
public Future<Void> writeMessage(Object message) {
if (bidirectional || localCreated) {
return super.writeMessage(message);
} else {
return context.failedFuture("Unidirectional stream created by the remote endpoint cannot be written to");
}
}
@Override
protected long sizeof(Object msg) {
if (msg instanceof QuicStreamFrame) {
return ((QuicStreamFrame)msg).content().readableBytes();
} else {
return super.sizeof(msg);
}
}
@Override
public Future<Void> end() {
PromiseInternal<Void> promise = context.promise();
writeToChannel(new MessageWrite() {
@Override
public void write() {
ChannelFuture shutdownPromise = channel.shutdownOutput();
shutdownPromise.addListener(promise);
}
@Override
public void cancel(Throwable cause) {
promise.fail(cause);
}
});
return promise.future();
}
@Override
protected void handleIdle(IdleStateEvent event) {
Handler<IdleStateEvent> handler = idleHandler;
if (handler != null) {
context.dispatch(event, handler);
} else {
super.handleIdle(event);
}
}
@Override
protected void handleEvent(Object event) {
if (event == ChannelInputShutdownEvent.INSTANCE) {
handleEnd();
} else {
super.handleEvent(event);
}
}
@Override
protected boolean handleException(Throwable t) {
if (t instanceof QuicException) {
QuicException quicException = (QuicException) t;
if (quicException.error() == null && "STREAM_RESET".equals(quicException.getMessage())) {
Handler<Integer> handler = resetHandler;
if (handler != null) {
context.emit(0, handler);
return false;
}
}
}
return super.handleException(t);
}
@Override
public boolean isLocalCreated() {
return localCreated;
}
@Override
public boolean isBidirectional() {
return bidirectional;
}
@Override
public QuicConnection connection() {
return connection;
}
}
| QuicStreamImpl |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/id/entities/Footballer.java | {
"start": 417,
"end": 1529
} | class ____ {
private String firstname;
private String lastname;
private String club;
public Footballer() {
}
public Footballer(String firstname, String lastname, String club) {
this.firstname = firstname;
this.lastname = lastname;
this.club = club;
}
public boolean equals(Object o) {
if ( this == o ) return true;
if ( !( o instanceof Footballer ) ) return false;
final Footballer footballer = (Footballer) o;
if ( !firstname.equals( footballer.firstname ) ) return false;
if ( !lastname.equals( footballer.lastname ) ) return false;
return true;
}
public int hashCode() {
int result;
result = firstname.hashCode();
result = 29 * result + lastname.hashCode();
return result;
}
@Id
public String getFirstname() {
return firstname;
}
public void setFirstname(String firstname) {
this.firstname = firstname;
}
@Id
public String getLastname() {
return lastname;
}
public void setLastname(String lastname) {
this.lastname = lastname;
}
public String getClub() {
return club;
}
public void setClub(String club) {
this.club = club;
}
}
| Footballer |
java | quarkusio__quarkus | extensions/resteasy-classic/resteasy-multipart/deployment/src/test/java/io/quarkus/resteasy/multipart/LargeMultipartPayloadTest.java | {
"start": 1857,
"end": 2142
} | class ____ {
@POST
@Path("/multipart")
@Produces(MediaType.TEXT_PLAIN)
@Consumes(MediaType.MULTIPART_FORM_DATA)
public String postForm(@MultipartForm final FormBody ignored) {
return "ignored";
}
}
public static | Resource |
java | alibaba__druid | core/src/main/java/com/alibaba/druid/pool/vendor/PGValidConnectionChecker.java | {
"start": 880,
"end": 1825
} | class ____ extends ValidConnectionCheckerAdapter implements ValidConnectionChecker, Serializable {
private static final long serialVersionUID = -2227528634302168877L;
private String defaultValidateQuery = "SELECT 'x'";
public PGValidConnectionChecker() {
configFromProperties(System.getProperties());
}
/**
* pgsql Driver 9.0以及以下版本不支持setQueryTimeout,可通过设置validationQueryTimeout小于0兼容低版本
*/
public boolean isValidConnection(Connection conn,
String validateQuery,
int validationQueryTimeout) throws Exception {
if (conn.isClosed()) {
return false;
}
if (StringUtils.isEmpty(validateQuery)) {
validateQuery = this.defaultValidateQuery;
}
return ValidConnectionCheckerAdapter.execValidQuery(conn, validateQuery, validationQueryTimeout);
}
}
| PGValidConnectionChecker |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/decorators/interceptor/InterceptorAndDecoratorTest.java | {
"start": 2153,
"end": 2524
} | class ____ implements Converter<String> {
@Inject
@Delegate
Converter<String> delegate;
@Override
public String convert(String value) {
return delegate.convert(value.trim());
}
}
@Target({ TYPE, METHOD })
@Retention(RUNTIME)
@Documented
@InterceptorBinding
public @ | TrimConverterDecorator |
java | apache__spark | sql/catalyst/src/test/java/org/apache/spark/sql/catalyst/expressions/TestThrowExceptionMethod.java | {
"start": 918,
"end": 1362
} | class ____ implements Serializable {
public int invoke(int i) throws IOException {
if (i != 0) {
return i * 2;
} else {
throw new IOException("Invoke the method that throw IOException");
}
}
public static int staticInvoke(int i) throws IOException {
if (i != 0) {
return i * 2;
} else {
throw new IOException("StaticInvoke the method that throw IOException");
}
}
}
| TestThrowExceptionMethod |
java | junit-team__junit5 | junit-jupiter-api/src/main/java/org/junit/jupiter/api/ClassOrderer.java | {
"start": 2331,
"end": 2438
} | class ____: {@value}
*
* <h4>Supported Values</h4>
*
* <p>Supported values include fully qualified | name |
java | spring-projects__spring-boot | module/spring-boot-quartz/src/test/java/org/springframework/boot/quartz/autoconfigure/QuartzAutoConfigurationTests.java | {
"start": 22987,
"end": 23313
} | class ____ {
@Bean
QuartzDataSourceScriptDatabaseInitializer customInitializer(DataSource dataSource,
QuartzJdbcProperties properties) {
return new QuartzDataSourceScriptDatabaseInitializer(dataSource, properties);
}
}
@Configuration(proxyBeanMethods = false)
static | CustomQuartzDatabaseInitializerConfiguration |
java | apache__camel | core/camel-management/src/main/java/org/apache/camel/management/mbean/ManagedDisabled.java | {
"start": 1181,
"end": 1657
} | class ____ extends ManagedProcessor implements ManagedDisabledMBean {
public ManagedDisabled(CamelContext context, DisabledProcessor processor, ProcessorDefinition<?> definition) {
super(context, processor, definition);
}
@Override
public DisabledProcessor getProcessor() {
return (DisabledProcessor) super.getProcessor();
}
@Override
public String getNodeType() {
return getProcessor().getNodeType();
}
}
| ManagedDisabled |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/pool/TestConnectError.java | {
"start": 979,
"end": 2948
} | class ____ extends TestCase {
private DruidDataSource dataSource;
private MockDriver driver;
protected void setUp() throws Exception {
driver = new MockDriver() {
private AtomicInteger count = new AtomicInteger();
public Connection connect(String url, Properties info) throws SQLException {
// create first connection successfully.
if (count.getAndIncrement() % 2 == 0) {
throw new SQLException();
}
try {
Thread.sleep(10);
} catch (InterruptedException e) {
throw new SQLException();
}
return super.connect(url, info);
}
};
assertEquals(0, DruidDataSourceStatManager.getInstance().getDataSourceList().size());
dataSource = new DruidDataSource();
dataSource.setDriver(driver);
dataSource.setRemoveAbandoned(true);
dataSource.setRemoveAbandonedTimeoutMillis(1000 * 180);
dataSource.setLogAbandoned(true);
dataSource.setTimeBetweenEvictionRunsMillis(10);
dataSource.setMinEvictableIdleTimeMillis(300 * 1000);
dataSource.setMaxActive(20);
dataSource.setUrl("jdbc:mock:TestConnectError");
}
protected void tearDown() throws Exception {
dataSource.close();
assertEquals(0, DruidDataSourceStatManager.getInstance().getDataSourceList().size());
}
public void test_connect_error() throws Exception {
assertEquals(0, dataSource.getCreateErrorCount());
int count = 10;
Connection[] connections = new Connection[count];
for (int i = 0; i < count; ++i) {
connections[i] = dataSource.getConnection();
}
for (int i = 0; i < count; ++i) {
connections[i].close();
}
assertEquals(10, dataSource.getCreateErrorCount());
}
}
| TestConnectError |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/ComputeFairShares.java | {
"start": 1542,
"end": 10757
} | class ____ {
private static final int COMPUTE_FAIR_SHARES_ITERATIONS = 25;
private ComputeFairShares() {
}
/**
* Compute fair share of the given schedulables.Fair share is an allocation of
* shares considering only active schedulables ie schedulables which have
* running apps.
*
* @param schedulables given schedulables.
* @param totalResources totalResources.
* @param type type of the resource.
*/
public static void computeShares(
Collection<? extends Schedulable> schedulables, Resource totalResources,
String type) {
computeSharesInternal(schedulables, totalResources, type, false);
}
/**
* Compute the steady fair share of the given queues. The steady fair
* share is an allocation of shares considering all queues, i.e.,
* active and inactive.
*
* @param queues {@link FSQueue}s whose shares are to be updated.
* @param totalResources totalResources.
* @param type type of the resource.
*/
public static void computeSteadyShares(
Collection<? extends FSQueue> queues, Resource totalResources,
String type) {
computeSharesInternal(queues, totalResources, type, true);
}
/**
* Given a set of Schedulables and a number of slots, compute their weighted
* fair shares. The min and max shares and of the Schedulables are assumed to
* be set beforehand. We compute the fairest possible allocation of shares to
* the Schedulables that respects their min and max shares.
* <p>
* To understand what this method does, we must first define what weighted
* fair sharing means in the presence of min and max shares. If there
* were no minimum or maximum shares, then weighted fair sharing would be
* achieved if the ratio of slotsAssigned / weight was equal for each
* Schedulable and all slots were assigned. Minimum and maximum shares add a
* further twist - Some Schedulables may have a min share higher than their
* assigned share or a max share lower than their assigned share.
* <p>
* To deal with these possibilities, we define an assignment of slots as being
* fair if there exists a ratio R such that: Schedulables S where S.minShare
* {@literal >} R * S.weight are given share S.minShare - Schedulables S
* where S.maxShare {@literal <} R * S.weight are given S.maxShare -
* All other Schedulables S are assigned share R * S.weight -
* The sum of all the shares is totalSlots.
* <p>
* We call R the weight-to-slots ratio because it converts a Schedulable's
* weight to the number of slots it is assigned.
* <p>
* We compute a fair allocation by finding a suitable weight-to-slot ratio R.
* To do this, we use binary search. Given a ratio R, we compute the number of
* slots that would be used in total with this ratio (the sum of the shares
* computed using the conditions above). If this number of slots is less than
* totalSlots, then R is too small and more slots could be assigned. If the
* number of slots is more than totalSlots, then R is too large.
* <p>
* We begin the binary search with a lower bound on R of 0 (which means that
* all Schedulables are only given their minShare) and an upper bound computed
* to be large enough that too many slots are given (by doubling R until we
* use more than totalResources resources). The helper method
* resourceUsedWithWeightToResourceRatio computes the total resources used
* with a given value of R.
* <p>
* The running time of this algorithm is linear in the number of Schedulables,
* because resourceUsedWithWeightToResourceRatio is linear-time and the
* number of iterations of binary search is a constant (dependent on desired
* precision).
*/
private static void computeSharesInternal(
Collection<? extends Schedulable> allSchedulables,
Resource totalResources, String type, boolean isSteadyShare) {
Collection<Schedulable> schedulables = new ArrayList<>();
long takenResources = handleFixedFairShares(
allSchedulables, schedulables, isSteadyShare, type);
if (schedulables.isEmpty()) {
return;
}
// Find an upper bound on R that we can use in our binary search. We start
// at R = 1 and double it until we have either used all the resources or we
// have met all Schedulables' max shares.
long totalMaxShare = 0;
for (Schedulable sched : schedulables) {
long maxShare = sched.getMaxShare().getResourceValue(type);
totalMaxShare = safeAdd(maxShare, totalMaxShare);
if (totalMaxShare == Long.MAX_VALUE) {
break;
}
}
long totalResource = Math.max((totalResources.getResourceValue(type) -
takenResources), 0);
totalResource = Math.min(totalMaxShare, totalResource);
double rMax = 1.0;
while (resourceUsedWithWeightToResourceRatio(rMax, schedulables, type)
< totalResource) {
rMax *= 2.0;
}
// Perform the binary search for up to COMPUTE_FAIR_SHARES_ITERATIONS steps
double left = 0;
double right = rMax;
for (int i = 0; i < COMPUTE_FAIR_SHARES_ITERATIONS; i++) {
double mid = (left + right) / 2.0;
long plannedResourceUsed = resourceUsedWithWeightToResourceRatio(
mid, schedulables, type);
if (plannedResourceUsed == totalResource) {
right = mid;
break;
} else if (plannedResourceUsed < totalResource) {
left = mid;
} else {
right = mid;
}
}
// Set the fair shares based on the value of R we've converged to
for (Schedulable sched : schedulables) {
Resource target;
if (isSteadyShare) {
target = ((FSQueue) sched).getSteadyFairShare();
} else {
target = sched.getFairShare();
}
target.setResourceValue(type, computeShare(sched, right, type));
}
}
/**
* Compute the resources that would be used given a weight-to-resource ratio
* w2rRatio, for use in the computeFairShares algorithm as described in
* {@link #computeSharesInternal}.
*/
private static long resourceUsedWithWeightToResourceRatio(double w2rRatio,
Collection<? extends Schedulable> schedulables, String type) {
long resourcesTaken = 0;
for (Schedulable sched : schedulables) {
long share = computeShare(sched, w2rRatio, type);
resourcesTaken = safeAdd(resourcesTaken, share);
if (resourcesTaken == Long.MAX_VALUE) {
break;
}
}
return resourcesTaken;
}
/**
* Compute the resources assigned to a Schedulable given a particular
* weight-to-resource ratio w2rRatio.
*/
private static long computeShare(Schedulable sched, double w2rRatio,
String type) {
double share = sched.getWeight() * w2rRatio;
share = Math.max(share, sched.getMinShare().getResourceValue(type));
share = Math.min(share, sched.getMaxShare().getResourceValue(type));
return (long) share;
}
/**
* Helper method to handle Schedulabes with fixed fairshares.
* Returns the resources taken by fixed fairshare schedulables,
* and adds the remaining to the passed nonFixedSchedulables.
*/
private static long handleFixedFairShares(
Collection<? extends Schedulable> schedulables,
Collection<Schedulable> nonFixedSchedulables,
boolean isSteadyShare, String type) {
long totalResource = 0;
for (Schedulable sched : schedulables) {
long fixedShare = getFairShareIfFixed(sched, isSteadyShare, type);
if (fixedShare < 0) {
nonFixedSchedulables.add(sched);
} else {
Resource target;
if (isSteadyShare) {
target = ((FSQueue)sched).getSteadyFairShare();
} else {
target = sched.getFairShare();
}
target.setResourceValue(type, fixedShare);
totalResource = safeAdd(totalResource, fixedShare);
}
}
return totalResource;
}
/**
* Get the fairshare for the {@link Schedulable} if it is fixed,
* -1 otherwise.
*
* The fairshare is fixed if either the maxShare is 0, weight is 0,
* or the Schedulable is not active for instantaneous fairshare.
*/
private static long getFairShareIfFixed(Schedulable sched,
boolean isSteadyShare, String type) {
// Check if maxShare is 0
if (sched.getMaxShare().getResourceValue(type) <= 0) {
return 0;
}
// For instantaneous fairshares, check if queue is active
if (!isSteadyShare &&
(sched instanceof FSQueue) && !((FSQueue)sched).isActive()) {
return 0;
}
// Check if weight is 0
if (sched.getWeight() <= 0) {
long minShare = sched.getMinShare().getResourceValue(type);
return (minShare <= 0) ? 0 : minShare;
}
return -1;
}
/**
* Safely add two long values. The result will always be a valid long value.
* If the addition caused an overflow the return value will be set to
* <code>Long.MAX_VALUE</code>.
* @param a first long to add
* @param b second long to add
* @return result of the addition
*/
private static long safeAdd(long a, long b) {
try {
return addExact(a, b);
} catch (ArithmeticException ae) {
return Long.MAX_VALUE;
}
}
}
| ComputeFairShares |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/runtime/LongScriptFieldDistanceFeatureQuery.java | {
"start": 1173,
"end": 3282
} | class ____ extends AbstractScriptFieldQuery<AbstractLongFieldScript> {
private final long origin;
private final long pivot;
public LongScriptFieldDistanceFeatureQuery(
Script script,
Function<LeafReaderContext, AbstractLongFieldScript> leafFactory,
String fieldName,
long origin,
long pivot
) {
super(script, fieldName, leafFactory);
this.origin = origin;
this.pivot = pivot;
}
@Override
protected boolean matches(AbstractLongFieldScript scriptContext, int docId) {
scriptContext.runForDoc(docId);
return scriptContext.count() > 0;
}
@Override
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
return new Weight(this) {
@Override
public boolean isCacheable(LeafReaderContext ctx) {
return false;
}
@Override
public ScorerSupplier scorerSupplier(LeafReaderContext context) throws IOException {
return new DefaultScorerSupplier(
new DistanceScorer(scriptContextFunction().apply(context), context.reader().maxDoc(), boost)
);
}
@Override
public Explanation explain(LeafReaderContext context, int doc) {
AbstractLongFieldScript script = scriptContextFunction().apply(context);
script.runForDoc(doc);
long value = valueWithMinAbsoluteDistance(script);
float score = score(boost, distanceFor(value));
return Explanation.match(
score,
"Distance score, computed as weight * pivot / (pivot + abs(value - origin)) from:",
Explanation.match(boost, "weight"),
Explanation.match(pivot, "pivot"),
Explanation.match(origin, "origin"),
Explanation.match(value, "current value")
);
}
};
}
private | LongScriptFieldDistanceFeatureQuery |
java | micronaut-projects__micronaut-core | websocket/src/main/java/io/micronaut/websocket/WebSocketSession.java | {
"start": 1403,
"end": 7821
} | interface ____ extends MutableConvertibleValues<Object>, AutoCloseable {
/**
* The ID of the session.
*
* @return The ID of the session
*/
String getId();
/**
* @return Only the attributes of the session
*/
MutableConvertibleValues<Object> getAttributes();
/**
* Whether the session is open.
*
* @return True if it is
*/
boolean isOpen();
/**
* Whether the session is writable. It may not be writable, if the buffer is currently full
*
* @return True if it is
*/
boolean isWritable();
/**
* Whether the connection is secure.
*
* @return True if it is secure
*/
boolean isSecure();
/**
* The current open sessions.
*
* @return The open sessions
*/
Set<? extends WebSocketSession> getOpenSessions();
/**
* The request URI this session was opened under.
*
* @return The request URI
*/
URI getRequestURI();
/**
* The protocol version of the WebSocket protocol currently being used.
*
* @return The protocol version
*/
String getProtocolVersion();
/**
* Send the given message to the remote peer.
* The resulting {@link Publisher} does not start sending until subscribed to.
* If you return it from Micronaut annotated methods such as {@link io.micronaut.websocket.annotation.OnOpen} and {@link io.micronaut.websocket.annotation.OnMessage},
* Micronaut will subscribe to it and send the message without blocking.
*
* @param message The message
* @param mediaType The media type of the message. Used to look up an appropriate codec via the {@link io.micronaut.http.codec.MediaTypeCodecRegistry}.
* @param <T> The message type
* @return A {@link Publisher} that either emits an error or emits the message once it has been published successfully.
*/
<T> Publisher<T> send(T message, MediaType mediaType);
/**
* Send the given message to the remote peer asynchronously.
*
* @param message The message
* @param mediaType The media type of the message. Used to look up an appropriate codec via the {@link io.micronaut.http.codec.MediaTypeCodecRegistry}.
* @param <T> The message type
* @return A {@link CompletableFuture} that tracks the execution. {@link CompletableFuture#get()} and related methods will return the message on success, on error throw the underlying Exception.
*/
<T> CompletableFuture<T> sendAsync(T message, MediaType mediaType);
/**
* Send the given message to the remote peer synchronously.
*
* @param message The message
* @param mediaType The media type of the message. Used to look up an appropriate codec via the {@link io.micronaut.http.codec.MediaTypeCodecRegistry}.
*/
default void sendSync(Object message, MediaType mediaType) {
try {
sendAsync(message, mediaType).get();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new WebSocketSessionException("Send Interrupted");
} catch (ExecutionException e) {
throw new WebSocketSessionException("Send Failure: " + e.getMessage(), e);
}
}
/**
* Send the given message to the remote peer.
* The resulting {@link Publisher} does not start sending until subscribed to.
* If you return it from Micronaut annotated methods such as {@link io.micronaut.websocket.annotation.OnOpen} and {@link io.micronaut.websocket.annotation.OnMessage},
* Micronaut will subscribe to it and send the message without blocking.
*
* @param message The message
* @param <T> The message type
* @return A {@link Publisher} that either emits an error or emits the message once it has been published successfully.
*/
default <T> Publisher<T> send(T message) {
return send(message, MediaType.APPLICATION_JSON_TYPE);
}
/**
* Send the given message to the remote peer asynchronously.
*
* @param message The message
* @param <T> The message type
* @return A {@link CompletableFuture} that tracks the execution. {@link CompletableFuture#get()} and related methods will return the message on success, on error throw the underlying Exception.
*/
default <T> CompletableFuture<T> sendAsync(T message) {
return sendAsync(message, MediaType.APPLICATION_JSON_TYPE);
}
/**
* Send the given message to the remote peer synchronously.
*
* @param message The message
*/
default void sendSync(Object message) {
sendSync(message, MediaType.APPLICATION_JSON_TYPE);
}
/**
* Send a ping through this WebSocket. The pong reply can be intercepted using a
* {@link io.micronaut.websocket.annotation.OnMessage @OnMessage} method that accepts a
* {@link WebSocketPongMessage}.
*
* @param content The content of the ping. The remote should return the same content in its
* {@link WebSocketPongMessage}.
* @return A future that completes when the ping has been sent. (Not when the pong has been received!)
*/
@NonNull
default CompletableFuture<?> sendPingAsync(@NonNull byte[] content) {
throw new UnsupportedOperationException("Ping not supported by this implementation");
}
/**
* The subprotocol if one is used.
*
* @return The subprotocol
*/
default Optional<String> getSubprotocol() {
return Optional.empty();
}
/**
* The request parameters used to create this session.
*
* @return The request parameters
*/
default ConvertibleMultiValues<String> getRequestParameters() {
return ConvertibleMultiValues.empty();
}
/**
* Any matching URI path variables.
*
* @return The path variables
*/
default ConvertibleValues<Object> getUriVariables() {
return ConvertibleValues.empty();
}
/**
* The user {@link Principal} used to create the session.
*
* @return The {@link Principal}
*/
default Optional<Principal> getUserPrincipal() {
return Optional.empty();
}
@Override
void close();
/**
* Close the session with the given event.
*
* @param closeReason The close event
*/
void close(CloseReason closeReason);
}
| WebSocketSession |
java | micronaut-projects__micronaut-core | http-client/src/test/groovy/io/micronaut/http/client/DefaultMethodClient3.java | {
"start": 967,
"end": 1245
} | class ____ implements IDefaultMethodClient {
@Get(produces = MediaType.TEXT_PLAIN, consumes = MediaType.TEXT_PLAIN)
public abstract String index2();
@Mutating
public String defaultMethod2(String zzz) {
return index(zzz) + " 2";
}
}
| DefaultMethodClient3 |
java | spring-projects__spring-boot | module/spring-boot-actuator/src/test/java/org/springframework/boot/actuate/endpoint/annotation/EndpointDiscovererTests.java | {
"start": 19275,
"end": 19565
} | class ____ extends TestEndpoint {
@WriteOperation
void updateWithMoreArguments(String foo, String bar, String baz) {
}
}
@Target(ElementType.TYPE)
@Retention(RetentionPolicy.RUNTIME)
@Documented
@Endpoint
@FilteredEndpoint(SpecializedEndpointFilter.class)
@ | TestEndpointSubclass |
java | elastic__elasticsearch | x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/AbstractAdLdapRealmTestCase.java | {
"start": 15658,
"end": 19815
} | enum ____ {
AD(
false,
AD_ROLE_MAPPING,
Settings.builder()
.put(XPACK_SECURITY_AUTHC_REALMS_AD_EXTERNAL + ".domain_name", ActiveDirectorySessionFactoryTests.AD_DOMAIN)
.put(XPACK_SECURITY_AUTHC_REALMS_AD_EXTERNAL + ".group_search.base_dn", "CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com")
.put(XPACK_SECURITY_AUTHC_REALMS_AD_EXTERNAL + ".group_search.scope", randomBoolean() ? SUB_TREE : ONE_LEVEL)
.put(XPACK_SECURITY_AUTHC_REALMS_AD_EXTERNAL + ".url", smbFixture.getAdLdapUrl())
.put(XPACK_SECURITY_AUTHC_REALMS_AD_EXTERNAL + ".follow_referrals", ActiveDirectorySessionFactoryTests.FOLLOW_REFERRALS)
.build(),
"active_directory"
),
AD_LDAP_GROUPS_FROM_SEARCH(
true,
AD_ROLE_MAPPING,
Settings.builder()
.put(XPACK_SECURITY_AUTHC_REALMS_LDAP_EXTERNAL + ".url", smbFixture.getAdLdapUrl())
.put(XPACK_SECURITY_AUTHC_REALMS_LDAP_EXTERNAL + ".group_search.base_dn", "CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com")
.put(XPACK_SECURITY_AUTHC_REALMS_LDAP_EXTERNAL + ".group_search.scope", randomBoolean() ? SUB_TREE : ONE_LEVEL)
.putList(
XPACK_SECURITY_AUTHC_REALMS_LDAP_EXTERNAL + ".user_dn_templates",
"cn={0},CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com"
)
.put(XPACK_SECURITY_AUTHC_REALMS_LDAP_EXTERNAL + ".follow_referrals", ActiveDirectorySessionFactoryTests.FOLLOW_REFERRALS)
.build(),
"ldap"
),
AD_LDAP_GROUPS_FROM_ATTRIBUTE(
true,
AD_ROLE_MAPPING,
Settings.builder()
.put(XPACK_SECURITY_AUTHC_REALMS_LDAP_EXTERNAL + ".url", smbFixture.getAdLdapUrl())
.putList(
XPACK_SECURITY_AUTHC_REALMS_LDAP_EXTERNAL + ".user_dn_templates",
"cn={0},CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com"
)
.put(XPACK_SECURITY_AUTHC_REALMS_LDAP_EXTERNAL + ".follow_referrals", ActiveDirectorySessionFactoryTests.FOLLOW_REFERRALS)
.build(),
"ldap"
);
final String type;
final boolean mapGroupsAsRoles;
final boolean loginWithCommonName;
private final RoleMappingEntry[] roleMappings;
final Settings settings;
RealmConfig(boolean loginWithCommonName, RoleMappingEntry[] roleMappings, Settings settings, String type) {
this.settings = settings;
this.loginWithCommonName = loginWithCommonName;
this.roleMappings = roleMappings;
this.mapGroupsAsRoles = randomBoolean();
this.type = type;
}
public Settings buildSettings(List<String> certificateAuthorities) {
return buildSettings(certificateAuthorities, randomInt());
}
protected Settings buildSettings(List<String> certificateAuthorities, int order) {
Settings.Builder builder = Settings.builder()
.put("xpack.security.authc.realms." + type + ".external.order", order)
.put("xpack.security.authc.realms." + type + ".external.ssl.verification_mode", SslVerificationMode.CERTIFICATE)
.put("xpack.security.authc.realms." + type + ".external.unmapped_groups_as_roles", mapGroupsAsRoles)
.put(this.settings)
.putList("xpack.security.authc.realms." + type + ".external.ssl.certificate_authorities", certificateAuthorities);
return builder.build();
}
public List<RoleMappingEntry> selectRoleMappings(Supplier<Boolean> shouldPickFileContent) {
// if mapGroupsAsRoles is turned on we use empty role mapping
if (mapGroupsAsRoles) {
return Collections.emptyList();
} else {
return Arrays.stream(this.roleMappings).map(e -> e.pickEntry(shouldPickFileContent)).collect(Collectors.toList());
}
}
}
}
| RealmConfig |
java | apache__flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/physical/stream/StreamPhysicalAsyncCorrelateRule.java | {
"start": 2029,
"end": 6333
} | class ____ extends ConverterRule {
public static final RelOptRule INSTANCE =
new StreamPhysicalAsyncCorrelateRule(
Config.INSTANCE.withConversion(
FlinkLogicalCorrelate.class,
FlinkConventions.LOGICAL(),
FlinkConventions.STREAM_PHYSICAL(),
"StreamPhysicalAsyncCorrelateRule"));
protected StreamPhysicalAsyncCorrelateRule(Config config) {
super(config);
}
// find only calc and table function
private boolean findAsyncTableFunction(RelNode node) {
if (node instanceof FlinkLogicalTableFunctionScan) {
FlinkLogicalTableFunctionScan scan = (FlinkLogicalTableFunctionScan) node;
return AsyncUtil.isAsyncCall(scan.getCall(), FunctionKind.ASYNC_TABLE);
} else if (node instanceof FlinkLogicalCalc) {
FlinkLogicalCalc calc = (FlinkLogicalCalc) node;
RelNode child = ((RelSubset) calc.getInput()).getOriginal();
return findAsyncTableFunction(child);
}
return false;
}
@Override
public boolean matches(RelOptRuleCall call) {
FlinkLogicalCorrelate correlate = call.rel(0);
RelNode right = ((RelSubset) correlate.getRight()).getOriginal();
return findAsyncTableFunction(right);
}
@Override
public RelNode convert(RelNode rel) {
FlinkLogicalCorrelate correlate = (FlinkLogicalCorrelate) rel;
RelTraitSet traitSet = rel.getTraitSet().replace(FlinkConventions.STREAM_PHYSICAL());
RelNode convInput =
RelOptRule.convert(correlate.getInput(0), FlinkConventions.STREAM_PHYSICAL());
RelNode right = correlate.getInput(1);
return convertToCorrelate(
right, correlate, traitSet, convInput, Optional.empty(), Optional.empty());
}
public RelNode convertToCorrelate(
RelNode relNode,
FlinkLogicalCorrelate correlate,
RelTraitSet traitSet,
RelNode convInput,
Optional<List<RexNode>> projections,
Optional<RexNode> condition) {
if (relNode instanceof RelSubset) {
RelSubset rel = (RelSubset) relNode;
return convertToCorrelate(
rel.getRelList().get(0),
correlate,
traitSet,
convInput,
projections,
condition);
} else if (relNode instanceof FlinkLogicalCalc) {
FlinkLogicalCalc calc = (FlinkLogicalCalc) relNode;
RelNode tableScan = StreamPhysicalCorrelateRule.getTableScan(calc);
FlinkLogicalCalc newCalc = StreamPhysicalCorrelateRule.getMergedCalc(calc);
// The projections are not handled here or in the base version, so currently we match
// that functionality.
return convertToCorrelate(
tableScan,
correlate,
traitSet,
convInput,
Optional.ofNullable(
newCalc.getProgram().getProjectList() == null
? null
: newCalc.getProgram().getProjectList().stream()
.map(newCalc.getProgram()::expandLocalRef)
.collect(Collectors.toList())),
Optional.ofNullable(
newCalc.getProgram().getCondition() == null
? null
: newCalc.getProgram()
.expandLocalRef(newCalc.getProgram().getCondition())));
} else {
FlinkLogicalTableFunctionScan scan = (FlinkLogicalTableFunctionScan) relNode;
return new StreamPhysicalAsyncCorrelate(
correlate.getCluster(),
traitSet,
convInput,
scan,
projections,
condition,
correlate.getRowType(),
correlate.getJoinType());
}
}
}
| StreamPhysicalAsyncCorrelateRule |
java | alibaba__nacos | console/src/main/java/com/alibaba/nacos/console/handler/ServerStateHandler.java | {
"start": 834,
"end": 1518
} | interface ____ {
/**
* Get the current state of the server.
*
* @return a map containing the server state
* @throws NacosException if an error occurs while retrieving the server state
*/
Map<String, String> getServerState() throws NacosException;
/**
* Get the announcement content based on the language.
*
* @param language the language for the announcement
* @return the announcement content
*/
String getAnnouncement(String language);
/**
* Get the console UI guide information.
*
* @return the console UI guide information
*/
String getConsoleUiGuide();
}
| ServerStateHandler |
java | apache__flink | flink-table/flink-table-common/src/main/java/org/apache/flink/table/catalog/DataTypeFactory.java | {
"start": 4280,
"end": 6002
} | class ____ cases where no serializer is known and a generic
* serializer should be used. The factory will create {@link DataTypes#RAW(Class,
* TypeSerializer)} with Flink's default RAW serializer that is automatically configured.
*
* <p>Note: This type is a black box within the table ecosystem and is only deserialized at the
* edges of the API.
*/
<T> DataType createRawDataType(Class<T> clazz);
/**
* Creates a RAW type for the given {@link TypeInformation}. Since type information does not
* contain a {@link TypeSerializer} yet. The serializer will be generated by considering the
* current configuration.
*
* <p>Note: This type is a black box within the table ecosystem and is only deserialized at the
* edges of the API.
*/
<T> DataType createRawDataType(TypeInformation<T> typeInfo);
// --------------------------------------------------------------------------------------------
// LogicalType creation
// --------------------------------------------------------------------------------------------
/**
* Creates a {@link LogicalType} by a fully or partially defined name.
*
* <p>The factory will parse and resolve the name of a type to a {@link LogicalType}. This
* includes both built-in types and user-defined types (see {@link DistinctType} and {@link
* StructuredType}).
*/
LogicalType createLogicalType(String typeString);
/**
* Creates a {@link LogicalType} from an {@link UnresolvedIdentifier} for resolving user-defined
* types (see {@link DistinctType} and {@link StructuredType}).
*/
LogicalType createLogicalType(UnresolvedIdentifier identifier);
}
| in |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/GoogleMailEndpointBuilderFactory.java | {
"start": 1565,
"end": 24983
} | interface ____
extends
EndpointConsumerBuilder {
default AdvancedGoogleMailEndpointConsumerBuilder advanced() {
return (AdvancedGoogleMailEndpointConsumerBuilder) this;
}
/**
* Google mail application name. Example would be camel-google-mail/1.0.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param applicationName the value to set
* @return the dsl builder
*/
default GoogleMailEndpointConsumerBuilder applicationName(String applicationName) {
doSetProperty("applicationName", applicationName);
return this;
}
/**
* Client ID of the mail application.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param clientId the value to set
* @return the dsl builder
*/
default GoogleMailEndpointConsumerBuilder clientId(String clientId) {
doSetProperty("clientId", clientId);
return this;
}
/**
* Delegate for wide-domain service account.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param delegate the value to set
* @return the dsl builder
*/
default GoogleMailEndpointConsumerBuilder delegate(String delegate) {
doSetProperty("delegate", delegate);
return this;
}
/**
* Sets the name of a parameter to be passed in the exchange In Body.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param inBody the value to set
* @return the dsl builder
*/
default GoogleMailEndpointConsumerBuilder inBody(String inBody) {
doSetProperty("inBody", inBody);
return this;
}
/**
* Specifies the level of permissions you want a calendar application to
* have to a user account. See
* https://developers.google.com/identity/protocols/googlescopes for
* more info. Multiple scopes can be separated by comma.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param scopes the value to set
* @return the dsl builder
*/
default GoogleMailEndpointConsumerBuilder scopes(String scopes) {
doSetProperty("scopes", scopes);
return this;
}
/**
* If the polling consumer did not poll any files, you can enable this
* option to send an empty message (no body) instead.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer
*
* @param sendEmptyMessageWhenIdle the value to set
* @return the dsl builder
*/
default GoogleMailEndpointConsumerBuilder sendEmptyMessageWhenIdle(boolean sendEmptyMessageWhenIdle) {
doSetProperty("sendEmptyMessageWhenIdle", sendEmptyMessageWhenIdle);
return this;
}
/**
* If the polling consumer did not poll any files, you can enable this
* option to send an empty message (no body) instead.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: consumer
*
* @param sendEmptyMessageWhenIdle the value to set
* @return the dsl builder
*/
default GoogleMailEndpointConsumerBuilder sendEmptyMessageWhenIdle(String sendEmptyMessageWhenIdle) {
doSetProperty("sendEmptyMessageWhenIdle", sendEmptyMessageWhenIdle);
return this;
}
/**
* The number of subsequent error polls (failed due some error) that
* should happen before the backoffMultipler should kick-in.
*
* The option is a: <code>int</code> type.
*
* Group: scheduler
*
* @param backoffErrorThreshold the value to set
* @return the dsl builder
*/
default GoogleMailEndpointConsumerBuilder backoffErrorThreshold(int backoffErrorThreshold) {
doSetProperty("backoffErrorThreshold", backoffErrorThreshold);
return this;
}
/**
* The number of subsequent error polls (failed due some error) that
* should happen before the backoffMultipler should kick-in.
*
* The option will be converted to a <code>int</code> type.
*
* Group: scheduler
*
* @param backoffErrorThreshold the value to set
* @return the dsl builder
*/
default GoogleMailEndpointConsumerBuilder backoffErrorThreshold(String backoffErrorThreshold) {
doSetProperty("backoffErrorThreshold", backoffErrorThreshold);
return this;
}
/**
* The number of subsequent idle polls that should happen before the
* backoffMultipler should kick-in.
*
* The option is a: <code>int</code> type.
*
* Group: scheduler
*
* @param backoffIdleThreshold the value to set
* @return the dsl builder
*/
default GoogleMailEndpointConsumerBuilder backoffIdleThreshold(int backoffIdleThreshold) {
doSetProperty("backoffIdleThreshold", backoffIdleThreshold);
return this;
}
/**
* The number of subsequent idle polls that should happen before the
* backoffMultipler should kick-in.
*
* The option will be converted to a <code>int</code> type.
*
* Group: scheduler
*
* @param backoffIdleThreshold the value to set
* @return the dsl builder
*/
default GoogleMailEndpointConsumerBuilder backoffIdleThreshold(String backoffIdleThreshold) {
doSetProperty("backoffIdleThreshold", backoffIdleThreshold);
return this;
}
/**
* To let the scheduled polling consumer backoff if there has been a
* number of subsequent idles/errors in a row. The multiplier is then
* the number of polls that will be skipped before the next actual
* attempt is happening again. When this option is in use then
* backoffIdleThreshold and/or backoffErrorThreshold must also be
* configured.
*
* The option is a: <code>int</code> type.
*
* Group: scheduler
*
* @param backoffMultiplier the value to set
* @return the dsl builder
*/
default GoogleMailEndpointConsumerBuilder backoffMultiplier(int backoffMultiplier) {
doSetProperty("backoffMultiplier", backoffMultiplier);
return this;
}
/**
* To let the scheduled polling consumer backoff if there has been a
* number of subsequent idles/errors in a row. The multiplier is then
* the number of polls that will be skipped before the next actual
* attempt is happening again. When this option is in use then
* backoffIdleThreshold and/or backoffErrorThreshold must also be
* configured.
*
* The option will be converted to a <code>int</code> type.
*
* Group: scheduler
*
* @param backoffMultiplier the value to set
* @return the dsl builder
*/
default GoogleMailEndpointConsumerBuilder backoffMultiplier(String backoffMultiplier) {
doSetProperty("backoffMultiplier", backoffMultiplier);
return this;
}
/**
* Milliseconds before the next poll.
*
* The option is a: <code>long</code> type.
*
* Default: 500
* Group: scheduler
*
* @param delay the value to set
* @return the dsl builder
*/
default GoogleMailEndpointConsumerBuilder delay(long delay) {
doSetProperty("delay", delay);
return this;
}
/**
* Milliseconds before the next poll.
*
* The option will be converted to a <code>long</code> type.
*
* Default: 500
* Group: scheduler
*
* @param delay the value to set
* @return the dsl builder
*/
default GoogleMailEndpointConsumerBuilder delay(String delay) {
doSetProperty("delay", delay);
return this;
}
/**
* If greedy is enabled, then the ScheduledPollConsumer will run
* immediately again, if the previous run polled 1 or more messages.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: scheduler
*
* @param greedy the value to set
* @return the dsl builder
*/
default GoogleMailEndpointConsumerBuilder greedy(boolean greedy) {
doSetProperty("greedy", greedy);
return this;
}
/**
* If greedy is enabled, then the ScheduledPollConsumer will run
* immediately again, if the previous run polled 1 or more messages.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: scheduler
*
* @param greedy the value to set
* @return the dsl builder
*/
default GoogleMailEndpointConsumerBuilder greedy(String greedy) {
doSetProperty("greedy", greedy);
return this;
}
/**
* Milliseconds before the first poll starts.
*
* The option is a: <code>long</code> type.
*
* Default: 1000
* Group: scheduler
*
* @param initialDelay the value to set
* @return the dsl builder
*/
default GoogleMailEndpointConsumerBuilder initialDelay(long initialDelay) {
doSetProperty("initialDelay", initialDelay);
return this;
}
/**
* Milliseconds before the first poll starts.
*
* The option will be converted to a <code>long</code> type.
*
* Default: 1000
* Group: scheduler
*
* @param initialDelay the value to set
* @return the dsl builder
*/
default GoogleMailEndpointConsumerBuilder initialDelay(String initialDelay) {
doSetProperty("initialDelay", initialDelay);
return this;
}
/**
* Specifies a maximum limit of number of fires. So if you set it to 1,
* the scheduler will only fire once. If you set it to 5, it will only
* fire five times. A value of zero or negative means fire forever.
*
* The option is a: <code>long</code> type.
*
* Default: 0
* Group: scheduler
*
* @param repeatCount the value to set
* @return the dsl builder
*/
default GoogleMailEndpointConsumerBuilder repeatCount(long repeatCount) {
doSetProperty("repeatCount", repeatCount);
return this;
}
/**
* Specifies a maximum limit of number of fires. So if you set it to 1,
* the scheduler will only fire once. If you set it to 5, it will only
* fire five times. A value of zero or negative means fire forever.
*
* The option will be converted to a <code>long</code> type.
*
* Default: 0
* Group: scheduler
*
* @param repeatCount the value to set
* @return the dsl builder
*/
default GoogleMailEndpointConsumerBuilder repeatCount(String repeatCount) {
doSetProperty("repeatCount", repeatCount);
return this;
}
/**
* The consumer logs a start/complete log line when it polls. This
* option allows you to configure the logging level for that.
*
* The option is a: <code>org.apache.camel.LoggingLevel</code> type.
*
* Default: TRACE
* Group: scheduler
*
* @param runLoggingLevel the value to set
* @return the dsl builder
*/
default GoogleMailEndpointConsumerBuilder runLoggingLevel(org.apache.camel.LoggingLevel runLoggingLevel) {
doSetProperty("runLoggingLevel", runLoggingLevel);
return this;
}
/**
* The consumer logs a start/complete log line when it polls. This
* option allows you to configure the logging level for that.
*
* The option will be converted to a
* <code>org.apache.camel.LoggingLevel</code> type.
*
* Default: TRACE
* Group: scheduler
*
* @param runLoggingLevel the value to set
* @return the dsl builder
*/
default GoogleMailEndpointConsumerBuilder runLoggingLevel(String runLoggingLevel) {
doSetProperty("runLoggingLevel", runLoggingLevel);
return this;
}
/**
* Allows for configuring a custom/shared thread pool to use for the
* consumer. By default each consumer has its own single threaded thread
* pool.
*
* The option is a:
* <code>java.util.concurrent.ScheduledExecutorService</code> type.
*
* Group: scheduler
*
* @param scheduledExecutorService the value to set
* @return the dsl builder
*/
default GoogleMailEndpointConsumerBuilder scheduledExecutorService(ScheduledExecutorService scheduledExecutorService) {
doSetProperty("scheduledExecutorService", scheduledExecutorService);
return this;
}
/**
* Allows for configuring a custom/shared thread pool to use for the
* consumer. By default each consumer has its own single threaded thread
* pool.
*
* The option will be converted to a
* <code>java.util.concurrent.ScheduledExecutorService</code> type.
*
* Group: scheduler
*
* @param scheduledExecutorService the value to set
* @return the dsl builder
*/
default GoogleMailEndpointConsumerBuilder scheduledExecutorService(String scheduledExecutorService) {
doSetProperty("scheduledExecutorService", scheduledExecutorService);
return this;
}
/**
* To use a cron scheduler from either camel-spring or camel-quartz
* component. Use value spring or quartz for built in scheduler.
*
* The option is a: <code>java.lang.Object</code> type.
*
* Default: none
* Group: scheduler
*
* @param scheduler the value to set
* @return the dsl builder
*/
default GoogleMailEndpointConsumerBuilder scheduler(Object scheduler) {
doSetProperty("scheduler", scheduler);
return this;
}
/**
* To use a cron scheduler from either camel-spring or camel-quartz
* component. Use value spring or quartz for built in scheduler.
*
* The option will be converted to a <code>java.lang.Object</code> type.
*
* Default: none
* Group: scheduler
*
* @param scheduler the value to set
* @return the dsl builder
*/
default GoogleMailEndpointConsumerBuilder scheduler(String scheduler) {
doSetProperty("scheduler", scheduler);
return this;
}
/**
* To configure additional properties when using a custom scheduler or
* any of the Quartz, Spring based scheduler. This is a multi-value
* option with prefix: scheduler.
*
* The option is a: <code>java.util.Map<java.lang.String,
* java.lang.Object></code> type.
* The option is multivalued, and you can use the
* schedulerProperties(String, Object) method to add a value (call the
* method multiple times to set more values).
*
* Group: scheduler
*
* @param key the option key
* @param value the option value
* @return the dsl builder
*/
default GoogleMailEndpointConsumerBuilder schedulerProperties(String key, Object value) {
doSetMultiValueProperty("schedulerProperties", "scheduler." + key, value);
return this;
}
/**
* To configure additional properties when using a custom scheduler or
* any of the Quartz, Spring based scheduler. This is a multi-value
* option with prefix: scheduler.
*
* The option is a: <code>java.util.Map<java.lang.String,
* java.lang.Object></code> type.
* The option is multivalued, and you can use the
* schedulerProperties(String, Object) method to add a value (call the
* method multiple times to set more values).
*
* Group: scheduler
*
* @param values the values
* @return the dsl builder
*/
default GoogleMailEndpointConsumerBuilder schedulerProperties(Map values) {
doSetMultiValueProperties("schedulerProperties", "scheduler.", values);
return this;
}
/**
* Whether the scheduler should be auto started.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: scheduler
*
* @param startScheduler the value to set
* @return the dsl builder
*/
default GoogleMailEndpointConsumerBuilder startScheduler(boolean startScheduler) {
doSetProperty("startScheduler", startScheduler);
return this;
}
/**
* Whether the scheduler should be auto started.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: scheduler
*
* @param startScheduler the value to set
* @return the dsl builder
*/
default GoogleMailEndpointConsumerBuilder startScheduler(String startScheduler) {
doSetProperty("startScheduler", startScheduler);
return this;
}
/**
* Time unit for initialDelay and delay options.
*
* The option is a: <code>java.util.concurrent.TimeUnit</code> type.
*
* Default: MILLISECONDS
* Group: scheduler
*
* @param timeUnit the value to set
* @return the dsl builder
*/
default GoogleMailEndpointConsumerBuilder timeUnit(TimeUnit timeUnit) {
doSetProperty("timeUnit", timeUnit);
return this;
}
/**
* Time unit for initialDelay and delay options.
*
* The option will be converted to a
* <code>java.util.concurrent.TimeUnit</code> type.
*
* Default: MILLISECONDS
* Group: scheduler
*
* @param timeUnit the value to set
* @return the dsl builder
*/
default GoogleMailEndpointConsumerBuilder timeUnit(String timeUnit) {
doSetProperty("timeUnit", timeUnit);
return this;
}
/**
* Controls if fixed delay or fixed rate is used. See
* ScheduledExecutorService in JDK for details.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: scheduler
*
* @param useFixedDelay the value to set
* @return the dsl builder
*/
default GoogleMailEndpointConsumerBuilder useFixedDelay(boolean useFixedDelay) {
doSetProperty("useFixedDelay", useFixedDelay);
return this;
}
/**
* Controls if fixed delay or fixed rate is used. See
* ScheduledExecutorService in JDK for details.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: scheduler
*
* @param useFixedDelay the value to set
* @return the dsl builder
*/
default GoogleMailEndpointConsumerBuilder useFixedDelay(String useFixedDelay) {
doSetProperty("useFixedDelay", useFixedDelay);
return this;
}
/**
* OAuth 2 access token. This typically expires after an hour so
* refreshToken is recommended for long term usage.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param accessToken the value to set
* @return the dsl builder
*/
default GoogleMailEndpointConsumerBuilder accessToken(String accessToken) {
doSetProperty("accessToken", accessToken);
return this;
}
/**
* Client secret of the mail application.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param clientSecret the value to set
* @return the dsl builder
*/
default GoogleMailEndpointConsumerBuilder clientSecret(String clientSecret) {
doSetProperty("clientSecret", clientSecret);
return this;
}
/**
* OAuth 2 refresh token. Using this, the Google Mail component can
* obtain a new accessToken whenever the current one expires - a
* necessity if the application is long-lived.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param refreshToken the value to set
* @return the dsl builder
*/
default GoogleMailEndpointConsumerBuilder refreshToken(String refreshToken) {
doSetProperty("refreshToken", refreshToken);
return this;
}
/**
* Service account key in json format to authenticate an application as
* a service account. Accept base64 adding the prefix base64:.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param serviceAccountKey the value to set
* @return the dsl builder
*/
default GoogleMailEndpointConsumerBuilder serviceAccountKey(String serviceAccountKey) {
doSetProperty("serviceAccountKey", serviceAccountKey);
return this;
}
}
/**
* Advanced builder for endpoint consumers for the Google Mail component.
*/
public | GoogleMailEndpointConsumerBuilder |
java | spring-projects__spring-boot | loader/spring-boot-jarmode-tools/src/test/java/org/springframework/boot/jarmode/tools/HelpCommandTests.java | {
"start": 1074,
"end": 1882
} | class ____ {
private HelpCommand command;
private TestPrintStream out;
@TempDir
@SuppressWarnings("NullAway.Init")
Path temp;
@BeforeEach
void setup() {
Context context = Mockito.mock(Context.class);
given(context.getArchiveFile()).willReturn(this.temp.resolve("test.jar").toFile());
this.command = new HelpCommand(context, List.of(new TestCommand()), "tools");
this.out = new TestPrintStream(this);
}
@Test
void shouldPrintAllCommands() {
this.command.run(this.out, Collections.emptyList());
assertThat(this.out).hasSameContentAsResource("help-output.txt");
}
@Test
void shouldPrintCommandSpecificHelp() {
this.command.run(this.out, List.of("test"));
System.out.println(this.out);
assertThat(this.out).hasSameContentAsResource("help-test-output.txt");
}
}
| HelpCommandTests |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/struct/TestParentChildReferences.java | {
"start": 3942,
"end": 4480
} | class ____ {
protected Parent parent;
protected final String value; // So that the bean is not empty of properties
public Child(@JsonProperty("value") String value) { this.value = value; }
public String getValue() { return value; }
@JsonBackReference
public Parent getParent() { return parent; }
public void setParent(Parent parent) { this.parent = parent; }
}
@JsonTypeInfo(use=Id.NAME)
@JsonSubTypes({@JsonSubTypes.Type(ConcreteNode.class)})
static abstract | Child |
java | junit-team__junit5 | junit-platform-commons/src/main/java/org/junit/platform/commons/support/conversion/ConversionSupport.java | {
"start": 905,
"end": 2030
} | class ____ {
private static final List<StringToObjectConverter> stringToObjectConverters = List.of( //
new StringToBooleanConverter(), //
new StringToCharacterConverter(), //
new StringToNumberConverter(), //
new StringToClassConverter(), //
new StringToEnumConverter(), //
new StringToJavaTimeConverter(), //
new StringToCommonJavaTypesConverter(), //
new FallbackStringToObjectConverter() //
);
private ConversionSupport() {
/* no-op */
}
/**
* Convert the supplied source {@code String} into an instance of the specified
* target type.
*
* <p>If the target type is {@code String}, the source {@code String} will not
* be modified.
*
* <p>Some forms of conversion require a {@link ClassLoader}. If none is
* provided, the {@linkplain ClassLoaderUtils#getDefaultClassLoader() default
* ClassLoader} will be used.
*
* <p>This method is able to convert strings into primitive types and their
* corresponding wrapper types ({@link Boolean}, {@link Character}, {@link Byte},
* {@link Short}, {@link Integer}, {@link Long}, {@link Float}, and
* {@link Double}), | ConversionSupport |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/action/support/PlainActionFutureTests.java | {
"start": 1280,
"end": 9917
} | class ____ extends ESTestCase {
public void testInterruption() throws Exception {
final PlainActionFuture<Object> future = new PlainActionFuture<>() {
@Override
public void onResponse(Object value) {
throw new AssertionError("should not be called");
}
};
// test all possible methods that can be interrupted
final Runnable runnable = () -> {
final int method = randomIntBetween(0, 2);
switch (method) {
case 0 -> future.actionGet();
case 1 -> future.actionGet(TimeValue.timeValueSeconds(30));
case 2 -> future.actionGet(30, TimeUnit.SECONDS);
default -> throw new AssertionError(method);
}
};
final CyclicBarrier barrier = new CyclicBarrier(2);
final Thread main = Thread.currentThread();
final Thread thread = new Thread(() -> {
safeAwait(barrier);
main.interrupt();
});
thread.start();
final AtomicBoolean interrupted = new AtomicBoolean();
safeAwait(barrier);
try {
runnable.run();
} catch (final IllegalStateException e) {
interrupted.set(Thread.interrupted());
}
// we check this here instead of in the catch block to ensure that the catch block executed
assertTrue(interrupted.get());
thread.join();
}
public void testNoResult() {
assumeTrue("assertions required for this test", Assertions.ENABLED);
final var future = new PlainActionFuture<>();
expectThrows(AssertionError.class, future::result);
}
public void testUnwrapException() {
checkUnwrap(new RemoteTransportException("test", new RuntimeException()), RuntimeException.class, RemoteTransportException.class);
checkUnwrap(
new RemoteTransportException("test", new Exception()),
UncategorizedExecutionException.class,
RemoteTransportException.class
);
checkUnwrap(new Exception(), UncategorizedExecutionException.class, Exception.class);
checkUnwrap(new ElasticsearchException("test", new Exception()), ElasticsearchException.class, ElasticsearchException.class);
}
private void checkUnwrap(Exception exception, Class<? extends Exception> actionGetException, Class<? extends Exception> getException) {
final var future = new PlainActionFuture<>();
future.onFailure(exception);
assertEquals(actionGetException, expectThrows(RuntimeException.class, future::actionGet).getClass());
assertEquals(actionGetException, expectThrows(RuntimeException.class, () -> future.actionGet(10, TimeUnit.SECONDS)).getClass());
assertEquals(getException, expectThrows(ExecutionException.class, future::get).getCause().getClass());
assertEquals(getException, expectThrows(ExecutionException.class, () -> future.get(10, TimeUnit.SECONDS)).getCause().getClass());
if (exception instanceof RuntimeException) {
expectThrows(ExecutionException.class, getException, future::result);
expectThrows(ExecutionException.class, getException, expectIgnoresInterrupt(future::result));
assertEquals(getException, expectThrows(Exception.class, () -> FutureUtils.get(future)).getClass());
assertEquals(getException, expectThrows(Exception.class, () -> FutureUtils.get(future, 10, TimeUnit.SECONDS)).getClass());
} else {
expectThrows(ExecutionException.class, getException, future::result);
expectThrows(ExecutionException.class, getException, expectIgnoresInterrupt(future::result));
assertEquals(getException, expectThrowsWrapped(() -> FutureUtils.get(future)).getClass());
assertEquals(getException, expectThrowsWrapped(() -> FutureUtils.get(future, 10, TimeUnit.SECONDS)).getClass());
}
assertCapturesInterrupt(future::get);
assertCapturesInterrupt(() -> future.get(10, TimeUnit.SECONDS));
assertPropagatesInterrupt(future::actionGet);
assertPropagatesInterrupt(() -> future.actionGet(10, TimeUnit.SECONDS));
}
private static Throwable expectThrowsWrapped(ThrowingRunnable runnable) {
return expectThrows(UncategorizedExecutionException.class, ExecutionException.class, runnable).getCause();
}
public void testCancelException() {
final var future = new PlainActionFuture<>();
future.cancel(randomBoolean());
assertCancellation(future::get);
assertCancellation(future::actionGet);
assertCancellation(() -> future.get(10, TimeUnit.SECONDS));
assertCancellation(() -> future.actionGet(10, TimeUnit.SECONDS));
assertCancellation(future::result);
try {
Thread.currentThread().interrupt();
assertCancellation(future::result);
} finally {
assertTrue(Thread.interrupted());
}
assertCapturesInterrupt(future::get);
assertCapturesInterrupt(() -> future.get(10, TimeUnit.SECONDS));
assertPropagatesInterrupt(future::actionGet);
assertPropagatesInterrupt(() -> future.actionGet(10, TimeUnit.SECONDS));
}
public void testAssertCompleteAllowedAllowsConcurrentCompletesFromSamePool() {
final AtomicReference<PlainActionFuture<?>> futureReference = new AtomicReference<>(new PlainActionFuture<>());
final var executorName = randomFrom(ThreadPool.Names.GENERIC, ThreadPool.Names.MANAGEMENT);
final var running = new AtomicBoolean(true);
try (TestThreadPool threadPool = new TestThreadPool(getTestName())) {
// We only need 4 threads to reproduce this issue reliably, using more threads
// just increases the run time due to the additional synchronisation
final var threadCount = Math.min(threadPool.info(executorName).getMax(), 4);
final var startBarrier = new CyclicBarrier(threadCount + 1);
// N threads competing to complete the futures
for (int i = 0; i < threadCount; i++) {
threadPool.executor(executorName).execute(() -> {
safeAwait(startBarrier);
while (running.get()) {
futureReference.get().onResponse(null);
}
});
}
// The race can only occur once per completion, so we provide
// a stream of new futures to the competing threads to
// maximise the probability it occurs. Providing them
// with new futures while they spin proved to be much
// more reliable at reproducing the issue than releasing
// them all from a barrier to complete a single future.
safeAwait(startBarrier);
for (int i = 0; i < 20; i++) {
futureReference.set(new PlainActionFuture<>());
safeSleep(1);
}
running.set(false);
}
}
private static void assertCancellation(ThrowingRunnable runnable) {
final var cancellationException = expectThrows(CancellationException.class, runnable);
assertEquals("Task was cancelled.", cancellationException.getMessage());
assertNull(cancellationException.getCause());
}
private static void assertCapturesInterrupt(ThrowingRunnable runnable) {
try {
Thread.currentThread().interrupt();
final var interruptedException = expectThrows(InterruptedException.class, runnable);
assertNull(interruptedException.getMessage());
assertNull(interruptedException.getCause());
} finally {
assertFalse(Thread.interrupted());
}
}
private static void assertPropagatesInterrupt(ThrowingRunnable runnable) {
try {
Thread.currentThread().interrupt();
final var interruptedException = expectThrows(IllegalStateException.class, InterruptedException.class, runnable);
assertNull(interruptedException.getMessage());
assertNull(interruptedException.getCause());
} finally {
assertTrue(Thread.interrupted());
}
}
private static ThrowingRunnable expectIgnoresInterrupt(ThrowingRunnable runnable) {
return () -> {
try {
Thread.currentThread().interrupt();
runnable.run();
} finally {
assertTrue(Thread.interrupted());
}
};
}
}
| PlainActionFutureTests |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest-client/deployment/src/test/java/io/quarkus/rest/client/reactive/tls/TrustAllTest.java | {
"start": 741,
"end": 1750
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest TEST = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(Client.class))
.withConfigurationResource("trust-all-test-application.properties");
WireMockServer server;
URI baseUri;
@BeforeEach
public void setUp() {
server = new WireMockServer(wireMockConfig().dynamicHttpsPort());
server.stubFor(WireMock.get("/ssl")
.willReturn(aResponse().withBody("hello").withStatus(200)));
server.start();
baseUri = URI.create("https://localhost:" + server.httpsPort());
}
@AfterEach
public void stop() {
server.stop();
}
@Test
void shouldWorkWithTrustAllAndSelfSignedCert() {
Client client = RestClientBuilder.newBuilder()
.baseUri(baseUri)
.build(Client.class);
assertThat(client.get()).isEqualTo("hello");
}
@Path("ssl")
| TrustAllTest |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/sql/ast/tree/expression/UnaryOperation.java | {
"start": 806,
"end": 2542
} | class ____ implements Expression, DomainResultProducer {
private final UnaryArithmeticOperator operator;
private final Expression operand;
private final BasicValuedMapping type;
public UnaryOperation(UnaryArithmeticOperator operator, Expression operand, BasicValuedMapping type) {
this.operator = operator;
this.operand = operand;
this.type = type;
}
public UnaryArithmeticOperator getOperator() {
return operator;
}
public Expression getOperand() {
return operand;
}
@Override
public MappingModelExpressible getExpressionType() {
return type;
}
@Override
public void accept(SqlAstWalker walker) {
walker.visitUnaryOperationExpression( this );
}
@Override
public DomainResult createDomainResult(
String resultVariable,
DomainResultCreationState creationState) {
final SqlSelection sqlSelection = creationState.getSqlAstCreationState().getSqlExpressionResolver().resolveSqlSelection(
this,
type.getJdbcMapping().getJdbcJavaType(),
null,
creationState.getSqlAstCreationState().getCreationContext().getMappingMetamodel().getTypeConfiguration()
);
return new BasicResult<>(
sqlSelection.getValuesArrayPosition(),
resultVariable,
type.getJdbcMapping()
);
}
@Override
public void applySqlSelections(DomainResultCreationState creationState) {
final SqlAstCreationState sqlAstCreationState = creationState.getSqlAstCreationState();
final SqlExpressionResolver sqlExpressionResolver = sqlAstCreationState.getSqlExpressionResolver();
sqlExpressionResolver.resolveSqlSelection(
this,
type.getJdbcMapping().getJdbcJavaType(),
null,
sqlAstCreationState.getCreationContext().getMappingMetamodel().getTypeConfiguration()
);
}
}
| UnaryOperation |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/PmemVolumeManager.java | {
"start": 2016,
"end": 15519
/**
 * Tracks the bytes cached on a single persistent-memory volume against a
 * capacity cap. Reservation uses a CAS loop so concurrent callers can never
 * push usage past {@code maxBytes}.
 *
 * Fix: the class was declared as {@code class ____}, which does not match
 * its own constructor name ({@code UsedBytesCount}) nor the references in
 * the enclosing manager (e.g. {@code List<UsedBytesCount>}), so it could
 * not compile. The name is restored here.
 */
class UsedBytesCount {
  // Capacity cap for this volume. NOTE(review): not volatile; assumes
  // setMaxBytes() is only called while the owning manager serializes
  // access -- confirm against callers.
  private long maxBytes;
  private final AtomicLong usedBytes = new AtomicLong(0);

  UsedBytesCount(long maxBytes) {
    this.maxBytes = maxBytes;
  }

  /**
   * Try to reserve more bytes.
   *
   * @param bytesCount The number of bytes to add.
   * @return The new number of usedBytes if we succeeded; -1 if we failed.
   */
  long reserve(long bytesCount) {
    while (true) {
      long cur = usedBytes.get();
      long next = cur + bytesCount;
      if (next > maxBytes) {
        return -1;
      }
      // Retry until the CAS lands on an unchanged value.
      if (usedBytes.compareAndSet(cur, next)) {
        return next;
      }
    }
  }

  /**
   * Release some bytes that we're using.
   *
   * @param bytesCount The number of bytes to release.
   * @return The new number of usedBytes.
   */
  long release(long bytesCount) {
    return usedBytes.addAndGet(-bytesCount);
  }

  long getUsedBytes() {
    return usedBytes.get();
  }

  long getMaxBytes() {
    return maxBytes;
  }

  long getAvailableBytes() {
    return maxBytes - usedBytes.get();
  }

  void setMaxBytes(long maxBytes) {
    this.maxBytes = maxBytes;
  }
}
private static final Logger LOG =
LoggerFactory.getLogger(PmemVolumeManager.class);
// Name of the cache sub-directory created under each configured pmem volume.
public static final String CACHE_DIR = "hdfs_pmem_cache";
// Singleton instance; created by init() and read by getInstance().
private static PmemVolumeManager pmemVolumeManager = null;
// Real (CACHE_DIR-suffixed) paths of the validated pmem volumes.
private final ArrayList<String> pmemVolumes = new ArrayList<>();
// Maintain which pmem volume a block is cached to.
private final Map<ExtendedBlockId, Byte> blockKeyToVolume =
new ConcurrentHashMap<>();
// Per-volume byte accounting, parallel to pmemVolumes by index.
private final List<UsedBytesCount> usedBytesCounts = new ArrayList<>();
// Whether a previous cache should be recovered instead of wiped at startup.
private boolean cacheRecoveryEnabled;
/**
* The total cache capacity in bytes of persistent memory.
*/
private long cacheCapacity;
// Test-only per-volume cap; -1 means "use the volume's usable space".
private static long maxBytesPerPmem = -1;
// Number of valid volumes found by loadVolumes().
private int count = 0;
// Round-robin cursor used by chooseVolume().
private byte nextIndex = 0;
private PmemVolumeManager(String[] pmemVolumesConfig,
boolean cacheRecoveryEnabled) throws IOException {
if (pmemVolumesConfig == null || pmemVolumesConfig.length == 0) {
throw new IOException("The persistent memory volume, " +
DFSConfigKeys.DFS_DATANODE_PMEM_CACHE_DIRS_KEY +
" is not configured!");
}
this.cacheRecoveryEnabled = cacheRecoveryEnabled;
this.loadVolumes(pmemVolumesConfig);
cacheCapacity = 0L;
for (UsedBytesCount counter : usedBytesCounts) {
cacheCapacity += counter.getMaxBytes();
}
}
/**
 * Lazily creates the singleton; subsequent calls are no-ops.
 */
public synchronized static void init(
    String[] pmemVolumesConfig, boolean cacheRecoveryEnabled)
    throws IOException {
  if (pmemVolumeManager != null) {
    return; // Already initialized; keep the existing instance.
  }
  pmemVolumeManager = new PmemVolumeManager(pmemVolumesConfig,
      cacheRecoveryEnabled);
}
/**
 * Returns the singleton created by {@code init}.
 * NOTE(review): reads the static field without synchronization; assumes
 * init() happens-before any getInstance() caller -- confirm at call sites.
 */
public static PmemVolumeManager getInstance() {
  if (pmemVolumeManager != null) {
    return pmemVolumeManager;
  }
  throw new RuntimeException(
      "The pmemVolumeManager should be instantiated!");
}
@VisibleForTesting
public static void reset() {
// Drop the singleton so tests can re-init with different volumes.
pmemVolumeManager = null;
}
@VisibleForTesting
public static void setMaxBytes(long maxBytes) {
// Test hook: cap every volume at maxBytes instead of its usable space.
maxBytesPerPmem = maxBytes;
}
/**
 * @return total bytes currently reserved across all pmem volumes.
 */
public long getCacheUsed() {
  long total = 0L;
  for (int i = 0; i < usedBytesCounts.size(); i++) {
    total += usedBytesCounts.get(i).getUsedBytes();
  }
  return total;
}
// Total cache capacity in bytes across all loaded pmem volumes.
public long getCacheCapacity() {
return cacheCapacity;
}
/**
 * Try to reserve more bytes on persistent memory.
 *
 * @param key The ExtendedBlockId for a block.
 * @param bytesCount The number of bytes to add.
 * @return The new number of usedBytes if we succeeded; -1 if we failed.
 */
synchronized long reserve(ExtendedBlockId key, long bytesCount) {
  try {
    byte index = chooseVolume(bytesCount);
    long usedBytes = usedBytesCounts.get(index).reserve(bytesCount);
    // Put the entry into blockKeyToVolume if reserving bytes succeeded.
    // NOTE(review): a successful zero-byte reservation returns 0 and is
    // not recorded here -- confirm callers never cache empty replicas.
    if (usedBytes > 0) {
      blockKeyToVolume.put(key, index);
    }
    return usedBytes;
  } catch (IOException e) {
    // Fix: log the exception itself, not just its message, so the stack
    // trace of the failed volume selection is preserved.
    LOG.warn("Failed to reserve " + bytesCount + " bytes for " + key, e);
    return -1L;
  }
}
/**
 * Release some bytes that we're using on persistent memory.
 *
 * @param key The ExtendedBlockId for a block.
 *
 * @param bytesCount The number of bytes to release.
 *
 * @return The new number of usedBytes.
 */
long release(ExtendedBlockId key, long bytesCount) {
// NOTE(review): if key was never reserved, remove() returns null and the
// auto-unboxing below throws NullPointerException -- confirm callers only
// release keys they previously reserved successfully.
Byte index = blockKeyToVolume.remove(key);
return usedBytesCounts.get(index).release(bytesCount);
}
/**
 * Load and verify the configured pmem volumes. Invalid volumes are logged
 * and skipped; at least one must survive.
 *
 * @throws IOException If there is no available pmem volume.
 */
private void loadVolumes(String[] volumes)
    throws IOException {
  // Check whether the volume exists
  for (byte n = 0; n < volumes.length; n++) {
    try {
      File pmemDir = new File(volumes[n]);
      File realPmemDir = verifyIfValidPmemVolume(pmemDir);
      if (!cacheRecoveryEnabled) {
        // Clean up the cache left before, if any.
        cleanup(realPmemDir);
      }
      this.pmemVolumes.add(realPmemDir.getPath());
      // -1 means "no test override": cap the volume at its usable space.
      long maxBytes;
      if (maxBytesPerPmem == -1) {
        maxBytes = realPmemDir.getUsableSpace();
      } else {
        maxBytes = maxBytesPerPmem;
      }
      UsedBytesCount usedBytesCount = new UsedBytesCount(maxBytes);
      this.usedBytesCounts.add(usedBytesCount);
      LOG.info("Added persistent memory - {} with size={}",
          volumes[n], maxBytes);
    } catch (IllegalArgumentException e) {
      // Skip the bad volume and keep probing the rest. (The trailing
      // `continue` statements were redundant and have been removed.)
      LOG.error("Failed to parse persistent memory volume {}", volumes[n], e);
    } catch (IOException e) {
      LOG.error("Bad persistent memory volume: {}", volumes[n], e);
    }
  }
  count = pmemVolumes.size();
  if (count == 0) {
    throw new IOException(
        "At least one valid persistent memory volume is required!");
  }
}
/**
 * Best-effort removal of everything under the given pmem cache directory.
 * Failures are logged and swallowed so one bad volume cannot abort startup.
 */
void cleanup(File realPmemDir) {
try {
FileUtils.cleanDirectory(realPmemDir);
} catch (IOException e) {
LOG.error("Failed to clean up " + realPmemDir.getPath(), e);
}
}
/**
 * Empties the cache directories of every loaded pmem volume.
 */
void cleanup() {
  // Remove all files under the volume.
  pmemVolumes.forEach(volume -> cleanup(new File(volume)));
}
/**
 * Recover cache from the cached files in the configured pmem volumes.
 */
public Map<ExtendedBlockId, MappableBlock> recoverCache(
String bpid, MappableBlockLoader cacheLoader) throws IOException {
final Map<ExtendedBlockId, MappableBlock> keyToMappableBlock
= new ConcurrentHashMap<>();
for (byte volumeIndex = 0; volumeIndex < pmemVolumes.size();
volumeIndex++) {
long maxBytes = usedBytesCounts.get(volumeIndex).getMaxBytes();
long usedBytes = 0;
File cacheDir = new File(pmemVolumes.get(volumeIndex), bpid);
// Recursively list every cached file under this volume's block-pool dir.
Collection<File> cachedFileList = FileUtils.listFiles(cacheDir,
TrueFileFilter.INSTANCE, TrueFileFilter.INSTANCE);
// Scan the cached files in pmem volumes for cache recovery.
for (File cachedFile : cachedFileList) {
MappableBlock mappableBlock = cacheLoader.
getRecoveredMappableBlock(cachedFile, bpid, volumeIndex);
ExtendedBlockId key = mappableBlock.getKey();
keyToMappableBlock.put(key, mappableBlock);
usedBytes += cachedFile.length();
}
// Update maxBytes and cache capacity according to cache space
// used by recovered cached files.
// NOTE(review): presumably the cap from getUsableSpace() already
// excluded the on-disk recovered files, hence growing it here before
// re-reserving -- confirm against loadVolumes().
usedBytesCounts.get(volumeIndex).setMaxBytes(maxBytes + usedBytes);
cacheCapacity += usedBytes;
// reserve() cannot fail here: the cap was just raised by usedBytes.
usedBytesCounts.get(volumeIndex).reserve(usedBytes);
}
return keyToMappableBlock;
}
// Re-establish the block -> volume mapping for a replica found during recovery.
public void recoverBlockKeyToVolume(ExtendedBlockId key, byte volumeIndex) {
blockKeyToVolume.put(key, volumeIndex);
}
/**
 * Verifies a configured pmem directory by memory-mapping and writing a small
 * probe file under it, creating the cache sub-directory if needed.
 *
 * @return the real cache directory (pmemDir/CACHE_DIR) on success.
 * @throws IOException if the directory is missing, cannot be created, or a
 *         memory-mapped write to it fails.
 * @throws IllegalArgumentException if pmemDir is not a directory.
 */
@VisibleForTesting
static File verifyIfValidPmemVolume(File pmemDir)
    throws IOException {
  if (!pmemDir.exists()) {
    final String message = pmemDir + " does not exist";
    throw new IOException(message);
  }
  if (!pmemDir.isDirectory()) {
    final String message = pmemDir + " is not a directory";
    throw new IllegalArgumentException(message);
  }
  File realPmemDir = new File(getRealPmemDir(pmemDir.getPath()));
  if (!realPmemDir.exists() && !realPmemDir.mkdir()) {
    throw new IOException("Failed to create " + realPmemDir.getPath());
  }
  // Unique probe file name so concurrent verifications cannot collide.
  String uuidStr = UUID.randomUUID().toString();
  String testFilePath = realPmemDir.getPath() + "/.verify.pmem." + uuidStr;
  byte[] contents = uuidStr.getBytes(StandardCharsets.UTF_8);
  RandomAccessFile testFile = null;
  MappedByteBuffer out = null;
  try {
    testFile = new RandomAccessFile(testFilePath, "rw");
    out = testFile.getChannel().map(FileChannel.MapMode.READ_WRITE, 0,
        contents.length);
    if (out == null) {
      throw new IOException(
          "Failed to map the test file under " + realPmemDir);
    }
    out.put(contents);
    // Forces to write data to storage device containing the mapped file
    out.force();
    return realPmemDir;
  } catch (IOException e) {
    throw new IOException(
        "Exception while writing data to persistent storage dir: " +
            realPmemDir, e);
  } finally {
    if (out != null) {
      out.clear();
      // Fix: unmap only when the mapping succeeded. Previously munmap(out)
      // ran under the testFile check and could receive a null buffer when
      // FileChannel.map() threw.
      NativeIO.POSIX.munmap(out);
    }
    if (testFile != null) {
      IOUtils.closeStream(testFile);
      try {
        FsDatasetUtil.deleteMappedFile(testFilePath);
      } catch (IOException e) {
        LOG.warn("Failed to delete test file " + testFilePath +
            " from persistent memory", e);
      }
    }
  }
}
/**
 * Create cache subdirectory specified with blockPoolId.
 */
public void createBlockPoolDir(String bpid) throws IOException {
  for (int i = 0; i < pmemVolumes.size(); i++) {
    final File cacheDir = new File(pmemVolumes.get(i), bpid);
    final boolean missing = !cacheDir.exists();
    // mkdir() is only attempted when the directory does not exist yet.
    if (missing && !cacheDir.mkdir()) {
      throw new IOException("Failed to create " + cacheDir.getPath());
    }
  }
}
// Maps a raw configured volume path to its cache root: <rawPmemDir>/CACHE_DIR.
public static String getRealPmemDir(String rawPmemDir) {
return new File(rawPmemDir, CACHE_DIR).getAbsolutePath();
}
/**
 * Choose a persistent memory volume based on a specific algorithm.
 * Currently it is a round-robin policy.
 *
 * TODO: Refine volume selection policy by considering storage utilization.
 *
 * @param bytesCount bytes the caller wants to reserve on the chosen volume.
 * @return index of the first volume (round-robin order) with enough space.
 * @throws IOException if no volume is loaded or none has enough free space.
 */
synchronized Byte chooseVolume(long bytesCount) throws IOException {
  if (count == 0) {
    throw new IOException("No usable persistent memory is found");
  }
  int k = 0;
  long maxAvailableSpace = 0L;
  // Probe each volume at most once, resuming from the previous cursor.
  while (k++ != count) {
    if (nextIndex == count) {
      nextIndex = 0;
    }
    byte index = nextIndex++;
    long availableBytes = usedBytesCounts.get(index).getAvailableBytes();
    if (availableBytes >= bytesCount) {
      return index;
    }
    // Track the largest insufficient volume for the error message below.
    if (availableBytes > maxAvailableSpace) {
      maxAvailableSpace = availableBytes;
    }
  }
  // Fix: message previously read "There is no enough ..." and was missing
  // the space in "<bytesCount>is required".
  throw new IOException("There is not enough persistent memory space " +
      "for caching. The current max available space is " +
      maxAvailableSpace + ", but " + bytesCount + " is required.");
}
@VisibleForTesting
String getVolumeByIndex(Byte index) {
// Index is the Byte stored in blockKeyToVolume; auto-unboxed for get().
return pmemVolumes.get(index);
}
// NOTE(review): returns the live internal list, not a copy -- callers must
// treat it as read-only.
ArrayList<String> getVolumes() {
return pmemVolumes;
}
/**
 * A cache file is named after the corresponding BlockId.
 * Thus, cache file name can be inferred according to BlockId.
 */
public String idToCacheFileName(ExtendedBlockId key) {
  return Long.toString(key.getBlockId());
}
/**
 * Create and get the directory where a cache file with this key and
 * volumeIndex should be stored. Use hierarchical strategy of storing
 * blocks to avoid keeping cache files under one directory.
 *
 * @param volumeIndex The index of pmem volume where a replica will be
 * cached to or has been cached to.
 *
 * @param key The replica's ExtendedBlockId.
 *
 * @return A path to which the block replica is mapped.
 */
public String idToCacheFilePath(Byte volumeIndex, ExtendedBlockId key)
throws IOException {
final String cacheSubdirPrefix = "subdir";
long blockId = key.getBlockId();
String bpid = key.getBlockPoolId();
// Two 5-bit slices of the block id pick the subdir names, spreading files
// over at most 32 x 32 sub-directories per block pool.
int d1 = (int) ((blockId >> 16) & 0x1F);
int d2 = (int) ((blockId >> 8) & 0x1F);
String parentDir = pmemVolumes.get(volumeIndex) + "/" + bpid;
String subDir = cacheSubdirPrefix + d1 + "/" + cacheSubdirPrefix + d2;
File filePath = new File(parentDir, subDir);
// Create the directory chain on first use.
if (!filePath.exists() && !filePath.mkdirs()) {
throw new IOException("Failed to create " + filePath.getPath());
}
return filePath.getAbsolutePath() + "/" + idToCacheFileName(key);
}
/**
 * The cache file path is pmemVolume/BlockPoolId/subdir#/subdir#/BlockId.
 */
public String getCachePath(ExtendedBlockId key) throws IOException {
  final Byte volumeIndex = blockKeyToVolume.get(key);
  // Unknown key: the block was never cached (or was already released).
  return volumeIndex == null ? null : idToCacheFilePath(volumeIndex, key);
}
// Exposes the live block -> volume map for tests; do not mutate.
@VisibleForTesting
Map<ExtendedBlockId, Byte> getBlockKeyToVolume() {
return blockKeyToVolume;
}
}
| UsedBytesCount |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.