language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/cdi/general/hibernatesearch/standard/HibernateSearchStandardCdiSupportTest.java | {
"start": 3567,
"end": 11952
} | class ____ {
@Test
@ExtendWith( Monitor.Resetter.class )
@CdiContainer(beanClasses = {
TheApplicationScopedBean.class,
TheNamedApplicationScopedBean.class,
TheMainNamedApplicationScopedBeanImpl.class,
TheAlternativeNamedApplicationScopedBeanImpl.class,
TheSharedApplicationScopedBean.class,
TheDependentBean.class,
TheNamedDependentBean.class,
TheMainNamedDependentBeanImpl.class,
TheAlternativeNamedDependentBeanImpl.class,
TheNestedDependentBean.class,
TheNonHibernateBeanConsumer.class
})
public void testIt(CdiContainerScope cdiContainerScope) {
assertFalse( cdiContainerScope.isContainerAvailable() );
final TheFallbackBeanInstanceProducer fallbackBeanInstanceProducer = new TheFallbackBeanInstanceProducer();
final HibernateSearchSimulatedIntegrator beanConsumingIntegrator = new HibernateSearchSimulatedIntegrator( fallbackBeanInstanceProducer );
try (SeContainer cdiContainer = cdiContainerScope.getContainer()) {
// Simulate CDI bean consumers outside of Hibernate ORM
Instance<TheNonHibernateBeanConsumer> nonHibernateBeanConsumerInstance =
cdiContainer.getBeanManager().createInstance().select( TheNonHibernateBeanConsumer.class );
nonHibernateBeanConsumerInstance.get();
// Expect the shared bean to have been instantiated already, but only that one
assertEquals( 0, Monitor.theApplicationScopedBean().currentInstantiationCount() );
assertEquals( 0, Monitor.theMainNamedApplicationScopedBean().currentInstantiationCount() );
assertEquals( 0, Monitor.theAlternativeNamedApplicationScopedBean().currentInstantiationCount() );
assertEquals( 1, Monitor.theSharedApplicationScopedBean().currentInstantiationCount() );
assertEquals( 0, Monitor.theDependentBean().currentInstantiationCount() );
assertEquals( 0, Monitor.theMainNamedDependentBean().currentInstantiationCount() );
assertEquals( 0, Monitor.theAlternativeNamedDependentBean().currentInstantiationCount() );
assertEquals( 0, fallbackBeanInstanceProducer.currentInstantiationCount() );
assertEquals( 0, fallbackBeanInstanceProducer.currentNamedInstantiationCount() );
// Nested dependent bean: 1 instance per bean that depends on it
assertEquals( 1, Monitor.theNestedDependentBean().currentInstantiationCount() );
try (SessionFactoryImplementor sessionFactory = buildSessionFactory( cdiContainer, beanConsumingIntegrator )) {
// Here, the HibernateSearchSimulatedIntegrator has just been integrated and has requested beans
// See HibernateSearchSimulatedIntegrator for a detailed list of requested beans
beanConsumingIntegrator.ensureInstancesInitialized();
// Application scope: maximum 1 instance as soon as at least one was requested
assertEquals( 1, Monitor.theApplicationScopedBean().currentInstantiationCount() );
assertEquals( 1, Monitor.theMainNamedApplicationScopedBean().currentInstantiationCount() );
assertEquals( 0, Monitor.theAlternativeNamedApplicationScopedBean().currentInstantiationCount() );
assertEquals( 1, Monitor.theSharedApplicationScopedBean().currentInstantiationCount() );
// Dependent scope: 1 instance per bean we requested explicitly
assertEquals( 2, Monitor.theDependentBean().currentInstantiationCount() );
assertEquals( 2, Monitor.theMainNamedDependentBean().currentInstantiationCount() );
assertEquals( 0, Monitor.theAlternativeNamedDependentBean().currentInstantiationCount() );
// Reflection-instantiated: 1 instance per bean we requested explicitly
assertEquals( 2, fallbackBeanInstanceProducer.currentInstantiationCount() );
assertEquals( 2, fallbackBeanInstanceProducer.currentNamedInstantiationCount() );
// Nested dependent bean: 1 instance per bean that depends on it
assertEquals( 7, Monitor.theNestedDependentBean().currentInstantiationCount() );
// Expect one PostConstruct call per CDI bean instance
assertEquals( 1, Monitor.theApplicationScopedBean().currentPostConstructCount() );
assertEquals( 1, Monitor.theMainNamedApplicationScopedBean().currentPostConstructCount() );
assertEquals( 0, Monitor.theAlternativeNamedApplicationScopedBean().currentPostConstructCount() );
assertEquals( 1, Monitor.theSharedApplicationScopedBean().currentPostConstructCount() );
assertEquals( 2, Monitor.theDependentBean().currentPostConstructCount() );
assertEquals( 2, Monitor.theMainNamedDependentBean().currentPostConstructCount() );
assertEquals( 0, Monitor.theAlternativeNamedDependentBean().currentPostConstructCount() );
assertEquals( 7, Monitor.theNestedDependentBean().currentPostConstructCount() );
// Expect no PreDestroy call yet
assertEquals( 0, Monitor.theApplicationScopedBean().currentPreDestroyCount() );
assertEquals( 0, Monitor.theMainNamedApplicationScopedBean().currentPreDestroyCount() );
assertEquals( 0, Monitor.theAlternativeNamedApplicationScopedBean().currentPreDestroyCount() );
assertEquals( 0, Monitor.theSharedApplicationScopedBean().currentPreDestroyCount() );
assertEquals( 0, Monitor.theDependentBean().currentPreDestroyCount() );
assertEquals( 0, Monitor.theMainNamedDependentBean().currentPreDestroyCount() );
assertEquals( 0, Monitor.theAlternativeNamedDependentBean().currentPreDestroyCount() );
assertEquals( 0, Monitor.theNestedDependentBean().currentPreDestroyCount() );
}
// Here, the HibernateSearchSimulatedIntegrator has just been disintegrated and has released beans
// release() should have an effect on exclusively used application-scoped beans
assertEquals( 1, Monitor.theApplicationScopedBean().currentPreDestroyCount() );
assertEquals( 1, Monitor.theMainNamedApplicationScopedBean().currentPreDestroyCount() );
assertEquals( 0, Monitor.theAlternativeNamedApplicationScopedBean().currentPreDestroyCount() );
// release() should have no effect on shared application-scoped beans (they will be released when they are no longer used)
assertEquals( 0, Monitor.theSharedApplicationScopedBean().currentPreDestroyCount() );
// release() should have an effect on dependent-scoped beans
assertEquals( 2, Monitor.theDependentBean().currentPreDestroyCount() );
assertEquals( 2, Monitor.theMainNamedDependentBean().currentPreDestroyCount() );
assertEquals( 0, Monitor.theAlternativeNamedDependentBean().currentPreDestroyCount() );
// The nested dependent bean instances should have been destroyed along with the beans that depend on them
// (the instances used in application-scoped beans should not have been destroyed)
assertEquals( 6, Monitor.theNestedDependentBean().currentPreDestroyCount() );
}
// After the CDI context has ended, PreDestroy should have been called on every created CDI bean
// (see the assertions about instantiations above for an explanation of the expected counts)
assertEquals( 1, Monitor.theApplicationScopedBean().currentPreDestroyCount() );
assertEquals( 1, Monitor.theMainNamedApplicationScopedBean().currentPreDestroyCount() );
assertEquals( 0, Monitor.theAlternativeNamedApplicationScopedBean().currentPreDestroyCount() );
assertEquals( 1, Monitor.theSharedApplicationScopedBean().currentPreDestroyCount() );
assertEquals( 2, Monitor.theDependentBean().currentPreDestroyCount() );
assertEquals( 2, Monitor.theMainNamedDependentBean().currentPreDestroyCount() );
assertEquals( 0, Monitor.theAlternativeNamedDependentBean().currentPreDestroyCount() );
assertEquals( 7, Monitor.theNestedDependentBean().currentPreDestroyCount() );
}
private SessionFactoryImplementor buildSessionFactory(SeContainer cdiContainer,
HibernateSearchSimulatedIntegrator beanConsumingIntegrator) {
BootstrapServiceRegistry bsr = new BootstrapServiceRegistryBuilder()
.applyIntegrator( beanConsumingIntegrator )
.build();
final StandardServiceRegistry ssr = ServiceRegistryUtil.serviceRegistryBuilder( bsr )
.applySetting( AvailableSettings.HBM2DDL_AUTO, Action.CREATE_DROP )
.applySetting( AvailableSettings.CDI_BEAN_MANAGER, cdiContainer.getBeanManager() )
.build();
try {
return (SessionFactoryImplementor) new MetadataSources( ssr )
.addAnnotatedClass( TheEntity.class )
.buildMetadata()
.getSessionFactoryBuilder()
.build();
}
catch ( Exception e ) {
StandardServiceRegistryBuilder.destroy( ssr );
throw e;
}
}
}
| HibernateSearchStandardCdiSupportTest |
java | playframework__playframework | documentation/manual/working/commonGuide/filters/code/javaguide/detailed/filters/csp/AssetCache.java | {
"start": 279,
"end": 415
} | class ____ {
public List<String> cspHashes() {
return Collections.singletonList("sha256-HELLO");
}
}
// #java-asset-cache
| AssetCache |
java | apache__maven | impl/maven-core/src/main/java/org/apache/maven/lifecycle/internal/DefaultLifecyclePluginAnalyzer.java | {
"start": 1909,
"end": 2043
} | class ____ not part of any public api and can be changed or deleted without prior notice.
*
* @since 3.0
*/
@Singleton
@Named
public | is |
java | google__error-prone | check_api/src/main/java/com/google/errorprone/dataflow/DataFlow.java | {
"start": 10045,
"end": 10694
} | class ____ {
abstract ForwardTransferFunction<?, ?> transferFunction();
abstract ControlFlowGraph cfg();
// Should not be used for hashCode or equals
private ProcessingEnvironment environment;
private static AnalysisParams create(
ForwardTransferFunction<?, ?> transferFunction,
ControlFlowGraph cfg,
ProcessingEnvironment environment) {
AnalysisParams ap = new AutoValue_DataFlow_AnalysisParams(transferFunction, cfg);
ap.environment = environment;
return ap;
}
ProcessingEnvironment environment() {
return environment;
}
}
private DataFlow() {}
}
| AnalysisParams |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/transport/RemoteClusterCredentialsManagerTests.java | {
"start": 854,
"end": 5880
} | class ____ extends ESTestCase {
public void testResolveRemoteClusterCredentials() {
final String clusterAlias = randomAlphaOfLength(9);
final String otherClusterAlias = randomAlphaOfLength(10);
final RemoteClusterCredentialsManager credentialsManager = new RemoteClusterCredentialsManager(Settings.EMPTY);
{
final String secret = randomAlphaOfLength(20);
final Settings settings = buildSettingsWithCredentials(clusterAlias, secret);
final RemoteClusterCredentialsManager.UpdateRemoteClusterCredentialsResult actual = credentialsManager.updateClusterCredentials(
settings
);
assertThat(actual.addedClusterAliases(), containsInAnyOrder(clusterAlias));
assertThat(actual.removedClusterAliases(), is(empty()));
assertThat(credentialsManager.resolveCredentials(clusterAlias).toString(), equalTo(secret));
assertThat(credentialsManager.hasCredentials(otherClusterAlias), is(false));
}
{
final String updatedSecret = randomAlphaOfLength(21);
final RemoteClusterCredentialsManager.UpdateRemoteClusterCredentialsResult actual = credentialsManager.updateClusterCredentials(
buildSettingsWithCredentials(clusterAlias, updatedSecret)
);
assertThat(credentialsManager.resolveCredentials(clusterAlias).toString(), equalTo(updatedSecret));
assertThat(actual.addedClusterAliases(), is(empty()));
assertThat(actual.removedClusterAliases(), is(empty()));
}
{
final RemoteClusterCredentialsManager.UpdateRemoteClusterCredentialsResult actual = credentialsManager.updateClusterCredentials(
Settings.EMPTY
);
assertThat(actual.addedClusterAliases(), is(empty()));
assertThat(actual.removedClusterAliases(), containsInAnyOrder(clusterAlias));
assertThat(credentialsManager.hasCredentials(clusterAlias), is(false));
}
}
public void testUpdateRemoteClusterCredentials() {
final String clusterAlias = randomAlphaOfLength(9);
final String otherClusterAlias = randomAlphaOfLength(10);
final RemoteClusterCredentialsManager credentialsManager = new RemoteClusterCredentialsManager(Settings.EMPTY);
// addition
{
final RemoteClusterCredentialsManager.UpdateRemoteClusterCredentialsResult actual = credentialsManager.updateClusterCredentials(
buildSettingsWithRandomCredentialsForAliases(clusterAlias, otherClusterAlias)
);
assertThat(actual.addedClusterAliases(), containsInAnyOrder(clusterAlias, otherClusterAlias));
assertThat(actual.removedClusterAliases(), is(empty()));
}
// update and removal
{
final RemoteClusterCredentialsManager.UpdateRemoteClusterCredentialsResult actual = credentialsManager.updateClusterCredentials(
buildSettingsWithRandomCredentialsForAliases(clusterAlias)
);
assertThat(actual.addedClusterAliases(), is(empty()));
assertThat(actual.removedClusterAliases(), containsInAnyOrder(otherClusterAlias));
}
// addition and removal
{
final RemoteClusterCredentialsManager.UpdateRemoteClusterCredentialsResult actual = credentialsManager.updateClusterCredentials(
buildSettingsWithRandomCredentialsForAliases(otherClusterAlias)
);
assertThat(actual.addedClusterAliases(), containsInAnyOrder(otherClusterAlias));
assertThat(actual.removedClusterAliases(), containsInAnyOrder(clusterAlias));
}
// removal
{
final RemoteClusterCredentialsManager.UpdateRemoteClusterCredentialsResult actual = credentialsManager.updateClusterCredentials(
Settings.EMPTY
);
assertThat(actual.addedClusterAliases(), is(empty()));
assertThat(actual.removedClusterAliases(), containsInAnyOrder(otherClusterAlias));
}
}
private Settings buildSettingsWithCredentials(String clusterAlias, String secret) {
final Settings.Builder builder = Settings.builder();
final MockSecureSettings secureSettings = new MockSecureSettings();
secureSettings.setString("cluster.remote." + clusterAlias + ".credentials", secret);
return builder.setSecureSettings(secureSettings).build();
}
private Settings buildSettingsWithRandomCredentialsForAliases(String... clusterAliases) {
final Settings.Builder builder = Settings.builder();
final MockSecureSettings secureSettings = new MockSecureSettings();
for (var alias : clusterAliases) {
secureSettings.setString("cluster.remote." + alias + ".credentials", randomAlphaOfLength(42));
}
return builder.setSecureSettings(secureSettings).build();
}
}
| RemoteClusterCredentialsManagerTests |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/bytearray/ByteArrayAssert_asHexString_Test.java | {
"start": 1507,
"end": 3955
} | class ____ {
private static final byte[] BYTES = new byte[] { -1, 0, 1 };
@Test
void should_pass() {
// GIVEN
// WHEN / THEN
assertThat(BYTES).asHexString()
.startsWith("FF")
.isEqualTo("FF0001");
}
@Test
void should_fail_if_actual_does_not_match() {
// GIVEN
byte[] actual = new byte[] { -1, 0, 1 };
// WHEN
var assertionError = expectAssertionError(() -> assertThat(actual).asHexString().isEqualTo("010203"));
// THEN
assertThat(assertionError).hasMessage(shouldBeEqualMessage("\"FF0001\"", "\"010203\""))
.isExactlyInstanceOf(AssertionFailedError.class);
}
@Test
void should_fail_if_actual_is_null() {
// GIVEN
byte[] bytes = null;
// WHEN
var error = expectAssertionError(() -> assertThat(bytes).asHexString());
// THEN
assertThat(error).hasMessage(actualIsNull());
}
@Test
void should_pass_with_soft_assertions() {
// GIVEN
SoftAssertions softly = new SoftAssertions();
// WHEN / THEN
softly.assertThat(BYTES).asHexString().isEqualTo("FF0001");
softly.assertAll();
}
@Test
void should_fail_with_soft_assertions_capturing_all_errors() {
// GIVEN
SoftAssertions softly = new SoftAssertions();
// WHEN
softly.assertThat(BYTES)
.asHexString()
.isEqualTo("010203")
.isBlank();
var assertionError = expectAssertionError(softly::assertAll);
// THEN
assertThat(assertionError).hasMessageContainingAll("Multiple Failures (2 failures)",
"-- failure 1 --",
shouldBeEqualMessage("\"FF0001\"", "\"010203\""),
"-- failure 2 --",
"Expecting blank but was: \"FF0001\"")
.isExactlyInstanceOf(AssertJMultipleFailuresError.class);
}
@Test
void should_ignore_test_when_assumption_for_internally_created_hex_string_assertion_fails() {
expectAssumptionNotMetException(() -> assumeThat(BYTES).asHexString().isEqualTo("other"));
}
@Test
void should_run_test_when_assumption_for_internally_created_string_passes() {
assertThatCode(() -> assumeThat(BYTES).asHexString().startsWith("FF")).doesNotThrowAnyException();
}
}
| ByteArrayAssert_asHexString_Test |
java | apache__flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/abilities/source/LimitPushDownSpec.java | {
"start": 1732,
"end": 3223
} | class ____ extends SourceAbilitySpecBase {
public static final String FIELD_NAME_LIMIT = "limit";
@JsonProperty(FIELD_NAME_LIMIT)
private final long limit;
@JsonCreator
public LimitPushDownSpec(@JsonProperty(FIELD_NAME_LIMIT) long limit) {
this.limit = limit;
}
@Override
public void apply(DynamicTableSource tableSource, SourceAbilityContext context) {
if (tableSource instanceof SupportsLimitPushDown) {
((SupportsLimitPushDown) tableSource).applyLimit(limit);
} else {
throw new TableException(
String.format(
"%s does not support SupportsLimitPushDown.",
tableSource.getClass().getName()));
}
}
@Override
public boolean needAdjustFieldReferenceAfterProjection() {
return false;
}
@Override
public String getDigests(SourceAbilityContext context) {
return "limit=[" + this.limit + "]";
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
if (!super.equals(o)) {
return false;
}
LimitPushDownSpec that = (LimitPushDownSpec) o;
return limit == that.limit;
}
@Override
public int hashCode() {
return Objects.hash(super.hashCode(), limit);
}
}
| LimitPushDownSpec |
java | bumptech__glide | library/src/main/java/com/bumptech/glide/load/engine/EngineJobListener.java | {
"start": 78,
"end": 263
} | interface ____ {
void onEngineJobComplete(EngineJob<?> engineJob, Key key, EngineResource<?> resource);
void onEngineJobCancelled(EngineJob<?> engineJob, Key key);
}
| EngineJobListener |
java | apache__spark | core/src/test/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriterSuite.java | {
"start": 10569,
"end": 10692
} | class ____ extends RuntimeException {
}
@Test
public void writeFailurePropagates() throws Exception {
| PandaException |
java | quarkusio__quarkus | independent-projects/resteasy-reactive/server/runtime/src/main/java/org/jboss/resteasy/reactive/server/core/startup/RuntimeDeploymentManager.java | {
"start": 2883,
"end": 16584
} | class ____ {
public static final ServerRestHandler[] EMPTY_REST_HANDLER_ARRAY = new ServerRestHandler[0];
private final DeploymentInfo info;
private final Supplier<Executor> executorSupplier;
private final Supplier<Executor> virtualExecutorSupplier;
private final Consumer<Closeable> closeTaskHandler;
private final RequestContextFactory requestContextFactory;
private final ThreadSetupAction threadSetupAction;
private final String rootPath;
private ArrayList<RequestMapper.RequestPath<RestInitialHandler.InitialMatch>> classMappers;
public RuntimeDeploymentManager(DeploymentInfo info,
Supplier<Executor> executorSupplier,
Supplier<Executor> virtualExecutorSupplier,
Consumer<Closeable> closeTaskHandler,
RequestContextFactory requestContextFactory, ThreadSetupAction threadSetupAction, String rootPath) {
this.info = info;
this.executorSupplier = executorSupplier;
this.virtualExecutorSupplier = virtualExecutorSupplier;
this.closeTaskHandler = closeTaskHandler;
this.requestContextFactory = requestContextFactory;
this.threadSetupAction = threadSetupAction;
this.rootPath = rootPath;
}
public Deployment deploy() {
ResourceInterceptors interceptors = info.getInterceptors();
ServerSerialisers serialisers = info.getSerialisers();
Features features = info.getFeatures();
ExceptionMapping exceptionMapping = info.getExceptionMapping();
List<ResourceClass> resourceClasses = info.getResourceClasses();
List<ResourceClass> locatableResourceClasses = info.getLocatableResourceClasses();
ParamConverterProviders paramConverterProviders = info.getParamConverterProviders();
Supplier<Application> applicationSupplier = info.getApplicationSupplier();
String applicationPath = info.getApplicationPath();
DynamicEntityWriter dynamicEntityWriter = new DynamicEntityWriter(serialisers);
RuntimeExceptionMapper exceptionMapper = new RuntimeExceptionMapper(exceptionMapping,
Thread.currentThread().getContextClassLoader());
ConfigurationImpl configurationImpl = configureFeatures(features, interceptors, exceptionMapper);
RuntimeInterceptorDeployment interceptorDeployment = new RuntimeInterceptorDeployment(info, configurationImpl,
closeTaskHandler);
ResourceLocatorHandler resourceLocatorHandler = new ResourceLocatorHandler(
new Function<>() {
@Override
public BeanFactory.BeanInstance<?> apply(Class<?> aClass) {
return info.getFactoryCreator().apply(aClass).createInstance();
}
}, info.getClientProxyUnwrapper());
// sanitise the prefix for our usage to make it either an empty string, or something which starts with a / and does not
// end with one
String prefix = rootPath;
if (prefix != null) {
prefix = sanitizePathPrefix(prefix);
} else {
prefix = "";
}
if ((applicationPath != null) && !applicationPath.isEmpty()) {
prefix = prefix + sanitizePathPrefix(applicationPath);
}
// to use it inside lambdas
String finalPrefix = prefix;
List<GenericRuntimeConfigurableServerRestHandler<?>> runtimeConfigurableServerRestHandlers = new ArrayList<>();
RuntimeResourceDeployment runtimeResourceDeployment = new RuntimeResourceDeployment(info, executorSupplier,
virtualExecutorSupplier,
interceptorDeployment, dynamicEntityWriter, resourceLocatorHandler, requestContextFactory.isDefaultBlocking());
List<ResourceClass> possibleSubResource = new ArrayList<>(locatableResourceClasses);
possibleSubResource.addAll(resourceClasses); //the TCK uses normal resources also as sub resources
Map<String, List<String>> disabledEndpoints = new HashMap<>();
for (int i = 0; i < possibleSubResource.size(); i++) {
ResourceClass clazz = possibleSubResource.get(i);
Map<String, TreeMap<URITemplate, List<RequestMapper.RequestPath<RuntimeResource>>>> templates = new HashMap<>();
URITemplate classPathTemplate = clazz.getPath() == null ? null : new URITemplate(clazz.getPath(), true);
for (int j = 0; j < clazz.getMethods().size(); j++) {
ResourceMethod method = clazz.getMethods().get(j);
RuntimeResource runtimeResource = runtimeResourceDeployment.buildResourceMethod(
clazz, (ServerResourceMethod) method, true, classPathTemplate, info);
addRuntimeConfigurableHandlers(runtimeResource, runtimeConfigurableServerRestHandlers);
RuntimeMappingDeployment.buildMethodMapper(templates, method, runtimeResource);
}
Map<String, RequestMapper<RuntimeResource>> mappersByMethod = new RuntimeMappingDeployment(templates)
.buildClassMapper();
boolean isResourceClassDisabled = (clazz.getIsDisabled() != null) && clazz.getIsDisabled().get();
if (isResourceClassDisabled) {
mappersByMethod.forEach(new BiConsumer<>() {
@Override
public void accept(String method, RequestMapper<RuntimeResource> mapper) {
for (int i = 0; i < mapper.getTemplates().size(); i++) {
RequestMapper.RequestPath<RuntimeResource> path = mapper.getTemplates().get(i);
String templateWithoutSlash = path.template.template.startsWith("/")
? path.template.template.substring(1)
: path.template.template;
String fullPath = clazz.getPath().endsWith("/")
? finalPrefix + clazz.getPath() + templateWithoutSlash
: finalPrefix + clazz.getPath() + "/" + templateWithoutSlash;
if (!disabledEndpoints.containsKey(fullPath)) {
disabledEndpoints.put(fullPath, new ArrayList<>());
}
disabledEndpoints.get(fullPath).add(method);
}
}
});
} else {
resourceLocatorHandler.addResource(loadClass(clazz.getClassName()), mappersByMethod);
}
}
//it is possible that multiple resource classes use the same path
//we use this map to merge them
Map<MappersKey, Map<String, TreeMap<URITemplate, List<RequestMapper.RequestPath<RuntimeResource>>>>> mappers = new TreeMap<>();
for (int i = 0; i < resourceClasses.size(); i++) {
ResourceClass clazz = resourceClasses.get(i);
if ((clazz.getIsDisabled() != null) && clazz.getIsDisabled().get()) {
continue;
}
URITemplate classTemplate = new URITemplate(clazz.getPath(), true);
MappersKey key = new MappersKey(classTemplate);
var perClassMappers = mappers.get(key);
if (perClassMappers == null) {
mappers.put(key, perClassMappers = new HashMap<>());
}
for (int j = 0; j < clazz.getMethods().size(); j++) {
ResourceMethod method = clazz.getMethods().get(j);
RuntimeResource runtimeResource = runtimeResourceDeployment.buildResourceMethod(
clazz, (ServerResourceMethod) method, false, classTemplate, info);
addRuntimeConfigurableHandlers(runtimeResource, runtimeConfigurableServerRestHandlers);
RuntimeMappingDeployment.buildMethodMapper(perClassMappers, method, runtimeResource);
}
}
classMappers = new ArrayList<>(mappers.size());
mappers.forEach(this::forEachMapperEntry);
List<ServerRestHandler> abortHandlingChain = new ArrayList<>(3);
if (interceptorDeployment.getGlobalInterceptorHandler() != null) {
abortHandlingChain.add(interceptorDeployment.getGlobalInterceptorHandler());
}
if (info.getPreExceptionMapperHandler() != null) {
abortHandlingChain.add(info.getPreExceptionMapperHandler());
}
abortHandlingChain.add(new ExceptionHandler());
abortHandlingChain.add(ResponseHandler.NO_CUSTOMIZER_INSTANCE);
if (!interceptors.getContainerResponseFilters().getGlobalResourceInterceptors().isEmpty()) {
abortHandlingChain.addAll(interceptorDeployment.getGlobalResponseInterceptorHandlers());
}
abortHandlingChain.add(new ResponseWriterHandler(dynamicEntityWriter));
//pre matching interceptors are run first
List<ServerRestHandler> preMatchHandlers = new ArrayList<>();
for (int i = 0; i < info.getGlobalHandlerCustomizers().size(); i++) {
preMatchHandlers
.addAll(info.getGlobalHandlerCustomizers().get(i).handlers(HandlerChainCustomizer.Phase.BEFORE_PRE_MATCH,
null, null));
}
if (!interceptors.getContainerRequestFilters().getPreMatchInterceptors().isEmpty()) {
preMatchHandlers = new ArrayList<>(interceptorDeployment.getPreMatchContainerRequestFilters().size());
for (Map.Entry<ResourceInterceptor<ContainerRequestFilter>, ContainerRequestFilter> entry : interceptorDeployment
.getPreMatchContainerRequestFilters()
.entrySet()) {
preMatchHandlers
.add(new ResourceRequestFilterHandler(entry.getValue(), true, entry.getKey().isNonBlockingRequired(),
entry.getKey().isWithFormRead()));
}
}
for (int i = 0; i < info.getGlobalHandlerCustomizers().size(); i++) {
preMatchHandlers
.addAll(info.getGlobalHandlerCustomizers().get(i).handlers(HandlerChainCustomizer.Phase.AFTER_PRE_MATCH,
null, null));
}
return new Deployment(exceptionMapping, info.getCtxResolvers(), serialisers,
abortHandlingChain.toArray(EMPTY_REST_HANDLER_ARRAY), dynamicEntityWriter,
prefix, paramConverterProviders, configurationImpl, applicationSupplier,
threadSetupAction, requestContextFactory, preMatchHandlers, classMappers,
runtimeConfigurableServerRestHandlers, exceptionMapper, info.isServletPresent(),
info.getResteasyReactiveConfig(),
disabledEndpoints);
}
private void forEachMapperEntry(MappersKey key,
Map<String, TreeMap<URITemplate, List<RequestMapper.RequestPath<RuntimeResource>>>> classTemplates) {
int classTemplateNameCount = key.path.countPathParamNames();
RuntimeMappingDeployment runtimeMappingDeployment = new RuntimeMappingDeployment(classTemplates);
ClassRoutingHandler classRoutingHandler = new ClassRoutingHandler(runtimeMappingDeployment.buildClassMapper(),
classTemplateNameCount, info.isServletPresent());
classMappers.add(new RequestMapper.RequestPath<>(true, key.path,
new RestInitialHandler.InitialMatch(new ServerRestHandler[] { classRoutingHandler },
runtimeMappingDeployment.getMaxMethodTemplateNameCount() + classTemplateNameCount)));
}
private void addRuntimeConfigurableHandlers(RuntimeResource runtimeResource,
List<GenericRuntimeConfigurableServerRestHandler<?>> runtimeConfigurableServerRestHandlers) {
for (ServerRestHandler serverRestHandler : runtimeResource.getHandlerChain()) {
if (serverRestHandler instanceof GenericRuntimeConfigurableServerRestHandler) {
runtimeConfigurableServerRestHandlers.add((GenericRuntimeConfigurableServerRestHandler<?>) serverRestHandler);
}
}
}
private ConfigurationImpl configureFeatures(Features features, ResourceInterceptors interceptors,
RuntimeExceptionMapper exceptionMapping) {
ConfigurationImpl configuration = new ConfigurationImpl(RuntimeType.SERVER);
if (features.getResourceFeatures().isEmpty()) {
return configuration;
}
FeatureContextImpl featureContext = new FeatureContextImpl(interceptors, exceptionMapping,
configuration, info.getFactoryCreator());
List<ResourceFeature> resourceFeatures = features.getResourceFeatures();
for (int i = 0; i < resourceFeatures.size(); i++) {
ResourceFeature resourceFeature = resourceFeatures.get(i);
Feature feature = resourceFeature.getFactory().createInstance().getInstance();
boolean enabled = feature.configure(featureContext);
if (enabled) {
configuration.addEnabledFeature(feature);
}
}
if (featureContext.isFiltersNeedSorting()) {
interceptors.sort();
}
return configuration;
}
private String sanitizePathPrefix(String prefix) {
prefix = prefix.trim();
if (prefix.equals("/"))
prefix = "";
// add leading slash
if (!prefix.startsWith("/"))
prefix = "/" + prefix;
// remove trailing slash
if (prefix.endsWith("/"))
prefix = prefix.substring(0, prefix.length() - 1);
return prefix;
}
private static | RuntimeDeploymentManager |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/impl/BeanInjectRouteBuilderTest.java | {
"start": 1247,
"end": 2844
} | class ____ extends ContextTestSupport {
@BeanInject
private FooBar foo;
@Override
protected Registry createCamelRegistry() throws Exception {
Registry registry = super.createCamelRegistry();
registry.bind("foo", new FooBar());
return registry;
}
@Override
protected CamelContext createCamelContext() throws Exception {
CamelContext context = super.createCamelContext();
// manual post process us as ContextTestSupport in camel-core doesn't do
// that out of the box
CamelBeanPostProcessor post = PluginHelper.getBeanPostProcessor(context);
post.postProcessBeforeInitialization(this, "MyRoute");
post.postProcessAfterInitialization(this, "MyRoute");
return context;
}
@Test
public void testBeanInject() throws Exception {
getMockEndpoint("mock:result").expectedBodiesReceived("Hello World");
template.sendBody("direct:start", "World");
assertMockEndpointsSatisfied();
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start").process(new Processor() {
@Override
public void process(Exchange exchange) {
String out = foo.hello(exchange.getIn().getBody(String.class));
exchange.getIn().setBody(out);
}
}).to("mock:result");
}
};
}
}
| BeanInjectRouteBuilderTest |
java | hibernate__hibernate-orm | tooling/hibernate-maven-plugin/src/test/java/org/hibernate/orm/tooling/maven/HibernateEnhancerMojoTest.java | {
"start": 27367,
"end": 32616
} | class ____ { "+
" private Bar bar; " +
" Bar getBar() { return bar; } " +
" public void setBar(Bar b) { bar = b; } " +
"}";
File fooJavaFile = new File(fooFolder, "Foo.java");
Files.writeString(fooJavaFile.toPath(), fooSource);
File fooClassFile = new File(fooFolder, "Foo.class");
JavaCompiler compiler = ToolProvider.getSystemJavaCompiler();
URL url = Entity.class.getProtectionDomain().getCodeSource().getLocation();
String classpath = new File(url.toURI()).getAbsolutePath();
String[] options = List.of(
"-cp",
classpath,
barJavaFile.getAbsolutePath(),
fooJavaFile.getAbsolutePath()).toArray(new String[] {});
compiler.run(null, null, null, options);
String barBytesString = new String(Files.readAllBytes(barClassFile.toPath()));
String fooBytesString = new String(Files.readAllBytes(fooClassFile.toPath()));
List<File> sourceSet = new ArrayList<File>();
sourceSet.add(barClassFile);
sourceSet.add(fooClassFile);
sourceSetField.set(enhanceMojo, sourceSet);
assertTrue(logMessages.isEmpty());
executeMethod.invoke(enhanceMojo);
assertNotEquals(barBytesString, new String(Files.readAllBytes(barClassFile.toPath())));
assertEquals(fooBytesString, new String(Files.readAllBytes(fooClassFile.toPath())));
URLClassLoader classLoader = new URLClassLoader(
new URL[] {classesDirectory.toURI().toURL()},
getClass().getClassLoader());
Class<?> barClass = classLoader.loadClass("org.foo.Bar");
assertNotNull(barClass);
Method m = barClass.getMethod("$$_hibernate_getEntityInstance", new Class[]{});
assertNotNull(m);
Class<?> fooClass = classLoader.loadClass("org.foo.Foo");
try {
m = fooClass.getMethod("$$_hibernate_getEntityInstance", new Class[]{});
fail();
} catch (NoSuchMethodException e) {
assertEquals("org.foo.Foo.$$_hibernate_getEntityInstance()", e.getMessage());
}
classLoader.close();
// verify in the log messages at least if all the needed methods have been invoked
assertTrue(logMessages.contains(DEBUG + HibernateEnhancerMojo.STARTING_EXECUTION_OF_ENHANCE_MOJO));
assertTrue(logMessages.contains(DEBUG + HibernateEnhancerMojo.ADDED_DEFAULT_FILESET_WITH_BASE_DIRECTORY.formatted(classesDirectory)));
assertTrue(logMessages.contains(DEBUG + HibernateEnhancerMojo.STARTING_ASSEMBLY_OF_SOURCESET));
assertTrue(logMessages.contains(DEBUG + HibernateEnhancerMojo.CREATE_BYTECODE_ENHANCER));
assertTrue(logMessages.contains(DEBUG + HibernateEnhancerMojo.STARTING_TYPE_DISCOVERY));
assertTrue(logMessages.contains(DEBUG + HibernateEnhancerMojo.STARTING_CLASS_ENHANCEMENT));
assertTrue(logMessages.contains(DEBUG + HibernateEnhancerMojo.ENDING_EXECUTION_OF_ENHANCE_MOJO));
}
@Test
void testProcessParameters() throws Exception {
Method processParametersMethod = HibernateEnhancerMojo.class.getDeclaredMethod(
"processParameters",
new Class[] {});
processParametersMethod.setAccessible(true);
Field enableLazyInitializationField = HibernateEnhancerMojo.class.getDeclaredField("enableLazyInitialization");
enableLazyInitializationField.setAccessible(true);
Field enableDirtyTrackingField = HibernateEnhancerMojo.class.getDeclaredField("enableDirtyTracking");
enableDirtyTrackingField.setAccessible(true);
assertTrue(logMessages.isEmpty());
assertNull(fileSetsField.get(enhanceMojo));
processParametersMethod.invoke(enhanceMojo);
assertEquals(3, logMessages.size());
assertTrue(logMessages.contains(WARNING + HibernateEnhancerMojo.ENABLE_LAZY_INITIALIZATION_DEPRECATED));
assertTrue(logMessages.contains(WARNING + HibernateEnhancerMojo.ENABLE_DIRTY_TRACKING_DEPRECATED));
assertTrue(logMessages.contains(DEBUG + HibernateEnhancerMojo.ADDED_DEFAULT_FILESET_WITH_BASE_DIRECTORY.formatted(classesDirectory)));
FileSet[] fileSets = (FileSet[])fileSetsField.get(enhanceMojo);
assertNotNull(fileSets);
assertEquals(1, fileSets.length);
assertEquals(classesDirectory.getAbsolutePath(), fileSets[0].getDirectory());
fileSetsField.set(enhanceMojo, null);
logMessages.clear();
enableLazyInitializationField.set(enhanceMojo, Boolean.TRUE);
enableDirtyTrackingField.set(enhanceMojo, Boolean.TRUE);
processParametersMethod.invoke(enhanceMojo);
assertEquals(1, logMessages.size());
assertTrue(logMessages.contains(DEBUG + HibernateEnhancerMojo.ADDED_DEFAULT_FILESET_WITH_BASE_DIRECTORY.formatted(classesDirectory)));
}
private Log createLog() {
return (Log)Proxy.newProxyInstance(
getClass().getClassLoader(),
new Class[] { Log.class},
new InvocationHandler() {
@Override
public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
if ("info".equals(method.getName())) {
logMessages.add(INFO + args[0]);
} else if ("warn".equals(method.getName())) {
logMessages.add(WARNING + args[0]);
} else if ("error".equals(method.getName())) {
logMessages.add(ERROR + args[0]);
} else if ("debug".equals(method.getName())) {
logMessages.add(DEBUG + args[0]);
}
return null;
}
});
}
static final String DEBUG = "[DEBUG] ";
static final String ERROR = "[ERROR] ";
static final String WARNING = "[WARNING] ";
static final String INFO = "[INFO] ";
}
| Foo |
java | apache__flink | flink-formats/flink-orc/src/main/java/org/apache/flink/orc/OrcFileFormatFactory.java | {
"start": 5617,
"end": 7774
} | class ____
implements BulkDecodingFormat<RowData>,
ProjectableDecodingFormat<BulkFormat<RowData, FileSourceSplit>>,
FileBasedStatisticsReportableInputFormat {
private final ReadableConfig formatOptions;
private List<ResolvedExpression> filters;
public OrcBulkDecodingFormat(ReadableConfig formatOptions) {
this.formatOptions = formatOptions;
}
@Override
public BulkFormat<RowData, FileSourceSplit> createRuntimeDecoder(
DynamicTableSource.Context sourceContext,
DataType producedDataType,
int[][] projections) {
List<OrcFilters.Predicate> orcPredicates = new ArrayList<>();
if (filters != null) {
for (Expression pred : filters) {
OrcFilters.Predicate orcPred = OrcFilters.toOrcPredicate(pred);
if (orcPred != null) {
orcPredicates.add(orcPred);
}
}
}
return OrcColumnarRowInputFormat.createPartitionedFormat(
OrcShim.defaultShim(),
getOrcConfiguration(formatOptions),
(RowType) producedDataType.getLogicalType(),
Collections.emptyList(),
null,
Projection.of(projections).toTopLevelIndexes(),
orcPredicates,
VectorizedColumnBatch.DEFAULT_SIZE,
sourceContext::createTypeInformation);
}
@Override
public ChangelogMode getChangelogMode() {
return ChangelogMode.insertOnly();
}
@Override
public void applyFilters(List<ResolvedExpression> filters) {
this.filters = filters;
}
@Override
public TableStats reportStatistics(List<Path> files, DataType producedDataType) {
return OrcFormatStatisticsReportUtil.getTableStatistics(
files, producedDataType, getOrcConfiguration(formatOptions));
}
}
}
| OrcBulkDecodingFormat |
java | apache__camel | components/camel-sql/src/test/java/org/apache/camel/component/sql/SqlDataSourceRefTest.java | {
"start": 1482,
"end": 3739
} | class ____ extends CamelTestSupport {
private EmbeddedDatabase db;
@Override
protected void bindToRegistry(Registry registry) throws Exception {
// START SNIPPET: e2
// this is the database we create with some initial data for our unit test
db = new EmbeddedDatabaseBuilder()
.setName(getClass().getSimpleName())
.setType(EmbeddedDatabaseType.H2)
.addScript("sql/createAndPopulateDatabase.sql").build();
// END SNIPPET: e2
registry.bind("jdbc/myDataSource", db);
}
@Test
public void testSimpleBody() throws Exception {
// START SNIPPET: e3
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedMessageCount(1);
// send the query to direct that will route it to the sql where we will execute the query
// and bind the parameters with the data from the body. The body only contains one value
// in this case (XXX) but if we should use multi values then the body will be iterated
// so we could supply a List<String> instead containing each binding value.
template.sendBody("direct:simple", "XXX");
mock.assertIsSatisfied();
// the result is a List
List<?> received = assertIsInstanceOf(List.class, mock.getReceivedExchanges().get(0).getIn().getBody());
// and each row in the list is a Map
Map<?, ?> row = assertIsInstanceOf(Map.class, received.get(0));
// and we should be able the get the project from the map that should be Linux
assertEquals("Linux", row.get("PROJECT"));
// END SNIPPET: e3
}
@Override
public void doPostTearDown() throws Exception {
if (db != null) {
db.shutdown();
}
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
// START SNIPPET: e1
from("direct:simple")
.to("sql:select * from projects where license = # order by id?dataSource=#jdbc/myDataSource")
.to("mock:result");
// END SNIPPET: e1
}
};
}
}
| SqlDataSourceRefTest |
java | google__guava | guava/src/com/google/common/util/concurrent/TimeLimiter.java | {
"start": 3064,
"end": 5412
} | interface ____ wish the returned proxy to implement
* @param timeoutDuration with timeoutUnit, the maximum length of time that callers are willing to
* wait on each method call to the proxy
* @param timeoutUnit with timeoutDuration, the maximum length of time that callers are willing to
* wait on each method call to the proxy
* @return a time-limiting proxy
* @throws IllegalArgumentException if {@code interfaceType} is a regular class, enum, or
* annotation type, rather than an interface
*/
@SuppressWarnings("GoodTime") // should accept a java.time.Duration
<T> T newProxy(T target, Class<T> interfaceType, long timeoutDuration, TimeUnit timeoutUnit);
/**
* Returns an instance of {@code interfaceType} that delegates all method calls to the {@code
* target} object, enforcing the specified time limit on each call. This time-limited delegation
* is also performed for calls to {@link Object#equals}, {@link Object#hashCode}, and {@link
* Object#toString}.
*
* <p>If the target method call finishes before the limit is reached, the return value or
* exception is propagated to the caller exactly as-is. If, on the other hand, the time limit is
* reached, the proxy will attempt to abort the call to the target, and will throw an {@link
* UncheckedTimeoutException} to the caller.
*
* <p>It is important to note that the primary purpose of the proxy object is to return control to
* the caller when the timeout elapses; aborting the target method call is of secondary concern.
* The particular nature and strength of the guarantees made by the proxy is
* implementation-dependent. However, it is important that each of the methods on the target
* object behaves appropriately when its thread is interrupted.
*
* <p>For example, to return the value of {@code target.someMethod()}, but substitute {@code
* DEFAULT_VALUE} if this method call takes over 50 ms, you can use this code:
*
* <pre>
* TimeLimiter limiter = . . .;
* TargetType proxy = limiter.newProxy(target, TargetType.class, Duration.ofMillis(50));
* try {
* return proxy.someMethod();
* } catch (UncheckedTimeoutException e) {
* return DEFAULT_VALUE;
* }
* </pre>
*
* @param target the object to proxy
* @param interfaceType the | you |
java | elastic__elasticsearch | x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/StagnatingIndicesFinderTests.java | {
"start": 1900,
"end": 13382
} | class ____ extends ESTestCase {
public void testStagnatingIndicesFinder() {
var maxTimeOnAction = randomTimeValueInDays();
var maxTimeOnStep = randomTimeValueInDays();
Long maxRetriesPerStep = randomLongBetween(2, 100);
var idxMd1 = randomIndexMetadata();
var idxMd2 = randomIndexMetadata();
var idxMd3 = randomIndexMetadata();
var stagnatingIndices = List.of(idxMd1.indexName, idxMd3.indexName);
var mockedTimeSupplier = mock(LongSupplier.class);
var instant = (long) randomIntBetween(100000, 200000);
var ruleCreator = Stream.<IlmHealthIndicatorService.RuleConfig>of(
(now, indexMetadata) -> now == instant && stagnatingIndices.contains(indexMetadata.getIndex().getName())
).map(rc -> (IlmHealthIndicatorService.RuleCreator) (expectedMaxTimeOnAction, expectedMaxTimeOnStep, expectedMaxRetriesPerStep) -> {
assertEquals(expectedMaxTimeOnAction, maxTimeOnAction);
assertEquals(expectedMaxTimeOnStep, maxTimeOnStep);
assertEquals(expectedMaxRetriesPerStep, maxRetriesPerStep);
return rc;
}).toList();
// Per the evaluator, the timeSupplier _must_ be called only twice
when(mockedTimeSupplier.getAsLong()).thenReturn(instant, instant);
var stagnatedIdx1 = indexMetadataFrom(idxMd1);
var stagnatedIdx3 = indexMetadataFrom(idxMd3);
var finder = createStagnatingIndicesFinder(
ruleCreator,
maxTimeOnAction,
maxTimeOnStep,
maxRetriesPerStep,
mockedTimeSupplier,
indexMetadataUnmanaged(randomAlphaOfLength(10)), // non-managed by ILM
stagnatedIdx1, // should be stagnated
indexMetadataFrom(idxMd2), // won't be stagnated
stagnatedIdx3, // should be stagnated
indexMetadataUnmanaged(randomAlphaOfLength(10)) // non-managed by ILM
);
var foundIndices = finder.find();
assertThat(foundIndices, hasSize(2));
assertThat(foundIndices, containsInAnyOrder(stagnatedIdx1, stagnatedIdx3));
}
public void testRecreateRules() {
var maxTimeOnAction = randomTimeValueInDays();
var maxTimeOnStep = randomTimeValueInDays();
long maxRetriesPerStep = randomLongBetween(2, 100);
var accumulator = new ArrayList<Integer>();
var numberOfRuleConfigs = randomIntBetween(3, 20);
var ruleCreators = IntStream.range(0, numberOfRuleConfigs)
.mapToObj(
i -> (IlmHealthIndicatorService.RuleCreator) (
expectedMaxTimeOnAction,
expectedMaxTimeOnStep,
expectedMaxRetriesPerStep) -> {
accumulator.add(i);
return new RuleConfigHolder(expectedMaxTimeOnAction, expectedMaxTimeOnStep, expectedMaxRetriesPerStep);
}
)
.toList();
var finder = createStagnatingIndicesFinder(
ruleCreators,
maxTimeOnAction,
maxTimeOnStep,
maxRetriesPerStep,
mock(LongSupplier.class)
);
// Test: safety-net ensuring that the settings are gathered when the object is created
assertEquals(accumulator, IntStream.range(0, numberOfRuleConfigs).boxed().toList());
var rules = finder.rules();
assertThat(rules, hasSize(numberOfRuleConfigs));
// all the rules will have the same value, so it's enough checking the first one
assertEquals(rules.iterator().next(), new RuleConfigHolder(maxTimeOnAction, maxTimeOnStep, maxRetriesPerStep));
accumulator.clear();
maxTimeOnAction = randomTimeValueInDays();
maxTimeOnStep = randomTimeValueInDays();
maxRetriesPerStep = randomLongBetween(2, 100);
// Test: the method `recreateRules` works as expected
finder.recreateRules(
Settings.builder()
.put(MAX_TIME_ON_ACTION_SETTING.getKey(), maxTimeOnAction)
.put(MAX_TIME_ON_STEP_SETTING.getKey(), maxTimeOnStep)
.put(MAX_RETRIES_PER_STEP_SETTING.getKey(), maxRetriesPerStep)
.build()
);
var newRules = finder.rules();
assertThat(rules, hasSize(numberOfRuleConfigs));
assertNotSame(rules, newRules);
// all the rules will have the same value, so it's enough checking the first one
assertEquals(newRules.iterator().next(), new RuleConfigHolder(maxTimeOnAction, maxTimeOnStep, maxRetriesPerStep));
assertEquals(accumulator, IntStream.range(0, numberOfRuleConfigs).boxed().toList());
accumulator.clear();
rules = finder.rules();
maxTimeOnAction = randomTimeValueInDays();
maxTimeOnStep = randomTimeValueInDays();
maxRetriesPerStep = randomLongBetween(2, 100);
// Test: Force a settings update, ensuring that the method `recreateRules` is called
finder.clusterService()
.getClusterSettings()
.applySettings(
Settings.builder()
.put(MAX_TIME_ON_ACTION_SETTING.getKey(), maxTimeOnAction)
.put(MAX_TIME_ON_STEP_SETTING.getKey(), maxTimeOnStep)
.put(MAX_RETRIES_PER_STEP_SETTING.getKey(), maxRetriesPerStep)
.build()
);
newRules = finder.rules();
assertThat(rules, hasSize(numberOfRuleConfigs));
assertNotSame(rules, newRules);
// all the rules will have the same value, so it's enough checking the first one
assertEquals(newRules.iterator().next(), new RuleConfigHolder(maxTimeOnAction, maxTimeOnStep, maxRetriesPerStep));
assertEquals(accumulator, IntStream.range(0, numberOfRuleConfigs).boxed().toList());
}
record RuleConfigHolder(TimeValue maxTimeOnAction, TimeValue maxTimeOnStep, Long maxRetries)
implements
IlmHealthIndicatorService.RuleConfig {
@Override
public boolean test(Long now, IndexMetadata indexMetadata) {
return false;
}
}
public void testStagnatingIndicesEvaluator() {
var idxMd1 = randomIndexMetadata();
var indexMetadata = indexMetadataFrom(idxMd1);
Long moment = 111333111222L;
{
// no rule matches
var executions = randomIntBetween(3, 200);
var calls = new AtomicInteger(0);
var rules = IntStream.range(0, executions).mapToObj(i -> (IlmHealthIndicatorService.RuleConfig) (now, idxMd) -> {
assertEquals(now, moment);
assertSame(idxMd, indexMetadata);
calls.incrementAndGet();
return false;
}).toList();
assertFalse(isStagnated(rules, moment, indexMetadata));
assertEquals(calls.get(), executions);
}
{
var calls = new AtomicReference<>(new ArrayList<Integer>());
var rules = List.<IlmHealthIndicatorService.RuleConfig>of((now, idxMd) -> { // will be called
assertEquals(now, moment);
assertSame(idxMd, indexMetadata);
calls.get().add(1);
return false;
}, (now, idxMd) -> { // will be called and cut the execution
assertEquals(now, moment);
assertSame(idxMd, indexMetadata);
calls.get().add(2);
return true;
}, (now, idxMd) -> { // won't be called
assertEquals(now, moment);
assertSame(idxMd, indexMetadata);
calls.get().add(3);
return true;
}, (now, idxMd) -> { // won't be called
assertEquals(now, moment);
assertSame(idxMd, indexMetadata);
calls.get().add(4);
return false;
});
assertTrue(isStagnated(rules, moment, indexMetadata));
assertEquals(calls.get(), List.of(1, 2));
}
}
private static TimeValue randomTimeValueInDays() {
return randomTimeValue(1, 1000, TimeUnit.DAYS);
}
private static IndexMetadata indexMetadataUnmanaged(String indexName) {
return indexMetadataFrom(new IndexMetadataTestCase(indexName, null, null));
}
private static IndexMetadata indexMetadataFrom(IndexMetadataTestCase indexMetadataTestCase) {
var settings = settings(IndexVersion.current());
var indexMetadataBuilder = IndexMetadata.builder(indexMetadataTestCase.indexName);
if (indexMetadataTestCase.ilmState != null) {
settings.put(LifecycleSettings.LIFECYCLE_NAME, indexMetadataTestCase.policyName);
indexMetadataBuilder.putCustom(ILM_CUSTOM_METADATA_KEY, indexMetadataTestCase.ilmState.asMap());
}
return indexMetadataBuilder.settings(settings)
.numberOfShards(randomIntBetween(1, 5))
.numberOfReplicas(randomIntBetween(0, 5))
.build();
}
private IlmHealthIndicatorService.StagnatingIndicesFinder createStagnatingIndicesFinder(
Collection<IlmHealthIndicatorService.RuleCreator> ruleCreator,
TimeValue maxTimeOnAction,
TimeValue maxTimeOnStep,
long maxRetriesPerStep,
LongSupplier timeSupplier,
IndexMetadata... indicesMetadata
) {
var clusterService = mock(ClusterService.class);
var state = mock(ClusterState.class);
var metadataBuilder = Metadata.builder();
Arrays.stream(indicesMetadata).forEach(im -> metadataBuilder.put(im, false));
when(state.metadata()).thenReturn(metadataBuilder.build());
when(clusterService.state()).thenReturn(state);
var settings = Settings.builder()
.put(MAX_TIME_ON_ACTION_SETTING.getKey(), maxTimeOnAction)
.put(MAX_TIME_ON_STEP_SETTING.getKey(), maxTimeOnStep)
.put(MAX_RETRIES_PER_STEP_SETTING.getKey(), maxRetriesPerStep)
.build();
when(clusterService.getSettings()).thenReturn(settings);
when(clusterService.getClusterSettings()).thenReturn(
new ClusterSettings(
settings,
Set.of(
IlmHealthIndicatorService.MAX_TIME_ON_ACTION_SETTING,
IlmHealthIndicatorService.MAX_TIME_ON_STEP_SETTING,
IlmHealthIndicatorService.MAX_RETRIES_PER_STEP_SETTING
)
)
);
return new IlmHealthIndicatorService.StagnatingIndicesFinder(clusterService, ruleCreator, timeSupplier);
}
static IndexMetadataTestCase randomIndexMetadata() {
return new IndexMetadataTestCase(
randomAlphaOfLength(10),
randomAlphaOfLength(10),
LifecycleExecutionState.builder()
.setPhase(randomAlphaOfLength(5))
.setAction(randomAlphaOfLength(10))
.setActionTime((long) randomIntBetween(0, 10000))
.setStep(randomAlphaOfLength(20))
.setStepTime((long) randomIntBetween(0, 10000))
.setFailedStepRetryCount(randomIntBetween(0, 1000))
.build()
);
}
record IndexMetadataTestCase(String indexName, String policyName, LifecycleExecutionState ilmState) {}
}
| StagnatingIndicesFinderTests |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/issue_1100/Issue1112.java | {
"start": 206,
"end": 870
} | class ____ extends TestCase {
public void test_for_issue_1() throws Exception {
JSONObject object = new JSONObject();
object.put("123", "abc");
assertEquals("abc", JSONPath.eval(object, "$.123"));
}
public void test_for_issue_2() throws Exception {
JSONObject object = new JSONObject();
object.put("345_xiu", "abc");
assertEquals("abc", JSONPath.eval(object, "$.345_xiu"));
}
public void test_for_issue_3() throws Exception {
JSONObject object = new JSONObject();
object.put("345.xiu", "abc");
assertEquals("abc", JSONPath.eval(object, "$.345\\.xiu"));
}
}
| Issue1112 |
java | apache__dubbo | dubbo-plugin/dubbo-qos/src/test/java/org/apache/dubbo/qos/command/impl/MockLivenessProbe.java | {
"start": 964,
"end": 1294
} | class ____ implements LivenessProbe {
private static boolean checkReturnValue = false;
@Override
public boolean check() {
return checkReturnValue;
}
public static void setCheckReturnValue(boolean checkReturnValue) {
MockLivenessProbe.checkReturnValue = checkReturnValue;
}
}
| MockLivenessProbe |
java | quarkusio__quarkus | extensions/resteasy-classic/resteasy-common/runtime/src/main/java/io/quarkus/resteasy/common/runtime/config/ThresholdConverter.java | {
"start": 597,
"end": 2396
} | class ____ implements Converter<Threshold>, Serializable {
public static final Threshold NONE = Threshold.of(-1L, SizeUnit.BYTE);
public static final Threshold DEFAULT = Threshold.of(512L, SizeUnit.KILOBYTE);
private static final Pattern PATTERN = Pattern.compile("(?<size>-?(?!0)\\d+)\\s*(?<unit>(?:ZB|EB|TB|PB|GB|MB|KB|B)\\b)?");
public ThresholdConverter() {
}
/**
* The converter accepts a value which start with a number by implicitly appending `PT` to it.
* If the value consists only of a number, it implicitly treats the value as seconds.
* Otherwise, tries to convert the value assuming that it is in the accepted ISO-8601 duration format.
*
* @param value duration as String
* @return {@link Duration}
*/
@Override
public Threshold convert(String value) {
value = value.trim();
if (value.isEmpty()) {
return null;
}
// The value should be something like 1 MB or 1MB
final Matcher matcher = PATTERN.matcher(value.toUpperCase(Locale.ROOT));
if (!matcher.find()) {
return DEFAULT;
}
final String stringSize = matcher.group("size");
final String stringUnit = matcher.group("unit");
final long size;
if (stringSize == null || stringSize.isBlank()) {
return DEFAULT;
} else {
size = Long.parseLong(stringSize);
}
if (size < 0L) {
return NONE;
}
SizeUnit unit = null;
for (SizeUnit u : SizeUnit.values()) {
if (u.abbreviation().equals(stringUnit)) {
unit = u;
break;
}
}
return Threshold.of(size, unit == null ? SizeUnit.BYTE : unit);
}
}
| ThresholdConverter |
java | apache__spark | sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/ParquetVectorUpdaterFactory.java | {
"start": 52435,
"end": 59675
} | class ____ extends DecimalUpdater {
private final int parquetScale;
private final int arrayLen;
FixedLenByteArrayToDecimalUpdater(ColumnDescriptor descriptor, DecimalType sparkType) {
super(sparkType);
LogicalTypeAnnotation typeAnnotation =
descriptor.getPrimitiveType().getLogicalTypeAnnotation();
this.parquetScale = ((DecimalLogicalTypeAnnotation) typeAnnotation).getScale();
this.arrayLen = descriptor.getPrimitiveType().getTypeLength();
}
@Override
public void skipValues(int total, VectorizedValuesReader valuesReader) {
valuesReader.skipFixedLenByteArray(total, arrayLen);
}
@Override
public void readValue(
int offset,
WritableColumnVector values,
VectorizedValuesReader valuesReader) {
BigInteger value = new BigInteger(valuesReader.readBinary(arrayLen).getBytesUnsafe());
BigDecimal decimal = new BigDecimal(value, this.parquetScale);
writeDecimal(offset, values, decimal);
}
@Override
public void decodeSingleDictionaryId(
int offset,
WritableColumnVector values,
WritableColumnVector dictionaryIds,
Dictionary dictionary) {
BigInteger value =
new BigInteger(dictionary.decodeToBinary(dictionaryIds.getDictId(offset)).getBytesUnsafe());
BigDecimal decimal = new BigDecimal(value, this.parquetScale);
writeDecimal(offset, values, decimal);
}
}
private static int rebaseDays(int julianDays, final boolean failIfRebase) {
if (failIfRebase) {
if (julianDays < RebaseDateTime.lastSwitchJulianDay()) {
throw DataSourceUtils.newRebaseExceptionInRead("Parquet");
} else {
return julianDays;
}
} else {
return RebaseDateTime.rebaseJulianToGregorianDays(julianDays);
}
}
private static long rebaseTimestamp(
long julianMicros,
final boolean failIfRebase,
final String format,
final String timeZone) {
if (failIfRebase) {
if (julianMicros < RebaseDateTime.lastSwitchJulianTs()) {
throw DataSourceUtils.newRebaseExceptionInRead(format);
} else {
return julianMicros;
}
} else {
return RebaseDateTime.rebaseJulianToGregorianMicros(timeZone, julianMicros);
}
}
private static long rebaseMicros(
long julianMicros,
final boolean failIfRebase,
final String timeZone) {
return rebaseTimestamp(julianMicros, failIfRebase, "Parquet", timeZone);
}
private static long rebaseInt96(
long julianMicros,
final boolean failIfRebase,
final String timeZone) {
return rebaseTimestamp(julianMicros, failIfRebase, "Parquet INT96", timeZone);
}
private boolean shouldConvertTimestamps() {
return convertTz != null && !convertTz.equals(UTC);
}
/**
* Helper function to construct exception for parquet schema mismatch.
*/
private SchemaColumnConvertNotSupportedException constructConvertNotSupportedException(
ColumnDescriptor descriptor,
DataType sparkType) {
return new SchemaColumnConvertNotSupportedException(
Arrays.toString(descriptor.getPath()),
descriptor.getPrimitiveType().getPrimitiveTypeName().toString(),
sparkType.catalogString());
}
private static boolean canReadAsIntDecimal(ColumnDescriptor descriptor, DataType dt) {
if (!DecimalType.is32BitDecimalType(dt)) return false;
return isDecimalTypeMatched(descriptor, dt) && isSameDecimalScale(descriptor, dt);
}
private static boolean canReadAsLongDecimal(ColumnDescriptor descriptor, DataType dt) {
if (!DecimalType.is64BitDecimalType(dt)) return false;
return isDecimalTypeMatched(descriptor, dt) && isSameDecimalScale(descriptor, dt);
}
private static boolean canReadAsBinaryDecimal(ColumnDescriptor descriptor, DataType dt) {
if (!DecimalType.isByteArrayDecimalType(dt)) return false;
return isDecimalTypeMatched(descriptor, dt) && isSameDecimalScale(descriptor, dt);
}
private static boolean canReadAsDecimal(ColumnDescriptor descriptor, DataType dt) {
if (!(dt instanceof DecimalType)) return false;
return isDecimalTypeMatched(descriptor, dt);
}
private static boolean isLongDecimal(DataType dt) {
if (dt instanceof DecimalType d) {
return d.precision() == 20 && d.scale() == 0;
}
return false;
}
private static boolean isDateTypeMatched(ColumnDescriptor descriptor) {
LogicalTypeAnnotation typeAnnotation = descriptor.getPrimitiveType().getLogicalTypeAnnotation();
return typeAnnotation instanceof DateLogicalTypeAnnotation;
}
private static boolean isSignedIntAnnotation(LogicalTypeAnnotation typeAnnotation) {
if (!(typeAnnotation instanceof IntLogicalTypeAnnotation)) return false;
IntLogicalTypeAnnotation intAnnotation = (IntLogicalTypeAnnotation) typeAnnotation;
return intAnnotation.isSigned();
}
private static boolean isDecimalTypeMatched(ColumnDescriptor descriptor, DataType dt) {
DecimalType requestedType = (DecimalType) dt;
LogicalTypeAnnotation typeAnnotation = descriptor.getPrimitiveType().getLogicalTypeAnnotation();
if (typeAnnotation instanceof DecimalLogicalTypeAnnotation) {
DecimalLogicalTypeAnnotation parquetType = (DecimalLogicalTypeAnnotation) typeAnnotation;
// If the required scale is larger than or equal to the physical decimal scale in the Parquet
// metadata, we can upscale the value as long as the precision also increases by as much so
// that there is no loss of precision.
int scaleIncrease = requestedType.scale() - parquetType.getScale();
int precisionIncrease = requestedType.precision() - parquetType.getPrecision();
return scaleIncrease >= 0 && precisionIncrease >= scaleIncrease;
} else if (typeAnnotation == null || isSignedIntAnnotation(typeAnnotation)) {
// Allow reading signed integers (which may be un-annotated) as decimal as long as the
// requested decimal type is large enough to represent all possible values.
PrimitiveType.PrimitiveTypeName typeName =
descriptor.getPrimitiveType().getPrimitiveTypeName();
int integerPrecision = requestedType.precision() - requestedType.scale();
switch (typeName) {
case INT32:
return integerPrecision >= DecimalType$.MODULE$.IntDecimal().precision();
case INT64:
return integerPrecision >= DecimalType$.MODULE$.LongDecimal().precision();
default:
return false;
}
}
return false;
}
private static boolean isSameDecimalScale(ColumnDescriptor descriptor, DataType dt) {
DecimalType d = (DecimalType) dt;
LogicalTypeAnnotation typeAnnotation = descriptor.getPrimitiveType().getLogicalTypeAnnotation();
if (typeAnnotation instanceof DecimalLogicalTypeAnnotation) {
DecimalLogicalTypeAnnotation decimalType = (DecimalLogicalTypeAnnotation) typeAnnotation;
return decimalType.getScale() == d.scale();
} else if (typeAnnotation == null || isSignedIntAnnotation(typeAnnotation)) {
// Consider signed integers (which may be un-annotated) as having scale 0.
return d.scale() == 0;
}
return false;
}
}
| FixedLenByteArrayToDecimalUpdater |
java | apache__spark | sql/core/src/test/java/test/org/apache/spark/sql/streaming/JavaDataStreamReaderWriterSuite.java | {
"start": 1361,
"end": 2576
} | class ____ {
private SparkSession spark;
private String input;
@BeforeEach
public void setUp() {
spark = new TestSparkSession();
input = Utils.createTempDir(System.getProperty("java.io.tmpdir"), "input").toString();
}
@AfterEach
public void tearDown() {
try {
Utils.deleteRecursively(new File(input));
} finally {
spark.stop();
spark = null;
}
}
@Test
public void testForeachBatchAPI() throws TimeoutException {
StreamingQuery query = spark
.readStream()
.textFile(input)
.writeStream()
.foreachBatch((VoidFunction2<Dataset<String>, Long>) (v1, v2) -> {})
.start();
query.stop();
}
@Test
public void testForeachAPI() throws TimeoutException {
StreamingQuery query = spark
.readStream()
.textFile(input)
.writeStream()
.foreach(new ForeachWriter<String>() {
@Override
public boolean open(long partitionId, long epochId) {
return true;
}
@Override
public void process(String value) {}
@Override
public void close(Throwable errorOrNull) {}
})
.start();
query.stop();
}
}
| JavaDataStreamReaderWriterSuite |
java | apache__camel | components/camel-netty-http/src/test/java/org/apache/camel/component/netty/http/NettyHttpMuteExceptionTest.java | {
"start": 1376,
"end": 2398
} | class ____ extends BaseNettyTest {
@Test
public void testMuteException() throws Exception {
HttpGet get = new HttpGet("http://localhost:" + getPort() + "/foo");
get.addHeader("Accept", "application/text");
try (CloseableHttpClient client = HttpClients.createDefault();
CloseableHttpResponse response = client.execute(get)) {
String body = EntityUtils.toString(response.getEntity(), "UTF-8");
assertNotNull(body);
assertEquals("", body);
assertEquals(500, response.getCode());
}
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("netty-http:http://0.0.0.0:{{port}}/foo?muteException=true")
.to("mock:input")
.throwException(new IllegalArgumentException("Camel cannot do this"));
}
};
}
}
| NettyHttpMuteExceptionTest |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/loader/ast/internal/AbstractMultiIdEntityLoader.java | {
"start": 12706,
"end": 14365
} | interface ____<T> {
void consume(int position, EntityKey entityKey, T resolvedRef);
}
private <R> List<Object> loadFromCaches(
MultiIdLoadOptions loadOptions,
LockOptions lockOptions,
ResolutionConsumer<R> resolutionConsumer,
Object id,
EntityKey entityKey,
List<Object> unresolvedIds, int i,
SharedSessionContractImplementor session) {
// look for it in the Session first
final var entry = loadFromSessionCache( entityKey, lockOptions, GET, session );
final Object sessionEntity;
if ( loadOptions.getSessionCheckMode() == SessionCheckMode.ENABLED ) {
sessionEntity = entry.entity();
if ( sessionEntity != null
&& loadOptions.getRemovalsMode() == RemovalsMode.REPLACE
&& !entry.isManaged() ) {
resolutionConsumer.consume( i, entityKey, null );
return unresolvedIds;
}
}
else {
sessionEntity = null;
}
final Object cachedEntity =
sessionEntity == null && loadOptions.isSecondLevelCacheCheckingEnabled()
? loadFromSecondLevelCache( entityKey, lockOptions, session )
: sessionEntity;
if ( cachedEntity != null ) {
//noinspection unchecked
resolutionConsumer.consume( i, entityKey, (R) cachedEntity );
}
else {
if ( unresolvedIds == null ) {
unresolvedIds = new ArrayList<>();
}
unresolvedIds.add( id );
}
return unresolvedIds;
}
private Object loadFromSecondLevelCache(EntityKey entityKey, LockOptions lockOptions, SharedSessionContractImplementor session) {
final var persister = getLoadable().getEntityPersister();
return session.loadFromSecondLevelCache( persister, entityKey, null, lockOptions.getLockMode() );
}
}
| ResolutionConsumer |
java | spring-projects__spring-framework | spring-webmvc/src/test/java/org/springframework/web/servlet/mvc/method/annotation/RequestMappingHandlerAdapterTests.java | {
"start": 16974,
"end": 17174
} | class ____ {
@SuppressWarnings("unused")
@ModelAttribute
public void addAttributes(Model model) {
model.addAttribute("attr3", "gAttr3");
}
}
/**
* This | ModelAttributeNotUsedPackageAdvice |
java | spring-projects__spring-framework | spring-tx/src/main/java/org/springframework/transaction/TransactionException.java | {
"start": 870,
"end": 1315
} | class ____ extends NestedRuntimeException {
/**
* Constructor for TransactionException.
* @param msg the detail message
*/
public TransactionException(String msg) {
super(msg);
}
/**
* Constructor for TransactionException.
* @param msg the detail message
* @param cause the root cause from the transaction API in use
*/
public TransactionException(String msg, Throwable cause) {
super(msg, cause);
}
}
| TransactionException |
java | apache__kafka | server-common/src/main/java/org/apache/kafka/server/common/DirectoryEventHandler.java | {
"start": 886,
"end": 1844
} | interface ____ {
/**
* A no-op implementation of {@link DirectoryEventHandler}.
*/
DirectoryEventHandler NOOP = new DirectoryEventHandler() {
@Override public void handleAssignment(TopicIdPartition partition, Uuid directoryId, String reason, Runnable callback) {}
@Override public void handleFailure(Uuid directoryId) {}
};
/**
* Handle the assignment of a topic partition to a directory.
* @param directoryId The directory ID
* @param partition The topic partition
* @param reason The reason
* @param callback Callback to apply when the request is completed.
*/
void handleAssignment(TopicIdPartition partition, Uuid directoryId, String reason, Runnable callback);
/**
* Handle the transition of an online log directory to the offline state.
* @param directoryId The directory ID
*/
void handleFailure(Uuid directoryId);
}
| DirectoryEventHandler |
java | quarkusio__quarkus | independent-projects/resteasy-reactive/server/runtime/src/main/java/org/jboss/resteasy/reactive/server/spi/ServerMessageBodyReader.java | {
"start": 244,
"end": 603
} | interface ____<T> extends MessageBodyReader<T> {
boolean isReadable(Class<?> type, Type genericType,
ResteasyReactiveResourceInfo lazyMethod, MediaType mediaType);
T readFrom(Class<T> type, Type genericType, MediaType mediaType,
ServerRequestContext context) throws WebApplicationException, IOException;
}
| ServerMessageBodyReader |
java | hibernate__hibernate-orm | hibernate-envers/src/main/java/org/hibernate/envers/AuditTable.java | {
"start": 413,
"end": 728
} | interface ____ {
/**
* The name of the table
*/
String value();
/**
* The schema of the table. Defaults to the schema of the annotated entity.
*/
String schema() default "";
/**
* The catalog of the table. Defaults to the catalog of the annotated entity.
*/
String catalog() default "";
}
| AuditTable |
java | netty__netty | transport-classes-epoll/src/main/java/io/netty/channel/epoll/NativeStaticallyReferencedJniMethods.java | {
"start": 948,
"end": 1226
} | class ____ to call a JNI method which has not
* yet been registered.</li>
* <li>java.lang.UnsatisfiedLinkError is thrown because native method has not yet been registered.</li>
* </ol>
* Static members which call JNI methods must not be declared in this class!
*/
final | attempt |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/inheritance/InheritanceDeleteBatchTest.java | {
"start": 1707,
"end": 2905
} | class ____ implements SettingProvider.Provider<String> {
@Override
public String getSetting() {
return InlineMutationStrategy.class.getName();
}
}
@AfterEach
void tearDown(SessionFactoryScope factoryScope) {
factoryScope.dropData();
}
@BeforeEach
public void setUp(SessionFactoryScope scope) {
scope.inTransaction( session -> {
session.persist( new TestEntity( 1 ) );
session.persist( new TestEntityType1( 2 ) );
session.persist( new TestEntityType2( 3 ) );
session.persist( new TestEntityType2( 4 ) );
} );
}
@Test
public void testDelete(SessionFactoryScope scope) {
StatisticsImplementor statistics = scope.getSessionFactory().getStatistics();
statistics.clear();
scope.inTransaction( session -> {
for ( int i = 1; i <= 4; i++ ) {
Query deleteQuery = session.createQuery( "delete TestEntity e where e.id = :id" );
deleteQuery.setParameter( "id", i );
deleteQuery.executeUpdate();
assertThat( statistics.getPrepareStatementCount(), is( 4L ) );
statistics.clear();
}
} );
}
@Entity(name = "TestEntity")
@Inheritance(strategy = InheritanceType.JOINED)
@Table(name = "test_entity")
public static | TableMutationStrategyProvider |
java | apache__camel | components/camel-aws/camel-aws2-sqs/src/main/java/org/apache/camel/component/aws2/sqs/Sqs2Configuration.java | {
"start": 1129,
"end": 24689
} | class ____ implements Cloneable {
// common properties
private String queueName;
@UriParam(label = "advanced")
@Metadata(autowired = true)
private SqsClient amazonSQSClient;
@UriParam(label = "security", secret = true)
private String accessKey;
@UriParam(label = "security", secret = true)
private String secretKey;
@UriParam(label = "security", secret = true)
private String sessionToken;
@UriParam(defaultValue = "amazonaws.com")
private String amazonAWSHost = "amazonaws.com";
@UriParam(secret = true)
private String queueOwnerAWSAccountId;
@UriParam(enums = "ap-south-2,ap-south-1,eu-south-1,eu-south-2,us-gov-east-1,me-central-1,il-central-1,ca-central-1,eu-central-1,us-iso-west-1,eu-central-2,eu-isoe-west-1,us-west-1,us-west-2,af-south-1,eu-north-1,eu-west-3,eu-west-2,eu-west-1,ap-northeast-3,ap-northeast-2,ap-northeast-1,me-south-1,sa-east-1,ap-east-1,cn-north-1,ca-west-1,us-gov-west-1,ap-southeast-1,ap-southeast-2,us-iso-east-1,ap-southeast-3,ap-southeast-4,us-east-1,us-east-2,cn-northwest-1,us-isob-east-1,aws-global,aws-cn-global,aws-us-gov-global,aws-iso-global,aws-iso-b-global")
private String region;
@UriParam(label = "proxy", enums = "HTTP,HTTPS", defaultValue = "HTTPS")
private Protocol proxyProtocol = Protocol.HTTPS;
@UriParam(label = "proxy")
private String proxyHost;
@UriParam(label = "proxy")
private Integer proxyPort;
@UriParam
private boolean autoCreateQueue;
@UriParam(label = "security")
private boolean trustAllCertificates;
@UriParam
private boolean overrideEndpoint;
@UriParam
private String uriEndpointOverride;
@UriParam(label = "producer")
private Integer delaySeconds;
@UriParam(label = "advanced")
private boolean delayQueue;
// consumer properties
@UriParam(label = "consumer", defaultValue = "true")
private boolean deleteAfterRead = true;
@UriParam(label = "consumer", defaultValue = "true")
private boolean deleteIfFiltered = true;
@UriParam(label = "consumer")
private Integer visibilityTimeout;
@UriParam(label = "consumer")
private String attributeNames;
@UriParam(label = "consumer")
private String messageAttributeNames;
@UriParam(label = "consumer")
private Integer waitTimeSeconds;
@UriParam(label = "consumer")
private Integer defaultVisibilityTimeout;
@UriParam(label = "consumer")
private boolean extendMessageVisibility;
@UriParam(label = "consumer")
private String kmsMasterKeyId;
@UriParam(label = "consumer")
private Integer kmsDataKeyReusePeriodSeconds;
@UriParam(label = "consumer")
private boolean serverSideEncryptionEnabled;
@UriParam(label = "consumer", defaultValue = "1")
private int concurrentConsumers = 1;
@UriParam(label = "consumer", defaultValue = "50")
private int concurrentRequestLimit = 50;
@UriParam(label = "consumer")
private String sortAttributeName;
// producer properties
@UriParam(label = "producer", javaType = "java.lang.String", enums = "useConstant,useExchangeId,usePropertyValue")
private MessageGroupIdStrategy messageGroupIdStrategy;
@UriParam(label = "producer", javaType = "java.lang.String", defaultValue = "useExchangeId",
enums = "useExchangeId,useContentBasedDeduplication")
private MessageDeduplicationIdStrategy messageDeduplicationIdStrategy = new ExchangeIdMessageDeduplicationIdStrategy();
@UriParam(label = "producer")
private Sqs2Operations operation;
@UriParam(label = "producer", defaultValue = ",")
private String batchSeparator = ",";
@UriParam(label = "producer", defaultValue = "WARN", enums = "WARN,WARN_ONCE,IGNORE,FAIL")
private String messageHeaderExceededLimit = "WARN";
// queue properties
@UriParam(label = "queue")
private Integer maximumMessageSize;
@UriParam(label = "queue")
private Integer messageRetentionPeriod;
@UriParam(label = "queue")
private Integer receiveMessageWaitTimeSeconds;
@UriParam(label = "queue")
@Metadata(supportFileReference = true)
private String policy;
@UriParam(label = "queue")
private String queueUrl;
// dead letter queue properties
@UriParam(label = "queue")
private String redrivePolicy;
// Likely used only for testing
@UriParam(defaultValue = "https")
private String protocol = "https";
@UriParam(label = "security")
private boolean useDefaultCredentialsProvider;
@UriParam(label = "security")
private boolean useProfileCredentialsProvider;
@UriParam(label = "security")
private boolean useSessionCredentials;
@UriParam(label = "security")
private String profileCredentialsName;
/**
* Whether the queue is a FIFO queue
*/
boolean isFifoQueue() {
// AWS docs suggest this is valid derivation.
// FIFO queue names must end with .fifo, and standard queues cannot
return queueName.endsWith(".fifo");
}
public String getAmazonAWSHost() {
return amazonAWSHost;
}
/**
* The hostname of the Amazon AWS cloud.
*/
public void setAmazonAWSHost(String amazonAWSHost) {
this.amazonAWSHost = amazonAWSHost;
}
public String getQueueName() {
return queueName;
}
/**
* Name of queue. The queue will be created if they don't already exist.
*/
public void setQueueName(String queueName) {
this.queueName = queueName;
}
public String getAccessKey() {
return accessKey;
}
/**
* Amazon AWS Access Key
*/
public void setAccessKey(String accessKey) {
this.accessKey = accessKey;
}
public String getSecretKey() {
return secretKey;
}
/**
* Amazon AWS Secret Key
*/
public void setSecretKey(String secretKey) {
this.secretKey = secretKey;
}
public String getSessionToken() {
return sessionToken;
}
/**
* Amazon AWS Session Token used when the user needs to assume an IAM role
*/
public void setSessionToken(String sessionToken) {
this.sessionToken = sessionToken;
}
public boolean isDeleteAfterRead() {
return deleteAfterRead;
}
/**
* Delete message from SQS after it has been read
*/
public void setDeleteAfterRead(boolean deleteAfterRead) {
this.deleteAfterRead = deleteAfterRead;
}
public SqsClient getAmazonSQSClient() {
return amazonSQSClient;
}
/**
* To use the AmazonSQS client
*/
public void setAmazonSQSClient(SqsClient amazonSQSClient) {
this.amazonSQSClient = amazonSQSClient;
}
public Integer getVisibilityTimeout() {
return visibilityTimeout;
}
/**
* The duration (in seconds) that the received messages are hidden from subsequent retrieve requests after being
* retrieved by a ReceiveMessage request to set in the com.amazonaws.services.sqs.model.SetQueueAttributesRequest.
* This only makes sense if it's different from defaultVisibilityTimeout. It changes the queue visibility timeout
* attribute permanently.
*/
public void setVisibilityTimeout(Integer visibilityTimeout) {
this.visibilityTimeout = visibilityTimeout;
}
public String getAttributeNames() {
return attributeNames;
}
/**
* A list of attribute names to receive when consuming. Multiple names can be separated by comma.
*/
public void setAttributeNames(String attributeNames) {
this.attributeNames = attributeNames;
}
public String getMessageAttributeNames() {
return messageAttributeNames;
}
/**
* A list of message attribute names to receive when consuming. Multiple names can be separated by comma.
*/
public void setMessageAttributeNames(String messageAttributeNames) {
this.messageAttributeNames = messageAttributeNames;
}
public Integer getDefaultVisibilityTimeout() {
return defaultVisibilityTimeout;
}
/**
* The default visibility timeout (in seconds)
*/
public void setDefaultVisibilityTimeout(Integer defaultVisibilityTimeout) {
this.defaultVisibilityTimeout = defaultVisibilityTimeout;
}
public Integer getDelaySeconds() {
return delaySeconds;
}
/**
* Delay sending messages for a number of seconds.
*/
public void setDelaySeconds(Integer delaySeconds) {
this.delaySeconds = delaySeconds;
}
public boolean isDelayQueue() {
return delayQueue;
}
/**
* Define if you want to apply delaySeconds option to the queue or on single messages
*/
public void setDelayQueue(boolean delayQueue) {
this.delayQueue = delayQueue;
}
public Integer getMaximumMessageSize() {
return maximumMessageSize;
}
/**
* The maximumMessageSize (in bytes) an SQS message can contain for this queue.
*/
public void setMaximumMessageSize(Integer maximumMessageSize) {
this.maximumMessageSize = maximumMessageSize;
}
public Integer getMessageRetentionPeriod() {
return messageRetentionPeriod;
}
/**
* The messageRetentionPeriod (in seconds) a message will be retained by SQS for this queue.
*/
public void setMessageRetentionPeriod(Integer messageRetentionPeriod) {
this.messageRetentionPeriod = messageRetentionPeriod;
}
public String getPolicy() {
return policy;
}
/**
* The policy for this queue. It can be loaded by default from classpath, but you can prefix with "classpath:",
* "file:", or "http:" to load the resource from different systems.
*/
public void setPolicy(String policy) {
this.policy = policy;
}
public String getRedrivePolicy() {
return redrivePolicy;
}
/**
* Specify the policy that send message to DeadLetter queue. See detail at Amazon docs.
*/
public void setRedrivePolicy(String redrivePolicy) {
this.redrivePolicy = redrivePolicy;
}
public boolean isExtendMessageVisibility() {
return this.extendMessageVisibility;
}
/**
* If enabled, then a scheduled background task will keep extending the message visibility on SQS. This is needed if
* it takes a long time to process the message. If set to true defaultVisibilityTimeout must be set. See details at
* Amazon docs.
*/
public void setExtendMessageVisibility(boolean extendMessageVisibility) {
this.extendMessageVisibility = extendMessageVisibility;
}
public Integer getReceiveMessageWaitTimeSeconds() {
return receiveMessageWaitTimeSeconds;
}
/**
* If you do not specify WaitTimeSeconds in the request, the queue attribute ReceiveMessageWaitTimeSeconds is used
* to determine how long to wait.
*/
public void setReceiveMessageWaitTimeSeconds(Integer receiveMessageWaitTimeSeconds) {
this.receiveMessageWaitTimeSeconds = receiveMessageWaitTimeSeconds;
}
public Integer getWaitTimeSeconds() {
return waitTimeSeconds;
}
/**
* Duration in seconds (0 to 20) that the ReceiveMessage action call will wait until a message is in the queue to
* include in the response.
*/
public void setWaitTimeSeconds(Integer waitTimeSeconds) {
this.waitTimeSeconds = waitTimeSeconds;
}
public String getQueueOwnerAWSAccountId() {
return queueOwnerAWSAccountId;
}
/**
* Specify the queue owner aws account id when you need to connect the queue with a different account owner.
*/
public void setQueueOwnerAWSAccountId(String queueOwnerAWSAccountId) {
this.queueOwnerAWSAccountId = queueOwnerAWSAccountId;
}
public boolean isDeleteIfFiltered() {
return deleteIfFiltered;
}
/**
* Whether to send the DeleteMessage to the SQS queue if the exchange has property with key
* {@link Sqs2Constants#SQS_DELETE_FILTERED} (CamelAwsSqsDeleteFiltered) set to true.
*/
public void setDeleteIfFiltered(boolean deleteIfFiltered) {
this.deleteIfFiltered = deleteIfFiltered;
}
public String getRegion() {
return region;
}
/**
* The region in which SQS client needs to work. When using this parameter, the configuration will expect the
* lowercase name of the region (for example, ap-east-1) You'll need to use the name Region.EU_WEST_1.id()
*/
public void setRegion(String region) {
this.region = region;
}
public int getConcurrentConsumers() {
return concurrentConsumers;
}
/**
* Allows you to use multiple threads to poll the sqs queue to increase throughput
*/
public void setConcurrentConsumers(int concurrentConsumers) {
this.concurrentConsumers = concurrentConsumers;
}
public int getConcurrentRequestLimit() {
return concurrentRequestLimit;
}
/**
* The maximum number of concurrent receive request send to AWS in single consumer polling.
*/
public void setConcurrentRequestLimit(int concurrentRequestLimit) {
this.concurrentRequestLimit = concurrentRequestLimit;
}
public String getSortAttributeName() {
return sortAttributeName;
}
/**
* The name of the message attribute used for sorting the messages. When specified, the messages polled by the
* consumer will be sorted by this attribute. This configuration may be of importance when you configure
* maxMessagesPerPoll parameter exceeding 10. In such cases, the messages will be fetched concurrently so the
* ordering is not guaranteed.
*/
public void setSortAttributeName(String sortAttributeName) {
this.sortAttributeName = sortAttributeName;
}
public String getQueueUrl() {
return queueUrl;
}
/**
* To define the queueUrl explicitly. All other parameters, which would influence the queueUrl, are ignored. This
* parameter is intended to be used to connect to a mock implementation of SQS, for testing purposes.
*/
public void setQueueUrl(String queueUrl) {
this.queueUrl = queueUrl;
}
public Protocol getProxyProtocol() {
return proxyProtocol;
}
/**
* To define a proxy protocol when instantiating the SQS client
*/
public void setProxyProtocol(Protocol proxyProtocol) {
this.proxyProtocol = proxyProtocol;
}
public String getProxyHost() {
return proxyHost;
}
/**
* To define a proxy host when instantiating the SQS client
*/
public void setProxyHost(String proxyHost) {
this.proxyHost = proxyHost;
}
public Integer getProxyPort() {
return proxyPort;
}
/**
* To define a proxy port when instantiating the SQS client
*/
public void setProxyPort(Integer proxyPort) {
this.proxyPort = proxyPort;
}
public String getKmsMasterKeyId() {
return kmsMasterKeyId;
}
/**
* The ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK.
*/
public void setKmsMasterKeyId(String kmsMasterKeyId) {
this.kmsMasterKeyId = kmsMasterKeyId;
}
public Integer getKmsDataKeyReusePeriodSeconds() {
return kmsDataKeyReusePeriodSeconds;
}
/**
* The length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before
* calling AWS KMS again. An integer representing seconds, between 60 seconds (1 minute) and 86,400 seconds (24
* hours). Default: 300 (5 minutes).
*/
public void setKmsDataKeyReusePeriodSeconds(Integer kmsDataKeyReusePeriodSeconds) {
this.kmsDataKeyReusePeriodSeconds = kmsDataKeyReusePeriodSeconds;
}
public boolean isServerSideEncryptionEnabled() {
return serverSideEncryptionEnabled;
}
/**
* Define if Server Side Encryption is enabled or not on the queue
*/
public void setServerSideEncryptionEnabled(boolean serverSideEncryptionEnabled) {
this.serverSideEncryptionEnabled = serverSideEncryptionEnabled;
}
/**
* Only for FIFO queues. Strategy for setting the messageGroupId on the message. It can be one of the following
* options: *useConstant*, *useExchangeId*, *usePropertyValue*. For the *usePropertyValue* option, the value of
* property "CamelAwsMessageGroupId" will be used.
*/
public void setMessageGroupIdStrategy(String strategy) {
if ("useConstant".equalsIgnoreCase(strategy)) {
messageGroupIdStrategy = new ConstantMessageGroupIdStrategy();
} else if ("useExchangeId".equalsIgnoreCase(strategy)) {
messageGroupIdStrategy = new ExchangeIdMessageGroupIdStrategy();
} else if ("usePropertyValue".equalsIgnoreCase(strategy)) {
messageGroupIdStrategy = new PropertyValueMessageGroupIdStrategy();
} else {
throw new IllegalArgumentException("Unrecognised MessageGroupIdStrategy: " + strategy);
}
}
public void setMessageGroupIdStrategy(MessageGroupIdStrategy messageGroupIdStrategy) {
this.messageGroupIdStrategy = messageGroupIdStrategy;
}
public MessageGroupIdStrategy getMessageGroupIdStrategy() {
return messageGroupIdStrategy;
}
public MessageDeduplicationIdStrategy getMessageDeduplicationIdStrategy() {
return messageDeduplicationIdStrategy;
}
/**
* Only for FIFO queues. Strategy for setting the messageDeduplicationId on the message. It can be one of the
* following options: *useExchangeId*, *useContentBasedDeduplication*. For the *useContentBasedDeduplication*
* option, no messageDeduplicationId will be set on the message.
*/
public void setMessageDeduplicationIdStrategy(String strategy) {
if ("useExchangeId".equalsIgnoreCase(strategy)) {
messageDeduplicationIdStrategy = new ExchangeIdMessageDeduplicationIdStrategy();
} else if ("useContentBasedDeduplication".equalsIgnoreCase(strategy)) {
messageDeduplicationIdStrategy = new NullMessageDeduplicationIdStrategy();
} else {
throw new IllegalArgumentException("Unrecognised MessageDeduplicationIdStrategy: " + strategy);
}
}
public void setMessageDeduplicationIdStrategy(MessageDeduplicationIdStrategy messageDeduplicationIdStrategy) {
this.messageDeduplicationIdStrategy = messageDeduplicationIdStrategy;
}
public Sqs2Operations getOperation() {
return operation;
}
/**
* The operation to do in case the user don't want to send only a message
*/
public void setOperation(Sqs2Operations operation) {
this.operation = operation;
}
public boolean isAutoCreateQueue() {
return autoCreateQueue;
}
/**
* Setting the auto-creation of the queue
*/
public void setAutoCreateQueue(boolean autoCreateQueue) {
this.autoCreateQueue = autoCreateQueue;
}
public String getProtocol() {
return protocol;
}
/**
* The underlying protocol used to communicate with SQS
*/
public void setProtocol(String protocol) {
this.protocol = protocol;
}
public boolean isTrustAllCertificates() {
return trustAllCertificates;
}
/**
* If we want to trust all certificates in case of overriding the endpoint
*/
public void setTrustAllCertificates(boolean trustAllCertificates) {
this.trustAllCertificates = trustAllCertificates;
}
public boolean isUseDefaultCredentialsProvider() {
return useDefaultCredentialsProvider;
}
/**
* Set whether the SQS client should expect to load credentials on an AWS infra instance or to expect static
* credentials to be passed in.
*/
public void setUseDefaultCredentialsProvider(boolean useDefaultCredentialsProvider) {
this.useDefaultCredentialsProvider = useDefaultCredentialsProvider;
}
/**
* Set whether the SQS client should expect to load credentials through a profile credentials provider.
*/
public void setUseProfileCredentialsProvider(boolean useProfileCredentialsProvider) {
this.useProfileCredentialsProvider = useProfileCredentialsProvider;
}
public boolean isUseProfileCredentialsProvider() {
return useProfileCredentialsProvider;
}
public boolean isUseSessionCredentials() {
return useSessionCredentials;
}
/**
* Set whether the SQS client should expect to use Session Credentials. This is useful in a situation in which the
* user needs to assume an IAM role for doing operations in SQS.
*/
public void setUseSessionCredentials(boolean useSessionCredentials) {
this.useSessionCredentials = useSessionCredentials;
}
public String getBatchSeparator() {
return batchSeparator;
}
/**
* Set the separator when passing a String to send batch message operation
*/
public void setBatchSeparator(String batchSeparator) {
this.batchSeparator = batchSeparator;
}
public String getMessageHeaderExceededLimit() {
return messageHeaderExceededLimit;
}
/**
* What to do if sending to AWS SQS has more messages than AWS allows (currently only maximum 10 message headers are
* allowed).
*
* WARN will log a WARN about the limit is for each additional header, so the message can be sent to AWS. WARN_ONCE
* will only log one time a WARN about the limit is hit, and drop additional headers, so the message can be sent to
* AWS. IGNORE will ignore (no logging) and drop additional headers, so the message can be sent to AWS. FAIL will
* cause an exception to be thrown and the message is not sent to AWS.
*/
public void setMessageHeaderExceededLimit(String messageHeaderExceededLimit) {
this.messageHeaderExceededLimit = messageHeaderExceededLimit;
}
public boolean isOverrideEndpoint() {
return overrideEndpoint;
}
/**
* Set the need for overriding the endpoint. This option needs to be used in combination with the
* uriEndpointOverride option
*/
public void setOverrideEndpoint(boolean overrideEndpoint) {
this.overrideEndpoint = overrideEndpoint;
}
public String getUriEndpointOverride() {
return uriEndpointOverride;
}
/**
* Set the overriding uri endpoint. This option needs to be used in combination with overrideEndpoint option
*/
public void setUriEndpointOverride(String uriEndpointOverride) {
this.uriEndpointOverride = uriEndpointOverride;
}
public String getProfileCredentialsName() {
return profileCredentialsName;
}
/**
* If using a profile credentials provider, this parameter will set the profile name
*/
public void setProfileCredentialsName(String profileCredentialsName) {
this.profileCredentialsName = profileCredentialsName;
}
// *************************************************
//
// *************************************************
public Sqs2Configuration copy() {
try {
return (Sqs2Configuration) super.clone();
} catch (CloneNotSupportedException e) {
throw new RuntimeCamelException(e);
}
}
}
| Sqs2Configuration |
java | spring-projects__spring-boot | module/spring-boot-opentelemetry/src/main/java/org/springframework/boot/opentelemetry/autoconfigure/logging/otlp/OtlpLoggingConfigurations.java | {
"start": 2363,
"end": 3332
} | class ____ implements OtlpLoggingConnectionDetails {
private final OtlpLoggingProperties properties;
PropertiesOtlpLoggingConnectionDetails(OtlpLoggingProperties properties) {
this.properties = properties;
}
@Override
public String getUrl(Transport transport) {
Assert.state(transport == this.properties.getTransport(),
"Requested transport %s doesn't match configured transport %s".formatted(transport,
this.properties.getTransport()));
String endpoint = this.properties.getEndpoint();
Assert.state(endpoint != null, "'endpoint' must not be null");
return endpoint;
}
}
}
@Configuration(proxyBeanMethods = false)
@ConditionalOnClass(OtlpHttpLogRecordExporter.class)
@ConditionalOnMissingBean({ OtlpGrpcLogRecordExporter.class, OtlpHttpLogRecordExporter.class })
@ConditionalOnBean(OtlpLoggingConnectionDetails.class)
@ConditionalOnEnabledLoggingExport("otlp")
static | PropertiesOtlpLoggingConnectionDetails |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/propertyref/inheritence/joined/Account.java | {
"start": 224,
"end": 730
} | class ____ implements Serializable {
private String accountId;
private char type;
/**
* @return Returns the accountId.
*/
public String getAccountId() {
return accountId;
}
/**
* @param accountId The accountId to set.
*/
public void setAccountId(String accountId) {
this.accountId = accountId;
}
/**
* @return Returns the type.
*/
public char getType() {
return type;
}
/**
* @param type The type to set.
*/
public void setType(char type) {
this.type = type;
}
}
| Account |
java | apache__camel | components/camel-mllp/src/main/java/org/apache/camel/component/mllp/MllpInvalidMessageException.java | {
"start": 924,
"end": 1293
} | class ____ extends MllpException {
public MllpInvalidMessageException(String message, byte[] hl7Message, boolean logPhi) {
super(message, hl7Message, logPhi);
}
public MllpInvalidMessageException(String message, byte[] hl7Message, Throwable cause, boolean logPhi) {
super(message, hl7Message, cause, logPhi);
}
}
| MllpInvalidMessageException |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/security/token/DistributedSQLCounter.java | {
"start": 1379,
"end": 5288
} | class ____ {
private static final Logger LOG =
LoggerFactory.getLogger(DistributedSQLCounter.class);
private final String field;
private final String table;
private final SQLConnectionFactory connectionFactory;
public DistributedSQLCounter(String field, String table,
SQLConnectionFactory connectionFactory) {
this.field = field;
this.table = table;
this.connectionFactory = connectionFactory;
}
/**
* Obtains the value of the counter.
*
* @return counter value.
* @throws SQLException if querying the database fails.
*/
public int selectCounterValue() throws SQLException {
try (Connection connection = connectionFactory.getConnection()) {
return selectCounterValue(false, connection);
}
}
private int selectCounterValue(boolean forUpdate, Connection connection) throws SQLException {
String query = String.format("SELECT %s FROM %s %s", field, table,
forUpdate ? "FOR UPDATE" : "");
LOG.debug("Select counter statement: " + query);
try (Statement statement = connection.createStatement();
ResultSet result = statement.executeQuery(query)) {
if (result.next()) {
return result.getInt(field);
} else {
throw new IllegalStateException("Counter table not initialized: " + table);
}
}
}
/**
* Sets the counter to the given value.
*
* @param value Value to assign to counter.
* @throws SQLException if querying the database fails.
*/
public void updateCounterValue(int value) throws SQLException {
try (Connection connection = connectionFactory.getConnection(true)) {
updateCounterValue(value, connection);
}
}
/**
* Sets the counter to the given value.
*
* @param value Value to assign to counter.
* @param connection Connection to database hosting the counter table.
* @throws SQLException if querying the database fails.
*/
public void updateCounterValue(int value, Connection connection) throws SQLException {
String queryText = String.format("UPDATE %s SET %s = ?", table, field);
LOG.debug("Update counter statement: " + queryText + ". Value: " + value);
try (PreparedStatement statement = connection.prepareStatement(queryText)) {
statement.setInt(1, value);
statement.execute();
}
}
/**
* Increments the counter by the given amount and
* returns the previous counter value.
*
* @param amount Amount to increase the counter.
* @return Previous counter value.
* @throws SQLException if querying the database fails.
*/
public int incrementCounterValue(int amount) throws SQLException {
// Disabling auto-commit to ensure that all statements on this transaction
// are committed at once.
try (Connection connection = connectionFactory.getConnection(false)) {
// Preventing dirty reads and non-repeatable reads to ensure that the
// value read will not be updated by a different connection.
if (connection.getTransactionIsolation() < Connection.TRANSACTION_REPEATABLE_READ) {
connection.setTransactionIsolation(Connection.TRANSACTION_REPEATABLE_READ);
}
try {
// Reading the counter value "FOR UPDATE" to lock the value record,
// forcing other connections to wait until this transaction is committed.
int lastValue = selectCounterValue(true, connection);
// Calculate the new counter value and handling overflow by
// resetting the counter to 0.
int newValue = lastValue + amount;
if (newValue < 0) {
lastValue = 0;
newValue = amount;
}
updateCounterValue(newValue, connection);
connection.commit();
return lastValue;
} catch (Exception e) {
// Rollback transaction to release table locks
connection.rollback();
throw e;
}
}
}
}
| DistributedSQLCounter |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/util/internal/PrivateMaxEntriesMap.java | {
"start": 38632,
"end": 41171
} | class ____<K, V> {
static final int DEFAULT_CONCURRENCY_LEVEL = 16;
static final int DEFAULT_INITIAL_CAPACITY = 16;
int concurrencyLevel;
int initialCapacity;
long capacity;
public Builder() {
capacity = -1;
initialCapacity = DEFAULT_INITIAL_CAPACITY;
concurrencyLevel = DEFAULT_CONCURRENCY_LEVEL;
}
/**
* Specifies the initial capacity of the hash table (default {@code 16}).
* This is the number of key-value pairs that the hash table can hold
* before a resize operation is required.
*
* @param initialCapacity the initial capacity used to size the hash table
* to accommodate this many entries.
* @throws IllegalArgumentException if the initialCapacity is negative
*/
public Builder<K, V> initialCapacity(int initialCapacity) {
checkArgument(initialCapacity >= 0);
this.initialCapacity = initialCapacity;
return this;
}
/**
* Specifies the maximum capacity to coerce the map to and may
* exceed it temporarily.
*
* @param capacity the threshold to bound the map by
* @throws IllegalArgumentException if the maximumCapacity is negative
*/
public Builder<K, V> maximumCapacity(long capacity) {
checkArgument(capacity >= 0);
this.capacity = capacity;
return this;
}
/**
* Specifies the estimated number of concurrently updating threads. The
* implementation performs internal sizing to try to accommodate this many
* threads (default {@code 16}).
*
* @param concurrencyLevel the estimated number of concurrently updating
* threads
* @throws IllegalArgumentException if the concurrencyLevel is less than or
* equal to zero
*/
public Builder<K, V> concurrencyLevel(int concurrencyLevel) {
checkArgument(concurrencyLevel > 0);
this.concurrencyLevel = concurrencyLevel;
return this;
}
/**
* Creates a new {@link PrivateMaxEntriesMap} instance.
*
* @throws IllegalStateException if the maximum capacity was
* not set
*/
public PrivateMaxEntriesMap<K, V> build() {
checkState(capacity >= 0);
return new PrivateMaxEntriesMap<K, V>(this);
}
}
}
| Builder |
java | apache__camel | components/camel-mongodb/src/main/java/org/apache/camel/component/mongodb/MongoDbConsumerType.java | {
"start": 855,
"end": 978
} | enum ____ {
tailable,
changeStreams
// more consumer types to be included in future versions
}
| MongoDbConsumerType |
java | apache__camel | components/camel-mail/src/test/java/org/apache/camel/component/mail/MailBindingAttachmentFileTest.java | {
"start": 1529,
"end": 3334
} | class ____ {
private final MailBinding binding = new MailBinding();
@ParameterizedTest
@MethodSource("fileNames")
public void shouldSanitizeAttachmentFileNames(String name) throws MessagingException, IOException {
final Session session = Session.getInstance(new Properties());
final Message message = new MimeMessage(session);
final Multipart multipart = new MimeMultipart();
final MimeBodyPart part = new MimeBodyPart();
part.attachFile(name);
multipart.addBodyPart(part);
message.setContent(multipart);
final Map<String, Attachment> attachments = new HashMap<>();
binding.extractAttachmentsFromMail(message, attachments);
assertThat(attachments).containsKey("file.txt");
final Attachment attachment = attachments.get("file.txt");
final DataHandler dataHandler = attachment.getDataHandler();
assertThat(dataHandler.getName()).isEqualTo("file.txt");
}
public static Iterable<String> fileNames() {
return Arrays.asList("file.txt", "../file.txt", "..\\file.txt", "/absolute/file.txt", "c:\\absolute\\file.txt");
}
@Test
public void testSkipEmptyName() throws MessagingException, IOException {
final Session session = Session.getInstance(new Properties());
final Message message = new MimeMessage(session);
final Multipart multipart = new MimeMultipart();
final MimeBodyPart part = new MimeBodyPart();
part.attachFile("");
multipart.addBodyPart(part);
message.setContent(multipart);
final Map<String, Attachment> attachments = new HashMap<>();
binding.extractAttachmentsFromMail(message, attachments);
assertThat(attachments).isEmpty();
}
}
| MailBindingAttachmentFileTest |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java | {
"start": 935,
"end": 1026
} | class ____ extends HttpOpParam<PostOpParam.Op> {
/** Post operations. */
public | PostOpParam |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/CheckedExceptionNotThrownTest.java | {
"start": 2142,
"end": 2473
} | class ____ {
void test() throws Exception {
Thread.sleep(1);
}
}
""")
.expectUnchanged()
.doTest();
}
@Test
public void overridable_noChange() {
helper
.addInputLines(
"Test.java",
"""
public | Test |
java | google__guice | core/test/com/google/inject/MethodInterceptionTest.java | {
"start": 20636,
"end": 20750
} | class ____<R> {
@Intercept
public R testReturn() {
return null;
}
}
public static | GenericReturn |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest-client-jaxrs/deployment/src/test/java/io/quarkus/jaxrs/client/reactive/deployment/test/ClientResponseFilterTestCase.java | {
"start": 933,
"end": 1627
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(Endpoint.class));
@TestHTTPResource
URL url;
private Client client;
@BeforeEach
public void before() {
client = ClientBuilder.newClient().register(TestClientResponseFilter.class);
}
@AfterEach
public void after() {
client.close();
}
@Test
public void test() {
Response response = client.target(url.toExternalForm() + "/hello").request().get();
assertEquals(200, response.getStatus());
}
public static | ClientResponseFilterTestCase |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestNMClient.java | {
"start": 4475,
"end": 18787
} | class ____ implements ContainerStateTransitionListener {
public static final Map<ContainerId, Integer> RUNNING_TRANSITIONS = new ConcurrentHashMap<>();
public void init(Context context) {
}
public void preTransition(ContainerImpl op,
org.apache.hadoop.yarn.server.nodemanager
.containermanager.container.ContainerState
beforeState,
ContainerEvent eventToBeProcessed) {
}
public void postTransition(
ContainerImpl op,
org.apache.hadoop.yarn.server.nodemanager.containermanager.container
.ContainerState beforeState,
org.apache.hadoop.yarn.server.nodemanager.containermanager.container
.ContainerState afterState,
ContainerEvent processedEvent) {
if (beforeState != afterState &&
afterState == org.apache.hadoop.yarn.server.nodemanager.containermanager.container
.ContainerState.RUNNING) {
RUNNING_TRANSITIONS.compute(op.getContainerId(),
(containerId, counter) -> counter == null ? 1 : ++counter);
}
}
}
public void setup() throws YarnException, IOException, InterruptedException, TimeoutException {
conf = new YarnConfiguration();
conf.set(YarnConfiguration.NM_CONTAINER_STATE_TRANSITION_LISTENERS,
DebugSumContainerStateListener.class.getName());
startYarnCluster();
startYarnClient();
UserGroupInformation.setLoginUser(UserGroupInformation
.createRemoteUser(UserGroupInformation.getCurrentUser().getUserName()));
UserGroupInformation.getCurrentUser().addToken(appAttempt.getAMRMToken());
nmTokenCache = new NMTokenCache();
startRMClient();
startNMClient();
}
private void startYarnCluster() {
yarnCluster = new MiniYARNCluster(TestNMClient.class.getName(), 3, 1, 1);
yarnCluster.init(conf);
yarnCluster.start();
assertEquals(STATE.STARTED, yarnCluster.getServiceState());
}
private void startYarnClient()
throws IOException, YarnException, InterruptedException, TimeoutException {
yarnClient = (YarnClientImpl) YarnClient.createYarnClient();
yarnClient.init(conf);
yarnClient.start();
assertEquals(STATE.STARTED, yarnClient.getServiceState());
nodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
ApplicationSubmissionContext appContext =
yarnClient.createApplication().getApplicationSubmissionContext();
ApplicationId appId = appContext.getApplicationId();
appContext.setApplicationName("Test");
Priority pri = Priority.newInstance(0);
appContext.setPriority(pri);
appContext.setQueue("default");
ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);
appContext.setAMContainerSpec(amContainer);
appContext.setUnmanagedAM(true);
SubmitApplicationRequest appRequest = Records.newRecord(SubmitApplicationRequest.class);
appRequest.setApplicationSubmissionContext(appContext);
yarnClient.submitApplication(appContext);
GenericTestUtils.waitFor(() -> yarnCluster.getResourceManager().getRMContext().getRMApps()
.get(appId).getCurrentAppAttempt().getAppAttemptState() == RMAppAttemptState.LAUNCHED,
100, 30_000, "Failed to start app");
appAttempt = yarnCluster.getResourceManager().getRMContext().getRMApps()
.get(appId).getCurrentAppAttempt();
}
private void startRMClient() {
rmClient = (AMRMClientImpl<ContainerRequest>) AMRMClient.createAMRMClient();
rmClient.setNMTokenCache(nmTokenCache);
rmClient.init(conf);
rmClient.start();
assertEquals(STATE.STARTED, rmClient.getServiceState());
}
private void startNMClient() {
nmClient = (NMClientImpl) NMClient.createNMClient();
nmClient.setNMTokenCache(rmClient.getNMTokenCache());
nmClient.init(conf);
nmClient.start();
assertEquals(STATE.STARTED, nmClient.getServiceState());
}
public void tearDown() throws InterruptedException {
rmClient.stop();
yarnClient.stop();
yarnCluster.stop();
}
@Test
@Timeout(value = 180)
public void testNMClientNoCleanupOnStop()
throws YarnException, IOException, InterruptedException, TimeoutException {
runTest(() -> {
stopNmClient();
assertFalse(nmClient.startedContainers.isEmpty());
nmClient.cleanupRunningContainers();
assertEquals(0, nmClient.startedContainers.size());
});
}
@Test
@Timeout(value = 200)
public void testNMClient()
throws YarnException, IOException, InterruptedException, TimeoutException {
runTest(() -> {
// stop the running containers on close
assertFalse(nmClient.startedContainers.isEmpty());
nmClient.cleanupRunningContainersOnStop(true);
assertTrue(nmClient.getCleanupRunningContainers().get());
nmClient.stop();
});
}
public void runTest(
Runnable test
) throws IOException, InterruptedException, YarnException, TimeoutException {
setup();
rmClient.registerApplicationMaster("Host", 10_000, "");
testContainerManagement(nmClient, allocateContainers(rmClient));
rmClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, null, null);
test.run();
tearDown();
}
private void stopNmClient() {
assertNotNull(nmClient, "Null nmClient");
// leave one unclosed
assertEquals(1, nmClient.startedContainers.size());
// default true
assertTrue(nmClient.getCleanupRunningContainers().get());
nmClient.cleanupRunningContainersOnStop(false);
assertFalse(nmClient.getCleanupRunningContainers().get());
nmClient.stop();
}
private Set<Container> allocateContainers(
AMRMClientImpl<ContainerRequest> client
) throws YarnException, IOException {
for (int i = 0; i < NUMBER_OF_CONTAINERS; ++i) {
client.addContainerRequest(new ContainerRequest(
Resource.newInstance(1024, 0),
new String[] {nodeReports.get(0).getNodeId().getHost()},
new String[] {nodeReports.get(0).getRackName()},
Priority.newInstance(0)
));
}
Set<Container> allocatedContainers = new TreeSet<>();
while (allocatedContainers.size() < NUMBER_OF_CONTAINERS) {
AllocateResponse allocResponse = client.allocate(0.1f);
allocatedContainers.addAll(allocResponse.getAllocatedContainers());
for (NMToken token : allocResponse.getNMTokens()) {
client.getNMTokenCache().setToken(token.getNodeId().toString(), token.getToken());
}
if (allocatedContainers.size() < NUMBER_OF_CONTAINERS) {
sleep(100);
}
}
return allocatedContainers;
}
private void testContainerManagement(
NMClientImpl client, Set<Container> containers
) throws YarnException, IOException {
int size = containers.size();
int i = 0;
for (Container container : containers) {
// getContainerStatus shouldn't be called before startContainer,
// otherwise, NodeManager cannot find the container
assertYarnException(
() -> client.getContainerStatus(container.getId(), container.getNodeId()),
IS_NOT_HANDLED_BY_THIS_NODEMANAGER);
// upadateContainerResource shouldn't be called before startContainer,
// otherwise, NodeManager cannot find the container
assertYarnException(
() -> client.updateContainerResource(container),
IS_NOT_HANDLED_BY_THIS_NODEMANAGER);
// restart shouldn't be called before startContainer,
// otherwise, NodeManager cannot find the container
assertYarnException(
() -> client.restartContainer(container.getId()),
UNKNOWN_CONTAINER);
// rollback shouldn't be called before startContainer,
// otherwise, NodeManager cannot find the container
assertYarnException(
() -> client.rollbackLastReInitialization(container.getId()),
UNKNOWN_CONTAINER);
// commit shouldn't be called before startContainer,
// otherwise, NodeManager cannot find the container
assertYarnException(
() -> client.commitLastReInitialization(container.getId()),
UNKNOWN_CONTAINER);
// stopContainer shouldn't be called before startContainer,
// otherwise, an exception will be thrown
assertYarnException(
() -> client.stopContainer(container.getId(), container.getNodeId()),
IS_NOT_HANDLED_BY_THIS_NODEMANAGER);
Credentials ts = new Credentials();
DataOutputBuffer dob = new DataOutputBuffer();
ts.writeTokenStorageToStream(dob);
ByteBuffer securityTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
ContainerLaunchContext clc = Records.newRecord(ContainerLaunchContext.class);
clc.setCommands(Shell.WINDOWS
? Arrays.asList("ping", "-n", "10000000", "127.0.0.1", ">nul")
: Arrays.asList("sleep", "1000000")
);
clc.setTokens(securityTokens);
client.startContainer(container, clc);
List<Integer> exitStatuses = Arrays.asList(-1000, -105);
// leave one container unclosed
if (++i < size) {
testContainer(client, i, container, clc, exitStatuses);
}
}
}
private void testContainer(NMClientImpl client, int i, Container container,
ContainerLaunchContext clc, List<Integer> exitCode)
throws YarnException, IOException {
testGetContainerStatus(container, i, ContainerState.RUNNING, "",
exitCode);
waitForContainerRunningTransitionCount(container, 1);
testIncreaseContainerResource(container);
testRestartContainer(container);
testGetContainerStatus(container, i, ContainerState.RUNNING,
"will be Restarted", exitCode);
waitForContainerRunningTransitionCount(container, 2);
if (i % 2 == 0) {
testReInitializeContainer(container, clc, false);
testGetContainerStatus(container, i, ContainerState.RUNNING,
"will be Re-initialized", exitCode);
waitForContainerRunningTransitionCount(container, 3);
testContainerRollback(container, true);
testGetContainerStatus(container, i, ContainerState.RUNNING,
"will be Rolled-back", exitCode);
waitForContainerRunningTransitionCount(container, 4);
testContainerCommit(container, false);
testReInitializeContainer(container, clc, false);
testGetContainerStatus(container, i, ContainerState.RUNNING,
"will be Re-initialized", exitCode);
waitForContainerRunningTransitionCount(container, 5);
testContainerCommit(container, true);
} else {
testReInitializeContainer(container, clc, true);
testGetContainerStatus(container, i, ContainerState.RUNNING,
"will be Re-initialized", exitCode);
waitForContainerRunningTransitionCount(container, 3);
testContainerRollback(container, false);
testContainerCommit(container, false);
}
client.stopContainer(container.getId(), container.getNodeId());
testGetContainerStatus(container, i, ContainerState.COMPLETE,
"killed by the ApplicationMaster", exitCode);
}
private void waitForContainerRunningTransitionCount(Container container, long transitions) {
while (DebugSumContainerStateListener.RUNNING_TRANSITIONS
.getOrDefault(container.getId(), 0) != transitions) {
sleep(500);
}
}
private void testGetContainerStatus(Container container, int index,
ContainerState state, String diagnostics,
List<Integer> exitStatuses)
throws YarnException, IOException {
while (true) {
sleep(250);
ContainerStatus status = nmClient.getContainerStatus(
container.getId(), container.getNodeId());
// NodeManager may still need some time to get the stable
// container status
if (status.getState() == state) {
assertEquals(container.getId(), status.getContainerId());
assertTrue(status.getDiagnostics().contains(diagnostics),
index + ": " + status.getDiagnostics());
assertTrue(exitStatuses.contains(status.getExitStatus()),
"Exit Statuses are supposed to be in: " + exitStatuses +
", but the actual exit status code is: " +
status.getExitStatus());
break;
}
}
}
@SuppressWarnings("deprecation")
private void testIncreaseContainerResource(Container container) {
assertYarnException(
() -> nmClient.increaseContainerResource(container),
container.getId() + " has update version ");
}
private void testRestartContainer(Container container) throws IOException, YarnException {
nmClient.restartContainer(container.getId());
}
private void testContainerRollback(Container container, boolean enabled)
throws IOException, YarnException {
if (enabled) {
nmClient.rollbackLastReInitialization(container.getId());
} else {
assertYarnException(
() -> nmClient.rollbackLastReInitialization(container.getId()),
"Nothing to rollback to");
}
}
private void testContainerCommit(Container container, boolean enabled)
throws IOException, YarnException {
if (enabled) {
nmClient.commitLastReInitialization(container.getId());
} else {
assertYarnException(
() -> nmClient.commitLastReInitialization(container.getId()),
"Nothing to Commit");
}
}
private void testReInitializeContainer(
Container container, ContainerLaunchContext clc, boolean autoCommit
) throws IOException, YarnException {
nmClient.reInitializeContainer(container.getId(), clc, autoCommit);
}
private void assertYarnException(Executable runnable, String text) {
YarnException e = assertThrows(YarnException.class, runnable);
assertTrue(e.getMessage().contains(text),
String.format("The thrown exception is not expected cause it has text [%s]"
+ ", what not contains text [%s]", e.getMessage(), text));
}
private void sleep(int sleepTime) {
try {
Thread.sleep(sleepTime);
} catch (InterruptedException e) {
e.printStackTrace();
throw new RuntimeException(e);
}
}
}
| DebugSumContainerStateListener |
java | elastic__elasticsearch | x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverContextTests.java | {
"start": 7141,
"end": 10859
} | class ____ extends DriverContext {
volatile Thread thread;
AssertingDriverContext() {
super(
new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, new NoneCircuitBreakerService()),
TestBlockFactory.getNonBreakingInstance()
);
}
@Override
public boolean addReleasable(Releasable releasable) {
checkThread();
return super.addReleasable(releasable);
}
@Override
public boolean removeReleasable(Releasable releasable) {
checkThread();
return super.removeReleasable(releasable);
}
@Override
public Snapshot getSnapshot() {
// can be called by either the Driver thread or the runner thread, but typically the runner
return super.getSnapshot();
}
@Override
public boolean isFinished() {
// can be called by either the Driver thread or the runner thread
return super.isFinished();
}
public void finish() {
checkThread();
super.finish();
}
void checkThread() {
if (thread == null) {
thread = Thread.currentThread();
}
assertThat(thread, equalTo(Thread.currentThread()));
}
}
record TestDriver(DriverContext driverContext, int numReleasables, BigArrays bigArrays) implements Callable<Void> {
@Override
public Void call() {
int extraToAdd = randomInt(16);
Set<Releasable> releasables = IntStream.range(0, numReleasables + extraToAdd)
.mapToObj(i -> randomReleasable(bigArrays))
.collect(toIdentitySet());
assertThat(releasables, hasSize(numReleasables + extraToAdd));
Set<Releasable> toRemove = randomNFromCollection(releasables, extraToAdd);
for (var r : releasables) {
driverContext.addReleasable(r);
if (toRemove.contains(r)) {
driverContext.removeReleasable(r);
r.close();
}
}
assertThat(driverContext.workingSet, hasSize(numReleasables));
driverContext.finish();
return null;
}
}
// Selects a number of random elements, n, from the given Set.
static <T> Set<T> randomNFromCollection(Set<T> input, int n) {
final int size = input.size();
if (n < 0 || n > size) {
throw new IllegalArgumentException(n + " is out of bounds for set of size:" + size);
}
if (n == size) {
return input;
}
Set<T> result = Collections.newSetFromMap(new IdentityHashMap<>());
Set<Integer> selected = new HashSet<>();
while (selected.size() < n) {
int idx = randomValueOtherThanMany(selected::contains, () -> randomInt(size - 1));
selected.add(idx);
result.add(input.stream().skip(idx).findFirst().get());
}
assertThat(result.size(), equalTo(n));
assertTrue(input.containsAll(result));
return result;
}
static Releasable randomReleasable(BigArrays bigArrays) {
return switch (randomInt(3)) {
case 0 -> new NoOpReleasable();
case 1 -> new ReleasablePoint(1, 2);
case 2 -> new CheckableReleasable();
case 3 -> bigArrays.newLongArray(32, false);
default -> throw new AssertionError();
};
}
record ReleasablePoint(int x, int y) implements Releasable {
@Override
public void close() {}
}
static | AssertingDriverContext |
java | google__guice | core/test/com/google/inject/RestrictedBindingSourceTest.java | {
"start": 2395,
"end": 2975
} | class ____ extends AbstractModule {
@Provides
@GatewayIpAdress
int provideIpAddress() {
return 21321566;
}
@Override
protected void configure() {
bind(String.class).annotatedWith(Hostname.class).toInstance("google.com");
}
}
@Test
public void networkLibraryCanProvideItsBindings() {
Guice.createInjector(new NetworkModule());
}
@RestrictedBindingSource(
explanation = USE_ROUTING_MODULE,
permits = {NetworkLibrary.class})
@ImplementedBy(RoutingTableImpl.class) // For testing untargetted bindings.
| NetworkModule |
java | apache__hadoop | hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AEncryptionMethods.java | {
"start": 934,
"end": 1038
} | enum ____ to centralize the encryption methods and
* the value required in the configuration.
*/
public | is |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/InexactVarargsConditionalTest.java | {
"start": 1949,
"end": 2467
} | class ____ {
public static void main(String[] args) {
Object[] a = {1, 2};
Object b = "hello";
f(0, a);
f(0, b);
for (boolean flag : new boolean[] {true, false}) {
f(0, 1, flag ? a : b);
}
}
static void f(int x, Object... xs) {
System.err.println(Arrays.deepToString(xs));
}
}
""")
.doTest();
}
}
| Test |
java | apache__camel | components/camel-spring-parent/camel-spring-xml/src/test/java/org/apache/camel/spring/processor/SpringRoutingSlipTest.java | {
"start": 1048,
"end": 1307
} | class ____ extends RoutingSlipTest {
@Override
protected CamelContext createCamelContext() throws Exception {
return createSpringCamelContext(this,
"org/apache/camel/spring/processor/routingSlip.xml");
}
}
| SpringRoutingSlipTest |
java | spring-projects__spring-security | core/src/main/java/org/springframework/security/authorization/method/MethodAuthorizationDeniedHandler.java | {
"start": 1122,
"end": 2785
} | interface ____ {
/**
* Handle denied method invocations, implementations might either throw an
* {@link org.springframework.security.authorization.AuthorizationDeniedException} or
* a replacement result instead of invoking the method, e.g. a masked value.
* @param methodInvocation the {@link MethodInvocation} related to the authorization
* denied
* @param authorizationResult the authorization denied result
* @return a replacement result for the denied method invocation, or null, or a
* {@link reactor.core.publisher.Mono} for reactive applications
*/
@Nullable Object handleDeniedInvocation(MethodInvocation methodInvocation, AuthorizationResult authorizationResult);
/**
* Handle denied method invocations, implementations might either throw an
* {@link org.springframework.security.authorization.AuthorizationDeniedException} or
* a replacement result instead of invoking the method, e.g. a masked value. By
* default, this method invokes
* {@link #handleDeniedInvocation(MethodInvocation, AuthorizationResult)}.
* @param methodInvocationResult the object containing the {@link MethodInvocation}
* and the result produced
* @param authorizationResult the authorization denied result
* @return a replacement result for the denied method invocation, or null, or a
* {@link reactor.core.publisher.Mono} for reactive applications
*/
default @Nullable Object handleDeniedInvocationResult(MethodInvocationResult methodInvocationResult,
AuthorizationResult authorizationResult) {
return handleDeniedInvocation(methodInvocationResult.getMethodInvocation(), authorizationResult);
}
}
| MethodAuthorizationDeniedHandler |
java | apache__flink | flink-table/flink-table-runtime/src/test/java/org/apache/flink/table/runtime/operators/join/stream/multijoin/StreamingMultiJoinOperatorTestBase.java | {
"start": 21222,
"end": 22729
} | class ____ extends AbstractRichFunction
implements JoinCondition {
private final int leftKeyFieldIndex;
private final int rightKeyFieldIndex;
public SpecificInputsEquiKeyCondition(int leftKeyFieldIndex, int rightKeyFieldIndex) {
this.leftKeyFieldIndex = leftKeyFieldIndex;
this.rightKeyFieldIndex = rightKeyFieldIndex;
}
@Override
public boolean apply(RowData left, RowData right) {
if (left == null || right == null) {
return false;
}
if (left.isNullAt(leftKeyFieldIndex) || right.isNullAt(rightKeyFieldIndex)) {
return false;
}
String keyLeft = left.getString(leftKeyFieldIndex).toString();
String keyRight = right.getString(rightKeyFieldIndex).toString();
return keyLeft.equals(keyRight);
}
private int calculateActualFieldIndex(int inputIndex, int fieldIndex) {
int actualIndex = fieldIndex;
// Add the arity of all previous inputs
for (int i = 0; i < inputIndex; i++) {
actualIndex += 3; // all our inputs in our test setup have 3 fields
}
return actualIndex;
}
}
/**
* Condition for comparing a Long field from the left input as greater than a Long field from
* the right input. Example: leftInput.field > rightInput.field
*/
protected static | SpecificInputsEquiKeyCondition |
java | apache__avro | lang/java/thrift/src/test/java/org/apache/avro/thrift/test/Test.java | {
"start": 53921,
"end": 65177
} | class ____ extends org.apache.thrift.scheme.StandardScheme<Test> {
public void read(org.apache.thrift.protocol.TProtocol iprot, Test struct) throws org.apache.thrift.TException {
org.apache.thrift.protocol.TField schemeField;
iprot.readStructBegin();
while (true) {
schemeField = iprot.readFieldBegin();
if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
break;
}
switch (schemeField.id) {
case 1: // BOOL_FIELD
if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
struct.boolField = iprot.readBool();
struct.setBoolFieldIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
case 2: // BYTE_FIELD
if (schemeField.type == org.apache.thrift.protocol.TType.BYTE) {
struct.byteField = iprot.readByte();
struct.setByteFieldIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
case 16: // BYTE_OPTIONAL_FIELD
if (schemeField.type == org.apache.thrift.protocol.TType.BYTE) {
struct.byteOptionalField = iprot.readByte();
struct.setByteOptionalFieldIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
case 3: // I16_FIELD
if (schemeField.type == org.apache.thrift.protocol.TType.I16) {
struct.i16Field = iprot.readI16();
struct.setI16FieldIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
case 15: // I16_OPTIONAL_FIELD
if (schemeField.type == org.apache.thrift.protocol.TType.I16) {
struct.i16OptionalField = iprot.readI16();
struct.setI16OptionalFieldIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
case 4: // I32_FIELD
if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
struct.i32Field = iprot.readI32();
struct.setI32FieldIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
case 5: // I64_FIELD
if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
struct.i64Field = iprot.readI64();
struct.setI64FieldIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
case 6: // DOUBLE_FIELD
if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) {
struct.doubleField = iprot.readDouble();
struct.setDoubleFieldIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
case 7: // STRING_FIELD
if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
struct.stringField = iprot.readString();
struct.setStringFieldIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
case 8: // BINARY_FIELD
if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
struct.binaryField = iprot.readBinary();
struct.setBinaryFieldIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
case 9: // MAP_FIELD
if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
{
org.apache.thrift.protocol.TMap _map0 = iprot.readMapBegin();
struct.mapField = new java.util.HashMap<java.lang.String, java.lang.Integer>(2 * _map0.size);
@org.apache.thrift.annotation.Nullable
java.lang.String _key1;
int _val2;
for (int _i3 = 0; _i3 < _map0.size; ++_i3) {
_key1 = iprot.readString();
_val2 = iprot.readI32();
struct.mapField.put(_key1, _val2);
}
iprot.readMapEnd();
}
struct.setMapFieldIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
case 10: // LIST_FIELD
if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
{
org.apache.thrift.protocol.TList _list4 = iprot.readListBegin();
struct.listField = new java.util.ArrayList<java.lang.Integer>(_list4.size);
int _elem5;
for (int _i6 = 0; _i6 < _list4.size; ++_i6) {
_elem5 = iprot.readI32();
struct.listField.add(_elem5);
}
iprot.readListEnd();
}
struct.setListFieldIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
case 11: // SET_FIELD
if (schemeField.type == org.apache.thrift.protocol.TType.SET) {
{
org.apache.thrift.protocol.TSet _set7 = iprot.readSetBegin();
struct.setField = new java.util.HashSet<java.lang.Integer>(2 * _set7.size);
int _elem8;
for (int _i9 = 0; _i9 < _set7.size; ++_i9) {
_elem8 = iprot.readI32();
struct.setField.add(_elem8);
}
iprot.readSetEnd();
}
struct.setSetFieldIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
case 12: // ENUM_FIELD
if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
struct.enumField = org.apache.avro.thrift.test.E.findByValue(iprot.readI32());
struct.setEnumFieldIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
case 13: // STRUCT_FIELD
if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
struct.structField = new Nested();
struct.structField.read(iprot);
struct.setStructFieldIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
case 14: // FOO_OR_BAR
if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
struct.fooOrBar = new FooOrBar();
struct.fooOrBar.read(iprot);
struct.setFooOrBarIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
iprot.readFieldEnd();
}
iprot.readStructEnd();
struct.validate();
}
/**
 * Serializes a {@code Test} struct to the supplied Thrift protocol.
 *
 * <p>Generated Thrift scheme code — do not hand-edit the emission order:
 * each field is framed by {@code writeFieldBegin}/{@code writeFieldEnd}
 * using its static field descriptor, and the struct is terminated with a
 * field-stop marker before {@code writeStructEnd}.
 *
 * <p>Emission rules visible below: primitive fields (bool/byte/i16/i64/double)
 * are written unconditionally; {@code i32Field}, {@code i16OptionalField} and
 * {@code byteOptionalField} are written only when their isSet flag is true;
 * object-typed fields (string, binary, map, list, set, enum, nested structs)
 * are written only when non-null. Collections are framed with the matching
 * {@code writeXxxBegin}/{@code writeXxxEnd} pairs, and the enum is encoded as
 * its i32 value.
 *
 * @param oprot  destination Thrift protocol to write to
 * @param struct the struct to serialize; {@code struct.validate()} is invoked
 *               before any bytes are emitted
 * @throws org.apache.thrift.TException if the protocol reports a write error
 */
public void write(org.apache.thrift.protocol.TProtocol oprot, Test struct) throws org.apache.thrift.TException {
    // Delegate pre-write sanity checks to the struct itself before emitting anything.
    struct.validate();
    oprot.writeStructBegin(STRUCT_DESC);
    // Primitive fields: always written, framed by their field descriptors.
    oprot.writeFieldBegin(BOOL_FIELD_FIELD_DESC);
    oprot.writeBool(struct.boolField);
    oprot.writeFieldEnd();
    oprot.writeFieldBegin(BYTE_FIELD_FIELD_DESC);
    oprot.writeByte(struct.byteField);
    oprot.writeFieldEnd();
    oprot.writeFieldBegin(I16_FIELD_FIELD_DESC);
    oprot.writeI16(struct.i16Field);
    oprot.writeFieldEnd();
    // Optional primitive: emitted only when its isSet flag was recorded.
    if (struct.isSetI32Field()) {
        oprot.writeFieldBegin(I32_FIELD_FIELD_DESC);
        oprot.writeI32(struct.i32Field);
        oprot.writeFieldEnd();
    }
    oprot.writeFieldBegin(I64_FIELD_FIELD_DESC);
    oprot.writeI64(struct.i64Field);
    oprot.writeFieldEnd();
    oprot.writeFieldBegin(DOUBLE_FIELD_FIELD_DESC);
    oprot.writeDouble(struct.doubleField);
    oprot.writeFieldEnd();
    // Object-typed fields: skipped entirely when null.
    if (struct.stringField != null) {
        oprot.writeFieldBegin(STRING_FIELD_FIELD_DESC);
        oprot.writeString(struct.stringField);
        oprot.writeFieldEnd();
    }
    // Double guard (generated pattern): binary is written only when both
    // non-null and explicitly marked as set.
    if (struct.binaryField != null) {
        if (struct.isSetBinaryField()) {
            oprot.writeFieldBegin(BINARY_FIELD_FIELD_DESC);
            oprot.writeBinary(struct.binaryField);
            oprot.writeFieldEnd();
        }
    }
    // Map<String, Integer>: header carries key/value wire types and entry count.
    if (struct.mapField != null) {
        oprot.writeFieldBegin(MAP_FIELD_FIELD_DESC);
        {
            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING,
                org.apache.thrift.protocol.TType.I32, struct.mapField.size()));
            for (java.util.Map.Entry<java.lang.String, java.lang.Integer> _iter10 : struct.mapField.entrySet()) {
                oprot.writeString(_iter10.getKey());
                oprot.writeI32(_iter10.getValue());
            }
            oprot.writeMapEnd();
        }
        oprot.writeFieldEnd();
    }
    // List<Integer>: header carries element wire type and element count.
    if (struct.listField != null) {
        oprot.writeFieldBegin(LIST_FIELD_FIELD_DESC);
        {
            oprot.writeListBegin(
                new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, struct.listField.size()));
            for (int _iter11 : struct.listField) {
                oprot.writeI32(_iter11);
            }
            oprot.writeListEnd();
        }
        oprot.writeFieldEnd();
    }
    // Set<Integer>: same framing shape as the list above.
    if (struct.setField != null) {
        oprot.writeFieldBegin(SET_FIELD_FIELD_DESC);
        {
            oprot.writeSetBegin(
                new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I32, struct.setField.size()));
            for (int _iter12 : struct.setField) {
                oprot.writeI32(_iter12);
            }
            oprot.writeSetEnd();
        }
        oprot.writeFieldEnd();
    }
    // Enum: encoded on the wire as its integer value.
    if (struct.enumField != null) {
        oprot.writeFieldBegin(ENUM_FIELD_FIELD_DESC);
        oprot.writeI32(struct.enumField.getValue());
        oprot.writeFieldEnd();
    }
    // Nested structs delegate to their own write(...) implementations.
    if (struct.structField != null) {
        oprot.writeFieldBegin(STRUCT_FIELD_FIELD_DESC);
        struct.structField.write(oprot);
        oprot.writeFieldEnd();
    }
    if (struct.fooOrBar != null) {
        oprot.writeFieldBegin(FOO_OR_BAR_FIELD_DESC);
        struct.fooOrBar.write(oprot);
        oprot.writeFieldEnd();
    }
    // Remaining optional primitives, guarded by their isSet flags.
    if (struct.isSetI16OptionalField()) {
        oprot.writeFieldBegin(I16_OPTIONAL_FIELD_FIELD_DESC);
        oprot.writeI16(struct.i16OptionalField);
        oprot.writeFieldEnd();
    }
    if (struct.isSetByteOptionalField()) {
        oprot.writeFieldBegin(BYTE_OPTIONAL_FIELD_FIELD_DESC);
        oprot.writeByte(struct.byteOptionalField);
        oprot.writeFieldEnd();
    }
    // Field-stop marker tells readers there are no more fields, then close the struct.
    oprot.writeFieldStop();
    oprot.writeStructEnd();
}
}
private static | TestStandardScheme |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/deser/CustomDeserializersTest.java | {
"start": 8954,
"end": 9516
} | class ____ extends DelegatingDeserializer
{
public MyStringDeserializer(ValueDeserializer<?> newDel) {
super(newDel);
}
@Override
protected ValueDeserializer<?> newDelegatingInstance(ValueDeserializer<?> newDel) {
return new MyStringDeserializer(newDel);
}
@Override
public Object deserialize(JsonParser p, DeserializationContext ctxt)
{
Object ob = _delegatee.deserialize(p, ctxt);
return "MY:"+ob;
}
}
static | MyStringDeserializer |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/node/NodeContext2049Test.java | {
"start": 1301,
"end": 1943
} | class ____ extends ValueInstantiator {
@Override
public String getValueTypeDesc() {
return List.class.getName();
}
@Override
public Object createUsingDefault(DeserializationContext ctxt) throws JacksonException {
return new ArrayList<>();
}
@Override
public ValueInstantiator createContextual(DeserializationContext ctxt,
BeanDescription.Supplier beanDescRef) {
return this;
}
@Override
public Class<?> getValueClass() {
return List.class;
}
}
static | ListValueInstantiator |
java | elastic__elasticsearch | modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureQueryBuilder.java | {
"start": 7094,
"end": 9316
} | class ____ extends ScoreFunction {
private static final ConstructingObjectParser<Sigmoid, Void> PARSER = new ConstructingObjectParser<>(
"sigmoid",
a -> new Sigmoid((Float) a[0], ((Float) a[1]).floatValue())
);
static {
PARSER.declareFloat(ConstructingObjectParser.constructorArg(), new ParseField("pivot"));
PARSER.declareFloat(ConstructingObjectParser.constructorArg(), new ParseField("exponent"));
}
private final float pivot;
private final float exp;
public Sigmoid(float pivot, float exp) {
this.pivot = pivot;
this.exp = exp;
}
private Sigmoid(StreamInput in) throws IOException {
this(in.readFloat(), in.readFloat());
}
@Override
public boolean equals(Object obj) {
if (obj == null || obj.getClass() != getClass()) {
return false;
}
Sigmoid that = (Sigmoid) obj;
return pivot == that.pivot && exp == that.exp;
}
@Override
public int hashCode() {
return Objects.hash(pivot, exp);
}
@Override
void writeTo(StreamOutput out) throws IOException {
out.writeByte((byte) 2);
out.writeFloat(pivot);
out.writeFloat(exp);
}
@Override
void doXContent(XContentBuilder builder) throws IOException {
builder.startObject("sigmoid");
builder.field("pivot", pivot);
builder.field("exponent", exp);
builder.endObject();
}
@Override
Query toQuery(String field, String feature, boolean positiveScoreImpact) {
return FeatureField.newSigmoidQuery(field, feature, DEFAULT_BOOST, pivot, exp);
}
}
/**
* A scoring function that scores documents as simply {@code S}
* where S is the indexed value of the static feature.
*/
public static | Sigmoid |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/config/MonitorResource.java | {
"start": 1657,
"end": 3018
} | class ____ implements org.apache.logging.log4j.core.util.Builder<MonitorResource> {
@PluginBuilderAttribute
@Required(message = "No URI provided")
private URI uri;
public Builder setUri(final URI uri) {
this.uri = uri;
return this;
}
@Override
public MonitorResource build() {
return new MonitorResource(uri);
}
}
private MonitorResource(final URI uri) {
this.uri = requireNonNull(uri, "uri");
if (!"file".equals(uri.getScheme())) {
final String message =
String.format("Only `file` scheme is supported in monitor resource URIs! Illegal URI: `%s`", uri);
throw new IllegalArgumentException(message);
}
}
public URI getUri() {
return uri;
}
@Override
public int hashCode() {
return uri.hashCode();
}
@Override
public boolean equals(final Object object) {
if (this == object) {
return true;
}
if (!(object instanceof MonitorResource)) {
return false;
}
final MonitorResource other = (MonitorResource) object;
return this.uri == other.uri;
}
@Override
public String toString() {
return String.format("MonitorResource{%s}", uri);
}
}
| Builder |
java | spring-projects__spring-boot | module/spring-boot-integration/src/main/java/org/springframework/boot/integration/autoconfigure/IntegrationAutoConfiguration.java | {
"start": 16324,
"end": 16499
} | class ____ {
}
@ConditionalOnProperty({ "spring.integration.rsocket.client.host",
"spring.integration.rsocket.client.port" })
static | WebSocketAddressConfigured |
java | apache__flink | flink-core/src/main/java/org/apache/flink/util/ChildFirstClassLoader.java | {
"start": 2304,
"end": 4542
} | class ____ go parent-first
for (String alwaysParentFirstPattern : alwaysParentFirstPatterns) {
if (name.startsWith(alwaysParentFirstPattern)) {
return super.loadClassWithoutExceptionHandling(name, resolve);
}
}
try {
// check the URLs
c = findClass(name);
} catch (ClassNotFoundException e) {
// let URLClassLoader do it, which will eventually call the parent
c = super.loadClassWithoutExceptionHandling(name, resolve);
}
} else if (resolve) {
resolveClass(c);
}
return c;
}
@Override
public URL getResource(String name) {
// first, try and find it via the URLClassloader
URL urlClassLoaderResource = findResource(name);
if (urlClassLoaderResource != null) {
return urlClassLoaderResource;
}
// delegate to super
return super.getResource(name);
}
@Override
public Enumeration<URL> getResources(String name) throws IOException {
// first get resources from URLClassloader
Enumeration<URL> urlClassLoaderResources = findResources(name);
final List<URL> result = new ArrayList<>();
while (urlClassLoaderResources.hasMoreElements()) {
result.add(urlClassLoaderResources.nextElement());
}
// get parent urls
Enumeration<URL> parentResources = getParent().getResources(name);
while (parentResources.hasMoreElements()) {
result.add(parentResources.nextElement());
}
return new Enumeration<URL>() {
Iterator<URL> iter = result.iterator();
public boolean hasMoreElements() {
return iter.hasNext();
}
public URL nextElement() {
return iter.next();
}
};
}
static {
ClassLoader.registerAsParallelCapable();
}
@Override
public MutableURLClassLoader copy() {
return new ChildFirstClassLoader(
getURLs(), getParent(), alwaysParentFirstPatterns, classLoadingExceptionHandler);
}
}
| should |
java | spring-projects__spring-boot | buildpack/spring-boot-buildpack-platform/src/main/java/org/springframework/boot/buildpack/platform/docker/ApiVersion.java | {
"start": 1016,
"end": 4070
} | class ____ implements Comparable<ApiVersion> {
private static final Pattern PATTERN = Pattern.compile("^v?(\\d+)\\.(\\d*)$");
private static final Comparator<ApiVersion> COMPARATOR = Comparator.comparing(ApiVersion::getMajor)
.thenComparing(ApiVersion::getMinor);
private final int major;
private final int minor;
private ApiVersion(int major, int minor) {
this.major = major;
this.minor = minor;
}
/**
* Return the major version number.
* @return the major version
*/
int getMajor() {
return this.major;
}
/**
* Return the minor version number.
* @return the minor version
*/
int getMinor() {
return this.minor;
}
/**
* Returns if this API version supports the given version. A {@code 0.x} matches only
* the same version number. A 1.x or higher release matches when the versions have the
* same major version and a minor that is equal or greater.
* @param other the version to check against
* @return if the specified API version is supported
*/
public boolean supports(ApiVersion other) {
if (equals(other)) {
return true;
}
if (this.major == 0 || this.major != other.major) {
return false;
}
return this.minor >= other.minor;
}
/**
* Returns if this API version supports any of the given versions.
* @param others the versions to check against
* @return if any of the specified API versions are supported
* @see #supports(ApiVersion)
*/
public boolean supportsAny(ApiVersion... others) {
for (ApiVersion other : others) {
if (supports(other)) {
return true;
}
}
return false;
}
@Override
public boolean equals(@Nullable Object obj) {
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
ApiVersion other = (ApiVersion) obj;
return (this.major == other.major) && (this.minor == other.minor);
}
@Override
public int hashCode() {
return this.major * 31 + this.minor;
}
@Override
public String toString() {
return this.major + "." + this.minor;
}
/**
* Factory method to parse a string into an {@link ApiVersion} instance.
* @param value the value to parse.
* @return the corresponding {@link ApiVersion}
* @throws IllegalArgumentException if the value could not be parsed
*/
public static ApiVersion parse(String value) {
Assert.hasText(value, "'value' must not be empty");
Matcher matcher = PATTERN.matcher(value);
Assert.isTrue(matcher.matches(),
() -> "'value' [%s] must contain a well formed version number".formatted(value));
try {
int major = Integer.parseInt(matcher.group(1));
int minor = Integer.parseInt(matcher.group(2));
return new ApiVersion(major, minor);
}
catch (NumberFormatException ex) {
throw new IllegalArgumentException("'value' must contain a well formed version number [" + value + "]", ex);
}
}
public static ApiVersion of(int major, int minor) {
return new ApiVersion(major, minor);
}
@Override
public int compareTo(ApiVersion other) {
return COMPARATOR.compare(this, other);
}
}
| ApiVersion |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/rest/messages/json/JobVertexIDDeserializer.java | {
"start": 1278,
"end": 1688
} | class ____ extends StdDeserializer<JobVertexID> {
private static final long serialVersionUID = 3051901462549718924L;
protected JobVertexIDDeserializer() {
super(JobVertexID.class);
}
@Override
public JobVertexID deserialize(JsonParser p, DeserializationContext ctxt) throws IOException {
return JobVertexID.fromHexString(p.getValueAsString());
}
}
| JobVertexIDDeserializer |
java | apache__kafka | core/src/main/java/kafka/server/builders/ReplicaManagerBuilder.java | {
"start": 1503,
"end": 5421
} | class ____ {
private KafkaConfig config = null;
private Metrics metrics = null;
private Time time = Time.SYSTEM;
private Scheduler scheduler = null;
private LogManager logManager = null;
private QuotaManagers quotaManagers = null;
private MetadataCache metadataCache = null;
private LogDirFailureChannel logDirFailureChannel = null;
private AlterPartitionManager alterPartitionManager = null;
private BrokerTopicStats brokerTopicStats = null;
public ReplicaManagerBuilder setConfig(KafkaConfig config) {
this.config = config;
return this;
}
public ReplicaManagerBuilder setMetrics(Metrics metrics) {
this.metrics = metrics;
return this;
}
public ReplicaManagerBuilder setTime(Time time) {
this.time = time;
return this;
}
public ReplicaManagerBuilder setScheduler(Scheduler scheduler) {
this.scheduler = scheduler;
return this;
}
public ReplicaManagerBuilder setLogManager(LogManager logManager) {
this.logManager = logManager;
return this;
}
public ReplicaManagerBuilder setQuotaManagers(QuotaManagers quotaManagers) {
this.quotaManagers = quotaManagers;
return this;
}
public ReplicaManagerBuilder setMetadataCache(MetadataCache metadataCache) {
this.metadataCache = metadataCache;
return this;
}
public ReplicaManagerBuilder setLogDirFailureChannel(LogDirFailureChannel logDirFailureChannel) {
this.logDirFailureChannel = logDirFailureChannel;
return this;
}
public ReplicaManagerBuilder setAlterPartitionManager(AlterPartitionManager alterPartitionManager) {
this.alterPartitionManager = alterPartitionManager;
return this;
}
public ReplicaManagerBuilder setBrokerTopicStats(BrokerTopicStats brokerTopicStats) {
this.brokerTopicStats = brokerTopicStats;
return this;
}
public ReplicaManager build() {
if (config == null) config = new KafkaConfig(Map.of());
if (logManager == null) throw new RuntimeException("You must set logManager");
if (metadataCache == null) throw new RuntimeException("You must set metadataCache");
if (logDirFailureChannel == null) throw new RuntimeException("You must set logDirFailureChannel");
if (alterPartitionManager == null) throw new RuntimeException("You must set alterIsrManager");
if (brokerTopicStats == null) brokerTopicStats = new BrokerTopicStats(config.remoteLogManagerConfig().isRemoteStorageSystemEnabled());
// Initialize metrics in the end just before passing it to ReplicaManager to ensure ReplicaManager closes the
// metrics correctly. There might be a resource leak if it is initialized and an exception occurs between
// its initialization and creation of ReplicaManager.
if (metrics == null) metrics = new Metrics();
return new ReplicaManager(config,
metrics,
time,
scheduler,
logManager,
Option.empty(),
quotaManagers,
metadataCache,
logDirFailureChannel,
alterPartitionManager,
brokerTopicStats,
Option.empty(),
Option.empty(),
Option.empty(),
Option.empty(),
Option.empty(),
Option.empty(),
() -> -1L,
Option.empty(),
DirectoryEventHandler.NOOP,
new DelayedActionQueue());
}
}
| ReplicaManagerBuilder |
java | spring-projects__spring-framework | spring-beans/src/main/java/org/springframework/beans/TypeConverterDelegate.java | {
"start": 1556,
"end": 1933
} | class ____ converting property values to target types.
*
* <p>Works on a given {@link PropertyEditorRegistrySupport} instance.
* Used as a delegate by {@link BeanWrapperImpl} and {@link SimpleTypeConverter}.
*
* @author Juergen Hoeller
* @author Rob Harrop
* @author Dave Syer
* @author Yanming Zhou
* @since 2.0
* @see BeanWrapperImpl
* @see SimpleTypeConverter
*/
| for |
java | apache__camel | components/camel-huawei/camel-huaweicloud-dms/src/main/java/org/apache/camel/component/huaweicloud/dms/DmsMeta.java | {
"start": 2066,
"end": 7352
} | class ____ {
public static final HttpRequestDef<CreateInstanceRequest, CreateInstanceResponse> CREATE_INSTANCE = genForcreateInstance();
public static final HttpRequestDef<DeleteInstanceRequest, DeleteInstanceResponse> DELETE_INSTANCE = genFordeleteInstance();
public static final HttpRequestDef<ListInstancesRequest, ListInstancesResponse> LIST_INSTANCES = genForlistInstances();
public static final HttpRequestDef<QueryInstanceRequest, DmsInstance> QUERY_INSTANCE = genForqueryInstance();
public static final HttpRequestDef<UpdateInstanceRequest, UpdateInstanceResponse> UPDATE_INSTANCE = genForupdateInstsance();
public static final String JSON_CONTENT_TYPE = "application/json";
public static final String INSTANCE_ID = "instance_id";
private DmsMeta() {
}
private static HttpRequestDef<CreateInstanceRequest, CreateInstanceResponse> genForcreateInstance() {
// basic
HttpRequestDef.Builder<CreateInstanceRequest, CreateInstanceResponse> builder
= HttpRequestDef.builder(HttpMethod.POST, CreateInstanceRequest.class, CreateInstanceResponse.class)
.withName("CreateInstanceKafka")
.withUri("/v1.0/{project_id}/instances")
.withContentType(JSON_CONTENT_TYPE);
// requests
builder.withRequestField("body",
LocationType.Body,
FieldExistence.NON_NULL_NON_EMPTY,
CreateInstanceRequestBody.class,
f -> f.withMarshaller(CreateInstanceRequest::getBody, CreateInstanceRequest::setBody));
return builder.build();
}
private static HttpRequestDef<DeleteInstanceRequest, DeleteInstanceResponse> genFordeleteInstance() {
// basic
HttpRequestDef.Builder<DeleteInstanceRequest, DeleteInstanceResponse> builder
= HttpRequestDef.builder(HttpMethod.DELETE, DeleteInstanceRequest.class, DeleteInstanceResponse.class)
.withName("DeleteInstance")
.withUri("/v1.0/{project_id}/instances/{instance_id}")
.withContentType(JSON_CONTENT_TYPE);
// requests
builder.withRequestField(INSTANCE_ID,
LocationType.Path,
FieldExistence.NON_NULL_NON_EMPTY,
String.class,
f -> f.withMarshaller(DeleteInstanceRequest::getInstanceId, DeleteInstanceRequest::setInstanceId));
return builder.build();
}
private static HttpRequestDef<ListInstancesRequest, ListInstancesResponse> genForlistInstances() {
// basic
HttpRequestDef.Builder<ListInstancesRequest, ListInstancesResponse> builder
= HttpRequestDef.builder(HttpMethod.GET, ListInstancesRequest.class, ListInstancesResponse.class)
.withName("ListInstances")
.withUri("/v1.0/{project_id}/instances")
.withContentType(JSON_CONTENT_TYPE);
// requests
builder.withRequestField("engine",
LocationType.Query,
FieldExistence.NULL_IGNORE,
String.class,
f -> f.withMarshaller(ListInstancesRequest::getEngine, ListInstancesRequest::setEngine));
return builder.build();
}
private static HttpRequestDef<QueryInstanceRequest, DmsInstance> genForqueryInstance() {
// basic
HttpRequestDef.Builder<QueryInstanceRequest, DmsInstance> builder
= HttpRequestDef.builder(HttpMethod.GET, QueryInstanceRequest.class, DmsInstance.class)
.withName("QueryInstance")
.withUri("/v1.0/{project_id}/instances/{instance_id}")
.withContentType(JSON_CONTENT_TYPE);
// requests
builder.withRequestField(INSTANCE_ID,
LocationType.Path,
FieldExistence.NON_NULL_NON_EMPTY,
String.class,
f -> f.withMarshaller(QueryInstanceRequest::getInstanceId, QueryInstanceRequest::setInstanceId));
return builder.build();
}
private static HttpRequestDef<UpdateInstanceRequest, UpdateInstanceResponse> genForupdateInstsance() {
// basic
HttpRequestDef.Builder<UpdateInstanceRequest, UpdateInstanceResponse> builder
= HttpRequestDef.builder(HttpMethod.PUT, UpdateInstanceRequest.class, UpdateInstanceResponse.class)
.withName("UpdateInstance")
.withUri("/v1.0/{project_id}/instances/{instance_id}")
.withContentType(JSON_CONTENT_TYPE);
// requests
builder.withRequestField(INSTANCE_ID,
LocationType.Path,
FieldExistence.NON_NULL_NON_EMPTY,
String.class,
f -> f.withMarshaller(UpdateInstanceRequest::getInstanceId, UpdateInstanceRequest::setInstanceId));
builder.withRequestField("body",
LocationType.Body,
FieldExistence.NON_NULL_NON_EMPTY,
UpdateInstanceRequestBody.class,
f -> f.withMarshaller(UpdateInstanceRequest::getBody, UpdateInstanceRequest::setBody));
return builder.build();
}
}
| DmsMeta |
java | elastic__elasticsearch | modules/lang-painless/src/test/java/org/elasticsearch/painless/BaseClassTests.java | {
"start": 19673,
"end": 23923
} | interface ____ {
ReturnsPrimitiveInt newInstance();
}
public static final ScriptContext<Factory> CONTEXT = new ScriptContext<>("returnsprimitiveint", Factory.class);
public static final String[] PARAMETERS = new String[] {};
public abstract int execute();
}
public void testReturnsPrimitiveInt() throws Exception {
assertEquals(
1,
scriptEngine.compile("testReturnsPrimitiveInt0", "1", ReturnsPrimitiveInt.CONTEXT, emptyMap()).newInstance().execute()
);
assertEquals(
1,
scriptEngine.compile("testReturnsPrimitiveInt1", "(int) 1L", ReturnsPrimitiveInt.CONTEXT, emptyMap()).newInstance().execute()
);
assertEquals(
1,
scriptEngine.compile("testReturnsPrimitiveInt2", "(int) 1.1d", ReturnsPrimitiveInt.CONTEXT, emptyMap()).newInstance().execute()
);
assertEquals(
1,
scriptEngine.compile("testReturnsPrimitiveInt3", "(int) 1.1f", ReturnsPrimitiveInt.CONTEXT, emptyMap()).newInstance().execute()
);
assertEquals(
1,
scriptEngine.compile("testReturnsPrimitiveInt4", "Integer.valueOf(1)", ReturnsPrimitiveInt.CONTEXT, emptyMap())
.newInstance()
.execute()
);
assertEquals(
1,
scriptEngine.compile("testReturnsPrimitiveInt5", "def i = 1; i", ReturnsPrimitiveInt.CONTEXT, emptyMap())
.newInstance()
.execute()
);
assertEquals(
1,
scriptEngine.compile("testReturnsPrimitiveInt6", "def i = Integer.valueOf(1); i", ReturnsPrimitiveInt.CONTEXT, emptyMap())
.newInstance()
.execute()
);
assertEquals(
2,
scriptEngine.compile("testReturnsPrimitiveInt7", "1 + 1", ReturnsPrimitiveInt.CONTEXT, emptyMap()).newInstance().execute()
);
String debug = Debugger.toString(ReturnsPrimitiveInt.class, "1", new CompilerSettings(), PAINLESS_BASE_WHITELIST);
assertThat(debug, containsString("ICONST_1"));
// The important thing here is that we have the bytecode for returning an integer instead of an object
assertThat(debug, containsString("IRETURN"));
Exception e = expectScriptThrows(
ClassCastException.class,
() -> scriptEngine.compile("testReturnsPrimitiveInt8", "1L", ReturnsPrimitiveInt.CONTEXT, emptyMap()).newInstance().execute()
);
assertEquals("Cannot cast from [long] to [int].", e.getMessage());
e = expectScriptThrows(
ClassCastException.class,
() -> scriptEngine.compile("testReturnsPrimitiveInt9", "1.1f", ReturnsPrimitiveInt.CONTEXT, emptyMap()).newInstance().execute()
);
assertEquals("Cannot cast from [float] to [int].", e.getMessage());
e = expectScriptThrows(
ClassCastException.class,
() -> scriptEngine.compile("testReturnsPrimitiveInt10", "1.1d", ReturnsPrimitiveInt.CONTEXT, emptyMap()).newInstance().execute()
);
assertEquals("Cannot cast from [double] to [int].", e.getMessage());
expectScriptThrows(
ClassCastException.class,
() -> scriptEngine.compile("testReturnsPrimitiveInt11", "def i = 1L; i", ReturnsPrimitiveInt.CONTEXT, emptyMap())
.newInstance()
.execute()
);
expectScriptThrows(
ClassCastException.class,
() -> scriptEngine.compile("testReturnsPrimitiveInt12", "def i = 1.1f; i", ReturnsPrimitiveInt.CONTEXT, emptyMap())
.newInstance()
.execute()
);
expectScriptThrows(
ClassCastException.class,
() -> scriptEngine.compile("testReturnsPrimitiveInt13", "def i = 1.1d; i", ReturnsPrimitiveInt.CONTEXT, emptyMap())
.newInstance()
.execute()
);
assertEquals(
0,
scriptEngine.compile("testReturnsPrimitiveInt14", "int i = 0", ReturnsPrimitiveInt.CONTEXT, emptyMap()).newInstance().execute()
);
}
public abstract static | Factory |
java | quarkusio__quarkus | test-framework/common/src/main/java/io/quarkus/test/common/TestStatus.java | {
"start": 40,
"end": 603
} | class ____ {
private final Throwable testErrorCause;
public TestStatus(Throwable testErrorCause) {
this.testErrorCause = testErrorCause;
}
/**
* @return the error cause that was thrown during either `BeforeAll`, `BeforeEach`, test method, `AfterAll` or
* `AfterEach` phases.
*/
public Throwable getTestErrorCause() {
return testErrorCause;
}
/**
* @return whether the test has failed.
*/
public boolean isTestFailed() {
return getTestErrorCause() != null;
}
}
| TestStatus |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateVersionIncompatibleException.java | {
"start": 1059,
"end": 1472
} | class ____ extends YarnException {
private static final long serialVersionUID = 1364408L;
public RMStateVersionIncompatibleException(Throwable cause) {
super(cause);
}
public RMStateVersionIncompatibleException(String message) {
super(message);
}
public RMStateVersionIncompatibleException(String message, Throwable cause) {
super(message, cause);
}
}
| RMStateVersionIncompatibleException |
java | quarkusio__quarkus | integration-tests/injectmock/src/test/java/io/quarkus/it/mockbean/FooBarObserver.java | {
"start": 225,
"end": 502
} | class ____ {
@Inject
Event<Foo.Bar> event;
Foo.Bar fireBar() {
Foo.Bar bar = new Foo.Bar(new ArrayList<>());
event.fire(bar);
return bar;
}
void onBar(@Observes Foo.Bar bar) {
bar.getNames().add("baz");
}
}
| FooBarObserver |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/struct/FormatFeatureUnwrapSingleTest.java | {
"start": 1783,
"end": 1951
} | class ____ {
@JsonFormat(with={ JsonFormat.Feature.WRITE_SINGLE_ELEM_ARRAYS_UNWRAPPED })
public boolean[] v = { true };
}
static | UnwrapBooleanArray |
java | grpc__grpc-java | okhttp/src/main/java/io/grpc/okhttp/PlaintextHandshakerSocketFactory.java | {
"start": 847,
"end": 1376
} | class ____ implements HandshakerSocketFactory {
@Override
public HandshakeResult handshake(Socket socket, Attributes attributes) throws IOException {
attributes = attributes.toBuilder()
.set(Grpc.TRANSPORT_ATTR_LOCAL_ADDR, socket.getLocalSocketAddress())
.set(Grpc.TRANSPORT_ATTR_REMOTE_ADDR, socket.getRemoteSocketAddress())
.set(GrpcAttributes.ATTR_SECURITY_LEVEL, SecurityLevel.NONE)
.build();
return new HandshakeResult(socket, attributes, null);
}
}
| PlaintextHandshakerSocketFactory |
java | junit-team__junit5 | junit-jupiter-api/src/main/java/org/junit/jupiter/api/ClassOrderer.java | {
"start": 5392,
"end": 6550
} | class ____ implements ClassOrderer {
public DisplayName() {
}
/**
* Sort the classes encapsulated in the supplied
* {@link ClassOrdererContext} alphanumerically based on their display
* names.
*/
@Override
public void orderClasses(ClassOrdererContext context) {
context.getClassDescriptors().sort(comparator);
}
private static final Comparator<ClassDescriptor> comparator = Comparator.comparing(
ClassDescriptor::getDisplayName);
}
/**
* {@code ClassOrderer} that sorts classes based on the {@link Order @Order}
* annotation.
*
* <p>Any classes that are assigned the same order value will be sorted
* arbitrarily adjacent to each other.
*
* <p>Any classes not annotated with {@code @Order} will be assigned the
* {@linkplain Order#DEFAULT default order} value which will effectively cause them
* to appear at the end of the sorted list, unless certain classes are assigned
* an explicit order value greater than the default order value. Any classes
* assigned an explicit order value greater than the default order value will
* appear after non-annotated classes in the sorted list.
*/
| DisplayName |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/map/MapAssert_raw_map_assertions_chained_after_base_assertions_Test.java | {
"start": 968,
"end": 3222
} | class ____ {
@SuppressWarnings({ "unchecked", "rawtypes" })
@Disabled
@Test
void raw_map_mixing_assertions_from_AbstractAssert_and_AbstractMapAssert() {
Description description = emptyDescription();
Map map1 = new java.util.HashMap();
map1.put("Key1", "Value1");
map1.put("Key2", "Value2");
// try all base assertions followed by map specific ones using generics
assertThat(map1).as("desc")
.containsOnlyKeys("Key1", "Key2")
.as(description)
.containsOnlyKeys("Key1", "Key2")
.describedAs(description)
.describedAs("describedAs")
.has(null)
.hasSameClassAs(map1)
.hasToString(map1.toString())
.is(null)
.isEqualTo(map1)
.isExactlyInstanceOf(Map.class)
.isIn(new ArrayList<>())
.isIn(Map.class)
.isInstanceOf(Map.class)
.isInstanceOfAny(Map.class, String.class)
.isNot(null)
.isNotEqualTo(null)
.isNotEmpty()
.isNotExactlyInstanceOf(String.class)
.isNotIn(new ArrayList<>())
.isNotIn(Map.class)
.isNotInstanceOf(Map.class)
.isNotInstanceOfAny(Map.class, String.class)
.isNotNull()
.isNotOfAnyClassIn(Map.class, String.class)
.isNotSameAs(null)
.isOfAnyClassIn(Map.class, String.class)
.isSameAs("")
.overridingErrorMessage("")
.withFailMessage("")
.withThreadDumpOnError()
.containsOnlyKeys("Key1", "Key2");
}
// https://github.com/assertj/assertj/issues/485
@SuppressWarnings({ "unchecked", "rawtypes" })
@Test
void test_bug_485() {
Map map1 = new java.util.HashMap<>();
map1.put("Key1", "Value1");
map1.put("Key2", "Value2");
assertThat(map1).as("").containsOnlyKeys("Key1", "Key2");
}
}
| MapAssert_raw_map_assertions_chained_after_base_assertions_Test |
java | apache__flink | flink-kubernetes/src/main/java/org/apache/flink/kubernetes/configuration/KubernetesResourceManagerDriverConfiguration.java | {
"start": 1007,
"end": 1528
} | class ____ {
private final String clusterId;
@Nullable private final String webInterfaceUrl;
public KubernetesResourceManagerDriverConfiguration(
String clusterId, @Nullable String webInterfaceUrl) {
this.clusterId = clusterId;
this.webInterfaceUrl = webInterfaceUrl;
}
public String getClusterId() {
return clusterId;
}
@Nullable
public String getWebInterfaceUrl() {
return webInterfaceUrl;
}
}
| KubernetesResourceManagerDriverConfiguration |
java | spring-projects__spring-boot | module/spring-boot-jdbc/src/test/java/org/springframework/boot/jdbc/DataSourceBuilderTests.java | {
"start": 24163,
"end": 24712
} | class ____ extends URLClassLoader {
private final String[] hiddenPackages;
HidePackagesClassLoader(String... hiddenPackages) {
super(new URL[0], HidePackagesClassLoader.class.getClassLoader());
this.hiddenPackages = hiddenPackages;
}
@Override
protected Class<?> loadClass(String name, boolean resolve) throws ClassNotFoundException {
if (Arrays.stream(this.hiddenPackages).anyMatch(name::startsWith)) {
throw new ClassNotFoundException();
}
return super.loadClass(name, resolve);
}
}
static | HidePackagesClassLoader |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/context/event/ApplicationContextEventTests.java | {
"start": 27629,
"end": 28058
} | class ____ implements ApplicationEventPublisherAware, InitializingBean {
private ApplicationEventPublisher publisher;
@Override
public void setApplicationEventPublisher(ApplicationEventPublisher applicationEventPublisher) {
this.publisher = applicationEventPublisher;
}
@Override
public void afterPropertiesSet() {
this.publisher.publishEvent(new MyEvent(this));
}
}
public static | EventPublishingInitMethod |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/input/none/NoneInput.java | {
"start": 619,
"end": 1894
} | class ____ implements Input {
public static final String TYPE = "none";
public static final NoneInput INSTANCE = new NoneInput();
private NoneInput() {}
@Override
public String type() {
return TYPE;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
return builder.startObject().endObject();
}
public static NoneInput parse(String watchId, XContentParser parser) throws IOException {
if (parser.currentToken() != XContentParser.Token.START_OBJECT) {
String formattedMessage = "could not parse [{}] input for watch [{}]. expected an empty object but found [{}] instead";
throw new ElasticsearchParseException(formattedMessage, TYPE, watchId, parser.currentToken());
}
if (parser.nextToken() != XContentParser.Token.END_OBJECT) {
String formattedMessage = "could not parse [{}] input for watch [{}]. expected an empty object but found [{}] instead";
throw new ElasticsearchParseException(formattedMessage, TYPE, watchId, parser.currentToken());
}
return INSTANCE;
}
public static Builder builder() {
return Builder.INSTANCE;
}
public static | NoneInput |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WrappedLogMetaRequest.java | {
"start": 1530,
"end": 1699
} | class ____ the request to a {@link ContainerLogsRequest}
* and calls #readAggregatedLogsMeta on the
* {@link LogAggregationFileController}.
* class.
*/
public | translates |
java | apache__maven | impl/maven-executor/src/main/java/org/apache/maven/api/cli/ExecutorRequest.java | {
"start": 1654,
"end": 6547
} | interface ____ {
/**
* The Maven command.
*/
String MVN = "mvn";
/**
* The command to execute, ie "mvn".
*/
@Nonnull
String command();
/**
* The immutable list of arguments to pass to the command.
*/
@Nonnull
List<String> arguments();
/**
* Returns the current working directory for the Maven execution.
* This is typically the directory from which Maven was invoked.
*
* @return the current working directory path
*/
@Nonnull
Path cwd();
/**
* Returns the Maven installation directory.
* This is usually set by the Maven launcher script using the "maven.home" system property.
*
* @return the Maven installation directory path
*/
@Nonnull
Path installationDirectory();
/**
* Returns the user's home directory.
* This is typically obtained from the "user.home" system property.
*
* @return the user's home directory path
*/
@Nonnull
Path userHomeDirectory();
/**
* Returns the map of Java System Properties to set before executing process.
*
* @return an Optional containing the map of Java System Properties, or empty if not specified
*/
@Nonnull
Optional<Map<String, String>> jvmSystemProperties();
/**
* Returns the map of environment variables to set before executing process.
* This property is used ONLY by executors that spawn a new JVM.
*
* @return an Optional containing the map of environment variables, or empty if not specified
*/
@Nonnull
Optional<Map<String, String>> environmentVariables();
/**
* Returns the list of extra JVM arguments to be passed to the forked process.
* These arguments allow for customization of the JVM environment in which tool will run.
* This property is used ONLY by executors that spawn a new JVM.
*
* @return an Optional containing the list of extra JVM arguments, or empty if not specified
*/
@Nonnull
Optional<List<String>> jvmArguments();
/**
* Optional provider for STD in of the Maven. If given, this provider will be piped into std input of
* Maven.
*
* @return an Optional containing the stdin provider, or empty if not specified.
*/
Optional<InputStream> stdIn();
/**
* Optional consumer for STD out of the Maven. If given, this consumer will get all output from the std out of
* Maven. Note: whether consumer gets to consume anything depends on invocation arguments passed in
* {@link #arguments()}, as if log file is set, not much will go to stdout.
*
* @return an Optional containing the stdout consumer, or empty if not specified.
*/
Optional<OutputStream> stdOut();
/**
* Optional consumer for STD err of the Maven. If given, this consumer will get all output from the std err of
* Maven. Note: whether consumer gets to consume anything depends on invocation arguments passed in
* {@link #arguments()}, as if log file is set, not much will go to stderr.
*
* @return an Optional containing the stderr consumer, or empty if not specified.
*/
Optional<OutputStream> stdErr();
/**
* Indicate if {@code ~/.mavenrc} should be skipped during execution.
* <p>
* Affected only for forked executor by adding MAVEN_SKIP_RC environment variable
*/
boolean skipMavenRc();
/**
* Returns {@link Builder} created from this instance.
*/
@Nonnull
default Builder toBuilder() {
return new Builder(
command(),
arguments(),
cwd(),
installationDirectory(),
userHomeDirectory(),
jvmSystemProperties().orElse(null),
environmentVariables().orElse(null),
jvmArguments().orElse(null),
stdIn().orElse(null),
stdOut().orElse(null),
stdErr().orElse(null),
skipMavenRc());
}
/**
* Returns new builder pre-set to run Maven. The discovery of maven home is attempted, user cwd and home are
* also discovered by standard means.
*/
@Nonnull
static Builder mavenBuilder(@Nullable Path installationDirectory) {
return new Builder(
MVN,
null,
getCanonicalPath(Paths.get(System.getProperty("user.dir"))),
installationDirectory != null
? getCanonicalPath(installationDirectory)
: discoverInstallationDirectory(),
getCanonicalPath(Paths.get(System.getProperty("user.home"))),
null,
null,
null,
null,
null,
null,
false);
}
| ExecutorRequest |
java | apache__dubbo | dubbo-rpc/dubbo-rpc-api/src/test/java/org/apache/dubbo/rpc/filter/ContextFilterTest.java | {
"start": 1558,
"end": 3095
} | class ____ {
Filter contextFilter = new ContextFilter(ApplicationModel.defaultModel());
Invoker<DemoService> invoker;
Invocation invocation;
@SuppressWarnings("unchecked")
@Test
void testSetContext() {
invocation = mock(Invocation.class);
given(invocation.getMethodName()).willReturn("$enumlength");
given(invocation.getParameterTypes()).willReturn(new Class<?>[] {Enum.class});
given(invocation.getArguments()).willReturn(new Object[] {"hello"});
given(invocation.getObjectAttachments()).willReturn(null);
invoker = mock(Invoker.class);
given(invoker.isAvailable()).willReturn(true);
given(invoker.getInterface()).willReturn(DemoService.class);
AppResponse result = new AppResponse();
result.setValue("High");
given(invoker.invoke(invocation)).willReturn(result);
URL url = URL.valueOf("test://test:11/test?group=dubbo&version=1.1");
given(invoker.getUrl()).willReturn(url);
contextFilter.invoke(invoker, invocation);
assertNotNull(RpcContext.getServiceContext().getInvoker());
}
@Test
void testWithAttachments() {
URL url = URL.valueOf("test://test:11/test?group=dubbo&version=1.1");
Invoker<DemoService> invoker = new MyInvoker<DemoService>(url);
Invocation invocation = new MockInvocation();
Result result = contextFilter.invoke(invoker, invocation);
assertNotNull(RpcContext.getServiceContext().getInvoker());
}
}
| ContextFilterTest |
java | apache__camel | components/camel-cxf/camel-cxf-soap/src/test/java/org/apache/camel/component/cxf/jaxws/CxfSchemaValidationTest.java | {
"start": 1897,
"end": 8148
} | class ____ extends CamelTestSupport {
protected static final String PORT_NAME_PROP = "portName={http://camel.apache.org/wsdl-first}soap";
protected static final String SERVICE_NAME = "{http://camel.apache.org/wsdl-first}PersonService";
protected static final String SERVICE_NAME_PROP = "serviceName=" + SERVICE_NAME;
protected static final String WSDL_URL_PROP = "wsdlURL=classpath:person.wsdl";
protected final String serviceAddressValidationEnabled = "http://localhost:" + CXFTestSupport.getPort1()
+ "/" + getClass().getSimpleName() + "/PersonService";
protected final String serviceAddressValidationDisabled = "http://localhost:" + CXFTestSupport.getPort2()
+ "/" + getClass().getSimpleName() + "/PersonService";
protected final String cxfServerUriValidationEnabled = "cxf://" + serviceAddressValidationEnabled + "?"
+ PORT_NAME_PROP + "&" + SERVICE_NAME_PROP + "&" + WSDL_URL_PROP
+ "&dataFormat=payload&schemaValidationEnabled=true";
protected final String cxfServerUriValidationDisabled
= "cxf://" + serviceAddressValidationDisabled + "?" + PORT_NAME_PROP + "&"
+ SERVICE_NAME_PROP + "&" + WSDL_URL_PROP + "&dataFormat=payload";
protected final String cxfProducerUriValidationEnabled
= "cxf://" + serviceAddressValidationDisabled + "?" + PORT_NAME_PROP + "&"
+ SERVICE_NAME_PROP + "&" + WSDL_URL_PROP + "&dataFormat=payload&schemaValidationEnabled=true";
protected final String cxfProducerUriValidationDisabled
= "cxf://" + serviceAddressValidationDisabled + "?" + PORT_NAME_PROP + "&"
+ SERVICE_NAME_PROP + "&" + WSDL_URL_PROP + "&dataFormat=payload";
protected final String clientUriValidationEnabled = "direct:validationEnabled";
protected final String clientUriValidationDisabled = "direct:validationDisabled";
protected final String notValidRequest = "<GetPerson xmlns='http://camel.apache.org/wsdl-first/types'>"
//Max Length: 30,
+ "<personId>4yLKOBllJjx4SCXRMXoNiOFEzQfCNA8BSBsyPUaQ</personId>"
+ "</GetPerson>";
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
from(cxfServerUriValidationEnabled).to("direct:result");
from(cxfServerUriValidationDisabled).to("direct:result");
from(clientUriValidationEnabled).to(cxfProducerUriValidationEnabled);
from(clientUriValidationDisabled).to(cxfProducerUriValidationDisabled);
from("direct:result")
.process(exchange -> {
String xml = "<GetPersonResponse xmlns=\"http://camel.apache.org/wsdl-first/types\">"
+ "<personId>123</personId><ssn>456</ssn><name>Donald Duck</name>"
+ "</GetPersonResponse>";
exchange.getMessage().setBody(xml);
});
}
};
}
@Test
public void schemaValidationDisabledServerTest() throws Exception {
// invoke the service with a non-valid message
try {
invokeService(serviceAddressValidationDisabled, RandomStringUtils.random(40, true, true));
} catch (SOAPFaultException e) {
fail("Do not expect an exception here");
}
}
@Test
public void schemaValidationEnabledServerTest() throws Exception {
//first, invoke service with valid message. No exception should be thrown
invokeService(serviceAddressValidationEnabled, RandomStringUtils.random(10, true, true));
// then invoke the service with a non-valid message
/*
Generate a personId string that should cause a validation error:
<simpleType name="MyStringType">
<restriction base="string">
<maxLength value="30" />
</restriction>
</simpleType>
......
<xsd:element name="personId" type="tns:MyStringType"/>
*/
try {
invokeService(serviceAddressValidationEnabled, RandomStringUtils.random(40, true, true));
fail("expect a Validation exception here");
} catch (SOAPFaultException e) {
assertEquals("the length of the value is 40, but the required maximum is 30.", e.getMessage(), "");
}
}
@Test
public void schemaValidationEnabledClientTest() {
Exchange ex = template.send(clientUriValidationEnabled, exchange -> {
exchange.getMessage().setBody(notValidRequest);
});
assertNotNull(ex.getException());
assertTrue(ex.getException().getMessage().contains("cvc-maxLength-valid"));
}
@Test
public void schemaValidationDisabledClientTest() {
Exchange ex = template.send(clientUriValidationDisabled, exchange -> {
exchange.getMessage().setBody(notValidRequest);
});
assertNull(ex.getException());
}
private void invokeService(String address, String personIdParam) throws Exception {
URL wsdlURL = getClass().getClassLoader().getResource("person.wsdl");
PersonService ss = new PersonService(wsdlURL, QName.valueOf(SERVICE_NAME));
Person client = ss.getSoap();
Client c = ClientProxy.getClient(client);
c.getInInterceptors().add(new LoggingInInterceptor());
c.getOutInterceptors().add(new LoggingOutInterceptor());
((BindingProvider) client).getRequestContext()
.put(BindingProvider.ENDPOINT_ADDRESS_PROPERTY, address);
Holder<String> personId = new Holder<>();
personId.value = personIdParam;
Holder<String> ssn = new Holder<>();
Holder<String> name = new Holder<>();
client.getPerson(personId, ssn, name);
}
}
| CxfSchemaValidationTest |
java | lettuce-io__lettuce-core | src/test/java/io/lettuce/core/dynamic/support/ParametrizedTypeInformationUnitTests.java | {
"start": 4340,
"end": 4398
} | interface ____ extends List<Integer> {
}
}
| ListOfInteger |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/ext/beans/JavaBeansAnnotationsImpl.java | {
"start": 341,
"end": 2065
} | class ____ extends JavaBeansAnnotations
{
public final static JavaBeansAnnotationsImpl instance = new JavaBeansAnnotationsImpl();
@SuppressWarnings("unused") // compiler warns, just needed side-effects
private final Class<?> _bogus;
public JavaBeansAnnotationsImpl() {
// Trigger loading of annotations that only JDK 7 has, to trigger
// early fail (see [databind#2466])
Class<?> cls = Transient.class;
cls = ConstructorProperties.class;
_bogus = cls;
}
@Override
public Boolean findTransient(Annotated a) {
Transient t = a.getAnnotation(Transient.class);
if (t != null) {
return t.value();
}
return null;
}
@Override
public Boolean hasCreatorAnnotation(Annotated a) {
ConstructorProperties props = a.getAnnotation(ConstructorProperties.class);
// 08-Nov-2015, tatu: One possible check would be to ensure there is at least
// one name iff constructor has arguments. But seems unnecessary for now.
if (props != null) {
return Boolean.TRUE;
}
return null;
}
@Override
public PropertyName findConstructorName(AnnotatedParameter p)
{
AnnotatedWithParams ctor = p.getOwner();
if (ctor != null) {
ConstructorProperties props = ctor.getAnnotation(ConstructorProperties.class);
if (props != null) {
String[] names = props.value();
int ix = p.getIndex();
if (ix < names.length) {
return PropertyName.construct(names[ix]);
}
}
}
return null;
}
}
| JavaBeansAnnotationsImpl |
java | apache__camel | components/camel-smpp/src/generated/java/org/apache/camel/component/smpp/SmppEndpointUriFactory.java | {
"start": 514,
"end": 4096
} | class ____ extends org.apache.camel.support.component.EndpointUriFactorySupport implements EndpointUriFactory {
private static final String BASE = ":host:port";
private static final String[] SCHEMES = new String[]{"smpp", "smpps"};
private static final Set<String> PROPERTY_NAMES;
private static final Set<String> SECRET_PROPERTY_NAMES;
private static final Map<String, String> MULTI_VALUE_PREFIXES;
static {
Set<String> props = new HashSet<>(45);
props.add("addressRange");
props.add("alphabet");
props.add("bridgeErrorHandler");
props.add("dataCoding");
props.add("destAddr");
props.add("destAddrNpi");
props.add("destAddrTon");
props.add("encoding");
props.add("enquireLinkTimer");
props.add("exceptionHandler");
props.add("exchangePattern");
props.add("host");
props.add("httpProxyHost");
props.add("httpProxyPassword");
props.add("httpProxyPort");
props.add("httpProxyUsername");
props.add("initialReconnectDelay");
props.add("interfaceVersion");
props.add("lazySessionCreation");
props.add("lazyStartProducer");
props.add("maxReconnect");
props.add("messageReceiverRouteId");
props.add("numberingPlanIndicator");
props.add("password");
props.add("pduProcessorDegree");
props.add("pduProcessorQueueCapacity");
props.add("port");
props.add("priorityFlag");
props.add("protocolId");
props.add("proxyHeaders");
props.add("reconnectDelay");
props.add("registeredDelivery");
props.add("replaceIfPresentFlag");
props.add("serviceType");
props.add("sessionStateListener");
props.add("singleDLR");
props.add("sourceAddr");
props.add("sourceAddrNpi");
props.add("sourceAddrTon");
props.add("splittingPolicy");
props.add("systemId");
props.add("systemType");
props.add("transactionTimer");
props.add("typeOfNumber");
props.add("usingSSL");
PROPERTY_NAMES = Collections.unmodifiableSet(props);
Set<String> secretProps = new HashSet<>(2);
secretProps.add("password");
secretProps.add("systemId");
SECRET_PROPERTY_NAMES = Collections.unmodifiableSet(secretProps);
MULTI_VALUE_PREFIXES = Collections.emptyMap();
}
@Override
public boolean isEnabled(String scheme) {
for (String s : SCHEMES) {
if (s.equals(scheme)) {
return true;
}
}
return false;
}
@Override
public String buildUri(String scheme, Map<String, Object> properties, boolean encode) throws URISyntaxException {
String syntax = scheme + BASE;
String uri = syntax;
Map<String, Object> copy = new HashMap<>(properties);
uri = buildPathParameter(syntax, uri, "host", "localhost", false, copy);
uri = buildPathParameter(syntax, uri, "port", 2775, false, copy);
uri = buildQueryParameters(uri, copy, encode);
return uri;
}
@Override
public Set<String> propertyNames() {
return PROPERTY_NAMES;
}
@Override
public Set<String> secretPropertyNames() {
return SECRET_PROPERTY_NAMES;
}
@Override
public Map<String, String> multiValuePrefixes() {
return MULTI_VALUE_PREFIXES;
}
@Override
public boolean isLenientProperties() {
return true;
}
}
| SmppEndpointUriFactory |
java | apache__camel | components/camel-jsonpath/src/main/java/org/apache/camel/jsonpath/JsonPathAdapter.java | {
"start": 942,
"end": 1741
} | interface ____ {
/**
* Initializes the adapter
*
* @param camelContext the CamelContext
*/
void init(CamelContext camelContext);
/**
* Attempt to read/convert the message body into a {@link Map} type
*
* @param body the message body
* @param exchange the Camel exchange
* @return converted as {@link Map} or <tt>null</tt> if not possible
*/
Map readValue(Object body, Exchange exchange);
/**
* Attempts to write the value as a JSOn {@link String} value.
*
* @param value the value
* @param exchange the Camel exchange
* @return written as {@link String} JSON or <tt>null</tt> if not possible
*/
String writeAsString(Object value, Exchange exchange);
}
| JsonPathAdapter |
java | ReactiveX__RxJava | src/test/java/io/reactivex/rxjava3/internal/operators/maybe/MaybeFlatMapSingleTest.java | {
"start": 925,
"end": 5365
} | class ____ extends RxJavaTest {
@Test
public void flatMapSingleValue() {
Maybe.just(1).flatMapSingle(new Function<Integer, SingleSource<Integer>>() {
@Override public SingleSource<Integer> apply(final Integer integer) throws Exception {
if (integer == 1) {
return Single.just(2);
}
return Single.just(1);
}
})
.toSingle()
.test()
.assertResult(2);
}
@Test
public void flatMapSingleValueDifferentType() {
Maybe.just(1).flatMapSingle(new Function<Integer, SingleSource<String>>() {
@Override public SingleSource<String> apply(final Integer integer) throws Exception {
if (integer == 1) {
return Single.just("2");
}
return Single.just("1");
}
})
.toSingle()
.test()
.assertResult("2");
}
@Test
public void flatMapSingleValueNull() {
Maybe.just(1).flatMapSingle(new Function<Integer, SingleSource<Integer>>() {
@Override public SingleSource<Integer> apply(final Integer integer) throws Exception {
return null;
}
})
.toSingle()
.to(TestHelper.<Integer>testConsumer())
.assertNoValues()
.assertError(NullPointerException.class)
.assertErrorMessage("The mapper returned a null SingleSource");
}
@Test
public void flatMapSingleValueErrorThrown() {
Maybe.just(1).flatMapSingle(new Function<Integer, SingleSource<Integer>>() {
@Override public SingleSource<Integer> apply(final Integer integer) throws Exception {
throw new RuntimeException("something went terribly wrong!");
}
})
.toSingle()
.to(TestHelper.<Integer>testConsumer())
.assertNoValues()
.assertError(RuntimeException.class)
.assertErrorMessage("something went terribly wrong!");
}
@Test
public void flatMapSingleError() {
RuntimeException exception = new RuntimeException("test");
Maybe.error(exception).flatMapSingle(new Function<Object, SingleSource<Object>>() {
@Override public SingleSource<Object> apply(final Object integer) throws Exception {
return Single.just(new Object());
}
})
.toSingle()
.test()
.assertError(exception);
}
@Test
public void flatMapSingleEmpty() {
Maybe.<Integer>empty().flatMapSingle(new Function<Integer, SingleSource<Integer>>() {
@Override public SingleSource<Integer> apply(final Integer integer) throws Exception {
return Single.just(2);
}
})
.toSingle()
.test()
.assertNoValues()
.assertError(NoSuchElementException.class);
}
@Test
public void dispose() {
TestHelper.checkDisposed(Maybe.just(1).flatMapSingle(new Function<Integer, SingleSource<Integer>>() {
@Override
public SingleSource<Integer> apply(final Integer integer) throws Exception {
return Single.just(2);
}
}).toSingle());
}
@Test
public void doubleOnSubscribe() {
TestHelper.checkDoubleOnSubscribeMaybeToSingle(new Function<Maybe<Integer>, SingleSource<Integer>>() {
@Override
public SingleSource<Integer> apply(Maybe<Integer> m) throws Exception {
return m.flatMapSingle(new Function<Integer, SingleSource<Integer>>() {
@Override
public SingleSource<Integer> apply(final Integer integer) throws Exception {
return Single.just(2);
}
}).toSingle();
}
});
}
@Test
public void singleErrors() {
Maybe.just(1)
.flatMapSingle(new Function<Integer, SingleSource<Integer>>() {
@Override
public SingleSource<Integer> apply(final Integer integer) throws Exception {
return Single.error(new TestException());
}
})
.toSingle()
.test()
.assertFailure(TestException.class);
}
}
| MaybeFlatMapSingleTest |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/inheritance/join/AttributeJoinWithSingleTableInheritanceTest.java | {
"start": 10286,
"end": 10480
} | class ____ extends ChildEntityA {
public SubChildEntityA2() {
}
public SubChildEntityA2(Integer id) {
super( id );
}
}
@Entity( name = "ChildEntityB" )
public static | SubChildEntityA2 |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestTimedOutTestsListener.java | {
"start": 3941,
"end": 4998
} | class ____ {
String name;
Monitor(String name) {
this.name = name;
}
}
}
@Test
@Timeout(value = 30)
public void testThreadDumpAndDeadlocks() throws Exception {
new Deadlock();
String s = null;
while (true) {
s = TimedOutTestsListener.buildDeadlockInfo();
if (s != null)
break;
Thread.sleep(100);
}
assertEquals(3, countStringOccurrences(s, "BLOCKED"));
RuntimeException failure =
new RuntimeException(TimedOutTestsListener.TEST_TIMED_OUT_PREFIX);
StringWriter writer = new StringWriter();
new TimedOutTestsListener(new PrintWriter(writer)).testFailure(failure);
String out = writer.toString();
assertTrue(out.contains("THREAD DUMP"));
assertTrue(out.contains("DEADLOCKS DETECTED"));
System.out.println(out);
}
private int countStringOccurrences(String s, String substr) {
int n = 0;
int index = 0;
while ((index = s.indexOf(substr, index) + 1) != 0) {
n++;
}
return n;
}
}
| Monitor |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfig.java | {
"start": 27119,
"end": 33525
} | class ____ {
private String id;
private SourceConfig source;
private DestConfig dest;
private TimeValue frequency;
private SyncConfig syncConfig;
private String description;
private Map<String, String> headers;
private TransformConfigVersion transformVersion;
private Instant createTime;
private PivotConfig pivotConfig;
private LatestConfig latestConfig;
private SettingsConfig settings;
private Map<String, Object> metadata;
private RetentionPolicyConfig retentionPolicyConfig;
public Builder() {}
public Builder(TransformConfig config) {
this.id = config.id;
this.source = config.source;
this.dest = config.dest;
this.frequency = config.frequency;
this.syncConfig = config.syncConfig;
this.description = config.description;
this.transformVersion = config.transformVersion;
this.createTime = config.createTime;
this.pivotConfig = config.pivotConfig;
this.latestConfig = config.latestConfig;
this.settings = config.settings;
this.metadata = config.metadata;
this.retentionPolicyConfig = config.retentionPolicyConfig;
}
public Builder setId(String id) {
this.id = id;
return this;
}
String getId() {
return id;
}
public Builder setSource(SourceConfig source) {
this.source = source;
return this;
}
SourceConfig getSource() {
return source;
}
public Builder setDest(DestConfig dest) {
this.dest = dest;
return this;
}
DestConfig getDest() {
return dest;
}
public Builder setFrequency(TimeValue frequency) {
this.frequency = frequency;
return this;
}
TimeValue getFrequency() {
return frequency;
}
public Builder setSyncConfig(SyncConfig syncConfig) {
this.syncConfig = syncConfig;
return this;
}
SyncConfig getSyncConfig() {
return syncConfig;
}
public Builder setDescription(String description) {
this.description = description;
return this;
}
String getDescription() {
return description;
}
public Builder setSettings(SettingsConfig settings) {
this.settings = settings;
return this;
}
SettingsConfig getSettings() {
return settings;
}
public Builder setMetadata(Map<String, Object> metadata) {
this.metadata = metadata;
return this;
}
Map<String, Object> getMetadata() {
return metadata;
}
public Builder setHeaders(Map<String, String> headers) {
this.headers = headers;
return this;
}
public Map<String, String> getHeaders() {
return headers;
}
public Builder setPivotConfig(PivotConfig pivotConfig) {
this.pivotConfig = pivotConfig;
return this;
}
PivotConfig getPivotConfig() {
return pivotConfig;
}
public Builder setLatestConfig(LatestConfig latestConfig) {
this.latestConfig = latestConfig;
return this;
}
public LatestConfig getLatestConfig() {
return latestConfig;
}
Builder setVersion(TransformConfigVersion version) {
this.transformVersion = version;
return this;
}
TransformConfigVersion getVersion() {
return transformVersion;
}
public Builder setRetentionPolicyConfig(RetentionPolicyConfig retentionPolicyConfig) {
this.retentionPolicyConfig = retentionPolicyConfig;
return this;
}
public TransformConfig build() {
return new TransformConfig(
id,
source,
dest,
frequency,
syncConfig,
headers,
pivotConfig,
latestConfig,
description,
settings,
metadata,
retentionPolicyConfig,
createTime,
transformVersion == null ? null : transformVersion.toString()
);
}
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
if (other == null || getClass() != other.getClass()) {
return false;
}
final TransformConfig.Builder that = (TransformConfig.Builder) other;
return Objects.equals(this.id, that.id)
&& Objects.equals(this.source, that.source)
&& Objects.equals(this.dest, that.dest)
&& Objects.equals(this.frequency, that.frequency)
&& Objects.equals(this.syncConfig, that.syncConfig)
&& Objects.equals(this.headers, that.headers)
&& Objects.equals(this.pivotConfig, that.pivotConfig)
&& Objects.equals(this.latestConfig, that.latestConfig)
&& Objects.equals(this.description, that.description)
&& Objects.equals(this.settings, that.settings)
&& Objects.equals(this.metadata, that.metadata)
&& Objects.equals(this.retentionPolicyConfig, that.retentionPolicyConfig)
&& Objects.equals(this.createTime, that.createTime)
&& Objects.equals(this.transformVersion, that.transformVersion);
}
@Override
public int hashCode() {
return Objects.hash(
id,
source,
dest,
frequency,
syncConfig,
headers,
pivotConfig,
latestConfig,
description,
settings,
metadata,
retentionPolicyConfig,
createTime,
transformVersion
);
}
}
}
| Builder |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/EnabledFetchProfile.java | {
"start": 433,
"end": 545
} | class ____ with the
/// [@FetchProfile][org.hibernate.annotations.FetchProfile].
///
/// For example, this | annotated |
java | google__guava | android/guava/src/com/google/common/cache/CacheBuilderSpec.java | {
"start": 17386,
"end": 17791
} | class ____ extends DurationParser {
@Override
protected void parseDuration(CacheBuilderSpec spec, long duration, TimeUnit unit) {
checkArgument(spec.accessExpirationTimeUnit == null, "expireAfterAccess already set");
spec.accessExpirationDuration = duration;
spec.accessExpirationTimeUnit = unit;
}
}
/** Parse expireAfterWrite */
private static final | AccessDurationParser |
java | quarkusio__quarkus | extensions/kubernetes-service-binding/runtime/src/main/java/io/quarkus/kubernetes/service/binding/runtime/KubernetesServiceBindingConfigSourceFactoryBuilder.java | {
"start": 168,
"end": 447
} | class ____ implements ConfigBuilder {
@Override
public SmallRyeConfigBuilder configBuilder(final SmallRyeConfigBuilder builder) {
return builder.withSources(new KubernetesServiceBindingConfigSourceFactory());
}
}
| KubernetesServiceBindingConfigSourceFactoryBuilder |
java | spring-projects__spring-boot | module/spring-boot-devtools/src/intTest/java/org/springframework/boot/devtools/tests/DevToolsWithLazyInitializationIntegrationTests.java | {
"start": 1056,
"end": 2409
} | class ____ extends AbstractDevToolsIntegrationTests {
@ParameterizedTest(name = "{0}")
@MethodSource("parameters")
void addARequestMappingToAnExistingControllerWhenLazyInit(ApplicationLauncher applicationLauncher)
throws Exception {
launchApplication(applicationLauncher, "--spring.main.lazy-initialization=true");
TestRestTemplate template = new TestRestTemplate();
String urlBase = "http://localhost:" + awaitServerPort();
assertThat(template.getForObject(urlBase + "/one", String.class)).isEqualTo("one");
assertThat(template.getForEntity(urlBase + "/two", String.class).getStatusCode())
.isEqualTo(HttpStatus.NOT_FOUND);
controller("com.example.ControllerOne").withRequestMapping("one").withRequestMapping("two").build();
urlBase = "http://localhost:" + awaitServerPort();
assertThat(template.getForObject(urlBase + "/one", String.class)).isEqualTo("one");
assertThat(template.getForObject(urlBase + "/two", String.class)).isEqualTo("two");
}
static Object[] parameters() {
Directories directories = new Directories(buildOutput, temp);
return new Object[] { new Object[] { new LocalApplicationLauncher(directories) },
new Object[] { new ExplodedRemoteApplicationLauncher(directories) },
new Object[] { new JarFileRemoteApplicationLauncher(directories) } };
}
}
| DevToolsWithLazyInitializationIntegrationTests |
java | grpc__grpc-java | api/src/main/java/io/grpc/KnownLength.java | {
"start": 1003,
"end": 1211
} | interface ____ {
/**
* Returns the total number of bytes that can be read (or skipped over) from this object until all
* bytes have been read out.
*/
int available() throws IOException;
}
| KnownLength |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java | {
"start": 25033,
"end": 25088
} | class ____ DiskBalancer.
* <p>
*/
public static | for |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java | {
"start": 1994,
"end": 3500
} | enum ____ {
/**
* REST type API keys can authenticate on the HTTP interface
*/
REST,
/**
* Cross cluster type API keys can authenticate on the dedicated remote cluster server interface
*/
CROSS_CLUSTER;
public static Type parse(String value) {
return switch (value.toLowerCase(Locale.ROOT)) {
case "rest" -> REST;
case "cross_cluster" -> CROSS_CLUSTER;
default -> throw new IllegalArgumentException(
"invalid API key type ["
+ value
+ "] expected one of ["
+ Stream.of(values()).map(Type::value).collect(Collectors.joining(","))
+ "]"
);
};
}
public static Type fromXContent(XContentParser parser) throws IOException {
XContentParser.Token token = parser.currentToken();
if (token == null) {
token = parser.nextToken();
}
XContentParserUtils.ensureExpectedToken(XContentParser.Token.VALUE_STRING, token, parser);
return parse(parser.text());
}
public String value() {
return name().toLowerCase(Locale.ROOT);
}
}
public record Version(int version) implements VersionId<Version> {
@Override
public int id() {
return version;
}
}
public static | Type |
java | apache__hadoop | hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/ClientThrottlingAnalyzer.java | {
"start": 3065,
"end": 8860
} | class ____
* the specified name and period.
*
* @param name A name used to identify this instance.
*
* @param period The frequency, in milliseconds, at which metrics are
* analyzed.
*
* @throws IllegalArgumentException
* If name is null or empty.
* If period is less than 1000 or greater than 30000 milliseconds.
*/
ClientThrottlingAnalyzer(String name, int period)
throws IllegalArgumentException {
Preconditions.checkArgument(
StringUtils.isNotEmpty(name),
"The argument 'name' cannot be null or empty.");
Preconditions.checkArgument(
period >= MIN_ANALYSIS_PERIOD_MS && period <= MAX_ANALYSIS_PERIOD_MS,
"The argument 'period' must be between 1000 and 30000.");
this.name = name;
this.analysisPeriodMs = period;
this.blobMetrics = new AtomicReference<BlobOperationMetrics>(
new BlobOperationMetrics(System.currentTimeMillis()));
this.timer = new Timer(
String.format("wasb-timer-client-throttling-analyzer-%s", name), true);
this.timer.schedule(new TimerTaskImpl(),
analysisPeriodMs,
analysisPeriodMs);
}
/**
* Updates metrics with results from the current storage operation.
*
* @param count The count of bytes transferred.
*
* @param isFailedOperation True if the operation failed; otherwise false.
*/
public void addBytesTransferred(long count, boolean isFailedOperation) {
BlobOperationMetrics metrics = blobMetrics.get();
if (isFailedOperation) {
metrics.bytesFailed.addAndGet(count);
metrics.operationsFailed.incrementAndGet();
} else {
metrics.bytesSuccessful.addAndGet(count);
metrics.operationsSuccessful.incrementAndGet();
}
}
/**
* Suspends the current storage operation, as necessary, to reduce throughput.
*/
public void suspendIfNecessary() {
int duration = sleepDuration;
if (duration > 0) {
try {
Thread.sleep(duration);
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
}
}
}
@VisibleForTesting
int getSleepDuration() {
return sleepDuration;
}
private int analyzeMetricsAndUpdateSleepDuration(BlobOperationMetrics metrics,
int sleepDuration) {
final double percentageConversionFactor = 100;
double bytesFailed = metrics.bytesFailed.get();
double bytesSuccessful = metrics.bytesSuccessful.get();
double operationsFailed = metrics.operationsFailed.get();
double operationsSuccessful = metrics.operationsSuccessful.get();
double errorPercentage = (bytesFailed <= 0)
? 0
: percentageConversionFactor
* bytesFailed
/ (bytesFailed + bytesSuccessful);
long periodMs = metrics.endTime - metrics.startTime;
double newSleepDuration;
if (errorPercentage < MIN_ACCEPTABLE_ERROR_PERCENTAGE) {
++consecutiveNoErrorCount;
// Decrease sleepDuration in order to increase throughput.
double reductionFactor =
(consecutiveNoErrorCount * analysisPeriodMs
>= RAPID_SLEEP_DECREASE_TRANSITION_PERIOD_MS)
? RAPID_SLEEP_DECREASE_FACTOR
: SLEEP_DECREASE_FACTOR;
newSleepDuration = sleepDuration * reductionFactor;
} else if (errorPercentage < MAX_EQUILIBRIUM_ERROR_PERCENTAGE) {
// Do not modify sleepDuration in order to stabilize throughput.
newSleepDuration = sleepDuration;
} else {
// Increase sleepDuration in order to minimize error rate.
consecutiveNoErrorCount = 0;
// Increase sleep duration in order to reduce throughput and error rate.
// First, calculate target throughput: bytesSuccessful / periodMs.
// Next, calculate time required to send *all* data (assuming next period
// is similar to previous) at the target throughput: (bytesSuccessful
// + bytesFailed) * periodMs / bytesSuccessful. Next, subtract periodMs to
// get the total additional delay needed.
double additionalDelayNeeded = 5 * analysisPeriodMs;
if (bytesSuccessful > 0) {
additionalDelayNeeded = (bytesSuccessful + bytesFailed)
* periodMs
/ bytesSuccessful
- periodMs;
}
// amortize the additional delay needed across the estimated number of
// requests during the next period
newSleepDuration = additionalDelayNeeded
/ (operationsFailed + operationsSuccessful);
final double maxSleepDuration = analysisPeriodMs;
final double minSleepDuration = sleepDuration * SLEEP_INCREASE_FACTOR;
// Add 1 ms to avoid rounding down and to decrease proximity to the server
// side ingress/egress limit. Ensure that the new sleep duration is
// larger than the current one to more quickly reduce the number of
// errors. Don't allow the sleep duration to grow unbounded, after a
// certain point throttling won't help, for example, if there are far too
// many tasks/containers/nodes no amount of throttling will help.
newSleepDuration = Math.max(newSleepDuration, minSleepDuration) + 1;
newSleepDuration = Math.min(newSleepDuration, maxSleepDuration);
}
if (LOG.isDebugEnabled()) {
LOG.debug(String.format(
"%5.5s, %10d, %10d, %10d, %10d, %6.2f, %5d, %5d, %5d",
name,
(int) bytesFailed,
(int) bytesSuccessful,
(int) operationsFailed,
(int) operationsSuccessful,
errorPercentage,
periodMs,
(int) sleepDuration,
(int) newSleepDuration));
}
return (int) newSleepDuration;
}
/**
* Timer callback implementation for periodically analyzing metrics.
*/
| with |
java | bumptech__glide | library/src/main/java/com/bumptech/glide/load/resource/bitmap/BitmapTransformation.java | {
"start": 4713,
"end": 5653
} | class ____ them to be the
* size of the Bitmap we're going to transform before calling this method.
*
* @param pool A {@link com.bumptech.glide.load.engine.bitmap_recycle.BitmapPool} that can be used
* to obtain and return intermediate {@link Bitmap}s used in this transformation. For every
* {@link android.graphics.Bitmap} obtained from the pool during this transformation, a {@link
* android.graphics.Bitmap} must also be returned.
* @param toTransform The {@link android.graphics.Bitmap} to transform.
* @param outWidth The ideal width of the transformed bitmap (the transformed width does not need
* to match exactly).
* @param outHeight The ideal height of the transformed bitmap (the transformed height does not
* need to match exactly).
*/
protected abstract Bitmap transform(
@NonNull BitmapPool pool, @NonNull Bitmap toTransform, int outWidth, int outHeight);
}
| converts |
java | apache__avro | lang/java/avro/src/test/java/org/apache/avro/TestSchemaCompatibility.java | {
"start": 5757,
"end": 19144
} | class ____ {
private static final Logger LOG = LoggerFactory.getLogger(TestSchemaCompatibility.class);
// -----------------------------------------------------------------------------------------------
private static final Schema WRITER_SCHEMA = Schema.createRecord(list(
new Schema.Field("oldfield1", INT_SCHEMA, null, null), new Schema.Field("oldfield2", STRING_SCHEMA, null, null)));
@Test
void validateSchemaPairMissingField() {
final List<Field> readerFields = list(new Schema.Field("oldfield1", INT_SCHEMA, null, null));
final Schema reader = Schema.createRecord(null, null, null, false, readerFields);
final SchemaCompatibility.SchemaPairCompatibility expectedResult = new SchemaCompatibility.SchemaPairCompatibility(
SchemaCompatibility.SchemaCompatibilityResult.compatible(), reader, WRITER_SCHEMA,
SchemaCompatibility.READER_WRITER_COMPATIBLE_MESSAGE);
// Test omitting a field.
assertEquals(expectedResult, checkReaderWriterCompatibility(reader, WRITER_SCHEMA));
}
@Test
void validateSchemaPairMissingSecondField() {
final List<Schema.Field> readerFields = list(new Schema.Field("oldfield2", STRING_SCHEMA, null, null));
final Schema reader = Schema.createRecord(null, null, null, false, readerFields);
final SchemaCompatibility.SchemaPairCompatibility expectedResult = new SchemaCompatibility.SchemaPairCompatibility(
SchemaCompatibility.SchemaCompatibilityResult.compatible(), reader, WRITER_SCHEMA,
SchemaCompatibility.READER_WRITER_COMPATIBLE_MESSAGE);
// Test omitting other field.
assertEquals(expectedResult, checkReaderWriterCompatibility(reader, WRITER_SCHEMA));
}
@Test
void validateSchemaPairAllFields() {
final List<Schema.Field> readerFields = list(new Schema.Field("oldfield1", INT_SCHEMA, null, null),
new Schema.Field("oldfield2", STRING_SCHEMA, null, null));
final Schema reader = Schema.createRecord(null, null, null, false, readerFields);
final SchemaCompatibility.SchemaPairCompatibility expectedResult = new SchemaCompatibility.SchemaPairCompatibility(
SchemaCompatibility.SchemaCompatibilityResult.compatible(), reader, WRITER_SCHEMA,
SchemaCompatibility.READER_WRITER_COMPATIBLE_MESSAGE);
// Test with all fields.
assertEquals(expectedResult, checkReaderWriterCompatibility(reader, WRITER_SCHEMA));
}
@Test
void validateSchemaNewFieldWithDefault() {
final List<Schema.Field> readerFields = list(new Schema.Field("oldfield1", INT_SCHEMA, null, null),
new Schema.Field("newfield1", INT_SCHEMA, null, 42));
final Schema reader = Schema.createRecord(null, null, null, false, readerFields);
final SchemaCompatibility.SchemaPairCompatibility expectedResult = new SchemaCompatibility.SchemaPairCompatibility(
SchemaCompatibility.SchemaCompatibilityResult.compatible(), reader, WRITER_SCHEMA,
SchemaCompatibility.READER_WRITER_COMPATIBLE_MESSAGE);
// Test new field with default value.
assertEquals(expectedResult, checkReaderWriterCompatibility(reader, WRITER_SCHEMA));
}
@Test
void validateSchemaNewField() {
final List<Schema.Field> readerFields = list(new Schema.Field("oldfield1", INT_SCHEMA, null, null),
new Schema.Field("newfield1", INT_SCHEMA, null, null));
final Schema reader = Schema.createRecord(null, null, null, false, readerFields);
SchemaPairCompatibility compatibility = checkReaderWriterCompatibility(reader, WRITER_SCHEMA);
// Test new field without default value.
assertEquals(SchemaCompatibility.SchemaCompatibilityType.INCOMPATIBLE, compatibility.getType());
assertEquals(SchemaCompatibility.SchemaCompatibilityResult.incompatible(
SchemaIncompatibilityType.READER_FIELD_MISSING_DEFAULT_VALUE, reader, WRITER_SCHEMA, "newfield1",
asList("", "fields", "1")), compatibility.getResult());
assertEquals(String.format(
"Data encoded using writer schema:%n%s%n" + "will or may fail to decode using reader schema:%n%s%n",
WRITER_SCHEMA.toString(true), reader.toString(true)), compatibility.getDescription());
assertEquals(reader, compatibility.getReader());
assertEquals(WRITER_SCHEMA, compatibility.getWriter());
}
@Test
void validateArrayWriterSchema() {
final Schema validReader = Schema.createArray(STRING_SCHEMA);
final Schema invalidReader = Schema.createMap(STRING_SCHEMA);
final SchemaCompatibility.SchemaPairCompatibility validResult = new SchemaCompatibility.SchemaPairCompatibility(
SchemaCompatibility.SchemaCompatibilityResult.compatible(), validReader, STRING_ARRAY_SCHEMA,
SchemaCompatibility.READER_WRITER_COMPATIBLE_MESSAGE);
final SchemaCompatibility.SchemaPairCompatibility invalidResult = new SchemaCompatibility.SchemaPairCompatibility(
SchemaCompatibility.SchemaCompatibilityResult.incompatible(
SchemaIncompatibilityType.TYPE_MISMATCH, invalidReader, STRING_ARRAY_SCHEMA,
"reader type: MAP not compatible with writer type: ARRAY", Collections.singletonList("")),
invalidReader, STRING_ARRAY_SCHEMA,
String.format(
"Data encoded using writer schema:%n%s%n" + "will or may fail to decode using reader schema:%n%s%n",
STRING_ARRAY_SCHEMA.toString(true), invalidReader.toString(true)));
assertEquals(validResult, checkReaderWriterCompatibility(validReader, STRING_ARRAY_SCHEMA));
assertEquals(invalidResult, checkReaderWriterCompatibility(invalidReader, STRING_ARRAY_SCHEMA));
}
@Test
void validatePrimitiveWriterSchema() {
final Schema validReader = Schema.create(Schema.Type.STRING);
final SchemaCompatibility.SchemaPairCompatibility validResult = new SchemaCompatibility.SchemaPairCompatibility(
SchemaCompatibility.SchemaCompatibilityResult.compatible(), validReader, STRING_SCHEMA,
SchemaCompatibility.READER_WRITER_COMPATIBLE_MESSAGE);
final SchemaCompatibility.SchemaPairCompatibility invalidResult = new SchemaCompatibility.SchemaPairCompatibility(
SchemaCompatibility.SchemaCompatibilityResult.incompatible(SchemaIncompatibilityType.TYPE_MISMATCH, INT_SCHEMA,
STRING_SCHEMA, "reader type: INT not compatible with writer type: STRING", Collections.singletonList("")),
INT_SCHEMA, STRING_SCHEMA,
String.format(
"Data encoded using writer schema:%n%s%n" + "will or may fail to decode using reader schema:%n%s%n",
STRING_SCHEMA.toString(true), INT_SCHEMA.toString(true)));
assertEquals(validResult, checkReaderWriterCompatibility(validReader, STRING_SCHEMA));
assertEquals(invalidResult, checkReaderWriterCompatibility(INT_SCHEMA, STRING_SCHEMA));
}
/**
* Reader union schema must contain all writer union branches.
*/
@Test
void unionReaderWriterSubsetIncompatibility() {
final Schema unionWriter = Schema.createUnion(list(INT_SCHEMA, STRING_SCHEMA, LONG_SCHEMA));
final Schema unionReader = Schema.createUnion(list(INT_SCHEMA, STRING_SCHEMA));
final SchemaPairCompatibility result = checkReaderWriterCompatibility(unionReader, unionWriter);
assertEquals(SchemaCompatibilityType.INCOMPATIBLE, result.getType());
assertEquals("/2", result.getResult().getIncompatibilities().get(0).getLocation());
}
@Test
void unionWriterSimpleReaderIncompatibility() {
Schema mandatorySchema = SchemaBuilder.record("Account").fields().name("age").type().intType().noDefault()
.endRecord();
Schema optionalSchema = SchemaBuilder.record("Account").fields().optionalInt("age").endRecord();
SchemaPairCompatibility compatibility = checkReaderWriterCompatibility(mandatorySchema, optionalSchema);
assertEquals(SchemaCompatibilityType.INCOMPATIBLE, compatibility.getType());
Incompatibility incompatibility = compatibility.getResult().getIncompatibilities().get(0);
assertEquals("reader type: INT not compatible with writer type: NULL", incompatibility.getMessage());
assertEquals("/fields/0/type/0", incompatibility.getLocation());
}
// -----------------------------------------------------------------------------------------------
/**
* Collection of reader/writer schema pair that are compatible.
*/
public static final List<ReaderWriter> COMPATIBLE_READER_WRITER_TEST_CASES = list(
new ReaderWriter(BOOLEAN_SCHEMA, BOOLEAN_SCHEMA),
new ReaderWriter(INT_SCHEMA, INT_SCHEMA),
new ReaderWriter(LONG_SCHEMA, INT_SCHEMA), new ReaderWriter(LONG_SCHEMA, LONG_SCHEMA),
// Avro spec says INT/LONG can be promoted to FLOAT/DOUBLE.
// This is arguable as this causes a loss of precision.
new ReaderWriter(FLOAT_SCHEMA, INT_SCHEMA), new ReaderWriter(FLOAT_SCHEMA, LONG_SCHEMA),
new ReaderWriter(DOUBLE_SCHEMA, LONG_SCHEMA),
new ReaderWriter(DOUBLE_SCHEMA, INT_SCHEMA), new ReaderWriter(DOUBLE_SCHEMA, FLOAT_SCHEMA),
new ReaderWriter(STRING_SCHEMA, STRING_SCHEMA),
new ReaderWriter(BYTES_SCHEMA, BYTES_SCHEMA),
new ReaderWriter(INT_ARRAY_SCHEMA, INT_ARRAY_SCHEMA), new ReaderWriter(LONG_ARRAY_SCHEMA, INT_ARRAY_SCHEMA),
new ReaderWriter(INT_MAP_SCHEMA, INT_MAP_SCHEMA), new ReaderWriter(LONG_MAP_SCHEMA, INT_MAP_SCHEMA),
new ReaderWriter(ENUM1_AB_SCHEMA, ENUM1_AB_SCHEMA), new ReaderWriter(ENUM1_ABC_SCHEMA, ENUM1_AB_SCHEMA),
new ReaderWriter(ENUM1_AB_SCHEMA_DEFAULT, ENUM1_ABC_SCHEMA),
new ReaderWriter(ENUM1_AB_SCHEMA, ENUM1_AB_SCHEMA_NAMESPACE_1),
new ReaderWriter(ENUM1_AB_SCHEMA_NAMESPACE_1, ENUM1_AB_SCHEMA),
new ReaderWriter(ENUM1_AB_SCHEMA_NAMESPACE_1, ENUM1_AB_SCHEMA_NAMESPACE_2),
// String-to/from-bytes, introduced in Avro 1.7.7
new ReaderWriter(STRING_SCHEMA, BYTES_SCHEMA), new ReaderWriter(BYTES_SCHEMA, STRING_SCHEMA),
// Tests involving unions:
new ReaderWriter(EMPTY_UNION_SCHEMA, EMPTY_UNION_SCHEMA),
new ReaderWriter(FLOAT_UNION_SCHEMA, EMPTY_UNION_SCHEMA), new ReaderWriter(FLOAT_UNION_SCHEMA, INT_UNION_SCHEMA),
new ReaderWriter(FLOAT_UNION_SCHEMA, LONG_UNION_SCHEMA),
new ReaderWriter(FLOAT_UNION_SCHEMA, INT_LONG_UNION_SCHEMA), new ReaderWriter(INT_UNION_SCHEMA, INT_UNION_SCHEMA),
new ReaderWriter(INT_STRING_UNION_SCHEMA, STRING_INT_UNION_SCHEMA),
new ReaderWriter(INT_UNION_SCHEMA, EMPTY_UNION_SCHEMA), new ReaderWriter(LONG_UNION_SCHEMA, EMPTY_UNION_SCHEMA),
new ReaderWriter(LONG_UNION_SCHEMA, INT_UNION_SCHEMA), new ReaderWriter(FLOAT_UNION_SCHEMA, INT_UNION_SCHEMA),
new ReaderWriter(DOUBLE_UNION_SCHEMA, INT_UNION_SCHEMA), new ReaderWriter(FLOAT_UNION_SCHEMA, LONG_UNION_SCHEMA),
new ReaderWriter(DOUBLE_UNION_SCHEMA, LONG_UNION_SCHEMA),
new ReaderWriter(FLOAT_UNION_SCHEMA, EMPTY_UNION_SCHEMA),
new ReaderWriter(DOUBLE_UNION_SCHEMA, FLOAT_UNION_SCHEMA),
new ReaderWriter(STRING_UNION_SCHEMA, EMPTY_UNION_SCHEMA),
new ReaderWriter(STRING_UNION_SCHEMA, BYTES_UNION_SCHEMA),
new ReaderWriter(BYTES_UNION_SCHEMA, EMPTY_UNION_SCHEMA),
new ReaderWriter(BYTES_UNION_SCHEMA, STRING_UNION_SCHEMA),
new ReaderWriter(DOUBLE_UNION_SCHEMA, INT_FLOAT_UNION_SCHEMA),
// Readers capable of reading all branches of a union are compatible
new ReaderWriter(FLOAT_SCHEMA, INT_FLOAT_UNION_SCHEMA), new ReaderWriter(LONG_SCHEMA, INT_LONG_UNION_SCHEMA),
new ReaderWriter(DOUBLE_SCHEMA, INT_FLOAT_UNION_SCHEMA),
new ReaderWriter(DOUBLE_SCHEMA, INT_LONG_FLOAT_DOUBLE_UNION_SCHEMA),
// Special case of singleton unions:
new ReaderWriter(FLOAT_SCHEMA, FLOAT_UNION_SCHEMA), new ReaderWriter(INT_UNION_SCHEMA, INT_SCHEMA),
new ReaderWriter(INT_SCHEMA, INT_UNION_SCHEMA),
// Fixed types
new ReaderWriter(FIXED_4_BYTES, FIXED_4_BYTES),
// Tests involving records:
new ReaderWriter(EMPTY_RECORD1, EMPTY_RECORD1), new ReaderWriter(EMPTY_RECORD1, A_INT_RECORD1),
new ReaderWriter(A_INT_RECORD1, A_INT_RECORD1), new ReaderWriter(A_DINT_RECORD1, A_INT_RECORD1),
new ReaderWriter(A_DINT_RECORD1, A_DINT_RECORD1), new ReaderWriter(A_INT_RECORD1, A_DINT_RECORD1),
new ReaderWriter(A_LONG_RECORD1, A_INT_RECORD1),
new ReaderWriter(A_INT_RECORD1, A_INT_B_INT_RECORD1), new ReaderWriter(A_DINT_RECORD1, A_INT_B_INT_RECORD1),
new ReaderWriter(A_INT_B_DINT_RECORD1, A_INT_RECORD1), new ReaderWriter(A_DINT_B_DINT_RECORD1, EMPTY_RECORD1),
new ReaderWriter(A_DINT_B_DINT_RECORD1, A_INT_RECORD1),
new ReaderWriter(A_INT_B_INT_RECORD1, A_DINT_B_DINT_RECORD1),
new ReaderWriter(INT_LIST_RECORD, INT_LIST_RECORD), new ReaderWriter(LONG_LIST_RECORD, LONG_LIST_RECORD),
new ReaderWriter(LONG_LIST_RECORD, INT_LIST_RECORD),
new ReaderWriter(NULL_SCHEMA, NULL_SCHEMA),
new ReaderWriter(ENUM_AB_ENUM_DEFAULT_A_RECORD, ENUM_ABC_ENUM_DEFAULT_A_RECORD),
new ReaderWriter(ENUM_AB_FIELD_DEFAULT_A_ENUM_DEFAULT_B_RECORD, ENUM_ABC_FIELD_DEFAULT_B_ENUM_DEFAULT_A_RECORD),
// This is comparing two records that have an inner array of records with
// different namespaces.
new ReaderWriter(NS_RECORD1, NS_RECORD2), new ReaderWriter(WITHOUT_NS, WITH_NS));
// -----------------------------------------------------------------------------------------------
/**
* The reader/writer pairs that are incompatible are now moved to specific test
* classes, one | TestSchemaCompatibility |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/iterable/IterableAssert_isSubsetOf_with_Array_Test.java | {
"start": 1117,
"end": 1678
} | class ____ extends IterableAssertBaseTest {
private final List<String> values = newArrayList("Yoda", "Luke");
@Override
protected ConcreteIterableAssert<Object> invoke_api_method() {
return assertions.isSubsetOf(values);
}
@Override
protected void verify_internal_effects() {
verify(iterables).assertIsSubsetOf(getInfo(assertions), getActual(assertions), values);
}
@Test
void invoke_api_like_user() {
assertThat(newArrayList("Luke", "Yoda")).isSubsetOf("Yoda", "Luke", "Chewbacca");
}
}
| IterableAssert_isSubsetOf_with_Array_Test |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/annotations/Subselect.java | {
"start": 1024,
"end": 1739
} | class ____ maps a table, and another
* entity which is defined by a {@code @Subselect} involving the same table.
* In this case, a stateful session is vulnerable to data aliasing effects,
* and it's the responsibility of client code to ensure that changes to the
* first entity are flushed to the database before reading the same data via
* the second entity. The {@link Synchronize @Synchronize} annotation can
* help alleviate this problem, but it's an incomplete solution. We therefore
* recommend the use of {@linkplain org.hibernate.StatelessSession stateless
* sessions} in this situation.
*
* @see Synchronize
* @see View
*
* @author Sharath Reddy
*/
@Target(TYPE)
@Retention(RUNTIME)
public @ | which |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/deser/impl/NullsConstantProvider.java | {
"start": 504,
"end": 2586
} | class ____
implements NullValueProvider, java.io.Serializable
{
private static final long serialVersionUID = 1L;
private final static NullsConstantProvider SKIPPER = new NullsConstantProvider(null);
private final static NullsConstantProvider NULLER = new NullsConstantProvider(null);
protected final Object _nullValue;
protected final AccessPattern _access;
protected NullsConstantProvider(Object nvl) {
_nullValue = nvl;
_access = (_nullValue == null) ? AccessPattern.ALWAYS_NULL
: AccessPattern.CONSTANT;
}
/**
* Static accessor for a stateless instance used as marker, to indicate
* that all input `null` values should be skipped (ignored), so that
* no corresponding property value is set (with POJOs), and no content
* values (array/Collection elements, Map entries) are added.
*/
public static NullsConstantProvider skipper() {
return SKIPPER;
}
public static NullsConstantProvider nuller() {
return NULLER;
}
public static NullsConstantProvider forValue(Object nvl) {
if (nvl == null) {
return NULLER;
}
return new NullsConstantProvider(nvl);
}
/**
* Utility method that can be used to check if given null value provider
* is "skipper", marker provider that means that all input `null`s should
* be skipped (ignored), instead of converted
*/
public static boolean isSkipper(NullValueProvider p) {
return (p == SKIPPER);
}
/**
* Utility method that can be used to check if given null value provider
* is "nuller", no-operation provider that will always simply return
* Java `null` for any and all input `null`s.
*/
public static boolean isNuller(NullValueProvider p) {
return (p == NULLER);
}
@Override
public AccessPattern getNullAccessPattern() {
return _access;
}
@Override
public Object getNullValue(DeserializationContext ctxt) {
return _nullValue;
}
}
| NullsConstantProvider |
java | spring-projects__spring-security | config/src/main/java/org/springframework/security/config/annotation/web/configurers/SessionManagementConfigurer.java | {
"start": 30434,
"end": 33854
} | class ____ {
private ConcurrencyControlConfigurer() {
}
/**
* Controls the maximum number of sessions for a user. The default is to allow any
* number of users.
* @param maximumSessions the maximum number of sessions for a user
* @return the {@link ConcurrencyControlConfigurer} for further customizations
*/
public ConcurrencyControlConfigurer maximumSessions(int maximumSessions) {
SessionManagementConfigurer.this.sessionLimit = SessionLimit.of(maximumSessions);
return this;
}
/**
* Determines the behaviour when a session limit is detected.
* @param sessionLimit the {@link SessionLimit} to check the maximum number of
* sessions for a user
* @return the {@link ConcurrencyControlConfigurer} for further customizations
* @since 6.5
*/
public ConcurrencyControlConfigurer maximumSessions(SessionLimit sessionLimit) {
SessionManagementConfigurer.this.sessionLimit = sessionLimit;
return this;
}
/**
* The URL to redirect to if a user tries to access a resource and their session
* has been expired due to too many sessions for the current user. The default is
* to write a simple error message to the response.
* @param expiredUrl the URL to redirect to
* @return the {@link ConcurrencyControlConfigurer} for further customizations
*/
public ConcurrencyControlConfigurer expiredUrl(String expiredUrl) {
SessionManagementConfigurer.this.expiredUrl = expiredUrl;
return this;
}
/**
* Determines the behaviour when an expired session is detected.
* @param expiredSessionStrategy the {@link SessionInformationExpiredStrategy} to
* use when an expired session is detected.
* @return the {@link ConcurrencyControlConfigurer} for further customizations
*/
public ConcurrencyControlConfigurer expiredSessionStrategy(
SessionInformationExpiredStrategy expiredSessionStrategy) {
SessionManagementConfigurer.this.expiredSessionStrategy = expiredSessionStrategy;
return this;
}
/**
* If true, prevents a user from authenticating when the
* {@link #maximumSessions(int)} has been reached. Otherwise (default), the user
* who authenticates is allowed access and an existing user's session is expired.
* The user's who's session is forcibly expired is sent to
* {@link #expiredUrl(String)}. The advantage of this approach is if a user
* accidentally does not log out, there is no need for an administrator to
* intervene or wait till their session expires.
* @param maxSessionsPreventsLogin true to have an error at time of
* authentication, else false (default)
* @return the {@link ConcurrencyControlConfigurer} for further customizations
*/
public ConcurrencyControlConfigurer maxSessionsPreventsLogin(boolean maxSessionsPreventsLogin) {
SessionManagementConfigurer.this.maxSessionsPreventsLogin = maxSessionsPreventsLogin;
return this;
}
/**
* Controls the {@link SessionRegistry} implementation used. The default is
* {@link SessionRegistryImpl} which is an in memory implementation.
* @param sessionRegistry the {@link SessionRegistry} to use
* @return the {@link ConcurrencyControlConfigurer} for further customizations
*/
public ConcurrencyControlConfigurer sessionRegistry(SessionRegistry sessionRegistry) {
SessionManagementConfigurer.this.sessionRegistry = sessionRegistry;
return this;
}
}
}
| ConcurrencyControlConfigurer |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.