language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | apache__camel | dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/XqueryComponentBuilderFactory.java | {
"start": 1387,
"end": 1855
} | interface ____ {
/**
* XQuery (camel-saxon)
* Query and/or transform XML payloads using XQuery and Saxon.
*
* Category: transformation
* Since: 1.0
* Maven coordinates: org.apache.camel:camel-saxon
*
* @return the dsl builder
*/
static XqueryComponentBuilder xquery() {
return new XqueryComponentBuilderImpl();
}
/**
* Builder for the XQuery component.
*/
| XqueryComponentBuilderFactory |
java | spring-projects__spring-boot | configuration-metadata/spring-boot-configuration-processor/src/test/java/org/springframework/boot/configurationsample/source/generation/AbstractPropertiesSource.java | {
"start": 838,
"end": 1262
} | class ____ {
/**
* Description of this simple property.
*/
private String name = "boot";
/**
* Whether it is enabled.
*/
private boolean enabled;
public String getName() {
return this.name;
}
public void setName(String name) {
this.name = name;
}
public boolean isEnabled() {
return this.enabled;
}
public void setEnabled(boolean enabled) {
this.enabled = enabled;
}
}
| AbstractPropertiesSource |
java | apache__camel | components/camel-as2/camel-as2-api/src/main/java/org/apache/camel/component/as2/api/AS2AsynchronousMDNManager.java | {
"start": 2480,
"end": 7817
} | class ____ {
//
// AS2 HTTP Context Attribute Keys
//
/**
* Prefix for all AS2 HTTP Context Attributes used by the AS2 Asynchronous MDN Manager.
*/
public static final String CAMEL_AS2_ASYNC_MDN_PREFIX = "camel-as2.async-mdn.";
/**
* The HTTP Context Attribute containing the HTTP request message transporting the EDI message
*/
public static final String HTTP_REQUEST = HttpCoreContext.HTTP_REQUEST;
/**
* The HTTP Context Attribute containing the HTTP response message transporting the EDI message
*/
public static final String HTTP_RESPONSE = HttpCoreContext.HTTP_RESPONSE;
/**
* The HTTP Context Attribute containing the AS2 Connection used to send request message.
*/
public static final String AS2_CONNECTION = CAMEL_AS2_ASYNC_MDN_PREFIX + "as2-connection";
/**
* The HTTP Context Attribute indicating the AS2 name of MDN sender.
*/
public static final String RECIPIENT_ADDRESS = CAMEL_AS2_ASYNC_MDN_PREFIX + "recipient-address";
/**
* The HTTP Context Attribute containing an asynchronous MDN receipt.
*/
public static final String ASYNCHRONOUS_MDN = CAMEL_AS2_ASYNC_MDN_PREFIX + "asynchronous-mdn";
private HttpProcessor httpProcessor;
@SuppressWarnings("unused")
private Certificate[] signingCertificateChain;
@SuppressWarnings("unused")
private PrivateKey signingPrivateKey;
private String userName;
private String password;
private String accessToken;
public AS2AsynchronousMDNManager(String as2Version,
String userAgent,
String senderFQDN,
Certificate[] signingCertificateChain,
PrivateKey signingPrivateKey,
String userName,
String password,
String accessToken) {
this.signingCertificateChain = signingCertificateChain;
this.signingPrivateKey = signingPrivateKey;
this.userName = userName;
this.password = password;
this.accessToken = accessToken;
// Build Processor
httpProcessor = HttpProcessorBuilder.create().add(new RequestAsynchronousMDN(as2Version, senderFQDN))
.add(new RequestTargetHost()).add(new RequestUserAgent(userAgent)).add(new RequestDate())
.add(new RequestContent(true)).add(new RequestConnControl()).add(new RequestExpectContinue())
.build();
}
// Sends the signed or unsigned AS2-MDN to the URI requested by the sender of the AS2 message.
public HttpCoreContext send(
MultipartMimeEntity multipartMimeEntity,
String contentType,
String recipientDeliveryAddress)
throws HttpException {
ObjectHelper.notNull(multipartMimeEntity, "multipartMimeEntity");
ObjectHelper.notNull(contentType, "contentType");
ObjectHelper.notNull(recipientDeliveryAddress, "recipientDeliveryAddress");
URI uri = URI.create(recipientDeliveryAddress);
int buffSize = 8 * 1024;
Http1Config h1Config = Http1Config.custom().setBufferSize(buffSize).build();
HttpConnectionFactory<ManagedHttpClientConnection> connFactory
= ManagedHttpClientConnectionFactory.builder().http1Config(h1Config).build();
try (HttpClientConnection httpConnection = connFactory.createConnection(new Socket(uri.getHost(), uri.getPort()))) {
// Add Context attributes
HttpCoreContext httpContext = HttpCoreContext.create();
httpContext.setAttribute(RECIPIENT_ADDRESS, recipientDeliveryAddress);
ClassicHttpRequest request = new BasicClassicHttpRequest("POST", uri);
request.setHeader(AS2Header.CONTENT_TYPE, contentType);
AS2HeaderUtils.addAuthorizationHeader(request, userName, password, accessToken);
httpContext.setAttribute(HttpCoreContext.HTTP_REQUEST, request);
multipartMimeEntity.setMainBody(true);
EntityUtils.setMessageEntity(request, multipartMimeEntity);
HttpResponse response;
try {
httpContext.setAttribute(AS2_CONNECTION, httpConnection);
response = send(httpConnection, request, httpContext);
} catch (IOException e) {
throw new HttpException("Failed to send http request message", e);
}
httpContext.setAttribute(HTTP_RESPONSE, response);
return httpContext;
} catch (Exception e) {
throw new HttpException("failed to send MDN", e);
}
}
private HttpResponse send(HttpClientConnection httpConnection, ClassicHttpRequest request, HttpCoreContext httpContext)
throws HttpException, IOException {
// Execute Request
HttpRequestExecutor httpExecutor = new HttpRequestExecutor();
httpExecutor.preProcess(request, httpProcessor, httpContext);
ClassicHttpResponse response = httpExecutor.execute(request, httpConnection, httpContext);
httpExecutor.postProcess(response, httpProcessor, httpContext);
return response;
}
}
| AS2AsynchronousMDNManager |
java | elastic__elasticsearch | test/framework/src/main/java/org/elasticsearch/telemetry/RecordingInstruments.java | {
"start": 2364,
"end": 2508
} | interface ____ extends Supplier<Collection<Tuple<Number, Map<String, Object>>>> {
}
protected abstract static | NumberWithAttributesObserver |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemUtil.java | {
"start": 1429,
"end": 6668
} | class ____ {
private ViewFileSystemUtil() {
// Private Constructor
}
/**
* Check if the FileSystem is a ViewFileSystem.
*
* @param fileSystem file system.
* @return true if the fileSystem is ViewFileSystem
*/
public static boolean isViewFileSystem(final FileSystem fileSystem) {
return fileSystem.getScheme().equals(FsConstants.VIEWFS_SCHEME);
}
/**
* Check if the FileSystem is a ViewFileSystemOverloadScheme.
*
* @param fileSystem file system.
* @return true if the fileSystem is ViewFileSystemOverloadScheme
*/
public static boolean isViewFileSystemOverloadScheme(
final FileSystem fileSystem) {
return fileSystem instanceof ViewFileSystemOverloadScheme;
}
/**
* Get FsStatus for all ViewFsMountPoints matching path for the given
* ViewFileSystem.
*
* Say ViewFileSystem has following mount points configured
* (1) hdfs://NN0_host:port/sales mounted on /dept/sales
* (2) hdfs://NN1_host:port/marketing mounted on /dept/marketing
* (3) hdfs://NN2_host:port/eng_usa mounted on /dept/eng/usa
* (4) hdfs://NN3_host:port/eng_asia mounted on /dept/eng/asia
*
* For the above config, here is a sample list of paths and their matching
* mount points while getting FsStatus
*
* Path Description Matching MountPoint
*
* "/" Root ViewFileSystem lists all (1), (2), (3), (4)
* mount points.
*
* "/dept" Not a mount point, but a valid (1), (2), (3), (4)
* internal dir in the mount tree
* and resolved down to "/" path.
*
* "/dept/sales" Matches a mount point (1)
*
* "/dept/sales/india" Path is over a valid mount point (1)
* and resolved down to
* "/dept/sales"
*
* "/dept/eng" Not a mount point, but a valid (1), (2), (3), (4)
* internal dir in the mount tree
* and resolved down to "/" path.
*
* "/erp" Doesn't match or leads to or
* over any valid mount points None
*
*
* @param fileSystem - ViewFileSystem on which mount point exists
* @param path - URI for which FsStatus is requested
* @return Map of ViewFsMountPoint and FsStatus
* @throws IOException raised on errors performing I/O.
*/
public static Map<MountPoint, FsStatus> getStatus(
FileSystem fileSystem, Path path) throws IOException {
if (!(isViewFileSystem(fileSystem)
|| isViewFileSystemOverloadScheme(fileSystem))) {
throw new UnsupportedFileSystemException("FileSystem '"
+ fileSystem.getUri() + "'is not a ViewFileSystem.");
}
ViewFileSystem viewFileSystem = (ViewFileSystem) fileSystem;
String viewFsUriPath = viewFileSystem.getUriPath(path);
boolean isPathOverMountPoint = false;
boolean isPathLeadingToMountPoint = false;
boolean isPathIncludesAllMountPoint = false;
Map<MountPoint, FsStatus> mountPointMap = new HashMap<>();
for (MountPoint mountPoint : viewFileSystem.getMountPoints()) {
String[] mountPointPathComponents = InodeTree.breakIntoPathComponents(
mountPoint.getMountedOnPath().toString());
String[] incomingPathComponents =
InodeTree.breakIntoPathComponents(viewFsUriPath);
int pathCompIndex;
for (pathCompIndex = 0; pathCompIndex < mountPointPathComponents.length &&
pathCompIndex < incomingPathComponents.length; pathCompIndex++) {
if (!mountPointPathComponents[pathCompIndex].equals(
incomingPathComponents[pathCompIndex])) {
break;
}
}
if (pathCompIndex >= mountPointPathComponents.length) {
// Patch matches or over a valid mount point
isPathOverMountPoint = true;
mountPointMap.clear();
updateMountPointFsStatus(viewFileSystem, mountPointMap, mountPoint,
new Path(viewFsUriPath));
break;
} else {
if (pathCompIndex > 1) {
// Path is in the mount tree
isPathLeadingToMountPoint = true;
} else if (incomingPathComponents.length <= 1) {
// Special case of "/" path
isPathIncludesAllMountPoint = true;
}
updateMountPointFsStatus(viewFileSystem, mountPointMap, mountPoint,
mountPoint.getMountedOnPath());
}
}
if (!isPathOverMountPoint && !isPathLeadingToMountPoint &&
!isPathIncludesAllMountPoint) {
throw new NotInMountpointException(path, "getStatus");
}
return mountPointMap;
}
/**
* Update FsStatus for the given the mount point.
*
* @param viewFileSystem
* @param mountPointMap
* @param mountPoint
* @param path
*/
private static void updateMountPointFsStatus(
final ViewFileSystem viewFileSystem,
final Map<MountPoint, FsStatus> mountPointMap,
final MountPoint mountPoint, final Path path) throws IOException {
FsStatus fsStatus = viewFileSystem.getStatus(path);
mountPointMap.put(mountPoint, fsStatus);
}
}
| ViewFileSystemUtil |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/internal/booleanarrays/BooleanArrays_assertContainsOnly_Test.java | {
"start": 1788,
"end": 4013
} | class ____ extends BooleanArraysBaseTest {
@Test
void should_pass_if_actual_contains_given_values_only() {
arrays.assertContainsOnly(someInfo(), actual, arrayOf(true, false));
}
@Test
void should_pass_if_actual_contains_given_values_only_in_different_order() {
arrays.assertContainsOnly(someInfo(), actual, arrayOf(false, true));
}
@Test
void should_pass_if_actual_contains_given_values_only_more_than_once() {
actual = arrayOf(true, false, true, false);
arrays.assertContainsOnly(someInfo(), actual, arrayOf(true, false));
}
@Test
void should_pass_if_actual_contains_given_values_only_even_if_duplicated() {
arrays.assertContainsOnly(someInfo(), actual, arrayOf(true, false, true, false));
}
@Test
void should_pass_if_actual_and_given_values_are_empty() {
actual = emptyArray();
arrays.assertContainsOnly(someInfo(), actual, emptyArray());
}
@Test
void should_fail_if_array_of_values_to_look_for_is_empty_and_actual_is_not() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> arrays.assertContainsOnly(someInfo(), actual, emptyArray()));
}
@Test
void should_throw_error_if_array_of_values_to_look_for_is_null() {
assertThatNullPointerException().isThrownBy(() -> arrays.assertContainsOnly(someInfo(), actual,
null))
.withMessage(valuesToLookForIsNull());
}
@Test
void should_fail_if_actual_is_null() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> arrays.assertContainsOnly(someInfo(), null, arrayOf(true)))
.withMessage(actualIsNull());
}
@Test
void should_fail_if_actual_does_not_contain_given_values_only() {
AssertionInfo info = someInfo();
actual = arrayOf(true);
boolean[] expected = { false };
Throwable error = catchThrowable(() -> arrays.assertContainsOnly(info, actual, expected));
assertThat(error).isInstanceOf(AssertionError.class);
verify(failures).failure(info, shouldContainOnly(actual, expected, newArrayList(false), newArrayList(true)));
}
}
| BooleanArrays_assertContainsOnly_Test |
java | bumptech__glide | annotation/compiler/src/main/java/com/bumptech/glide/annotation/compiler/GlideAnnotationProcessor.java | {
"start": 1886,
"end": 3245
} | class ____ both static and instance
* versions of methods in all {@link com.bumptech.glide.annotation.GlideExtension}s.
* <li>If one or more methods in one or more {@link
* com.bumptech.glide.annotation.GlideExtension} annotated classes are annotated with
* {@link GlideType}:
* <ul>
* <li>A {@code com.bumptech.glide.RequestManager} implementation containing a
* generated method for each method annotated with {@link GlideType}.
* <li>A {@code
* com.bumptech.glide.manager.RequestManagerRetriever.RequestManagerFactory}
* implementation that produces the generated {@code
* com.bumptech.glide.RequestManager}s.
* <li>A {@code com.bumptech.glide.Glide} look-alike that implements all static
* methods in the {@code com.bumptech.glide.Glide} singleton and returns the
* generated {@code com.bumptech.glide.RequestManager} implementation when
* appropriate.
* </ul>
* </ul>
* </ul>
*
* <p>{@code AppGlideModule} implementations must only be included in applications, not in
* libraries. There must be exactly one {@code AppGlideModule} implementation per Application. The
* {@code AppGlideModule} | and |
java | quarkusio__quarkus | independent-projects/tools/registry-client/src/main/java/io/quarkus/registry/catalog/CategoryImpl.java | {
"start": 2097,
"end": 5220
} | class ____ implements Category.Mutable {
protected String id;
protected String name;
protected String description;
protected Map<String, Object> metadata;
Builder() {
}
@JsonIgnore
Builder(Category config) {
this.id = config.getId();
this.name = config.getName();
this.description = config.getDescription();
this.metadata = config.getMetadata();
}
@Override
public String getId() {
return id;
}
public Builder setId(String id) {
this.id = id;
return this;
}
@Override
public String getName() {
return name;
}
public Builder setName(String name) {
this.name = name;
return this;
}
@Override
public String getDescription() {
return description;
}
public Builder setDescription(String description) {
this.description = description;
return this;
}
@Override
public Map<String, Object> getMetadata() {
return metadata == null ? metadata = new HashMap<>() : metadata;
}
@Override
public Builder setMetadata(Map<String, Object> newValues) {
metadata = JsonBuilder.modifiableMapOrNull(newValues, HashMap::new);
return this;
}
@Override
public Builder setMetadata(String name, Object value) {
getMetadata().put(name, value);
return this;
}
@Override
public Builder removeMetadata(String key) {
getMetadata().remove(key);
return this;
}
@Override
public CategoryImpl build() {
return new CategoryImpl(this);
}
@Override
public boolean equals(Object o) {
return categoryEquals(this, o);
}
@Override
public int hashCode() {
return Objects.hash(id, name, description, getMetadata());
}
@Override
public String toString() {
return categoryToString(this);
}
}
static final boolean categoryEquals(Category c, Object o) {
if (c == o)
return true;
if (!(o instanceof Category))
return false;
Category category = (Category) o;
return Objects.equals(c.getId(), category.getId())
&& Objects.equals(c.getName(), category.getName())
&& Objects.equals(c.getDescription(), category.getDescription())
&& Objects.equals(c.getMetadata(), category.getMetadata());
}
static final String categoryToString(Category c) {
return "Category{" +
"id='" + c.getId() + '\'' +
", name='" + c.getName() + '\'' +
", description='" + c.getDescription() + '\'' +
", metadata=" + c.getMetadata() +
", builder=" + (c instanceof Builder) +
'}';
}
}
| Builder |
java | apache__logging-log4j2 | log4j-api/src/main/java/org/apache/logging/log4j/spi/ReadOnlyThreadContextMap.java | {
"start": 1161,
"end": 1346
} | interface ____ be accessed
* by applications via the {@link ThreadContext#getThreadContextMap()} method.
* </p>
*
* @see ThreadContext#getThreadContextMap()
* @since 2.8
*/
public | can |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest/deployment/src/main/java/io/quarkus/resteasy/reactive/server/deployment/QuarkusMultipartReturnTypeHandler.java | {
"start": 843,
"end": 2111
} | class ____ implements EndpointIndexer.MultipartReturnTypeIndexerExtension {
private final Map<String, Boolean> multipartOutputGeneratedPopulators = new HashMap<>();
final BuildProducer<GeneratedClassBuildItem> generatedClassBuildItemBuildProducer;
final Predicate<String> applicationClassPredicate;
final BuildProducer<ReflectiveClassBuildItem> reflectiveClassProducer;
public QuarkusMultipartReturnTypeHandler(BuildProducer<GeneratedClassBuildItem> generatedClassBuildItemBuildProducer,
Predicate<String> applicationClassPredicate, BuildProducer<ReflectiveClassBuildItem> reflectiveClassProducer) {
this.generatedClassBuildItemBuildProducer = generatedClassBuildItemBuildProducer;
this.applicationClassPredicate = applicationClassPredicate;
this.reflectiveClassProducer = reflectiveClassProducer;
}
@Override
public boolean handleMultipartForReturnType(AdditionalWriters additionalWriters, ClassInfo multipartClassInfo,
IndexView index) {
String className = multipartClassInfo.name().toString();
Boolean canHandle = multipartOutputGeneratedPopulators.get(className);
if (canHandle != null) {
// we've already seen this | QuarkusMultipartReturnTypeHandler |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/annotation/web/builders/HttpSecurityDeferAddFilterTests.java | {
"start": 9878,
"end": 10358
} | class ____ {
@Bean
SecurityFilterChain securityFilterChain(HttpSecurity http) throws Exception {
// @formatter:off
http
.addFilterAfter(new MyFilter(), WebAsyncManagerIntegrationFilter.class)
.addFilterAfter(new MyFilter(), ExceptionTranslationFilter.class)
.addFilterBefore(new MyOtherFilter(), MyFilter.class);
// @formatter:on
return http.build();
}
}
@Configuration
@EnableWebSecurity
static | MyOtherFilterBeforeToMyFilterMultipleAfterConfig |
java | quarkusio__quarkus | extensions/hibernate-envers/deployment/src/test/java/io/quarkus/hibernate/orm/envers/config/EnversAuditStrategyTestCase.java | {
"start": 386,
"end": 937
} | class ____ {
@RegisterExtension
static QuarkusUnitTest runner = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(MyAuditedEntity.class, EnversTestAuditStrategyResource.class, AbstractEnversResource.class)
.addAsResource("application-with-audit-strategy.properties", "application.properties"));
@Test
public void testAuditStrategy() {
RestAssured.when().get("/envers-audit-strategy").then()
.body(is("OK"));
}
}
| EnversAuditStrategyTestCase |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipeApplication.java | {
"start": 26730,
"end": 27286
} | class ____ extends
CombineOutputCollector<IntWritable, Text> {
final private Map<IntWritable, Text> collect = new HashMap<IntWritable, Text>();
public FakeCollector(Counter outCounter, Progressable progressable) {
super(outCounter, progressable);
}
@Override
public synchronized void collect(IntWritable key, Text value)
throws IOException {
collect.put(key, value);
super.collect(key, value);
}
public Map<IntWritable, Text> getCollect() {
return collect;
}
}
}
| FakeCollector |
java | apache__flink | flink-python/src/main/java/org/apache/flink/table/runtime/operators/python/AbstractOneInputPythonFunctionOperator.java | {
"start": 1213,
"end": 1312
} | class ____ all one input stream operators to execute Python functions. */
@Internal
public abstract | for |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/core/convert/support/StringToBooleanConverter.java | {
"start": 970,
"end": 1625
} | class ____ implements Converter<String, Boolean> {
private static final Set<String> trueValues = Set.of("true", "on", "yes", "1");
private static final Set<String> falseValues = Set.of("false", "off", "no", "0");
@Override
public @Nullable Boolean convert(String source) {
String value = source.trim();
if (value.isEmpty()) {
return null;
}
value = value.toLowerCase(Locale.ROOT);
if (trueValues.contains(value)) {
return Boolean.TRUE;
}
else if (falseValues.contains(value)) {
return Boolean.FALSE;
}
else {
throw new IllegalArgumentException("Invalid boolean value '" + source + "'");
}
}
}
| StringToBooleanConverter |
java | spring-projects__spring-security | config/src/main/java/org/springframework/security/config/ldap/AbstractLdapAuthenticationManagerFactory.java | {
"start": 1739,
"end": 7087
} | class ____<T extends AbstractLdapAuthenticator> {
AbstractLdapAuthenticationManagerFactory(BaseLdapPathContextSource contextSource) {
this.contextSource = contextSource;
}
private BaseLdapPathContextSource contextSource;
private String[] userDnPatterns;
private LdapAuthoritiesPopulator ldapAuthoritiesPopulator;
private GrantedAuthoritiesMapper authoritiesMapper;
private UserDetailsContextMapper userDetailsContextMapper;
private String userSearchFilter;
private String userSearchBase = "";
/**
* Sets the {@link BaseLdapPathContextSource} used to perform LDAP authentication.
* @param contextSource the {@link BaseLdapPathContextSource} used to perform LDAP
* authentication
*/
public void setContextSource(BaseLdapPathContextSource contextSource) {
this.contextSource = contextSource;
}
/**
* Gets the {@link BaseLdapPathContextSource} used to perform LDAP authentication.
* @return the {@link BaseLdapPathContextSource} used to perform LDAP authentication
*/
protected final BaseLdapPathContextSource getContextSource() {
return this.contextSource;
}
/**
* Sets the {@link LdapAuthoritiesPopulator} used to obtain a list of granted
* authorities for an LDAP user.
* @param ldapAuthoritiesPopulator the {@link LdapAuthoritiesPopulator} to use
*/
public void setLdapAuthoritiesPopulator(LdapAuthoritiesPopulator ldapAuthoritiesPopulator) {
this.ldapAuthoritiesPopulator = ldapAuthoritiesPopulator;
}
/**
* Sets the {@link GrantedAuthoritiesMapper} used for converting the authorities
* loaded from storage to a new set of authorities which will be associated to the
* {@link UsernamePasswordAuthenticationToken}.
* @param authoritiesMapper the {@link GrantedAuthoritiesMapper} used for mapping the
* user's authorities
*/
public void setAuthoritiesMapper(GrantedAuthoritiesMapper authoritiesMapper) {
this.authoritiesMapper = authoritiesMapper;
}
/**
* Sets a custom strategy to be used for creating the {@link UserDetails} which will
* be stored as the principal in the {@link Authentication}.
* @param userDetailsContextMapper the strategy instance
*/
public void setUserDetailsContextMapper(UserDetailsContextMapper userDetailsContextMapper) {
this.userDetailsContextMapper = userDetailsContextMapper;
}
/**
* If your users are at a fixed location in the directory (i.e. you can work out the
* DN directly from the username without doing a directory search), you can use this
* attribute to map directly to the DN. It maps directly to the userDnPatterns
* property of AbstractLdapAuthenticator. The value is a specific pattern used to
* build the user's DN, for example "uid={0},ou=people". The key "{0}" must be present
* and will be substituted with the username.
* @param userDnPatterns the LDAP patterns for finding the usernames
*/
public void setUserDnPatterns(String... userDnPatterns) {
this.userDnPatterns = userDnPatterns;
}
/**
* The LDAP filter used to search for users (optional). For example "(uid={0})". The
* substituted parameter is the user's login name.
* @param userSearchFilter the LDAP filter used to search for users
*/
public void setUserSearchFilter(String userSearchFilter) {
this.userSearchFilter = userSearchFilter;
}
/**
* Search base for user searches. Defaults to "". Only used with
* {@link #setUserSearchFilter(String)}.
* @param userSearchBase search base for user searches
*/
public void setUserSearchBase(String userSearchBase) {
this.userSearchBase = userSearchBase;
}
/**
* Returns the configured {@link AuthenticationManager} that can be used to perform
* LDAP authentication.
* @return the configured {@link AuthenticationManager}
*/
public final AuthenticationManager createAuthenticationManager() {
LdapAuthenticationProvider ldapAuthenticationProvider = getProvider();
return new ProviderManager(ldapAuthenticationProvider);
}
private LdapAuthenticationProvider getProvider() {
AbstractLdapAuthenticator authenticator = getAuthenticator();
LdapAuthenticationProvider provider;
if (this.ldapAuthoritiesPopulator != null) {
provider = new LdapAuthenticationProvider(authenticator, this.ldapAuthoritiesPopulator);
}
else {
provider = new LdapAuthenticationProvider(authenticator);
}
if (this.authoritiesMapper != null) {
provider.setAuthoritiesMapper(this.authoritiesMapper);
}
if (this.userDetailsContextMapper != null) {
provider.setUserDetailsContextMapper(this.userDetailsContextMapper);
}
return provider;
}
private AbstractLdapAuthenticator getAuthenticator() {
AbstractLdapAuthenticator authenticator = createDefaultLdapAuthenticator();
if (this.userSearchFilter != null) {
authenticator.setUserSearch(
new FilterBasedLdapUserSearch(this.userSearchBase, this.userSearchFilter, this.contextSource));
}
if (this.userDnPatterns != null && this.userDnPatterns.length > 0) {
authenticator.setUserDnPatterns(this.userDnPatterns);
}
authenticator.afterPropertiesSet();
return authenticator;
}
/**
* Allows subclasses to supply the default {@link AbstractLdapAuthenticator}.
* @return the {@link AbstractLdapAuthenticator} that will be configured for LDAP
* authentication
*/
protected abstract T createDefaultLdapAuthenticator();
}
| AbstractLdapAuthenticationManagerFactory |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/NflyFSystem.java | {
"start": 15036,
"end": 30470
} | class ____ extends FileStatus {
private static final long serialVersionUID = 0x21f276d8;
private final FileStatus realStatus;
private final String strippedRoot;
private NflyStatus(ChRootedFileSystem realFs, FileStatus realStatus)
throws IOException {
this.realStatus = realStatus;
this.strippedRoot = realFs.stripOutRoot(realStatus.getPath());
}
String stripRoot() throws IOException {
return strippedRoot;
}
@Override
public long getLen() {
return realStatus.getLen();
}
@Override
public boolean isFile() {
return realStatus.isFile();
}
@Override
public boolean isDirectory() {
return realStatus.isDirectory();
}
@Override
public boolean isSymlink() {
return realStatus.isSymlink();
}
@Override
public long getBlockSize() {
return realStatus.getBlockSize();
}
@Override
public short getReplication() {
return realStatus.getReplication();
}
@Override
public long getModificationTime() {
return realStatus.getModificationTime();
}
@Override
public long getAccessTime() {
return realStatus.getAccessTime();
}
@Override
public FsPermission getPermission() {
return realStatus.getPermission();
}
@Override
public String getOwner() {
return realStatus.getOwner();
}
@Override
public String getGroup() {
return realStatus.getGroup();
}
@Override
public Path getPath() {
return realStatus.getPath();
}
@Override
public void setPath(Path p) {
realStatus.setPath(p);
}
@Override
public Path getSymlink() throws IOException {
return realStatus.getSymlink();
}
@Override
public void setSymlink(Path p) {
realStatus.setSymlink(p);
}
@Override
public boolean equals(Object o) {
return realStatus.equals(o);
}
@Override
public int hashCode() {
return realStatus.hashCode();
}
@Override
public String toString() {
return realStatus.toString();
}
}
@Override
public URI getUri() {
return nflyURI;
}
/**
* Category: READ.
*
* @param f the file name to open
* @param bufferSize the size of the buffer to be used.
* @return input stream according to nfly flags (closest, most recent)
* @throws IOException
* @throws FileNotFoundException iff all destinations generate this exception
*/
@Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
// TODO proxy stream for reads
final List<IOException> ioExceptions =
new ArrayList<IOException>(nodes.length);
int numNotFounds = 0;
final MRNflyNode[] mrNodes = workSet();
// naively iterate until one can be opened
//
for (final MRNflyNode nflyNode : mrNodes) {
try {
if (nflyFlags.contains(NflyKey.repairOnRead)
|| nflyFlags.contains(NflyKey.readMostRecent)) {
// calling file status to avoid pulling bytes prematurely
nflyNode.updateFileStatus(f);
} else {
return nflyNode.getFs().open(f, bufferSize);
}
} catch (FileNotFoundException fnfe) {
nflyNode.status = notFoundStatus(f);
numNotFounds++;
processThrowable(nflyNode, "open", fnfe, ioExceptions, f);
} catch (Throwable t) {
processThrowable(nflyNode, "open", t, ioExceptions, f);
}
}
if (nflyFlags.contains(NflyKey.readMostRecent)) {
// sort from most recent to least recent
Arrays.sort(mrNodes);
}
final FSDataInputStream fsdisAfterRepair = repairAndOpen(mrNodes, f,
bufferSize);
if (fsdisAfterRepair != null) {
return fsdisAfterRepair;
}
mayThrowFileNotFound(ioExceptions, numNotFounds);
throw MultipleIOException.createIOException(ioExceptions);
}
private static FileStatus notFoundStatus(Path f) {
return new FileStatus(-1, false, 0, 0, 0, f);
}
/**
* Iterate all available nodes in the proximity order to attempt repair of all
* FileNotFound nodes.
*
* @param mrNodes work set copy of nodes
* @param f path to repair and open
* @param bufferSize buffer size for read RPC
* @return the closest/most recent replica stream AFTER repair
*/
private FSDataInputStream repairAndOpen(MRNflyNode[] mrNodes, Path f,
int bufferSize) {
long maxMtime = 0L;
for (final MRNflyNode srcNode : mrNodes) {
if (srcNode.status == null // not available
|| srcNode.status.getLen() < 0L) { // not found
continue; // not available
}
if (srcNode.status.getModificationTime() > maxMtime) {
maxMtime = srcNode.status.getModificationTime();
}
// attempt to repair all notFound nodes with srcNode
//
for (final MRNflyNode dstNode : mrNodes) {
if (dstNode.status == null // not available
|| srcNode.compareTo(dstNode) == 0) { // same mtime
continue;
}
try {
// status is absolute from the underlying mount, making it chrooted
//
final FileStatus srcStatus = srcNode.cloneStatus();
srcStatus.setPath(f);
final Path tmpPath = getNflyTmpPath(f);
FileUtil.copy(srcNode.getFs(), srcStatus, dstNode.getFs(), tmpPath,
false, // don't delete
true, // overwrite
getConf());
dstNode.getFs().delete(f, false);
if (dstNode.getFs().rename(tmpPath, f)) {
try {
dstNode.getFs().setTimes(f, srcNode.status.getModificationTime(),
srcNode.status.getAccessTime());
} finally {
// save getFileStatus rpc
srcStatus.setPath(dstNode.getFs().makeQualified(f));
dstNode.status = srcStatus;
}
}
} catch (IOException ioe) {
// can blame the source by statusSet.clear(ai), however, it would
// cost an extra RPC, so just rely on the loop below that will attempt
// an open anyhow
//
LOG.info(f + " " + srcNode + "->" + dstNode + ": Failed to repair",
ioe);
}
}
}
// Since Java7, QuickSort is used instead of MergeSort.
// QuickSort may not be stable and thus the equal most recent nodes, may no
// longer appear in the NetworkTopology order.
//
if (maxMtime > 0) {
final List<MRNflyNode> mrList = new ArrayList<MRNflyNode>();
for (final MRNflyNode openNode : mrNodes) {
if (openNode.status != null && openNode.status.getLen() >= 0L) {
if (openNode.status.getModificationTime() == maxMtime) {
mrList.add(openNode);
}
}
}
// assert mrList.size > 0
final MRNflyNode[] readNodes = mrList.toArray(new MRNflyNode[0]);
topology.sortByDistance(myNode, readNodes, readNodes.length);
for (final MRNflyNode rNode : readNodes) {
try {
return rNode.getFs().open(f, bufferSize);
} catch (IOException e) {
LOG.info(f + ": Failed to open at " + rNode.getFs().getUri());
}
}
}
return null;
}
private void mayThrowFileNotFound(List<IOException> ioExceptions,
int numNotFounds) throws FileNotFoundException {
if (numNotFounds == nodes.length) {
throw (FileNotFoundException)ioExceptions.get(nodes.length - 1);
}
}
// WRITE
@Override
public FSDataOutputStream create(Path f, FsPermission permission,
boolean overwrite, int bufferSize, short replication, long blockSize,
Progressable progress) throws IOException {
return new FSDataOutputStream(new NflyOutputStream(f, permission, overwrite,
bufferSize, replication, blockSize, progress), statistics);
}
// WRITE
@Override
public FSDataOutputStream append(Path f, int bufferSize,
Progressable progress) throws IOException {
return null;
}
// WRITE
@Override
public boolean rename(Path src, Path dst) throws IOException {
final List<IOException> ioExceptions = new ArrayList<IOException>();
int numNotFounds = 0;
boolean succ = true;
for (final NflyNode nflyNode : nodes) {
try {
succ &= nflyNode.fs.rename(src, dst);
} catch (FileNotFoundException fnfe) {
numNotFounds++;
processThrowable(nflyNode, "rename", fnfe, ioExceptions, src, dst);
} catch (Throwable t) {
processThrowable(nflyNode, "rename", t, ioExceptions, src, dst);
succ = false;
}
}
mayThrowFileNotFound(ioExceptions, numNotFounds);
// if all destinations threw exceptions throw, otherwise return
//
if (ioExceptions.size() == nodes.length) {
throw MultipleIOException.createIOException(ioExceptions);
}
return succ;
}
// WRITE
@Override
public boolean delete(Path f, boolean recursive) throws IOException {
final List<IOException> ioExceptions = new ArrayList<IOException>();
int numNotFounds = 0;
boolean succ = true;
for (final NflyNode nflyNode : nodes) {
try {
succ &= nflyNode.fs.delete(f);
} catch (FileNotFoundException fnfe) {
numNotFounds++;
processThrowable(nflyNode, "delete", fnfe, ioExceptions, f);
} catch (Throwable t) {
processThrowable(nflyNode, "delete", t, ioExceptions, f);
succ = false;
}
}
mayThrowFileNotFound(ioExceptions, numNotFounds);
// if all destinations threw exceptions throw, otherwise return
//
if (ioExceptions.size() == nodes.length) {
throw MultipleIOException.createIOException(ioExceptions);
}
return succ;
}
/**
* Returns the closest non-failing destination's result.
*
* @param f given path
* @return array of file statuses according to nfly modes
* @throws FileNotFoundException
* @throws IOException
*/
@Override
public FileStatus[] listStatus(Path f) throws FileNotFoundException,
IOException {
final List<IOException> ioExceptions =
new ArrayList<IOException>(nodes.length);
final MRNflyNode[] mrNodes = workSet();
if (nflyFlags.contains(NflyKey.readMostRecent)) {
int numNotFounds = 0;
for (final MRNflyNode nflyNode : mrNodes) {
try {
nflyNode.updateFileStatus(f);
} catch (FileNotFoundException fnfe) {
numNotFounds++;
processThrowable(nflyNode, "listStatus", fnfe, ioExceptions, f);
} catch (Throwable t) {
processThrowable(nflyNode, "listStatus", t, ioExceptions, f);
}
}
mayThrowFileNotFound(ioExceptions, numNotFounds);
Arrays.sort(mrNodes);
}
int numNotFounds = 0;
for (final MRNflyNode nflyNode : mrNodes) {
try {
final FileStatus[] realStats = nflyNode.getFs().listStatus(f);
final FileStatus[] nflyStats = new FileStatus[realStats.length];
for (int i = 0; i < realStats.length; i++) {
nflyStats[i] = new NflyStatus(nflyNode.getFs(), realStats[i]);
}
return nflyStats;
} catch (FileNotFoundException fnfe) {
numNotFounds++;
processThrowable(nflyNode, "listStatus", fnfe, ioExceptions, f);
} catch (Throwable t) {
processThrowable(nflyNode, "listStatus", t, ioExceptions, f);
}
}
mayThrowFileNotFound(ioExceptions, numNotFounds);
throw MultipleIOException.createIOException(ioExceptions);
}
@Override
public RemoteIterator<LocatedFileStatus> listLocatedStatus(Path f)
throws FileNotFoundException, IOException {
// TODO important for splits
return super.listLocatedStatus(f);
}
@Override
public void setWorkingDirectory(Path newDir) {
for (final NflyNode nflyNode : nodes) {
nflyNode.fs.setWorkingDirectory(newDir);
}
}
@Override
public Path getWorkingDirectory() {
return nodes[0].fs.getWorkingDirectory(); // 0 is as good as any
}
@Override
public boolean mkdirs(Path f, FsPermission permission) throws IOException {
boolean succ = true;
for (final NflyNode nflyNode : nodes) {
succ &= nflyNode.fs.mkdirs(f, permission);
}
return succ;
}
@Override
public FileStatus getFileStatus(Path f) throws IOException {
// TODO proxy stream for reads
final List<IOException> ioExceptions =
new ArrayList<IOException>(nodes.length);
int numNotFounds = 0;
final MRNflyNode[] mrNodes = workSet();
long maxMtime = Long.MIN_VALUE;
int maxMtimeIdx = Integer.MIN_VALUE;
// naively iterate until one can be returned
//
for (int i = 0; i < mrNodes.length; i++) {
MRNflyNode nflyNode = mrNodes[i];
try {
nflyNode.updateFileStatus(f);
if (nflyFlags.contains(NflyKey.readMostRecent)) {
final long nflyTime = nflyNode.status.getModificationTime();
if (nflyTime > maxMtime) {
maxMtime = nflyTime;
maxMtimeIdx = i;
}
} else {
return nflyNode.nflyStatus();
}
} catch (FileNotFoundException fnfe) {
numNotFounds++;
processThrowable(nflyNode, "getFileStatus", fnfe, ioExceptions, f);
} catch (Throwable t) {
processThrowable(nflyNode, "getFileStatus", t, ioExceptions, f);
}
}
if (maxMtimeIdx >= 0) {
return mrNodes[maxMtimeIdx].nflyStatus();
}
mayThrowFileNotFound(ioExceptions, numNotFounds);
throw MultipleIOException.createIOException(ioExceptions);
}
private static void processThrowable(NflyNode nflyNode, String op,
Throwable t, List<IOException> ioExceptions,
Path... f) {
final String errMsg = Arrays.toString(f)
+ ": failed to " + op + " " + nflyNode.fs.getUri();
final IOException ioex;
if (t instanceof FileNotFoundException) {
ioex = new FileNotFoundException(errMsg);
ioex.initCause(t);
} else {
ioex = new IOException(errMsg, t);
}
if (ioExceptions != null) {
ioExceptions.add(ioex);
}
}
/**
* Initializes an nfly mountpoint in viewfs.
*
* @param uris destinations to replicate writes to
* @param conf file system configuration
* @param settings comma-separated list of k=v pairs.
* @return an Nfly filesystem
* @throws IOException
*/
static FileSystem createFileSystem(URI[] uris, Configuration conf,
String settings, FsGetter fsGetter) throws IOException {
// assert settings != null
int minRepl = DEFAULT_MIN_REPLICATION;
EnumSet<NflyKey> nflyFlags = EnumSet.noneOf(NflyKey.class);
final String[] kvPairs = StringUtils.split(settings);
for (String kv : kvPairs) {
final String[] kvPair = StringUtils.split(kv, '=');
if (kvPair.length != 2) {
throw new IllegalArgumentException(kv);
}
NflyKey nflyKey = NflyKey.valueOf(kvPair[0]);
switch (nflyKey) {
case minReplication:
minRepl = Integer.parseInt(kvPair[1]);
break;
case repairOnRead:
case readMostRecent:
if (Boolean.valueOf(kvPair[1])) {
nflyFlags.add(nflyKey);
}
break;
default:
throw new IllegalArgumentException(nflyKey + ": Infeasible");
}
}
return new NflyFSystem(uris, conf, minRepl, nflyFlags, fsGetter);
}
}
| NflyStatus |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/refcolnames/misc/Misc5Test.java | {
"start": 933,
"end": 1247
} | class ____ {
@Test
void test(SessionFactoryScope scope) {
scope.inTransaction(x -> {
Animal a = new Animal();
a.name = "Dragon";
a.key = "xxxx";
Animal b = new Animal();
b.name = "Lizard";
b.key = "yyyy";
a.relatives.add(b);
});
}
@Entity
@Table(name = "animal")
public static | Misc5Test |
java | mockito__mockito | mockito-core/src/testFixtures/java/org/mockitoutil/SimplePerRealmReloadingClassLoader.java | {
"start": 5850,
"end": 5899
} | class ____ Callable");
}
public | implementing |
java | spring-projects__spring-framework | spring-orm/src/main/java/org/springframework/orm/jpa/persistenceunit/DefaultPersistenceUnitManager.java | {
"start": 17787,
"end": 17953
} | class ____ contract.
* <p>It is not required to specify a LoadTimeWeaver: Most providers will be able
* to provide a subset of their functionality without | transformer |
java | spring-projects__spring-framework | spring-beans/src/test/java/org/springframework/beans/BeanWrapperAutoGrowingTests.java | {
"start": 11289,
"end": 11376
} | class ____ {
private NestedNoDefaultConstructor() {
}
}
}
| NestedNoDefaultConstructor |
java | apache__flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/functions/inference/OperatorBindingCallContext.java | {
"start": 10028,
"end": 12442
} | class ____ implements TableSemantics {
private final DataType dataType;
private final int[] partitionByColumns;
private final int timeColumn;
private final @Nullable ChangelogMode changelogMode;
public static OperatorBindingTableSemantics create(
DataType tableDataType,
StaticArgument staticArg,
RexTableArgCall tableArgCall,
int timeColumn,
@Nullable ChangelogMode changelogMode) {
checkNoOrderBy(tableArgCall);
return new OperatorBindingTableSemantics(
createDataType(tableDataType, staticArg),
tableArgCall.getPartitionKeys(),
timeColumn,
changelogMode);
}
private OperatorBindingTableSemantics(
DataType dataType,
int[] partitionByColumns,
int timeColumn,
@Nullable ChangelogMode changelogMode) {
this.dataType = dataType;
this.partitionByColumns = partitionByColumns;
this.timeColumn = timeColumn;
this.changelogMode = changelogMode;
}
private static void checkNoOrderBy(RexTableArgCall tableArgCall) {
if (tableArgCall.getOrderKeys().length > 0) {
throw new ValidationException("ORDER BY clause is currently not supported.");
}
}
private static DataType createDataType(DataType tableDataType, StaticArgument staticArg) {
final DataType dataType = staticArg.getDataType().orElse(null);
if (dataType != null) {
// Typed table argument
return dataType;
}
// Untyped table arguments
return tableDataType;
}
@Override
public DataType dataType() {
return dataType;
}
@Override
public int[] partitionByColumns() {
return partitionByColumns;
}
@Override
public int[] orderByColumns() {
return new int[0];
}
@Override
public int timeColumn() {
return timeColumn;
}
@Override
public Optional<ChangelogMode> changelogMode() {
return Optional.ofNullable(changelogMode);
}
}
}
| OperatorBindingTableSemantics |
java | apache__kafka | clients/src/test/java/org/apache/kafka/clients/admin/internals/AdminApiDriverTest.java | {
"start": 29351,
"end": 33546
} | class ____<K, V> extends AdminApiHandler.Batched<K, V> {
private final Map<Set<K>, ApiResult<K, V>> expectedRequests = new HashMap<>();
private final MockLookupStrategy<K> lookupStrategy;
private final Map<K, Boolean> retriableUnsupportedVersionKeys;
private MockAdminApiHandler(MockLookupStrategy<K> lookupStrategy) {
this.lookupStrategy = lookupStrategy;
this.retriableUnsupportedVersionKeys = new ConcurrentHashMap<>();
}
@Override
public String apiName() {
return "mock-api";
}
@Override
public AdminApiLookupStrategy<K> lookupStrategy() {
return lookupStrategy;
}
public void expectRequest(Set<K> keys, ApiResult<K, V> result) {
expectedRequests.put(keys, result);
}
@Override
public AbstractRequest.Builder<?> buildBatchedRequest(int brokerId, Set<K> keys) {
// The request is just a placeholder in these tests
assertTrue(expectedRequests.containsKey(keys), "Unexpected fulfillment request for keys " + keys);
return new MetadataRequest.Builder(Collections.emptyList(), false);
}
@Override
public ApiResult<K, V> handleResponse(Node broker, Set<K> keys, AbstractResponse response) {
return Optional.ofNullable(expectedRequests.get(keys)).orElseThrow(() ->
new AssertionError("Unexpected fulfillment request for keys " + keys)
);
}
@Override
public Map<K, Throwable> handleUnsupportedVersionException(
int brokerId,
UnsupportedVersionException exception,
Set<K> keys
) {
return keys
.stream()
.filter(k -> !retriableUnsupportedVersionKeys.containsKey(k))
.collect(Collectors.toMap(k -> k, k -> exception));
}
public void reset() {
expectedRequests.clear();
}
public void addRetriableUnsupportedVersionKey(K key) {
retriableUnsupportedVersionKeys.put(key, Boolean.TRUE);
}
}
private static <K, V> Map<K, V> map(K key, V value) {
return Collections.singletonMap(key, value);
}
private static <K, V> Map<K, V> map(K k1, V v1, K k2, V v2) {
HashMap<K, V> map = new HashMap<>(2);
map.put(k1, v1);
map.put(k2, v2);
return map;
}
private static <K, V> Map<K, V> map(K k1, V v1, K k2, V v2, K k3, V v3) {
HashMap<K, V> map = new HashMap<>(3);
map.put(k1, v1);
map.put(k2, v2);
map.put(k3, v3);
return map;
}
private static ApiResult<String, Long> completed(String key, Long value) {
return new ApiResult<>(map(key, value), emptyMap(), Collections.emptyList());
}
private static ApiResult<String, Long> failed(String key, Throwable exception) {
return new ApiResult<>(emptyMap(), map(key, exception), Collections.emptyList());
}
private static ApiResult<String, Long> unmapped(String... keys) {
return new ApiResult<>(emptyMap(), emptyMap(), Arrays.asList(keys));
}
private static ApiResult<String, Long> completed(String k1, Long v1, String k2, Long v2) {
return new ApiResult<>(map(k1, v1, k2, v2), emptyMap(), Collections.emptyList());
}
private static ApiResult<String, Long> emptyFulfillment() {
return new ApiResult<>(emptyMap(), emptyMap(), Collections.emptyList());
}
private static LookupResult<String> failedLookup(String key, Throwable exception) {
return new LookupResult<>(map(key, exception), emptyMap());
}
private static LookupResult<String> emptyLookup() {
return new LookupResult<>(emptyMap(), emptyMap());
}
private static LookupResult<String> mapped(String key, Integer brokerId) {
return new LookupResult<>(emptyMap(), map(key, brokerId));
}
private static LookupResult<String> mapped(String k1, Integer broker1, String k2, Integer broker2) {
return new LookupResult<>(emptyMap(), map(k1, broker1, k2, broker2));
}
}
| MockAdminApiHandler |
java | grpc__grpc-java | core/src/main/java/io/grpc/internal/ClientCallImpl.java | {
"start": 8625,
"end": 13271
} | class ____ extends ContextRunnable {
ClosedByNotFoundCompressor() {
super(context);
}
@Override
public void runInContext() {
closeObserver(
finalObserver,
Status.INTERNAL.withDescription(
String.format("Unable to find compressor by name %s", compressorName)),
new Metadata());
}
}
callExecutor.execute(new ClosedByNotFoundCompressor());
return;
}
} else {
compressor = Codec.Identity.NONE;
}
prepareHeaders(headers, decompressorRegistry, compressor, fullStreamDecompression);
Deadline effectiveDeadline = effectiveDeadline();
boolean contextIsDeadlineSource = effectiveDeadline != null
&& effectiveDeadline.equals(context.getDeadline());
cancellationHandler = new CancellationHandler(effectiveDeadline, contextIsDeadlineSource);
boolean deadlineExceeded = effectiveDeadline != null && cancellationHandler.remainingNanos <= 0;
if (!deadlineExceeded) {
stream = clientStreamProvider.newStream(method, callOptions, headers, context);
} else {
ClientStreamTracer[] tracers =
GrpcUtil.getClientStreamTracers(callOptions, headers, 0,
false, false);
String deadlineName = contextIsDeadlineSource ? "Context" : "CallOptions";
Long nameResolutionDelay = callOptions.getOption(NAME_RESOLUTION_DELAYED);
String description = String.format(
"ClientCall started after %s deadline was exceeded %.9f seconds ago. "
+ "Name resolution delay %.9f seconds.", deadlineName,
cancellationHandler.remainingNanos / NANO_TO_SECS,
nameResolutionDelay == null ? 0 : nameResolutionDelay / NANO_TO_SECS);
stream = new FailingClientStream(DEADLINE_EXCEEDED.withDescription(description), tracers);
}
if (callExecutorIsDirect) {
stream.optimizeForDirectExecutor();
}
if (callOptions.getAuthority() != null) {
stream.setAuthority(callOptions.getAuthority());
}
if (callOptions.getMaxInboundMessageSize() != null) {
stream.setMaxInboundMessageSize(callOptions.getMaxInboundMessageSize());
}
if (callOptions.getMaxOutboundMessageSize() != null) {
stream.setMaxOutboundMessageSize(callOptions.getMaxOutboundMessageSize());
}
if (effectiveDeadline != null) {
stream.setDeadline(effectiveDeadline);
}
stream.setCompressor(compressor);
if (fullStreamDecompression) {
stream.setFullStreamDecompression(fullStreamDecompression);
}
stream.setDecompressorRegistry(decompressorRegistry);
channelCallsTracer.reportCallStarted();
stream.start(new ClientStreamListenerImpl(observer));
// Delay any sources of cancellation after start(), because most of the transports are broken if
// they receive cancel before start. Issue #1343 has more details
// Propagate later Context cancellation to the remote side.
cancellationHandler.setUp();
}
private void applyMethodConfig() {
MethodInfo info = callOptions.getOption(MethodInfo.KEY);
if (info == null) {
return;
}
if (info.timeoutNanos != null) {
Deadline newDeadline = Deadline.after(info.timeoutNanos, TimeUnit.NANOSECONDS);
Deadline existingDeadline = callOptions.getDeadline();
// If the new deadline is sooner than the existing deadline, swap them.
if (existingDeadline == null || newDeadline.compareTo(existingDeadline) < 0) {
callOptions = callOptions.withDeadline(newDeadline);
}
}
if (info.waitForReady != null) {
callOptions =
info.waitForReady ? callOptions.withWaitForReady() : callOptions.withoutWaitForReady();
}
if (info.maxInboundMessageSize != null) {
Integer existingLimit = callOptions.getMaxInboundMessageSize();
if (existingLimit != null) {
callOptions =
callOptions.withMaxInboundMessageSize(
Math.min(existingLimit, info.maxInboundMessageSize));
} else {
callOptions = callOptions.withMaxInboundMessageSize(info.maxInboundMessageSize);
}
}
if (info.maxOutboundMessageSize != null) {
Integer existingLimit = callOptions.getMaxOutboundMessageSize();
if (existingLimit != null) {
callOptions =
callOptions.withMaxOutboundMessageSize(
Math.min(existingLimit, info.maxOutboundMessageSize));
} else {
callOptions = callOptions.withMaxOutboundMessageSize(info.maxOutboundMessageSize);
}
}
}
private final | ClosedByNotFoundCompressor |
java | google__dagger | javatests/dagger/functional/guava/OptionalBindingComponents.java | {
"start": 1280,
"end": 1418
} | interface ____ {}
/** A value object that contains various optionally-bound objects. */
@AutoValue
public abstract static | SomeQualifier |
java | apache__camel | components/camel-micrometer/src/generated/java/org/apache/camel/component/micrometer/MicrometerEndpointConfigurer.java | {
"start": 737,
"end": 3689
} | class ____ extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
MicrometerEndpoint target = (MicrometerEndpoint) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "action": target.setAction(property(camelContext, java.lang.String.class, value)); return true;
case "decrement": target.setDecrement(property(camelContext, java.lang.String.class, value)); return true;
case "increment": target.setIncrement(property(camelContext, java.lang.String.class, value)); return true;
case "lazystartproducer":
case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
case "metricsdescription":
case "metricsDescription": target.setMetricsDescription(property(camelContext, java.lang.String.class, value)); return true;
case "tags": target.setTags(property(camelContext, java.util.Map.class, value)); return true;
case "value": target.setValue(property(camelContext, java.lang.String.class, value)); return true;
default: return false;
}
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "action": return java.lang.String.class;
case "decrement": return java.lang.String.class;
case "increment": return java.lang.String.class;
case "lazystartproducer":
case "lazyStartProducer": return boolean.class;
case "metricsdescription":
case "metricsDescription": return java.lang.String.class;
case "tags": return java.util.Map.class;
case "value": return java.lang.String.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
MicrometerEndpoint target = (MicrometerEndpoint) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "action": return target.getAction();
case "decrement": return target.getDecrement();
case "increment": return target.getIncrement();
case "lazystartproducer":
case "lazyStartProducer": return target.isLazyStartProducer();
case "metricsdescription":
case "metricsDescription": return target.getMetricsDescription();
case "tags": return target.getTags();
case "value": return target.getValue();
default: return null;
}
}
@Override
public Object getCollectionValueType(Object target, String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "tags": return java.lang.String.class;
default: return null;
}
}
}
| MicrometerEndpointConfigurer |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/objectarray/ObjectArrayAssert_allMatch_with_description_Test.java | {
"start": 1000,
"end": 1578
} | class ____ extends ObjectArrayAssertBaseTest {
private Predicate<Object> predicate;
@BeforeEach
void beforeOnce() {
predicate = o -> o != null;
}
@Override
protected ObjectArrayAssert<Object> invoke_api_method() {
return assertions.allMatch(predicate, "custom");
}
@Override
protected void verify_internal_effects() {
verify(iterables).assertAllMatch(getInfo(assertions), newArrayList(getActual(assertions)), predicate,
new PredicateDescription("custom"));
}
}
| ObjectArrayAssert_allMatch_with_description_Test |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeAttributesCLI.java | {
"start": 8419,
"end": 10138
} | class ____ extends Configured {
private Options options;
private LinkedList<String> order = new LinkedList<>();
private String header;
protected CommandHandler(String header) {
this(new YarnConfiguration());
this.header = header;
}
protected CommandHandler(Configuration conf) {
super(conf);
options = buildOptions();
}
public boolean canHandleCommand(CommandLine parse) {
ArrayList<Option> arrayList = new ArrayList<Option>(options.getOptions());
return arrayList.stream().anyMatch(opt -> parse.hasOption(opt.getOpt()));
}
public abstract int handleCommand(CommandLine parse)
throws IOException, YarnException;
public abstract Options buildOptions();
public Options getOptions() {
return options;
}
public boolean getHelp(String cmd, StringBuilder strcnd, boolean addDesc) {
Option opt = options.getOption(cmd);
if (opt != null) {
strcnd.append(DEFAULT_SEPARATOR).append(" -").append(opt.getOpt());
if (opt.hasArg()) {
strcnd.append(" <").append(opt.getArgName()).append(">");
}
if (addDesc) {
strcnd.append(DEFAULT_SEPARATOR).append("\t")
.append(opt.getDescription());
}
}
return opt == null;
}
public void getHelp(StringBuilder builder, boolean description) {
builder.append(DEFAULT_SEPARATOR).append(DEFAULT_SEPARATOR)
.append(header);
for (String option : order) {
getHelp(option, builder, description);
}
}
protected void addOrder(String key){
order.add(key);
}
}
/**
* Client commands handler.
*/
public static | CommandHandler |
java | mybatis__mybatis-3 | src/test/java/org/apache/ibatis/submitted/ognl_enum/PersonMapper.java | {
"start": 757,
"end": 1079
} | interface ____ {
Person.Type getType();
}
List<Person> selectAllByType(Person.Type type);
List<Person> selectAllByTypeNameAttribute(Person.Type type);
List<Person> selectAllByTypeWithInterface(PersonType personType);
List<Person> selectAllByTypeNameAttributeWithInterface(PersonType personType);
}
| PersonType |
java | spring-projects__spring-security | web/src/main/java/org/springframework/security/web/server/csrf/WebSessionServerCsrfTokenRepository.java | {
"start": 1152,
"end": 4010
} | class ____ implements ServerCsrfTokenRepository {
private static final String DEFAULT_CSRF_PARAMETER_NAME = "_csrf";
private static final String DEFAULT_CSRF_HEADER_NAME = "X-CSRF-TOKEN";
private static final String DEFAULT_CSRF_TOKEN_ATTR_NAME = WebSessionServerCsrfTokenRepository.class.getName()
.concat(".CSRF_TOKEN");
private String parameterName = DEFAULT_CSRF_PARAMETER_NAME;
private String headerName = DEFAULT_CSRF_HEADER_NAME;
private String sessionAttributeName = DEFAULT_CSRF_TOKEN_ATTR_NAME;
@Override
public Mono<CsrfToken> generateToken(ServerWebExchange exchange) {
return Mono.fromCallable(() -> createCsrfToken()).subscribeOn(Schedulers.boundedElastic());
}
@Override
public Mono<Void> saveToken(ServerWebExchange exchange, @Nullable CsrfToken token) {
return exchange.getSession()
.doOnNext((session) -> putToken(session.getAttributes(), token))
.flatMap((session) -> session.changeSessionId());
}
private void putToken(Map<String, Object> attributes, @Nullable CsrfToken token) {
if (token == null) {
attributes.remove(this.sessionAttributeName);
}
else {
attributes.put(this.sessionAttributeName, token);
}
}
@Override
@SuppressWarnings("NullAway") // https://github.com/uber/NullAway/issues/1290
public Mono<CsrfToken> loadToken(ServerWebExchange exchange) {
return exchange.getSession()
.filter((session) -> session.getAttributes().containsKey(this.sessionAttributeName))
.mapNotNull((session) -> session.getAttribute(this.sessionAttributeName));
}
/**
* Sets the {@link ServerWebExchange} parameter name that the {@link CsrfToken} is
* expected to appear on
* @param parameterName the new parameter name to use
*/
public void setParameterName(String parameterName) {
Assert.hasLength(parameterName, "parameterName cannot be null or empty");
this.parameterName = parameterName;
}
/**
* Sets the header name that the {@link CsrfToken} is expected to appear on and the
* header that the response will contain the {@link CsrfToken}.
* @param headerName the new header name to use
*/
public void setHeaderName(String headerName) {
Assert.hasLength(headerName, "headerName cannot be null or empty");
this.headerName = headerName;
}
/**
* Sets the {@link WebSession} attribute name that the {@link CsrfToken} is stored in
* @param sessionAttributeName the new attribute name to use
*/
public void setSessionAttributeName(String sessionAttributeName) {
Assert.hasLength(sessionAttributeName, "sessionAttributeName cannot be null or empty");
this.sessionAttributeName = sessionAttributeName;
}
private CsrfToken createCsrfToken() {
return new DefaultCsrfToken(this.headerName, this.parameterName, createNewToken());
}
private String createNewToken() {
return UUID.randomUUID().toString();
}
}
| WebSessionServerCsrfTokenRepository |
java | eclipse-vertx__vert.x | vertx-core/src/test/java/io/vertx/it/vertx/VertxThreadFactoryTest.java | {
"start": 591,
"end": 877
} | class ____ extends VertxTestBase {
@Test
public void testJsonObject() {
vertx.runOnContext(v -> {
Thread current = Thread.currentThread();
assertEquals(CustomVertxThread.class, current.getClass());
testComplete();
});
await();
}
}
| VertxThreadFactoryTest |
java | quarkusio__quarkus | extensions/hibernate-validator/deployment/src/test/java/io/quarkus/hibernate/validator/test/config/ConfigMappingInjectionInValidatorTest.java | {
"start": 1681,
"end": 1866
} | interface ____ {
String message() default "";
Class<?>[] groups() default {};
Class<? extends Payload>[] payload() default {};
}
public static | ValidEntity |
java | netty__netty | codec-compression/src/test/java/io/netty/handler/codec/compression/JZlibIntegrationTest.java | {
"start": 739,
"end": 1053
} | class ____ extends AbstractIntegrationTest {
@Override
protected EmbeddedChannel createEncoder() {
return new EmbeddedChannel(new JZlibEncoder());
}
@Override
protected EmbeddedChannel createDecoder() {
return new EmbeddedChannel(new JZlibDecoder(0));
}
}
| JZlibIntegrationTest |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/tasks/TaskManagerTests.java | {
"start": 3281,
"end": 22661
} | class ____ extends ESTestCase {
private ThreadPool threadPool;
@Before
public void setupThreadPool() {
threadPool = new TestThreadPool(TransportTasksActionTests.class.getSimpleName());
}
@After
public void terminateThreadPool() {
terminate(threadPool);
}
/**
* Makes sure that tasks that attempt to store themselves on completion retry if
* they don't succeed at first.
*/
public void testResultsServiceRetryTotalTime() {
Iterator<TimeValue> times = TaskResultsService.STORE_BACKOFF_POLICY.iterator();
long total = 0;
while (times.hasNext()) {
total += times.next().millis();
}
assertEquals(600000L, total);
}
public void testTrackingChannelTask() throws Exception {
final TaskManager taskManager = new TaskManager(Settings.EMPTY, threadPool, Set.of(), Tracer.NOOP);
Set<Task> cancelledTasks = ConcurrentCollections.newConcurrentSet();
final var transportServiceMock = mock(TransportService.class);
when(transportServiceMock.getThreadPool()).thenReturn(threadPool);
taskManager.setTaskCancellationService(new TaskCancellationService(transportServiceMock) {
@Override
void cancelTaskAndDescendants(CancellableTask task, String reason, boolean waitForCompletion, ActionListener<Void> listener) {
assertThat(reason, equalTo("channel was closed"));
assertFalse(waitForCompletion);
assertTrue("task [" + task + "] was cancelled already", cancelledTasks.add(task));
}
});
Map<TcpChannel, Set<Task>> pendingTasks = new HashMap<>();
Set<Task> expectedCancelledTasks = new HashSet<>();
FakeTcpChannel[] channels = new FakeTcpChannel[randomIntBetween(1, 10)];
List<Releasable> stopTrackingTasks = new ArrayList<>();
for (int i = 0; i < channels.length; i++) {
channels[i] = new SingleThreadedTcpChannel();
}
int iterations = randomIntBetween(1, 200);
for (int i = 0; i < iterations; i++) {
final List<Releasable> subset = randomSubsetOf(stopTrackingTasks);
stopTrackingTasks.removeAll(subset);
Releasables.close(subset);
final FakeTcpChannel channel = randomFrom(channels);
final Task task = taskManager.register("transport", "test", new CancellableRequest(Integer.toString(i)));
if (channel.isOpen() && randomBoolean()) {
channel.close();
expectedCancelledTasks.addAll(pendingTasks.getOrDefault(channel, Collections.emptySet()));
}
final Releasable stopTracking = taskManager.startTrackingCancellableChannelTask(channel, (CancellableTask) task);
if (channel.isOpen()) {
pendingTasks.computeIfAbsent(channel, k -> new HashSet<>()).add(task);
stopTrackingTasks.add(() -> {
stopTracking.close();
assertTrue(pendingTasks.get(channel).remove(task));
expectedCancelledTasks.remove(task);
});
} else {
expectedCancelledTasks.add(task);
}
}
assertBusy(() -> assertThat(expectedCancelledTasks, everyItem(in(cancelledTasks))), 30, TimeUnit.SECONDS);
for (FakeTcpChannel channel : channels) {
channel.close();
}
assertThat(taskManager.numberOfChannelPendingTaskTrackers(), equalTo(0));
}
    /**
     * Concurrency test: multiple threads race to track cancellable tasks on random channels,
     * occasionally close channels, and install channel-bound bans, while the main thread closes
     * every channel. Since all channels end up closed, every tracked task must eventually be
     * cancelled exactly once, all bans must be removed, and no pending-task trackers may leak.
     */
    public void testTrackingTaskAndCloseChannelConcurrently() throws Exception {
        final TaskManager taskManager = new TaskManager(Settings.EMPTY, threadPool, Set.of(), Tracer.NOOP);
        // Records every cancelled task; the add() return value guards against double cancellation.
        Set<CancellableTask> cancelledTasks = ConcurrentCollections.newConcurrentSet();
        final var transportServiceMock = mock(TransportService.class);
        when(transportServiceMock.getThreadPool()).thenReturn(threadPool);
        taskManager.setTaskCancellationService(new TaskCancellationService(transportServiceMock) {
            @Override
            void cancelTaskAndDescendants(CancellableTask task, String reason, boolean waitForCompletion, ActionListener<Void> listener) {
                // A task must never be cancelled more than once.
                assertTrue("task [" + task + "] was cancelled already", cancelledTasks.add(task));
            }
        });
        Set<Task> expectedCancelledTasks = ConcurrentCollections.newConcurrentSet();
        FakeTcpChannel[] channels = new FakeTcpChannel[randomIntBetween(1, 10)];
        for (int i = 0; i < channels.length; i++) {
            channels[i] = new FakeTcpChannel();
        }
        Thread[] threads = new Thread[randomIntBetween(2, 8)];
        Phaser phaser = new Phaser(threads.length);
        for (int t = 0; t < threads.length; t++) {
            String threadName = "thread-" + t;
            threads[t] = new Thread(() -> {
                // Synchronize thread start to maximize interleaving.
                phaser.arriveAndAwaitAdvance();
                int iterations = randomIntBetween(50, 500);
                for (int i = 0; i < iterations; i++) {
                    final FakeTcpChannel channel = randomFrom(channels);
                    if (randomBoolean()) {
                        // Track a new cancellable task on a random channel; because every channel
                        // is closed by the end of the test, the task must eventually be cancelled.
                        final Task task = taskManager.register("transport", "test", new CancellableRequest(threadName + ":" + i));
                        expectedCancelledTasks.add(task);
                        taskManager.startTrackingCancellableChannelTask(channel, (CancellableTask) task);
                        if (randomInt(100) < 5) {
                            // Occasionally close a random channel while tracking is in flight.
                            randomFrom(channels).close();
                        }
                    } else {
                        // Install a channel-bound ban; the test later asserts all bans are removed.
                        final TaskId taskId = new TaskId("node", between(1, 100));
                        final TcpTransportChannel tcpTransportChannel = TestTransportChannels.newFakeTcpTransportChannel(
                            "node-" + i,
                            channel,
                            threadPool,
                            "action-" + i,
                            randomIntBetween(0, 1000),
                            TransportVersion.current()
                        );
                        taskManager.setBan(taskId, "test", tcpTransportChannel);
                    }
                }
            });
            threads[t].start();
        }
        // Close all channels concurrently with the worker threads.
        for (FakeTcpChannel channel : channels) {
            channel.close();
        }
        for (Thread thread : threads) {
            thread.join();
        }
        // Exactly the tracked tasks are cancelled, all bans removed, no trackers leaked.
        assertBusy(() -> assertThat(cancelledTasks, equalTo(expectedCancelledTasks)), 1, TimeUnit.MINUTES);
        assertBusy(() -> assertThat(taskManager.getBannedTaskIds(), empty()), 1, TimeUnit.MINUTES);
        assertThat(taskManager.numberOfChannelPendingTaskTrackers(), equalTo(0));
    }
    /**
     * Verifies that bans installed through a transport channel are removed once that channel
     * disconnects: only bans with at least one still-open channel may remain, and after every
     * channel is closed no bans or channel pending-task trackers are left.
     */
    public void testRemoveBansOnChannelDisconnects() throws Exception {
        final TaskManager taskManager = new TaskManager(Settings.EMPTY, threadPool, Set.of(), Tracer.NOOP);
        final var transportServiceMock = mock(TransportService.class);
        when(transportServiceMock.getThreadPool()).thenReturn(threadPool);
        taskManager.setTaskCancellationService(new TaskCancellationService(transportServiceMock) {
            @Override
            void cancelTaskAndDescendants(CancellableTask task, String reason, boolean waitForCompletion, ActionListener<Void> listener) {}
        });
        // Remembers the channel(s) each ban was installed through, to compute expected survivors.
        Map<TaskId, Set<TcpChannel>> installedBans = new HashMap<>();
        FakeTcpChannel[] channels = new FakeTcpChannel[randomIntBetween(1, 10)];
        for (int i = 0; i < channels.length; i++) {
            channels[i] = new SingleThreadedTcpChannel();
        }
        int iterations = randomIntBetween(1, 200);
        for (int i = 0; i < iterations; i++) {
            final FakeTcpChannel channel = randomFrom(channels);
            // Sometimes close the channel before installing the ban on it.
            if (channel.isOpen() && randomBoolean()) {
                channel.close();
            }
            // Task ids may repeat across iterations, so a ban can be tied to several channels.
            TaskId taskId = new TaskId("node-" + randomIntBetween(1, 3), randomIntBetween(1, 100));
            installedBans.computeIfAbsent(taskId, t -> new HashSet<>()).add(channel);
            taskManager.setBan(
                taskId,
                "test",
                TestTransportChannels.newFakeTcpTransportChannel(
                    "node",
                    channel,
                    threadPool,
                    "action",
                    randomIntBetween(1, 10000),
                    TransportVersion.current()
                )
            );
        }
        // A ban may only survive while at least one channel it was installed through is open.
        final Set<TaskId> expectedBannedTasks = installedBans.entrySet()
            .stream()
            .filter(e -> e.getValue().stream().anyMatch(CloseableChannel::isOpen))
            .map(Map.Entry::getKey)
            .collect(Collectors.toSet());
        assertBusy(() -> assertThat(taskManager.getBannedTaskIds(), equalTo(expectedBannedTasks)), 30, TimeUnit.SECONDS);
        // Closing every remaining channel must remove all bans and leak no trackers.
        for (FakeTcpChannel channel : channels) {
            channel.close();
        }
        assertBusy(() -> assertThat(taskManager.getBannedTaskIds(), empty()));
        assertThat(taskManager.numberOfChannelPendingTaskTrackers(), equalTo(0));
    }
public void testTaskAccounting() {
final TaskManager taskManager = new TaskManager(Settings.EMPTY, threadPool, Set.of());
final Task task1 = taskManager.register("transport", "test", new CancellableRequest("thread 1"));
final Task task2 = taskManager.register("transport", "test", new CancellableRequest("thread 2"));
final MockConnection connection1 = new MockConnection();
final MockConnection connection2 = new MockConnection();
Releasable releasableConnection1 = taskManager.registerChildConnection(task1.getId(), connection1);
Releasable releasableConnection2 = taskManager.registerChildConnection(task2.getId(), connection2);
Releasable releasableConnection3 = taskManager.registerChildConnection(task1.getId(), connection1);
assertEquals(2, taskManager.childTasksPerConnection(task1.getId(), connection1).intValue());
assertEquals(1, taskManager.childTasksPerConnection(task2.getId(), connection2).intValue());
releasableConnection1.close();
assertEquals(1, taskManager.childTasksPerConnection(task1.getId(), connection1).intValue());
releasableConnection2.close();
assertNull(taskManager.childTasksPerConnection(task2.getId(), connection2));
releasableConnection3.close();
assertNull(taskManager.childTasksPerConnection(task1.getId(), connection1));
}
    /**
     * Check that registering a task starts tracing on it when an APM trace context is present in
     * the thread context, and that unregistering always attempts to stop the trace.
     */
    public void testRegisterTaskStartsTracingIfTraceParentExists() {
        final Tracer mockTracer = mock(Tracer.class);
        final TaskManager taskManager = new TaskManager(Settings.EMPTY, threadPool, Set.of(), mockTracer);
        // fake an APM trace context
        threadPool.getThreadContext().putTransient(Task.APM_TRACE_CONTEXT, new Object());
        // Randomly exercise both the with-parent and without-parent registration paths.
        final boolean hasParentTask = randomBoolean();
        final TaskId parentTask = hasParentTask ? new TaskId("parentNode", 1) : TaskId.EMPTY_TASK_ID;
        try (var ignored = threadPool.getThreadContext().newTraceContext()) {
            final Task task = taskManager.register("testType", "testAction", new TaskAwareRequest() {
                @Override
                public void setParentTask(TaskId taskId) {}

                @Override
                public void setRequestId(long requestId) {}

                @Override
                public TaskId getParentTask() {
                    return parentTask;
                }
            });
            // The parent task id is reported as a trace attribute only when a parent exists.
            Map<String, Object> attributes = hasParentTask
                ? Map.of(Tracer.AttributeKeys.TASK_ID, task.getId(), Tracer.AttributeKeys.PARENT_TASK_ID, parentTask.toString())
                : Map.of(Tracer.AttributeKeys.TASK_ID, task.getId());
            verify(mockTracer).startTrace(any(), eq(task), eq("testAction"), eq(attributes));
            taskManager.unregister(task);
            verify(mockTracer).stopTrace(task); // always attempt stopping to guard against leaks
        }
    }
/**
* Check that registering a task also causes tracing to be started on that task.
*/
public void testRegisterTaskSkipsTracingIfTraceParentMissing() {
final Tracer mockTracer = mock(Tracer.class);
final TaskManager taskManager = new TaskManager(Settings.EMPTY, threadPool, Set.of(), mockTracer);
// no trace parent
try (var ignored = threadPool.getThreadContext().newTraceContext()) {
final Task task = taskManager.register("testType", "testAction", new TaskAwareRequest() {
@Override
public void setParentTask(TaskId taskId) {}
@Override
public void setRequestId(long requestId) {}
@Override
public TaskId getParentTask() {
return TaskId.EMPTY_TASK_ID;
}
});
}
verifyNoInteractions(mockTracer);
}
/**
* Check that unregistering a task also causes tracing to be stopped on that task.
*/
public void testUnregisterTaskStopsTracingIfTraceContextExists() {
final Tracer mockTracer = mock(Tracer.class);
final TaskManager taskManager = new TaskManager(Settings.EMPTY, threadPool, Set.of(), mockTracer);
final Task task = taskManager.register("testType", "testAction", new TaskAwareRequest() {
@Override
public void setParentTask(TaskId taskId) {}
@Override
public void setRequestId(long requestId) {}
@Override
public TaskId getParentTask() {
return TaskId.EMPTY_TASK_ID;
}
});
// fake an APM trace context
threadPool.getThreadContext().putTransient(Task.APM_TRACE_CONTEXT, null);
taskManager.unregister(task);
verify(mockTracer).stopTrace(task);
}
    /**
     * Check that unregistering a task still attempts to stop tracing even when no APM trace
     * context is present, and that the tracer is otherwise untouched.
     */
    public void testUnregisterTaskStopsTracingIfTraceContextMissing() {
        final Tracer mockTracer = mock(Tracer.class);
        final TaskManager taskManager = new TaskManager(Settings.EMPTY, threadPool, Set.of(), mockTracer);
        final Task task = taskManager.register("testType", "testAction", new TaskAwareRequest() {
            @Override
            public void setParentTask(TaskId taskId) {}

            @Override
            public void setRequestId(long requestId) {}

            @Override
            public TaskId getParentTask() {
                return TaskId.EMPTY_TASK_ID;
            }
        });
        // no trace context
        taskManager.unregister(task);
        verify(mockTracer).stopTrace(task); // always attempt stopping to guard against leaks
        verifyNoMoreInteractions(mockTracer);
    }
    /**
     * Check that registering and executing a task starts tracing when a trace parent exists, and
     * that the trace is stopped once execution completes.
     */
    public void testRegisterAndExecuteStartsTracingIfTraceParentExists() {
        final Tracer mockTracer = mock(Tracer.class);
        final TaskManager taskManager = new TaskManager(Settings.EMPTY, threadPool, Set.of(), mockTracer);
        // fake an APM trace context
        threadPool.getThreadContext().putTransient(Task.APM_TRACE_CONTEXT, new Object());
        final Task task = taskManager.registerAndExecute(
            "testType",
            new TransportAction<ActionRequest, ActionResponse>(
                "actionName",
                new ActionFilters(Set.of()),
                taskManager,
                EsExecutors.DIRECT_EXECUTOR_SERVICE
            ) {
                @Override
                protected void doExecute(Task task, ActionRequest request, ActionListener<ActionResponse> listener) {
                    // Complete immediately so both the start and the stop of the trace happen
                    // before the verifications below.
                    listener.onResponse(new ActionResponse() {
                        @Override
                        public void writeTo(StreamOutput out) {}
                    });
                }
            },
            new ActionRequest() {
                @Override
                public ActionRequestValidationException validate() {
                    return null;
                }

                @Override
                public TaskId getParentTask() {
                    return TaskId.EMPTY_TASK_ID;
                }
            },
            null,
            ActionTestUtils.assertNoFailureListener(r -> {})
        );
        verify(mockTracer).startTrace(any(), eq(task), eq("actionName"), anyMap());
        verify(mockTracer).stopTrace(task); // always attempt stopping to guard against leaks
    }
    /**
     * Check that registering and executing a task skips starting a trace when no trace parent
     * exists, while stopping the trace is still attempted to guard against leaks.
     */
    public void testRegisterAndExecuteSkipsTracingIfTraceParentMissing() {
        final Tracer mockTracer = mock(Tracer.class);
        final TaskManager taskManager = new TaskManager(Settings.EMPTY, threadPool, Set.of(), mockTracer);
        // clean thread context without trace parent
        final Task task = taskManager.registerAndExecute(
            "testType",
            new TransportAction<ActionRequest, ActionResponse>(
                "actionName",
                new ActionFilters(Set.of()),
                taskManager,
                EsExecutors.DIRECT_EXECUTOR_SERVICE
            ) {
                @Override
                protected void doExecute(Task task, ActionRequest request, ActionListener<ActionResponse> listener) {
                    // Complete immediately so execution (and trace teardown) finishes before the
                    // verifications below.
                    listener.onResponse(new ActionResponse() {
                        @Override
                        public void writeTo(StreamOutput out) {}
                    });
                }
            },
            new ActionRequest() {
                @Override
                public ActionRequestValidationException validate() {
                    return null;
                }

                @Override
                public TaskId getParentTask() {
                    return TaskId.EMPTY_TASK_ID;
                }
            },
            null,
            ActionTestUtils.assertNoFailureListener(r -> {})
        );
        verify(mockTracer).stopTrace(task); // always attempt stopping to guard against leaks
        verifyNoMoreInteractions(mockTracer); // in particular, startTrace was never called
    }
    /**
     * Check that {@code maybeStartTrace} is attempted only when a task is registered with the
     * trace flag enabled, independent of the request flavor produced by {@code makeTaskRequest}.
     */
    public void testRegisterWithEnabledDisabledTracing() {
        final Tracer mockTracer = mock(Tracer.class);
        // Spy so the internal maybeStartTrace call can be verified directly.
        final TaskManager taskManager = spy(new TaskManager(Settings.EMPTY, threadPool, Set.of(), mockTracer));
        // trace flag false: tracing must not even be attempted, for either request flavor
        // (makeTaskRequest is a helper defined elsewhere in this file; its boolean presumably
        // toggles cancellability — TODO confirm against its definition)
        taskManager.register("type", "action", makeTaskRequest(true, 123), false);
        verify(taskManager, times(0)).maybeStartTrace(any(), any());
        taskManager.register("type", "action", makeTaskRequest(false, 234), false);
        verify(taskManager, times(0)).maybeStartTrace(any(), any());
        clearInvocations(taskManager);
        // trace flag true: tracing is attempted exactly once per registration
        taskManager.register("type", "action", makeTaskRequest(true, 345), true);
        verify(taskManager, times(1)).maybeStartTrace(any(), any());
        clearInvocations(taskManager);
        taskManager.register("type", "action", makeTaskRequest(false, 456), true);
        verify(taskManager, times(1)).maybeStartTrace(any(), any());
    }
static | TaskManagerTests |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/interceptors/producer/ProducerWithAbstractClassWithInterfaceInterceptionAndBindingsSourceTest.java | {
"start": 2575,
"end": 2843
} | class ____ extends MyNonbeanBase {
@Override
public String hello1() {
return "hello1";
}
@Override
public String hello2() {
return "hello2";
}
}
@MyBinding1
static abstract | MyNonbeanImpl |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/clientproxy/constructor/ClientProxyConstructorGuardTest.java | {
"start": 467,
"end": 901
} | class ____ {
@RegisterExtension
public ArcTestContainer container = new ArcTestContainer(Moo.class);
@Test
public void testProducer() throws IOException {
Moo moo = Arc.container().instance(Moo.class).get();
assertTrue(moo instanceof ClientProxy);
assertEquals(10, ((Moo) ((ClientProxy) moo).arc_contextualInstance()).val);
}
@ApplicationScoped
static | ClientProxyConstructorGuardTest |
java | apache__flink | flink-kubernetes/src/test/java/org/apache/flink/kubernetes/KubernetesResourceManagerDriverTest.java | {
"start": 16520,
"end": 28283
} | class ____ extends ResourceManagerDriverTestBase<KubernetesWorkerNode>.Context {
private final KubernetesPod previousAttemptPod =
new TestingKubernetesPod(CLUSTER_ID + "-taskmanager-1-1");
private final CompletableFuture<WatchCallbackHandler<KubernetesPod>>
setWatchPodsAndDoCallbackFuture = new CompletableFuture<>();
private final CompletableFuture<Void> closeKubernetesWatchFuture =
new CompletableFuture<>();
private final List<TestingFlinkKubeClient.MockKubernetesWatch> podsWatches =
new ArrayList<>();
private final CompletableFuture<String> stopAndCleanupClusterFuture =
new CompletableFuture<>();
private final CompletableFuture<KubernetesPod> createTaskManagerPodFuture =
new CompletableFuture<>();
private final CompletableFuture<String> stopPodFuture = new CompletableFuture<>();
final TestingFlinkKubeClient.Builder flinkKubeClientBuilder =
TestingFlinkKubeClient.builder()
.setWatchPodsAndDoCallbackFunction(
(ignore, handler) -> {
setWatchPodsAndDoCallbackFuture.complete(handler);
final TestingFlinkKubeClient.MockKubernetesWatch watch =
new TestingFlinkKubeClient.MockKubernetesWatch() {
@Override
public void close() {
super.close();
closeKubernetesWatchFuture.complete(null);
}
};
podsWatches.add(watch);
return CompletableFuture.supplyAsync(() -> watch);
})
.setStopAndCleanupClusterConsumer(stopAndCleanupClusterFuture::complete)
.setCreateTaskManagerPodFunction(
(pod) -> {
createTaskManagerPodFuture.complete(pod);
getPodCallbackHandler()
.onAdded(
Collections.singletonList(
new TestingKubernetesPod(
pod.getName(), true, false)));
return FutureUtils.completedVoidFuture();
})
.setStopPodFunction(
(podName) -> {
stopPodFuture.complete(podName);
return FutureUtils.completedVoidFuture();
});
private TestingFlinkKubeClient flinkKubeClient;
FlinkKubeClient.WatchCallbackHandler<KubernetesPod> getPodCallbackHandler() {
try {
return setWatchPodsAndDoCallbackFuture.get(TIMEOUT_SEC, TimeUnit.SECONDS);
} catch (Exception e) {
fail("Cannot get WatchCallbackHandler, cause: " + e.getMessage());
}
return null;
}
List<TestingFlinkKubeClient.MockKubernetesWatch> getPodsWatches() {
return podsWatches;
}
CompletableFuture<WatchCallbackHandler<KubernetesPod>>
getSetWatchPodsAndDoCallbackFuture() {
return setWatchPodsAndDoCallbackFuture;
}
@Override
protected void prepareRunTest() {
flinkConfig.set(KubernetesConfigOptions.CLUSTER_ID, CLUSTER_ID);
flinkConfig.set(
TaskManagerOptions.RPC_PORT, String.valueOf(Constants.TASK_MANAGER_RPC_PORT));
flinkKubeClient = flinkKubeClientBuilder.build();
}
@Override
protected void preparePreviousAttemptWorkers() {
flinkKubeClientBuilder.setGetPodsWithLabelsFunction(
(ignore) -> Collections.singletonList(previousAttemptPod));
}
@Override
protected ResourceManagerDriver<KubernetesWorkerNode> createResourceManagerDriver() {
return new KubernetesResourceManagerDriver(
flinkConfig, flinkKubeClient, KUBERNETES_RESOURCE_MANAGER_CONFIGURATION);
}
@Override
protected void validateInitialization() throws Exception {
assertThat(getPodCallbackHandler()).isNotNull();
}
@Override
protected void validateWorkersRecoveredFromPreviousAttempt(
Collection<KubernetesWorkerNode> workers) {
assertThat(workers).hasSize(1);
final ResourceID resourceId = workers.iterator().next().getResourceID();
assertThat(resourceId).asString().isEqualTo(previousAttemptPod.getName());
}
@Override
protected void validateTermination() throws Exception {
closeKubernetesWatchFuture.get(TIMEOUT_SEC, TimeUnit.SECONDS);
}
@Override
protected void validateDeregisterApplication() throws Exception {
assertThat(stopAndCleanupClusterFuture.get(TIMEOUT_SEC, TimeUnit.SECONDS))
.isEqualTo(CLUSTER_ID);
}
@Override
protected void validateRequestedResources(
Collection<TaskExecutorProcessSpec> taskExecutorProcessSpecs) throws Exception {
assertThat(taskExecutorProcessSpecs).hasSize(1);
final TaskExecutorProcessSpec taskExecutorProcessSpec =
taskExecutorProcessSpecs.iterator().next();
final KubernetesPod pod = createTaskManagerPodFuture.get(TIMEOUT_SEC, TimeUnit.SECONDS);
final ResourceRequirements resourceRequirements =
pod.getInternalResource().getSpec().getContainers().get(0).getResources();
assertThat(
resourceRequirements
.getRequests()
.get(Constants.RESOURCE_NAME_MEMORY)
.getAmount())
.isEqualTo(
String.valueOf(
taskExecutorProcessSpec
.getTotalProcessMemorySize()
.getMebiBytes()));
assertThat(
resourceRequirements
.getRequests()
.get(Constants.RESOURCE_NAME_CPU)
.getAmount())
.isEqualTo(
String.valueOf(
taskExecutorProcessSpec
.getCpuCores()
.getValue()
.doubleValue()));
assertThat(
resourceRequirements
.getLimits()
.get(Constants.RESOURCE_NAME_MEMORY)
.getAmount())
.isEqualTo(
String.valueOf(
taskExecutorProcessSpec
.getTotalProcessMemorySize()
.getMebiBytes()));
assertThat(
resourceRequirements
.getLimits()
.get(Constants.RESOURCE_NAME_CPU)
.getAmount())
.isEqualTo(
String.valueOf(
taskExecutorProcessSpec
.getCpuCores()
.getValue()
.doubleValue()));
}
@Override
protected void validateReleaseResources(Collection<KubernetesWorkerNode> workerNodes)
throws Exception {
assertThat(workerNodes).hasSize(1);
final ResourceID resourceId = workerNodes.iterator().next().getResourceID();
assertThat(stopPodFuture.get(TIMEOUT_SEC, TimeUnit.SECONDS))
.isEqualTo(resourceId.toString());
}
void testOnPodTerminated(Consumer<List<KubernetesPod>> sendPodTerminatedEvent)
throws Exception {
testOnPodTerminated(sendPodTerminatedEvent, true, true);
}
void testOnPodTerminated(
Consumer<List<KubernetesPod>> sendPodTerminatedEvent,
boolean isPodScheduled,
boolean isPodTerminated)
throws Exception {
final CompletableFuture<KubernetesWorkerNode> requestResourceFuture =
new CompletableFuture<>();
final CompletableFuture<ResourceID> onWorkerTerminatedConsumer =
new CompletableFuture<>();
resourceEventHandlerBuilder.setOnWorkerTerminatedConsumer(
(resourceId, ignore) -> onWorkerTerminatedConsumer.complete(resourceId));
runTest(
() -> {
// request new pod and send onAdded event
runInMainThread(
() ->
getDriver()
.requestResource(TASK_EXECUTOR_PROCESS_SPEC)
.thenAccept(requestResourceFuture::complete));
final KubernetesPod pod =
createTaskManagerPodFuture.get(TIMEOUT_SEC, TimeUnit.SECONDS);
// prepare validation
// - pod removed
// - onTerminated is called in main thread with correct resource id
final CompletableFuture<Void> validationFuture =
CompletableFuture.allOf(
stopPodFuture.thenAccept(
(podname) ->
assertThat(podname)
.isEqualTo(pod.getName())),
onWorkerTerminatedConsumer.thenAccept(
(resourceId) -> {
validateInMainThread();
assertThat(resourceId)
.asString()
.isEqualTo(pod.getName());
}));
sendPodTerminatedEvent.accept(
Collections.singletonList(
new TestingKubernetesPod(
pod.getName(), isPodScheduled, isPodTerminated)));
// make sure finishing validation
validationFuture.get(TIMEOUT_SEC, TimeUnit.SECONDS);
});
}
}
}
| Context |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobCounterUpdateEvent.java | {
"start": 980,
"end": 1526
} | class ____ extends JobEvent {
List<CounterIncrementalUpdate> counterUpdates = null;
public JobCounterUpdateEvent(JobId jobId) {
super(jobId, JobEventType.JOB_COUNTER_UPDATE);
counterUpdates = new ArrayList<JobCounterUpdateEvent.CounterIncrementalUpdate>();
}
public void addCounterUpdate(Enum<?> key, long incrValue) {
counterUpdates.add(new CounterIncrementalUpdate(key, incrValue));
}
public List<CounterIncrementalUpdate> getCounterUpdates() {
return counterUpdates;
}
public static | JobCounterUpdateEvent |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/byte_/ByteAssert_isOne_Test.java | {
"start": 878,
"end": 1167
} | class ____ extends ByteAssertBaseTest {
@Override
protected ByteAssert invoke_api_method() {
return assertions.isOne();
}
@Override
protected void verify_internal_effects() {
verify(bytes).assertIsOne(getInfo(assertions), getActual(assertions));
}
}
| ByteAssert_isOne_Test |
java | mybatis__mybatis-3 | src/main/java/org/apache/ibatis/annotations/Results.java | {
"start": 1016,
"end": 1694
} | interface ____ {
* @Results({
* @Result(property = "id", column = "id", id = true),
* @Result(property = "name", column = "name"),
* @Result(property = "email" column = "id", one = @One(select = "selectUserEmailById", fetchType = FetchType.LAZY)),
* @Result(property = "telephoneNumbers" column = "id", many = @Many(select = "selectAllUserTelephoneNumberById", fetchType = FetchType.LAZY))
* })
* @Select("SELECT id, name FROM users WHERE id = #{id}")
* User selectById(int id);
* }
* </pre>
*
* @author Clinton Begin
*/
@Documented
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.METHOD)
public @ | UserMapper |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/lazyonetoone/LazyOneToOneWithEntityGraphTest.java | {
"start": 1266,
"end": 2537
} | class ____ {
@BeforeAll
void setUp(SessionFactoryScope scope) {
scope.inTransaction(session -> {
// Create company
Company company = new Company();
company.id = 1L;
company.name = "Hibernate";
session.persist(company);
// Create project
Project project = new Project();
project.id = 1L;
session.persist(project);
// Create employee
Employee employee = new Employee();
employee.id = 1L;
employee.company = company;
employee.projects = List.of(project);
session.persist(employee);
});
}
@AfterAll
void tearDown(SessionFactoryScope scope) {
scope.inTransaction(session -> {
scope.getSessionFactory().getSchemaManager().truncateMappedObjects();
});
}
@Test
void reproducerTest(SessionFactoryScope scope) {
scope.inTransaction(session -> {
// Load employee using entity graph
Employee employee = session.createQuery(
"select e from Employee e where e.id = :id", Employee.class)
.setParameter("id", 1L)
.setHint("javax.persistence.fetchgraph", session.getEntityGraph("employee.projects"))
.getSingleResult();
assertTrue(isInitialized(employee.projects));
assertEquals("Hibernate", employee.company.name);
});
}
@Entity(name = "Company")
public static | LazyOneToOneWithEntityGraphTest |
java | apache__kafka | streams/src/main/java/org/apache/kafka/streams/kstream/internals/KTableKTableInnerJoin.java | {
"start": 2434,
"end": 3041
} | class ____ extends KTableKTableAbstractJoinValueGetterSupplier<K, VOut, V1, V2> {
KTableKTableInnerJoinValueGetterSupplier(final KTableValueGetterSupplier<K, V1> valueGetterSupplier1,
final KTableValueGetterSupplier<K, V2> valueGetterSupplier2) {
super(valueGetterSupplier1, valueGetterSupplier2);
}
public KTableValueGetter<K, VOut> get() {
return new KTableKTableInnerJoinValueGetter(valueGetterSupplier1.get(), valueGetterSupplier2.get());
}
}
private | KTableKTableInnerJoinValueGetterSupplier |
java | apache__dubbo | dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/rest/mapping/RequestMappingRegistry.java | {
"start": 1155,
"end": 1440
} | interface ____ {
void register(Invoker<?> invoker);
void unregister(Invoker<?> invoker);
HandlerMeta lookup(HttpRequest request);
boolean exists(String path, String method);
Collection<Registration> getRegistrations();
void destroy();
}
| RequestMappingRegistry |
java | apache__avro | lang/java/avro/src/test/java/org/apache/avro/util/TestRandomData.java | {
"start": 1374,
"end": 4912
} | class ____ {
private long seed;
private int count;
private File file;
private GenericData genericData;
private SpecificData specificData;
private Schema specificSchema;
private ReflectData reflectData;
private Schema reflectedSchema;
@Before
public void setUp() throws Exception {
file = Files.createTempFile("randomData", ".avro").toFile();
seed = System.currentTimeMillis();
count = new Random().nextInt(50) + 75;
genericData = GenericData.get();
specificData = SpecificData.get();
specificSchema = specificData.getSchema(SpecificTestRecord.class);
reflectData = ReflectData.get();
reflectedSchema = reflectData.getSchema(ReflectTestRecord.class);
}
@Test
public void testRandomDataFromGenericToGeneric() throws IOException {
checkWrite(genericData, TEST_SCHEMA);
checkRead(genericData, TEST_SCHEMA);
}
@Test
public void testRandomDataFromGenericToSpecific() throws IOException {
checkWrite(genericData, TEST_SCHEMA);
checkRead(specificData, specificSchema);
}
@Test
public void testRandomDataFromGenericToReflected() throws IOException {
checkWrite(genericData, TEST_SCHEMA);
checkRead(reflectData, reflectedSchema);
}
@Test
public void testRandomDataFromSpecificToGeneric() throws IOException {
checkWrite(specificData, specificSchema);
checkRead(genericData, TEST_SCHEMA);
}
@Test
public void testRandomDataFromSpecificToSpecific() throws IOException {
checkWrite(specificData, specificSchema);
checkRead(specificData, specificSchema);
}
@Test
public void testRandomDataFromSpecificToReflected() throws IOException {
checkWrite(specificData, specificSchema);
checkRead(reflectData, reflectedSchema);
}
@Test
public void testRandomDataFromReflectedToGeneric() throws IOException {
checkWrite(reflectData, reflectedSchema);
checkRead(genericData, TEST_SCHEMA);
}
@Test
public void testRandomDataFromReflectedToSpecific() throws IOException {
checkWrite(reflectData, reflectedSchema);
checkRead(specificData, specificSchema);
}
@Test
public void testRandomDataFromReflectedToReflected() throws IOException {
checkWrite(reflectData, reflectedSchema);
checkRead(reflectData, reflectedSchema);
}
private void checkWrite(GenericData genericData, Schema schema) throws IOException {
// noinspection unchecked
try (DataFileWriter<Object> writer = new DataFileWriter<Object>(genericData.createDatumWriter(schema))) {
writer.create(schema, file);
for (Object datum : new RandomData(genericData, schema, this.count, seed)) {
writer.append(datum);
}
}
}
private void checkRead(GenericData genericData, Schema schema) throws IOException {
// noinspection unchecked
try (DataFileReader<Object> reader = new DataFileReader<Object>(file, genericData.createDatumReader(schema))) {
for (Object expected : new RandomData(genericData, schema, this.count, seed)) {
assertEquals(expected, reader.next());
}
}
}
/*
* Test classes: they implement the same schema, but one is a SpecificRecord and
* the other uses a reflected schema.
*/
public static final String TEST_SCHEMA_JSON = "{\"type\":\"record\",\"name\":\"Record\",\"fields\":[{\"name\":\"x\",\"type\":\"int\"},{\"name\":\"y\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}}]}";
public static final Schema TEST_SCHEMA = new Schema.Parser().parse(TEST_SCHEMA_JSON);
public static | TestRandomData |
java | apache__rocketmq | remoting/src/main/java/org/apache/rocketmq/remoting/protocol/header/UpdateGroupForbiddenRequestHeader.java | {
"start": 1527,
"end": 2312
} | class ____ extends TopicRequestHeader {
@CFNotNull
@RocketMQResource(ResourceType.GROUP)
private String group;
@CFNotNull
@RocketMQResource(ResourceType.TOPIC)
private String topic;
private Boolean readable;
@Override
public void checkFields() throws RemotingCommandException {
}
public String getTopic() {
return topic;
}
public void setTopic(String topic) {
this.topic = topic;
}
public String getGroup() {
return group;
}
public void setGroup(String group) {
this.group = group;
}
public Boolean getReadable() {
return readable;
}
public void setReadable(Boolean readable) {
this.readable = readable;
}
}
| UpdateGroupForbiddenRequestHeader |
java | alibaba__druid | core/src/main/java/com/alibaba/druid/sql/dialect/oracle/ast/stmt/OracleSetTransactionStatement.java | {
"start": 792,
"end": 1585
} | class ____ extends OracleStatementImpl implements OracleStatement {
private boolean readOnly;
private boolean write;
private SQLExpr name;
@Override
public void accept0(OracleASTVisitor visitor) {
if (visitor.visit(this)) {
acceptChild(visitor, name);
}
visitor.endVisit(this);
}
public SQLExpr getName() {
return name;
}
public void setName(SQLExpr name) {
this.name = name;
}
public boolean isReadOnly() {
return readOnly;
}
public void setReadOnly(boolean readOnly) {
this.readOnly = readOnly;
}
public boolean isWrite() {
return write;
}
public void setWrite(boolean write) {
this.write = write;
}
}
| OracleSetTransactionStatement |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Utils.java | {
"start": 2240,
"end": 2395
} | class ____ implements PathFilter {
public boolean accept(Path path) {
return !"_logs".equals(path.getName());
}
}
}
}
| OutputLogFilter |
java | assertj__assertj-core | assertj-core/src/main/java/org/assertj/core/internal/Maps.java | {
"start": 4352,
"end": 32102
} | class ____ {
private static final Maps INSTANCE = new Maps();
public static Maps instance() {
return INSTANCE;
}
// TODO reduce the visibility of the fields annotated with @VisibleForTesting
Failures failures = Failures.instance();
// TODO reduce the visibility of the fields annotated with @VisibleForTesting
Conditions conditions = Conditions.instance();
public <K, V> void assertAllSatisfy(AssertionInfo info, Map<K, V> actual,
BiConsumer<? super K, ? super V> entryRequirements) {
requireNonNull(entryRequirements, "The BiConsumer<K, V> expressing the assertions requirements must not be null");
assertNotNull(info, actual);
List<UnsatisfiedRequirement> unsatisfiedRequirements = actual.entrySet().stream()
.map(entry -> failsRequirements(entryRequirements, entry))
.flatMap(Optional::stream)
.collect(toList());
if (!unsatisfiedRequirements.isEmpty())
throw failures.failure(info, elementsShouldSatisfy(actual, unsatisfiedRequirements, info));
}
// Runs the requirements against one entry; an AssertionError thrown by the BiConsumer marks
// the entry as unsatisfied, a normal return marks it as satisfied.
private static <K, V> Optional<UnsatisfiedRequirement> failsRequirements(BiConsumer<? super K, ? super V> entryRequirements,
                                                                         Entry<K, V> entry) {
    try {
        entryRequirements.accept(entry.getKey(), entry.getValue());
    } catch (AssertionError ex) {
        return Optional.of(new UnsatisfiedRequirement(entry, ex));
    }
    return Optional.empty();
}
// Asserts that at least one entry of the map satisfies the given requirements.
// Returns on the first satisfying entry; if none satisfies, fails with the accumulated
// unsatisfied requirements of every entry.
public <K, V> void assertAnySatisfy(AssertionInfo info, Map<K, V> actual,
                                    BiConsumer<? super K, ? super V> entryRequirements) {
    requireNonNull(entryRequirements, "The BiConsumer<K, V> expressing the assertions requirements must not be null");
    assertNotNull(info, actual);
    List<UnsatisfiedRequirement> unsatisfiedRequirements = new ArrayList<>();
    for (Entry<K, V> entry : actual.entrySet()) {
        Optional<UnsatisfiedRequirement> result = failsRequirements(entryRequirements, entry);
        if (result.isEmpty()) return; // entry satisfied the requirements
        unsatisfiedRequirements.add(result.get());
    }
    throw failures.failure(info, elementsShouldSatisfyAny(actual, unsatisfiedRequirements, info));
}
// Asserts that no entry of the map satisfies the given requirements; collects every entry
// that (unexpectedly) satisfies them and fails if any were found.
public <K, V> void assertNoneSatisfy(AssertionInfo info, Map<K, V> actual, BiConsumer<? super K, ? super V> entryRequirements) {
    requireNonNull(entryRequirements, "The BiConsumer<K, V> expressing the assertions requirements must not be null");
    assertNotNull(info, actual);
    List<Entry<K, V>> erroneousEntries = actual.entrySet().stream()
                                               .map(entry -> failsRestrictions(entry, entryRequirements))
                                               .flatMap(Optional::stream)
                                               .collect(toList());
    if (!erroneousEntries.isEmpty()) throw failures.failure(info, noElementsShouldSatisfy(actual, erroneousEntries));
}
// Inverse of failsRequirements: returns the entry when it MEETS the restrictions (which is
// the failure case for assertNoneSatisfy), and empty when it does not.
private <V, K> Optional<Entry<K, V>> failsRestrictions(Entry<K, V> entry,
                                                       BiConsumer<? super K, ? super V> entryRequirements) {
    try {
        entryRequirements.accept(entry.getKey(), entry.getValue());
    } catch (AssertionError e) {
        // element is supposed not to meet the given restrictions
        return Optional.empty();
    }
    // element meets the given restrictions!
    return Optional.of(entry);
}
// Asserts that the map is either null or contains no entries.
public void assertNullOrEmpty(AssertionInfo info, Map<?, ?> actual) {
    if (actual == null) return;
    if (actual.isEmpty()) return;
    throw failures.failure(info, shouldBeNullOrEmpty(actual));
}
// Asserts that the map is non-null and contains no entries.
public void assertEmpty(AssertionInfo info, Map<?, ?> actual) {
    assertNotNull(info, actual);
    if (!actual.isEmpty()) {
        throw failures.failure(info, shouldBeEmpty(actual));
    }
}
// Asserts that the map is non-null and contains at least one entry.
public void assertNotEmpty(AssertionInfo info, Map<?, ?> actual) {
    assertNotNull(info, actual);
    if (actual.isEmpty()) {
        throw failures.failure(info, shouldNotBeEmpty());
    }
}
// Asserts that the map has exactly the expected number of entries.
public void assertHasSize(AssertionInfo info, Map<?, ?> actual, int expectedSize) {
    assertNotNull(info, actual);
    checkSizes(actual, actual.size(), expectedSize, info);
}
// Asserts that the map size is strictly greater than the given boundary.
public void assertHasSizeGreaterThan(AssertionInfo info, Map<?, ?> actual, int boundary) {
    assertNotNull(info, actual);
    checkSizeGreaterThan(actual, boundary, actual.size(), info);
}
// Asserts that the map size is greater than or equal to the given boundary.
public void assertHasSizeGreaterThanOrEqualTo(AssertionInfo info, Map<?, ?> actual, int boundary) {
    assertNotNull(info, actual);
    checkSizeGreaterThanOrEqualTo(actual, boundary, actual.size(), info);
}
// Asserts that the map size is strictly less than the given boundary.
public void assertHasSizeLessThan(AssertionInfo info, Map<?, ?> actual, int boundary) {
    assertNotNull(info, actual);
    checkSizeLessThan(actual, boundary, actual.size(), info);
}
// Asserts that the map size is less than or equal to the given boundary.
public void assertHasSizeLessThanOrEqualTo(AssertionInfo info, Map<?, ?> actual, int boundary) {
    assertNotNull(info, actual);
    checkSizeLessThanOrEqualTo(actual, boundary, actual.size(), info);
}
// Asserts that the map size is between the two boundaries (bound semantics delegated to
// checkSizeBetween).
public void assertHasSizeBetween(AssertionInfo info, Map<?, ?> actual, int lowerBoundary, int higherBoundary) {
    assertNotNull(info, actual);
    checkSizeBetween(actual, lowerBoundary, higherBoundary, actual.size(), info);
}
// Asserts that the map has the same size as the given Iterable.
public void assertHasSameSizeAs(AssertionInfo info, Map<?, ?> map, Iterable<?> other) {
    assertNotNull(info, map);
    hasSameSizeAsCheck(info, map, other, map.size());
}
// Asserts that the map has the same size as the given array (other must be an array).
public void assertHasSameSizeAs(AssertionInfo info, Map<?, ?> map, Object other) {
    assertNotNull(info, map);
    assertIsArray(info, other);
    hasSameSizeAsCheck(info, map, other, map.size());
}
// Asserts that the map has the same size as the other map.
public void assertHasSameSizeAs(AssertionInfo info, Map<?, ?> map, Map<?, ?> other) {
    assertNotNull(info, map);
    hasSameSizeAsCheck(info, map, other, map.size());
}
// Asserts that the map contains every given entry; values are compared with the custom
// valueEquals predicate when provided (see failIfAnyEntryNotFoundInActualMap).
public <K, V> void assertContains(AssertionInfo info, Map<K, V> actual, Entry<? extends K, ? extends V>[] entries,
                                  BiPredicate<? super V, ? super V> valueEquals) {
    failIfNull(entries);
    assertNotNull(info, actual);
    // if both actual and values are empty, then assertion passes.
    if (actual.isEmpty() && entries.length == 0) return;
    failIfEntriesIsEmptySinceActualIsNotEmpty(info, actual, entries);
    failIfAnyEntryNotFoundInActualMap(info, actual, entries, valueEquals);
}
// Asserts that the map contains all entries of the other map.
@SuppressWarnings("unchecked")
public <K, V> void assertContainsAllEntriesOf(AssertionInfo info, Map<K, V> actual, Map<? extends K, ? extends V> other,
                                              BiPredicate<? super V, ? super V> valueEquals) {
    failIfNull(other);
    assertNotNull(info, actual);
    // assertion passes if other is empty since actual contains all other entries.
    if (other.isEmpty()) return;
    failIfAnyEntryNotFoundInActualMap(info, actual, other.entrySet().toArray(new Entry[0]), valueEquals);
}
// Asserts that the map contains at least one of the given entries; returns on the first
// match, fails if none of them is present.
public <K, V> void assertContainsAnyOf(AssertionInfo info, Map<K, V> actual, Entry<? extends K, ? extends V>[] entries,
                                       BiPredicate<? super V, ? super V> valueEquals) {
    failIfNull(entries);
    assertNotNull(info, actual);
    // if both actual and values are empty, then assertion passes.
    if (actual.isEmpty() && entries.length == 0) return;
    failIfEntriesIsEmptySinceActualIsNotEmpty(info, actual, entries);
    for (Entry<? extends K, ? extends V> entry : entries) {
        if (containsEntry(actual, entry, valueEquals)) return;
    }
    throw failures.failure(info, shouldContainAnyOf(actual, entries));
}
// Asserts that the map contains the given key and that its value matches the Condition.
public <K, V> void assertHasEntrySatisfying(AssertionInfo info, Map<K, V> actual, K key, Condition<? super V> valueCondition) {
    assertContainsKey(info, actual, key);
    conditions.assertIsNotNull(valueCondition);
    V value = actual.get(key);
    if (!valueCondition.matches(value)) throw failures.failure(info, elementsShouldBe(actual, value, valueCondition));
}
// Asserts that the map contains the given key and that its value passes the Consumer's
// requirements (the Consumer fails by throwing, typically an AssertionError).
public <K, V> void assertHasEntrySatisfying(AssertionInfo info, Map<K, V> actual, K key,
                                            Consumer<? super V> valueRequirements) {
    assertContainsKey(info, actual, key);
    requireNonNull(valueRequirements, "The Consumer<V> expressing the assertions requirements must not be null");
    V value = actual.get(key);
    valueRequirements.accept(value);
}
// Asserts that at least one entry of the map matches the given entry Condition.
public <K, V> void assertHasEntrySatisfying(AssertionInfo info, Map<K, V> actual,
                                            Condition<? super Entry<K, V>> entryCondition) {
    assertNotNull(info, actual);
    conditions.assertIsNotNull(entryCondition);
    for (Entry<K, V> entry : actual.entrySet()) {
        if (entryCondition.matches(entry)) return;
    }
    throw failures.failure(info, shouldContainEntry(actual, entryCondition));
}
// Asserts that at least one entry matches BOTH the key condition and the value condition.
public <K, V> void assertHasEntrySatisfyingConditions(AssertionInfo info, Map<K, V> actual, Condition<? super K> keyCondition,
                                                      Condition<? super V> valueCondition) {
    assertNotNull(info, actual);
    conditions.assertIsNotNull(keyCondition, "The condition to evaluate for entries key should not be null");
    conditions.assertIsNotNull(valueCondition, "The condition to evaluate for entries value should not be null");
    for (Entry<K, V> entry : actual.entrySet()) {
        if (keyCondition.matches(entry.getKey()) && valueCondition.matches(entry.getValue())) return;
    }
    throw failures.failure(info, shouldContainEntry(actual, keyCondition, valueCondition));
}
// Asserts that at least one key of the map matches the given Condition.
public <K> void assertHasKeySatisfying(AssertionInfo info, Map<K, ?> actual, Condition<? super K> keyCondition) {
    assertNotNull(info, actual);
    conditions.assertIsNotNull(keyCondition);
    for (K key : actual.keySet()) {
        if (keyCondition.matches(key)) return;
    }
    throw failures.failure(info, shouldContainKey(actual, keyCondition));
}
// Asserts that at least one value of the map matches the given Condition.
public <V> void assertHasValueSatisfying(AssertionInfo info, Map<?, V> actual, Condition<? super V> valueCondition) {
    assertNotNull(info, actual);
    conditions.assertIsNotNull(valueCondition);
    for (V value : actual.values()) {
        if (valueCondition.matches(value)) return;
    }
    throw failures.failure(info, shouldContainValue(actual, valueCondition));
}
// Asserts that the map contains none of the given entries; collects the ones that were
// found (insertion order preserved via LinkedHashSet) and fails if any were.
public <K, V> void assertDoesNotContain(AssertionInfo info, Map<K, V> actual, Entry<? extends K, ? extends V>[] entries,
                                        BiPredicate<? super V, ? super V> valueEquals) {
    failIfNullOrEmpty(entries);
    assertNotNull(info, actual);
    Set<Entry<? extends K, ? extends V>> found = new LinkedHashSet<>();
    for (Entry<? extends K, ? extends V> entry : entries) {
        if (containsEntry(actual, entry, valueEquals)) {
            found.add(entry);
        }
    }
    if (!found.isEmpty()) throw failures.failure(info, shouldNotContain(actual, entries, found));
}
// Asserts that the map contains all the given keys.
public <K, V> void assertContainsKeys(AssertionInfo info, Map<K, V> actual, K[] keys) {
    assertNotNull(info, actual);
    requireNonNull(keys, () -> keysToLookForIsNull("array of keys"));
    if (actual.isEmpty() && keys.length == 0) return;
    failIfEmpty(keys, () -> keysToLookForIsEmpty("array of keys"));
    Set<K> notFound = getNotFoundKeys(actual, keys);
    if (!notFound.isEmpty()) throw failures.failure(info, shouldContainKeys(actual, notFound));
}
// Single-key convenience delegating to assertContainsKeys.
public <K, V> void assertContainsKey(AssertionInfo info, Map<K, V> actual, K key) {
    assertContainsKeys(info, actual, array(key));
}
// Asserts that the map does not contain the given key.
public <K, V> void assertDoesNotContainKey(AssertionInfo info, Map<K, V> actual, K key) {
    assertNotNull(info, actual);
    if (containsKey(actual, key)) throw failures.failure(info, shouldNotContainKey(actual, key));
}
// Asserts that the map contains none of the given keys.
public <K, V> void assertDoesNotContainKeys(AssertionInfo info, Map<K, V> actual, K[] keys) {
    assertNotNull(info, actual);
    requireNonNull(keys, () -> keysToLookForIsNull("array of keys"));
    Set<K> found = getFoundKeys(actual, keys);
    if (!found.isEmpty()) throw failures.failure(info, shouldNotContainKeys(actual, found));
}
// Asserts that the map contains exactly the given keys and no others (array variant).
public <K, V> void assertContainsOnlyKeys(AssertionInfo info, Map<K, V> actual, K[] keys) {
    assertContainsOnlyKeys(info, actual, "array of keys", keys);
}
// Asserts that the map contains exactly the given keys and no others (Iterable variant).
public <K, V> void assertContainsOnlyKeys(AssertionInfo info, Map<K, V> actual, Iterable<? extends K> keys) {
    assertContainsOnlyKeys(info, actual, "keys iterable", toArray(keys));
}
// Shared implementation for both assertContainsOnlyKeys variants; placeholderForErrorMessages
// names the kind of input ("array of keys" / "keys iterable") in error messages.
private <K, V> void assertContainsOnlyKeys(AssertionInfo info, Map<K, V> actual, String placeholderForErrorMessages, K[] keys) {
    assertNotNull(info, actual);
    requireNonNull(keys, () -> keysToLookForIsNull(placeholderForErrorMessages));
    if (actual.isEmpty() && keys.length == 0) {
        return;
    }
    failIfEmpty(keys, () -> keysToLookForIsEmpty(placeholderForErrorMessages));
    Set<K> notFound = getNotFoundKeys(actual, keys);
    Set<K> notExpected = getNotExpectedKeys(actual, keys);
    if (!notFound.isEmpty() || !notExpected.isEmpty())
        throw failures.failure(info, shouldContainOnlyKeys(actual, keys, notFound, notExpected));
}
// Returns the expected keys that ARE present in the map, in the order they were given.
private static <K> Set<K> getFoundKeys(Map<K, ?> actual, K[] expectedKeys) {
    // Stream API avoided for performance reasons
    Set<K> found = new LinkedHashSet<>();
    for (K expectedKey : expectedKeys) {
        if (containsKey(actual, expectedKey)) found.add(expectedKey);
    }
    return found;
}
// Returns the expected keys that are NOT present in the map, in the order they were given.
private static <K> Set<K> getNotFoundKeys(Map<K, ?> actual, K[] expectedKeys) {
    // Stream API avoided for performance reasons
    Set<K> notFound = new LinkedHashSet<>();
    for (K expectedKey : expectedKeys) {
        if (!containsKey(actual, expectedKey)) notFound.add(expectedKey);
    }
    return notFound;
}
// Null-tolerant containsKey: some Map implementations reject null keys with an NPE, which
// here simply means "not contained".
private static <K> boolean containsKey(Map<K, ?> actual, K key) {
    try {
        return actual.containsKey(key);
    } catch (NullPointerException e) {
        if (key == null) return false; // null keys not permitted
        throw e;
    }
}
// Returns the keys of actual that are NOT among the expected keys, by removing every
// expected key from a copy of the map and returning the remaining key set.
private static <K> Set<K> getNotExpectedKeys(Map<K, ?> actual, K[] expectedKeys) {
    // Stream API avoided for performance reasons
    try {
        // Copy via clone() or a reflective constructor so the copy keeps actual's Map
        // semantics (e.g. comparator-based key equality of a TreeMap) — TODO confirm
        // that is the intent; a plain LinkedHashMap copy would use equals/hashCode.
        Map<K, ?> clonedMap = clone(actual);
        for (K expectedKey : expectedKeys) {
            clonedMap.remove(expectedKey);
        }
        return clonedMap.keySet();
    } catch (NoSuchMethodException | RuntimeException e) {
        // actual cannot be cloned or is unmodifiable, falling back to LinkedHashMap
        Map<K, ?> copiedMap = new LinkedHashMap<>(actual);
        for (K expectedKey : expectedKeys) {
            copiedMap.remove(expectedKey);
        }
        return copiedMap.keySet();
    }
}
// Best-effort mutable copy of the given map, tried in order: Cloneable.clone(), a
// constructor taking the map's own class, then a no-arg constructor followed by putAll.
// Throws NoSuchMethodException when no suitable constructor exists, and
// IllegalArgumentException for Spring's MultiValueMapAdapter (explicitly unsupported).
@SuppressWarnings("unchecked")
private static <K, V> Map<K, V> clone(Map<K, V> map) throws NoSuchMethodException {
    if (isMultiValueMapAdapterInstance(map)) throw new IllegalArgumentException("Cannot clone MultiValueMapAdapter");
    try {
        if (map instanceof Cloneable) {
            return (Map<K, V>) map.getClass().getMethod("clone").invoke(map);
        }
        try {
            // try with copying constructor
            return map.getClass().getConstructor(map.getClass()).newInstance(map);
        } catch (NoSuchMethodException e) {
            // try with default constructor
            Map<K, V> newMap = map.getClass().getConstructor().newInstance();
            newMap.putAll(map);
            return newMap;
        }
    } catch (IllegalAccessException | InvocationTargetException | InstantiationException e) {
        throw new IllegalStateException(e);
    }
}
// True when map is a Spring MultiValueMapAdapter (detected by name to avoid a Spring
// dependency).
private static boolean isMultiValueMapAdapterInstance(Map<?, ?> map) {
    return isInstanceOf(map, "org.springframework.util.MultiValueMapAdapter");
}
// Reflective instanceof against a class that may not be on the classpath; absent class
// simply means "not an instance".
private static boolean isInstanceOf(Object object, String className) {
    try {
        Class<?> type = Class.forName(className);
        return type.isInstance(object);
    } catch (ClassNotFoundException e) {
        return false;
    }
}
// Asserts that the map contains the given value (compared with valueEquals when provided).
public <K, V> void assertContainsValue(AssertionInfo info, Map<K, V> actual, V value,
                                       BiPredicate<? super V, ? super V> valueEquals) {
    assertNotNull(info, actual);
    if (!containsValue(actual, value, valueEquals)) throw failures.failure(info, shouldContainValue(actual, value));
}
// Asserts that the map contains all the given values.
public <K, V> void assertContainsValues(AssertionInfo info, Map<K, V> actual, V[] values,
                                        BiPredicate<? super V, ? super V> valueEquals) {
    assertNotNull(info, actual);
    requireNonNull(values, "The array of values to look for should not be null");
    if (actual.isEmpty() && values.length == 0) return;
    failIfEmpty(values, valuesToLookForIsEmpty());
    Set<V> notFound = getNotFoundValues(actual, values, valueEquals);
    if (!notFound.isEmpty()) throw failures.failure(info, shouldContainValues(actual, notFound));
}
// Asserts that the map does not contain the given value.
public <K, V> void assertDoesNotContainValue(AssertionInfo info, Map<K, V> actual, V value,
                                             BiPredicate<? super V, ? super V> valueEquals) {
    assertNotNull(info, actual);
    if (containsValue(actual, value, valueEquals)) throw failures.failure(info, shouldNotContainValue(actual, value));
}
// Returns the expected values that are NOT present in the map, in the order given.
private static <V> Set<V> getNotFoundValues(Map<?, V> actual, V[] expectedValues,
                                            BiPredicate<? super V, ? super V> valueEquals) {
    // Stream API avoided for performance reasons
    Set<V> notFound = new LinkedHashSet<>();
    for (V expectedValue : expectedValues) {
        if (!containsValue(actual, expectedValue, valueEquals)) notFound.add(expectedValue);
    }
    return notFound;
}
// Null-tolerant containsValue: Maps that reject null values with an NPE are treated as not
// containing null. Uses the map's own containsValue unless a custom predicate is given.
private static <V> boolean containsValue(Map<?, V> actual, V value, BiPredicate<? super V, ? super V> valueEquals) {
    try {
        return valueEquals == null ? actual.containsValue(value) : containsValueAccordingToCustomEquals(actual, value, valueEquals);
    } catch (NullPointerException e) {
        if (value == null) return false; // null values not permitted
        throw e;
    }
}
// Linear scan of the values using the custom equality predicate (expected value first).
private static <V> boolean containsValueAccordingToCustomEquals(Map<?, V> actual, V value,
                                                                BiPredicate<? super V, ? super V> valueEquals) {
    return actual.values().stream().anyMatch(actualValue -> valueEquals.test(value, actualValue));
}
// Asserts that the map contains exactly the given entries in any order: every expected
// entry must be present and no other entry may exist.
public <K, V> void assertContainsOnly(AssertionInfo info, Map<K, V> actual, Entry<? extends K, ? extends V>[] entries,
                                      BiPredicate<? super V, ? super V> valueEquals) {
    doCommonContainsCheck(info, actual, entries);
    if (actual.isEmpty() && entries.length == 0) return;
    failIfEntriesIsEmptySinceActualIsNotEmpty(info, actual, entries);
    Set<Entry<? extends K, ? extends V>> notFound = getNotFoundEntries(actual, entries, valueEquals);
    Set<Entry<K, V>> notExpected = getNotExpectedEntries(actual, entries, valueEquals);
    if (!(notFound.isEmpty() && notExpected.isEmpty()))
        throw failures.failure(info, shouldContainOnly(actual, entries, notFound, notExpected));
}
// Returns the expected entries that are missing from actual, in the order they were given.
private <K, V> Set<Entry<? extends K, ? extends V>> getNotFoundEntries(Map<K, V> actual,
                                                                       Entry<? extends K, ? extends V>[] entries,
                                                                       BiPredicate<? super V, ? super V> valueEquals) {
    // Stream API avoided for performance reasons
    Set<Entry<? extends K, ? extends V>> notFound = new LinkedHashSet<>();
    for (Entry<? extends K, ? extends V> entry : entries) {
        if (!containsEntry(actual, entry, valueEquals)) notFound.add(entry);
    }
    return notFound;
}
// Returns the entries of actual that were not among the expected entries.
private <K, V> Set<Entry<K, V>> getNotExpectedEntries(Map<K, V> actual, Entry<? extends K, ? extends V>[] entries,
                                                      BiPredicate<? super V, ? super V> valueEquals) {
    // Stream API avoided for performance reasons
    Set<Entry<K, V>> notExpected = new LinkedHashSet<>();
    for (Entry<K, V> entry : mapWithoutExpectedEntries(actual, entries, valueEquals).entrySet()) {
        MapEntry<K, V> mapEntry = entry(entry.getKey(), entry.getValue());
        notExpected.add(mapEntry);
    }
    return notExpected;
}
// Copies actual (via clone() when possible, LinkedHashMap otherwise) and strips every
// expected entry from the copy.
private <K, V> Map<K, V> mapWithoutExpectedEntries(Map<K, V> actual, Entry<? extends K, ? extends V>[] expectedEntries,
                                                   BiPredicate<? super V, ? super V> valueEquals) {
    try {
        Map<K, V> clonedMap = clone(actual);
        removeEntries(clonedMap, expectedEntries, valueEquals);
        return clonedMap;
    } catch (NoSuchMethodException | RuntimeException e) {
        // actual cannot be cloned or is unmodifiable, falling back to LinkedHashMap
        Map<K, V> copiedMap = new LinkedHashMap<>(actual);
        removeEntries(copiedMap, expectedEntries, valueEquals);
        return copiedMap;
    }
}
// Removes from map every entry that matches one of the given entries under the configured
// value equality.
private <K, V> void removeEntries(Map<K, V> map, Entry<? extends K, ? extends V>[] entries,
                                  BiPredicate<? super V, ? super V> valueEquals) {
    // Stream API avoided for performance reasons
    for (Entry<? extends K, ? extends V> entry : entries) {
        // must perform deep equals comparison on values as Map.remove(Object, Object) relies on
        // Objects.equals which does not handle deep equality (e.g. arrays in map entry values)
        if (containsEntry(map, entry, valueEquals)) map.remove(entry.getKey());
    }
}
// Asserts that the map contains exactly the given entries AND in the same iteration order:
// first checks content (same entries, same size), then walks actual's key order comparing
// it index by index against the expected entries.
public <K, V> void assertContainsExactly(AssertionInfo info, Map<K, V> actual, Entry<? extends K, ? extends V>[] entries,
                                         BiPredicate<? super V, ? super V> valueEquals) {
    doCommonContainsCheck(info, actual, entries);
    if (actual.isEmpty() && entries.length == 0) return;
    failIfEntriesIsEmptySinceActualIsNotEmpty(info, actual, entries);
    assertHasSameSizeAs(info, actual, entries);
    Set<Entry<? extends K, ? extends V>> notFound = new LinkedHashSet<>();
    Set<Entry<? extends K, ? extends V>> notExpected = new LinkedHashSet<>();
    compareActualMapAndExpectedEntries(actual, entries, notExpected, notFound, valueEquals);
    if (notExpected.isEmpty() && notFound.isEmpty()) {
        // check entries order
        int index = 0;
        for (K keyFromActual : actual.keySet()) {
            if (!deepEquals(keyFromActual, entries[index].getKey())) {
                Entry<K, V> actualEntry = entry(keyFromActual, actual.get(keyFromActual));
                throw failures.failure(info, elementsDifferAtIndex(actualEntry, entries[index], index));
            }
            index++;
        }
        // all entries are in the same order.
        return;
    }
    throw failures.failure(info, shouldContainExactly(actual, asList(entries), notFound, notExpected));
}
// Fills notFound with expected entries missing from actual and notExpected with actual
// entries that were not expected; works on a LinkedHashMap copy of actual that shrinks as
// expected entries are matched.
private <K, V> void compareActualMapAndExpectedEntries(Map<K, V> actual, Entry<? extends K, ? extends V>[] entries,
                                                       Set<Entry<? extends K, ? extends V>> notExpected,
                                                       Set<Entry<? extends K, ? extends V>> notFound,
                                                       BiPredicate<? super V, ? super V> valueEquals) {
    Map<K, V> expectedEntries = entriesToMap(entries);
    Map<K, V> actualEntries = new LinkedHashMap<>(actual);
    for (Entry<K, V> entry : expectedEntries.entrySet()) {
        if (containsEntry(actualEntries, entry(entry.getKey(), entry.getValue()), valueEquals)) {
            // this is an expected entry
            actualEntries.remove(entry.getKey());
        } else {
            // this is a not found entry
            notFound.add(entry(entry.getKey(), entry.getValue()));
        }
    }
    // All remaining entries from actual copy are not expected entries.
    for (Entry<K, V> entry : actualEntries.entrySet()) {
        notExpected.add(entry(entry.getKey(), entry.getValue()));
    }
}
// Common preconditions for the contains* assertions: actual and entries must be non-null.
private <K, V> void doCommonContainsCheck(AssertionInfo info, Map<K, V> actual, Entry<? extends K, ? extends V>[] entries) {
    assertNotNull(info, actual);
    failIfNull(entries);
}
// Fails unless every given entry is found in actual; distinguishes "key absent" from
// "key present with a different value" so the error message can report both groups.
private <K, V> void failIfAnyEntryNotFoundInActualMap(AssertionInfo info, Map<K, V> actual,
                                                      Entry<? extends K, ? extends V>[] entries,
                                                      BiPredicate<? super V, ? super V> valueEquals) {
    Set<Entry<? extends K, ? extends V>> entriesWithKeyNotFound = new LinkedHashSet<>();
    Set<Entry<? extends K, ? extends V>> entriesWithWrongValue = new LinkedHashSet<>();
    for (Entry<? extends K, ? extends V> entry : entries) {
        requireNonNull(entry, ErrorMessages.entryToLookForIsNull());
        if (!actual.containsKey(entry.getKey())) entriesWithKeyNotFound.add(entry);
        else if (!containsEntry(actual, entry, valueEquals))
            entriesWithWrongValue.add(entry); // can only be wrong value since key was found
    }
    if (!entriesWithWrongValue.isEmpty() || !entriesWithKeyNotFound.isEmpty())
        throw failures.failure(info, shouldContainEntries(actual, entries, entriesWithWrongValue, entriesWithKeyNotFound,
                                                         info.representation()));
}
// Converts an array of entries to an insertion-ordered map (later duplicates of a key win).
private static <K, V> Map<K, V> entriesToMap(Entry<? extends K, ? extends V>[] entries) {
    Map<K, V> expectedEntries = new LinkedHashMap<>();
    for (Entry<? extends K, ? extends V> entry : entries) {
        expectedEntries.put(entry.getKey(), entry.getValue());
    }
    return expectedEntries;
}
// Guard: the key array must not be empty (fixed error message variant).
private static <K> void failIfEmpty(K[] keys, String errorMessage) {
    checkArgument(keys.length > 0, errorMessage);
}
// Guard: the key array must not be empty (lazy error message variant).
private static <K> void failIfEmpty(K[] keys, Supplier<String> errorMessageSupplier) {
    checkArgument(keys.length > 0, errorMessageSupplier);
}
// Guard: the entry array must not be empty.
private static <K, V> void failIfEmpty(Entry<? extends K, ? extends V>[] entries) {
    checkArgument(entries.length > 0, "The array of entries to look for should not be empty");
}
// Guard: the entry array must be non-null and non-empty.
private static <K, V> void failIfNullOrEmpty(Entry<? extends K, ? extends V>[] entries) {
    failIfNull(entries);
    failIfEmpty(entries);
}
// Guard: the entry array must not be null.
private static <K, V> void failIfNull(Entry<? extends K, ? extends V>[] entries) {
    requireNonNull(entries, ErrorMessages.entriesToLookForIsNull());
}
// Guard: the map of expected entries must not be null.
private static <K, V> void failIfNull(Map<? extends K, ? extends V> map) {
    requireNonNull(map, ErrorMessages.mapOfEntriesToLookForIsNull());
}
// Returns true when actual holds the given entry: the key must be present and the mapped
// value must match the expected one under the configured value equality.
private <K, V> boolean containsEntry(Map<K, V> actual, Entry<? extends K, ? extends V> entry,
                                     BiPredicate<? super V, ? super V> valueEquals) {
    requireNonNull(entry, ErrorMessages.entryToLookForIsNull());
    if (!actual.containsKey(entry.getKey())) {
        return false;
    }
    return areEqual(actual.get(entry.getKey()), entry.getValue(), valueEquals);
}
// Compares two values with the custom predicate when one was provided, otherwise falls
// back to deep equality.
private <V> boolean areEqual(V actual, V expected, BiPredicate<? super V, ? super V> valueEquals) {
    if (valueEquals == null) {
        return deepEquals(actual, expected);
    }
    return valueEquals.test(actual, expected);
}
// Fails if the map under test is null.
private void assertNotNull(AssertionInfo info, Map<?, ?> actual) {
    Objects.instance().assertNotNull(info, actual);
}
// this should be only called when actual is not empty
private <K, V> void failIfEntriesIsEmptySinceActualIsNotEmpty(AssertionInfo info, Map<K, V> actual,
                                                              Entry<? extends K, ? extends V>[] entries) {
    if (entries.length == 0) {
        throw failures.failure(info, shouldBeEmpty(actual));
    }
}
}
| Maps |
java | apache__logging-log4j2 | log4j-core-test/src/main/java/org/apache/logging/log4j/core/test/SystemPropertyTestRule.java | {
"start": 1108,
"end": 2769
} | class ____ implements TestRule {
public static SystemPropertyTestRule create(final String name, final String value) {
return new SystemPropertyTestRule(name, value);
}
private final String name;
private final Supplier<String> valueSupplier;
private String value;
protected SystemPropertyTestRule(final String name, final String value) {
this(name, () -> value);
}
protected SystemPropertyTestRule(final String name, final Supplier<String> value) {
this.name = Objects.requireNonNull(name, "name");
this.valueSupplier = value;
}
@Override
public Statement apply(final Statement base, final Description description) {
return new Statement() {
@Override
public void evaluate() throws Throwable {
final String oldValue = System.getProperty(name);
try {
value = valueSupplier.get();
System.setProperty(name, value);
base.evaluate();
} finally {
// Restore if previously set
if (oldValue != null) {
System.setProperty(name, oldValue);
}
}
}
};
}
public String getName() {
return name;
}
public String getValue() {
return value;
}
public Supplier<String> getValueSupplier() {
return valueSupplier;
}
@Override
public String toString() {
// Value might be a secret...
return "SystemPropertyTestRule [name=" + name + "]";
}
}
| SystemPropertyTestRule |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/ingest/IngestStatsTests.java | {
"start": 986,
"end": 20362
} | class ____ extends ESTestCase {
// Round-trips a fully populated IngestStats through serialization and checks equality.
public void testSerialization() throws IOException {
    IngestStats.Stats totalStats = new IngestStats.Stats(50, 100, 200, 300);
    List<IngestStats.PipelineStat> pipelineStats = createPipelineStats();
    Map<ProjectId, Map<String, List<IngestStats.ProcessorStat>>> processorStats = createProcessorStats(pipelineStats);
    IngestStats ingestStats = new IngestStats(totalStats, pipelineStats, processorStats);
    IngestStats serializedStats = serialize(ingestStats);
    assertIngestStats(ingestStats, serializedStats);
}
// The IDENTITY singleton must deserialize back to the very same instance.
public void testIdentitySerialization() throws IOException {
    IngestStats serializedStats = serialize(IngestStats.IDENTITY);
    assertThat(serializedStats, sameInstance(IngestStats.IDENTITY));
}
// Checks that processor name/type strings survive serialization and, crucially, that equal
// strings are canonicalized to the same instance to keep heap usage under control.
public void testProcessorNameAndTypeIdentitySerialization() throws IOException {
    IngestStats.Builder builder = new IngestStats.Builder();
    ProjectId projectId = randomProjectIdOrDefault();
    builder.addPipelineMetrics(projectId, "pipeline_id", new IngestPipelineMetric());
    builder.addProcessorMetrics(projectId, "pipeline_id", "set", "set", new IngestMetric());
    builder.addProcessorMetrics(projectId, "pipeline_id", "set:foo", "set", new IngestMetric());
    builder.addProcessorMetrics(projectId, "pipeline_id", "set:bar", "set", new IngestMetric());
    builder.addTotalMetrics(new IngestMetric());
    IngestStats serializedStats = serialize(builder.build());
    List<IngestStats.ProcessorStat> processorStats = serializedStats.processorStats().get(projectId).get("pipeline_id");
    // these are just table stakes
    assertThat(processorStats.get(0).name(), is("set"));
    assertThat(processorStats.get(0).type(), is("set"));
    assertThat(processorStats.get(1).name(), is("set:foo"));
    assertThat(processorStats.get(1).type(), is("set"));
    assertThat(processorStats.get(2).name(), is("set:bar"));
    assertThat(processorStats.get(2).type(), is("set"));
    // this is actually interesting, though -- we're canonical-izing these strings to keep our heap usage under control
    final String set = processorStats.get(0).name();
    assertThat(processorStats.get(0).name(), sameInstance(set));
    assertThat(processorStats.get(0).type(), sameInstance(set));
    assertThat(processorStats.get(1).type(), sameInstance(set));
    assertThat(processorStats.get(2).type(), sameInstance(set));
}
// Checks ByteStats serialization plus its IDENTITY/merge contract: an untouched metric
// yields the IDENTITY singleton (preserved across serialization), and merging with
// IDENTITY returns the other operand unchanged (same instance).
public void testBytesStatsSerialization() throws IOException {
    {
        IngestPipelineMetric metric = new IngestPipelineMetric();
        IngestStats.ByteStats byteStats = metric.createByteStats();
        assertThat(byteStats, sameInstance(IngestStats.ByteStats.IDENTITY));
        IngestStats.ByteStats serializedByteStats = serialize(byteStats);
        assertThat(serializedByteStats, sameInstance(IngestStats.ByteStats.IDENTITY));
        assertThat(IngestStats.ByteStats.merge(IngestStats.ByteStats.IDENTITY, byteStats), sameInstance(byteStats));
    }
    {
        long ingestBytes = randomLongBetween(0, Long.MAX_VALUE);
        long producedBytes = randomLongBetween(0, Long.MAX_VALUE);
        IngestPipelineMetric metric = new IngestPipelineMetric();
        metric.preIngestBytes(ingestBytes);
        metric.postIngestBytes(producedBytes);
        IngestStats.ByteStats byteStats = metric.createByteStats();
        assertThat(byteStats.bytesIngested(), equalTo(ingestBytes));
        assertThat(byteStats.bytesProduced(), equalTo(producedBytes));
        IngestStats.ByteStats serializedByteStats = serialize(byteStats);
        assertThat(serializedByteStats.bytesIngested(), equalTo(ingestBytes));
        assertThat(serializedByteStats.bytesProduced(), equalTo(producedBytes));
        assertThat(IngestStats.ByteStats.merge(byteStats, IngestStats.ByteStats.IDENTITY), sameInstance(byteStats));
        assertThat(IngestStats.ByteStats.merge(IngestStats.ByteStats.IDENTITY, byteStats), sameInstance(byteStats));
        assertThat(
            IngestStats.ByteStats.merge(IngestStats.ByteStats.IDENTITY, IngestStats.ByteStats.IDENTITY),
            sameInstance(IngestStats.ByteStats.IDENTITY)
        );
    }
}
// Merging two Stats must sum each of the four counters field by field.
public void testStatsMerge() {
    var first = randomStats();
    var second = randomStats();
    assertEquals(
        new IngestStats.Stats(
            first.ingestCount() + second.ingestCount(),
            first.ingestTimeInMillis() + second.ingestTimeInMillis(),
            first.ingestCurrent() + second.ingestCurrent(),
            first.ingestFailedCount() + second.ingestFailedCount()
        ),
        IngestStats.Stats.merge(first, second)
    );
}
// Merging two pipeline-stat lists must combine entries keyed by (project, pipeline):
// stats/byte-stats of matching pairs are merged, unmatched entries pass through unchanged.
public void testPipelineStatsMerge() {
    var first = List.of(
        randomPipelineStat("project-1", "pipeline-1"),
        randomPipelineStat("project-1", "pipeline-1"),
        randomPipelineStat("project-1", "pipeline-2"),
        randomPipelineStat("project-2", "pipeline-1"),
        randomPipelineStat("project-1", "pipeline-3")
    );
    var second = List.of(
        randomPipelineStat("project-1", "pipeline-2"),
        randomPipelineStat("project-1", "pipeline-1"),
        randomPipelineStat("project-2", "pipeline-2"),
        randomPipelineStat("project-2", "pipeline-1"),
        randomPipelineStat("project-3", "pipeline-1")
    );
    assertThat(
        IngestStats.PipelineStat.merge(first, second),
        containsInAnyOrder(
            new IngestStats.PipelineStat(
                ProjectId.fromId("project-1"),
                "pipeline-1",
                // duplicated in `first`, so three operands fold together here
                merge(first.get(0).stats(), first.get(1).stats(), second.get(1).stats()),
                merge(first.get(0).byteStats(), first.get(1).byteStats(), second.get(1).byteStats())
            ),
            new IngestStats.PipelineStat(
                ProjectId.fromId("project-1"),
                "pipeline-2",
                merge(first.get(2).stats(), second.get(0).stats()),
                IngestStats.ByteStats.merge(first.get(2).byteStats(), second.get(0).byteStats())
            ),
            new IngestStats.PipelineStat(
                ProjectId.fromId("project-2"),
                "pipeline-1",
                merge(first.get(3).stats(), second.get(3).stats()),
                IngestStats.ByteStats.merge(first.get(3).byteStats(), second.get(3).byteStats())
            ),
            // entries present on only one side are passed through as-is
            new IngestStats.PipelineStat(ProjectId.fromId("project-2"), "pipeline-2", second.get(2).stats(), second.get(2).byteStats()),
            new IngestStats.PipelineStat(ProjectId.fromId("project-1"), "pipeline-3", first.get(4).stats(), first.get(4).byteStats()),
            new IngestStats.PipelineStat(ProjectId.fromId("project-3"), "pipeline-1", second.get(4).stats(), second.get(4).byteStats())
        )
    );
}
// Merging processor stats with an empty or all-zero counterpart must return the non-empty
// side unchanged — and, as an optimization, the exact same list instance.
public void testProcessorStatsMergeZeroCounts() {
    {
        var expected = randomPipelineProcessorStats();
        var first = Map.of(ProjectId.fromId("project-1"), Map.of("pipeline-1", expected));
        // merging with an empty map yields the non-empty map
        assertEquals(IngestStats.merge(Map.of(), first), first);
        assertEquals(IngestStats.merge(first, Map.of()), first);
        // it's the same exact reference, in fact
        assertSame(expected, IngestStats.merge(Map.of(), first).get(ProjectId.fromId("project-1")).get("pipeline-1"));
        assertSame(expected, IngestStats.merge(first, Map.of()).get(ProjectId.fromId("project-1")).get("pipeline-1"));
    }
    {
        var expected = randomPipelineProcessorStats();
        var first = Map.of(ProjectId.fromId("project-1"), Map.of("pipeline-1", expected));
        var zero = List.of(
            new IngestStats.ProcessorStat("proc-1", "type-1", zeroStats()),
            new IngestStats.ProcessorStat("proc-1", "type-2", zeroStats()),
            new IngestStats.ProcessorStat("proc-2", "type-1", zeroStats()),
            new IngestStats.ProcessorStat("proc-3", "type-3", zeroStats())
        );
        var second = Map.of(ProjectId.fromId("project-1"), Map.of("pipeline-1", zero));
        // merging with a zero map yields the non-zero map
        assertEquals(IngestStats.merge(second, first), first);
        assertEquals(IngestStats.merge(first, second), first);
        // it's the same exact reference, in fact
        assertSame(expected, IngestStats.merge(second, first).get(ProjectId.fromId("project-1")).get("pipeline-1"));
        assertSame(expected, IngestStats.merge(first, second).get(ProjectId.fromId("project-1")).get("pipeline-1"));
    }
}
// Merging two processor-stats maps must merge per project and per pipeline, pairing
// processors positionally within each pipeline's list.
public void testProcessorStatsMerge() {
    var first = Map.of(
        ProjectId.fromId("project-1"),
        Map.of("pipeline-1", randomPipelineProcessorStats(), "pipeline-2", randomPipelineProcessorStats()),
        ProjectId.fromId("project-2"),
        Map.of("pipeline-1", randomPipelineProcessorStats())
    );
    var second = Map.of(
        ProjectId.fromId("project-2"),
        Map.of("pipeline-1", randomPipelineProcessorStats()),
        ProjectId.fromId("project-1"),
        Map.of("pipeline-2", randomPipelineProcessorStats(), "pipeline-1", randomPipelineProcessorStats())
    );
    assertEquals(
        IngestStats.merge(first, second),
        Map.of(
            ProjectId.fromId("project-1"),
            Map.of(
                "pipeline-1",
                expectedPipelineProcessorStats(
                    first.get(ProjectId.fromId("project-1")).get("pipeline-1"),
                    second.get(ProjectId.fromId("project-1")).get("pipeline-1")
                ),
                "pipeline-2",
                expectedPipelineProcessorStats(
                    first.get(ProjectId.fromId("project-1")).get("pipeline-2"),
                    second.get(ProjectId.fromId("project-1")).get("pipeline-2")
                )
            ),
            ProjectId.fromId("project-2"),
            Map.of(
                "pipeline-1",
                expectedPipelineProcessorStats(
                    first.get(ProjectId.fromId("project-2")).get("pipeline-1"),
                    second.get(ProjectId.fromId("project-2")).get("pipeline-1")
                )
            )
        )
    );
}
// When the two sides of a pipeline have heterogeneous *non-zero* processor stats, the merge
// defers to the side with the smaller total ingest count rather than combining them.
public void testProcessorStatsMergeHeterogeneous() {
    // if a pipeline has heterogeneous *non-zero* stats, then we defer to the one with a smaller total ingest count
    var first = Map.of(
        ProjectId.fromId("project-1"),
        Map.of(
            "pipeline-1",
            List.of(
                new IngestStats.ProcessorStat("name-1", "type-1", new IngestStats.Stats(randomLongBetween(1, 100), 0, 0, 0)),
                new IngestStats.ProcessorStat("name-2", "type-2", new IngestStats.Stats(randomLongBetween(1, 100), 0, 0, 0))
            )
        )
    );
    var expected = List.of(new IngestStats.ProcessorStat("name-1", "type-1", new IngestStats.Stats(1, 0, 0, 0)));
    var second = Map.of(ProjectId.fromId("project-1"), Map.of("pipeline-1", expected));
    assertEquals(second, IngestStats.merge(first, second));
    assertSame(expected, IngestStats.merge(second, first).get(ProjectId.fromId("project-1")).get("pipeline-1"));
}
private static List<IngestStats.ProcessorStat> expectedPipelineProcessorStats(
List<IngestStats.ProcessorStat> first,
List<IngestStats.ProcessorStat> second
) {
return List.of(
new IngestStats.ProcessorStat("proc-1", "type-1", merge(first.get(0).stats(), second.get(0).stats())),
new IngestStats.ProcessorStat("proc-1", "type-2", merge(first.get(1).stats(), second.get(1).stats())),
new IngestStats.ProcessorStat("proc-2", "type-1", merge(first.get(2).stats(), second.get(2).stats())),
new IngestStats.ProcessorStat("proc-3", "type-3", merge(first.get(3).stats(), second.get(3).stats()))
);
}
private static List<IngestStats.ProcessorStat> randomPipelineProcessorStats() {
return List.of(
randomProcessorStat("proc-1", "type-1"),
randomProcessorStat("proc-1", "type-2"),
randomProcessorStat("proc-2", "type-1"),
randomProcessorStat("proc-3", "type-3")
);
}
private static IngestStats.Stats merge(IngestStats.Stats... stats) {
return Arrays.stream(stats).reduce(IngestStats.Stats.IDENTITY, IngestStats.Stats::merge);
}
private static IngestStats.ByteStats merge(IngestStats.ByteStats... stats) {
return Arrays.stream(stats).reduce(new IngestStats.ByteStats(0, 0), IngestStats.ByteStats::merge);
}
private static List<IngestStats.PipelineStat> createPipelineStats() {
IngestStats.PipelineStat pipeline1Stats = new IngestStats.PipelineStat(
ProjectId.fromId("project1"),
"pipeline1",
new IngestStats.Stats(3, 3, 3, 3),
new IngestStats.ByteStats(123, 456)
);
IngestStats.PipelineStat pipeline2Stats = new IngestStats.PipelineStat(
ProjectId.fromId("project2"),
"pipeline2",
new IngestStats.Stats(47, 97, 197, 297),
new IngestStats.ByteStats(1234567, 34567890)
);
IngestStats.PipelineStat pipeline3Stats = new IngestStats.PipelineStat(
ProjectId.fromId("project1"),
"pipeline3",
new IngestStats.Stats(0, 0, 0, 0),
new IngestStats.ByteStats(0, 0)
);
return List.of(pipeline1Stats, pipeline2Stats, pipeline3Stats);
}
private static Map<ProjectId, Map<String, List<IngestStats.ProcessorStat>>> createProcessorStats(
List<IngestStats.PipelineStat> pipelineStats
) {
assert (pipelineStats.size() >= 2);
IngestStats.ProcessorStat processor1Stat = new IngestStats.ProcessorStat("processor1", "type", new IngestStats.Stats(1, 1, 1, 1));
IngestStats.ProcessorStat processor2Stat = new IngestStats.ProcessorStat("processor2", "type", new IngestStats.Stats(2, 2, 2, 2));
IngestStats.ProcessorStat processor3Stat = new IngestStats.ProcessorStat(
"processor3",
"type",
new IngestStats.Stats(47, 97, 197, 297)
);
// pipeline1 -> processor1,processor2; pipeline2 -> processor3
return Map.of(
ProjectId.fromId("project1"),
Map.of(pipelineStats.get(0).pipelineId(), List.of(processor1Stat, processor2Stat)),
ProjectId.fromId("project2"),
Map.of(pipelineStats.get(1).pipelineId(), List.of(processor3Stat))
);
}
private static IngestStats serialize(IngestStats stats) throws IOException {
var out = new BytesStreamOutput();
stats.writeTo(out);
var in = out.bytes().streamInput();
return IngestStats.read(in);
}
private static IngestStats.ByteStats serialize(IngestStats.ByteStats stats) throws IOException {
var out = new BytesStreamOutput();
stats.writeTo(out);
var in = out.bytes().streamInput();
return IngestStats.readByteStats(in);
}
private static void assertIngestStats(IngestStats ingestStats, IngestStats serializedStats) {
assertNotSame(ingestStats, serializedStats);
assertNotSame(ingestStats.totalStats(), serializedStats.totalStats());
assertNotSame(ingestStats.pipelineStats(), serializedStats.pipelineStats());
assertNotSame(ingestStats.processorStats(), serializedStats.processorStats());
assertEquals(ingestStats.totalStats(), serializedStats.totalStats());
assertEquals(ingestStats.pipelineStats().size(), serializedStats.pipelineStats().size());
for (IngestStats.PipelineStat serializedPipelineStat : serializedStats.pipelineStats()) {
assertEquals(
getPipelineStats(ingestStats.pipelineStats(), serializedPipelineStat.pipelineId()),
serializedPipelineStat.stats()
);
assertEquals(
getPipelineByteStats(ingestStats.pipelineStats(), serializedPipelineStat.pipelineId()),
serializedPipelineStat.byteStats()
);
List<IngestStats.ProcessorStat> serializedProcessorStats = serializedStats.processorStats()
.getOrDefault(serializedPipelineStat.projectId(), Map.of())
.get(serializedPipelineStat.pipelineId());
List<IngestStats.ProcessorStat> processorStat = ingestStats.processorStats()
.getOrDefault(serializedPipelineStat.projectId(), Map.of())
.get(serializedPipelineStat.pipelineId());
if (processorStat != null) {
Iterator<IngestStats.ProcessorStat> it = processorStat.iterator();
// intentionally enforcing the identical ordering
for (IngestStats.ProcessorStat serializedProcessorStat : serializedProcessorStats) {
IngestStats.ProcessorStat ps = it.next();
assertEquals(ps.name(), serializedProcessorStat.name());
assertEquals(ps.type(), serializedProcessorStat.type());
assertEquals(ps.stats(), serializedProcessorStat.stats());
}
assertFalse(it.hasNext());
}
}
}
private static IngestStats.Stats getPipelineStats(List<IngestStats.PipelineStat> pipelineStats, String id) {
return pipelineStats.stream()
.filter(p1 -> p1.pipelineId().equals(id))
.findFirst()
.map(IngestStats.PipelineStat::stats)
.orElse(null);
}
private static IngestStats.ByteStats getPipelineByteStats(List<IngestStats.PipelineStat> pipelineStats, String id) {
return pipelineStats.stream()
.filter(p1 -> p1.pipelineId().equals(id))
.findFirst()
.map(IngestStats.PipelineStat::byteStats)
.orElse(null);
}
private static IngestStats.ProcessorStat randomProcessorStat(String name, String type) {
return new IngestStats.ProcessorStat(name, type, randomStats());
}
private static IngestStats.PipelineStat randomPipelineStat(String projectId, String pipelineId) {
return new IngestStats.PipelineStat(ProjectId.fromId(projectId), pipelineId, randomStats(), randomByteStats());
}
private static IngestStats.Stats randomStats() {
return new IngestStats.Stats(randomLong(), randomLong(), randomLong(), randomLong());
}
private static IngestStats.Stats zeroStats() {
return new IngestStats.Stats(0, 0, 0, 0);
}
private static IngestStats.ByteStats randomByteStats() {
return new IngestStats.ByteStats(randomLong(), randomLong());
}
}
| IngestStatsTests |
java | apache__flink | flink-core/src/test/java/org/apache/flink/api/java/typeutils/runtime/PojoSerializerUpgradeTestSpecifications.java | {
"start": 14668,
"end": 14998
} | class ____
implements TypeSerializerUpgradeTestBase.UpgradeVerifier<
DifferentFieldTypePojoSchemaVerifier.PojoWithStringField> {
@ClassRelocator.RelocateClass("TestPojoWithDifferentFieldType")
@SuppressWarnings("WeakerAccess")
public static | DifferentFieldTypePojoSchemaVerifier |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/util/datetime/FastDatePrinter.java | {
"start": 36945,
"end": 37015
} | class ____ output the twelve hour field.</p>
*/
private static | to |
java | elastic__elasticsearch | modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongCounterAdapter.java | {
"start": 1483,
"end": 1879
} | class ____ extends AbstractInstrument.Builder<LongCounter> {
private Builder(String name, String description, String unit) {
super(name, description, unit);
}
@Override
public LongCounter build(Meter meter) {
return Objects.requireNonNull(meter).counterBuilder(name).setDescription(description).setUnit(unit).build();
}
}
}
| Builder |
java | elastic__elasticsearch | server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkRejectionIT.java | {
"start": 1237,
"end": 3637
} | class ____ extends ESIntegTestCase {
@Override
protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
return Settings.builder()
.put(super.nodeSettings(nodeOrdinal, otherSettings))
.put("thread_pool.write.size", 1)
.put("thread_pool.write.queue_size", 1)
.build();
}
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return Arrays.asList(InternalSettingsPlugin.class);
}
@Override
public Settings indexSettings() {
return Settings.builder()
.put(super.indexSettings())
// sync global checkpoint quickly so we can verify seq_no_stats aligned between all copies after tests.
.put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "1s")
.build();
}
@Override
protected int numberOfReplicas() {
return 1;
}
protected int numberOfShards() {
return 5;
}
public void testBulkRejectionAfterDynamicMappingUpdate() throws Exception {
final String index = "test";
assertAcked(prepareCreate(index));
ensureGreen();
final BulkRequest request1 = new BulkRequest();
for (int i = 0; i < 500; ++i) {
request1.add(new IndexRequest(index).source(Collections.singletonMap("key" + i, "value" + i)))
.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
}
// Huge request to keep the write pool busy so that requests waiting on a mapping update in the other bulk request get rejected
// by the write pool
final BulkRequest request2 = new BulkRequest();
for (int i = 0; i < 10_000; ++i) {
request2.add(new IndexRequest(index).source(Collections.singletonMap("key", "valuea" + i)))
.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
}
final ActionFuture<BulkResponse> bulkFuture1 = client().bulk(request1);
final ActionFuture<BulkResponse> bulkFuture2 = client().bulk(request2);
try {
bulkFuture1.actionGet();
bulkFuture2.actionGet();
} catch (EsRejectedExecutionException e) {
// ignored, one of the two bulk requests was rejected outright due to the write queue being full
}
internalCluster().assertSeqNos();
}
}
| BulkRejectionIT |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/query/sqm/param/EmptyEntityCollectionParameterTests.java | {
"start": 752,
"end": 1442
} | class ____ {
@Test
void testNoPersistenceExceptionThrown(SessionFactoryScope scope) {
// without fixing, the following exception would be thrown:
// Converting `org.hibernate.type.descriptor.java.spi.JdbcTypeRecommendationException` to JPA `PersistenceException` :
// Could not determine recommended JdbcType for `org.hibernate.orm.test.query.sqm.param.EmptyEntityCollectionParameterTests$ContentEntry`
scope.inTransaction( session ->
session.createQuery( "FROM DbEntity WHERE content IN (:vals)", DbEntity.class )
.setParameter( "vals", Collections.emptyList() )
.list()
);
}
@Entity(name = "DbEntity")
static | EmptyEntityCollectionParameterTests |
java | netty__netty | example/src/main/java/io/netty/example/socksproxy/SocksServerInitializer.java | {
"start": 940,
"end": 1286
} | class ____ extends ChannelInitializer<SocketChannel> {
@Override
public void initChannel(SocketChannel ch) throws Exception {
ch.pipeline().addLast(
new LoggingHandler(LogLevel.DEBUG),
new SocksPortUnificationServerHandler(),
SocksServerHandler.INSTANCE);
}
}
| SocksServerInitializer |
java | apache__flink | flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/util/ResettableExternalBuffer.java | {
"start": 9244,
"end": 20227
} | class ____ implements ResettableRowBuffer.ResettableIterator {
MutableObjectIterator<BinaryRowData> currentIterator;
// memory for file reader to store read result
List<MemorySegment> freeMemory = null;
BlockChannelReader<MemorySegment> fileReader;
int currentChannelID = -1;
BinaryRowData reuse = binaryRowSerializer.createInstance();
BinaryRowData row;
int beginRow;
int nextRow;
// reuse in memory buffer iterator to reduce initialization cost.
InMemoryBuffer.InMemoryBufferIterator reusableMemoryIterator;
// value of resetCount of buffer when this iterator is created.
// used to check validity.
int versionSnapshot;
// if this iterator is closed
boolean closed;
private BufferIterator(int beginRow) {
this.beginRow = Math.min(beginRow, numRows);
this.nextRow = this.beginRow;
this.versionSnapshot = externalBufferVersion;
this.closed = false;
}
private void checkValidity() {
if (closed) {
throw new RuntimeException("This iterator is closed!");
} else if (this.versionSnapshot != externalBufferVersion) {
throw new RuntimeException("This iterator is no longer valid!");
}
}
@Override
public void reset() throws IOException {
checkValidity();
resetImpl();
}
private void resetImpl() throws IOException {
closeCurrentFileReader();
nextRow = beginRow;
currentChannelID = -1;
currentIterator = null;
row = null;
reuse.clear();
}
@Override
public void close() {
if (closed) {
return;
}
try {
resetImpl();
} catch (IOException e) {
throw new RuntimeException(e);
}
if (freeMemory != null) {
freeMemory.clear();
}
if (reusableMemoryIterator != null) {
reusableMemoryIterator.close();
}
closed = true;
iterOpenedCount--;
}
@Override
public boolean advanceNext() {
checkValidity();
try {
updateIteratorIfNeeded();
// get from curr iterator or new iterator.
while (true) {
if (currentIterator != null && (row = currentIterator.next(reuse)) != null) {
this.nextRow++;
return true;
} else {
if (!nextIterator()) {
return false;
}
}
}
} catch (IOException e) {
throw new RuntimeException(e);
}
}
private boolean nextIterator() throws IOException {
if (currentChannelID == -1) {
// First call to next iterator. Fetch iterator according to beginRow.
if (isRowAllInFixedPart) {
gotoAllInFixedPartRow(beginRow);
} else {
gotoVariableLengthRow(beginRow);
}
} else if (currentChannelID == Integer.MAX_VALUE) {
// The last one is in memory, so the end.
return false;
} else if (currentChannelID < spilledChannelIDs.size() - 1) {
// Next spilled iterator.
nextSpilledIterator();
} else {
// It is the last iterator.
newMemoryIterator();
}
return true;
}
private boolean iteratorNeedsUpdate() {
int size = spilledChannelRowOffsets.size();
return size > 0
&& currentChannelID == Integer.MAX_VALUE
&& nextRow <= spilledChannelRowOffsets.get(size - 1);
}
private void updateIteratorIfNeeded() throws IOException {
if (iteratorNeedsUpdate()) {
reuse.clear();
reusableMemoryIterator = null;
if (isRowAllInFixedPart) {
gotoAllInFixedPartRow(nextRow);
} else {
gotoVariableLengthRow(nextRow);
}
}
}
@Override
public BinaryRowData getRow() {
return row;
}
private void closeCurrentFileReader() throws IOException {
if (fileReader != null) {
fileReader.close();
fileReader = null;
}
}
private void gotoAllInFixedPartRow(int beginRow) throws IOException {
// Find which channel contains the row.
int beginChannel = upperBound(beginRow, spilledChannelRowOffsets);
// Find the row number in its own channel (0-indexed).
int beginRowInChannel = getBeginIndexInChannel(beginRow, beginChannel);
if (beginRow == numRows) {
// Row number out of range! Should return an "empty" iterator.
newMemoryIterator(beginRowInChannel, inMemoryBuffer.getCurrentDataBufferOffset());
return;
}
// Fixed length. Calculate offset directly.
long numRecordsInSegment = segmentSize / rowLength;
long offset =
(beginRowInChannel / numRecordsInSegment) * segmentSize
+ (beginRowInChannel % numRecordsInSegment) * rowLength;
if (beginChannel < spilledChannelRowOffsets.size()) {
// Data on disk
newSpilledIterator(beginChannel, offset);
} else {
// Data in memory
newMemoryIterator(beginRowInChannel, offset);
}
}
private void gotoVariableLengthRow(int beginRow) throws IOException {
// Find which channel contains the row.
int beginChannel = upperBound(beginRow, spilledChannelRowOffsets);
// Find the row number in its own channel (0-indexed).
int beginRowInChannel = getBeginIndexInChannel(beginRow, beginChannel);
if (beginRow == numRows) {
// Row number out of range! Should return an "empty" iterator.
newMemoryIterator(beginRowInChannel, inMemoryBuffer.getCurrentDataBufferOffset());
return;
}
if (beginChannel < spilledChannelRowOffsets.size()) {
// Data on disk
newSpilledIterator(beginChannel);
} else {
// Data in memory
newMemoryIterator();
}
nextRow -= beginRowInChannel;
for (int i = 0; i < beginRowInChannel; i++) {
advanceNext();
}
}
private void nextSpilledIterator() throws IOException {
newSpilledIterator(currentChannelID + 1);
}
private void newSpilledIterator(int channelID) throws IOException {
newSpilledIterator(channelID, 0);
}
private void newSpilledIterator(int channelID, long offset) throws IOException {
ChannelWithMeta channel = spilledChannelIDs.get(channelID);
currentChannelID = channelID;
// close current reader first.
closeCurrentFileReader();
// calculate segment number
int segmentNum = (int) (offset / segmentSize);
long seekPosition = segmentNum * segmentSize;
// new reader.
this.fileReader = ioManager.createBlockChannelReader(channel.getChannel());
if (offset > 0) {
// seek to the beginning of that segment
fileReader.seekToPosition(seekPosition);
}
ChannelReaderInputView inView =
new HeaderlessChannelReaderInputView(
fileReader,
getReadMemory(),
channel.getBlockCount() - segmentNum,
channel.getNumBytesInLastBlock(),
false,
offset - seekPosition);
this.currentIterator =
new BinaryRowChannelInputViewIterator(inView, binaryRowSerializer);
}
private void newMemoryIterator() throws IOException {
newMemoryIterator(0, 0);
}
private void newMemoryIterator(int beginRow, long offset) throws IOException {
currentChannelID = Integer.MAX_VALUE;
// close curr reader first.
closeCurrentFileReader();
if (reusableMemoryIterator == null) {
reusableMemoryIterator = inMemoryBuffer.newIterator(beginRow, offset);
} else {
reusableMemoryIterator.reset(inMemoryBuffer.recordCount, offset);
}
this.currentIterator = reusableMemoryIterator;
}
private int getBeginIndexInChannel(int beginRow, int beginChannel) {
if (beginChannel > 0) {
return beginRow - spilledChannelRowOffsets.get(beginChannel - 1);
} else {
return beginRow;
}
}
private List<MemorySegment> getReadMemory() {
if (freeMemory == null) {
freeMemory = new ArrayList<>();
for (int i = 0; i < READ_BUFFER; i++) {
freeMemory.add(MemorySegmentFactory.allocateUnpooledSegment(segmentSize));
}
}
return freeMemory;
}
// Find the index of the first element which is strictly greater than `goal` in `list`.
// `list` must be sorted.
// If every element in `list` is not larger than `goal`, return `list.size()`.
private int upperBound(int goal, List<Integer> list) {
if (list.size() == 0) {
return 0;
}
if (list.get(list.size() - 1) <= goal) {
return list.size();
}
// Binary search
int head = 0;
int tail = list.size() - 1;
int mid;
while (head < tail) {
mid = (head + tail) / 2;
if (list.get(mid) <= goal) {
head = mid + 1;
} else {
tail = mid;
}
}
return head;
}
}
@VisibleForTesting
List<ChannelWithMeta> getSpillChannels() {
return spilledChannelIDs;
}
/**
* In memory buffer that stores records to memorySegments, returns a iterator that map from
* memory.
*/
private | BufferIterator |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java | {
"start": 33815,
"end": 33992
} | class ____ extends Options.BooleanOption implements
Option {
AppendIfExistsOption(boolean value) {
super(value);
}
}
static | AppendIfExistsOption |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/issue_1200/Issue1271.java | {
"start": 1144,
"end": 1315
} | class ____ {
private int a;
public int getA() {
return a;
}
public void setA(int a) {
this.a = a;
}
}
}
| B |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/metrics/dump/QueryScopeInfo.java | {
"start": 957,
"end": 2504
} | class ____ {
/**
* Categories to be returned by {@link QueryScopeInfo#getCategory()} to avoid instanceof checks.
*/
public static final byte INFO_CATEGORY_JM = 0;
public static final byte INFO_CATEGORY_TM = 1;
public static final byte INFO_CATEGORY_JOB = 2;
public static final byte INFO_CATEGORY_TASK = 3;
public static final byte INFO_CATEGORY_OPERATOR = 4;
public static final byte INFO_CATEGORY_JM_OPERATOR = 5;
/** The remaining scope not covered by specific fields. */
public final String scope;
private QueryScopeInfo(String scope) {
this.scope = scope;
}
/**
* Create a copy of this QueryScopeInfo and append the given scope.
*
* @param userScope scope to append
* @return modified copy of this QueryScopeInfo
*/
public abstract QueryScopeInfo copy(String userScope);
/**
* Returns the category for this QueryScopeInfo.
*
* @return category
*/
public abstract byte getCategory();
@Override
public String toString() {
return "QueryScopeInfo{"
+ "scope='"
+ scope
+ '\''
+ ", category='"
+ getCategory()
+ '\''
+ '}';
}
protected String concatScopes(String additionalScope) {
return scope.isEmpty() ? additionalScope : scope + "." + additionalScope;
}
/** Container for the job manager scope. Stores no additional information. */
public static | QueryScopeInfo |
java | ReactiveX__RxJava | src/jmh/java/io/reactivex/rxjava3/core/XMapYPerf.java | {
"start": 1069,
"end": 15326
} | class ____ {
@Param({ "1", "10", "100", "1000", "10000", "100000", "1000000" })
public int times;
Flowable<Integer> flowFlatMapIterable1;
Flowable<Integer> flowFlatMapIterable0;
Flowable<Integer> flowFlatMapFlowable0;
Flowable<Integer> flowFlatMapFlowable1;
Flowable<Integer> flowFlatMapSingle1;
Flowable<Integer> flowFlatMapMaybe1;
Flowable<Integer> flowFlatMapMaybe0;
Completable flowFlatMapCompletable0;
// oooooooooooooooooooooooooooooooooooooooooo
Flowable<Integer> flowFlatMapSingleAsFlow1;
Flowable<Integer> flowFlatMapMaybeAsFlow1;
Flowable<Integer> flowFlatMapMaybeAsFlow0;
Flowable<Integer> flowFlatMapCompletableAsFlow0;
Flowable<Integer> flowFlatMapIterableAsFlow1;
Flowable<Integer> flowFlatMapIterableAsFlow0;
// -----------------------------------------------------------------
Observable<Integer> obsFlatMapIterable0;
Observable<Integer> obsFlatMapIterable1;
Observable<Integer> obsFlatMapObservable0;
Observable<Integer> obsFlatMapObservable1;
Observable<Integer> obsFlatMapSingle1;
Observable<Integer> obsFlatMapMaybe1;
Observable<Integer> obsFlatMapMaybe0;
Completable obsFlatMapCompletable0;
// oooooooooooooooooooooooooooooooooooooooooo
Observable<Integer> obsFlatMapSingleAsObs1;
Observable<Integer> obsFlatMapMaybeAsObs1;
Observable<Integer> obsFlatMapMaybeAsObs0;
Observable<Integer> obsFlatMapCompletableAsObs0;
Observable<Integer> obsFlatMapIterableAsObs1;
Observable<Integer> obsFlatMapIterableAsObs0;
@Setup
public void setup() {
Integer[] values = new Integer[times];
Arrays.fill(values, 777);
Flowable<Integer> fsource = Flowable.fromArray(values);
flowFlatMapFlowable1 = fsource.flatMap(new Function<Integer, Publisher<Integer>>() {
@Override
public Publisher<Integer> apply(Integer v) {
return Flowable.just(v);
}
});
flowFlatMapFlowable0 = fsource.flatMap(new Function<Integer, Publisher<Integer>>() {
@Override
public Publisher<Integer> apply(Integer v) {
return Flowable.empty();
}
});
flowFlatMapSingle1 = fsource.flatMapSingle(new Function<Integer, SingleSource<Integer>>() {
@Override
public SingleSource<Integer> apply(Integer v) {
return Single.just(v);
}
});
flowFlatMapMaybe1 = fsource.flatMapMaybe(new Function<Integer, MaybeSource<Integer>>() {
@Override
public MaybeSource<Integer> apply(Integer v) {
return Maybe.just(v);
}
});
flowFlatMapMaybe0 = fsource.flatMapMaybe(new Function<Integer, MaybeSource<Integer>>() {
@Override
public MaybeSource<Integer> apply(Integer v) {
return Maybe.empty();
}
});
flowFlatMapCompletable0 = fsource.flatMapCompletable(new Function<Integer, CompletableSource>() {
@Override
public CompletableSource apply(Integer v) {
return Completable.complete();
}
});
flowFlatMapIterable1 = fsource.flatMapIterable(new Function<Integer, Iterable<Integer>>() {
@Override
public Iterable<Integer> apply(Integer v) {
return Collections.singletonList(v);
}
});
flowFlatMapIterable0 = fsource.flatMapIterable(new Function<Integer, Iterable<Integer>>() {
@Override
public Iterable<Integer> apply(Integer v) {
return Collections.emptyList();
}
});
flowFlatMapSingle1 = fsource.flatMapSingle(new Function<Integer, SingleSource<Integer>>() {
@Override
public SingleSource<Integer> apply(Integer v) {
return Single.just(v);
}
});
flowFlatMapMaybe1 = fsource.flatMapMaybe(new Function<Integer, MaybeSource<Integer>>() {
@Override
public MaybeSource<Integer> apply(Integer v) {
return Maybe.just(v);
}
});
flowFlatMapMaybe0 = fsource.flatMapMaybe(new Function<Integer, MaybeSource<Integer>>() {
@Override
public MaybeSource<Integer> apply(Integer v) {
return Maybe.empty();
}
});
flowFlatMapCompletable0 = fsource.flatMapCompletable(new Function<Integer, CompletableSource>() {
@Override
public CompletableSource apply(Integer v) {
return Completable.complete();
}
});
// ooooooooooooooooooooooooo
flowFlatMapSingleAsFlow1 = fsource.flatMap(new Function<Integer, Publisher<Integer>>() {
@Override
public Publisher<Integer> apply(Integer v) {
return Single.just(v).toFlowable();
}
});
flowFlatMapMaybeAsFlow1 = fsource.flatMap(new Function<Integer, Publisher<Integer>>() {
@Override
public Publisher<Integer> apply(Integer v) {
return Maybe.just(v).toFlowable();
}
});
flowFlatMapMaybeAsFlow0 = fsource.flatMap(new Function<Integer, Publisher<Integer>>() {
@Override
public Publisher<Integer> apply(Integer v) {
return Maybe.<Integer>empty().toFlowable();
}
});
flowFlatMapCompletableAsFlow0 = fsource.flatMap(new Function<Integer, Publisher<Integer>>() {
@Override
public Publisher<Integer> apply(Integer v) {
return Completable.complete().toFlowable();
}
});
flowFlatMapIterableAsFlow1 = fsource.flatMap(new Function<Integer, Publisher<Integer>>() {
@Override
public Publisher<Integer> apply(Integer v) {
return Flowable.fromIterable(Collections.singletonList(v));
}
});
flowFlatMapIterableAsFlow0 = fsource.flatMap(new Function<Integer, Publisher<Integer>>() {
@Override
public Publisher<Integer> apply(Integer v) {
return Flowable.fromIterable(Collections.emptyList());
}
});
// -------------------------------------------------------------------
Observable<Integer> osource = Observable.fromArray(values);
obsFlatMapObservable1 = osource.flatMap(new Function<Integer, Observable<Integer>>() {
@Override
public Observable<Integer> apply(Integer v) {
return Observable.just(v);
}
});
obsFlatMapObservable0 = osource.flatMap(new Function<Integer, Observable<Integer>>() {
@Override
public Observable<Integer> apply(Integer v) {
return Observable.empty();
}
});
obsFlatMapSingle1 = osource.flatMapSingle(new Function<Integer, SingleSource<Integer>>() {
@Override
public SingleSource<Integer> apply(Integer v) {
return Single.just(v);
}
});
obsFlatMapMaybe1 = osource.flatMapMaybe(new Function<Integer, MaybeSource<Integer>>() {
@Override
public MaybeSource<Integer> apply(Integer v) {
return Maybe.just(v);
}
});
obsFlatMapMaybe0 = osource.flatMapMaybe(new Function<Integer, MaybeSource<Integer>>() {
@Override
public MaybeSource<Integer> apply(Integer v) {
return Maybe.empty();
}
});
obsFlatMapCompletable0 = osource.flatMapCompletable(new Function<Integer, CompletableSource>() {
@Override
public CompletableSource apply(Integer v) {
return Completable.complete();
}
});
obsFlatMapIterable1 = osource.flatMapIterable(new Function<Integer, Iterable<Integer>>() {
@Override
public Iterable<Integer> apply(Integer v) {
return Collections.singletonList(v);
}
});
obsFlatMapIterable0 = osource.flatMapIterable(new Function<Integer, Iterable<Integer>>() {
@Override
public Iterable<Integer> apply(Integer v) {
return Collections.emptyList();
}
});
// ooooooooooooooooooooooooo
obsFlatMapSingleAsObs1 = osource.flatMap(new Function<Integer, Observable<Integer>>() {
@Override
public Observable<Integer> apply(Integer v) {
return Single.just(v).toObservable();
}
});
obsFlatMapMaybeAsObs1 = osource.flatMap(new Function<Integer, Observable<Integer>>() {
@Override
public Observable<Integer> apply(Integer v) {
return Maybe.just(v).toObservable();
}
});
obsFlatMapMaybeAsObs0 = osource.flatMap(new Function<Integer, Observable<Integer>>() {
@Override
public Observable<Integer> apply(Integer v) {
return Maybe.<Integer>empty().toObservable();
}
});
obsFlatMapCompletableAsObs0 = osource.flatMap(new Function<Integer, Observable<Integer>>() {
@Override
public Observable<Integer> apply(Integer v) {
return Completable.complete().toObservable();
}
});
obsFlatMapIterableAsObs1 = osource.flatMap(new Function<Integer, Observable<Integer>>() {
@Override
public Observable<Integer> apply(Integer v) {
return Observable.fromIterable(Collections.singletonList(v));
}
});
obsFlatMapIterableAsObs0 = osource.flatMap(new Function<Integer, Observable<Integer>>() {
@Override
public Observable<Integer> apply(Integer v) {
return Observable.fromIterable(Collections.emptyList());
}
});
}
@Benchmark
public void flowFlatMapIterable1(Blackhole bh) {
flowFlatMapIterable1.subscribe(new PerfConsumer(bh));
}
@Benchmark
public void flowFlatMapIterable0(Blackhole bh) {
flowFlatMapIterable0.subscribe(new PerfConsumer(bh));
}
@Benchmark
public void flowFlatMapFlowable0(Blackhole bh) {
flowFlatMapFlowable0.subscribe(new PerfConsumer(bh));
}
@Benchmark
public void flowFlatMapFlowable1(Blackhole bh) {
flowFlatMapFlowable1.subscribe(new PerfConsumer(bh));
}
@Benchmark
public void flowFlatMapSingle1(Blackhole bh) {
flowFlatMapSingle1.subscribe(new PerfConsumer(bh));
}
@Benchmark
public void flowFlatMapMaybe1(Blackhole bh) {
flowFlatMapMaybe1.subscribe(new PerfConsumer(bh));
}
@Benchmark
public void flowFlatMapMaybe0(Blackhole bh) {
flowFlatMapMaybe0.subscribe(new PerfConsumer(bh));
}
@Benchmark
public void flowFlatMapCompletable0(Blackhole bh) {
flowFlatMapCompletable0.subscribe(new PerfConsumer(bh));
}
// oooooooooooooooooooooooooooooooo
@Benchmark
public void flowFlatMapIterableAsFlow1(Blackhole bh) {
flowFlatMapIterableAsFlow1.subscribe(new PerfConsumer(bh));
}
@Benchmark
public void flowFlatMapIterableAsFlow0(Blackhole bh) {
flowFlatMapIterableAsFlow0.subscribe(new PerfConsumer(bh));
}
@Benchmark
public void flowFlatMapSingleAsFlow1(Blackhole bh) {
flowFlatMapSingleAsFlow1.subscribe(new PerfConsumer(bh));
}
@Benchmark
public void flowFlatMapMaybeAsFlow1(Blackhole bh) {
flowFlatMapMaybeAsFlow1.subscribe(new PerfConsumer(bh));
}
@Benchmark
public void flowFlatMapMaybeAsFlow0(Blackhole bh) {
flowFlatMapMaybeAsFlow0.subscribe(new PerfConsumer(bh));
}
@Benchmark
public void flowFlatMapCompletableAsFlow0(Blackhole bh) {
flowFlatMapCompletableAsFlow0.subscribe(new PerfConsumer(bh));
}
// --------------------------------------------------------------------------------
@Benchmark
public void obsFlatMapIterable0(Blackhole bh) {
obsFlatMapIterable0.subscribe(new PerfConsumer(bh));
}
@Benchmark
public void obsFlatMapIterable1(Blackhole bh) {
obsFlatMapIterable1.subscribe(new PerfConsumer(bh));
}
@Benchmark
public void obsFlatMapObservable0(Blackhole bh) {
obsFlatMapObservable0.subscribe(new PerfConsumer(bh));
}
@Benchmark
public void obsFlatMapObservable1(Blackhole bh) {
obsFlatMapObservable1.subscribe(new PerfConsumer(bh));
}
@Benchmark
public void obsFlatMapSingle1(Blackhole bh) {
obsFlatMapSingle1.subscribe(new PerfConsumer(bh));
}
@Benchmark
public void obsFlatMapMaybe1(Blackhole bh) {
obsFlatMapMaybe1.subscribe(new PerfConsumer(bh));
}
@Benchmark
public void obsFlatMapMaybe0(Blackhole bh) {
obsFlatMapMaybe0.subscribe(new PerfConsumer(bh));
}
@Benchmark
public void obsFlatMapCompletable0(Blackhole bh) {
obsFlatMapCompletable0.subscribe(new PerfConsumer(bh));
}
// oooooooooooooooooooooooooooooooo
@Benchmark
public void obsFlatMapIterableAsObs1(Blackhole bh) {
obsFlatMapIterableAsObs1.subscribe(new PerfConsumer(bh));
}
@Benchmark
public void obsFlatMapIterableAsObs0(Blackhole bh) {
obsFlatMapIterableAsObs0.subscribe(new PerfConsumer(bh));
}
@Benchmark
public void obsFlatMapSingleAsObs1(Blackhole bh) {
obsFlatMapSingleAsObs1.subscribe(new PerfConsumer(bh));
}
@Benchmark
public void obsFlatMapMaybeAsObs1(Blackhole bh) {
obsFlatMapMaybeAsObs1.subscribe(new PerfConsumer(bh));
}
@Benchmark
public void obsFlatMapMaybeAsObs0(Blackhole bh) {
obsFlatMapMaybeAsObs0.subscribe(new PerfConsumer(bh));
}
@Benchmark
public void obsFlatMapCompletableAsObs0(Blackhole bh) {
obsFlatMapCompletableAsObs0.subscribe(new PerfConsumer(bh));
}
}
| XMapYPerf |
java | apache__logging-log4j2 | log4j-layout-template-json/src/main/java/org/apache/logging/log4j/layout/template/json/util/QueueingRecyclerFactory.java | {
"start": 968,
"end": 1499
} | class ____ implements RecyclerFactory {
private final Supplier<Queue<Object>> queueSupplier;
public QueueingRecyclerFactory(final Supplier<Queue<Object>> queueSupplier) {
this.queueSupplier = queueSupplier;
}
@Override
public <V> Recycler<V> create(final Supplier<V> supplier, final Consumer<V> cleaner) {
@SuppressWarnings("unchecked")
final Queue<V> queue = (Queue<V>) queueSupplier.get();
return new QueueingRecycler<>(supplier, cleaner, queue);
}
}
| QueueingRecyclerFactory |
java | spring-projects__spring-boot | module/spring-boot-gson/src/test/java/org/springframework/boot/gson/autoconfigure/GsonTesterAutoConfigurationTests.java | {
"start": 1503,
"end": 2312
} | class ____ {
private final ApplicationContextRunner runner = new ApplicationContextRunner().withConfiguration(AutoConfigurations
.of(JsonTestersAutoConfiguration.class, GsonAutoConfiguration.class, GsonTesterTestAutoConfiguration.class));
@Test
void hintsAreContributed() {
this.runner.withPropertyValues("spring.test.jsontesters.enabled=true").prepare((context) -> {
TestGenerationContext generationContext = new TestGenerationContext();
new ApplicationContextAotGenerator().processAheadOfTime(
(GenericApplicationContext) context.getSourceApplicationContext(), generationContext);
ReflectionHintsPredicates hints = RuntimeHintsPredicates.reflection();
assertThat(hints.onType(GsonTester.class)).accepts(generationContext.getRuntimeHints());
});
}
}
| GsonTesterAutoConfigurationTests |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/Checkpoints.java | {
"start": 2864,
"end": 16130
} | class ____ {
private static final Logger LOG = LoggerFactory.getLogger(Checkpoints.class);
/** Magic number at the beginning of every checkpoint metadata file, for sanity checks. */
public static final int HEADER_MAGIC_NUMBER = 0x4960672d;
// ------------------------------------------------------------------------
// Writing out checkpoint metadata
// ------------------------------------------------------------------------
public static void storeCheckpointMetadata(
CheckpointMetadata checkpointMetadata, OutputStream out) throws IOException {
DataOutputStream dos = new DataOutputStream(out);
storeCheckpointMetadata(checkpointMetadata, dos);
}
public static void storeCheckpointMetadata(
CheckpointMetadata checkpointMetadata, DataOutputStream out) throws IOException {
storeCheckpointMetadata(checkpointMetadata, out, MetadataV6Serializer.INSTANCE);
}
public static void storeCheckpointMetadata(
CheckpointMetadata checkpointMetadata,
DataOutputStream out,
MetadataSerializer serializer)
throws IOException {
// write generic header
out.writeInt(HEADER_MAGIC_NUMBER);
out.writeInt(serializer.getVersion());
serializer.serialize(checkpointMetadata, out);
}
// ------------------------------------------------------------------------
// Reading and validating checkpoint metadata
// ------------------------------------------------------------------------
public static CheckpointMetadata loadCheckpointMetadata(
DataInputStream in, ClassLoader classLoader, String externalPointer)
throws IOException {
checkNotNull(in, "input stream");
checkNotNull(classLoader, "classLoader");
final int magicNumber = in.readInt();
if (magicNumber == HEADER_MAGIC_NUMBER) {
final int version = in.readInt();
final MetadataSerializer serializer = MetadataSerializers.getSerializer(version);
return serializer.deserialize(in, classLoader, externalPointer);
} else {
throw new IOException(
"Unexpected magic number. This can have multiple reasons: "
+ "(1) You are trying to load a Flink 1.0 savepoint, which is not supported by this "
+ "version of Flink. (2) The file you were pointing to is not a savepoint at all. "
+ "(3) The savepoint file has been corrupted.");
}
}
public static CompletedCheckpoint loadAndValidateCheckpoint(
JobID jobId,
Map<JobVertexID, ExecutionJobVertex> tasks,
CompletedCheckpointStorageLocation location,
ClassLoader classLoader,
boolean allowNonRestoredState,
CheckpointProperties checkpointProperties)
throws IOException {
checkNotNull(jobId, "jobId");
checkNotNull(tasks, "tasks");
checkNotNull(location, "location");
checkNotNull(classLoader, "classLoader");
final StreamStateHandle metadataHandle = location.getMetadataHandle();
final String checkpointPointer = location.getExternalPointer();
// (1) load the savepoint
final CheckpointMetadata checkpointMetadata;
try (InputStream in = metadataHandle.openInputStream()) {
DataInputStream dis = new DataInputStream(in);
checkpointMetadata = loadCheckpointMetadata(dis, classLoader, checkpointPointer);
}
// generate mapping from operator to task
Map<OperatorID, ExecutionJobVertex> operatorToJobVertexMapping = new HashMap<>();
for (ExecutionJobVertex task : tasks.values()) {
for (OperatorIDPair operatorIDPair : task.getOperatorIDs()) {
operatorToJobVertexMapping.put(operatorIDPair.getGeneratedOperatorID(), task);
operatorIDPair
.getUserDefinedOperatorID()
.ifPresent(id -> operatorToJobVertexMapping.put(id, task));
}
}
// (2) validate it (parallelism, etc)
HashMap<OperatorID, OperatorState> operatorStates =
CollectionUtil.newHashMapWithExpectedSize(
checkpointMetadata.getOperatorStates().size());
for (OperatorState operatorState : checkpointMetadata.getOperatorStates()) {
ExecutionJobVertex executionJobVertex =
operatorToJobVertexMapping.get(operatorState.getOperatorID());
if (executionJobVertex != null) {
if (executionJobVertex.getMaxParallelism() == operatorState.getMaxParallelism()
|| executionJobVertex.canRescaleMaxParallelism(
operatorState.getMaxParallelism())) {
operatorStates.put(operatorState.getOperatorID(), operatorState);
} else {
String msg =
String.format(
"Failed to rollback to checkpoint/savepoint %s. "
+ "Max parallelism mismatch between checkpoint/savepoint state and new program. "
+ "Cannot map operator %s with max parallelism %d to new program with "
+ "max parallelism %d. This indicates that the program has been changed "
+ "in a non-compatible way after the checkpoint/savepoint.",
checkpointMetadata,
operatorState.getOperatorID(),
operatorState.getMaxParallelism(),
executionJobVertex.getMaxParallelism());
throw new IllegalStateException(msg);
}
} else if (allowNonRestoredState) {
LOG.info(
"Skipping savepoint state for operator {}.", operatorState.getOperatorID());
} else {
if (operatorState.getCoordinatorState() != null) {
throwNonRestoredStateException(
checkpointPointer, operatorState.getOperatorID());
}
for (OperatorSubtaskState operatorSubtaskState : operatorState.getStates()) {
if (operatorSubtaskState.hasState()) {
throwNonRestoredStateException(
checkpointPointer, operatorState.getOperatorID());
}
}
LOG.info(
"Skipping empty savepoint state for operator {}.",
operatorState.getOperatorID());
}
}
return new CompletedCheckpoint(
jobId,
checkpointMetadata.getCheckpointId(),
0L,
0L,
operatorStates,
checkpointMetadata.getMasterStates(),
checkpointProperties,
location,
null,
checkpointMetadata.getCheckpointProperties());
}
private static void throwNonRestoredStateException(
String checkpointPointer, OperatorID operatorId) {
String msg =
String.format(
"Failed to rollback to checkpoint/savepoint %s. "
+ "Cannot map checkpoint/savepoint state for operator %s to the new program, "
+ "because the operator is not available in the new program. If "
+ "you want to allow to skip this, you can set the --allowNonRestoredState "
+ "option on the CLI.",
checkpointPointer, operatorId);
throw new IllegalStateException(msg);
}
// ------------------------------------------------------------------------
// Savepoint Disposal Hooks
// ------------------------------------------------------------------------
public static void disposeSavepoint(
String pointer, CheckpointStorage checkpointStorage, ClassLoader classLoader)
throws IOException, FlinkException {
checkNotNull(pointer, "location");
checkNotNull(checkpointStorage, "stateBackend");
checkNotNull(classLoader, "classLoader");
final CompletedCheckpointStorageLocation checkpointLocation =
checkpointStorage.resolveCheckpoint(pointer);
final StreamStateHandle metadataHandle = checkpointLocation.getMetadataHandle();
// load the savepoint object (the metadata) to have all the state handles that we need
// to dispose of all state
final CheckpointMetadata metadata;
try (InputStream in = metadataHandle.openInputStream();
DataInputStream dis = new DataInputStream(in)) {
metadata = loadCheckpointMetadata(dis, classLoader, pointer);
}
Exception exception = null;
// first dispose the savepoint metadata, so that the savepoint is not
// addressable any more even if the following disposal fails
try {
metadataHandle.discardState();
} catch (Exception e) {
exception = e;
}
// now dispose the savepoint data
try {
metadata.dispose();
} catch (Exception e) {
exception = ExceptionUtils.firstOrSuppressed(e, exception);
}
// now dispose the location (directory, table, whatever)
try {
checkpointLocation.disposeStorageLocation();
} catch (Exception e) {
exception = ExceptionUtils.firstOrSuppressed(e, exception);
}
// forward exceptions caught in the process
if (exception != null) {
ExceptionUtils.rethrowIOException(exception);
}
}
public static void disposeSavepoint(
String pointer,
Configuration configuration,
ClassLoader classLoader,
@Nullable Logger logger)
throws IOException, FlinkException {
checkNotNull(pointer, "location");
checkNotNull(configuration, "configuration");
checkNotNull(classLoader, "classLoader");
// An empty job configuration is utilized here because `disposeSavepoint` is intended
// for cluster-wide operations, which do not require job-specific configuration.
CheckpointStorage storage =
loadCheckpointStorage(new Configuration(), configuration, classLoader, logger);
disposeSavepoint(pointer, storage, classLoader);
}
@Nonnull
public static StateBackend loadStateBackend(
Configuration jobConfig,
Configuration clusterConfig,
ClassLoader classLoader,
@Nullable Logger logger) {
if (logger != null) {
logger.info("Attempting to load configured state backend for savepoint disposal");
}
// Job level config can override the cluster level config.
Configuration mergedConfig = new Configuration(clusterConfig);
mergedConfig.addAll(jobConfig);
try {
return StateBackendLoader.loadStateBackendFromConfig(mergedConfig, classLoader, null);
} catch (Throwable t) {
// catches exceptions and errors (like linking errors)
if (logger != null) {
logger.info("Could not load configured state backend.");
logger.debug("Detailed exception:", t);
}
return new HashMapStateBackend();
}
}
@Nonnull
public static CheckpointStorage loadCheckpointStorage(
Configuration jobConfig,
Configuration clusterConfig,
ClassLoader classLoader,
@Nullable Logger logger) {
StateBackend backend = loadStateBackend(jobConfig, clusterConfig, classLoader, logger);
if (logger != null) {
logger.info("Attempting to load configured checkpoint storage for savepoint disposal");
}
CheckpointStorage checkpointStorage = null;
try {
checkpointStorage =
CheckpointStorageLoader.load(
null, backend, jobConfig, clusterConfig, classLoader, null);
} catch (Throwable t) {
// catches exceptions and errors (like linking errors)
if (logger != null) {
logger.info("Could not load configured state backend.");
logger.debug("Detailed exception:", t);
}
}
if (checkpointStorage == null) {
// We use the jobmanager checkpoint storage by default.
// The JobManagerCheckpointStorage is actually
// FileSystem-based for metadata
return new JobManagerCheckpointStorage();
}
return checkpointStorage;
}
// ------------------------------------------------------------------------
/** This | Checkpoints |
java | apache__camel | components/camel-ssh/src/test/java/org/apache/camel/component/ssh/EchoCommandFactory.java | {
"start": 1399,
"end": 3222
} | class ____ implements Command, Runnable {
private String command;
private OutputStream out;
private OutputStream err;
private ExitCallback callback;
private Thread thread;
public EchoCommand(String command) {
this.command = command;
}
@Override
public void setInputStream(InputStream in) {
}
@Override
public void setOutputStream(OutputStream out) {
this.out = out;
}
@Override
public void setErrorStream(OutputStream err) {
this.err = err;
}
@Override
public void setExitCallback(ExitCallback callback) {
this.callback = callback;
}
@Override
public void start(ChannelSession channelSession, Environment environment) throws IOException {
thread = new Thread(this, "EchoCommand");
thread.start();
}
@Override
public void destroy(ChannelSession channelSession) throws Exception {
// noop
}
@Override
public void run() {
boolean succeeded = true;
String message = null;
try {
// we set the error with the same command message
err.write("Error:".getBytes());
err.write(command.getBytes());
err.flush();
out.write(command.getBytes());
out.flush();
} catch (Exception e) {
succeeded = false;
message = e.toString();
} finally {
if (succeeded) {
callback.onExit(0);
} else {
callback.onExit(1, message);
}
}
}
}
}
| EchoCommand |
java | apache__camel | components/camel-cxf/camel-cxf-soap/src/test/java/org/apache/camel/component/cxf/jaxws/CxfJavaOnlyPayloadModeTest.java | {
"start": 1389,
"end": 3682
} | class ____ extends CamelTestSupport {
private static int port1 = CXFTestSupport.getPort1();
private String url = "cxf://http://localhost:" + port1 + "/CxfJavaOnlyPayloadModeTest/helloworld"
+ "?wsdlURL=classpath:person.wsdl"
+ "&serviceName={http://camel.apache.org/wsdl-first}PersonService"
+ "&portName={http://camel.apache.org/wsdl-first}soap"
+ "&dataFormat=PAYLOAD"
+ "&properties.exceptionMessageCauseEnabled=true&properties.faultStackTraceEnabled=true";
@Test
public void testCxfJavaOnly() throws Exception {
String s = "<GetPerson xmlns=\"http://camel.apache.org/wsdl-first/types\"><personId>123</personId></GetPerson>";
Document xml = context.getTypeConverter().convertTo(Document.class, s);
Object output = template.requestBody(url, xml);
assertNotNull(output);
// using CxfPayload in payload mode
CxfPayload<?> payload = (CxfPayload<?>) output;
// convert the payload body to string
String reply = context.getTypeConverter().convertTo(String.class, payload.getBody().get(0));
assertNotNull(reply);
assertTrue(reply.contains("<personId>123</personId"));
assertTrue(reply.contains("<ssn>456</ssn"));
assertTrue(reply.contains("<name>Donald Duck</name"));
}
@Override
protected RouteBuilder createRouteBuilder() throws Exception {
return new RouteBuilder() {
@Override
public void configure() throws Exception {
from(url).process(new Processor() {
@Override
public void process(Exchange exchange) throws Exception {
String s = "<GetPersonResponse xmlns=\"http://camel.apache.org/wsdl-first/types\">"
+ "<personId>123</personId><ssn>456</ssn><name>Donald Duck</name>"
+ "</GetPersonResponse>";
Document xml = context.getTypeConverter().convertTo(Document.class, s);
exchange.getMessage().setBody(xml);
}
});
}
};
}
}
| CxfJavaOnlyPayloadModeTest |
java | spring-projects__spring-framework | spring-beans/src/testFixtures/java/org/springframework/beans/testfixture/beans/NumberTestBean.java | {
"start": 802,
"end": 2561
} | class ____ {
private short short1;
private Short short2;
private int int1;
private Integer int2;
private long long1;
private Long long2;
private BigInteger bigInteger;
private float float1;
private Float float2;
private double double1;
private Double double2;
private BigDecimal bigDecimal;
public short getShort1() {
return short1;
}
public void setShort1(short short1) {
this.short1 = short1;
}
public Short getShort2() {
return short2;
}
public void setShort2(Short short2) {
this.short2 = short2;
}
public int getInt1() {
return int1;
}
public void setInt1(int int1) {
this.int1 = int1;
}
public Integer getInt2() {
return int2;
}
public void setInt2(Integer int2) {
this.int2 = int2;
}
public long getLong1() {
return long1;
}
public void setLong1(long long1) {
this.long1 = long1;
}
public Long getLong2() {
return long2;
}
public void setLong2(Long long2) {
this.long2 = long2;
}
public BigInteger getBigInteger() {
return bigInteger;
}
public void setBigInteger(BigInteger bigInteger) {
this.bigInteger = bigInteger;
}
public float getFloat1() {
return float1;
}
public void setFloat1(float float1) {
this.float1 = float1;
}
public Float getFloat2() {
return float2;
}
public void setFloat2(Float float2) {
this.float2 = float2;
}
public double getDouble1() {
return double1;
}
public void setDouble1(double double1) {
this.double1 = double1;
}
public Double getDouble2() {
return double2;
}
public void setDouble2(Double double2) {
this.double2 = double2;
}
public BigDecimal getBigDecimal() {
return bigDecimal;
}
public void setBigDecimal(BigDecimal bigDecimal) {
this.bigDecimal = bigDecimal;
}
}
| NumberTestBean |
java | hibernate__hibernate-orm | local-build-plugins/src/main/java/org/hibernate/orm/properties/AsciiDocWriter.java | {
"start": 448,
"end": 4944
} | class ____ {
public static void writeToFile(
String anchorNameBase,
Map<SettingsDocSection, SortedSet<SettingDescriptor>> settingDescriptorMap,
RegularFile outputFile,
Project project) {
final File outputFileAsFile = outputFile.getAsFile();
try {
Files.createDirectories( outputFileAsFile.getParentFile().toPath() );
}
catch (IOException e) {
throw new RuntimeException( "Unable to prepare output directory for writing", e );
}
try ( FileWriter fileWriter = new FileWriter( outputFileAsFile ) ) {
write( anchorNameBase, settingDescriptorMap, fileWriter, project );
}
catch (IOException e) {
throw new RuntimeException( "Failed to produce asciidoc output for collected properties", e );
}
}
private static void write(
String anchorNameBase,
Map<SettingsDocSection, SortedSet<SettingDescriptor>> settingDescriptorMap,
FileWriter writer,
Project project) throws IOException {
for ( Map.Entry<SettingsDocSection, SortedSet<SettingDescriptor>> entry : settingDescriptorMap.entrySet() ) {
final SettingsDocSection sectionDescriptor = entry.getKey();
final SortedSet<SettingDescriptor> sectionSettingDescriptors = entry.getValue();
if ( sectionSettingDescriptors.isEmpty() ) {
continue;
}
final String sectionName = sectionDescriptor.getName();
// write an anchor in the form `[[{anchorNameBase}-{sectionName}]]`
tryToWriteLine( writer, "[[", anchorNameBase, "-", sectionName, "]]" );
tryToWriteLine( writer, "=== ", sectionDescriptor.getSummary() );
writer.write( '\n' );
for ( SettingDescriptor settingDescriptor : sectionSettingDescriptors ) {
// write an anchor in the form `[[{anchorNameBase}-{settingName}]]`
tryToWriteLine( writer, "[[", anchorNameBase, "-", settingDescriptor.getName(), "]]" );
tryToWriteLine( writer, "==== ", settingName( settingDescriptor ) );
writeMetadata( settingDescriptor, writer );
writer.write( settingDescriptor.getComment() );
writer.write( "\n\n'''\n" );
}
writer.write( '\n' );
}
}
private static void writeMetadata(SettingDescriptor settingDescriptor, FileWriter writer) throws IOException {
if ( !settingDescriptor.hasMetadata() ) {
return;
}
writer.write( "****\n" );
writer.write(
String.format(
Locale.ROOT,
"**See:** %s[%s.%s]\n\n",
settingDescriptor.getPublishedJavadocLink(),
Utils.withoutPackagePrefix( settingDescriptor.getSettingsClassName() ),
settingDescriptor.getSettingFieldName()
)
);
// NOTE : Asciidoctor requires that italic always be the innermost formatting
final SettingDescriptor.LifecycleDetails lifecycleDetails = settingDescriptor.getLifecycleDetails();
// NOTE : at the moment, there is at least one setting that is incubating AND deprecated which fundamentally seems wrong
if ( lifecycleDetails.isIncubating() ) {
writer.write( "NOTE: *_This setting is considered incubating_*\n\n" );
}
if ( lifecycleDetails.isDeprecated() ) {
writer.write( "WARNING: *_This setting is considered deprecated_*\n\n" );
}
if ( settingDescriptor.isUnsafe() ) {
writer.write( "WARNING: *_This setting is considered unsafe_*\n\n" );
}
if ( settingDescriptor.isCompatibility() ) {
writer.write( "INFO: *_This setting manages a certain backwards compatibility_*\n\n" );
}
if ( lifecycleDetails.getSince() != null ) {
writer.write( "*_Since:_* _" + lifecycleDetails.getSince() + "_\n\n" );
}
if ( settingDescriptor.getDefaultValue() != null ) {
writer.write( "*_Default Value:_* " + settingDescriptor.getDefaultValue() + "\n\n" );
}
if ( settingDescriptor.getApiNote() != null ) {
writer.write( settingDescriptor.getApiNote() + "\n\n" );
}
writer.write( "****\n\n" );
}
private static String settingName(SettingDescriptor settingDescriptor) {
if ( settingDescriptor.getLifecycleDetails().isDeprecated() ) {
return String.format(
Locale.ROOT,
"`[.line-through]#%s#`",
settingDescriptor.getName()
);
}
else {
return String.format(
Locale.ROOT,
"`%s`",
settingDescriptor.getName()
);
}
}
private static void tryToWriteLine(Writer writer, String prefix, String value, String... other) {
try {
writer.write( prefix );
writer.write( value );
for ( String s : other ) {
writer.write( s );
}
writer.write( "\n" );
}
catch (IOException e) {
throw new RuntimeException( "Unable to create asciidoc output", e );
}
}
}
| AsciiDocWriter |
java | google__gson | gson/src/test/java/com/google/gson/VersionExclusionStrategyTest.java | {
"start": 995,
"end": 3541
} | class ____ {
private static final double VERSION = 5.0D;
private static void assertIncludesClass(Excluder excluder, Class<?> c) {
assertThat(excluder.excludeClass(c, true)).isFalse();
assertThat(excluder.excludeClass(c, false)).isFalse();
}
private static void assertExcludesClass(Excluder excluder, Class<?> c) {
assertThat(excluder.excludeClass(c, true)).isTrue();
assertThat(excluder.excludeClass(c, false)).isTrue();
}
private static void assertIncludesField(Excluder excluder, Field f) {
assertThat(excluder.excludeField(f, true)).isFalse();
assertThat(excluder.excludeField(f, false)).isFalse();
}
private static void assertExcludesField(Excluder excluder, Field f) {
assertThat(excluder.excludeField(f, true)).isTrue();
assertThat(excluder.excludeField(f, false)).isTrue();
}
@Test
public void testSameVersion() throws Exception {
Excluder excluder = Excluder.DEFAULT.withVersion(VERSION);
assertIncludesClass(excluder, MockClassSince.class);
assertIncludesField(excluder, MockClassSince.class.getField("someField"));
// Until version is exclusive
assertExcludesClass(excluder, MockClassUntil.class);
assertExcludesField(excluder, MockClassUntil.class.getField("someField"));
assertIncludesClass(excluder, MockClassBoth.class);
assertIncludesField(excluder, MockClassBoth.class.getField("someField"));
}
@Test
public void testNewerVersion() throws Exception {
Excluder excluder = Excluder.DEFAULT.withVersion(VERSION + 5);
assertIncludesClass(excluder, MockClassSince.class);
assertIncludesField(excluder, MockClassSince.class.getField("someField"));
assertExcludesClass(excluder, MockClassUntil.class);
assertExcludesField(excluder, MockClassUntil.class.getField("someField"));
assertExcludesClass(excluder, MockClassBoth.class);
assertExcludesField(excluder, MockClassBoth.class.getField("someField"));
}
@Test
public void testOlderVersion() throws Exception {
Excluder excluder = Excluder.DEFAULT.withVersion(VERSION - 5);
assertExcludesClass(excluder, MockClassSince.class);
assertExcludesField(excluder, MockClassSince.class.getField("someField"));
assertIncludesClass(excluder, MockClassUntil.class);
assertIncludesField(excluder, MockClassUntil.class.getField("someField"));
assertExcludesClass(excluder, MockClassBoth.class);
assertExcludesField(excluder, MockClassBoth.class.getField("someField"));
}
@Since(VERSION)
private static | VersionExclusionStrategyTest |
java | google__guava | android/guava-tests/test/com/google/common/eventbus/SubscriberRegistryTest.java | {
"start": 4849,
"end": 4949
} | class ____ {
@Subscribe
public void handle(Integer i) {}
}
public static | IntegerSubscriber |
java | apache__camel | core/camel-main/src/main/java/org/apache/camel/main/MainListenerSupport.java | {
"start": 856,
"end": 937
} | class ____ {@link org.apache.camel.main.MainListener} implementations.
*/
public | for |
java | apache__flink | flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStPathContainer.java | {
"start": 1046,
"end": 6130
} | class ____ {
private static final Logger LOG = LoggerFactory.getLogger(ForStResourceContainer.class);
public static final String DB_DIR_STRING = "db";
/**
* Local job path. This indicates the parent directory of ForSt, which ends with the Flink
* JobID.
*/
@Nullable private final Path localJobPath;
/**
* Local base path. This includes the information of the subtask that holds ForSt, such as the
* Operator Identifier and subtask index.
*/
@Nullable private final Path localBasePath;
/** Local ForSt path. This is the directory of ForSt DB, which ends with 'db'. */
@Nullable private final Path localForStPath;
/**
* Remote paths of ForSt. Similar to the respective Path mentioned above, but located under the
* remote parent path.
*/
@Nullable private final Path remoteJobPath;
@Nullable private final Path remoteBasePath;
@Nullable private final Path remoteForStPath;
public static ForStPathContainer empty() {
return of(null, null, null, null);
}
public static ForStPathContainer ofLocal(
@Nullable Path localJobPath, @Nullable Path localBasePath) {
return new ForStPathContainer(localJobPath, localBasePath, null, null);
}
public static ForStPathContainer of(
@Nullable Path localJobPath,
@Nullable Path localBasePath,
@Nullable Path remoteJobPath,
@Nullable Path remoteBasePath) {
return new ForStPathContainer(localJobPath, localBasePath, remoteJobPath, remoteBasePath);
}
public ForStPathContainer(
@Nullable Path localJobPath,
@Nullable Path localBasePath,
@Nullable Path remoteJobPath,
@Nullable Path remoteBasePath) {
this.localJobPath = localJobPath;
this.localBasePath = localBasePath;
this.localForStPath = localBasePath != null ? new Path(localBasePath, DB_DIR_STRING) : null;
this.remoteJobPath = remoteJobPath;
this.remoteBasePath = remoteBasePath;
this.remoteForStPath =
remoteBasePath != null ? new Path(remoteBasePath, DB_DIR_STRING) : null;
LOG.info(
"ForStPathContainer: localJobPath: {}, localBasePath: {}, localForStPath:{}, remoteJobPath: {}, remoteBasePath: {}, remoteForStPath: {}",
localJobPath,
localBasePath,
localForStPath,
remoteJobPath,
remoteBasePath,
remoteForStPath);
}
public @Nullable Path getLocalJobPath() {
return localJobPath;
}
public @Nullable Path getLocalBasePath() {
return localBasePath;
}
public @Nullable Path getLocalForStPath() {
return localForStPath;
}
public @Nullable Path getRemoteJobPath() {
return remoteJobPath;
}
public @Nullable Path getRemoteBasePath() {
return remoteBasePath;
}
public @Nullable Path getRemoteForStPath() {
return remoteForStPath;
}
public Path getJobPath() {
if (remoteJobPath != null) {
return remoteJobPath;
} else {
return localJobPath;
}
}
public Path getBasePath() {
if (remoteBasePath != null) {
return remoteBasePath;
} else {
return localBasePath;
}
}
public Path getDbPath() {
if (remoteForStPath != null) {
return remoteForStPath;
} else {
return localForStPath;
}
}
@Override
public String toString() {
return "ForStPathContainer(localJobPath = ["
+ localJobPath
+ "] localBasePath = ["
+ localBasePath
+ "] localForStPath = ["
+ localForStPath
+ "] remoteJobPath = ["
+ remoteJobPath
+ "] remoteBasePath = ["
+ remoteBasePath
+ "] remoteForStPath = ["
+ remoteForStPath
+ "])";
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
ForStPathContainer that = (ForStPathContainer) o;
return Objects.equals(localJobPath, that.localJobPath)
&& Objects.equals(localBasePath, that.localBasePath)
&& Objects.equals(localForStPath, that.localForStPath)
&& Objects.equals(remoteJobPath, that.remoteJobPath)
&& Objects.equals(remoteBasePath, that.remoteBasePath)
&& Objects.equals(remoteForStPath, that.remoteForStPath);
}
@Override
public int hashCode() {
return Objects.hash(
localJobPath,
localBasePath,
localForStPath,
remoteJobPath,
remoteBasePath,
remoteForStPath);
}
}
| ForStPathContainer |
java | elastic__elasticsearch | x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/generator/command/pipe/DropGenerator.java | {
"start": 837,
"end": 3835
} | class ____ implements CommandGenerator {
public static final String DROP = "drop";
public static final String DROPPED_COLUMNS = "dropped_columns";
public static final CommandGenerator INSTANCE = new DropGenerator();
@Override
public CommandDescription generate(
List<CommandDescription> previousCommands,
List<Column> previousOutput,
QuerySchema schema,
QueryExecutor executor
) {
if (previousOutput.size() < 2) {
return CommandGenerator.EMPTY_DESCRIPTION; // don't drop all of them, just do nothing
}
Set<String> droppedColumns = new HashSet<>();
int n = randomIntBetween(1, previousOutput.size() - 1);
Set<String> proj = new HashSet<>();
for (int i = 0; i < n; i++) {
String name = EsqlQueryGenerator.randomRawName(previousOutput);
if (name == null) {
continue;
}
if (name.length() > 1 && name.startsWith("`") == false && randomIntBetween(0, 100) < 10) {
if (randomBoolean()) {
name = name.substring(0, randomIntBetween(1, name.length() - 1)) + "*";
} else {
name = "*" + name.substring(randomIntBetween(1, name.length() - 1));
}
} else if (name.startsWith("`") == false && (randomBoolean() || name.isEmpty())) {
name = "`" + name + "`";
}
proj.add(name);
droppedColumns.add(EsqlQueryGenerator.unquote(name));
}
if (proj.isEmpty()) {
return CommandGenerator.EMPTY_DESCRIPTION;
}
String cmdString = " | drop " + proj.stream().collect(Collectors.joining(", "));
return new CommandDescription(DROP, this, cmdString, Map.ofEntries(Map.entry(DROPPED_COLUMNS, droppedColumns)));
}
@Override
@SuppressWarnings("unchecked")
public ValidationResult validateOutput(
List<CommandDescription> previousCommands,
CommandDescription commandDescription,
List<Column> previousColumns,
List<List<Object>> previousOutput,
List<Column> columns,
List<List<Object>> output
) {
if (commandDescription == EMPTY_DESCRIPTION) {
return VALIDATION_OK;
}
Set<String> droppedColumns = (Set<String>) commandDescription.context().get(DROPPED_COLUMNS);
List<String> resultColNames = columns.stream().map(Column::name).toList();
// expected column names are unquoted already
for (String droppedColumn : droppedColumns) {
if (resultColNames.contains(droppedColumn)) {
return new ValidationResult(false, "Column [" + droppedColumn + "] was not dropped");
}
}
// TODO awaits fix https://github.com/elastic/elasticsearch/issues/120272
// return CommandGenerator.expectSameRowCount(previousOutput, output);
return VALIDATION_OK;
}
}
| DropGenerator |
java | quarkusio__quarkus | extensions/funqy/funqy-knative-events/deployment/src/test/java/io/quarkus/funqy/test/Overloading.java | {
"start": 70,
"end": 273
} | class ____ {
@Funq("intfun")
public int function(int i) {
return i * 2;
}
@Funq("strfun")
public String function(String s) {
return s.toUpperCase();
}
}
| Overloading |
java | google__error-prone | check_api/src/main/java/com/google/errorprone/matchers/JUnitMatchers.java | {
"start": 3230,
"end": 5198
} | class ____ {
public static final String JUNIT4_TEST_ANNOTATION = "org.junit.Test";
public static final String JUNIT4_THEORY_ANNOTATION = "org.junit.experimental.theories.Theory";
public static final String JUNIT_BEFORE_ANNOTATION = "org.junit.Before";
public static final String JUNIT_AFTER_ANNOTATION = "org.junit.After";
public static final String JUNIT_BEFORE_CLASS_ANNOTATION = "org.junit.BeforeClass";
public static final String JUNIT_AFTER_CLASS_ANNOTATION = "org.junit.AfterClass";
public static final String JUNIT4_RUN_WITH_ANNOTATION = "org.junit.runner.RunWith";
private static final String JUNIT3_TEST_CASE_CLASS = "junit.framework.TestCase";
private static final String JUNIT4_IGNORE_ANNOTATION = "org.junit.Ignore";
/**
* Checks if a method, or any overridden method, is annotated with any annotation from the
* org.junit package.
*/
public static boolean hasJUnitAnnotation(MethodTree tree, VisitorState state) {
MethodSymbol methodSym = getSymbol(tree);
if (methodSym == null) {
return false;
}
if (hasJUnitAttr(methodSym)) {
return true;
}
return streamSuperMethods(methodSym, state.getTypes()).anyMatch(JUnitMatchers::hasJUnitAttr);
}
/** Checks if a method symbol has any attribute from the org.junit package. */
private static boolean hasJUnitAttr(MethodSymbol methodSym) {
return methodSym.getRawAttributes().stream()
.anyMatch(attr -> attr.type.tsym.getQualifiedName().toString().startsWith("org.junit."));
}
public static final Matcher<MethodTree> hasJUnit4BeforeAnnotations =
anyOf(
hasAnnotationOnAnyOverriddenMethod(JUNIT_BEFORE_ANNOTATION),
hasAnnotation(JUNIT_BEFORE_CLASS_ANNOTATION));
public static final Matcher<MethodTree> hasJUnit4AfterAnnotations =
anyOf(
hasAnnotationOnAnyOverriddenMethod(JUNIT_AFTER_ANNOTATION),
hasAnnotation(JUNIT_AFTER_CLASS_ANNOTATION));
/** Matches a | JUnitMatchers |
java | google__guava | guava-tests/benchmark/com/google/common/collect/ImmutableSetHashFloodingDetectionBenchmark.java | {
"start": 1726,
"end": 6989
} | enum ____ {
EXHAUSTIVE {
int maxRunBeforeFallback(int tableSize) {
return 12 * IntMath.log2(tableSize, RoundingMode.UNNECESSARY);
}
@Override
boolean hashFloodingDetected(Object[] hashTable) {
int maxRunBeforeFallback = maxRunBeforeFallback(hashTable.length);
// Test for a run wrapping around the end of the table, then check for runs in the middle.
int endOfStartRun;
for (endOfStartRun = 0; endOfStartRun < hashTable.length; ) {
if (hashTable[endOfStartRun] == null) {
break;
}
endOfStartRun++;
if (endOfStartRun > maxRunBeforeFallback) {
return true;
}
}
int startOfEndRun;
for (startOfEndRun = hashTable.length - 1; startOfEndRun > endOfStartRun; startOfEndRun--) {
if (hashTable[startOfEndRun] == null) {
break;
}
if (endOfStartRun + (hashTable.length - 1 - startOfEndRun) > maxRunBeforeFallback) {
return true;
}
}
for (int i = endOfStartRun + 1; i < startOfEndRun; i++) {
for (int runLength = 0; i < startOfEndRun && hashTable[i] != null; i++) {
runLength++;
if (runLength > maxRunBeforeFallback) {
return true;
}
}
}
return false;
}
},
SEPARATE_RANGES {
int maxRunBeforeFallback(int tableSize) {
return 13 * IntMath.log2(tableSize, RoundingMode.UNNECESSARY);
}
@Override
boolean hashFloodingDetected(Object[] hashTable) {
int maxRunBeforeFallback = maxRunBeforeFallback(hashTable.length);
// Test for a run wrapping around the end of the table, then check for runs in the middle.
int endOfStartRun;
for (endOfStartRun = 0; endOfStartRun < hashTable.length; ) {
if (hashTable[endOfStartRun] == null) {
break;
}
endOfStartRun++;
if (endOfStartRun > maxRunBeforeFallback) {
return true;
}
}
int startOfEndRun;
for (startOfEndRun = hashTable.length - 1; startOfEndRun > endOfStartRun; startOfEndRun--) {
if (hashTable[startOfEndRun] == null) {
break;
}
if (endOfStartRun + (hashTable.length - 1 - startOfEndRun) > maxRunBeforeFallback) {
return true;
}
}
// If this part returns true, there is definitely a run of size maxRunBeforeFallback/2.
// If this part returns false, there are definitely no runs of size >= maxRunBeforeFallback.
int testBlockSize = maxRunBeforeFallback / 2;
for (int i = endOfStartRun + 1; i + testBlockSize <= startOfEndRun; i += testBlockSize) {
boolean runGood = false;
for (int j = 0; j < testBlockSize; j++) {
if (hashTable[i + j] == null) {
runGood = true;
break;
}
}
if (!runGood) {
return true;
}
}
return false;
}
},
SKIPPING {
int maxRunBeforeFallback(int tableSize) {
return 13 * IntMath.log2(tableSize, RoundingMode.UNNECESSARY);
}
@Override
boolean hashFloodingDetected(Object[] hashTable) {
int maxRunBeforeFallback = maxRunBeforeFallback(hashTable.length);
int mask = hashTable.length - 1;
// Invariant: all elements at indices in [knownRunStart, knownRunEnd) are nonnull.
// If knownRunStart == knownRunEnd, this is vacuously true.
// When knownRunEnd exceeds hashTable.length, it "wraps", detecting runs around the end
// of the table.
int knownRunStart = 0;
int knownRunEnd = 0;
outerLoop:
while (knownRunStart < hashTable.length) {
if (knownRunStart == knownRunEnd && hashTable[knownRunStart] == null) {
if (hashTable[(knownRunStart + maxRunBeforeFallback - 1) & mask] == null) {
// There are only maxRunBeforeFallback - 1 elements between here and there,
// so even if they were all nonnull, we wouldn't detect a hash flood. Therefore,
// we can skip them all.
knownRunStart += maxRunBeforeFallback;
} else {
knownRunStart++; // the only case in which maxRunEnd doesn't increase by mRBF
// happens about f * (1-f) for f = DESIRED_LOAD_FACTOR, so around 21% of the time
}
knownRunEnd = knownRunStart;
} else {
for (int j = knownRunStart + maxRunBeforeFallback - 1; j >= knownRunEnd; j--) {
if (hashTable[j & mask] == null) {
knownRunEnd = knownRunStart + maxRunBeforeFallback;
knownRunStart = j + 1;
continue outerLoop;
}
}
return true;
}
}
return false;
}
};
abstract boolean hashFloodingDetected(Object[] array);
}
@Benchmark
public int detect(int reps) {
int count = 0;
for (int i = 0; i < reps; i++) {
if (impl.hashFloodingDetected(tables[i & 0xFF])) {
count++;
}
}
return count;
}
}
| Impl |
java | spring-projects__spring-framework | spring-web/src/main/java/org/springframework/web/service/annotation/HttpExchange.java | {
"start": 1398,
"end": 5662
} | interface ____ be passed to
* {@link org.springframework.web.service.invoker.HttpServiceProxyFactory}
* to create a client proxy. It can also be implemented by an
* {@link org.springframework.stereotype.Controller @Controller} for server
* handling. For more details in comparison to {@code @RequestMapping}, see the
* <a href="https://docs.spring.io/spring-framework/reference/web/webmvc/mvc-controller/ann-requestmapping.html#mvc-ann-httpexchange-annotation">reference docs</a>.
*
* <p>Supported at the type level to express common attributes, to be inherited
* by all methods, such as a base URL path. At the method level, it's more common
* to use one of the following HTTP method specific, shortcut annotations, each
* of which is itself <em>meta-annotated</em> with {@code HttpExchange}:
*
* <ul>
* <li>{@link GetExchange}
* <li>{@link PostExchange}
* <li>{@link PutExchange}
* <li>{@link PatchExchange}
* <li>{@link DeleteExchange}
* </ul>
*
* <p>Supported method arguments:
* <table border="1">
* <tr>
* <th>Method Argument</th>
* <th>Description</th>
* <th>Resolver</th>
* </tr>
* <tr>
* <td>{@link java.net.URI URI}</td>
* <td>Dynamically set the URL for the request, overriding the annotation's
* {@link #url()} attribute</td>
* <td>{@link org.springframework.web.service.invoker.UrlArgumentResolver}</td>
* </tr>
* <tr>
* <td>{@link UriBuilderFactory}</td>
* <td>Dynamically set the {@code base URI} for the request, overriding the
* one from the annotation's {@link #url()} attribute, while keeping the
* subsequent path segments as defined there</td>
* <td>{@link org.springframework.web.service.invoker.UriBuilderFactoryArgumentResolver}</td>
* </tr>
* <tr>
* <td>{@link org.springframework.http.HttpMethod HttpMethod}</td>
* <td>Dynamically set the HTTP method for the request, overriding the annotation's
* {@link #method()} attribute</td>
* <td>{@link org.springframework.web.service.invoker.HttpMethodArgumentResolver
* HttpMethodArgumentResolver}</td>
* </tr>
* <tr>
* <td>{@link org.springframework.web.bind.annotation.RequestHeader @RequestHeader}</td>
* <td>Add a request header</td>
* <td>{@link org.springframework.web.service.invoker.RequestHeaderArgumentResolver
* RequestHeaderArgumentResolver}</td>
* </tr>
* <tr>
* <td>{@link org.springframework.web.bind.annotation.PathVariable @PathVariable}</td>
* <td>Add a path variable for the URI template</td>
* <td>{@link org.springframework.web.service.invoker.PathVariableArgumentResolver
* PathVariableArgumentResolver}</td>
* </tr>
* <tr>
* <td>{@link org.springframework.web.bind.annotation.RequestBody @RequestBody}</td>
* <td>Set the body of the request</td>
* <td>{@link org.springframework.web.service.invoker.RequestBodyArgumentResolver
* RequestBodyArgumentResolver}</td>
* </tr>
* <tr>
* <td>{@link org.springframework.web.bind.annotation.RequestParam @RequestParam}</td>
* <td>Add a request parameter, either form data if {@code "Content-Type"} is
* {@code "application/x-www-form-urlencoded"} or query params otherwise</td>
* <td>{@link org.springframework.web.service.invoker.RequestParamArgumentResolver
* RequestParamArgumentResolver}</td>
* </tr>
* <tr>
* <td>{@link org.springframework.web.bind.annotation.RequestPart @RequestPart}</td>
* <td>Add a request part, which may be a String (form field),
* {@link org.springframework.core.io.Resource} (file part), Object (entity to be
* encoded, for example, as JSON), {@link HttpEntity} (part content and headers), a
* {@link org.springframework.http.codec.multipart.Part}, or a
* {@link org.reactivestreams.Publisher} of any of the above.
* (</td>
* <td>{@link org.springframework.web.service.invoker.RequestPartArgumentResolver
* RequestPartArgumentResolver}</td>
* </tr>
* <tr>
* <td>{@link org.springframework.web.bind.annotation.CookieValue @CookieValue}</td>
* <td>Add a cookie</td>
* <td>{@link org.springframework.web.service.invoker.CookieValueArgumentResolver
* CookieValueArgumentResolver}</td>
* </tr>
* </table>
*
* @author Rossen Stoyanchev
* @since 6.0
*/
@Target({ElementType.TYPE, ElementType.METHOD})
@Retention(RetentionPolicy.RUNTIME)
@Documented
@Mapping
@Reflective(HttpExchangeReflectiveProcessor.class)
public @ | can |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/hhh14276/entity/PlayerStatId.java | {
"start": 330,
"end": 905
} | class
____ ScoreId score;
public PlayerStatId() {
}
public Integer getGameId() {
return score.getGameId();
}
public void setGameId(Integer gameId) {
score.setGameId( gameId );
}
public Boolean getHome() {
return score.getHome();
}
public void setHome(Boolean home) {
score.setHome( home );
}
public Integer getPlayerId() {
return playerId;
}
public void setPlayerId(Integer playerId) {
this.playerId = playerId;
}
public ScoreId getScoreId() {
return score;
}
public void setScoreId(ScoreId scoreId) {
this.score = scoreId;
}
}
| private |
java | junit-team__junit5 | platform-tests/src/test/java/org/junit/platform/suite/engine/SuiteLauncherDiscoveryRequestBuilderTests.java | {
"start": 23036,
"end": 23600
} | class ____ {
}
// @formatter:off
var configuration = new ParentConfigurationParameters("parent", "parent parameters were used");
var request = builder.applyConfigurationParametersFromSuite(Suite.class)
.parentConfigurationParameters(configuration)
.build();
// @formatter:on
var configurationParameters = request.getConfigurationParameters();
assertEquals(Optional.of("parent parameters were used"), configurationParameters.get("parent"));
}
@Test
void disableParentConfigurationParameters() {
@DisableParentConfigurationParameters
| Suite |
java | quarkusio__quarkus | integration-tests/injectmock/src/test/java/io/quarkus/it/mockbean/GreetingSingletonResourceTest.java | {
"start": 508,
"end": 2377
} | class ____ {
@InjectMock
@MockitoConfig(convertScopes = true)
MessageServiceSingleton messageService;
@InjectMock
@MockitoConfig(convertScopes = true)
SuffixServiceSingleton suffixService;
@InjectMock
@MockitoConfig(convertScopes = true)
CapitalizerServiceSingleton capitalizerService;
@Test
public void testGreet() {
Mockito.when(messageService.getMessage()).thenReturn("hi");
Mockito.when(suffixService.getSuffix()).thenReturn("!");
mockCapitalizerService();
given()
.when().get("/greetingSingleton")
.then()
.statusCode(200)
.body(is("hi!"));
}
@Test
public void testGreetAgain() {
Mockito.when(messageService.getMessage()).thenReturn("yolo");
Mockito.when(suffixService.getSuffix()).thenReturn("!!!");
mockCapitalizerService();
given()
.when().get("/greetingSingleton")
.then()
.statusCode(200)
.body(is("yolo!!!"));
}
@Test
public void testMocksNotSet() {
// when mocks are not configured, they return the Mockito default response
Assertions.assertNull(messageService.getMessage());
Assertions.assertNull(suffixService.getSuffix());
given()
.when().get("/greetingSingleton")
.then()
.statusCode(204);
}
private void mockCapitalizerService() {
Mockito.doAnswer(new Answer() { // don't upper case the string, leave it as it is
@Override
public Object answer(InvocationOnMock invocationOnMock) throws Throwable {
return invocationOnMock.getArgument(0);
}
}).when(capitalizerService).capitalize(anyString());
}
}
| GreetingSingletonResourceTest |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/web/server/OAuth2ResourceServerSpecTests.java | {
"start": 38315,
"end": 39067
} | class ____ {
private MockWebServer mockWebServer = new MockWebServer();
@Bean
SecurityWebFilterChain springSecurity(ServerHttpSecurity http) {
String introspectionUri = mockWebServer().url("/introspect").toString();
// @formatter:off
http
.oauth2ResourceServer((server) -> server
.opaqueToken((opaqueToken) -> opaqueToken
.introspectionUri(introspectionUri)
.introspectionClientCredentials("client", "secret")));
// @formatter:on
return http.build();
}
@Bean
MockWebServer mockWebServer() {
return this.mockWebServer;
}
@PreDestroy
void shutdown() throws IOException {
this.mockWebServer.shutdown();
}
}
@Configuration
@EnableWebFlux
@EnableWebFluxSecurity
static | IntrospectionConfig |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/stereotypes/TransitiveStereotypeTest.java | {
"start": 2958,
"end": 3090
} | interface ____ {
}
@MyStereotype
@Stereotype
@Target({ TYPE, METHOD, FIELD })
@Retention(RUNTIME)
@ | MyStereotype |
java | apache__kafka | storage/src/main/java/org/apache/kafka/storage/internals/utils/Throttler.java | {
"start": 1097,
"end": 1164
} | class ____ measure and throttle the rate of some process.
*/
public | to |
java | spring-projects__spring-framework | spring-aop/src/main/java/org/springframework/aop/target/CommonsPool2TargetSource.java | {
"start": 2561,
"end": 8982
} | class ____ extends AbstractPoolingTargetSource implements PooledObjectFactory<Object> {
private int maxIdle = GenericObjectPoolConfig.DEFAULT_MAX_IDLE;
private int minIdle = GenericObjectPoolConfig.DEFAULT_MIN_IDLE;
private long maxWait = GenericObjectPoolConfig.DEFAULT_MAX_WAIT_MILLIS;
private long timeBetweenEvictionRunsMillis = GenericObjectPoolConfig.DEFAULT_TIME_BETWEEN_EVICTION_RUNS_MILLIS;
private long minEvictableIdleTimeMillis = GenericObjectPoolConfig.DEFAULT_MIN_EVICTABLE_IDLE_TIME_MILLIS;
private boolean blockWhenExhausted = GenericObjectPoolConfig.DEFAULT_BLOCK_WHEN_EXHAUSTED;
/**
* The Apache Commons {@code ObjectPool} used to pool target objects.
*/
private @Nullable ObjectPool pool;
/**
* Create a CommonsPoolTargetSource with default settings.
* Default maximum size of the pool is 8.
* @see #setMaxSize
* @see GenericObjectPoolConfig#setMaxTotal
*/
public CommonsPool2TargetSource() {
setMaxSize(GenericObjectPoolConfig.DEFAULT_MAX_TOTAL);
}
/**
* Set the maximum number of idle objects in the pool.
* Default is 8.
* @see GenericObjectPool#setMaxIdle
*/
public void setMaxIdle(int maxIdle) {
this.maxIdle = maxIdle;
}
/**
* Return the maximum number of idle objects in the pool.
*/
public int getMaxIdle() {
return this.maxIdle;
}
/**
* Set the minimum number of idle objects in the pool.
* Default is 0.
* @see GenericObjectPool#setMinIdle
*/
public void setMinIdle(int minIdle) {
this.minIdle = minIdle;
}
/**
* Return the minimum number of idle objects in the pool.
*/
public int getMinIdle() {
return this.minIdle;
}
/**
* Set the maximum waiting time for fetching an object from the pool.
* Default is -1, waiting forever.
* @see GenericObjectPool#setMaxWaitMillis
*/
public void setMaxWait(long maxWait) {
this.maxWait = maxWait;
}
/**
* Return the maximum waiting time for fetching an object from the pool.
*/
public long getMaxWait() {
return this.maxWait;
}
/**
* Set the time between eviction runs that check idle objects whether
* they have been idle for too long or have become invalid.
* Default is -1, not performing any eviction.
* @see GenericObjectPool#setTimeBetweenEvictionRunsMillis
*/
public void setTimeBetweenEvictionRunsMillis(long timeBetweenEvictionRunsMillis) {
this.timeBetweenEvictionRunsMillis = timeBetweenEvictionRunsMillis;
}
/**
* Return the time between eviction runs that check idle objects.
*/
public long getTimeBetweenEvictionRunsMillis() {
return this.timeBetweenEvictionRunsMillis;
}
/**
* Set the minimum time that an idle object can sit in the pool before
* it becomes subject to eviction. Default is 1800000 (30 minutes).
* <p>Note that eviction runs need to be performed to take this
* setting into effect.
* @see #setTimeBetweenEvictionRunsMillis
* @see GenericObjectPool#setMinEvictableIdleTimeMillis
*/
public void setMinEvictableIdleTimeMillis(long minEvictableIdleTimeMillis) {
this.minEvictableIdleTimeMillis = minEvictableIdleTimeMillis;
}
/**
* Return the minimum time that an idle object can sit in the pool.
*/
public long getMinEvictableIdleTimeMillis() {
return this.minEvictableIdleTimeMillis;
}
/**
* Set whether the call should block when the pool is exhausted.
*/
public void setBlockWhenExhausted(boolean blockWhenExhausted) {
this.blockWhenExhausted = blockWhenExhausted;
}
/**
* Specify if the call should block when the pool is exhausted.
*/
public boolean isBlockWhenExhausted() {
return this.blockWhenExhausted;
}
/**
* Creates and holds an ObjectPool instance.
* @see #createObjectPool()
*/
@Override
protected final void createPool() {
logger.debug("Creating Commons object pool");
this.pool = createObjectPool();
}
/**
* Subclasses can override this if they want to return a specific Commons pool.
* They should apply any configuration properties to the pool here.
* <p>Default is a GenericObjectPool instance with the given pool size.
* @return an empty Commons {@code ObjectPool}.
* @see GenericObjectPool
* @see #setMaxSize
*/
protected ObjectPool createObjectPool() {
GenericObjectPoolConfig config = new GenericObjectPoolConfig();
config.setMaxTotal(getMaxSize());
config.setMaxIdle(getMaxIdle());
config.setMinIdle(getMinIdle());
config.setMaxWaitMillis(getMaxWait());
config.setTimeBetweenEvictionRunsMillis(getTimeBetweenEvictionRunsMillis());
config.setMinEvictableIdleTimeMillis(getMinEvictableIdleTimeMillis());
config.setBlockWhenExhausted(isBlockWhenExhausted());
return new GenericObjectPool(this, config);
}
/**
* Borrows an object from the {@code ObjectPool}.
*/
@Override
public Object getTarget() throws Exception {
Assert.state(this.pool != null, "No Commons ObjectPool available");
return this.pool.borrowObject();
}
/**
* Returns the specified object to the underlying {@code ObjectPool}.
*/
@Override
public void releaseTarget(Object target) throws Exception {
if (this.pool != null) {
this.pool.returnObject(target);
}
}
@Override
public int getActiveCount() throws UnsupportedOperationException {
return (this.pool != null ? this.pool.getNumActive() : 0);
}
@Override
public int getIdleCount() throws UnsupportedOperationException {
return (this.pool != null ? this.pool.getNumIdle() : 0);
}
/**
* Closes the underlying {@code ObjectPool} when destroying this object.
*/
@Override
public void destroy() throws Exception {
if (this.pool != null) {
logger.debug("Closing Commons ObjectPool");
this.pool.close();
}
}
//----------------------------------------------------------------------------
// Implementation of org.apache.commons.pool2.PooledObjectFactory interface
//----------------------------------------------------------------------------
@Override
public PooledObject<Object> makeObject() throws Exception {
return new DefaultPooledObject<>(newPrototypeInstance());
}
@Override
public void destroyObject(PooledObject<Object> p) throws Exception {
destroyPrototypeInstance(p.getObject());
}
@Override
public boolean validateObject(PooledObject<Object> p) {
return true;
}
@Override
public void activateObject(PooledObject<Object> p) throws Exception {
}
@Override
public void passivateObject(PooledObject<Object> p) throws Exception {
}
}
| CommonsPool2TargetSource |
java | assertj__assertj-core | assertj-core/src/main/java/org/assertj/core/error/MatcherShouldHaveGroup.java | {
"start": 809,
"end": 1404
} | class ____ extends BasicErrorMessageFactory {
private MatcherShouldHaveGroup(Matcher matcher, Object groupIdentifier) {
super("%nExpecting %s to have group %s", matcher, groupIdentifier);
}
/**
* Indicates that the provided {@link Matcher} was expected to have a named or numbered group.
*
* @param matcher the actual {@link Matcher} to test.
* @return an error message factory.
*/
public static MatcherShouldHaveGroup shouldHaveGroup(Matcher matcher, Object groupIdentifier) {
return new MatcherShouldHaveGroup(matcher, groupIdentifier);
}
}
| MatcherShouldHaveGroup |
java | quarkusio__quarkus | extensions/devui/deployment/src/main/java/io/quarkus/devui/deployment/menu/ReadmeProcessor.java | {
"start": 568,
"end": 2020
} | class ____ {
private static final String NS = "devui-readme";
@BuildStep(onlyIf = IsLocalDevelopment.class)
void createReadmePage(BuildProducer<InternalPageBuildItem> internalPageProducer) {
String readme = getContents("README.md")
.orElse(getContents("readme.md")
.orElse(null));
if (readme != null) {
InternalPageBuildItem readmePage = new InternalPageBuildItem("Readme", 51);
readmePage.addBuildTimeData("readme", readme, "The current readme of this Quarkus Application.", "text/markdown");
readmePage.addPage(Page.webComponentPageBuilder()
.namespace(NS)
.title("Readme")
.icon("font-awesome-brands:readme")
.componentLink("qwc-readme.js"));
internalPageProducer.produce(readmePage);
}
}
@BuildStep(onlyIf = IsLocalDevelopment.class)
JsonRPCProvidersBuildItem createJsonRPCServiceForCache() {
return new JsonRPCProvidersBuildItem(NS, ReadmeJsonRPCService.class);
}
private Optional<String> getContents(String name) {
Path p = Path.of(name);
if (Files.exists(p)) {
try {
return Optional.of(Files.readString(p));
} catch (IOException ex) {
ex.printStackTrace();
}
}
return Optional.empty();
}
}
| ReadmeProcessor |
java | apache__camel | components/camel-kafka/src/test/java/org/apache/camel/component/kafka/integration/KafkaConsumerRebalanceIT.java | {
"start": 2849,
"end": 3733
} | class ____ implements StateRepository<String, String> {
private static final Logger LOG = LoggerFactory.getLogger(OffsetStateRepository.class);
final CountDownLatch messagesLatch;
public OffsetStateRepository(CountDownLatch messagesLatch) {
this.messagesLatch = messagesLatch;
}
@Override
public void start() {
}
@Override
public void stop() {
}
@Override
public String getState(String key) {
LOG.debug("Getting the state for {} from topic {}", key, TOPIC);
if (key.contains(TOPIC)) {
LOG.trace("Topic matches, counting down");
messagesLatch.countDown();
}
return "-1";
}
@Override
public void setState(String key, String value) {
}
}
}
| OffsetStateRepository |
java | spring-projects__spring-boot | test-support/spring-boot-docker-test-support/src/main/java/org/springframework/boot/testsupport/container/SymptomaActiveMQContainer.java | {
"start": 894,
"end": 1164
} | class ____ extends GenericContainer<SymptomaActiveMQContainer> {
private static final int DEFAULT_PORT = 61616;
public SymptomaActiveMQContainer(DockerImageName dockerImageName) {
super(dockerImageName);
addExposedPorts(DEFAULT_PORT);
}
}
| SymptomaActiveMQContainer |
java | apache__commons-lang | src/main/java/org/apache/commons/lang3/SystemUtils.java | {
"start": 34295,
"end": 34791
} | class ____ loaded.
* </p>
*/
public static final boolean IS_JAVA_1_2 = getJavaVersionMatches("1.2");
/**
* The constant {@code true} if this is Java version 1.3 (also 1.3.x versions).
* <p>
* The result depends on the value of the {@link #JAVA_SPECIFICATION_VERSION} constant.
* </p>
* <p>
* The field will return {@code false} if {@link #JAVA_SPECIFICATION_VERSION} is {@code null}.
* </p>
* <p>
* This value is initialized when the | is |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/SearchableSnapshotShardStats.java | {
"start": 4492,
"end": 18844
} | class ____ implements Writeable, ToXContentObject {
private final String fileExt;
private final long numFiles;
private final ByteSizeValue totalSize;
private final ByteSizeValue minSize;
private final ByteSizeValue maxSize;
private final long openCount;
private final long closeCount;
private final Counter forwardSmallSeeks;
private final Counter backwardSmallSeeks;
private final Counter forwardLargeSeeks;
private final Counter backwardLargeSeeks;
private final Counter contiguousReads;
private final Counter nonContiguousReads;
private final Counter cachedBytesRead;
private final Counter indexCacheBytesRead;
private final TimedCounter cachedBytesWritten;
private final TimedCounter directBytesRead;
private final TimedCounter optimizedBytesRead;
private final Counter blobStoreBytesRequested;
private final Counter luceneBytesRead;
private final long currentIndexCacheFills;
public CacheIndexInputStats(
String fileExt,
long numFiles,
ByteSizeValue totalSize,
ByteSizeValue minSize,
ByteSizeValue maxSize,
long openCount,
long closeCount,
Counter forwardSmallSeeks,
Counter backwardSmallSeeks,
Counter forwardLargeSeeks,
Counter backwardLargeSeeks,
Counter contiguousReads,
Counter nonContiguousReads,
Counter cachedBytesRead,
Counter indexCacheBytesRead,
TimedCounter cachedBytesWritten,
TimedCounter directBytesRead,
TimedCounter optimizedBytesRead,
Counter blobStoreBytesRequested,
Counter luceneBytesRead,
long currentIndexCacheFills
) {
this.fileExt = fileExt;
this.numFiles = numFiles;
this.totalSize = totalSize;
this.minSize = minSize;
this.maxSize = maxSize;
this.openCount = openCount;
this.closeCount = closeCount;
this.forwardSmallSeeks = forwardSmallSeeks;
this.backwardSmallSeeks = backwardSmallSeeks;
this.forwardLargeSeeks = forwardLargeSeeks;
this.backwardLargeSeeks = backwardLargeSeeks;
this.contiguousReads = contiguousReads;
this.nonContiguousReads = nonContiguousReads;
this.cachedBytesRead = cachedBytesRead;
this.indexCacheBytesRead = indexCacheBytesRead;
this.cachedBytesWritten = cachedBytesWritten;
this.directBytesRead = directBytesRead;
this.optimizedBytesRead = optimizedBytesRead;
this.blobStoreBytesRequested = blobStoreBytesRequested;
this.luceneBytesRead = luceneBytesRead;
this.currentIndexCacheFills = currentIndexCacheFills;
}
CacheIndexInputStats(final StreamInput in) throws IOException {
this.fileExt = in.readString();
this.numFiles = in.readVLong();
this.totalSize = ByteSizeValue.readFrom(in);
this.minSize = ByteSizeValue.readFrom(in);
this.maxSize = ByteSizeValue.readFrom(in);
this.openCount = in.readVLong();
this.closeCount = in.readVLong();
this.forwardSmallSeeks = new Counter(in);
this.backwardSmallSeeks = new Counter(in);
this.forwardLargeSeeks = new Counter(in);
this.backwardLargeSeeks = new Counter(in);
this.contiguousReads = new Counter(in);
this.nonContiguousReads = new Counter(in);
this.cachedBytesRead = new Counter(in);
this.indexCacheBytesRead = new Counter(in);
this.cachedBytesWritten = new TimedCounter(in);
this.directBytesRead = new TimedCounter(in);
this.optimizedBytesRead = new TimedCounter(in);
this.blobStoreBytesRequested = new Counter(in);
this.luceneBytesRead = new Counter(in);
this.currentIndexCacheFills = in.readVLong();
}
public static CacheIndexInputStats combine(CacheIndexInputStats cis1, CacheIndexInputStats cis2) {
if (cis1.getFileExt().equals(cis2.getFileExt()) == false) {
assert false : "can only combine same file extensions";
throw new IllegalArgumentException(
"can only combine same file extensions but was " + cis1.fileExt + " and " + cis2.fileExt
);
}
return new CacheIndexInputStats(
cis1.fileExt,
cis1.numFiles + cis2.numFiles,
ByteSizeValue.ofBytes(Math.addExact(cis1.totalSize.getBytes(), cis2.totalSize.getBytes())),
ByteSizeValue.ofBytes(Math.min(cis1.minSize.getBytes(), cis2.minSize.getBytes())),
ByteSizeValue.ofBytes(Math.max(cis1.maxSize.getBytes(), cis2.maxSize.getBytes())),
cis1.openCount + cis2.openCount,
cis1.closeCount + cis2.closeCount,
cis1.forwardSmallSeeks.add(cis2.forwardSmallSeeks),
cis1.backwardSmallSeeks.add(cis2.backwardSmallSeeks),
cis1.forwardLargeSeeks.add(cis2.forwardLargeSeeks),
cis1.backwardLargeSeeks.add(cis2.backwardLargeSeeks),
cis1.contiguousReads.add(cis2.contiguousReads),
cis1.nonContiguousReads.add(cis2.nonContiguousReads),
cis1.cachedBytesRead.add(cis2.cachedBytesRead),
cis1.indexCacheBytesRead.add(cis2.indexCacheBytesRead),
cis1.cachedBytesWritten.add(cis2.cachedBytesWritten),
cis1.directBytesRead.add(cis2.directBytesRead),
cis1.optimizedBytesRead.add(cis2.optimizedBytesRead),
cis1.blobStoreBytesRequested.add(cis2.blobStoreBytesRequested),
cis1.luceneBytesRead.add(cis2.luceneBytesRead),
cis1.currentIndexCacheFills + cis2.currentIndexCacheFills
);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(fileExt);
out.writeVLong(numFiles);
totalSize.writeTo(out);
minSize.writeTo(out);
maxSize.writeTo(out);
out.writeVLong(openCount);
out.writeVLong(closeCount);
forwardSmallSeeks.writeTo(out);
backwardSmallSeeks.writeTo(out);
forwardLargeSeeks.writeTo(out);
backwardLargeSeeks.writeTo(out);
contiguousReads.writeTo(out);
nonContiguousReads.writeTo(out);
cachedBytesRead.writeTo(out);
indexCacheBytesRead.writeTo(out);
cachedBytesWritten.writeTo(out);
directBytesRead.writeTo(out);
optimizedBytesRead.writeTo(out);
blobStoreBytesRequested.writeTo(out);
luceneBytesRead.writeTo(out);
out.writeVLong(currentIndexCacheFills);
}
public String getFileExt() {
return fileExt;
}
public long getNumFiles() {
return numFiles;
}
public ByteSizeValue getTotalSize() {
return totalSize;
}
public ByteSizeValue getMinSize() {
return minSize;
}
public ByteSizeValue getMaxSize() {
return maxSize;
}
public ByteSizeValue getAverageSize() {
final double average = (double) totalSize.getBytes() / (double) numFiles;
return ByteSizeValue.ofBytes(Math.round(average));
}
public long getOpenCount() {
return openCount;
}
public long getCloseCount() {
return closeCount;
}
public Counter getForwardSmallSeeks() {
return forwardSmallSeeks;
}
public Counter getBackwardSmallSeeks() {
return backwardSmallSeeks;
}
public Counter getForwardLargeSeeks() {
return forwardLargeSeeks;
}
public Counter getBackwardLargeSeeks() {
return backwardLargeSeeks;
}
public Counter getContiguousReads() {
return contiguousReads;
}
public Counter getNonContiguousReads() {
return nonContiguousReads;
}
public Counter getCachedBytesRead() {
return cachedBytesRead;
}
public Counter getIndexCacheBytesRead() {
return indexCacheBytesRead;
}
public TimedCounter getCachedBytesWritten() {
return cachedBytesWritten;
}
public TimedCounter getDirectBytesRead() {
return directBytesRead;
}
public TimedCounter getOptimizedBytesRead() {
return optimizedBytesRead;
}
public Counter getBlobStoreBytesRequested() {
return blobStoreBytesRequested;
}
public Counter getLuceneBytesRead() {
return luceneBytesRead;
}
public long getCurrentIndexCacheFills() {
return currentIndexCacheFills;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
{
builder.field("file_ext", getFileExt());
builder.field("num_files", getNumFiles());
builder.field("open_count", getOpenCount());
builder.field("close_count", getCloseCount());
{
builder.startObject("size");
builder.humanReadableField("total_in_bytes", "total", getTotalSize());
builder.humanReadableField("min_in_bytes", "min", getMinSize());
builder.humanReadableField("max_in_bytes", "max", getMaxSize());
builder.humanReadableField("average_in_bytes", "average", getAverageSize());
builder.endObject();
}
builder.field("contiguous_bytes_read", getContiguousReads(), params);
builder.field("non_contiguous_bytes_read", getNonContiguousReads(), params);
builder.field("cached_bytes_read", getCachedBytesRead(), params);
builder.field("index_cache_bytes_read", getIndexCacheBytesRead(), params);
builder.field("cached_bytes_written", getCachedBytesWritten(), params);
builder.field("direct_bytes_read", getDirectBytesRead(), params);
builder.field("optimized_bytes_read", getOptimizedBytesRead(), params);
{
builder.startObject("forward_seeks");
builder.field("small", getForwardSmallSeeks(), params);
builder.field("large", getForwardLargeSeeks(), params);
builder.endObject();
}
{
builder.startObject("backward_seeks");
builder.field("small", getBackwardSmallSeeks(), params);
builder.field("large", getBackwardLargeSeeks(), params);
builder.endObject();
}
builder.field("blob_store_bytes_requested", getBlobStoreBytesRequested(), params);
builder.field("lucene_bytes_read", getLuceneBytesRead(), params);
builder.field("current_index_cache_fills", getCurrentIndexCacheFills());
}
return builder.endObject();
}
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
if (other == null || getClass() != other.getClass()) {
return false;
}
CacheIndexInputStats stats = (CacheIndexInputStats) other;
return numFiles == stats.numFiles
&& openCount == stats.openCount
&& closeCount == stats.closeCount
&& Objects.equals(fileExt, stats.fileExt)
&& Objects.equals(totalSize, stats.totalSize)
&& Objects.equals(minSize, stats.minSize)
&& Objects.equals(maxSize, stats.maxSize)
&& Objects.equals(forwardSmallSeeks, stats.forwardSmallSeeks)
&& Objects.equals(backwardSmallSeeks, stats.backwardSmallSeeks)
&& Objects.equals(forwardLargeSeeks, stats.forwardLargeSeeks)
&& Objects.equals(backwardLargeSeeks, stats.backwardLargeSeeks)
&& Objects.equals(contiguousReads, stats.contiguousReads)
&& Objects.equals(nonContiguousReads, stats.nonContiguousReads)
&& Objects.equals(cachedBytesRead, stats.cachedBytesRead)
&& Objects.equals(indexCacheBytesRead, stats.indexCacheBytesRead)
&& Objects.equals(cachedBytesWritten, stats.cachedBytesWritten)
&& Objects.equals(directBytesRead, stats.directBytesRead)
&& Objects.equals(optimizedBytesRead, stats.optimizedBytesRead)
&& Objects.equals(blobStoreBytesRequested, stats.blobStoreBytesRequested)
&& Objects.equals(luceneBytesRead, stats.luceneBytesRead)
&& currentIndexCacheFills == stats.currentIndexCacheFills;
}
@Override
public int hashCode() {
return Objects.hash(
fileExt,
numFiles,
totalSize,
minSize,
maxSize,
openCount,
closeCount,
forwardSmallSeeks,
backwardSmallSeeks,
forwardLargeSeeks,
backwardLargeSeeks,
contiguousReads,
nonContiguousReads,
cachedBytesRead,
indexCacheBytesRead,
cachedBytesWritten,
directBytesRead,
optimizedBytesRead,
blobStoreBytesRequested,
luceneBytesRead,
currentIndexCacheFills
);
}
}
public static | CacheIndexInputStats |
java | apache__flink | flink-formats/flink-avro/src/main/java/org/apache/flink/formats/avro/AvroWriters.java | {
"start": 1513,
"end": 1741
} | class ____ {
/**
* Creates an {@link AvroWriterFactory} for an Avro specific type. The Avro writers will use the
* schema of that specific type to build and write the records.
*
* @param type The | AvroWriters |
java | elastic__elasticsearch | x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/ScaleTimerTests.java | {
"start": 562,
"end": 3075
} | class ____ extends ESTestCase {
public void testLastScaleToScaleIntervalMillis_GivenNoScaleEver() {
ScaleTimer scaleTimer = new ScaleTimer(() -> System.currentTimeMillis());
assertThat(scaleTimer.lastScaleToScaleIntervalMillis().isEmpty(), is(true));
}
public void testLastScaleToScaleIntervalMillis_GivenSingleScaleEvent() {
ScaleTimer scaleTimer = new ScaleTimer(() -> System.currentTimeMillis());
scaleTimer.markScale();
assertThat(scaleTimer.lastScaleToScaleIntervalMillis().isEmpty(), is(true));
}
public void testLastScaleToScaleIntervalMillis_GivenMultipleScaleEvents() {
ScaleTimer scaleTimer = new ScaleTimer(new MockNowSupplier(100L, 250L, 500L));
scaleTimer.markScale();
scaleTimer.markScale();
OptionalLong scaleInterval = scaleTimer.lastScaleToScaleIntervalMillis();
assertThat(scaleInterval.isPresent(), is(true));
assertThat(scaleInterval.getAsLong(), equalTo(150L));
scaleTimer.markScale();
scaleInterval = scaleTimer.lastScaleToScaleIntervalMillis();
assertThat(scaleInterval.isPresent(), is(true));
assertThat(scaleInterval.getAsLong(), equalTo(250L));
}
public void testMarkDownScaleAndGetMillisLeftFromDelay() {
ScaleTimer scaleTimer = new ScaleTimer(new MockNowSupplier(100L, 100L, 300L, 1300L, 1500L));
scaleTimer.markScale();
long millisLeft = scaleTimer.markDownScaleAndGetMillisLeftFromDelay(Settings.builder().put("down_scale_delay", "1s").build());
assertThat(scaleTimer.downScaleDetectedMillis(), equalTo(100L));
assertThat(millisLeft, equalTo(1000L));
millisLeft = scaleTimer.markDownScaleAndGetMillisLeftFromDelay(Settings.builder().put("down_scale_delay", "1s").build());
assertThat(scaleTimer.downScaleDetectedMillis(), equalTo(100L));
assertThat(millisLeft, equalTo(800L));
millisLeft = scaleTimer.markDownScaleAndGetMillisLeftFromDelay(Settings.builder().put("down_scale_delay", "1s").build());
assertThat(scaleTimer.downScaleDetectedMillis(), equalTo(100L));
assertThat(millisLeft, equalTo(-200L));
scaleTimer.resetScaleDownCoolDown();
millisLeft = scaleTimer.markDownScaleAndGetMillisLeftFromDelay(Settings.builder().put("down_scale_delay", "1s").build());
assertThat(scaleTimer.downScaleDetectedMillis(), equalTo(1500L));
assertThat(millisLeft, equalTo(1000L));
}
private | ScaleTimerTests |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/AbstractFpgaVendorPlugin.java | {
"start": 1465,
"end": 3449
} | interface ____ {
/**
* Check vendor's toolchain and required environment
* @param conf Hadoop configuration
* @return true if the initialization was successful
* */
boolean initPlugin(Configuration conf);
/**
* Diagnose the devices using vendor toolchain but no need to parse device information
*
* @param timeout timeout in milliseconds
* @return true if the diagnostics was successful
* */
boolean diagnose(int timeout);
/**
* Discover the vendor's FPGA devices with execution time constraint
* @param timeout The vendor plugin should return result during this time
* @return The result will be added to FPGAResourceAllocator for later scheduling
* */
List<FpgaDevice> discover(int timeout);
/**
* Since all vendor plugins share a {@link org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.fpga.FpgaResourceAllocator}
* which distinguish FPGA devices by type. Vendor plugin must report this.
*
* @return the type of FPGA plugin represented as a string
* */
String getFpgaType();
/**
* The vendor plugin download required IP files to a required directory.
* It should check if the IP file has already been downloaded.
* @param id The identifier for IP file. Comes from application, ie. matrix_multi_v1
* @param dstDir The plugin should download IP file to this directory
* @param localizedResources The container localized resource can be searched for IP file. Key is
* localized file path and value is soft link names
* @return The absolute path string of IP file
* */
String retrieveIPfilePath(String id, String dstDir,
Map<Path, List<String>> localizedResources);
/**
* The vendor plugin configure an IP file to a device
* @param ipPath The absolute path of the IP file
* @param device The FPGA device object
* @return configure device ok or not
* */
boolean configureIP(String ipPath, FpgaDevice device);
}
| AbstractFpgaVendorPlugin |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/atomic/referencearray/AtomicReferenceArrayAssert_hasSizeLessThan_Test.java | {
"start": 834,
"end": 1208
} | class ____ extends AtomicReferenceArrayAssertBaseTest {
@Override
protected AtomicReferenceArrayAssert<Object> invoke_api_method() {
return assertions.hasSizeLessThan(6);
}
@Override
protected void verify_internal_effects() {
verify(arrays).assertHasSizeLessThan(getInfo(assertions), internalArray(), 6);
}
}
| AtomicReferenceArrayAssert_hasSizeLessThan_Test |
java | apache__maven | impl/maven-core/src/test/java/org/apache/maven/configuration/internal/CompositeBeanHelperPerformanceTest.java | {
"start": 14319,
"end": 14376
} | class ____ performance testing.
*/
public static | for |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/boot/model/source/spi/SingularAttributeSourceBasic.java | {
"start": 252,
"end": 392
} | interface ____
extends SingularAttributeSource, RelationalValueSourceContainer, ImplicitBasicColumnNameSource {
}
| SingularAttributeSourceBasic |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/util/internal/InternalLoggerRegistry.java | {
"start": 1776,
"end": 2142
} | class ____ internally used by {@link LoggerContext}.
* <p>
* We don't use {@linkplain org.apache.logging.log4j.spi.LoggerRegistry the
* registry from Log4j API} to keep Log4j Core independent from the version of
* Log4j API at runtime.
* This also allows Log4j Core to evolve independently from Log4j API.
* </p>
*
* @since 2.25.0
*/
@NullMarked
public final | is |
java | jhy__jsoup | src/test/java/org/jsoup/parser/StreamParserTest.java | {
"start": 15103,
"end": 19022
} | interface
____ html = "<tr id=1><td>One</td><tr id=2><td>Two</td></tr><tr id=3><td>Three</td></tr>"; // missing </tr>, following <tr> infers it
Element context = new Element("table");
try(StreamParser parser = new StreamParser(Parser.htmlParser()).parseFragment(html, context, "")) {
StringBuilder seen = new StringBuilder();
Iterator<Element> it = parser.iterator();
while (it.hasNext()) {
trackSeen(it.next(), seen);
}
assertEquals("td[One];tr#1+;td[Two];tr#2+;td[Three];tr#3;tbody;table;#root;", seen.toString());
// checks expected order, and the + indicates that element had a next sibling at time of emission
// note that we don't get a full doc, just the fragment (and the context at the end of the stack)
assertTrue(isClosed(parser)); // as read to completion
}
}
@Test
void canSelectAndCompleteFragment() throws IOException {
String html = "<tr id=1><td>One</td><tr id=2><td>Two</td></tr><tr id=3><td>Three</td></tr>";
Element context = new Element("table");
try (StreamParser parser = new StreamParser(Parser.htmlParser()).parseFragment(html, context, "")) {
Element first = parser.expectNext("td");
assertEquals("One", first.ownText());
Element el = parser.expectNext("td");
assertEquals("Two", el.ownText());
el = parser.expectNext("td");
assertEquals("Three", el.ownText());
el = parser.selectNext("td");
assertNull(el);
List<Node> nodes = parser.completeFragment();
assertEquals(1, nodes.size()); // should be the inferred tbody
Node tbody = nodes.get(0);
assertEquals("tbody", tbody.nodeName());
List<Node> trs = tbody.childNodes();
assertEquals(3, trs.size()); // should be the three TRs
assertSame(trs.get(0).childNode(0), first); // tr -> td
assertSame(parser.document(), first.ownerDocument()); // the shell document for this fragment
}
}
@Test
void canStreamFragmentXml() throws IOException {
String html = "<tr id=1><td>One</td></tr><tr id=2><td>Two</td></tr><tr id=3><td>Three</td></tr>";
Element context = new Element("Other");
try (StreamParser parser = new StreamParser(Parser.xmlParser()).parseFragment(html, context, "")) {
StringBuilder seen = new StringBuilder();
parser.stream().forEachOrdered(el -> trackSeen(el, seen));
assertEquals("td[One];tr#1+;td[Two];tr#2+;td[Three];tr#3;#root;", seen.toString());
// checks expected order, and the + indicates that element had a next sibling at time of emission
// note that we don't get a full doc, just the fragment
assertTrue(isClosed(parser)); // as read to completion
List<Node> nodes = parser.completeFragment();
assertEquals(3, nodes.size());
assertEquals("tr", nodes.get(0).nodeName());
}
}
@ParameterizedTest
@ValueSource(strings = {
"<html><body><a>Link</a></body></html>",
"<html><body><a>Link</a>",
"<a>Link</a></body></html>",
"<a>Link</a>",
"<a>Link",
"<a>Link</body>",
})
void emitsOnlyOnce(String html) {
try (StreamParser parser = new StreamParser(Parser.htmlParser()).parse(html, "")) {
// https://github.com/jhy/jsoup/issues/2295
// When there was a /body or /html, those were being emitted twice, due to firing a fake onNodeClosed to track their source positions
StringBuilder seen = new StringBuilder();
parser.stream().forEach(el -> trackSeen(el, seen));
assertEquals("head+;a[Link];body;html;#root;", seen.toString());
}
}
}
| String |
java | apache__avro | lang/java/avro/src/main/java/org/apache/avro/util/springframework/ConcurrentReferenceHashMap.java | {
"start": 26968,
"end": 28164
} | class ____ extends AbstractSet<Map.Entry<K, V>> {
@Override
public Iterator<Map.Entry<K, V>> iterator() {
return new EntryIterator();
}
@Override
public boolean contains(@Nullable Object o) {
if (o instanceof Map.Entry<?, ?>) {
Map.Entry<?, ?> entry = (Map.Entry<?, ?>) o;
Reference<K, V> ref = ConcurrentReferenceHashMap.this.getReference(entry.getKey(), Restructure.NEVER);
Entry<K, V> otherEntry = (ref != null ? ref.get() : null);
if (otherEntry != null) {
return ObjectUtils.nullSafeEquals(entry.getValue(), otherEntry.getValue());
}
}
return false;
}
@Override
public boolean remove(Object o) {
if (o instanceof Map.Entry<?, ?>) {
Map.Entry<?, ?> entry = (Map.Entry<?, ?>) o;
return ConcurrentReferenceHashMap.this.remove(entry.getKey(), entry.getValue());
}
return false;
}
@Override
public int size() {
return ConcurrentReferenceHashMap.this.size();
}
@Override
public void clear() {
ConcurrentReferenceHashMap.this.clear();
}
}
/**
* Internal entry iterator implementation.
*/
private | EntrySet |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreDriver.java | {
"start": 5396,
"end": 9821
} | class ____ initialize,
* used to construct paths and file names for the record.
* Determined by configuration settings for the specific driver.
* @param clazz Record type corresponding to the provided name.
* @param <T> Type of the state store record.
* @return True if successful, false otherwise.
*/
public abstract <T extends BaseRecord> boolean initRecordStorage(
String className, Class<T> clazz);
/**
* Check if the driver is currently running and the data store connection is
* valid.
*
* @return True if the driver is initialized and the data store is ready.
*/
public abstract boolean isDriverReady();
/**
* Check if the driver is ready to be used and throw an exception otherwise.
*
* @throws StateStoreUnavailableException If the driver is not ready.
*/
public void verifyDriverReady() throws StateStoreUnavailableException {
if (!isDriverReady()) {
String driverName = getDriverName();
String hostname = getHostname();
throw new StateStoreUnavailableException("State Store driver " +
driverName + " in " + hostname + " is not ready.");
}
}
/**
* Close the State Store driver connection.
*
* @throws Exception if something goes wrong while closing the state store driver connection.
*/
public void close() throws Exception {
if (executor != null) {
executor.shutdown();
executor = null;
}
}
/**
* Returns the current time synchronization from the underlying store.
* Override for stores that supply a current date. The data store driver is
* responsible for maintaining the official synchronization time/date for all
* distributed components.
*
* @return Current time stamp, used for all synchronization dates.
*/
public long getTime() {
return Time.now();
}
/**
* Get the name of the driver implementation for debugging.
*
* @return Name of the driver implementation.
*/
private String getDriverName() {
return this.getClass().getSimpleName();
}
/**
* Get the host name of the machine running the driver for debugging.
*
* @return Host name of the machine running the driver.
*/
private String getHostname() {
String hostname = "Unknown";
try {
hostname = InetAddress.getLocalHost().getHostName();
} catch (Exception e) {
LOG.error("Cannot get local address", e);
}
return hostname;
}
/**
* Try to overwrite records in commitRecords and remove records in deleteRecords.
* Should return null if async mode is used. Else return removed records.
* @param commitRecords records to overwrite in state store
* @param deleteRecords records to remove from state store
* @param <R> record class
* @throws IOException when there is a failure during overwriting or deletion
* @return null if async mode is used, else removed records
*/
public <R extends BaseRecord> List<R> handleOverwriteAndDelete(List<R> commitRecords,
List<R> deleteRecords) throws IOException {
List<R> result = null;
try {
// Overwrite all expired records.
if (commitRecords != null && !commitRecords.isEmpty()) {
Runnable overwriteCallable =
() -> {
try {
putAll(commitRecords, true, false);
} catch (IOException e) {
throw new RuntimeException(e);
}
};
if (executor != null) {
executor.execute(overwriteCallable);
} else {
overwriteCallable.run();
}
}
// Delete all deletable records.
if (deleteRecords != null && !deleteRecords.isEmpty()) {
Map<R, Boolean> removedRecords = new HashMap<>();
Runnable deletionCallable = () -> {
try {
removedRecords.putAll(removeMultiple(deleteRecords));
} catch (IOException e) {
throw new RuntimeException(e);
}
};
if (executor != null) {
executor.execute(deletionCallable);
} else {
result = new ArrayList<>();
deletionCallable.run();
for (Map.Entry<R, Boolean> entry : removedRecords.entrySet()) {
if (entry.getValue()) {
result.add(entry.getKey());
}
}
}
}
} catch (Exception e) {
throw new IOException(e);
}
return result;
}
}
| to |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.