language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
grpc__grpc-java
|
interop-testing/src/test/java/io/grpc/testing/integration/RetryTest.java
|
{
"start": 22121,
"end": 22754
}
|
class ____ extends ClientCall.Listener<Integer> {
Status status = null;
private CountDownLatch closeLatch = new CountDownLatch(1);
@Override
public void onClose(Status status, Metadata trailers) {
this.status = status;
closeLatch.countDown();
}
void clear() {
status = null;
closeLatch = new CountDownLatch(1);
}
void verifyDescription(String description, long timeoutMs) throws InterruptedException {
closeLatch.await(timeoutMs, TimeUnit.MILLISECONDS);
assertNotNull(status);
assertThat(status.getDescription()).contains(description);
}
}
}
|
TestListener
|
java
|
spring-projects__spring-framework
|
spring-context/src/main/java/org/springframework/context/annotation/FullyQualifiedConfigurationBeanNameGenerator.java
|
{
"start": 893,
"end": 2107
}
|
class ____, not only enforcing
* fully-qualified names for component and configuration classes themselves
* but also fully-qualified default bean names ("className.methodName") for
* {@link Bean @Bean} methods. By default, this only affects methods without
* an explicit {@link Bean#name() name} attribute specified.
*
* <p>This provides an alternative to the default bean name generation for
* {@code @Bean} methods (which uses the plain method name), primarily for use
* in large applications with potential bean name overlaps. Favor this bean
* naming strategy over {@code FullyQualifiedAnnotationBeanNameGenerator} if
* you expect such naming conflicts for {@code @Bean} methods, as long as the
* application does not depend on {@code @Bean} method names as bean names.
* Where the name does matter, make sure to declare {@code @Bean("myBeanName")}
* in such a scenario, even if it repeats the method name as the bean name.
*
* @author Juergen Hoeller
* @since 7.0
* @see AnnotationBeanNameGenerator
* @see FullyQualifiedAnnotationBeanNameGenerator
* @see AnnotationConfigApplicationContext#setBeanNameGenerator
* @see AnnotationConfigUtils#CONFIGURATION_BEAN_NAME_GENERATOR
*/
public
|
purposes
|
java
|
spring-projects__spring-boot
|
module/spring-boot-restclient/src/test/java/org/springframework/boot/restclient/autoconfigure/RestTemplateAutoConfigurationTests.java
|
{
"start": 12693,
"end": 12773
}
|
class ____ extends ByteArrayHttpMessageConverter {
}
}
|
CustomHttpMessageConverter
|
java
|
hibernate__hibernate-orm
|
hibernate-community-dialects/src/test/java/org/hibernate/community/dialect/IngresLimitHandlerTest.java
|
{
"start": 372,
"end": 543
}
|
class ____ extends AbstractLimitHandlerTest {
@Override
protected AbstractLimitHandler getLimitHandler() {
return IngresLimitHandler.INSTANCE;
}
}
|
IngresLimitHandlerTest
|
java
|
elastic__elasticsearch
|
x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/plain/CartesianPointIndexFieldData.java
|
{
"start": 3154,
"end": 4060
}
|
class ____ implements IndexFieldData.Builder {
private final String name;
private final ValuesSourceType valuesSourceType;
private final ToScriptFieldFactory<CartesianPointValuesSource.MultiCartesianPointValues> toScriptFieldFactory;
public Builder(
String name,
ValuesSourceType valuesSourceType,
ToScriptFieldFactory<CartesianPointValuesSource.MultiCartesianPointValues> toScriptFieldFactory
) {
this.name = name;
this.valuesSourceType = valuesSourceType;
this.toScriptFieldFactory = toScriptFieldFactory;
}
@Override
public IndexFieldData<?> build(IndexFieldDataCache cache, CircuitBreakerService breakerService) {
// ignore breaker
return new CartesianPointIndexFieldData(name, valuesSourceType, toScriptFieldFactory);
}
}
}
|
Builder
|
java
|
assertj__assertj-core
|
assertj-core/src/test/java/org/assertj/core/api/hashset/HashSetAssert_containsExactly_Test.java
|
{
"start": 1172,
"end": 2693
}
|
class ____ extends HashSetAssertBaseTest {
@Override
protected HashSetAssert<Object> invoke_api_method() {
return assertions.containsExactly(someValues);
}
@Override
protected void verify_internal_effects() {
verify(iterables).assertContainsExactly(getInfo(assertions), getActual(assertions), someValues);
}
@HashSetTest
void should_pass(HashSetFactory hashSetFactory) {
// GIVEN
HashSet<String> hashSet = hashSetFactory.createWith("Yoda", "Yoda", "Luke", "Han");
// WHEN
String[] exactElements = hashSetFactory.createWith("Yoda", "Luke", "Han").toArray(new String[0]);
// THEN
then(hashSet).containsExactly(exactElements);
}
@HashSetTest
void should_fail_finding_elements_with_changed_hashCode(HashSetFactory hashSetFactory) {
// GIVEN
Date first = Date.from(EPOCH.plusSeconds(1));
Date second = Date.from(EPOCH.plusSeconds(2));
HashSet<Date> dates = hashSetFactory.createWith(first, second);
first.setTime(3_000);
// WHEN
var assertionError = expectAssertionError(() -> assertThat(dates).containsExactly(first, second));
// THEN
String message = shouldContainExactly(dates, List.of(first, second), List.of(first), List.of(first)).create()
+ "(elements were checked as in HashSet, as soon as their hashCode change, the HashSet won't find them anymore - use skippingHashCodeComparison to get a collection like comparison)";
then(assertionError).hasMessage(message);
}
}
|
HashSetAssert_containsExactly_Test
|
java
|
google__auto
|
common/src/test/java/com/google/auto/common/BasicAnnotationProcessorTest.java
|
{
"start": 21144,
"end": 21778
}
|
class ____ {",
" public void myMethod(@"
+ RequiresGeneratedCode.class.getCanonicalName()
+ " int myInt)",
" {}",
"}");
requiresGeneratedCodeRejectionTest(classAFileObject);
}
@Test
public void properlyDefersProcessing_rejectsField() {
double version = Double.parseDouble(Objects.requireNonNull(JAVA_SPECIFICATION_VERSION.value()));
assume().that(version).isAtLeast(16.0);
JavaFileObject classAFileObject =
JavaFileObjects.forSourceLines(
"test.ClassA",
"package test;",
"",
"public
|
ClassA
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineMetric.java
|
{
"start": 1730,
"end": 1796
}
|
class ____ {
/**
* Type of metric.
*/
public
|
TimelineMetric
|
java
|
quarkusio__quarkus
|
independent-projects/resteasy-reactive/common/processor/src/main/java/org/jboss/resteasy/reactive/common/processor/TypeArgMapper.java
|
{
"start": 389,
"end": 1917
}
|
class ____ implements Function<String, Type> {
private final ClassInfo declaringClass;
private final IndexView index;
public TypeArgMapper(ClassInfo declaringClass, IndexView index) {
this.declaringClass = declaringClass;
this.index = index;
}
@Override
public Type apply(String v) {
//we attempt to resolve type variables
ClassInfo declarer = declaringClass;
int pos = -1;
for (;;) {
if (declarer == null || OBJECT.equals(declarer.name())) {
return null;
}
List<TypeVariable> typeParameters = declarer.typeParameters();
for (int i = 0; i < typeParameters.size(); i++) {
TypeVariable tv = typeParameters.get(i);
if (tv.identifier().equals(v)) {
pos = i;
}
}
if (pos != -1) {
break;
}
declarer = index.getClassByName(declarer.superName());
}
Type type = JandexUtil
.resolveTypeParameters(declaringClass.name(), declarer.name(), index)
.get(pos);
if (type.kind() == Type.Kind.TYPE_VARIABLE && type.asTypeVariable().identifier().equals(v)) {
List<Type> bounds = type.asTypeVariable().bounds();
if (bounds.isEmpty()) {
return ClassType.OBJECT_TYPE;
}
return bounds.get(0);
} else {
return type;
}
}
}
|
TypeArgMapper
|
java
|
quarkusio__quarkus
|
extensions/security/deployment/src/test/java/io/quarkus/security/test/permissionsallowed/checker/PermissionCheckerAssignabilityTest.java
|
{
"start": 4079,
"end": 5812
}
|
class ____ implementation
// checker accepts implementation thrice (once for each secured method param)
var theInterface = new Third("interface");
var theAbstract = new Third("abstract");
var implementation = new Third("implementation");
assertSuccess(() -> securedBean.allThree(implementation, theAbstract, theInterface), "allThree", ADMIN_WITH_AUGMENTORS);
assertFailureFor(() -> securedBean.allThree(theInterface, theAbstract, implementation), ForbiddenException.class,
ADMIN_WITH_AUGMENTORS);
assertFailureFor(() -> securedBean.allThree(theAbstract, theInterface, implementation), ForbiddenException.class,
ADMIN_WITH_AUGMENTORS);
}
@Test
public void testGenericChecker() {
var wrongValue = new Third("wrong-value");
var rightValue = new Third("generic");
var anotherRightValue = new Second_Record("generic");
assertSuccess(() -> securedBean.genericChecker(rightValue), "generic", ADMIN_WITH_AUGMENTORS);
assertSuccess(() -> securedBean.genericChecker(anotherRightValue), "generic", ADMIN_WITH_AUGMENTORS);
assertFailureFor(() -> securedBean.genericChecker(wrongValue), ForbiddenException.class, ADMIN_WITH_AUGMENTORS);
}
@Test
public void testNotAssignableCheckerParam() {
var theInterface = new Second_Record("interface"); // not assignable
var theAbstract = new Third("abstract");
var implementation = new Third("implementation");
assertFailureFor(() -> securedBean.allThree(implementation, theAbstract, theInterface), ForbiddenException.class,
ADMIN_WITH_AUGMENTORS);
}
@ApplicationScoped
public static
|
and
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestProxyUsers.java
|
{
"start": 1484,
"end": 3606
}
|
class ____ {
private static final Logger LOG =
LoggerFactory.getLogger(TestProxyUsers.class);
private static final String REAL_USER_NAME = "proxier";
private static final String PROXY_USER_NAME = "proxied_user";
private static final String AUTHORIZED_PROXY_USER_NAME = "authorized_proxied_user";
private static final String[] GROUP_NAMES =
new String[] { "foo_group" };
private static final String[] NETGROUP_NAMES =
new String[] { "@foo_group" };
private static final String[] OTHER_GROUP_NAMES =
new String[] { "bar_group" };
private static final String[] SUDO_GROUP_NAMES =
new String[] { "sudo_proxied_user" };
private static final String PROXY_IP = "1.2.3.4";
private static final String PROXY_IP_RANGE = "10.222.0.0/16,10.113.221.221";
/**
* Test the netgroups (groups in ACL rules that start with @)
*
* This is a manual test because it requires:
* - host setup
* - native code compiled
* - specify the group mapping class
*
* Host setup:
*
* /etc/nsswitch.conf should have a line like this:
* netgroup: files
*
* /etc/netgroup should be (the whole file):
* foo_group (,proxied_user,)
*
* To run this test:
*
* export JAVA_HOME='path/to/java'
* mvn test \
* -Dtest=TestProxyUsers \
* -DTestProxyUsersGroupMapping=$className \
*
* where $className is one of the classes that provide group
* mapping services, i.e. classes that implement
* GroupMappingServiceProvider interface, at this time:
* - org.apache.hadoop.security.JniBasedUnixGroupsNetgroupMapping
* - org.apache.hadoop.security.ShellBasedUnixGroupsNetgroupMapping
*
*/
@Test
public void testNetgroups () throws IOException{
if(!NativeCodeLoader.isNativeCodeLoaded()) {
LOG.info("Not testing netgroups, " +
"this test only runs when native code is compiled");
return;
}
String groupMappingClassName =
System.getProperty("TestProxyUsersGroupMapping");
if(groupMappingClassName == null) {
LOG.info("Not testing netgroups, no group mapping
|
TestProxyUsers
|
java
|
elastic__elasticsearch
|
x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/integration/GetInferenceFieldsCrossClusterIT.java
|
{
"start": 2125,
"end": 6555
}
|
class ____ extends AbstractMultiClustersTestCase {
private static final String REMOTE_CLUSTER = "cluster_a";
private static final String INDEX_NAME = "test-index";
private static final String INFERENCE_FIELD = "test-inference-field";
private static final String INFERENCE_ID = "test-inference-id";
private static final Map<String, Object> INFERENCE_ENDPOINT_SERVICE_SETTINGS = Map.of("model", "my_model", "api_key", "my_api_key");
private boolean clustersConfigured = false;
@Override
protected List<String> remoteClusterAlias() {
return List.of(REMOTE_CLUSTER);
}
@Override
protected Map<String, Boolean> skipUnavailableForRemoteClusters() {
return Map.of(REMOTE_CLUSTER, DEFAULT_SKIP_UNAVAILABLE);
}
@Override
protected Settings nodeSettings() {
return Settings.builder().put(LicenseSettings.SELF_GENERATED_LICENSE_TYPE.getKey(), "trial").build();
}
@Override
protected Collection<Class<? extends Plugin>> nodePlugins(String clusterAlias) {
return List.of(LocalStateInferencePlugin.class, TestInferenceServicePlugin.class, FakeMlPlugin.class);
}
@Before
public void configureClusters() throws Exception {
if (clustersConfigured == false) {
setupTwoClusters();
clustersConfigured = true;
}
}
public void testRemoteIndex() {
Consumer<GetInferenceFieldsAction.Request> assertFailedRequest = r -> {
IllegalArgumentException e = assertThrows(
IllegalArgumentException.class,
() -> client().execute(GetInferenceFieldsAction.INSTANCE, r).actionGet(TEST_REQUEST_TIMEOUT)
);
assertThat(e.getMessage(), containsString("GetInferenceFieldsAction does not support remote indices"));
};
var concreteIndexRequest = new GetInferenceFieldsAction.Request(
Set.of(REMOTE_CLUSTER + ":test-index"),
Map.of(),
false,
false,
"foo"
);
assertFailedRequest.accept(concreteIndexRequest);
var wildcardIndexRequest = new GetInferenceFieldsAction.Request(Set.of(REMOTE_CLUSTER + ":*"), Map.of(), false, false, "foo");
assertFailedRequest.accept(wildcardIndexRequest);
var wildcardClusterAndIndexRequest = new GetInferenceFieldsAction.Request(Set.of("*:*"), Map.of(), false, false, "foo");
assertFailedRequest.accept(wildcardClusterAndIndexRequest);
}
public void testRemoteClusterAction() {
RemoteClusterClient remoteClusterClient = client().getRemoteClusterClient(
REMOTE_CLUSTER,
EsExecutors.DIRECT_EXECUTOR_SERVICE,
RemoteClusterService.DisconnectedStrategy.RECONNECT_IF_DISCONNECTED
);
var request = new GetInferenceFieldsAction.Request(
Set.of(INDEX_NAME),
generateDefaultWeightFieldMap(Set.of(INFERENCE_FIELD)),
false,
false,
"foo"
);
PlainActionFuture<GetInferenceFieldsAction.Response> future = new PlainActionFuture<>();
remoteClusterClient.execute(GetInferenceFieldsAction.REMOTE_TYPE, request, future);
var response = future.actionGet(TEST_REQUEST_TIMEOUT);
assertInferenceFieldsMap(
response.getInferenceFieldsMap(),
Map.of(INDEX_NAME, Set.of(new GetInferenceFieldsIT.InferenceFieldWithTestMetadata(INFERENCE_FIELD, INFERENCE_ID, 1.0f)))
);
assertInferenceResultsMap(response.getInferenceResultsMap(), Map.of(INFERENCE_ID, TextExpansionResults.class));
}
private void setupTwoClusters() throws IOException {
setupCluster(LOCAL_CLUSTER);
setupCluster(REMOTE_CLUSTER);
}
private void setupCluster(String clusterAlias) throws IOException {
final Client client = client(clusterAlias);
createInferenceEndpoint(client, TaskType.SPARSE_EMBEDDING, INFERENCE_ID, INFERENCE_ENDPOINT_SERVICE_SETTINGS);
int dataNodeCount = cluster(clusterAlias).numDataNodes();
XContentBuilder mappings = generateSemanticTextMapping(Map.of(INFERENCE_FIELD, INFERENCE_ID));
Settings indexSettings = indexSettings(randomIntBetween(1, dataNodeCount), 0).build();
assertAcked(client.admin().indices().prepareCreate(INDEX_NAME).setSettings(indexSettings).setMapping(mappings));
}
}
|
GetInferenceFieldsCrossClusterIT
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/taskmanager/TaskManagerLogFileHandler.java
|
{
"start": 1853,
"end": 3214
}
|
class ____
extends AbstractTaskManagerFileHandler<TaskManagerMessageParameters> {
public TaskManagerLogFileHandler(
@Nonnull GatewayRetriever<? extends RestfulGateway> leaderRetriever,
@Nonnull Duration timeout,
@Nonnull Map<String, String> responseHeaders,
@Nonnull
UntypedResponseMessageHeaders<EmptyRequestBody, TaskManagerMessageParameters>
untypedResponseMessageHeaders,
@Nonnull GatewayRetriever<ResourceManagerGateway> resourceManagerGatewayRetriever,
@Nonnull TransientBlobService transientBlobService,
@Nonnull Duration cacheEntryDuration) {
super(
leaderRetriever,
timeout,
responseHeaders,
untypedResponseMessageHeaders,
resourceManagerGatewayRetriever,
transientBlobService,
cacheEntryDuration);
}
@Override
protected CompletableFuture<TransientBlobKey> requestFileUpload(
ResourceManagerGateway resourceManagerGateway,
Tuple2<ResourceID, String> taskManagerIdAndFileName) {
return resourceManagerGateway.requestTaskManagerFileUploadByType(
taskManagerIdAndFileName.f0, FileType.LOG, timeout);
}
}
|
TaskManagerLogFileHandler
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DtFetcher.java
|
{
"start": 1002,
"end": 1219
}
|
interface ____ permits the abstraction and separation of
* delegation token fetch implementaions across different packages and
* compilation units. Resolution of fetcher impl will be done at runtime.
*/
public
|
which
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/type/GenericFieldInSubtypeTest.java
|
{
"start": 1050,
"end": 1205
}
|
class ____<K> extends Result677<K> {
public K value;
public Success677() { }
public Success677(K k) { value = k; }
}
}
abstract
|
Success677
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/XmppEndpointBuilderFactory.java
|
{
"start": 21602,
"end": 30229
}
|
interface ____
extends
EndpointProducerBuilder {
default AdvancedXmppEndpointProducerBuilder advanced() {
return (AdvancedXmppEndpointProducerBuilder) this;
}
/**
* Whether to login the user.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: common
*
* @param login the value to set
* @return the dsl builder
*/
default XmppEndpointProducerBuilder login(boolean login) {
doSetProperty("login", login);
return this;
}
/**
* Whether to login the user.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: common
*
* @param login the value to set
* @return the dsl builder
*/
default XmppEndpointProducerBuilder login(String login) {
doSetProperty("login", login);
return this;
}
/**
* Use nickname when joining room. If room is specified and nickname is
* not, user will be used for the nickname.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param nickname the value to set
* @return the dsl builder
*/
default XmppEndpointProducerBuilder nickname(String nickname) {
doSetProperty("nickname", nickname);
return this;
}
/**
* Accept pubsub packets on input, default is false.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: common
*
* @param pubsub the value to set
* @return the dsl builder
*/
default XmppEndpointProducerBuilder pubsub(boolean pubsub) {
doSetProperty("pubsub", pubsub);
return this;
}
/**
* Accept pubsub packets on input, default is false.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: common
*
* @param pubsub the value to set
* @return the dsl builder
*/
default XmppEndpointProducerBuilder pubsub(String pubsub) {
doSetProperty("pubsub", pubsub);
return this;
}
/**
* If this option is specified, the component will connect to MUC (Multi
* User Chat). Usually, the domain name for MUC is different from the
* login domain. For example, if you are supermanjabber.org and want to
* join the krypton room, then the room URL is
* kryptonconference.jabber.org. Note the conference part. It is not a
* requirement to provide the full room JID. If the room parameter does
* not contain the symbol, the domain part will be discovered and added
* by Camel.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param room the value to set
* @return the dsl builder
*/
default XmppEndpointProducerBuilder room(String room) {
doSetProperty("room", room);
return this;
}
/**
* The name of the service you are connecting to. For Google Talk, this
* would be gmail.com.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param serviceName the value to set
* @return the dsl builder
*/
default XmppEndpointProducerBuilder serviceName(String serviceName) {
doSetProperty("serviceName", serviceName);
return this;
}
/**
* Specifies whether to test the connection on startup. This is used to
* ensure that the XMPP client has a valid connection to the XMPP server
* when the route starts. Camel throws an exception on startup if a
* connection cannot be established. When this option is set to false,
* Camel will attempt to establish a lazy connection when needed by a
* producer, and will poll for a consumer connection until the
* connection is established. Default is true.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: common
*
* @param testConnectionOnStartup the value to set
* @return the dsl builder
*/
default XmppEndpointProducerBuilder testConnectionOnStartup(boolean testConnectionOnStartup) {
doSetProperty("testConnectionOnStartup", testConnectionOnStartup);
return this;
}
/**
* Specifies whether to test the connection on startup. This is used to
* ensure that the XMPP client has a valid connection to the XMPP server
* when the route starts. Camel throws an exception on startup if a
* connection cannot be established. When this option is set to false,
* Camel will attempt to establish a lazy connection when needed by a
* producer, and will poll for a consumer connection until the
* connection is established. Default is true.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: common
*
* @param testConnectionOnStartup the value to set
* @return the dsl builder
*/
default XmppEndpointProducerBuilder testConnectionOnStartup(String testConnectionOnStartup) {
doSetProperty("testConnectionOnStartup", testConnectionOnStartup);
return this;
}
/**
* To use a custom HeaderFilterStrategy to filter header to and from
* Camel message.
*
* The option is a:
* <code>org.apache.camel.spi.HeaderFilterStrategy</code> type.
*
* Group: filter
*
* @param headerFilterStrategy the value to set
* @return the dsl builder
*/
default XmppEndpointProducerBuilder headerFilterStrategy(org.apache.camel.spi.HeaderFilterStrategy headerFilterStrategy) {
doSetProperty("headerFilterStrategy", headerFilterStrategy);
return this;
}
/**
* To use a custom HeaderFilterStrategy to filter header to and from
* Camel message.
*
* The option will be converted to a
* <code>org.apache.camel.spi.HeaderFilterStrategy</code> type.
*
* Group: filter
*
* @param headerFilterStrategy the value to set
* @return the dsl builder
*/
default XmppEndpointProducerBuilder headerFilterStrategy(String headerFilterStrategy) {
doSetProperty("headerFilterStrategy", headerFilterStrategy);
return this;
}
/**
* Password for login.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param password the value to set
* @return the dsl builder
*/
default XmppEndpointProducerBuilder password(String password) {
doSetProperty("password", password);
return this;
}
/**
* Password for room.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param roomPassword the value to set
* @return the dsl builder
*/
default XmppEndpointProducerBuilder roomPassword(String roomPassword) {
doSetProperty("roomPassword", roomPassword);
return this;
}
/**
* User name (without server name). If not specified, anonymous login
* will be attempted.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param user the value to set
* @return the dsl builder
*/
default XmppEndpointProducerBuilder user(String user) {
doSetProperty("user", user);
return this;
}
}
/**
* Advanced builder for endpoint producers for the XMPP component.
*/
public
|
XmppEndpointProducerBuilder
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/javadoc/InvalidParamTest.java
|
{
"start": 1047,
"end": 1483
}
|
class ____ {
private final BugCheckerRefactoringTestHelper refactoring =
BugCheckerRefactoringTestHelper.newInstance(InvalidParam.class, getClass());
private final CompilationTestHelper helper =
CompilationTestHelper.newInstance(InvalidParam.class, getClass());
@Test
public void badParameterName_positioning() {
helper
.addSourceLines(
"Test.java",
"""
|
InvalidParamTest
|
java
|
spring-projects__spring-framework
|
spring-websocket/src/main/java/org/springframework/web/socket/sockjs/transport/session/AbstractSockJsSession.java
|
{
"start": 2098,
"end": 12349
}
|
enum ____ {NEW, OPEN, CLOSED}
/**
* Log category to use for network failure after a client has gone away.
* @see DisconnectedClientHelper
*/
public static final String DISCONNECTED_CLIENT_LOG_CATEGORY =
"org.springframework.web.socket.sockjs.DisconnectedClient";
private static final DisconnectedClientHelper disconnectedClientHelper =
new DisconnectedClientHelper(DISCONNECTED_CLIENT_LOG_CATEGORY);
protected final Log logger = LogFactory.getLog(getClass());
protected final Object responseLock = new Object();
private final String id;
private final SockJsServiceConfig config;
private final WebSocketHandler handler;
private final Map<String, Object> attributes = new ConcurrentHashMap<>();
private volatile State state = State.NEW;
private final long timeCreated = System.currentTimeMillis();
private volatile long timeLastActive = this.timeCreated;
private @Nullable ScheduledFuture<?> heartbeatFuture;
private @Nullable HeartbeatTask heartbeatTask;
private volatile boolean heartbeatDisabled;
/**
* Create a new instance.
* @param id the session ID
* @param config the SockJS service configuration options
* @param handler the recipient of SockJS messages
* @param attributes the attributes from the HTTP handshake to associate with the WebSocket
* session; the provided attributes are copied, the original map is not used.
*/
public AbstractSockJsSession(String id, SockJsServiceConfig config, WebSocketHandler handler,
@Nullable Map<String, Object> attributes) {
Assert.notNull(id, "Session id must not be null");
Assert.notNull(config, "SockJsServiceConfig must not be null");
Assert.notNull(handler, "WebSocketHandler must not be null");
this.id = id;
this.config = config;
this.handler = handler;
if (attributes != null) {
this.attributes.putAll(attributes);
}
}
@Override
public String getId() {
return this.id;
}
protected SockJsMessageCodec getMessageCodec() {
return this.config.getMessageCodec();
}
public SockJsServiceConfig getSockJsServiceConfig() {
return this.config;
}
@Override
public Map<String, Object> getAttributes() {
return this.attributes;
}
// Message sending
@Override
public final void sendMessage(WebSocketMessage<?> message) throws IOException {
Assert.state(!isClosed(), "Cannot send a message when session is closed");
Assert.isInstanceOf(TextMessage.class, message, "SockJS supports text messages only");
sendMessageInternal(((TextMessage) message).getPayload());
}
protected abstract void sendMessageInternal(String message) throws IOException;
// Lifecycle related methods
public boolean isNew() {
return State.NEW.equals(this.state);
}
@Override
public boolean isOpen() {
return State.OPEN.equals(this.state);
}
public boolean isClosed() {
return State.CLOSED.equals(this.state);
}
/**
* Performs cleanup and notify the {@link WebSocketHandler}.
*/
@Override
public final void close() throws IOException {
close(new CloseStatus(3000, "Go away!"));
}
/**
* Performs cleanup and notify the {@link WebSocketHandler}.
*/
@Override
public final void close(CloseStatus status) throws IOException {
if (isOpen()) {
if (logger.isDebugEnabled()) {
logger.debug("Closing SockJS session " + getId() + " with " + status);
}
this.state = State.CLOSED;
try {
if (isActive() && !CloseStatus.SESSION_NOT_RELIABLE.equals(status)) {
try {
writeFrameInternal(SockJsFrame.closeFrame(status.getCode(), status.getReason()));
}
catch (Throwable ex) {
logger.debug("Failure while sending SockJS close frame", ex);
}
}
updateLastActiveTime();
cancelHeartbeat();
disconnect(status);
}
finally {
try {
this.handler.afterConnectionClosed(this, status);
}
catch (Throwable ex) {
logger.debug("Error from WebSocketHandler.afterConnectionClosed in " + this, ex);
}
}
}
}
@Override
public long getTimeSinceLastActive() {
if (isNew()) {
return (System.currentTimeMillis() - this.timeCreated);
}
else {
return (isActive() ? 0 : System.currentTimeMillis() - this.timeLastActive);
}
}
/**
* Should be invoked whenever the session becomes inactive.
*/
protected void updateLastActiveTime() {
this.timeLastActive = System.currentTimeMillis();
}
@Override
public void disableHeartbeat() {
this.heartbeatDisabled = true;
cancelHeartbeat();
}
protected void sendHeartbeat() throws SockJsTransportFailureException {
synchronized (this.responseLock) {
if (isActive() && !this.heartbeatDisabled) {
writeFrame(SockJsFrame.heartbeatFrame());
scheduleHeartbeat();
}
}
}
protected void scheduleHeartbeat() {
if (this.heartbeatDisabled) {
return;
}
synchronized (this.responseLock) {
cancelHeartbeat();
if (!isActive()) {
return;
}
Instant time = Instant.now().plus(this.config.getHeartbeatTime(), ChronoUnit.MILLIS);
this.heartbeatTask = new HeartbeatTask();
this.heartbeatFuture = this.config.getTaskScheduler().schedule(this.heartbeatTask, time);
if (logger.isTraceEnabled()) {
logger.trace("Scheduled heartbeat in session " + getId());
}
}
}
protected void cancelHeartbeat() {
synchronized (this.responseLock) {
if (this.heartbeatFuture != null) {
if (logger.isTraceEnabled()) {
logger.trace("Cancelling heartbeat in session " + getId());
}
this.heartbeatFuture.cancel(false);
this.heartbeatFuture = null;
}
if (this.heartbeatTask != null) {
this.heartbeatTask.cancel();
this.heartbeatTask = null;
}
}
}
/**
* Polling and Streaming sessions periodically close the current HTTP request and
* wait for the next request to come through. During this "downtime" the session is
* still open but inactive and unable to send messages and therefore has to buffer
* them temporarily. A WebSocket session by contrast is stateful and remain active
* until closed.
*/
public abstract boolean isActive();
/**
* Actually close the underlying WebSocket session or in the case of HTTP
* transports complete the underlying request.
*/
protected abstract void disconnect(CloseStatus status) throws IOException;
// Frame writing
/**
* For internal use within a TransportHandler and the (TransportHandler-specific)
* session class.
*/
protected void writeFrame(SockJsFrame frame) throws SockJsTransportFailureException {
if (logger.isTraceEnabled()) {
logger.trace("Preparing to write " + frame);
}
try {
writeFrameInternal(frame);
}
catch (Exception ex) {
logWriteFrameFailure(ex);
try {
// Force disconnect (so we won't try to send close frame)
disconnect(CloseStatus.SERVER_ERROR);
}
catch (Throwable disconnectFailure) {
// Ignore
}
try {
close(CloseStatus.SERVER_ERROR);
}
catch (Throwable closeFailure) {
// Nothing of consequence, already forced disconnect
}
throw new SockJsTransportFailureException("Failed to write " + frame, getId(), ex);
}
}
protected abstract void writeFrameInternal(SockJsFrame frame) throws IOException;
private void logWriteFrameFailure(Throwable ex) {
if (!disconnectedClientHelper.checkAndLogClientDisconnectedException(ex)) {
logger.debug("Terminating connection after failure to send message to client", ex);
}
}
// Delegation methods
public void delegateConnectionEstablished() throws Exception {
this.state = State.OPEN;
this.handler.afterConnectionEstablished(this);
}
public void delegateMessages(String... messages) throws SockJsMessageDeliveryException {
for (int i = 0; i < messages.length; i++) {
try {
if (isClosed()) {
logUndeliveredMessages(i, messages);
return;
}
this.handler.handleMessage(this, new TextMessage(messages[i]));
}
catch (Exception ex) {
if (isClosed()) {
if (logger.isTraceEnabled()) {
logger.trace("Failed to handle message '" + messages[i] + "'", ex);
}
logUndeliveredMessages(i, messages);
return;
}
throw new SockJsMessageDeliveryException(this.id, getUndelivered(messages, i), ex);
}
}
}
private void logUndeliveredMessages(int index, String[] messages) {
List<String> undelivered = getUndelivered(messages, index);
if (logger.isTraceEnabled() && !undelivered.isEmpty()) {
logger.trace("Dropped inbound message(s) due to closed session: " + undelivered);
}
}
private static List<String> getUndelivered(String[] messages, int i) {
return switch (messages.length - i) {
case 0 -> Collections.emptyList();
case 1 -> (messages[i].trim().isEmpty() ?
Collections.<String>emptyList() : Collections.singletonList(messages[i]));
default -> Arrays.stream(Arrays.copyOfRange(messages, i, messages.length))
.filter(message -> !message.trim().isEmpty())
.toList();
};
}
/**
* Invoked when the underlying connection is closed.
*/
public final void delegateConnectionClosed(CloseStatus status) throws Exception {
if (!isClosed()) {
try {
updateLastActiveTime();
// Avoid cancelHeartbeat() and responseLock within server "close" callback
ScheduledFuture<?> future = this.heartbeatFuture;
if (future != null) {
this.heartbeatFuture = null;
future.cancel(false);
}
}
finally {
this.state = State.CLOSED;
this.handler.afterConnectionClosed(this, status);
}
}
}
/**
* Close due to error arising from SockJS transport handling.
*/
public void tryCloseWithSockJsTransportError(Throwable error, CloseStatus closeStatus) {
if (logger.isDebugEnabled()) {
logger.debug("Closing due to transport error for " + this);
}
try {
delegateError(error);
}
catch (Throwable delegateException) {
// Ignore
logger.debug("Exception from error handling delegate", delegateException);
}
try {
close(closeStatus);
}
catch (Throwable closeException) {
logger.debug("Failure while closing " + this, closeException);
}
}
public void delegateError(Throwable ex) throws Exception {
this.handler.handleTransportError(this, ex);
}
// Self description
@Override
public String toString() {
return getClass().getSimpleName() + "[id=" + getId() + "]";
}
private
|
State
|
java
|
spring-projects__spring-data-jpa
|
spring-data-jpa/src/test/java/org/springframework/data/jpa/repository/query/EclipseLinkQueryUtilsIntegrationTests.java
|
{
"start": 1506,
"end": 3404
}
|
class ____ extends QueryUtilsIntegrationTests {
int getNumberOfJoinsAfterCreatingAPath() {
return 1;
}
@Test // GH-2756
@Override
void prefersFetchOverJoin() {
CriteriaBuilder builder = em.getCriteriaBuilder();
CriteriaQuery<User> query = builder.createQuery(User.class);
Root<User> from = query.from(User.class);
from.fetch("manager");
from.join("manager");
PropertyPath managerFirstname = PropertyPath.from("manager.firstname", User.class);
PropertyPath managerLastname = PropertyPath.from("manager.lastname", User.class);
QueryUtils.toExpressionRecursively(from, managerLastname);
Path<Object> expr = (Path) QueryUtils.toExpressionRecursively(from, managerFirstname);
assertThat(expr.getParentPath()).hasFieldOrPropertyWithValue("isFetch", true);
assertThat(from.getFetches()).hasSize(1);
assertThat(from.getJoins()).hasSize(1);
}
@Test // GH-3349
@Disabled
@Override
void doesNotCreateJoinForRelationshipSimpleId() {
// eclipse link produces join for path.get(relationship)
}
@Test // GH-3349
@Disabled
@Override
void doesNotCreateJoinForRelationshipEmbeddedId() {
// eclipse link produces join for path.get(relationship)
}
@Test // GH-3349
@Disabled
@Override
void doesNotCreateJoinForRelationshipIdClass() {
// eclipse link produces join for path.get(relationship)
}
@Test // GH-3983, GH-2870
@Disabled("Not supported by EclipseLink")
@Transactional
@Override
void applyAndBindOptimizesIn() {}
@Test // GH-3983, GH-2870
@Transactional
@Override
void applyAndBindExpandsToPositionalPlaceholders() {
em.getCriteriaBuilder();
EJBQueryImpl<?> query = (EJBQueryImpl) QueryUtils.applyAndBind("DELETE FROM User u",
List.of(new User(), new User()), em.unwrap(null));
assertThat(query.getDatabaseQuery().getJPQLString()).isEqualTo("DELETE FROM User u where u = ?1 or u = ?2");
}
}
|
EclipseLinkQueryUtilsIntegrationTests
|
java
|
micronaut-projects__micronaut-core
|
core/src/main/java/io/micronaut/core/optim/StaticOptimizations.java
|
{
"start": 865,
"end": 1039
}
|
class ____ a generic container for pre-computed data
* which can be injected at initialization time. Every class
* which needs static optimizations should go through this
*
|
is
|
java
|
apache__camel
|
components/camel-ai/camel-milvus/src/main/java/org/apache/camel/component/milvus/MilvusProducer.java
|
{
"start": 1710,
"end": 7166
}
|
class ____ extends DefaultProducer {
private MilvusClient client;
private ExecutorService executor;
public MilvusProducer(MilvusEndpoint endpoint) {
super(endpoint);
}
@Override
public MilvusEndpoint getEndpoint() {
return (MilvusEndpoint) super.getEndpoint();
}
@Override
public void doStart() throws Exception {
super.doStart();
this.client = getEndpoint().getClient();
}
@Override
public void process(Exchange exchange) {
final Message in = exchange.getMessage();
final MilvusAction action = in.getHeader(MilvusHeaders.ACTION, MilvusAction.class);
try {
if (action == null) {
throw new NoSuchHeaderException("The action is a required header", exchange, MilvusHeaders.ACTION);
}
switch (action) {
case CREATE_COLLECTION:
createCollection(exchange);
break;
case CREATE_INDEX:
createIndex(exchange);
break;
case UPSERT:
upsert(exchange);
break;
case INSERT:
insert(exchange);
break;
case SEARCH:
search(exchange);
break;
case QUERY:
query(exchange);
break;
case DELETE:
delete(exchange);
break;
default:
throw new UnsupportedOperationException("Unsupported action: " + action.name());
}
} catch (Exception e) {
exchange.setException(e);
}
}
// ***************************************
//
// Actions
//
// ***************************************
@SuppressWarnings({ "unchecked" })
private void upsert(Exchange exchange) throws Exception {
final Message in = exchange.getMessage();
final UpsertParam upsert = in.getMandatoryBody(UpsertParam.class);
R<MutationResult> result = this.client.upsert(upsert);
handleResponseStatus(result);
populateResponse(result, exchange);
}
@SuppressWarnings({ "unchecked" })
private void insert(Exchange exchange) throws Exception {
final Message in = exchange.getMessage();
final InsertParam insert = in.getMandatoryBody(InsertParam.class);
R<MutationResult> result = this.client.insert(insert);
handleResponseStatus(result);
populateResponse(result, exchange);
}
private void createCollection(Exchange exchange) throws Exception {
final Message in = exchange.getMessage();
final CreateCollectionParam body = in.getMandatoryBody(CreateCollectionParam.class);
R<RpcStatus> result = this.client.createCollection(body);
handleResponseStatus(result);
populateResponse(result, exchange);
}
private void createIndex(Exchange exchange) throws Exception {
final Message in = exchange.getMessage();
final CreateIndexParam body = in.getMandatoryBody(CreateIndexParam.class);
R<RpcStatus> result = this.client.createIndex(body);
handleResponseStatus(result);
populateResponse(result, exchange);
}
private void search(Exchange exchange) throws Exception {
final Message in = exchange.getMessage();
final SearchSimpleParam body = in.getMandatoryBody(SearchSimpleParam.class);
this.client.loadCollection(
LoadCollectionParam.newBuilder().withCollectionName(getEndpoint().getCollection()).withSyncLoad(true).build());
R<SearchResponse> result = this.client.search(body);
handleResponseStatus(result);
populateResponse(result, exchange);
}
private void query(Exchange exchange) throws Exception {
final Message in = exchange.getMessage();
final QueryParam body = in.getMandatoryBody(QueryParam.class);
this.client.loadCollection(
LoadCollectionParam.newBuilder().withCollectionName(getEndpoint().getCollection()).withSyncLoad(true).build());
R<QueryResults> result = this.client.query(body);
handleResponseStatus(result);
populateResponse(result, exchange);
}
private void delete(Exchange exchange) throws Exception {
final Message in = exchange.getMessage();
final DeleteParam body = in.getMandatoryBody(DeleteParam.class);
R<MutationResult> result = this.client.delete(body);
handleResponseStatus(result);
populateResponse(result, exchange);
}
// ***************************************
//
// Helpers
//
// ***************************************
private CamelContext getCamelContext() {
return getEndpoint().getCamelContext();
}
private void handleResponseStatus(R<?> r) {
if (r.getStatus() != R.Status.Success.getCode()) {
throw new RuntimeException(r.getMessage());
}
}
private void populateResponse(R<?> r, Exchange exchange) {
Message out = exchange.getMessage();
out.setHeader(MilvusHeaders.OPERATION_STATUS, r.getStatus());
out.setHeader(MilvusHeaders.OPERATION_STATUS_VALUE, r.getStatus().intValue());
out.setBody(r.getData());
}
}
|
MilvusProducer
|
java
|
apache__logging-log4j2
|
log4j-api/src/main/java/org/apache/logging/log4j/util/ServiceLoaderUtil.java
|
{
"start": 2097,
"end": 3107
}
|
class ____ {
private static final int MAX_BROKEN_SERVICES = 8;
private ServiceLoaderUtil() {}
/**
* Retrieves services registered with {@link ServiceLoader}
* <p>
* It ignores the most common service loading errors.
* </p>
* @param serviceType The service type to use for OSGi service retrieval.
* @param serviceLoader The service loader instance to use.
* @param logger The logger to use to report service failures.
* @return A stream of all correctly loaded services.
* @since 2.24.0
*/
public static <S> Stream<S> safeStream(
final Class<S> serviceType, final ServiceLoader<? extends S> serviceLoader, final Logger logger) {
requireNonNull(serviceLoader, "serviceLoader");
final Collection<Class<?>> classes = new HashSet<>();
final Stream<S> services =
StreamSupport.stream(new ServiceLoaderSpliterator<>(serviceType, serviceLoader, logger), false);
// Caller
|
ServiceLoaderUtil
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/TwitterTimelineEndpointBuilderFactory.java
|
{
"start": 53429,
"end": 56052
}
|
interface ____ extends EndpointProducerBuilder {
default TwitterTimelineEndpointProducerBuilder basic() {
return (TwitterTimelineEndpointProducerBuilder) this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default AdvancedTwitterTimelineEndpointProducerBuilder lazyStartProducer(boolean lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: producer (advanced)
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default AdvancedTwitterTimelineEndpointProducerBuilder lazyStartProducer(String lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
}
/**
* Builder for endpoint for the Twitter Timeline component.
*/
public
|
AdvancedTwitterTimelineEndpointProducerBuilder
|
java
|
elastic__elasticsearch
|
x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ibmwatsonx/rerank/IbmWatsonxRerankTaskSettings.java
|
{
"start": 1080,
"end": 7007
}
|
class ____ implements TaskSettings {
public static final String NAME = "ibm_watsonx_rerank_task_settings";
public static final String RETURN_DOCUMENTS = "return_documents";
public static final String TOP_N_DOCS_ONLY = "top_n";
public static final String TRUNCATE_INPUT_TOKENS = "truncate_input_tokens";
static final IbmWatsonxRerankTaskSettings EMPTY_SETTINGS = new IbmWatsonxRerankTaskSettings(null, null, null);
public static IbmWatsonxRerankTaskSettings fromMap(Map<String, Object> map) {
ValidationException validationException = new ValidationException();
if (map == null || map.isEmpty()) {
return EMPTY_SETTINGS;
}
Boolean returnDocuments = extractOptionalBoolean(map, RETURN_DOCUMENTS, validationException);
Integer topNDocumentsOnly = extractOptionalPositiveInteger(
map,
TOP_N_DOCS_ONLY,
ModelConfigurations.TASK_SETTINGS,
validationException
);
Integer truncateInputTokens = extractOptionalPositiveInteger(
map,
TRUNCATE_INPUT_TOKENS,
ModelConfigurations.TASK_SETTINGS,
validationException
);
if (validationException.validationErrors().isEmpty() == false) {
throw validationException;
}
return of(topNDocumentsOnly, returnDocuments, truncateInputTokens);
}
/**
* Creates a new {@link IbmWatsonxRerankTaskSettings}
* by preferring non-null fields from the request settings over the original settings.
*
* @param originalSettings the settings stored as part of the inference entity configuration
* @param requestTaskSettings the settings passed in within the task_settings field of the request
* @return a constructed {@link IbmWatsonxRerankTaskSettings}
*/
public static IbmWatsonxRerankTaskSettings of(
IbmWatsonxRerankTaskSettings originalSettings,
IbmWatsonxRerankTaskSettings requestTaskSettings
) {
return new IbmWatsonxRerankTaskSettings(
requestTaskSettings.getTopNDocumentsOnly() != null
? requestTaskSettings.getTopNDocumentsOnly()
: originalSettings.getTopNDocumentsOnly(),
requestTaskSettings.getReturnDocuments() != null
? requestTaskSettings.getReturnDocuments()
: originalSettings.getReturnDocuments(),
requestTaskSettings.getTruncateInputTokens() != null
? requestTaskSettings.getTruncateInputTokens()
: originalSettings.getTruncateInputTokens()
);
}
public static IbmWatsonxRerankTaskSettings of(Integer topNDocumentsOnly, Boolean returnDocuments, Integer maxChunksPerDoc) {
return new IbmWatsonxRerankTaskSettings(topNDocumentsOnly, returnDocuments, maxChunksPerDoc);
}
private final Integer topNDocumentsOnly;
private final Boolean returnDocuments;
private final Integer truncateInputTokens;
public IbmWatsonxRerankTaskSettings(StreamInput in) throws IOException {
this(in.readOptionalInt(), in.readOptionalBoolean(), in.readOptionalInt());
}
public IbmWatsonxRerankTaskSettings(
@Nullable Integer topNDocumentsOnly,
@Nullable Boolean doReturnDocuments,
@Nullable Integer truncateInputTokens
) {
this.topNDocumentsOnly = topNDocumentsOnly;
this.returnDocuments = doReturnDocuments;
this.truncateInputTokens = truncateInputTokens;
}
@Override
public boolean isEmpty() {
return topNDocumentsOnly == null && returnDocuments == null && truncateInputTokens == null;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
if (topNDocumentsOnly != null) {
builder.field(TOP_N_DOCS_ONLY, topNDocumentsOnly);
}
if (returnDocuments != null) {
builder.field(RETURN_DOCUMENTS, returnDocuments);
}
if (truncateInputTokens != null) {
builder.field(TRUNCATE_INPUT_TOKENS, truncateInputTokens);
}
builder.endObject();
return builder;
}
@Override
public String getWriteableName() {
return NAME;
}
@Override
public TransportVersion getMinimalSupportedVersion() {
return TransportVersions.V_8_18_0;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeOptionalInt(topNDocumentsOnly);
out.writeOptionalBoolean(returnDocuments);
out.writeOptionalInt(truncateInputTokens);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
IbmWatsonxRerankTaskSettings that = (IbmWatsonxRerankTaskSettings) o;
return Objects.equals(returnDocuments, that.returnDocuments)
&& Objects.equals(topNDocumentsOnly, that.topNDocumentsOnly)
&& Objects.equals(truncateInputTokens, that.truncateInputTokens);
}
@Override
public int hashCode() {
return Objects.hash(returnDocuments, topNDocumentsOnly, truncateInputTokens);
}
public Boolean getDoesReturnDocuments() {
return returnDocuments;
}
public Integer getTopNDocumentsOnly() {
return topNDocumentsOnly;
}
public Boolean getReturnDocuments() {
return returnDocuments;
}
public Integer getTruncateInputTokens() {
return truncateInputTokens;
}
@Override
public TaskSettings updatedTaskSettings(Map<String, Object> newSettings) {
IbmWatsonxRerankTaskSettings updatedSettings = IbmWatsonxRerankTaskSettings.fromMap(new HashMap<>(newSettings));
return IbmWatsonxRerankTaskSettings.of(this, updatedSettings);
}
}
|
IbmWatsonxRerankTaskSettings
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamFeatureSetUsage.java
|
{
"start": 935,
"end": 7544
}
|
class ____ extends XPackFeatureUsage {
private final DataStreamStats streamStats;
public DataStreamFeatureSetUsage(StreamInput input) throws IOException {
super(input);
this.streamStats = new DataStreamStats(input);
}
public DataStreamFeatureSetUsage(DataStreamStats stats) {
super(XPackField.DATA_STREAMS, true, true);
this.streamStats = stats;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
streamStats.writeTo(out);
}
@Override
public TransportVersion getMinimalSupportedVersion() {
return TransportVersion.zero();
}
public DataStreamStats getStats() {
return streamStats;
}
@Override
protected void innerXContent(XContentBuilder builder, Params params) throws IOException {
super.innerXContent(builder, params);
builder.field("data_streams", streamStats.totalDataStreamCount);
builder.field("indices_count", streamStats.indicesBehindDataStream);
builder.startObject("failure_store");
builder.field("explicitly_enabled_count", streamStats.failureStoreExplicitlyEnabledDataStreamCount);
builder.field("effectively_enabled_count", streamStats.failureStoreEffectivelyEnabledDataStreamCount);
builder.field("failure_indices_count", streamStats.failureStoreIndicesCount);
// Failures lifecycle
builder.startObject("lifecycle");
builder.field("explicitly_enabled_count", streamStats.failuresLifecycleExplicitlyEnabledCount);
builder.field("effectively_enabled_count", streamStats.failuresLifecycleEffectivelyEnabledCount);
// Retention
DataStreamLifecycleFeatureSetUsage.RetentionStats.toXContentFragment(
builder,
streamStats.failuresLifecycleDataRetentionStats,
false
);
DataStreamLifecycleFeatureSetUsage.RetentionStats.toXContentFragment(
builder,
streamStats.failuresLifecycleEffectiveRetentionStats,
true
);
builder.startObject("global_retention");
DataStreamLifecycleFeatureSetUsage.GlobalRetentionStats.toXContentFragment(
builder,
LifecycleStats.DEFAULT_RETENTION_FIELD_NAME,
streamStats.globalRetentionStats.get(LifecycleStats.DEFAULT_RETENTION_FIELD_NAME)
);
DataStreamLifecycleFeatureSetUsage.GlobalRetentionStats.toXContentFragment(
builder,
LifecycleStats.MAX_RETENTION_FIELD_NAME,
streamStats.globalRetentionStats.get(LifecycleStats.MAX_RETENTION_FIELD_NAME)
);
builder.endObject();
builder.endObject();
builder.endObject();
}
@Override
public String toString() {
return Strings.toString(this);
}
@Override
public int hashCode() {
return streamStats.hashCode();
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (obj.getClass() != getClass()) {
return false;
}
DataStreamFeatureSetUsage other = (DataStreamFeatureSetUsage) obj;
return Objects.equals(streamStats, other.streamStats);
}
public record DataStreamStats(
long totalDataStreamCount,
long indicesBehindDataStream,
long failureStoreExplicitlyEnabledDataStreamCount,
long failureStoreEffectivelyEnabledDataStreamCount,
long failureStoreIndicesCount,
long failuresLifecycleExplicitlyEnabledCount,
long failuresLifecycleEffectivelyEnabledCount,
DataStreamLifecycleFeatureSetUsage.RetentionStats failuresLifecycleDataRetentionStats,
DataStreamLifecycleFeatureSetUsage.RetentionStats failuresLifecycleEffectiveRetentionStats,
Map<String, DataStreamLifecycleFeatureSetUsage.GlobalRetentionStats> globalRetentionStats
) implements Writeable {
private static final TransportVersion INTRODUCE_FAILURES_LIFECYCLE = TransportVersion.fromName("introduce_failures_lifecycle");
public DataStreamStats(StreamInput in) throws IOException {
this(
in.readVLong(),
in.readVLong(),
in.getTransportVersion().onOrAfter(TransportVersions.V_8_15_0) ? in.readVLong() : 0,
in.getTransportVersion().supports(TransportVersions.V_8_18_0) ? in.readVLong() : 0,
in.getTransportVersion().onOrAfter(TransportVersions.V_8_15_0) ? in.readVLong() : 0,
in.getTransportVersion().supports(INTRODUCE_FAILURES_LIFECYCLE) ? in.readVLong() : 0,
in.getTransportVersion().supports(INTRODUCE_FAILURES_LIFECYCLE) ? in.readVLong() : 0,
in.getTransportVersion().supports(INTRODUCE_FAILURES_LIFECYCLE)
? DataStreamLifecycleFeatureSetUsage.RetentionStats.read(in)
: DataStreamLifecycleFeatureSetUsage.RetentionStats.NO_DATA,
in.getTransportVersion().supports(INTRODUCE_FAILURES_LIFECYCLE)
? DataStreamLifecycleFeatureSetUsage.RetentionStats.read(in)
: DataStreamLifecycleFeatureSetUsage.RetentionStats.NO_DATA,
in.getTransportVersion().supports(INTRODUCE_FAILURES_LIFECYCLE)
? in.readMap(DataStreamLifecycleFeatureSetUsage.GlobalRetentionStats::new)
: Map.of()
);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVLong(this.totalDataStreamCount);
out.writeVLong(this.indicesBehindDataStream);
if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_15_0)) {
out.writeVLong(this.failureStoreExplicitlyEnabledDataStreamCount);
if (out.getTransportVersion().supports(TransportVersions.V_8_18_0)) {
out.writeVLong(failureStoreEffectivelyEnabledDataStreamCount);
}
out.writeVLong(this.failureStoreIndicesCount);
}
if (out.getTransportVersion().supports(INTRODUCE_FAILURES_LIFECYCLE)) {
out.writeVLong(failuresLifecycleExplicitlyEnabledCount);
out.writeVLong(failuresLifecycleEffectivelyEnabledCount);
failuresLifecycleDataRetentionStats.writeTo(out);
failuresLifecycleEffectiveRetentionStats.writeTo(out);
out.writeMap(globalRetentionStats, (o, v) -> v.writeTo(o));
}
}
}
}
|
DataStreamFeatureSetUsage
|
java
|
apache__camel
|
components/camel-salesforce/camel-salesforce-component/src/main/java/org/apache/camel/component/salesforce/api/dto/analytics/reports/SyncAttributes.java
|
{
"start": 922,
"end": 1395
}
|
class ____ extends AbstractAttributesBase {
private String describeUrl;
private String instancesUrl;
public String getDescribeUrl() {
return describeUrl;
}
public void setDescribeUrl(String describeUrl) {
this.describeUrl = describeUrl;
}
public String getInstancesUrl() {
return instancesUrl;
}
public void setInstancesUrl(String instancesUrl) {
this.instancesUrl = instancesUrl;
}
}
|
SyncAttributes
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterStatsAction.java
|
{
"start": 1271,
"end": 3072
}
|
class ____ extends BaseRestHandler {
private static final Set<String> SUPPORTED_CAPABILITIES = Set.of(
"human-readable-total-docs-size",
"verbose-dense-vector-mapping-stats",
"ccs-stats",
"retrievers-usage-stats",
"esql-stats",
"extended-search-usage-stats"
);
private static final Set<String> SUPPORTED_QUERY_PARAMETERS = Set.of("include_remotes", "nodeId", REST_TIMEOUT_PARAM);
@Override
public List<Route> routes() {
return List.of(new Route(GET, "/_cluster/stats"), new Route(GET, "/_cluster/stats/nodes/{nodeId}"));
}
@Override
public String getName() {
return "cluster_stats_action";
}
@Override
public Set<String> supportedQueryParameters() {
return SUPPORTED_QUERY_PARAMETERS;
}
@Override
public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
boolean includeRemotes = request.paramAsBoolean("include_remotes", false);
ClusterStatsRequest clusterStatsRequest = request.isServerlessRequest()
? ClusterStatsRequest.newServerlessRequest(request.paramAsStringArray("nodeId", null))
: new ClusterStatsRequest(includeRemotes, request.paramAsStringArray("nodeId", null));
clusterStatsRequest.setTimeout(getTimeout(request));
return channel -> new RestCancellableNodeClient(client, request.getHttpChannel()).admin()
.cluster()
.clusterStats(clusterStatsRequest, new NodesResponseRestListener<>(channel));
}
@Override
public boolean canTripCircuitBreaker() {
return false;
}
@Override
public Set<String> supportedCapabilities() {
return SUPPORTED_CAPABILITIES;
}
}
|
RestClusterStatsAction
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AzureBlobBlockManager.java
|
{
"start": 1412,
"end": 8738
}
|
class ____ extends AzureBlockManager {
private static final Logger LOG = LoggerFactory.getLogger(
AbfsOutputStream.class);
/** Cached list of committed block IDs */
private final StringBuilder committedBlockEntries = new StringBuilder();
/** The list to store blockId, position, and status. */
private final LinkedList<BlockEntry> blockEntryList = new LinkedList<>();
private int blockIdLength = 0;
/**
* Constructs an AzureBlobBlockManager.
*
* @param abfsOutputStream the output stream
* @param blockFactory the block factory
* @param bufferSize the buffer size
* @throws AzureBlobFileSystemException if an error occurs
*/
public AzureBlobBlockManager(AbfsOutputStream abfsOutputStream,
DataBlocks.BlockFactory blockFactory,
int bufferSize)
throws AzureBlobFileSystemException {
super(abfsOutputStream, blockFactory, bufferSize);
if (abfsOutputStream.getPosition() > 0 && !abfsOutputStream.isAppendBlob()) {
List<String> committedBlocks = getBlockList(abfsOutputStream.getTracingContext());
if (!committedBlocks.isEmpty()) {
committedBlockEntries.append(String.join(COMMA, committedBlocks));
}
}
LOG.debug("Created a new Blob Block Manager for AbfsOutputStream instance {} for path {}",
abfsOutputStream.getStreamID(), abfsOutputStream.getPath());
}
/**
* Retrieves the length of the block ID.
*
* @return the length of the block ID in bytes.
*/
public int getBlockIdLength() {
return blockIdLength;
}
/**
* Creates a new block.
*
* @param position the position
* @return the created block
* @throws IOException if an I/O error occurs
*/
@Override
protected synchronized AbfsBlock createBlockInternal(long position)
throws IOException {
if (getActiveBlock() == null) {
setBlockCount(getBlockCount() + 1);
AbfsBlock activeBlock = new AbfsBlobBlock(getAbfsOutputStream(), position, getBlockIdLength(), getBlockCount());
activeBlock.setBlockEntry(addNewEntry(activeBlock.getBlockId(), activeBlock.getOffset()));
if (getAbfsOutputStream().isChecksumValidationEnabled()) {
getAbfsOutputStream().getMessageDigest().reset();
}
setActiveBlock(activeBlock);
}
return getActiveBlock();
}
/**
* Returns block id's which are committed for the blob.
*
* @param tracingContext Tracing context object.
* @return list of committed block id's.
* @throws AzureBlobFileSystemException if an error occurs
*/
private List<String> getBlockList(TracingContext tracingContext)
throws AzureBlobFileSystemException {
List<String> committedBlockIdList = new ArrayList<>();
AbfsBlobClient blobClient = getAbfsOutputStream().getClientHandler().getBlobClient();
final AbfsRestOperation op = blobClient
.getBlockList(getAbfsOutputStream().getPath(), tracingContext);
if (op != null && op.getResult() != null) {
committedBlockIdList = op.getResult().getBlockIdList();
if (!committedBlockIdList.isEmpty()) {
blockIdLength = Base64.decodeBase64(committedBlockIdList.get(0)).length;
}
}
return committedBlockIdList;
}
/**
* Adds a new block entry to the block entry list.
* The block entry is added only if the position of the new block
* is greater than the position of the last block in the list.
*
* @param blockId The ID of the new block to be added.
* @param position The position of the new block in the stream.
* @return The newly added {@link BlockEntry}.
* @throws IOException If the position of the new block is not greater than the last block in the list.
*/
private synchronized BlockEntry addNewEntry(String blockId, long position) throws IOException {
if (!blockEntryList.isEmpty()) {
BlockEntry lastEntry = blockEntryList.getLast();
if (position <= lastEntry.getPosition()) {
throw new IOException("New block position " + position + " must be greater than the last block position "
+ lastEntry.getPosition() + " for path " + getAbfsOutputStream().getPath());
}
}
BlockEntry blockEntry = new BlockEntry(blockId, position, AbfsBlockStatus.NEW);
blockEntryList.addLast(blockEntry);
LOG.debug("Added block {} at position {} with status NEW.", blockId, position);
return blockEntry;
}
/**
* Updates the status of an existing block entry to SUCCESS.
* This method is used to mark a block as successfully processed.
*
* @param block The {@link AbfsBlock} whose status needs to be updated to SUCCESS.
*/
protected synchronized void updateEntry(AbfsBlock block) {
BlockEntry blockEntry = block.getBlockEntry();
blockEntry.setStatus(AbfsBlockStatus.SUCCESS);
LOG.debug("Added block {} at position {} with status SUCCESS.", block.getBlockId(), blockEntry.getPosition());
}
/**
* Prepares the list of blocks to commit.
*
* @return whether we have some data to commit or not.
* @throws IOException if an I/O error occurs
*/
protected synchronized boolean hasBlocksToCommit() throws IOException {
// Adds all the committed blocks if available to the list of blocks to be added in putBlockList.
if (blockEntryList.isEmpty()) {
return false; // No entries to commit
}
while (!blockEntryList.isEmpty()) {
BlockEntry current = blockEntryList.poll();
if (current.getStatus() != AbfsBlockStatus.SUCCESS) {
LOG.debug(
"Block {} with position {} has status {}, flush cannot proceed.",
current.getBlockId(), current.getPosition(), current.getStatus());
throw new IOException("Flush failed. Block " + current.getBlockId()
+ " with position " + current.getPosition() + " has status "
+ current.getStatus() + "for path " + getAbfsOutputStream().getPath());
}
if (!blockEntryList.isEmpty()) {
BlockEntry next = blockEntryList.getFirst();
if (current.getPosition() >= next.getPosition()) {
String errorMessage =
"Position check failed. Current block position is greater than or equal to the next block's position.\n"
+ "Current Block Entry:\n"
+ "Block ID: " + current.getBlockId()
+ ", Position: " + current.getPosition()
+ ", Status: " + current.getStatus()
+ ", Path: " + getAbfsOutputStream().getPath()
+ ", StreamID: " + getAbfsOutputStream().getStreamID()
+ ", Next block position: " + next.getPosition()
+ "\n";
throw new IOException(errorMessage);
}
}
// Append the current block's ID to the committedBlockBuilder
if (committedBlockEntries.length() > 0) {
committedBlockEntries.append(COMMA);
}
committedBlockEntries.append(current.getBlockId());
LOG.debug("Block {} added to committed entries.", current.getBlockId());
}
return true;
}
/**
* Returns the block ID list.
*
* @return the block ID list
*/
protected String getBlockIdToCommit() {
return committedBlockEntries.toString();
}
@Override
public void close(){
super.close();
committedBlockEntries.setLength(0);
}
}
|
AzureBlobBlockManager
|
java
|
quarkusio__quarkus
|
independent-projects/arc/processor/src/main/java/io/quarkus/arc/processor/bcextensions/SyntheticComponentBuilderBase.java
|
{
"start": 374,
"end": 4365
}
|
class ____<THIS extends SyntheticComponentBuilderBase<THIS>> {
Map<String, Object> params = new HashMap<>();
abstract THIS self();
public THIS withParam(String key, boolean value) {
params.put(key, value);
return self();
}
public THIS withParam(String key, boolean[] value) {
params.put(key, value);
return self();
}
public THIS withParam(String key, int value) {
params.put(key, value);
return self();
}
public THIS withParam(String key, int[] value) {
params.put(key, value);
return self();
}
public THIS withParam(String key, long value) {
params.put(key, value);
return self();
}
public THIS withParam(String key, long[] value) {
params.put(key, value);
return self();
}
public THIS withParam(String key, double value) {
params.put(key, value);
return self();
}
public THIS withParam(String key, double[] value) {
params.put(key, value);
return self();
}
public THIS withParam(String key, String value) {
params.put(key, value);
return self();
}
public THIS withParam(String key, String[] value) {
params.put(key, value);
return self();
}
public THIS withParam(String key, Enum<?> value) {
params.put(key, value);
return self();
}
public THIS withParam(String key, Enum<?>[] value) {
params.put(key, value);
return self();
}
public THIS withParam(String key, Class<?> value) {
params.put(key, value);
return self();
}
public THIS withParam(String key, ClassInfo value) {
params.put(key, ((ClassInfoImpl) value).jandexDeclaration);
return self();
}
public THIS withParam(String key, Class<?>[] value) {
params.put(key, value);
return self();
}
public THIS withParam(String key, ClassInfo[] value) {
org.jboss.jandex.ClassInfo[] jandexValues = new org.jboss.jandex.ClassInfo[value.length];
for (int i = 0; i < value.length; i++) {
jandexValues[i] = ((ClassInfoImpl) value[i]).jandexDeclaration;
}
params.put(key, jandexValues);
return self();
}
public THIS withParam(String key, AnnotationInfo value) {
params.put(key, ((AnnotationInfoImpl) value).jandexAnnotation);
return self();
}
public THIS withParam(String key, Annotation value) {
params.put(key, Annotations.jandexAnnotation(value));
return self();
}
public THIS withParam(String key, AnnotationInfo[] value) {
org.jboss.jandex.AnnotationInstance[] jandexValues = new org.jboss.jandex.AnnotationInstance[value.length];
for (int i = 0; i < value.length; i++) {
jandexValues[i] = ((AnnotationInfoImpl) value[i]).jandexAnnotation;
}
params.put(key, jandexValues);
return self();
}
public THIS withParam(String key, Annotation[] value) {
org.jboss.jandex.AnnotationInstance[] jandexValues = new org.jboss.jandex.AnnotationInstance[value.length];
for (int i = 0; i < value.length; i++) {
jandexValues[i] = Annotations.jandexAnnotation(value[i]);
}
params.put(key, jandexValues);
return self();
}
public THIS withParam(String key, InvokerInfo value) {
io.quarkus.arc.processor.InvokerInfo arcValue = ((InvokerInfoImpl) value).arcInvokerInfo;
params.put(key, arcValue);
return self();
}
public THIS withParam(String key, InvokerInfo[] value) {
io.quarkus.arc.processor.InvokerInfo[] arcValues = new io.quarkus.arc.processor.InvokerInfo[value.length];
for (int i = 0; i < value.length; i++) {
arcValues[i] = ((InvokerInfoImpl) value[i]).arcInvokerInfo;
}
params.put(key, arcValues);
return self();
}
}
|
SyntheticComponentBuilderBase
|
java
|
lettuce-io__lettuce-core
|
src/main/java/io/lettuce/core/support/ConnectionPoolSupport.java
|
{
"start": 2848,
"end": 11466
}
|
class ____ {
private ConnectionPoolSupport() {
}
/**
* Creates a new {@link GenericObjectPool} using the {@link Supplier}. Allocated instances are wrapped and must not be
* returned with {@link ObjectPool#returnObject(Object)}. By default, connections are validated by checking their
* {@link StatefulConnection#isOpen()} method.
*
* @param connectionSupplier must not be {@code null}.
* @param config must not be {@code null}.
* @param <T> connection type.
* @return the connection pool.
*/
public static <T extends StatefulConnection<?, ?>> GenericObjectPool<T> createGenericObjectPool(
Supplier<T> connectionSupplier, GenericObjectPoolConfig<T> config) {
return createGenericObjectPool(connectionSupplier, config, true, (c) -> c.isOpen());
}
/**
* Creates a new {@link GenericObjectPool} using the {@link Supplier}. Allocated instances are wrapped and must not be
* returned with {@link ObjectPool#returnObject(Object)}.
*
* @param connectionSupplier must not be {@code null}.
* @param config must not be {@code null}.
* @param validationPredicate a {@link Predicate} to help validate connections
* @param <T> connection type.
* @return the connection pool.
*/
public static <T extends StatefulConnection<?, ?>> GenericObjectPool<T> createGenericObjectPool(
Supplier<T> connectionSupplier, GenericObjectPoolConfig<T> config, Predicate<T> validationPredicate) {
return createGenericObjectPool(connectionSupplier, config, true, validationPredicate);
}
/**
* Creates a new {@link GenericObjectPool} using the {@link Supplier}. By default, connections are validated by checking
* their {@link StatefulConnection#isOpen()} method.
*
* @param connectionSupplier must not be {@code null}.
* @param config must not be {@code null}.
* @param wrapConnections {@code false} to return direct connections that need to be returned to the pool using
* {@link ObjectPool#returnObject(Object)}. {@code true} to return wrapped connections that are returned to the pool
* when invoking {@link StatefulConnection#close()}.
* @param <T> connection type.
* @return the connection pool.
*/
@SuppressWarnings("unchecked")
public static <T extends StatefulConnection<?, ?>> GenericObjectPool<T> createGenericObjectPool(
Supplier<T> connectionSupplier, GenericObjectPoolConfig<T> config, boolean wrapConnections) {
return createGenericObjectPool(connectionSupplier, config, wrapConnections, (c) -> c.isOpen());
}
/**
* Creates a new {@link GenericObjectPool} using the {@link Supplier}.
*
* @param connectionSupplier must not be {@code null}.
* @param config must not be {@code null}.
* @param wrapConnections {@code false} to return direct connections that need to be returned to the pool using
* {@link ObjectPool#returnObject(Object)}. {@code true} to return wrapped connections that are returned to the pool
* when invoking {@link StatefulConnection#close()}.
* @param validationPredicate a {@link Predicate} to help validate connections
* @param <T> connection type.
* @return the connection pool.
*/
@SuppressWarnings("unchecked")
public static <T extends StatefulConnection<?, ?>> GenericObjectPool<T> createGenericObjectPool(
Supplier<T> connectionSupplier, GenericObjectPoolConfig<T> config, boolean wrapConnections,
Predicate<T> validationPredicate) {
LettuceAssert.notNull(connectionSupplier, "Connection supplier must not be null");
LettuceAssert.notNull(config, "GenericObjectPoolConfig must not be null");
LettuceAssert.notNull(validationPredicate, "Connection validator must not be null");
AtomicReference<Origin<T>> poolRef = new AtomicReference<>();
GenericObjectPool<T> pool = new GenericObjectPool<T>(
new RedisPooledObjectFactory<>(connectionSupplier, validationPredicate), config) {
@Override
public T borrowObject() throws Exception {
return wrapConnections ? ConnectionWrapping.wrapConnection(super.borrowObject(), poolRef.get())
: super.borrowObject();
}
@Override
public void returnObject(T obj) {
if (wrapConnections && obj instanceof HasTargetConnection) {
super.returnObject((T) ((HasTargetConnection) obj).getTargetConnection());
return;
}
super.returnObject(obj);
}
};
poolRef.set(new ObjectPoolWrapper<>(pool));
return pool;
}
/**
* Creates a new {@link SoftReferenceObjectPool} using the {@link Supplier}. Allocated instances are wrapped and must not be
* returned with {@link ObjectPool#returnObject(Object)}.
*
* @param connectionSupplier must not be {@code null}.
* @param <T> connection type.
* @return the connection pool.
*/
public static <T extends StatefulConnection<?, ?>> SoftReferenceObjectPool<T> createSoftReferenceObjectPool(
Supplier<T> connectionSupplier) {
return createSoftReferenceObjectPool(connectionSupplier, true);
}
/**
* Creates a new {@link SoftReferenceObjectPool} using the {@link Supplier}.
*
* @param connectionSupplier must not be {@code null}.
* @param wrapConnections {@code false} to return direct connections that need to be returned to the pool using
* {@link ObjectPool#returnObject(Object)}. {@code true} to return wrapped connections that are returned to the pool
* when invoking {@link StatefulConnection#close()}.
* @param <T> connection type.
* @return the connection pool.
*/
@SuppressWarnings("unchecked")
public static <T extends StatefulConnection<?, ?>> SoftReferenceObjectPool<T> createSoftReferenceObjectPool(
Supplier<T> connectionSupplier, boolean wrapConnections) {
return createSoftReferenceObjectPool(connectionSupplier, wrapConnections, (c) -> c.isOpen());
}
/**
* Creates a new {@link SoftReferenceObjectPool} using the {@link Supplier}.
*
* @param connectionSupplier must not be {@code null}.
* @param wrapConnections {@code false} to return direct connections that need to be returned to the pool using
* {@link ObjectPool#returnObject(Object)}. {@code true} to return wrapped connections that are returned to the pool
* when invoking {@link StatefulConnection#close()}.
* @param validationPredicate a {@link Predicate} to help validate connections
* @param <T> connection type.
* @return the connection pool.
*/
@SuppressWarnings("unchecked")
public static <T extends StatefulConnection<?, ?>> SoftReferenceObjectPool<T> createSoftReferenceObjectPool(
Supplier<T> connectionSupplier, boolean wrapConnections, Predicate<T> validationPredicate) {
LettuceAssert.notNull(connectionSupplier, "Connection supplier must not be null");
AtomicReference<Origin<T>> poolRef = new AtomicReference<>();
SoftReferenceObjectPool<T> pool = new SoftReferenceObjectPool<T>(
new RedisPooledObjectFactory<>(connectionSupplier, validationPredicate)) {
private final Lock lock = new ReentrantLock();
@Override
public T borrowObject() throws Exception {
lock.lock();
try {
return wrapConnections ? ConnectionWrapping.wrapConnection(super.borrowObject(), poolRef.get())
: super.borrowObject();
} finally {
lock.unlock();
}
}
@Override
public void returnObject(T obj) throws Exception {
lock.lock();
try {
if (wrapConnections && obj instanceof HasTargetConnection) {
super.returnObject((T) ((HasTargetConnection) obj).getTargetConnection());
return;
}
super.returnObject(obj);
} finally {
lock.unlock();
}
}
};
poolRef.set(new ObjectPoolWrapper<>(pool));
return pool;
}
/**
* @author Mark Paluch
* @since 4.3
*/
private static
|
ConnectionPoolSupport
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/monitor/fs/FsService.java
|
{
"start": 1005,
"end": 3128
}
|
class ____ {
private static final Logger logger = LogManager.getLogger(FsService.class);
private final Supplier<FsInfo> fsInfoSupplier;
public static final Setting<TimeValue> REFRESH_INTERVAL_SETTING = Setting.timeSetting(
"monitor.fs.refresh_interval",
TimeValue.timeValueSeconds(1),
TimeValue.timeValueSeconds(1),
Property.NodeScope
);
// permits tests to bypass the refresh interval on the cache; deliberately unregistered since it is only for use in tests
public static final Setting<Boolean> ALWAYS_REFRESH_SETTING = Setting.boolSetting(
"monitor.fs.always_refresh",
false,
Property.NodeScope
);
public FsService(final Settings settings, final NodeEnvironment nodeEnvironment) {
final FsProbe probe = new FsProbe(nodeEnvironment);
final FsInfo initialValue = stats(probe, null);
if (ALWAYS_REFRESH_SETTING.get(settings)) {
assert REFRESH_INTERVAL_SETTING.exists(settings) == false;
logger.debug("bypassing refresh_interval");
fsInfoSupplier = () -> stats(probe, initialValue);
} else {
final TimeValue refreshInterval = REFRESH_INTERVAL_SETTING.get(settings);
logger.debug("using refresh_interval [{}]", refreshInterval);
final FsInfoCache fsInfoCache = new FsInfoCache(refreshInterval, initialValue, probe);
fsInfoSupplier = () -> {
try {
return fsInfoCache.getOrRefresh();
} catch (UncheckedIOException e) {
logger.debug("unexpected exception reading filesystem info", e);
return null;
}
};
}
}
public FsInfo stats() {
return fsInfoSupplier.get();
}
private static FsInfo stats(FsProbe probe, FsInfo initialValue) {
try {
return probe.stats(initialValue);
} catch (IOException e) {
logger.debug("unexpected exception reading filesystem info", e);
return null;
}
}
private static
|
FsService
|
java
|
elastic__elasticsearch
|
libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/Platform.java
|
{
"start": 531,
"end": 1131
}
|
enum ____ {
LINUX,
MACOS,
WINDOWS;
private static final Platform current = findCurrent();
private static Platform findCurrent() {
String os = System.getProperty("os.name");
if (os.startsWith("Linux")) {
return LINUX;
} else if (os.startsWith("Mac OS")) {
return MACOS;
} else if (os.startsWith("Windows")) {
return WINDOWS;
} else {
throw new AssertionError("Unsupported platform [" + os + "]");
}
}
public boolean isCurrent() {
return this == current;
}
}
|
Platform
|
java
|
apache__flink
|
flink-end-to-end-tests/flink-datastream-allround-test/src/main/java/org/apache/flink/streaming/tests/DataStreamAllroundTestJobFactory.java
|
{
"start": 31982,
"end": 32517
}
|
class ____
implements MapFunction<Event, Event>, ResultTypeQueryable<Event> {
private final SingleThreadAccessCheckingTypeInfo<Event> typeInformation =
new SingleThreadAccessCheckingTypeInfo<>(Event.class);
@Override
public Event map(Event value) {
return value;
}
@Override
public TypeInformation<Event> getProducedType() {
return typeInformation;
}
}
private static
|
EventIdentityFunctionWithCustomEventTypeInformation
|
java
|
apache__flink
|
flink-clients/src/main/java/org/apache/flink/client/deployment/application/ApplicationDispatcherGatewayServiceFactory.java
|
{
"start": 2702,
"end": 5604
}
|
class ____
implements AbstractDispatcherLeaderProcess.DispatcherGatewayServiceFactory {
private final Configuration configuration;
private final DispatcherFactory dispatcherFactory;
private final PackagedProgram application;
private final RpcService rpcService;
private final PartialDispatcherServices partialDispatcherServices;
public ApplicationDispatcherGatewayServiceFactory(
Configuration configuration,
DispatcherFactory dispatcherFactory,
PackagedProgram application,
RpcService rpcService,
PartialDispatcherServices partialDispatcherServices) {
this.configuration = configuration;
this.dispatcherFactory = dispatcherFactory;
this.application = checkNotNull(application);
this.rpcService = rpcService;
this.partialDispatcherServices = partialDispatcherServices;
}
@Override
public AbstractDispatcherLeaderProcess.DispatcherGatewayService create(
DispatcherId fencingToken,
Collection<ExecutionPlan> recoveredJobs,
Collection<JobResult> recoveredDirtyJobResults,
ExecutionPlanWriter executionPlanWriter,
JobResultStore jobResultStore) {
final List<JobID> recoveredJobIds = getRecoveredJobIds(recoveredJobs);
final Dispatcher dispatcher;
try {
dispatcher =
dispatcherFactory.createDispatcher(
rpcService,
fencingToken,
recoveredJobs,
recoveredDirtyJobResults,
(dispatcherGateway, scheduledExecutor, errorHandler) ->
new ApplicationDispatcherBootstrap(
application,
recoveredJobIds,
configuration,
dispatcherGateway,
scheduledExecutor,
errorHandler),
PartialDispatcherServicesWithJobPersistenceComponents.from(
partialDispatcherServices,
executionPlanWriter,
jobResultStore));
} catch (Exception e) {
throw new FlinkRuntimeException("Could not create the Dispatcher rpc endpoint.", e);
}
dispatcher.start();
return DefaultDispatcherGatewayService.from(dispatcher);
}
private List<JobID> getRecoveredJobIds(final Collection<ExecutionPlan> recoveredJobs) {
return recoveredJobs.stream().map(ExecutionPlan::getJobID).collect(Collectors.toList());
}
}
|
ApplicationDispatcherGatewayServiceFactory
|
java
|
apache__flink
|
flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/operators/process/RuntimeChangelogMode.java
|
{
"start": 1142,
"end": 2132
}
|
class ____ implements Serializable {
private final byte[] changes;
private final boolean keyOnlyDeletes;
public RuntimeChangelogMode(byte[] changes, boolean keyOnlyDeletes) {
this.changes = changes;
this.keyOnlyDeletes = keyOnlyDeletes;
}
public static RuntimeChangelogMode serialize(ChangelogMode mode) {
final RowKind[] kinds = mode.getContainedKinds().toArray(RowKind[]::new);
final byte[] changes = new byte[kinds.length];
IntStream.range(0, kinds.length).forEach(i -> changes[i] = kinds[i].toByteValue());
return new RuntimeChangelogMode(changes, mode.keyOnlyDeletes());
}
public ChangelogMode deserialize() {
final ChangelogMode.Builder builder = ChangelogMode.newBuilder();
for (byte change : changes) {
builder.addContainedKind(RowKind.fromByteValue(change));
}
builder.keyOnlyDeletes(keyOnlyDeletes);
return builder.build();
}
}
|
RuntimeChangelogMode
|
java
|
quarkusio__quarkus
|
extensions/opentelemetry/runtime/src/main/java/io/quarkus/opentelemetry/runtime/config/runtime/AttributeConfig.java
|
{
"start": 233,
"end": 706
}
|
interface ____ {
/**
* The maximum length of attribute values. Applies to spans and logs.
* <p>
* By default, there is no limit.
*/
@WithName("value.length.limit")
Optional<String> valueLengthLimit();
/**
* The maximum number of attributes. Applies to spans, span events, span links, and logs.
* <p>
* Default is `128`.
*/
@WithName("count.limit")
@WithDefault("128")
Integer countLimit();
}
|
AttributeConfig
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/scalar/UnaryScalarFunction.java
|
{
"start": 802,
"end": 1828
}
|
class ____ extends ScalarFunction {
private final Expression field;
protected UnaryScalarFunction(Source source, Expression field) {
super(source, singletonList(field));
this.field = field;
}
protected UnaryScalarFunction(StreamInput in) throws IOException {
this(Source.readFrom((StreamInput & PlanStreamInput) in), in.readNamedWriteable(Expression.class));
}
@Override
public void writeTo(StreamOutput out) throws IOException {
source().writeTo(out);
out.writeNamedWriteable(field);
}
@Override
public final UnaryScalarFunction replaceChildren(List<Expression> newChildren) {
return replaceChild(newChildren.get(0));
}
protected abstract UnaryScalarFunction replaceChild(Expression newChild);
public Expression field() {
return field;
}
@Override
public boolean foldable() {
return field.foldable();
}
@Override
public abstract Object fold(FoldContext ctx);
}
|
UnaryScalarFunction
|
java
|
lettuce-io__lettuce-core
|
src/test/java/io/lettuce/core/protocol/RedisStateMachineResp3UnitTests.java
|
{
"start": 1849,
"end": 8696
}
|
class ____ {
private RedisCodec<String, String> codec = StringCodec.UTF8;
private Charset charset = StandardCharsets.UTF_8;
private CommandOutput<String, String, String> output;
private RedisStateMachine rsm;
@BeforeAll
static void beforeClass() {
LoggerContext ctx = (LoggerContext) LogManager.getContext();
Configuration config = ctx.getConfiguration();
LoggerConfig loggerConfig = config.getLoggerConfig(RedisStateMachine.class.getName());
loggerConfig.setLevel(Level.ALL);
}
@AfterAll
static void afterClass() {
LoggerContext ctx = (LoggerContext) LogManager.getContext();
Configuration config = ctx.getConfiguration();
LoggerConfig loggerConfig = config.getLoggerConfig(RedisStateMachine.class.getName());
loggerConfig.setLevel(null);
}
@BeforeEach
final void createStateMachine() {
output = new StatusOutput<>(codec);
rsm = new RedisStateMachine();
rsm.setProtocolVersion(ProtocolVersion.RESP3);
}
@AfterEach
void tearDown() {
rsm.close();
}
@Test
void single() {
assertThat(rsm.decode(buffer("+OK\r\n"), output)).isTrue();
assertThat(output.get()).isEqualTo("OK");
}
@Test
void error() {
assertThat(rsm.decode(buffer("-ERR\r\n"), output)).isTrue();
assertThat(output.getError()).isEqualTo("ERR");
}
@Test
void errorWithoutLineBreak() {
assertThat(rsm.decode(buffer("-ERR"), output)).isFalse();
assertThat(rsm.decode(buffer("\r\n"), output)).isTrue();
assertThat(output.getError()).isEmpty();
}
@Test
void integer() {
CommandOutput<String, String, Long> output = new IntegerOutput<>(codec);
assertThat(rsm.decode(buffer(":1\r\n"), output)).isTrue();
assertThat((long) output.get()).isEqualTo(1);
}
@Test
void floatNumber() {
CommandOutput<String, String, Double> output = new DoubleOutput<>(codec);
assertThat(rsm.decode(buffer(",12.345\r\n"), output)).isTrue();
assertThat(output.get()).isEqualTo(12.345);
}
@Test
void bigNumber() {
CommandOutput<String, String, String> output = new StatusOutput<>(codec);
assertThat(rsm.decode(buffer("(3492890328409238509324850943850943825024385\r\n"), output)).isTrue();
assertThat(output.get()).isEqualTo("3492890328409238509324850943850943825024385");
}
@Test
void booleanValue() {
CommandOutput<String, String, Boolean> output = new BooleanOutput<>(codec);
assertThat(rsm.decode(buffer("#t\r\n"), output)).isTrue();
assertThat(output.get()).isTrue();
output = new BooleanOutput<>(codec);
assertThat(rsm.decode(buffer("#f\r\n"), output)).isTrue();
assertThat(output.get()).isFalse();
}
@Test
void hello() {
CommandOutput<String, String, Map<String, Object>> output = new GenericMapOutput<>(codec);
assertThat(rsm.decode(buffer("%7\r\n" + "$6\r\nserver\r\n$5\r\nredis\r\n" + "$7\r\nversion\r\n$11\r\n999.999.999\r\n"
+ "$5\r\nproto\r\n:3\r\n" + "$2\r\nid\r\n:184\r\n" + "$4\r\nmode\r\n$10\r\nstandalone\r\n"
+ "$4\r\nrole\r\n$6\r\nmaster\r\n" + "$7\r\nmodules\r\n*0\r\n"), output)).isTrue();
assertThat(output.get()).containsEntry("mode", "standalone");
}
@Test
void bulk() {
CommandOutput<String, String, String> output = new ValueOutput<>(codec);
assertThat(rsm.decode(buffer("$-1\r\n"), output)).isTrue();
assertThat(output.get()).isNull();
assertThat(rsm.decode(buffer("$3\r\nfoo\r\n"), output)).isTrue();
assertThat(output.get()).isEqualTo("foo");
}
@Test
void multi() {
CommandOutput<String, String, List<String>> output = new ValueListOutput<>(codec);
ByteBuf buffer = buffer("*2\r\n$-1\r\n$2\r\nok\r\n");
assertThat(rsm.decode(buffer, output)).isTrue();
assertThat(output.get()).isEqualTo(Arrays.asList(null, "ok"));
}
@Test
void multiSet() {
CommandOutput<String, String, List<String>> output = new ValueListOutput<>(codec);
ByteBuf buffer = buffer("~2\r\n$-1\r\n$2\r\nok\r\n");
assertThat(rsm.decode(buffer, output)).isTrue();
assertThat(output.get()).isEqualTo(Arrays.asList(null, "ok"));
}
@Test
void multiMap() {
CommandOutput<String, String, Map<String, Object>> output = new GenericMapOutput<>(codec);
ByteBuf buffer = buffer("%1\r\n$3\r\nfoo\r\n$2\r\nok\r\n");
assertThat(rsm.decode(buffer, output)).isTrue();
assertThat(output.get()).containsEntry("foo", "ok");
}
@Test
void multiEmptyArray1() {
CommandOutput<String, String, List<Object>> output = new NestedMultiOutput<>(codec);
ByteBuf buffer = buffer("*2\r\n$3\r\nABC\r\n*0\r\n");
assertThat(rsm.decode(buffer, output)).isTrue();
assertThat(output.get().get(0)).isEqualTo("ABC");
assertThat(output.get().get(1)).isEqualTo(Collections.emptyList());
assertThat(output.get()).hasSize(2);
}
@Test
void multiEmptyArray2() {
CommandOutput<String, String, List<Object>> output = new NestedMultiOutput<>(codec);
ByteBuf buffer = buffer("*2\r\n*0\r\n$3\r\nABC\r\n");
assertThat(rsm.decode(buffer, output)).isTrue();
assertThat(output.get().get(0)).isEqualTo(Collections.emptyList());
assertThat(output.get().get(1)).isEqualTo("ABC");
assertThat(output.get()).hasSize(2);
}
@Test
void multiEmptyArray3() {
CommandOutput<String, String, List<Object>> output = new NestedMultiOutput<>(codec);
ByteBuf buffer = buffer("*2\r\n*2\r\n$2\r\nAB\r\n$2\r\nXY\r\n*0\r\n");
assertThat(rsm.decode(buffer, output)).isTrue();
assertThat(output.get().get(0)).isEqualTo(Arrays.asList("AB", "XY"));
assertThat(output.get().get(1)).isEqualTo(Collections.emptyList());
assertThat(output.get()).hasSize(2);
}
@Test
void partialFirstLine() {
assertThat(rsm.decode(buffer("+"), output)).isFalse();
assertThat(rsm.decode(buffer("-"), output)).isFalse();
assertThat(rsm.decode(buffer(":"), output)).isFalse();
assertThat(rsm.decode(buffer("$"), output)).isFalse();
assertThat(rsm.decode(buffer("*"), output)).isFalse();
}
@Test
void invalidReplyType() {
assertThatThrownBy(() -> rsm.decode(buffer("?"), output)).isInstanceOf(RedisException.class);
}
@Test
void sillyTestsForEmmaCoverage() {
assertThat(State.Type.valueOf("SINGLE")).isEqualTo(State.Type.SINGLE);
}
ByteBuf buffer(String content) {
return Unpooled.copiedBuffer(content, charset);
}
}
|
RedisStateMachineResp3UnitTests
|
java
|
spring-projects__spring-framework
|
spring-webmvc/src/test/java/org/springframework/web/servlet/mvc/method/annotation/ServletAnnotationControllerHandlerMethodTests.java
|
{
"start": 142983,
"end": 144812
}
|
class ____ {
@RequestMapping("/map")
public void map(@RequestHeader Map<String, String> headers, Writer writer) throws IOException {
for (Iterator<Map.Entry<String, String>> it = headers.entrySet().iterator(); it.hasNext();) {
Map.Entry<String, String> entry = it.next();
writer.write(entry.getKey() + "=" + entry.getValue());
if (it.hasNext()) {
writer.write(',');
}
}
}
@RequestMapping("/multiValueMap")
public void multiValueMap(@RequestHeader MultiValueMap<String, String> headers, Writer writer)
throws IOException {
for (Iterator<Map.Entry<String, List<String>>> it1 = headers.entrySet().iterator(); it1.hasNext();) {
Map.Entry<String, List<String>> entry = it1.next();
writer.write(entry.getKey() + "=[");
for (Iterator<String> it2 = entry.getValue().iterator(); it2.hasNext();) {
String value = it2.next();
writer.write(value);
if (it2.hasNext()) {
writer.write(',');
}
}
writer.write(']');
if (it1.hasNext()) {
writer.write(',');
}
}
}
@RequestMapping("/httpHeaders")
public void httpHeaders(@RequestHeader HttpHeaders headers, Writer writer) throws IOException {
assertThat(headers.getContentType()).as("Invalid Content-Type").isEqualTo(new MediaType("text", "html"));
for (Iterator<Map.Entry<String, List<String>>> it1 = headers.headerSet().iterator(); it1.hasNext();) {
Map.Entry<String, List<String>> entry = it1.next();
writer.write(entry.getKey() + "=[");
for (Iterator<String> it2 = entry.getValue().iterator(); it2.hasNext();) {
String value = it2.next();
writer.write(value);
if (it2.hasNext()) {
writer.write(',');
}
}
writer.write(']');
if (it1.hasNext()) {
writer.write(',');
}
}
}
}
@Controller
|
RequestHeaderMapController
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/bvt/sql/mysql/select/MySqlSelectTest_214.java
|
{
"start": 982,
"end": 3705
}
|
class ____ extends MysqlTest {
public void test_0() throws Exception {
String sql = "SELECT numbers, animals, n, a\n" +
"FROM (\n" +
" VALUES\n" +
" (ARRAY[2, 5], ARRAY['dog', 'cat', 'bird']),\n" +
" (ARRAY[7, 8, 9], ARRAY['cow', 'pig'])\n" +
") AS x (numbers, animals)\n" +
"CROSS JOIN UNNEST(numbers, animals) AS t (n, a);";
System.out.println(sql);
MySqlStatementParser parser = new MySqlStatementParser(sql, SQLParserFeature.SelectItemGenerateAlias);
List<SQLStatement> statementList = parser.parseStatementList();
assertEquals(1, statementList.size());
SQLStatement stmt = statementList.get(0);
assertEquals("SELECT numbers, animals, n, a\n" +
"FROM (\n" +
"\tVALUES (ARRAY[2, 5], ARRAY['dog', 'cat', 'bird']),\n" +
"\t(ARRAY[7, 8, 9], ARRAY['cow', 'pig'])\n" +
") AS x (numbers, animals)\n" +
"\tCROSS JOIN UNNEST(numbers, animals) AS t (n, a);", stmt.toString());
assertEquals("select numbers, animals, n, a\n" +
"from (\n" +
"\tvalues (ARRAY[2, 5], ARRAY['dog', 'cat', 'bird']),\n" +
"\t(ARRAY[7, 8, 9], ARRAY['cow', 'pig'])\n" +
") AS x (numbers, animals)\n" +
"\tcross join unnest(numbers, animals) as t (n, a);", stmt.clone().toLowerCaseString());
}
public void test_1() throws Exception {
String sql = "SELECT numbers, animals, n, a\n" +
"FROM (\n" +
" VALUES\n" +
" (ARRAY[2, 5], ARRAY['dog', 'cat', 'bird']),\n" +
" (ARRAY[7, 8, 9], ARRAY['cow', 'pig'])\n" +
") AS x (numbers, animals)\n" +
"CROSS JOIN UNNEST(numbers, animals) AS t (n, a);";
List<SQLStatement> statementList = SQLUtils.parseStatements(sql, DbType.postgresql);
assertEquals(1, statementList.size());
SQLStatement stmt = statementList.get(0);
assertEquals("SELECT numbers, animals, n, a\n" +
"FROM (VALUES (ARRAY[2, 5], ARRAY['dog', 'cat', 'bird']), (ARRAY[7, 8, 9], ARRAY['cow', 'pig'])) AS x (numbers, animals)\n" +
"\tCROSS JOIN UNNEST(numbers, animals) AS t (n, a);", stmt.toString());
assertEquals("select numbers, animals, n, a\n" +
"from (values (ARRAY[2, 5], ARRAY['dog', 'cat', 'bird']), (ARRAY[7, 8, 9], ARRAY['cow', 'pig'])) as x (numbers, animals)\n" +
"\tcross join unnest(numbers, animals) as t (n, a);", stmt.clone().toLowerCaseString());
}
}
|
MySqlSelectTest_214
|
java
|
assertj__assertj-core
|
assertj-core/src/main/java/org/assertj/core/api/AbstractCollectionAssert.java
|
{
"start": 1542,
"end": 6474
}
|
class ____<SELF extends AbstractCollectionAssert<SELF, ACTUAL, ELEMENT, ELEMENT_ASSERT>,
ACTUAL extends Collection<? extends ELEMENT>,
ELEMENT,
ELEMENT_ASSERT extends AbstractAssert<ELEMENT_ASSERT, ELEMENT>>
extends AbstractIterableAssert<SELF, ACTUAL, ELEMENT, ELEMENT_ASSERT> {
//@format:on
protected AbstractCollectionAssert(ACTUAL actual, Class<?> selfType) {
super(actual, selfType);
}
/**
* Verifies that the actual collection is unmodifiable, i.e., throws an {@link UnsupportedOperationException} with
* any attempt to modify the collection.
* <p>
* Example:
* <pre><code class='java'> // assertions will pass
* assertThat(Collections.unmodifiableCollection(new ArrayList<>())).isUnmodifiable();
* assertThat(Collections.unmodifiableList(new ArrayList<>())).isUnmodifiable();
* assertThat(Collections.unmodifiableSet(new HashSet<>())).isUnmodifiable();
*
* // assertions will fail
* assertThat(new ArrayList<>()).isUnmodifiable();
* assertThat(new HashSet<>()).isUnmodifiable();</code></pre>
*
* @return {@code this} assertion object.
* @throws AssertionError if the actual collection is modifiable.
* @see Collections#unmodifiableCollection(java.util.Collection)
* @see Collections#unmodifiableList(java.util.List)
* @see Collections#unmodifiableSet(java.util.Set)
*/
@Beta
public SELF isUnmodifiable() {
isNotNull();
assertIsUnmodifiable();
return myself;
}
@SuppressWarnings("unchecked")
private void assertIsUnmodifiable() {
switch (actual.getClass().getName()) {
case "java.util.Collections$EmptyList":
case "java.util.Collections$EmptyNavigableSet":
case "java.util.Collections$EmptySet":
case "java.util.Collections$EmptySortedSet":
case "java.util.Collections$SingletonList":
case "java.util.Collections$SingletonSet":
// immutable by contract, although not all methods throw UnsupportedOperationException
return;
}
expectUnsupportedOperationException(() -> actual.add(null), "Collection.add(null)");
expectUnsupportedOperationException(() -> actual.addAll(emptyCollection()), "Collection.addAll(emptyCollection())");
expectUnsupportedOperationException(actual::clear, "Collection.clear()");
expectUnsupportedOperationException(() -> actual.iterator().remove(), "Collection.iterator().remove()");
expectUnsupportedOperationException(() -> actual.remove(null), "Collection.remove(null)");
expectUnsupportedOperationException(() -> actual.removeAll(emptyCollection()), "Collection.removeAll(emptyCollection())");
expectUnsupportedOperationException(() -> actual.removeIf(element -> true), "Collection.removeIf(element -> true)");
expectUnsupportedOperationException(() -> actual.retainAll(emptyCollection()), "Collection.retainAll(emptyCollection())");
if (actual instanceof List) {
List<ELEMENT> list = (List<ELEMENT>) actual;
expectUnsupportedOperationException(() -> list.add(0, null), "List.add(0, null)");
expectUnsupportedOperationException(() -> list.addAll(0, emptyCollection()), "List.addAll(0, emptyCollection())");
expectUnsupportedOperationException(() -> list.listIterator().add(null), "List.listIterator().add(null)");
expectUnsupportedOperationException(() -> list.listIterator().remove(), "List.listIterator().remove()");
expectUnsupportedOperationException(() -> list.listIterator().set(null), "List.listIterator().set(null)");
expectUnsupportedOperationException(() -> list.remove(0), "List.remove(0)");
expectUnsupportedOperationException(() -> list.replaceAll(identity()), "List.replaceAll(identity())");
expectUnsupportedOperationException(() -> list.set(0, null), "List.set(0, null)");
expectUnsupportedOperationException(() -> list.sort((o1, o2) -> 0), "List.sort((o1, o2) -> 0)");
}
if (actual instanceof NavigableSet) {
NavigableSet<ELEMENT> set = (NavigableSet<ELEMENT>) actual;
expectUnsupportedOperationException(() -> set.descendingIterator().remove(), "NavigableSet.descendingIterator().remove()");
expectUnsupportedOperationException(set::pollFirst, "NavigableSet.pollFirst()");
expectUnsupportedOperationException(set::pollLast, "NavigableSet.pollLast()");
}
}
private void expectUnsupportedOperationException(Runnable runnable, String method) {
try {
runnable.run();
throwAssertionError(shouldBeUnmodifiable(method));
} catch (UnsupportedOperationException e) {
// happy path
} catch (RuntimeException e) {
throwAssertionError(shouldBeUnmodifiable(method, e));
}
}
private <E extends ELEMENT> Collection<E> emptyCollection() {
return Collections.emptyList();
}
}
|
AbstractCollectionAssert
|
java
|
elastic__elasticsearch
|
x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/validation/DenseEmbeddingModelValidator.java
|
{
"start": 860,
"end": 3593
}
|
class ____ implements ModelValidator {
private final ServiceIntegrationValidator serviceIntegrationValidator;
public DenseEmbeddingModelValidator(ServiceIntegrationValidator serviceIntegrationValidator) {
this.serviceIntegrationValidator = serviceIntegrationValidator;
}
@Override
public void validate(InferenceService service, Model model, TimeValue timeout, ActionListener<Model> listener) {
serviceIntegrationValidator.validate(service, model, timeout, listener.delegateFailureAndWrap((delegate, r) -> {
delegate.onResponse(postValidate(service, model, r));
}));
}
private Model postValidate(InferenceService service, Model model, InferenceServiceResults results) {
if (results instanceof DenseEmbeddingResults<?> embeddingResults) {
var serviceSettings = model.getServiceSettings();
var dimensions = serviceSettings.dimensions();
int embeddingSize = getEmbeddingSize(embeddingResults);
if (Boolean.TRUE.equals(serviceSettings.dimensionsSetByUser())
&& dimensions != null
&& (dimensions.equals(embeddingSize) == false)) {
throw new ElasticsearchStatusException(
Strings.format(
"The retrieved embeddings size [%s] does not match the size specified in the settings [%s]. "
+ "Please recreate the [%s] configuration with the correct dimensions",
embeddingResults.getFirstEmbeddingSize(),
serviceSettings.dimensions(),
model.getInferenceEntityId()
),
RestStatus.BAD_REQUEST
);
}
return service.updateModelWithEmbeddingDetails(model, embeddingSize);
} else {
throw new ElasticsearchStatusException(
"Validation call did not return expected results type. "
+ "Expected a result of type ["
+ DenseEmbeddingResults.class.getSimpleName()
+ "] got ["
+ (results == null ? "null" : results.getClass().getSimpleName())
+ "]",
RestStatus.BAD_REQUEST
);
}
}
private int getEmbeddingSize(DenseEmbeddingResults<?> embeddingResults) {
int embeddingSize;
try {
embeddingSize = embeddingResults.getFirstEmbeddingSize();
} catch (Exception e) {
throw new ElasticsearchStatusException("Could not determine embedding size", RestStatus.BAD_REQUEST, e);
}
return embeddingSize;
}
}
|
DenseEmbeddingModelValidator
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/ArrayToStringTest.java
|
{
"start": 8511,
"end": 9238
}
|
class ____ {
private static final int[] a = {1, 2, 3};
public void stringVariableAddsArrayAndAssigns() {
String b = "a string";
// BUG: Diagnostic contains: += Arrays.toString(a)
b += a;
}
}\
""")
.doTest();
}
@Test
public void negativeCompoundAssignment() {
compilationHelper
.addSourceLines(
"ArrayToStringCompoundAssignmentNegativeCases.java",
"""
package com.google.errorprone.bugpatterns.testdata;
/**
* @author adgar@google.com (Mike Edgar)
*/
public
|
ArrayToStringCompoundAssignmentPositiveCases
|
java
|
spring-projects__spring-framework
|
spring-context/src/test/java/org/springframework/aop/framework/CglibProxyTests.java
|
{
"start": 14204,
"end": 14274
}
|
enum ____ implements MyInterface {
C, D
}
public static
|
MyOtherEnum
|
java
|
apache__camel
|
core/camel-core-model/src/main/java/org/apache/camel/builder/EndpointConsumerBuilder.java
|
{
"start": 1095,
"end": 2022
}
|
interface ____ extends EndpointConsumerResolver {
/**
* Builds the raw url of this endpoint. This API is only intended for Camel internally.
*/
String getRawUri();
/**
* Adds an option to this endpoint. This API is only intended for Camel internally.
*/
void doSetProperty(String name, Object value);
/**
* Adds a multi-value option to this endpoint. This API is only intended for Camel internally.
*/
void doSetMultiValueProperty(String name, String key, Object value);
/**
* Adds multi-value options to this endpoint. This API is only intended for Camel internally.
*/
void doSetMultiValueProperties(String name, String prefix, Map<String, Object> values);
/**
* Builds a dynamic expression of this endpoint url. This API is only intended for Camel internally.
*/
Expression expr(CamelContext camelContext);
}
|
EndpointConsumerBuilder
|
java
|
google__dagger
|
javatests/artifacts/dagger/build-tests/src/test/java/buildtests/TransitiveSubcomponentModulesTest.java
|
{
"start": 2209,
"end": 3932
}
|
class ____ {",
" public abstract int getInt();",
"}"));
BuildResult result;
switch (transitiveDependencyType) {
case "implementation":
result = runner.buildAndFail();
assertThat(result.getOutput()).contains("Task :app:compileJava FAILED");
String expectedErrorMsg =
"error: ComponentProcessingStep was unable to process 'app.MyComponent' because"
+ " 'library2.TransitiveModule' could not be resolved."
+ "\n "
+ "\n Dependency trace:"
+ "\n => element (CLASS): library1.MySubcomponent"
+ "\n => annotation type: dagger.Subcomponent"
+ "\n => annotation: @dagger.Subcomponent(modules={library2.TransitiveModule})"
+ "\n => annotation value (TYPE_ARRAY): modules={library2.TransitiveModule}"
+ "\n => annotation value (TYPE): modules=library2.TransitiveModule";
assertThat(result.getOutput()).contains(expectedErrorMsg);
break;
case "api":
result = runner.build();
assertThat(result.task(":app:assemble").getOutcome()).isEqualTo(SUCCESS);
break;
}
}
@Test
public void testSubcomponentAnnotationWithModuleIncludesTransitiveModuleDependencies()
throws IOException {
GradleRunner runner =
setupRunner(
GradleFile.create(
"MySubcomponent.java",
"package library1;",
"",
"import dagger.Subcomponent;",
"",
"@Subcomponent(modules = IncludesTransitiveModule.class)",
"public abstract
|
MySubcomponent
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
|
{
"start": 53878,
"end": 57415
}
|
class ____ implements
MultipleArcTransition<ContainerImpl,ContainerEvent,ContainerState> {
@SuppressWarnings("unchecked")
@Override
public ContainerState transition(ContainerImpl container,
ContainerEvent event) {
ContainerResourceLocalizedEvent rsrcEvent = (ContainerResourceLocalizedEvent) event;
LocalResourceRequest resourceRequest = rsrcEvent.getResource();
Path location = rsrcEvent.getLocation();
Set<String> syms =
container.resourceSet.resourceLocalized(resourceRequest, location);
if (null == syms) {
LOG.info("Localized resource " + resourceRequest +
" for container " + container.containerId);
return ContainerState.LOCALIZING;
}
final long localizedSize = rsrcEvent.getSize();
if (localizedSize > 0) {
container.localizationCounts
[LocalizationCounter.BYTES_MISSED.ordinal()] += localizedSize;
container.localizationCounts
[LocalizationCounter.FILES_MISSED.ordinal()]++;
} else if (localizedSize < 0) {
// cached: recorded negative, restore the sign
container.localizationCounts
[LocalizationCounter.BYTES_CACHED.ordinal()] -= localizedSize;
container.localizationCounts
[LocalizationCounter.FILES_CACHED.ordinal()]++;
}
container.metrics.localizationCacheHitMiss(localizedSize);
// check to see if this resource should be uploaded to the shared cache
// as well
if (shouldBeUploadedToSharedCache(container, resourceRequest)) {
container.resourceSet.getResourcesToBeUploaded()
.put(resourceRequest, location);
}
if (!container.resourceSet.getPendingResources().isEmpty()) {
return ContainerState.LOCALIZING;
}
// duration = end - start;
// record in RequestResourcesTransition: -start
// add in LocalizedTransition: +end
//
container.localizationCounts[LocalizationCounter.MILLIS.ordinal()]
+= Time.monotonicNow();
container.metrics.localizationComplete(
container.localizationCounts[LocalizationCounter.MILLIS.ordinal()]);
container.dispatcher.getEventHandler().handle(
new ContainerLocalizationEvent(LocalizationEventType.
CONTAINER_RESOURCES_LOCALIZED, container));
container.sendScheduleEvent();
container.metrics.endInitingContainer();
// If this is a recovered container that has already launched, skip
// uploading resources to the shared cache. We do this to avoid uploading
// the same resources multiple times. The tradeoff is that in the case of
// a recovered container, there is a chance that resources don't get
// uploaded into the shared cache. This is OK because resources are not
// acknowledged by the SCM until they have been uploaded by the node
// manager.
if (container.recoveredStatus != RecoveredContainerStatus.LAUNCHED
&& container.recoveredStatus != RecoveredContainerStatus.COMPLETED) {
// kick off uploads to the shared cache
container.dispatcher.getEventHandler().handle(
new SharedCacheUploadEvent(
container.resourceSet.getResourcesToBeUploaded(), container
.getLaunchContext(), container.getUser(),
SharedCacheUploadEventType.UPLOAD));
}
return ContainerState.SCHEDULED;
}
}
/**
* Transition to start the Re-Initialization process.
*/
static
|
LocalizedTransition
|
java
|
apache__hadoop
|
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBConfiguration.java
|
{
"start": 4103,
"end": 4732
}
|
class ____
* @param dbUrl JDBC DB access URL.
* @param userName DB access username
* @param passwd DB access passwd
*/
public static void configureDB(Configuration conf, String driverClass,
String dbUrl, String userName, String passwd) {
conf.set(DRIVER_CLASS_PROPERTY, driverClass);
conf.set(URL_PROPERTY, dbUrl);
if (userName != null) {
conf.set(USERNAME_PROPERTY, userName);
}
if (passwd != null) {
conf.set(PASSWORD_PROPERTY, passwd);
}
}
/**
* Sets the DB access related fields in the JobConf.
* @param job the job
* @param driverClass JDBC Driver
|
name
|
java
|
quarkusio__quarkus
|
extensions/load-shedding/runtime/src/main/java/io/quarkus/load/shedding/runtime/ManagementRequestPrioritizer.java
|
{
"start": 414,
"end": 1658
}
|
class ____ implements RequestPrioritizer<RoutingContext> {
private final String managementPath;
@Inject
public ManagementRequestPrioritizer(
VertxHttpBuildTimeConfig buildTimeConfig,
ManagementInterfaceBuildTimeConfig managementBuildTimeConfig) {
if (managementBuildTimeConfig.enabled()) {
managementPath = null;
return;
}
if (buildTimeConfig.nonApplicationRootPath().startsWith("/")) {
if (buildTimeConfig.nonApplicationRootPath().equals(buildTimeConfig.rootPath())) {
managementPath = null;
return;
}
managementPath = buildTimeConfig.nonApplicationRootPath();
return;
}
managementPath = buildTimeConfig.rootPath() + buildTimeConfig.nonApplicationRootPath();
}
@Override
public boolean appliesTo(Object request) {
if (managementPath != null && request instanceof RoutingContext ctx) {
return ctx.normalizedPath().startsWith(managementPath);
}
return false;
}
@Override
public RequestPriority priority(RoutingContext request) {
return RequestPriority.CRITICAL;
}
}
|
ManagementRequestPrioritizer
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/search/runtime/LongScriptFieldDistanceFeatureQuery.java
|
{
"start": 3282,
"end": 7131
}
|
class ____ extends Scorer {
private final AbstractLongFieldScript script;
private final TwoPhaseIterator twoPhase;
private final DocIdSetIterator disi;
private final float weight;
protected DistanceScorer(AbstractLongFieldScript script, int maxDoc, float boost) {
this.script = script;
twoPhase = new TwoPhaseIterator(DocIdSetIterator.all(maxDoc)) {
@Override
public boolean matches() {
return LongScriptFieldDistanceFeatureQuery.this.matches(script, approximation.docID());
}
@Override
public float matchCost() {
return MATCH_COST;
}
};
disi = TwoPhaseIterator.asDocIdSetIterator(twoPhase);
this.weight = boost;
}
@Override
public int docID() {
return disi.docID();
}
@Override
public DocIdSetIterator iterator() {
return disi;
}
@Override
public TwoPhaseIterator twoPhaseIterator() {
return twoPhase;
}
@Override
public float getMaxScore(int upTo) {
return weight;
}
@Override
public float score() {
if (script.count() == 0) {
return 0;
}
return LongScriptFieldDistanceFeatureQuery.this.score(weight, (double) minAbsoluteDistance(script));
}
}
long minAbsoluteDistance(AbstractLongFieldScript script) {
long minDistance = Long.MAX_VALUE;
for (int i = 0; i < script.count(); i++) {
minDistance = Math.min(minDistance, distanceFor(script.values()[i]));
}
return minDistance;
}
long valueWithMinAbsoluteDistance(AbstractLongFieldScript script) {
long minDistance = Long.MAX_VALUE;
long minDistanceValue = Long.MAX_VALUE;
for (int i = 0; i < script.count(); i++) {
long distance = distanceFor(script.values()[i]);
if (distance < minDistance) {
minDistance = distance;
minDistanceValue = script.values()[i];
}
}
return minDistanceValue;
}
long distanceFor(long value) {
long distance = Math.max(value, origin) - Math.min(value, origin);
if (distance < 0) {
// The distance doesn't fit into signed long so clamp it to MAX_VALUE
return Long.MAX_VALUE;
}
return distance;
}
float score(float weight, double distance) {
return (float) (weight * (pivot / (pivot + distance)));
}
@Override
public String toString(String field) {
StringBuilder b = new StringBuilder();
if (false == fieldName().equals(field)) {
b.append(fieldName()).append(":");
}
b.append(getClass().getSimpleName());
b.append("(origin=").append(origin);
b.append(",pivot=").append(pivot).append(")");
return b.toString();
}
@Override
public int hashCode() {
return Objects.hash(super.hashCode(), origin, pivot);
}
@Override
public boolean equals(Object obj) {
if (false == super.equals(obj)) {
return false;
}
LongScriptFieldDistanceFeatureQuery other = (LongScriptFieldDistanceFeatureQuery) obj;
return origin == other.origin && pivot == other.pivot;
}
@Override
public void visit(QueryVisitor visitor) {
// No subclasses contain any Terms because those have to be strings.
if (visitor.acceptField(fieldName())) {
visitor.visitLeaf(this);
}
}
long origin() {
return origin;
}
long pivot() {
return pivot;
}
}
|
DistanceScorer
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemChecksum.java
|
{
"start": 2527,
"end": 12822
}
|
class ____ extends AbstractAbfsIntegrationTest {
private static final int MB_2 = 2 * ONE_MB;
private static final int MB_3 = 3 * ONE_MB;
private static final int MB_4 = 4 * ONE_MB;
private static final int MB_8 = 8 * ONE_MB;
private static final int MB_15 = 15 * ONE_MB;
private static final int MB_16 = 16 * ONE_MB;
private static final String INVALID_MD5_TEXT = "Text for Invalid MD5 Computation";
public ITestAzureBlobFileSystemChecksum() throws Exception {
super();
}
@Test
public void testWriteReadWithChecksum() throws Exception {
testWriteReadWithChecksumInternal(true);
testWriteReadWithChecksumInternal(false);
}
@Test
public void testAppendWithChecksumAtDifferentOffsets() throws Exception {
AzureBlobFileSystem fs = getConfiguredFileSystem(MB_4, MB_4, true);
if (!getIsNamespaceEnabled(fs)) {
assumeThat(isAppendBlobEnabled()).as("Not valid for APPEND BLOB").isFalse();
}
AbfsClient client = fs.getAbfsStore().getClientHandler().getIngressClient();
Path path = path("testPath" + getMethodName());
AbfsOutputStream os = (AbfsOutputStream) fs.create(path).getWrappedStream();
byte[] data = generateRandomBytes(MB_4);
int pos = 0;
pos += appendWithOffsetHelper(os, client, path, data, fs, pos, 0, client.computeMD5Hash(data, 0, data.length));
pos += appendWithOffsetHelper(os, client, path, data, fs, pos, ONE_MB, client.computeMD5Hash(data, ONE_MB, data.length - ONE_MB));
pos += appendWithOffsetHelper(os, client, path, data, fs, pos, MB_2, client.computeMD5Hash(data, MB_2, data.length-MB_2));
appendWithOffsetHelper(os, client, path, data, fs, pos, MB_4 - 1, client.computeMD5Hash(data, MB_4 - 1, data.length - (MB_4 - 1)));
fs.close();
}
@Test
public void testReadWithChecksumAtDifferentOffsets() throws Exception {
AzureBlobFileSystem fs = getConfiguredFileSystem(MB_4, MB_4, true);
AbfsClient client = fs.getAbfsStore().getClient();
Path path = path("testPath" + getMethodName());
byte[] data = generateRandomBytes(MB_16);
createFileWithData(path, data, fs);
readWithOffsetAndPositionHelper(client, path, data, fs, 0, 0);
readWithOffsetAndPositionHelper(client, path, data, fs, MB_4, 0);
readWithOffsetAndPositionHelper(client, path, data, fs, MB_4, ONE_MB);
readWithOffsetAndPositionHelper(client, path, data, fs, MB_8, MB_2);
readWithOffsetAndPositionHelper(client, path, data, fs, MB_15, MB_4 - 1);
fs.close();
}
@Test
public void testWriteReadWithChecksumAndOptions() throws Exception {
testWriteReadWithChecksumAndOptionsInternal(true);
testWriteReadWithChecksumAndOptionsInternal(false);
}
@Test
public void testAbfsInvalidChecksumExceptionInAppend() throws Exception {
AzureBlobFileSystem fs = getConfiguredFileSystem(MB_4, MB_4, true);
AbfsClient spiedClient = Mockito.spy(fs.getAbfsStore().getClientHandler().getIngressClient());
Path path = path("testPath" + getMethodName());
AbfsOutputStream os = Mockito.spy((AbfsOutputStream) fs.create(path).getWrappedStream());
byte[] data= generateRandomBytes(MB_4);
String invalidMD5Hash = spiedClient.computeMD5Hash(
INVALID_MD5_TEXT.getBytes(), 0, INVALID_MD5_TEXT.length());
Mockito.doReturn(invalidMD5Hash).when(spiedClient).computeMD5Hash(any(),
any(Integer.class), any(Integer.class));
Mockito.doReturn(invalidMD5Hash).when(os).getMd5();
AbfsRestOperationException ex = intercept(AbfsInvalidChecksumException.class, () -> {
appendWithOffsetHelper(os, spiedClient, path, data, fs, 0, 0, invalidMD5Hash);
});
Assertions.assertThat(ex.getErrorCode())
.describedAs("Exception Message should contain MD5Mismatch")
.isEqualTo(AzureServiceErrorCode.MD5_MISMATCH);
fs.close();
}
@Test
public void testAbfsInvalidChecksumExceptionInRead() throws Exception {
AzureBlobFileSystem fs = getConfiguredFileSystem(MB_4, MB_4, true);
AbfsClient spiedClient = Mockito.spy(fs.getAbfsStore().getClient());
Path path = path("testPath" + getMethodName());
byte[] data = generateRandomBytes(MB_3);
createFileWithData(path, data, fs);
String invalidMD5Hash = spiedClient.computeMD5Hash(
INVALID_MD5_TEXT.getBytes(), 0, INVALID_MD5_TEXT.length());
Mockito.doReturn(invalidMD5Hash).when(spiedClient).computeMD5Hash(any(),
any(Integer.class), any(Integer.class));
intercept(AbfsInvalidChecksumException.class, () -> {
readWithOffsetAndPositionHelper(spiedClient, path, data, fs, 0, 0);
});
}
private void testWriteReadWithChecksumInternal(final boolean readAheadEnabled)
throws Exception {
AzureBlobFileSystem fs = getConfiguredFileSystem(MB_4, MB_4, readAheadEnabled);
final int dataSize = MB_16 + 1000;
Path testPath = path("testPath" + getMethodName());
byte[] bytesUploaded = generateRandomBytes(dataSize);
createFileWithData(testPath, bytesUploaded, fs);
try (FSDataInputStream in = fs.open(testPath)) {
byte[] bytesRead = new byte[bytesUploaded.length];
in.read(bytesRead, 0, dataSize);
// Verify that the data read is same as data written
Assertions.assertThat(bytesRead)
.describedAs("Bytes read with checksum enabled are not as expected")
.containsExactly(bytesUploaded);
}
}
/**
* Helper method that generates blockId.
* @param position The offset needed to generate blockId.
* @return String representing the block ID generated.
*/
private String generateBlockId(AbfsOutputStream os, long position) {
String streamId = os.getStreamID();
String streamIdHash = Integer.toString(streamId.hashCode());
String blockId = String.format("%d_%s", position, streamIdHash);
byte[] blockIdByteArray = new byte[BLOCK_ID_LENGTH];
System.arraycopy(blockId.getBytes(), 0, blockIdByteArray, 0, Math.min(BLOCK_ID_LENGTH, blockId.length()));
return new String(Base64.encodeBase64(blockIdByteArray), StandardCharsets.UTF_8);
}
/**
* Verify that the checksum computed on client side matches with the one
* computed at server side. If not, request will fail with 400 Bad request.
* @param client
* @param path
* @param data
* @param fs
* @param offset
* @throws Exception
*/
private int appendWithOffsetHelper(AbfsOutputStream os, AbfsClient client, Path path,
byte[] data, AzureBlobFileSystem fs, final int pos, final int offset, String md5) throws Exception {
String blockId = generateBlockId(os, pos);
String eTag = os.getIngressHandler().getETag();
AppendRequestParameters reqParams = new AppendRequestParameters(
pos, offset, data.length - offset, APPEND_MODE, isAppendBlobEnabled(), null, true,
new BlobAppendRequestParameters(blockId, eTag), md5);
client.append(path.toUri().getPath(), data, reqParams, null, null,
getTestTracingContext(fs, false));
return reqParams.getLength();
}
/**
* Verify that the checksum returned by server is same as computed on client
* side even when read from different positions and stored at different offsets
* If not server request will pass but client.read() will fail with
* {@link org.apache.hadoop.fs.azurebfs.contracts.exceptions.AbfsInvalidChecksumException}
* @param client
* @param path
* @param data
* @param fs
* @param position
* @param offset
* @throws Exception
*/
private void readWithOffsetAndPositionHelper(AbfsClient client, Path path,
byte[] data, AzureBlobFileSystem fs, final int position,
final int offset) throws Exception {
int bufferLength = fs.getAbfsStore().getAbfsConfiguration().getReadBufferSize();
byte[] readBuffer = new byte[bufferLength];
final int readLength = bufferLength - offset;
client.read(path.toUri().getPath(), position, readBuffer, offset, readLength,
"*", null, null, getTestTracingContext(fs, false));
byte[] actual = Arrays.copyOfRange(readBuffer, offset, offset + readLength);
byte[] expected = Arrays.copyOfRange(data, position, readLength + position);
Assertions.assertThat(actual)
.describedAs("Data read should be same as Data Written")
.containsExactly(expected);
}
private void testWriteReadWithChecksumAndOptionsInternal(
final boolean readAheadEnabled) throws Exception {
AzureBlobFileSystem fs = getConfiguredFileSystem(MB_8, ONE_MB, readAheadEnabled);
final int dataSize = MB_16 + 1000;
Path testPath = path("testPath" + getMethodName());
byte[] bytesUploaded = generateRandomBytes(dataSize);
createFileWithData(testPath, bytesUploaded, fs);
Configuration cpm1 = new Configuration();
cpm1.setBoolean(FS_AZURE_BUFFERED_PREAD_DISABLE, true);
try (FSDataInputStream in = fs.openFileWithOptions(testPath,
new OpenFileParameters().withOptions(cpm1)
.withMandatoryKeys(new HashSet<>())).get()) {
byte[] bytesRead = new byte[dataSize];
in.read(1, bytesRead, 1, MB_4);
// Verify that the data read is same as data written
Assertions.assertThat(Arrays.copyOfRange(bytesRead, 1, MB_4))
.describedAs("Bytes read with checksum enabled are not as expected")
.containsExactly(Arrays.copyOfRange(bytesUploaded, 1, MB_4));
}
}
private void createFileWithData(Path path, byte[] data, AzureBlobFileSystem fs) throws Exception {
try (FSDataOutputStream out = fs.create(path)) {
out.write(data);
out.hflush();
}
}
private AzureBlobFileSystem getConfiguredFileSystem(final int writeBuffer,
final int readBuffer, final boolean readAheadEnabled) throws Exception {
AzureBlobFileSystem fs = createFileSystem();
AbfsConfiguration abfsConf = fs.getAbfsStore().getAbfsConfiguration();
abfsConf.setIsChecksumValidationEnabled(true);
abfsConf.setWriteBufferSize(writeBuffer);
abfsConf.setReadBufferSize(readBuffer);
abfsConf.setReadAheadEnabled(readAheadEnabled);
return fs;
}
public static byte[] generateRandomBytes(int numBytes) {
SecureRandom secureRandom = new SecureRandom();
byte[] randomBytes = new byte[numBytes];
secureRandom.nextBytes(randomBytes);
return randomBytes;
}
}
|
ITestAzureBlobFileSystemChecksum
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/parser/deser/SmartMatchTest.java
|
{
"start": 865,
"end": 1096
}
|
class ____ {
private int messageId;
public int getMessageId() {
return messageId;
}
public void setMessageId(int messageId) {
this.messageId = messageId;
}
}
}
|
VO2
|
java
|
spring-projects__spring-framework
|
spring-test/src/test/java/org/springframework/test/context/jdbc/InferredDataSourceTransactionalSqlScriptsTests.java
|
{
"start": 2851,
"end": 3664
}
|
class ____ {
@Bean
PlatformTransactionManager txMgr1() {
return new DataSourceTransactionManager(dataSource1());
}
@Bean
PlatformTransactionManager txMgr2() {
return new DataSourceTransactionManager(dataSource2());
}
@Bean
DataSource dataSource1() {
return new EmbeddedDatabaseBuilder()
.generateUniqueName(true)
.addScript("classpath:/org/springframework/test/context/jdbc/schema.sql")
.addScript("classpath:/org/springframework/test/context/jdbc/data.sql")
.build();
}
@Bean
DataSource dataSource2() {
return new EmbeddedDatabaseBuilder()
.generateUniqueName(true)
.addScript("classpath:/org/springframework/test/context/jdbc/schema.sql")
.addScript("classpath:/org/springframework/test/context/jdbc/data.sql")
.build();
}
}
}
|
Config
|
java
|
google__guava
|
guava-testlib/test/com/google/common/testing/NullPointerTesterTest.java
|
{
"start": 32466,
"end": 34633
}
|
class ____ extends DefaultValueChecker {
@SuppressWarnings("unused") // called by NullPointerTester
@Keep
public void checkDefaultValuesForTheseTypes(
Gender gender,
Integer integer,
int i,
String string,
CharSequence charSequence,
List<String> list,
ImmutableList<Integer> immutableList,
Map<String, Integer> map,
ImmutableMap<String, String> immutableMap,
Set<String> set,
ImmutableSet<Integer> immutableSet,
SortedSet<Number> sortedSet,
ImmutableSortedSet<Number> immutableSortedSet,
Multiset<String> multiset,
ImmutableMultiset<Integer> immutableMultiset,
Multimap<String, Integer> multimap,
ImmutableMultimap<String, Integer> immutableMultimap,
Table<String, Integer, Exception> table,
ImmutableTable<Integer, String, Exception> immutableTable) {
calledWith(
gender,
integer,
i,
string,
charSequence,
list,
immutableList,
map,
immutableMap,
set,
immutableSet,
sortedSet,
immutableSortedSet,
multiset,
immutableMultiset,
multimap,
immutableMultimap,
table,
immutableTable);
}
final void check() {
runTester()
.assertNonNullValues(
Gender.MALE,
Integer.valueOf(0),
0,
"",
"",
ImmutableList.of(),
ImmutableList.of(),
ImmutableMap.of(),
ImmutableMap.of(),
ImmutableSet.of(),
ImmutableSet.of(),
ImmutableSortedSet.of(),
ImmutableSortedSet.of(),
ImmutableMultiset.of(),
ImmutableMultiset.of(),
ImmutableMultimap.of(),
ImmutableMultimap.of(),
ImmutableTable.of(),
ImmutableTable.of());
}
}
public void testDefaultValues() {
new AllDefaultValuesChecker().check();
}
private static
|
AllDefaultValuesChecker
|
java
|
apache__camel
|
core/camel-core-reifier/src/main/java/org/apache/camel/reifier/dataformat/SoapDataFormatReifier.java
|
{
"start": 1027,
"end": 1845
}
|
class ____ extends DataFormatReifier<SoapDataFormat> {
public SoapDataFormatReifier(CamelContext camelContext, DataFormatDefinition definition) {
super(camelContext, (SoapDataFormat) definition);
}
@Override
protected void prepareDataFormatConfig(Map<String, Object> properties) {
properties.put("elementNameStrategy", or(definition.getElementNameStrategyObject(),
asRef(definition.getElementNameStrategy())));
properties.put("encoding", definition.getEncoding());
properties.put("version", definition.getVersion());
properties.put("namespacePrefix", asRef(definition.getNamespacePrefix()));
properties.put("schema", definition.getSchema());
properties.put("contextPath", definition.getContextPath());
}
}
|
SoapDataFormatReifier
|
java
|
resilience4j__resilience4j
|
resilience4j-spring/src/test/java/io/github/resilience4j/bulkhead/configure/BulkHeadInitializationInAspectTest.java
|
{
"start": 1005,
"end": 1083
}
|
class ____ {
@TestConfiguration
static
|
BulkHeadInitializationInAspectTest
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/sql/model/jdbc/UpsertOperation.java
|
{
"start": 501,
"end": 911
}
|
class ____ extends AbstractJdbcMutation {
public UpsertOperation(
TableMapping tableDetails,
MutationTarget<?> mutationTarget,
String sql,
List<? extends JdbcParameterBinder> parameterBinders) {
super( tableDetails, mutationTarget, sql, false, new Expectation.RowCount(), parameterBinders );
}
@Override
public MutationType getMutationType() {
return MutationType.UPDATE;
}
}
|
UpsertOperation
|
java
|
quarkusio__quarkus
|
extensions/flyway/deployment/src/test/java/io/quarkus/flyway/test/FlywayExtensionBaselineAtStartExistingSchemaHistoryTableTest.java
|
{
"start": 385,
"end": 1610
}
|
class ____ {
@Inject
Flyway flyway;
static final FlywayH2TestCustomizer customizer = FlywayH2TestCustomizer
.withDbName("quarkus-baseline-at-start-existing-schema-history")
.withPort(11309)
.withInitSqlFile("src/test/resources/h2-init-schema-history-table.sql");
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.setBeforeAllCustomizer(customizer::startH2)
.setAfterAllCustomizer(customizer::stopH2)
.withApplicationRoot((jar) -> jar
.addClass(FlywayH2TestCustomizer.class)
.addAsResource("db/migration/V1.0.0__Quarkus.sql")
.addAsResource("baseline-at-start-existing-schema-history-table-config.properties",
"application.properties"));
@Test
@DisplayName("Baseline at start is not executed against existing schema-history-table")
public void testFlywayConfigInjection() {
MigrationInfo migrationInfo = flyway.info().current();
assertNull(migrationInfo, "Flyway baseline was executed on existing schema history table");
}
}
|
FlywayExtensionBaselineAtStartExistingSchemaHistoryTableTest
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateCalendarJobAction.java
|
{
"start": 1103,
"end": 3431
}
|
class ____ extends LegacyActionRequest {
private String calendarId;
private String jobIdsToAddExpression;
private String jobIdsToRemoveExpression;
public Request(StreamInput in) throws IOException {
super(in);
calendarId = in.readString();
jobIdsToAddExpression = in.readOptionalString();
jobIdsToRemoveExpression = in.readOptionalString();
}
/**
* Job id expressions may be a single job, job group or comma separated
* list of job Ids or groups
*/
public Request(String calendarId, String jobIdsToAddExpression, String jobIdsToRemoveExpression) {
this.calendarId = ExceptionsHelper.requireNonNull(calendarId, Calendar.ID.getPreferredName());
this.jobIdsToAddExpression = jobIdsToAddExpression;
this.jobIdsToRemoveExpression = jobIdsToRemoveExpression;
}
public String getCalendarId() {
return calendarId;
}
public String getJobIdsToAddExpression() {
return jobIdsToAddExpression;
}
public String getJobIdsToRemoveExpression() {
return jobIdsToRemoveExpression;
}
@Override
public ActionRequestValidationException validate() {
return null;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(calendarId);
out.writeOptionalString(jobIdsToAddExpression);
out.writeOptionalString(jobIdsToRemoveExpression);
}
@Override
public int hashCode() {
return Objects.hash(calendarId, jobIdsToAddExpression, jobIdsToRemoveExpression);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Request other = (Request) obj;
return Objects.equals(calendarId, other.calendarId)
&& Objects.equals(jobIdsToAddExpression, other.jobIdsToAddExpression)
&& Objects.equals(jobIdsToRemoveExpression, other.jobIdsToRemoveExpression);
}
}
}
|
Request
|
java
|
redisson__redisson
|
redisson/src/test/java/org/redisson/executor/RedissonExecutorServiceSpringTest.java
|
{
"start": 1855,
"end": 2097
}
|
class ____ {
public String myMethod(String key) {
return "hello " + key;
}
}
private static final String EXECUTOR_NAME = "spring_test";
@Configuration
@ComponentScan
public static
|
SampleBean
|
java
|
quarkusio__quarkus
|
test-framework/arquillian/src/main/java/io/quarkus/arquillian/InjectionEnricher.java
|
{
"start": 996,
"end": 3325
}
|
class ____ implements TestEnricher {
private static final Logger log = Logger.getLogger(TestEnricher.class.getName());
@Inject
@TestScoped
private InstanceProducer<CreationContextHolder> creationalContextProducer;
@Inject
@DeploymentScoped
private Instance<QuarkusDeployment> deployment;
@Override
public void enrich(Object testCase) {
}
@Override
public Object[] resolve(Method method) {
//we need to resolve from inside the
if (method.getParameterCount() > 0) {
ClassLoader old = Thread.currentThread().getContextClassLoader();
try {
CreationContextHolder holder = getCreationalContext();
ClassLoader cl = deployment.get() != null && deployment.get().hasAppClassLoader()
? deployment.get().getAppClassLoader()
: getClass().getClassLoader();
Thread.currentThread().setContextClassLoader(cl);
Class<?> c = cl.loadClass(IsolatedEnricher.class.getName());
BiFunction<Method, Object, Object[]> function = (BiFunction<Method, Object, Object[]>) c
.getDeclaredConstructor().newInstance();
return function.apply(method, holder.creationalContext);
} catch (Exception e) {
throw new RuntimeException(e);
} finally {
Thread.currentThread().setContextClassLoader(old);
}
}
return new Object[0];
}
private CreationContextHolder getCreationalContext() {
try {
ClassLoader cl = deployment.get() != null && deployment.get().hasAppClassLoader()
? deployment.get().getAppClassLoader()
: getClass().getClassLoader();
Class<?> c = cl.loadClass(IsolatedCreationContextCreator.class.getName());
Supplier<Map.Entry<Closeable, Object>> supplier = (Supplier<Map.Entry<Closeable, Object>>) c
.getDeclaredConstructor().newInstance();
Map.Entry<Closeable, Object> val = supplier.get();
return new CreationContextHolder(val.getKey(), val.getValue());
} catch (Exception e) {
throw new RuntimeException(e);
}
}
public static
|
InjectionEnricher
|
java
|
elastic__elasticsearch
|
x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseParser.java
|
{
"start": 154733,
"end": 156203
}
|
class ____ extends BooleanExpressionContext {
public StringContext multiFields;
public StringContext queryString;
public TerminalNode MATCH() {
return getToken(SqlBaseParser.MATCH, 0);
}
public MatchQueryOptionsContext matchQueryOptions() {
return getRuleContext(MatchQueryOptionsContext.class, 0);
}
public List<StringContext> string() {
return getRuleContexts(StringContext.class);
}
public StringContext string(int i) {
return getRuleContext(StringContext.class, i);
}
public MultiMatchQueryContext(BooleanExpressionContext ctx) {
copyFrom(ctx);
}
@Override
public void enterRule(ParseTreeListener listener) {
if (listener instanceof SqlBaseListener) ((SqlBaseListener) listener).enterMultiMatchQuery(this);
}
@Override
public void exitRule(ParseTreeListener listener) {
if (listener instanceof SqlBaseListener) ((SqlBaseListener) listener).exitMultiMatchQuery(this);
}
@Override
public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
if (visitor instanceof SqlBaseVisitor) return ((SqlBaseVisitor<? extends T>) visitor).visitMultiMatchQuery(this);
else return visitor.visitChildren(this);
}
}
@SuppressWarnings("CheckReturnValue")
public static
|
MultiMatchQueryContext
|
java
|
google__dagger
|
javatests/dagger/internal/codegen/ComponentProtectedTypeTest.java
|
{
"start": 2564,
"end": 3463
}
|
class ____ extends TestComponentBase {",
// This component method will be implemented as:
// TestComponentBase.ProtectedType provideProtectedType() {
// return protectedTypeProvider.get();
// }
// The protectedTypeProvider can't be a raw provider, otherwise it will have a type cast
// error. So protected accessibility should be evaluated when checking accessibility of
// a type.
" abstract TestComponentBase.ProtectedType provideProtectedType();",
"}");
CompilerTests.daggerCompiler(baseSrc, componentSrc)
.withProcessingOptions(compilerMode.processorOptions())
.compile(
subject -> {
subject.hasErrorCount(0);
subject.generatedSource(goldenFileRule.goldenSource("test/DaggerTestComponent"));
});
}
}
|
TestComponent
|
java
|
spring-projects__spring-boot
|
module/spring-boot-elasticsearch/src/main/java/org/springframework/boot/elasticsearch/autoconfigure/ElasticsearchProperties.java
|
{
"start": 3723,
"end": 4609
}
|
class ____ {
/**
* Whether the sniffer is enabled.
*/
private boolean enabled;
/**
* Interval between consecutive ordinary sniff executions.
*/
private Duration interval = Duration.ofMinutes(5);
/**
* Delay of a sniff execution scheduled after a failure.
*/
private Duration delayAfterFailure = Duration.ofMinutes(1);
public boolean isEnabled() {
return this.enabled;
}
public void setEnabled(boolean enabled) {
this.enabled = enabled;
}
public Duration getInterval() {
return this.interval;
}
public void setInterval(Duration interval) {
this.interval = interval;
}
public Duration getDelayAfterFailure() {
return this.delayAfterFailure;
}
public void setDelayAfterFailure(Duration delayAfterFailure) {
this.delayAfterFailure = delayAfterFailure;
}
}
public static
|
Sniffer
|
java
|
google__error-prone
|
check_api/src/test/java/com/google/errorprone/util/FindIdentifiersTest.java
|
{
"start": 37313,
"end": 37723
}
|
class ____ {
private void doIt() {
// BUG: Diagnostic contains: [staticField, instanceField]
Reference.test();
}
}
""")
.doTest();
}
/** A {@link BugChecker} that prints whether {@code A} is visible on each member select. */
@BugPattern(severity = SeverityLevel.ERROR, summary = "A is visible")
public static
|
Test
|
java
|
google__guava
|
android/guava-tests/test/com/google/common/util/concurrent/ExecutionSequencerTest.java
|
{
"start": 11670,
"end": 15477
}
|
class ____ {
long count;
}
private static final int ITERATION_COUNT = 50_000;
private static final int DIRECT_EXECUTIONS_PER_THREAD = 100;
@J2ktIncompatible
@GwtIncompatible // threads
public void testAvoidsStackOverflow_multipleThreads() throws Exception {
LongHolder holder = new LongHolder();
ArrayList<ListenableFuture<Integer>> lengthChecks = new ArrayList<>();
List<Integer> completeLengthChecks;
int baseStackDepth;
ExecutorService service = newFixedThreadPool(5);
try {
// Avoid counting frames from the executor itself, or the ExecutionSequencer
baseStackDepth =
serializer
.submit(
new Callable<Integer>() {
@Override
public Integer call() {
return Thread.currentThread().getStackTrace().length;
}
},
service)
.get();
SettableFuture<@Nullable Void> settableFuture = SettableFuture.create();
ListenableFuture<?> unused =
serializer.submitAsync(
new AsyncCallable<@Nullable Void>() {
@Override
public ListenableFuture<@Nullable Void> call() {
return settableFuture;
}
},
directExecutor());
for (int i = 0; i < 50_000; i++) {
if (i % DIRECT_EXECUTIONS_PER_THREAD == 0) {
// after some number of iterations, switch threads
unused =
serializer.submit(
new Callable<@Nullable Void>() {
@Override
public @Nullable Void call() {
holder.count++;
return null;
}
},
service);
} else if (i % DIRECT_EXECUTIONS_PER_THREAD == DIRECT_EXECUTIONS_PER_THREAD - 1) {
// When at max depth, record stack trace depth
lengthChecks.add(
serializer.submit(
new Callable<Integer>() {
@Override
public Integer call() {
holder.count++;
return Thread.currentThread().getStackTrace().length;
}
},
directExecutor()));
} else {
// Otherwise, schedule a task on directExecutor
unused =
serializer.submit(
new Callable<@Nullable Void>() {
@Override
public @Nullable Void call() {
holder.count++;
return null;
}
},
directExecutor());
}
}
settableFuture.set(null);
completeLengthChecks = allAsList(lengthChecks).get();
} finally {
service.shutdown();
}
assertThat(holder.count).isEqualTo(ITERATION_COUNT);
for (int length : completeLengthChecks) {
// Verify that at max depth, less than one stack frame per submitted task was consumed
assertThat(length - baseStackDepth).isLessThan(DIRECT_EXECUTIONS_PER_THREAD / 2);
}
}
@SuppressWarnings("ObjectToString") // Intended behavior
public void testToString() {
Future<?> unused = serializer.submitAsync(firstCallable, directExecutor());
TestCallable secondCallable = new TestCallable(SettableFuture.create());
Future<?> second = serializer.submitAsync(secondCallable, directExecutor());
assertThat(secondCallable.called).isFalse();
assertThat(second.toString()).contains(secondCallable.toString());
firstFuture.set(null);
assertThat(second.toString()).contains(secondCallable.future.toString());
}
private static
|
LongHolder
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupJobsAction.java
|
{
"start": 6288,
"end": 9121
}
|
class ____ implements Writeable, ToXContentObject {
private final RollupJobConfig job;
private final RollupIndexerJobStats stats;
private final RollupJobStatus status;
public static final ConstructingObjectParser<JobWrapper, Void> PARSER = new ConstructingObjectParser<>(
NAME,
a -> new JobWrapper((RollupJobConfig) a[0], (RollupIndexerJobStats) a[1], (RollupJobStatus) a[2])
);
static {
PARSER.declareObject(ConstructingObjectParser.constructorArg(), (p, c) -> RollupJobConfig.fromXContent(p, null), CONFIG);
PARSER.declareObject(ConstructingObjectParser.constructorArg(), RollupIndexerJobStats.PARSER::apply, STATS);
PARSER.declareObject(ConstructingObjectParser.constructorArg(), RollupJobStatus.PARSER::apply, STATUS);
}
public JobWrapper(RollupJobConfig job, RollupIndexerJobStats stats, RollupJobStatus status) {
this.job = job;
this.stats = stats;
this.status = status;
}
public JobWrapper(StreamInput in) throws IOException {
this.job = new RollupJobConfig(in);
this.stats = new RollupIndexerJobStats(in);
this.status = new RollupJobStatus(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
job.writeTo(out);
stats.writeTo(out);
status.writeTo(out);
}
public RollupJobConfig getJob() {
return job;
}
public RollupIndexerJobStats getStats() {
return stats;
}
public RollupJobStatus getStatus() {
return status;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(CONFIG.getPreferredName());
job.toXContent(builder, params);
builder.field(STATUS.getPreferredName(), status);
builder.field(STATS.getPreferredName(), stats, params);
builder.endObject();
return builder;
}
@Override
public int hashCode() {
return Objects.hash(job, stats, status);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
JobWrapper other = (JobWrapper) obj;
return Objects.equals(job, other.job) && Objects.equals(stats, other.stats) && Objects.equals(status, other.status);
}
@Override
public final String toString() {
return Strings.toString(this);
}
}
}
|
JobWrapper
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/nullness/UnnecessaryCheckNotNullTest.java
|
{
"start": 18592,
"end": 18738
}
|
class ____ {
public String b;
void foo() {}
}
""");
writeFile(
"B.java",
"""
public
|
A
|
java
|
google__guava
|
android/guava/src/com/google/common/collect/Ordering.java
|
{
"start": 5143,
"end": 6176
}
|
class ____ now obsolete. Most of its functionality is now
* provided by {@link java.util.stream.Stream Stream} and by {@link Comparator} itself, and the rest
* can now be found as static methods in our new {@link Comparators} class. See each method below
* for further instructions. Whenever possible, you should change any references of type {@code
* Ordering} to be of type {@code Comparator} instead. However, at this time we have no plan to
* <i>deprecate</i> this class.
*
* <p>Many replacements involve adopting {@code Stream}, and these changes can sometimes make your
* code verbose. Whenever following this advice, you should check whether {@code Stream} could be
* adopted more comprehensively in your code; the end result may be quite a bit simpler.
*
* <h3>See also</h3>
*
* <p>See the Guava User Guide article on <a href=
* "https://github.com/google/guava/wiki/OrderingExplained">{@code Ordering}</a>.
*
* @author Jesse Wilson
* @author Kevin Bourrillion
* @since 2.0
*/
@GwtCompatible
public abstract
|
is
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/ArrayRecordComponentTest.java
|
{
"start": 2217,
"end": 2451
}
|
class ____ {",
" public final int[] ints;",
"",
" public IntArrayHolder(int[] ints) {",
" this.ints = ints;",
" }",
"}")
.doTest();
}
}
|
IntArrayHolder
|
java
|
apache__kafka
|
group-coordinator/src/main/java/org/apache/kafka/coordinator/group/metrics/GroupCoordinatorMetrics.java
|
{
"start": 2240,
"end": 25045
}
|
class ____ extends CoordinatorMetrics implements AutoCloseable {
public static final String METRICS_GROUP = "group-coordinator-metrics";
/**
* Old classic group count metric. To be deprecated.
*/
public static final com.yammer.metrics.core.MetricName NUM_CLASSIC_GROUPS = getMetricName(
"GroupMetadataManager", "NumGroups");
public static final com.yammer.metrics.core.MetricName NUM_OFFSETS = getMetricName(
"GroupMetadataManager", "NumOffsets");
public static final com.yammer.metrics.core.MetricName NUM_CLASSIC_GROUPS_PREPARING_REBALANCE = getMetricName(
"GroupMetadataManager", "NumGroupsPreparingRebalance");
public static final com.yammer.metrics.core.MetricName NUM_CLASSIC_GROUPS_COMPLETING_REBALANCE = getMetricName(
"GroupMetadataManager", "NumGroupsCompletingRebalance");
public static final com.yammer.metrics.core.MetricName NUM_CLASSIC_GROUPS_STABLE = getMetricName(
"GroupMetadataManager", "NumGroupsStable");
public static final com.yammer.metrics.core.MetricName NUM_CLASSIC_GROUPS_DEAD = getMetricName(
"GroupMetadataManager", "NumGroupsDead");
public static final com.yammer.metrics.core.MetricName NUM_CLASSIC_GROUPS_EMPTY = getMetricName(
"GroupMetadataManager", "NumGroupsEmpty");
public static final String GROUP_COUNT_METRIC_NAME = "group-count";
public static final String GROUP_COUNT_PROTOCOL_TAG = "protocol";
public static final String SHARE_GROUP_PROTOCOL_TAG = GROUP_COUNT_PROTOCOL_TAG;
public static final String CONSUMER_GROUP_COUNT_METRIC_NAME = "consumer-group-count";
public static final String SHARE_GROUP_COUNT_METRIC_NAME = "share-group-count";
public static final String CONSUMER_GROUP_COUNT_STATE_TAG = "state";
public static final String SHARE_GROUP_COUNT_STATE_TAG = CONSUMER_GROUP_COUNT_STATE_TAG;
public static final String STREAMS_GROUP_COUNT_METRIC_NAME = "streams-group-count";
public static final String STREAMS_GROUP_COUNT_STATE_TAG = "state";
public static final String OFFSET_COMMITS_SENSOR_NAME = "OffsetCommits";
public static final String OFFSET_EXPIRED_SENSOR_NAME = "OffsetExpired";
public static final String OFFSET_DELETIONS_SENSOR_NAME = "OffsetDeletions";
public static final String CLASSIC_GROUP_COMPLETED_REBALANCES_SENSOR_NAME = "CompletedRebalances";
public static final String CONSUMER_GROUP_REBALANCES_SENSOR_NAME = "ConsumerGroupRebalances";
public static final String SHARE_GROUP_REBALANCES_SENSOR_NAME = "ShareGroupRebalances";
public static final String STREAMS_GROUP_REBALANCES_SENSOR_NAME = "StreamsGroupRebalances";
private final MetricName classicGroupCountMetricName;
private final MetricName consumerGroupCountMetricName;
private final MetricName consumerGroupCountEmptyMetricName;
private final MetricName consumerGroupCountAssigningMetricName;
private final MetricName consumerGroupCountReconcilingMetricName;
private final MetricName consumerGroupCountStableMetricName;
private final MetricName consumerGroupCountDeadMetricName;
private final MetricName shareGroupCountMetricName;
private final MetricName shareGroupCountEmptyMetricName;
private final MetricName shareGroupCountStableMetricName;
private final MetricName shareGroupCountDeadMetricName;
private final MetricName streamsGroupCountMetricName;
private final MetricName streamsGroupCountEmptyMetricName;
private final MetricName streamsGroupCountAssigningMetricName;
private final MetricName streamsGroupCountReconcilingMetricName;
private final MetricName streamsGroupCountStableMetricName;
private final MetricName streamsGroupCountDeadMetricName;
private final MetricName streamsGroupCountNotReadyMetricName;
private final MetricsRegistry registry;
private final Metrics metrics;
private final Map<TopicPartition, GroupCoordinatorMetricsShard> shards = new ConcurrentHashMap<>();
/**
* Global sensors. These are shared across all metrics shards.
*/
public final Map<String, Sensor> globalSensors;
public GroupCoordinatorMetrics() {
this(KafkaYammerMetrics.defaultRegistry(), new Metrics());
}
@SuppressWarnings("MethodLength")
public GroupCoordinatorMetrics(MetricsRegistry registry, Metrics metrics) {
this.registry = Objects.requireNonNull(registry);
this.metrics = Objects.requireNonNull(metrics);
classicGroupCountMetricName = metrics.metricName(
GROUP_COUNT_METRIC_NAME,
METRICS_GROUP,
"The total number of groups using the classic rebalance protocol.",
Map.of(GROUP_COUNT_PROTOCOL_TAG, Group.GroupType.CLASSIC.toString())
);
consumerGroupCountMetricName = metrics.metricName(
GROUP_COUNT_METRIC_NAME,
METRICS_GROUP,
"The total number of groups using the consumer rebalance protocol.",
Map.of(GROUP_COUNT_PROTOCOL_TAG, Group.GroupType.CONSUMER.toString())
);
consumerGroupCountEmptyMetricName = metrics.metricName(
CONSUMER_GROUP_COUNT_METRIC_NAME,
METRICS_GROUP,
"The number of consumer groups in empty state.",
Map.of(CONSUMER_GROUP_COUNT_STATE_TAG, ConsumerGroupState.EMPTY.toString())
);
consumerGroupCountAssigningMetricName = metrics.metricName(
CONSUMER_GROUP_COUNT_METRIC_NAME,
METRICS_GROUP,
"The number of consumer groups in assigning state.",
Map.of(CONSUMER_GROUP_COUNT_STATE_TAG, ConsumerGroupState.ASSIGNING.toString())
);
consumerGroupCountReconcilingMetricName = metrics.metricName(
CONSUMER_GROUP_COUNT_METRIC_NAME,
METRICS_GROUP,
"The number of consumer groups in reconciling state.",
Map.of(CONSUMER_GROUP_COUNT_STATE_TAG, ConsumerGroupState.RECONCILING.toString())
);
consumerGroupCountStableMetricName = metrics.metricName(
CONSUMER_GROUP_COUNT_METRIC_NAME,
METRICS_GROUP,
"The number of consumer groups in stable state.",
Map.of(CONSUMER_GROUP_COUNT_STATE_TAG, ConsumerGroupState.STABLE.toString())
);
consumerGroupCountDeadMetricName = metrics.metricName(
CONSUMER_GROUP_COUNT_METRIC_NAME,
METRICS_GROUP,
"The number of consumer groups in dead state.",
Map.of(CONSUMER_GROUP_COUNT_STATE_TAG, ConsumerGroupState.DEAD.toString())
);
shareGroupCountMetricName = metrics.metricName(
GROUP_COUNT_METRIC_NAME,
METRICS_GROUP,
"The total number of share groups.",
Map.of(SHARE_GROUP_PROTOCOL_TAG, Group.GroupType.SHARE.toString())
);
shareGroupCountEmptyMetricName = metrics.metricName(
SHARE_GROUP_COUNT_METRIC_NAME,
METRICS_GROUP,
"The number of share groups in empty state.",
Map.of(SHARE_GROUP_COUNT_STATE_TAG, ShareGroup.ShareGroupState.EMPTY.toString())
);
shareGroupCountStableMetricName = metrics.metricName(
SHARE_GROUP_COUNT_METRIC_NAME,
METRICS_GROUP,
"The number of share groups in stable state.",
Map.of(SHARE_GROUP_COUNT_STATE_TAG, ShareGroup.ShareGroupState.STABLE.toString())
);
shareGroupCountDeadMetricName = metrics.metricName(
SHARE_GROUP_COUNT_METRIC_NAME,
METRICS_GROUP,
"The number of share groups in dead state.",
Map.of(SHARE_GROUP_COUNT_STATE_TAG, ShareGroup.ShareGroupState.DEAD.toString())
);
streamsGroupCountMetricName = metrics.metricName(
GROUP_COUNT_METRIC_NAME,
METRICS_GROUP,
"The total number of groups using the streams rebalance protocol.",
Map.of(GROUP_COUNT_PROTOCOL_TAG, Group.GroupType.STREAMS.toString())
);
streamsGroupCountEmptyMetricName = metrics.metricName(
STREAMS_GROUP_COUNT_METRIC_NAME,
METRICS_GROUP,
"The number of streams groups in empty state.",
Map.of(STREAMS_GROUP_COUNT_STATE_TAG, StreamsGroupState.EMPTY.toString())
);
streamsGroupCountAssigningMetricName = metrics.metricName(
STREAMS_GROUP_COUNT_METRIC_NAME,
METRICS_GROUP,
"The number of streams groups in assigning state.",
Map.of(STREAMS_GROUP_COUNT_STATE_TAG, StreamsGroupState.ASSIGNING.toString())
);
streamsGroupCountReconcilingMetricName = metrics.metricName(
STREAMS_GROUP_COUNT_METRIC_NAME,
METRICS_GROUP,
"The number of streams groups in reconciling state.",
Map.of(STREAMS_GROUP_COUNT_STATE_TAG, StreamsGroupState.RECONCILING.toString())
);
streamsGroupCountStableMetricName = metrics.metricName(
STREAMS_GROUP_COUNT_METRIC_NAME,
METRICS_GROUP,
"The number of streams groups in stable state.",
Map.of(STREAMS_GROUP_COUNT_STATE_TAG, StreamsGroupState.STABLE.toString())
);
streamsGroupCountDeadMetricName = metrics.metricName(
STREAMS_GROUP_COUNT_METRIC_NAME,
METRICS_GROUP,
"The number of streams groups in dead state.",
Map.of(STREAMS_GROUP_COUNT_STATE_TAG, StreamsGroupState.DEAD.toString())
);
streamsGroupCountNotReadyMetricName = metrics.metricName(
STREAMS_GROUP_COUNT_METRIC_NAME,
METRICS_GROUP,
"The number of streams groups in not ready state.",
Map.of(STREAMS_GROUP_COUNT_STATE_TAG, StreamsGroupState.NOT_READY.toString())
);
registerGauges();
Sensor offsetCommitsSensor = metrics.sensor(OFFSET_COMMITS_SENSOR_NAME);
offsetCommitsSensor.add(new Meter(
metrics.metricName("offset-commit-rate",
METRICS_GROUP,
"The rate of committed offsets"),
metrics.metricName("offset-commit-count",
METRICS_GROUP,
"The total number of committed offsets")));
Sensor offsetExpiredSensor = metrics.sensor(OFFSET_EXPIRED_SENSOR_NAME);
offsetExpiredSensor.add(new Meter(
metrics.metricName("offset-expiration-rate",
METRICS_GROUP,
"The rate of expired offsets"),
metrics.metricName("offset-expiration-count",
METRICS_GROUP,
"The total number of expired offsets")));
Sensor offsetDeletionsSensor = metrics.sensor(OFFSET_DELETIONS_SENSOR_NAME);
offsetDeletionsSensor.add(new Meter(
metrics.metricName("offset-deletion-rate",
METRICS_GROUP,
"The rate of administrative deleted offsets"),
metrics.metricName("offset-deletion-count",
METRICS_GROUP,
"The total number of administrative deleted offsets")));
Sensor classicGroupCompletedRebalancesSensor = metrics.sensor(CLASSIC_GROUP_COMPLETED_REBALANCES_SENSOR_NAME);
classicGroupCompletedRebalancesSensor.add(new Meter(
metrics.metricName("group-completed-rebalance-rate",
METRICS_GROUP,
"The rate of classic group completed rebalances"),
metrics.metricName("group-completed-rebalance-count",
METRICS_GROUP,
"The total number of classic group completed rebalances")));
Sensor consumerGroupRebalanceSensor = metrics.sensor(CONSUMER_GROUP_REBALANCES_SENSOR_NAME);
consumerGroupRebalanceSensor.add(new Meter(
metrics.metricName("consumer-group-rebalance-rate",
METRICS_GROUP,
"The rate of consumer group rebalances"),
metrics.metricName("consumer-group-rebalance-count",
METRICS_GROUP,
"The total number of consumer group rebalances")));
Sensor shareGroupRebalanceSensor = metrics.sensor(SHARE_GROUP_REBALANCES_SENSOR_NAME);
shareGroupRebalanceSensor.add(new Meter(
metrics.metricName("share-group-rebalance-rate",
METRICS_GROUP,
"The rate of share group rebalances"),
metrics.metricName("share-group-rebalance-count",
METRICS_GROUP,
"The total number of share group rebalances")));
Sensor streamsGroupRebalanceSensor = metrics.sensor(STREAMS_GROUP_REBALANCES_SENSOR_NAME);
streamsGroupRebalanceSensor.add(new Meter(
metrics.metricName("streams-group-rebalance-rate",
METRICS_GROUP,
"The rate of streams group rebalances"),
metrics.metricName("streams-group-rebalance-count",
METRICS_GROUP,
"The total number of streams group rebalances")));
globalSensors = Collections.unmodifiableMap(Utils.mkMap(
Utils.mkEntry(OFFSET_COMMITS_SENSOR_NAME, offsetCommitsSensor),
Utils.mkEntry(OFFSET_EXPIRED_SENSOR_NAME, offsetExpiredSensor),
Utils.mkEntry(OFFSET_DELETIONS_SENSOR_NAME, offsetDeletionsSensor),
Utils.mkEntry(CLASSIC_GROUP_COMPLETED_REBALANCES_SENSOR_NAME, classicGroupCompletedRebalancesSensor),
Utils.mkEntry(CONSUMER_GROUP_REBALANCES_SENSOR_NAME, consumerGroupRebalanceSensor),
Utils.mkEntry(SHARE_GROUP_REBALANCES_SENSOR_NAME, shareGroupRebalanceSensor),
Utils.mkEntry(STREAMS_GROUP_REBALANCES_SENSOR_NAME, streamsGroupRebalanceSensor)
));
}
private Long numOffsets() {
return shards.values().stream().mapToLong(GroupCoordinatorMetricsShard::numOffsets).sum();
}
private Long numClassicGroups() {
return shards.values().stream().mapToLong(GroupCoordinatorMetricsShard::numClassicGroups).sum();
}
private Long numClassicGroups(ClassicGroupState state) {
return shards.values().stream().mapToLong(shard -> shard.numClassicGroups(state)).sum();
}
private long numConsumerGroups() {
return shards.values().stream().mapToLong(GroupCoordinatorMetricsShard::numConsumerGroups).sum();
}
private long numConsumerGroups(ConsumerGroupState state) {
return shards.values().stream().mapToLong(shard -> shard.numConsumerGroups(state)).sum();
}
private long numStreamsGroups() {
return shards.values().stream().mapToLong(GroupCoordinatorMetricsShard::numStreamsGroups).sum();
}
private long numStreamsGroups(StreamsGroupState state) {
return shards.values().stream().mapToLong(shard -> shard.numStreamsGroups(state)).sum();
}
private long numShareGroups() {
return shards.values().stream().mapToLong(GroupCoordinatorMetricsShard::numShareGroups).sum();
}
private long numShareGroups(ShareGroup.ShareGroupState state) {
return shards.values().stream().mapToLong(shard -> shard.numShareGroups(state)).sum();
}
@Override
public void close() {
Arrays.asList(
NUM_OFFSETS,
NUM_CLASSIC_GROUPS,
NUM_CLASSIC_GROUPS_PREPARING_REBALANCE,
NUM_CLASSIC_GROUPS_COMPLETING_REBALANCE,
NUM_CLASSIC_GROUPS_STABLE,
NUM_CLASSIC_GROUPS_DEAD,
NUM_CLASSIC_GROUPS_EMPTY
).forEach(registry::removeMetric);
Arrays.asList(
classicGroupCountMetricName,
consumerGroupCountMetricName,
consumerGroupCountEmptyMetricName,
consumerGroupCountAssigningMetricName,
consumerGroupCountReconcilingMetricName,
consumerGroupCountStableMetricName,
consumerGroupCountDeadMetricName,
shareGroupCountMetricName,
shareGroupCountEmptyMetricName,
shareGroupCountStableMetricName,
shareGroupCountDeadMetricName,
streamsGroupCountMetricName,
streamsGroupCountEmptyMetricName,
streamsGroupCountAssigningMetricName,
streamsGroupCountReconcilingMetricName,
streamsGroupCountStableMetricName,
streamsGroupCountDeadMetricName,
streamsGroupCountNotReadyMetricName
).forEach(metrics::removeMetric);
Arrays.asList(
OFFSET_COMMITS_SENSOR_NAME,
OFFSET_EXPIRED_SENSOR_NAME,
OFFSET_DELETIONS_SENSOR_NAME,
CLASSIC_GROUP_COMPLETED_REBALANCES_SENSOR_NAME,
CONSUMER_GROUP_REBALANCES_SENSOR_NAME,
SHARE_GROUP_REBALANCES_SENSOR_NAME,
STREAMS_GROUP_REBALANCES_SENSOR_NAME
).forEach(metrics::removeSensor);
}
@Override
public GroupCoordinatorMetricsShard newMetricsShard(SnapshotRegistry snapshotRegistry, TopicPartition tp) {
return new GroupCoordinatorMetricsShard(snapshotRegistry, globalSensors, tp);
}
@Override
public void activateMetricsShard(CoordinatorMetricsShard shard) {
if (!(shard instanceof GroupCoordinatorMetricsShard)) {
throw new IllegalArgumentException("GroupCoordinatorMetrics can only activate GroupCoordinatorMetricShard");
}
shards.put(shard.topicPartition(), (GroupCoordinatorMetricsShard) shard);
}
@Override
public void deactivateMetricsShard(CoordinatorMetricsShard shard) {
shards.remove(shard.topicPartition());
}
@Override
public MetricsRegistry registry() {
return this.registry;
}
@Override
public void onUpdateLastCommittedOffset(TopicPartition tp, long offset) {
CoordinatorMetricsShard shard = shards.get(tp);
if (shard != null) {
shard.commitUpTo(offset);
}
}
public static com.yammer.metrics.core.MetricName getMetricName(String type, String name) {
return getMetricName("kafka.coordinator.group", type, name);
}
private void registerGauges() {
registry.newGauge(NUM_OFFSETS, new com.yammer.metrics.core.Gauge<Long>() {
@Override
public Long value() {
return numOffsets();
}
});
registry.newGauge(NUM_CLASSIC_GROUPS, new com.yammer.metrics.core.Gauge<Long>() {
@Override
public Long value() {
return numClassicGroups();
}
});
registry.newGauge(NUM_CLASSIC_GROUPS_PREPARING_REBALANCE, new com.yammer.metrics.core.Gauge<Long>() {
@Override
public Long value() {
return numClassicGroups(ClassicGroupState.PREPARING_REBALANCE);
}
});
registry.newGauge(NUM_CLASSIC_GROUPS_COMPLETING_REBALANCE, new com.yammer.metrics.core.Gauge<Long>() {
@Override
public Long value() {
return numClassicGroups(ClassicGroupState.COMPLETING_REBALANCE);
}
});
registry.newGauge(NUM_CLASSIC_GROUPS_STABLE, new com.yammer.metrics.core.Gauge<Long>() {
@Override
public Long value() {
return numClassicGroups(ClassicGroupState.STABLE);
}
});
registry.newGauge(NUM_CLASSIC_GROUPS_DEAD, new com.yammer.metrics.core.Gauge<Long>() {
@Override
public Long value() {
return numClassicGroups(ClassicGroupState.DEAD);
}
});
registry.newGauge(NUM_CLASSIC_GROUPS_EMPTY, new com.yammer.metrics.core.Gauge<Long>() {
@Override
public Long value() {
return numClassicGroups(ClassicGroupState.EMPTY);
}
});
metrics.addMetric(
classicGroupCountMetricName,
(Gauge<Long>) (config, now) -> numClassicGroups()
);
metrics.addMetric(
consumerGroupCountMetricName,
(Gauge<Long>) (config, now) -> numConsumerGroups()
);
metrics.addMetric(
consumerGroupCountEmptyMetricName,
(Gauge<Long>) (config, now) -> numConsumerGroups(ConsumerGroupState.EMPTY)
);
metrics.addMetric(
consumerGroupCountAssigningMetricName,
(Gauge<Long>) (config, now) -> numConsumerGroups(ConsumerGroupState.ASSIGNING)
);
metrics.addMetric(
consumerGroupCountReconcilingMetricName,
(Gauge<Long>) (config, now) -> numConsumerGroups(ConsumerGroupState.RECONCILING)
);
metrics.addMetric(
consumerGroupCountStableMetricName,
(Gauge<Long>) (config, now) -> numConsumerGroups(ConsumerGroupState.STABLE)
);
metrics.addMetric(
consumerGroupCountDeadMetricName,
(Gauge<Long>) (config, now) -> numConsumerGroups(ConsumerGroupState.DEAD)
);
metrics.addMetric(
shareGroupCountMetricName,
(Gauge<Long>) (config, now) -> numShareGroups()
);
metrics.addMetric(
shareGroupCountEmptyMetricName,
(Gauge<Long>) (config, now) -> numShareGroups(ShareGroup.ShareGroupState.EMPTY)
);
metrics.addMetric(
shareGroupCountStableMetricName,
(Gauge<Long>) (config, now) -> numShareGroups(ShareGroup.ShareGroupState.STABLE)
);
metrics.addMetric(
shareGroupCountDeadMetricName,
(Gauge<Long>) (config, now) -> numShareGroups(ShareGroup.ShareGroupState.DEAD)
);
metrics.addMetric(
streamsGroupCountMetricName,
(Gauge<Long>) (config, now) -> numStreamsGroups()
);
metrics.addMetric(
streamsGroupCountEmptyMetricName,
(Gauge<Long>) (config, now) -> numStreamsGroups(StreamsGroupState.EMPTY)
);
metrics.addMetric(
streamsGroupCountAssigningMetricName,
(Gauge<Long>) (config, now) -> numStreamsGroups(StreamsGroupState.ASSIGNING)
);
metrics.addMetric(
streamsGroupCountReconcilingMetricName,
(Gauge<Long>) (config, now) -> numStreamsGroups(StreamsGroupState.RECONCILING)
);
metrics.addMetric(
streamsGroupCountStableMetricName,
(Gauge<Long>) (config, now) -> numStreamsGroups(StreamsGroupState.STABLE)
);
metrics.addMetric(
streamsGroupCountDeadMetricName,
(Gauge<Long>) (config, now) -> numStreamsGroups(StreamsGroupState.DEAD)
);
metrics.addMetric(
streamsGroupCountNotReadyMetricName,
(Gauge<Long>) (config, now) -> numStreamsGroups(StreamsGroupState.NOT_READY)
);
}
}
|
GroupCoordinatorMetrics
|
java
|
elastic__elasticsearch
|
x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/CancelReindexDataStreamTransportAction.java
|
{
"start": 1111,
"end": 2947
}
|
class ____ extends HandledTransportAction<Request, AcknowledgedResponse> {
private final PersistentTasksService persistentTasksService;
private final ProjectResolver projectResolver;
@Inject
public CancelReindexDataStreamTransportAction(
TransportService transportService,
ActionFilters actionFilters,
PersistentTasksService persistentTasksService,
ProjectResolver projectResolver
) {
super(CancelReindexDataStreamAction.NAME, transportService, actionFilters, Request::new, EsExecutors.DIRECT_EXECUTOR_SERVICE);
this.persistentTasksService = persistentTasksService;
this.projectResolver = projectResolver;
}
@Override
protected void doExecute(Task task, Request request, ActionListener<AcknowledgedResponse> listener) {
String index = request.getIndex();
ProjectId projectId = projectResolver.getProjectId();
String persistentTaskId = ReindexDataStreamAction.TASK_ID_PREFIX + index;
/*
* This removes the persistent task from the cluster state and results in the running task being cancelled (but not removed from
* the task manager). The running task is removed from the task manager in ReindexDataStreamTask::onCancelled, which is called as
* as result of this.
*/
persistentTasksService.sendProjectRemoveRequest(projectId, persistentTaskId, TimeValue.MAX_VALUE, new ActionListener<>() {
@Override
public void onResponse(PersistentTasksCustomMetadata.PersistentTask<?> persistentTask) {
listener.onResponse(AcknowledgedResponse.TRUE);
}
@Override
public void onFailure(Exception e) {
listener.onFailure(e);
}
});
}
}
|
CancelReindexDataStreamTransportAction
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/bvt/sql/mysql/select/MySqlSelectTest_76_union.java
|
{
"start": 1086,
"end": 2928
}
|
class ____ extends MysqlTest {
public void test_0() throws Exception {
String sql = "((select 1 as a1, 10 as a2)) union all (((select 2, 20))) union distinct (select 3, 30);";
System.out.println(sql);
SQLStatementParser parser = SQLParserUtils.createSQLStatementParser(sql, JdbcConstants.MYSQL, SQLParserFeature.OptimizedForParameterized);
List<SQLStatement> statementList = parser.parseStatementList();
SQLStatement stmt = statementList.get(0);
assertEquals(1, statementList.size());
SchemaStatVisitor visitor = SQLUtils.createSchemaStatVisitor(JdbcConstants.MYSQL);
stmt.accept(visitor);
{
String output = SQLUtils.toMySqlString(stmt);
assertEquals("(SELECT 1 AS a1, 10 AS a2)\n" +
"UNION ALL\n" +
"(SELECT 2, 20)\n" +
"UNION DISTINCT\n" +
"(SELECT 3, 30);", //
output);
}
{
String output = SQLUtils.toMySqlString(stmt, SQLUtils.DEFAULT_LCASE_FORMAT_OPTION);
assertEquals("(select 1 as a1, 10 as a2)\n" +
"union all\n" +
"(select 2, 20)\n" +
"union distinct\n" +
"(select 3, 30);", //
output);
}
{
String output = SQLUtils.toMySqlString(stmt, new SQLUtils.FormatOption(true, true, true));
assertEquals("(SELECT ? AS a1, ? AS a2)\n" +
"UNION ALL\n" +
"(SELECT ?, ?)\n" +
"UNION DISTINCT\n" +
"(SELECT ?, ?);", //
output);
}
}
}
|
MySqlSelectTest_76_union
|
java
|
lettuce-io__lettuce-core
|
src/main/java/io/lettuce/core/api/sync/RedisHLLCommands.java
|
{
"start": 1010,
"end": 1986
}
|
interface ____<K, V> {
/**
* Adds the specified elements to the specified HyperLogLog.
*
* @param key the key.
* @param values the values.
* @return Long integer-reply specifically:
*
* 1 if at least 1 HyperLogLog internal register was altered. 0 otherwise.
*/
Long pfadd(K key, V... values);
/**
* Merge N different HyperLogLogs into a single one.
*
* @param destkey the destination key.
* @param sourcekeys the source key.
* @return String simple-string-reply The command just returns {@code OK}.
*/
String pfmerge(K destkey, K... sourcekeys);
/**
* Return the approximated cardinality of the set(s) observed by the HyperLogLog at key(s).
*
* @param keys the keys.
* @return Long integer-reply specifically:
*
* The approximated number of unique elements observed via {@code PFADD}.
*/
Long pfcount(K... keys);
}
|
RedisHLLCommands
|
java
|
quarkusio__quarkus
|
extensions/arc/deployment/src/test/java/io/quarkus/arc/test/unused/ArcContainerLookupProblemDetectedTest.java
|
{
"start": 1618,
"end": 2036
}
|
class ____.quarkus.arc.test.unused.ArcContainerLookupProblemDetectedTest$Alpha"),
message);
});
@Test
public void testWarning() {
// Note that the warning is only displayed once, subsequent calls use a cached result
assertFalse(Arc.container().instance(Alpha.class).isAvailable());
}
// unused bean, will be removed
@ApplicationScoped
static
|
io
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/OptionalArgument.java
|
{
"start": 327,
"end": 530
}
|
interface ____ that a function accepts one optional argument (typically the last one).
* This is used by the {@link EsqlFunctionRegistry} to perform validation of function declaration.
*/
public
|
indicating
|
java
|
apache__kafka
|
clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/consumer/PlaintextConsumerTest.java
|
{
"start": 7438,
"end": 79543
}
|
class ____ {
private final ClusterInstance cluster;
public static final double EPSILON = 0.1;
public PlaintextConsumerTest(ClusterInstance cluster) {
this.cluster = cluster;
}
@ClusterTest
public void testClassicConsumerSimpleConsumption() throws InterruptedException {
testSimpleConsumption(cluster, Map.of(
GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT)
));
}
@ClusterTest
public void testAsyncConsumerSimpleConsumption() throws InterruptedException {
testSimpleConsumption(cluster, Map.of(
GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT)
));
}
@ClusterTest
public void testClassicConsumerClusterResourceListener() throws InterruptedException {
testClusterResourceListener(cluster, Map.of(
GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT)
));
}
@ClusterTest
public void testAsyncConsumerClusterResourceListener() throws InterruptedException {
testClusterResourceListener(cluster, Map.of(
GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT)
));
}
@ClusterTest
public void testClassicConsumerCoordinatorFailover() throws InterruptedException {
Map<String, Object> config = Map.of(
GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT),
SESSION_TIMEOUT_MS_CONFIG, 5001,
HEARTBEAT_INTERVAL_MS_CONFIG, 1000,
// Use higher poll timeout to avoid consumer leaving the group due to timeout
MAX_POLL_INTERVAL_MS_CONFIG, 15000
);
testCoordinatorFailover(cluster, config);
}
@ClusterTest
public void testAsyncConsumeCoordinatorFailover() throws InterruptedException {
Map<String, Object> config = Map.of(
GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT),
// Use higher poll timeout to avoid consumer leaving the group due to timeout
MAX_POLL_INTERVAL_MS_CONFIG, 15000
);
testCoordinatorFailover(cluster, config);
}
@ClusterTest(
brokers = 1,
serverProperties = {
@ClusterConfigProperty(key = OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"),
@ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"),
@ClusterConfigProperty(key = "transaction.state.log.replication.factor", value = "1"),
@ClusterConfigProperty(key = "transaction.state.log.min.isr", value = "1")
}
)
public void testClassicConsumerCloseOnBrokerShutdown() {
Map<String, Object> config = Map.of(
GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT)
);
testConsumerCloseOnBrokerShutdown(config);
}
@ClusterTest(
brokers = 1,
serverProperties = {
@ClusterConfigProperty(key = OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"),
@ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"),
@ClusterConfigProperty(key = "transaction.state.log.replication.factor", value = "1"),
@ClusterConfigProperty(key = "transaction.state.log.min.isr", value = "1")
}
)
public void testAsyncConsumerCloseOnBrokerShutdown() {
Map<String, Object> config = Map.of(
GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT),
ENABLE_AUTO_COMMIT_CONFIG, false
);
// Disabling auto commit so that commitSync() does not block the close timeout.
testConsumerCloseOnBrokerShutdown(config);
}
private void testConsumerCloseOnBrokerShutdown(Map<String, Object> consumerConfig) {
try (Consumer<byte[], byte[]> consumer = cluster.consumer(consumerConfig)) {
consumer.subscribe(List.of(TOPIC));
// Force consumer to discover coordinator by doing a poll
// This ensures coordinator is discovered before we shutdown the broker
consumer.poll(Duration.ofMillis(100));
// Now shutdown broker.
assertEquals(1, cluster.brokers().size());
KafkaBroker broker = cluster.brokers().get(0);
cluster.shutdownBroker(0);
broker.awaitShutdown();
// Do another poll to force the consumer to retry finding the coordinator.
consumer.poll(Duration.ofMillis(100));
// Close should not hang waiting for retries when broker is already down
assertTimeoutPreemptively(Duration.ofSeconds(5), () -> consumer.close(),
"Consumer close should not wait for full timeout when broker is already shutdown");
}
}
@ClusterTest
public void testClassicConsumerHeaders() throws Exception {
testHeaders(Map.of(
GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT)
));
}
@ClusterTest
public void testAsyncConsumerHeaders() throws Exception {
testHeaders(Map.of(
GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT)
));
}
private void testHeaders(Map<String, Object> consumerConfig) throws Exception {
var numRecords = 1;
try (Producer<byte[], byte[]> producer = cluster.producer();
Consumer<byte[], byte[]> consumer = cluster.consumer(consumerConfig)
) {
var record = new ProducerRecord<>(TP.topic(), TP.partition(), null, "key".getBytes(), "value".getBytes());
record.headers().add("headerKey", "headerValue".getBytes());
record.headers().add("headerKey2", "headerValue2".getBytes());
record.headers().add("headerKey3", "headerValue3".getBytes());
producer.send(record);
producer.flush();
assertEquals(0, consumer.assignment().size());
consumer.assign(List.of(TP));
assertEquals(1, consumer.assignment().size());
consumer.seek(TP, 0);
var records = consumeRecords(consumer, numRecords);
assertEquals(numRecords, records.size());
var header = records.get(0).headers().lastHeader("headerKey");
assertEquals("headerValue", header == null ? null : new String(header.value()));
// Test the order of headers in a record is preserved when producing and consuming
Header[] headers = records.get(0).headers().toArray();
assertEquals("headerKey", headers[0].key());
assertEquals("headerKey2", headers[1].key());
assertEquals("headerKey3", headers[2].key());
}
}
@ClusterTest
public void testClassicConsumerHeadersSerializerDeserializer() throws Exception {
testHeadersSerializeDeserialize(Map.of(
GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT)
));
}
@ClusterTest
public void testAsyncConsumerHeadersSerializerDeserializer() throws Exception {
testHeadersSerializeDeserialize(Map.of(
GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT)
));
}
private void testHeadersSerializeDeserialize(Map<String, Object> config) throws InterruptedException {
var numRecords = 1;
Map<String, Object> consumerConfig = new HashMap<>(config);
consumerConfig.put(VALUE_DESERIALIZER_CLASS_CONFIG, DeserializerImpl.class);
Map<String, Object> producerConfig = Map.of(
VALUE_SERIALIZER_CLASS_CONFIG, SerializerImpl.class.getName()
);
try (Producer<byte[], byte[]> producer = cluster.producer(producerConfig);
Consumer<byte[], byte[]> consumer = cluster.consumer(consumerConfig)
) {
producer.send(new ProducerRecord<>(
TP.topic(),
TP.partition(),
null,
"key".getBytes(),
"value".getBytes())
);
assertEquals(0, consumer.assignment().size());
consumer.assign(List.of(TP));
assertEquals(1, consumer.assignment().size());
consumer.seek(TP, 0);
assertEquals(numRecords, consumeRecords(consumer, numRecords).size());
}
}
@ClusterTest
public void testClassicConsumerAutoOffsetReset() throws Exception {
testAutoOffsetReset(Map.of(
GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT)
));
}
@ClusterTest
public void testAsyncConsumerAutoOffsetReset() throws Exception {
testAutoOffsetReset(Map.of(
GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT)
));
}
private void testAutoOffsetReset(Map<String, Object> consumerConfig) throws Exception {
try (Producer<byte[], byte[]> producer = cluster.producer();
Consumer<byte[], byte[]> consumer = cluster.consumer(consumerConfig)
) {
var startingTimestamp = System.currentTimeMillis();
sendRecords(producer, TP, 1, startingTimestamp);
consumer.assign(List.of(TP));
consumeAndVerifyRecords(consumer, TP, 1, 0, 0, startingTimestamp);
}
}
@ClusterTest
public void testClassicConsumerGroupConsumption() throws Exception {
testGroupConsumption(Map.of(
GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT)
));
}
@ClusterTest
public void testAsyncConsumerGroupConsumption() throws Exception {
testGroupConsumption(Map.of(
GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT)
));
}
private void testGroupConsumption(Map<String, Object> consumerConfig) throws Exception {
try (Producer<byte[], byte[]> producer = cluster.producer();
Consumer<byte[], byte[]> consumer = cluster.consumer(consumerConfig)
) {
var startingTimestamp = System.currentTimeMillis();
sendRecords(producer, TP, 10, startingTimestamp);
consumer.subscribe(List.of(TOPIC));
consumeAndVerifyRecords(consumer, TP, 1, 0, 0, startingTimestamp);
}
}
@ClusterTest
public void testClassicConsumerPartitionsFor() throws Exception {
testPartitionsFor(Map.of(
GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT)
));
}
@ClusterTest
public void testAsyncConsumerPartitionsFor() throws Exception {
testPartitionsFor(Map.of(
GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT)
));
}
private void testPartitionsFor(Map<String, Object> consumerConfig) throws Exception {
var numParts = 2;
cluster.createTopic(TOPIC, 2, (short) BROKER_COUNT);
cluster.createTopic("part-test", numParts, (short) 1);
try (var consumer = cluster.consumer(consumerConfig)) {
var partitions = consumer.partitionsFor(TOPIC);
assertNotNull(partitions);
assertEquals(2, partitions.size());
}
}
@ClusterTest
public void testClassicConsumerPartitionsForAutoCreate() throws Exception {
testPartitionsForAutoCreate(Map.of(
GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT)
));
}
@ClusterTest
public void testAsyncConsumerPartitionsForAutoCreate() throws Exception {
testPartitionsForAutoCreate(Map.of(
GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT)
));
}
private void testPartitionsForAutoCreate(Map<String, Object> consumerConfig) throws Exception {
try (var consumer = cluster.consumer(consumerConfig)) {
// First call would create the topic
consumer.partitionsFor("non-exist-topic");
TestUtils.waitForCondition(
() -> !consumer.partitionsFor("non-exist-topic").isEmpty(),
"Timed out while awaiting non empty partitions."
);
}
}
@ClusterTest
public void testClassicConsumerPartitionsForInvalidTopic() {
testPartitionsForInvalidTopic(Map.of(
GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT)
));
}
@ClusterTest
public void testAsyncConsumerPartitionsForInvalidTopic() {
testPartitionsForInvalidTopic(Map.of(
GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT)
));
}
private void testPartitionsForInvalidTopic(Map<String, Object> consumerConfig) {
try (var consumer = cluster.consumer(consumerConfig)) {
assertThrows(InvalidTopicException.class, () -> consumer.partitionsFor(";3# ads,{234"));
}
}
@ClusterTest
public void testClassicConsumerSeek() throws Exception {
testSeek(
Map.of(GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT)
));
}
@ClusterTest
public void testAsyncConsumerSeek() throws Exception {
testSeek(
Map.of(GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT)
));
}
private void testSeek(Map<String, Object> consumerConfig) throws Exception {
var totalRecords = 50;
var mid = totalRecords / 2;
cluster.createTopic(TOPIC, 2, (short) BROKER_COUNT);
try (Producer<byte[], byte[]> producer = cluster.producer();
Consumer<byte[], byte[]> consumer = cluster.consumer(consumerConfig)
) {
var startingTimestamp = 0;
sendRecords(producer, TP, totalRecords, startingTimestamp);
consumer.assign(List.of(TP));
consumer.seekToEnd(List.of(TP));
assertEquals(totalRecords, consumer.position(TP));
assertTrue(consumer.poll(Duration.ofMillis(50)).isEmpty());
consumer.seekToBeginning(List.of(TP));
assertEquals(0, consumer.position(TP));
consumeAndVerifyRecords(consumer, TP, 1, 0, 0, startingTimestamp);
consumer.seek(TP, mid);
assertEquals(mid, consumer.position(TP));
consumeAndVerifyRecords(consumer, TP, 1, mid, mid, mid);
// Test seek compressed message
var tp2 = new TopicPartition(TOPIC, 1);
cluster.createTopic(TOPIC, 2, (short) BROKER_COUNT);
sendCompressedMessages(totalRecords, tp2);
consumer.assign(List.of(tp2));
consumer.seekToEnd(List.of(tp2));
assertEquals(totalRecords, consumer.position(tp2));
assertTrue(consumer.poll(Duration.ofMillis(50)).isEmpty());
consumer.seekToBeginning(List.of(tp2));
assertEquals(0L, consumer.position(tp2));
consumeAndVerifyRecords(consumer, tp2, 1, 0);
consumer.seek(tp2, mid);
assertEquals(mid, consumer.position(tp2));
consumeAndVerifyRecords(consumer, tp2, 1, mid, mid, mid);
}
}
@ClusterTest
public void testClassicConsumerPartitionPauseAndResume() throws Exception {
testPartitionPauseAndResume(Map.of(
GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT)
));
}
@ClusterTest
public void testAsyncConsumerPartitionPauseAndResume() throws Exception {
testPartitionPauseAndResume(Map.of(
GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT)
));
}
private void testPartitionPauseAndResume(Map<String, Object> consumerConfig) throws Exception {
var partitions = List.of(TP);
var numRecords = 5;
try (Producer<byte[], byte[]> producer = cluster.producer();
Consumer<byte[], byte[]> consumer = cluster.consumer(consumerConfig)
) {
var startingTimestamp = System.currentTimeMillis();
sendRecords(producer, TP, numRecords, startingTimestamp);
consumer.assign(partitions);
consumeAndVerifyRecords(consumer, TP, numRecords, 0, 0, startingTimestamp);
consumer.pause(partitions);
startingTimestamp = System.currentTimeMillis();
sendRecords(producer, TP, numRecords, startingTimestamp);
assertTrue(consumer.poll(Duration.ofMillis(100)).isEmpty());
consumer.resume(partitions);
consumeAndVerifyRecords(consumer, TP, numRecords, 5, 0, startingTimestamp);
}
}
@ClusterTest
public void testClassicConsumerInterceptors() throws Exception {
testInterceptors(Map.of(
GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT)
));
}
@ClusterTest
public void testAsyncConsumerInterceptors() throws Exception {
testInterceptors(Map.of(
GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT)
));
}
private void testInterceptors(Map<String, Object> consumerConfig) throws Exception {
var appendStr = "mock";
MockConsumerInterceptor.resetCounters();
MockProducerInterceptor.resetCounters();
// create producer with interceptor
Map<String, Object> producerConfig = Map.of(
ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, MockProducerInterceptor.class.getName(),
KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName(),
VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName(),
"mock.interceptor.append", appendStr
);
// create consumer with interceptor
Map<String, Object> consumerConfigOverride = new HashMap<>(consumerConfig);
consumerConfigOverride.put(INTERCEPTOR_CLASSES_CONFIG, MockConsumerInterceptor.class.getName());
consumerConfigOverride.put(KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
consumerConfigOverride.put(VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
try (Producer<String, String> producer = cluster.producer(producerConfig);
Consumer<String, String> consumer = cluster.consumer(consumerConfigOverride)
) {
// produce records
var numRecords = 10;
List<Future<RecordMetadata>> futures = new ArrayList<>();
for (var i = 0; i < numRecords; i++) {
Future<RecordMetadata> future = producer.send(
new ProducerRecord<>(TP.topic(), TP.partition(), "key " + i, "value " + i)
);
futures.add(future);
}
// Wait for all sends to complete
futures.forEach(future -> assertDoesNotThrow(() -> future.get()));
assertEquals(numRecords, MockProducerInterceptor.ONSEND_COUNT.intValue());
assertEquals(numRecords, MockProducerInterceptor.ON_SUCCESS_COUNT.intValue());
// send invalid record
assertThrows(
Throwable.class,
() -> producer.send(null),
"Should not allow sending a null record"
);
assertEquals(
1,
MockProducerInterceptor.ON_ERROR_COUNT.intValue(),
"Interceptor should be notified about exception"
);
assertEquals(
0,
MockProducerInterceptor.ON_ERROR_WITH_METADATA_COUNT.intValue(),
"Interceptor should not receive metadata with an exception when record is null"
);
consumer.assign(List.of(TP));
consumer.seek(TP, 0);
// consume and verify that values are modified by interceptors
var records = consumeRecords(consumer, numRecords);
for (var i = 0; i < numRecords; i++) {
ConsumerRecord<String, String> record = records.get(i);
assertEquals("key " + i, record.key());
assertEquals(("value " + i + appendStr).toUpperCase(Locale.ROOT), record.value());
}
// commit sync and verify onCommit is called
var commitCountBefore = MockConsumerInterceptor.ON_COMMIT_COUNT.intValue();
consumer.commitSync(Map.of(TP, new OffsetAndMetadata(2L, "metadata")));
OffsetAndMetadata metadata = consumer.committed(Set.of(TP)).get(TP);
assertEquals(2, metadata.offset());
assertEquals("metadata", metadata.metadata());
assertEquals(commitCountBefore + 1, MockConsumerInterceptor.ON_COMMIT_COUNT.intValue());
// commit async and verify onCommit is called
var offsetsToCommit = Map.of(TP, new OffsetAndMetadata(5L, null));
sendAndAwaitAsyncCommit(consumer, Optional.of(offsetsToCommit));
metadata = consumer.committed(Set.of(TP)).get(TP);
assertEquals(5, metadata.offset());
// null metadata will be converted to an empty string
assertEquals("", metadata.metadata());
assertEquals(commitCountBefore + 2, MockConsumerInterceptor.ON_COMMIT_COUNT.intValue());
}
// cleanup
MockConsumerInterceptor.resetCounters();
MockProducerInterceptor.resetCounters();
}
@ClusterTest
public void testClassicConsumerInterceptorsWithWrongKeyValue() throws Exception {
testInterceptorsWithWrongKeyValue(Map.of(
GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT)
));
}
@ClusterTest
public void testAsyncConsumerInterceptorsWithWrongKeyValue() throws Exception {
testInterceptorsWithWrongKeyValue(Map.of(
GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT)
));
}
private void testInterceptorsWithWrongKeyValue(Map<String, Object> consumerConfig) throws Exception {
var appendStr = "mock";
// create producer with interceptor that has different key and value types from the producer
Map<String, Object> producerConfig = Map.of(
ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, MockProducerInterceptor.class.getName(),
"mock.interceptor.append", appendStr
);
// create consumer with interceptor that has different key and value types from the consumer
Map<String, Object> consumerConfigOverride = new HashMap<>(consumerConfig);
consumerConfigOverride.put(INTERCEPTOR_CLASSES_CONFIG, MockConsumerInterceptor.class.getName());
try (Producer<byte[], byte[]> producer = cluster.producer(producerConfig);
Consumer<byte[], byte[]> consumer = cluster.consumer(consumerConfigOverride)
) {
// producing records should succeed
producer.send(new ProducerRecord<>(
TP.topic(),
TP.partition(),
"key".getBytes(),
"value will not be modified".getBytes()
));
consumer.assign(List.of(TP));
consumer.seek(TP, 0);
// consume and verify that values are not modified by interceptors -- their exceptions are caught and logged, but not propagated
var records = consumeRecords(consumer, 1);
var record = records.get(0);
assertEquals("value will not be modified", new String(record.value()));
}
}
@ClusterTest
public void testClassicConsumerConsumeMessagesWithCreateTime() throws Exception {
testConsumeMessagesWithCreateTime(Map.of(
GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT)
));
}
@ClusterTest
public void testAsyncConsumerConsumeMessagesWithCreateTime() throws Exception {
testConsumeMessagesWithCreateTime(Map.of(
GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT)
));
}
private void testConsumeMessagesWithCreateTime(Map<String, Object> consumerConfig) throws Exception {
cluster.createTopic(TOPIC, 2, (short) BROKER_COUNT);
var numRecords = 50;
var tp2 = new TopicPartition(TOPIC, 1);
try (Producer<byte[], byte[]> producer = cluster.producer();
Consumer<byte[], byte[]> consumer = cluster.consumer(consumerConfig)
) {
// Test non-compressed messages
var startingTimestamp = System.currentTimeMillis();
sendRecords(producer, TP, numRecords, startingTimestamp);
consumer.assign(List.of(TP));
consumeAndVerifyRecords(consumer, TP, numRecords, 0, 0, startingTimestamp);
// Test compressed messages
sendCompressedMessages(numRecords, tp2);
consumer.assign(List.of(tp2));
consumeAndVerifyRecords(consumer, tp2, numRecords, 0, 0, 0);
}
}
@ClusterTest
public void testClassicConsumerConsumeMessagesWithLogAppendTime() throws Exception {
testConsumeMessagesWithLogAppendTime(Map.of(
GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT)
));
}
@ClusterTest
public void testAsyncConsumerConsumeMessagesWithLogAppendTime() throws Exception {
testConsumeMessagesWithLogAppendTime(Map.of(
GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT)
));
}
private void testConsumeMessagesWithLogAppendTime(Map<String, Object> consumerConfig) throws Exception {
var topicName = "testConsumeMessagesWithLogAppendTime";
var startTime = System.currentTimeMillis();
var numRecords = 50;
cluster.createTopic(topicName, 2, (short) 2, Map.of(TopicConfig.MESSAGE_TIMESTAMP_TYPE_CONFIG, "LogAppendTime"));
try (Consumer<byte[], byte[]> consumer = cluster.consumer(consumerConfig)) {
// Test non-compressed messages
var tp1 = new TopicPartition(topicName, 0);
sendRecords(cluster, tp1, numRecords);
consumer.assign(List.of(tp1));
consumeAndVerifyRecordsWithTimeTypeLogAppend(consumer, tp1, numRecords, startTime);
// Test compressed messages
var tp2 = new TopicPartition(topicName, 1);
sendCompressedMessages(numRecords, tp2);
consumer.assign(List.of(tp2));
consumeAndVerifyRecordsWithTimeTypeLogAppend(consumer, tp2, numRecords, startTime);
}
}
@ClusterTest
public void testClassicConsumerListTopics() throws Exception {
testListTopics(Map.of(
GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT)
));
}
@ClusterTest
public void testAsyncConsumerListTopics() throws Exception {
testListTopics(Map.of(
GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT)
));
}
private void testListTopics(Map<String, Object> consumerConfig) throws Exception {
var numParts = 2;
var topic1 = "part-test-topic-1";
var topic2 = "part-test-topic-2";
var topic3 = "part-test-topic-3";
cluster.createTopic(topic1, numParts, (short) 1);
cluster.createTopic(topic2, numParts, (short) 1);
cluster.createTopic(topic3, numParts, (short) 1);
sendRecords(cluster, new TopicPartition(topic1, 0), 1);
try (var consumer = cluster.consumer(consumerConfig)) {
// consumer some messages, and we can list the internal topic __consumer_offsets
consumer.subscribe(List.of(topic1));
consumer.poll(Duration.ofMillis(100));
var topics = consumer.listTopics();
assertNotNull(topics);
assertEquals(4, topics.size());
assertEquals(2, topics.get(topic1).size());
assertEquals(2, topics.get(topic2).size());
assertEquals(2, topics.get(topic3).size());
}
}
@ClusterTest
public void testClassicConsumerPauseStateNotPreservedByRebalance() throws Exception {
testPauseStateNotPreservedByRebalance(Map.of(
GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT),
SESSION_TIMEOUT_MS_CONFIG, 100,
HEARTBEAT_INTERVAL_MS_CONFIG, 30
));
}
@ClusterTest
public void testAsyncConsumerPauseStateNotPreservedByRebalance() throws Exception {
testPauseStateNotPreservedByRebalance(Map.of(
GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT)
));
}
private void testPauseStateNotPreservedByRebalance(Map<String, Object> consumerConfig) throws Exception {
try (Producer<byte[], byte[]> producer = cluster.producer();
Consumer<byte[], byte[]> consumer = cluster.consumer(consumerConfig)
) {
var startingTimestamp = System.currentTimeMillis();
sendRecords(producer, TP, 5, startingTimestamp);
consumer.subscribe(List.of(TOPIC));
consumeAndVerifyRecords(consumer, TP, 5, 0, 0, startingTimestamp);
consumer.pause(List.of(TP));
// subscribe to a new topic to trigger a rebalance
consumer.subscribe(List.of("topic2"));
// after rebalance, our position should be reset and our pause state lost,
// so we should be able to consume from the beginning
consumeAndVerifyRecords(consumer, TP, 0, 5, 0, startingTimestamp);
}
}
@ClusterTest
public void testClassicConsumerPerPartitionLeadMetricsCleanUpWithSubscribe() throws Exception {
String consumerClientId = "testClassicConsumerPerPartitionLeadMetricsCleanUpWithSubscribe";
testPerPartitionLeadMetricsCleanUpWithSubscribe(Map.of(
GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT),
GROUP_ID_CONFIG, consumerClientId,
CLIENT_ID_CONFIG, consumerClientId
), consumerClientId);
}
@ClusterTest
public void testAsyncConsumerPerPartitionLeadMetricsCleanUpWithSubscribe() throws Exception {
String consumerClientId = "testAsyncConsumerPerPartitionLeadMetricsCleanUpWithSubscribe";
testPerPartitionLeadMetricsCleanUpWithSubscribe(Map.of(
GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT),
GROUP_ID_CONFIG, consumerClientId,
CLIENT_ID_CONFIG, consumerClientId
), consumerClientId);
}
private void testPerPartitionLeadMetricsCleanUpWithSubscribe(
Map<String, Object> consumerConfig,
String consumerClientId
) throws Exception {
var numMessages = 1000;
var topic2 = "topic2";
var tp2 = new TopicPartition(TOPIC, 1);
cluster.createTopic(topic2, 2, (short) BROKER_COUNT);
try (Consumer<byte[], byte[]> consumer = cluster.consumer(consumerConfig)) {
// send some messages.
sendRecords(cluster, TP, numMessages);
// Test subscribe
// Create a consumer and consumer some messages.
var listener = new TestConsumerReassignmentListener();
consumer.subscribe(List.of(TOPIC, topic2), listener);
var records = awaitNonEmptyRecords(consumer, TP);
assertEquals(1, listener.callsToAssigned, "should be assigned once");
// Verify the metric exist.
Map<String, String> tags1 = Map.of(
"client-id", consumerClientId,
"topic", TP.topic(),
"partition", String.valueOf(TP.partition())
);
Map<String, String> tags2 = Map.of(
"client-id", consumerClientId,
"topic", tp2.topic(),
"partition", String.valueOf(tp2.partition())
);
var fetchLead0 = consumer.metrics().get(new MetricName("records-lead", "consumer-fetch-manager-metrics", "", tags1));
assertNotNull(fetchLead0);
assertEquals((double) records.count(), fetchLead0.metricValue(), "The lead should be " + records.count());
// Remove topic from subscription
consumer.subscribe(List.of(topic2), listener);
awaitRebalance(consumer, listener);
// Verify the metric has gone
assertNull(consumer.metrics().get(new MetricName("records-lead", "consumer-fetch-manager-metrics", "", tags1)));
assertNull(consumer.metrics().get(new MetricName("records-lead", "consumer-fetch-manager-metrics", "", tags2)));
}
}
@ClusterTest
public void testClassicConsumerPerPartitionLagMetricsCleanUpWithSubscribe() throws Exception {
String consumerClientId = "testClassicConsumerPerPartitionLagMetricsCleanUpWithSubscribe";
testPerPartitionLagMetricsCleanUpWithSubscribe(Map.of(
GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT),
GROUP_ID_CONFIG, consumerClientId,
CLIENT_ID_CONFIG, consumerClientId
), consumerClientId);
}
@ClusterTest
public void testAsyncConsumerPerPartitionLagMetricsCleanUpWithSubscribe() throws Exception {
String consumerClientId = "testAsyncConsumerPerPartitionLagMetricsCleanUpWithSubscribe";
testPerPartitionLagMetricsCleanUpWithSubscribe(Map.of(
GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT),
GROUP_ID_CONFIG, consumerClientId,
CLIENT_ID_CONFIG, consumerClientId
), consumerClientId);
}
/**
 * Verifies that the per-partition "records-lag" fetch metric is registered while a
 * partition is part of the subscription and removed once the topic is dropped from
 * the subscription (for both the consumed partition and the never-consumed one).
 *
 * @param consumerConfig   consumer configuration (group protocol, ids, ...)
 * @param consumerClientId client id expected in the metric tags
 */
private void testPerPartitionLagMetricsCleanUpWithSubscribe(
Map<String, Object> consumerConfig,
String consumerClientId
) throws Exception {
int numMessages = 1000;
var topic2 = "topic2";
var tp2 = new TopicPartition(TOPIC, 1);
cluster.createTopic(topic2, 2, (short) BROKER_COUNT);
try (Consumer<byte[], byte[]> consumer = cluster.consumer(consumerConfig)) {
// send some messages.
sendRecords(cluster, TP, numMessages);
// Test subscribe
// Create a consumer and consumer some messages.
var listener = new TestConsumerReassignmentListener();
consumer.subscribe(List.of(TOPIC, topic2), listener);
var records = awaitNonEmptyRecords(consumer, TP);
assertEquals(1, listener.callsToAssigned, "should be assigned once");
// Verify the metric exist.
Map<String, String> tags1 = Map.of(
"client-id", consumerClientId,
"topic", TP.topic(),
"partition", String.valueOf(TP.partition())
);
Map<String, String> tags2 = Map.of(
"client-id", consumerClientId,
"topic", tp2.topic(),
"partition", String.valueOf(tp2.partition())
);
var fetchLag0 = consumer.metrics().get(new MetricName("records-lag", "consumer-fetch-manager-metrics", "", tags1));
assertNotNull(fetchLag0);
// Lag = produced - consumed so far; the metric value is a double, hence EPSILON.
var expectedLag = numMessages - records.count();
assertEquals(expectedLag, (double) fetchLag0.metricValue(), EPSILON, "The lag should be " + expectedLag);
// Remove topic from subscription
consumer.subscribe(List.of(topic2), listener);
awaitRebalance(consumer, listener);
// Verify the metric has gone
assertNull(consumer.metrics().get(new MetricName("records-lag", "consumer-fetch-manager-metrics", "", tags1)));
assertNull(consumer.metrics().get(new MetricName("records-lag", "consumer-fetch-manager-metrics", "", tags2)));
}
}
@ClusterTest
public void testClassicConsumerPerPartitionLeadMetricsCleanUpWithAssign() throws Exception {
    // Classic-protocol variant of the lead-metric cleanup scenario.
    final String clientId = "testClassicConsumerPerPartitionLeadMetricsCleanUpWithAssign";
    final Map<String, Object> config = Map.of(
        GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT),
        GROUP_ID_CONFIG, clientId,
        CLIENT_ID_CONFIG, clientId
    );
    testPerPartitionLeadMetricsCleanUpWithAssign(config, clientId);
}
@ClusterTest
public void testAsyncConsumerPerPartitionLeadMetricsCleanUpWithAssign() throws Exception {
    // New-protocol variant of the lead-metric cleanup scenario.
    final String clientId = "testAsyncConsumerPerPartitionLeadMetricsCleanUpWithAssign";
    final Map<String, Object> config = Map.of(
        GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT),
        GROUP_ID_CONFIG, clientId,
        CLIENT_ID_CONFIG, clientId
    );
    testPerPartitionLeadMetricsCleanUpWithAssign(config, clientId);
}
/**
 * Verifies that the per-partition "records-lead" fetch metric exists for an assigned
 * partition and is removed once the assignment moves to a different partition.
 *
 * @param consumerConfig   consumer configuration to use
 * @param consumerClientId client id expected in the metric tags
 */
private void testPerPartitionLeadMetricsCleanUpWithAssign(
Map<String, Object> consumerConfig,
String consumerClientId
) throws Exception {
var numMessages = 1000;
var tp2 = new TopicPartition(TOPIC, 1);
cluster.createTopic(TOPIC, 2, (short) BROKER_COUNT);
try (Producer<byte[], byte[]> producer = cluster.producer();
Consumer<byte[], byte[]> consumer = cluster.consumer(consumerConfig)
) {
// Test assign send some messages.
sendRecords(producer, TP, numMessages, System.currentTimeMillis());
sendRecords(producer, tp2, numMessages, System.currentTimeMillis());
consumer.assign(List.of(TP));
var records = awaitNonEmptyRecords(consumer, TP);
// Verify the metric exist.
Map<String, String> tags = Map.of(
"client-id", consumerClientId,
"topic", TP.topic(),
"partition", String.valueOf(TP.partition())
);
var fetchLead = consumer.metrics().get(new MetricName("records-lead", "consumer-fetch-manager-metrics", "", tags));
assertNotNull(fetchLead);
// Lead equals the consumed count here because consumption started at offset 0.
assertEquals((double) records.count(), fetchLead.metricValue(), "The lead should be " + records.count());
// Re-assigning to the other partition must remove the metric tagged for TP.
consumer.assign(List.of(tp2));
awaitNonEmptyRecords(consumer, tp2);
assertNull(consumer.metrics().get(new MetricName("records-lead", "consumer-fetch-manager-metrics", "", tags)));
}
}
@ClusterTest
public void testClassicConsumerPerPartitionLagMetricsCleanUpWithAssign() throws Exception {
    // Classic-protocol variant of the lag-metric cleanup-on-assign scenario.
    final String clientId = "testClassicConsumerPerPartitionLagMetricsCleanUpWithAssign";
    final Map<String, Object> config = Map.of(
        GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT),
        GROUP_ID_CONFIG, clientId,
        CLIENT_ID_CONFIG, clientId
    );
    testPerPartitionLagMetricsCleanUpWithAssign(config, clientId);
}
@ClusterTest
public void testAsyncConsumerPerPartitionLagMetricsCleanUpWithAssign() throws Exception {
    // New-protocol variant of the lag-metric cleanup-on-assign scenario.
    final String clientId = "testAsyncConsumerPerPartitionLagMetricsCleanUpWithAssign";
    final Map<String, Object> config = Map.of(
        GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT),
        GROUP_ID_CONFIG, clientId,
        CLIENT_ID_CONFIG, clientId
    );
    testPerPartitionLagMetricsCleanUpWithAssign(config, clientId);
}
/**
 * Verifies that the per-partition "records-lag" fetch metric is registered for an
 * assigned partition and removed once the assignment moves to another partition.
 *
 * @param consumerConfig   consumer configuration to use
 * @param consumerClientId client id expected in the metric tags
 */
private void testPerPartitionLagMetricsCleanUpWithAssign(
    Map<String, Object> consumerConfig,
    String consumerClientId
) throws Exception {
    var numMessages = 1000;
    var tp2 = new TopicPartition(TOPIC, 1);
    cluster.createTopic(TOPIC, 2, (short) BROKER_COUNT);
    try (Producer<byte[], byte[]> producer = cluster.producer();
         Consumer<byte[], byte[]> consumer = cluster.consumer(consumerConfig)
    ) {
        // Produce to both partitions, then consume only from TP.
        sendRecords(producer, TP, numMessages, System.currentTimeMillis());
        sendRecords(producer, tp2, numMessages, System.currentTimeMillis());
        consumer.assign(List.of(TP));
        var records = awaitNonEmptyRecords(consumer, TP);
        // The lag metric for TP must exist while TP is assigned.
        Map<String, String> tags = Map.of(
            "client-id", consumerClientId,
            "topic", TP.topic(),
            "partition", String.valueOf(TP.partition())
        );
        var fetchLag = consumer.metrics().get(new MetricName("records-lag", "consumer-fetch-manager-metrics", "", tags));
        assertNotNull(fetchLag);
        var expectedLag = numMessages - records.count();
        assertEquals(expectedLag, (double) fetchLag.metricValue(), EPSILON, "The lag should be " + expectedLag);
        // Re-assign to the other partition; the metric tagged for TP must be cleaned up.
        // Fix: a second assertNull on a metric named "<TP>.records-lag" was removed - no
        // metric is ever registered under that concatenated name with these tags, so the
        // assertion was vacuously true and only obscured the real check below.
        consumer.assign(List.of(tp2));
        awaitNonEmptyRecords(consumer, tp2);
        assertNull(consumer.metrics().get(new MetricName("records-lag", "consumer-fetch-manager-metrics", "", tags)));
    }
}
@ClusterTest
public void testClassicConsumerPerPartitionLagMetricsWhenReadCommitted() throws Exception {
    // Classic-protocol variant; the consumer runs with read_committed isolation.
    final String clientId = "testClassicConsumerPerPartitionLagMetricsWhenReadCommitted";
    final Map<String, Object> config = Map.of(
        GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT),
        GROUP_ID_CONFIG, clientId,
        CLIENT_ID_CONFIG, clientId,
        ISOLATION_LEVEL_CONFIG, "read_committed"
    );
    testPerPartitionLagMetricsWhenReadCommitted(config, clientId);
}
@ClusterTest
public void testAsyncConsumerPerPartitionLagMetricsWhenReadCommitted() throws Exception {
    // New-protocol variant; the consumer runs with read_committed isolation.
    final String clientId = "testAsyncConsumerPerPartitionLagMetricsWhenReadCommitted";
    final Map<String, Object> config = Map.of(
        GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT),
        GROUP_ID_CONFIG, clientId,
        CLIENT_ID_CONFIG, clientId,
        ISOLATION_LEVEL_CONFIG, "read_committed"
    );
    testPerPartitionLagMetricsWhenReadCommitted(config, clientId);
}
/**
 * Verifies that the per-partition "records-lag" fetch metric is registered even when
 * the consumer reads with read_committed isolation (the isolation level comes in via
 * {@code consumerConfig}; this method only checks that the metric exists).
 *
 * @param consumerConfig   consumer configuration, expected to set read_committed
 * @param consumerClientId client id expected in the metric tags
 */
private void testPerPartitionLagMetricsWhenReadCommitted(
Map<String, Object> consumerConfig,
String consumerClientId
) throws Exception {
var numMessages = 1000;
var tp2 = new TopicPartition(TOPIC, 1);
cluster.createTopic(TOPIC, 2, (short) BROKER_COUNT);
try (Producer<byte[], byte[]> producer = cluster.producer();
Consumer<byte[], byte[]> consumer = cluster.consumer(consumerConfig)
) {
// Test assign send some messages.
sendRecords(producer, TP, numMessages, System.currentTimeMillis());
sendRecords(producer, tp2, numMessages, System.currentTimeMillis());
consumer.assign(List.of(TP));
awaitNonEmptyRecords(consumer, TP);
// Verify the metric exist.
Map<String, String> tags = Map.of(
"client-id", consumerClientId,
"topic", TP.topic(),
"partition", String.valueOf(TP.partition())
);
var fetchLag = consumer.metrics().get(new MetricName("records-lag", "consumer-fetch-manager-metrics", "", tags));
assertNotNull(fetchLag);
}
}
@ClusterTest
public void testClassicConsumerQuotaMetricsNotCreatedIfNoQuotasConfigured() throws Exception {
    // Classic-protocol variant of the no-quota-metrics scenario.
    final String clientId = "testClassicConsumerQuotaMetricsNotCreatedIfNoQuotasConfigured";
    final Map<String, Object> config = Map.of(
        GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT),
        GROUP_ID_CONFIG, clientId,
        CLIENT_ID_CONFIG, clientId,
        ISOLATION_LEVEL_CONFIG, "read_committed"
    );
    testQuotaMetricsNotCreatedIfNoQuotasConfigured(config, clientId);
}
@ClusterTest
public void testAsyncConsumerQuotaMetricsNotCreatedIfNoQuotasConfigured() throws Exception {
    // New-protocol variant of the no-quota-metrics scenario.
    final String clientId = "testAsyncConsumerQuotaMetricsNotCreatedIfNoQuotasConfigured";
    final Map<String, Object> config = Map.of(
        GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT),
        GROUP_ID_CONFIG, clientId,
        CLIENT_ID_CONFIG, clientId,
        ISOLATION_LEVEL_CONFIG, "read_committed"
    );
    testQuotaMetricsNotCreatedIfNoQuotasConfigured(config, clientId);
}
/**
 * Verifies that brokers do not register any produce/fetch/request quota sensors for
 * clients when no quotas are configured on the cluster.
 *
 * @param consumerConfig   consumer configuration to use
 * @param consumerClientId client id to look up in the broker quota metrics
 */
private void testQuotaMetricsNotCreatedIfNoQuotasConfigured(
Map<String, Object> consumerConfig,
String consumerClientId
) throws Exception {
var producerClientId = UUID.randomUUID().toString();
var numRecords = 1000;
cluster.createTopic(TOPIC, 2, (short) BROKER_COUNT);
try (Producer<byte[], byte[]> producer = cluster.producer(Map.of(ProducerConfig.CLIENT_ID_CONFIG, producerClientId));
Consumer<byte[], byte[]> consumer = cluster.consumer(consumerConfig)
) {
// Drive real produce and fetch traffic so any quota sensors would have been created.
var startingTimestamp = System.currentTimeMillis();
sendRecords(producer, TP, numRecords, startingTimestamp);
consumer.assign(List.of(TP));
consumer.seek(TP, 0);
consumeAndVerifyRecords(consumer, TP, numRecords, 0, 0, startingTimestamp);
// No quota is configured, so none of the quota sensors may exist on any broker.
var brokers = cluster.brokers().values();
brokers.forEach(broker -> assertNoMetric(broker, "byte-rate", QuotaType.PRODUCE, producerClientId));
brokers.forEach(broker -> assertNoMetric(broker, "throttle-time", QuotaType.PRODUCE, producerClientId));
brokers.forEach(broker -> assertNoMetric(broker, "byte-rate", QuotaType.FETCH, consumerClientId));
brokers.forEach(broker -> assertNoMetric(broker, "throttle-time", QuotaType.FETCH, consumerClientId));
brokers.forEach(broker -> assertNoMetric(broker, "request-time", QuotaType.REQUEST, producerClientId));
brokers.forEach(broker -> assertNoMetric(broker, "throttle-time", QuotaType.REQUEST, producerClientId));
brokers.forEach(broker -> assertNoMetric(broker, "request-time", QuotaType.REQUEST, consumerClientId));
brokers.forEach(broker -> assertNoMetric(broker, "throttle-time", QuotaType.REQUEST, consumerClientId));
}
}
// Asserts that the broker never registered the given quota metric for this client id.
private void assertNoMetric(KafkaBroker broker, String name, QuotaType quotaType, String clientId) {
    var registry = broker.metrics();
    var metricName = registry.metricName(name, quotaType.toString(), "", "user", "", "client-id", clientId);
    assertNull(registry.metric(metricName), "Metric should not have been created " + metricName);
}
@ClusterTest
public void testClassicConsumerSeekThrowsIllegalStateIfPartitionsNotAssigned() throws Exception {
    // Run the shared scenario with the classic group protocol.
    final Map<String, Object> config =
        Map.of(GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT));
    testSeekThrowsIllegalStateIfPartitionsNotAssigned(config);
}
@ClusterTest
public void testAsyncConsumerSeekThrowsIllegalStateIfPartitionsNotAssigned() throws Exception {
    // Run the shared scenario with the new consumer group protocol.
    final Map<String, Object> config =
        Map.of(GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT));
    testSeekThrowsIllegalStateIfPartitionsNotAssigned(config);
}
// seekToEnd() on a partition the consumer was never assigned must fail fast with
// IllegalStateException and a message naming the partition.
private void testSeekThrowsIllegalStateIfPartitionsNotAssigned(Map<String, Object> consumerConfig) throws Exception {
    cluster.createTopic(TOPIC, 2, (short) BROKER_COUNT);
    try (var consumer = cluster.consumer(consumerConfig)) {
        IllegalStateException thrown =
            assertThrows(IllegalStateException.class, () -> consumer.seekToEnd(List.of(TP)));
        assertEquals("No current assignment for partition " + TP, thrown.getMessage());
    }
}
@ClusterTest
public void testClassicConsumingWithNullGroupId() throws Exception {
    // No GROUP_ID_CONFIG on purpose: the scenario exercises group-less consumption.
    final Map<String, Object> config = Map.of(
        GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT),
        KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName(),
        VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName(),
        BOOTSTRAP_SERVERS_CONFIG, cluster.bootstrapServers()
    );
    testConsumingWithNullGroupId(config);
}
@ClusterTest
public void testAsyncConsumerConsumingWithNullGroupId() throws Exception {
    // No GROUP_ID_CONFIG on purpose: the scenario exercises group-less consumption.
    final Map<String, Object> config = Map.of(
        GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT),
        KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName(),
        VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName(),
        BOOTSTRAP_SERVERS_CONFIG, cluster.bootstrapServers()
    );
    testConsumingWithNullGroupId(config);
}
/**
 * Consumers without a group id can still consume via manual assignment, but every
 * group-related operation (commitSync, committed) must fail with
 * {@link InvalidGroupIdException}. Three consumers cover the three start positions:
 * earliest, latest, and an explicit seek.
 */
private void testConsumingWithNullGroupId(Map<String, Object> consumerConfig) throws Exception {
    var partition = 0;
    cluster.createTopic(TOPIC, 1, (short) 1);
    // consumer 1 uses the default group id and consumes from earliest offset
    Map<String, Object> consumer1Config = new HashMap<>(consumerConfig);
    consumer1Config.put(AUTO_OFFSET_RESET_CONFIG, "earliest");
    consumer1Config.put(CLIENT_ID_CONFIG, "consumer1");
    // consumer 2 uses the default group id and consumes from latest offset
    Map<String, Object> consumer2Config = new HashMap<>(consumerConfig);
    consumer2Config.put(AUTO_OFFSET_RESET_CONFIG, "latest");
    consumer2Config.put(CLIENT_ID_CONFIG, "consumer2");
    // consumer 3 uses the default group id and starts from an explicit offset
    Map<String, Object> consumer3Config = new HashMap<>(consumerConfig);
    consumer3Config.put(CLIENT_ID_CONFIG, "consumer3");
    try (Producer<byte[], byte[]> producer = cluster.producer();
         Consumer<byte[], byte[]> consumer1 = new KafkaConsumer<>(consumer1Config);
         Consumer<byte[], byte[]> consumer2 = new KafkaConsumer<>(consumer2Config);
         Consumer<byte[], byte[]> consumer3 = new KafkaConsumer<>(consumer3Config)
    ) {
        producer.send(new ProducerRecord<>(TOPIC, partition, "k1".getBytes(), "v1".getBytes())).get();
        producer.send(new ProducerRecord<>(TOPIC, partition, "k2".getBytes(), "v2".getBytes())).get();
        producer.send(new ProducerRecord<>(TOPIC, partition, "k3".getBytes(), "v3".getBytes())).get();
        consumer1.assign(List.of(TP));
        consumer2.assign(List.of(TP));
        consumer3.assign(List.of(TP));
        consumer3.seek(TP, 1);
        var numRecords1 = consumer1.poll(Duration.ofMillis(5000)).count();
        // Group operations must be rejected when no group id is configured.
        assertThrows(InvalidGroupIdException.class, consumer1::commitSync);
        assertThrows(InvalidGroupIdException.class, () -> consumer2.committed(Set.of(TP)));
        var numRecords2 = consumer2.poll(Duration.ofMillis(5000)).count();
        var numRecords3 = consumer3.poll(Duration.ofMillis(5000)).count();
        consumer1.unsubscribe();
        consumer2.unsubscribe();
        consumer3.unsubscribe();
        assertTrue(consumer1.assignment().isEmpty());
        assertTrue(consumer2.assignment().isEmpty());
        assertTrue(consumer3.assignment().isEmpty());
        // Fix: removed explicit close() calls - all three consumers are already managed
        // by the try-with-resources block, so the extra closes were redundant.
        assertEquals(3, numRecords1, "Expected consumer1 to consume from earliest offset");
        assertEquals(0, numRecords2, "Expected consumer2 to consume from latest offset");
        assertEquals(2, numRecords3, "Expected consumer3 to consume from offset 1");
    }
}
@ClusterTest
public void testClassicConsumerNullGroupIdNotSupportedIfCommitting() throws Exception {
    // Deliberately omits GROUP_ID_CONFIG so commitSync() must be rejected.
    final Map<String, Object> config = Map.of(
        GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT),
        KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName(),
        VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName(),
        BOOTSTRAP_SERVERS_CONFIG, cluster.bootstrapServers(),
        AUTO_OFFSET_RESET_CONFIG, "earliest",
        CLIENT_ID_CONFIG, "consumer1"
    );
    testNullGroupIdNotSupportedIfCommitting(config);
}
@ClusterTest
public void testAsyncConsumerNullGroupIdNotSupportedIfCommitting() throws Exception {
    // Deliberately omits GROUP_ID_CONFIG so commitSync() must be rejected.
    final Map<String, Object> config = Map.of(
        GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT),
        KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName(),
        VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName(),
        BOOTSTRAP_SERVERS_CONFIG, cluster.bootstrapServers(),
        AUTO_OFFSET_RESET_CONFIG, "earliest",
        CLIENT_ID_CONFIG, "consumer1"
    );
    testNullGroupIdNotSupportedIfCommitting(config);
}
// commitSync() must be rejected with InvalidGroupIdException when the consumer was
// built without a group id.
private void testNullGroupIdNotSupportedIfCommitting(Map<String, Object> consumerConfig) throws Exception {
    cluster.createTopic(TOPIC, 2, (short) BROKER_COUNT);
    try (var consumer = new KafkaConsumer<>(consumerConfig)) {
        consumer.assign(List.of(TP));
        assertThrows(InvalidGroupIdException.class, () -> consumer.commitSync());
    }
}
@ClusterTest
public void testClassicConsumerStaticConsumerDetectsNewPartitionCreatedAfterRestart() throws Exception {
    // Static membership (group.instance.id) with a short metadata refresh interval.
    final Map<String, Object> config = Map.of(
        GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT),
        GROUP_ID_CONFIG, "my-group-id",
        GROUP_INSTANCE_ID_CONFIG, "my-instance-id",
        METADATA_MAX_AGE_CONFIG, 100,
        MAX_POLL_INTERVAL_MS_CONFIG, 6000
    );
    testStaticConsumerDetectsNewPartitionCreatedAfterRestart(config);
}
@ClusterTest
public void testAsyncConsumerStaticConsumerDetectsNewPartitionCreatedAfterRestart() throws Exception {
    // Static membership (group.instance.id) with a short metadata refresh interval.
    final Map<String, Object> config = Map.of(
        GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT),
        GROUP_ID_CONFIG, "my-group-id",
        GROUP_INSTANCE_ID_CONFIG, "my-instance-id",
        METADATA_MAX_AGE_CONFIG, 100,
        MAX_POLL_INTERVAL_MS_CONFIG, 6000
    );
    testStaticConsumerDetectsNewPartitionCreatedAfterRestart(config);
}
/**
 * A static member (same group.instance.id) that "restarts" must detect partitions
 * created after the restart: consumer2 takes over the instance id once consumer1
 * closes, and its assignment grows when the topic gains a second partition.
 */
private void testStaticConsumerDetectsNewPartitionCreatedAfterRestart(Map<String, Object> consumerConfig) throws Exception {
var foo = "foo";
var foo0 = new TopicPartition(foo, 0);
var foo1 = new TopicPartition(foo, 1);
cluster.createTopic(foo, 1, (short) 1);
try (Consumer<byte[], byte[]> consumer1 = cluster.consumer(consumerConfig);
Consumer<byte[], byte[]> consumer2 = cluster.consumer(consumerConfig);
var admin = cluster.admin()
) {
consumer1.subscribe(List.of(foo));
awaitAssignment(consumer1, Set.of(foo0));
// Close consumer1 early to simulate a restart of the static member; the
// try-with-resources close later is an idempotent no-op.
consumer1.close();
consumer2.subscribe(List.of(foo));
awaitAssignment(consumer2, Set.of(foo0));
// Grow the topic; the short metadata.max.age lets consumer2 notice quickly.
admin.createPartitions(Map.of(foo, NewPartitions.increaseTo(2))).all().get();
awaitAssignment(consumer2, Set.of(foo0, foo1));
}
}
@ClusterTest
public void testClassicConsumerEndOffsets() throws Exception {
    // Classic-protocol variant of the endOffsets() scenario.
    final Map<String, Object> config = Map.of(
        GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT),
        METADATA_MAX_AGE_CONFIG, 100,
        MAX_POLL_INTERVAL_MS_CONFIG, 6000
    );
    testEndOffsets(config);
}
@ClusterTest
public void testAsyncConsumerEndOffsets() throws Exception {
    // New-protocol variant of the endOffsets() scenario.
    final Map<String, Object> config = Map.of(
        GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT),
        METADATA_MAX_AGE_CONFIG, 100,
        MAX_POLL_INTERVAL_MS_CONFIG, 6000
    );
    testEndOffsets(config);
}
// endOffsets() must report the log end offset of a partition after numRecords have
// been produced to it (offsets 0..numRecords-1, so the end offset is numRecords).
private void testEndOffsets(Map<String, Object> consumerConfig) throws Exception {
    var numRecords = 10000;
    var tp2 = new TopicPartition(TOPIC, 1);
    cluster.createTopic(TOPIC, 2, (short) BROKER_COUNT);
    try (Producer<byte[], byte[]> producer = cluster.producer();
         Consumer<byte[], byte[]> consumer = cluster.consumer(consumerConfig)
    ) {
        var baseTimestamp = System.currentTimeMillis();
        for (var seq = 0; seq < numRecords; seq++) {
            producer.send(new ProducerRecord<>(
                TP.topic(),
                TP.partition(),
                baseTimestamp + seq,
                ("key " + seq).getBytes(),
                ("value " + seq).getBytes()
            ));
        }
        producer.flush();
        consumer.subscribe(List.of(TOPIC));
        awaitAssignment(consumer, Set.of(TP, tp2));
        var endOffsets = consumer.endOffsets(Set.of(TP));
        assertEquals(numRecords, endOffsets.get(TP));
    }
}
@ClusterTest
public void testClassicConsumerFetchOffsetsForTime() throws Exception {
    // Run the shared offsetsForTimes scenario with the classic group protocol.
    final Map<String, Object> config =
        Map.of(GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT));
    testFetchOffsetsForTime(config);
}
@ClusterTest
public void testAsyncConsumerFetchOffsetsForTime() throws Exception {
    // Run the shared offsetsForTimes scenario with the new consumer group protocol.
    final Map<String, Object> config =
        Map.of(GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT));
    testFetchOffsetsForTime(config);
}
/**
 * Verifies offsetsForTimes(): records are produced with timestamp == offset, so
 * searching for timestamp T must return offset T. Also checks that a negative target
 * timestamp is rejected and that the returned leader epoch is populated.
 */
private void testFetchOffsetsForTime(Map<String, Object> consumerConfig) throws Exception {
var numPartitions = 2;
var tp2 = new TopicPartition(TOPIC, 1);
cluster.createTopic(TOPIC, 2, (short) BROKER_COUNT);
try (Producer<byte[], byte[]> producer = cluster.producer();
Consumer<byte[], byte[]> consumer = cluster.consumer(consumerConfig)
) {
// Search timestamp 0 for partition 0 and 20 for partition 1 (i * 20L).
Map<TopicPartition, Long> timestampsToSearch = new HashMap<>();
for (int part = 0, i = 0; part < numPartitions; part++, i++) {
var tp = new TopicPartition(TOPIC, part);
// key, val, and timestamp equal to the sequence number.
sendRecords(producer, tp, 100, 0);
timestampsToSearch.put(tp, i * 20L);
}
// Test negative target time
assertThrows(IllegalArgumentException.class, () -> consumer.offsetsForTimes(Map.of(TP, -1L)));
var timestampOffsets = consumer.offsetsForTimes(timestampsToSearch);
// Timestamp == offset, so searching 0 yields offset 0 on partition 0 ...
var timestampTp0 = timestampOffsets.get(TP);
assertEquals(0, timestampTp0.offset());
assertEquals(0, timestampTp0.timestamp());
assertEquals(Optional.of(0), timestampTp0.leaderEpoch());
// ... and searching 20 yields offset 20 on partition 1.
var timestampTp1 = timestampOffsets.get(tp2);
assertEquals(20, timestampTp1.offset());
assertEquals(20, timestampTp1.timestamp());
assertEquals(Optional.of(0), timestampTp1.leaderEpoch());
}
}
@ClusterTest
public void testClassicConsumerPositionRespectsTimeout() {
    // Run the shared position-timeout scenario with the classic group protocol.
    final Map<String, Object> config =
        Map.of(GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT));
    testPositionRespectsTimeout(config);
}
@ClusterTest
public void testAsyncConsumerPositionRespectsTimeout() {
    // Run the shared position-timeout scenario with the new consumer group protocol.
    final Map<String, Object> config =
        Map.of(GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT));
    testPositionRespectsTimeout(config);
}
// position() for a partition that does not exist must give up once the caller's
// timeout elapses instead of refreshing metadata forever.
private void testPositionRespectsTimeout(Map<String, Object> consumerConfig) {
    var missingPartition = new TopicPartition(TOPIC, 15);
    try (var consumer = cluster.consumer(consumerConfig)) {
        consumer.assign(List.of(missingPartition));
        assertThrows(TimeoutException.class, () -> consumer.position(missingPartition, Duration.ofSeconds(3)));
    }
}
@ClusterTest
public void testClassicConsumerPositionRespectsWakeup() {
    // Run the shared position-wakeup scenario with the classic group protocol.
    final Map<String, Object> config =
        Map.of(GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT));
    testPositionRespectsWakeup(config);
}
@ClusterTest
public void testAsyncConsumerPositionRespectsWakeup() {
    // Run the shared position-wakeup scenario with the new consumer group protocol.
    final Map<String, Object> config =
        Map.of(GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT));
    testPositionRespectsWakeup(config);
}
// wakeup() issued from another thread must abort a position() call that is blocked
// on a partition that does not exist.
private void testPositionRespectsWakeup(Map<String, Object> consumerConfig) {
    var missingPartition = new TopicPartition(TOPIC, 15);
    try (var consumer = cluster.consumer(consumerConfig)) {
        consumer.assign(List.of(missingPartition));
        Runnable delayedWakeup = () -> {
            try {
                TimeUnit.SECONDS.sleep(1);
                consumer.wakeup();
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        };
        CompletableFuture.runAsync(delayedWakeup);
        assertThrows(WakeupException.class, () -> consumer.position(missingPartition, Duration.ofSeconds(3)));
    }
}
@ClusterTest
public void testClassicConsumerPositionWithErrorConnectionRespectsWakeup() {
    // Bogus bootstrap address guarantees the connection fails; wakeup must still work.
    final Map<String, Object> config = Map.of(
        GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT),
        BOOTSTRAP_SERVERS_CONFIG, "localhost:12345"
    );
    testPositionWithErrorConnectionRespectsWakeup(config);
}
@ClusterTest
public void testAsyncConsumerPositionWithErrorConnectionRespectsWakeup() {
    // Bogus bootstrap address guarantees the connection fails; wakeup must still work.
    final Map<String, Object> config = Map.of(
        GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT),
        BOOTSTRAP_SERVERS_CONFIG, "localhost:12345"
    );
    testPositionWithErrorConnectionRespectsWakeup(config);
}
// Even when the broker connection always fails, wakeup() from another thread must
// abort a long-blocking position() call.
private void testPositionWithErrorConnectionRespectsWakeup(Map<String, Object> consumerConfig) {
    var missingPartition = new TopicPartition(TOPIC, 15);
    try (var consumer = cluster.consumer(consumerConfig)) {
        consumer.assign(List.of(missingPartition));
        Runnable delayedWakeup = () -> {
            try {
                TimeUnit.SECONDS.sleep(1);
                consumer.wakeup();
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        };
        CompletableFuture.runAsync(delayedWakeup);
        // A generous 100 s timeout proves the wakeup - not the timeout - ends the call.
        assertThrows(WakeupException.class, () -> consumer.position(missingPartition, Duration.ofSeconds(100)));
    }
}
@Flaky("KAFKA-18031")
@ClusterTest
public void testClassicConsumerCloseLeavesGroupOnInterrupt() throws Exception {
    // Fix: the group id previously contained a stray trailing comma ("group_test,").
    testCloseLeavesGroupOnInterrupt(Map.of(
        GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT),
        KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName(),
        VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName(),
        AUTO_OFFSET_RESET_CONFIG, "earliest",
        GROUP_ID_CONFIG, "group_test",
        BOOTSTRAP_SERVERS_CONFIG, cluster.bootstrapServers()
    ));
}
@Flaky("KAFKA-18031")
@ClusterTest
public void testAsyncConsumerCloseLeavesGroupOnInterrupt() throws Exception {
    // Fix: the group id previously contained a stray trailing comma ("group_test,").
    testCloseLeavesGroupOnInterrupt(Map.of(
        GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT),
        KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName(),
        VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName(),
        AUTO_OFFSET_RESET_CONFIG, "earliest",
        GROUP_ID_CONFIG, "group_test",
        BOOTSTRAP_SERVERS_CONFIG, cluster.bootstrapServers()
    ));
}
/**
 * Verifies that close() still performs an explicit leave-group even when the calling
 * thread is interrupted: close() throws InterruptException, the rebalance listener
 * sees a revocation, and the group becomes empty well before the broker's session
 * timeout could have kicked the member out.
 */
private void testCloseLeavesGroupOnInterrupt(Map<String, Object> consumerConfig) throws Exception {
try (Consumer<byte[], byte[]> consumer = cluster.consumer(consumerConfig)) {
var listener = new TestConsumerReassignmentListener();
consumer.subscribe(List.of(TOPIC), listener);
awaitRebalance(consumer, listener);
assertEquals(1, listener.callsToAssigned);
assertEquals(0, listener.callsToRevoked);
try {
// Interrupt the current thread so the subsequent close() observes the flag.
Thread.currentThread().interrupt();
assertThrows(InterruptException.class, consumer::close);
} finally {
// Clear the interrupted flag so we don't create problems for subsequent tests.
Thread.interrupted();
}
// close() must still have revoked the assignment despite the interrupt.
assertEquals(1, listener.callsToAssigned);
assertEquals(1, listener.callsToRevoked);
Map<String, Object> consumerConfigMap = new HashMap<>(consumerConfig);
var config = new ConsumerConfig(consumerConfigMap);
// Set the wait timeout to be only *half* the configured session timeout. This way we can make sure that the
// consumer explicitly left the group as opposed to being kicked out by the broker.
var leaveGroupTimeoutMs = config.getInt(SESSION_TIMEOUT_MS_CONFIG) / 2;
TestUtils.waitForCondition(
() -> checkGroupMemberEmpty(config),
leaveGroupTimeoutMs,
"Consumer did not leave the consumer group within " + leaveGroupTimeoutMs + " ms of close"
);
}
}
/**
 * Returns {@code true} when the consumer group named in the given config currently
 * has no members; returns {@code false} if the group cannot be described so callers
 * (e.g. {@code TestUtils.waitForCondition}) can simply retry.
 */
private boolean checkGroupMemberEmpty(ConsumerConfig config) {
    var groupId = config.getString(GROUP_ID_CONFIG);
    try (var admin = cluster.admin()) {
        var groupDescription = admin.describeConsumerGroups(List.of(groupId))
            .describedGroups()
            .get(groupId)
            .get();
        return groupDescription.members().isEmpty();
    } catch (InterruptedException e) {
        // Fix: restore the interrupt status instead of silently swallowing it.
        Thread.currentThread().interrupt();
        return false;
    } catch (ExecutionException e) {
        return false;
    }
}
@ClusterTest
public void testClassicConsumerOffsetRelatedWhenTimeoutZero() throws Exception {
    // Run the shared zero-timeout offset scenario with the classic group protocol.
    final Map<String, Object> config =
        Map.of(GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT));
    testOffsetRelatedWhenTimeoutZero(config);
}
@ClusterTest
public void testAsyncConsumerOffsetRelatedWhenTimeoutZero() throws Exception {
    // Run the shared zero-timeout offset scenario with the new consumer group protocol.
    final Map<String, Object> config =
        Map.of(GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT));
    testOffsetRelatedWhenTimeoutZero(config);
}
// With a zero timeout, offset lookups must return immediately rather than block:
// beginning/end offsets come back empty, and offsetsForTimes maps the partition to null.
private void testOffsetRelatedWhenTimeoutZero(Map<String, Object> consumerConfig) throws Exception {
    cluster.createTopic(TOPIC, 2, (short) BROKER_COUNT);
    try (var consumer = cluster.consumer(consumerConfig)) {
        var beginning = consumer.beginningOffsets(List.of(TP), Duration.ZERO);
        assertNotNull(beginning);
        assertEquals(0, beginning.size());
        var end = consumer.endOffsets(List.of(TP), Duration.ZERO);
        assertNotNull(end);
        assertEquals(0, end.size());
        var forTimes = consumer.offsetsForTimes(Map.of(TP, 0L), Duration.ZERO);
        assertNotNull(forTimes);
        assertEquals(1, forTimes.size());
        assertNull(forTimes.get(TP));
    }
}
// Produces numRecords GZIP-compressed records to the given partition. The huge linger
// keeps records in the accumulator so they are batched (and compressed) together; the
// try-with-resources close flushes everything out.
private void sendCompressedMessages(int numRecords, TopicPartition tp) {
    Map<String, Object> producerProps = Map.of(
        COMPRESSION_TYPE_CONFIG, CompressionType.GZIP.name,
        LINGER_MS_CONFIG, Integer.MAX_VALUE
    );
    try (Producer<byte[], byte[]> producer = cluster.producer(producerProps)) {
        for (var i = 0; i < numRecords; i++) {
            producer.send(new ProducerRecord<>(
                tp.topic(),
                tp.partition(),
                (long) i,
                ("key " + i).getBytes(),
                ("value " + i).getBytes()
            ));
        }
    }
}
@ClusterTest
public void testClassicConsumerStallBetweenPoll() throws Exception {
// Regression scenario for KAFKA-19259 run with the classic group protocol.
testStallBetweenPoll(GroupProtocol.CLASSIC);
}
@ClusterTest
public void testAsyncConsumerStallBetweenPoll() throws Exception {
// Regression scenario for KAFKA-19259 run with the new consumer group protocol.
testStallBetweenPoll(GroupProtocol.CONSUMER);
}
/**
 * This test is to prove that the intermittent stalling that has been experienced when using the asynchronous
 * consumer, as filed under KAFKA-19259, have been fixed.
 *
 * <p/>
 *
 * The basic idea is to have one thread that produces a record every 500 ms. and the main thread that consumes
 * records without pausing between polls for much more than the produce delay. In the test case filed in
 * KAFKA-19259, the consumer sometimes pauses for up to 5-10 seconds despite records being produced every second.
 *
 * @param groupProtocol group protocol (classic or consumer) the consumer should use
 */
private void testStallBetweenPoll(GroupProtocol groupProtocol) throws Exception {
var testTopic = "stall-test-topic";
var numPartitions = 6;
cluster.createTopic(testTopic, numPartitions, (short) BROKER_COUNT);
// The producer must produce slowly to tickle the scenario.
var produceDelay = 500;
var executor = Executors.newScheduledThreadPool(1);
try (var producer = cluster.producer()) {
// Start a thread running that produces records at a relative trickle.
executor.scheduleWithFixedDelay(
() -> producer.send(new ProducerRecord<>(testTopic, TestUtils.randomBytes(64))),
0,
produceDelay,
TimeUnit.MILLISECONDS
);
Map<String, Object> consumerConfig = Map.of(GROUP_PROTOCOL_CONFIG, groupProtocol.name().toLowerCase(Locale.ROOT));
// Assign a tolerance for how much time is allowed to pass between Consumer.poll() calls given that there
// should be *at least* one record to read every second.
var pollDelayTolerance = 2000;
try (Consumer<byte[], byte[]> consumer = cluster.consumer(consumerConfig)) {
consumer.subscribe(List.of(testTopic));
// This is here to allow the consumer time to settle the group membership/assignment.
awaitNonEmptyRecords(consumer, new TopicPartition(testTopic, 0));
// Keep track of the last time the poll is invoked to ensure the deltas between invocations don't
// exceed the delay threshold defined above.
var beforePoll = System.currentTimeMillis();
consumer.poll(Duration.ofSeconds(5));
consumer.poll(Duration.ofSeconds(5));
var afterPoll = System.currentTimeMillis();
var pollDelay = afterPoll - beforePoll;
if (pollDelay > pollDelayTolerance)
fail("Detected a stall of " + pollDelay + " ms between Consumer.poll() invocations despite a Producer producing records every " + produceDelay + " ms");
} finally {
executor.shutdownNow();
// Wait for any active tasks to terminate to ensure consumer is not closed while being used from another thread
assertTrue(executor.awaitTermination(5, TimeUnit.SECONDS), "Executor did not terminate");
}
}
}
// Polls until the consumer returns a non-empty batch (failing via TestUtils if none
// arrives in time) and returns that first non-empty batch.
private ConsumerRecords<byte[], byte[]> awaitNonEmptyRecords(
    Consumer<byte[], byte[]> consumer,
    TopicPartition tp
) throws Exception {
    AtomicReference<ConsumerRecords<byte[], byte[]>> firstBatch = new AtomicReference<>();
    TestUtils.waitForCondition(() -> {
        var batch = consumer.poll(Duration.ofSeconds(10));
        if (batch.isEmpty()) {
            return false;
        }
        firstBatch.set(batch);
        return true;
    }, "Timed out waiting for non-empty records from topic " + tp.topic() + " partition " + tp.partition());
    return firstBatch.get();
}
public static
|
PlaintextConsumerTest
|
java
|
elastic__elasticsearch
|
x-pack/plugin/mapper-aggregate-metric/src/test/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateMetricDoubleFieldMapperTests.java
|
{
"start": 2079,
"end": 25509
}
|
class ____ extends MapperTestCase {
// Mapping attribute listing the metric sub-fields (e.g. min/max/sum/value_count).
public static final String METRICS_FIELD = METRICS;
// Field type name under test ("aggregate_metric_double").
public static final String CONTENT_TYPE = AggregateMetricDoubleFieldMapper.CONTENT_TYPE;
// Mapping attribute selecting which metric backs plain queries and sorting.
public static final String DEFAULT_METRIC = AggregateMetricDoubleFieldMapper.Names.DEFAULT_METRIC;
@Override
protected Collection<? extends Plugin> getPlugins() {
    // The aggregate_metric_double mapper is contributed by this plugin.
    Plugin mapperPlugin = new AggregateMetricMapperPlugin();
    return List.of(mapperPlugin);
}
@Override
protected void minimalMapping(XContentBuilder b) throws IOException {
    // Smallest valid mapping: three metrics with "max" as the default metric.
    b.field("type", CONTENT_TYPE);
    b.field(METRICS_FIELD, new String[] { "min", "max", "value_count" });
    b.field(DEFAULT_METRIC, "max");
}
@Override
protected void registerParameters(ParameterChecker checker) throws IOException {
    // Changing the default metric is a mapping conflict.
    checker.registerConflictCheck(DEFAULT_METRIC, fieldMapping(this::minimalMapping), fieldMapping(m -> {
        m.field("type", CONTENT_TYPE);
        m.field(METRICS_FIELD, new String[] { "min", "max" });
        m.field(DEFAULT_METRIC, "min");
    }));
    // Shrinking the set of metrics is a mapping conflict.
    checker.registerConflictCheck(METRICS_FIELD, fieldMapping(this::minimalMapping), fieldMapping(m -> {
        m.field("type", CONTENT_TYPE);
        m.field(METRICS_FIELD, new String[] { "min", "max" });
        m.field(DEFAULT_METRIC, "max");
    }));
    // Growing the set of metrics is a mapping conflict as well.
    checker.registerConflictCheck(METRICS_FIELD, fieldMapping(this::minimalMapping), fieldMapping(m -> {
        m.field("type", CONTENT_TYPE);
        m.field(METRICS_FIELD, new String[] { "min", "max", "value_count", "sum" });
        m.field(DEFAULT_METRIC, "min");
    }));
}
@Override
protected Object getSampleValueForDocument() {
return Map.of("min", -10.1, "max", 50.0, "value_count", 14);
}
@Override
protected Object getSampleObjectForDocument() {
return getSampleValueForDocument();
}
@Override
protected Object getSampleValueForQuery() {
return 50.0;
}
@Override
protected boolean supportsStoredFields() {
return false;
}
/**
* Test parsing field mapping and adding simple field
*/
public void testParseValue() throws Exception {
DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping));
ParsedDocument doc = mapper.parse(
source(b -> b.startObject("field").field("min", -10.1).field("max", 50.0).field("value_count", 14).endObject())
);
assertEquals("DoubleField <field.min:-10.1>", doc.rootDoc().getField("field.min").toString());
Mapper fieldMapper = mapper.mappers().getMapper("field");
assertThat(fieldMapper, instanceOf(AggregateMetricDoubleFieldMapper.class));
}
/**
* Test that invalid field mapping containing no metrics is not accepted
*/
public void testInvalidMapping() throws Exception {
XContentBuilder mapping = XContentFactory.jsonBuilder()
.startObject()
.startObject("_doc")
.startObject("properties")
.startObject("metric")
.field("type", CONTENT_TYPE)
.endObject()
.endObject()
.endObject()
.endObject();
Exception e = expectThrows(MapperParsingException.class, () -> createDocumentMapper(mapping));
assertThat(e.getMessage(), containsString("Property [metrics] is required for field [metric]."));
}
/**
* Test parsing an aggregate_metric field that contains no values
*/
public void testParseEmptyValue() throws Exception {
DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping));
Exception e = expectThrows(DocumentParsingException.class, () -> mapper.parse(source(b -> b.startObject("field").endObject())));
assertThat(
e.getCause().getMessage(),
containsString("Aggregate metric field [field] must contain all metrics [min, max, value_count]")
);
}
@Override
protected boolean supportsIgnoreMalformed() {
return true;
}
@Override
protected List<ExampleMalformedValue> exampleMalformedValues() {
var min = randomDoubleBetween(-100, 100, false);
var max = randomDoubleBetween(min, 150, false);
var valueCount = randomIntBetween(1, Integer.MAX_VALUE);
var randomString = randomAlphaOfLengthBetween(1, 10);
var randomLong = randomLong();
var randomDouble = randomDouble();
var randomBoolean = randomBoolean();
return List.of(
// wrong input structure
exampleMalformedValue(b -> b.value(randomString)).errorMatches("Failed to parse object"),
exampleMalformedValue(b -> b.value(randomLong)).errorMatches("Failed to parse object"),
exampleMalformedValue(b -> b.value(randomDouble)).errorMatches("Failed to parse object"),
exampleMalformedValue(b -> b.value(randomBoolean)).errorMatches("Failed to parse object"),
// no metrics
exampleMalformedValue(b -> b.startObject().endObject()).errorMatches(
"Aggregate metric field [field] must contain all metrics [min, max, value_count]"
),
// unmapped metric
exampleMalformedValue(
b -> b.startObject()
.field("min", min)
.field("max", max)
.field("value_count", valueCount)
.field("sum", randomLong)
.endObject()
).errorMatches("Aggregate metric [sum] does not exist in the mapping of field [field]"),
// missing metric
exampleMalformedValue(b -> b.startObject().field("min", min).field("max", max).endObject()).errorMatches(
"Aggregate metric field [field] must contain all metrics [min, max, value_count]"
),
// invalid metric value
exampleMalformedValue(b -> b.startObject().field("min", "10.0").field("max", max).field("value_count", valueCount).endObject())
.errorMatches("Failed to parse object: expecting token of type [VALUE_NUMBER] but found [VALUE_STRING]"),
// Invalid metric value with additional data.
// `min` field triggers the error and all additional data should be preserved in synthetic source.
exampleMalformedValue(
b -> b.startObject()
.field("max", max)
.field("value_count", valueCount)
.field("min", "10.0")
.field("hello", randomString)
.startObject("object")
.field("hello", randomLong)
.endObject()
.array("list", randomString, randomString)
.endObject()
).errorMatches("Failed to parse object: expecting token of type [VALUE_NUMBER] but found [VALUE_STRING]"),
// metric is an object
exampleMalformedValue(
b -> b.startObject()
.startObject("min")
.field("hello", "world")
.endObject()
.field("max", max)
.field("value_count", valueCount)
.endObject()
).errorMatches("Failed to parse object: expecting token of type [VALUE_NUMBER] but found [START_OBJECT]"),
// metric is an array
exampleMalformedValue(
b -> b.startObject().array("min", "hello", "world").field("max", max).field("value_count", valueCount).endObject()
).errorMatches("Failed to parse object: expecting token of type [VALUE_NUMBER] but found [START_ARRAY]"),
// negative value count
exampleMalformedValue(
b -> b.startObject().field("min", min).field("max", max).field("value_count", -1 * valueCount).endObject()
).errorMatches("Aggregate metric [value_count] of field [field] cannot be a negative number"),
// value count with decimal digits (whole numbers formatted as doubles are permitted, but non-whole numbers are not)
exampleMalformedValue(b -> b.startObject().field("min", min).field("max", max).field("value_count", 77.33).endObject())
.errorMatches("failed to parse [value_count] sub field: 77.33 cannot be converted to Integer without data loss")
);
}
/**
* Test adding a metric that other than the supported ones (min, max, sum, value_count)
*/
public void testUnsupportedMetric() throws Exception {
Exception e = expectThrows(
MapperParsingException.class,
() -> createDocumentMapper(
fieldMapping(b -> b.field("type", CONTENT_TYPE).field(METRICS_FIELD, new String[] { "min", "max", "unsupported" }))
)
);
assertThat(e.getMessage(), containsString("Metric [unsupported] is not supported."));
}
/**
* Test parsing a value_count metric written as double with zero decimal digits
*/
public void testValueCountDouble() throws Exception {
DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping));
ParsedDocument doc = mapper.parse(
source(b -> b.startObject("field").field("min", 10.0).field("max", 50.0).field("value_count", 77.0).endObject())
);
assertEquals(77, doc.rootDoc().getField("field.value_count").numericValue().longValue());
}
/**
* Test parsing a metric and check the min max value
*/
public void testCheckMinMaxValue() throws Exception {
DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping));
// min > max
Exception e = expectThrows(
DocumentParsingException.class,
() -> mapper.parse(
source(b -> b.startObject("field").field("min", 50.0).field("max", 10.0).field("value_count", 14).endObject())
)
);
assertThat(e.getCause().getMessage(), containsString("Aggregate metric field [field] max value cannot be smaller than min value"));
// min == max
mapper.parse(source(b -> b.startObject("field").field("min", 50.0).field("max", 50.0).field("value_count", 14).endObject()));
// min < max
mapper.parse(source(b -> b.startObject("field").field("min", 10.0).field("max", 50.0).field("value_count", 14).endObject()));
}
/**
 * Writes a single-metric aggregate_metric_double mapping selected by {@code randomNumber}:
 * 0 = min, 1 = max, 2 = value_count, 3 = sum. The chosen metric is also set as the
 * default_metric so the mapping is valid on its own.
 *
 * @param b            builder to append the field mapping to
 * @param randomNumber metric selector in [0, 3]; values outside that range write only the type field
 */
private void randomMapping(XContentBuilder b, int randomNumber) throws IOException {
    b.field("type", CONTENT_TYPE);
    switch (randomNumber) {
        case 0 -> b.field(METRICS_FIELD, new String[] { "min" }).field(DEFAULT_METRIC, "min");
        case 1 -> b.field(METRICS_FIELD, new String[] { "max" }).field(DEFAULT_METRIC, "max");
        case 2 -> b.field(METRICS_FIELD, new String[] { "value_count" }).field(DEFAULT_METRIC, "value_count");
        case 3 -> b.field(METRICS_FIELD, new String[] { "sum" }).field(DEFAULT_METRIC, "sum");
    }
}
/**
 * Test inserting a document containing an array of metrics. An exception must be thrown.
 */
public void testParseArrayValue() throws Exception {
    // Pick one of the four single-metric mappings (see randomMapping) and then
    // supply TWO objects for the same field — multi-valued input must be rejected.
    int randomNumber = randomIntBetween(0, 3);
    DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> randomMapping(b, randomNumber)));
    Exception e = expectThrows(DocumentParsingException.class, () -> mapper.parse(source(b -> {
        // First array element: a valid value for the mapped metric.
        b.startArray("field").startObject();
        switch (randomNumber) {
            case 0 -> b.field("min", 10.0);
            case 1 -> b.field("max", 50);
            case 2 -> b.field("value_count", 3);
            case 3 -> b.field("sum", 100.0);
        }
        b.endObject();
        // Second array element: another value for the same metric, which makes the field multi-valued.
        b.startObject();
        switch (randomNumber) {
            case 0 -> b.field("min", 20.0);
            case 1 -> b.field("max", 60);
            case 2 -> b.field("value_count", 2);
            case 3 -> b.field("sum", 200.0);
        }
        b.endObject().endArray();
    })));
    assertThat(
        e.getCause().getMessage(),
        containsString(
            "Field [field] of type [aggregate_metric_double] "
                + "does not support indexing multiple values for the same field in the same document"
        )
    );
}
/**
* Test setting the default_metric explicitly
*/
public void testExplicitDefaultMetric() throws Exception {
DocumentMapper mapper = createDocumentMapper(
fieldMapping(
b -> b.field("type", CONTENT_TYPE).field(METRICS_FIELD, new String[] { "value_count", "sum" }).field(DEFAULT_METRIC, "sum")
)
);
Mapper fieldMapper = mapper.mappers().getMapper("field");
assertThat(fieldMapper, instanceOf(AggregateMetricDoubleFieldMapper.class));
assertEquals(Metric.sum, ((AggregateMetricDoubleFieldMapper) fieldMapper).defaultMetric());
}
/**
* Test the default_metric when not set explicitly. When only a single metric is contained, this is set as the default
*/
public void testImplicitDefaultMetricSingleMetric() throws Exception {
DocumentMapper mapper = createDocumentMapper(
fieldMapping(b -> b.field("type", CONTENT_TYPE).field(METRICS_FIELD, new String[] { "value_count" }))
);
Mapper fieldMapper = mapper.mappers().getMapper("field");
assertThat(fieldMapper, instanceOf(AggregateMetricDoubleFieldMapper.class));
assertEquals(Metric.value_count, ((AggregateMetricDoubleFieldMapper) fieldMapper).defaultMetric);
}
/**
* Test the default_metric when not set explicitly, by default we have set it to be the max.
*/
public void testImplicitDefaultMetric() throws Exception {
DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping));
Mapper fieldMapper = mapper.mappers().getMapper("field");
assertThat(fieldMapper, instanceOf(AggregateMetricDoubleFieldMapper.class));
assertEquals(Metric.max, ((AggregateMetricDoubleFieldMapper) fieldMapper).defaultMetric);
}
/**
* Test the default_metric when not set explicitly. When more than one metrics are contained
* and max is not one of them, an exception should be thrown.
*/
public void testMissingDefaultMetric() {
Exception e = expectThrows(
MapperParsingException.class,
() -> createDocumentMapper(
fieldMapping(b -> b.field("type", CONTENT_TYPE).field(METRICS_FIELD, new String[] { "value_count", "sum" }))
)
);
assertThat(e.getMessage(), containsString("Property [default_metric] is required for field [field]."));
}
/**
* Test setting an invalid value for the default_metric. An exception must be thrown
*/
public void testInvalidDefaultMetric() {
Exception e = expectThrows(
MapperParsingException.class,
() -> createDocumentMapper(
fieldMapping(
b -> b.field("type", CONTENT_TYPE)
.field(METRICS_FIELD, new String[] { "value_count", "sum" })
.field(DEFAULT_METRIC, "invalid_metric")
)
)
);
assertThat(e.getMessage(), containsString("Metric [invalid_metric] is not supported."));
}
/**
* Test setting a value for the default_metric that is not contained in the "metrics" field.
* An exception must be thrown
*/
public void testUndefinedDefaultMetric() {
Exception e = expectThrows(
MapperParsingException.class,
() -> createDocumentMapper(
fieldMapping(
b -> b.field("type", CONTENT_TYPE)
.field(METRICS_FIELD, new String[] { "value_count", "sum" })
.field(DEFAULT_METRIC, "min")
)
)
);
assertThat(e.getMessage(), containsString("Default metric [min] is not defined in the metrics of field [field]."));
}
/**
* Test parsing field mapping and adding simple field
*/
public void testParseNestedValue() throws Exception {
DocumentMapper mapper = createDocumentMapper(
fieldMapping(
b -> b.startObject("properties")
.startObject("subfield")
.field("type", CONTENT_TYPE)
.field(METRICS_FIELD, new String[] { "min", "max", "sum", "value_count" })
.field(DEFAULT_METRIC, "max")
.endObject()
.endObject()
)
);
Mapper fieldMapper = mapper.mappers().getMapper("field.subfield");
assertThat(fieldMapper, instanceOf(AggregateMetricDoubleFieldMapper.class));
ParsedDocument doc = mapper.parse(
source(
b -> b.startObject("field")
.startObject("subfield")
.field("min", 10.1)
.field("max", 50.0)
.field("sum", 43)
.field("value_count", 14)
.endObject()
.endObject()
)
);
assertThat(doc.rootDoc().getField("field.subfield.min"), notNullValue());
}
/**
* subfields of aggregate_metric_double should not be searchable or exposed in field_caps
*/
public void testNoSubFieldsIterated() throws IOException {
Metric[] values = Metric.values();
List<Metric> subset = randomSubsetOf(randomIntBetween(1, values.length), values);
DocumentMapper mapper = createDocumentMapper(
fieldMapping(b -> b.field("type", CONTENT_TYPE).field(METRICS_FIELD, subset).field(DEFAULT_METRIC, subset.get(0)))
);
Iterator<Mapper> iterator = mapper.mappers().getMapper("field").iterator();
assertFalse(iterator.hasNext());
}
public void testFieldCaps() throws IOException {
MapperService aggMetricMapperService = createMapperService(fieldMapping(this::minimalMapping));
MappedFieldType fieldType = aggMetricMapperService.fieldType("field");
assertThat(fieldType.familyTypeName(), equalTo("aggregate_metric_double"));
assertTrue(fieldType.isSearchable());
assertTrue(fieldType.isAggregatable());
}
/*
* Since all queries for aggregate_metric_double fields are delegated to their default_metric numeric
* sub-field, we override this method so that testExistsQueryMinimalMapping() passes successfully.
*/
protected void assertExistsQuery(MappedFieldType fieldType, Query query, LuceneDocument fields) {
assertThat(query, Matchers.instanceOf(FieldExistsQuery.class));
FieldExistsQuery fieldExistsQuery = (FieldExistsQuery) query;
String defaultMetric = ((AggregateMetricDoubleFieldMapper.AggregateMetricDoubleFieldType) fieldType).getDefaultMetric().name();
assertEquals("field." + defaultMetric, fieldExistsQuery.getField());
assertNoFieldNamesField(fields);
}
@Override
protected Object generateRandomInputValue(MappedFieldType ft) {
assumeFalse("Test implemented in a follow up", true);
return null;
}
public void testCannotBeUsedInMultifields() {
Exception e = expectThrows(MapperParsingException.class, () -> createMapperService(fieldMapping(b -> {
b.field("type", "keyword");
b.startObject("fields");
b.startObject("metric");
minimalMapping(b);
b.endObject();
b.endObject();
})));
assertThat(e.getMessage(), containsString("Field [metric] of type [aggregate_metric_double] can't be used in multifields"));
}
public void testMetricType() throws IOException {
// Test default setting
MapperService mapperService = createMapperService(fieldMapping(b -> minimalMapping(b)));
AggregateMetricDoubleFieldMapper.AggregateMetricDoubleFieldType ft =
(AggregateMetricDoubleFieldMapper.AggregateMetricDoubleFieldType) mapperService.fieldType("field");
assertNull(ft.getMetricType());
assertMetricType("gauge", AggregateMetricDoubleFieldMapper.AggregateMetricDoubleFieldType::getMetricType);
{
// Test invalid metric type for this field type
Exception e = expectThrows(MapperParsingException.class, () -> createMapperService(fieldMapping(b -> {
minimalMapping(b);
b.field("time_series_metric", "counter");
})));
assertThat(
e.getCause().getMessage(),
containsString("Unknown value [counter] for field [time_series_metric] - accepted values are [gauge]")
);
}
{
// Test invalid metric type for this field type
Exception e = expectThrows(MapperParsingException.class, () -> createMapperService(fieldMapping(b -> {
minimalMapping(b);
b.field("time_series_metric", "unknown");
})));
assertThat(
e.getCause().getMessage(),
containsString("Unknown value [unknown] for field [time_series_metric] - accepted values are [gauge]")
);
}
}
@Override
protected SyntheticSourceSupport syntheticSourceSupport(boolean ignoreMalformed) {
return new AggregateMetricDoubleSyntheticSourceSupport(ignoreMalformed);
}
@Override
protected IngestScriptSupport ingestScriptSupport() {
throw new AssumptionViolatedException("not supported");
}
public void testArrayValueSyntheticSource() throws Exception {
DocumentMapper mapper = createSytheticSourceMapperService(
fieldMapping(
b -> b.field("type", CONTENT_TYPE)
.array("metrics", "min", "max")
.field("default_metric", "min")
.field("ignore_malformed", "true")
)
).documentMapper();
var randomString = randomAlphaOfLength(10);
CheckedConsumer<XContentBuilder, IOException> arrayValue = b -> {
b.startArray("field");
{
b.startObject().field("max", 100).field("min", 10).endObject();
b.startObject().field("max", 200).field("min", 20).endObject();
b.value(randomString);
}
b.endArray();
};
var expected = JsonXContent.contentBuilder().startObject();
// First value comes from synthetic field loader and so is formatted in a specific format (e.g. min always come first).
// Other values are stored as is as part of ignore_malformed logic for synthetic source.
{
expected.startArray("field");
expected.startObject().field("min", 10.0).field("max", 100.0).endObject();
expected.startObject().field("max", 200).field("min", 20).endObject();
expected.value(randomString);
expected.endArray();
}
expected.endObject();
var syntheticSource = syntheticSource(mapper, arrayValue);
assertEquals(Strings.toString(expected), syntheticSource);
}
protected final
|
AggregateMetricDoubleFieldMapperTests
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestAutoCloseableLock.java
|
{
"start": 1137,
"end": 1177
}
|
class ____ AutoCloseableLock.
*/
public
|
for
|
java
|
apache__flink
|
flink-test-utils-parent/flink-connector-test-utils/src/main/java/org/apache/flink/connector/testframe/testsuites/SinkTestSuiteBase.java
|
{
"start": 4730,
"end": 5247
}
|
class ____ sink test suite.
*
* <p>All cases should have well-descriptive JavaDoc, including:
*
* <ul>
* <li>What's the purpose of this case
* <li>Simple description of how this case works
* <li>Condition to fulfill in order to pass this case
* <li>Requirement of running this case
* </ul>
*/
@ExtendWith({
ConnectorTestingExtension.class,
TestLoggerExtension.class,
TestCaseInvocationContextProvider.class
})
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
@Experimental
public abstract
|
for
|
java
|
elastic__elasticsearch
|
x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/jira/JiraAction.java
|
{
"start": 854,
"end": 4640
}
|
class ____ implements Action {
public static final String TYPE = "jira";
@Nullable
final String account;
@Nullable
final HttpProxy proxy;
final Map<String, Object> fields;
public JiraAction(@Nullable String account, Map<String, Object> fields, HttpProxy proxy) {
this.account = account;
this.fields = fields;
this.proxy = proxy;
}
@Override
public String type() {
return TYPE;
}
public String getAccount() {
return account;
}
@Override
public boolean equals(Object o) {
    // Identity fast path.
    if (o == this) {
        return true;
    }
    // Exact-class comparison (not instanceof) so subclasses are never equal to this type.
    if (o == null || o.getClass() != getClass()) {
        return false;
    }
    JiraAction other = (JiraAction) o;
    return Objects.equals(account, other.account)
        && Objects.equals(fields, other.fields)
        && Objects.equals(proxy, other.proxy);
}
@Override
public int hashCode() {
return Objects.hash(account, fields, proxy);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
if (account != null) {
builder.field(Field.ACCOUNT.getPreferredName(), account);
}
if (proxy != null) {
proxy.toXContent(builder, params);
}
builder.field(Field.FIELDS.getPreferredName(), fields);
return builder.endObject();
}
/**
 * Parses a {@code jira} action definition from the watch source.
 * Recognized fields are the account name (string), an HTTP proxy object, and an
 * arbitrary map of issue fields; any other field is a parse error. If no fields
 * object is present, an empty map is used.
 *
 * @param watchId  id of the enclosing watch, used only in error messages
 * @param actionId id of the action within the watch, used only in error messages
 * @param parser   positioned inside the action object
 * @return the parsed action
 * @throws IOException on underlying parser failures
 * @throws ElasticsearchParseException on malformed or unexpected content
 */
public static JiraAction parse(String watchId, String actionId, XContentParser parser) throws IOException {
    String account = null;
    HttpProxy proxy = null;
    Map<String, Object> fields = null;
    // Dispatch on the most recently seen field name while walking tokens until the object closes.
    String currentFieldName = null;
    XContentParser.Token token;
    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
        if (token == XContentParser.Token.FIELD_NAME) {
            currentFieldName = parser.currentName();
        } else if (Field.ACCOUNT.match(currentFieldName, parser.getDeprecationHandler())) {
            // account must be a plain string value.
            if (token == XContentParser.Token.VALUE_STRING) {
                account = parser.text();
            } else {
                throw new ElasticsearchParseException(
                    "failed to parse [{}] action [{}/{}]. expected [{}] to be of type string, but " + "found [{}] instead",
                    TYPE,
                    watchId,
                    actionId,
                    Field.ACCOUNT.getPreferredName(),
                    token
                );
            }
        } else if (Field.PROXY.match(currentFieldName, parser.getDeprecationHandler())) {
            proxy = HttpProxy.parse(parser);
        } else if (Field.FIELDS.match(currentFieldName, parser.getDeprecationHandler())) {
            // fields is a free-form object; wrap any parsing failure with action context.
            try {
                fields = parser.map();
            } catch (Exception e) {
                throw new ElasticsearchParseException(
                    "failed to parse [{}] action [{}/{}]. failed to parse [{}] field",
                    e,
                    TYPE,
                    watchId,
                    actionId,
                    Field.FIELDS.getPreferredName()
                );
            }
        } else {
            // Unknown field names are rejected rather than silently ignored.
            throw new ElasticsearchParseException(
                "failed to parse [{}] action [{}/{}]. unexpected token [{}/{}]",
                TYPE,
                watchId,
                actionId,
                token,
                currentFieldName
            );
        }
    }
    if (fields == null) {
        fields = Collections.emptyMap();
    }
    return new JiraAction(account, fields, proxy);
}
public static
|
JiraAction
|
java
|
spring-projects__spring-boot
|
module/spring-boot-health/src/test/java/org/springframework/boot/health/autoconfigure/contributor/CompositeHealthContributorConfigurationTests.java
|
{
"start": 1232,
"end": 1780
}
|
class ____
extends AbstractCompositeHealthContributorConfigurationTests<HealthContributor, TestHealthIndicator> {
@Override
protected AbstractCompositeHealthContributorConfiguration<HealthContributor, TestHealthIndicator, TestBean> newComposite() {
return new TestCompositeHealthContributorConfiguration();
}
@Override
protected Stream<String> allNamesFromComposite(HealthContributor composite) {
return ((HealthContributors) composite).stream().map(HealthContributors.Entry::name);
}
static
|
CompositeHealthContributorConfigurationTests
|
java
|
spring-projects__spring-framework
|
spring-test/src/test/java/org/springframework/test/web/servlet/htmlunit/UrlRegexRequestMatcherTests.java
|
{
"start": 839,
"end": 1235
}
|
class ____ extends AbstractWebRequestMatcherTests {

    /**
     * Verifies the CDN example documented at the class level of
     * {@code UrlRegexRequestMatcher}: the regex matches a jQuery URL served
     * from code.jquery.com but not a locally hosted copy of the same file.
     */
    @Test
    public void verifyExampleInClassLevelJavadoc() throws Exception {
        WebRequestMatcher cdnMatcher = new UrlRegexRequestMatcher(".*?//code.jquery.com/.*");
        assertMatches(cdnMatcher, "https://code.jquery.com/jquery-1.11.0.min.js");
        assertDoesNotMatch(cdnMatcher, "http://localhost/jquery-1.11.0.min.js");
    }
}
|
UrlRegexRequestMatcherTests
|
java
|
google__dagger
|
hilt-compiler/main/java/dagger/hilt/processor/internal/root/ProcessedRootSentinelGenerator.java
|
{
"start": 1035,
"end": 1644
}
|
class ____ {

    // Root element whose qualified name gets recorded in the generated annotation.
    private final XTypeElement processedRoot;
    // Compilation mode forwarded to the aggregating-class generator.
    private final Mode mode;

    ProcessedRootSentinelGenerator(XTypeElement processedRoot, Mode mode) {
        this.processedRoot = processedRoot;
        this.mode = mode;
    }

    /**
     * Emits an aggregating class in the processed-root-sentinel package,
     * annotated with the processed root's qualified name, marking that root
     * as already processed.
     *
     * @throws IOException if writing the generated source fails
     */
    void generate() throws IOException {
        Processors.generateAggregatingClass(
            ClassNames.PROCESSED_ROOT_SENTINEL_PACKAGE,
            AnnotationSpec.builder(ClassNames.PROCESSED_ROOT_SENTINEL)
                .addMember("roots", "$S", processedRoot.getQualifiedName())
                .build(),
            processedRoot,
            getClass(),
            mode);
    }
}
|
ProcessedRootSentinelGenerator
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestNodeAttributesCLI.java
|
{
"start": 3160,
"end": 3204
}
|
class ____ TestNodeAttributesCLI.
*/
public
|
for
|
java
|
apache__kafka
|
clients/src/main/java/org/apache/kafka/common/security/oauthbearer/OAuthBearerValidatorCallback.java
|
{
"start": 1608,
"end": 5550
}
|
class ____ implements Callback {

    /** The raw token value to be validated; never null or empty. */
    private final String tokenValue;
    /** Set on successful validation; mutually exclusive with the error fields. */
    private OAuthBearerToken token;
    private String errorStatus;
    private String errorScope;
    private String errorOpenIDConfiguration;

    /**
     * Creates a callback carrying the token value that a handler must validate.
     *
     * @param tokenValue
     *            the mandatory, non-empty token value
     */
    public OAuthBearerValidatorCallback(String tokenValue) {
        Objects.requireNonNull(tokenValue);
        if (tokenValue.isEmpty())
            throw new IllegalArgumentException("token value must not be empty");
        this.tokenValue = tokenValue;
    }

    /**
     * Returns the token value under validation; never null.
     *
     * @return the token value
     */
    public String tokenValue() {
        return tokenValue;
    }

    /**
     * Returns the validated token, or null if validation has not succeeded.
     *
     * @return the (potentially null) token
     */
    public OAuthBearerToken token() {
        return token;
    }

    /**
     * Returns the error status value, or null if no error was reported, as per
     * <a href="https://tools.ietf.org/html/rfc7628#section-3.2.2">RFC 7628</a>
     * and the <a href=
     * "https://www.iana.org/assignments/oauth-parameters/oauth-parameters.xhtml#extensions-error">IANA
     * OAuth Extensions Error Registry</a>.
     *
     * @return the (potentially null) error status value
     */
    public String errorStatus() {
        return errorStatus;
    }

    /**
     * Returns the error scope value, or null if none was reported, as per
     * <a href="https://tools.ietf.org/html/rfc7628#section-3.2.2">RFC 7628</a>.
     *
     * @return the (potentially null) error scope value
     */
    public String errorScope() {
        return errorScope;
    }

    /**
     * Returns the error openid-configuration value, or null if none was
     * reported, as per
     * <a href="https://tools.ietf.org/html/rfc7628#section-3.2.2">RFC 7628</a>.
     *
     * @return the (potentially null) error openid-configuration value
     */
    public String errorOpenIDConfiguration() {
        return errorOpenIDConfiguration;
    }

    /**
     * Records a successful validation result. The token value is unchanged and
     * is expected to match the given token's value; any previously recorded
     * error values are cleared.
     *
     * @param token
     *            the mandatory token to set
     */
    public void token(OAuthBearerToken token) {
        Objects.requireNonNull(token);
        this.errorStatus = null;
        this.errorScope = null;
        this.errorOpenIDConfiguration = null;
        this.token = token;
    }

    /**
     * Records a validation failure as per
     * <a href="https://tools.ietf.org/html/rfc7628#section-3.2.2">RFC 7628</a>.
     * Any previously recorded token is cleared.
     *
     * @param errorStatus
     *            the mandatory error status value from the <a href=
     *            "https://www.iana.org/assignments/oauth-parameters/oauth-parameters.xhtml#extensions-error">IANA
     *            OAuth Extensions Error Registry</a> to set
     * @param errorScope
     *            the optional error scope value to set
     * @param errorOpenIDConfiguration
     *            the optional error openid-configuration value to set
     */
    public void error(String errorStatus, String errorScope, String errorOpenIDConfiguration) {
        Objects.requireNonNull(errorStatus);
        if (errorStatus.isEmpty())
            throw new IllegalArgumentException("error status must not be empty");
        this.token = null;
        this.errorStatus = errorStatus;
        this.errorScope = errorScope;
        this.errorOpenIDConfiguration = errorOpenIDConfiguration;
    }
}
|
OAuthBearerValidatorCallback
|
java
|
reactor__reactor-core
|
reactor-core/src/test/java/reactor/core/publisher/OnNextFailureStrategyTest.java
|
{
"start": 1102,
"end": 24382
}
|
class ____ {
/**
 * Helper for other tests to emulate resumeDrop with the public consumer-based API.
 *
 * @param e the error to drop via {@code Operators.onErrorDropped}, or null to drop no error
 * @param v the value to drop via {@code Operators.onNextDropped}, or null to drop no value
 */
public static <T> void drop(@Nullable Throwable e, @Nullable T v) {
    // Drop the value before the error, so the onNextDropped hook fires first
    // when both are present.
    if (v != null) {
        Operators.onNextDropped(v, Context.empty());
    }
    if (e != null) {
        Operators.onErrorDropped(e, Context.empty());
    }
}
@Test
public void resumeDrop() {
AtomicReference<Throwable> error = new AtomicReference<>();
AtomicReference<Object> value = new AtomicReference<>();
Hooks.onErrorDropped(error::set);
Hooks.onNextDropped(value::set);
OnNextFailureStrategy strategy = OnNextFailureStrategy.resumeDrop();
String data = "foo";
Throwable exception = new NullPointerException("foo");
assertThat(strategy.test(exception, data)).isTrue();
Throwable t = strategy.process(exception, data, Context.empty());
assertThat(t).isNull();
assertThat(error.get()).isInstanceOf(NullPointerException.class).hasMessage("foo");
assertThat(value).hasValue("foo");
}
@Test
public void resumeDropWithFatal() {
AtomicReference<Throwable> error = new AtomicReference<>();
AtomicReference<Object> value = new AtomicReference<>();
Hooks.onErrorDropped(error::set);
Hooks.onNextDropped(value::set);
OnNextFailureStrategy strategy = OnNextFailureStrategy.resumeDrop();
String data = "foo";
Throwable exception = new NoSuchMethodError("foo");
assertThat(strategy.test(exception, data)).isTrue();
assertThatExceptionOfType(NoSuchMethodError.class)
.isThrownBy(() -> strategy.process(exception, data, Context.empty()));
assertThat(error.get()).isNull();
assertThat(value.get()).isNull();
}
@Test
public void resumeDropIfMatch() {
AtomicReference<Throwable> error = new AtomicReference<>();
AtomicReference<Object> value = new AtomicReference<>();
Hooks.onErrorDropped(error::set);
Hooks.onNextDropped(value::set);
OnNextFailureStrategy strategy = OnNextFailureStrategy.resumeDropIf(
e -> e instanceof NullPointerException);
String data = "foo";
Throwable exception = new NullPointerException("foo");
assertThat(strategy.test(exception, data)).isTrue();
Throwable t = strategy.process(exception, data, Context.empty());
assertThat(t).isNull();
assertThat(error.get()).isInstanceOf(NullPointerException.class).hasMessage("foo");
assertThat(value).hasValue("foo");
}
@Test
// resumeDropIf with a non-matching predicate: process() returns the original
// error untouched (no suppression) and neither drop hook fires.
public void resumeDropIfNoMatch() {
AtomicReference<Throwable> error = new AtomicReference<>();
AtomicReference<Object> value = new AtomicReference<>();
Hooks.onErrorDropped(error::set);
Hooks.onNextDropped(value::set);
OnNextFailureStrategy strategy = OnNextFailureStrategy.resumeDropIf(
e -> e instanceof IllegalArgumentException);
String data = "foo";
Throwable exception = new NullPointerException("foo");
assertThat(strategy.test(exception, data)).isFalse();
Throwable t = strategy.process(exception, data, Context.empty());
assertThat(t)
.isSameAs(exception)
.hasNoSuppressedExceptions();
assertThat(error.get()).isNull();
assertThat(value.get()).isNull();
}
// resumeDropIf whose predicate matches a JVM-fatal error: the strategy still
// resumes, dropping both the error and the value through the hooks.
@Test
public void resumeDropIfWithFatalMatch() {
AtomicReference<Throwable> error = new AtomicReference<>();
AtomicReference<Object> value = new AtomicReference<>();
Hooks.onErrorDropped(error::set);
Hooks.onNextDropped(value::set);
OnNextFailureStrategy strategy = OnNextFailureStrategy.resumeDropIf(
e -> e instanceof NoSuchMethodError);
String data = "foo";
Throwable exception = new NoSuchMethodError("foo");
assertThat(strategy.test(exception, data)).isTrue();
Throwable t = strategy.process(exception, data, Context.empty());
assertThat(t).isNull();
assertThat(error.get())
.isInstanceOf(NoSuchMethodError.class)
.hasMessage("foo");
assertThat(value).hasValue("foo");
}
// resumeDropIf with a fatal error that does NOT match: process() rethrows the
// fatal instead of returning it, and no hook fires.
@Test
public void resumeDropIfWithFatalNoMatch() {
AtomicReference<Throwable> error = new AtomicReference<>();
AtomicReference<Object> value = new AtomicReference<>();
Hooks.onErrorDropped(error::set);
Hooks.onNextDropped(value::set);
OnNextFailureStrategy strategy = OnNextFailureStrategy.resumeDropIf(
e -> e instanceof NullPointerException);
String data = "foo";
Throwable exception = new NoSuchMethodError("foo");
assertThat(strategy.test(exception, data)).isFalse();
assertThatExceptionOfType(NoSuchMethodError.class)
.isThrownBy(() -> strategy.process(exception, data, Context.empty()));
assertThat(error.get()).isNull();
assertThat(value.get()).isNull();
}
// A predicate that itself throws propagates its exception out of both test()
// and process(); nothing is dropped.
@Test
public void resumeDropIfPredicateFails() {
AtomicReference<Throwable> error = new AtomicReference<>();
AtomicReference<Object> value = new AtomicReference<>();
Hooks.onErrorDropped(error::set);
Hooks.onNextDropped(value::set);
IllegalStateException failurePredicate = new IllegalStateException("boomInPredicate");
OnNextFailureStrategy strategy = OnNextFailureStrategy.resumeDropIf(
e -> { throw failurePredicate; });
String data = "foo";
Throwable exception = new NullPointerException("foo");
assertThatExceptionOfType(IllegalStateException.class)
.isThrownBy(() -> strategy.test(exception, data))
.withMessage("boomInPredicate");
assertThatExceptionOfType(IllegalStateException.class)
.isThrownBy(() -> strategy.process(exception, data, Context.empty()))
.withMessage("boomInPredicate");
assertThat(error.get()).isNull();
assertThat(value.get()).isNull();
}
// If the onNextDropped hook throws, process() returns the hook's failure with
// the original error attached as a suppressed exception.
@Test
public void resumeDropValueHookFails() {
AtomicReference<Throwable> error = new AtomicReference<>();
UnsupportedOperationException failure = new UnsupportedOperationException("value hook");
Hooks.onErrorDropped(error::set);
Hooks.onNextDropped(v -> { throw failure; });
OnNextFailureStrategy strategy = OnNextFailureStrategy.resumeDrop();
String data = "foo";
Throwable exception = new NullPointerException("foo");
Throwable t = strategy.process(exception, data, Context.empty());
assertThat(t)
.hasMessage("value hook")
.hasSuppressedException(exception);
assertThat(error.get()).isNull();
}
// If the onErrorDropped hook throws, process() returns the hook's failure with
// the original error suppressed; the value was already dropped beforehand.
@Test
public void resumeDropErrorHookFails() {
AtomicReference<Object> value = new AtomicReference<>();
UnsupportedOperationException failure = new UnsupportedOperationException("error hook");
Hooks.onNextDropped(value::set);
Hooks.onErrorDropped(v -> { throw failure; });
OnNextFailureStrategy strategy = OnNextFailureStrategy.resumeDrop();
String data = "foo";
Throwable exception = new NullPointerException("foo");
Throwable t = strategy.process(exception, data, Context.empty());
assertThat(t)
.hasMessage("error hook")
.hasSuppressedException(exception);
assertThat(value).hasValue("foo");
}
// A hook that rethrows the very same error must not make that error suppress
// itself: process() returns it as-is with no suppressed exceptions.
@Test
public void resumeDropDoesntSelfSuppressIfHookRethrows() {
AtomicReference<Object> value = new AtomicReference<>();
Hooks.onErrorDropped(e -> { throw Exceptions.propagate(e); });
Hooks.onNextDropped(value::set);
OnNextFailureStrategy strategy = OnNextFailureStrategy.resumeDrop();
String data = "foo";
Throwable exception = new IllegalArgumentException("foo");
assertThat(strategy.test(exception, data)).as("predicate matches").isTrue();
Throwable t = strategy.process(exception, data, Context.empty());
assertThat(t)
.isSameAs(exception)
.hasNoSuppressedExceptions();
assertThat(value).hasValue("foo");
}
// resume with a BiConsumer: the consumer receives both the error and the
// offending value and process() signals full recovery by returning null.
@Test
public void resume() {
AtomicReference<Throwable> error = new AtomicReference<>();
AtomicReference<Object> value = new AtomicReference<>();
OnNextFailureStrategy strategy = OnNextFailureStrategy.resume(
(t, v) -> {
error.set(t);
value.set(v);
});
String data = "foo";
Throwable exception = new NullPointerException("foo");
assertThat(strategy.test(exception, data)).isTrue();
Throwable t = strategy.process(exception, data, Context.empty());
assertThat(t).isNull();
assertThat(error.get()).isInstanceOf(NullPointerException.class).hasMessage("foo");
assertThat(value).hasValue("foo");
}
// resume never swallows JVM-fatal errors: process() rethrows them and the
// consumer is never invoked.
@Test
public void resumeWithFatal() {
AtomicReference<Throwable> error = new AtomicReference<>();
AtomicReference<Object> value = new AtomicReference<>();
OnNextFailureStrategy strategy = OnNextFailureStrategy.resume(
(t, v) -> {
error.set(t);
value.set(v);
});
String data = "foo";
Throwable exception = new NoSuchMethodError("foo");
assertThat(strategy.test(exception, data)).isTrue();
assertThatExceptionOfType(NoSuchMethodError.class)
.isThrownBy(() -> strategy.process(exception, data, Context.empty()));
assertThat(error.get()).isNull();
assertThat(value.get()).isNull();
}
// If the resume consumer throws, its failure is returned with the original
// error suppressed (partial consumer side effects remain visible).
@Test
public void resumeErrorConsumerFails() {
AtomicReference<Object> value = new AtomicReference<>();
IllegalStateException failureError = new IllegalStateException("boomInErrorConsumer");
OnNextFailureStrategy strategy = OnNextFailureStrategy.resume(
(t, v) -> {
value.set(v);
throw failureError;
});
String data = "foo";
Throwable exception = new NullPointerException("foo");
assertThat(strategy.test(exception, data)).isTrue();
Throwable t = strategy.process(exception, data, Context.empty());
assertThat(t).isSameAs(failureError)
.hasSuppressedException(exception);
assertThat(value).hasValue("foo");
}
// resumeIf with a matching predicate behaves like resume: consumer runs,
// process() returns null.
@Test
public void resumeIfMatch() {
AtomicReference<Throwable> error = new AtomicReference<>();
AtomicReference<Object> value = new AtomicReference<>();
OnNextFailureStrategy strategy = OnNextFailureStrategy.resumeIf(
e -> e instanceof NullPointerException,
(t, v) -> {
error.set(t);
value.set(v);
});
String data = "foo";
Throwable exception = new NullPointerException("foo");
assertThat(strategy.test(exception, data)).isTrue();
Throwable t = strategy.process(exception, data, Context.empty());
assertThat(t).isNull();
assertThat(error.get()).isInstanceOf(NullPointerException.class).hasMessage("foo");
assertThat(value).hasValue("foo");
}
// resumeIf with a non-matching predicate: the original error is returned
// untouched and the consumer never runs.
@Test
public void resumeIfNoMatch() {
AtomicReference<Throwable> error = new AtomicReference<>();
AtomicReference<Object> value = new AtomicReference<>();
OnNextFailureStrategy strategy = OnNextFailureStrategy.resumeIf(
e -> e instanceof IllegalArgumentException,
(t, v) -> {
error.set(t);
value.set(v);
});
String data = "foo";
Throwable exception = new NullPointerException("foo");
assertThat(strategy.test(exception, data)).isFalse();
Throwable t = strategy.process(exception, data, Context.empty());
assertThat(t)
.isSameAs(exception)
.hasNoSuppressedExceptions();
assertThat(error.get()).isNull();
assertThat(value.get()).isNull();
}
// resumeIf whose predicate explicitly matches a fatal error: the consumer is
// still applied and the error is considered handled.
@Test
public void resumeIfWithFatalMatch() {
AtomicReference<Throwable> error = new AtomicReference<>();
AtomicReference<Object> value = new AtomicReference<>();
OnNextFailureStrategy strategy = OnNextFailureStrategy.resumeIf(
e -> e instanceof NoSuchMethodError,
(t, v) -> {
error.set(t);
value.set(v);
});
String data = "foo";
Throwable exception = new NoSuchMethodError("foo");
assertThat(strategy.test(exception, data)).isTrue();
Throwable t = strategy.process(exception, data, Context.empty());
assertThat(t).isNull();
assertThat(error.get())
.isInstanceOf(NoSuchMethodError.class)
.hasMessage("foo");
assertThat(value).hasValue("foo");
}
// resumeIf with a fatal error that does not match: the fatal is rethrown from
// process() and nothing is consumed.
@Test
public void resumeIfWithFatalNoMatch() {
AtomicReference<Throwable> error = new AtomicReference<>();
AtomicReference<Object> value = new AtomicReference<>();
OnNextFailureStrategy strategy = OnNextFailureStrategy.resumeIf(
e -> e instanceof IllegalArgumentException,
(t, v) -> {
error.set(t);
value.set(v);
});
String data = "foo";
Throwable exception = new NoSuchMethodError("foo");
assertThat(strategy.test(exception, data)).isFalse();
assertThatExceptionOfType(NoSuchMethodError.class)
.isThrownBy(() -> strategy.process(exception, data, Context.empty()));
assertThat(error.get()).isNull();
assertThat(value.get()).isNull();
}
// resumeIf where the consumer throws: the consumer's failure is returned with
// the original error suppressed.
@Test
public void resumeIfErrorConsumerFails() {
AtomicReference<Object> value = new AtomicReference<>();
IllegalStateException failureError = new IllegalStateException("boomInErrorConsumer");
OnNextFailureStrategy strategy = OnNextFailureStrategy.resumeIf(
e -> true,
(t, v) -> {
value.set(v);
throw failureError;
});
String data = "foo";
Throwable exception = new NullPointerException("foo");
assertThat(strategy.test(exception, data)).isTrue();
Throwable t = strategy.process(exception, data, Context.empty());
assertThat(t).isSameAs(failureError)
.hasSuppressedException(exception);
assertThat(value).hasValue("foo");
}
// resumeIf where the predicate throws: the predicate failure propagates from
// both test() and process(); the consumer never runs.
@Test
public void resumeIfPredicateFails() {
AtomicReference<Throwable> error = new AtomicReference<>();
AtomicReference<Object> value = new AtomicReference<>();
IllegalStateException failurePredicate = new IllegalStateException("boomInPredicate");
OnNextFailureStrategy strategy = OnNextFailureStrategy.resumeIf(
e -> { throw failurePredicate; },
(t, v) -> {
error.set(t);
value.set(v);
});
String data = "foo";
Throwable exception = new NullPointerException("foo");
assertThatExceptionOfType(IllegalStateException.class)
.isThrownBy(() -> strategy.test(exception, data))
.withMessage("boomInPredicate");
assertThatExceptionOfType(IllegalStateException.class)
.isThrownBy(() -> strategy.process(exception, data, Context.empty()))
.withMessage("boomInPredicate");
assertThat(error.get()).isNull();
assertThat(value.get()).isNull();
}
// A consumer that rethrows the same error must not self-suppress: process()
// returns the original error with no suppressed exceptions.
@Test
public void resumeIfDoesntSelfSuppress() {
AtomicReference<Throwable> error = new AtomicReference<>();
AtomicReference<Object> value = new AtomicReference<>();
Hooks.onErrorDropped(error::set);
Hooks.onNextDropped(value::set);
OnNextFailureStrategy strategy = OnNextFailureStrategy.resumeIf(t -> t instanceof IllegalArgumentException,
(t, v) -> { throw Exceptions.propagate(t);});
String data = "foo";
Throwable exception = new IllegalArgumentException("foo");
assertThat(strategy.test(exception, data)).as("predicate matches").isTrue();
Throwable t = strategy.process(exception, data, Context.empty());
assertThat(t)
.isSameAs(exception)
.hasNoSuppressedExceptions();
assertThat(error).hasValue(null);
assertThat(value).hasValue(null);
}
// The STOP strategy never resumes, whatever the error type.
@Test
public void stopCannotResume() {
OnNextFailureStrategy strategy = OnNextFailureStrategy.stop();
assertThat(strategy.test(new IllegalStateException(), null))
.isFalse();
assertThat(strategy.test(new NoSuchMethodError(), null))
.isFalse();
}
// Calling process() on STOP is a usage error: it returns a fresh
// IllegalStateException with the offending error suppressed.
@Test
public void stopProcessReturnsNewException() {
OnNextFailureStrategy strategy = OnNextFailureStrategy.stop();
Throwable exception = new NullPointerException("foo");
Throwable t = strategy.process(exception, null, Context.empty());
assertThat(t).isInstanceOf(IllegalStateException.class)
.hasMessage("STOP strategy cannot process errors")
.hasSuppressedException(exception);
}
// STOP rethrows fatal errors directly, without wrapping or suppression.
@Test
public void stopProcessWithFatal() {
OnNextFailureStrategy strategy = OnNextFailureStrategy.stop();
Throwable exception = new NoSuchMethodError("foo");
assertThatExceptionOfType(NoSuchMethodError.class)
.isThrownBy(() -> strategy.process(exception, null, Context.empty()))
.satisfies(e -> assertThat(e)
.hasMessage("foo")
.hasNoSuppressedExceptions());
}
// onErrorContinue at the Flux API level: the element that caused the
// ArithmeticException ("" -> 3/0) is routed to the user callback instead of
// the global drop hooks, and the rest of the sequence completes.
@Test
public void fluxApiErrorContinue() {
List<String> valueDropped = new ArrayList<>();
List<Throwable> errorDropped = new ArrayList<>();
Flux<String> test = Flux.just("foo", "", "bar", "baz")
.filter(s -> 3 / s.length() == 1)
.onErrorContinue((t, v) -> {
errorDropped.add(t);
valueDropped.add((String) v);
});
StepVerifier.create(test)
.expectNext("foo", "bar", "baz")
.expectComplete()
.verifyThenAssertThat()
.hasNotDroppedElements()
.hasNotDroppedErrors();
assertThat(valueDropped).containsExactly("");
assertThat(errorDropped)
.hasSize(1)
.allSatisfy(e -> assertThat(e).hasMessage("/ by zero"));
}
// Predicate-based onErrorContinue: continues only when the predicate matches
// the error (here it does, so behavior mirrors the unconditional variant).
@Test
public void fluxApiErrorContinueConditional() {
List<String> valueDropped = new ArrayList<>();
List<Throwable> errorDropped = new ArrayList<>();
Flux<String> test = Flux.just("foo", "", "bar", "baz")
.filter(s -> 3 / s.length() == 1)
.onErrorContinue(
t -> t instanceof ArithmeticException,
(t, v) -> {
errorDropped.add(t);
valueDropped.add((String) v);
});
StepVerifier.create(test)
.expectNext("foo", "bar", "baz")
.expectComplete()
.verifyThenAssertThat()
.hasNotDroppedElements()
.hasNotDroppedErrors();
assertThat(valueDropped).containsExactly("");
assertThat(errorDropped)
.hasSize(1)
.allSatisfy(e -> assertThat(e).hasMessage("/ by zero"));
}
// Predicate-based onErrorContinue where the predicate does NOT match: the
// sequence terminates with the original error and the callback never runs.
@Test
public void fluxApiErrorContinueConditionalErrorNotMatch() {
List<String> valueDropped = new ArrayList<>();
List<Throwable> errorDropped = new ArrayList<>();
Flux<String> test = Flux.just("foo", "", "bar", "baz")
.filter(s -> 3 / s.length() == 1)
.onErrorContinue(
t -> t instanceof IllegalStateException,
(t, v) -> {
errorDropped.add(t);
valueDropped.add((String) v);
});
StepVerifier.create(test)
.expectNext("foo")
.expectErrorMessage("/ by zero")
.verifyThenAssertThat()
.hasNotDroppedElements()
.hasNotDroppedErrors();
assertThat(valueDropped).isEmpty();
assertThat(errorDropped).isEmpty();
}
// Class-based onErrorContinue: matching by exception class continues past the
// failure just like the predicate form.
@Test
public void fluxApiErrorContinueConditionalByClass() {
List<String> valueDropped = new ArrayList<>();
List<Throwable> errorDropped = new ArrayList<>();
Flux<String> test = Flux.just("foo", "", "bar", "baz")
.filter(s -> 3 / s.length() == 1)
.onErrorContinue(ArithmeticException.class,
(t, v) -> {
errorDropped.add(t);
valueDropped.add((String) v);
});
StepVerifier.create(test)
.expectNext("foo", "bar", "baz")
.expectComplete()
.verifyThenAssertThat()
.hasNotDroppedElements()
.hasNotDroppedErrors();
assertThat(valueDropped).containsExactly("");
assertThat(errorDropped)
.hasSize(1)
.allSatisfy(e -> assertThat(e).hasMessage("/ by zero"));
}
// Class-based onErrorContinue with a non-matching class: the error propagates
// and terminates the sequence after the first element.
@Test
public void fluxApiErrorContinueConditionalByClassNotMatch() {
List<String> valueDropped = new ArrayList<>();
List<Throwable> errorDropped = new ArrayList<>();
Flux<String> test = Flux.just("foo", "", "bar", "baz")
.filter(s -> 3 / s.length() == 1)
.onErrorContinue(IllegalStateException.class,
(t, v) -> {
errorDropped.add(t);
valueDropped.add((String) v);
});
StepVerifier.create(test)
.expectNext("foo")
.expectErrorMessage("/ by zero")
.verifyThenAssertThat()
.hasNotDroppedElements()
.hasNotDroppedErrors();
assertThat(valueDropped).isEmpty();
assertThat(errorDropped).isEmpty();
}
// onErrorContinue declared downstream of a flatMap is inherited by the inner
// publishers: each inner 30/0 failure (one per outer value) is dropped and the
// remaining divisions are emitted.
@Test
public void fluxApiWithinFlatMap() {
Flux<Integer> test = Flux.just(1, 2, 3)
.flatMap(i -> Flux.range(0, i + 1)
.map(v -> 30 / v))
.onErrorContinue(OnNextFailureStrategyTest::drop);
StepVerifier.create(test)
.expectNext(30, 30, 15, 30, 15, 10)
.expectComplete()
.verifyThenAssertThat()
.hasDroppedExactly(0, 0, 0)
.hasDroppedErrorsSatisfying(
errors -> assertThat(errors)
.hasSize(3)
.allMatch(e -> e instanceof ArithmeticException));
}
// Same inheritance semantics for Mono inner publishers inside flatMap: the
// single 30/0 failure is dropped, other values pass through.
@Test
public void monoApiWithinFlatMap() {
Flux<Integer> test = Flux.just(0, 1, 2, 3)
.flatMap(i -> Mono.just(i).map(v -> 30 / v))
.onErrorContinue(OnNextFailureStrategyTest::drop);
StepVerifier.create(test)
.expectNext(30, 15, 10)
.expectComplete()
.verifyThenAssertThat()
.hasDroppedExactly(0)
.hasDroppedErrorsSatisfying(
errors -> assertThat(errors)
.hasSize(1)
.allMatch(e -> e instanceof ArithmeticException));
}
// onErrorStop inside the inner Flux overrides the inherited onErrorContinue,
// letting the local onErrorReturn(100) handle the failure instead.
@Test
public void overrideInheritedErrorStrategyInFlatMap() {
AtomicReference<Throwable> errorRef = new AtomicReference<>();
Flux<Integer> test = Flux.just(1, 2, 3)
.flatMap(i -> Flux.range(0, i + 1)
.map(v -> 30 / v)
.onErrorReturn(100)
.onErrorStop()
)
.onErrorContinue((t, o) -> errorRef.compareAndSet(null, t));
StepVerifier.create(test)
.expectNext(100, 100, 100)
.expectComplete()
.verify();
assertThat(errorRef).hasValue(null);
}
// Same override, Mono variant: the inner onErrorStop restores local error
// handling so onErrorReturn(100) wins and the outer callback never fires.
@Test
public void overrideInheritedErrorStrategyInFlatMapMono() {
AtomicReference<Throwable> errorRef = new AtomicReference<>();
Mono<Integer> test = Mono.just(1)
.flatMap(i -> Mono.just(1 - i)
.map(v -> 30 / v)
.onErrorReturn(100)
.onErrorStop()
)
.onErrorContinue((t, o) -> errorRef.compareAndSet(null, t));
StepVerifier.create(test)
.expectNext(100)
.expectComplete()
.verify();
assertThat(errorRef).hasValue(null);
}
// An onErrorContinue configured only inside the inner publisher must not leak
// upstream: the outer map's failure still terminates the sequence.
@Test
public void errorStrategyConfiguredInFlatMapDoesNotLeak() {
@SuppressWarnings("divzero")
Flux<Integer> test = Flux.just(0, 1, 2)
.map(i -> i / 0)
.flatMap(i -> Flux.just(i).onErrorContinue(OnNextFailureStrategyTest::drop));
StepVerifier.create(test)
.expectError(ArithmeticException.class)
.verify();
}
// onErrorStop scopes the downstream onErrorContinue to operators after it:
// the 10/0 is dropped, but the IllegalStateException upstream of onErrorStop
// still terminates the sequence.
@Test
public void errorStrategySimpleScoping() {
Flux<Integer> test = Flux.just(0, 1, 2, 3)
.map(i -> {
if (i == 3) {
throw new IllegalStateException();
}
else {
return i;
}
})
.onErrorStop()
.map(i -> 10 / i)
.onErrorContinue(OnNextFailureStrategyTest::drop);
StepVerifier.create(test)
.expectNext(10, 5)
.expectError(IllegalStateException.class)
.verifyThenAssertThat()
.hasDropped(0)
.hasDroppedErrors(1);
}
// The continued value is the one that entered the failing operator, hence the
// String cast below even though the downstream type is Integer.
@Test
public void errorStrategyLocalHandlerWithSimpleMappingScoping() {
List<String> valueDropped = new ArrayList<>();
List<Throwable> errorDropped = new ArrayList<>();
Flux<Integer> test = Flux.just("0", "1", "2", "asdfghc3")
.map(Integer::parseInt)
.filter(l -> l < 3)
.onErrorContinue((t, v) -> {
errorDropped.add(t);
valueDropped.add((String) v); // <--- STRING HERE
});
StepVerifier.create(test)
.expectNext(0, 1, 2)
.expectComplete()
.verifyThenAssertThat()
.hasNotDroppedErrors()
.hasNotDroppedElements();
assertThat(valueDropped).containsOnly("asdfghc3");
assertThat(errorDropped.get(0)).isExactlyInstanceOf(NumberFormatException.class);
}
}
|
OnNextFailureStrategyTest
|
java
|
google__auto
|
value/src/it/functional/src/test/java/com/google/auto/value/AutoValueTest.java
|
{
"start": 52847,
"end": 53206
}
|
class ____ {
public abstract com.google.common.base.Optional<String> optionalString();
public abstract com.google.common.base.Optional<Integer> optionalInteger();
public static Builder builder() {
return new AutoValue_AutoValueTest_OptionalPropertiesWithBuilder.Builder();
}
@AutoValue.Builder
public
|
OptionalPropertiesWithBuilder
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Count.java
|
{
"start": 1500,
"end": 9450
}
|
class ____ extends FsCommand {
/**
* Register the names for the count command
* @param factory the command factory that will instantiate this class
*/
public static void registerCommands(CommandFactory factory) {
factory.addClass(Count.class, "-count");
}
private static final String OPTION_QUOTA = "q";
private static final String OPTION_HUMAN = "h";
private static final String OPTION_HEADER = "v";
private static final String OPTION_TYPE = "t";
// exclude snapshots from calculation. Only work on default columns.
private static final String OPTION_EXCLUDE_SNAPSHOT = "x";
//return the quota, namespace count and disk space usage.
private static final String OPTION_QUOTA_AND_USAGE = "u";
private static final String OPTION_ECPOLICY = "e";
private static final String OPTION_SNAPSHOT_COUNT = "s";
public static final String NAME = "count";
public static final String USAGE =
"[-" + OPTION_QUOTA + "] [-" + OPTION_HUMAN + "] [-" + OPTION_HEADER
+ "] [-" + OPTION_TYPE + " [<storage type>]] [-" +
OPTION_QUOTA_AND_USAGE + "] [-" + OPTION_EXCLUDE_SNAPSHOT
+ "] [-" + OPTION_ECPOLICY + "] [-" + OPTION_SNAPSHOT_COUNT
+ "] <path> ...";
public static final String DESCRIPTION =
"Count the number of directories, files and bytes under the paths\n" +
"that match the specified file pattern. The output columns are:\n" +
StringUtils.join(ContentSummary.getHeaderFields(), ' ') +
" PATHNAME\n" +
"or, with the -" + OPTION_QUOTA + " option:\n" +
StringUtils.join(ContentSummary.getQuotaHeaderFields(), ' ') + "\n" +
" " +
StringUtils.join(ContentSummary.getHeaderFields(), ' ') +
" PATHNAME\n" +
"The -" + OPTION_HUMAN +
" option shows file sizes in human readable format.\n" +
"The -" + OPTION_HEADER + " option displays a header line.\n" +
"The -" + OPTION_EXCLUDE_SNAPSHOT + " option excludes snapshots " +
"from being calculated. \n" +
"The -" + OPTION_TYPE + " option displays quota by storage types.\n" +
"It should be used with -" + OPTION_QUOTA + " or -" +
OPTION_QUOTA_AND_USAGE + " option, otherwise it will be ignored.\n" +
"If a comma-separated list of storage types is given after the -" +
OPTION_TYPE + " option, \n" +
"it displays the quota and usage for the specified types. \n" +
"Otherwise, it displays the quota and usage for all the storage \n" +
"types that support quota. The list of possible storage " +
"types(case insensitive):\n" +
"ram_disk, ssd, disk, archive and nvdimm.\n" +
"It can also pass the value '', 'all' or 'ALL' to specify all " +
"the storage types.\n" +
"The -" + OPTION_QUOTA_AND_USAGE + " option shows the quota and \n" +
"the usage against the quota without the detailed content summary."+
"The -" + OPTION_ECPOLICY + " option shows the erasure coding policy."
+ "The -" + OPTION_SNAPSHOT_COUNT + " option shows snapshot counts.";
private boolean showQuotas;
private boolean humanReadable;
private boolean showQuotabyType;
private List<StorageType> storageTypes = null;
private boolean showQuotasAndUsageOnly;
private boolean excludeSnapshots;
private boolean displayECPolicy;
private boolean showSnapshot;
/** Constructor */
public Count() {}
/** Constructor
* @deprecated invoke via {@link FsShell}
* @param cmd the count command
* @param pos the starting index of the arguments
* @param conf configuration
*/
@Deprecated
public Count(String[] cmd, int pos, Configuration conf) {
super(conf);
this.args = Arrays.copyOfRange(cmd, pos, cmd.length);
}
@Override
protected void processOptions(LinkedList<String> args) {
CommandFormat cf = new CommandFormat(1, Integer.MAX_VALUE,
OPTION_QUOTA, OPTION_HUMAN, OPTION_HEADER, OPTION_QUOTA_AND_USAGE,
OPTION_EXCLUDE_SNAPSHOT,
OPTION_ECPOLICY, OPTION_SNAPSHOT_COUNT);
cf.addOptionWithValue(OPTION_TYPE);
cf.parse(args);
if (args.isEmpty()) { // default path is the current working directory
args.add(".");
}
showQuotas = cf.getOpt(OPTION_QUOTA);
humanReadable = cf.getOpt(OPTION_HUMAN);
showQuotasAndUsageOnly = cf.getOpt(OPTION_QUOTA_AND_USAGE);
excludeSnapshots = cf.getOpt(OPTION_EXCLUDE_SNAPSHOT);
displayECPolicy = cf.getOpt(OPTION_ECPOLICY);
showSnapshot = cf.getOpt(OPTION_SNAPSHOT_COUNT);
if (showQuotas || showQuotasAndUsageOnly) {
String types = cf.getOptValue(OPTION_TYPE);
if (null != types) {
showQuotabyType = true;
storageTypes = getAndCheckStorageTypes(types);
} else {
showQuotabyType = false;
}
if (excludeSnapshots) {
out.println(OPTION_QUOTA + " or " + OPTION_QUOTA_AND_USAGE + " option "
+ "is given, the -x option is ignored.");
excludeSnapshots = false;
}
}
if (cf.getOpt(OPTION_HEADER)) {
StringBuilder headString = new StringBuilder();
if (showQuotabyType) {
headString.append(QuotaUsage.getStorageTypeHeader(storageTypes));
} else {
if (showQuotasAndUsageOnly) {
headString.append(QuotaUsage.getHeader());
} else {
headString.append(ContentSummary.getHeader(showQuotas));
}
}
if (displayECPolicy) {
headString.append(ContentSummary.getErasureCodingPolicyHeader());
}
if (showSnapshot) {
headString.append(ContentSummary.getSnapshotHeader());
}
headString.append("PATHNAME");
out.println(headString.toString());
}
}
private List<StorageType> getAndCheckStorageTypes(String types) {
if ("".equals(types) || "all".equalsIgnoreCase(types)) {
return StorageType.getTypesSupportingQuota();
}
String[] typeArray = StringUtils.split(types, ',');
List<StorageType> stTypes = new ArrayList<>();
for (String t : typeArray) {
stTypes.add(StorageType.parseStorageType(t));
}
return stTypes;
}
@Override
protected void processPath(PathData src) throws IOException {
StringBuilder outputString = new StringBuilder();
if (showQuotasAndUsageOnly || showQuotabyType) {
QuotaUsage usage = src.fs.getQuotaUsage(src.path);
outputString.append(usage.toString(
isHumanReadable(), showQuotabyType, storageTypes));
} else {
ContentSummary summary = src.fs.getContentSummary(src.path);
outputString.append(summary.toString(
showQuotas, isHumanReadable(), excludeSnapshots));
}
if (displayECPolicy) {
ContentSummary summary = src.fs.getContentSummary(src.path);
outputString.append(summary.toErasureCodingPolicy());
}
if (showSnapshot) {
ContentSummary summary = src.fs.getContentSummary(src.path);
outputString.append(summary.toSnapshot(isHumanReadable()));
}
outputString.append(src);
out.println(outputString.toString());
}
/**
* Should quotas get shown as part of the report?
* @return if quotas should be shown then true otherwise false
*/
@InterfaceAudience.Private
boolean isShowQuotas() {
return showQuotas;
}
/**
* Should sizes be shown in human readable format rather than bytes?
* @return true if human readable format
*/
@InterfaceAudience.Private
boolean isHumanReadable() {
return humanReadable;
}
/**
* should print quota by storage types
* @return true if enables quota by storage types
*/
@InterfaceAudience.Private
boolean isShowQuotabyType() {
return showQuotabyType;
}
/**
* show specified storage types
* @return specified storagetypes
*/
@InterfaceAudience.Private
List<StorageType> getStorageTypes() {
return storageTypes;
}
}
|
Count
|
java
|
redisson__redisson
|
redisson/src/main/java/org/redisson/RedissonExecutorService.java
|
{
"start": 30954,
"end": 64262
}
|
class ____ it should be static");
}
}
// Blocks until the scheduled task's add-operation is acknowledged, then
// returns the same future for fluent use.
private <T> RedissonScheduledFuture<T> syncExecute(RedissonScheduledFuture<T> future) {
RemotePromise<?> rp = future.getInnerPromise();
syncExecute(rp);
return future;
}
// Waits for the task to be accepted by the remote executor; throws
// RejectedExecutionException if the executor is already shut down, and
// unwraps any CompletionException back to its runtime-exception cause.
private <T> void syncExecute(RemotePromise<T> promise) {
CompletableFuture<Boolean> addFuture = promise.getAddFuture();
try {
Boolean res = addFuture.join();
if (!res) {
throw new RejectedExecutionException("Task rejected. ExecutorService is in shutdown state");
}
} catch (CompletionException e) {
throw (RuntimeException) e.getCause();
}
}
// ExecutorService.submit(Runnable, T): runs the task remotely and completes
// the returned future with the caller-supplied result value.
@Override
public <T> RExecutorFuture<T> submit(Runnable task, T result) {
RemotePromise<T> future = (RemotePromise<T>) submit(task).toCompletableFuture();
CompletableFuture<T> f = future.thenApply(res -> result);
return new RedissonExecutorFuture<T>(f, future.getRequestId());
}
// Submits several runnables in one atomic batch and blocks until the batch is
// registered; rejects everything if the executor is shut down.
@Override
public RExecutorBatchFuture submit(Runnable... tasks) {
if (tasks.length == 0) {
throw new NullPointerException("Tasks are not defined");
}
List<RExecutorFuture<?>> result = new ArrayList<>();
TasksBatchService executorRemoteService = createBatchService();
RemoteExecutorServiceAsync asyncService = executorRemoteService.get(RemoteExecutorServiceAsync.class, RESULT_OPTIONS);
for (Runnable task : tasks) {
check(task);
RemotePromise<Void> promise = (RemotePromise<Void>) asyncService.executeRunnable(createTaskParameters(task)).toCompletableFuture();
RedissonExecutorFuture<Void> executorFuture = new RedissonExecutorFuture<Void>(promise);
result.add(executorFuture);
}
// Flush the batch; all adds share one outcome, so inspecting the first
// result is enough to detect rejection.
List<Boolean> addResult = executorRemoteService.executeAdd();
if (!addResult.get(0)) {
throw new RejectedExecutionException("Tasks have been rejected. ExecutorService is in shutdown state");
}
CompletableFuture<Void> future = CompletableFuture.allOf(result.stream()
.map(CompletionStage::toCompletableFuture)
.toArray(CompletableFuture[]::new));
return new RedissonExecutorBatchFuture(future, result);
}
// Asynchronous batch submit: returns immediately; per-task futures are failed
// later if the batch add fails or any task is rejected.
@Override
public RExecutorBatchFuture submitAsync(Runnable... tasks) {
if (tasks.length == 0) {
throw new NullPointerException("Tasks are not defined");
}
TasksBatchService executorRemoteService = createBatchService();
RemoteExecutorServiceAsync asyncService = executorRemoteService.get(RemoteExecutorServiceAsync.class, RESULT_OPTIONS);
List<RExecutorFuture<?>> result = new ArrayList<>();
for (Runnable task : tasks) {
check(task);
RemotePromise<Void> promise = (RemotePromise<Void>) asyncService.executeRunnable(createTaskParameters(task)).toCompletableFuture();
RedissonExecutorFuture<Void> executorFuture = new RedissonExecutorFuture<Void>(promise);
result.add(executorFuture);
}
executorRemoteService.executeAddAsync().whenComplete((res, e) -> {
if (e != null) {
for (RExecutorFuture<?> executorFuture : result) {
executorFuture.toCompletableFuture().completeExceptionally(e);
}
return;
}
// A single rejected add fails the whole batch.
for (Boolean bool : res) {
if (!bool) {
RejectedExecutionException ex = new RejectedExecutionException("Task rejected. ExecutorService is in shutdown state");
for (RExecutorFuture<?> executorFuture : result) {
executorFuture.toCompletableFuture().completeExceptionally(ex);
}
break;
}
}
});
CompletableFuture<Void> future = CompletableFuture.allOf(result.stream()
.map(CompletionStage::toCompletableFuture)
.toArray(CompletableFuture[]::new));
return new RedissonExecutorBatchFuture(future, result);
}
// Synchronous submit: registers the task, blocks until it is accepted, then
// returns a tracked future.
@Override
public RExecutorFuture<?> submit(Runnable task) {
RemotePromise<Void> promise = (RemotePromise<Void>) submitWithoutCheckAsync(idGenerator.generateId(), task).toCompletableFuture();
syncExecute(promise);
return createFuture(promise);
}
// Synchronous submit with a time-to-live on the task payload.
@Override
public RExecutorFuture<?> submit(Runnable task, long timeToLive, TimeUnit timeUnit) {
RemotePromise<Void> promise = (RemotePromise<Void>) submitWithoutCheckAsync(idGenerator.generateId(),
task, Duration.ofMillis(timeUnit.toMillis(timeToLive))).toCompletableFuture();
syncExecute(promise);
return createFuture(promise);
}
// Asynchronous submit with TTL; does not wait for the add acknowledgement.
@Override
public RExecutorFuture<?> submitAsync(Runnable task, long timeToLive, TimeUnit timeUnit) {
return submitWithoutCheckAsync(idGenerator.generateId(), task, Duration.ofMillis(timeUnit.toMillis(timeToLive)));
}
// Asynchronous submit without TTL.
@Override
public RExecutorFuture<?> submitAsync(Runnable task) {
return submitWithoutCheckAsync(idGenerator.generateId(), task);
}
// Stops tracking responses for the given request: cancels any pending
// response timeouts and removes the whole queue entry once it is empty.
private void cancelResponseHandling(String requestId) {
responses.computeIfPresent(responseQueueName, (key, entry) -> {
List<Result> list = entry.getResponses().remove(requestId);
if (list != null) {
for (Result result : list) {
result.cancelResponseTimeout();
}
}
if (entry.getResponses().isEmpty()) {
return null;
}
return entry;
});
}
// schedule(Runnable, delay) delegates to the TTL variant with a zero TTL.
@Override
public RScheduledFuture<?> schedule(Runnable task, long delay, TimeUnit unit) {
return schedule(task, delay, unit, 0, unit);
}
// Wraps a remote promise in an executor future and registers it for
// reference-queue based cleanup.
private <T> RExecutorFuture<T> createFuture(RemotePromise<T> promise) {
RExecutorFuture<T> f = new RedissonExecutorFuture<T>(promise);
storeReference(f, promise.getRequestId());
return f;
}
// Scheduled variant of createFuture, carrying the planned execution time.
private <T> RScheduledFuture<T> createFuture(RemotePromise<T> promise, long scheduledExecutionTime) {
RedissonScheduledFuture<T> f = new RedissonScheduledFuture<T>(promise, scheduledExecutionTime);
storeReference(f, promise.getRequestId());
return f;
}
// Tracks the future with a weak-reference queue so that response handling can
// be cancelled once the caller no longer holds the future; drains already
// collected references first. (Field name "referenceDueue" is a pre-existing
// typo of "referenceQueue" declared elsewhere in this class.)
private void storeReference(RExecutorFuture<?> future, String requestId) {
while (true) {
RedissonExecutorFutureReference r = (RedissonExecutorFutureReference) referenceDueue.poll();
if (r == null) {
break;
}
references.remove(r);
// Only cancel response handling when nobody is chained on the promise.
if (r.getPromise().getNumberOfDependents() == 0) {
cancelResponseHandling(r.getRequestId());
}
}
CompletableFuture<?> promise = ((CompletableFutureWrapper<?>) future).toCompletableFuture();
RedissonExecutorFutureReference reference = new RedissonExecutorFutureReference(requestId, future, referenceDueue, promise);
references.add(reference);
}
// Async schedule(Runnable) delegates to the TTL variant with zero TTL.
@Override
public RScheduledFuture<?> scheduleAsync(Runnable task, long delay, TimeUnit unit) {
return scheduleAsync(task, delay, unit, 0, unit);
}
// schedule(Callable) delegates to the TTL variant with zero TTL.
@Override
public <V> RScheduledFuture<V> schedule(Callable<V> task, long delay, TimeUnit unit) {
return schedule(task, delay, unit, 0, unit);
}
// Async schedule(Callable) with no TTL.
@Override
public <V> RScheduledFuture<V> scheduleAsync(Callable<V> task, long delay, TimeUnit unit) {
return scheduleWithoutCheckAsync(idGenerator.generateId(), task, Duration.ofMillis(unit.toMillis(delay)), Duration.ZERO);
}
// Synchronous schedule(Runnable) with TTL: blocks until the task is accepted.
@Override
public RScheduledFuture<?> schedule(Runnable command, long delay, TimeUnit unit, long ttl, TimeUnit ttlUnit) {
RedissonScheduledFuture<?> future = (RedissonScheduledFuture<?>) scheduleWithoutCheckAsync(idGenerator.generateId(), command,
Duration.ofMillis(unit.toMillis(delay)), Duration.ofMillis(ttlUnit.toMillis(ttl)));
return syncExecute(future);
}
// Async schedule(Runnable) with TTL.
@Override
public RScheduledFuture<?> scheduleAsync(Runnable task, long delay, TimeUnit unit, long timeToLive, TimeUnit ttlUnit) {
return scheduleWithoutCheckAsync(idGenerator.generateId(), task, Duration.ofMillis(unit.toMillis(delay)), Duration.ofMillis(ttlUnit.toMillis(timeToLive)));
}
// Synchronous schedule(Callable) with TTL: blocks until the task is accepted.
@Override
public <V> RScheduledFuture<V> schedule(Callable<V> callable, long delay, TimeUnit unit, long timeToLive, TimeUnit ttlUnit) {
RedissonScheduledFuture<V> future = (RedissonScheduledFuture<V>) scheduleWithoutCheckAsync(idGenerator.generateId(), callable,
Duration.ofMillis(unit.toMillis(delay)), Duration.ofMillis(ttlUnit.toMillis(timeToLive)));
return syncExecute(future);
}
// Async schedule(Callable) with TTL.
@Override
public <V> RScheduledFuture<V> scheduleAsync(Callable<V> task, long delay, TimeUnit unit, long timeToLive, TimeUnit ttlUnit) {
return scheduleWithoutCheckAsync(idGenerator.generateId(), task, Duration.ofMillis(unit.toMillis(delay)), Duration.ofMillis(ttlUnit.toMillis(timeToLive)));
}
// Builds the wire-level parameters for a scheduled task; a non-positive TTL
// means "no expiry" and is simply omitted.
private ScheduledParameters createScheduledParameters(String id, Duration timeToLive, ClassBody classBody, byte[] state, long startTime) {
ScheduledParameters params = new ScheduledParameters(id, classBody.getClazzName(), classBody.getClazz(), classBody.getLambda(), state, startTime);
if (timeToLive.toMillis() > 0) {
params.setTtl(timeToLive.toMillis());
}
return params;
}
@Override
public RScheduledFuture<?> scheduleAtFixedRate(Runnable task, long initialDelay, long period, TimeUnit unit) {
RedissonScheduledFuture<?> future = (RedissonScheduledFuture<?>) scheduleWithoutCheckAtFixedRateAsync(idGenerator.generateId(),
task, Duration.ofMillis(unit.toMillis(initialDelay)), Duration.ofMillis(unit.toMillis(period)));
return syncExecute(future);
}
@Override
public RScheduledFuture<?> scheduleAtFixedRateAsync(Runnable task, long initialDelay, long period, TimeUnit unit) {
return scheduleWithoutCheckAtFixedRateAsync(idGenerator.generateId(), task, Duration.ofMillis(unit.toMillis(initialDelay)), Duration.ofMillis(unit.toMillis(period)));
}
@Override
public RScheduledFuture<?> schedule(Runnable task, CronSchedule cronSchedule) {
RedissonScheduledFuture<?> future = (RedissonScheduledFuture<?>) scheduleWithoutCheckAsync(idGenerator.generateId(), task, cronSchedule);
return syncExecute(future);
}
@Override
public RScheduledFuture<?> scheduleAsync(Runnable task, CronSchedule cronSchedule) {
return scheduleWithoutCheckAsync(idGenerator.generateId(), task, cronSchedule);
}
@Override
public RScheduledFuture<?> scheduleWithFixedDelay(Runnable task, long initialDelay, long delay, TimeUnit unit) {
RedissonScheduledFuture<?> future = (RedissonScheduledFuture<?>) scheduleWithoutCheckWithFixedDelayAsync(idGenerator.generateId(), task, Duration.ofMillis(unit.toMillis(initialDelay)), Duration.ofMillis(unit.toMillis(delay)));
return syncExecute(future);
}
@Override
public RScheduledFuture<?> scheduleWithFixedDelayAsync(Runnable task, long initialDelay, long delay, TimeUnit unit) {
return scheduleWithoutCheckWithFixedDelayAsync(idGenerator.generateId(), task, Duration.ofMillis(unit.toMillis(initialDelay)), Duration.ofMillis(unit.toMillis(delay)));
}
@Override
public Boolean cancelTask(String taskId) {
return commandExecutor.get(cancelTaskAsync(taskId));
}
@Override
public RFuture<Boolean> cancelTaskAsync(String taskId) {
return scheduledRemoteService.cancelExecutionAsync(taskId);
}
private <T> T poll(List<CompletableFuture<?>> futures, long timeout, TimeUnit timeUnit) throws InterruptedException, TimeoutException {
CompletableFuture<Object> future = CompletableFuture.anyOf(futures.toArray(new CompletableFuture[0]));
try {
if (timeout == -1) {
return (T) future.get();
} else {
return (T) future.get(timeout, timeUnit);
}
} catch (ExecutionException e) {
throw commandExecutor.convertException(e);
}
}
@Override
public <T> T invokeAny(Collection<? extends Callable<T>> tasks) throws InterruptedException, ExecutionException {
try {
return invokeAny(tasks, -1, null);
} catch (TimeoutException cannotHappen) {
return null;
}
}
@Override
public <T> T invokeAny(Collection<? extends Callable<T>> tasks,
long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException {
if (tasks == null) {
throw new NullPointerException();
}
List<CompletableFuture<?>> futures = new ArrayList<>();
for (Callable<T> callable : tasks) {
RExecutorFuture<T> future = submit(callable);
futures.add(future.toCompletableFuture());
}
T result = poll(futures, timeout, unit);
for (CompletableFuture<?> f : futures) {
f.cancel(true);
}
return result;
}
@Override
public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks) throws InterruptedException {
if (tasks == null) {
throw new NullPointerException();
}
RExecutorBatchFuture future = submit(tasks.toArray(new Callable[0]));
try {
future.toCompletableFuture().join();
} catch (Exception e) {
// skip
}
List<?> futures = future.getTaskFutures();
return (List<Future<T>>) futures;
}
@Override
public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks,
long timeout, TimeUnit unit) throws InterruptedException {
if (tasks == null || unit == null) {
throw new NullPointerException();
}
RExecutorBatchFuture future = submit(tasks.toArray(new Callable[0]));
try {
future.toCompletableFuture().get(timeout, unit);
} catch (ExecutionException e) {
LOGGER.error(e.getMessage(), e);
} catch (TimeoutException | CancellationException e) {
// skip
}
List<?> futures = future.getTaskFutures();
return (List<Future<T>>) futures;
}
@Override
public <T> RExecutorFuture<T> submit(String id, Callable<T> task) {
RemotePromise<T> promise = (RemotePromise<T>) submitAsync(id, task).toCompletableFuture();
syncExecute(promise);
return createFuture(promise);
}
@Override
public <T> RExecutorFuture<T> submitAsync(String id, Callable<T> task) {
return executeWithCheckAsync(id, task, () -> {
TaskParameters params = createTaskParameters(id, task);
return asyncService.executeCallable(params);
});
}
private <T> RExecutorFuture<T> submitWithoutCheckAsync(String id, Callable<T> task) {
check(task);
TaskParameters params = createTaskParameters(id, task);
RemotePromise<T> result = (RemotePromise<T>) asyncService.executeCallable(params).toCompletableFuture();
addListener(result);
return createFuture(result);
}
@Override
public <T> RExecutorFuture<T> submit(String id, Callable<T> task, Duration timeToLive) {
RemotePromise<T> promise = (RemotePromise<T>) submitAsync(id, task, timeToLive).toCompletableFuture();
syncExecute(promise);
return createFuture(promise);
}
@Override
public <T> RExecutorFuture<T> submitAsync(String id, Callable<T> task, Duration timeToLive) {
return executeWithCheckAsync(id, task, () -> {
TaskParameters taskParameters = createTaskParameters(id, task);
taskParameters.setTtl(timeToLive.toMillis());
return asyncService.executeCallable(taskParameters);
});
}
private <T> RExecutorFuture<T> submitWithoutCheckAsync(String id, Callable<T> task, Duration timeToLive) {
check(task);
TaskParameters taskParameters = createTaskParameters(id, task);
taskParameters.setTtl(timeToLive.toMillis());
RemotePromise<T> result = (RemotePromise<T>) asyncService.executeCallable(taskParameters).toCompletableFuture();
addListener(result);
return createFuture(result);
}
@Override
public RExecutorFuture<?> submit(String id, Runnable task, Duration timeToLive) {
RemotePromise<Void> promise = (RemotePromise<Void>) submitAsync(id, task, timeToLive).toCompletableFuture();
syncExecute(promise);
return createFuture(promise);
}
@Override
public RExecutorFuture<?> submitAsync(String id, Runnable task, Duration timeToLive) {
return executeWithCheckAsync(id, task, () -> {
TaskParameters taskParameters = createTaskParameters(id, task);
taskParameters.setTtl(timeToLive.toMillis());
return asyncService.executeRunnable(taskParameters);
});
}
private RExecutorFuture<?> submitWithoutCheckAsync(String id, Runnable task, Duration timeToLive) {
check(task);
TaskParameters taskParameters = createTaskParameters(id, task);
taskParameters.setTtl(timeToLive.toMillis());
RemotePromise<Void> result = (RemotePromise<Void>) asyncService.executeRunnable(taskParameters).toCompletableFuture();
addListener(result);
return createFuture(result);
}
@Override
public RExecutorFuture<?> submit(String id, Runnable task) {
RemotePromise<Void> promise = (RemotePromise<Void>) submitAsync(id, task).toCompletableFuture();
syncExecute(promise);
return createFuture(promise);
}
@Override
public RExecutorFuture<?> submitAsync(String id, Runnable task) {
return executeWithCheckAsync(id, task, () -> {
TaskParameters params = createTaskParameters(id, task);
return (RFuture<?>) asyncService.executeRunnable(params);
});
}
private <T> RScheduledFuture<T> executeWithCheckAsync(String id, Object task, long starTime, Supplier<RFuture<T>> function) {
RemotePromise<T> f = executeWithCheck(id, task, function);
return createFuture(f, starTime);
}
private <T> RExecutorFuture<T> executeWithCheckAsync(String id, Object task, Supplier<RFuture<T>> function) {
RemotePromise<T> f = executeWithCheck(id, task, function);
return createFuture(f);
}
private <T> RemotePromise<T> executeWithCheck(String id, Object task, Supplier<RFuture<T>> function) {
check(task);
MasterSlaveServersConfig config = commandExecutor.getServiceManager().getConfig();
long timeout = (config.getTimeout() + config.getRetryDelay().calcDelay(config.getRetryAttempts()).toMillis()) * config.getRetryAttempts();
timeout = Math.max(timeout, 1);
String taskName = tasksLatchName + ":" + id;
RFuture<Boolean> r = commandExecutor.evalWriteNoRetryAsync(getName(), LongCodec.INSTANCE, RedisCommands.EVAL_BOOLEAN,
"if redis.call('hexists', KEYS[1], ARGV[2]) == 0 then "
+ "if redis.call('set', KEYS[2], 1, 'NX', 'PX', ARGV[1]) ~= nil then "
+ "return 0; "
+ "end;"
+ "end;"
+ "return 1; ",
Arrays.asList(tasksName, taskName),
timeout, id);
AtomicReference<RemotePromise<T>> ref = new AtomicReference<>();
RemotePromise<T> promise = new RemotePromise<T>(id) {
@Override
public boolean cancel(boolean mayInterruptIfRunning) {
if (ref.get() != null) {
return ref.get().cancel(mayInterruptIfRunning);
}
return super.cancel(mayInterruptIfRunning);
}
};
CompletableFuture<Boolean> addFuture = new CompletableFuture<>();
promise.setAddFuture(addFuture);
r.whenComplete((v, e) -> {
if (e != null) {
addFuture.completeExceptionally(e);
promise.completeExceptionally(e);
return;
}
if (v) {
addFuture.completeExceptionally(new IllegalArgumentException("Duplicated id: '" + id + "' is not allowed"));
return;
}
RemotePromise<T> result = (RemotePromise<T>) function.get().toCompletableFuture();
addListener(result);
ref.set(result);
commandExecutor.getServiceManager().transfer(result, promise);
commandExecutor.getServiceManager().transfer(result.getAddFuture(), addFuture);
});
return promise;
}
private RExecutorFuture<?> submitWithoutCheckAsync(String id, Runnable task) {
check(task);
RemotePromise<Void> result = (RemotePromise<Void>) asyncService.executeRunnable(createTaskParameters(id, task)).toCompletableFuture();
addListener(result);
return createFuture(result);
}
@Override
public RScheduledFuture<?> schedule(String id, Runnable command, Duration delay) {
RedissonScheduledFuture<?> future = (RedissonScheduledFuture<?>) scheduleAsync(id, command, delay);
return syncExecute(future);
}
@Override
public RScheduledFuture<?> scheduleAsync(String id, Runnable task, Duration delay) {
return scheduleAsync(id, task, delay, Duration.ZERO);
}
@Override
public RScheduledFuture<?> schedule(String id, Runnable command, Duration delay, Duration timeToLive) {
RedissonScheduledFuture<?> future = (RedissonScheduledFuture<?>) scheduleAsync(id, command, delay, timeToLive);
return syncExecute(future);
}
@Override
public RScheduledFuture<?> scheduleAsync(String id, Runnable task, Duration delay, Duration timeToLive) {
long startTime = System.currentTimeMillis() + delay.toMillis();
return executeWithCheckAsync(id, task, startTime, () -> {
ClassBody classBody = getClassBody(task);
byte[] state = encode(task);
ScheduledParameters params = createScheduledParameters(id, timeToLive, classBody, state, startTime);
return asyncScheduledService.scheduleRunnable(params);
});
}
private RScheduledFuture<?> scheduleWithoutCheckAsync(String id, Runnable task, Duration delay, Duration timeToLive) {
check(task);
ClassBody classBody = getClassBody(task);
byte[] state = encode(task);
long startTime = System.currentTimeMillis() + delay.toMillis();
ScheduledParameters params = createScheduledParameters(id, timeToLive, classBody, state, startTime);
RemotePromise<Void> result = (RemotePromise<Void>) asyncScheduledService.scheduleRunnable(params).toCompletableFuture();
addListener(result);
return createFuture(result, startTime);
}
@Override
public <V> RScheduledFuture<V> schedule(String id, Callable<V> callable, Duration delay) {
RedissonScheduledFuture<V> future = (RedissonScheduledFuture<V>) scheduleAsync(id, callable, delay);
return syncExecute(future);
}
@Override
public <V> RScheduledFuture<V> scheduleAsync(String id, Callable<V> task, Duration delay) {
return scheduleAsync(id, task, delay, Duration.ZERO);
}
@Override
public <V> RScheduledFuture<V> schedule(String id, Callable<V> callable, Duration delay, Duration timeToLive) {
RedissonScheduledFuture<V> future = (RedissonScheduledFuture<V>) scheduleAsync(id, callable, delay, timeToLive);
return syncExecute(future);
}
@Override
public <V> RScheduledFuture<V> scheduleAsync(String id, Callable<V> task, Duration delay, Duration timeToLive) {
long startTime = System.currentTimeMillis() + delay.toMillis();
return executeWithCheckAsync(id, task, startTime, () -> {
ClassBody classBody = getClassBody(task);
byte[] state = encode(task);
ScheduledParameters params = createScheduledParameters(id, timeToLive, classBody, state, startTime);
return asyncScheduledService.scheduleCallable(params);
});
}
private <V> RScheduledFuture<V> scheduleWithoutCheckAsync(String id, Callable<V> task, Duration delay, Duration timeToLive) {
check(task);
ClassBody classBody = getClassBody(task);
byte[] state = encode(task);
long startTime = System.currentTimeMillis() + delay.toMillis();
ScheduledParameters params = createScheduledParameters(id, timeToLive, classBody, state, startTime);
RemotePromise<V> result = (RemotePromise<V>) asyncScheduledService.scheduleCallable(params).toCompletableFuture();
addListener(result);
return createFuture(result, startTime);
}
@Override
public RScheduledFuture<?> scheduleAtFixedRate(String id, Runnable command, Duration initialDelay, Duration period) {
RedissonScheduledFuture<?> future = (RedissonScheduledFuture<?>) scheduleAtFixedRateAsync(id, command, initialDelay, period);
return syncExecute(future);
}
@Override
public RScheduledFuture<?> scheduleAtFixedRateAsync(String id, Runnable task, Duration initialDelay, Duration period) {
long startTime = System.currentTimeMillis() + initialDelay.toMillis();
return executeWithCheckAsync(id, task, startTime, () -> {
ClassBody classBody = getClassBody(task);
byte[] state = encode(task);
ScheduledAtFixedRateParameters params = new ScheduledAtFixedRateParameters(id);
params.setClassName(classBody.getClazzName());
params.setClassBody(classBody.getClazz());
params.setLambdaBody(classBody.getLambda());
params.setState(state);
params.setStartTime(startTime);
params.setPeriod(period.toMillis());
params.setExecutorId(executorId);
return asyncScheduledServiceAtFixed.scheduleAtFixedRate(params);
});
}
private RScheduledFuture<?> scheduleWithoutCheckAtFixedRateAsync(String id, Runnable task, Duration initialDelay, Duration period) {
check(task);
ClassBody classBody = getClassBody(task);
byte[] state = encode(task);
long startTime = System.currentTimeMillis() + initialDelay.toMillis();
String taskId = id;
ScheduledAtFixedRateParameters params = new ScheduledAtFixedRateParameters(taskId);
params.setClassName(classBody.getClazzName());
params.setClassBody(classBody.getClazz());
params.setLambdaBody(classBody.getLambda());
params.setState(state);
params.setStartTime(startTime);
params.setPeriod(period.toMillis());
params.setExecutorId(executorId);
RemotePromise<Void> result = (RemotePromise<Void>) asyncScheduledServiceAtFixed.scheduleAtFixedRate(params).toCompletableFuture();
addListener(result);
return createFuture(result, startTime);
}
@Override
public RScheduledFuture<?> scheduleWithFixedDelay(String id, Runnable command, Duration initialDelay, Duration delay) {
RedissonScheduledFuture<?> future = (RedissonScheduledFuture<?>) scheduleWithFixedDelayAsync(id, command, initialDelay, delay);
return syncExecute(future);
}
@Override
public RScheduledFuture<?> scheduleWithFixedDelayAsync(String id, Runnable task, Duration initialDelay, Duration delay) {
long startTime = System.currentTimeMillis() + initialDelay.toMillis();
return executeWithCheckAsync(id, task, startTime, () -> {
ClassBody classBody = getClassBody(task);
byte[] state = encode(task);
ScheduledWithFixedDelayParameters params = new ScheduledWithFixedDelayParameters(id);
params.setClassName(classBody.getClazzName());
params.setClassBody(classBody.getClazz());
params.setLambdaBody(classBody.getLambda());
params.setState(state);
params.setStartTime(startTime);
params.setDelay(delay.toMillis());
params.setExecutorId(executorId);
return asyncScheduledServiceAtFixed.scheduleWithFixedDelay(params);
});
}
private RScheduledFuture<?> scheduleWithoutCheckWithFixedDelayAsync(String id, Runnable task, Duration initialDelay, Duration delay) {
check(task);
ClassBody classBody = getClassBody(task);
byte[] state = encode(task);
long startTime = System.currentTimeMillis() + initialDelay.toMillis();
ScheduledWithFixedDelayParameters params = new ScheduledWithFixedDelayParameters(id);
params.setClassName(classBody.getClazzName());
params.setClassBody(classBody.getClazz());
params.setLambdaBody(classBody.getLambda());
params.setState(state);
params.setStartTime(startTime);
params.setDelay(delay.toMillis());
params.setExecutorId(executorId);
RemotePromise<Void> result = (RemotePromise<Void>) asyncScheduledServiceAtFixed.scheduleWithFixedDelay(params).toCompletableFuture();
addListener(result);
return createFuture(result, startTime);
}
@Override
public RScheduledFuture<?> schedule(String id, Runnable task, CronSchedule cronSchedule) {
RedissonScheduledFuture<?> future = (RedissonScheduledFuture<?>) scheduleAsync(id, task, cronSchedule);
return syncExecute(future);
}
@Override
public RScheduledFuture<?> scheduleAsync(String id, Runnable task, CronSchedule cronSchedule) {
ClassBody classBody = getClassBody(task);
byte[] state = encode(task);
Date startDate = cronSchedule.getExpression().getNextValidTimeAfter(new Date());
if (startDate == null) {
throw new IllegalArgumentException("Wrong cron expression! Unable to calculate start date");
}
long startTime = startDate.getTime();
RemotePromise<Void> r = executeWithCheck(id, task, () -> {
ScheduledCronExpressionParameters params = new ScheduledCronExpressionParameters(id);
params.setClassName(classBody.getClazzName());
params.setClassBody(classBody.getClazz());
params.setLambdaBody(classBody.getLambda());
params.setState(state);
params.setStartTime(startTime);
params.setCronExpression(cronSchedule.getExpression().getCronExpression());
params.setTimezone(cronSchedule.getZoneId().toString());
params.setExecutorId(executorId);
return asyncScheduledServiceAtFixed.schedule(params);
});
RedissonScheduledFuture<Void> f = new RedissonScheduledFuture<Void>(r, startTime) {
public long getDelay(TimeUnit unit) {
return unit.convert(startDate.getTime() - System.currentTimeMillis(), TimeUnit.MILLISECONDS);
};
};
storeReference(f, r.getRequestId());
return f;
}
private RScheduledFuture<?> scheduleWithoutCheckAsync(String id, Runnable task, CronSchedule cronSchedule) {
check(task);
ClassBody classBody = getClassBody(task);
byte[] state = encode(task);
Date startDate = cronSchedule.getExpression().getNextValidTimeAfter(new Date());
if (startDate == null) {
throw new IllegalArgumentException("Wrong cron expression! Unable to calculate start date");
}
long startTime = startDate.getTime();
ScheduledCronExpressionParameters params = new ScheduledCronExpressionParameters(id);
params.setClassName(classBody.getClazzName());
params.setClassBody(classBody.getClazz());
params.setLambdaBody(classBody.getLambda());
params.setState(state);
params.setStartTime(startTime);
params.setCronExpression(cronSchedule.getExpression().getCronExpression());
params.setTimezone(cronSchedule.getZoneId().toString());
params.setExecutorId(executorId);
RemotePromise<Void> result = (RemotePromise<Void>) asyncScheduledServiceAtFixed.schedule(params).toCompletableFuture();
addListener(result);
RedissonScheduledFuture<Void> f = new RedissonScheduledFuture<Void>(result, startTime) {
public long getDelay(TimeUnit unit) {
return unit.convert(startDate.getTime() - System.currentTimeMillis(), TimeUnit.MILLISECONDS);
};
};
storeReference(f, result.getRequestId());
return f;
}
}
|
and
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/KillProcessAction.java
|
{
"start": 955,
"end": 1198
}
|
class ____ extends JobTaskRequest<Request> {
public Request(String jobId) {
super(jobId);
}
public Request(StreamInput in) throws IOException {
super(in);
}
}
public static
|
Request
|
java
|
spring-projects__spring-boot
|
cli/spring-boot-cli/src/main/java/org/springframework/boot/cli/command/options/OptionHandler.java
|
{
"start": 4138,
"end": 4873
}
|
class ____ implements HelpFormatter {
private final List<OptionHelp> help = new ArrayList<>();
@Override
public String format(Map<String, ? extends OptionDescriptor> options) {
Comparator<OptionDescriptor> comparator = Comparator
.comparing((optionDescriptor) -> optionDescriptor.options().iterator().next());
Set<OptionDescriptor> sorted = new TreeSet<>(comparator);
sorted.addAll(options.values());
for (OptionDescriptor descriptor : sorted) {
if (!descriptor.representsNonOptions()) {
this.help.add(new OptionHelpAdapter(descriptor));
}
}
return "";
}
Collection<OptionHelp> getOptionHelp() {
return Collections.unmodifiableList(this.help);
}
}
private static
|
OptionHelpFormatter
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/support/DefaultCamelContextEndpointCacheTest.java
|
{
"start": 1154,
"end": 2250
}
|
class ____ extends ContextTestSupport {
@Test
public void testCacheEndpoints() throws Exception {
// test that we cache at most 1000 endpoints in camel context to avoid
// it eating to much memory
for (int i = 0; i < 1234; i++) {
String uri = "my:endpoint?id=" + i;
DefaultEndpoint e = new DefaultEndpoint() {
public Producer createProducer() {
return null;
}
public Consumer createConsumer(Processor processor) {
return null;
}
public boolean isSingleton() {
return true;
}
};
e.setCamelContext(context);
e.setEndpointUri(uri);
context.addEndpoint(uri, e);
}
// the eviction is async so force cleanup
context.getEndpointRegistry().cleanUp();
Collection<Endpoint> col = context.getEndpoints();
assertEquals(1000, col.size(), "Size should be 1000");
}
}
|
DefaultCamelContextEndpointCacheTest
|
java
|
spring-projects__spring-boot
|
smoke-test/spring-boot-smoke-test-hibernate/src/main/java/smoketest/jpa/repository/JpaTagRepository.java
|
{
"start": 871,
"end": 1156
}
|
class ____ implements TagRepository {
@PersistenceContext
@SuppressWarnings("NullAway.Init")
private EntityManager entityManager;
@Override
public List<Tag> findAll() {
return this.entityManager.createQuery("SELECT t FROM Tag t", Tag.class).getResultList();
}
}
|
JpaTagRepository
|
java
|
apache__dubbo
|
dubbo-metadata/dubbo-metadata-api/src/main/java/org/apache/dubbo/metadata/MetadataRequest.java
|
{
"start": 9202,
"end": 20152
}
|
class ____ extends com.google.protobuf.GeneratedMessageV3.Builder<Builder>
implements
// @@protoc_insertion_point(builder_implements:org.apache.dubbo.metadata.MetadataRequest)
MetadataRequestOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
return MetadataServiceV2OuterClass.internal_static_org_apache_dubbo_metadata_MetadataRequest_descriptor;
}
@Override
protected FieldAccessorTable internalGetFieldAccessorTable() {
return MetadataServiceV2OuterClass
.internal_static_org_apache_dubbo_metadata_MetadataRequest_fieldAccessorTable
.ensureFieldAccessorsInitialized(MetadataRequest.class, Builder.class);
}
// Construct using org.apache.dubbo.metadata.MetadataRequest.newBuilder()
private Builder() {}
private Builder(BuilderParent parent) {
super(parent);
}
@Override
public Builder clear() {
super.clear();
bitField0_ = 0;
revision_ = "";
return this;
}
@Override
public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() {
return MetadataServiceV2OuterClass.internal_static_org_apache_dubbo_metadata_MetadataRequest_descriptor;
}
@Override
public MetadataRequest getDefaultInstanceForType() {
return MetadataRequest.getDefaultInstance();
}
@Override
public MetadataRequest build() {
MetadataRequest result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@Override
public MetadataRequest buildPartial() {
MetadataRequest result = new MetadataRequest(this);
if (bitField0_ != 0) {
buildPartial0(result);
}
onBuilt();
return result;
}
private void buildPartial0(MetadataRequest result) {
int from_bitField0_ = bitField0_;
if (((from_bitField0_ & 0x00000001) != 0)) {
result.revision_ = revision_;
}
}
@Override
public Builder clone() {
return super.clone();
}
@Override
public Builder setField(com.google.protobuf.Descriptors.FieldDescriptor field, Object value) {
return super.setField(field, value);
}
@Override
public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@Override
public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@Override
public Builder setRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field, int index, Object value) {
return super.setRepeatedField(field, index, value);
}
@Override
public Builder addRepeatedField(com.google.protobuf.Descriptors.FieldDescriptor field, Object value) {
return super.addRepeatedField(field, value);
}
@Override
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof MetadataRequest) {
return mergeFrom((MetadataRequest) other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(MetadataRequest other) {
if (other == MetadataRequest.getDefaultInstance()) {
return this;
}
if (!other.getRevision().isEmpty()) {
revision_ = other.revision_;
bitField0_ |= 0x00000001;
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
onChanged();
return this;
}
@Override
public final boolean isInitialized() {
return true;
}
@Override
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
if (extensionRegistry == null) {
throw new NullPointerException();
}
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10: {
revision_ = input.readStringRequireUtf8();
bitField0_ |= 0x00000001;
break;
} // case 10
default: {
if (!super.parseUnknownField(input, extensionRegistry, tag)) {
done = true; // was an endgroup tag
}
break;
} // default:
} // switch (tag)
} // while (!done)
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.unwrapIOException();
} finally {
onChanged();
} // finally
return this;
}
private int bitField0_;
private Object revision_ = "";
/**
* <pre>
* The revision of the metadata.
* </pre>
*
* <code>string revision = 1;</code>
* @return The revision.
*/
public String getRevision() {
Object ref = revision_;
if (!(ref instanceof String)) {
com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
String s = bs.toStringUtf8();
revision_ = s;
return s;
} else {
return (String) ref;
}
}
/**
* <pre>
* The revision of the metadata.
* </pre>
*
* <code>string revision = 1;</code>
* @return The bytes for revision.
*/
public com.google.protobuf.ByteString getRevisionBytes() {
Object ref = revision_;
if (ref instanceof String) {
com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8((String) ref);
revision_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* <pre>
* The revision of the metadata.
* </pre>
*
* <code>string revision = 1;</code>
* @param value The revision to set.
* @return This builder for chaining.
*/
public Builder setRevision(String value) {
if (value == null) {
throw new NullPointerException();
}
revision_ = value;
bitField0_ |= 0x00000001;
onChanged();
return this;
}
/**
* <pre>
* The revision of the metadata.
* </pre>
*
* <code>string revision = 1;</code>
* @return This builder for chaining.
*/
public Builder clearRevision() {
revision_ = getDefaultInstance().getRevision();
bitField0_ = (bitField0_ & ~0x00000001);
onChanged();
return this;
}
/**
* <pre>
* The revision of the metadata.
* </pre>
*
* <code>string revision = 1;</code>
* @param value The bytes for revision to set.
* @return This builder for chaining.
*/
public Builder setRevisionBytes(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
checkByteStringIsUtf8(value);
revision_ = value;
bitField0_ |= 0x00000001;
onChanged();
return this;
}
@Override
public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@Override
public final Builder mergeUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:org.apache.dubbo.metadata.MetadataRequest)
}
// @@protoc_insertion_point(class_scope:org.apache.dubbo.metadata.MetadataRequest)
private static final MetadataRequest DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new MetadataRequest();
}
public static MetadataRequest getDefaultInstance() {
return DEFAULT_INSTANCE;
}
private static final com.google.protobuf.Parser<MetadataRequest> PARSER =
new com.google.protobuf.AbstractParser<MetadataRequest>() {
@Override
public MetadataRequest parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
Builder builder = newBuilder();
try {
builder.mergeFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(builder.buildPartial());
} catch (com.google.protobuf.UninitializedMessageException e) {
throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(e)
.setUnfinishedMessage(builder.buildPartial());
}
return builder.buildPartial();
}
};
public static com.google.protobuf.Parser<MetadataRequest> parser() {
return PARSER;
}
@Override
public com.google.protobuf.Parser<MetadataRequest> getParserForType() {
return PARSER;
}
@Override
public MetadataRequest getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
|
Builder
|
java
|
google__dagger
|
javatests/dagger/functional/producers/provisions/Provisions.java
|
{
"start": 1255,
"end": 1577
}
|
class ____ {
final Producer<InjectedClass> injectedClass1;
final Producer<InjectedClass> injectedClass2;
Output(Producer<InjectedClass> injectedClass1, Producer<InjectedClass> injectedClass2) {
this.injectedClass1 = injectedClass1;
this.injectedClass2 = injectedClass2;
}
}
@Qualifier @
|
Output
|
java
|
quarkusio__quarkus
|
extensions/reactive-oracle-client/deployment/src/test/java/io/quarkus/reactive/oracle/client/OraclePoolCreatorTest.java
|
{
"start": 269,
"end": 954
}
|
class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClass(CustomCredentialsProvider.class)
.addClass(CredentialsTestResource.class)
.addClass(LocalhostOraclePoolCreator.class)
.addAsResource("application-credentials-with-erroneous-url.properties", "application.properties"));
@Test
public void testConnect() {
given()
.when().get("/test")
.then()
.statusCode(200)
.body(CoreMatchers.equalTo("OK"));
}
}
|
OraclePoolCreatorTest
|
java
|
quarkusio__quarkus
|
extensions/qute/deployment/src/main/java/io/quarkus/qute/deployment/TemplateDataBuilder.java
|
{
"start": 416,
"end": 3047
}
|
class ____ {
private final List<String> ignores;
private boolean ignoreSuperclasses;
private boolean properties;
private String namespace;
private AnnotationTarget annotationTarget;
public TemplateDataBuilder() {
ignores = new ArrayList<>();
ignoreSuperclasses = false;
properties = false;
namespace = TemplateData.UNDERSCORED_FQCN;
}
/**
*
* @see TemplateData#ignore()
* @return self
*/
public TemplateDataBuilder addIgnore(String value) {
ignores.add(value);
return this;
}
/**
*
* @see TemplateData#ignoreSuperclasses()
* @return self
*/
public TemplateDataBuilder ignoreSuperclasses(boolean value) {
ignoreSuperclasses = value;
return this;
}
public TemplateDataBuilder properties(boolean value) {
properties = value;
return this;
}
public TemplateDataBuilder namespace(String value) {
namespace = Objects.requireNonNull(value);
return this;
}
public TemplateDataBuilder annotationTarget(AnnotationTarget value) {
annotationTarget = Objects.requireNonNull(value);
return this;
}
public AnnotationInstance build() {
AnnotationValue ignoreValue;
if (ignores.isEmpty()) {
ignoreValue = AnnotationValue.createArrayValue(ValueResolverGenerator.IGNORE, new AnnotationValue[] {});
} else {
AnnotationValue[] values = new AnnotationValue[ignores.size()];
for (int i = 0; i < ignores.size(); i++) {
values[i] = AnnotationValue.createStringValue(ValueResolverGenerator.IGNORE + i, ignores.get(i));
}
ignoreValue = AnnotationValue.createArrayValue(ValueResolverGenerator.IGNORE, values);
}
AnnotationValue propertiesValue = AnnotationValue.createBooleanValue(ValueResolverGenerator.PROPERTIES, properties);
AnnotationValue ignoreSuperclassesValue = AnnotationValue.createBooleanValue(ValueResolverGenerator.IGNORE_SUPERCLASSES,
ignoreSuperclasses);
AnnotationValue namespaceValue = AnnotationValue.createStringValue("namespace", namespace);
AnnotationValue targetValue = AnnotationValue.createClassValue("target",
Type.create(ValueResolverGenerator.TEMPLATE_DATA, Kind.CLASS));
return AnnotationInstance.create(ValueResolverGenerator.TEMPLATE_DATA, annotationTarget,
new AnnotationValue[] { targetValue, ignoreValue, propertiesValue, ignoreSuperclassesValue, namespaceValue });
}
}
|
TemplateDataBuilder
|
java
|
spring-projects__spring-framework
|
spring-core/src/main/java/org/springframework/core/ReactiveAdapterRegistry.java
|
{
"start": 12563,
"end": 13393
}
|
class ____ {
@SuppressWarnings("KotlinInternalInJava")
void registerAdapters(ReactiveAdapterRegistry registry) {
registry.registerReactiveType(
ReactiveTypeDescriptor.singleOptionalValue(kotlinx.coroutines.Deferred.class,
() -> kotlinx.coroutines.CompletableDeferredKt.CompletableDeferred(null)),
source -> CoroutinesUtils.deferredToMono((kotlinx.coroutines.Deferred<?>) source),
source -> CoroutinesUtils.monoToDeferred(Mono.from(source)));
registry.registerReactiveType(
ReactiveTypeDescriptor.multiValue(kotlinx.coroutines.flow.Flow.class, kotlinx.coroutines.flow.FlowKt::emptyFlow),
source -> kotlinx.coroutines.reactor.ReactorFlowKt.asFlux((kotlinx.coroutines.flow.Flow<?>) source),
kotlinx.coroutines.reactive.ReactiveFlowKt::asFlow);
}
}
private static
|
CoroutinesRegistrar
|
java
|
alibaba__fastjson
|
src/test/java/data/media/MediaContentDeserializer.java
|
{
"start": 277,
"end": 1372
}
|
class ____ implements ObjectDeserializer {
private ObjectDeserializer mediaDeserializer;
private ObjectDeserializer imageDeserializer;
private final char[] mediaPrefix = "\"media\":".toCharArray();
private final char[] imagePrefix = "\"images\":".toCharArray();
public <T> T deserialze(DefaultJSONParser parser, Type clazz, Object fieldName) {
final JSONScanner lexer = (JSONScanner) parser.getLexer();
MediaContent object = new MediaContent();
lexer.matchField(mediaPrefix);
if (mediaDeserializer == null) {
//mediaDeserializer = parser.getMapping().getDeserializer(ObjectDeserializer.class);
}
mediaDeserializer.deserialze(parser, clazz, null);
lexer.matchField(imagePrefix);
imageDeserializer.deserialze(parser, clazz, null);
// if (lexer.token() != JSONToken.RBRACE)
// TODO Auto-generated method stub
return null;
}
public int getFastMatchToken() {
return JSONToken.LBRACE;
}
}
|
MediaContentDeserializer
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.