language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/issue_2300/Issue2397.java | {
"start": 238,
"end": 574
} | class ____ extends TestCase {
public void test_for_bug(){
String jsonStr = "{\"items\":[{\"id\":1,\"name\":\"kata\"}]}";
TestReply testReply = JSON.parseObject(jsonStr, new TypeReference<TestReply>() {
});
Assert.assertEquals(testReply.getItems().get(0).getId() , 1);
}
public static | Issue2397 |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/volume/csi/lifecycle/VolumeImpl.java | {
"start": 2521,
"end": 2748
} | class ____ the volume states and state transition
* according to the CSI volume lifecycle. Volume states are stored in
* {@link org.apache.hadoop.yarn.server.resourcemanager.volume.csi.VolumeStates}
* class.
*/
public | maintains |
java | apache__camel | components/camel-mybatis/src/test/java/org/apache/camel/component/mybatis/MyBatisSelectOneTest.java | {
"start": 1052,
"end": 2323
} | class ____ extends MyBatisTestSupport {
@Test
public void testSelectOne() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedMessageCount(1);
mock.message(0).body().isInstanceOf(Account.class);
template.sendBody("direct:start", 456);
MockEndpoint.assertIsSatisfied(context);
Account account = mock.getReceivedExchanges().get(0).getIn().getBody(Account.class);
assertEquals("Claus", account.getFirstName());
}
@Test
public void tesSelectOneNotFound() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedMessageCount(1);
mock.message(0).body().isNull();
template.sendBody("direct:start", 999);
MockEndpoint.assertIsSatisfied(context);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
// START SNIPPET: e1
from("direct:start")
.to("mybatis:selectAccountById?statementType=SelectOne")
.to("mock:result");
// END SNIPPET: e1
}
};
}
}
| MyBatisSelectOneTest |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/resourcemanager/ResourceManagerTaskExecutorTest.java | {
"start": 3000,
"end": 16678
} | class ____ {
private static final Duration TIMEOUT = TestingUtils.infiniteTime();
private static final ResourceProfile DEFAULT_SLOT_PROFILE =
ResourceProfile.fromResources(1.0, 1234);
@RegisterExtension
static final TestExecutorExtension<ScheduledExecutorService> EXECUTOR_EXTENSION =
TestingUtils.defaultExecutorExtension();
private static TestingRpcService rpcService;
private TestingTaskExecutorGateway taskExecutorGateway;
private final int dataPort = 1234;
private final int jmxPort = 23456;
private final HardwareDescription hardwareDescription = new HardwareDescription(1, 2L, 3L, 4L);
private ResourceID taskExecutorResourceID;
private TestingResourceManagerService rmService;
private ResourceManagerGateway rmGateway;
private ResourceManagerGateway wronglyFencedGateway;
@BeforeAll
static void setupClass() {
rpcService = new TestingRpcService();
}
@BeforeEach
void setup() throws Exception {
rpcService = new TestingRpcService();
createAndRegisterTaskExecutorGateway();
taskExecutorResourceID = ResourceID.generate();
createAndStartResourceManager();
wronglyFencedGateway =
rpcService
.connect(
rmGateway.getAddress(),
ResourceManagerId.generate(),
ResourceManagerGateway.class)
.get();
}
private void createAndRegisterTaskExecutorGateway() {
taskExecutorGateway =
new TestingTaskExecutorGatewayBuilder().createTestingTaskExecutorGateway();
rpcService.registerGateway(taskExecutorGateway.getAddress(), taskExecutorGateway);
}
private void createAndStartResourceManager() throws Exception {
final TestingLeaderElection leaderElection = new TestingLeaderElection();
rmService =
TestingResourceManagerService.newBuilder()
.setRpcService(rpcService)
.setRmLeaderElection(leaderElection)
.build();
rmService.start();
rmService.isLeader(UUID.randomUUID()).join();
rmGateway =
rmService
.getResourceManagerGateway()
.orElseThrow(
() ->
new AssertionError(
"RM not available after confirming leadership."));
}
@AfterEach
void teardown() throws Exception {
if (rmService != null) {
rmService.rethrowFatalErrorIfAny();
rmService.cleanUp();
}
}
@AfterAll
static void teardownClass() throws Exception {
if (rpcService != null) {
RpcUtils.terminateRpcService(rpcService);
}
}
/**
* Test receive normal registration from task executor and receive duplicate registration from
* task executor.
*/
@Test
void testRegisterTaskExecutor() throws Exception {
// test response successful
CompletableFuture<RegistrationResponse> successfulFuture =
registerTaskExecutor(rmGateway, taskExecutorGateway.getAddress());
RegistrationResponse response = successfulFuture.get();
assertThat(response).isInstanceOf(TaskExecutorRegistrationSuccess.class);
final TaskManagerInfoWithSlots taskManagerInfoWithSlots =
rmGateway.requestTaskManagerDetailsInfo(taskExecutorResourceID, TIMEOUT).get();
assertThat(taskManagerInfoWithSlots.getTaskManagerInfo().getResourceId())
.isEqualTo(taskExecutorResourceID);
// test response successful with instanceID not equal to previous when receive duplicate
// registration from taskExecutor
CompletableFuture<RegistrationResponse> duplicateFuture =
registerTaskExecutor(rmGateway, taskExecutorGateway.getAddress());
RegistrationResponse duplicateResponse = duplicateFuture.get();
assertThat(duplicateResponse).isInstanceOf(TaskExecutorRegistrationSuccess.class);
assertThat(((TaskExecutorRegistrationSuccess) response).getRegistrationId())
.isNotEqualTo(
((TaskExecutorRegistrationSuccess) duplicateResponse).getRegistrationId());
assertThat(rmGateway.requestResourceOverview(TIMEOUT).get().getNumberTaskManagers())
.isOne();
}
/**
* Test delayed registration of task executor where the delay is introduced during connection
* from resource manager to the registering task executor.
*/
@Test
void testDelayedRegisterTaskExecutor() throws Exception {
final Duration fastTimeout = Duration.ofMillis(1L);
try {
final OneShotLatch startConnection = new OneShotLatch();
final OneShotLatch finishConnection = new OneShotLatch();
// first registration is with blocking connection
rpcService.setRpcGatewayFutureFunction(
rpcGateway ->
CompletableFuture.supplyAsync(
() -> {
startConnection.trigger();
try {
finishConnection.await();
} catch (InterruptedException ignored) {
}
return rpcGateway;
},
EXECUTOR_EXTENSION.getExecutor()));
TaskExecutorRegistration taskExecutorRegistration =
new TaskExecutorRegistration(
taskExecutorGateway.getAddress(),
taskExecutorResourceID,
dataPort,
jmxPort,
hardwareDescription,
new TaskExecutorMemoryConfiguration(
1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L, 10L),
DEFAULT_SLOT_PROFILE,
DEFAULT_SLOT_PROFILE,
taskExecutorGateway.getAddress());
CompletableFuture<RegistrationResponse> firstFuture =
rmGateway.registerTaskExecutor(taskExecutorRegistration, fastTimeout);
assertThatFuture(firstFuture)
.as(
"Should have failed because connection to taskmanager is delayed beyond timeout")
.eventuallyFails()
.withThrowableOfType(Exception.class)
.withCauseInstanceOf(TimeoutException.class)
.withMessageContaining("ResourceManagerGateway.registerTaskExecutor");
startConnection.await();
// second registration after timeout is with no delay, expecting it to be succeeded
rpcService.resetRpcGatewayFutureFunction();
CompletableFuture<RegistrationResponse> secondFuture =
rmGateway.registerTaskExecutor(taskExecutorRegistration, TIMEOUT);
RegistrationResponse response = secondFuture.get();
assertThat(response).isInstanceOf(TaskExecutorRegistrationSuccess.class);
// on success, send slot report for taskmanager registration
final SlotReport slotReport =
new SlotReport(
new SlotStatus(
new SlotID(taskExecutorResourceID, 0), ResourceProfile.ANY));
rmGateway
.sendSlotReport(
taskExecutorResourceID,
((TaskExecutorRegistrationSuccess) response).getRegistrationId(),
slotReport,
TIMEOUT)
.get();
// let the remaining part of the first registration proceed
finishConnection.trigger();
Thread.sleep(1L);
// verify that the latest registration is valid not being unregistered by the delayed
// one
final TaskManagerInfoWithSlots taskManagerInfoWithSlots =
rmGateway.requestTaskManagerDetailsInfo(taskExecutorResourceID, TIMEOUT).get();
assertThat(taskManagerInfoWithSlots.getTaskManagerInfo().getResourceId())
.isEqualTo(taskExecutorResourceID);
assertThat(taskManagerInfoWithSlots.getTaskManagerInfo().getNumberSlots()).isOne();
} finally {
rpcService.resetRpcGatewayFutureFunction();
}
}
/** Tests that a TaskExecutor can disconnect from the {@link ResourceManager}. */
@Test
void testDisconnectTaskExecutor() throws Exception {
final int numberSlots = 10;
final TaskExecutorRegistration taskExecutorRegistration =
new TaskExecutorRegistration(
taskExecutorGateway.getAddress(),
taskExecutorResourceID,
dataPort,
jmxPort,
hardwareDescription,
new TaskExecutorMemoryConfiguration(
1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L, 10L),
DEFAULT_SLOT_PROFILE,
DEFAULT_SLOT_PROFILE.multiply(numberSlots),
taskExecutorGateway.getAddress());
final RegistrationResponse registrationResponse =
rmGateway.registerTaskExecutor(taskExecutorRegistration, TIMEOUT).get();
assertThat(registrationResponse).isInstanceOf(TaskExecutorRegistrationSuccess.class);
final InstanceID registrationId =
((TaskExecutorRegistrationSuccess) registrationResponse).getRegistrationId();
final Collection<SlotStatus> slots = createSlots(numberSlots);
final SlotReport slotReport = new SlotReport(slots);
rmGateway.sendSlotReport(taskExecutorResourceID, registrationId, slotReport, TIMEOUT).get();
final ResourceOverview resourceOverview = rmGateway.requestResourceOverview(TIMEOUT).get();
assertThat(resourceOverview.getNumberTaskManagers()).isOne();
assertThat(resourceOverview.getNumberRegisteredSlots()).isEqualTo(numberSlots);
rmGateway.disconnectTaskManager(
taskExecutorResourceID, new FlinkException("testDisconnectTaskExecutor"));
final ResourceOverview afterDisconnectResourceOverview =
rmGateway.requestResourceOverview(TIMEOUT).get();
assertThat(afterDisconnectResourceOverview.getNumberTaskManagers()).isZero();
assertThat(afterDisconnectResourceOverview.getNumberRegisteredSlots()).isZero();
}
private Collection<SlotStatus> createSlots(int numberSlots) {
return IntStream.range(0, numberSlots)
.mapToObj(
index ->
new SlotStatus(
new SlotID(taskExecutorResourceID, index),
ResourceProfile.ANY))
.collect(Collectors.toList());
}
/** Test receive registration with unmatched leadershipId from task executor. */
@Test
void testRegisterTaskExecutorWithUnmatchedLeaderSessionId() {
// test throw exception when receive a registration from taskExecutor which takes unmatched
// leaderSessionId
CompletableFuture<RegistrationResponse> unMatchedLeaderFuture =
registerTaskExecutor(wronglyFencedGateway, taskExecutorGateway.getAddress());
assertThatFuture(unMatchedLeaderFuture)
.withFailMessage(
"Should have failed because we are using a wrongly fenced ResourceManagerGateway.")
.eventuallyFails()
.withThrowableOfType(ExecutionException.class)
.withCauseInstanceOf(FencingTokenException.class);
}
/** Test receive registration with invalid address from task executor. */
@Test
void testRegisterTaskExecutorFromInvalidAddress() {
// test throw exception when receive a registration from taskExecutor which takes invalid
// address
String invalidAddress = "/taskExecutor2";
CompletableFuture<RegistrationResponse> invalidAddressFuture =
registerTaskExecutor(rmGateway, invalidAddress);
assertThatFuture(invalidAddressFuture)
.eventuallySucceeds()
.isInstanceOf(RegistrationResponse.Failure.class);
}
private CompletableFuture<RegistrationResponse> registerTaskExecutor(
ResourceManagerGateway resourceManagerGateway, String taskExecutorAddress) {
return resourceManagerGateway.registerTaskExecutor(
new TaskExecutorRegistration(
taskExecutorAddress,
taskExecutorResourceID,
dataPort,
jmxPort,
hardwareDescription,
new TaskExecutorMemoryConfiguration(
1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L, 10L),
DEFAULT_SLOT_PROFILE,
DEFAULT_SLOT_PROFILE,
taskExecutorAddress),
TIMEOUT);
}
}
| ResourceManagerTaskExecutorTest |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/processor/MessageHistoryCopyMessageTest.java | {
"start": 1195,
"end": 3196
} | class ____ extends ContextTestSupport {
@Test
public void testCopyMessage() throws Exception {
getMockEndpoint("mock:a").expectedMessageCount(1);
getMockEndpoint("mock:b").expectedMessageCount(1);
getMockEndpoint("mock:bar").expectedMessageCount(1);
Exchange out = template.request("direct:start", e -> {
e.getMessage().setBody("Hello World");
});
assertMockEndpointsSatisfied();
// only the step eips are in the history
List<MessageHistory> history = out.getProperty(Exchange.MESSAGE_HISTORY, List.class);
assertNotNull(history);
assertEquals(3, history.size());
assertEquals("step", history.get(0).getNode().getShortName());
assertEquals("a", history.get(0).getNode().getId());
assertEquals("Hello World", history.get(0).getMessage().getBody());
assertEquals("step", history.get(1).getNode().getShortName());
assertEquals("b", history.get(1).getNode().getId());
assertEquals("Bye World", history.get(1).getMessage().getBody());
assertEquals("step", history.get(2).getNode().getShortName());
assertEquals("bar", history.get(2).getNode().getId());
assertEquals("Hi World", history.get(2).getMessage().getBody());
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
context.setMessageHistory(true);
context.getMessageHistoryFactory().setNodePattern("step");
context.getMessageHistoryFactory().setCopyMessage(true);
from("direct:start").step("a").transform().constant("Bye World").to("mock:a").end().step("b").transform()
.constant("Hi World").to("direct:bar").to("mock:b").end();
from("direct:bar").step("bar").to("log:bar").to("mock:bar").end();
}
};
}
}
| MessageHistoryCopyMessageTest |
java | spring-projects__spring-framework | spring-expression/src/main/java/org/springframework/expression/spel/CompiledExpression.java | {
"start": 942,
"end": 1106
} | class ____ implement the {@link #getValue} method. It is not intended
* to be subclassed by user code.
*
* @author Andy Clement
* @since 4.1
*/
public abstract | and |
java | assertj__assertj-core | assertj-core/src/main/java/org/assertj/core/api/iterable/ThrowingExtractor.java | {
"start": 1299,
"end": 1665
} | interface ____<F, T, EXCEPTION extends Exception> extends Function<F, T> {
@Override
default T apply(final F input) {
try {
return extractThrows(input);
} catch (final RuntimeException e) {
throw e;
} catch (final Exception e) {
throw new RuntimeException(e);
}
}
T extractThrows(F input) throws EXCEPTION;
}
| ThrowingExtractor |
java | alibaba__nacos | naming/src/test/java/com/alibaba/nacos/naming/push/v2/executor/PushExecutorDelegateTest.java | {
"start": 1267,
"end": 2958
} | class ____ {
private final String udpClientId = "1.1.1.1:60000#true";
private final String rpcClientId = UUID.randomUUID().toString();
@Mock
private PushExecutorRpcImpl pushExecutorRpc;
@Mock
private PushExecutorUdpImpl pushExecutorUdp;
@Mock
private Subscriber subscriber;
@Mock
private NamingPushCallback pushCallBack;
private PushDataWrapper pushdata;
private PushExecutorDelegate delegate;
private ServiceMetadata serviceMetadata;
@BeforeEach
void setUp() throws Exception {
serviceMetadata = new ServiceMetadata();
pushdata = new PushDataWrapper(serviceMetadata, new ServiceInfo("G@@S"));
delegate = new PushExecutorDelegate(pushExecutorRpc, pushExecutorUdp);
}
@Test
void testDoPushForUdp() {
delegate.doPush(udpClientId, subscriber, pushdata);
verify(pushExecutorUdp).doPush(udpClientId, subscriber, pushdata);
}
@Test
void testDoPushForRpc() {
delegate.doPush(rpcClientId, subscriber, pushdata);
verify(pushExecutorRpc).doPush(rpcClientId, subscriber, pushdata);
}
@Test
void doPushWithCallbackForUdp() {
delegate.doPushWithCallback(udpClientId, subscriber, pushdata, pushCallBack);
verify(pushExecutorUdp).doPushWithCallback(udpClientId, subscriber, pushdata, pushCallBack);
}
@Test
void doPushWithCallbackForRpc() {
delegate.doPushWithCallback(rpcClientId, subscriber, pushdata, pushCallBack);
verify(pushExecutorRpc).doPushWithCallback(rpcClientId, subscriber, pushdata, pushCallBack);
}
}
| PushExecutorDelegateTest |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/parser/JSONReaderScannerTest_error4.java | {
"start": 1055,
"end": 1406
} | class ____ extends StringReader {
public MyReader(String s){
super(s);
}
public int read(char cbuf[], int off, int len) throws IOException {
int x = super.read(cbuf, off, len);
if (x < 0) {
throw new IOException();
}
return x;
}
}
}
| MyReader |
java | google__guava | android/guava-tests/benchmark/com/google/common/base/EnumsBenchmark.java | {
"start": 970,
"end": 2436
} | class ____ {
@Param({"Small", "Medium", "Large"})
String enumSize;
@Param({"0.2", "0.8"})
float hitRate;
// We could avoid the raw type here by initializing this with a ternary (? SmallEnum.class : ...).
// However, we end up needing a raw type in getIfPresent, as discussed there.
@SuppressWarnings("rawtypes")
private Class<? extends Enum> enumType;
private String[] sampleData;
@BeforeExperiment
void setUp() throws ClassNotFoundException {
Preconditions.checkArgument(hitRate >= 0 && hitRate <= 1, "hitRate must be in the range [0,1]");
enumType =
Class.forName(EnumsBenchmark.class.getCanonicalName() + "$" + enumSize + "Enum")
.asSubclass(Enum.class);
Enum<?>[] allConstants = enumType.getEnumConstants();
List<String> hits = new ArrayList<>();
for (int i = 0; i < hitRate * 256 / 3; ++i) {
hits.add(allConstants[0].name());
hits.add(allConstants[allConstants.length / 2].name());
hits.add(allConstants[allConstants.length - 1].name());
}
List<String> misses = new ArrayList<>();
for (int i = 0; i < 256 - hits.size(); ++i) {
misses.add("INVALID");
}
List<String> sampleDataList = new ArrayList<>();
sampleDataList.addAll(hits);
sampleDataList.addAll(misses);
Collections.shuffle(sampleDataList);
sampleData = sampleDataList.toArray(new String[sampleDataList.size()]);
}
// Since we can't pass a concrete SomeEnum. | EnumsBenchmark |
java | netty__netty | codec-base/src/main/java/io/netty/handler/codec/DefaultHeaders.java | {
"start": 2117,
"end": 2617
} | interface ____<K> {
/**
* Verify that {@code name} is valid.
* @param name The name to validate.
* @throws RuntimeException if {@code name} is not valid.
*/
void validateName(K name);
@SuppressWarnings("rawtypes")
NameValidator NOT_NULL = new NameValidator() {
@Override
public void validateName(Object name) {
checkNotNull(name, "name");
}
};
}
public | NameValidator |
java | junit-team__junit5 | jupiter-tests/src/test/java/org/junit/jupiter/api/parallel/ResourceLocksProviderTests.java | {
"start": 8861,
"end": 9779
} | class ____ implements ResourceLocksProvider {
private static boolean isProvideForMethodCalled = false;
@Override
public Set<Lock> provideForClass(Class<?> testClass) {
fail("'provideForClass' should not be called");
return emptySet();
}
@Override
public Set<Lock> provideForNestedClass(List<Class<?>> enclosingInstanceTypes, Class<?> testClass) {
fail("'provideForNestedClass' should not be called");
return emptySet();
}
@Override
public Set<Lock> provideForMethod(List<Class<?>> enclosingInstanceTypes, Class<?> testClass,
Method testMethod) {
isProvideForMethodCalled = true;
assertEquals(List.of(MethodLevelProviderInNestedClassTestCase.class), enclosingInstanceTypes);
assertEquals(MethodLevelProviderInNestedClassTestCase.NestedClass.class, testClass);
assertEquals("nestedTest", testMethod.getName());
return emptySet();
}
}
}
}
| Provider |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/issue_1000/Issue1083.java | {
"start": 348,
"end": 653
} | class ____ extends TestCase {
public void test_for_issue() throws Exception {
Map map = new HashMap();
map.put("userId", 456);
String json = JSON.toJSONString(map, SerializerFeature.WriteNonStringValueAsString);
assertEquals("{\"userId\":\"456\"}", json);
}
}
| Issue1083 |
java | apache__camel | components/camel-olingo2/camel-olingo2-api/src/main/java/org/apache/camel/component/olingo2/api/batch/Olingo2BatchChangeRequest.java | {
"start": 1917,
"end": 3383
} | class ____ {
private Olingo2BatchChangeRequest request = new Olingo2BatchChangeRequest();
public Olingo2BatchChangeRequestBuilder resourcePath(String resourcePath) {
request.resourcePath = resourcePath;
return this;
}
public Olingo2BatchChangeRequestBuilder headers(Map<String, String> headers) {
request.headers = headers;
return this;
}
public Olingo2BatchChangeRequestBuilder contentId(String contentId) {
request.contentId = contentId;
return this;
}
public Olingo2BatchChangeRequestBuilder operation(Operation operation) {
request.operation = operation;
return this;
}
public Olingo2BatchChangeRequestBuilder body(Object body) {
request.body = body;
return this;
}
public Olingo2BatchChangeRequest build() {
// avoid later NPEs
if (request.resourcePath == null) {
throw new IllegalArgumentException("Null resourcePath");
}
if (request.operation == null) {
throw new IllegalArgumentException("Null operation");
}
if (request.operation != Operation.DELETE && request.body == null) {
throw new IllegalArgumentException("Null body");
}
return request;
}
}
}
| Olingo2BatchChangeRequestBuilder |
java | spring-projects__spring-framework | spring-beans/src/main/java/org/springframework/beans/factory/xml/AbstractSingleBeanDefinitionParser.java | {
"start": 5116,
"end": 6320
} | class ____ of the bean that is being defined via parsing
* the supplied {@code Element}, or {@code null} if none
* @see #getBeanClass
*/
protected @Nullable String getBeanClassName(Element element) {
return null;
}
/**
* Parse the supplied {@link Element} and populate the supplied
* {@link BeanDefinitionBuilder} as required.
* <p>The default implementation delegates to the {@code doParse}
* version without ParserContext argument.
* @param element the XML element being parsed
* @param parserContext the object encapsulating the current state of the parsing process
* @param builder used to define the {@code BeanDefinition}
* @see #doParse(Element, BeanDefinitionBuilder)
*/
protected void doParse(Element element, ParserContext parserContext, BeanDefinitionBuilder builder) {
doParse(element, builder);
}
/**
* Parse the supplied {@link Element} and populate the supplied
* {@link BeanDefinitionBuilder} as required.
* <p>The default implementation does nothing.
* @param element the XML element being parsed
* @param builder used to define the {@code BeanDefinition}
*/
protected void doParse(Element element, BeanDefinitionBuilder builder) {
}
}
| name |
java | elastic__elasticsearch | x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TimeSyncConfigTests.java | {
"start": 615,
"end": 1663
} | class ____ extends AbstractXContentSerializingTestCase<TimeSyncConfig> {
public static TimeSyncConfig randomTimeSyncConfig() {
return new TimeSyncConfig(randomAlphaOfLengthBetween(1, 10), new TimeValue(randomNonNegativeLong()));
}
@Override
protected TimeSyncConfig doParseInstance(XContentParser parser) throws IOException {
return TimeSyncConfig.fromXContent(parser, false);
}
@Override
protected TimeSyncConfig createTestInstance() {
return randomTimeSyncConfig();
}
@Override
protected TimeSyncConfig mutateInstance(TimeSyncConfig instance) {
return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929
}
@Override
protected Reader<TimeSyncConfig> instanceReader() {
return TimeSyncConfig::new;
}
public void testDefaultDelay() {
TimeSyncConfig config = new TimeSyncConfig(randomAlphaOfLength(10), null);
assertThat(config.getDelay(), equalTo(TimeSyncConfig.DEFAULT_DELAY));
}
}
| TimeSyncConfigTests |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/chain/TestChainErrors.java | {
"start": 7456,
"end": 7712
} | class ____ extends
Reducer<LongWritable, Text, LongWritable, Text> {
public void reduce(LongWritable key, Iterable<Text> values, Context context)
throws IOException, InterruptedException {
throw new IOException();
}
}
}
| FailReduce |
java | apache__camel | components/camel-box/camel-box-api/src/main/java/org/apache/camel/component/box/api/BoxFoldersManager.java | {
"start": 1521,
"end": 16286
} | class ____ {
private static final Logger LOG = LoggerFactory.getLogger(BoxFoldersManager.class);
/**
* Box connection to authenticated user account.
*/
private BoxAPIConnection boxConnection;
/**
* Create folder manager to manage folders of Box connection's authenticated user.
*
* @param boxConnection - Box connection to authenticated user account.
*/
public BoxFoldersManager(BoxAPIConnection boxConnection) {
this.boxConnection = boxConnection;
}
/**
* Return the root folder of authenticated user.
*
* @return The root folder of authenticated user.
*/
public BoxFolder getRootFolder() {
try {
LOG.debug("Getting root folder");
return BoxFolder.getRootFolder(boxConnection);
} catch (BoxAPIException e) {
throw new RuntimeCamelException(
buildBoxApiErrorMessage(e), e);
}
}
/**
* Return the Box folder referenced by <code>path</code>.
*
* @param path - Sequence of Box folder names from root folder to returned folder.
*
* @return The Box folder referenced by <code>path</code> or <code>null</code> if folder is not found.
*/
public BoxFolder getFolder(String... path) {
try {
if (LOG.isDebugEnabled()) {
LOG.debug("Getting folder at path={}", Arrays.toString(path));
}
BoxFolder folder = BoxFolder.getRootFolder(boxConnection);
if (path == null || path.length == 0) {
// Return root folder if path is null or empty.
return folder;
}
searchPath: for (int folderIndex = 0; folderIndex < path.length; folderIndex++) {
for (BoxItem.Info itemInfo : folder) {
if (itemInfo instanceof BoxFolder.Info && itemInfo.getName().equals(path[folderIndex])) {
folder = (BoxFolder) itemInfo.getResource();
continue searchPath;
}
}
// Failed to find named folder in path: return null
return null;
}
return folder;
} catch (BoxAPIException e) {
throw new RuntimeCamelException(
buildBoxApiErrorMessage(e), e);
}
}
/**
* Returns a specific range of child items in folder and specifies which fields of each item to retrieve.
*
* @param folderId - the id of folder.
* @param offset - the index of first child item to retrieve; if <code>null</code> all child items are retrieved.
* @param limit - the maximum number of children to retrieve after the offset; if <code>null</code> all child
* items are retrieved.
* @param fields - the item fields to retrieve for each child item; if <code>null</code> all item fields are
* retrieved.
* @return The Items in folder
*/
public Collection<BoxItem.Info> getFolderItems(String folderId, Long offset, Long limit, String... fields) {
try {
if (LOG.isDebugEnabled()) {
LOG.debug("Getting folder items in folder(id={}) at offset={} and limit={} with fields={}",
folderId, offset, limit, Arrays.toString(fields));
}
BoxHelper.notNull(folderId, FOLDER_ID);
BoxFolder folder = new BoxFolder(boxConnection, folderId);
if (fields == null) {
fields = new String[0];
}
if (offset != null && limit != null) {
return folder.getChildrenRange(offset, limit, fields);
} else {
Collection<BoxItem.Info> folderItems = new ArrayList<>();
Iterable<BoxItem.Info> iterable;
if (fields.length > 0) {
iterable = folder.getChildren(fields);
} else {
iterable = folder.getChildren();
}
for (BoxItem.Info itemInfo : iterable) {
folderItems.add(itemInfo);
}
return folderItems;
}
} catch (BoxAPIException e) {
throw new RuntimeCamelException(
buildBoxApiErrorMessage(e), e);
}
}
/**
* Create a folder in parent folder with given <code>parentFolderId</code>.
*
* @param parentFolderId - the id of parent folder.
* @param folderName the name of created folder.
* @return The created folder.
*/
public BoxFolder createFolder(String parentFolderId, String folderName) {
try {
LOG.debug("Creating folder with name '{}' in parent_folder(id={})", folderName, parentFolderId);
BoxHelper.notNull(parentFolderId, BoxHelper.PARENT_FOLDER_ID);
BoxHelper.notNull(folderName, BoxHelper.FOLDER_NAME);
BoxFolder parentFolder = new BoxFolder(boxConnection, parentFolderId);
return parentFolder.createFolder(folderName).getResource();
} catch (BoxAPIException e) {
throw new RuntimeCamelException(
buildBoxApiErrorMessage(e), e);
}
}
/**
* Create a folder specified by path from parent folder with given <code>parentFolderId</code>, creating
* intermediate directories as required.
*
* @param parentFolderId - the id of parent folder.
* @param path - Sequence of Box folder names from parent folder to returned folder.
* @return The last folder in path, no fault will be thrown if it already exists.
*/
public BoxFolder createFolder(String parentFolderId, String... path) {
try {
if (LOG.isDebugEnabled()) {
LOG.debug("Creating folder with path '{}' in parent_folder(id={})", Arrays.toString(path), parentFolderId);
}
BoxHelper.notNull(parentFolderId, BoxHelper.PARENT_FOLDER_ID);
BoxHelper.notNull(path, BoxHelper.PATH);
BoxFolder folder = new BoxFolder(boxConnection, parentFolderId);
searchPath: for (int folderIndex = 0; folderIndex < path.length; folderIndex++) {
for (BoxItem.Info itemInfo : folder) {
if (itemInfo instanceof BoxFolder.Info && itemInfo.getName().equals(path[folderIndex])) {
folder = (BoxFolder) itemInfo.getResource();
continue searchPath;
}
}
folder = folder.createFolder(path[folderIndex]).getResource();
}
return folder;
} catch (BoxAPIException e) {
throw new RuntimeCamelException(
buildBoxApiErrorMessage(e), e);
}
}
/**
* Copy folder to destination folder while optionally giving it a new name.
*
* @param folderId - the id of folder to copy.
* @param destinationFolderId - the id of the destination folder.
* @param newName - the new name for copied folder; if <code>newName</code> is <code>null</code>, the
* copied folder has same name as the original.
* @return The copied folder.
*/
public BoxFolder copyFolder(String folderId, String destinationFolderId, String newName) {
try {
LOG.debug("Copying folder(id={}) to destination_folder(id={}) {}",
folderId, destinationFolderId, newName == null ? "" : " with new name '" + newName + "'");
BoxHelper.notNull(folderId, FOLDER_ID);
BoxHelper.notNull(destinationFolderId, BoxHelper.DESTINATION_FOLDER_ID);
BoxFolder folderToCopy = new BoxFolder(boxConnection, folderId);
BoxFolder destinationFolder = new BoxFolder(boxConnection, destinationFolderId);
if (newName == null) {
return folderToCopy.copy(destinationFolder).getResource();
} else {
return folderToCopy.copy(destinationFolder, newName).getResource();
}
} catch (BoxAPIException e) {
throw new RuntimeCamelException(
buildBoxApiErrorMessage(e), e);
}
}
/**
* Move folder to destination folder while optionally giving it a new name.
*
* @param folderId - the id of folder to move.
* @param destinationFolderId - the id of the destination folder.
* @param newName - the new name of moved folder; if <code>newName</code> is <code>null</code>, the
* moved folder has same name as the original.
* @return The moved folder.
*/
public BoxFolder moveFolder(String folderId, String destinationFolderId, String newName) {
try {
LOG.debug("Moving folder(id={}) to destination_folder(id={}) {}",
folderId, destinationFolderId, newName == null ? "" : " with new name '" + newName + "'");
BoxHelper.notNull(folderId, FOLDER_ID);
BoxHelper.notNull(destinationFolderId, BoxHelper.DESTINATION_FOLDER_ID);
BoxFolder folderToMove = new BoxFolder(boxConnection, folderId);
BoxFolder destinationFolder = new BoxFolder(boxConnection, destinationFolderId);
if (newName == null) {
return (BoxFolder) folderToMove.move(destinationFolder).getResource();
} else {
return (BoxFolder) folderToMove.move(destinationFolder, newName).getResource();
}
} catch (BoxAPIException e) {
throw new RuntimeCamelException(
buildBoxApiErrorMessage(e), e);
}
}
/**
* Rename folder giving it the name <code>newName</code>
*
* @param folderId - the id of folder to rename.
* @param newFolderName - the new name of folder.
* @return The renamed folder.
*/
public BoxFolder renameFolder(String folderId, String newFolderName) {
try {
LOG.debug("Renaming folder(id={}}) to '{}'", folderId, newFolderName);
BoxHelper.notNull(folderId, FOLDER_ID);
BoxHelper.notNull(newFolderName, BoxHelper.NEW_FOLDER_NAME);
BoxFolder folderToRename = new BoxFolder(boxConnection, folderId);
folderToRename.rename(newFolderName);
return folderToRename;
} catch (BoxAPIException e) {
throw new RuntimeCamelException(
buildBoxApiErrorMessage(e), e);
}
}
/**
* Delete folder.
*
* @param folderId - the id of folder to delete.
*/
public void deleteFolder(String folderId) {
try {
LOG.debug("Deleting folder(id={})", folderId);
BoxHelper.notNull(folderId, FOLDER_ID);
BoxFolder folder = new BoxFolder(boxConnection, folderId);
folder.delete(true);
} catch (BoxAPIException e) {
throw new RuntimeCamelException(
buildBoxApiErrorMessage(e), e);
}
}
/**
* Get folder information.
*
* @param folderId - the id of folder.
* @param fields - the information fields to retrieve; if <code>null</code> all information fields are retrieved.
* @return The folder information.
*/
public BoxFolder.Info getFolderInfo(String folderId, String... fields) {
try {
LOG.debug("Getting info for folder(id={})", folderId);
BoxHelper.notNull(folderId, FOLDER_ID);
BoxFolder folder = new BoxFolder(boxConnection, folderId);
if (fields == null || fields.length == 0) {
return folder.getInfo();
} else {
return folder.getInfo(fields);
}
} catch (BoxAPIException e) {
throw new RuntimeCamelException(
buildBoxApiErrorMessage(e), e);
}
}
/**
* Update folder information.
*
* @param folderId - the id of folder to update.
* @param info - the updated information
* @return The updated folder.
*/
public BoxFolder updateFolderInfo(String folderId, BoxFolder.Info info) {
try {
LOG.debug("Updating info for folder(id={})", folderId);
BoxHelper.notNull(folderId, FOLDER_ID);
BoxHelper.notNull(info, "info");
BoxFolder folder = new BoxFolder(boxConnection, folderId);
folder.updateInfo(info);
return folder;
} catch (BoxAPIException e) {
throw new RuntimeCamelException(
buildBoxApiErrorMessage(e), e);
}
}
/**
* Create a shared link to folder.
*
* @param folderId - the id of folder to create shared link on.
* @param access - the access level of the shared link.
* @param unshareDate - the date and time at which time the created shared link will expire; if
* <code>unsharedDate</code> is <code>null</code> then a non-expiring link is created.
* @param permissions - the permissions of the created link; if <code>permissions</code> is <code>null</code> then
* the created shared link is create with default permissions.
* @return The created shared link.
*/
public BoxSharedLink createFolderSharedLink(
String folderId, BoxSharedLink.Access access, Date unshareDate,
BoxSharedLink.Permissions permissions) {
try {
LOG.debug("Creating shared link for folder(id={}) with access={} {}",
folderId, access, unshareDate == null
? ""
: " unsharedDate=" + DateFormat.getDateTimeInstance().format(unshareDate)
+ " permissions=" + permissions);
BoxHelper.notNull(folderId, FOLDER_ID);
BoxHelper.notNull(access, BoxHelper.ACCESS);
BoxFolder folder = new BoxFolder(boxConnection, folderId);
BoxSharedLinkRequest request = new BoxSharedLinkRequest();
request.access(access).unsharedDate(unshareDate)
.permissions(permissions.getCanDownload(), permissions.getCanPreview(), permissions.getCanEdit());
return folder.createSharedLink(request);
} catch (BoxAPIException e) {
throw new RuntimeCamelException(
buildBoxApiErrorMessage(e), e);
}
}
}
| BoxFoldersManager |
java | google__auto | common/src/main/java/com/google/auto/common/MoreElements.java | {
"start": 16372,
"end": 19658
} | class ____ interface
* that has more than one superinterface, the interfaces are in the order of their appearance in
* {@code implements} or {@code extends}.
*
* @param type the type whose own and inherited methods are to be returned
* @param typeUtils a {@link Types} object, typically returned by {@link
* javax.annotation.processing.AbstractProcessor#processingEnv processingEnv}.{@link
* javax.annotation.processing.ProcessingEnvironment#getTypeUtils getTypeUtils()}
* @param elementUtils an {@link Elements} object, typically returned by {@link
* javax.annotation.processing.AbstractProcessor#processingEnv processingEnv}.{@link
* javax.annotation.processing.ProcessingEnvironment#getElementUtils getElementUtils()}
*/
public static ImmutableSet<ExecutableElement> getLocalAndInheritedMethods(
TypeElement type, Types typeUtils, Elements elementUtils) {
return getLocalAndInheritedMethods(type, new ExplicitOverrides(typeUtils));
}
private static ImmutableSet<ExecutableElement> getLocalAndInheritedMethods(
TypeElement type, Overrides overrides) {
PackageElement pkg = getPackage(type);
ImmutableSet.Builder<ExecutableElement> methods = ImmutableSet.builder();
for (ExecutableElement method : getAllMethods(type, overrides)) {
// Filter out all static and non-visible methods.
if (!method.getModifiers().contains(STATIC) && methodVisibleFromPackage(method, pkg)) {
methods.add(method);
}
}
return methods.build();
}
/**
* Tests whether one method, as a member of a given type, overrides another method.
*
* <p>This method does the same thing as {@link Elements#overrides(ExecutableElement,
* ExecutableElement, TypeElement)}, but in a way that is more consistent between compilers, in
* particular between javac and ecj (the Eclipse compiler).
*
* @param overrider the first method, possible overrider
* @param overridden the second method, possibly being overridden
* @param type the type of which the first method is a member
* @return {@code true} if and only if the first method overrides the second
*/
public static boolean overrides(
ExecutableElement overrider,
ExecutableElement overridden,
TypeElement type,
Types typeUtils) {
return new ExplicitOverrides(typeUtils).overrides(overrider, overridden, type);
}
/**
* Returns the set of all methods from {@code type}, including methods that it inherits from its
* ancestors. Inherited methods that are overridden are not included in the result. So if {@code
* type} defines {@code public String toString()}, the returned set will contain that method, but
* not the {@code toString()} method defined by {@code Object}.
*
* <p>The returned set may contain more than one method with the same signature, if {@code type}
* inherits those methods from different ancestors. For example, if it inherits from unrelated
* interfaces {@code One} and {@code Two} which each define {@code void foo();}, and if it does
* not itself override the {@code foo()} method, then both {@code One.foo()} and {@code Two.foo()}
* will be in the returned set.
*
* <p>The order of the returned set is deterministic: within a | or |
java | google__auto | value/src/test/java/com/google/auto/value/extension/memoized/MemoizedTest.java | {
"start": 5923,
"end": 16857
} | class ____ {
int hashCodeCount;
@Override
@Memoized
public int hashCode() {
hashCodeCount++;
return 1;
}
@Override
public final boolean equals(Object that) {
return that instanceof HashCodeEqualsOptimizationOffWhenEqualsIsFinal;
}
}
@Before
public void setUp() {
value =
new AutoValue_MemoizedTest_Value(
"string", "stringWithTypeAnnotation", new HashCodeAndToStringCounter());
listValue = new AutoValue_MemoizedTest_ListValue<Integer, String>(0, "hello");
}
@Test
public void listValueList() {
assertThat(listValue.myTypedList()).containsExactly(listValue.value());
}
@Test
public void listValueString() {
assertThat(listValue.otherValue()).isEqualTo("hello");
}
@Test
public void primitive() {
assertThat(value.primitive()).isEqualTo(1);
assertThat(value.primitive()).isEqualTo(1);
assertThat(value.primitiveCount).isEqualTo(1);
}
@Test
public void notNullable() {
assertThat(value.notNullable()).isEqualTo("derived string 1");
assertThat(value.notNullable()).isSameInstanceAs(value.notNullable());
assertThat(value.notNullableCount).isEqualTo(1);
}
@Test
public void nullable() {
assertThat(value.nullable()).isEqualTo("nullable derived string 1");
assertThat(value.nullable()).isSameInstanceAs(value.nullable());
assertThat(value.nullableCount).isEqualTo(1);
}
@Test
public void nullableWithTypeAnnotation() {
assertThat(value.nullableWithTypeAnnotation())
.isEqualTo("nullable derived stringWithTypeAnnotation 1");
assertThat(value.nullableWithTypeAnnotation())
.isSameInstanceAs(value.nullableWithTypeAnnotation());
assertThat(value.nullableWithTypeAnnotationCount).isEqualTo(1);
}
@Test
public void returnsNull() {
assertThat(value.returnsNull()).isNull();
assertThat(value.returnsNull()).isNull();
assertThat(value.returnsNullCount).isEqualTo(1);
}
@Test
public void returnsNullWithTypeAnnotation() {
assertThat(value.returnsNullWithTypeAnnotation()).isNull();
assertThat(value.returnsNullWithTypeAnnotation()).isNull();
assertThat(value.returnsNullWithTypeAnnotationCount).isEqualTo(1);
}
@Test
public void notNullableButReturnsNull() {
try {
value.notNullableButReturnsNull();
fail();
} catch (NullPointerException expected) {
assertThat(expected)
.hasMessageThat()
.isEqualTo("notNullableButReturnsNull() cannot return null");
}
assertThat(value.notNullableButReturnsNullCount).isEqualTo(1);
}
@Test
public void methodThrows() {
// The exception is thrown.
try {
value.throwsException();
fail();
} catch (SomeCheckedException expected1) {
// The exception is not memoized.
try {
value.throwsException();
fail();
} catch (SomeCheckedException expected2) {
assertThat(expected2).isNotSameInstanceAs(expected1);
}
}
assertThat(value.throwsExceptionCount).isEqualTo(2);
}
@Test
public void testHashCode() {
assertThat(value.hashCode()).isEqualTo(value.hashCode());
assertThat(value.counter().hashCodeCount).isEqualTo(1);
}
@Test
public void testToString() {
assertThat(value.toString()).isEqualTo(value.toString());
assertThat(value.counter().toStringCount).isEqualTo(1);
}
@Test
public void keywords() throws Exception {
ValueWithKeywordName value =
new AutoValue_MemoizedTest_ValueWithKeywordName(true, false, "foo");
assertThat(value.getNative()).isTrue();
assertThat(value.getMemoizedNative()).isTrue();
assertThat(value.getNative0()).isFalse();
assertThat(value.getMemoizedNative0()).isFalse();
Constructor<?> constructor =
value.getClass().getDeclaredConstructor(boolean.class, boolean.class, String.class);
ImmutableList<String> names =
stream(constructor.getParameters()).map(Parameter::getName).collect(toImmutableList());
assertThat(names).contains("notKeyword");
}
@Test
public void copyClassAnnotations_valueWithCopyAnnotations_copiesAnnotation() throws Exception {
ValueWithCopyAnnotations valueWithCopyAnnotations =
new AutoValue_MemoizedTest_ValueWithCopyAnnotations("test");
assertThat(
valueWithCopyAnnotations
.getClass()
.isAnnotationPresent(javax.annotation.Nullable.class))
.isTrue();
}
@Test
public void copyClassAnnotations_valueWithoutCopyAnnotations_doesNotCopyAnnotation()
throws Exception {
ValueWithoutCopyAnnotations valueWithoutCopyAnnotations =
new AutoValue_MemoizedTest_ValueWithoutCopyAnnotations("test");
assertThat(
valueWithoutCopyAnnotations
.getClass()
.isAnnotationPresent(javax.annotation.Nullable.class))
.isFalse();
}
@Test
public void copyClassAnnotations_valueWithExcludedCopyAnnotations_doesNotCopyAnnotation()
throws Exception {
ValueWithExcludedCopyAnnotations valueWithExcludedCopyAnnotations =
new AutoValue_MemoizedTest_ValueWithExcludedCopyAnnotations("test");
assertThat(
valueWithExcludedCopyAnnotations
.getClass()
.isAnnotationPresent(javax.annotation.Nullable.class))
.isFalse();
}
@Test
public void copyMethodAnnotations_valueWithCopyAnnotations_copiesAnnotation() throws Exception {
ValueWithCopyAnnotations valueWithCopyAnnotations =
new AutoValue_MemoizedTest_ValueWithCopyAnnotations("test");
assertThat(
valueWithCopyAnnotations
.getClass()
.getMethod("getMemoizedNative")
.isAnnotationPresent(javax.annotation.Nullable.class))
.isTrue();
}
@Test
public void copyMethodAnnotations_valueWithoutCopyAnnotations_copiesAnnotation()
throws Exception {
ValueWithoutCopyAnnotations valueWithoutCopyAnnotations =
new AutoValue_MemoizedTest_ValueWithoutCopyAnnotations("test");
assertThat(
valueWithoutCopyAnnotations
.getClass()
.getMethod("getMemoizedNative")
.isAnnotationPresent(javax.annotation.Nullable.class))
.isTrue();
}
@Test
public void copyMethodAnnotations_valueWithExcludedCopyAnnotations_doesNotCopyAnnotation()
throws Exception {
ValueWithExcludedCopyAnnotations valueWithExcludedCopyAnnotations =
new AutoValue_MemoizedTest_ValueWithExcludedCopyAnnotations("test");
assertThat(
valueWithExcludedCopyAnnotations
.getClass()
.getMethod("getMemoizedNative")
.isAnnotationPresent(javax.annotation.Nullable.class))
.isFalse();
}
@Test
public void nullableHasAnnotation() throws ReflectiveOperationException {
Method nullable = AutoValue_MemoizedTest_Value.class.getDeclaredMethod("nullable");
assertThat(nullable.isAnnotationPresent(javax.annotation.Nullable.class)).isTrue();
}
@Test
public void nullableWithTypeAnnotationHasAnnotation() throws ReflectiveOperationException {
Method nullable =
AutoValue_MemoizedTest_Value.class.getDeclaredMethod("nullableWithTypeAnnotation");
AnnotatedType returnType = nullable.getAnnotatedReturnType();
assertThat(
returnType.isAnnotationPresent(
org.checkerframework.checker.nullness.qual.Nullable.class))
.isTrue();
}
@Test
public void nullableConstructorParameter() throws ReflectiveOperationException {
// Constructor parameters are potentially:
// [0] @javax.annotation.Nullable String string,
// [1] @org.checkerframework.checker.nullness.qual.Nullable String stringWithTypeAnnotation,
// [2] HashCodeAndToStringCounter counter
// We don't currently copy @javax.annotation.Nullable because it is not a TYPE_USE annotation.
Constructor<?> constructor =
AutoValue_MemoizedTest_Value.class.getDeclaredConstructor(
String.class, String.class, HashCodeAndToStringCounter.class);
AnnotatedType paramType = constructor.getAnnotatedParameterTypes()[1];
assertThat(
paramType.isAnnotationPresent(
org.checkerframework.checker.nullness.qual.Nullable.class))
.isTrue();
}
@Test
public void hashCodeEqualsOptimization() {
HashCodeEqualsOptimization first =
new AutoValue_MemoizedTest_HashCodeEqualsOptimization(new EqualsCounter());
HashCodeEqualsOptimization second =
new AutoValue_MemoizedTest_HashCodeEqualsOptimization(new EqualsCounter());
first.overrideHashCode = 2;
second.overrideHashCode = 2;
assertThat(first.equals(second)).isTrue();
assertThat(first.equalsCounter().equalsCount).isEqualTo(1);
HashCodeEqualsOptimization otherwiseEqualsButDifferentHashCode =
new AutoValue_MemoizedTest_HashCodeEqualsOptimization(new EqualsCounter());
otherwiseEqualsButDifferentHashCode.overrideHashCode = 4;
assertThat(otherwiseEqualsButDifferentHashCode.equals(first)).isFalse();
assertThat(otherwiseEqualsButDifferentHashCode.equalsCounter().equalsCount).isEqualTo(0);
}
@Test
public void hashCodeEqualsOptimization_otherTypes() {
HashCodeEqualsOptimization optimizedEquals =
new AutoValue_MemoizedTest_HashCodeEqualsOptimization(new EqualsCounter());
assertThat(optimizedEquals.equals(new Object())).isFalse();
assertThat(optimizedEquals.equals(null)).isFalse();
assertThat(optimizedEquals.equalsCounter().equalsCount).isEqualTo(0);
assertThat(optimizedEquals.hashCodeCount).isEqualTo(0);
}
@Test
public void hashCodeEqualsOptimization_hashCodeIgnoredForSameInstance() {
HashCodeEqualsOptimization optimizedEquals =
new AutoValue_MemoizedTest_HashCodeEqualsOptimization(new EqualsCounter());
assertThat(optimizedEquals.equals(optimizedEquals)).isTrue();
assertThat(optimizedEquals.equalsCounter().equalsCount).isEqualTo(0);
assertThat(optimizedEquals.hashCodeCount).isEqualTo(0);
}
@Test
public void hashCodeEqualsOptimization_offWhenEqualsIsFinal() {
HashCodeEqualsOptimizationOffWhenEqualsIsFinal memoizedHashCodeAndFinalEqualsMethod =
new AutoValue_MemoizedTest_HashCodeEqualsOptimizationOffWhenEqualsIsFinal();
HashCodeEqualsOptimizationOffWhenEqualsIsFinal second =
new AutoValue_MemoizedTest_HashCodeEqualsOptimizationOffWhenEqualsIsFinal();
assertThat(memoizedHashCodeAndFinalEqualsMethod.equals(second)).isTrue();
assertThat(memoizedHashCodeAndFinalEqualsMethod.hashCodeCount).isEqualTo(0);
int unused1 = memoizedHashCodeAndFinalEqualsMethod.hashCode();
int unused2 = memoizedHashCodeAndFinalEqualsMethod.hashCode();
assertThat(memoizedHashCodeAndFinalEqualsMethod.hashCodeCount).isEqualTo(1);
}
| HashCodeEqualsOptimizationOffWhenEqualsIsFinal |
java | netty__netty | transport-classes-io_uring/src/main/java/io/netty/channel/uring/SockaddrIn.java | {
"start": 1035,
"end": 9676
} | class ____ {
static final byte[] IPV4_MAPPED_IPV6_PREFIX = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, (byte) 0xff, (byte) 0xff };
static final int IPV4_ADDRESS_LENGTH = 4;
static final int IPV6_ADDRESS_LENGTH = 16;
static final byte[] SOCKADDR_IN6_EMPTY_ARRAY = new byte[Native.SIZEOF_SOCKADDR_IN6];
static final byte[] SOCKADDR_IN_EMPTY_ARRAY = new byte[Native.SIZEOF_SOCKADDR_IN];
private SockaddrIn() { }
static int set(boolean ipv6, ByteBuffer memory, InetSocketAddress address) {
if (ipv6) {
return setIPv6(memory, address.getAddress(), address.getPort());
}
return setIPv4(memory, address.getAddress(), address.getPort());
}
/**
* <pre>{@code
* struct sockaddr_in {
* sa_family_t sin_family; // address family: AF_INET
* in_port_t sin_port; // port in network byte order
* struct in_addr sin_addr; // internet address
* };
*
* // Internet address.
* struct in_addr {
* uint32_t s_addr; // address in network byte order
* };
* }</pre>
*/
static int setIPv4(ByteBuffer memory, InetAddress address, int port) {
int position = memory.position();
memory.mark();
try {
// memset
memory.put(SOCKADDR_IN_EMPTY_ARRAY);
memory.putShort(position + Native.SOCKADDR_IN_OFFSETOF_SIN_FAMILY, Native.AF_INET);
memory.putShort(position + Native.SOCKADDR_IN_OFFSETOF_SIN_PORT, handleNetworkOrder(memory.order(),
(short) port));
byte[] bytes = address.getAddress();
int offset = 0;
if (bytes.length == IPV6_ADDRESS_LENGTH) {
// IPV6 mapped IPV4 address, we only need the last 4 bytes.
offset = IPV4_MAPPED_IPV6_PREFIX.length;
}
assert bytes.length == offset + IPV4_ADDRESS_LENGTH;
memory.position(position + Native.SOCKADDR_IN_OFFSETOF_SIN_ADDR + Native.IN_ADDRESS_OFFSETOF_S_ADDR);
memory.put(bytes, offset, IPV4_ADDRESS_LENGTH);
return Native.SIZEOF_SOCKADDR_IN;
} finally {
// Restore position as we did change it via memory.put(byte[]...).
memory.reset();
}
}
/**
* <pre>{@code
* struct sockaddr_in6 {
* sa_family_t sin6_family; // AF_INET6
* in_port_t sin6_port; // port number
* uint32_t sin6_flowinfo; // IPv6 flow information
* struct in6_addr sin6_addr; // IPv6 address
* uint32_t sin6_scope_id; /* Scope ID (new in 2.4)
* };
*
* struct in6_addr {
* unsigned char s6_addr[16]; // IPv6 address
* };
* }</pre>
*/
static int setIPv6(ByteBuffer memory, InetAddress address, int port) {
int position = memory.position();
memory.mark();
try {
// memset
memory.put(SOCKADDR_IN6_EMPTY_ARRAY);
memory.putShort(position + Native.SOCKADDR_IN6_OFFSETOF_SIN6_FAMILY, Native.AF_INET6);
memory.putShort(position + Native.SOCKADDR_IN6_OFFSETOF_SIN6_PORT,
handleNetworkOrder(memory.order(), (short) port));
// Skip sin6_flowinfo as we did memset before
byte[] bytes = address.getAddress();
int offset = Native.SOCKADDR_IN6_OFFSETOF_SIN6_ADDR + Native.IN6_ADDRESS_OFFSETOF_S6_ADDR;
if (bytes.length == IPV4_ADDRESS_LENGTH) {
memory.position(position + offset);
memory.put(IPV4_MAPPED_IPV6_PREFIX);
memory.put(bytes, 0, IPV4_ADDRESS_LENGTH);
// Skip sin6_scope_id as we did memset before
} else {
memory.position(position + offset);
memory.put(bytes, 0, IPV6_ADDRESS_LENGTH);
memory.putInt(position + Native.SOCKADDR_IN6_OFFSETOF_SIN6_SCOPE_ID,
((Inet6Address) address).getScopeId());
}
return Native.SIZEOF_SOCKADDR_IN6;
} finally {
memory.reset();
}
}
/**
* <pre>{@code
* struct sockaddr_un {
* sa_family_t sun_family; // AF_UNIX
* char sun_path[108]; // Pathname
* }
* }</pre>
*/
static int setUds(ByteBuffer memory, DomainSocketAddress address) {
byte[] path = address.path().getBytes(StandardCharsets.UTF_8);
// Check if this is an abstract namespace socket (starts with '\0')
boolean isAbstract = path.length > 0 && path[0] == 0;
// For pathname sockets, we need space for the null terminator
// For abstract sockets, we don't add a null terminator
int requiredLength = isAbstract ? path.length : path.length + 1;
if (requiredLength > Native.MAX_SUN_PATH_LEN) {
throw new IllegalArgumentException("path too long: " + address.path());
}
int position = memory.position();
memory.mark();
try {
memory.putShort(position + Native.SOCKADDR_UN_OFFSETOF_SUN_FAMILY, Native.AF_UNIX);
memory.position(position + Native.SOCKADDR_UN_OFFSETOF_SUN_PATH);
memory.put(path);
// Only add null terminator for pathname sockets, not for abstract sockets
if (!isAbstract) {
memory.put((byte) 0);
}
// Return the actual address length:
// - For pathname sockets: offsetof(sun_path) + strlen(path) + 1
// - For abstract sockets: offsetof(sun_path) + name_length
return Native.SOCKADDR_UN_OFFSETOF_SUN_PATH + requiredLength;
} finally {
memory.reset();
}
}
static InetSocketAddress getIPv4(ByteBuffer memory, byte[] tmpArray) {
assert tmpArray.length == IPV4_ADDRESS_LENGTH;
int position = memory.position();
memory.mark();
try {
int port = handleNetworkOrder(memory.order(),
memory.getShort(position + Native.SOCKADDR_IN_OFFSETOF_SIN_PORT)) & 0xFFFF;
memory.position(position + Native.SOCKADDR_IN_OFFSETOF_SIN_ADDR + Native.IN_ADDRESS_OFFSETOF_S_ADDR);
memory.get(tmpArray);
try {
return new InetSocketAddress(InetAddress.getByAddress(tmpArray), port);
} catch (UnknownHostException ignore) {
return null;
}
} finally {
memory.reset();
}
}
static InetSocketAddress getIPv6(ByteBuffer memory, byte[] ipv6Array, byte[] ipv4Array) {
assert ipv6Array.length == IPV6_ADDRESS_LENGTH;
assert ipv4Array.length == IPV4_ADDRESS_LENGTH;
int position = memory.position();
memory.mark();
try {
int port = handleNetworkOrder(memory.order(), memory.getShort(
position + Native.SOCKADDR_IN6_OFFSETOF_SIN6_PORT)) & 0xFFFF;
memory.position(position + Native.SOCKADDR_IN6_OFFSETOF_SIN6_ADDR + Native.IN6_ADDRESS_OFFSETOF_S6_ADDR);
memory.get(ipv6Array);
if (PlatformDependent.equals(
ipv6Array, 0, IPV4_MAPPED_IPV6_PREFIX, 0, IPV4_MAPPED_IPV6_PREFIX.length)) {
System.arraycopy(ipv6Array, IPV4_MAPPED_IPV6_PREFIX.length, ipv4Array, 0, IPV4_ADDRESS_LENGTH);
try {
return new InetSocketAddress(Inet4Address.getByAddress(ipv4Array), port);
} catch (UnknownHostException ignore) {
return null;
}
} else {
int scopeId = memory.getInt(position + Native.SOCKADDR_IN6_OFFSETOF_SIN6_SCOPE_ID);
try {
return new InetSocketAddress(Inet6Address.getByAddress(null, ipv6Array, scopeId), port);
} catch (UnknownHostException ignore) {
return null;
}
}
} finally {
memory.reset();
}
}
static boolean hasPortIpv4(ByteBuffer memory) {
int port = memory.getShort(memory.position() + Native.SOCKADDR_IN_OFFSETOF_SIN_PORT) & 0xFFFF;
return port > 0;
}
static boolean hasPortIpv6(ByteBuffer memory) {
int port = memory.getShort(memory.position() + Native.SOCKADDR_IN6_OFFSETOF_SIN6_PORT) & 0xFFFF;
return port > 0;
}
private static short handleNetworkOrder(ByteOrder order, short v) {
return order != ByteOrder.nativeOrder() ? v : Short.reverseBytes(v);
}
}
| SockaddrIn |
java | apache__camel | test-infra/camel-test-infra-pulsar/src/test/java/org/apache/camel/test/infra/pulsar/services/PulsarService.java | {
"start": 1043,
"end": 1133
} | interface ____ extends TestService, PulsarInfraService, ContainerTestService {
}
| PulsarService |
java | grpc__grpc-java | binder/src/main/java/io/grpc/binder/UntrustedSecurityPolicies.java | {
"start": 910,
"end": 1475
} | class ____ {
private UntrustedSecurityPolicies() {}
/**
* Return a security policy which allows any peer on device. Servers should only use this policy
* if they intend to expose a service to all applications on device. Clients should only use this
* policy if they don't need to trust the application they're connecting to.
*/
public static SecurityPolicy untrustedPublic() {
return new SecurityPolicy() {
@Override
public Status checkAuthorization(int uid) {
return Status.OK;
}
};
}
}
| UntrustedSecurityPolicies |
java | elastic__elasticsearch | x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/service/TransportCreateServiceAccountTokenActionTests.java | {
"start": 1387,
"end": 3418
} | class ____ extends ESTestCase {
private ServiceAccountService serviceAccountService;
private SecurityContext securityContext;
private TransportCreateServiceAccountTokenAction transportCreateServiceAccountTokenAction;
@Before
@SuppressForbidden(reason = "Allow accessing localhost")
public void init() throws IOException {
serviceAccountService = mock(ServiceAccountService.class);
securityContext = mock(SecurityContext.class);
TransportService transportService = MockUtils.setupTransportServiceWithThreadpoolExecutor();
transportCreateServiceAccountTokenAction = new TransportCreateServiceAccountTokenAction(
transportService,
new ActionFilters(Collections.emptySet()),
serviceAccountService,
securityContext
);
}
public void testAuthenticationIsRequired() {
when(securityContext.getAuthentication()).thenReturn(null);
final PlainActionFuture<CreateServiceAccountTokenResponse> future = new PlainActionFuture<>();
transportCreateServiceAccountTokenAction.doExecute(mock(Task.class), mock(CreateServiceAccountTokenRequest.class), future);
final IllegalStateException e = expectThrows(IllegalStateException.class, future::actionGet);
assertThat(e.getMessage(), containsString("authentication is required"));
}
public void testExecutionWillDelegate() {
final Authentication authentication = AuthenticationTestHelper.builder().build();
when(securityContext.getAuthentication()).thenReturn(authentication);
final CreateServiceAccountTokenRequest request = mock(CreateServiceAccountTokenRequest.class);
final PlainActionFuture<CreateServiceAccountTokenResponse> future = new PlainActionFuture<>();
transportCreateServiceAccountTokenAction.doExecute(mock(Task.class), request, future);
verify(serviceAccountService).createIndexToken(authentication, request, future);
}
}
| TransportCreateServiceAccountTokenActionTests |
java | redisson__redisson | redisson/src/main/java/org/redisson/api/RedissonRxClient.java | {
"start": 52905,
"end": 54636
} | interface ____ operations over Redis keys with specified <code>options</code>.
* Each of Redis/Redisson object is associated with own key.
*
* @return Keys object
*/
RKeysRx getKeys(KeysOptions options);
/**
* Use {@link RedissonClient#shutdown()} instead
*/
@Deprecated
void shutdown();
/**
* Allows to get configuration provided
* during Redisson instance creation. Further changes on
* this object not affect Redisson instance.
*
* @return Config object
*/
Config getConfig();
/**
* Use {@link org.redisson.api.RedissonClient#getRedisNodes(org.redisson.api.redisnode.RedisNodes)} instead
*
* @return NodesGroup object
*/
@Deprecated
NodesGroup<Node> getNodesGroup();
/**
* Use {@link org.redisson.api.RedissonClient#getRedisNodes(org.redisson.api.redisnode.RedisNodes)} instead
*
* @return NodesGroup object
*/
@Deprecated
NodesGroup<ClusterNode> getClusterNodesGroup();
/**
* Returns {@code true} if this Redisson instance has been shut down.
*
* @return <code>true</code> if this Redisson instance has been shut down otherwise <code>false</code>
*/
boolean isShutdown();
/**
* Returns {@code true} if this Redisson instance was started to be shutdown
* or was shutdown {@link #isShutdown()} already.
*
* @return <code>true</code> if this Redisson instance was started to be shutdown
* or was shutdown {@link #isShutdown()} already otherwise <code>false</code>
*/
boolean isShuttingDown();
/**
* Returns id of this Redisson instance
*
* @return id
*/
String getId();
}
| for |
java | micronaut-projects__micronaut-core | http-server/src/main/java/io/micronaut/http/server/exceptions/response/ErrorResponseBodyProvider.java | {
"start": 879,
"end": 1272
} | interface ____<T> {
/**
*
* @param errorContext Error Context
* @param response Base HTTP Response
* @return The HTTP Response Body
*/
@NonNull
T body(@NonNull ErrorContext errorContext, @NonNull HttpResponse<?> response);
/**
* @return The content type of the HTTP response
*/
@NonNull
String contentType();
}
| ErrorResponseBodyProvider |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/TestRouterAsyncSnapshot.java | {
"start": 1777,
"end": 4761
} | class ____ extends RouterAsyncProtocolTestBase {
private final String testFile = "/testdir/testSnapshot.file";
private FileSystem routerFs;
private RouterAsyncSnapshot asyncSnapshot;
@BeforeEach
public void setup() throws IOException {
routerFs = getRouterFs();
asyncSnapshot = new RouterAsyncSnapshot(getRouterAsyncRpcServer());
FSDataOutputStream fsDataOutputStream = routerFs.create(
new Path(testFile), true);
fsDataOutputStream.write(new byte[1024]);
fsDataOutputStream.close();
}
@Test
public void testRouterAsyncSnapshot() throws Exception {
asyncSnapshot.allowSnapshot("/testdir");
syncReturn(null);
asyncSnapshot.createSnapshot("/testdir", "testdirSnapshot");
String snapshotName = syncReturn(String.class);
assertEquals("/testdir/.snapshot/testdirSnapshot", snapshotName);
asyncSnapshot.getSnapshottableDirListing();
SnapshottableDirectoryStatus[] snapshottableDirectoryStatuses =
syncReturn(SnapshottableDirectoryStatus[].class);
assertEquals(1, snapshottableDirectoryStatuses.length);
asyncSnapshot.getSnapshotListing("/testdir");
SnapshotStatus[] snapshotStatuses = syncReturn(SnapshotStatus[].class);
assertEquals(1, snapshotStatuses.length);
FSDataOutputStream fsDataOutputStream = routerFs.append(
new Path("/testdir/testSnapshot.file"), true);
fsDataOutputStream.write(new byte[1024]);
fsDataOutputStream.close();
asyncSnapshot.createSnapshot("/testdir", "testdirSnapshot1");
snapshotName = syncReturn(String.class);
assertEquals("/testdir/.snapshot/testdirSnapshot1", snapshotName);
asyncSnapshot.getSnapshotDiffReport("/testdir",
"testdirSnapshot", "testdirSnapshot1");
SnapshotDiffReport snapshotDiffReport = syncReturn(SnapshotDiffReport.class);
assertEquals(MODIFY, snapshotDiffReport.getDiffList().get(0).getType());
asyncSnapshot.getSnapshotDiffReportListing("/testdir",
"testdirSnapshot", "testdirSnapshot1", new byte[]{}, -1);
SnapshotDiffReportListing snapshotDiffReportListing =
syncReturn(SnapshotDiffReportListing.class);
assertEquals(1, snapshotDiffReportListing.getModifyList().size());
LambdaTestUtils.intercept(SnapshotException.class, () -> {
asyncSnapshot.disallowSnapshot("/testdir");
syncReturn(null);
});
asyncSnapshot.renameSnapshot("/testdir",
"testdirSnapshot1", "testdirSnapshot2");
syncReturn(null);
LambdaTestUtils.intercept(SnapshotException.class,
"Cannot delete snapshot testdirSnapshot1 from path /testdir",
() -> {
asyncSnapshot.deleteSnapshot("/testdir", "testdirSnapshot1");
syncReturn(null);
});
asyncSnapshot.deleteSnapshot("/testdir", "testdirSnapshot2");
syncReturn(null);
asyncSnapshot.deleteSnapshot("/testdir", "testdirSnapshot");
syncReturn(null);
asyncSnapshot.disallowSnapshot("/testdir");
syncReturn(null);
}
} | TestRouterAsyncSnapshot |
java | apache__camel | core/camel-console/src/generated/java/org/apache/camel/impl/console/EventConsoleConfigurer.java | {
"start": 706,
"end": 2665
} | class ____ extends org.apache.camel.support.component.PropertyConfigurerSupport implements GeneratedPropertyConfigurer, ExtendedPropertyConfigurerGetter {
private static final Map<String, Object> ALL_OPTIONS;
static {
Map<String, Object> map = new CaseInsensitiveMap();
map.put("CamelContext", org.apache.camel.CamelContext.class);
map.put("Capacity", int.class);
ALL_OPTIONS = map;
}
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
org.apache.camel.impl.console.EventConsole target = (org.apache.camel.impl.console.EventConsole) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "camelcontext":
case "camelContext": target.setCamelContext(property(camelContext, org.apache.camel.CamelContext.class, value)); return true;
case "capacity": target.setCapacity(property(camelContext, int.class, value)); return true;
default: return false;
}
}
@Override
public Map<String, Object> getAllOptions(Object target) {
return ALL_OPTIONS;
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "camelcontext":
case "camelContext": return org.apache.camel.CamelContext.class;
case "capacity": return int.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
org.apache.camel.impl.console.EventConsole target = (org.apache.camel.impl.console.EventConsole) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "camelcontext":
case "camelContext": return target.getCamelContext();
case "capacity": return target.getCapacity();
default: return null;
}
}
}
| EventConsoleConfigurer |
java | apache__camel | components/camel-infinispan/camel-infinispan-embedded/src/test/java/org/apache/camel/component/infinispan/embedded/InfinispanEmbeddedClusteredTestSupport.java | {
"start": 1797,
"end": 4225
} | class ____ extends MultipleCacheManagersTest {
protected final CacheMode cacheMode;
protected final int clusterSize;
protected ConfigurationBuilder builderUsed;
protected String cacheName;
public ClusteredCacheSupport(CacheMode cacheMode, int clusterSize) {
this.cacheMode = cacheMode;
this.clusterSize = clusterSize;
}
@Override
public void createCacheManagers() {
builderUsed = new ConfigurationBuilder();
builderUsed.clustering().cacheMode(cacheMode);
if (cacheMode.isDistributed()) {
builderUsed.clustering().hash().numOwners(1);
}
if (cacheName != null) {
createClusteredCaches(clusterSize, cacheName, TestDataSCI.INSTANCE, builderUsed);
} else {
createClusteredCaches(clusterSize, TestDataSCI.INSTANCE, builderUsed);
}
}
}
@Override
public void setupResources() throws Exception {
ClusteredCacheSupport cluster = new ClusteredCacheSupport(CacheMode.DIST_SYNC, 2);
cluster.createCacheManagers();
clusteredCacheContainers = Objects.requireNonNull(cluster.getCacheManagers());
super.setupResources();
}
@Override
public void cleanupResources() throws Exception {
super.cleanupResources();
if (clusteredCacheContainers != null) {
// Has to be done later, maybe CamelTestSupport should
for (BasicCacheContainer container : clusteredCacheContainers) {
container.stop();
}
}
}
protected Cache<Object, Object> getCache(int index) {
return clusteredCacheContainers.get(index).getCache();
}
protected void injectTimeService() {
ts0 = new ControlledTimeService();
TestingUtil.replaceComponent(clusteredCacheContainers.get(0), TimeService.class, ts0, true);
ts1 = new ControlledTimeService();
TestingUtil.replaceComponent(clusteredCacheContainers.get(1), TimeService.class, ts1, true);
}
@BindToRegistry
public ComponentCustomizer infinispanComponentCustomizer() {
return ComponentCustomizer.forType(
InfinispanEmbeddedComponent.class,
component -> component.getConfiguration().setCacheContainer(clusteredCacheContainers.get(0)));
}
}
| ClusteredCacheSupport |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/fetching/GraphParsingTest.java | {
"start": 6945,
"end": 7026
} | class ____ {
@Id
private Integer id;
@ManyToOne
Showing showing;
}
}
| Ticket |
java | quarkusio__quarkus | extensions/opentelemetry/runtime/src/main/java/io/quarkus/opentelemetry/runtime/tracing/TracerUtil.java | {
"start": 449,
"end": 1188
} | class ____ {
private TracerUtil() {
}
public static Resource mapResourceAttributes(List<String> resourceAttributes, String serviceName, String hostname) {
final AttributesBuilder attributesBuilder = Attributes.builder();
if (!resourceAttributes.isEmpty()) {
OpenTelemetryUtil
.convertKeyValueListToMap(resourceAttributes)
.forEach(attributesBuilder::put);
}
if (serviceName != null) {
attributesBuilder.put(SERVICE_NAME.getKey(), serviceName);
}
if (hostname != null) {
attributesBuilder.put(HOST_NAME, hostname);
}
return Resource.create(attributesBuilder.build());
}
}
| TracerUtil |
java | quarkusio__quarkus | extensions/reactive-datasource/runtime/src/main/java/io/quarkus/reactive/datasource/runtime/DataSourcesReactiveBuildTimeConfig.java | {
"start": 2242,
"end": 2442
} | interface ____ {
/**
* The Reactive build time configuration.
*/
public DataSourceReactiveBuildTimeConfig reactive();
}
}
| DataSourceReactiveOuterNamedBuildTimeConfig |
java | spring-projects__spring-framework | spring-websocket/src/main/java/org/springframework/web/socket/sockjs/support/AbstractSockJsService.java | {
"start": 21341,
"end": 21479
} | interface ____ {
void handle(ServerHttpRequest request, ServerHttpResponse response) throws IOException;
}
private | SockJsRequestHandler |
java | spring-projects__spring-boot | core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/task/TaskExecutorConfigurations.java | {
"start": 7496,
"end": 8206
} | class ____ {
@Bean
static BeanPostProcessor applicationTaskExecutorAsyncConfigurerBeanPostProcessor(
ObjectProvider<BeanFactory> beanFactory) {
return new BeanPostProcessor() {
@Override
public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException {
if (bean instanceof AsyncConfigurer asyncConfigurer
&& !(bean instanceof ApplicationTaskExecutorAsyncConfigurer)) {
return new ApplicationTaskExecutorAsyncConfigurer(beanFactory.getObject(), asyncConfigurer);
}
return bean;
}
};
}
}
@Configuration(proxyBeanMethods = false)
@ConditionalOnMissingBean(AsyncConfigurer.class)
static | AsyncConfigurerWrapperConfiguration |
java | lettuce-io__lettuce-core | src/test/java/io/lettuce/authx/EntraIdTestContext.java | {
"start": 108,
"end": 2091
} | class ____ {
private static final String AZURE_CLIENT_ID = "AZURE_CLIENT_ID";
private static final String AZURE_CLIENT_SECRET = "AZURE_CLIENT_SECRET";
private static final String AZURE_AUTHORITY = "AZURE_AUTHORITY";
private static final String AZURE_REDIS_SCOPES = "AZURE_REDIS_SCOPES";
private static final String AZURE_USER_ASSIGNED_MANAGED_ID = "AZURE_USER_ASSIGNED_MANAGED_ID";
private final String clientId;
private final String authority;
private final String clientSecret;
private Set<String> redisScopes;
private String userAssignedManagedIdentity;
public static final EntraIdTestContext DEFAULT = new EntraIdTestContext();
private EntraIdTestContext() {
clientId = System.getenv(AZURE_CLIENT_ID);
authority = System.getenv(AZURE_AUTHORITY);
clientSecret = System.getenv(AZURE_CLIENT_SECRET);
this.userAssignedManagedIdentity = System.getenv(AZURE_USER_ASSIGNED_MANAGED_ID);
}
public EntraIdTestContext(String clientId, String authority, String clientSecret, Set<String> redisScopes,
String userAssignedManagedIdentity) {
this.clientId = clientId;
this.authority = authority;
this.clientSecret = clientSecret;
this.redisScopes = redisScopes;
this.userAssignedManagedIdentity = userAssignedManagedIdentity;
}
public String getClientId() {
return clientId;
}
public String getAuthority() {
return authority;
}
public String getClientSecret() {
return clientSecret;
}
public Set<String> getRedisScopes() {
if (redisScopes == null) {
String redisScopesEnv = System.getenv(AZURE_REDIS_SCOPES);
this.redisScopes = new HashSet<>(Arrays.asList(redisScopesEnv.split(";")));
}
return redisScopes;
}
public String getUserAssignedManagedIdentity() {
return userAssignedManagedIdentity;
}
}
| EntraIdTestContext |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/deser/creators/EnumCreatorTest.java | {
"start": 1484,
"end": 1891
} | enum ____
{
ENUM_A(1), ENUM_B(2), ENUM_C(3);
private final int id;
private TestEnumFromInt(int id) {
this.id = id;
}
@JsonCreator public static TestEnumFromInt fromId(int id) {
for (TestEnumFromInt e: values()) {
if (e.id == id) return e;
}
return null;
}
}
protected | TestEnumFromInt |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/nullness/NullArgumentForNonNullParameterTest.java | {
"start": 5934,
"end": 6604
} | class ____ {
void consume(String s) {}
void foo() {
// BUG: Diagnostic contains:
consume(null);
}
}
""")
.doTest();
}
@Test
public void negativeNullMarkedPackageInfoCountermanded() {
aggressiveHelper
.addSourceLines(
"p/package-info.java",
"""
@org.jspecify.annotations.NullMarked
package p;
""")
.addSourceLines(
"p/Foo.java",
"""
package p;
import org.jspecify.annotations.NullUnmarked;
@NullUnmarked
| Foo |
java | apache__rocketmq | test/src/test/java/org/apache/rocketmq/test/container/ContainerIntegrationTestBase.java | {
"start": 29526,
"end": 31470
} | class ____ {
private String clusterName;
private String brokerName;
private String brokerAddr;
private long brokerId;
public BrokerConfigLite(final String clusterName, final String brokerName, final String brokerAddr,
final long brokerId) {
this.clusterName = clusterName;
this.brokerName = brokerName;
this.brokerAddr = brokerAddr;
this.brokerId = brokerId;
}
public String getClusterName() {
return clusterName;
}
public String getBrokerName() {
return brokerName;
}
public String getBrokerAddr() {
return brokerAddr;
}
public long getBrokerId() {
return brokerId;
}
@Override
public boolean equals(final Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
final BrokerConfigLite lite = (BrokerConfigLite) o;
return new EqualsBuilder()
.append(clusterName, lite.clusterName)
.append(brokerName, lite.brokerName)
.append(brokerAddr, lite.brokerAddr)
.append(brokerId, lite.brokerId)
.isEquals();
}
@Override
public int hashCode() {
return new HashCodeBuilder(17, 37)
.append(clusterName)
.append(brokerName)
.append(brokerAddr)
.append(brokerId)
.toHashCode();
}
}
public static ConfigContext buildConfigContext(BrokerConfig brokerConfig, MessageStoreConfig messageStoreConfig) {
return new ConfigContext.Builder()
.brokerConfig(brokerConfig)
.messageStoreConfig(messageStoreConfig)
.build();
}
}
| BrokerConfigLite |
java | quarkusio__quarkus | integration-tests/grpc-streaming/src/test/java/io/quarkus/grpc/example/streaming/LongStreamTestBase.java | {
"start": 872,
"end": 4538
} | class ____ {
private final Logger log = LoggerFactory.getLogger(getClass());
@GrpcClient("streaming")
Streaming streamSvc;
@Test
@Timeout(10)
public void testQuickFailure() {
Multi<StringRequest> multi = Multi.createFrom().range(1, 1000)
// delaying stream to make it a bit longer
.call(() -> Uni.createFrom().nullItem().onItem().delayIt().by(Duration.of(1000, ChronoUnit.NANOS)))
.map(x -> StringRequest.newBuilder()
.setAnyValue(x.toString())
.build())
.select().first(10);
UniAssertSubscriber<StringReply> subscriber = streamSvc.quickStringStream(multi)
.subscribe().withSubscriber(UniAssertSubscriber.create());
subscriber
.awaitFailure()
.assertFailedWith(StatusRuntimeException.class);
}
@Test
@Timeout(10)
public void testMidFailure() {
AtomicBoolean cancelled = new AtomicBoolean();
Multi<StringRequest> multi = Multi.createFrom().range(1, 1000)
// delaying stream to make it a bit longer
.call(() -> Uni.createFrom().nullItem().onItem().delayIt().by(Duration.of(500, ChronoUnit.MILLIS)))
.map(x -> StringRequest.newBuilder()
.setAnyValue(x.toString())
.build())
.onCancellation().invoke(() -> cancelled.set(true))
.select().first(10);
UniAssertSubscriber<StringReply> subscriber = streamSvc.midStringStream(multi)
.subscribe().withSubscriber(UniAssertSubscriber.create());
subscriber
.awaitFailure()
.assertFailedWith(StatusRuntimeException.class);
await().untilAtomic(cancelled, is(true));
}
@Test
@Timeout(10)
public void testQuickFailureWithBidi() {
Multi<StringRequest> multi = Multi.createFrom().range(1, 1000)
// delaying stream to make it a bit longer
.call(() -> Uni.createFrom().nullItem().onItem().delayIt().by(Duration.of(500, ChronoUnit.MILLIS)))
.map(x -> StringRequest.newBuilder()
.setAnyValue(x.toString())
.build())
.select().first(10);
AssertSubscriber<StringReply> subscriber = streamSvc.quickStringBiDiStream(multi)
.subscribe().withSubscriber(AssertSubscriber.create(100));
subscriber
.awaitFailure()
.assertFailedWith(StatusRuntimeException.class);
}
@Timeout(10)
@RepeatedTest(5)
public void testMidFailureWithBiDi() {
AtomicBoolean cancelled = new AtomicBoolean();
Multi<StringRequest> multi = Multi.createFrom().range(1, 1000)
// delaying stream to make it a bit longer
.call(() -> Uni.createFrom().nullItem().onItem().delayIt().by(Duration.of(500, ChronoUnit.MILLIS)))
.map(x -> StringRequest.newBuilder()
.setAnyValue(x.toString())
.build())
.onCancellation().invoke(() -> cancelled.set(true)).log("source")
.select().first(10);
AssertSubscriber<StringReply> subscriber = streamSvc.midStringBiDiStream(multi)
.log("downstream")
.subscribe().withSubscriber(AssertSubscriber.create(10));
subscriber
.awaitFailure()
.assertFailedWith(StatusRuntimeException.class);
await().untilAtomic(cancelled, is(true));
}
}
| LongStreamTestBase |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/imports/innerclasses/BeanWithInnerEnum.java | {
"start": 208,
"end": 305
} | class ____ {
private String test;
private InnerEnum innerEnum;
public | BeanWithInnerEnum |
java | quarkusio__quarkus | test-framework/common/src/main/java/io/quarkus/test/common/ArtifactLauncher.java | {
"start": 1063,
"end": 1344
} | interface ____ extends AutoCloseable {
Map<String, String> properties();
String networkId();
boolean manageNetwork();
CuratedApplication getCuratedApplication();
void close();
}
}
| DevServicesLaunchResult |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/CronEndpointBuilderFactory.java | {
"start": 8112,
"end": 8769
} | interface ____ triggering events at times specified through
* the Unix cron syntax.
*
* Category: scheduling
* Since: 3.1
* Maven coordinates: org.apache.camel:camel-cron
*
* Syntax: <code>cron:name</code>
*
* Path parameter: name (required)
* The name of the cron trigger
*
* @param path name
* @return the dsl builder
*/
default CronEndpointBuilder cron(String path) {
return CronEndpointBuilderFactory.endpointBuilder("cron", path);
}
/**
* Cron (camel-cron)
* A generic | for |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/masterreplica/MasterReplicaConnectionProvider.java | {
"start": 11452,
"end": 12269
} | class ____ {
private final String host;
private final int port;
ConnectionKey(String host, int port) {
this.host = host;
this.port = port;
}
@Override
public boolean equals(Object o) {
if (this == o)
return true;
if (!(o instanceof ConnectionKey))
return false;
ConnectionKey that = (ConnectionKey) o;
if (port != that.port)
return false;
return !(host != null ? !host.equals(that.host) : that.host != null);
}
@Override
public int hashCode() {
int result = (host != null ? host.hashCode() : 0);
result = 31 * result + port;
return result;
}
}
}
| ConnectionKey |
java | square__retrofit | retrofit/java-test/src/test/java/retrofit2/RequestFactoryTest.java | {
"start": 14106,
"end": 14156
} | class ____ extends HashMap<String, String> {}
| Foo |
java | hibernate__hibernate-orm | hibernate-envers/src/test/java/org/hibernate/orm/test/envers/integration/secondary/ids/EmbIdSecondary.java | {
"start": 884,
"end": 2431
} | class ____ {
private EmbId id;
@BeforeClassTemplate
public void initData(SessionFactoryScope scope) {
id = new EmbId( 1, 2 );
SecondaryEmbIdTestEntity ste = new SecondaryEmbIdTestEntity( id, "a", "1" );
// Revision 1
scope.inTransaction( em -> {
em.persist( ste );
} );
// Revision 2
scope.inTransaction( em -> {
SecondaryEmbIdTestEntity entity = em.find( SecondaryEmbIdTestEntity.class, ste.getId() );
entity.setS1( "b" );
entity.setS2( "2" );
} );
}
@Test
public void testRevisionsCounts(SessionFactoryScope scope) {
scope.inSession( em -> {
final var auditReader = AuditReaderFactory.get( em );
assertEquals( Arrays.asList( 1, 2 ), auditReader.getRevisions( SecondaryEmbIdTestEntity.class, id ) );
} );
}
@Test
public void testHistoryOfId(SessionFactoryScope scope) {
SecondaryEmbIdTestEntity ver1 = new SecondaryEmbIdTestEntity( id, "a", "1" );
SecondaryEmbIdTestEntity ver2 = new SecondaryEmbIdTestEntity( id, "b", "2" );
scope.inSession( em -> {
final var auditReader = AuditReaderFactory.get( em );
assertEquals( ver1, auditReader.find( SecondaryEmbIdTestEntity.class, id, 1 ) );
assertEquals( ver2, auditReader.find( SecondaryEmbIdTestEntity.class, id, 2 ) );
} );
}
@Test
public void testTableNames(DomainModelScope scope) {
assertEquals( "sec_embid_versions", scope.getDomainModel().getEntityBinding(
"org.hibernate.orm.test.envers.integration.secondary.ids.SecondaryEmbIdTestEntity_AUD" ).getJoins()
.get( 0 ).getTable().getName() );
}
}
| EmbIdSecondary |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java | {
"start": 2362,
"end": 3250
} | class ____ responsible for tracking the replication group with its progress and safety markers (local and global checkpoints).
*
* The global checkpoint is the highest sequence number for which all lower (or equal) sequence number have been processed
* on all shards that are currently active. Since shards count as "active" when the master starts
* them, and before this primary shard has been notified of this fact, we also include shards that have completed recovery. These shards
* have received all old operations via the recovery mechanism and are kept up to date by the various replications actions. The set of
* shards that are taken into account for the global checkpoint calculation are called the "in-sync shards".
* <p>
* The global checkpoint is maintained by the primary shard and is replicated to all the replicas (via {@link GlobalCheckpointSyncAction}).
*/
public | is |
java | google__dagger | javatests/dagger/internal/codegen/DuplicateBindingsValidationTest.java | {
"start": 43524,
"end": 44035
} | interface ____ {",
" @Provides static String one(Optional<Object> optional) { return \"one\"; }",
" @Provides static String two() { return \"two\"; }",
" @BindsOptionalOf Object optional();",
"}");
Source child =
CompilerTests.javaSource(
"test.Child",
"package test;",
"",
"import dagger.Subcomponent;",
"",
"@Subcomponent(modules = ChildModule.class)",
" | ParentModule |
java | quarkusio__quarkus | extensions/netty/runtime/src/main/java/io/quarkus/netty/runtime/graal/NettySubstitutions.java | {
"start": 8683,
"end": 8885
} | class ____ {
@Alias
Target_io_netty_handler_ssl_ResumptionController() {
}
}
@TargetClass(className = "io.netty.handler.ssl.SslContext")
final | Target_io_netty_handler_ssl_ResumptionController |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/query/results/internal/implicit/package-info.java | {
"start": 325,
"end": 439
} | class ____ no explicit property mappings</li>
* </ul>
*/
package org.hibernate.query.results.internal.implicit;
| with |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/timeline/TestTimelineRecords.java | {
"start": 1502,
"end": 13859
} | class ____ {
private static final Logger LOG =
LoggerFactory.getLogger(TestTimelineRecords.class);
@Test
void testEntities() throws Exception {
TimelineEntities entities = new TimelineEntities();
for (int j = 0; j < 2; ++j) {
TimelineEntity entity = new TimelineEntity();
entity.setEntityId("entity id " + j);
entity.setEntityType("entity type " + j);
entity.setStartTime(System.currentTimeMillis());
for (int i = 0; i < 2; ++i) {
TimelineEvent event = new TimelineEvent();
event.setTimestamp(System.currentTimeMillis());
event.setEventType("event type " + i);
event.addEventInfo("key1", "val1");
event.addEventInfo("key2", "val2");
entity.addEvent(event);
}
entity.addRelatedEntity("test ref type 1", "test ref id 1");
entity.addRelatedEntity("test ref type 2", "test ref id 2");
entity.addPrimaryFilter("pkey1", "pval1");
entity.addPrimaryFilter("pkey2", "pval2");
entity.addOtherInfo("okey1", "oval1");
entity.addOtherInfo("okey2", "oval2");
entity.setDomainId("domain id " + j);
entities.addEntity(entity);
}
LOG.info("Entities in JSON:");
LOG.info(TimelineUtils.dumpTimelineRecordtoJSON(entities, true));
assertEquals(2, entities.getEntities().size());
TimelineEntity entity1 = entities.getEntities().get(0);
assertEquals("entity id 0", entity1.getEntityId());
assertEquals("entity type 0", entity1.getEntityType());
assertEquals(2, entity1.getRelatedEntities().size());
assertEquals(2, entity1.getEvents().size());
assertEquals(2, entity1.getPrimaryFilters().size());
assertEquals(2, entity1.getOtherInfo().size());
assertEquals("domain id 0", entity1.getDomainId());
TimelineEntity entity2 = entities.getEntities().get(1);
assertEquals("entity id 1", entity2.getEntityId());
assertEquals("entity type 1", entity2.getEntityType());
assertEquals(2, entity2.getRelatedEntities().size());
assertEquals(2, entity2.getEvents().size());
assertEquals(2, entity2.getPrimaryFilters().size());
assertEquals(2, entity2.getOtherInfo().size());
assertEquals("domain id 1", entity2.getDomainId());
}
@Test
void testEvents() throws Exception {
TimelineEvents events = new TimelineEvents();
for (int j = 0; j < 2; ++j) {
TimelineEvents.EventsOfOneEntity partEvents =
new TimelineEvents.EventsOfOneEntity();
partEvents.setEntityId("entity id " + j);
partEvents.setEntityType("entity type " + j);
for (int i = 0; i < 2; ++i) {
TimelineEvent event = new TimelineEvent();
event.setTimestamp(System.currentTimeMillis());
event.setEventType("event type " + i);
event.addEventInfo("key1", "val1");
event.addEventInfo("key2", "val2");
partEvents.addEvent(event);
}
events.addEvent(partEvents);
}
LOG.info("Events in JSON:");
LOG.info(TimelineUtils.dumpTimelineRecordtoJSON(events, true));
assertEquals(2, events.getAllEvents().size());
TimelineEvents.EventsOfOneEntity partEvents1 = events.getAllEvents().get(0);
assertEquals("entity id 0", partEvents1.getEntityId());
assertEquals("entity type 0", partEvents1.getEntityType());
assertEquals(2, partEvents1.getEvents().size());
TimelineEvent event11 = partEvents1.getEvents().get(0);
assertEquals("event type 0", event11.getEventType());
assertEquals(2, event11.getEventInfo().size());
TimelineEvent event12 = partEvents1.getEvents().get(1);
assertEquals("event type 1", event12.getEventType());
assertEquals(2, event12.getEventInfo().size());
TimelineEvents.EventsOfOneEntity partEvents2 = events.getAllEvents().get(1);
assertEquals("entity id 1", partEvents2.getEntityId());
assertEquals("entity type 1", partEvents2.getEntityType());
assertEquals(2, partEvents2.getEvents().size());
TimelineEvent event21 = partEvents2.getEvents().get(0);
assertEquals("event type 0", event21.getEventType());
assertEquals(2, event21.getEventInfo().size());
TimelineEvent event22 = partEvents2.getEvents().get(1);
assertEquals("event type 1", event22.getEventType());
assertEquals(2, event22.getEventInfo().size());
}
@Test
void testTimelinePutErrors() throws Exception {
TimelinePutResponse TimelinePutErrors = new TimelinePutResponse();
TimelinePutError error1 = new TimelinePutError();
error1.setEntityId("entity id 1");
error1.setEntityId("entity type 1");
error1.setErrorCode(TimelinePutError.NO_START_TIME);
TimelinePutErrors.addError(error1);
List<TimelinePutError> response = new ArrayList<TimelinePutError>();
response.add(error1);
TimelinePutError error2 = new TimelinePutError();
error2.setEntityId("entity id 2");
error2.setEntityId("entity type 2");
error2.setErrorCode(TimelinePutError.IO_EXCEPTION);
response.add(error2);
TimelinePutErrors.addErrors(response);
LOG.info("Errors in JSON:");
LOG.info(TimelineUtils.dumpTimelineRecordtoJSON(TimelinePutErrors, true));
assertEquals(3, TimelinePutErrors.getErrors().size());
TimelinePutError e = TimelinePutErrors.getErrors().get(0);
assertEquals(error1.getEntityId(), e.getEntityId());
assertEquals(error1.getEntityType(), e.getEntityType());
assertEquals(error1.getErrorCode(), e.getErrorCode());
e = TimelinePutErrors.getErrors().get(1);
assertEquals(error1.getEntityId(), e.getEntityId());
assertEquals(error1.getEntityType(), e.getEntityType());
assertEquals(error1.getErrorCode(), e.getErrorCode());
e = TimelinePutErrors.getErrors().get(2);
assertEquals(error2.getEntityId(), e.getEntityId());
assertEquals(error2.getEntityType(), e.getEntityType());
assertEquals(error2.getErrorCode(), e.getErrorCode());
}
@Test
void testTimelineDomain() throws Exception {
TimelineDomains domains = new TimelineDomains();
TimelineDomain domain = null;
for (int i = 0; i < 2; ++i) {
domain = new TimelineDomain();
domain.setId("test id " + (i + 1));
domain.setDescription("test description " + (i + 1));
domain.setOwner("test owner " + (i + 1));
domain.setReaders("test_reader_user_" + (i + 1) +
" test_reader_group+" + (i + 1));
domain.setWriters("test_writer_user_" + (i + 1) +
" test_writer_group+" + (i + 1));
domain.setCreatedTime(0L);
domain.setModifiedTime(1L);
domains.addDomain(domain);
}
LOG.info("Domain in JSON:");
LOG.info(TimelineUtils.dumpTimelineRecordtoJSON(domains, true));
assertEquals(2, domains.getDomains().size());
for (int i = 0; i < domains.getDomains().size(); ++i) {
domain = domains.getDomains().get(i);
assertEquals("test id " + (i + 1), domain.getId());
assertEquals("test description " + (i + 1),
domain.getDescription());
assertEquals("test owner " + (i + 1), domain.getOwner());
assertEquals("test_reader_user_" + (i + 1) +
" test_reader_group+" + (i + 1), domain.getReaders());
assertEquals("test_writer_user_" + (i + 1) +
" test_writer_group+" + (i + 1), domain.getWriters());
assertEquals(Long.valueOf(0L), domain.getCreatedTime());
assertEquals(Long.valueOf(1L), domain.getModifiedTime());
}
}
@Test
void testMapInterfaceOrTimelineRecords() throws Exception {
TimelineEntity entity = new TimelineEntity();
List<Map<String, Set<Object>>> primaryFiltersList =
new ArrayList<Map<String, Set<Object>>>();
primaryFiltersList.add(
Collections.singletonMap("pkey", Collections.singleton((Object) "pval")));
Map<String, Set<Object>> primaryFilters = new TreeMap<String, Set<Object>>();
primaryFilters.put("pkey1", Collections.singleton((Object) "pval1"));
primaryFilters.put("pkey2", Collections.singleton((Object) "pval2"));
primaryFiltersList.add(primaryFilters);
entity.setPrimaryFilters(null);
for (Map<String, Set<Object>> primaryFiltersToSet : primaryFiltersList) {
entity.setPrimaryFilters(primaryFiltersToSet);
assertPrimaryFilters(entity);
Map<String, Set<Object>> primaryFiltersToAdd =
new WeakHashMap<String, Set<Object>>();
primaryFiltersToAdd.put("pkey3", Collections.singleton((Object) "pval3"));
entity.addPrimaryFilters(primaryFiltersToAdd);
assertPrimaryFilters(entity);
}
List<Map<String, Set<String>>> relatedEntitiesList =
new ArrayList<Map<String, Set<String>>>();
relatedEntitiesList.add(
Collections.singletonMap("rkey", Collections.singleton("rval")));
Map<String, Set<String>> relatedEntities = new TreeMap<String, Set<String>>();
relatedEntities.put("rkey1", Collections.singleton("rval1"));
relatedEntities.put("rkey2", Collections.singleton("rval2"));
relatedEntitiesList.add(relatedEntities);
entity.setRelatedEntities(null);
for (Map<String, Set<String>> relatedEntitiesToSet : relatedEntitiesList) {
entity.setRelatedEntities(relatedEntitiesToSet);
assertRelatedEntities(entity);
Map<String, Set<String>> relatedEntitiesToAdd =
new WeakHashMap<String, Set<String>>();
relatedEntitiesToAdd.put("rkey3", Collections.singleton("rval3"));
entity.addRelatedEntities(relatedEntitiesToAdd);
assertRelatedEntities(entity);
}
List<Map<String, Object>> otherInfoList =
new ArrayList<Map<String, Object>>();
otherInfoList.add(Collections.singletonMap("okey", (Object) "oval"));
Map<String, Object> otherInfo = new TreeMap<String, Object>();
otherInfo.put("okey1", "oval1");
otherInfo.put("okey2", "oval2");
otherInfoList.add(otherInfo);
entity.setOtherInfo(null);
for (Map<String, Object> otherInfoToSet : otherInfoList) {
entity.setOtherInfo(otherInfoToSet);
assertOtherInfo(entity);
Map<String, Object> otherInfoToAdd = new WeakHashMap<String, Object>();
otherInfoToAdd.put("okey3", "oval3");
entity.addOtherInfo(otherInfoToAdd);
assertOtherInfo(entity);
}
TimelineEvent event = new TimelineEvent();
List<Map<String, Object>> eventInfoList =
new ArrayList<Map<String, Object>>();
eventInfoList.add(Collections.singletonMap("ekey", (Object) "eval"));
Map<String, Object> eventInfo = new TreeMap<String, Object>();
eventInfo.put("ekey1", "eval1");
eventInfo.put("ekey2", "eval2");
eventInfoList.add(eventInfo);
event.setEventInfo(null);
for (Map<String, Object> eventInfoToSet : eventInfoList) {
event.setEventInfo(eventInfoToSet);
assertEventInfo(event);
Map<String, Object> eventInfoToAdd = new WeakHashMap<String, Object>();
eventInfoToAdd.put("ekey3", "eval3");
event.addEventInfo(eventInfoToAdd);
assertEventInfo(event);
}
}
private static void assertPrimaryFilters(TimelineEntity entity) {
assertNotNull(entity.getPrimaryFilters());
assertNotNull(entity.getPrimaryFiltersJAXB());
assertTrue(entity.getPrimaryFilters() instanceof HashMap);
assertTrue(entity.getPrimaryFiltersJAXB() instanceof HashMap);
assertEquals(
entity.getPrimaryFilters(), entity.getPrimaryFiltersJAXB());
}
private static void assertRelatedEntities(TimelineEntity entity) {
assertNotNull(entity.getRelatedEntities());
assertNotNull(entity.getRelatedEntitiesJAXB());
assertTrue(entity.getRelatedEntities() instanceof HashMap);
assertTrue(entity.getRelatedEntitiesJAXB() instanceof HashMap);
assertEquals(
entity.getRelatedEntities(), entity.getRelatedEntitiesJAXB());
}
private static void assertOtherInfo(TimelineEntity entity) {
assertNotNull(entity.getOtherInfo());
assertNotNull(entity.getOtherInfoJAXB());
assertTrue(entity.getOtherInfo() instanceof HashMap);
assertTrue(entity.getOtherInfoJAXB() instanceof HashMap);
assertEquals(entity.getOtherInfo(), entity.getOtherInfoJAXB());
}
private static void assertEventInfo(TimelineEvent event) {
assertNotNull(event);
assertNotNull(event.getEventInfoJAXB());
assertTrue(event.getEventInfo() instanceof HashMap);
assertTrue(event.getEventInfoJAXB() instanceof HashMap);
assertEquals(event.getEventInfo(), event.getEventInfoJAXB());
}
}
| TestTimelineRecords |
java | google__guava | android/guava-testlib/src/com/google/common/collect/testing/google/MultimapEqualsTester.java | {
"start": 1773,
"end": 3671
} | class ____<K extends @Nullable Object, V extends @Nullable Object>
extends AbstractMultimapTester<K, V, Multimap<K, V>> {
public void testEqualsTrue() {
new EqualsTester()
.addEqualityGroup(multimap(), getSubjectGenerator().create(getSampleElements().toArray()))
.testEquals();
}
public void testEqualsFalse() {
List<Entry<K, V>> targetEntries = new ArrayList<>(getSampleElements());
targetEntries.add(mapEntry(k0(), v3()));
new EqualsTester()
.addEqualityGroup(multimap())
.addEqualityGroup(getSubjectGenerator().create(targetEntries.toArray()))
.testEquals();
}
@CollectionSize.Require(absent = ZERO)
@MapFeature.Require(ALLOWS_NULL_KEYS)
public void testEqualsMultimapWithNullKey() {
Multimap<K, V> original = multimap();
initMultimapWithNullKey();
Multimap<K, V> withNull = multimap();
new EqualsTester()
.addEqualityGroup(original)
.addEqualityGroup(
withNull, getSubjectGenerator().create((Object[]) createArrayWithNullKey()))
.testEquals();
}
@CollectionSize.Require(absent = ZERO)
@MapFeature.Require(ALLOWS_NULL_VALUES)
public void testEqualsMultimapWithNullValue() {
Multimap<K, V> original = multimap();
initMultimapWithNullValue();
Multimap<K, V> withNull = multimap();
new EqualsTester()
.addEqualityGroup(original)
.addEqualityGroup(
withNull, getSubjectGenerator().create((Object[]) createArrayWithNullValue()))
.testEquals();
}
@CollectionSize.Require(absent = ZERO)
public void testNotEqualsEmpty() {
new EqualsTester()
.addEqualityGroup(multimap())
.addEqualityGroup(getSubjectGenerator().create())
.testEquals();
}
public void testHashCodeMatchesAsMap() {
assertEquals(multimap().asMap().hashCode(), multimap().hashCode());
}
}
| MultimapEqualsTester |
java | quarkusio__quarkus | extensions/hibernate-orm/deployment/src/test/java/io/quarkus/hibernate/orm/config/ConfigEnabledFalseAndEntityTest.java | {
"start": 495,
"end": 1846
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot(jar -> jar.addClass(MyEntity.class))
.withConfigurationResource("application.properties")
// This should disable Hibernate ORM even if there is an entity
.overrideConfigKey("quarkus.hibernate-orm.enabled", "false");
@Test
public void entityManagerFactory() {
// The bean is not defined during static init, so it's null.
assertThat(Arc.container().instance(EntityManagerFactory.class).get())
.isNull();
}
@Test
public void sessionFactory() {
// The bean is not defined during static init, so it's null.
assertThat(Arc.container().instance(SessionFactory.class).get())
.isNull();
}
@Test
@ActivateRequestContext
public void entityManager() {
// The bean is not defined during static init, so it's null.
assertThat(Arc.container().instance(EntityManager.class).get())
.isNull();
}
@Test
@ActivateRequestContext
public void session() {
// The bean is not defined during static init, so it's null.
assertThat(Arc.container().instance(Session.class).get())
.isNull();
}
}
| ConfigEnabledFalseAndEntityTest |
java | netty__netty | transport-native-epoll/src/test/java/io/netty/channel/epoll/EpollDomainSocketSslEchoTest.java | {
"start": 932,
"end": 1330
} | class ____ extends SocketSslEchoTest {
@Override
protected SocketAddress newSocketAddress() {
return EpollSocketTestPermutation.newDomainSocketAddress();
}
@Override
protected List<TestsuitePermutation.BootstrapComboFactory<ServerBootstrap, Bootstrap>> newFactories() {
return EpollSocketTestPermutation.INSTANCE.domainSocket();
}
}
| EpollDomainSocketSslEchoTest |
java | apache__camel | core/camel-core-model/src/main/java/org/apache/camel/model/RemoveHeaderDefinition.java | {
"start": 1253,
"end": 2260
} | class ____ extends NoOutputDefinition<RemoveHeaderDefinition> {
@XmlAttribute(required = true)
private String name;
public RemoveHeaderDefinition() {
}
protected RemoveHeaderDefinition(RemoveHeaderDefinition source) {
super(source);
this.name = source.name;
}
public RemoveHeaderDefinition(String headerName) {
setName(headerName);
}
@Override
public RemoveHeaderDefinition copyDefinition() {
return new RemoveHeaderDefinition(this);
}
@Override
public String toString() {
return "RemoveHeader[" + getName() + "]";
}
@Override
public String getShortName() {
return "removeHeader";
}
@Override
public String getLabel() {
return "removeHeader[" + getName() + "]";
}
public String getName() {
return name;
}
/**
* Name of header to remove
*/
public void setName(String name) {
this.name = name;
}
}
| RemoveHeaderDefinition |
java | elastic__elasticsearch | x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/rest/action/RestDeleteWatchAction.java | {
"start": 1084,
"end": 2231
} | class ____ extends BaseRestHandler {
@Override
public List<Route> routes() {
return List.of(new Route(DELETE, "/_watcher/watch/{id}"));
}
@Override
public String getName() {
return "watcher_delete_watch";
}
@Override
protected RestChannelConsumer prepareRequest(final RestRequest request, NodeClient client) {
DeleteWatchRequest deleteWatchRequest = new DeleteWatchRequest(request.param("id"));
return channel -> client.execute(DeleteWatchAction.INSTANCE, deleteWatchRequest, new RestBuilderListener<>(channel) {
@Override
public RestResponse buildResponse(DeleteWatchResponse response, XContentBuilder builder) throws Exception {
builder.startObject()
.field("_id", response.getId())
.field("_version", response.getVersion())
.field("found", response.isFound())
.endObject();
RestStatus status = response.isFound() ? OK : NOT_FOUND;
return new RestResponse(status, builder);
}
});
}
}
| RestDeleteWatchAction |
java | bumptech__glide | library/test/src/test/java/com/bumptech/glide/load/resource/file/FileDecoderTest.java | {
"start": 422,
"end": 873
} | class ____ {
private FileDecoder decoder;
private Options options;
@Before
public void setUp() {
decoder = new FileDecoder();
options = new Options();
}
@Test
public void testReturnsGivenFileAsResource() throws IOException {
File expected = new File("testFile");
Resource<File> decoded = Preconditions.checkNotNull(decoder.decode(expected, 1, 1, options));
assertEquals(expected, decoded.get());
}
}
| FileDecoderTest |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/write/DynamicUpdateTests.java | {
"start": 7003,
"end": 7715
} | class ____ {
@Id
private Integer id;
@Basic
private String code;
@Basic
private String controller;
private AllJob() {
// for use by Hibernate
}
public AllJob(Integer id, String code, String controller) {
this.id = id;
this.code = code;
this.controller = controller;
}
public Integer getId() {
return id;
}
public String getCode() {
return code;
}
public void setCode(String code) {
this.code = code;
}
public String getController() {
return controller;
}
public void setController(String controller) {
this.controller = controller;
}
}
@Entity( name = "VersionedJob" )
@Table( name = "VersionedJob" )
@DynamicUpdate
public static | AllJob |
java | apache__flink | flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/functions/aggfunctions/BatchApproxCountDistinctAggFunctionTest.java | {
"start": 10476,
"end": 10971
} | class ____
extends NumberApproxCountDistinctAggFunctionTestBase<Integer> {
@Override
protected AggregateFunction<Long, HllBuffer> getAggregator() {
return new TimeApproxCountDistinctAggFunction();
}
@Override
protected Integer getValue(String v) {
return Integer.valueOf(v);
}
}
/** Test for {@link TimestampApproxCountDistinctAggFunction}. */
@Nested
final | TimeApproxCountDistinctAggFunctionTest |
java | netty__netty | codec-socks/src/main/java/io/netty/handler/codec/socks/SocksAuthStatus.java | {
"start": 683,
"end": 1289
} | enum ____ {
SUCCESS((byte) 0x00),
FAILURE((byte) 0xff);
private final byte b;
SocksAuthStatus(byte b) {
this.b = b;
}
/**
* @deprecated Use {@link #valueOf(byte)} instead.
*/
@Deprecated
public static SocksAuthStatus fromByte(byte b) {
return valueOf(b);
}
public static SocksAuthStatus valueOf(byte b) {
for (SocksAuthStatus code : values()) {
if (code.b == b) {
return code;
}
}
return FAILURE;
}
public byte byteValue() {
return b;
}
}
| SocksAuthStatus |
java | spring-projects__spring-framework | spring-expression/src/test/java/org/springframework/expression/spel/StandardTypeComparatorTests.java | {
"start": 4859,
"end": 5101
} | class ____ implements Comparable<ComparableType> {
private final int id;
public ComparableType(int id) {
this.id = id;
}
@Override
public int compareTo(ComparableType other) {
return this.id - other.id;
}
}
}
| ComparableType |
java | apache__flink | flink-streaming-java/src/test/java/org/apache/flink/streaming/api/operators/AbstractUdfStreamOperatorLifecycleTest.java | {
"start": 4561,
"end": 4884
} | interface ____.apache.flink.streaming.api.operators.StreamTaskStateInitializer], "
+ "notifyCheckpointAborted[long], "
+ "notifyCheckpointComplete[long], "
+ "open[], "
+ "prepareSnapshotPreBarrier[long], "
+ "setCurrentKey[ | org |
java | apache__camel | core/camel-core-reifier/src/main/java/org/apache/camel/reifier/rest/RestBindingReifier.java | {
"start": 1337,
"end": 4549
} | class ____ extends AbstractReifier {
private final RestBindingDefinition definition;
public RestBindingReifier(Route route, RestBindingDefinition definition) {
super(route);
this.definition = definition;
}
public RestBindingAdvice createRestBindingAdvice() throws Exception {
RestConfiguration config = CamelContextHelper.getRestConfiguration(camelContext, definition.getComponent());
RestBindingConfiguration rbc = new RestBindingConfiguration();
// these options can be overridden per rest verb
String mode = config.getBindingMode().name();
if (definition.getBindingMode() != null) {
mode = parse(RestBindingMode.class, definition.getBindingMode()).name();
}
rbc.setBindingMode(mode);
rbc.setBindingPackageScan(config.getBindingPackageScan());
boolean cors = config.isEnableCORS();
if (definition.getEnableCORS() != null) {
cors = parseBoolean(definition.getEnableCORS(), false);
}
rbc.setEnableCORS(cors);
boolean noContentResponse = config.isEnableNoContentResponse();
if (definition.getEnableNoContentResponse() != null) {
noContentResponse = parseBoolean(definition.getEnableNoContentResponse(), false);
}
rbc.setEnableNoContentResponse(noContentResponse);
boolean skip = config.isSkipBindingOnErrorCode();
if (definition.getSkipBindingOnErrorCode() != null) {
skip = parseBoolean(definition.getSkipBindingOnErrorCode(), false);
}
rbc.setSkipBindingOnErrorCode(skip);
boolean validation = config.isClientRequestValidation();
if (definition.getClientRequestValidation() != null) {
validation = parseBoolean(definition.getClientRequestValidation(), false);
}
rbc.setClientRequestValidation(validation);
validation = config.isClientResponseValidation();
if (definition.getClientResponseValidation() != null) {
validation = parseBoolean(definition.getClientResponseValidation(), false);
}
rbc.setClientResponseValidation(validation);
rbc.setConsumes(parseString(definition.getConsumes()));
rbc.setProduces(parseString(definition.getProduces()));
rbc.setCorsHeaders(config.getCorsHeaders());
rbc.setQueryDefaultValues(definition.getDefaultValues());
rbc.setQueryAllowedValues(definition.getAllowedValues());
rbc.setRequiredBody(definition.getRequiredBody() != null && definition.getRequiredBody());
rbc.setRequiredQueryParameters(definition.getRequiredQueryParameters());
rbc.setRequiredHeaders(definition.getRequiredHeaders());
rbc.setType(parseString(definition.getType()));
rbc.setTypeClass(definition.getTypeClass());
rbc.setOutType(parseString(definition.getOutType()));
rbc.setOutTypeClass(definition.getOutTypeClass());
rbc.setResponseCodes(definition.getResponseCodes());
rbc.setResponseHeaders(definition.getResponseHeaders());
// use factory to create advice
return RestBindingAdviceFactory.build(camelContext, rbc);
}
}
| RestBindingReifier |
java | spring-projects__spring-security | core/src/test/java/org/springframework/security/authorization/method/SecuredAuthorizationManagerTests.java | {
"start": 9011,
"end": 9117
} | interface ____ {
@Secured("ROLE_ADMIN")
void inheritedAnnotations();
}
public | InterfaceAnnotationsOne |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/PromqlBaseParser.java | {
"start": 61554,
"end": 64027
} | class ____ extends ParserRuleContext {
public OffsetContext offset() {
return getRuleContext(OffsetContext.class,0);
}
public AtContext at() {
return getRuleContext(AtContext.class,0);
}
@SuppressWarnings("this-escape")
public EvaluationContext(ParserRuleContext parent, int invokingState) {
super(parent, invokingState);
}
@Override public int getRuleIndex() { return RULE_evaluation; }
@Override
public void enterRule(ParseTreeListener listener) {
if ( listener instanceof PromqlBaseParserListener ) ((PromqlBaseParserListener)listener).enterEvaluation(this);
}
@Override
public void exitRule(ParseTreeListener listener) {
if ( listener instanceof PromqlBaseParserListener ) ((PromqlBaseParserListener)listener).exitEvaluation(this);
}
@Override
public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
if ( visitor instanceof PromqlBaseParserVisitor ) return ((PromqlBaseParserVisitor<? extends T>)visitor).visitEvaluation(this);
else return visitor.visitChildren(this);
}
}
public final EvaluationContext evaluation() throws RecognitionException {
EvaluationContext _localctx = new EvaluationContext(_ctx, getState());
enterRule(_localctx, 30, RULE_evaluation);
try {
setState(241);
_errHandler.sync(this);
switch (_input.LA(1)) {
case OFFSET:
enterOuterAlt(_localctx, 1);
{
setState(233);
offset();
setState(235);
_errHandler.sync(this);
switch ( getInterpreter().adaptivePredict(_input,31,_ctx) ) {
case 1:
{
setState(234);
at();
}
break;
}
}
break;
case AT:
enterOuterAlt(_localctx, 2);
{
setState(237);
at();
setState(239);
_errHandler.sync(this);
switch ( getInterpreter().adaptivePredict(_input,32,_ctx) ) {
case 1:
{
setState(238);
offset();
}
break;
}
}
break;
default:
throw new NoViableAltException(this);
}
}
catch (RecognitionException re) {
_localctx.exception = re;
_errHandler.reportError(this, re);
_errHandler.recover(this, re);
}
finally {
exitRule();
}
return _localctx;
}
@SuppressWarnings("CheckReturnValue")
public static | EvaluationContext |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java | {
"start": 6065,
"end": 25860
} | class ____ {
private static final File TMP_DIR = new File(
System.getProperty("test.build.data",
System.getProperty("java.io.tmpdir")),
TestNMLeveldbStateStoreService.class.getName());
YarnConfiguration conf;
NMLeveldbStateStoreService stateStore;
@BeforeEach
public void setup() throws IOException {
FileUtil.fullyDelete(TMP_DIR);
conf = new YarnConfiguration();
conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, true);
conf.set(YarnConfiguration.NM_RECOVERY_DIR, TMP_DIR.toString());
restartStateStore();
}
@AfterEach
public void cleanup() throws IOException {
if (stateStore != null) {
stateStore.close();
}
FileUtil.fullyDelete(TMP_DIR);
}
private List<RecoveredContainerState> loadContainersState(
RecoveryIterator<RecoveredContainerState> it) throws IOException {
List<RecoveredContainerState> containers =
new ArrayList<RecoveredContainerState>();
while (it.hasNext()) {
RecoveredContainerState rcs = it.next();
containers.add(rcs);
}
return containers;
}
private List<ContainerManagerApplicationProto> loadApplicationProtos(
RecoveryIterator<ContainerManagerApplicationProto> it)
throws IOException {
List<ContainerManagerApplicationProto> applicationProtos =
new ArrayList<ContainerManagerApplicationProto>();
while (it.hasNext()) {
applicationProtos.add(it.next());
}
return applicationProtos;
}
private List<DeletionServiceDeleteTaskProto> loadDeletionTaskProtos(
RecoveryIterator<DeletionServiceDeleteTaskProto> it) throws IOException {
List<DeletionServiceDeleteTaskProto> deleteTaskProtos =
new ArrayList<DeletionServiceDeleteTaskProto>();
while (it.hasNext()) {
deleteTaskProtos.add(it.next());
}
return deleteTaskProtos;
}
private Map<String, RecoveredUserResources> loadUserResources(
RecoveryIterator<Map.Entry<String, RecoveredUserResources>> it)
throws IOException {
Map<String, RecoveredUserResources> userResources =
new HashMap<String, RecoveredUserResources>();
while (it.hasNext()) {
Map.Entry<String, RecoveredUserResources> entry = it.next();
userResources.put(entry.getKey(), entry.getValue());
}
return userResources;
}
private Map<ApplicationAttemptId, MasterKey> loadNMTokens(
RecoveryIterator<Map.Entry<ApplicationAttemptId, MasterKey>> it)
throws IOException {
Map<ApplicationAttemptId, MasterKey> nmTokens =
new HashMap<ApplicationAttemptId, MasterKey>();
while (it.hasNext()) {
Map.Entry<ApplicationAttemptId, MasterKey> entry = it.next();
nmTokens.put(entry.getKey(), entry.getValue());
}
return nmTokens;
}
private Map<ContainerId, Long> loadContainerTokens(
RecoveryIterator<Map.Entry<ContainerId, Long>> it) throws IOException {
Map<ContainerId, Long> containerTokens =
new HashMap<ContainerId, Long>();
while (it.hasNext()) {
Map.Entry<ContainerId, Long> entry = it.next();
containerTokens.put(entry.getKey(), entry.getValue());
}
return containerTokens;
}
private List<LocalizedResourceProto> loadCompletedResources(
RecoveryIterator<LocalizedResourceProto> it) throws IOException {
List<LocalizedResourceProto> completedResources =
new ArrayList<LocalizedResourceProto>();
while (it != null && it.hasNext()) {
completedResources.add(it.next());
}
return completedResources;
}
private Map<LocalResourceProto, Path> loadStartedResources(
RecoveryIterator <Map.Entry<LocalResourceProto, Path>> it)
throws IOException {
Map<LocalResourceProto, Path> startedResources =
new HashMap<LocalResourceProto, Path>();
while (it != null &&it.hasNext()) {
Map.Entry<LocalResourceProto, Path> entry = it.next();
startedResources.put(entry.getKey(), entry.getValue());
}
return startedResources;
}
private void restartStateStore() throws IOException {
// need to close so leveldb releases database lock
if (stateStore != null) {
stateStore.close();
}
stateStore = new NMLeveldbStateStoreService();
stateStore.init(conf);
stateStore.start();
}
private void verifyEmptyState() throws IOException {
RecoveredLocalizationState state = stateStore.loadLocalizationState();
assertNotNull(state);
LocalResourceTrackerState pubts = state.getPublicTrackerState();
assertNotNull(pubts);
assertTrue(loadCompletedResources(pubts.getCompletedResourcesIterator())
.isEmpty());
assertTrue(loadStartedResources(pubts.getStartedResourcesIterator())
.isEmpty());
assertTrue(loadUserResources(state.getIterator()).isEmpty());
}
@Test
public void testIsNewlyCreated() throws IOException {
assertTrue(stateStore.isNewlyCreated());
restartStateStore();
assertFalse(stateStore.isNewlyCreated());
}
@Test
public void testEmptyState() throws IOException {
assertTrue(stateStore.canRecover());
verifyEmptyState();
}
@Test
public void testCheckVersion() throws IOException {
// default version
Version defaultVersion = stateStore.getCurrentVersion();
assertEquals(defaultVersion, stateStore.loadVersion());
// compatible version
Version compatibleVersion =
Version.newInstance(defaultVersion.getMajorVersion(),
defaultVersion.getMinorVersion() + 2);
stateStore.storeVersion(compatibleVersion);
assertEquals(compatibleVersion, stateStore.loadVersion());
restartStateStore();
// overwrite the compatible version
assertEquals(defaultVersion, stateStore.loadVersion());
// incompatible version
Version incompatibleVersion =
Version.newInstance(defaultVersion.getMajorVersion() + 1,
defaultVersion.getMinorVersion());
stateStore.storeVersion(incompatibleVersion);
try {
restartStateStore();
fail("Incompatible version, should expect fail here.");
} catch (ServiceStateException e) {
assertTrue(e.getMessage().contains("Incompatible version for NM state:"),
"Exception message mismatch");
}
}
@Test
public void testApplicationStorage() throws IOException {
// test empty when no state
RecoveredApplicationsState state = stateStore.loadApplicationsState();
List<ContainerManagerApplicationProto> apps =
loadApplicationProtos(state.getIterator());
assertTrue(apps.isEmpty());
// store an application and verify recovered
final ApplicationId appId1 = ApplicationId.newInstance(1234, 1);
ContainerManagerApplicationProto.Builder builder =
ContainerManagerApplicationProto.newBuilder();
builder.setId(((ApplicationIdPBImpl) appId1).getProto());
builder.setUser("user1");
ContainerManagerApplicationProto appProto1 = builder.build();
stateStore.storeApplication(appId1, appProto1);
restartStateStore();
state = stateStore.loadApplicationsState();
apps = loadApplicationProtos(state.getIterator());
assertEquals(1, apps.size());
assertEquals(appProto1, apps.get(0));
// add a new app
final ApplicationId appId2 = ApplicationId.newInstance(1234, 2);
builder = ContainerManagerApplicationProto.newBuilder();
builder.setId(((ApplicationIdPBImpl) appId2).getProto());
builder.setUser("user2");
ContainerManagerApplicationProto appProto2 = builder.build();
stateStore.storeApplication(appId2, appProto2);
restartStateStore();
state = stateStore.loadApplicationsState();
apps = loadApplicationProtos(state.getIterator());
assertEquals(2, apps.size());
assertTrue(apps.contains(appProto1));
assertTrue(apps.contains(appProto2));
// test removing an application
stateStore.removeApplication(appId2);
restartStateStore();
state = stateStore.loadApplicationsState();
apps = loadApplicationProtos(state.getIterator());
assertEquals(1, apps.size());
assertEquals(appProto1, apps.get(0));
}
@Test
public void testContainerStorageWhenContainerIsRequested()
throws IOException {
final ContainerStateConstructParams containerParams =
storeContainerInStateStore();
restartStateStore();
List<RecoveredContainerState> recoveredContainers =
loadContainersState(stateStore.getContainerStateIterator());
assertEquals(1, recoveredContainers.size());
final RecoveredContainerState rcs = recoveredContainers.get(0);
assertEquals(0, rcs.getVersion());
assertEquals(containerParams.getContainerStartTime().longValue(),
rcs.getStartTime());
assertEquals(RecoveredContainerStatus.REQUESTED, rcs.getStatus());
assertEquals(ContainerExitStatus.INVALID, rcs.getExitCode());
assertEquals(false, rcs.getKilled());
assertEquals(containerParams.getContainerRequest(), rcs.getStartRequest());
assertTrue(rcs.getDiagnostics().isEmpty());
assertEquals(containerParams.getContainerResource(), rcs.getCapability());
}
@Test
public void testContainerStorageWhenContainerIsQueued()
throws IOException {
ContainerStateConstructParams containerParams =
storeContainerInStateStore();
ContainerId containerId = containerParams.getContainerId();
StartContainerRequest containerReq = containerParams.getContainerRequest();
Resource containerResource = containerParams.getContainerResource();
ApplicationAttemptId appAttemptId = containerParams.getAppAttemptId();
storeNewContainerRecordWithoutStartContainerRequest(appAttemptId);
stateStore.storeContainerQueued(containerId);
restartStateStore();
List<RecoveredContainerState> recoveredContainers =
loadContainersState(stateStore.getContainerStateIterator());
assertEquals(1, recoveredContainers.size());
RecoveredContainerState rcs = recoveredContainers.get(0);
assertEquals(RecoveredContainerStatus.QUEUED, rcs.getStatus());
assertEquals(ContainerExitStatus.INVALID, rcs.getExitCode());
assertEquals(false, rcs.getKilled());
assertEquals(containerReq, rcs.getStartRequest());
assertTrue(rcs.getDiagnostics().isEmpty());
assertEquals(containerResource, rcs.getCapability());
}
@Test
public void testContainerStorageWhenContainerIsLaunched()
throws IOException {
ContainerStateConstructParams containerParams =
storeContainerInStateStore();
ContainerId containerId = containerParams.getContainerId();
StartContainerRequest containerReq = containerParams.getContainerRequest();
Resource containerResource = containerParams.getContainerResource();
ApplicationAttemptId appAttemptId = containerParams.getAppAttemptId();
storeNewContainerRecordWithoutStartContainerRequest(appAttemptId);
stateStore.storeContainerQueued(containerId);
StringBuilder diags = launchContainerWithDiagnostics(containerId);
restartStateStore();
List<RecoveredContainerState> recoveredContainers =
loadContainersState(stateStore.getContainerStateIterator());
assertEquals(1, recoveredContainers.size());
RecoveredContainerState rcs = recoveredContainers.get(0);
assertEquals(RecoveredContainerStatus.LAUNCHED, rcs.getStatus());
assertEquals(ContainerExitStatus.INVALID, rcs.getExitCode());
assertEquals(false, rcs.getKilled());
assertEquals(containerReq, rcs.getStartRequest());
assertEquals(diags.toString(), rcs.getDiagnostics());
assertEquals(containerResource, rcs.getCapability());
}
@Test
public void testContainerStorageWhenContainerIsPaused()
throws IOException {
ContainerStateConstructParams containerParams =
storeContainerInStateStore();
ContainerId containerId = containerParams.getContainerId();
StartContainerRequest containerReq = containerParams.getContainerRequest();
ApplicationAttemptId appAttemptId = containerParams.getAppAttemptId();
storeNewContainerRecordWithoutStartContainerRequest(appAttemptId);
stateStore.storeContainerQueued(containerId);
stateStore.storeContainerPaused(containerId);
restartStateStore();
List<RecoveredContainerState> recoveredContainers =
loadContainersState(stateStore.getContainerStateIterator());
assertEquals(1, recoveredContainers.size());
RecoveredContainerState rcs = recoveredContainers.get(0);
assertEquals(RecoveredContainerStatus.PAUSED, rcs.getStatus());
assertEquals(ContainerExitStatus.INVALID, rcs.getExitCode());
assertEquals(false, rcs.getKilled());
assertEquals(containerReq, rcs.getStartRequest());
// Resume the container
stateStore.removeContainerPaused(containerId);
restartStateStore();
recoveredContainers =
loadContainersState(stateStore.getContainerStateIterator());
assertEquals(1, recoveredContainers.size());
}
@Test
public void testContainerStorageWhenContainerSizeIncreased()
throws IOException {
ContainerStateConstructParams containerParams =
storeContainerInStateStore();
ContainerId containerId = containerParams.getContainerId();
ApplicationAttemptId appAttemptId = containerParams.getAppAttemptId();
storeNewContainerRecordWithoutStartContainerRequest(appAttemptId);
stateStore.storeContainerQueued(containerId);
launchContainerWithDiagnostics(containerId);
increaseContainerSize(containerId);
restartStateStore();
List<RecoveredContainerState> recoveredContainers =
loadContainersState(stateStore.getContainerStateIterator());
assertEquals(1, recoveredContainers.size());
RecoveredContainerState rcs = recoveredContainers.get(0);
assertEquals(0, rcs.getVersion());
assertEquals(RecoveredContainerStatus.LAUNCHED, rcs.getStatus());
assertEquals(ContainerExitStatus.INVALID, rcs.getExitCode());
assertEquals(false, rcs.getKilled());
assertEquals(Resource.newInstance(2468, 4), rcs.getCapability());
}
@Test
public void testContainerStorageWhenContainerMarkedAsKilled()
throws IOException {
ContainerStateConstructParams containerParams =
storeContainerInStateStore();
ContainerId containerId = containerParams.getContainerId();
ApplicationAttemptId appAttemptId = containerParams.getAppAttemptId();
storeNewContainerRecordWithoutStartContainerRequest(appAttemptId);
stateStore.storeContainerQueued(containerId);
StringBuilder diags = launchContainerWithDiagnostics(containerId);
ContainerTokenIdentifier updateTokenIdentifier =
increaseContainerSize(containerId);
markContainerAsKilled(containerId, diags);
restartStateStore();
List<RecoveredContainerState> recoveredContainers =
loadContainersState(stateStore.getContainerStateIterator());
assertEquals(1, recoveredContainers.size());
RecoveredContainerState rcs = recoveredContainers.get(0);
assertEquals(RecoveredContainerStatus.LAUNCHED, rcs.getStatus());
assertEquals(ContainerExitStatus.INVALID, rcs.getExitCode());
assertTrue(rcs.getKilled());
ContainerTokenIdentifier tokenReadFromRequest = BuilderUtils
.newContainerTokenIdentifier(rcs.getStartRequest()
.getContainerToken());
assertEquals(updateTokenIdentifier, tokenReadFromRequest);
assertEquals(diags.toString(), rcs.getDiagnostics());
}
@Test
public void testContainerStorageWhenContainerCompleted()
throws IOException {
ContainerStateConstructParams containerParams =
storeContainerInStateStore();
ContainerId containerId = containerParams.getContainerId();
ApplicationAttemptId appAttemptId = containerParams.getAppAttemptId();
storeNewContainerRecordWithoutStartContainerRequest(appAttemptId);
stateStore.storeContainerQueued(containerId);
StringBuilder diags = launchContainerWithDiagnostics(containerId);
markContainerAsKilled(containerId, diags);
// add yet more diags, mark container completed
diags.append("some final diags");
stateStore.storeContainerDiagnostics(containerId, diags);
stateStore.storeContainerCompleted(containerId, 21);
restartStateStore();
List<RecoveredContainerState> recoveredContainers =
loadContainersState(stateStore.getContainerStateIterator());
assertEquals(1, recoveredContainers.size());
RecoveredContainerState rcs = recoveredContainers.get(0);
assertEquals(RecoveredContainerStatus.COMPLETED, rcs.getStatus());
assertEquals(21, rcs.getExitCode());
assertTrue(rcs.getKilled());
assertEquals(diags.toString(), rcs.getDiagnostics());
}
@Test
public void testContainerStorage() throws IOException {
ContainerStateConstructParams containerParams =
storeContainerInStateStore();
ContainerId containerId = containerParams.getContainerId();
// remaining retry attempts, work dir and log dir are stored
stateStore.storeContainerRemainingRetryAttempts(containerId, 6);
stateStore.storeContainerWorkDir(containerId, "/test/workdir");
stateStore.storeContainerLogDir(containerId, "/test/logdir");
restartStateStore();
List<RecoveredContainerState> recoveredContainers =
loadContainersState(stateStore.getContainerStateIterator());
assertEquals(1, recoveredContainers.size());
RecoveredContainerState rcs = recoveredContainers.get(0);
assertEquals(6, rcs.getRemainingRetryAttempts());
assertEquals("/test/workdir", rcs.getWorkDir());
assertEquals("/test/logdir", rcs.getLogDir());
validateRetryAttempts(containerId);
}
@Test
public void testContainerStorageWhenContainerRemoved()
throws IOException {
ContainerStateConstructParams containerParams =
storeContainerInStateStore();
ContainerId containerId = containerParams.getContainerId();
// remove the container and verify not recovered
stateStore.removeContainer(containerId);
restartStateStore();
List<RecoveredContainerState> recoveredContainers =
loadContainersState(stateStore.getContainerStateIterator());
assertTrue(recoveredContainers.isEmpty());
// recover again to check remove clears all containers
restartStateStore();
NMStateStoreService nmStoreSpy = spy(stateStore);
loadContainersState(nmStoreSpy.getContainerStateIterator());
verify(nmStoreSpy, times(0)).removeContainer(any(ContainerId.class));
}
private ContainerStateConstructParams storeContainerInStateStore()
throws IOException {
// test empty when no state
List<RecoveredContainerState> recoveredContainers =
loadContainersState(stateStore.getContainerStateIterator());
assertTrue(recoveredContainers.isEmpty());
// create a container request
ApplicationId appId = ApplicationId.newInstance(1234, 3);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 4);
ContainerId containerId = ContainerId.newContainerId(appAttemptId, 5);
Resource containerResource = Resource.newInstance(1024, 2);
StartContainerRequest containerReq =
createContainerRequest(containerId, containerResource);
long anyContainerStartTime = 1573155078494L;
stateStore.storeContainer(containerId, 0, anyContainerStartTime,
containerReq);
// verify the container version key is not stored for new containers
DB db = stateStore.getDB();
assertNull(db.get(bytes(stateStore.getContainerVersionKey(containerId.toString()))),
"version key present for new container");
return new ContainerStateConstructParams()
.setContainerRequest(containerReq)
.setContainerResource(containerResource)
.setContainerStartTime(anyContainerStartTime)
.setAppAttemptId(appAttemptId)
.setContainerId(containerId);
}
private static | TestNMLeveldbStateStoreService |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/UnnecessaryParenthesesTest.java | {
"start": 3844,
"end": 4249
} | class ____ {
Predicate<Test> foo(Predicate<Test> a) {
// BUG: Diagnostic contains:
return foo((this::equals));
}
}
""")
.doTest();
}
@Test
public void lambdaLambda() {
helper
.addSourceLines(
"Test.java",
"""
import java.util.function.Function;
| Test |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/engine/FlushListeners.java | {
"start": 1011,
"end": 5902
} | class ____ implements Closeable {
private final Logger logger;
private final ThreadContext threadContext;
private volatile Tuple<Long, Translog.Location> lastCommit;
/**
* Is this closed? If true then we won't add more listeners and have flushed all pending listeners.
*/
private volatile boolean closed = false;
private volatile List<Tuple<Translog.Location, ActionListener<Long>>> locationCommitListeners = null;
public FlushListeners(final Logger logger, final ThreadContext threadContext) {
this.logger = logger;
this.threadContext = threadContext;
}
public void addOrNotify(Translog.Location location, ActionListener<Long> listener) {
requireNonNull(listener, "listener cannot be null");
requireNonNull(location, "location cannot be null");
Tuple<Long, Translog.Location> lastCommitBeforeSynchronized = lastCommit;
if (lastCommitBeforeSynchronized != null && lastCommitBeforeSynchronized.v2().compareTo(location) >= 0) {
// Location already visible, just call the listener
listener.onResponse(lastCommitBeforeSynchronized.v1());
return;
}
synchronized (this) {
if (closed) {
throw new IllegalStateException("can't wait for flush on a closed index");
}
Tuple<Long, Translog.Location> lastCommitAfterSynchronized = lastCommit;
if (lastCommitAfterSynchronized != null && lastCommitAfterSynchronized.v2().compareTo(location) >= 0) {
// Location already visible, just call the listener
listener.onResponse(lastCommitAfterSynchronized.v1());
return;
}
List<Tuple<Translog.Location, ActionListener<Long>>> listeners = locationCommitListeners;
ActionListener<Long> contextPreservingListener = ContextPreservingActionListener.wrapPreservingContext(listener, threadContext);
if (listeners == null) {
listeners = new ArrayList<>();
}
// We have a free slot so register the listener
listeners.add(new Tuple<>(location, contextPreservingListener));
locationCommitListeners = listeners;
}
}
public void afterFlush(final long generation, final Translog.Location lastCommitLocation) {
this.lastCommit = new Tuple<>(generation, lastCommitLocation);
List<Tuple<Translog.Location, ActionListener<Long>>> listenersToFire = null;
List<Tuple<Translog.Location, ActionListener<Long>>> listenersToReregister = null;
synchronized (this) {
// No listeners to check so just bail early
if (locationCommitListeners == null) {
return;
}
for (Tuple<Translog.Location, ActionListener<Long>> tuple : locationCommitListeners) {
Translog.Location location = tuple.v1();
if (location.compareTo(lastCommitLocation) <= 0) {
if (listenersToFire == null) {
listenersToFire = new ArrayList<>();
}
listenersToFire.add(tuple);
} else {
if (listenersToReregister == null) {
listenersToReregister = new ArrayList<>();
}
listenersToReregister.add(tuple);
}
}
locationCommitListeners = listenersToReregister;
}
fireListeners(generation, listenersToFire);
}
private void fireListeners(final long generation, final List<Tuple<Translog.Location, ActionListener<Long>>> listenersToFire) {
if (listenersToFire != null) {
for (final Tuple<Translog.Location, ActionListener<Long>> listener : listenersToFire) {
try {
listener.v2().onResponse(generation);
} catch (final Exception e) {
logger.warn("error firing location refresh listener", e);
}
}
}
}
@Override
public void close() {
synchronized (this) {
if (closed == false) {
closed = true;
if (locationCommitListeners != null) {
for (final Tuple<Translog.Location, ActionListener<Long>> listener : locationCommitListeners) {
try {
listener.v2().onFailure(new AlreadyClosedException("shard is closed"));
} catch (final Exception e) {
logger.warn("error firing checkpoint refresh listener", e);
assert false;
}
}
locationCommitListeners = null;
}
}
}
}
}
| FlushListeners |
java | redisson__redisson | redisson/src/main/java/org/redisson/api/queue/DequeMoveDestination.java | {
"start": 734,
"end": 1239
} | interface ____ extends DequeMoveArgs {
/**
* Define to add removed element as the head element of destination queue.
*
* @param name - name of destination queue
* @return arguments object
*/
DequeMoveArgs addFirstTo(String name);
/**
* Define to add removed element as the head element of specified queue.
*
* @param name - name of destination queue
* @return arguments object
*/
DequeMoveArgs addLastTo(String name);
}
| DequeMoveDestination |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/UserRoleMapper.java | {
"start": 6617,
"end": 8724
} | class ____ {
private static final Logger LOGGER = LogManager.getLogger(DistinguishedNameNormalizer.class);
private static final SoftReference<String> NULL_REF = new SoftReference<>(null);
private final Map<String, SoftReference<String>> cache = new HashMap<>();
/**
* Parse the input string to a DN and returns its normalized form.
* @param str String that may represent a DN
* @return The normalized DN form of the input string or {@code null} if input string is not a DN
*/
public String normalize(String str) {
final SoftReference<String> normalizedDnRef = cache.get(str);
if (normalizedDnRef == NULL_REF) {
return null;
}
if (normalizedDnRef != null) {
final String normalizedDn = normalizedDnRef.get();
if (normalizedDn != null) {
return normalizedDn;
}
}
final String normalizedDn = doNormalize(str);
if (normalizedDn == null) {
cache.put(str, NULL_REF);
} else {
cache.put(str, new SoftReference<>(normalizedDn));
}
return normalizedDn;
}
String doNormalize(String str) {
final DN dn;
try {
dn = new DN(str);
} catch (LDAPException | LDAPSDKUsageException e) {
if (LOGGER.isTraceEnabled()) {
LOGGER.trace(() -> "failed to parse [" + str + "] as a DN", e);
}
return null;
}
return dn.toNormalizedString();
}
}
/**
* A specialised predicate for fields that might be a DistinguishedName (e.g "dn" or "groups").
*
* The X500 specs define how to compare DistinguishedNames (but we mostly rely on {@link DN#equals(Object)}),
* which means "CN=me,DC=example,DC=com" should be equal to "cn=me, dc=Example, dc=COM" (and other variations).
* The {@link FieldExpression} | DistinguishedNameNormalizer |
java | apache__camel | components/camel-jms/src/main/java/org/apache/camel/component/jms/JmsMessage.java | {
"start": 1544,
"end": 9252
} | class ____ extends DefaultMessage {
private static final Logger LOG = LoggerFactory.getLogger(JmsMessage.class);
private Message jmsMessage;
private Session jmsSession;
private JmsBinding binding;
public JmsMessage(Exchange exchange, Message jmsMessage, Session jmsSession, JmsBinding binding) {
super(exchange);
setJmsMessage(jmsMessage);
setJmsSession(jmsSession);
setBinding(binding);
}
public void init(Exchange exchange, Message jmsMessage, Session jmsSession, JmsBinding binding) {
setExchange(exchange);
setJmsMessage(jmsMessage);
setJmsSession(jmsSession);
setBinding(binding);
// need to populate initial headers when we use pooled exchanges
populateInitialHeaders(getHeaders());
}
@Override
public void reset() {
super.reset();
setExchange(null);
jmsMessage = null;
jmsSession = null;
binding = null;
}
@Override
public String toString() {
// do not print jmsMessage as there could be sensitive details
if (jmsMessage != null) {
try {
return "JmsMessage[JmsMessageID: " + jmsMessage.getJMSMessageID() + "]";
} catch (Exception e) {
// ignore
}
}
return "JmsMessage@" + ObjectHelper.getIdentityHashCode(this);
}
@Override
public void copyFrom(org.apache.camel.Message that) {
if (that == this) {
// the same instance so do not need to copy
return;
}
// must initialize headers before we set the JmsMessage to avoid Camel
// populating it before we do the copy
getHeaders().clear();
boolean copyMessageId = true;
if (that instanceof JmsMessage thatMessage) {
this.jmsMessage = thatMessage.jmsMessage;
if (this.jmsMessage != null) {
// for performance lets not copy the messageID if we are a JMS message
copyMessageId = false;
}
}
if (copyMessageId) {
setMessageId(that.getMessageId());
}
// cover over exchange if none has been assigned
if (getExchange() == null) {
setExchange(that.getExchange());
}
// copy body and fault flag
copyBody(that, this);
// we have already cleared the headers
if (that.hasHeaders()) {
getHeaders().putAll(that.getHeaders());
}
}
public JmsBinding getBinding() {
if (binding == null) {
binding = ExchangeHelper.getBinding(getExchange(), JmsBinding.class);
}
return binding;
}
public void setBinding(JmsBinding binding) {
this.binding = binding;
}
/**
* Returns the underlying JMS message
*/
public Message getJmsMessage() {
return jmsMessage;
}
public void setJmsMessage(Message jmsMessage) {
if (jmsMessage != null) {
try {
setMessageId(jmsMessage.getJMSMessageID());
} catch (JMSException e) {
LOG.warn("Unable to retrieve JMSMessageID from JMS Message", e);
}
}
this.jmsMessage = jmsMessage;
setPayloadForTrait(MessageTrait.REDELIVERY, JmsMessageHelper.evalRedeliveryMessageTrait(jmsMessage));
}
/**
* Returns the underlying JMS session.
* <p/>
* This may be <tt>null</tt> if using {@link org.apache.camel.component.jms.JmsPollingConsumer}, or the broker
* component from Apache ActiveMQ 5.11.x or older.
*/
public Session getJmsSession() {
return jmsSession;
}
public void setJmsSession(Session jmsSession) {
this.jmsSession = jmsSession;
}
@Override
public void setBody(Object body) {
super.setBody(body);
if (body == null) {
// preserver headers even if we set body to null
ensureInitialHeaders();
// remove underlying jmsMessage since we mutated body to null
jmsMessage = null;
}
}
@Override
public Object getHeader(String name) {
ensureInitialHeaders();
return super.getHeader(name);
}
@Override
public Map<String, Object> getHeaders() {
ensureInitialHeaders();
return super.getHeaders();
}
@Override
public Object removeHeader(String name) {
ensureInitialHeaders();
return super.removeHeader(name);
}
@Override
public void setHeaders(Map<String, Object> headers) {
ensureInitialHeaders();
super.setHeaders(headers);
}
@Override
public void setHeader(String name, Object value) {
ensureInitialHeaders();
super.setHeader(name, value);
}
@Override
public JmsMessage newInstance() {
JmsMessage answer = new JmsMessage(null, null, null, binding);
answer.setCamelContext(getCamelContext());
return answer;
}
/**
* Returns true if a new JMS message instance should be created to send to the next component
*/
public boolean shouldCreateNewMessage() {
return super.hasPopulatedHeaders();
}
/**
* Ensure that the headers have been populated from the underlying JMS message before we start mutating the headers
*/
protected void ensureInitialHeaders() {
if (jmsMessage != null && !hasPopulatedHeaders()) {
// we have not populated headers so force this by creating
// new headers and set it on super
super.setHeaders(createHeaders());
}
}
@Override
protected Object createBody() {
if (jmsMessage != null) {
return getBinding().extractBodyFromJms(getExchange(), jmsMessage);
}
return null;
}
@Override
protected void populateInitialHeaders(Map<String, Object> map) {
if (jmsMessage != null && map != null) {
map.putAll(getBinding().extractHeadersFromJms(jmsMessage, getExchange()));
try {
map.put(Exchange.MESSAGE_TIMESTAMP, jmsMessage.getJMSTimestamp());
} catch (JMSException e) {
// ignore
}
}
}
@Override
protected String createMessageId() {
if (jmsMessage == null) {
LOG.trace("No jakarta.jms.Message set so generating a new message id");
return super.createMessageId();
}
try {
String id = getDestinationAsString(jmsMessage.getJMSDestination());
if (id != null) {
id += jmsMessage.getJMSMessageID();
} else {
id = jmsMessage.getJMSMessageID();
}
return getSanitizedString(id);
} catch (JMSException e) {
throw new RuntimeExchangeException("Unable to retrieve JMSMessageID from JMS Message", getExchange(), e);
}
}
private String getDestinationAsString(Destination destination) throws JMSException {
String result = null;
if (destination == null) {
result = "null destination!" + File.separator;
} else if (destination instanceof Topic topic) {
result = "topic" + File.separator + topic.getTopicName() + File.separator;
} else if (destination instanceof Queue queue) {
result = "queue" + File.separator + queue.getQueueName() + File.separator;
}
return result;
}
private String getSanitizedString(Object value) {
return value != null ? value.toString().replaceAll("[^a-zA-Z0-9\\.\\_\\-]", "_") : "";
}
}
| JmsMessage |
java | apache__camel | components/camel-atmosphere-websocket/src/test/java/org/apache/camel/component/atmosphere/websocket/WebsocketRoute3Test.java | {
"start": 1417,
"end": 5645
} | class ____ extends WebsocketCamelRouterTestSupport {
private static final String RESPONSE_GREETING = "Hola ";
private static final byte[] RESPONSE_GREETING_BYTES = { 0x48, 0x6f, 0x6c, 0x61, 0x20 };
@Test
void testWebsocketSingleClientForReader() throws Exception {
WebsocketTestClient wsclient = new WebsocketTestClient("ws://localhost:" + PORT + "/hola3");
wsclient.connect();
wsclient.sendTextMessage("Cerveza");
assertTrue(wsclient.await(10));
List<String> received = wsclient.getReceived(String.class);
assertEquals(1, received.size());
assertEquals("Hola Cerveza", received.get(0));
wsclient.close();
}
@Test
void testWebsocketSingleClientForInputStream() throws Exception {
WebsocketTestClient wsclient = new WebsocketTestClient("ws://localhost:" + PORT + "/hola3");
wsclient.connect();
wsclient.sendBytesMessage("Cerveza".getBytes("UTF-8"));
assertTrue(wsclient.await(10));
List<String> received = wsclient.getReceived(String.class);
assertEquals(1, received.size());
assertEquals("Hola Cerveza", received.get(0));
wsclient.close();
}
// START SNIPPET: payload
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
// route for a single stream line
from("atmosphere-websocket:///hola3?useStreaming=true").to("log:info").process(new Processor() {
public void process(final Exchange exchange) {
createResponse(exchange, true);
}
}).to("atmosphere-websocket:///hola3");
}
};
}
private static void createResponse(Exchange exchange, boolean streaming) {
Object msg = exchange.getIn().getBody();
if (streaming) {
assertTrue(msg instanceof Reader || msg instanceof InputStream, "Expects Reader or InputStream");
} else {
assertTrue(msg instanceof String || msg instanceof byte[], "Expects String or byte[]");
}
if (msg instanceof String) {
exchange.getIn().setBody(RESPONSE_GREETING + msg);
} else if (msg instanceof byte[]) {
exchange.getIn().setBody(createByteResponse((byte[]) msg));
} else if (msg instanceof Reader) {
exchange.getIn().setBody(new StringReader(RESPONSE_GREETING + readAll((Reader) msg)));
} else if (msg instanceof InputStream) {
exchange.getIn().setBody(createByteResponse(readAll((InputStream) msg)));
}
}
private static void checkEventsResendingDisabled(Exchange exchange) {
Object eventType = exchange.getIn().getHeader(WebsocketConstants.EVENT_TYPE);
if (eventType instanceof Integer) {
if (eventType.equals(WebsocketConstants.ONOPEN_EVENT_TYPE)
|| eventType.equals(WebsocketConstants.ONCLOSE_EVENT_TYPE)
|| eventType.equals(WebsocketConstants.ONERROR_EVENT_TYPE)) {
exchange.getIn().setBody("Error. This place should never be reached.");
}
}
}
private static byte[] createByteResponse(byte[] req) {
byte[] resp = new byte[req.length + RESPONSE_GREETING_BYTES.length];
System.arraycopy(RESPONSE_GREETING_BYTES, 0, resp, 0, RESPONSE_GREETING_BYTES.length);
System.arraycopy(req, 0, resp, RESPONSE_GREETING_BYTES.length, req.length);
return resp;
}
private static String readAll(Reader reader) {
try {
return IOHelper.toString(reader);
} catch (IOException e) {
}
return "";
}
private static byte[] readAll(InputStream is) {
ByteArrayOutputStream byteBuf = new ByteArrayOutputStream();
try {
is.transferTo(byteBuf);
} catch (IOException e) {
// ignore
} finally {
try {
is.close();
} catch (IOException e) {
// ignore
}
}
return byteBuf.toByteArray();
}
// END SNIPPET: payload
}
| WebsocketRoute3Test |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/JSONObjectTest_getObj_2.java | {
"start": 838,
"end": 973
} | class ____ {
}
public static <T> Type getType() {
return new TypeReference<T[]>() {}.getType();
}
}
| Model |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/threadpool/ScheduledCancellableAdapter.java | {
"start": 691,
"end": 1460
} | class ____ implements Scheduler.ScheduledCancellable {
private final ScheduledFuture<?> scheduledFuture;
ScheduledCancellableAdapter(ScheduledFuture<?> scheduledFuture) {
assert scheduledFuture != null;
this.scheduledFuture = scheduledFuture;
}
@Override
public long getDelay(TimeUnit unit) {
return scheduledFuture.getDelay(unit);
}
@Override
public int compareTo(Delayed other) {
// unwrap other by calling on it.
return -other.compareTo(scheduledFuture);
}
@Override
public boolean cancel() {
return FutureUtils.cancel(scheduledFuture);
}
@Override
public boolean isCancelled() {
return scheduledFuture.isCancelled();
}
}
| ScheduledCancellableAdapter |
java | apache__hadoop | hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java | {
"start": 24315,
"end": 30934
} | class ____ implements AbortableResult {
/**
* Had the stream already been closed/aborted?
*/
private final boolean alreadyClosed;
/**
* Was any exception raised during non-essential
* cleanup actions (i.e. MPU abort)?
*/
private final IOException anyCleanupException;
/**
* Constructor.
* @param alreadyClosed Had the stream already been closed/aborted?
* @param anyCleanupException Was any exception raised during cleanup?
*/
private AbortableResultImpl(final boolean alreadyClosed,
final IOException anyCleanupException) {
this.alreadyClosed = alreadyClosed;
this.anyCleanupException = anyCleanupException;
}
@Override
public boolean alreadyClosed() {
return alreadyClosed;
}
@Override
public IOException anyCleanupException() {
return anyCleanupException;
}
@Override
public String toString() {
return new StringJoiner(", ",
AbortableResultImpl.class.getSimpleName() + "[", "]")
.add("alreadyClosed=" + alreadyClosed)
.add("anyCleanupException=" + anyCleanupException)
.toString();
}
}
/**
* Upload the current block as a single PUT request; if the buffer is empty a
* 0-byte PUT will be invoked, as it is needed to create an entry at the far
* end.
* @return number of bytes uploaded.
* @throws IOException any problem.
*/
@Retries.RetryTranslated
private long putObject() throws IOException {
LOG.debug("Executing regular upload for {}", writeOperationHelper);
final S3ADataBlocks.DataBlock block = getActiveBlock();
final long size = block.dataSize();
final S3ADataBlocks.BlockUploadData uploadData = block.startUpload();
final PutObjectRequest putObjectRequest =
writeOperationHelper.createPutObjectRequest(
key,
uploadData.getSize(),
builder.putOptions);
clearActiveBlock();
BlockUploadProgress progressCallback =
new BlockUploadProgress(block, progressListener, now());
statistics.blockUploadQueued(size);
try {
progressCallback.progressChanged(PUT_STARTED_EVENT);
// the putObject call automatically closes the upload data
writeOperationHelper.putObject(putObjectRequest,
builder.putOptions,
uploadData,
statistics);
progressCallback.progressChanged(REQUEST_BYTE_TRANSFER_EVENT);
progressCallback.progressChanged(PUT_COMPLETED_EVENT);
} catch (InterruptedIOException ioe){
progressCallback.progressChanged(PUT_INTERRUPTED_EVENT);
throw ioe;
} catch (IOException ioe){
progressCallback.progressChanged(PUT_FAILED_EVENT);
throw ioe;
} finally {
cleanupWithLogger(LOG, uploadData, block);
}
return size;
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder(
"S3ABlockOutputStream{");
sb.append(writeOperationHelper.toString());
sb.append(", blockSize=").append(blockSize);
sb.append(", isMultipartUploadEnabled=").append(isMultipartUploadEnabled);
// unsynced access; risks consistency in exchange for no risk of deadlock.
S3ADataBlocks.DataBlock block = activeBlock;
if (block != null) {
sb.append(", activeBlock=").append(block);
}
sb.append(" Statistics=")
.append(IOStatisticsLogging.ioStatisticsSourceToString(this));
sb.append('}');
return sb.toString();
}
private void incrementWriteOperations() {
writeOperationHelper.incrementWriteOperations();
}
/**
* Current time in milliseconds.
* @return time
*/
private Instant now() {
return Instant.now();
}
/**
* Get the statistics for this stream.
* @return stream statistics
*/
BlockOutputStreamStatistics getStatistics() {
return statistics;
}
/**
* Return the stream capabilities.
* This stream always returns false when queried about hflush and hsync.
* If asked about {@link CommitConstants#STREAM_CAPABILITY_MAGIC_OUTPUT}
* it will return true iff this is an active "magic" output stream.
* @param capability string to query the stream support for.
* @return true if the capability is supported by this instance.
*/
@SuppressWarnings("deprecation")
@Override
public boolean hasCapability(String capability) {
final String cap = capability.toLowerCase(Locale.ENGLISH);
switch (cap) {
// does the output stream have delayed visibility
case CommitConstants.STREAM_CAPABILITY_MAGIC_OUTPUT:
case CommitConstants.STREAM_CAPABILITY_MAGIC_OUTPUT_OLD:
return !putTracker.outputImmediatelyVisible();
// The flush/sync options are absolutely not supported
case StreamCapabilities.HFLUSH:
case StreamCapabilities.HSYNC:
return false;
// yes, we do statistics.
case StreamCapabilities.IOSTATISTICS:
return true;
// S3A supports abort.
case StreamCapabilities.ABORTABLE_STREAM:
return true;
// IOStatistics context support for thread-level IOStatistics.
case StreamCapabilities.IOSTATISTICS_CONTEXT:
return true;
default:
// scan flags for the capability
for (WriteObjectFlags flag : writeObjectFlags) {
if (flag.hasKey(cap)) {
return true;
}
}
return false;
}
}
@Override
public void hflush() throws IOException {
statistics.hflushInvoked();
// do not reject these, but downgrade to a no-oop
LOG.debug("Hflush invoked");
}
@Override
public void hsync() throws IOException {
statistics.hsyncInvoked();
handleSyncableInvocation();
}
/**
* Processing of Syncable operation reporting/downgrade.
* @throws UnsupportedOperationException if required.
*/
private void handleSyncableInvocation() {
final UnsupportedOperationException ex
= new UnsupportedOperationException(E_NOT_SYNCABLE);
if (!downgradeSyncableExceptions) {
throw ex;
}
// downgrading.
WARN_ON_SYNCABLE.warn("Application invoked the Syncable API against"
+ " stream writing to {}. This is Unsupported",
key);
// and log at debug
LOG.debug("Downgrading Syncable call", ex);
}
@Override
public IOStatistics getIOStatistics() {
return iostatistics;
}
/**
* Get the IOStatistics aggregator passed in the builder.
* @return an aggregator
*/
protected IOStatisticsAggregator getThreadIOStatistics() {
return threadIOStatisticsAggregator;
}
/**
* Multiple partition upload.
*/
private | AbortableResultImpl |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/io/AvailabilityProvider.java | {
"start": 1143,
"end": 3779
} | interface ____ {
/**
* Constant that allows to avoid volatile checks {@link CompletableFuture#isDone()}. Check
* {@link #isAvailable()} and {@link #isApproximatelyAvailable()} for more explanation.
*/
CompletableFuture<?> AVAILABLE = CompletableFuture.completedFuture(null);
/**
* @return a future that is completed if the respective provider is available.
*/
CompletableFuture<?> getAvailableFuture();
/**
* In order to best-effort avoid volatile access in {@link CompletableFuture#isDone()}, we check
* the condition of <code>future == AVAILABLE</code> firstly for getting probable performance
* benefits while hot looping.
*
* <p>It is always safe to use this method in performance nonsensitive scenarios to get the
* precise state.
*
* @return true if this instance is available for further processing.
*/
default boolean isAvailable() {
CompletableFuture<?> future = getAvailableFuture();
return future == AVAILABLE || future.isDone();
}
/**
* Checks whether this instance is available only via constant {@link #AVAILABLE} to avoid
* performance concern caused by volatile access in {@link CompletableFuture#isDone()}. So it is
* mainly used in the performance sensitive scenarios which do not always need the precise
* state.
*
* <p>This method is still safe to get the precise state if {@link #getAvailableFuture()} was
* touched via (.get(), .wait(), .isDone(), ...) before, which also has a "happen-before"
* relationship with this call.
*
* @return true if this instance is available for further processing.
*/
default boolean isApproximatelyAvailable() {
return getAvailableFuture() == AVAILABLE;
}
static CompletableFuture<?> and(CompletableFuture<?> first, CompletableFuture<?> second) {
if (first == AVAILABLE && second == AVAILABLE) {
return AVAILABLE;
} else if (first == AVAILABLE) {
return second;
} else if (second == AVAILABLE) {
return first;
} else {
return CompletableFuture.allOf(first, second);
}
}
static CompletableFuture<?> or(CompletableFuture<?> first, CompletableFuture<?> second) {
if (first == AVAILABLE || second == AVAILABLE) {
return AVAILABLE;
}
return CompletableFuture.anyOf(first, second);
}
/**
* A availability implementation for providing the helpful functions of resetting the
* available/unavailable states.
*/
final | AvailabilityProvider |
java | apache__rocketmq | common/src/main/java/org/apache/rocketmq/common/future/FutureTaskExt.java | {
"start": 933,
"end": 1346
} | class ____<V> extends FutureTask<V> {
private final Runnable runnable;
public FutureTaskExt(final Callable<V> callable) {
super(callable);
this.runnable = null;
}
public FutureTaskExt(final Runnable runnable, final V result) {
super(runnable, result);
this.runnable = runnable;
}
public Runnable getRunnable() {
return runnable;
}
}
| FutureTaskExt |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/common/hash/BufferedMurmur3HasherTests.java | {
"start": 647,
"end": 5891
} | class ____ extends ESTestCase {
private final BufferedMurmur3Hasher bufferedHasher = new BufferedMurmur3Hasher(0, randomIntBetween(32, 128));
private final Murmur3Hasher hasher = new Murmur3Hasher(0);
public void testAddString() {
String testString = randomUnicodeOfLengthBetween(0, 1024);
bufferedHasher.addString(testString);
BytesRef bytesRef = new BytesRef(testString);
hasher.update(bytesRef.bytes, bytesRef.offset, bytesRef.length);
assertEquals(hasher.digestHash(), bufferedHasher.digestHash());
}
public void testConstructorWithInvalidBufferSize() {
IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> new BufferedMurmur3Hasher(0, 31));
assertEquals("Buffer size must be at least 32 bytes", exception.getMessage());
}
public void testAddLong() {
long value = randomLong();
bufferedHasher.addLong(value);
hasher.update(toBytes(value), 0, Long.BYTES);
assertEquals(hasher.digestHash(), bufferedHasher.digestHash());
}
public void testAddLongs() {
long value1 = randomLong();
long value2 = randomLong();
long value3 = randomLong();
long value4 = randomLong();
bufferedHasher.addLong(value1);
bufferedHasher.addLongs(value1, value2);
bufferedHasher.addLongs(value1, value2, value3, value4);
hasher.update(toBytes(value1));
hasher.update(toBytes(value1));
hasher.update(toBytes(value2));
hasher.update(toBytes(value1));
hasher.update(toBytes(value2));
hasher.update(toBytes(value3));
hasher.update(toBytes(value4));
assertEquals(hasher.digestHash(), bufferedHasher.digestHash());
}
public void testAddTwoLongs() {
long value1 = randomLong();
long value2 = randomLong();
bufferedHasher.addLongs(value1, value2);
hasher.update(toBytes(value1));
hasher.update(toBytes(value2));
assertEquals(hasher.digestHash(), bufferedHasher.digestHash());
}
public void testAddFourLongs() {
long value1 = randomLong();
long value2 = randomLong();
long value3 = randomLong();
long value4 = randomLong();
bufferedHasher.addLongs(value1, value2, value3, value4);
hasher.update(toBytes(value1));
hasher.update(toBytes(value2));
hasher.update(toBytes(value3));
hasher.update(toBytes(value4));
assertEquals(hasher.digestHash(), bufferedHasher.digestHash());
}
public void testRandomAdds() {
int numAdds = randomIntBetween(128, 1024);
for (int i = 0; i < numAdds; i++) {
switch (randomIntBetween(0, 4)) {
case 0 -> {
String randomString = randomUnicodeOfLengthBetween(0, 64);
bufferedHasher.addString(randomString);
BytesRef bytesRef = new BytesRef(randomString);
hasher.update(bytesRef.bytes, bytesRef.offset, bytesRef.length);
}
case 1 -> {
String emptyString = "";
bufferedHasher.addString(emptyString);
BytesRef bytesRef = new BytesRef(emptyString);
hasher.update(bytesRef.bytes, bytesRef.offset, bytesRef.length);
}
case 2 -> {
long randomLong = randomLong();
bufferedHasher.addLong(randomLong);
hasher.update(toBytes(randomLong));
}
case 3 -> {
long randomLong1 = randomLong();
long randomLong2 = randomLong();
bufferedHasher.addLongs(randomLong1, randomLong2);
hasher.update(toBytes(randomLong1));
hasher.update(toBytes(randomLong2));
}
case 4 -> {
long randomLong1 = randomLong();
long randomLong2 = randomLong();
long randomLong3 = randomLong();
long randomLong4 = randomLong();
bufferedHasher.addLongs(randomLong1, randomLong2, randomLong3, randomLong4);
hasher.update(toBytes(randomLong1));
hasher.update(toBytes(randomLong2));
hasher.update(toBytes(randomLong3));
hasher.update(toBytes(randomLong4));
}
}
}
assertEquals(hasher.digestHash(), bufferedHasher.digestHash());
}
public void testReset() {
bufferedHasher.addString(randomUnicodeOfLengthBetween(0, 1024));
bufferedHasher.addLong(randomLong());
bufferedHasher.addLongs(randomLong(), randomLong());
bufferedHasher.addLongs(randomLong(), randomLong(), randomLong(), randomLong());
bufferedHasher.reset();
assertEquals(new MurmurHash3.Hash128(0, 0), bufferedHasher.digestHash());
}
private byte[] toBytes(long value) {
byte[] bytes = new byte[Long.BYTES];
ByteUtils.writeLongLE(value, bytes, 0);
return bytes;
}
}
| BufferedMurmur3HasherTests |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerResourceEvent.java | {
"start": 1031,
"end": 1370
} | class ____ extends ContainerEvent {
private final LocalResourceRequest rsrc;
public ContainerResourceEvent(ContainerId container,
ContainerEventType type, LocalResourceRequest rsrc) {
super(container, type);
this.rsrc = rsrc;
}
public LocalResourceRequest getResource() {
return rsrc;
}
}
| ContainerResourceEvent |
java | quarkusio__quarkus | extensions/panache/rest-data-panache/deployment/src/main/java/io/quarkus/rest/data/panache/deployment/ResourceMetadata.java | {
"start": 146,
"end": 197
} | class ____ {
/**
* Generated | ResourceMetadata |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/boot/model/internal/GeneratorBinder.java | {
"start": 3651,
"end": 18101
} | class ____ {
public static final String ASSIGNED_GENERATOR_NAME = "assigned";
public static final GeneratorCreator ASSIGNED_IDENTIFIER_GENERATOR_CREATOR =
new GeneratorCreator() {
@Override
public Generator createGenerator(GeneratorCreationContext context) {
return new Assigned();
}
@Override
public boolean isAssigned() {
return true;
}
};
/**
* Create a generator, based on a {@link GeneratedValue} annotation.
*/
public static void makeIdGenerator(
SimpleValue identifierValue,
MemberDetails idMember,
String generatorType,
String generatorName,
MetadataBuildingContext context,
Map<String, ? extends IdentifierGeneratorDefinition> localGenerators) {
//generator settings
final var configuration = initializeGeneratorSettings( identifierValue, generatorName );
final String generatorStrategy;
if ( generatorName.isEmpty() ) {
if ( idMember.hasDirectAnnotationUsage( GeneratedValue.class )
&& handleDefaultGenerator( identifierValue, context, localGenerators, idMember, configuration ) ) {
// we found an appropriate a "default" generator (as per JPA 3.2)
return; // EARLY EXIT
}
else {
generatorStrategy = generatorType;
}
}
else if ( generatorName.isBlank() ) {
throw new MappingException( "Generator name is cannot be blank" );
}
else {
//we have a named generator
generatorStrategy = determineStrategy(
idMember,
generatorType,
generatorName,
context,
localGenerators,
configuration
);
}
setGeneratorCreator( identifierValue, configuration, generatorStrategy, context );
}
/**
* Called if {@link @GeneratedValue} specified no name.
* This is a new special case added in JPA 3.2.
* We look for an appropriate matching "default generator recipe"
* based on the {@link GenerationType}.
*/
private static boolean handleDefaultGenerator(
SimpleValue identifierValue,
MetadataBuildingContext context,
Map<String, ? extends IdentifierGeneratorDefinition> localGenerators,
MemberDetails idMember,
Map<String, Object> configuration) {
final var strategy = idMember.getDirectAnnotationUsage( GeneratedValue.class ).strategy();
final String strategyGeneratorClassName = correspondingGeneratorName( strategy );
final var impliedGenerator =
determineImpliedGenerator( strategy, strategyGeneratorClassName, localGenerators );
if ( impliedGenerator != null ) {
configuration.putAll( impliedGenerator.getParameters() );
instantiateNamedStrategyGenerator( identifierValue, strategyGeneratorClassName, configuration, context );
return true;
}
else {
return false;
}
}
private static Map<String, Object> initializeGeneratorSettings(SimpleValue identifierValue, String generatorName) {
final Map<String,Object> configuration = new HashMap<>();
configuration.put( GENERATOR_NAME, generatorName );
configuration.put( PersistentIdentifierGenerator.TABLE, identifierValue.getTable().getName() );
if ( identifierValue.getColumnSpan() == 1 ) {
configuration.put( PersistentIdentifierGenerator.PK, identifierValue.getColumns().get(0).getName() );
}
return configuration;
}
private static IdentifierGeneratorDefinition determineImpliedGenerator(
GenerationType strategy,
String strategyGeneratorClassName,
Map<String, ? extends IdentifierGeneratorDefinition> localGenerators) {
if ( localGenerators == null ) {
return null;
}
if ( localGenerators.size() == 1 ) {
final var generatorDefinition = localGenerators.values().iterator().next();
// NOTE: a little bit of a special rule here for the case of just one -
// consider it a match, based on strategy, if the strategy is AUTO or matches
if ( strategy == AUTO
|| isImpliedGenerator( strategy, strategyGeneratorClassName, generatorDefinition ) ) {
return generatorDefinition;
}
}
return matchingLocalGenerator( strategy, strategyGeneratorClassName, localGenerators );
}
private static IdentifierGeneratorDefinition matchingLocalGenerator(
GenerationType strategy,
String strategyGeneratorClassName,
Map<String, ? extends IdentifierGeneratorDefinition> localGenerators) {
IdentifierGeneratorDefinition matching = null;
for ( var localGenerator : localGenerators.values() ) {
if ( isImpliedGenerator( strategy, strategyGeneratorClassName, localGenerator ) ) {
if ( matching != null ) {
// we found multiple matching generators
return null;
}
matching = localGenerator;
}
}
return matching;
}
private static boolean isImpliedGenerator(
GenerationType strategy,
String strategyGeneratorClassName,
IdentifierGeneratorDefinition generatorDefinition) {
return generatorDefinition.getStrategy().equals( strategyGeneratorClassName );
}
private static String correspondingGeneratorName(GenerationType strategy) {
return switch ( strategy ) {
// case UUID -> org.hibernate.id.uuid.UuidGenerator.class.getName();
case UUID -> UuidValueGenerator.class.getName();
case TABLE -> org.hibernate.id.enhanced.TableGenerator.class.getName();
case IDENTITY -> null;
default -> SequenceStyleGenerator.class.getName();
};
}
private static String determineStrategy(
MemberDetails idAttributeMember,
String generatorType,
String generatorName,
MetadataBuildingContext context,
Map<String, ? extends IdentifierGeneratorDefinition> localGenerators,
Map<String, Object> configuration) {
final var definition =
makeIdentifierGeneratorDefinition( generatorName, idAttributeMember, localGenerators, context );
if ( definition == null ) {
throw new AnnotationException( "No id generator was declared with the name '" + generatorName
+ "' specified by '@GeneratedValue'"
+ " (define a named generator using '@SequenceGenerator', '@TableGenerator', or '@GenericGenerator')" );
}
configuration.putAll( definition.getParameters() );
// This is quite vague in the spec, but a generator could override the generator choice
return generatorStrategy( generatorType, definition );
}
private static String generatorStrategy(String generatorType, IdentifierGeneratorDefinition definition) {
return generatorType != null
// Yuck! this is a hack to not override 'AUTO',
// even if GeneratedValue.generator is specified
&& definition.getStrategy().equals( "identity" )
? generatorType
: definition.getStrategy();
}
private static IdentifierGeneratorDefinition makeIdentifierGeneratorDefinition(
String name,
MemberDetails idAttributeMember,
Map<String, ? extends IdentifierGeneratorDefinition> localGenerators,
MetadataBuildingContext buildingContext) {
if ( localGenerators != null ) {
final var result = localGenerators.get( name );
if ( result != null ) {
return result;
}
}
final var globalDefinition =
buildingContext.getMetadataCollector()
.getIdentifierGenerator( name );
if ( globalDefinition != null ) {
return globalDefinition;
}
else {
final var generatedValue = idAttributeMember.getDirectAnnotationUsage( GeneratedValue.class );
if ( generatedValue == null ) {
throw new AssertionFailure( "No @GeneratedValue annotation" );
}
return IdentifierGeneratorDefinition.createImplicit(
name,
idAttributeMember.getType(),
generatedValue.generator(),
interpretGenerationType( generatedValue )
);
}
}
private static GenerationType interpretGenerationType(GeneratedValue generatedValueAnn) {
// todo (jpa32) : when can this ever be null?
final var strategy = generatedValueAnn.strategy();
return strategy == null ? AUTO : strategy;
}
public static void visitIdGeneratorDefinitions(
AnnotationTarget annotatedElement,
Consumer<IdentifierGeneratorDefinition> consumer,
MetadataBuildingContext buildingContext) {
final var modelsContext = buildingContext.getBootstrapContext().getModelsContext();
annotatedElement.forEachAnnotationUsage( TableGenerator.class, modelsContext,
usage -> consumer.accept( buildTableIdGenerator( usage ) ) );
annotatedElement.forEachAnnotationUsage( SequenceGenerator.class, modelsContext,
usage -> consumer.accept( buildSequenceIdGenerator( usage ) ) );
annotatedElement.forEachAnnotationUsage( GenericGenerator.class, modelsContext,
usage -> consumer.accept( buildIdGenerator( usage ) ) );
}
public static void registerGlobalGenerators(
AnnotationTarget annotatedElement,
MetadataBuildingContext context) {
if ( context.getBootstrapContext().getJpaCompliance().isGlobalGeneratorScopeEnabled() ) {
final var metadataCollector = context.getMetadataCollector();
visitIdGeneratorDefinitions(
annotatedElement,
definition -> {
if ( !definition.getName().isEmpty() ) {
metadataCollector.addIdentifierGenerator( definition );
}
},
context
);
}
}
private static IdentifierGeneratorDefinition buildIdGenerator(GenericGenerator generatorAnnotation) {
final var definitionBuilder = genericDefinitionBuilder( generatorAnnotation );
if ( BOOT_LOGGER.isTraceEnabled() ) {
BOOT_LOGGER.addedGenerator( definitionBuilder.getName(), definitionBuilder.getStrategy() );
}
return definitionBuilder.build();
}
private static IdentifierGeneratorDefinition.Builder genericDefinitionBuilder(GenericGenerator generatorAnnotation) {
final var definitionBuilder = new IdentifierGeneratorDefinition.Builder();
definitionBuilder.setName( generatorAnnotation.name() );
final var generatorClass = generatorAnnotation.type();
final String strategy =
generatorClass.equals( Generator.class )
? generatorAnnotation.strategy()
: generatorClass.getName();
definitionBuilder.setStrategy( strategy );
definitionBuilder.addParams( extractParameterMap( generatorAnnotation.parameters() ) );
return definitionBuilder;
}
private static IdentifierGeneratorDefinition buildSequenceIdGenerator(SequenceGenerator generatorAnnotation) {
final var definitionBuilder = new IdentifierGeneratorDefinition.Builder();
interpretSequenceGenerator( generatorAnnotation, definitionBuilder );
if ( BOOT_LOGGER.isTraceEnabled() ) {
BOOT_LOGGER.addedSequenceGenerator( definitionBuilder.getName() );
}
return definitionBuilder.build();
}
private static IdentifierGeneratorDefinition buildTableIdGenerator(TableGenerator generatorAnnotation) {
final var definitionBuilder = new IdentifierGeneratorDefinition.Builder();
interpretTableGenerator( generatorAnnotation, definitionBuilder );
if ( BOOT_LOGGER.isTraceEnabled() ) {
BOOT_LOGGER.addedTableGenerator( definitionBuilder.getName() );
}
return definitionBuilder.build();
}
private static void checkGeneratorClass(Class<? extends Generator> generatorClass) {
if ( !BeforeExecutionGenerator.class.isAssignableFrom( generatorClass )
&& !OnExecutionGenerator.class.isAssignableFrom( generatorClass ) ) {
throw new MappingException( "Generator class '" + generatorClass.getName()
+ "' must implement either 'BeforeExecutionGenerator' or 'OnExecutionGenerator'" );
}
}
private static void checkGeneratorInterfaces(Class<? extends Generator> generatorClass) {
// A regular value generator should not implement legacy IdentifierGenerator
if ( IdentifierGenerator.class.isAssignableFrom( generatorClass ) ) {
throw new AnnotationException( "Generator class '" + generatorClass.getName()
+ "' implements 'IdentifierGenerator' and may not be used with '@ValueGenerationType'" );
}
}
/**
* Return a {@link GeneratorCreator} for an attribute annotated
* with a {@linkplain ValueGenerationType generator annotation}.
*/
private static GeneratorCreator generatorCreator(
MemberDetails memberDetails,
Value value,
Annotation annotation,
BeanContainer beanContainer) {
final var annotationType = annotation.annotationType();
final var generatorAnnotation = annotationType.getAnnotation( ValueGenerationType.class );
assert generatorAnnotation != null;
final var generatorClass = generatorAnnotation.generatedBy();
checkGeneratorClass( generatorClass );
checkGeneratorInterfaces( generatorClass );
return creationContext -> {
final var generator =
instantiateAndInitializeGenerator(
value,
annotation,
beanContainer,
creationContext,
generatorClass,
memberDetails,
annotationType
);
checkVersionGenerationAlways( memberDetails, generator );
return generator;
};
}
private static Generator instantiateAndInitializeGenerator(
Value value,
Annotation annotation,
BeanContainer beanContainer,
GeneratorCreationContext creationContext,
Class<? extends Generator> generatorClass,
MemberDetails memberDetails,
Class<? extends Annotation> annotationType) {
final var generator = instantiateGenerator(
annotation,
beanContainer,
creationContext,
generatorClass,
memberDetails,
annotationType
);
callInitialize( annotation, memberDetails, creationContext, generator );
callConfigure( creationContext, generator, emptyMap(), value );
return generator;
}
/**
* Return a {@link GeneratorCreator} for an id attribute annotated
* with an {@linkplain IdGeneratorType id generator annotation}.
*/
private static GeneratorCreator identifierGeneratorCreator(
MemberDetails idAttributeMember,
Annotation annotation,
SimpleValue identifierValue,
BeanContainer beanContainer) {
final var annotationType = annotation.annotationType();
final var idGeneratorAnnotation = annotationType.getAnnotation( IdGeneratorType.class );
assert idGeneratorAnnotation != null;
final var generatorClass = idGeneratorAnnotation.value();
checkGeneratorClass( generatorClass );
return creationContext -> {
final Generator generator =
instantiateAndInitializeGenerator(
identifierValue,
annotation,
beanContainer,
creationContext,
generatorClass,
idAttributeMember,
annotationType
);
checkIdGeneratorTiming( annotationType, generator );
return generator;
};
}
/**
* Instantiate a {@link Generator}, using the given {@link BeanContainer} if any,
* for the case where the generator was specified using a generator annotation.
*
* @param annotation the generator annotation
* @param beanContainer an optional {@code BeanContainer}
* @param generatorClass a | GeneratorBinder |
java | spring-projects__spring-boot | module/spring-boot-data-elasticsearch/src/test/java/org/springframework/boot/data/elasticsearch/autoconfigure/DataElasticsearchAutoConfigurationTests.java | {
"start": 5818,
"end": 6053
} | class ____ {
@Bean
ReactiveElasticsearchClient customClient(ElasticsearchTransport transport) {
return new ReactiveElasticsearchClient(transport);
}
}
@Configuration(proxyBeanMethods = false)
static | CustomClientConfiguration |
java | quarkusio__quarkus | extensions/resteasy-classic/resteasy-common/runtime/src/main/java/io/quarkus/resteasy/common/runtime/QuarkusConstructorInjector.java | {
"start": 470,
"end": 2109
} | class ____ implements ConstructorInjector {
private volatile Supplier<? extends InstanceHandle<?>> factory;
private final ConstructorInjector delegate;
private final Constructor<?> ctor;
public QuarkusConstructorInjector(Constructor<?> ctor, ConstructorInjector delegate) {
this.ctor = ctor;
this.delegate = delegate;
}
@Override
public Object construct(boolean unwrapAsync) {
if (factory != null) {
return factory.get().get();
}
factory = Arc.container().beanInstanceSupplier(this.ctor.getDeclaringClass());
if (factory == null) {
return delegate.construct(unwrapAsync);
}
return factory.get().get();
}
@Override
public Object construct(HttpRequest request, HttpResponse response, boolean unwrapAsync)
throws Failure, WebApplicationException, ApplicationException {
if (factory != null) {
return factory.get().get();
}
factory = Arc.container().beanInstanceSupplier(this.ctor.getDeclaringClass());
if (factory == null) {
return delegate.construct(request, response, unwrapAsync);
}
return factory.get().get();
}
@Override
public Object injectableArguments(boolean unwrapAsync) {
return this.delegate.injectableArguments(unwrapAsync);
}
@Override
public Object injectableArguments(HttpRequest request, HttpResponse response, boolean unwrapAsync)
throws Failure {
return this.delegate.injectableArguments(request, response, unwrapAsync);
}
}
| QuarkusConstructorInjector |
java | hibernate__hibernate-orm | hibernate-vector/src/main/java/org/hibernate/vector/internal/OracleVectorFunctionContributor.java | {
"start": 332,
"end": 1640
} | class ____ implements FunctionContributor {
@Override
public void contributeFunctions(FunctionContributions functionContributions) {
final Dialect dialect = functionContributions.getDialect();
if ( dialect instanceof OracleDialect && dialect.getVersion().isSameOrAfter( 23, 4 ) ) {
final VectorFunctionFactory vectorFunctionFactory = new VectorFunctionFactory( functionContributions );
vectorFunctionFactory.cosineDistance( "vector_distance(?1,?2,COSINE)" );
vectorFunctionFactory.euclideanDistance( "vector_distance(?1,?2,EUCLIDEAN)" );
vectorFunctionFactory.euclideanSquaredDistance( "vector_distance(?1,?2,EUCLIDEAN_SQUARED)" );
vectorFunctionFactory.l1Distance( "vector_distance(?1,?2,MANHATTAN)" );
vectorFunctionFactory.hammingDistance( "vector_distance(?1,?2,HAMMING)" );
vectorFunctionFactory.jaccardDistance( "vector_distance(?1,?2,JACCARD)" );
vectorFunctionFactory.innerProduct( "vector_distance(?1,?2,DOT)*-1" );
vectorFunctionFactory.negativeInnerProduct( "vector_distance(?1,?2,DOT)" );
vectorFunctionFactory.vectorDimensions();
vectorFunctionFactory.vectorNorm();
functionContributions.getFunctionRegistry().registerAlternateKey( "l2_norm", "vector_norm" );
}
}
@Override
public int ordinal() {
return 200;
}
}
| OracleVectorFunctionContributor |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/IdentifierNameTest.java | {
"start": 13954,
"end": 14232
} | class ____ {
private Object Object() {
return null;
}
void call() {
Object();
}
}
""")
.addOutputLines(
"Test.java",
"""
| Test |
java | spring-projects__spring-security | web/src/test/java/org/springframework/security/web/authentication/password/HaveIBeenPwnedRestApiPasswordCheckerTests.java | {
"start": 1218,
"end": 3678
} | class ____ {
private final String pwnedPasswords = """
2CDE4CDCFA5AD7D223BD1800338FBEAA04E:1
2CF90F92EE1941547BB13DFC7D0E0AFE504:1
2D10A6654B6D75908AE572559542245CBFA:6
2D4FCF535FE92B8B950424E16E65EFBFED3:1
2D6980B9098804E7A83DC5831BFBAF3927F:1
2D8D1B3FAACCA6A3C6A91617B2FA32E2F57:1
2DC183F740EE76F27B78EB39C8AD972A757:300185
2DE4C0087846D223DBBCCF071614590F300:3
2DEA2B1D02714099E4B7A874B4364D518F6:1
2E750AE8C4756A20CE040BF3DDF094FA7EC:1
2E90B7B3C5C1181D16C48E273D9AC7F3C16:5
2E991A9162F24F01826D8AF73CA20F2B430:1
2EAE5EA981BFAF29A8869A40BDDADF3879B:2
2F1AC09E3846595E436BBDDDD2189358AF9:1
""";
private final MockWebServer server = new MockWebServer();
private final HaveIBeenPwnedRestApiPasswordChecker passwordChecker = new HaveIBeenPwnedRestApiPasswordChecker();
@BeforeEach
void setup() throws IOException {
this.server.start();
HttpUrl url = this.server.url("/range/");
this.passwordChecker.setRestClient(RestClient.builder().baseUrl(url.toString()).build());
}
@AfterEach
void tearDown() throws IOException {
this.server.shutdown();
}
@Test
void checkWhenPasswordIsLeakedThenIsCompromised() throws InterruptedException {
this.server.enqueue(new MockResponse().setBody(this.pwnedPasswords).setResponseCode(200));
CompromisedPasswordDecision check = this.passwordChecker.check("P@ssw0rd");
assertThat(check.isCompromised()).isTrue();
assertThat(this.server.takeRequest().getPath()).isEqualTo("/range/21BD1");
}
@Test
void checkWhenPasswordNotLeakedThenNotCompromised() {
this.server.enqueue(new MockResponse().setBody(this.pwnedPasswords).setResponseCode(200));
CompromisedPasswordDecision check = this.passwordChecker.check("My1nCr3d!bL3P@SS0W0RD");
assertThat(check.isCompromised()).isFalse();
}
@Test
void checkWhenNoPasswordsReturnedFromApiCallThenNotCompromised() {
this.server.enqueue(new MockResponse().setResponseCode(200));
CompromisedPasswordDecision check = this.passwordChecker.check("123456");
assertThat(check.isCompromised()).isFalse();
}
@Test
void checkWhenResponseStatusNot200ThenNotCompromised() {
this.server.enqueue(new MockResponse().setResponseCode(503));
assertThatNoException().isThrownBy(() -> this.passwordChecker.check("123456"));
this.server.enqueue(new MockResponse().setResponseCode(404));
assertThatNoException().isThrownBy(() -> this.passwordChecker.check("123456"));
}
}
| HaveIBeenPwnedRestApiPasswordCheckerTests |
java | junit-team__junit5 | platform-tooling-support-tests/src/test/java/platform/tooling/support/tests/ManifestTests.java | {
"start": 1086,
"end": 3358
} | class ____ {
@ParameterizedTest(quoteTextArguments = false)
@MethodSource("platform.tooling.support.Helper#loadModuleDirectoryNames")
void manifestEntriesAdhereToConventions(String module) throws Exception {
var version = Helper.version();
var jarFile = MavenRepo.jar(module).toFile();
try (var jar = new Jar(jarFile)) {
var manifest = jar.getManifest();
var attributes = manifest.getMainAttributes();
assertValue(attributes, "Built-By", "JUnit Team");
assertValue(attributes, "Specification-Title", module);
assertValue(attributes, "Specification-Version", specificationVersion(version));
assertValue(attributes, "Specification-Vendor", "junit.org");
assertValue(attributes, "Implementation-Title", module);
assertValue(attributes, "Implementation-Version", version);
assertValue(attributes, "Implementation-Vendor", "junit.org");
assertValue(attributes, "Automatic-Module-Name", null);
assertValue(attributes, "Bundle-ManifestVersion", "2");
assertValue(attributes, "Bundle-SymbolicName", module);
assertValue(attributes, "Bundle-Version",
MavenVersion.parseMavenString(version).getOSGiVersion().toString());
if (module.equals("junit-platform-console")) {
assertValue(attributes, "Main-Class", "org.junit.platform.console.ConsoleLauncher");
}
var domain = Domain.domain(manifest);
domain.getExportPackage().forEach((pkg, attrs) -> {
final var stringVersion = attrs.get(VERSION_ATTRIBUTE);
assertNotNull(stringVersion);
assertDoesNotThrow(() -> new Version(stringVersion));
});
domain.getImportPackage().forEach((pkg, attrs) -> {
final var stringVersionRange = attrs.get(VERSION_ATTRIBUTE);
if (stringVersionRange == null) {
return;
}
assertDoesNotThrow(() -> new VersionRange(stringVersionRange));
});
}
}
private static String specificationVersion(String version) {
var dash = version.indexOf('-');
if (dash < 0) {
return version;
}
return version.substring(0, dash);
}
private static void assertValue(Attributes attributes, String name, String expected) {
var actual = attributes.getValue(name);
assertEquals(expected, actual,
"Manifest attribute %s expected to be %s, but is: %s".formatted(name, expected, actual));
}
}
| ManifestTests |
java | apache__camel | test-infra/camel-test-infra-mosquitto/src/test/java/org/apache/camel/test/infra/mosquitto/services/MosquittoServiceFactory.java | {
"start": 951,
"end": 1417
} | class ____ {
private MosquittoServiceFactory() {
}
public static SimpleTestServiceBuilder<MosquittoService> builder() {
return new SimpleTestServiceBuilder<>("mosquitto");
}
public static MosquittoService createService() {
return builder()
.addLocalMapping(MosquittoLocalContainerService::new)
.addRemoteMapping(MosquittoRemoteService::new)
.build();
}
}
| MosquittoServiceFactory |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/cdi/lifecycle/ExtendedBeanManagerNotAvailableDuringCustomUserTypeInitTest.java | {
"start": 2105,
"end": 3774
} | class ____ implements UserType<Object>, DynamicParameterizedType {
@Override
public int getSqlType() {
return Types.VARCHAR;
}
@Override
public Class<Object> returnedClass() {
return Object.class;
}
@Override
public boolean equals(Object x, Object y) throws HibernateException {
if (x == null) {
return y == null;
} else {
return x.equals(y);
}
}
@Override
public int hashCode(Object x) throws HibernateException {
return x == null ? 0 : x.hashCode();
}
@Override
public Object nullSafeGet(ResultSet rs, int i, WrapperOptions sharedSessionContractImplementor)
throws SQLException {
String xmldoc = rs.getString(i);
return rs.wasNull() ? null : xmldoc;
}
@Override
public void nullSafeSet(PreparedStatement st, Object value, int index, WrapperOptions options)
throws SQLException {
if (value == null) {
st.setNull(index, Types.OTHER);
} else {
st.setObject(index, value, Types.OTHER);
}
}
@Override
public Object deepCopy(Object value) throws HibernateException {
return value;
}
@Override
public boolean isMutable() {
return false;
}
@Override
public Serializable disassemble(Object value) throws HibernateException {
return (String) value;
}
@Override
public Object assemble(Serializable cached, Object owner) throws HibernateException {
return cached;
}
@Override
public Object replace(Object original, Object target, Object owner) throws HibernateException {
return original;
}
@Override
public void setParameterValues(Properties parameters) {
// nothing
}
}
public static | CustomDynamicParameterizedUserType |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/onetoone/embeddedid/OneToOneWithEmbeddedIdTest.java | {
"start": 2168,
"end": 2293
} | class ____ implements Serializable {
String id = SafeRandomUUIDGenerator.safeRandomUUIDAsString();
}
@Embeddable
static | ID1 |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java | {
"start": 28904,
"end": 29270
} | class ____ extends ReplicatedWriteRequest<TestRequest> {
TestRequest(StreamInput in) throws IOException {
super(in);
}
TestRequest() {
super(new ShardId("test", "test", 1));
}
@Override
public String toString() {
return "TestRequest{}";
}
}
private static | TestRequest |
java | micronaut-projects__micronaut-core | http-netty/src/main/java/io/micronaut/http/netty/content/HttpContentUtil.java | {
"start": 1024,
"end": 2479
} | class ____ {
public static final byte[] OPEN_BRACKET = "[".getBytes(StandardCharsets.UTF_8);
public static final byte[] CLOSE_BRACKET = "]".getBytes(StandardCharsets.UTF_8);
public static final byte[] COMMA = ",".getBytes(StandardCharsets.UTF_8);
/**
* @return Produces HTTP content for {@code ]}
*/
public static HttpContent closeBracket() {
return new DefaultHttpContent(Unpooled.wrappedBuffer(CLOSE_BRACKET));
}
/**
* @param httpContent The http content to prefix
* @return Produces HTTP content for {@code ]}
*/
public static HttpContent prefixComma(HttpContent httpContent) {
CompositeByteBuf compositeByteBuf = Unpooled.compositeBuffer(2);
compositeByteBuf.addComponent(true, Unpooled.wrappedBuffer(COMMA));
compositeByteBuf.addComponent(true, httpContent.content());
return httpContent.replace(
compositeByteBuf
);
}
/**
* @param httpContent The http content to prefix
* @return Produces HTTP content prefixed by an open bracket
*/
public static HttpContent prefixOpenBracket(HttpContent httpContent) {
CompositeByteBuf compositeByteBuf = Unpooled.compositeBuffer(2);
compositeByteBuf.addComponent(true, Unpooled.wrappedBuffer(OPEN_BRACKET));
compositeByteBuf.addComponent(true, httpContent.content());
return httpContent.replace(compositeByteBuf);
}
}
| HttpContentUtil |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/AbstractRedisReactiveCommands.java | {
"start": 4231,
"end": 137953
} | class ____<K, V>
implements RedisAclReactiveCommands<K, V>, RedisHashReactiveCommands<K, V>, RedisKeyReactiveCommands<K, V>,
RedisStringReactiveCommands<K, V>, RedisListReactiveCommands<K, V>, RedisSetReactiveCommands<K, V>,
RedisSortedSetReactiveCommands<K, V>, RedisScriptingReactiveCommands<K, V>, RedisServerReactiveCommands<K, V>,
RedisHLLReactiveCommands<K, V>, BaseRedisReactiveCommands<K, V>, RedisTransactionalReactiveCommands<K, V>,
RedisGeoReactiveCommands<K, V>, RedisClusterReactiveCommands<K, V>, RedisJsonReactiveCommands<K, V>,
RedisVectorSetReactiveCommands<K, V>, RediSearchReactiveCommands<K, V> {
private final StatefulConnection<K, V> connection;
private final RedisCommandBuilder<K, V> commandBuilder;
private final RedisJsonCommandBuilder<K, V> jsonCommandBuilder;
private final RediSearchCommandBuilder<K, V> searchCommandBuilder;
private final RedisVectorSetCommandBuilder<K, V> vectorSetCommandBuilder;
private final Supplier<JsonParser> parser;
private final ClientResources clientResources;
private final boolean tracingEnabled;
private volatile EventExecutorGroup scheduler;
/**
* Initialize a new instance.
*
* @param connection the connection to operate on.
* @param codec the codec for command encoding.
* @param parser the implementation of the {@link JsonParser} to use
*/
public AbstractRedisReactiveCommands(StatefulConnection<K, V> connection, RedisCodec<K, V> codec,
Supplier<JsonParser> parser) {
this.connection = connection;
this.parser = parser;
this.commandBuilder = new RedisCommandBuilder<>(codec);
this.jsonCommandBuilder = new RedisJsonCommandBuilder<>(codec, parser);
this.vectorSetCommandBuilder = new RedisVectorSetCommandBuilder<>(codec, parser);
this.searchCommandBuilder = new RediSearchCommandBuilder<>(codec);
this.clientResources = connection.getResources();
this.tracingEnabled = clientResources.tracing().isEnabled();
}
/**
* Initialize a new instance.
*
* @param connection the connection to operate on.
* @param codec the codec for command encoding.
*/
public AbstractRedisReactiveCommands(StatefulConnection<K, V> connection, RedisCodec<K, V> codec) {
this(connection, codec, DEFAULT_JSON_PARSER);
}
private EventExecutorGroup getScheduler() {
EventExecutorGroup scheduler = this.scheduler;
if (scheduler != null) {
return scheduler;
}
EventExecutorGroup schedulerToUse = ImmediateEventExecutor.INSTANCE;
if (connection.getOptions().isPublishOnScheduler()) {
schedulerToUse = connection.getResources().eventExecutorGroup();
}
return this.scheduler = schedulerToUse;
}
@Override
public JsonParser getJsonParser() {
return parser.get();
}
@Override
public Mono<Set<AclCategory>> aclCat() {
return createMono(commandBuilder::aclCat);
}
@Override
public Mono<Set<CommandType>> aclCat(AclCategory category) {
return createMono(() -> commandBuilder.aclCat(category));
}
@Override
public Mono<Long> aclDeluser(String... usernames) {
return createMono(() -> commandBuilder.aclDeluser(usernames));
}
@Override
public Mono<String> aclDryRun(String username, String command, String... args) {
return createMono(() -> commandBuilder.aclDryRun(username, command, args));
}
@Override
public Mono<String> aclDryRun(String username, RedisCommand<K, V, ?> command) {
return createMono(() -> commandBuilder.aclDryRun(username, command));
}
@Override
public Mono<String> aclGenpass() {
return createMono(commandBuilder::aclGenpass);
}
@Override
public Mono<String> aclGenpass(int bits) {
return createMono(() -> commandBuilder.aclGenpass(bits));
}
@Override
public Mono<List<Object>> aclGetuser(String username) {
return createMono(() -> commandBuilder.aclGetuser(username));
}
@Override
public Flux<String> aclList() {
return createDissolvingFlux(commandBuilder::aclList);
}
@Override
public Mono<String> aclLoad() {
return createMono(commandBuilder::aclLoad);
}
@Override
public Flux<Map<String, Object>> aclLog() {
return createDissolvingFlux(commandBuilder::aclLog);
}
@Override
public Flux<Map<String, Object>> aclLog(int count) {
return createDissolvingFlux(() -> commandBuilder.aclLog(count));
}
@Override
public Mono<String> aclLogReset() {
return createMono(commandBuilder::aclLogReset);
}
@Override
public Mono<String> aclSave() {
return createMono(commandBuilder::aclSave);
}
@Override
public Mono<String> aclSetuser(String username, AclSetuserArgs args) {
return createMono(() -> commandBuilder.aclSetuser(username, args));
}
@Override
public Flux<String> aclUsers() {
return createDissolvingFlux(commandBuilder::aclUsers);
}
@Override
public Mono<String> aclWhoami() {
return createMono(commandBuilder::aclWhoami);
}
@Override
public Mono<Long> append(K key, V value) {
return createMono(() -> commandBuilder.append(key, value));
}
@Override
public Mono<String> asking() {
return createMono(commandBuilder::asking);
}
@Override
public Mono<String> auth(CharSequence password) {
return createMono(() -> commandBuilder.auth(password));
}
@Override
public Mono<String> auth(String username, CharSequence password) {
return createMono(() -> commandBuilder.auth(username, password));
}
@Override
public Mono<String> bgrewriteaof() {
return createMono(commandBuilder::bgrewriteaof);
}
@Override
public Mono<String> bgsave() {
return createMono(commandBuilder::bgsave);
}
@Override
public Mono<Long> bitcount(K key) {
return createMono(() -> commandBuilder.bitcount(key));
}
@Override
public Mono<Long> bitcount(K key, long start, long end) {
return createMono(() -> commandBuilder.bitcount(key, start, end));
}
@Override
public Flux<Value<Long>> bitfield(K key, BitFieldArgs args) {
return createDissolvingFlux(() -> commandBuilder.bitfieldValue(key, args));
}
@Override
public Mono<Long> bitopAnd(K destination, K... keys) {
return createMono(() -> commandBuilder.bitopAnd(destination, keys));
}
@Override
public Mono<Long> bitopNot(K destination, K source) {
return createMono(() -> commandBuilder.bitopNot(destination, source));
}
@Override
public Mono<Long> bitopOr(K destination, K... keys) {
return createMono(() -> commandBuilder.bitopOr(destination, keys));
}
@Override
public Mono<Long> bitopXor(K destination, K... keys) {
return createMono(() -> commandBuilder.bitopXor(destination, keys));
}
@Override
public Mono<Long> bitopDiff(K destination, K sourceKey, K... keys) {
    // NOTE(review): BITOP DIFF/DIFF1/ANDOR/ONE are extended bit operations
    // (beyond the classic AND/OR/XOR/NOT); presumably they require a server
    // version that supports them — confirm against the target Redis release.
    return createMono(() -> commandBuilder.bitopDiff(destination, sourceKey, keys));
}

@Override
public Mono<Long> bitopDiff1(K destination, K sourceKey, K... keys) {
    return createMono(() -> commandBuilder.bitopDiff1(destination, sourceKey, keys));
}

@Override
public Mono<Long> bitopAndor(K destination, K sourceKey, K... keys) {
    return createMono(() -> commandBuilder.bitopAndor(destination, sourceKey, keys));
}

@Override
public Mono<Long> bitopOne(K destination, K... keys) {
    return createMono(() -> commandBuilder.bitopOne(destination, keys));
}
@Override
public Mono<Long> bitpos(K key, boolean state) {
return createMono(() -> commandBuilder.bitpos(key, state));
}
@Override
public Mono<Long> bitpos(K key, boolean state, long start) {
return createMono(() -> commandBuilder.bitpos(key, state, start));
}
@Override
public Mono<Long> bitpos(K key, boolean state, long start, long end) {
return createMono(() -> commandBuilder.bitpos(key, state, start, end));
}
@Override
public Mono<V> blmove(K source, K destination, LMoveArgs args, long timeout) {
return createMono(() -> commandBuilder.blmove(source, destination, args, timeout));
}
@Override
public Mono<V> blmove(K source, K destination, LMoveArgs args, double timeout) {
return createMono(() -> commandBuilder.blmove(source, destination, args, timeout));
}
@Override
public Mono<KeyValue<K, List<V>>> blmpop(long timeout, LMPopArgs args, K... keys) {
return createMono(() -> commandBuilder.blmpop(timeout, args, keys));
}
@Override
public Mono<KeyValue<K, List<V>>> blmpop(double timeout, LMPopArgs args, K... keys) {
return createMono(() -> commandBuilder.blmpop(timeout, args, keys));
}
@Override
public Mono<KeyValue<K, V>> blpop(long timeout, K... keys) {
return createMono(() -> commandBuilder.blpop(timeout, keys));
}
@Override
public Mono<KeyValue<K, V>> blpop(double timeout, K... keys) {
return createMono(() -> commandBuilder.blpop(timeout, keys));
}
@Override
public Mono<KeyValue<K, V>> brpop(long timeout, K... keys) {
return createMono(() -> commandBuilder.brpop(timeout, keys));
}
@Override
public Mono<KeyValue<K, V>> brpop(double timeout, K... keys) {
return createMono(() -> commandBuilder.brpop(timeout, keys));
}
@Override
public Mono<V> brpoplpush(long timeout, K source, K destination) {
return createMono(() -> commandBuilder.brpoplpush(timeout, source, destination));
}
@Override
public Mono<V> brpoplpush(double timeout, K source, K destination) {
return createMono(() -> commandBuilder.brpoplpush(timeout, source, destination));
}
@Override
public Mono<String> clientCaching(boolean enabled) {
return createMono(() -> commandBuilder.clientCaching(enabled));
}
@Override
public Mono<K> clientGetname() {
return createMono(commandBuilder::clientGetname);
}
@Override
public Mono<Long> clientGetredir() {
return createMono(commandBuilder::clientGetredir);
}
@Override
public Mono<String> clientKill(String addr) {
return createMono(() -> commandBuilder.clientKill(addr));
}
@Override
public Mono<Long> clientKill(KillArgs killArgs) {
return createMono(() -> commandBuilder.clientKill(killArgs));
}
@Override
public Mono<String> clientList() {
return createMono(commandBuilder::clientList);
}
@Override
public Mono<String> clientList(ClientListArgs clientListArgs) {
return createMono(() -> commandBuilder.clientList(clientListArgs));
}
@Override
public Mono<String> clientInfo() {
return createMono(commandBuilder::clientInfo);
}
@Override
public Mono<String> clientNoEvict(boolean on) {
return createMono(() -> commandBuilder.clientNoEvict(on));
}
@Override
public Mono<Long> clientId() {
return createMono(commandBuilder::clientId);
}
@Override
public Mono<String> clientPause(long timeout) {
return createMono(() -> commandBuilder.clientPause(timeout));
}
@Override
public Mono<String> clientSetname(K name) {
return createMono(() -> commandBuilder.clientSetname(name));
}
@Override
public Mono<String> clientSetinfo(String key, String value) {
return createMono(() -> commandBuilder.clientSetinfo(key, value));
}
@Override
public Mono<String> clientTracking(TrackingArgs args) {
return createMono(() -> commandBuilder.clientTracking(args));
}
@Override
public Mono<TrackingInfo> clientTrackinginfo() {
return createMono(commandBuilder::clientTrackinginfo);
}
@Override
public Mono<Long> clientUnblock(long id, UnblockType type) {
return createMono(() -> commandBuilder.clientUnblock(id, type));
}
@Override
public Mono<String> clusterAddSlots(int... slots) {
return createMono(() -> commandBuilder.clusterAddslots(slots));
}
@Override
public Mono<String> clusterAddSlotsRange(Range<Integer>... ranges) {
return createMono(() -> commandBuilder.clusterAddSlotsRange(ranges));
}
@Override
public Mono<String> clusterBumpepoch() {
return createMono(() -> commandBuilder.clusterBumpepoch());
}
@Override
public Mono<Long> clusterCountFailureReports(String nodeId) {
return createMono(() -> commandBuilder.clusterCountFailureReports(nodeId));
}
@Override
public Mono<Long> clusterCountKeysInSlot(int slot) {
return createMono(() -> commandBuilder.clusterCountKeysInSlot(slot));
}
@Override
public Mono<String> clusterDelSlots(int... slots) {
return createMono(() -> commandBuilder.clusterDelslots(slots));
}
@Override
public Mono<String> clusterDelSlotsRange(Range<Integer>... ranges) {
return createMono(() -> commandBuilder.clusterDelSlotsRange(ranges));
}
@Override
public Mono<String> clusterFailover(boolean force) {
return createMono(() -> commandBuilder.clusterFailover(force));
}
@Override
public Mono<String> clusterFailover(boolean force, boolean takeOver) {
return createMono(() -> commandBuilder.clusterFailover(force, takeOver));
}
@Override
public Mono<String> clusterFlushslots() {
return createMono(commandBuilder::clusterFlushslots);
}
@Override
public Mono<String> clusterForget(String nodeId) {
return createMono(() -> commandBuilder.clusterForget(nodeId));
}
@Override
public Flux<K> clusterGetKeysInSlot(int slot, int count) {
return createDissolvingFlux(() -> commandBuilder.clusterGetKeysInSlot(slot, count));
}
@Override
public Mono<String> clusterInfo() {
return createMono(commandBuilder::clusterInfo);
}
@Override
public Mono<Long> clusterKeyslot(K key) {
return createMono(() -> commandBuilder.clusterKeyslot(key));
}
@Override
public Mono<String> clusterMeet(String ip, int port) {
return createMono(() -> commandBuilder.clusterMeet(ip, port));
}
@Override
public Mono<String> clusterMyId() {
return createMono(commandBuilder::clusterMyId);
}
@Override
public Mono<String> clusterMyShardId() {
return createMono(commandBuilder::clusterMyShardId);
}
@Override
public Mono<String> clusterNodes() {
return createMono(commandBuilder::clusterNodes);
}
@Override
public Mono<String> clusterReplicate(String nodeId) {
return createMono(() -> commandBuilder.clusterReplicate(nodeId));
}
@Override
public Flux<String> clusterReplicas(String nodeId) {
return createDissolvingFlux(() -> commandBuilder.clusterReplicas(nodeId));
}
@Override
public Mono<String> clusterReset(boolean hard) {
return createMono(() -> commandBuilder.clusterReset(hard));
}
@Override
public Mono<String> clusterSaveconfig() {
return createMono(() -> commandBuilder.clusterSaveconfig());
}
@Override
public Mono<String> clusterSetConfigEpoch(long configEpoch) {
return createMono(() -> commandBuilder.clusterSetConfigEpoch(configEpoch));
}
@Override
public Mono<String> clusterSetSlotImporting(int slot, String nodeId) {
return createMono(() -> commandBuilder.clusterSetSlotImporting(slot, nodeId));
}
@Override
public Mono<String> clusterSetSlotMigrating(int slot, String nodeId) {
return createMono(() -> commandBuilder.clusterSetSlotMigrating(slot, nodeId));
}
@Override
public Mono<String> clusterSetSlotNode(int slot, String nodeId) {
return createMono(() -> commandBuilder.clusterSetSlotNode(slot, nodeId));
}
@Override
public Mono<String> clusterSetSlotStable(int slot) {
return createMono(() -> commandBuilder.clusterSetSlotStable(slot));
}
@Override
public Mono<List<Object>> clusterShards() {
return createMono(() -> commandBuilder.clusterShards());
}
@Override
public Flux<String> clusterSlaves(String nodeId) {
return createDissolvingFlux(() -> commandBuilder.clusterSlaves(nodeId));
}
@Override
public Flux<Object> clusterSlots() {
return createDissolvingFlux(commandBuilder::clusterSlots);
}
@Override
public Flux<Object> command() {
return createDissolvingFlux(commandBuilder::command);
}
@Override
public Mono<Long> commandCount() {
return createMono(commandBuilder::commandCount);
}
@Override
public Flux<Object> commandInfo(String... commands) {
return createDissolvingFlux(() -> commandBuilder.commandInfo(commands));
}
/**
 * Variant of {@code COMMAND INFO} accepting enum constants: each
 * {@link CommandType} is translated to its textual command name and the
 * String-based overload does the actual dispatch.
 */
@Override
public Flux<Object> commandInfo(CommandType... commands) {
    String[] names = new String[commands.length];
    int index = 0;
    for (CommandType command : commands) {
        names[index++] = command.name();
    }
    return commandInfo(names);
}
@Override
public Mono<Map<String, String>> configGet(String parameter) {
return createMono(() -> commandBuilder.configGet(parameter));
}
@Override
public Mono<Map<String, String>> configGet(String... parameters) {
return createMono(() -> commandBuilder.configGet(parameters));
}
@Override
public Mono<String> configResetstat() {
return createMono(commandBuilder::configResetstat);
}
@Override
public Mono<String> configRewrite() {
return createMono(commandBuilder::configRewrite);
}
@Override
public Mono<String> configSet(String parameter, String value) {
return createMono(() -> commandBuilder.configSet(parameter, value));
}
@Override
public Mono<String> configSet(Map<String, String> kvs) {
return createMono(() -> commandBuilder.configSet(kvs));
}
// Creates a Flux that "dissolves" the command result: a command emitting a
// collection is flattened so each element is published individually. The
// unchecked cast re-types the element from T to the caller-expected R.
@SuppressWarnings("unchecked")
public <T, R> Flux<R> createDissolvingFlux(Supplier<RedisCommand<K, V, T>> commandSupplier) {
    return (Flux<R>) createFlux(commandSupplier, true);
}
// Creates a Flux executing the supplied command per subscription, emitting
// the command output as-is (no dissolving of collection results).
public <T> Flux<T> createFlux(Supplier<RedisCommand<K, V, T>> commandSupplier) {
    return createFlux(commandSupplier, false);
}
// Core Flux factory. When tracing is enabled, the trace context is resolved
// first and the command is wrapped (see decorate) so the publisher created
// inside flatMapMany carries it; otherwise the command is dispatched directly.
// A fresh scheduler worker is picked per publisher via getScheduler().next().
private <T> Flux<T> createFlux(Supplier<RedisCommand<K, V, T>> commandSupplier, boolean dissolve) {
    if (tracingEnabled) {
        return withTraceContext().flatMapMany(it -> Flux
                .from(new RedisPublisher<>(decorate(commandSupplier, it), connection, dissolve, getScheduler().next())));
    }
    return Flux.from(new RedisPublisher<>(commandSupplier, connection, dissolve, getScheduler().next()));
}
// Resolves the TraceContext to attach to a command: prefer a context already
// present (Tracing.getContext()), fall back to the client's initial trace
// context provider, and finally to TraceContext.EMPTY so the Mono never
// completes empty.
private Mono<TraceContext> withTraceContext() {
    return Tracing.getContext()
            .switchIfEmpty(Mono.fromSupplier(() -> clientResources.tracing().initialTraceContextProvider()))
            .flatMap(TraceContextProvider::getTraceContextLater).defaultIfEmpty(TraceContext.EMPTY);
}
// Convenience overload: assembles a Command from its type, output and args
// and delegates to the Supplier-based factory.
protected <T> Mono<T> createMono(CommandType type, CommandOutput<K, V, T> output, CommandArgs<K, V> args) {
    return createMono(() -> new Command<>(type, output, args));
}
// Core Mono factory: executes the supplied command per subscription and emits
// at most one element (dissolve=false). Mirrors createFlux, including the
// tracing wrap when tracingEnabled is set.
public <T> Mono<T> createMono(Supplier<RedisCommand<K, V, T>> commandSupplier) {
    if (tracingEnabled) {
        return withTraceContext().flatMap(it -> Mono
                .from(new RedisPublisher<>(decorate(commandSupplier, it), connection, false, getScheduler().next())));
    }
    return Mono.from(new RedisPublisher<>(commandSupplier, connection, false, getScheduler().next()));
}
// Wraps the supplied command in a TracedCommand carrying the resolved trace
// context, so the Tracing SPI can observe the command's lifecycle. The wrap
// happens lazily, at the time the returned supplier is invoked.
private <T> Supplier<RedisCommand<K, V, T>> decorate(Supplier<RedisCommand<K, V, T>> commandSupplier,
        TraceContext traceContext) {
    return () -> new TracedCommand<>(commandSupplier.get(), traceContext);
}
@Override
public Mono<Long> dbsize() {
return createMono(commandBuilder::dbsize);
}
@Override
public Mono<String> debugCrashAndRecover(Long delay) {
return createMono(() -> (commandBuilder.debugCrashAndRecover(delay)));
}
@Override
public Mono<String> debugHtstats(int db) {
return createMono(() -> commandBuilder.debugHtstats(db));
}
@Override
public Mono<String> debugObject(K key) {
return createMono(() -> commandBuilder.debugObject(key));
}
@Override
public Mono<Void> debugOom() {
return createMono(commandBuilder::debugOom).then();
}
@Override
public Mono<String> debugReload() {
return createMono(() -> (commandBuilder.debugReload()));
}
@Override
public Mono<String> debugRestart(Long delay) {
return createMono(() -> (commandBuilder.debugRestart(delay)));
}
@Override
public Mono<String> debugSdslen(K key) {
return createMono(() -> (commandBuilder.debugSdslen(key)));
}
@Override
public Mono<Void> debugSegfault() {
    // DEBUG SEGFAULT crashes the server, so no reply is expected; the command
    // completes empty. Use createMono(...).then() for consistency with
    // debugOom() above — a single (never-arriving) status reply needs no Flux.
    return createMono(commandBuilder::debugSegfault).then();
}
@Override
public Mono<Long> decr(K key) {
return createMono(() -> commandBuilder.decr(key));
}
@Override
public Mono<Long> decrby(K key, long amount) {
return createMono(() -> commandBuilder.decrby(key, amount));
}
@Override
public Mono<Long> del(K... keys) {
return createMono(() -> commandBuilder.del(keys));
}
public Mono<Long> del(Iterable<K> keys) {
return createMono(() -> commandBuilder.del(keys));
}
@Override
public Mono<Long> delex(K key, CompareCondition<V> condition) {
return createMono(() -> commandBuilder.delex(key, condition));
}
@Override
public String digest(String script) {
    // Encode the script text to bytes first (via encodeScript), then hash.
    return digest(encodeScript(script));
}

@Override
public String digest(byte[] script) {
    // SHA1 digest rendered as a hex string — the handle used by EVALSHA.
    return Base16.digest(script);
}
@Override
public Mono<String> discard() {
return createMono(commandBuilder::discard);
}
@SuppressWarnings({ "unchecked", "rawtypes" })
@Override
public <T> Flux<T> dispatch(ProtocolKeyword type, CommandOutput<K, V, ?> output) {
LettuceAssert.notNull(type, "Command type must not be null");
LettuceAssert.notNull(output, "CommandOutput type must not be null");
return (Flux) createFlux(() -> new Command<>(type, output));
}
@SuppressWarnings({ "unchecked", "rawtypes" })
@Override
public <T> Flux<T> dispatch(ProtocolKeyword type, CommandOutput<K, V, ?> output, CommandArgs<K, V> args) {
LettuceAssert.notNull(type, "Command type must not be null");
LettuceAssert.notNull(output, "CommandOutput type must not be null");
LettuceAssert.notNull(args, "CommandArgs type must not be null");
return (Flux) createFlux(() -> new Command<>(type, output, args));
}
@Override
public Mono<byte[]> dump(K key) {
return createMono(() -> commandBuilder.dump(key));
}
@Override
public Mono<V> echo(V msg) {
return createMono(() -> commandBuilder.echo(msg));
}
@Override
@SuppressWarnings("unchecked")
public <T> Flux<T> eval(String script, ScriptOutputType type, K... keys) {
return eval(encodeScript(script), type, keys);
}
@Override
@SuppressWarnings("unchecked")
public <T> Flux<T> eval(byte[] script, ScriptOutputType type, K... keys) {
return createFlux(() -> commandBuilder.eval(script, type, keys));
}
@Override
@SuppressWarnings("unchecked")
public <T> Flux<T> eval(String script, ScriptOutputType type, K[] keys, V... values) {
return eval(encodeScript(script), type, keys, values);
}
@Override
@SuppressWarnings("unchecked")
public <T> Flux<T> eval(byte[] script, ScriptOutputType type, K[] keys, V... values) {
return createFlux(() -> commandBuilder.eval(script, type, keys, values));
}
@Override
@SuppressWarnings("unchecked")
public <T> Flux<T> evalReadOnly(String script, ScriptOutputType type, K[] keys, V... values) {
return evalReadOnly(encodeScript(script), type, keys, values);
}
@Override
public <T> Flux<T> evalReadOnly(byte[] script, ScriptOutputType type, K[] keys, V... values) {
return createFlux(() -> commandBuilder.eval(script, type, true, keys, values));
}
@Override
@SuppressWarnings("unchecked")
public <T> Flux<T> evalsha(String digest, ScriptOutputType type, K... keys) {
return createFlux(() -> commandBuilder.evalsha(digest, type, keys));
}
@Override
@SuppressWarnings("unchecked")
public <T> Flux<T> evalsha(String digest, ScriptOutputType type, K[] keys, V... values) {
return createFlux(() -> commandBuilder.evalsha(digest, type, keys, values));
}
@Override
public <T> Flux<T> evalshaReadOnly(String digest, ScriptOutputType type, K[] keys, V... values) {
return createFlux(() -> commandBuilder.evalsha(digest, type, true, keys, values));
}
@Override
public Mono<TransactionResult> exec() {
    // NOTE(review): output and args are deliberately null here — presumably
    // the transaction (MULTI/EXEC) machinery in the connection substitutes the
    // real command state; confirm before changing this call.
    return createMono(EXEC, null, null);
}
public Mono<Boolean> exists(K key) {
return createMono(() -> commandBuilder.exists(key));
}
@Override
public Mono<Long> exists(K... keys) {
return createMono(() -> commandBuilder.exists(keys));
}
public Mono<Long> exists(Iterable<K> keys) {
return createMono(() -> commandBuilder.exists(keys));
}
@Override
public Mono<Boolean> expire(K key, long seconds) {
return expire(key, seconds, null);
}
@Override
public Mono<Boolean> expire(K key, long seconds, ExpireArgs expireArgs) {
return createMono(() -> commandBuilder.expire(key, seconds, expireArgs));
}
@Override
public Mono<Boolean> expire(K key, Duration seconds) {
return expire(key, seconds, null);
}
@Override
public Mono<Boolean> expire(K key, Duration seconds, ExpireArgs expireArgs) {
    LettuceAssert.notNull(seconds, "Timeout must not be null");
    // NOTE(review): integer division truncates sub-second precision; a
    // duration below one second becomes 0 seconds here.
    return expire(key, seconds.toMillis() / 1000, expireArgs);
}
@Override
public Flux<Long> hexpire(K key, long seconds, K... fields) {
return hexpire(key, seconds, null, fields);
}
@Override
public Flux<Long> hexpire(K key, long seconds, ExpireArgs expireArgs, K... fields) {
return createDissolvingFlux(() -> commandBuilder.hexpire(key, seconds, expireArgs, fields));
}
@Override
public Flux<Long> hexpire(K key, Duration seconds, K... fields) {
return hexpire(key, seconds, null, fields);
}
@Override
public Flux<Long> hexpire(K key, Duration seconds, ExpireArgs expireArgs, K... fields) {
    LettuceAssert.notNull(seconds, "Timeout must not be null");
    // NOTE(review): same truncation as expire(Duration) — durations below one
    // second collapse to 0 via integer division.
    return hexpire(key, seconds.toMillis() / 1000, expireArgs, fields);
}
@Override
public Mono<Boolean> expireat(K key, long timestamp) {
return expireat(key, timestamp, null);
}
@Override
public Mono<Boolean> expireat(K key, long timestamp, ExpireArgs expireArgs) {
return createMono(() -> commandBuilder.expireat(key, timestamp, expireArgs));
}
@Override
public Mono<Boolean> expireat(K key, Date timestamp) {
return expireat(key, timestamp, null);
}
@Override
public Mono<Boolean> expireat(K key, Date timestamp, ExpireArgs expireArgs) {
LettuceAssert.notNull(timestamp, "Timestamp must not be null");
return expireat(key, timestamp.getTime() / 1000, expireArgs);
}
@Override
public Mono<Boolean> expireat(K key, Instant timestamp) {
return expireat(key, timestamp, null);
}
@Override
public Mono<Boolean> expireat(K key, Instant timestamp, ExpireArgs expireArgs) {
LettuceAssert.notNull(timestamp, "Timestamp must not be null");
return expireat(key, timestamp.toEpochMilli() / 1000, expireArgs);
}
@Override
public Flux<Long> hexpireat(K key, long timestamp, K... fields) {
return hexpireat(key, timestamp, null, fields);
}
@Override
public Flux<Long> hexpireat(K key, long timestamp, ExpireArgs expireArgs, K... fields) {
return createDissolvingFlux(() -> commandBuilder.hexpireat(key, timestamp, expireArgs, fields));
}
@Override
public Flux<Long> hexpireat(K key, Date timestamp, K... fields) {
return hexpireat(key, timestamp, null, fields);
}
@Override
public Flux<Long> hexpireat(K key, Date timestamp, ExpireArgs expireArgs, K... fields) {
LettuceAssert.notNull(timestamp, "Timestamp must not be null");
return hexpireat(key, timestamp.getTime() / 1000, expireArgs, fields);
}
@Override
public Flux<Long> hexpireat(K key, Instant timestamp, K... fields) {
return hexpireat(key, timestamp, null, fields);
}
@Override
public Flux<Long> hexpireat(K key, Instant timestamp, ExpireArgs expireArgs, K... fields) {
LettuceAssert.notNull(timestamp, "Timestamp must not be null");
return hexpireat(key, timestamp.toEpochMilli() / 1000, expireArgs, fields);
}
@Override
public Mono<Long> expiretime(K key) {
return createMono(() -> commandBuilder.expiretime(key));
}
@Override
public Flux<Long> hexpiretime(K key, K... fields) {
return createDissolvingFlux(() -> commandBuilder.hexpiretime(key, fields));
}
@Override
public Flux<Long> httl(K key, K... fields) {
return createDissolvingFlux(() -> commandBuilder.httl(key, fields));
}
@Override
public Flux<Long> hpexpire(K key, long milliseconds, K... fields) {
return hpexpire(key, milliseconds, null, fields);
}
@Override
public Flux<Long> hpexpire(K key, long milliseconds, ExpireArgs expireArgs, K... fields) {
return createDissolvingFlux(() -> commandBuilder.hpexpire(key, milliseconds, expireArgs, fields));
}
@Override
public Flux<Long> hpexpire(K key, Duration milliseconds, K... fields) {
return hpexpire(key, milliseconds, null, fields);
}
@Override
public Flux<Long> hpexpire(K key, Duration milliseconds, ExpireArgs expireArgs, K... fields) {
LettuceAssert.notNull(milliseconds, "Timeout must not be null");
return hpexpire(key, milliseconds.toMillis(), expireArgs, fields);
}
@Override
public Flux<Long> hpexpireat(K key, Date timestamp, K... fields) {
return hpexpireat(key, timestamp, null, fields);
}
@Override
public Flux<Long> hpexpireat(K key, Date timestamp, ExpireArgs expireArgs, K... fields) {
LettuceAssert.notNull(timestamp, "Timestamp must not be null");
return hpexpireat(key, timestamp.getTime(), expireArgs, fields);
}
@Override
public Flux<Long> hpexpireat(K key, Instant timestamp, K... fields) {
return hpexpireat(key, timestamp, null, fields);
}
@Override
public Flux<Long> hpexpireat(K key, Instant timestamp, ExpireArgs expireArgs, K... fields) {
LettuceAssert.notNull(timestamp, "Timestamp must not be null");
return hpexpireat(key, timestamp.toEpochMilli(), expireArgs, fields);
}
@Override
public Flux<Long> hpexpireat(K key, long timestamp, K... fields) {
return hpexpireat(key, timestamp, null, fields);
}
@Override
public Flux<Long> hpexpireat(K key, long timestamp, ExpireArgs expireArgs, K... fields) {
return createDissolvingFlux(() -> commandBuilder.hpexpireat(key, timestamp, expireArgs, fields));
}
@Override
public Flux<Long> hpexpiretime(K key, K... fields) {
return createDissolvingFlux(() -> commandBuilder.hpexpiretime(key, fields));
}
@Override
public Flux<Long> hpttl(K key, K... fields) {
return createDissolvingFlux(() -> commandBuilder.hpttl(key, fields));
}
@Override
public <T> Flux<T> fcall(String function, ScriptOutputType type, K... keys) {
return createFlux(() -> commandBuilder.fcall(function, type, false, keys));
}
@Override
public <T> Flux<T> fcall(String function, ScriptOutputType type, K[] keys, V... values) {
return createFlux(() -> commandBuilder.fcall(function, type, false, keys, values));
}
@Override
public <T> Flux<T> fcallReadOnly(String function, ScriptOutputType type, K... keys) {
return createFlux(() -> commandBuilder.fcall(function, type, true, keys));
}
@Override
public <T> Flux<T> fcallReadOnly(String function, ScriptOutputType type, K[] keys, V... values) {
return createFlux(() -> commandBuilder.fcall(function, type, true, keys, values));
}
@Override
public Mono<String> functionLoad(String functionCode) {
return functionLoad(functionCode, false);
}
@Override
public Mono<String> functionLoad(String functionCode, boolean replace) {
return createMono(() -> commandBuilder.functionLoad(encodeScript(functionCode), replace));
}
@Override
public Mono<byte[]> functionDump() {
return createMono(commandBuilder::functionDump);
}
@Override
public Mono<String> functionRestore(byte[] dump) {
return createMono(() -> commandBuilder.functionRestore(dump, null));
}
@Override
public Mono<String> functionRestore(byte[] dump, FunctionRestoreMode mode) {
return createMono(() -> commandBuilder.functionRestore(dump, mode));
}
@Override
public Mono<String> functionFlush(FlushMode flushMode) {
return createMono(() -> commandBuilder.functionFlush(flushMode));
}
@Override
public Mono<String> functionKill() {
return createMono(commandBuilder::functionKill);
}
@Override
public Flux<Map<String, Object>> functionList() {
return createDissolvingFlux(() -> commandBuilder.functionList(null));
}
@Override
public Flux<Map<String, Object>> functionList(String libraryName) {
return createDissolvingFlux(() -> commandBuilder.functionList(libraryName));
}
@Override
public Mono<String> flushall() {
return createMono(commandBuilder::flushall);
}
@Override
public Mono<String> flushall(FlushMode flushMode) {
return createMono(() -> commandBuilder.flushall(flushMode));
}
@Override
public Mono<String> flushallAsync() {
return flushall(FlushMode.ASYNC);
}
@Override
public Mono<String> flushdb() {
return createMono(commandBuilder::flushdb);
}
@Override
public Mono<String> flushdb(FlushMode flushMode) {
return createMono(() -> commandBuilder.flushdb(flushMode));
}
@Override
public Mono<String> flushdbAsync() {
return flushdb(FlushMode.ASYNC);
}
@Override
public Mono<Long> geoadd(K key, double longitude, double latitude, V member) {
return geoadd(key, longitude, latitude, member, null);
}
@Override
public Mono<Long> geoadd(K key, double longitude, double latitude, V member, GeoAddArgs args) {
return createMono(() -> commandBuilder.geoadd(key, longitude, latitude, member, args));
}
@Override
public Mono<Long> geoadd(K key, Object... lngLatMember) {
return createMono(() -> commandBuilder.geoadd(key, lngLatMember, null));
}
@Override
public Mono<Long> geoadd(K key, GeoValue<V>... values) {
return createMono(() -> commandBuilder.geoadd(key, values, null));
}
@Override
public Mono<Long> geoadd(K key, GeoAddArgs args, Object... lngLatMember) {
return createMono(() -> commandBuilder.geoadd(key, lngLatMember, args));
}
@Override
public Mono<Long> geoadd(K key, GeoAddArgs args, GeoValue<V>... values) {
return createMono(() -> commandBuilder.geoadd(key, values, args));
}
@Override
public Mono<Double> geodist(K key, V from, V to, Unit unit) {
return createMono(() -> commandBuilder.geodist(key, from, to, unit));
}
@Override
public Flux<Value<String>> geohash(K key, V... members) {
return createDissolvingFlux(() -> commandBuilder.geohash(key, members));
}
@Override
public Flux<Value<GeoCoordinates>> geopos(K key, V... members) {
return createDissolvingFlux(() -> commandBuilder.geoposValues(key, members));
}
@Override
public Flux<V> georadius(K key, double longitude, double latitude, double distance, GeoArgs.Unit unit) {
return georadius_ro(key, longitude, latitude, distance, unit);
}
@Override
public Flux<GeoWithin<V>> georadius(K key, double longitude, double latitude, double distance, GeoArgs.Unit unit,
GeoArgs geoArgs) {
return georadius_ro(key, longitude, latitude, distance, unit, geoArgs);
}
@Override
public Mono<Long> georadius(K key, double longitude, double latitude, double distance, Unit unit,
GeoRadiusStoreArgs<K> geoRadiusStoreArgs) {
return createMono(() -> commandBuilder.georadius(key, longitude, latitude, distance, unit.name(), geoRadiusStoreArgs));
}
protected Flux<V> georadius_ro(K key, double longitude, double latitude, double distance, Unit unit) {
return createDissolvingFlux(
() -> commandBuilder.georadius(GEORADIUS_RO, key, longitude, latitude, distance, unit.name()));
}
protected Flux<GeoWithin<V>> georadius_ro(K key, double longitude, double latitude, double distance, Unit unit,
GeoArgs geoArgs) {
return createDissolvingFlux(
() -> commandBuilder.georadius(GEORADIUS_RO, key, longitude, latitude, distance, unit.name(), geoArgs));
}
@Override
public Flux<V> georadiusbymember(K key, V member, double distance, GeoArgs.Unit unit) {
return georadiusbymember_ro(key, member, distance, unit);
}
@Override
public Flux<GeoWithin<V>> georadiusbymember(K key, V member, double distance, GeoArgs.Unit unit, GeoArgs geoArgs) {
return georadiusbymember_ro(key, member, distance, unit, geoArgs);
}
@Override
public Mono<Long> georadiusbymember(K key, V member, double distance, Unit unit, GeoRadiusStoreArgs<K> geoRadiusStoreArgs) {
return createMono(() -> commandBuilder.georadiusbymember(key, member, distance, unit.name(), geoRadiusStoreArgs));
}
protected Flux<V> georadiusbymember_ro(K key, V member, double distance, Unit unit) {
return createDissolvingFlux(
() -> commandBuilder.georadiusbymember(GEORADIUSBYMEMBER_RO, key, member, distance, unit.name()));
}
protected Flux<GeoWithin<V>> georadiusbymember_ro(K key, V member, double distance, Unit unit, GeoArgs geoArgs) {
return createDissolvingFlux(
() -> commandBuilder.georadiusbymember(GEORADIUSBYMEMBER_RO, key, member, distance, unit.name(), geoArgs));
}
@Override
public Flux<V> geosearch(K key, GeoSearch.GeoRef<K> reference, GeoSearch.GeoPredicate predicate) {
return createDissolvingFlux(() -> commandBuilder.geosearch(key, reference, predicate));
}
@Override
public Flux<GeoWithin<V>> geosearch(K key, GeoSearch.GeoRef<K> reference, GeoSearch.GeoPredicate predicate,
GeoArgs geoArgs) {
return createDissolvingFlux(() -> commandBuilder.geosearch(key, reference, predicate, geoArgs));
}
@Override
public Mono<Long> geosearchstore(K destination, K key, GeoSearch.GeoRef<K> reference, GeoSearch.GeoPredicate predicate,
GeoArgs geoArgs, boolean storeDist) {
return createMono(() -> commandBuilder.geosearchstore(destination, key, reference, predicate, geoArgs, storeDist));
}
@Override
public Mono<V> get(K key) {
return createMono(() -> commandBuilder.get(key));
}
@Override
public Mono<String> digestKey(K key) {
return createMono(() -> commandBuilder.digestKey(key));
}
public StatefulConnection<K, V> getConnection() {
return connection;
}
@Override
public Mono<Long> getbit(K key, long offset) {
return createMono(() -> commandBuilder.getbit(key, offset));
}
@Override
public Mono<V> getdel(K key) {
return createMono(() -> commandBuilder.getdel(key));
}
@Override
public Mono<V> getex(K key, GetExArgs args) {
return createMono(() -> commandBuilder.getex(key, args));
}
@Override
public Mono<V> getrange(K key, long start, long end) {
return createMono(() -> commandBuilder.getrange(key, start, end));
}
@Override
public Mono<V> getset(K key, V value) {
return createMono(() -> commandBuilder.getset(key, value));
}
@Override
public Mono<Long> hdel(K key, K... fields) {
return createMono(() -> commandBuilder.hdel(key, fields));
}
@Override
public Mono<Boolean> hexists(K key, K field) {
return createMono(() -> commandBuilder.hexists(key, field));
}
@Override
public Mono<V> hget(K key, K field) {
return createMono(() -> commandBuilder.hget(key, field));
}
@Override
public Flux<KeyValue<K, V>> hgetall(K key) {
return createDissolvingFlux(() -> commandBuilder.hgetallKeyValue(key));
}
@Override
public Mono<Long> hgetall(KeyValueStreamingChannel<K, V> channel, K key) {
return createMono(() -> commandBuilder.hgetall(channel, key));
}
@Override
public Mono<Long> hincrby(K key, K field, long amount) {
return createMono(() -> commandBuilder.hincrby(key, field, amount));
}
@Override
public Mono<Double> hincrbyfloat(K key, K field, double amount) {
return createMono(() -> commandBuilder.hincrbyfloat(key, field, amount));
}
@Override
public Flux<K> hkeys(K key) {
return createDissolvingFlux(() -> commandBuilder.hkeys(key));
}
@Override
public Mono<Long> hkeys(KeyStreamingChannel<K> channel, K key) {
return createMono(() -> commandBuilder.hkeys(channel, key));
}
@Override
public Mono<Long> hlen(K key) {
return createMono(() -> commandBuilder.hlen(key));
}
@Override
public Flux<KeyValue<K, V>> hmget(K key, K... fields) {
return createDissolvingFlux(() -> commandBuilder.hmgetKeyValue(key, fields));
}
@Override
public Mono<Long> hmget(KeyValueStreamingChannel<K, V> channel, K key, K... fields) {
return createMono(() -> commandBuilder.hmget(channel, key, fields));
}
@Override
public Mono<K> hrandfield(K key) {
return createMono(() -> commandBuilder.hrandfield(key));
}
@Override
public Flux<K> hrandfield(K key, long count) {
return createDissolvingFlux(() -> commandBuilder.hrandfield(key, count));
}
@Override
public Mono<KeyValue<K, V>> hrandfieldWithvalues(K key) {
return createMono(() -> commandBuilder.hrandfieldWithvalues(key));
}
@Override
public Flux<KeyValue<K, V>> hrandfieldWithvalues(K key, long count) {
return createDissolvingFlux(() -> commandBuilder.hrandfieldWithvalues(key, count));
}
@Override
public Mono<String> hmset(K key, Map<K, V> map) {
return createMono(() -> commandBuilder.hmset(key, map));
}
@Override
public Mono<MapScanCursor<K, V>> hscan(K key) {
return createMono(() -> commandBuilder.hscan(key));
}
@Override
public Mono<KeyScanCursor<K>> hscanNovalues(K key) {
return createMono(() -> commandBuilder.hscanNovalues(key));
}
@Override
public Mono<MapScanCursor<K, V>> hscan(K key, ScanArgs scanArgs) {
return createMono(() -> commandBuilder.hscan(key, scanArgs));
}
@Override
public Mono<KeyScanCursor<K>> hscanNovalues(K key, ScanArgs scanArgs) {
return createMono(() -> commandBuilder.hscanNovalues(key, scanArgs));
}
@Override
public Mono<MapScanCursor<K, V>> hscan(K key, ScanCursor scanCursor, ScanArgs scanArgs) {
return createMono(() -> commandBuilder.hscan(key, scanCursor, scanArgs));
}
@Override
public Mono<KeyScanCursor<K>> hscanNovalues(K key, ScanCursor scanCursor, ScanArgs scanArgs) {
return createMono(() -> commandBuilder.hscanNovalues(key, scanCursor, scanArgs));
}
@Override
public Mono<MapScanCursor<K, V>> hscan(K key, ScanCursor scanCursor) {
return createMono(() -> commandBuilder.hscan(key, scanCursor));
}
@Override
public Mono<KeyScanCursor<K>> hscanNovalues(K key, ScanCursor scanCursor) {
return createMono(() -> commandBuilder.hscanNovalues(key, scanCursor));
}
@Override
public Mono<StreamScanCursor> hscan(KeyValueStreamingChannel<K, V> channel, K key) {
return createMono(() -> commandBuilder.hscanStreaming(channel, key));
}
@Override
public Mono<StreamScanCursor> hscanNovalues(KeyStreamingChannel<K> channel, K key) {
return createMono(() -> commandBuilder.hscanNoValuesStreaming(channel, key));
}
@Override
public Mono<StreamScanCursor> hscan(KeyValueStreamingChannel<K, V> channel, K key, ScanArgs scanArgs) {
return createMono(() -> commandBuilder.hscanStreaming(channel, key, scanArgs));
}
@Override
public Mono<StreamScanCursor> hscanNovalues(KeyStreamingChannel<K> channel, K key, ScanArgs scanArgs) {
return createMono(() -> commandBuilder.hscanNoValuesStreaming(channel, key, scanArgs));
}
@Override
public Mono<StreamScanCursor> hscan(KeyValueStreamingChannel<K, V> channel, K key, ScanCursor scanCursor,
ScanArgs scanArgs) {
return createMono(() -> commandBuilder.hscanStreaming(channel, key, scanCursor, scanArgs));
}
@Override
public Mono<StreamScanCursor> hscanNovalues(KeyStreamingChannel<K> channel, K key, ScanCursor scanCursor,
ScanArgs scanArgs) {
return createMono(() -> commandBuilder.hscanNoValuesStreaming(channel, key, scanCursor, scanArgs));
}
@Override
public Mono<StreamScanCursor> hscan(KeyValueStreamingChannel<K, V> channel, K key, ScanCursor scanCursor) {
return createMono(() -> commandBuilder.hscanStreaming(channel, key, scanCursor));
}
@Override
public Mono<StreamScanCursor> hscanNovalues(KeyStreamingChannel<K> channel, K key, ScanCursor scanCursor) {
return createMono(() -> commandBuilder.hscanNoValuesStreaming(channel, key, scanCursor));
}
@Override
public Mono<Boolean> hset(K key, K field, V value) {
return createMono(() -> commandBuilder.hset(key, field, value));
}
@Override
public Mono<Long> hset(K key, Map<K, V> map) {
return createMono(() -> commandBuilder.hset(key, map));
}
@Override
public Mono<Long> hsetex(K key, Map<K, V> map) {
return createMono(() -> commandBuilder.hsetex(key, map));
}
@Override
public Mono<Long> hsetex(K key, HSetExArgs hSetExArgs, Map<K, V> map) {
return createMono(() -> commandBuilder.hsetex(key, hSetExArgs, map));
}
@Override
public Flux<KeyValue<K, V>> hgetex(K key, K... fields) {
return createDissolvingFlux(() -> commandBuilder.hgetex(key, fields));
}
@Override
public Flux<KeyValue<K, V>> hgetex(K key, HGetExArgs hGetExArgs, K... fields) {
return createDissolvingFlux(() -> commandBuilder.hgetex(key, hGetExArgs, fields));
}
@Override
public Mono<Long> hgetex(KeyValueStreamingChannel<K, V> channel, K key, HGetExArgs hGetExArgs, K... fields) {
return createMono(() -> commandBuilder.hgetex(channel, key, hGetExArgs, fields));
}
@Override
public Flux<KeyValue<K, V>> hgetdel(K key, K... fields) {
return createDissolvingFlux(() -> commandBuilder.hgetdel(key, fields));
}
@Override
public Mono<Long> hgetdel(KeyValueStreamingChannel<K, V> channel, K key, K... fields) {
return createMono(() -> commandBuilder.hgetdel(channel, key, fields));
}
@Override
public Mono<Boolean> hsetnx(K key, K field, V value) {
return createMono(() -> commandBuilder.hsetnx(key, field, value));
}
@Override
public Mono<Long> hstrlen(K key, K field) {
return createMono(() -> commandBuilder.hstrlen(key, field));
}
@Override
public Flux<V> hvals(K key) {
return createDissolvingFlux(() -> commandBuilder.hvals(key));
}
@Override
public Mono<Long> hvals(ValueStreamingChannel<V> channel, K key) {
return createMono(() -> commandBuilder.hvals(channel, key));
}
@Override
public Mono<Long> incr(K key) {
return createMono(() -> commandBuilder.incr(key));
}
@Override
public Mono<Long> incrby(K key, long amount) {
return createMono(() -> commandBuilder.incrby(key, amount));
}
@Override
public Mono<Double> incrbyfloat(K key, double amount) {
return createMono(() -> commandBuilder.incrbyfloat(key, amount));
}
@Override
public Mono<String> info() {
return createMono(commandBuilder::info);
}
@Override
public Mono<String> info(String section) {
return createMono(() -> commandBuilder.info(section));
}
@Override
public Mono<String> ftCreate(String index, CreateArgs<K, V> options, List<FieldArgs<K>> fieldArgs) {
return createMono(() -> searchCommandBuilder.ftCreate(index, options, fieldArgs));
}
@Override
public Mono<String> ftCreate(String index, List<FieldArgs<K>> fieldArgs) {
return createMono(() -> searchCommandBuilder.ftCreate(index, null, fieldArgs));
}
@Override
public Mono<String> ftAliasadd(String alias, String index) {
return createMono(() -> searchCommandBuilder.ftAliasadd(alias, index));
}
@Override
public Mono<String> ftAliasupdate(String alias, String index) {
return createMono(() -> searchCommandBuilder.ftAliasupdate(alias, index));
}
@Override
public Mono<String> ftAliasdel(String alias) {
return createMono(() -> searchCommandBuilder.ftAliasdel(alias));
}
@Override
public Mono<String> ftAlter(String index, boolean skipInitialScan, List<FieldArgs<K>> fieldArgs) {
return createMono(() -> searchCommandBuilder.ftAlter(index, skipInitialScan, fieldArgs));
}
@Override
public Flux<V> ftTagvals(String index, String fieldName) {
return createDissolvingFlux(() -> searchCommandBuilder.ftTagvals(index, fieldName));
}
@Override
public Mono<SpellCheckResult<V>> ftSpellcheck(String index, V query) {
return createMono(() -> searchCommandBuilder.ftSpellcheck(index, query));
}
@Override
public Mono<SpellCheckResult<V>> ftSpellcheck(String index, V query, SpellCheckArgs<K, V> args) {
return createMono(() -> searchCommandBuilder.ftSpellcheck(index, query, args));
}
@Override
public Mono<Long> ftDictadd(String dict, V... terms) {
return createMono(() -> searchCommandBuilder.ftDictadd(dict, terms));
}
@Override
public Mono<Long> ftDictdel(String dict, V... terms) {
return createMono(() -> searchCommandBuilder.ftDictdel(dict, terms));
}
@Override
public Flux<V> ftDictdump(String dict) {
return createDissolvingFlux(() -> searchCommandBuilder.ftDictdump(dict));
}
@Override
public Mono<String> ftExplain(String index, V query) {
return createMono(() -> searchCommandBuilder.ftExplain(index, query));
}
@Override
public Mono<String> ftExplain(String index, V query, ExplainArgs<K, V> args) {
return createMono(() -> searchCommandBuilder.ftExplain(index, query, args));
}
@Override
public Flux<V> ftList() {
return createDissolvingFlux(() -> searchCommandBuilder.ftList());
}
@Override
public Mono<Map<V, List<V>>> ftSyndump(String index) {
return createMono(() -> searchCommandBuilder.ftSyndump(index));
}
@Override
public Mono<String> ftSynupdate(String index, V synonymGroupId, V... terms) {
return createMono(() -> searchCommandBuilder.ftSynupdate(index, synonymGroupId, terms));
}
@Override
public Mono<String> ftSynupdate(String index, V synonymGroupId, SynUpdateArgs<K, V> args, V... terms) {
return createMono(() -> searchCommandBuilder.ftSynupdate(index, synonymGroupId, args, terms));
}
@Override
public Mono<Long> ftSugadd(K key, V string, double score) {
return createMono(() -> searchCommandBuilder.ftSugadd(key, string, score));
}
@Override
public Mono<Long> ftSugadd(K key, V string, double score, SugAddArgs<K, V> args) {
return createMono(() -> searchCommandBuilder.ftSugadd(key, string, score, args));
}
@Override
public Mono<Boolean> ftSugdel(K key, V string) {
return createMono(() -> searchCommandBuilder.ftSugdel(key, string));
}
@Override
public Flux<Suggestion<V>> ftSugget(K key, V prefix) {
return createDissolvingFlux(() -> searchCommandBuilder.ftSugget(key, prefix));
}
@Override
public Flux<Suggestion<V>> ftSugget(K key, V prefix, SugGetArgs<K, V> args) {
return createDissolvingFlux(() -> searchCommandBuilder.ftSugget(key, prefix, args));
}
@Override
public Mono<Long> ftSuglen(K key) {
return createMono(() -> searchCommandBuilder.ftSuglen(key));
}
@Override
public Mono<String> ftAlter(String index, List<FieldArgs<K>> fieldArgs) {
return createMono(() -> searchCommandBuilder.ftAlter(index, false, fieldArgs));
}
@Override
public Mono<String> ftCursordel(String index, Cursor cursor) {
return createMono(() -> {
if (cursor == null) {
throw new IllegalArgumentException("cursor must not be null");
}
long cursorId = cursor.getCursorId();
return searchCommandBuilder.ftCursordel(index, cursorId > 0 ? cursorId : 0);
});
}
@Override
public Mono<String> ftDropindex(String index, boolean deleteDocumentKeys) {
return createMono(() -> searchCommandBuilder.ftDropindex(index, deleteDocumentKeys));
}
@Override
public Mono<String> ftDropindex(String index) {
return createMono(() -> searchCommandBuilder.ftDropindex(index, false));
}
@Override
public Mono<SearchReply<K, V>> ftSearch(String index, V query, SearchArgs<K, V> args) {
return createMono(() -> searchCommandBuilder.ftSearch(index, query, args));
}
@Override
public Mono<SearchReply<K, V>> ftSearch(String index, V query) {
return createMono(() -> searchCommandBuilder.ftSearch(index, query, SearchArgs.<K, V> builder().build()));
}
@Override
public Mono<AggregationReply<K, V>> ftAggregate(String index, V query, AggregateArgs<K, V> args) {
return createMono(() -> searchCommandBuilder.ftAggregate(index, query, args));
}
@Override
public Mono<AggregationReply<K, V>> ftAggregate(String index, V query) {
return createMono(() -> searchCommandBuilder.ftAggregate(index, query, null));
}
@Override
public Mono<AggregationReply<K, V>> ftCursorread(String index, Cursor cursor, int count) {
return createMono(() -> {
if (cursor == null) {
throw new IllegalArgumentException("cursor must not be null");
}
long cursorId = cursor.getCursorId();
return searchCommandBuilder.ftCursorread(index, cursorId > 0 ? cursorId : 0, count);
});
}
@Override
public Mono<AggregationReply<K, V>> ftCursorread(String index, Cursor cursor) {
return ftCursorread(index, cursor, -1);
}
@Override
public Flux<Long> jsonArrappend(K key, JsonPath jsonPath, JsonValue... values) {
return createDissolvingFlux(() -> jsonCommandBuilder.jsonArrappend(key, jsonPath, values));
}
@Override
public Flux<Long> jsonArrappend(K key, JsonValue... values) {
return createDissolvingFlux(() -> jsonCommandBuilder.jsonArrappend(key, JsonPath.ROOT_PATH, values));
}
@Override
public Flux<Long> jsonArrappend(K key, JsonPath jsonPath, String... jsonStrings) {
return createDissolvingFlux(() -> jsonCommandBuilder.jsonArrappend(key, jsonPath, jsonStrings));
}
@Override
public Flux<Long> jsonArrappend(K key, String... jsonStrings) {
return createDissolvingFlux(() -> jsonCommandBuilder.jsonArrappend(key, JsonPath.ROOT_PATH, jsonStrings));
}
@Override
public Flux<Long> jsonArrindex(K key, JsonPath jsonPath, JsonValue value, JsonRangeArgs range) {
return createDissolvingFlux(() -> jsonCommandBuilder.jsonArrindex(key, jsonPath, value, range));
}
@Override
public Flux<Long> jsonArrindex(K key, JsonPath jsonPath, JsonValue value) {
final JsonRangeArgs args = JsonRangeArgs.Builder.defaults();
return createDissolvingFlux(() -> jsonCommandBuilder.jsonArrindex(key, jsonPath, value, args));
}
@Override
public Flux<Long> jsonArrindex(K key, JsonPath jsonPath, String jsonString, JsonRangeArgs range) {
return createDissolvingFlux(() -> jsonCommandBuilder.jsonArrindex(key, jsonPath, jsonString, range));
}
@Override
public Flux<Long> jsonArrindex(K key, JsonPath jsonPath, String jsonString) {
return createDissolvingFlux(
() -> jsonCommandBuilder.jsonArrindex(key, jsonPath, jsonString, JsonRangeArgs.Builder.defaults()));
}
@Override
public Flux<Long> jsonArrinsert(K key, JsonPath jsonPath, int index, JsonValue... values) {
return createDissolvingFlux(() -> jsonCommandBuilder.jsonArrinsert(key, jsonPath, index, values));
}
@Override
public Flux<Long> jsonArrinsert(K key, JsonPath jsonPath, int index, String... jsonStrings) {
return createDissolvingFlux(() -> jsonCommandBuilder.jsonArrinsert(key, jsonPath, index, jsonStrings));
}
@Override
public Flux<Long> jsonArrlen(K key, JsonPath jsonPath) {
return createDissolvingFlux(() -> jsonCommandBuilder.jsonArrlen(key, jsonPath));
}
@Override
public Flux<Long> jsonArrlen(K key) {
return createDissolvingFlux(() -> jsonCommandBuilder.jsonArrlen(key, JsonPath.ROOT_PATH));
}
@Override
public Flux<JsonValue> jsonArrpop(K key, JsonPath jsonPath, int index) {
return createDissolvingFlux(() -> jsonCommandBuilder.jsonArrpop(key, jsonPath, index));
}
@Override
public Flux<JsonValue> jsonArrpop(K key, JsonPath jsonPath) {
return createDissolvingFlux(() -> jsonCommandBuilder.jsonArrpop(key, jsonPath, -1));
}
@Override
public Flux<JsonValue> jsonArrpop(K key) {
return createDissolvingFlux(() -> jsonCommandBuilder.jsonArrpop(key, JsonPath.ROOT_PATH, -1));
}
@Override
public Flux<String> jsonArrpopRaw(K key, JsonPath jsonPath, int index) {
return createDissolvingFlux(() -> jsonCommandBuilder.jsonArrpopRaw(key, jsonPath, index));
}
@Override
public Flux<String> jsonArrpopRaw(K key, JsonPath jsonPath) {
return createDissolvingFlux(() -> jsonCommandBuilder.jsonArrpopRaw(key, jsonPath, -1));
}
@Override
public Flux<String> jsonArrpopRaw(K key) {
return createDissolvingFlux(() -> jsonCommandBuilder.jsonArrpopRaw(key, JsonPath.ROOT_PATH, -1));
}
@Override
public Flux<Long> jsonArrtrim(K key, JsonPath jsonPath, JsonRangeArgs range) {
return createDissolvingFlux(() -> jsonCommandBuilder.jsonArrtrim(key, jsonPath, range));
}
@Override
public Mono<Long> jsonClear(K key, JsonPath jsonPath) {
return createMono(() -> jsonCommandBuilder.jsonClear(key, jsonPath));
}
@Override
public Mono<Long> jsonClear(K key) {
return createMono(() -> jsonCommandBuilder.jsonClear(key, JsonPath.ROOT_PATH));
}
@Override
public Mono<Long> jsonDel(K key, JsonPath jsonPath) {
return createMono(() -> jsonCommandBuilder.jsonDel(key, jsonPath));
}
@Override
public Flux<String> jsonGetRaw(K key, JsonGetArgs options, JsonPath... jsonPaths) {
return createDissolvingFlux(() -> jsonCommandBuilder.jsonGetRaw(key, options, jsonPaths));
}
@Override
public Flux<String> jsonGetRaw(K key, JsonPath... jsonPaths) {
final JsonGetArgs args = JsonGetArgs.Builder.defaults();
return createDissolvingFlux(() -> jsonCommandBuilder.jsonGetRaw(key, args, jsonPaths));
}
@Override
public Mono<Long> jsonDel(K key) {
return createMono(() -> jsonCommandBuilder.jsonDel(key, JsonPath.ROOT_PATH));
}
@Override
public Flux<JsonValue> jsonGet(K key, JsonGetArgs options, JsonPath... jsonPaths) {
return createDissolvingFlux(() -> jsonCommandBuilder.jsonGet(key, options, jsonPaths));
}
@Override
public Flux<JsonValue> jsonGet(K key, JsonPath... jsonPaths) {
final JsonGetArgs args = JsonGetArgs.Builder.defaults();
return createDissolvingFlux(() -> jsonCommandBuilder.jsonGet(key, args, jsonPaths));
}
@Override
public Mono<String> jsonMerge(K key, JsonPath jsonPath, JsonValue value) {
return createMono(() -> jsonCommandBuilder.jsonMerge(key, jsonPath, value));
}
@Override
public Mono<String> jsonMerge(K key, JsonPath jsonPath, String jsonString) {
return createMono(() -> jsonCommandBuilder.jsonMerge(key, jsonPath, jsonString));
}
@Override
public Flux<JsonValue> jsonMGet(JsonPath jsonPath, K... keys) {
return createDissolvingFlux(() -> jsonCommandBuilder.jsonMGet(jsonPath, keys));
}
@Override
public Mono<String> jsonMSet(List<JsonMsetArgs<K, V>> arguments) {
return createMono(() -> jsonCommandBuilder.jsonMSet(arguments));
}
@Override
public Flux<String> jsonMGetRaw(JsonPath jsonPath, K... keys) {
return createDissolvingFlux(() -> jsonCommandBuilder.jsonMGetRaw(jsonPath, keys));
}
@Override
public Flux<Number> jsonNumincrby(K key, JsonPath jsonPath, Number number) {
return createDissolvingFlux(() -> jsonCommandBuilder.jsonNumincrby(key, jsonPath, number));
}
@Override
public Flux<V> jsonObjkeys(K key, JsonPath jsonPath) {
return createDissolvingFlux(() -> jsonCommandBuilder.jsonObjkeys(key, jsonPath));
}
@Override
public Flux<V> jsonObjkeys(K key) {
return createDissolvingFlux(() -> jsonCommandBuilder.jsonObjkeys(key, JsonPath.ROOT_PATH));
}
@Override
public Flux<Long> jsonObjlen(K key, JsonPath jsonPath) {
return createDissolvingFlux(() -> jsonCommandBuilder.jsonObjlen(key, jsonPath));
}
@Override
public Flux<Long> jsonObjlen(K key) {
return createDissolvingFlux(() -> jsonCommandBuilder.jsonObjlen(key, JsonPath.ROOT_PATH));
}
@Override
public Mono<String> jsonSet(K key, JsonPath jsonPath, JsonValue value, JsonSetArgs options) {
return createMono(() -> jsonCommandBuilder.jsonSet(key, jsonPath, value, options));
}
@Override
public Mono<String> jsonSet(K key, JsonPath jsonPath, JsonValue value) {
final JsonSetArgs args = JsonSetArgs.Builder.defaults();
return createMono(() -> jsonCommandBuilder.jsonSet(key, jsonPath, value, args));
}
@Override
public Mono<String> jsonSet(K key, JsonPath jsonPath, String jsonString, JsonSetArgs options) {
return createMono(() -> jsonCommandBuilder.jsonSet(key, jsonPath, jsonString, options));
}
@Override
public Mono<String> jsonSet(K key, JsonPath jsonPath, String jsonString) {
return createMono(() -> jsonCommandBuilder.jsonSet(key, jsonPath, jsonString, JsonSetArgs.Builder.defaults()));
}
@Override
public Flux<Long> jsonStrappend(K key, JsonPath jsonPath, JsonValue value) {
return createDissolvingFlux(() -> jsonCommandBuilder.jsonStrappend(key, jsonPath, value));
}
@Override
public Flux<Long> jsonStrappend(K key, JsonValue value) {
return createDissolvingFlux(() -> jsonCommandBuilder.jsonStrappend(key, JsonPath.ROOT_PATH, value));
}
@Override
public Flux<Long> jsonStrappend(K key, JsonPath jsonPath, String jsonString) {
return createDissolvingFlux(() -> jsonCommandBuilder.jsonStrappend(key, jsonPath, jsonString));
}
@Override
public Flux<Long> jsonStrappend(K key, String jsonString) {
return createDissolvingFlux(() -> jsonCommandBuilder.jsonStrappend(key, JsonPath.ROOT_PATH, jsonString));
}
@Override
public Flux<Long> jsonStrlen(K key, JsonPath jsonPath) {
return createDissolvingFlux(() -> jsonCommandBuilder.jsonStrlen(key, jsonPath));
}
@Override
public Flux<Long> jsonStrlen(K key) {
return createDissolvingFlux(() -> jsonCommandBuilder.jsonStrlen(key, JsonPath.ROOT_PATH));
}
@Override
public Flux<Long> jsonToggle(K key, JsonPath jsonPath) {
return createDissolvingFlux(() -> jsonCommandBuilder.jsonToggle(key, jsonPath));
}
@Override
public Flux<JsonType> jsonType(K key, JsonPath jsonPath) {
return createDissolvingFlux(() -> jsonCommandBuilder.jsonType(key, jsonPath));
}
@Override
public Flux<JsonType> jsonType(K key) {
return createDissolvingFlux(() -> jsonCommandBuilder.jsonType(key, JsonPath.ROOT_PATH));
}
@Override
public Mono<Boolean> vadd(K key, V element, Double... vectors) {
return createMono(() -> vectorSetCommandBuilder.vadd(key, element, null, vectors));
}
@Override
public Mono<Boolean> vadd(K key, int dimensionality, V element, Double... vectors) {
return createMono(() -> vectorSetCommandBuilder.vadd(key, dimensionality, element, null, vectors));
}
@Override
public Mono<Boolean> vadd(K key, V element, VAddArgs args, Double... vectors) {
return createMono(() -> vectorSetCommandBuilder.vadd(key, element, args, vectors));
}
@Override
public Mono<Boolean> vadd(K key, int dimensionality, V element, VAddArgs args, Double... vectors) {
return createMono(() -> vectorSetCommandBuilder.vadd(key, dimensionality, element, args, vectors));
}
@Override
public Mono<Long> vcard(K key) {
return createMono(() -> vectorSetCommandBuilder.vcard(key));
}
@Override
public Mono<Boolean> vClearAttributes(K key, V element) {
return createMono(() -> vectorSetCommandBuilder.vsetattr(key, element, ""));
}
@Override
public Mono<Long> vdim(K key) {
return createMono(() -> vectorSetCommandBuilder.vdim(key));
}
@Override
public Flux<Double> vemb(K key, V element) {
return createDissolvingFlux(() -> vectorSetCommandBuilder.vemb(key, element));
}
@Override
public Mono<RawVector> vembRaw(K key, V element) {
return createMono(() -> vectorSetCommandBuilder.vembRaw(key, element));
}
@Override
public Mono<String> vgetattr(K key, V element) {
return createMono(() -> vectorSetCommandBuilder.vgetattr(key, element));
}
@Override
public Flux<JsonValue> vgetattrAsJsonValue(K key, V element) {
return createDissolvingFlux(() -> vectorSetCommandBuilder.vgetattrAsJsonValue(key, element));
}
@Override
public Mono<VectorMetadata> vinfo(K key) {
return createMono(() -> vectorSetCommandBuilder.vinfo(key));
}
@Override
public Flux<V> vlinks(K key, V element) {
return createDissolvingFlux(() -> vectorSetCommandBuilder.vlinks(key, element));
}
@Override
public Mono<Map<V, Double>> vlinksWithScores(K key, V element) {
return createMono(() -> vectorSetCommandBuilder.vlinksWithScores(key, element));
}
@Override
public Mono<V> vrandmember(K key) {
return createMono(() -> vectorSetCommandBuilder.vrandmember(key));
}
@Override
public Flux<V> vrandmember(K key, int count) {
return createDissolvingFlux(() -> vectorSetCommandBuilder.vrandmember(key, count));
}
@Override
public Mono<Boolean> vrem(K key, V element) {
return createMono(() -> vectorSetCommandBuilder.vrem(key, element));
}
@Override
public Mono<Boolean> vsetattr(K key, V element, String json) {
return createMono(() -> vectorSetCommandBuilder.vsetattr(key, element, json));
}
@Override
public Mono<Boolean> vsetattr(K key, V element, JsonValue json) {
return createMono(() -> vectorSetCommandBuilder.vsetattr(key, element, json));
}
@Override
public Flux<V> vsim(K key, Double... vectors) {
return createDissolvingFlux(() -> vectorSetCommandBuilder.vsim(key, null, vectors));
}
@Override
public Flux<V> vsim(K key, V element) {
return createDissolvingFlux(() -> vectorSetCommandBuilder.vsim(key, null, element));
}
@Override
public Flux<V> vsim(K key, VSimArgs args, Double... vectors) {
return createDissolvingFlux(() -> vectorSetCommandBuilder.vsim(key, args, vectors));
}
@Override
public Flux<V> vsim(K key, VSimArgs args, V element) {
return createDissolvingFlux(() -> vectorSetCommandBuilder.vsim(key, args, element));
}
@Override
public Mono<Map<V, Double>> vsimWithScore(K key, Double... vectors) {
return createMono(() -> vectorSetCommandBuilder.vsimWithScore(key, null, vectors));
}
@Override
public Mono<Map<V, Double>> vsimWithScore(K key, V element) {
return createMono(() -> vectorSetCommandBuilder.vsimWithScore(key, null, element));
}
@Override
public Mono<Map<V, Double>> vsimWithScore(K key, VSimArgs args, Double... vectors) {
return createMono(() -> vectorSetCommandBuilder.vsimWithScore(key, args, vectors));
}
@Override
public Mono<Map<V, Double>> vsimWithScore(K key, VSimArgs args, V element) {
return createMono(() -> vectorSetCommandBuilder.vsimWithScore(key, args, element));
}
@Override
public Mono<Map<V, VSimScoreAttribs>> vsimWithScoreWithAttribs(K key, Double... vectors) {
return createMono(() -> vectorSetCommandBuilder.vsimWithScoreWithAttribs(key, null, vectors));
}
@Override
public Mono<Map<V, VSimScoreAttribs>> vsimWithScoreWithAttribs(K key, V element) {
return createMono(() -> vectorSetCommandBuilder.vsimWithScoreWithAttribs(key, null, element));
}
@Override
public Mono<Map<V, VSimScoreAttribs>> vsimWithScoreWithAttribs(K key, VSimArgs args, Double... vectors) {
return createMono(() -> vectorSetCommandBuilder.vsimWithScoreWithAttribs(key, args, vectors));
}
@Override
public Mono<Map<V, VSimScoreAttribs>> vsimWithScoreWithAttribs(K key, VSimArgs args, V element) {
return createMono(() -> vectorSetCommandBuilder.vsimWithScoreWithAttribs(key, args, element));
}
@Override
public Flux<K> keys(String pattern) {
return createDissolvingFlux(() -> commandBuilder.keys(pattern));
}
/**
 * Find all keys matching the given pattern (legacy overload).
 *
 * @param pattern the key pattern to match.
 * @return K array-reply list of keys matching {@code pattern}.
 * @deprecated Use {@link #keys(String)} instead. This legacy overload will be removed in a later version.
 */
@Deprecated
@Override
public Flux<K> keysLegacy(K pattern) {
    return createDissolvingFlux(() -> commandBuilder.keysLegacy(pattern));
}
@Override
public Mono<Long> keys(KeyStreamingChannel<K> channel, String pattern) {
return createMono(() -> commandBuilder.keys(channel, pattern));
}
/**
* Find all keys matching the given pattern (legacy overload).
*
* @param channel the channel.
* @param pattern the pattern.
* @return Long array-reply list of keys matching {@code pattern}.
* @deprecated Use {@link #keys(KeyStreamingChannel, String)} instead. This legacy overload will be removed in a later
* version.
*/
@Deprecated
@Override
public Mono<Long> keysLegacy(KeyStreamingChannel<K> channel, K pattern) {
return createMono(() -> commandBuilder.keysLegacy(channel, pattern));
}
@Override
public Mono<Date> lastsave() {
return createMono(commandBuilder::lastsave);
}
@Override
public Mono<V> lindex(K key, long index) {
return createMono(() -> commandBuilder.lindex(key, index));
}
@Override
public Mono<Long> linsert(K key, boolean before, V pivot, V value) {
return createMono(() -> commandBuilder.linsert(key, before, pivot, value));
}
@Override
public Mono<Long> llen(K key) {
return createMono(() -> commandBuilder.llen(key));
}
@Override
public Mono<V> lmove(K source, K destination, LMoveArgs args) {
return createMono(() -> commandBuilder.lmove(source, destination, args));
}
@Override
public Mono<KeyValue<K, List<V>>> lmpop(LMPopArgs args, K... keys) {
return createMono(() -> commandBuilder.lmpop(args, keys));
}
@Override
public Mono<V> lpop(K key) {
return createMono(() -> commandBuilder.lpop(key));
}
@Override
public Flux<V> lpop(K key, long count) {
return createDissolvingFlux(() -> commandBuilder.lpop(key, count));
}
@Override
public Mono<Long> lpos(K key, V value) {
return lpos(key, value, null);
}
@Override
public Mono<Long> lpos(K key, V value, LPosArgs args) {
return createMono(() -> commandBuilder.lpos(key, value, args));
}
@Override
public Flux<Long> lpos(K key, V value, int count) {
return lpos(key, value, count, null);
}
@Override
public Flux<Long> lpos(K key, V value, int count, LPosArgs args) {
return createDissolvingFlux(() -> commandBuilder.lpos(key, value, count, args));
}
@Override
public Mono<Long> lpush(K key, V... values) {
return createMono(() -> commandBuilder.lpush(key, values));
}
@Override
public Mono<Long> lpushx(K key, V... values) {
return createMono(() -> commandBuilder.lpushx(key, values));
}
@Override
public Flux<V> lrange(K key, long start, long stop) {
return createDissolvingFlux(() -> commandBuilder.lrange(key, start, stop));
}
@Override
public Mono<Long> lrange(ValueStreamingChannel<V> channel, K key, long start, long stop) {
return createMono(() -> commandBuilder.lrange(channel, key, start, stop));
}
@Override
public Mono<Long> lrem(K key, long count, V value) {
return createMono(() -> commandBuilder.lrem(key, count, value));
}
@Override
public Mono<String> lset(K key, long index, V value) {
return createMono(() -> commandBuilder.lset(key, index, value));
}
@Override
public Mono<String> ltrim(K key, long start, long stop) {
return createMono(() -> commandBuilder.ltrim(key, start, stop));
}
@Override
public Mono<Long> memoryUsage(K key) {
return createMono(() -> commandBuilder.memoryUsage(key));
}
@Override
public Flux<KeyValue<K, V>> mget(K... keys) {
return createDissolvingFlux(() -> commandBuilder.mgetKeyValue(keys));
}
// Iterable overload of MGET; carries no @Override, so it is presumably not declared
// on the implemented reactive interface — confirm before relying on polymorphic dispatch.
public Flux<KeyValue<K, V>> mget(Iterable<K> keys) {
    return createDissolvingFlux(() -> commandBuilder.mgetKeyValue(keys));
}
@Override
public Mono<Long> mget(KeyValueStreamingChannel<K, V> channel, K... keys) {
return createMono(() -> commandBuilder.mget(channel, keys));
}
public Mono<Long> mget(ValueStreamingChannel<V> channel, Iterable<K> keys) {
return createMono(() -> commandBuilder.mget(channel, keys));
}
public Mono<Long> mget(KeyValueStreamingChannel<K, V> channel, Iterable<K> keys) {
return createMono(() -> commandBuilder.mget(channel, keys));
}
@Override
public Mono<String> migrate(String host, int port, K key, int db, long timeout) {
return createMono(() -> commandBuilder.migrate(host, port, key, db, timeout));
}
@Override
public Mono<String> migrate(String host, int port, int db, long timeout, MigrateArgs<K> migrateArgs) {
return createMono(() -> commandBuilder.migrate(host, port, db, timeout, migrateArgs));
}
@Override
public Mono<Boolean> move(K key, int db) {
return createMono(() -> commandBuilder.move(key, db));
}
@Override
public Mono<String> mset(Map<K, V> map) {
return createMono(() -> commandBuilder.mset(map));
}
@Override
public Mono<Boolean> msetnx(Map<K, V> map) {
return createMono(() -> commandBuilder.msetnx(map));
}
@Override
public Mono<Boolean> msetex(Map<K, V> map, MSetExArgs args) {
return createMono(() -> commandBuilder.msetex(map, args));
}
@Override
public Mono<String> multi() {
return createMono(commandBuilder::multi);
}
@Override
public Mono<String> objectEncoding(K key) {
return createMono(() -> commandBuilder.objectEncoding(key));
}
@Override
public Mono<Long> objectFreq(K key) {
return createMono(() -> commandBuilder.objectFreq(key));
}
@Override
public Mono<Long> objectIdletime(K key) {
return createMono(() -> commandBuilder.objectIdletime(key));
}
@Override
public Mono<Long> objectRefcount(K key) {
return createMono(() -> commandBuilder.objectRefcount(key));
}
@Override
public Mono<Boolean> persist(K key) {
return createMono(() -> commandBuilder.persist(key));
}
@Override
public Flux<Long> hpersist(K key, K... fields) {
return createDissolvingFlux(() -> commandBuilder.hpersist(key, fields));
}
@Override
public Mono<Boolean> pexpire(K key, long milliseconds) {
return pexpire(key, milliseconds, null);
}
@Override
public Mono<Boolean> pexpire(K key, long milliseconds, ExpireArgs expireArgs) {
return createMono(() -> commandBuilder.pexpire(key, milliseconds, expireArgs));
}
@Override
public Mono<Boolean> pexpire(K key, Duration milliseconds) {
return pexpire(key, milliseconds, null);
}
@Override
public Mono<Boolean> pexpire(K key, Duration milliseconds, ExpireArgs expireArgs) {
LettuceAssert.notNull(milliseconds, "Timeout must not be null");
return pexpire(key, milliseconds.toMillis(), expireArgs);
}
@Override
public Mono<Boolean> pexpireat(K key, Date timestamp) {
return pexpireat(key, timestamp, null);
}
@Override
public Mono<Boolean> pexpireat(K key, Date timestamp, ExpireArgs expireArgs) {
LettuceAssert.notNull(timestamp, "Timestamp must not be null");
return pexpireat(key, timestamp.getTime(), expireArgs);
}
@Override
public Mono<Boolean> pexpireat(K key, Instant timestamp) {
return pexpireat(key, timestamp, null);
}
@Override
public Mono<Boolean> pexpireat(K key, Instant timestamp, ExpireArgs expireArgs) {
LettuceAssert.notNull(timestamp, "Timestamp must not be null");
return pexpireat(key, timestamp.toEpochMilli(), expireArgs);
}
@Override
public Mono<Boolean> pexpireat(K key, long timestamp) {
return pexpireat(key, timestamp, null);
}
@Override
public Mono<Boolean> pexpireat(K key, long timestamp, ExpireArgs expireArgs) {
return createMono(() -> commandBuilder.pexpireat(key, timestamp, expireArgs));
}
@Override
public Mono<Long> pexpiretime(K key) {
return createMono(() -> commandBuilder.pexpiretime(key));
}
@Override
public Mono<Long> pfadd(K key, V... values) {
return createMono(() -> commandBuilder.pfadd(key, values));
}
// Legacy (value + varargs) overload of PFADD; no @Override, so it is presumably not
// declared on the implemented reactive interface — kept for source compatibility.
public Mono<Long> pfadd(K key, V value, V... values) {
    return createMono(() -> commandBuilder.pfadd(key, value, values));
}
@Override
public Mono<Long> pfcount(K... keys) {
return createMono(() -> commandBuilder.pfcount(keys));
}
public Mono<Long> pfcount(K key, K... keys) {
return createMono(() -> commandBuilder.pfcount(key, keys));
}
@Override
public Mono<String> pfmerge(K destkey, K... sourcekeys) {
return createMono(() -> commandBuilder.pfmerge(destkey, sourcekeys));
}
public Mono<String> pfmerge(K destkey, K sourceKey, K... sourcekeys) {
return createMono(() -> commandBuilder.pfmerge(destkey, sourceKey, sourcekeys));
}
    // Connection liveness, millisecond SETEX, millisecond TTL, and Pub/Sub publish.
    @Override
    public Mono<String> ping() {
        return createMono(commandBuilder::ping);
    }
    @Override
    public Mono<String> psetex(K key, long milliseconds, V value) {
        return createMono(() -> commandBuilder.psetex(key, milliseconds, value));
    }
    @Override
    public Mono<Long> pttl(K key) {
        return createMono(() -> commandBuilder.pttl(key));
    }
    @Override
    public Mono<Long> publish(K channel, V message) {
        // Resulting Long is the number of receivers, as reported by the server reply.
        return createMono(() -> commandBuilder.publish(channel, message));
    }
    // PUBSUB introspection (channels/shard channels/pattern and subscriber counts),
    // plus QUIT and RANDOMKEY. Multi-element replies are emitted as dissolving Flux
    // streams (the collection reply is flattened into individual elements).
    @Override
    public Flux<K> pubsubChannels() {
        return createDissolvingFlux(commandBuilder::pubsubChannels);
    }
    @Override
    public Flux<K> pubsubChannels(K channel) {
        return createDissolvingFlux(() -> commandBuilder.pubsubChannels(channel));
    }
    @Override
    public Mono<Long> pubsubNumpat() {
        return createMono(commandBuilder::pubsubNumpat);
    }
    @Override
    public Mono<Map<K, Long>> pubsubNumsub(K... channels) {
        return createMono(() -> commandBuilder.pubsubNumsub(channels));
    }
    @Override
    public Flux<K> pubsubShardChannels() {
        return createDissolvingFlux(commandBuilder::pubsubShardChannels);
    }
    @Override
    public Flux<K> pubsubShardChannels(K pattern) {
        return createDissolvingFlux(() -> commandBuilder.pubsubShardChannels(pattern));
    }
    @Override
    public Mono<Map<K, Long>> pubsubShardNumsub(K... shardChannels) {
        return createMono(() -> commandBuilder.pubsubShardNumsub(shardChannels));
    }
    @Override
    public Mono<String> quit() {
        return createMono(commandBuilder::quit);
    }
    @Override
    public Mono<K> randomkey() {
        return createMono(commandBuilder::randomkey);
    }
    // READONLY/READWRITE (cluster read routing), RENAME[NX], REPLICAOF, RESTORE and ROLE.
    @Override
    public Mono<String> readOnly() {
        return createMono(commandBuilder::readOnly);
    }
    @Override
    public Mono<String> readWrite() {
        return createMono(commandBuilder::readWrite);
    }
    @Override
    public Mono<String> rename(K key, K newKey) {
        return createMono(() -> commandBuilder.rename(key, newKey));
    }
    @Override
    public Mono<Boolean> renamenx(K key, K newKey) {
        return createMono(() -> commandBuilder.renamenx(key, newKey));
    }
    @Override
    public Mono<String> replicaof(String host, int port) {
        return createMono(() -> commandBuilder.replicaof(host, port));
    }
    @Override
    public Mono<String> replicaofNoOne() {
        return createMono(() -> commandBuilder.replicaofNoOne());
    }
    @Override
    public Mono<String> restore(K key, long ttl, byte[] value) {
        // Adapts the legacy (ttl, value) signature onto the args-based builder call.
        return createMono(() -> commandBuilder.restore(key, value, RestoreArgs.Builder.ttl(ttl)));
    }
    @Override
    public Mono<String> restore(K key, byte[] value, RestoreArgs args) {
        return createMono(() -> commandBuilder.restore(key, value, args));
    }
    @Override
    public Flux<Object> role() {
        return createDissolvingFlux(commandBuilder::role);
    }
    // List tail operations (RPOP/RPOPLPUSH/RPUSH[X]), SADD and SAVE.
    @Override
    public Mono<V> rpop(K key) {
        return createMono(() -> commandBuilder.rpop(key));
    }
    @Override
    public Flux<V> rpop(K key, long count) {
        // Count variant emits up to `count` popped elements as a stream.
        return createDissolvingFlux(() -> commandBuilder.rpop(key, count));
    }
    @Override
    public Mono<V> rpoplpush(K source, K destination) {
        return createMono(() -> commandBuilder.rpoplpush(source, destination));
    }
    @Override
    public Mono<Long> rpush(K key, V... values) {
        return createMono(() -> commandBuilder.rpush(key, values));
    }
    @Override
    public Mono<Long> rpushx(K key, V... values) {
        return createMono(() -> commandBuilder.rpushx(key, values));
    }
    @Override
    public Mono<Long> sadd(K key, V... members) {
        return createMono(() -> commandBuilder.sadd(key, members));
    }
    @Override
    public Mono<String> save() {
        return createMono(commandBuilder::save);
    }
    // SCAN cursor family over the keyspace. Cursor/args overloads forward to the
    // builder; the KeyStreamingChannel variants push keys into the channel instead
    // of materializing them, returning only cursor progress.
    @Override
    public Mono<KeyScanCursor<K>> scan() {
        return createMono(commandBuilder::scan);
    }
    @Override
    public Mono<KeyScanCursor<K>> scan(ScanArgs scanArgs) {
        return createMono(() -> commandBuilder.scan(scanArgs));
    }
    @Override
    public Mono<KeyScanCursor<K>> scan(ScanCursor scanCursor, ScanArgs scanArgs) {
        return createMono(() -> commandBuilder.scan(scanCursor, scanArgs));
    }
    @Override
    public Mono<KeyScanCursor<K>> scan(ScanCursor scanCursor) {
        return createMono(() -> commandBuilder.scan(scanCursor));
    }
    @Override
    public Mono<StreamScanCursor> scan(KeyStreamingChannel<K> channel) {
        return createMono(() -> commandBuilder.scanStreaming(channel));
    }
    @Override
    public Mono<StreamScanCursor> scan(KeyStreamingChannel<K> channel, ScanArgs scanArgs) {
        return createMono(() -> commandBuilder.scanStreaming(channel, scanArgs));
    }
    @Override
    public Mono<StreamScanCursor> scan(KeyStreamingChannel<K> channel, ScanCursor scanCursor, ScanArgs scanArgs) {
        return createMono(() -> commandBuilder.scanStreaming(channel, scanCursor, scanArgs));
    }
    @Override
    public Mono<StreamScanCursor> scan(KeyStreamingChannel<K> channel, ScanCursor scanCursor) {
        return createMono(() -> commandBuilder.scanStreaming(channel, scanCursor));
    }
    @Override
    public Mono<Long> scard(K key) {
        return createMono(() -> commandBuilder.scard(key));
    }
    // Server-side scripting (SCRIPT EXISTS/FLUSH/KILL/LOAD), SDIFF[STORE], and SELECT.
    @Override
    public Flux<Boolean> scriptExists(String... digests) {
        return createDissolvingFlux(() -> commandBuilder.scriptExists(digests));
    }
    @Override
    public Mono<String> scriptFlush() {
        return createMono(commandBuilder::scriptFlush);
    }
    @Override
    public Mono<String> scriptFlush(FlushMode flushMode) {
        return createMono(() -> commandBuilder.scriptFlush(flushMode));
    }
    @Override
    public Mono<String> scriptKill() {
        return createMono(commandBuilder::scriptKill);
    }
    @Override
    public Mono<String> scriptLoad(String script) {
        // String scripts are encoded to bytes first (see encodeScript), then loaded.
        return scriptLoad(encodeScript(script));
    }
    @Override
    public Mono<String> scriptLoad(byte[] script) {
        return createMono(() -> commandBuilder.scriptLoad(script));
    }
    @Override
    public Flux<V> sdiff(K... keys) {
        return createDissolvingFlux(() -> commandBuilder.sdiff(keys));
    }
    @Override
    public Mono<Long> sdiff(ValueStreamingChannel<V> channel, K... keys) {
        return createMono(() -> commandBuilder.sdiff(channel, keys));
    }
    @Override
    public Mono<Long> sdiffstore(K destination, K... keys) {
        return createMono(() -> commandBuilder.sdiffstore(destination, keys));
    }
    // NOTE(review): no @Override — SELECT is connection-scoped and presumably not part of
    // the shared reactive command interface; confirm against the interface hierarchy.
    public Mono<String> select(int db) {
        return createMono(() -> commandBuilder.select(db));
    }
    // String SET family (SET/SETGET/SETBIT/SETEX/SETNX/SETRANGE), per-connection
    // timeout mutation, and SHUTDOWN.
    @Override
    public Mono<String> set(K key, V value) {
        return createMono(() -> commandBuilder.set(key, value));
    }
    @Override
    public Mono<String> set(K key, V value, SetArgs setArgs) {
        return createMono(() -> commandBuilder.set(key, value, setArgs));
    }
    @Override
    public Mono<V> setGet(K key, V value) {
        return createMono(() -> commandBuilder.setGet(key, value));
    }
    @Override
    public Mono<V> setGet(K key, V value, SetArgs setArgs) {
        return createMono(() -> commandBuilder.setGet(key, value, setArgs));
    }
    @Override
    public void setTimeout(Duration timeout) {
        // Side effect only: mutates the underlying connection's command timeout.
        connection.setTimeout(timeout);
    }
    @Override
    public Mono<Long> setbit(K key, long offset, int value) {
        return createMono(() -> commandBuilder.setbit(key, offset, value));
    }
    @Override
    public Mono<String> setex(K key, long seconds, V value) {
        return createMono(() -> commandBuilder.setex(key, seconds, value));
    }
    @Override
    public Mono<Boolean> setnx(K key, V value) {
        return createMono(() -> commandBuilder.setnx(key, value));
    }
    @Override
    public Mono<Long> setrange(K key, long offset, V value) {
        return createMono(() -> commandBuilder.setrange(key, offset, value));
    }
    @Override
    public Mono<Void> shutdown(boolean save) {
        // .then() discards the command reply and completes the Mono as Void.
        return createMono(() -> commandBuilder.shutdown(save)).then();
    }
    @Override
    public Mono<Void> shutdown(ShutdownArgs args) {
        return createMono(() -> commandBuilder.shutdown(args)).then();
    }
    // Set intersection (SINTER/SINTERCARD/SINTERSTORE), SISMEMBER, and deprecated-style
    // SLAVEOF replication commands.
    @Override
    public Flux<V> sinter(K... keys) {
        return createDissolvingFlux(() -> commandBuilder.sinter(keys));
    }
    @Override
    public Mono<Long> sinter(ValueStreamingChannel<V> channel, K... keys) {
        return createMono(() -> commandBuilder.sinter(channel, keys));
    }
    @Override
    public Mono<Long> sintercard(K... keys) {
        return createMono(() -> commandBuilder.sintercard(keys));
    }
    @Override
    public Mono<Long> sintercard(long limit, K... keys) {
        return createMono(() -> commandBuilder.sintercard(limit, keys));
    }
    @Override
    public Mono<Long> sinterstore(K destination, K... keys) {
        return createMono(() -> commandBuilder.sinterstore(destination, keys));
    }
    @Override
    public Mono<Boolean> sismember(K key, V member) {
        return createMono(() -> commandBuilder.sismember(key, member));
    }
    @Override
    public Mono<String> slaveof(String host, int port) {
        return createMono(() -> commandBuilder.slaveof(host, port));
    }
    @Override
    public Mono<String> slaveofNoOne() {
        return createMono(() -> commandBuilder.slaveofNoOne());
    }
    // SLOWLOG introspection and set membership commands (SMEMBERS/SMISMEMBER/SMOVE).
    @Override
    public Flux<Object> slowlogGet() {
        return createDissolvingFlux(() -> commandBuilder.slowlogGet());
    }
    @Override
    public Flux<Object> slowlogGet(int count) {
        return createDissolvingFlux(() -> commandBuilder.slowlogGet(count));
    }
    @Override
    public Mono<Long> slowlogLen() {
        return createMono(() -> commandBuilder.slowlogLen());
    }
    @Override
    public Mono<String> slowlogReset() {
        return createMono(() -> commandBuilder.slowlogReset());
    }
    @Override
    public Flux<V> smembers(K key) {
        return createDissolvingFlux(() -> commandBuilder.smembers(key));
    }
    @Override
    public Mono<Long> smembers(ValueStreamingChannel<V> channel, K key) {
        // Streaming variant: members are pushed into the channel; Mono carries the count.
        return createMono(() -> commandBuilder.smembers(channel, key));
    }
    @Override
    public Flux<Boolean> smismember(K key, V... members) {
        return createDissolvingFlux(() -> commandBuilder.smismember(key, members));
    }
    @Override
    public Mono<Boolean> smove(K source, K destination, V member) {
        return createMono(() -> commandBuilder.smove(source, destination, member));
    }
    // SORT family: plain, args-based, read-only (SORT_RO) and STORE variants, each with
    // a streaming-channel counterpart that returns the element count instead of values.
    @Override
    public Flux<V> sort(K key) {
        return createDissolvingFlux(() -> commandBuilder.sort(key));
    }
    @Override
    public Mono<Long> sort(ValueStreamingChannel<V> channel, K key) {
        return createMono(() -> commandBuilder.sort(channel, key));
    }
    @Override
    public Flux<V> sort(K key, SortArgs sortArgs) {
        return createDissolvingFlux(() -> commandBuilder.sort(key, sortArgs));
    }
    @Override
    public Mono<Long> sort(ValueStreamingChannel<V> channel, K key, SortArgs sortArgs) {
        return createMono(() -> commandBuilder.sort(channel, key, sortArgs));
    }
    @Override
    public Flux<V> sortReadOnly(K key) {
        return createDissolvingFlux(() -> commandBuilder.sortReadOnly(key));
    }
    @Override
    public Mono<Long> sortReadOnly(ValueStreamingChannel<V> channel, K key) {
        return createMono(() -> commandBuilder.sortReadOnly(channel, key));
    }
    @Override
    public Flux<V> sortReadOnly(K key, SortArgs sortArgs) {
        return createDissolvingFlux(() -> commandBuilder.sortReadOnly(key, sortArgs));
    }
    @Override
    public Mono<Long> sortReadOnly(ValueStreamingChannel<V> channel, K key, SortArgs sortArgs) {
        return createMono(() -> commandBuilder.sortReadOnly(channel, key, sortArgs));
    }
    @Override
    public Mono<Long> sortStore(K key, SortArgs sortArgs, K destination) {
        return createMono(() -> commandBuilder.sortStore(key, sortArgs, destination));
    }
    // Random/removal set commands (SPOP/SRANDMEMBER/SREM), shard Pub/Sub publish,
    // and the SSCAN cursor family with streaming-channel counterparts.
    @Override
    public Mono<V> spop(K key) {
        return createMono(() -> commandBuilder.spop(key));
    }
    @Override
    public Flux<V> spop(K key, long count) {
        return createDissolvingFlux(() -> commandBuilder.spop(key, count));
    }
    @Override
    public Mono<Long> spublish(K shardChannel, V message) {
        return createMono(() -> commandBuilder.spublish(shardChannel, message));
    }
    @Override
    public Mono<V> srandmember(K key) {
        return createMono(() -> commandBuilder.srandmember(key));
    }
    @Override
    public Flux<V> srandmember(K key, long count) {
        return createDissolvingFlux(() -> commandBuilder.srandmember(key, count));
    }
    @Override
    public Mono<Long> srandmember(ValueStreamingChannel<V> channel, K key, long count) {
        return createMono(() -> commandBuilder.srandmember(channel, key, count));
    }
    @Override
    public Mono<Long> srem(K key, V... members) {
        return createMono(() -> commandBuilder.srem(key, members));
    }
    @Override
    public Mono<ValueScanCursor<V>> sscan(K key) {
        return createMono(() -> commandBuilder.sscan(key));
    }
    @Override
    public Mono<ValueScanCursor<V>> sscan(K key, ScanArgs scanArgs) {
        return createMono(() -> commandBuilder.sscan(key, scanArgs));
    }
    @Override
    public Mono<ValueScanCursor<V>> sscan(K key, ScanCursor scanCursor, ScanArgs scanArgs) {
        return createMono(() -> commandBuilder.sscan(key, scanCursor, scanArgs));
    }
    @Override
    public Mono<ValueScanCursor<V>> sscan(K key, ScanCursor scanCursor) {
        return createMono(() -> commandBuilder.sscan(key, scanCursor));
    }
    @Override
    public Mono<StreamScanCursor> sscan(ValueStreamingChannel<V> channel, K key) {
        return createMono(() -> commandBuilder.sscanStreaming(channel, key));
    }
    @Override
    public Mono<StreamScanCursor> sscan(ValueStreamingChannel<V> channel, K key, ScanArgs scanArgs) {
        return createMono(() -> commandBuilder.sscanStreaming(channel, key, scanArgs));
    }
    @Override
    public Mono<StreamScanCursor> sscan(ValueStreamingChannel<V> channel, K key, ScanCursor scanCursor, ScanArgs scanArgs) {
        return createMono(() -> commandBuilder.sscanStreaming(channel, key, scanCursor, scanArgs));
    }
    @Override
    public Mono<StreamScanCursor> sscan(ValueStreamingChannel<V> channel, K key, ScanCursor scanCursor) {
        return createMono(() -> commandBuilder.sscanStreaming(channel, key, scanCursor));
    }
    // STRLEN, longest-common-subsequence matching (legacy STRALGO and LCS),
    // set union (SUNION[STORE]) and SWAPDB.
    @Override
    public Mono<Long> strlen(K key) {
        return createMono(() -> commandBuilder.strlen(key));
    }
    @Override
    public Mono<StringMatchResult> stralgoLcs(StrAlgoArgs strAlgoArgs) {
        return createMono(() -> commandBuilder.stralgoLcs(strAlgoArgs));
    }
    @Override
    public Mono<StringMatchResult> lcs(LcsArgs lcsArgs) {
        return createMono(() -> commandBuilder.lcs(lcsArgs));
    }
    @Override
    public Flux<V> sunion(K... keys) {
        return createDissolvingFlux(() -> commandBuilder.sunion(keys));
    }
    @Override
    public Mono<Long> sunion(ValueStreamingChannel<V> channel, K... keys) {
        return createMono(() -> commandBuilder.sunion(channel, keys));
    }
    @Override
    public Mono<Long> sunionstore(K destination, K... keys) {
        return createMono(() -> commandBuilder.sunionstore(destination, keys));
    }
    // NOTE(review): no @Override — SWAPDB is connection/database-scoped; presumably not on
    // the shared reactive interface. Confirm against the interface hierarchy.
    public Mono<String> swapdb(int db1, int db2) {
        return createMono(() -> commandBuilder.swapdb(db1, db2));
    }
    // TIME, key-touch/TTL/TYPE introspection, UNLINK, COPY, transaction watch helpers
    // and WAIT for replication acknowledgement.
    @Override
    public Flux<V> time() {
        return createDissolvingFlux(commandBuilder::time);
    }
    @Override
    public Mono<Long> touch(K... keys) {
        return createMono(() -> commandBuilder.touch(keys));
    }
    // NOTE(review): no @Override — Iterable overload appears to be an internal/compat variant.
    public Mono<Long> touch(Iterable<K> keys) {
        return createMono(() -> commandBuilder.touch(keys));
    }
    @Override
    public Mono<Long> ttl(K key) {
        return createMono(() -> commandBuilder.ttl(key));
    }
    @Override
    public Mono<String> type(K key) {
        return createMono(() -> commandBuilder.type(key));
    }
    @Override
    public Mono<Long> unlink(K... keys) {
        return createMono(() -> commandBuilder.unlink(keys));
    }
    // NOTE(review): no @Override — Iterable overload, see touch above.
    public Mono<Long> unlink(Iterable<K> keys) {
        return createMono(() -> commandBuilder.unlink(keys));
    }
    @Override
    public Mono<Boolean> copy(K source, K destination) {
        return createMono(() -> commandBuilder.copy(source, destination));
    }
    @Override
    public Mono<Boolean> copy(K source, K destination, CopyArgs copyArgs) {
        return createMono(() -> commandBuilder.copy(source, destination, copyArgs));
    }
    @Override
    public Mono<String> unwatch() {
        return createMono(commandBuilder::unwatch);
    }
    @Override
    public Mono<Long> waitForReplication(int replicas, long timeout) {
        // Maps to the WAIT command; renamed here because `wait` collides with Object#wait.
        return createMono(() -> commandBuilder.wait(replicas, timeout));
    }
    @Override
    public Mono<String> watch(K... keys) {
        return createMono(() -> commandBuilder.watch(keys));
    }
    // Stream commands: acknowledgement (XACK/XACKDEL), append (XADD), claiming
    // (XAUTOCLAIM/XCLAIM) and deletion (XDEL/XDELEX).
    @Override
    public Mono<Long> xack(K key, K group, String... messageIds) {
        return createMono(() -> commandBuilder.xack(key, group, messageIds));
    }
    @Override
    public Flux<StreamEntryDeletionResult> xackdel(K key, K group, String... messageIds) {
        return createDissolvingFlux(() -> commandBuilder.xackdel(key, group, messageIds));
    }
    @Override
    public Flux<StreamEntryDeletionResult> xackdel(K key, K group, StreamDeletionPolicy policy, String... messageIds) {
        return createDissolvingFlux(() -> commandBuilder.xackdel(key, group, policy, messageIds));
    }
    @Override
    public Mono<String> xadd(K key, Map<K, V> body) {
        // null XAddArgs means server defaults (auto-generated message id).
        return createMono(() -> commandBuilder.xadd(key, null, body));
    }
    @Override
    public Mono<String> xadd(K key, XAddArgs args, Map<K, V> body) {
        return createMono(() -> commandBuilder.xadd(key, args, body));
    }
    @Override
    public Mono<String> xadd(K key, Object... keysAndValues) {
        return createMono(() -> commandBuilder.xadd(key, null, keysAndValues));
    }
    @Override
    public Mono<String> xadd(K key, XAddArgs args, Object... keysAndValues) {
        return createMono(() -> commandBuilder.xadd(key, args, keysAndValues));
    }
    @Override
    public Mono<ClaimedMessages<K, V>> xautoclaim(K key, XAutoClaimArgs<K> args) {
        return createMono(() -> commandBuilder.xautoclaim(key, args));
    }
    @Override
    public Flux<StreamMessage<K, V>> xclaim(K key, Consumer<K> consumer, long minIdleTime, String... messageIds) {
        // Adapts the minIdleTime convenience signature onto the args-based builder call.
        return createDissolvingFlux(
                () -> commandBuilder.xclaim(key, consumer, XClaimArgs.Builder.minIdleTime(minIdleTime), messageIds));
    }
    @Override
    public Flux<StreamMessage<K, V>> xclaim(K key, Consumer<K> consumer, XClaimArgs args, String... messageIds) {
        return createDissolvingFlux(() -> commandBuilder.xclaim(key, consumer, args, messageIds));
    }
    @Override
    public Mono<Long> xdel(K key, String... messageIds) {
        return createMono(() -> commandBuilder.xdel(key, messageIds));
    }
    @Override
    public Flux<StreamEntryDeletionResult> xdelex(K key, String... messageIds) {
        return createDissolvingFlux(() -> commandBuilder.xdelex(key, messageIds));
    }
    @Override
    public Flux<StreamEntryDeletionResult> xdelex(K key, StreamDeletionPolicy policy, String... messageIds) {
        return createDissolvingFlux(() -> commandBuilder.xdelex(key, policy, messageIds));
    }
    // Stream consumer-group management (XGROUP *) and stream introspection (XINFO *, XLEN).
    @Override
    public Mono<String> xgroupCreate(XReadArgs.StreamOffset<K> streamOffset, K group) {
        // null args means no CREATE options (e.g. no MKSTREAM).
        return createMono(() -> commandBuilder.xgroupCreate(streamOffset, group, null));
    }
    @Override
    public Mono<String> xgroupCreate(XReadArgs.StreamOffset<K> streamOffset, K group, XGroupCreateArgs args) {
        return createMono(() -> commandBuilder.xgroupCreate(streamOffset, group, args));
    }
    @Override
    public Mono<Boolean> xgroupCreateconsumer(K key, Consumer<K> consumer) {
        return createMono(() -> commandBuilder.xgroupCreateconsumer(key, consumer));
    }
    @Override
    public Mono<Long> xgroupDelconsumer(K key, Consumer<K> consumer) {
        return createMono(() -> commandBuilder.xgroupDelconsumer(key, consumer));
    }
    @Override
    public Mono<Boolean> xgroupDestroy(K key, K group) {
        return createMono(() -> commandBuilder.xgroupDestroy(key, group));
    }
    @Override
    public Mono<String> xgroupSetid(XReadArgs.StreamOffset<K> streamOffset, K group) {
        return createMono(() -> commandBuilder.xgroupSetid(streamOffset, group));
    }
    @Override
    public Flux<Object> xinfoStream(K key) {
        return createDissolvingFlux(() -> commandBuilder.xinfoStream(key));
    }
    @Override
    public Flux<Object> xinfoGroups(K key) {
        return createDissolvingFlux(() -> commandBuilder.xinfoGroups(key));
    }
    @Override
    public Flux<Object> xinfoConsumers(K key, K group) {
        return createDissolvingFlux(() -> commandBuilder.xinfoConsumers(key, group));
    }
    @Override
    public Mono<Long> xlen(K key) {
        return createMono(() -> commandBuilder.xlen(key));
    }
    // Stream reads: pending-entry inspection (XPENDING), range queries (XRANGE/XREVRANGE),
    // blocking/non-blocking reads (XREAD/XREADGROUP) and trimming (XTRIM).
    @Override
    public Mono<PendingMessages> xpending(K key, K group) {
        return createMono(() -> commandBuilder.xpending(key, group));
    }
    @Override
    public Flux<PendingMessage> xpending(K key, K group, Range<String> range, Limit limit) {
        return createDissolvingFlux(() -> commandBuilder.xpending(key, group, range, limit));
    }
    @Override
    public Flux<PendingMessage> xpending(K key, Consumer<K> consumer, Range<String> range, Limit limit) {
        return createDissolvingFlux(() -> commandBuilder.xpending(key, consumer, range, limit));
    }
    @Override
    public Flux<PendingMessage> xpending(K key, XPendingArgs<K> args) {
        return createDissolvingFlux(() -> commandBuilder.xpending(key, args));
    }
    @Override
    public Flux<StreamMessage<K, V>> xrange(K key, Range<String> range) {
        // Unbounded variant: explicit Limit.unlimited() keeps a single builder signature.
        return createDissolvingFlux(() -> commandBuilder.xrange(key, range, Limit.unlimited()));
    }
    @Override
    public Flux<StreamMessage<K, V>> xrange(K key, Range<String> range, Limit limit) {
        return createDissolvingFlux(() -> commandBuilder.xrange(key, range, limit));
    }
    @Override
    public Flux<StreamMessage<K, V>> xread(XReadArgs.StreamOffset<K>... streams) {
        return createDissolvingFlux(() -> commandBuilder.xread(null, streams));
    }
    @Override
    public Flux<StreamMessage<K, V>> xread(XReadArgs args, XReadArgs.StreamOffset<K>... streams) {
        return createDissolvingFlux(() -> commandBuilder.xread(args, streams));
    }
    @Override
    public Flux<StreamMessage<K, V>> xreadgroup(Consumer<K> consumer, XReadArgs.StreamOffset<K>... streams) {
        return createDissolvingFlux(() -> commandBuilder.xreadgroup(consumer, null, streams));
    }
    @Override
    public Flux<StreamMessage<K, V>> xreadgroup(Consumer<K> consumer, XReadArgs args, XReadArgs.StreamOffset<K>... streams) {
        return createDissolvingFlux(() -> commandBuilder.xreadgroup(consumer, args, streams));
    }
    @Override
    public Flux<StreamMessage<K, V>> xrevrange(K key, Range<String> range) {
        return xrevrange(key, range, Limit.unlimited());
    }
    @Override
    public Flux<StreamMessage<K, V>> xrevrange(K key, Range<String> range, Limit limit) {
        return createDissolvingFlux(() -> commandBuilder.xrevrange(key, range, limit));
    }
    @Override
    public Mono<Long> xtrim(K key, long count) {
        // Exact (non-approximate) trim by default.
        return xtrim(key, false, count);
    }
    @Override
    public Mono<Long> xtrim(K key, boolean approximateTrimming, long count) {
        return createMono(() -> commandBuilder.xtrim(key, approximateTrimming, count));
    }
    @Override
    public Mono<Long> xtrim(K key, XTrimArgs args) {
        return createMono(() -> commandBuilder.xtrim(key, args));
    }
    // Blocking sorted-set pops (BZMPOP/BZPOPMIN/BZPOPMAX) with long- and double-precision
    // timeout overloads. NOTE(review): the count parameter is `long` in the long-timeout
    // BZMPOP overload but `int` in the double-timeout one — mirrors the interface as declared.
    @Override
    public Mono<KeyValue<K, ScoredValue<V>>> bzmpop(long timeout, ZPopArgs args, K... keys) {
        return createMono(() -> commandBuilder.bzmpop(timeout, args, keys));
    }
    @Override
    public Mono<KeyValue<K, List<ScoredValue<V>>>> bzmpop(long timeout, long count, ZPopArgs args, K... keys) {
        return createMono(() -> commandBuilder.bzmpop(timeout, count, args, keys));
    }
    @Override
    public Mono<KeyValue<K, ScoredValue<V>>> bzmpop(double timeout, ZPopArgs args, K... keys) {
        return createMono(() -> commandBuilder.bzmpop(timeout, args, keys));
    }
    @Override
    public Mono<KeyValue<K, List<ScoredValue<V>>>> bzmpop(double timeout, int count, ZPopArgs args, K... keys) {
        return createMono(() -> commandBuilder.bzmpop(timeout, count, args, keys));
    }
    @Override
    public Mono<KeyValue<K, ScoredValue<V>>> bzpopmin(long timeout, K... keys) {
        return createMono(() -> commandBuilder.bzpopmin(timeout, keys));
    }
    @Override
    public Mono<KeyValue<K, ScoredValue<V>>> bzpopmin(double timeout, K... keys) {
        return createMono(() -> commandBuilder.bzpopmin(timeout, keys));
    }
    @Override
    public Mono<KeyValue<K, ScoredValue<V>>> bzpopmax(long timeout, K... keys) {
        return createMono(() -> commandBuilder.bzpopmax(timeout, keys));
    }
    @Override
    public Mono<KeyValue<K, ScoredValue<V>>> bzpopmax(double timeout, K... keys) {
        return createMono(() -> commandBuilder.bzpopmax(timeout, keys));
    }
    // Sorted-set add/increment (ZADD/ZADD INCR), cardinality and score-range counting.
    @Override
    public Mono<Long> zadd(K key, double score, V member) {
        return createMono(() -> commandBuilder.zadd(key, null, score, member));
    }
    @Override
    public Mono<Long> zadd(K key, Object... scoresAndValues) {
        return createMono(() -> commandBuilder.zadd(key, null, scoresAndValues));
    }
    @Override
    public Mono<Long> zadd(K key, ScoredValue<V>... scoredValues) {
        // (Object[]) cast selects the varargs-of-Object builder overload, avoiding
        // wrapping the ScoredValue array in a single-element array.
        return createMono(() -> commandBuilder.zadd(key, null, (Object[]) scoredValues));
    }
    @Override
    public Mono<Long> zadd(K key, ZAddArgs zAddArgs, double score, V member) {
        return createMono(() -> commandBuilder.zadd(key, zAddArgs, score, member));
    }
    @Override
    public Mono<Long> zadd(K key, ZAddArgs zAddArgs, Object... scoresAndValues) {
        return createMono(() -> commandBuilder.zadd(key, zAddArgs, scoresAndValues));
    }
    @Override
    public Mono<Long> zadd(K key, ZAddArgs zAddArgs, ScoredValue<V>... scoredValues) {
        return createMono(() -> commandBuilder.zadd(key, zAddArgs, (Object[]) scoredValues));
    }
    @Override
    public Mono<Double> zaddincr(K key, double score, V member) {
        return createMono(() -> commandBuilder.zaddincr(key, null, score, member));
    }
    @Override
    public Mono<Double> zaddincr(K key, ZAddArgs zAddArgs, double score, V member) {
        return createMono(() -> commandBuilder.zaddincr(key, zAddArgs, score, member));
    }
    @Override
    public Mono<Long> zcard(K key) {
        return createMono(() -> commandBuilder.zcard(key));
    }
    // NOTE(review): no @Override — the (double, double) variant is presumably a deprecated
    // compatibility overload superseded by the Range-based one below. Verify.
    public Mono<Long> zcount(K key, double min, double max) {
        return createMono(() -> commandBuilder.zcount(key, min, max));
    }
    @Override
    public Mono<Long> zcount(K key, String min, String max) {
        return createMono(() -> commandBuilder.zcount(key, min, max));
    }
    @Override
    public Mono<Long> zcount(K key, Range<? extends Number> range) {
        return createMono(() -> commandBuilder.zcount(key, range));
    }
    // Sorted-set difference/intersection (ZDIFF*/ZINTER*/ZINTERCARD/ZINTERSTORE),
    // score increment, and lexicographic range counting.
    @Override
    public Flux<V> zdiff(K... keys) {
        return createDissolvingFlux(() -> commandBuilder.zdiff(keys));
    }
    @Override
    public Mono<Long> zdiffstore(K destKey, K... srcKeys) {
        return createMono(() -> commandBuilder.zdiffstore(destKey, srcKeys));
    }
    @Override
    public Flux<ScoredValue<V>> zdiffWithScores(K... keys) {
        return createDissolvingFlux(() -> commandBuilder.zdiffWithScores(keys));
    }
    @Override
    public Mono<Double> zincrby(K key, double amount, V member) {
        return createMono(() -> commandBuilder.zincrby(key, amount, member));
    }
    @Override
    public Flux<V> zinter(K... keys) {
        return createDissolvingFlux(() -> commandBuilder.zinter(keys));
    }
    @Override
    public Flux<V> zinter(ZAggregateArgs aggregateArgs, K... keys) {
        return createDissolvingFlux(() -> commandBuilder.zinter(aggregateArgs, keys));
    }
    @Override
    public Mono<Long> zintercard(K... keys) {
        return createMono(() -> commandBuilder.zintercard(keys));
    }
    @Override
    public Mono<Long> zintercard(long limit, K... keys) {
        return createMono(() -> commandBuilder.zintercard(limit, keys));
    }
    @Override
    public Flux<ScoredValue<V>> zinterWithScores(K... keys) {
        return createDissolvingFlux(() -> commandBuilder.zinterWithScores(keys));
    }
    @Override
    public Flux<ScoredValue<V>> zinterWithScores(ZAggregateArgs aggregateArgs, K... keys) {
        return createDissolvingFlux(() -> commandBuilder.zinterWithScores(aggregateArgs, keys));
    }
    @Override
    public Mono<Long> zinterstore(K destination, K... keys) {
        return createMono(() -> commandBuilder.zinterstore(destination, keys));
    }
    @Override
    public Mono<Long> zinterstore(K destination, ZStoreArgs zStoreArgs, K... keys) {
        return createMono(() -> commandBuilder.zinterstore(destination, zStoreArgs, keys));
    }
    @Override
    public Mono<Long> zlexcount(K key, String min, String max) {
        return createMono(() -> commandBuilder.zlexcount(key, min, max));
    }
    @Override
    public Mono<Long> zlexcount(K key, Range<? extends V> range) {
        return createMono(() -> commandBuilder.zlexcount(key, range));
    }
    // Multi-score lookup (ZMSCORE), non-blocking multi-key pops (ZMPOP),
    // min/max pops (ZPOPMIN/ZPOPMAX) and random-member selection (ZRANDMEMBER).
    @Override
    public Mono<List<Double>> zmscore(K key, V... members) {
        return createMono(() -> commandBuilder.zmscore(key, members));
    }
    @Override
    public Mono<KeyValue<K, ScoredValue<V>>> zmpop(ZPopArgs args, K... keys) {
        return createMono(() -> commandBuilder.zmpop(args, keys));
    }
    @Override
    public Mono<KeyValue<K, List<ScoredValue<V>>>> zmpop(int count, ZPopArgs args, K... keys) {
        return createMono(() -> commandBuilder.zmpop(count, args, keys));
    }
    @Override
    public Mono<ScoredValue<V>> zpopmin(K key) {
        return createMono(() -> commandBuilder.zpopmin(key));
    }
    @Override
    public Flux<ScoredValue<V>> zpopmin(K key, long count) {
        return createDissolvingFlux(() -> commandBuilder.zpopmin(key, count));
    }
    @Override
    public Mono<ScoredValue<V>> zpopmax(K key) {
        return createMono(() -> commandBuilder.zpopmax(key));
    }
    @Override
    public Flux<ScoredValue<V>> zpopmax(K key, long count) {
        return createDissolvingFlux(() -> commandBuilder.zpopmax(key, count));
    }
    @Override
    public Mono<V> zrandmember(K key) {
        return createMono(() -> commandBuilder.zrandmember(key));
    }
    @Override
    public Flux<V> zrandmember(K key, long count) {
        return createDissolvingFlux(() -> commandBuilder.zrandmember(key, count));
    }
    @Override
    public Mono<ScoredValue<V>> zrandmemberWithScores(K key) {
        return createMono(() -> commandBuilder.zrandmemberWithScores(key));
    }
    @Override
    public Flux<ScoredValue<V>> zrandmemberWithScores(K key, long count) {
        return createDissolvingFlux(() -> commandBuilder.zrandmemberWithScores(key, count));
    }
    // Index-based ranges (ZRANGE, with and without scores / streaming channel)
    // and lexicographic ranges (ZRANGEBYLEX).
    @Override
    public Flux<V> zrange(K key, long start, long stop) {
        return createDissolvingFlux(() -> commandBuilder.zrange(key, start, stop));
    }
    @Override
    public Mono<Long> zrange(ValueStreamingChannel<V> channel, K key, long start, long stop) {
        return createMono(() -> commandBuilder.zrange(channel, key, start, stop));
    }
    @Override
    public Flux<ScoredValue<V>> zrangeWithScores(K key, long start, long stop) {
        return createDissolvingFlux(() -> commandBuilder.zrangeWithScores(key, start, stop));
    }
    @Override
    public Mono<Long> zrangeWithScores(ScoredValueStreamingChannel<V> channel, K key, long start, long stop) {
        return createMono(() -> commandBuilder.zrangeWithScores(channel, key, start, stop));
    }
    @Override
    public Flux<V> zrangebylex(K key, String min, String max) {
        return createDissolvingFlux(() -> commandBuilder.zrangebylex(key, min, max));
    }
    @Override
    public Flux<V> zrangebylex(K key, Range<? extends V> range) {
        // Range-based variant defaults to an unlimited Limit.
        return createDissolvingFlux(() -> commandBuilder.zrangebylex(key, range, Limit.unlimited()));
    }
    @Override
    public Flux<V> zrangebylex(K key, String min, String max, long offset, long count) {
        return createDissolvingFlux(() -> commandBuilder.zrangebylex(key, min, max, offset, count));
    }
    @Override
    public Flux<V> zrangebylex(K key, Range<? extends V> range, Limit limit) {
        return createDissolvingFlux(() -> commandBuilder.zrangebylex(key, range, limit));
    }
    // Score-based ranges (ZRANGEBYSCORE): double/String/Range bounds, optional
    // offset+count paging, and streaming-channel counterparts returning element counts.
    @Override
    public Flux<V> zrangebyscore(K key, double min, double max) {
        return createDissolvingFlux(() -> commandBuilder.zrangebyscore(key, min, max));
    }
    @Override
    public Flux<V> zrangebyscore(K key, String min, String max) {
        return createDissolvingFlux(() -> commandBuilder.zrangebyscore(key, min, max));
    }
    @Override
    public Flux<V> zrangebyscore(K key, double min, double max, long offset, long count) {
        return createDissolvingFlux(() -> commandBuilder.zrangebyscore(key, min, max, offset, count));
    }
    @Override
    public Flux<V> zrangebyscore(K key, String min, String max, long offset, long count) {
        return createDissolvingFlux(() -> commandBuilder.zrangebyscore(key, min, max, offset, count));
    }
    @Override
    public Flux<V> zrangebyscore(K key, Range<? extends Number> range) {
        return createDissolvingFlux(() -> commandBuilder.zrangebyscore(key, range, Limit.unlimited()));
    }
    @Override
    public Flux<V> zrangebyscore(K key, Range<? extends Number> range, Limit limit) {
        return createDissolvingFlux(() -> commandBuilder.zrangebyscore(key, range, limit));
    }
    @Override
    public Mono<Long> zrangebyscore(ValueStreamingChannel<V> channel, K key, double min, double max) {
        return createMono(() -> commandBuilder.zrangebyscore(channel, key, min, max));
    }
    @Override
    public Mono<Long> zrangebyscore(ValueStreamingChannel<V> channel, K key, String min, String max) {
        return createMono(() -> commandBuilder.zrangebyscore(channel, key, min, max));
    }
    @Override
    public Mono<Long> zrangebyscore(ValueStreamingChannel<V> channel, K key, double min, double max, long offset, long count) {
        return createMono(() -> commandBuilder.zrangebyscore(channel, key, min, max, offset, count));
    }
    @Override
    public Mono<Long> zrangebyscore(ValueStreamingChannel<V> channel, K key, Range<? extends Number> range) {
        return createMono(() -> commandBuilder.zrangebyscore(channel, key, range, Limit.unlimited()));
    }
    @Override
    public Mono<Long> zrangebyscore(ValueStreamingChannel<V> channel, K key, String min, String max, long offset, long count) {
        return createMono(() -> commandBuilder.zrangebyscore(channel, key, min, max, offset, count));
    }
    @Override
    public Mono<Long> zrangebyscore(ValueStreamingChannel<V> channel, K key, Range<? extends Number> range, Limit limit) {
        return createMono(() -> commandBuilder.zrangebyscore(channel, key, range, limit));
    }
    // ZRANGEBYSCORE WITHSCORES: same overload matrix as zrangebyscore above, but
    // each element carries its score (ScoredValue) or is pushed into a
    // ScoredValueStreamingChannel.
    @Override
    public Flux<ScoredValue<V>> zrangebyscoreWithScores(K key, double min, double max) {
        return createDissolvingFlux(() -> commandBuilder.zrangebyscoreWithScores(key, min, max));
    }
    @Override
    public Flux<ScoredValue<V>> zrangebyscoreWithScores(K key, String min, String max) {
        return createDissolvingFlux(() -> commandBuilder.zrangebyscoreWithScores(key, min, max));
    }
    @Override
    public Flux<ScoredValue<V>> zrangebyscoreWithScores(K key, double min, double max, long offset, long count) {
        return createDissolvingFlux(() -> commandBuilder.zrangebyscoreWithScores(key, min, max, offset, count));
    }
    @Override
    public Flux<ScoredValue<V>> zrangebyscoreWithScores(K key, String min, String max, long offset, long count) {
        return createDissolvingFlux(() -> commandBuilder.zrangebyscoreWithScores(key, min, max, offset, count));
    }
    @Override
    public Flux<ScoredValue<V>> zrangebyscoreWithScores(K key, Range<? extends Number> range) {
        return createDissolvingFlux(() -> commandBuilder.zrangebyscoreWithScores(key, range, Limit.unlimited()));
    }
    @Override
    public Flux<ScoredValue<V>> zrangebyscoreWithScores(K key, Range<? extends Number> range, Limit limit) {
        return createDissolvingFlux(() -> commandBuilder.zrangebyscoreWithScores(key, range, limit));
    }
    @Override
    public Mono<Long> zrangebyscoreWithScores(ScoredValueStreamingChannel<V> channel, K key, double min, double max) {
        return createMono(() -> commandBuilder.zrangebyscoreWithScores(channel, key, min, max));
    }
    @Override
    public Mono<Long> zrangebyscoreWithScores(ScoredValueStreamingChannel<V> channel, K key, String min, String max) {
        return createMono(() -> commandBuilder.zrangebyscoreWithScores(channel, key, min, max));
    }
    @Override
    public Mono<Long> zrangebyscoreWithScores(ScoredValueStreamingChannel<V> channel, K key, Range<? extends Number> range) {
        return createMono(() -> commandBuilder.zrangebyscoreWithScores(channel, key, range, Limit.unlimited()));
    }
    @Override
    public Mono<Long> zrangebyscoreWithScores(ScoredValueStreamingChannel<V> channel, K key, double min, double max,
            long offset, long count) {
        return createMono(() -> commandBuilder.zrangebyscoreWithScores(channel, key, min, max, offset, count));
    }
    @Override
    public Mono<Long> zrangebyscoreWithScores(ScoredValueStreamingChannel<V> channel, K key, String min, String max,
            long offset, long count) {
        return createMono(() -> commandBuilder.zrangebyscoreWithScores(channel, key, min, max, offset, count));
    }
@Override
public Mono<Long> zrangebyscoreWithScores(ScoredValueStreamingChannel<V> channel, K key, Range<? extends Number> range,
Limit limit) {
return createMono(() -> commandBuilder.zrangebyscoreWithScores(channel, key, range, limit));
}
    // --- ZRANGESTORE / ZRANK / ZREM / ZREMRANGEBY* delegations.
    @Override
    public Mono<Long> zrangestore(K dstKey, K srcKey, Range<Long> range) {
        // Final 'false' selects the forward (non-REV) variant of ZRANGESTORE.
        return createMono(() -> commandBuilder.zrangestore(dstKey, srcKey, range, false));
    }
    @Override
    public Mono<Long> zrangestorebylex(K dstKey, K srcKey, Range<? extends V> range, Limit limit) {
        // Final 'false' selects the forward (non-REV) variant.
        return createMono(() -> commandBuilder.zrangestorebylex(dstKey, srcKey, range, limit, false));
    }
    @Override
    public Mono<Long> zrangestorebyscore(K dstKey, K srcKey, Range<? extends Number> range, Limit limit) {
        // Final 'false' selects the forward (non-REV) variant.
        return createMono(() -> commandBuilder.zrangestorebyscore(dstKey, srcKey, range, limit, false));
    }
    @Override
    public Mono<Long> zrank(K key, V member) {
        return createMono(() -> commandBuilder.zrank(key, member));
    }
    @Override
    public Mono<ScoredValue<Long>> zrankWithScore(K key, V member) {
        return createMono(() -> commandBuilder.zrankWithScore(key, member));
    }
    @Override
    public Mono<Long> zrem(K key, V... members) {
        return createMono(() -> commandBuilder.zrem(key, members));
    }
    @Override
    public Mono<Long> zremrangebylex(K key, String min, String max) {
        return createMono(() -> commandBuilder.zremrangebylex(key, min, max));
    }
    @Override
    public Mono<Long> zremrangebylex(K key, Range<? extends V> range) {
        return createMono(() -> commandBuilder.zremrangebylex(key, range));
    }
    @Override
    public Mono<Long> zremrangebyrank(K key, long start, long stop) {
        return createMono(() -> commandBuilder.zremrangebyrank(key, start, stop));
    }
    @Override
    public Mono<Long> zremrangebyscore(K key, double min, double max) {
        return createMono(() -> commandBuilder.zremrangebyscore(key, min, max));
    }
    @Override
    public Mono<Long> zremrangebyscore(K key, String min, String max) {
        return createMono(() -> commandBuilder.zremrangebyscore(key, min, max));
    }
    @Override
    public Mono<Long> zremrangebyscore(K key, Range<? extends Number> range) {
        return createMono(() -> commandBuilder.zremrangebyscore(key, range));
    }
    // --- ZREVRANGE / ZREVRANGEBYLEX / ZREVRANGEBYSCORE family: descending-order range
    // queries. Note the (max, min) argument order mandated by the Redis REV commands.
    @Override
    public Flux<V> zrevrange(K key, long start, long stop) {
        return createDissolvingFlux(() -> commandBuilder.zrevrange(key, start, stop));
    }
    @Override
    public Mono<Long> zrevrange(ValueStreamingChannel<V> channel, K key, long start, long stop) {
        return createMono(() -> commandBuilder.zrevrange(channel, key, start, stop));
    }
    @Override
    public Flux<ScoredValue<V>> zrevrangeWithScores(K key, long start, long stop) {
        return createDissolvingFlux(() -> commandBuilder.zrevrangeWithScores(key, start, stop));
    }
    @Override
    public Mono<Long> zrevrangeWithScores(ScoredValueStreamingChannel<V> channel, K key, long start, long stop) {
        return createMono(() -> commandBuilder.zrevrangeWithScores(channel, key, start, stop));
    }
    @Override
    public Flux<V> zrevrangebylex(K key, Range<? extends V> range) {
        // No LIMIT requested: delegate with an explicit unlimited Limit.
        return createDissolvingFlux(() -> commandBuilder.zrevrangebylex(key, range, Limit.unlimited()));
    }
    @Override
    public Flux<V> zrevrangebylex(K key, Range<? extends V> range, Limit limit) {
        return createDissolvingFlux(() -> commandBuilder.zrevrangebylex(key, range, limit));
    }
    @Override
    public Flux<V> zrevrangebyscore(K key, double max, double min) {
        return createDissolvingFlux(() -> commandBuilder.zrevrangebyscore(key, max, min));
    }
    @Override
    public Flux<V> zrevrangebyscore(K key, String max, String min) {
        return createDissolvingFlux(() -> commandBuilder.zrevrangebyscore(key, max, min));
    }
    @Override
    public Flux<V> zrevrangebyscore(K key, Range<? extends Number> range) {
        // No LIMIT requested: delegate with an explicit unlimited Limit.
        return createDissolvingFlux(() -> commandBuilder.zrevrangebyscore(key, range, Limit.unlimited()));
    }
    @Override
    public Flux<V> zrevrangebyscore(K key, double max, double min, long offset, long count) {
        return createDissolvingFlux(() -> commandBuilder.zrevrangebyscore(key, max, min, offset, count));
    }
    @Override
    public Flux<V> zrevrangebyscore(K key, String max, String min, long offset, long count) {
        return createDissolvingFlux(() -> commandBuilder.zrevrangebyscore(key, max, min, offset, count));
    }
    @Override
    public Flux<V> zrevrangebyscore(K key, Range<? extends Number> range, Limit limit) {
        return createDissolvingFlux(() -> commandBuilder.zrevrangebyscore(key, range, limit));
    }
    @Override
    public Mono<Long> zrevrangebyscore(ValueStreamingChannel<V> channel, K key, double max, double min) {
        return createMono(() -> commandBuilder.zrevrangebyscore(channel, key, max, min));
    }
    @Override
    public Mono<Long> zrevrangebyscore(ValueStreamingChannel<V> channel, K key, String max, String min) {
        return createMono(() -> commandBuilder.zrevrangebyscore(channel, key, max, min));
    }
    @Override
    public Mono<Long> zrevrangebyscore(ValueStreamingChannel<V> channel, K key, Range<? extends Number> range) {
        // No LIMIT requested: delegate with an explicit unlimited Limit.
        return createMono(() -> commandBuilder.zrevrangebyscore(channel, key, range, Limit.unlimited()));
    }
    @Override
    public Mono<Long> zrevrangebyscore(ValueStreamingChannel<V> channel, K key, double max, double min, long offset,
            long count) {
        return createMono(() -> commandBuilder.zrevrangebyscore(channel, key, max, min, offset, count));
    }
    @Override
    public Mono<Long> zrevrangebyscore(ValueStreamingChannel<V> channel, K key, String max, String min, long offset,
            long count) {
        return createMono(() -> commandBuilder.zrevrangebyscore(channel, key, max, min, offset, count));
    }
    @Override
    public Mono<Long> zrevrangebyscore(ValueStreamingChannel<V> channel, K key, Range<? extends Number> range, Limit limit) {
        return createMono(() -> commandBuilder.zrevrangebyscore(channel, key, range, limit));
    }
    // --- ZREVRANGEBYSCORE WITHSCORES overloads, plus ZREVRANGESTORE* and ZREVRANK.
    // The zrevrangestore* methods reuse the zrangestore* builders with the rev flag
    // set to 'true' to select the descending (REV) command variant.
    @Override
    public Flux<ScoredValue<V>> zrevrangebyscoreWithScores(K key, double max, double min) {
        return createDissolvingFlux(() -> commandBuilder.zrevrangebyscoreWithScores(key, max, min));
    }
    @Override
    public Flux<ScoredValue<V>> zrevrangebyscoreWithScores(K key, String max, String min) {
        return createDissolvingFlux(() -> commandBuilder.zrevrangebyscoreWithScores(key, max, min));
    }
    @Override
    public Flux<ScoredValue<V>> zrevrangebyscoreWithScores(K key, Range<? extends Number> range) {
        // No LIMIT requested: delegate with an explicit unlimited Limit.
        return createDissolvingFlux(() -> commandBuilder.zrevrangebyscoreWithScores(key, range, Limit.unlimited()));
    }
    @Override
    public Flux<ScoredValue<V>> zrevrangebyscoreWithScores(K key, double max, double min, long offset, long count) {
        return createDissolvingFlux(() -> commandBuilder.zrevrangebyscoreWithScores(key, max, min, offset, count));
    }
    @Override
    public Flux<ScoredValue<V>> zrevrangebyscoreWithScores(K key, String max, String min, long offset, long count) {
        return createDissolvingFlux(() -> commandBuilder.zrevrangebyscoreWithScores(key, max, min, offset, count));
    }
    @Override
    public Flux<ScoredValue<V>> zrevrangebyscoreWithScores(K key, Range<? extends Number> range, Limit limit) {
        return createDissolvingFlux(() -> commandBuilder.zrevrangebyscoreWithScores(key, range, limit));
    }
    @Override
    public Mono<Long> zrevrangebyscoreWithScores(ScoredValueStreamingChannel<V> channel, K key, double max, double min) {
        return createMono(() -> commandBuilder.zrevrangebyscoreWithScores(channel, key, max, min));
    }
    @Override
    public Mono<Long> zrevrangebyscoreWithScores(ScoredValueStreamingChannel<V> channel, K key, String max, String min) {
        return createMono(() -> commandBuilder.zrevrangebyscoreWithScores(channel, key, max, min));
    }
    @Override
    public Mono<Long> zrevrangebyscoreWithScores(ScoredValueStreamingChannel<V> channel, K key, Range<? extends Number> range) {
        // No LIMIT requested: delegate with an explicit unlimited Limit.
        return createMono(() -> commandBuilder.zrevrangebyscoreWithScores(channel, key, range, Limit.unlimited()));
    }
    @Override
    public Mono<Long> zrevrangebyscoreWithScores(ScoredValueStreamingChannel<V> channel, K key, double max, double min,
            long offset, long count) {
        return createMono(() -> commandBuilder.zrevrangebyscoreWithScores(channel, key, max, min, offset, count));
    }
    @Override
    public Mono<Long> zrevrangebyscoreWithScores(ScoredValueStreamingChannel<V> channel, K key, String max, String min,
            long offset, long count) {
        return createMono(() -> commandBuilder.zrevrangebyscoreWithScores(channel, key, max, min, offset, count));
    }
    @Override
    public Mono<Long> zrevrangebyscoreWithScores(ScoredValueStreamingChannel<V> channel, K key, Range<? extends Number> range,
            Limit limit) {
        return createMono(() -> commandBuilder.zrevrangebyscoreWithScores(channel, key, range, limit));
    }
    @Override
    public Mono<Long> zrevrangestore(K dstKey, K srcKey, Range<Long> range) {
        // 'true' selects the REV (descending) variant of ZRANGESTORE.
        return createMono(() -> commandBuilder.zrangestore(dstKey, srcKey, range, true));
    }
    @Override
    public Mono<Long> zrevrangestorebylex(K dstKey, K srcKey, Range<? extends V> range, Limit limit) {
        // 'true' selects the REV (descending) variant.
        return createMono(() -> commandBuilder.zrangestorebylex(dstKey, srcKey, range, limit, true));
    }
    @Override
    public Mono<Long> zrevrangestorebyscore(K dstKey, K srcKey, Range<? extends Number> range, Limit limit) {
        // 'true' selects the REV (descending) variant.
        return createMono(() -> commandBuilder.zrangestorebyscore(dstKey, srcKey, range, limit, true));
    }
    @Override
    public Mono<Long> zrevrank(K key, V member) {
        return createMono(() -> commandBuilder.zrevrank(key, member));
    }
    @Override
    public Mono<ScoredValue<Long>> zrevrankWithScore(K key, V member) {
        return createMono(() -> commandBuilder.zrevrankWithScore(key, member));
    }
    // --- ZSCAN (cursor-based sorted-set iteration), ZSCORE, ZUNION/ZUNIONSTORE and
    // CLUSTER LINKS delegations. Streaming zscan overloads route through the
    // zscanStreaming builder and report progress via a StreamScanCursor.
    @Override
    public Mono<ScoredValueScanCursor<V>> zscan(K key) {
        return createMono(() -> commandBuilder.zscan(key));
    }
    @Override
    public Mono<ScoredValueScanCursor<V>> zscan(K key, ScanArgs scanArgs) {
        return createMono(() -> commandBuilder.zscan(key, scanArgs));
    }
    @Override
    public Mono<ScoredValueScanCursor<V>> zscan(K key, ScanCursor scanCursor, ScanArgs scanArgs) {
        return createMono(() -> commandBuilder.zscan(key, scanCursor, scanArgs));
    }
    @Override
    public Mono<ScoredValueScanCursor<V>> zscan(K key, ScanCursor scanCursor) {
        return createMono(() -> commandBuilder.zscan(key, scanCursor));
    }
    @Override
    public Mono<StreamScanCursor> zscan(ScoredValueStreamingChannel<V> channel, K key) {
        return createMono(() -> commandBuilder.zscanStreaming(channel, key));
    }
    @Override
    public Mono<StreamScanCursor> zscan(ScoredValueStreamingChannel<V> channel, K key, ScanArgs scanArgs) {
        return createMono(() -> commandBuilder.zscanStreaming(channel, key, scanArgs));
    }
    @Override
    public Mono<StreamScanCursor> zscan(ScoredValueStreamingChannel<V> channel, K key, ScanCursor scanCursor,
            ScanArgs scanArgs) {
        return createMono(() -> commandBuilder.zscanStreaming(channel, key, scanCursor, scanArgs));
    }
    @Override
    public Mono<StreamScanCursor> zscan(ScoredValueStreamingChannel<V> channel, K key, ScanCursor scanCursor) {
        return createMono(() -> commandBuilder.zscanStreaming(channel, key, scanCursor));
    }
    @Override
    public Mono<Double> zscore(K key, V member) {
        return createMono(() -> commandBuilder.zscore(key, member));
    }
    @Override
    public Flux<V> zunion(K... keys) {
        return createDissolvingFlux(() -> commandBuilder.zunion(keys));
    }
    @Override
    public Flux<V> zunion(ZAggregateArgs aggregateArgs, K... keys) {
        return createDissolvingFlux(() -> commandBuilder.zunion(aggregateArgs, keys));
    }
    @Override
    public Flux<ScoredValue<V>> zunionWithScores(K... keys) {
        return createDissolvingFlux(() -> commandBuilder.zunionWithScores(keys));
    }
    @Override
    public Flux<ScoredValue<V>> zunionWithScores(ZAggregateArgs aggregateArgs, K... keys) {
        return createDissolvingFlux(() -> commandBuilder.zunionWithScores(aggregateArgs, keys));
    }
    @Override
    public Mono<Long> zunionstore(K destination, K... keys) {
        return createMono(() -> commandBuilder.zunionstore(destination, keys));
    }
    @Override
    public Mono<Long> zunionstore(K destination, ZStoreArgs zStoreArgs, K... keys) {
        return createMono(() -> commandBuilder.zunionstore(destination, zStoreArgs, keys));
    }
    @Override
    public Mono<List<Map<String, Object>>> clusterLinks() {
        return createMono(commandBuilder::clusterLinks);
    }
private byte[] encodeFunction(String functionCode) {
LettuceAssert.notNull(functionCode, "Function code must not be null");
LettuceAssert.notEmpty(functionCode, "Function code script must not be empty");
return functionCode.getBytes(getConnection().getOptions().getScriptCharset());
}
    /**
     * Encodes a Lua script to bytes using the connection's configured script charset.
     *
     * @param script the Lua script source; must not be {@code null} or empty.
     * @return the encoded bytes.
     */
    private byte[] encodeScript(String script) {
        LettuceAssert.notNull(script, "Lua script must not be null");
        LettuceAssert.notEmpty(script, "Lua script must not be empty");
        return script.getBytes(getConnection().getOptions().getScriptCharset());
    }
}
| AbstractRedisReactiveCommands |
java | netty__netty | handler/src/main/java/io/netty/handler/stream/ChunkedWriteHandler.java | {
"start": 13529,
"end": 14402
} | class ____ {
final Object msg;
final ChannelPromise promise;
PendingWrite(Object msg, ChannelPromise promise) {
this.msg = msg;
this.promise = promise;
}
void fail(Throwable cause) {
ReferenceCountUtil.release(msg);
promise.tryFailure(cause);
}
void success(long total) {
if (promise.isDone()) {
// No need to notify the progress or fulfill the promise because it's done already.
return;
}
progress(total, total);
promise.trySuccess();
}
void progress(long progress, long total) {
if (promise instanceof ChannelProgressivePromise) {
((ChannelProgressivePromise) promise).tryProgress(progress, total);
}
}
}
}
| PendingWrite |
java | netty__netty | resolver-dns/src/test/java/io/netty/resolver/dns/DnsNameResolverTest.java | {
"start": 107158,
"end": 200352
} | class ____ extends TestDnsServer {
private final String dnsAddress;
private final String domain;
RedirectingTestDnsServer(String domain, String dnsAddress) {
super(Collections.singleton(domain));
this.domain = domain;
this.dnsAddress = dnsAddress;
}
@Override
protected DnsMessage filterMessage(DnsMessage message) {
// Clear the answers as we want to add our own stuff to test dns redirects.
message.getAnswerRecords().clear();
message.getAuthorityRecords().clear();
message.getAdditionalRecords().clear();
String name = domain;
for (int i = 0 ;; i++) {
int idx = name.indexOf('.');
if (idx <= 0) {
break;
}
name = name.substring(idx + 1); // skip the '.' as well.
String dnsName = "dns" + idx + '.' + domain;
message.getAuthorityRecords().add(newNsRecord(name, dnsName));
message.getAdditionalRecords().add(newARecord(dnsName, i == 0 ? dnsAddress : "1.2.3." + idx));
// Add an unresolved NS record (with no additionals as well)
message.getAuthorityRecords().add(newNsRecord(name, "unresolved." + dnsName));
}
return message;
}
}
@ParameterizedTest
@EnumSource(DnsNameResolverChannelStrategy.class)
@Timeout(value = 3000, unit = TimeUnit.MILLISECONDS)
public void testTimeoutNotCached(DnsNameResolverChannelStrategy strategy) {
DnsCache cache = new DnsCache() {
@Override
public void clear() {
// NOOP
}
@Override
public boolean clear(String hostname) {
return false;
}
@Override
public List<? extends DnsCacheEntry> get(String hostname, DnsRecord[] additionals) {
return Collections.emptyList();
}
@Override
public DnsCacheEntry cache(String hostname, DnsRecord[] additionals, InetAddress address,
long originalTtl, EventLoop loop) {
fail("Should not be cached");
return null;
}
@Override
public DnsCacheEntry cache(String hostname, DnsRecord[] additionals, Throwable cause, EventLoop loop) {
fail("Should not be cached");
return null;
}
};
DnsNameResolverBuilder builder = newResolver(strategy);
builder.queryTimeoutMillis(100)
.authoritativeDnsServerCache(cache)
.resolveCache(cache)
.nameServerProvider(new SingletonDnsServerAddressStreamProvider(
new InetSocketAddress(NetUtil.LOCALHOST, 12345)));
DnsNameResolver resolver = builder.build();
Future<InetAddress> result = resolver.resolve("doesnotexist.netty.io").awaitUninterruptibly();
Throwable cause = result.cause();
assertInstanceOf(UnknownHostException.class, cause);
assertInstanceOf(DnsNameResolverTimeoutException.class, cause.getCause());
assertTrue(DnsNameResolver.isTimeoutError(cause));
assertTrue(DnsNameResolver.isTransportOrTimeoutError(cause));
resolver.close();
}
@ParameterizedTest
@EnumSource(DnsNameResolverChannelStrategy.class)
public void testTimeoutIpv4PreferredA(DnsNameResolverChannelStrategy strategy) throws IOException {
testTimeoutOneQuery(strategy, ResolvedAddressTypes.IPV4_PREFERRED, RecordType.A, RecordType.AAAA);
}
@ParameterizedTest
@EnumSource(DnsNameResolverChannelStrategy.class)
public void testTimeoutIpv4PreferredAAAA(DnsNameResolverChannelStrategy strategy) throws IOException {
testTimeoutOneQuery(strategy, ResolvedAddressTypes.IPV4_PREFERRED, RecordType.AAAA, RecordType.A);
}
@ParameterizedTest
@EnumSource(DnsNameResolverChannelStrategy.class)
public void testTimeoutIpv6PreferredA(DnsNameResolverChannelStrategy strategy) throws IOException {
testTimeoutOneQuery(strategy, ResolvedAddressTypes.IPV6_PREFERRED, RecordType.A, RecordType.AAAA);
}
@ParameterizedTest
@EnumSource(DnsNameResolverChannelStrategy.class)
public void testTimeoutIpv6PreferredAAAA(DnsNameResolverChannelStrategy strategy) throws IOException {
testTimeoutOneQuery(strategy, ResolvedAddressTypes.IPV6_PREFERRED, RecordType.AAAA, RecordType.A);
}
private static void testTimeoutOneQuery(DnsNameResolverChannelStrategy strategy, ResolvedAddressTypes type,
final RecordType recordType, RecordType dropType)
throws IOException {
TestDnsServer dnsServer2 = new TestDnsServer(new RecordStore() {
@Override
public Set<ResourceRecord> getRecords(QuestionRecord question) {
Set<ResourceRecord> records = new LinkedHashSet<ResourceRecord>(2);
Map<String, Object> map1 = new HashMap<String, Object>();
if (question.getRecordType() == RecordType.A) {
map1.put(DnsAttribute.IP_ADDRESS.toLowerCase(), "10.0.0.2");
} else {
map1.put(DnsAttribute.IP_ADDRESS.toLowerCase(), "::1");
}
records.add(new TestDnsServer.TestResourceRecord(
question.getDomainName(), recordType, map1));
return records;
}
});
dnsServer2.start(dropType);
DnsNameResolver resolver = null;
try {
DnsNameResolverBuilder builder = newResolver(strategy)
.recursionDesired(true)
.queryTimeoutMillis(500)
.resolvedAddressTypes(type)
.maxQueriesPerResolve(16)
.nameServerProvider(new SingletonDnsServerAddressStreamProvider(dnsServer2.localAddress()));
resolver = builder.build();
List<InetAddress> resolvedAddresses =
resolver.resolveAll("somehost.netty.io").syncUninterruptibly().getNow();
assertEquals(1, resolvedAddresses.size());
if (recordType == RecordType.A) {
assertTrue(resolvedAddresses.contains(InetAddress.getByAddress(new byte[] { 10, 0, 0, 2 })));
} else {
assertTrue(resolvedAddresses.contains(InetAddress.getByAddress(
new byte[]{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 })));
}
} finally {
dnsServer2.stop();
if (resolver != null) {
resolver.close();
}
}
}
@Test
public void testDnsNameResolverBuilderCopy() {
ChannelFactory<DatagramChannel> channelFactory =
new ReflectiveChannelFactory<DatagramChannel>(NioDatagramChannel.class);
DnsNameResolverBuilder builder = new DnsNameResolverBuilder(group.next())
.datagramChannelFactory(channelFactory);
DnsNameResolverBuilder copiedBuilder = builder.copy();
// change channel factory does not propagate to previously made copy
ChannelFactory<DatagramChannel> newChannelFactory =
new ReflectiveChannelFactory<DatagramChannel>(NioDatagramChannel.class);
builder.datagramChannelFactory(newChannelFactory);
assertEquals(channelFactory, copiedBuilder.datagramChannelFactory());
assertEquals(newChannelFactory, builder.datagramChannelFactory());
}
@ParameterizedTest
@EnumSource(DnsNameResolverChannelStrategy.class)
public void testFollowCNAMEEvenIfARecordIsPresent(DnsNameResolverChannelStrategy strategy) throws IOException {
TestDnsServer dnsServer2 = new TestDnsServer(new RecordStore() {
@Override
public Set<ResourceRecord> getRecords(QuestionRecord question) {
if (question.getDomainName().equals("cname.netty.io")) {
Map<String, Object> map1 = new HashMap<String, Object>();
map1.put(DnsAttribute.IP_ADDRESS.toLowerCase(), "10.0.0.99");
return Collections.<ResourceRecord>singleton(
new TestDnsServer.TestResourceRecord(question.getDomainName(), RecordType.A, map1));
} else {
Set<ResourceRecord> records = new LinkedHashSet<ResourceRecord>(2);
Map<String, Object> map = new HashMap<String, Object>();
map.put(DnsAttribute.DOMAIN_NAME.toLowerCase(), "cname.netty.io");
records.add(new TestDnsServer.TestResourceRecord(
question.getDomainName(), RecordType.CNAME, map));
Map<String, Object> map1 = new HashMap<String, Object>();
map1.put(DnsAttribute.IP_ADDRESS.toLowerCase(), "10.0.0.2");
records.add(new TestDnsServer.TestResourceRecord(
question.getDomainName(), RecordType.A, map1));
return records;
}
}
});
dnsServer2.start();
DnsNameResolver resolver = null;
try {
DnsNameResolverBuilder builder = newResolver(strategy)
.recursionDesired(true)
.resolvedAddressTypes(ResolvedAddressTypes.IPV4_ONLY)
.maxQueriesPerResolve(16)
.nameServerProvider(new SingletonDnsServerAddressStreamProvider(dnsServer2.localAddress()));
resolver = builder.build();
List<InetAddress> resolvedAddresses =
resolver.resolveAll("somehost.netty.io").syncUninterruptibly().getNow();
assertEquals(2, resolvedAddresses.size());
assertTrue(resolvedAddresses.contains(InetAddress.getByAddress(new byte[] { 10, 0, 0, 99 })));
assertTrue(resolvedAddresses.contains(InetAddress.getByAddress(new byte[] { 10, 0, 0, 2 })));
} finally {
dnsServer2.stop();
if (resolver != null) {
resolver.close();
}
}
}
//
// This should only result in one query.
// ;; ANSWER SECTION:
// somehost.netty.io. 594 IN CNAME cname.netty.io.
// cname.netty.io. 9042 IN CNAME cname2.netty.io.
// cname2.netty.io. 1312 IN CNAME cname3.netty.io.io.
// cname3.netty.io. 20 IN A 10.0.0.2
@ParameterizedTest
@EnumSource(DnsNameResolverChannelStrategy.class)
public void testCNAMEFollowInResponseWithoutExtraQuery(DnsNameResolverChannelStrategy strategy) throws IOException {
final AtomicInteger queryCount = new AtomicInteger();
TestDnsServer dnsServer2 = new TestDnsServer(new RecordStore() {
@Override
public Set<ResourceRecord> getRecords(QuestionRecord question) {
queryCount.incrementAndGet();
if (question.getDomainName().equals("somehost.netty.io")) {
Set<ResourceRecord> records = new LinkedHashSet<ResourceRecord>(2);
Map<String, Object> map = new HashMap<String, Object>();
map.put(DnsAttribute.DOMAIN_NAME.toLowerCase(), "cname.netty.io");
records.add(new TestDnsServer.TestResourceRecord(
question.getDomainName(), RecordType.CNAME, map));
map = new HashMap<String, Object>();
map.put(DnsAttribute.DOMAIN_NAME.toLowerCase(), "cname2.netty.io");
records.add(new TestDnsServer.TestResourceRecord(
"cname.netty.io", RecordType.CNAME, map));
map = new HashMap<String, Object>();
map.put(DnsAttribute.DOMAIN_NAME.toLowerCase(), "cname3.netty.io");
records.add(new TestDnsServer.TestResourceRecord(
"cname2.netty.io", RecordType.CNAME, map));
Map<String, Object> map1 = new HashMap<String, Object>();
map1.put(DnsAttribute.IP_ADDRESS.toLowerCase(), "10.0.0.2");
records.add(new TestDnsServer.TestResourceRecord(
"cname3.netty.io", RecordType.A, map1));
return records;
}
return null;
}
});
dnsServer2.start();
DnsNameResolver resolver = null;
try {
DnsNameResolverBuilder builder = newResolver(strategy)
.recursionDesired(true)
.resolvedAddressTypes(ResolvedAddressTypes.IPV4_ONLY)
.maxQueriesPerResolve(16)
.nameServerProvider(new SingletonDnsServerAddressStreamProvider(dnsServer2.localAddress()));
resolver = builder.build();
List<InetAddress> resolvedAddresses =
resolver.resolveAll("somehost.netty.io").syncUninterruptibly().getNow();
assertEquals(1, resolvedAddresses.size());
assertTrue(resolvedAddresses.contains(InetAddress.getByAddress(new byte[] { 10, 0, 0, 2 })));
assertEquals(1, queryCount.get());
} finally {
dnsServer2.stop();
if (resolver != null) {
resolver.close();
}
}
}
@ParameterizedTest
@EnumSource(DnsNameResolverChannelStrategy.class)
public void testFollowCNAMELoop(DnsNameResolverChannelStrategy strategy) throws IOException {
TestDnsServer dnsServer2 = new TestDnsServer(new RecordStore() {
@Override
public Set<ResourceRecord> getRecords(QuestionRecord question) {
Set<ResourceRecord> records = new LinkedHashSet<ResourceRecord>(4);
records.add(new TestDnsServer.TestResourceRecord("x." + question.getDomainName(),
RecordType.A, Collections.<String, Object>singletonMap(
DnsAttribute.IP_ADDRESS.toLowerCase(), "10.0.0.99")));
records.add(new TestDnsServer.TestResourceRecord(
"cname2.netty.io", RecordType.CNAME,
Collections.<String, Object>singletonMap(
DnsAttribute.DOMAIN_NAME.toLowerCase(), "cname.netty.io")));
records.add(new TestDnsServer.TestResourceRecord(
"cname.netty.io", RecordType.CNAME,
Collections.<String, Object>singletonMap(
DnsAttribute.DOMAIN_NAME.toLowerCase(), "cname2.netty.io")));
records.add(new TestDnsServer.TestResourceRecord(
question.getDomainName(), RecordType.CNAME,
Collections.<String, Object>singletonMap(
DnsAttribute.DOMAIN_NAME.toLowerCase(), "cname.netty.io")));
return records;
}
});
dnsServer2.start();
DnsNameResolver resolver = null;
try {
DnsNameResolverBuilder builder = newResolver(strategy)
.recursionDesired(false)
.resolvedAddressTypes(ResolvedAddressTypes.IPV4_ONLY)
.maxQueriesPerResolve(16)
.nameServerProvider(new SingletonDnsServerAddressStreamProvider(dnsServer2.localAddress()));
resolver = builder.build();
final DnsNameResolver finalResolver = resolver;
assertThrows(UnknownHostException.class, new Executable() {
@Override
public void execute() throws Throwable {
finalResolver.resolveAll("somehost.netty.io").syncUninterruptibly().getNow();
}
});
} finally {
dnsServer2.stop();
if (resolver != null) {
resolver.close();
}
}
}
@ParameterizedTest
@EnumSource(DnsNameResolverChannelStrategy.class)
public void testCNAMELoopInCache(DnsNameResolverChannelStrategy strategy) throws Throwable {
DnsNameResolver resolver = null;
try {
DnsNameResolverBuilder builder = newResolver(strategy)
.recursionDesired(false)
.resolvedAddressTypes(ResolvedAddressTypes.IPV4_ONLY)
.maxQueriesPerResolve(16)
.nameServerProvider(new SingletonDnsServerAddressStreamProvider(dnsServer.localAddress()));
resolver = builder.build();
// Add a CNAME loop into the cache
final String name = "somehost.netty.io.";
String name2 = "cname.netty.io.";
resolver.cnameCache().cache(name, name2, Long.MAX_VALUE, resolver.executor());
resolver.cnameCache().cache(name2, name, Long.MAX_VALUE, resolver.executor());
final DnsNameResolver finalResolver = resolver;
assertThrows(UnknownHostException.class, new Executable() {
@Override
public void execute() throws Throwable {
finalResolver.resolve(name).syncUninterruptibly().getNow();
}
});
} finally {
if (resolver != null) {
resolver.close();
}
}
}
@ParameterizedTest
@EnumSource(DnsNameResolverChannelStrategy.class)
public void testSearchDomainQueryFailureForSingleAddressTypeCompletes(
final DnsNameResolverChannelStrategy strategy) {
assertThrows(UnknownHostException.class, new Executable() {
@Override
public void execute() {
testSearchDomainQueryFailureCompletes(strategy, ResolvedAddressTypes.IPV4_ONLY);
}
});
}
@ParameterizedTest
@EnumSource(DnsNameResolverChannelStrategy.class)
public void testSearchDomainQueryFailureForMultipleAddressTypeCompletes(
final DnsNameResolverChannelStrategy strategy) {
assertThrows(UnknownHostException.class, new Executable() {
@Override
public void execute() throws Throwable {
testSearchDomainQueryFailureCompletes(strategy, ResolvedAddressTypes.IPV4_PREFERRED);
}
});
}
private void testSearchDomainQueryFailureCompletes(
DnsNameResolverChannelStrategy strategy, ResolvedAddressTypes types) {
DnsNameResolver resolver = newResolver(strategy)
.resolvedAddressTypes(types)
.ndots(1)
.searchDomains(singletonList(".")).build();
try {
resolver.resolve("invalid.com").syncUninterruptibly();
} finally {
resolver.close();
}
}
@ParameterizedTest
@EnumSource(DnsNameResolverChannelStrategy.class)
@Timeout(value = 2000, unit = TimeUnit.MILLISECONDS)
public void testCachesClearedOnClose(DnsNameResolverChannelStrategy strategy) throws Exception {
final CountDownLatch resolveLatch = new CountDownLatch(1);
final CountDownLatch authoritativeLatch = new CountDownLatch(1);
DnsNameResolver resolver = newResolver(strategy).resolveCache(new DnsCache() {
@Override
public void clear() {
resolveLatch.countDown();
}
@Override
public boolean clear(String hostname) {
return false;
}
@Override
public List<? extends DnsCacheEntry> get(String hostname, DnsRecord[] additionals) {
return null;
}
@Override
public DnsCacheEntry cache(
String hostname, DnsRecord[] additionals, InetAddress address, long originalTtl, EventLoop loop) {
return null;
}
@Override
public DnsCacheEntry cache(
String hostname, DnsRecord[] additionals, Throwable cause, EventLoop loop) {
return null;
}
}).authoritativeDnsServerCache(new DnsCache() {
@Override
public void clear() {
authoritativeLatch.countDown();
}
@Override
public boolean clear(String hostname) {
return false;
}
@Override
public List<? extends DnsCacheEntry> get(String hostname, DnsRecord[] additionals) {
return null;
}
@Override
public DnsCacheEntry cache(
String hostname, DnsRecord[] additionals, InetAddress address, long originalTtl, EventLoop loop) {
return null;
}
@Override
public DnsCacheEntry cache(String hostname, DnsRecord[] additionals, Throwable cause, EventLoop loop) {
return null;
}
}).build();
resolver.close();
resolveLatch.await();
authoritativeLatch.await();
}
@ParameterizedTest
@EnumSource(DnsNameResolverChannelStrategy.class)
public void testResolveACachedWithDot(DnsNameResolverChannelStrategy strategy) {
final DnsCache cache = new DefaultDnsCache();
DnsNameResolver resolver = newResolver(strategy, ResolvedAddressTypes.IPV4_ONLY)
.resolveCache(cache).build();
try {
String domain = DOMAINS.iterator().next();
String domainWithDot = domain + '.';
resolver.resolve(domain).syncUninterruptibly();
List<? extends DnsCacheEntry> cached = cache.get(domain, null);
List<? extends DnsCacheEntry> cached2 = cache.get(domainWithDot, null);
assertEquals(1, cached.size());
assertEquals(cached, cached2);
} finally {
resolver.close();
}
}
/**
 * Verifies caching when a search domain is appended: resolving the short name "test"
 * must populate the cache under the expanded FQDN "test.netty.io" (reachable with and
 * without trailing dot), and a subsequent cached resolution of "test" must be answered
 * from the cache (recorded as a cache hit for "test.netty.io").
 */
@ParameterizedTest
@EnumSource(DnsNameResolverChannelStrategy.class)
public void testResolveACachedWithDotSearchDomain(DnsNameResolverChannelStrategy strategy) throws Exception {
    final TestDnsCache cache = new TestDnsCache(new DefaultDnsCache());
    TestDnsServer server = new TestDnsServer(Collections.singleton("test.netty.io"));
    server.start();
    DnsNameResolver resolver = newResolver(strategy, ResolvedAddressTypes.IPV4_ONLY)
            .searchDomains(Collections.singletonList("netty.io"))
            .nameServerProvider(new SingletonDnsServerAddressStreamProvider(server.localAddress()))
            .resolveCache(cache).build();
    try {
        resolver.resolve("test").syncUninterruptibly();
        // First resolution goes to the server, so no cache hit yet.
        assertNull(cache.cacheHits.get("test.netty.io"));
        // The expanded name is cached; both spellings map to the same entries.
        List<? extends DnsCacheEntry> cached = cache.cache.get("test.netty.io", null);
        List<? extends DnsCacheEntry> cached2 = cache.cache.get("test.netty.io.", null);
        assertEquals(1, cached.size());
        assertEquals(cached, cached2);
        // Resolving "test" again purely from the cache must succeed and count as a hit.
        Promise<List<InetAddress>> promise = ImmediateEventExecutor.INSTANCE.newPromise();
        boolean isCached = DnsNameResolver.doResolveAllCached("test", null, promise, cache,
                resolver.searchDomains(), resolver.ndots(), resolver.resolvedInternetProtocolFamiliesUnsafe());
        assertTrue(isCached);
        promise.sync();
        List<? extends DnsCacheEntry> entries = cache.cacheHits.get("test.netty.io");
        assertFalse(entries.isEmpty());
    } finally {
        resolver.close();
        server.stop();
    }
}
/**
 * Verifies the CNAME cache: once x.netty.io -&gt; cname.netty.io is cached, repeated
 * resolutions must skip the CNAME query (only re-querying the A record, since the
 * address cache is a no-op here), and a chain y -&gt; x -&gt; cname must reuse the cached
 * x -&gt; cname link so only one new CNAME query is issued.
 */
@ParameterizedTest
@EnumSource(DnsNameResolverChannelStrategy.class)
public void testCNameCached(DnsNameResolverChannelStrategy strategy) throws Exception {
    // Backing store for the custom DnsCnameCache plus per-record-type query counters.
    final Map<String, String> cache = new ConcurrentHashMap<String, String>();
    final AtomicInteger cnameQueries = new AtomicInteger();
    final AtomicInteger aQueries = new AtomicInteger();
    TestDnsServer dnsServer2 = new TestDnsServer(new RecordStore() {
        @Override
        public Set<ResourceRecord> getRecords(QuestionRecord question) {
            // cname.netty.io resolves directly to an address.
            if ("cname.netty.io".equals(question.getDomainName())) {
                aQueries.incrementAndGet();
                return Collections.<ResourceRecord>singleton(new TestDnsServer.TestResourceRecord(
                        question.getDomainName(), RecordType.A,
                        Collections.<String, Object>singletonMap(
                                DnsAttribute.IP_ADDRESS.toLowerCase(), "10.0.0.99")));
            }
            // x.netty.io is a CNAME to cname.netty.io.
            if ("x.netty.io".equals(question.getDomainName())) {
                cnameQueries.incrementAndGet();
                return Collections.<ResourceRecord>singleton(new TestDnsServer.TestResourceRecord(
                        question.getDomainName(), RecordType.CNAME,
                        Collections.<String, Object>singletonMap(
                                DnsAttribute.DOMAIN_NAME.toLowerCase(), "cname.netty.io")));
            }
            // y.netty.io is a CNAME to x.netty.io (two-link chain).
            if ("y.netty.io".equals(question.getDomainName())) {
                cnameQueries.incrementAndGet();
                return Collections.<ResourceRecord>singleton(new TestDnsServer.TestResourceRecord(
                        question.getDomainName(), RecordType.CNAME,
                        Collections.<String, Object>singletonMap(
                                DnsAttribute.DOMAIN_NAME.toLowerCase(), "x.netty.io")));
            }
            return Collections.emptySet();
        }
    });
    dnsServer2.start();
    DnsNameResolver resolver = null;
    try {
        DnsNameResolverBuilder builder = newResolver(strategy)
                .recursionDesired(true)
                .resolvedAddressTypes(ResolvedAddressTypes.IPV4_ONLY)
                .maxQueriesPerResolve(16)
                .nameServerProvider(new SingletonDnsServerAddressStreamProvider(dnsServer2.localAddress()))
                // Disable the address cache so every resolve re-issues the A query;
                // only the CNAME links are cached (in the map-backed cache below).
                .resolveCache(NoopDnsCache.INSTANCE)
                .cnameCache(new DnsCnameCache() {
                    @Override
                    public String get(String hostname) {
                        // The resolver always uses FQDNs (trailing dot) as cache keys.
                        assertTrue(hostname.endsWith("."), hostname);
                        return cache.get(hostname);
                    }

                    @Override
                    public void cache(String hostname, String cname, long originalTtl, EventLoop loop) {
                        assertTrue(hostname.endsWith("."), hostname);
                        cache.put(hostname, cname);
                    }

                    @Override
                    public void clear() {
                        // NOOP
                    }

                    @Override
                    public boolean clear(String hostname) {
                        return false;
                    }
                });
        resolver = builder.build();
        List<InetAddress> resolvedAddresses =
                resolver.resolveAll("x.netty.io").syncUninterruptibly().getNow();
        assertEquals(1, resolvedAddresses.size());
        assertTrue(resolvedAddresses.contains(InetAddress.getByAddress(new byte[] { 10, 0, 0, 99 })));

        // First resolve populated the CNAME cache and issued one CNAME + one A query.
        assertEquals("cname.netty.io.", cache.get("x.netty.io."));
        assertEquals(1, cnameQueries.get());
        assertEquals(1, aQueries.get());

        resolvedAddresses =
                resolver.resolveAll("x.netty.io").syncUninterruptibly().getNow();
        assertEquals(1, resolvedAddresses.size());
        assertTrue(resolvedAddresses.contains(InetAddress.getByAddress(new byte[] { 10, 0, 0, 99 })));

        // Should not have queried for the CNAME again.
        assertEquals(1, cnameQueries.get());
        assertEquals(2, aQueries.get());

        resolvedAddresses =
                resolver.resolveAll("y.netty.io").syncUninterruptibly().getNow();
        assertEquals(1, resolvedAddresses.size());
        assertTrue(resolvedAddresses.contains(InetAddress.getByAddress(new byte[] { 10, 0, 0, 99 })));

        assertEquals("x.netty.io.", cache.get("y.netty.io."));

        // Will only query for one CNAME
        assertEquals(2, cnameQueries.get());
        assertEquals(3, aQueries.get());

        resolvedAddresses =
                resolver.resolveAll("y.netty.io").syncUninterruptibly().getNow();
        assertEquals(1, resolvedAddresses.size());
        assertTrue(resolvedAddresses.contains(InetAddress.getByAddress(new byte[] { 10, 0, 0, 99 })));

        // Should not have queried for the CNAME again.
        assertEquals(2, cnameQueries.get());
        assertEquals(4, aQueries.get());
    } finally {
        dnsServer2.stop();
        if (resolver != null) {
            resolver.close();
        }
    }
}
/**
 * Regression test: constructing a DnsNameResolver with a {@code null}
 * resolvedAddressTypes must not throw (it should fall back to a default).
 * Uses the deprecated positional constructor on purpose — see the linked PR.
 */
@Test
public void testInstanceWithNullPreferredAddressType() {
    new DnsNameResolver(
            group.next(), // eventLoop
            new ReflectiveChannelFactory<DatagramChannel>(NioDatagramChannel.class), // channelFactory
            NoopDnsCache.INSTANCE, // resolveCache
            NoopAuthoritativeDnsServerCache.INSTANCE, // authoritativeDnsServerCache
            NoopDnsQueryLifecycleObserverFactory.INSTANCE, // dnsQueryLifecycleObserverFactory
            100, // queryTimeoutMillis
            null, // resolvedAddressTypes, see https://github.com/netty/netty/pull/8445
            true, // recursionDesired
            1, // maxQueriesPerResolve
            false, // traceEnabled
            4096, // maxPayloadSize
            true, // optResourceEnabled
            HostsFileEntriesResolver.DEFAULT, // hostsFileEntriesResolver
            DnsServerAddressStreamProviders.platformDefault(), // dnsServerAddressStreamProvider
            null, // searchDomains
            1, // ndots
            true // decodeIdn
    ).close();
}
/**
 * Verifies that a raw TXT query via {@link DnsNameResolver#query} returns both
 * TXT records served for the name and that their character-strings decode correctly.
 */
@ParameterizedTest
@EnumSource(DnsNameResolverChannelStrategy.class)
public void testQueryTxt(DnsNameResolverChannelStrategy strategy) throws Exception {
    final String hostname = "txt.netty.io";
    final String txt1 = "some text";
    final String txt2 = "some more text";

    TestDnsServer server = new TestDnsServer(new RecordStore() {
        @Override
        public Set<ResourceRecord> getRecords(QuestionRecord question) {
            if (question.getDomainName().equals(hostname)) {
                // Serve two distinct TXT records for the same name.
                Map<String, Object> map1 = new HashMap<String, Object>();
                map1.put(DnsAttribute.CHARACTER_STRING.toLowerCase(), txt1);

                Map<String, Object> map2 = new HashMap<String, Object>();
                map2.put(DnsAttribute.CHARACTER_STRING.toLowerCase(), txt2);

                Set<ResourceRecord> records = new HashSet<ResourceRecord>();
                records.add(new TestDnsServer.TestResourceRecord(question.getDomainName(), RecordType.TXT, map1));
                records.add(new TestDnsServer.TestResourceRecord(question.getDomainName(), RecordType.TXT, map2));
                return records;
            }
            return Collections.emptySet();
        }
    });
    server.start();
    DnsNameResolver resolver = newResolver(strategy, ResolvedAddressTypes.IPV4_ONLY)
            .nameServerProvider(new SingletonDnsServerAddressStreamProvider(server.localAddress()))
            .build();
    try {
        AddressedEnvelope<DnsResponse, InetSocketAddress> envelope = resolver.query(
                new DefaultDnsQuestion(hostname, DnsRecordType.TXT)).syncUninterruptibly().getNow();
        assertNotNull(envelope.sender());

        DnsResponse response = envelope.content();
        assertNotNull(response);

        assertEquals(DnsResponseCode.NOERROR, response.code());
        int count = response.count(DnsSection.ANSWER);

        assertEquals(2, count);
        // Order of the two answers is not guaranteed, so collect then assert membership.
        List<String> txts = new ArrayList<String>();
        for (int i = 0; i < 2; i++) {
            txts.addAll(decodeTxt(response.recordAt(DnsSection.ANSWER, i)));
        }
        assertTrue(txts.contains(txt1));
        assertTrue(txts.contains(txt2));
        envelope.release();
    } finally {
        resolver.close();
        server.stop();
    }
}
/**
 * Decodes the character-strings contained in a raw TXT record payload.
 * A TXT RDATA is a sequence of &lt;length byte, that many bytes of UTF-8 text&gt; pairs.
 *
 * @param record the answer record; anything other than a {@link DnsRawRecord}
 *               yields an empty list
 * @return the decoded strings, in payload order
 */
private static List<String> decodeTxt(DnsRecord record) {
    if (!(record instanceof DnsRawRecord)) {
        return Collections.emptyList();
    }
    ByteBuf payload = ((DnsRawRecord) record).content();
    List<String> strings = new ArrayList<String>();
    int end = payload.writerIndex();
    for (int offset = payload.readerIndex(); offset < end;) {
        // Single length prefix byte, then the string bytes themselves.
        int length = payload.getUnsignedByte(offset);
        offset++;
        strings.add(payload.toString(offset, length, CharsetUtil.UTF_8));
        offset += length;
    }
    return strings;
}
/**
 * Verifies that when a CNAME and its target both resolve to the same address,
 * {@code resolveAll} returns the address only once (duplicates are collapsed).
 */
@ParameterizedTest
@EnumSource(DnsNameResolverChannelStrategy.class)
public void testNotIncludeDuplicates(DnsNameResolverChannelStrategy strategy) throws IOException {
    final String name = "netty.io";
    final String ipv4Addr = "1.2.3.4";
    TestDnsServer dnsServer2 = new TestDnsServer(new RecordStore() {
        @Override
        public Set<ResourceRecord> getRecords(QuestionRecord question) {
            Set<ResourceRecord> records = new LinkedHashSet<ResourceRecord>(4);
            String qName = question.getDomainName().toLowerCase();
            if (qName.equals(name)) {
                // netty.io answers with a CNAME *and* an A record for the same address.
                records.add(new TestDnsServer.TestResourceRecord(
                        qName, RecordType.CNAME,
                        Collections.<String, Object>singletonMap(
                                DnsAttribute.DOMAIN_NAME.toLowerCase(), "cname.netty.io")));
                records.add(new TestDnsServer.TestResourceRecord(qName,
                        RecordType.A, Collections.<String, Object>singletonMap(
                        DnsAttribute.IP_ADDRESS.toLowerCase(), ipv4Addr)));
            } else {
                // The CNAME target resolves to the same address.
                records.add(new TestDnsServer.TestResourceRecord(qName,
                        RecordType.A, Collections.<String, Object>singletonMap(
                        DnsAttribute.IP_ADDRESS.toLowerCase(), ipv4Addr)));
            }
            return records;
        }
    });
    dnsServer2.start();
    DnsNameResolver resolver = null;
    try {
        DnsNameResolverBuilder builder = newResolver(strategy)
                .recursionDesired(true)
                .maxQueriesPerResolve(16)
                .nameServerProvider(new SingletonDnsServerAddressStreamProvider(dnsServer2.localAddress()));

        builder.resolvedAddressTypes(ResolvedAddressTypes.IPV4_ONLY);
        resolver = builder.build();
        List<InetAddress> resolvedAddresses = resolver.resolveAll(name).syncUninterruptibly().getNow();
        // Exactly one entry despite the address being reachable via two answer paths.
        assertEquals(Collections.singletonList(InetAddress.getByAddress(name, new byte[] { 1, 2, 3, 4 })),
                resolvedAddresses);
    } finally {
        dnsServer2.stop();
        if (resolver != null) {
            resolver.close();
        }
    }
}
/**
 * Verifies that the record-level {@code resolveAll(DnsQuestion)} API does NOT
 * de-duplicate: two identical A records in the answer are both returned
 * (contrast with {@link #testNotIncludeDuplicates}, which uses the address API).
 */
@ParameterizedTest
@EnumSource(DnsNameResolverChannelStrategy.class)
public void testIncludeDuplicates(DnsNameResolverChannelStrategy strategy) throws IOException {
    final String name = "netty.io";
    final String ipv4Addr = "1.2.3.4";
    TestDnsServer dnsServer2 = new TestDnsServer(new RecordStore() {
        @Override
        public Set<ResourceRecord> getRecords(QuestionRecord question) {
            Set<ResourceRecord> records = new LinkedHashSet<ResourceRecord>(2);
            String qName = question.getDomainName().toLowerCase();
            // Serve the same A record twice.
            records.add(new TestDnsServer.TestResourceRecord(qName,
                    RecordType.A, Collections.<String, Object>singletonMap(
                    DnsAttribute.IP_ADDRESS.toLowerCase(), ipv4Addr)));
            records.add(new TestDnsServer.TestResourceRecord(qName,
                    RecordType.A, Collections.<String, Object>singletonMap(
                    DnsAttribute.IP_ADDRESS.toLowerCase(), ipv4Addr)));
            return records;
        }
    });
    dnsServer2.start();
    DnsNameResolver resolver = null;
    try {
        DnsNameResolverBuilder builder = newResolver(strategy)
                .recursionDesired(true)
                .maxQueriesPerResolve(16)
                .nameServerProvider(new SingletonDnsServerAddressStreamProvider(dnsServer2.localAddress()));

        builder.resolvedAddressTypes(ResolvedAddressTypes.IPV4_ONLY);
        resolver = builder.build();
        List<DnsRecord> resolvedAddresses = resolver.resolveAll(new DefaultDnsQuestion(name, A))
                .syncUninterruptibly().getNow();
        assertEquals(2, resolvedAddresses.size());
        // The returned raw records are reference-counted and must be released.
        for (DnsRecord record: resolvedAddresses) {
            ReferenceCountUtil.release(record);
        }
    } finally {
        dnsServer2.stop();
        if (resolver != null) {
            resolver.close();
        }
    }
}
/**
 * Verifies that when the server drops AAAA queries (never answers them) and the
 * resolver prefers IPv4, {@code resolveAll} still succeeds with the A answer once
 * the AAAA query times out (500ms timeout configured below).
 */
@ParameterizedTest
@EnumSource(DnsNameResolverChannelStrategy.class)
public void testDropAAAA(DnsNameResolverChannelStrategy strategy) throws IOException {
    String host = "somehost.netty.io";
    TestDnsServer dnsServer2 = new TestDnsServer(Collections.singleton(host));
    // start(RecordType.AAAA) makes the server silently drop AAAA queries.
    dnsServer2.start(RecordType.AAAA);
    DnsNameResolver resolver = null;
    try {
        DnsNameResolverBuilder builder = newResolver(strategy)
                .recursionDesired(false)
                .queryTimeoutMillis(500)
                .resolvedAddressTypes(ResolvedAddressTypes.IPV4_PREFERRED)
                .maxQueriesPerResolve(16)
                .nameServerProvider(new SingletonDnsServerAddressStreamProvider(dnsServer2.localAddress()));
        resolver = builder.build();
        List<InetAddress> addressList = resolver.resolveAll(host).syncUninterruptibly().getNow();
        assertEquals(1, addressList.size());
        assertEquals(host, addressList.get(0).getHostName());
    } finally {
        dnsServer2.stop();
        if (resolver != null) {
            resolver.close();
        }
    }
}
/**
 * Like {@link #testDropAAAA} but with {@code completeOncePreferredResolved(true)}:
 * the resolve must complete as soon as the preferred (IPv4) answer arrives, well
 * before the 10s query timeout — enforced by the 2s test timeout.
 */
@ParameterizedTest
@EnumSource(DnsNameResolverChannelStrategy.class)
@Timeout(value = 2000, unit = TimeUnit.MILLISECONDS)
public void testDropAAAAResolveFast(DnsNameResolverChannelStrategy strategy) throws IOException {
    String host = "somehost.netty.io";
    TestDnsServer dnsServer2 = new TestDnsServer(Collections.singleton(host));
    // Silently drop AAAA queries so only the A query can complete.
    dnsServer2.start(RecordType.AAAA);
    DnsNameResolver resolver = null;
    try {
        DnsNameResolverBuilder builder = newResolver(strategy)
                .recursionDesired(false)
                .queryTimeoutMillis(10000)
                .resolvedAddressTypes(ResolvedAddressTypes.IPV4_PREFERRED)
                .completeOncePreferredResolved(true)
                .maxQueriesPerResolve(16)
                .nameServerProvider(new SingletonDnsServerAddressStreamProvider(dnsServer2.localAddress()));
        resolver = builder.build();
        InetAddress address = resolver.resolve(host).syncUninterruptibly().getNow();
        assertEquals(host, address.getHostName());
    } finally {
        dnsServer2.stop();
        if (resolver != null) {
            resolver.close();
        }
    }
}
/**
 * {@code resolveAll} variant of {@link #testDropAAAAResolveFast}: with AAAA queries
 * dropped and {@code completeOncePreferredResolved(true)}, both IPv4 addresses must
 * be returned promptly without waiting for the (never-arriving) AAAA response.
 */
@ParameterizedTest
@EnumSource(DnsNameResolverChannelStrategy.class)
@Timeout(value = 2000, unit = TimeUnit.MILLISECONDS)
public void testDropAAAAResolveAllFast(DnsNameResolverChannelStrategy strategy) throws IOException {
    final String host = "somehost.netty.io";
    TestDnsServer dnsServer2 = new TestDnsServer(new RecordStore() {
        @Override
        public Set<ResourceRecord> getRecords(QuestionRecord question) throws DnsException {
            String name = question.getDomainName();
            if (name.equals(host)) {
                // Two A records for the host.
                Set<ResourceRecord> records = new HashSet<ResourceRecord>(2);
                records.add(new TestDnsServer.TestResourceRecord(name, RecordType.A,
                        Collections.<String, Object>singletonMap(DnsAttribute.IP_ADDRESS.toLowerCase(),
                                "10.0.0.1")));
                records.add(new TestDnsServer.TestResourceRecord(name, RecordType.A,
                        Collections.<String, Object>singletonMap(DnsAttribute.IP_ADDRESS.toLowerCase(),
                                "10.0.0.2")));
                return records;
            }
            return null;
        }
    });
    // Silently drop AAAA queries.
    dnsServer2.start(RecordType.AAAA);
    DnsNameResolver resolver = null;
    try {
        DnsNameResolverBuilder builder = newResolver(strategy)
                .recursionDesired(false)
                .queryTimeoutMillis(10000)
                .resolvedAddressTypes(ResolvedAddressTypes.IPV4_PREFERRED)
                .completeOncePreferredResolved(true)
                .maxQueriesPerResolve(16)
                .nameServerProvider(new SingletonDnsServerAddressStreamProvider(dnsServer2.localAddress()));
        resolver = builder.build();
        List<InetAddress> addresses = resolver.resolveAll(host).syncUninterruptibly().getNow();
        assertEquals(2, addresses.size());
        for (InetAddress address: addresses) {
            assertInstanceOf(Inet4Address.class, address);
            assertEquals(host, address.getHostName());
        }
    } finally {
        dnsServer2.stop();
        if (resolver != null) {
            resolver.close();
        }
    }
}
/**
 * Truncated (TC-bit) UDP response with no TCP fallback configured:
 * the truncated response is delivered as-is. See {@link #testTruncated0}.
 */
@ParameterizedTest
@EnumSource(DnsNameResolverChannelStrategy.class)
@Timeout(value = 5000, unit = TimeUnit.MILLISECONDS)
public void testTruncatedWithoutTcpFallback(DnsNameResolverChannelStrategy strategy) throws IOException {
    testTruncated0(strategy, false, false);
}
/**
 * Truncated (TC-bit) UDP response with TCP fallback configured:
 * the query is retried over TCP and the full response is delivered.
 * See {@link #testTruncated0}.
 */
@ParameterizedTest
@EnumSource(DnsNameResolverChannelStrategy.class)
@Timeout(value = 5000, unit = TimeUnit.MILLISECONDS)
public void testTruncatedWithTcpFallback(DnsNameResolverChannelStrategy strategy) throws IOException {
    testTruncated0(strategy, true, false);
}
/**
 * Response truncated on the wire (simulated MTU clipping rather than the TC bit):
 * TCP fallback must still kick in and deliver the full response.
 * See {@link #testTruncated0}.
 */
@ParameterizedTest
@EnumSource(DnsNameResolverChannelStrategy.class)
@Timeout(value = 5000, unit = TimeUnit.MILLISECONDS)
public void testTruncatedWithTcpFallbackBecauseOfMtu(DnsNameResolverChannelStrategy strategy) throws IOException {
    testTruncated0(strategy, true, true);
}
/**
 * Builds a {@link DnsMessageModifier} pre-populated with every field of the given
 * message, so the caller can tweak individual fields (e.g. the truncated flag or
 * transaction id) and re-encode a modified copy.
 */
private static DnsMessageModifier modifierFrom(DnsMessage message) {
    DnsMessageModifier copy = new DnsMessageModifier();
    // Header fields.
    copy.setTransactionId(message.getTransactionId());
    copy.setMessageType(message.getMessageType());
    copy.setOpCode(message.getOpCode());
    copy.setResponseCode(message.getResponseCode());
    // Flag bits.
    copy.setAuthoritativeAnswer(message.isAuthoritativeAnswer());
    copy.setTruncated(message.isTruncated());
    copy.setRecursionDesired(message.isRecursionDesired());
    copy.setRecursionAvailable(message.isRecursionAvailable());
    copy.setReserved(message.isReserved());
    copy.setAcceptNonAuthenticatedData(message.isAcceptNonAuthenticatedData());
    // Record sections.
    copy.setQuestionRecords(message.getQuestionRecords());
    copy.setAnswerRecords(message.getAnswerRecords());
    copy.setAuthorityRecords(message.getAuthorityRecords());
    copy.setAdditionalRecords(message.getAdditionalRecords());
    return copy;
}
/**
 * Shared driver for the truncation tests.
 *
 * Serves a TXT record, then forces truncation in one of two ways:
 * either the TC bit is set on the (otherwise complete) UDP response
 * ({@code truncatedBecauseOfMtu == false}), or the UDP datagram itself is clipped
 * by one byte on receipt to simulate MTU truncation.
 *
 * @param tcpFallback            whether the resolver is configured with a TCP socket
 *                               channel for retrying truncated responses
 * @param truncatedBecauseOfMtu  whether to clip the datagram instead of setting TC
 */
private static void testTruncated0(DnsNameResolverChannelStrategy strategy,
                                   boolean tcpFallback, final boolean truncatedBecauseOfMtu) throws IOException {
    ServerSocket serverSocket = null;
    final String host = "somehost.netty.io";
    final String txt = "this is a txt record";
    final AtomicReference<DnsMessage> messageRef = new AtomicReference<DnsMessage>();

    TestDnsServer dnsServer2 = new TestDnsServer(new RecordStore() {
        @Override
        public Set<ResourceRecord> getRecords(QuestionRecord question) {
            String name = question.getDomainName();
            if (name.equals(host)) {
                return Collections.<ResourceRecord>singleton(
                        new TestDnsServer.TestResourceRecord(name, RecordType.TXT,
                                Collections.<String, Object>singletonMap(
                                        DnsAttribute.CHARACTER_STRING.toLowerCase(), txt)));
            }
            return null;
        }
    }) {
        @Override
        protected DnsMessage filterMessage(DnsMessage message) {
            // Store a original message so we can replay it later on.
            messageRef.set(message);

            if (!truncatedBecauseOfMtu) {
                // Create a copy of the message but set the truncated flag.
                DnsMessageModifier modifier = modifierFrom(message);
                modifier.setTruncated(true);
                return modifier.getDnsMessage();
            }
            return message;
        }
    };
    DnsNameResolver resolver = null;
    try {
        DnsNameResolverBuilder builder = newResolver(strategy);

        // Use a fixed datagram channel instance so the MTU-clipping handler can be
        // installed on its pipeline after the resolver is built.
        final DatagramChannel datagramChannel = new NioDatagramChannel();
        ChannelFactory<DatagramChannel> channelFactory = new ChannelFactory<DatagramChannel>() {
            @Override
            public DatagramChannel newChannel() {
                return datagramChannel;
            }
        };
        builder.datagramChannelFactory(channelFactory);
        if (tcpFallback) {
            // If we are configured to use TCP as a fallback also bind a TCP socket
            serverSocket = startDnsServerAndCreateServerSocket(dnsServer2);

            // If we are configured to use TCP as a fallback also bind a TCP socket
            builder.socketChannelType(NioSocketChannel.class);
        } else {
            dnsServer2.start();
        }
        builder.queryTimeoutMillis(10000)
                .resolvedAddressTypes(ResolvedAddressTypes.IPV4_PREFERRED)
                .maxQueriesPerResolve(16)
                .nameServerProvider(new SingletonDnsServerAddressStreamProvider(dnsServer2.localAddress()));
        resolver = builder.build();
        if (truncatedBecauseOfMtu) {
            datagramChannel.pipeline().addFirst(new ChannelInboundHandlerAdapter() {
                @Override
                public void channelRead(ChannelHandlerContext ctx, Object msg) {
                    if (msg instanceof DatagramPacket) {
                        // Truncate the packet by 1 byte.
                        DatagramPacket packet = (DatagramPacket) msg;
                        packet.content().writerIndex(packet.content().writerIndex() - 1);
                    }
                    ctx.fireChannelRead(msg);
                }
            });
        }
        Future<AddressedEnvelope<DnsResponse, InetSocketAddress>> envelopeFuture = resolver.query(
                new DefaultDnsQuestion(host, DnsRecordType.TXT));

        if (tcpFallback) {
            // If we are configured to use TCP as a fallback lets replay the dns message over TCP
            Socket socket = serverSocket.accept();

            responseViaSocket(socket, messageRef.get());

            // Let's wait until we received the envelope before closing the socket.
            envelopeFuture.syncUninterruptibly();

            socket.close();
            serverSocket.close();
        }

        AddressedEnvelope<DnsResponse, InetSocketAddress> envelope = envelopeFuture.syncUninterruptibly().getNow();
        assertNotNull(envelope.sender());

        DnsResponse response = envelope.content();
        assertNotNull(response);

        assertEquals(DnsResponseCode.NOERROR, response.code());
        int count = response.count(DnsSection.ANSWER);

        assertEquals(1, count);
        List<String> texts = decodeTxt(response.recordAt(DnsSection.ANSWER, 0));
        assertEquals(1, texts.size());
        assertEquals(txt, texts.get(0));

        // With TCP fallback the final response is the complete (non-truncated) one;
        // without it the resolver surfaces the truncated UDP response.
        if (tcpFallback) {
            assertFalse(envelope.content().isTruncated());
        } else {
            assertTrue(envelope.content().isTruncated());
        }
        assertTrue(envelope.release());
    } finally {
        dnsServer2.stop();
        if (resolver != null) {
            resolver.close();
        }
    }
}
/**
 * Plays back a previously captured DNS message as a DNS-over-TCP response on the
 * given socket: reads the client's 2-byte length prefix and transaction id, then
 * writes the re-encoded message (with the client's transaction id) prefixed by its
 * own 2-byte length, per the DNS TCP framing.
 *
 * NOTE(review): the reads assume the client never hits EOF mid-header
 * (InputStream.read() would return -1 unchecked) — acceptable for this test,
 * since the resolver always sends a complete query.
 */
private static void responseViaSocket(Socket socket, DnsMessage message) throws IOException {
    InputStream in = socket.getInputStream();
    assert​True((in.read() << 8 | (in.read() & 0xff)) > 2); // skip length field
    int txnId = in.read() << 8 | (in.read() & 0xff);

    IoBuffer ioBuffer = IoBuffer.allocate(1024);
    // Must replace the transactionId with the one from the TCP request
    DnsMessageModifier modifier = modifierFrom(message);
    modifier.setTransactionId(txnId);
    new DnsMessageEncoder().encode(ioBuffer, modifier.getDnsMessage());
    ioBuffer.flip();

    ByteBuffer lenBuffer = ByteBuffer.allocate(2);
    lenBuffer.putShort((short) ioBuffer.remaining());
    lenBuffer.flip();

    // Write the 2-byte length prefix, then the encoded message.
    while (lenBuffer.hasRemaining()) {
        socket.getOutputStream().write(lenBuffer.get());
    }
    while (ioBuffer.hasRemaining()) {
        socket.getOutputStream().write(ioBuffer.get());
    }
    socket.getOutputStream().flush();
}
/**
 * UDP query times out (server drops it) and the TCP fallback succeeds.
 * See {@link #testTcpFallbackWhenTimeout(DnsNameResolverChannelStrategy, boolean)}.
 */
@ParameterizedTest
@EnumSource(DnsNameResolverChannelStrategy.class)
public void testTcpFallbackWhenTimeout(DnsNameResolverChannelStrategy strategy) throws IOException {
    testTcpFallbackWhenTimeout(strategy, true);
}
/**
 * UDP query times out and the TCP fallback also fails: the original timeout
 * exception must be surfaced with the TCP failure attached as suppressed.
 * See {@link #testTcpFallbackWhenTimeout(DnsNameResolverChannelStrategy, boolean)}.
 */
@ParameterizedTest
@EnumSource(DnsNameResolverChannelStrategy.class)
public void testTcpFallbackFailedWhenTimeout(DnsNameResolverChannelStrategy strategy) throws IOException {
    testTcpFallbackWhenTimeout(strategy, false);
}
/**
 * Binds a {@link ServerSocket} to an ephemeral port on the IPv4 loopback address and
 * starts the given {@link TestDnsServer} on the very same address, so UDP and TCP
 * share one port. If the DNS server cannot bind there (the port may already be in
 * use for UDP), a fresh socket/port is tried, up to a fixed number of attempts.
 *
 * @return the bound {@link ServerSocket}; the caller owns and must close it
 * @throws IllegalStateException if no shared address could be bound after all attempts
 */
private static ServerSocket startDnsServerAndCreateServerSocket(TestDnsServer dns) throws IOException {
    // Fix: the original loop ran 11 attempts (i == 10 after starting at 0) while the
    // failure message claimed 10; count attempts explicitly so code and message agree.
    final int maxAttempts = 10;
    for (int attempt = 1;; attempt++) {
        ServerSocket serverSocket = new ServerSocket();
        serverSocket.setReuseAddress(true);
        serverSocket.bind(new InetSocketAddress(NetUtil.LOCALHOST4, 0));
        try {
            dns.start(null, (InetSocketAddress) serverSocket.getLocalSocketAddress());
            return serverSocket;
        } catch (IOException e) {
            serverSocket.close();
            if (attempt == maxAttempts) {
                // We tried maxAttempts times without success
                throw new IllegalStateException(
                        "Unable to bind TestDnsServer and ServerSocket to the same address", e);
            }
            // We could not start the DnsServer which is most likely because the localAddress was already used,
            // let's retry
        }
    }
}
/**
 * Shared driver for the TCP-fallback-on-timeout tests. The UDP server never answers
 * ({@code filterMessage} returns {@code null}), so the 1s query timeout fires and
 * the resolver retries over TCP (enabled via
 * {@code socketChannelType(NioSocketChannel.class, true)}).
 *
 * @param tcpSuccess if {@code true} the TCP side replays the captured message and the
 *                   query must succeed; if {@code false} the TCP connection is closed
 *                   and the original {@link DnsNameResolverTimeoutException} must be
 *                   surfaced with the TCP failure as a suppressed exception
 */
private void testTcpFallbackWhenTimeout(DnsNameResolverChannelStrategy strategy, boolean tcpSuccess)
        throws IOException {
    final String host = "somehost.netty.io";
    final String txt = "this is a txt record";
    final AtomicReference<DnsMessage> messageRef = new AtomicReference<DnsMessage>();

    TestDnsServer dnsServer2 = new TestDnsServer(new RecordStore() {
        @Override
        public Set<ResourceRecord> getRecords(QuestionRecord question) {
            String name = question.getDomainName();
            if (name.equals(host)) {
                return Collections.<ResourceRecord>singleton(
                        new TestDnsServer.TestResourceRecord(name, RecordType.TXT,
                                Collections.<String, Object>singletonMap(
                                        DnsAttribute.CHARACTER_STRING.toLowerCase(), txt)));
            }
            return null;
        }
    }) {
        @Override
        protected DnsMessage filterMessage(DnsMessage message) {
            // Store a original message so we can replay it later on.
            messageRef.set(message);
            // Returning null drops the UDP response, forcing the query timeout.
            return null;
        }
    };
    DnsNameResolver resolver = null;
    ServerSocket serverSocket = null;
    try {
        DnsNameResolverBuilder builder = newResolver(strategy);

        final DatagramChannel datagramChannel = new NioDatagramChannel();
        ChannelFactory<DatagramChannel> channelFactory = new ChannelFactory<DatagramChannel>() {
            @Override
            public DatagramChannel newChannel() {
                return datagramChannel;
            }
        };
        builder.datagramChannelFactory(channelFactory);
        serverSocket = startDnsServerAndCreateServerSocket(dnsServer2);

        // If we are configured to use TCP as a fallback also bind a TCP socket
        builder.socketChannelType(NioSocketChannel.class, true);
        builder.queryTimeoutMillis(1000)
                .resolvedAddressTypes(ResolvedAddressTypes.IPV4_PREFERRED)
                .maxQueriesPerResolve(16)
                .nameServerProvider(new SingletonDnsServerAddressStreamProvider(dnsServer2.localAddress()))
                .datagramChannelStrategy(strategy);
        resolver = builder.build();
        Future<AddressedEnvelope<DnsResponse, InetSocketAddress>> envelopeFuture = resolver.query(
                new DefaultDnsQuestion(host, DnsRecordType.TXT));

        // If we are configured to use TCP as a fallback lets replay the dns message over TCP
        Socket socket = serverSocket.accept();

        if (tcpSuccess) {
            responseViaSocket(socket, messageRef.get());

            // Let's wait until we received the envelope before closing the socket.
            envelopeFuture.syncUninterruptibly();

            socket.close();

            AddressedEnvelope<DnsResponse, InetSocketAddress> envelope =
                    envelopeFuture.syncUninterruptibly().getNow();
            assertNotNull(envelope.sender());

            DnsResponse response = envelope.content();
            assertNotNull(response);

            assertEquals(DnsResponseCode.NOERROR, response.code());
            int count = response.count(DnsSection.ANSWER);

            assertEquals(1, count);
            List<String> texts = decodeTxt(response.recordAt(DnsSection.ANSWER, 0));
            assertEquals(1, texts.size());
            assertEquals(txt, texts.get(0));

            assertFalse(envelope.content().isTruncated());
            assertTrue(envelope.release());
        } else {
            // Just close the socket. This should cause the original exception to be used.
            socket.close();
            Throwable error = envelopeFuture.awaitUninterruptibly().cause();
            assertInstanceOf(DnsNameResolverTimeoutException.class, error);
            assertThat(error.getSuppressed().length).isGreaterThanOrEqualTo(1);
        }
    } finally {
        dnsServer2.stop();
        if (resolver != null) {
            resolver.close();
        }
        if (serverSocket != null) {
            serverSocket.close();
        }
    }
}
/**
 * Verifies cancellation semantics: when the user-supplied promise is cancelled while
 * the query to the first name server is in flight, the resolve fails with a
 * {@link CancellationException} and the resolver does NOT fail over to the second
 * name server in the sequential stream.
 */
@ParameterizedTest
@EnumSource(DnsNameResolverChannelStrategy.class)
public void testCancelPromise(DnsNameResolverChannelStrategy strategy) throws Exception {
    final EventLoop eventLoop = group.next();
    final Promise<InetAddress> promise = eventLoop.newPromise();
    // First server cancels the user promise as soon as it sees the query.
    final TestDnsServer dnsServer1 = new TestDnsServer(Collections.<String>emptySet()) {
        @Override
        protected DnsMessage filterMessage(DnsMessage message) {
            promise.cancel(true);
            return message;
        }
    };
    dnsServer1.start();
    // Second server only records whether it was ever queried.
    final AtomicBoolean isQuerySentToSecondServer = new AtomicBoolean();
    final TestDnsServer dnsServer2 = new TestDnsServer(Collections.<String>emptySet()) {
        @Override
        protected DnsMessage filterMessage(DnsMessage message) {
            isQuerySentToSecondServer.set(true);
            return message;
        }
    };
    dnsServer2.start();
    DnsServerAddressStreamProvider nameServerProvider =
            new SequentialDnsServerAddressStreamProvider(dnsServer1.localAddress(),
                    dnsServer2.localAddress());
    final DnsNameResolver resolver = new DnsNameResolverBuilder(group.next())
            .dnsQueryLifecycleObserverFactory(new TestRecursiveCacheDnsQueryLifecycleObserverFactory())
            .datagramChannelType(NioDatagramChannel.class)
            .optResourceEnabled(false)
            .nameServerProvider(nameServerProvider)
            .datagramChannelStrategy(strategy)
            .build();

    try {
        resolver.resolve("non-existent.netty.io", promise).sync();
        fail();
    } catch (Exception e) {
        assertInstanceOf(CancellationException.class, e);
    } finally {
        // Fix: the original test leaked the resolver and never stopped either test
        // DNS server; clean up like every sibling test does.
        resolver.close();
        dnsServer1.stop();
        dnsServer2.stop();
    }
    assertFalse(isQuerySentToSecondServer.get());
}
/**
 * Verifies recursive CNAME resolution across name servers chosen per domain:
 * firstname.com (server2) -&gt; CNAME secondname.com (served by server3)
 * -&gt; CNAME lastname.com (served by server2 again, which answers with the address).
 * The nameServerProvider routes only secondname.com to server3.
 */
@ParameterizedTest
@EnumSource(DnsNameResolverChannelStrategy.class)
public void testCNAMERecursiveResolveDifferentNameServersForDomains(DnsNameResolverChannelStrategy strategy)
        throws IOException {
    final String firstName = "firstname.com";
    final String secondName = "secondname.com";
    final String lastName = "lastname.com";
    final String ipv4Addr = "1.2.3.4";
    final TestDnsServer dnsServer2 = new TestDnsServer(new RecordStore() {
        @Override
        public Set<ResourceRecord> getRecords(QuestionRecord question) {
            ResourceRecordModifier rm = new ResourceRecordModifier();
            rm.setDnsClass(RecordClass.IN);
            rm.setDnsName(question.getDomainName());
            rm.setDnsTtl(100);
            if (question.getDomainName().equals(firstName)) {
                // First hop: CNAME to secondname.com.
                rm.setDnsType(RecordType.CNAME);
                rm.put(DnsAttribute.DOMAIN_NAME, secondName);
            } else if (question.getDomainName().equals(lastName)) {
                // Final hop: the actual address record.
                rm.setDnsType(question.getRecordType());
                rm.put(DnsAttribute.IP_ADDRESS, ipv4Addr);
            } else {
                return null;
            }
            return Collections.singleton(rm.getEntry());
        }
    });
    dnsServer2.start();
    final TestDnsServer dnsServer3 = new TestDnsServer(new RecordStore() {
        @Override
        public Set<ResourceRecord> getRecords(QuestionRecord question) {
            if (question.getDomainName().equals(secondName)) {
                // Middle hop, only reachable via this server: CNAME to lastname.com.
                ResourceRecordModifier rm = new ResourceRecordModifier();
                rm.setDnsClass(RecordClass.IN);
                rm.setDnsName(question.getDomainName());
                rm.setDnsTtl(100);
                rm.setDnsType(RecordType.CNAME);
                rm.put(DnsAttribute.DOMAIN_NAME, lastName);
                return Collections.singleton(rm.getEntry());
            }
            return null;
        }
    });
    dnsServer3.start();
    DnsNameResolver resolver = null;
    try {
        resolver = newResolver(strategy)
                // Disable both caches so every hop really hits the wire.
                .resolveCache(NoopDnsCache.INSTANCE)
                .cnameCache(NoopDnsCnameCache.INSTANCE)
                .recursionDesired(true)
                .maxQueriesPerResolve(16)
                .nameServerProvider(new DnsServerAddressStreamProvider() {
                    @Override
                    public DnsServerAddressStream nameServerAddressStream(String hostname) {
                        // The resolver passes FQDNs (trailing dot); route secondname.com
                        // to server3 and everything else to server2.
                        if (hostname.equals(secondName + '.')) {
                            return DnsServerAddresses.singleton(dnsServer3.localAddress()).stream();
                        }
                        return DnsServerAddresses.singleton(dnsServer2.localAddress()).stream();
                    }
                })
                .resolvedAddressTypes(ResolvedAddressTypes.IPV4_PREFERRED).build();

        assertResolvedAddress(resolver.resolve(firstName).syncUninterruptibly().getNow(), ipv4Addr, firstName);
    } finally {
        dnsServer2.stop();
        dnsServer3.stop();
        if (resolver != null) {
            resolver.close();
        }
    }
}
/**
 * Asserts that the resolved address has the expected textual IP and that it kept
 * the hostname it was resolved from.
 */
private static void assertResolvedAddress(InetAddress resolvedAddress, String ipAddr, String hostname) {
    assertEquals(ipAddr, resolvedAddress.getHostAddress());
    assertEquals(hostname, resolvedAddress.getHostName());
}
/**
 * Verifies that a rotational {@link DnsServerAddresses} provider alternates between
 * both name servers across successive resolutions: with caching disabled, each
 * resolve issues exactly one query, and the per-server counters must advance in
 * strict 2-1-2-2 alternation (server2, server3, server2, server3, ...).
 */
@ParameterizedTest
@EnumSource(DnsNameResolverChannelStrategy.class)
public void testAllNameServers(DnsNameResolverChannelStrategy strategy) throws IOException {
    final String domain = "netty.io";
    final String ipv4Addr = "1.2.3.4";
    // Both servers answer identically; the counters reveal which one was queried.
    final AtomicInteger server2Counter = new AtomicInteger();
    final TestDnsServer dnsServer2 = new TestDnsServer(new RecordStore() {
        @Override
        public Set<ResourceRecord> getRecords(QuestionRecord question) {
            server2Counter.incrementAndGet();
            ResourceRecordModifier rm = new ResourceRecordModifier();
            rm.setDnsClass(RecordClass.IN);
            rm.setDnsName(question.getDomainName());
            rm.setDnsTtl(100);
            rm.setDnsType(question.getRecordType());
            rm.put(DnsAttribute.IP_ADDRESS, ipv4Addr);
            return Collections.singleton(rm.getEntry());
        }
    });
    dnsServer2.start();
    final AtomicInteger server3Counter = new AtomicInteger();
    final TestDnsServer dnsServer3 = new TestDnsServer(new RecordStore() {
        @Override
        public Set<ResourceRecord> getRecords(QuestionRecord question) {
            server3Counter.incrementAndGet();
            ResourceRecordModifier rm = new ResourceRecordModifier();
            rm.setDnsClass(RecordClass.IN);
            rm.setDnsName(question.getDomainName());
            rm.setDnsTtl(100);
            rm.setDnsType(question.getRecordType());
            rm.put(DnsAttribute.IP_ADDRESS, ipv4Addr);
            return Collections.singleton(rm.getEntry());
        }
    });
    dnsServer3.start();
    DnsNameResolver resolver = null;
    try {
        resolver = newResolver(strategy)
                // Disable caching so every resolve goes to the wire.
                .resolveCache(NoopDnsCache.INSTANCE)
                .cnameCache(NoopDnsCnameCache.INSTANCE)
                .recursionDesired(true)
                .maxQueriesPerResolve(16)
                .nameServerProvider(new DnsServerAddressStreamProvider() {
                    private final DnsServerAddresses addresses =
                            DnsServerAddresses.rotational(dnsServer2.localAddress(), dnsServer3.localAddress());
                    @Override
                    public DnsServerAddressStream nameServerAddressStream(String hostname) {
                        return addresses.stream();
                    }
                })
                .resolvedAddressTypes(ResolvedAddressTypes.IPV4_ONLY).build();

        // Four resolves must alternate server2, server3, server2, server3.
        assertResolvedAddress(resolver.resolve(domain).syncUninterruptibly().getNow(), ipv4Addr, domain);
        assertEquals(1, server2Counter.get());
        assertEquals(0, server3Counter.get());
        assertResolvedAddress(resolver.resolve(domain).syncUninterruptibly().getNow(), ipv4Addr, domain);
        assertEquals(1, server2Counter.get());
        assertEquals(1, server3Counter.get());
        assertResolvedAddress(resolver.resolve(domain).syncUninterruptibly().getNow(), ipv4Addr, domain);
        assertEquals(2, server2Counter.get());
        assertEquals(1, server3Counter.get());
        assertResolvedAddress(resolver.resolve(domain).syncUninterruptibly().getNow(), ipv4Addr, domain);
        assertEquals(2, server2Counter.get());
        assertEquals(2, server3Counter.get());
    } finally {
        dnsServer2.stop();
        dnsServer3.stop();
        if (resolver != null) {
            resolver.close();
        }
    }
}
@ParameterizedTest
@EnumSource(DnsNameResolverChannelStrategy.class)
@Timeout(value = 2000, unit = TimeUnit.MILLISECONDS)
public void testSrvWithCnameNotCached(DnsNameResolverChannelStrategy strategy) throws Exception {
final AtomicBoolean alias = new AtomicBoolean();
TestDnsServer dnsServer2 = new TestDnsServer(new RecordStore() {
@Override
public Set<ResourceRecord> getRecords(QuestionRecord question) {
String name = question.getDomainName();
if (name.equals("service.netty.io")) {
Set<ResourceRecord> records = new HashSet<ResourceRecord>(2);
ResourceRecordModifier rm = new ResourceRecordModifier();
rm.setDnsClass(RecordClass.IN);
rm.setDnsName(name);
rm.setDnsTtl(10);
rm.setDnsType(RecordType.CNAME);
rm.put(DnsAttribute.DOMAIN_NAME, "alias.service.netty.io");
records.add(rm.getEntry());
rm = new ResourceRecordModifier();
rm.setDnsClass(RecordClass.IN);
rm.setDnsName(name);
rm.setDnsTtl(10);
rm.setDnsType(RecordType.SRV);
rm.put(DnsAttribute.DOMAIN_NAME, "foo.service.netty.io");
rm.put(DnsAttribute.SERVICE_PORT, "8080");
rm.put(DnsAttribute.SERVICE_PRIORITY, "10");
rm.put(DnsAttribute.SERVICE_WEIGHT, "1");
records.add(rm.getEntry());
return records;
}
if (name.equals("foo.service.netty.io")) {
ResourceRecordModifier rm = new ResourceRecordModifier();
rm.setDnsClass(RecordClass.IN);
rm.setDnsName(name);
rm.setDnsTtl(10);
rm.setDnsType(RecordType.A);
rm.put(DnsAttribute.IP_ADDRESS, "10.0.0.1");
return Collections.singleton(rm.getEntry());
}
if (alias.get()) {
ResourceRecordModifier rm = new ResourceRecordModifier();
rm.setDnsClass(RecordClass.IN);
rm.setDnsName(name);
rm.setDnsTtl(10);
rm.setDnsType(RecordType.SRV);
rm.put(DnsAttribute.DOMAIN_NAME, "foo.service.netty.io");
rm.put(DnsAttribute.SERVICE_PORT, "8080");
rm.put(DnsAttribute.SERVICE_PRIORITY, "10");
rm.put(DnsAttribute.SERVICE_WEIGHT, "1");
return Collections.singleton(rm.getEntry());
}
return null;
}
});
dnsServer2.start();
DnsNameResolver resolver = null;
try {
DnsNameResolverBuilder builder = newResolver(strategy)
.recursionDesired(false)
.queryTimeoutMillis(10000)
.resolvedAddressTypes(ResolvedAddressTypes.IPV4_PREFERRED)
.completeOncePreferredResolved(true)
.maxQueriesPerResolve(16)
.nameServerProvider(new SingletonDnsServerAddressStreamProvider(dnsServer2.localAddress()));
resolver = builder.build();
assertNotEmptyAndRelease(resolver.resolveAll(new DefaultDnsQuestion("service.netty.io", SRV)));
alias.set(true);
assertNotEmptyAndRelease(resolver.resolveAll(new DefaultDnsQuestion("service.netty.io", SRV)));
alias.set(false);
assertNotEmptyAndRelease(resolver.resolveAll(new DefaultDnsQuestion("service.netty.io", SRV)));
} finally {
dnsServer2.stop();
if (resolver != null) {
resolver.close();
}
}
}
@ParameterizedTest
@EnumSource(DnsNameResolverChannelStrategy.class)
public void testCNAMENotTriedOnAddressLookupsWhenDisabled(DnsNameResolverChannelStrategy strategy)
throws Exception {
TRY_FINAL_CNAME_ON_ADDRESS_LOOKUPS = false;
testFollowUpCNAME(strategy, false);
}
@ParameterizedTest
@EnumSource(DnsNameResolverChannelStrategy.class)
public void testCNAMEOnlyTriedOnAddressLookups(DnsNameResolverChannelStrategy strategy) throws Exception {
TRY_FINAL_CNAME_ON_ADDRESS_LOOKUPS = true;
try {
testFollowUpCNAME(strategy, true);
} finally {
TRY_FINAL_CNAME_ON_ADDRESS_LOOKUPS = false;
}
}
private void testFollowUpCNAME(DnsNameResolverChannelStrategy strategy, final boolean enabled) throws Exception {
final AtomicInteger cnameQueries = new AtomicInteger();
TestDnsServer dnsServer2 = new TestDnsServer(new RecordStore() {
@Override
public Set<ResourceRecord> getRecords(QuestionRecord questionRecord) {
if (questionRecord.getRecordType() == RecordType.CNAME) {
cnameQueries.incrementAndGet();
}
return Collections.emptySet();
}
});
DnsNameResolver resolver = null;
try {
dnsServer2.start();
resolver = newNonCachedResolver(strategy, ResolvedAddressTypes.IPV4_PREFERRED)
.maxQueriesPerResolve(4)
.searchDomains(Collections.<String>emptyList())
.nameServerProvider(new SingletonDnsServerAddressStreamProvider(dnsServer2.localAddress()))
.build();
// We expect these resolves to fail with UnknownHostException,
// and then check that no unexpected CNAME queries were performed.
assertInstanceOf(UnknownHostException.class,
resolver.resolveAll(new DefaultDnsQuestion("lookup-srv.netty.io", SRV)).await().cause());
assertEquals(0, cnameQueries.get());
assertInstanceOf(UnknownHostException.class,
resolver.resolveAll(new DefaultDnsQuestion("lookup-naptr.netty.io", NAPTR)).await().cause());
assertEquals(0, cnameQueries.get());
assertInstanceOf(UnknownHostException.class,
resolver.resolveAll(new DefaultDnsQuestion("lookup-cname.netty.io", CNAME)).await().cause());
assertEquals(1, cnameQueries.getAndSet(0));
assertInstanceOf(UnknownHostException.class,
resolver.resolveAll(new DefaultDnsQuestion("lookup-a.netty.io", A)).await().cause());
assertEquals(enabled ? 1 : 0, cnameQueries.getAndSet(0));
assertInstanceOf(UnknownHostException.class,
resolver.resolveAll(new DefaultDnsQuestion("lookup-aaaa.netty.io", AAAA)).await().cause());
assertEquals(enabled ? 1 : 0, cnameQueries.getAndSet(0));
assertInstanceOf(UnknownHostException.class,
resolver.resolveAll("lookup-address.netty.io").await().cause());
assertEquals(enabled ? 1 : 0, cnameQueries.getAndSet(0));
} finally {
dnsServer2.stop();
if (resolver != null) {
resolver.close();
}
}
}
private static void assertNotEmptyAndRelease(Future<List<DnsRecord>> recordsFuture) throws Exception {
List<DnsRecord> records = recordsFuture.get();
assertFalse(records.isEmpty());
for (DnsRecord record : records) {
ReferenceCountUtil.release(record);
}
}
@ParameterizedTest
@EnumSource(DnsNameResolverChannelStrategy.class)
public void testResolveIpv6WithScopeId(DnsNameResolverChannelStrategy strategy) throws Exception {
testResolveIpv6WithScopeId0(strategy, false);
}
@ParameterizedTest
@EnumSource(DnsNameResolverChannelStrategy.class)
public void testResolveAllIpv6WithScopeId(DnsNameResolverChannelStrategy strategy) throws Exception {
testResolveIpv6WithScopeId0(strategy, true);
}
private void testResolveIpv6WithScopeId0(DnsNameResolverChannelStrategy strategy, boolean resolveAll)
throws Exception {
DnsNameResolver resolver = newResolver(strategy).build();
String address = "fe80:0:0:0:1c31:d1d1:4824:72a9";
int scopeId = 15;
String addressString = address + '%' + scopeId;
byte[] bytes = NetUtil.createByteArrayFromIpAddressString(address);
Inet6Address inet6Address = Inet6Address.getByAddress(null, bytes, scopeId);
try {
final InetAddress addr;
if (resolveAll) {
List<InetAddress> addressList = resolver.resolveAll(addressString).getNow();
assertEquals(1, addressList.size());
addr = addressList.get(0);
} else {
addr = resolver.resolve(addressString).getNow();
}
assertEquals(inet6Address, addr);
} finally {
resolver.close();
}
}
@ParameterizedTest
@EnumSource(DnsNameResolverChannelStrategy.class)
public void testResolveIpv6WithoutScopeId(DnsNameResolverChannelStrategy strategy) throws Exception {
testResolveIpv6WithoutScopeId0(strategy, false);
}
@ParameterizedTest
@EnumSource(DnsNameResolverChannelStrategy.class)
public void testResolveAllIpv6WithoutScopeId(DnsNameResolverChannelStrategy strategy) throws Exception {
testResolveIpv6WithoutScopeId0(strategy, true);
}
private void testResolveIpv6WithoutScopeId0(DnsNameResolverChannelStrategy strategy, boolean resolveAll)
throws Exception {
DnsNameResolver resolver = newResolver(strategy).build();
String addressString = "fe80:0:0:0:1c31:d1d1:4824:72a9";
byte[] bytes = NetUtil.createByteArrayFromIpAddressString(addressString);
Inet6Address inet6Address = (Inet6Address) InetAddress.getByAddress(bytes);
try {
final InetAddress addr;
if (resolveAll) {
List<InetAddress> addressList = resolver.resolveAll(addressString).getNow();
assertEquals(1, addressList.size());
addr = addressList.get(0);
} else {
addr = resolver.resolve(addressString).getNow();
}
assertEquals(inet6Address, addr);
} finally {
resolver.close();
}
}
@ParameterizedTest
@EnumSource(DnsNameResolverChannelStrategy.class)
public void testResolveIp4(DnsNameResolverChannelStrategy strategy) throws Exception {
testResolveIp4(strategy, false);
}
@ParameterizedTest
@EnumSource(DnsNameResolverChannelStrategy.class)
public void testResolveAllIp4(DnsNameResolverChannelStrategy strategy) throws Exception {
testResolveIp4(strategy, true);
}
private void testResolveIp4(DnsNameResolverChannelStrategy strategy, boolean resolveAll) throws Exception {
DnsNameResolver resolver = newResolver(strategy).build();
String addressString = "10.0.0.1";
byte[] bytes = NetUtil.createByteArrayFromIpAddressString(addressString);
InetAddress inetAddress = InetAddress.getByAddress(bytes);
try {
final InetAddress addr;
if (resolveAll) {
List<InetAddress> addressList = resolver.resolveAll(addressString).getNow();
assertEquals(1, addressList.size());
addr = addressList.get(0);
} else {
addr = resolver.resolve(addressString).getNow();
}
assertEquals(inetAddress, addr);
} finally {
resolver.close();
}
}
@ParameterizedTest
@EnumSource(DnsNameResolverChannelStrategy.class)
public void testResolveSearchDomainStopOnFirstSuccess(DnsNameResolverChannelStrategy strategy) throws Exception {
final String addressString = "10.0.0.1";
final Queue<String> names = new ConcurrentLinkedQueue<String>();
final TestDnsServer dnsServer2 = new TestDnsServer(new RecordStore() {
private int called;
@Override
public Set<ResourceRecord> getRecords(QuestionRecord question) {
names.offer(question.getDomainName());
if (++called == 2) {
ResourceRecordModifier rm = new ResourceRecordModifier();
rm.setDnsClass(RecordClass.IN);
rm.setDnsName(question.getDomainName());
rm.setDnsTtl(100);
rm.setDnsType(question.getRecordType());
rm.put(DnsAttribute.IP_ADDRESS, addressString);
return Collections.singleton(rm.getEntry());
}
return null;
}
});
dnsServer2.start();
DnsNameResolver resolver = newResolver(strategy).searchDomains(
Arrays.asList("search1.netty.io", "search2.netty.io", "search3.netty.io"))
.ndots(2).nameServerProvider(new SingletonDnsServerAddressStreamProvider(dnsServer2.localAddress()))
.resolvedAddressTypes(ResolvedAddressTypes.IPV4_ONLY)
.build();
byte[] bytes = NetUtil.createByteArrayFromIpAddressString(addressString);
InetAddress inetAddress = InetAddress.getByAddress(bytes);
try {
final InetAddress addr = resolver.resolve("netty.io").sync().getNow();
assertEquals(inetAddress, addr);
} finally {
resolver.close();
dnsServer2.stop();
assertEquals("netty.io.search1.netty.io", names.poll());
assertEquals("netty.io.search2.netty.io", names.poll());
assertTrue(names.isEmpty());
}
}
@ParameterizedTest
@EnumSource(DnsNameResolverChannelStrategy.class)
public void testResolveTryWithoutSearchDomainFirst(DnsNameResolverChannelStrategy strategy) throws Exception {
testResolveTryWithoutSearchDomainFirst(strategy, true);
}
@ParameterizedTest
@EnumSource(DnsNameResolverChannelStrategy.class)
public void testResolveTryWithoutSearchDomainFirstButContinue(DnsNameResolverChannelStrategy strategy)
throws Exception {
testResolveTryWithoutSearchDomainFirst(strategy, false);
}
private static void testResolveTryWithoutSearchDomainFirst(
DnsNameResolverChannelStrategy strategy, final boolean absoluteSuccess) throws Exception {
final String addressString = "10.0.0.1";
final Queue<String> names = new ConcurrentLinkedQueue<String>();
final TestDnsServer dnsServer2 = new TestDnsServer(new RecordStore() {
private int called;
@Override
public Set<ResourceRecord> getRecords(QuestionRecord question) {
names.offer(question.getDomainName());
++called;
if ((absoluteSuccess && called == 1) || called == 3) {
ResourceRecordModifier rm = new ResourceRecordModifier();
rm.setDnsClass(RecordClass.IN);
rm.setDnsName(question.getDomainName());
rm.setDnsTtl(100);
rm.setDnsType(question.getRecordType());
rm.put(DnsAttribute.IP_ADDRESS, addressString);
return Collections.singleton(rm.getEntry());
}
return null;
}
});
dnsServer2.start();
DnsNameResolver resolver = newResolver(strategy).searchDomains(
Arrays.asList("search1.netty.io", "search2.netty.io", "search3.netty.io"))
.ndots(1).nameServerProvider(new SingletonDnsServerAddressStreamProvider(dnsServer2.localAddress()))
.resolvedAddressTypes(ResolvedAddressTypes.IPV4_ONLY)
.build();
byte[] bytes = NetUtil.createByteArrayFromIpAddressString(addressString);
InetAddress inetAddress = InetAddress.getByAddress(bytes);
try {
final InetAddress addr = resolver.resolve("netty.io").sync().getNow();
assertEquals(inetAddress, addr);
} finally {
resolver.close();
dnsServer2.stop();
assertEquals("netty.io", names.poll());
if (!absoluteSuccess) {
assertEquals("netty.io.search1.netty.io", names.poll());
assertEquals("netty.io.search2.netty.io", names.poll());
}
assertTrue(names.isEmpty());
}
}
@ParameterizedTest
@EnumSource(DnsNameResolverChannelStrategy.class)
public void testInflightQueries(DnsNameResolverChannelStrategy strategy) throws Exception {
final String addressString = "10.0.0.1";
final AtomicInteger called = new AtomicInteger();
final CountDownLatch latch = new CountDownLatch(1);
final TestDnsServer dnsServer2 = new TestDnsServer(new RecordStore() {
@Override
public Set<ResourceRecord> getRecords(QuestionRecord question) {
called.incrementAndGet();
try {
latch.await();
ResourceRecordModifier rm = new ResourceRecordModifier();
rm.setDnsClass(RecordClass.IN);
rm.setDnsName(question.getDomainName());
rm.setDnsTtl(100);
rm.setDnsType(question.getRecordType());
rm.put(DnsAttribute.IP_ADDRESS, addressString);
return Collections.singleton(rm.getEntry());
} catch (InterruptedException e) {
throw new IllegalStateException(e);
}
}
});
dnsServer2.start();
DnsNameResolver resolver = newResolver(strategy)
.nameServerProvider(new SingletonDnsServerAddressStreamProvider(dnsServer2.localAddress()))
.resolvedAddressTypes(ResolvedAddressTypes.IPV4_ONLY)
.consolidateCacheSize(2)
.build();
byte[] bytes = NetUtil.createByteArrayFromIpAddressString(addressString);
InetAddress inetAddress = InetAddress.getByAddress(bytes);
try {
Future<InetAddress> f = resolver.resolve("netty.io");
Future<InetAddress> f2 = resolver.resolve("netty.io");
assertFalse(f.isDone());
assertFalse(f2.isDone());
// Now unblock so we receive the response back for our query.
latch.countDown();
assertEquals(inetAddress, f.sync().getNow());
assertEquals(inetAddress, f2.sync().getNow());
} finally {
resolver.close();
dnsServer2.stop();
assertEquals(1, called.get());
}
}
@ParameterizedTest
@EnumSource(DnsNameResolverChannelStrategy.class)
public void testAddressAlreadyInUse(DnsNameResolverChannelStrategy strategy) throws Exception {
DatagramSocket datagramSocket = new DatagramSocket();
try {
assertTrue(datagramSocket.isBound());
try {
final DnsNameResolver resolver = newResolver(strategy)
.localAddress(datagramSocket.getLocalSocketAddress()).build();
try {
Throwable cause = assertThrows(UnknownHostException.class, new Executable() {
@Override
public void execute() throws Throwable {
resolver.resolve("netty.io").sync();
}
});
assertInstanceOf(BindException.class, cause.getCause());
} finally {
resolver.close();
}
} catch (IllegalStateException cause) {
// We might also throw directly here... in this case let's verify that we use the correct exception.
assertInstanceOf(BindException.class, cause.getCause());
}
} finally {
datagramSocket.close();
}
}
@ParameterizedTest
@EnumSource(DnsNameResolverChannelStrategy.class)
public void testResponseFeedbackStream(DnsNameResolverChannelStrategy strategy) {
final AtomicBoolean successCalled = new AtomicBoolean();
final AtomicBoolean failureCalled = new AtomicBoolean();
final AtomicBoolean returnSuccess = new AtomicBoolean(false);
final DnsNameResolver resolver = newResolver(strategy, true, new DnsServerAddressStreamProvider() {
@Override
public DnsServerAddressStream nameServerAddressStream(String hostname) {
return new DnsServerResponseFeedbackAddressStream() {
@Override
public void feedbackSuccess(InetSocketAddress address, long queryResponseTimeNanos) {
assertThat(queryResponseTimeNanos).isGreaterThanOrEqualTo(0L);
successCalled.set(true);
}
@Override
public void feedbackFailure(InetSocketAddress address,
Throwable failureCause,
long queryResponseTimeNanos) {
assertThat(queryResponseTimeNanos).isGreaterThanOrEqualTo(0L);
assertNotNull(failureCause);
failureCalled.set(true);
}
@Override
public InetSocketAddress next() {
if (returnSuccess.get()) {
return dnsServer.localAddress();
}
try {
return new InetSocketAddress(InetAddress.getByAddress("foo.com",
new byte[] {(byte) 169, (byte) 254, 12, 34 }), 53);
} catch (UnknownHostException e) {
throw new Error(e);
}
}
@Override
public int size() {
return 1;
}
@Override
public DnsServerAddressStream duplicate() {
return this;
}
};
}
}).build();
try {
// setup call to be successful and verify
returnSuccess.set(true);
resolver.resolve("google.com").syncUninterruptibly().getNow();
assertTrue(successCalled.get());
assertFalse(failureCalled.get());
// reset state for next query
successCalled.set(false);
failureCalled.set(false);
// setup call to fail and verify
returnSuccess.set(false);
try {
resolver.resolve("yahoo.com").syncUninterruptibly().getNow();
fail();
} catch (Exception e) {
// expected
assertInstanceOf(UnknownHostException.class, e);
} finally {
assertFalse(successCalled.get());
assertTrue(failureCalled.get());
}
} finally {
if (resolver != null) {
resolver.close();
}
}
}
@ParameterizedTest
@EnumSource(DnsNameResolverChannelStrategy.class)
public void testCnameWithAAndAdditionalsAndAuthorities(DnsNameResolverChannelStrategy strategy) throws Exception {
final String hostname = "test.netty.io";
final String cname = "cname.netty.io";
final List<String> nameServers = new ArrayList<String>();
for (int i = 0; i < 13; i++) {
nameServers.add("ns" + i + ".foo.bar");
}
TestDnsServer server = new TestDnsServer(new RecordStore() {
@Override
public Set<ResourceRecord> getRecords(QuestionRecord questionRecord) {
ResourceRecordModifier rm = new ResourceRecordModifier();
rm.setDnsClass(RecordClass.IN);
rm.setDnsName(hostname);
rm.setDnsTtl(10000);
rm.setDnsType(RecordType.CNAME);
rm.put(DnsAttribute.DOMAIN_NAME, cname);
Set<ResourceRecord> records = new LinkedHashSet<ResourceRecord>();
records.add(rm.getEntry());
records.add(newARecord(cname, "10.0.0.2"));
return records;
}
}) {
@Override
protected DnsMessage filterMessage(DnsMessage message) {
for (QuestionRecord record: message.getQuestionRecords()) {
if (record.getDomainName().equals(hostname)) {
// Let's add some extra records.
message.getAuthorityRecords().clear();
message.getAdditionalRecords().clear();
for (String nameserver: nameServers) {
message.getAuthorityRecords().add(TestDnsServer.newNsRecord(".", nameserver));
message.getAdditionalRecords().add(newAddressRecord(nameserver, RecordType.A, "10.0.0.1"));
message.getAdditionalRecords().add(newAddressRecord(nameserver, RecordType.AAAA, "::1"));
}
return message;
}
}
return message;
}
};
server.start();
EventLoopGroup group = new MultiThreadIoEventLoopGroup(1, NioIoHandler.newFactory());
SingletonDnsServerAddressStreamProvider provider =
new SingletonDnsServerAddressStreamProvider(server.localAddress());
final DnsNameResolver resolver = new DnsNameResolver(
group.next(), new ReflectiveChannelFactory<DatagramChannel>(NioDatagramChannel.class), null, false,
NoopDnsCache.INSTANCE, NoopDnsCnameCache.INSTANCE, NoopAuthoritativeDnsServerCache.INSTANCE, null,
NoopDnsQueryLifecycleObserverFactory.INSTANCE, 2000, ResolvedAddressTypes.IPV4_ONLY,
true, 8, true, 4096,
false, HostsFileEntriesResolver.DEFAULT,
provider, new ThreadLocalNameServerAddressStream(provider),
new String [] { "k8se-apps.svc.cluster.local, svc.cluster.local, cluster.local" }, 1,
true, false, 0, strategy);
try {
InetAddress address = resolver.resolve(hostname).sync().getNow();
assertArrayEquals(new byte[] { 10, 0, 0, 2 }, address.getAddress());
} finally {
resolver.close();
group.shutdownGracefully(0, 0, TimeUnit.SECONDS);
server.stop();
}
}
}
| RedirectingTestDnsServer |
java | google__guava | android/guava/src/com/google/common/graph/ValueGraph.java | {
"start": 1260,
"end": 4676
} | interface ____ satisfies your use case. See the <a
* href="https://github.com/google/guava/wiki/GraphsExplained#choosing-the-right-graph-type">
* "Choosing the right graph type"</a> section of the Guava User Guide for more details.
*
* <h3>Capabilities</h3>
*
* <p>{@code ValueGraph} supports the following use cases (<a
* href="https://github.com/google/guava/wiki/GraphsExplained#definitions">definitions of
* terms</a>):
*
* <ul>
* <li>directed graphs
* <li>undirected graphs
* <li>graphs that do/don't allow self-loops
* <li>graphs whose nodes/edges are insertion-ordered, sorted, or unordered
* <li>graphs whose edges have associated values
* </ul>
*
* <p>{@code ValueGraph}, as a subtype of {@code Graph}, explicitly does not support parallel edges,
* and forbids implementations or extensions with parallel edges. If you need parallel edges, use
* {@link Network}. (You can use a positive {@code Integer} edge value as a loose representation of
* edge multiplicity, but the {@code *degree()} and mutation methods will not reflect your
* interpretation of the edge value as its multiplicity.)
*
* <h3>Building a {@code ValueGraph}</h3>
*
* <p>The implementation classes that {@code common.graph} provides are not public, by design. To
* create an instance of one of the built-in implementations of {@code ValueGraph}, use the {@link
* ValueGraphBuilder} class:
*
* {@snippet :
* MutableValueGraph<Integer, Double> graph = ValueGraphBuilder.directed().build();
* }
*
* <p>{@link ValueGraphBuilder#build()} returns an instance of {@link MutableValueGraph}, which is a
* subtype of {@code ValueGraph} that provides methods for adding and removing nodes and edges. If
* you do not need to mutate a graph (e.g. if you write a method than runs a read-only algorithm on
* the graph), you should use the non-mutating {@link ValueGraph} interface, or an {@link
* ImmutableValueGraph}.
*
* <p>You can create an immutable copy of an existing {@code ValueGraph} using {@link
* ImmutableValueGraph#copyOf(ValueGraph)}:
*
* {@snippet :
* ImmutableValueGraph<Integer, Double> immutableGraph = ImmutableValueGraph.copyOf(graph);
* }
*
* <p>Instances of {@link ImmutableValueGraph} do not implement {@link MutableValueGraph}
* (obviously!) and are contractually guaranteed to be unmodifiable and thread-safe.
*
* <p>The Guava User Guide has <a
* href="https://github.com/google/guava/wiki/GraphsExplained#building-graph-instances">more
* information on (and examples of) building graphs</a>.
*
* <h3>Additional documentation</h3>
*
* <p>See the Guava User Guide for the {@code common.graph} package (<a
* href="https://github.com/google/guava/wiki/GraphsExplained">"Graphs Explained"</a>) for
* additional documentation, including:
*
* <ul>
* <li><a
* href="https://github.com/google/guava/wiki/GraphsExplained#equals-hashcode-and-graph-equivalence">
* {@code equals()}, {@code hashCode()}, and graph equivalence</a>
* <li><a href="https://github.com/google/guava/wiki/GraphsExplained#synchronization">
* Synchronization policy</a>
* <li><a href="https://github.com/google/guava/wiki/GraphsExplained#notes-for-implementors">Notes
* for implementors</a>
* </ul>
*
* @author James Sexton
* @author Joshua O'Madadhain
* @param <N> Node parameter type
* @param <V> Value parameter type
* @since 20.0
*/
@Beta
public | that |
java | elastic__elasticsearch | modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ForEachProcessorFactoryTests.java | {
"start": 1040,
"end": 5922
} | class ____ extends ESTestCase {
private final ScriptService scriptService = mock(ScriptService.class);
public void testCreate() throws Exception {
Processor processor = new TestProcessor(ingestDocument -> {});
Map<String, Processor.Factory> registry = new HashMap<>();
registry.put("_name", (r, t, description, c, projectId) -> processor);
ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(scriptService);
Map<String, Object> config = new HashMap<>();
config.put("field", "_field");
config.put("processor", Map.of("_name", Collections.emptyMap()));
ForEachProcessor forEachProcessor = forEachFactory.create(registry, null, null, config, null);
assertThat(forEachProcessor, notNullValue());
assertThat(forEachProcessor.getField(), equalTo("_field"));
assertThat(forEachProcessor.getInnerProcessor(), sameInstance(processor));
assertFalse(forEachProcessor.isIgnoreMissing());
}
public void testSetIgnoreMissing() throws Exception {
Processor processor = new TestProcessor(ingestDocument -> {});
Map<String, Processor.Factory> registry = new HashMap<>();
registry.put("_name", (r, t, description, c, projectId) -> processor);
ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(scriptService);
Map<String, Object> config = new HashMap<>();
config.put("field", "_field");
config.put("processor", Map.of("_name", Collections.emptyMap()));
config.put("ignore_missing", true);
ForEachProcessor forEachProcessor = forEachFactory.create(registry, null, null, config, null);
assertThat(forEachProcessor, notNullValue());
assertThat(forEachProcessor.getField(), equalTo("_field"));
assertThat(forEachProcessor.getInnerProcessor(), sameInstance(processor));
assertTrue(forEachProcessor.isIgnoreMissing());
}
public void testCreateWithTooManyProcessorTypes() throws Exception {
Processor processor = new TestProcessor(ingestDocument -> {});
Map<String, Processor.Factory> registry = new HashMap<>();
registry.put("_first", (r, t, description, c, projectId) -> processor);
registry.put("_second", (r, t, description, c, projectId) -> processor);
ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(scriptService);
Map<String, Object> config = new HashMap<>();
config.put("field", "_field");
Map<String, Object> processorTypes = new HashMap<>();
processorTypes.put("_first", Map.of());
processorTypes.put("_second", Map.of());
config.put("processor", processorTypes);
Exception exception = expectThrows(
ElasticsearchParseException.class,
() -> forEachFactory.create(registry, null, null, config, null)
);
assertThat(exception.getMessage(), equalTo("[processor] Must specify exactly one processor type"));
}
public void testCreateWithNonExistingProcessorType() throws Exception {
ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(scriptService);
Map<String, Object> config = new HashMap<>();
config.put("field", "_field");
config.put("processor", Map.of("_name", Collections.emptyMap()));
Exception expectedException = expectThrows(
ElasticsearchParseException.class,
() -> forEachFactory.create(Map.of(), null, null, config, null)
);
assertThat(expectedException.getMessage(), equalTo("No processor type exists with name [_name]"));
}
public void testCreateWithMissingField() throws Exception {
Processor processor = new TestProcessor(ingestDocument -> {});
Map<String, Processor.Factory> registry = new HashMap<>();
registry.put("_name", (r, t, description, c, projectId) -> processor);
ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(scriptService);
Map<String, Object> config = new HashMap<>();
config.put("processor", List.of(Map.of("_name", Map.of())));
Exception exception = expectThrows(Exception.class, () -> forEachFactory.create(registry, null, null, config, null));
assertThat(exception.getMessage(), equalTo("[field] required property is missing"));
}
public void testCreateWithMissingProcessor() {
ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(scriptService);
Map<String, Object> config = new HashMap<>();
config.put("field", "_field");
Exception exception = expectThrows(Exception.class, () -> forEachFactory.create(Map.of(), null, null, config, null));
assertThat(exception.getMessage(), equalTo("[processor] required property is missing"));
}
}
| ForEachProcessorFactoryTests |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/abstractclass/generics/AbstractHuman.java | {
"start": 219,
"end": 313
} | class ____
implements GenericIdentifiable<KeyOfAllBeings>, Identifiable<Key> {
}
| AbstractHuman |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/mapper/NestedValueFetcher.java | {
"start": 1014,
"end": 4023
} | class ____ implements ValueFetcher {
private final String nestedFieldPath;
private final FieldFetcher nestedFieldFetcher;
// the name of the nested field without the full path, i.e. in foo.bar.baz it would be baz
private final String nestedFieldName;
private final String[] nestedPathParts;
public NestedValueFetcher(String nestedField, FieldFetcher nestedFieldFetcher) {
assert nestedField != null && nestedField.isEmpty() == false;
this.nestedFieldPath = nestedField;
this.nestedFieldFetcher = nestedFieldFetcher;
this.nestedPathParts = nestedFieldPath.split("\\.");
this.nestedFieldName = nestedPathParts[nestedPathParts.length - 1];
}
@Override
public List<Object> fetchValues(Source source, int doc, List<Object> includedValues) throws IOException {
ArrayList<Object> nestedEntriesToReturn = new ArrayList<>();
Map<String, Object> filteredSource = new HashMap<>();
Map<String, Object> stub = createSourceMapStub(filteredSource);
List<?> nestedValues = XContentMapValues.extractNestedSources(nestedFieldPath, source.source());
if (nestedValues == null) {
return Collections.emptyList();
}
for (Object entry : nestedValues) {
// add this one entry only to the stub and use this as source lookup
stub.put(nestedFieldName, entry);
Map<String, DocumentField> fetchResult = nestedFieldFetcher.fetch(
Source.fromMap(filteredSource, source.sourceContentType()),
doc
);
Map<String, Object> nestedEntry = new HashMap<>();
for (DocumentField field : fetchResult.values()) {
List<Object> fetchValues = field.getValues();
if (fetchValues.isEmpty() == false) {
String keyInNestedMap = field.getName().substring(nestedFieldPath.length() + 1);
nestedEntry.put(keyInNestedMap, fetchValues);
}
}
if (nestedEntry.isEmpty() == false) {
nestedEntriesToReturn.add(nestedEntry);
}
}
nestedEntriesToReturn.trimToSize();
return nestedEntriesToReturn;
}
// create a filtered source map stub which contains the nested field path
private Map<String, Object> createSourceMapStub(Map<String, Object> filteredSource) {
Map<String, Object> next = filteredSource;
for (int i = 0; i < nestedPathParts.length - 1; i++) {
String part = nestedPathParts[i];
Map<String, Object> newMap = new HashMap<>();
next.put(part, newMap);
next = newMap;
}
return next;
}
@Override
public void setNextReader(LeafReaderContext context) {
this.nestedFieldFetcher.setNextReader(context);
}
@Override
public StoredFieldsSpec storedFieldsSpec() {
return StoredFieldsSpec.NEEDS_SOURCE;
}
}
| NestedValueFetcher |
java | apache__camel | components/camel-dns/src/main/java/org/apache/camel/component/dns/cloud/DnsServiceDiscoveryFactory.java | {
"start": 1226,
"end": 2432
} | class ____ implements ServiceDiscoveryFactory {
private final DnsConfiguration configuration;
public DnsServiceDiscoveryFactory() {
this.configuration = new DnsConfiguration();
}
public DnsServiceDiscoveryFactory(DnsConfiguration configuration) {
this.configuration = configuration;
}
// *************************************************************************
// Properties
// *************************************************************************
public String getProto() {
return configuration.getProto();
}
public void setProto(String proto) {
configuration.setProto(proto);
}
public String getDomain() {
return configuration.getDomain();
}
public void setDomain(String domain) {
configuration.setDomain(domain);
}
// *************************************************************************
// Factory
// *************************************************************************
@Override
public ServiceDiscovery newInstance(CamelContext camelContext) throws Exception {
return new DnsServiceDiscovery(configuration);
}
}
| DnsServiceDiscoveryFactory |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/javadoc/MalformedInlineTagTest.java | {
"start": 1841,
"end": 2230
} | class ____ {
/**
* Add one to value.
*
* @param x an @{code int} value to increment
* @return @{code x} + 1
*/
int addOne(int x) {
return x + 1;
}
}
""")
.addOutputLines(
"Test.java",
"""
| Test |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.